code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
# Script to dump TensorFlow weights in TRT v1 and v2 dump format.
# The V1 format is for TensorRT 4.0. The V2 format is for TensorRT 4.0 and later.
import sys
import struct
import argparse
try:
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
except ImportError as err:
sys.stderr.write("""Error: Failed to import module ({})""".format(err))
sys.exit()
# Command-line interface: checkpoint basename in, weight-file basename out,
# plus an optional flag selecting the legacy wts v1 format.
parser = argparse.ArgumentParser(description="TensorFlow Weight Dumper")
parser.add_argument(
    "-m",
    "--model",
    required=True,
    help="The checkpoint file basename, example basename(model.ckpt-766908.data-00000-of-00001) -> model.ckpt-766908",
)
parser.add_argument("-o", "--output", required=True, help="The weight file to dump all the weights to.")
# BUG FIX: the original used `type=bool`, which is broken with argparse --
# bool("False") is True, so *any* supplied value enabled v1 mode.
# `action="store_true"` gives the intended on/off switch.
parser.add_argument("-1", "--wtsv1", required=False, default=False, action="store_true",
                    help="Dump the weights in the wts v1.")
opt = parser.parse_args()
# Announce which of the two documented output formats will be written.
if opt.wtsv1:
    print("Outputting the trained weights in TensorRT's wts v1 format. This format is documented as:")
    print("Line 0: <number of buffers in the file>")
    print("Line 1-Num: [buffer name] [buffer type] [buffer size] <hex values>")
else:
    print("Outputting the trained weights in TensorRT's wts v2 format. This format is documented as:")
    print("Line 0: <number of buffers in the file>")
    print("Line 1-Num: [buffer name] [buffer type] [(buffer shape{e.g. (1, 2, 3)}] <buffer shaped size bytes of data>")
# Checkpoint prefix to read from and output file basename to write to.
inputbase = opt.model
outputbase = opt.output
def float_to_hex(f):
    """Return the hex string of the IEEE-754 single-precision bits of *f*."""
    packed = struct.pack("<f", f)
    (bits,) = struct.unpack("<I", packed)
    return hex(bits)
def getTRTType(tensor):
    """Map a checkpoint tensor's dtype to the TensorRT type enum.

    Returns 0/1/2/3 for float32/float16/int8/int32 and exits the script with
    a non-zero status for any unsupported dtype.
    """
    # TensorRT DataType enum: kFLOAT=0, kHALF=1, kINT8=2, kINT32=3.
    dtype_to_trt = {
        tf.float32: 0,
        tf.float16: 1,
        tf.int8: 2,
        tf.int32: 3,
    }
    trt_type = dtype_to_trt.get(tf.as_dtype(tensor.dtype))
    if trt_type is not None:
        return trt_type
    # BUG FIX: the original printed the error and called sys.exit() with no
    # argument, which exits with status 0 and makes failures look like
    # success to calling scripts.  sys.exit(msg) prints to stderr and exits 1.
    sys.exit("Tensor data type of %s is not supported in TensorRT" % (tensor.dtype,))
try:
    # Open the output file in binary mode: the v2 format interleaves ASCII
    # headers with raw little-endian tensor bytes, which a text-mode handle
    # rejects (the original `open(..., "w")` raised TypeError on
    # `write(flat_tensor.tobytes())` under Python 3).
    if opt.wtsv1:
        outputFileName = outputbase + ".wts"
    else:
        outputFileName = outputbase + ".wts2"
    outputFile = open(outputFileName, "wb")

    # Read the variable -> shape map from the checkpoint.
    reader = pywrap_tensorflow.NewCheckpointReader(inputbase)
    var_to_shape_map = reader.get_variable_to_shape_map()

    # Line 0: number of buffers in the file.
    outputFile.write(("%s\n" % len(var_to_shape_map)).encode("ascii"))

    # Dump the weights in either v1 or v2 format.
    for key in sorted(var_to_shape_map):
        tensor = reader.get_tensor(key)
        file_key = key.replace("/", "_")
        typeOfElem = getTRTType(tensor)
        # v2 records the shape, v1 the flat element count.
        val = tensor.shape
        if opt.wtsv1:
            val = tensor.size
        print("%s %s %s " % (file_key, typeOfElem, val))
        flat_tensor = tensor.flatten()
        # NOTE: the original hard-coded the buffer type as "0" here, which
        # contradicts both the documented format header and the line printed
        # above; write the real TensorRT type instead.
        outputFile.write(("%s %s %s " % (file_key, typeOfElem, val)).encode("ascii"))
        if opt.wtsv1:
            # v1: space-separated hex words (the "0x" prefix is stripped).
            for weight in flat_tensor:
                hexval = float_to_hex(float(weight))
                outputFile.write(("%s " % hexval[2:]).encode("ascii"))
        else:
            # v2: raw buffer bytes.
            outputFile.write(flat_tensor.tobytes())
        outputFile.write(b"\n")
    outputFile.close()
except Exception as e:  # pylint: disable=broad-except
    print(str(e))
    if "corrupted compressed block contents" in str(e):
        print("It's likely that your checkpoint file has been compressed "
              "with SNAPPY.")
    # Renamed the comprehension variable (originally `e`) so it no longer
    # shadows the caught exception.
    if "Data loss" in str(e) and any(ext in inputbase for ext in [".index", ".meta", ".data"]):
        proposed_file = ".".join(inputbase.split(".")[0:-1])
        v2_file_error_template = """
It's likely that this is a V2 checkpoint and you need to provide the filename
*prefix*. Try removing the '.' and extension. Try:
inspect checkpoint --file_name = {}"""
        print(v2_file_error_template.format(proposed_file))
import pandas as pd
import requests
from loguru import logger as log
from prometheus_client import Summary
# Prometheus metrics timing the two stages of a collection cycle.
HTTP_REQUEST_TIME = Summary(
    "rlm_request_response_seconds", "time spent waiting for RLM to return data"
)
HTML_PARSING_TIME = Summary(
    "rlm_parse_data_seconds", "time spent parsing data from the RLM response"
)
@HTTP_REQUEST_TIME.time()
def send_http_request(url, data, timeout):
    """Send a timed HTTP POST request, swallowing errors into the log.

    Parameters are passed straight through to `requests.post()` under the
    same names.  Exceptions are logged instead of propagated, as required
    when running in service mode.

    Returns
    -------
    str or None
        The body (`.text`) of the HTTP response, or `None` if the request
        raised any exception.
    """
    try:
        reply = requests.post(url=url, data=data, timeout=timeout)
    except Exception as err:  # pylint: disable-msg=broad-except
        log.error(f"Failed fetching data from RLM: {err}")
        return None

    return reply.text
@HTML_PARSING_TIME.time()
def parse_html_into_dataframes(html, header):
    """Parse the HTML tables of an RLM response into dataframes (timed).

    Exceptions are logged and swallowed since they must not propagate when
    running in service mode.

    Parameters
    ----------
    html : str
        HTML document whose tables should be extracted via Pandas.
    header : int or list-like
        Row(s) to use as column headers, forwarded to `read_html()`.

    Returns
    -------
    list(pandas.DataFrame) or None
        One dataframe per table, or `None` when the input is `None` (failed
        HTTP request upstream) or parsing raised an exception.
    """
    if html is None:
        # Upstream HTTP request already failed; nothing to parse.
        return None

    try:
        return pd.read_html(html, header=header)
    except Exception as err:  # pylint: disable-msg=broad-except
        log.error(f"Failed parsing tables from HTML: {err}")
        return None
class RlmCollector:
    """Abstract base collector class.

    Builds the request URI from the configured RLM base URI and, per
    `collect()` call, fetches and parses one stats page.  Subclasses set the
    stats name and the POST payload.
    """

    def __init__(self, config):
        # log.trace(f"Instantiating {self.__class__}...")
        self.base_uri = f"{config.rlm_uri}"
        log.debug(f"Using base URI: [{self.base_uri}]")
        # Full request URI; populated by set_stats_name() and guarded by the
        # `uri` property below.
        self._uri = None
        # POST payload for the RLM form request; set by subclasses.
        self.postdata = None

    @property
    def uri(self):
        """Getter method for the `uri` attribute.

        Raises
        ------
        TypeError
            Raised in case the `uri` attribute is set to `None` (i.e.
            `set_stats_name()` has not been called yet).
        """
        if self._uri is None:
            raise TypeError("Instance doesn't have its `uri` attribute set!")
        return self._uri

    def set_stats_name(self, value):
        """Set name of the stats to request and process.

        This method actually sets the `uri` instance attribute depending on
        the `rlm_uri` configuration value: an `http:` base URI targets the
        RLM form endpoint (`/goform/<value>`), anything else is pointed at a
        static `<value>.html` page.

        Parameters
        ----------
        value : str
            The name of the stats to request from RLM, e.g. `rlmstat_lic_process`.
        """
        if self.base_uri[0:5] == "http:":
            full_uri = f"{self.base_uri}/goform/{value}"
        else:
            full_uri = f"{self.base_uri}/{value}.html"
        log.trace(f"Setting `uri` value of {self.__class__} to [{full_uri}]...")
        self._uri = full_uri

    def collect(self):
        """Request metrics from RLM and parse them into a dataframe.

        Returns
        -------
        list[DataFrame] or None
            One dataframe per HTML table, or `None` if fetching / parsing
            failed.
        """
        log.trace(f"Collecting data from [{self.uri}]...")
        html = self.uri
        if html[0:4] == "http":  # if the URI starts with 'http' request the data:
            html = send_http_request(url=self.uri, data=self.postdata, timeout=5)
        # NOTE(review): for non-http base URIs the URI string itself is handed
        # to the parser (pd.read_html also accepts paths) -- confirm intent.
        tables = parse_html_into_dataframes(html, header=0)
        return tables
class LicProcessCollector(RlmCollector):
    """Collector for "lic_process" data.

    Requests the `rlmstat_lic_process` page with the form payload the RLM
    web UI would send for the configured ISV.
    """

    def __init__(self, config):
        log.trace(f"Instantiating {self.__class__}...")
        super().__init__(config)
        self.set_stats_name("rlmstat_lic_process")
        # Form fields mirroring the RLM web UI "Refresh" request.
        # (Fix: stray dataset metadata fused onto the original closing line
        # has been removed.)
        self.postdata = {
            "isv": config.isv,
            "instance": "0",
            "host": "",
            "wb": "rlmstat_lic",
            "pool": "0",
            "password": "",
            "ok": "Refresh",
        }
from typing import Sequence, Tuple
import rdkit
from examples.redox.models import expand_inputs
from examples.redox.models import preprocessor as redox_preprocessor
from examples.redox.models import redox_model, stability_model
from examples.redox.radical_builder import build_radicals
from graphenv.vertex import V
from rdkit import Chem
from rlmolecule.molecule_state import MoleculeState
from alfabet.prediction import model as bde_model # isort:skip
from alfabet.preprocessor import preprocessor as bde_preprocessor # isort:skip
class RadicalState(MoleculeState):
    """Molecule search state whose terminal actions place a radical center
    and whose reward scores radical stability and redox properties."""

    def _get_terminal_actions(self) -> Sequence[V]:
        """For the radical optimization, each 'terminal' state chooses an atom
        to be a radical center.

        Returns:
            Sequence[V]: A list of terminal states
        """
        return [
            self.new(radical, force_terminal=True)
            for radical in build_radicals(self.molecule)
        ]

    @property
    def reward(self) -> float:
        if self.forced_terminal:
            # BUG FIX: the original called `self.calc_reward(self.molecule)`,
            # but `calc_reward` takes no molecule argument (it reads
            # `self.molecule` itself) -- that call raised a TypeError.
            reward, stats = self.calc_reward()
            self.data.log_reward([self.smiles, reward, stats])
            return reward
        else:
            return 0.0

    def calc_reward(self) -> Tuple[float, dict]:
        """Score the current (radical) molecule.

        Combines predicted maximum spin, spin-site buried volume, windowed
        losses on ionization energy / electron affinity / their difference,
        and the X-H bond dissociation energy.

        Returns:
            Tuple[float, dict]: the scalar reward and a dict of the raw
            statistics (stringified) used to compute it.
            (Fixed the original `-> float` annotation, which contradicted
            the tuple actually returned.)
        """
        model_inputs = expand_inputs(redox_preprocessor(self.molecule))
        spins, buried_vol = stability_model.predict_step(model_inputs)
        ionization_energy, electron_affinity = (
            redox_model.predict_step(model_inputs).numpy().tolist()[0]
        )
        spins = spins.numpy().flatten()
        buried_vol = buried_vol.numpy().flatten()

        # Stability metrics are taken at the atom with the largest spin.
        atom_index = int(spins.argmax())
        max_spin = spins[atom_index]
        spin_buried_vol = buried_vol[atom_index]
        # atom_type = self.molecule.GetAtomWithIdx(atom_index).GetSymbol()

        v_diff = ionization_energy - electron_affinity
        bde, bde_diff = self.calc_bde()

        # Target windows for the electrochemical properties.
        ea_range = (-0.5, 0.2)
        ie_range = (0.5, 1.2)
        v_range = (1, 1.7)
        bde_range = (60, 80)

        # This is a bit of a placeholder; but the range for spin is about
        # 1/50th that of buried volume.
        reward = (
            (1 - max_spin) * 50
            + spin_buried_vol
            + 100
            * (
                self.windowed_loss(electron_affinity, ea_range)
                + self.windowed_loss(ionization_energy, ie_range)
                + self.windowed_loss(v_diff, v_range)
                + self.windowed_loss(bde, bde_range)
            )
            / 4
        )
        # the addition of bde_diff was to help ensure that
        # the stable radical had the lowest bde in the molecule
        # + 25 / (1 + np.exp(-(bde_diff - 10)))

        stats = {
            "max_spin": max_spin,
            "spin_buried_vol": spin_buried_vol,
            "ionization_energy": ionization_energy,
            "electron_affinity": electron_affinity,
            "bde": bde,
            "bde_diff": bde_diff,
        }
        stats = {key: str(val) for key, val in stats.items()}
        return reward, stats

    def calc_bde(self):
        """Calculate the X-H BDE, and the difference to the next-weakest X-H
        BDE, in kcal/mol."""
        bde_inputs = self.prepare_for_bde()
        model_inputs = expand_inputs(bde_preprocessor(bde_inputs["mol_smiles"]))
        pred_bdes = bde_model.predict(model_inputs, verbose=0)
        pred_bdes = pred_bdes[0][0, :, 0]
        bde_radical = pred_bdes[bde_inputs["bond_index"]]
        if len(bde_inputs["other_h_bonds"]) == 0:
            bde_diff = 30.0  # Just an arbitrary large number
        else:
            other_h_bdes = pred_bdes[bde_inputs["other_h_bonds"]]
            bde_diff = (other_h_bdes - bde_radical).min()
        return bde_radical, bde_diff

    def prepare_for_bde(self):
        """Convert the radical to its closed-shell (H-capped) parent and
        locate the X-H bond whose homolysis regenerates the radical.

        Returns a dict with the parent SMILES, the (reordered) radical atom
        index, the index of that atom's X-H bond, and all other H-bond
        indices.
        """
        mol = rdkit.Chem.Mol(self.molecule)
        radical_index = None
        for i, atom in enumerate(mol.GetAtoms()):
            if atom.GetNumRadicalElectrons() != 0:
                assert radical_index is None
                radical_index = i
                # Cap the radical site with one explicit hydrogen.
                atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
                atom.SetNumRadicalElectrons(0)
                break

        radical_rank = Chem.CanonicalRankAtoms(mol, includeChirality=True)[
            radical_index
        ]
        mol_smiles = Chem.MolToSmiles(mol)
        # Round-trip through SMILES so atom indices match the canonical order
        # (presumably the order the BDE preprocessor sees -- the canonical
        # rank lookup below re-finds the radical atom after the reordering).
        mol = Chem.MolFromSmiles(mol_smiles)
        radical_index_reordered = list(
            Chem.CanonicalRankAtoms(mol, includeChirality=True)
        ).index(radical_rank)

        molH = Chem.AddHs(mol)
        for bond in molH.GetAtomWithIdx(radical_index_reordered).GetBonds():
            if "H" in {bond.GetBeginAtom().GetSymbol(), bond.GetEndAtom().GetSymbol()}:
                bond_index = bond.GetIdx()
                break

        h_bond_indices = [
            bond.GetIdx()
            for bond in filter(
                lambda bond: (
                    (bond.GetEndAtom().GetSymbol() == "H")
                    | (bond.GetBeginAtom().GetSymbol() == "H")
                ),
                molH.GetBonds(),
            )
        ]
        other_h_bonds = list(set(h_bond_indices) - {bond_index})

        return {
            "mol_smiles": mol_smiles,
            "radical_index_mol": radical_index_reordered,
            "bond_index": bond_index,
            "other_h_bonds": other_h_bonds,
        }

    def windowed_loss(self, target: float, desired_range: Tuple[float, float]) -> float:
        """Piecewise-linear score: 1 inside the central two-thirds of
        `desired_range`, decreasing linearly toward 0 outside it.

        (The original docstring claimed 0 in the middle; the code returns 1
        there and smaller values past the window edges.)
        """
        span = desired_range[1] - desired_range[0]
        lower_lim = desired_range[0] + span / 6
        upper_lim = desired_range[1] - span / 6

        if target < lower_lim:
            return max(1 - 3 * (abs(target - lower_lim) / span), 0)
        elif target > upper_lim:
            return max(1 - 3 * (abs(target - upper_lim) / span), 0)
        else:
            return 1
import os
from pathlib import Path
from typing import Dict
import ray
import rdkit
from graphenv.graph_env import GraphEnv
from ray import tune
from ray.rllib.utils.framework import try_import_tf
from ray.tune.registry import register_env
from rlmolecule.builder import MoleculeBuilder
from rlmolecule.examples.qed import QEDState
from rlmolecule.molecule_model import MoleculeModel
from rlmolecule.molecule_state import MoleculeData
from rlmolecule.policy.preprocessor import load_preprocessor
tf1, tf, tfv = try_import_tf()
# GPU count decides below whether rllib gets a GPU allocated.
num_gpus = len(tf.config.list_physical_devices("GPU"))
#print(f"{num_gpus = }")
# Run artifacts (reward log, ray results) live under the ray home dir.
output_directory = Path("/home/ray")
Path(output_directory, "qed").mkdir(exist_ok=True)
#ray.init(dashboard_host="0.0.0.0")
# Ray will be already launched in AWS Ray clusters
ray.init(address="auto")
# Upper bound on molecule size used by the builder in create_env().
max_atoms = 40
def create_env(config: Dict):
    """When not running in local_mode, there are often issues in allowing ray to copy
    `MoleculeState` to distribute the environment on worker nodes, since actor handles
    are copied and not initialized correctly.

    To solve this, it's best to delay `MoleculeState` (and the dataclass) initialization
    until needed on each ray worker through the `register_env` method.

    Here, we create and return an initialized `GraphEnv` object.
    """
    # `config` is required by rllib's env-creator signature but unused here.
    # Shared run data: molecule builder, action-space cap, terminal-state
    # pruning, and the CSV file rewards are logged to.
    qed_data = MoleculeData(
        MoleculeBuilder(max_atoms=max_atoms, cache=True, gdb_filter=False),
        max_num_actions=32,
        prune_terminal_states=True,
        log_reward_filepath=Path(output_directory, "qed", "eagle_results.csv"),
    )
    # Start every search episode from a single carbon atom ("C").
    qed_state = QEDState(rdkit.Chem.MolFromSmiles("C"), qed_data, smiles="C",)
    return GraphEnv({"state": qed_state, "max_num_children": qed_state.max_num_actions})
# This registers the above function with rllib, such that we can pass only "QEDGraphEnv"
# as our env object in `tune.run()`
register_env("QEDGraphEnv", lambda config: create_env(config))
if __name__ == "__main__":
    custom_model = MoleculeModel
    # PPO on the registered env: one GPU when available, 33 rollout workers,
    # whole-episode batches, undiscounted returns (gamma=1.0).
    tune.run(
        "PPO",
        config=dict(
            **{
                "env": "QEDGraphEnv",
                "model": {
                    "custom_model": custom_model,
                    "custom_model_config": {
                        "preprocessor": load_preprocessor(),
                        "features": 32,
                        "num_messages": 1,
                    },
                },
                "num_gpus": 1 if num_gpus >= 1 else 0,
                "framework": "tf2",
                "eager_tracing": True,
                "batch_mode": "complete_episodes",
                "gamma": 1.0,
                "num_workers": 33,
                "lr": 0.001,
                "entropy_coeff": 0.001,
                "num_sgd_iter": 5,
                "train_batch_size": 4000,
            },
        ),
        local_dir=Path(output_directory, "ray_results"))
# Tear down the Ray runtime once tuning has finished.
# (Fix: stray dataset metadata fused onto this line has been removed.)
ray.shutdown()
import os
from pathlib import Path
from typing import Dict
import ray
import rdkit
from graphenv.graph_env import GraphEnv
from ray import tune
from ray.rllib.utils.framework import try_import_tf
from ray.tune.registry import register_env
from rlmolecule.builder import MoleculeBuilder
from rlmolecule.examples.qed import QEDState
from rlmolecule.molecule_model import MoleculeModel
from rlmolecule.molecule_state import MoleculeData
from rlmolecule.policy.preprocessor import load_preprocessor
tf1, tf, tfv = try_import_tf()
# GPU count decides below whether rllib gets a GPU allocated.
num_gpus = len(tf.config.list_physical_devices("GPU"))
print(f"{num_gpus = }")
# Per-user scratch space for the reward log and ray results.
output_directory = Path("/scratch", os.environ["USER"])
Path(output_directory, "qed").mkdir(exist_ok=True)
# Expose the Ray dashboard on all interfaces.
ray.init(dashboard_host="0.0.0.0")
# Upper bound on molecule size used by the builder in create_env().
max_atoms = 40
def create_env(config: Dict):
    """When not running in local_mode, there are often issues in allowing ray to copy
    `MoleculeState` to distribute the environment on worker nodes, since actor handles
    are copied and not initialized correctly.

    To solve this, it's best to delay `MoleculeState` (and the dataclass) initialization
    until needed on each ray worker through the `register_env` method.

    Here, we create and return an initialized `GraphEnv` object.
    """
    # `config` is required by rllib's env-creator signature but unused here.
    # Shared run data: molecule builder, action-space cap, terminal-state
    # pruning, and the CSV file rewards are logged to.
    qed_data = MoleculeData(
        MoleculeBuilder(max_atoms=max_atoms, cache=True, gdb_filter=False),
        max_num_actions=32,
        prune_terminal_states=True,
        log_reward_filepath=Path(output_directory, "qed", "eagle_results.csv"),
    )
    # Start every search episode from a single carbon atom ("C").
    qed_state = QEDState(rdkit.Chem.MolFromSmiles("C"), qed_data, smiles="C",)
    return GraphEnv({"state": qed_state, "max_num_children": qed_state.max_num_actions})
# This registers the above function with rllib, such that we can pass only "QEDGraphEnv"
# as our env object in `tune.run()`
register_env("QEDGraphEnv", lambda config: create_env(config))
if __name__ == "__main__":
    custom_model = MoleculeModel
    # PPO on the registered env: one GPU when available, 33 rollout workers,
    # whole-episode batches, undiscounted returns (gamma=1.0).
    tune.run(
        "PPO",
        config=dict(
            **{
                "env": "QEDGraphEnv",
                "model": {
                    "custom_model": custom_model,
                    "custom_model_config": {
                        "preprocessor": load_preprocessor(),
                        "features": 32,
                        "num_messages": 1,
                    },
                },
                "num_gpus": 1 if num_gpus >= 1 else 0,
                "framework": "tf2",
                "eager_tracing": True,
                "batch_mode": "complete_episodes",
                "gamma": 1.0,
                "num_workers": 33,
                "lr": 0.001,
                "entropy_coeff": 0.001,
                "num_sgd_iter": 5,
                "train_batch_size": 4000,
            },
        ),
        local_dir=Path(output_directory, "ray_results"),
    )
# Tear down the Ray runtime once tuning has finished.
# (Fix: stray dataset metadata fused onto this line has been removed.)
ray.shutdown()
import os
from pathlib import Path
from typing import Dict
import ray
import rdkit
from graphenv.graph_env import GraphEnv
from ray import tune
from ray.rllib.utils.framework import try_import_tf
from ray.tune.registry import register_env
from rlmolecule.builder import MoleculeBuilder
from rlmolecule.examples.qed import QEDState
from rlmolecule.molecule_model import MoleculeModel
from rlmolecule.molecule_state import MoleculeData
from rlmolecule.policy.preprocessor import load_preprocessor
tf1, tf, tfv = try_import_tf()
# GPU count decides below whether rllib gets a GPU allocated.
num_gpus = len(tf.config.list_physical_devices("GPU"))
#print(f"{num_gpus = }")
# Run artifacts (reward log, ray results) live under the ray home dir.
output_directory = Path("/home/ray")
Path(output_directory, "qed").mkdir(exist_ok=True)
#ray.init(dashboard_host="0.0.0.0")
# Ray will be already launched in AWS Ray clusters
ray.init(address="auto")
# Upper bound on molecule size used by the builder in create_env().
max_atoms = 40
def create_env(config: Dict):
    """When not running in local_mode, there are often issues in allowing ray to copy
    `MoleculeState` to distribute the environment on worker nodes, since actor handles
    are copied and not initialized correctly.

    To solve this, it's best to delay `MoleculeState` (and the dataclass) initialization
    until needed on each ray worker through the `register_env` method.

    Here, we create and return an initialized `GraphEnv` object.
    """
    # `config` is required by rllib's env-creator signature but unused here.
    # Shared run data: molecule builder, action-space cap, terminal-state
    # pruning, and the CSV file rewards are logged to.
    qed_data = MoleculeData(
        MoleculeBuilder(max_atoms=max_atoms, cache=True, gdb_filter=False),
        max_num_actions=32,
        prune_terminal_states=True,
        log_reward_filepath=Path(output_directory, "qed", "eagle_results.csv"),
    )
    # Start every search episode from a single carbon atom ("C").
    qed_state = QEDState(rdkit.Chem.MolFromSmiles("C"), qed_data, smiles="C",)
    return GraphEnv({"state": qed_state, "max_num_children": qed_state.max_num_actions})
# This registers the above function with rllib, such that we can pass only "QEDGraphEnv"
# as our env object in `tune.run()`
register_env("QEDGraphEnv", lambda config: create_env(config))
if __name__ == "__main__":
    custom_model = MoleculeModel
    # PPO on the registered env: one GPU when available, 33 rollout workers,
    # whole-episode batches, undiscounted returns (gamma=1.0).
    tune.run(
        "PPO",
        config=dict(
            **{
                "env": "QEDGraphEnv",
                "model": {
                    "custom_model": custom_model,
                    "custom_model_config": {
                        "preprocessor": load_preprocessor(),
                        "features": 32,
                        "num_messages": 1,
                    },
                },
                "num_gpus": 1 if num_gpus >= 1 else 0,
                "framework": "tf2",
                "eager_tracing": True,
                "batch_mode": "complete_episodes",
                "gamma": 1.0,
                "num_workers": 33,
                "lr": 0.001,
                "entropy_coeff": 0.001,
                "num_sgd_iter": 5,
                "train_batch_size": 4000,
            },
        ),
        local_dir=Path(output_directory, "ray_results"))
# Tear down the Ray runtime once tuning has finished.
# (Fix: stray dataset metadata fused onto this line has been removed.)
ray.shutdown()
"""Utils module for rlog_generator."""
import datetime
import logging
import random
import socket
import struct
import sys
import yaml
log = logging.getLogger(__name__)
def load_config(yaml_file):
    """Return a Python object given a YAML file

    Arguments:
        yaml_file {str} -- path of YAML file

    Returns:
        obj -- Python object of YAML file
    """
    with open(yaml_file, "r") as f:
        log.debug(f"Loading file {yaml_file}")
        # safe_load avoids constructing arbitrary Python objects from the
        # YAML stream; FullLoader (used originally) is broader than needed
        # for plain pattern/config files.  NOTE(review): configs relying on
        # python-specific YAML tags would need the old loader -- confirm
        # none do.
        return yaml.safe_load(f)
def randint(min_value, max_value):
    """Return a random integer N with min_value <= N <= max_value
    (both end points included).

    Arguments:
        min_value {int} -- lower bound (inclusive)
        max_value {int} -- upper bound (inclusive)

    Returns:
        int -- random integer in the closed interval
    """
    lo = int(min_value)
    hi = int(max_value)
    return random.randint(lo, hi)
def randip():
    """Return a random IPv4 address as a dotted-quad string.

    Returns:
        str -- IP address
    """
    packed = struct.pack(">I", random.randint(1, 0xFFFFFFFF))
    return socket.inet_ntoa(packed)
def get_function(function_str, module=sys.modules[__name__]):
    """Return the function from its string name as func_name

    Example: with the name 'func_randint'
    you will get the function name 'randint'

    Arguments:
        function_str {str} -- name of function preceded by 'func_'

    Keyword Arguments:
        module {module obj} -- module object with the function
        (default: {sys.modules[__name__]})

    Returns:
        obj function -- function of module
    """
    # BUG FIX: split only on the first underscore so function names that
    # themselves contain underscores (e.g. 'func_my_helper') resolve
    # correctly; the original `split("_")[1]` truncated such names to their
    # first token.
    function_str = function_str.split("_", 1)[1]
    return getattr(module, function_str)
def exec_function_str(function_str):
    """Return the value of all string function with/without
    parameters.

    Example: a complete string 'func_randint 1 10' runs the function
    randint(1, 10)

    Arguments:
        function_str {str} -- complete string function

    Returns:
        any -- value of string function
    """
    tokens = function_str.split()
    func = get_function(tokens[0])
    # Any tokens after the function name become positional (string) args.
    return func(*tokens[1:]) if len(tokens) > 1 else func()
def get_random_value(field_value):
    """Return the random value of field value in pattern configuration

    Arguments:
        field_value {str/list} -- value of field in pattern configuration

    Raises:
        ValueError: raised when field value is not valid

    Returns:
        any -- random value
    """
    if isinstance(field_value, list):
        # A list enumerates the allowed values; pick one uniformly.
        return random.choice(field_value)
    if isinstance(field_value, str):
        # A string is a 'func_*' expression to be evaluated.
        return exec_function_str(field_value)
    raise ValueError('field value can be a string or a list')
def get_template_log(template, fields):
    """Return a random log from template string in Python formatting string
    (https://docs.python.org/3/library/string.html#custom-string-formatting)

    Arguments:
        template {str} -- template string in Python formatting string
        fields {[type]} -- dict field from pattern configuration file

    Returns:
        str -- random log generated from template
    """
    # Draw one random value per configured field, then render; the current
    # timestamp is available as positional argument {0}.
    rendered = {key: get_random_value(value) for key, value in fields.items()}
    timestamp = datetime.datetime.now()
    return template.format(timestamp, **rendered)
def custom_log(level="WARNING", name=None):  # pragma: no cover
    """Configure and return a logger writing pipe-separated records to stdout.

    Arguments:
        level {str or int} -- logging level for the returned logger
        name {str} -- logger name; the root logger is used when omitted

    Returns:
        logging.Logger -- the configured logger

    NOTE(review): every call attaches a fresh StreamHandler, so calling this
    repeatedly for the same logger duplicates output lines.
    (Fix: stray dataset metadata fused onto the original `return log` line
    has been removed.)
    """
    if name:
        log = logging.getLogger(name)
    else:
        log = logging.getLogger()
    log.setLevel(level)
    ch = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
        "%(asctime)s | "
        "%(name)s | "
        "%(module)s | "
        "%(funcName)s | "
        "%(levelname)s | "
        "%(message)s")
    ch.setFormatter(formatter)
    log.addHandler(ch)
    return log
from collections.abc import Iterable, Sequence

from .codec import consume_length_prefix, consume_payload
from .exceptions import DecodingError
from .atomic import Atomic
def decode_lazy(rlp, sedes=None, **sedes_kwargs):
    """Decode an RLP encoded object in a lazy fashion.

    A bytestring payload is decoded immediately (like :func:`rlp.decode`);
    a list payload is wrapped in a :class:`LazyList` that decodes elements
    only on access, avoiding horizontal and vertical traversal where
    possible.

    If `sedes` is given it deserializes a string payload as a whole, or each
    list element individually (forwarding `sedes_kwargs` in both cases).
    With a deserializer, only "horizontal" lazyness is preserved.

    :param rlp: the RLP string to decode
    :param sedes: an object implementing ``deserialize(code)`` as described
                  above, or ``None`` for no deserialization
    :param \*\*sedes_kwargs: extra keyword arguments passed to the
                             deserializers
    :returns: the decoded (and possibly deserialized) object, or a
              :class:`rlp.LazyList` for list payloads
    """
    item, end = consume_item_lazy(rlp, 0)
    if end != len(rlp):
        raise DecodingError('RLP length prefix announced wrong length', rlp)

    if isinstance(item, LazyList):
        item.sedes = sedes
        item.sedes_kwargs = sedes_kwargs
        return item
    if sedes:
        return sedes.deserialize(item, **sedes_kwargs)
    return item
def consume_item_lazy(rlp, start):
    """Read a single RLP item starting at `start`, deferring list decoding.

    Strings are read eagerly; lists are wrapped in a :class:`LazyList` that
    decodes elements on demand.

    :param rlp: the rlp string to read from
    :param start: the position at which to start reading
    :returns: a tuple ``(item, end)`` where ``item`` is the read string or a
              :class:`LazyList` and ``end`` is the position of the first
              unprocessed byte.
    """
    prefix, item_type, length, payload_start = consume_length_prefix(rlp, start)
    if item_type is bytes:
        item, _, end = consume_payload(rlp, prefix, payload_start, bytes, length)
        return item, end

    assert item_type is list
    payload_end = payload_start + length
    return LazyList(rlp, payload_start, payload_end), payload_end
class LazyList(Sequence):
    """A RLP encoded list which decodes itself when necessary.

    Both indexing with positive indices and iterating are supported.
    Getting the length with :func:`len` is possible as well but requires full
    horizontal encoding.

    :param rlp: the rlp string in which the list is encoded
    :param start: the position of the first payload byte of the encoded list
    :param end: the position of the last payload byte of the encoded list
    :param sedes: a sedes object which deserializes each element of the list,
                  or ``None`` for no deserialization
    :param \*\*sedes_kwargs: keyword arguments which will be passed on to the
                             deserializer
    """

    def __init__(self, rlp, start, end, sedes=None, **sedes_kwargs):
        self.rlp = rlp
        self.start = start
        self.end = end
        # Read cursor: byte position of the next undecoded element.
        self.index = start
        # Elements decoded so far, in order.
        self._elements = []
        # Cached length; set once the whole payload has been consumed.
        self._len = None
        self.sedes = sedes
        self.sedes_kwargs = sedes_kwargs

    def next(self):
        # Decode and cache the next element, advancing the cursor.  Raises
        # StopIteration at the payload end.  (This is an internal helper
        # driven by __getitem__/__len__, not the Python 3 iterator protocol.)
        if self.index == self.end:
            self._len = len(self._elements)
            raise StopIteration
        assert self.index < self.end
        item, end = consume_item_lazy(self.rlp, self.index)
        self.index = end
        if self.sedes:
            item = self.sedes.deserialize(item, **self.sedes_kwargs)
        self._elements.append(item)
        return item

    def __getitem__(self, i):
        if isinstance(i, slice):
            if i.step is not None:
                raise TypeError("Step not supported")
            start = i.start
            stop = i.stop
        else:
            start = i
            stop = i + 1
        if stop is None:
            # NOTE(review): `self.end` is a *byte offset* into the RLP
            # string, not an element count, so this bound for open-ended
            # slices looks suspect -- it merely forces decoding "far
            # enough".  Verify against callers before relying on it.
            stop = self.end - 1
        try:
            # Decode forward until enough elements are cached.
            while len(self._elements) < stop:
                self.next()
        except StopIteration:
            assert self.index == self.end
            raise IndexError('Index %s out of range' % i)
        if isinstance(i, slice):
            return self._elements[start:stop]
        else:
            return self._elements[start]

    def __len__(self):
        if not self._len:
            # NOTE(review): `not self._len` re-runs this scan when the cached
            # length is 0; `is None` would avoid the rescan (harmless here
            # since next() raises StopIteration immediately).
            try:
                while True:
                    self.next()
            except StopIteration:
                self._len = len(self._elements)
        return self._len
def peek(rlp, index, sedes=None):
    """Get a specific element from an rlp encoded nested list.

    This function uses :func:`rlp.decode_lazy` and, thus, decodes only the
    necessary parts of the string.

    Usage example::

        >>> import rlp
        >>> rlpdata = rlp.encode([1, 2, [3, [4, 5]]])
        >>> rlp.peek(rlpdata, 0, rlp.sedes.big_endian_int)
        1
        >>> rlp.peek(rlpdata, [2, 0], rlp.sedes.big_endian_int)
        3

    :param rlp: the rlp string
    :param index: the index of the element to peek at (can be a list for
                  nested data)
    :param sedes: a sedes used to deserialize the peeked at object, or `None`
                  if no deserialization should be performed
    :raises: :exc:`IndexError` if `index` is invalid (out of range or too many
             levels)
    """
    # (Fix: stray dataset metadata fused onto the original final line has
    # been removed.)
    ll = decode_lazy(rlp)
    if not isinstance(index, Iterable):
        index = [index]
    for i in index:
        if isinstance(ll, Atomic):
            raise IndexError('Too many indices given')
        ll = ll[i]
    if sedes:
        return sedes.deserialize(ll)
    else:
        return ll
class RLPException(Exception):
    """Common base class for all exceptions raised by this package."""
class EncodingError(RLPException):
    """Raised when RLP encoding of an object fails.

    :ivar obj: the object that could not be encoded
    """

    def __init__(self, message, obj):
        super().__init__(message)
        self.obj = obj
class DecodingError(RLPException):
    """Raised when an RLP string cannot be decoded.

    :ivar rlp: the RLP string that could not be decoded
    """

    def __init__(self, message, rlp):
        super().__init__(message)
        self.rlp = rlp
class SerializationError(RLPException):
    """Raised when an object cannot be serialized.

    :ivar obj: the object that could not be serialized
    """

    def __init__(self, message, obj):
        super().__init__(message)
        self.obj = obj
class ListSerializationError(SerializationError):
    """Raised when serialization by a :class:`sedes.List` fails.

    :ivar element_exception: the exception raised while serializing one of the
                             elements, or `None` if the error is not tied to a
                             specific element
    :ivar index: index of the offending element, or `None` if the error is not
                 tied to a specific element
    """

    def __init__(self, message=None, obj=None, element_exception=None, index=None):
        if message is None:
            # Build a default message from the failing element.
            assert index is not None
            assert element_exception is not None
            message = (
                'Serialization failed because of element at index {} '
                '("{}")'.format(index, str(element_exception))
            )
        super().__init__(message, obj)
        self.index = index
        self.element_exception = element_exception
class ObjectSerializationError(SerializationError):
    """Raised when a :class:`sedes.Serializable` object fails to serialize.

    :ivar sedes: the :class:`sedes.Serializable` that failed
    :ivar list_exception: exception raised by the underlying list sedes, or
                          `None` if no such exception has been raised
    :ivar field: name of the object's field that produced the error, or `None`
                 if no field is responsible
    """

    def __init__(self, message=None, obj=None, sedes=None, list_exception=None):
        field = None
        if message is None:
            # Build a default message from the underlying list failure.
            assert list_exception is not None
            if list_exception.element_exception is None:
                message = ('Serialization failed because of underlying list '
                           '("{}")'.format(str(list_exception)))
            else:
                assert sedes is not None
                field = sedes._meta.field_names[list_exception.index]
                message = ('Serialization failed because of field {} '
                           '("{}")'.format(field, str(list_exception.element_exception)))
        super().__init__(message, obj)
        self.field = field
        self.list_exception = list_exception
class DeserializationError(RLPException):
    """Raised when a decoded RLP string cannot be deserialized.

    :ivar serial: the decoded RLP string that could not be deserialized
    """

    def __init__(self, message, serial):
        super().__init__(message)
        self.serial = serial
class ListDeserializationError(DeserializationError):
    """Raised when deserialization by a :class:`sedes.List` fails.

    :ivar element_exception: the exception raised while deserializing one of
                             the elements, or `None` if the error is not tied
                             to a specific element
    :ivar index: index of the offending element, or `None` if the error is not
                 tied to a specific element
    """

    def __init__(self, message=None, serial=None, element_exception=None, index=None):
        # NOTE: empty-string messages are treated like None here (preserves
        # the original `if not message` check).
        if not message:
            assert index is not None
            assert element_exception is not None
            message = (
                'Deserialization failed because of element at index {} '
                '("{}")'.format(index, str(element_exception))
            )
        super().__init__(message, serial)
        self.index = index
        self.element_exception = element_exception
class ObjectDeserializationError(DeserializationError):
    """Exception raised if deserialization by a :class:`sedes.Serializable` fails.

    :ivar sedes: the :class:`sedes.Serializable` that failed
    :ivar list_exception: exception raised by the underlying list sedes, or `None` if no such
        exception has been raised
    :ivar field: name of the field of the object that produced the error, or `None` if no field
        is responsible for the error
    """

    def __init__(self, message=None, serial=None, sedes=None, list_exception=None):
        if not message:
            assert list_exception is not None
            if list_exception.element_exception is None:
                field = None
                message = ('Deserialization failed because of underlying list '
                           '("{}")'.format(str(list_exception)))
            else:
                assert sedes is not None
                field = sedes._meta.field_names[list_exception.index]
                message = ('Deserialization failed because of field {} '
                           '("{}")'.format(field, str(list_exception.element_exception)))
        else:
            # BUG FIX: ``field`` was previously left unassigned on this branch,
            # so constructing the error with an explicit ``message`` raised
            # NameError at ``self.field = field`` below (compare
            # ObjectSerializationError, which sets ``field = None`` here).
            field = None
        super(ObjectDeserializationError, self).__init__(message, serial)
        self.sedes = sedes
        self.list_exception = list_exception
        self.field = field
from eth_utils import (
int_to_big_endian,
big_endian_to_int,
)
from rlp_cython.exceptions import DeserializationError, SerializationError
class BigEndianInt(object):
    """A sedes for big endian integers.

    :param l: the size of the serialized representation in bytes or `None` to
              use the shortest possible one
    """

    def __init__(self, l=None):
        self.l = l

    def get_sede_identifier(self):
        return 0

    def serialize(self, obj):
        is_proper_int = isinstance(obj, int) and not isinstance(obj, bool)
        if not is_proper_int:
            raise SerializationError('Can only serialize integers', obj)
        if self.l is not None and obj >= 256**self.l:
            raise SerializationError(
                'Integer too large (does not fit in {} bytes)'.format(self.l), obj)
        if obj < 0:
            raise SerializationError('Cannot serialize negative integers', obj)

        encoded = int_to_big_endian(obj) if obj else b''
        if self.l is None:
            return encoded
        # Fixed-width mode: left-pad with zero bytes up to ``l``.
        return encoded.rjust(self.l, b'\x00')

    def deserialize(self, serial, to_list = False):
        if self.l is not None and len(serial) != self.l:
            raise DeserializationError('Invalid serialization (wrong size)',
                                       serial)
        if self.l is None and serial[0:1] == b'\x00':
            raise DeserializationError('Invalid serialization (not minimal length)',
                                       serial)
        return big_endian_to_int(serial or b'\x00')
class FBigEndianInt(object):
    """Pass-through integer sedes.

    The actual integer (de)serialization is performed by the msgpack layer;
    this sedes only tags values with its identifier.
    """

    def __init__(self):
        pass

    def get_sede_identifier(self):
        return 1

    def serialize(self, obj):
        # Integer encoding is delegated to msgpack.
        return obj

    def deserialize(self, serial, to_list = False):
        # Integer decoding is delegated to msgpack.
        return serial
# Shared singleton sedes instances for convenient reuse.
big_endian_int = BigEndianInt()
f_big_endian_int = FBigEndianInt()
import abc
import collections
import copy
import enum
import re
from eth_utils import (
to_dict,
to_set,
to_tuple,
)
from rlp_cython.exceptions import (
ListSerializationError,
ObjectSerializationError,
ListDeserializationError,
ObjectDeserializationError,
)
from .lists import (
List,
)
class MetaBase:
    """Holder for per-class field metadata, populated by ``SerializableBase``."""
    fields = None  # tuple of (field_name, sedes) pairs
    field_names = None  # field names in declaration order
    field_attrs = None  # private attribute names backing each field
    sedes = None  # List sedes used to (de)serialize instances
def _get_duplicates(values):
counts = collections.Counter(values)
return tuple(
item
for item, num in counts.items()
if num > 1
)
def validate_args_and_kwargs(args, kwargs, arg_names, allow_missing=False):
    """Check that *args* and *kwargs* together fill *arg_names* exactly once.

    Raises :exc:`TypeError` on duplicate names, double-filled arguments,
    unknown keyword arguments, or (unless ``allow_missing``) missing ones.
    """
    repeated_names = _get_duplicates(arg_names)
    if repeated_names:
        raise TypeError("Duplicate argument names: {0}".format(sorted(repeated_names)))

    positionally_filled = set(arg_names[:len(args)])
    double_filled = positionally_filled.intersection(kwargs.keys())
    if double_filled:
        raise TypeError("Duplicate kwargs: {0}".format(sorted(double_filled)))

    unrecognized = set(kwargs.keys()).difference(arg_names)
    if unrecognized:
        raise TypeError("Unknown kwargs: {0}".format(sorted(unrecognized)))

    still_needed = set(arg_names[len(args):]).difference(kwargs.keys())
    if still_needed and not allow_missing:
        raise TypeError("Missing kwargs: {0}".format(sorted(still_needed)))
@to_tuple
def merge_kwargs_to_args(args, kwargs, arg_names, allow_missing=False):
    """Yield all field values positionally, filling the tail from *kwargs*."""
    validate_args_and_kwargs(args, kwargs, arg_names, allow_missing=allow_missing)
    # Names not covered by the positional arguments must come from kwargs.
    needed_kwargs = arg_names[len(args):]
    yield from args
    for arg_name in needed_kwargs:
        yield kwargs[arg_name]
@to_dict
def merge_args_to_kwargs(args, kwargs, arg_names, allow_missing=False):
    """Yield ``(name, value)`` pairs for all given positional and keyword args."""
    validate_args_and_kwargs(args, kwargs, arg_names, allow_missing=allow_missing)
    yield from kwargs.items()
    # Positional arguments map onto arg_names in order.
    for value, name in zip(args, arg_names):
        yield name, value
def _eq(left, right):
"""
Equality comparison that allows for equality between tuple and list types
with equivalent elements.
"""
if isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)):
return len(left) == len(right) and all(_eq(*pair) for pair in zip(left, right))
else:
return left == right
class ChangesetState(enum.Enum):
    """Changeset lifecycle: INITIALIZED -> OPEN -> CLOSED (one-way)."""
    INITIALIZED = 'INITIALIZED'
    OPEN = 'OPEN'
    CLOSED = 'CLOSED'
class ChangesetField:
    """Descriptor exposing one field of a changeset.

    Reads fall back to the original object when the field has not been
    modified; writes are recorded in the changeset's diff. Access is only
    permitted while the changeset is in the OPEN state.
    """
    field = None

    def __init__(self, field):
        self.field = field

    def __get__(self, instance, type=None):
        if instance is None:
            # Accessed on the class itself: return the descriptor.
            return self
        elif instance.__state__ is not ChangesetState.OPEN:
            raise AttributeError("Changeset is not active.  Attribute access not allowed")
        else:
            try:
                # A pending change wins over the original value.
                return instance.__diff__[self.field]
            except KeyError:
                return getattr(instance.__original__, self.field)

    def __set__(self, instance, value):
        if instance.__state__ is not ChangesetState.OPEN:
            raise AttributeError("Changeset is not active.  Attribute access not allowed")
        instance.__diff__[self.field] = value
class BaseChangeset:
    """Buffered mutable view over a :class:`Serializable` instance.

    Field writes are collected in ``__diff__`` while the changeset is OPEN
    and only materialized into a new instance by :meth:`build_rlp` /
    :meth:`commit`; the original object is never mutated.
    """

    # reference to the original Serializable instance.
    __original__ = None
    # the state of this fieldset. Initialized -> Open -> Closed
    __state__ = None
    # the field changes that have been made in this changeset
    __diff__ = None

    def __init__(self, obj, changes=None):
        self.__original__ = obj
        self.__state__ = ChangesetState.INITIALIZED
        self.__diff__ = changes or {}

    def commit(self):
        """Build the new instance, close the changeset, and return it."""
        obj = self.build_rlp()
        self.close()
        return obj

    def build_rlp(self):
        """Return a new instance combining the original's fields with the diff."""
        if self.__state__ == ChangesetState.OPEN:
            field_kwargs = {
                name: self.__diff__.get(name, self.__original__[name])
                for name
                in self.__original__._meta.field_names
            }
            return type(self.__original__)(**field_kwargs)
        else:
            # BUG FIX: message previously said "Cannot open ..." (copy-paste).
            raise ValueError("Cannot build rlp from Changeset which is not in the OPEN state")

    def open(self):
        if self.__state__ == ChangesetState.INITIALIZED:
            self.__state__ = ChangesetState.OPEN
        else:
            raise ValueError("Cannot open Changeset which is not in the INITIALIZED state")

    def close(self):
        if self.__state__ == ChangesetState.OPEN:
            self.__state__ = ChangesetState.CLOSED
        else:
            # BUG FIX: message previously said
            # "Cannot open Changeset which is not in the INITIALIZED state".
            raise ValueError("Cannot close Changeset which is not in the OPEN state")

    def __enter__(self):
        if self.__state__ == ChangesetState.INITIALIZED:
            self.open()
            return self
        else:
            raise ValueError("Cannot open Changeset which is not in the INITIALIZED state")

    def __exit__(self, exc_type, exc_value, traceback):
        if self.__state__ == ChangesetState.OPEN:
            self.close()
def Changeset(obj, changes):
    """Create a changeset instance for *obj*.

    Dynamically builds a ``BaseChangeset`` subclass with one
    :class:`ChangesetField` descriptor per field of *obj*, then instantiates
    it with the initial *changes* dict (may be ``None``).
    """
    namespace = {
        name: ChangesetField(name)
        for name
        in obj._meta.field_names
    }
    cls = type(
        "{0}Changeset".format(obj.__class__.__name__),
        (BaseChangeset,),
        namespace,
    )
    return cls(obj, changes)
import collections.abc  # BUG FIX: ``collections.Sequence`` was removed in Python 3.10


class BaseSerializable(collections.abc.Sequence):
    """Sequence-like base for objects with declared RLP ``fields``.

    Subclasses rely on a ``_meta`` attribute (normally installed by the
    ``SerializableBase`` metaclass) describing field names, the private
    attributes backing them, and the sedes used for (de)serialization.
    """

    def __init__(self, *args, **kwargs):
        if kwargs:
            field_values = merge_kwargs_to_args(args, kwargs, self._meta.field_names)
        else:
            field_values = args
        if len(field_values) != len(self._meta.field_names):
            raise TypeError(
                'Argument count mismatch. expected {0} - got {1} - missing {2}'.format(
                    len(self._meta.field_names),
                    len(field_values),
                    ','.join(self._meta.field_names[len(field_values):]),
                )
            )
        for value, attr in zip(field_values, self._meta.field_attrs):
            # Lists are converted to tuples so instances stay hashable.
            setattr(self, attr, make_immutable(value))

    # Cache slot for the instance's RLP encoding (managed externally).
    _cached_rlp = None

    def as_dict(self):
        """Return the fields as a ``{name: value}`` dict."""
        return dict(
            (field, value)
            for field, value
            in zip(self._meta.field_names, self)
        )

    def __iter__(self):
        for attr in self._meta.field_attrs:
            yield getattr(self, attr)

    def __getitem__(self, idx):
        # Supports integer, slice, and field-name indexing.
        if isinstance(idx, int):
            attr = self._meta.field_attrs[idx]
            return getattr(self, attr)
        elif isinstance(idx, slice):
            field_slice = self._meta.field_attrs[idx]
            return tuple(getattr(self, field) for field in field_slice)
        elif isinstance(idx, str):
            return getattr(self, idx)
        else:
            raise IndexError("Unsupported type for __getitem__: {0}".format(type(idx)))

    def __len__(self):
        return len(self._meta.fields)

    def __eq__(self, other):
        return isinstance(other, Serializable) and hash(self) == hash(other)

    _hash_cache = None

    def __hash__(self):
        # Hash of the field-value tuple, computed once and cached.
        if self._hash_cache is None:
            self._hash_cache = hash(tuple(self))
        return self._hash_cache

    @classmethod
    def get_sede_identifier(cls):
        try:
            return cls._meta.sedes.get_sede_identifier()
        except AttributeError:
            return 0

    @classmethod
    def serialize(cls, obj):
        """Serialize *obj* with this class's sedes, re-raising list errors."""
        try:
            return cls._meta.sedes.serialize(obj)
        except ListSerializationError as e:
            raise ObjectSerializationError(obj=obj, sedes=cls, list_exception=e)

    @classmethod
    def deserialize(cls, serial, to_list = False, **extra_kwargs):
        """Deserialize *serial* into a new instance of this class."""
        try:
            values = cls._meta.sedes.deserialize(serial, to_list = to_list)
        except ListDeserializationError as e:
            raise ObjectDeserializationError(serial=serial, sedes=cls, list_exception=e)
        args_as_kwargs = merge_args_to_kwargs(values, {}, cls._meta.field_names)
        return cls(**args_as_kwargs, **extra_kwargs)

    def copy(self, *args, **kwargs):
        """Return a new instance, overriding the given fields and deep-copying the rest."""
        missing_overrides = set(
            self._meta.field_names
        ).difference(
            kwargs.keys()
        ).difference(
            self._meta.field_names[:len(args)]
        )
        unchanged_kwargs = {
            key: copy.deepcopy(value)
            for key, value
            in self.as_dict().items()
            if key in missing_overrides
        }
        combined_kwargs = dict(**unchanged_kwargs, **kwargs)
        all_kwargs = merge_args_to_kwargs(args, combined_kwargs, self._meta.field_names)
        return type(self)(**all_kwargs)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, *args):
        return self.copy()

    # Set by the changeset machinery while field writes are permitted.
    _in_mutable_context = False

    def build_changeset(self, *args, **kwargs):
        """Return a changeset pre-populated with the given field overrides."""
        args_as_kwargs = merge_args_to_kwargs(
            args,
            kwargs,
            self._meta.field_names,
            allow_missing=True,
        )
        return Changeset(self, changes=args_as_kwargs)
def make_immutable(value):
    """Recursively convert lists to tuples; leave all other values untouched."""
    if not isinstance(value, list):
        return value
    return tuple(make_immutable(element) for element in value)
@to_tuple
def _mk_field_attrs(field_names, extra_namespace):
    """Yield a collision-free private attribute name for each field.

    Each name is the field name prefixed with as many underscores as needed
    to be unique within the combined namespace.
    """
    taken = set(field_names).union(extra_namespace)
    for name in field_names:
        candidate = '_' + name
        while candidate in taken:
            candidate = '_' + candidate
        taken.add(candidate)
        yield candidate
def _mk_field_property(field, attr):
def field_fn_getter(self):
return getattr(self, attr)
def field_fn_setter(self, value):
if not self._in_mutable_context:
raise AttributeError("can't set attribute")
setattr(self, attr, value)
return property(field_fn_getter, field_fn_setter)
IDENTIFIER_REGEX = re.compile(r"^[^\d\W]\w*\Z", re.UNICODE)
def _is_valid_identifier(value):
# Source: https://stackoverflow.com/questions/5474008/regular-expression-to-confirm-whether-a-string-is-a-valid-identifier-in-python # noqa: E501
if not isinstance(value, str):
return False
return bool(IDENTIFIER_REGEX.match(value))
@to_set
def _get_class_namespace(cls):
    """Yield every attribute name defined directly on *cls* (``__dict__`` and ``__slots__``)."""
    if hasattr(cls, '__dict__'):
        yield from cls.__dict__.keys()
    if hasattr(cls, '__slots__'):
        yield from cls.__slots__
class SerializableBase(abc.ABCMeta):
    """Metaclass processing the ``fields`` declaration on Serializable subclasses.

    Validates the declared fields, assembles the ``_meta`` metadata object,
    and installs a read-only ``property`` accessor for every field.
    """

    def __new__(cls, name, bases, attrs):
        super_new = super(SerializableBase, cls).__new__
        serializable_bases = tuple(b for b in bases if isinstance(b, SerializableBase))
        has_multiple_serializable_parents = len(serializable_bases) > 1
        is_serializable_subclass = any(serializable_bases)
        declares_fields = 'fields' in attrs
        if not is_serializable_subclass:
            # If this is the original creation of the `Serializable` class,
            # just create the class.
            return super_new(cls, name, bases, attrs)
        elif not declares_fields:
            if has_multiple_serializable_parents:
                raise TypeError(
                    "Cannot create subclass from multiple parent `Serializable` "
                    "classes without explicit `fields` declaration."
                )
            else:
                # This is just a vanilla subclass of a `Serializable` parent class.
                parent_serializable = serializable_bases[0]
                if hasattr(parent_serializable, '_meta'):
                    # Inherit the parent's field declaration unchanged.
                    fields = parent_serializable._meta.fields
                else:
                    # This is a subclass of `Serializable` which has no
                    # `fields`, likely intended for further subclassing.
                    return super_new(cls, name, bases, attrs)
        else:
            # ensure that the `fields` property is a tuple of tuples to ensure
            # immutability.
            fields = tuple(tuple(field) for field in attrs.pop('fields'))
        # split the fields into names and sedes
        field_names, sedes = zip(*fields)
        # check that field names are unique
        duplicate_field_names = _get_duplicates(field_names)
        if duplicate_field_names:
            raise TypeError(
                "The following fields are duplicated in the `fields` "
                "declaration: "
                "{0}".format(",".join(sorted(duplicate_field_names)))
            )
        # check that field names are valid identifiers
        invalid_field_names = {
            field_name
            for field_name
            in field_names
            if not _is_valid_identifier(field_name)
        }
        if invalid_field_names:
            raise TypeError(
                "The following field names are not valid python identifiers: {0}".format(
                    ",".join("`{0}`".format(item) for item in sorted(invalid_field_names))
                )
            )
        # extract all of the fields from parent `Serializable` classes.
        parent_field_names = {
            field_name
            for base in serializable_bases if hasattr(base, '_meta')
            for field_name in base._meta.field_names
        }
        # check that all fields from parent serializable classes are
        # represented on this class.
        missing_fields = parent_field_names.difference(field_names)
        if missing_fields:
            raise TypeError(
                "Subclasses of `Serializable` **must** contain a full superset "
                "of the fields defined in their parent classes.  The following "
                "fields are missing: "
                "{0}".format(",".join(sorted(missing_fields)))
            )
        # the actual field values are stored in separate *private* attributes.
        # This computes attribute names that don't conflict with other
        # attributes already present on the class.
        reserved_namespace = set(attrs.keys()).union(
            attr
            for base in bases
            for parent_cls in base.__mro__
            for attr in _get_class_namespace(parent_cls)
        )
        field_attrs = _mk_field_attrs(field_names, reserved_namespace)
        # construct the Meta object to store field information for the class
        meta_namespace = {
            'fields': fields,
            'field_attrs': field_attrs,
            'field_names': field_names,
            'sedes': List(sedes),
        }
        # A user-supplied `_meta` (if any) becomes the base of the generated
        # Meta class; otherwise fall back to MetaBase.
        meta_base = attrs.pop('_meta', MetaBase)
        meta = type(
            'Meta',
            (meta_base,),
            meta_namespace,
        )
        attrs['_meta'] = meta
        # construct `property` attributes for read only access to the fields.
        field_props = tuple(
            (field, _mk_field_property(field, attr))
            for field, attr
            in zip(meta.field_names, meta.field_attrs)
        )
        # Field properties take precedence; explicit attrs may override them.
        return super_new(
            cls,
            name,
            bases,
            dict(
                field_props +
                tuple(attrs.items())
            ),
        )
class Serializable(BaseSerializable, metaclass=SerializableBase):
    """Base class for RLP-serializable objects.

    Subclasses declare a ``fields`` attribute: a sequence of
    ``(field_name, sedes)`` pairs processed by the metaclass.
    """
    pass
from rlp_cython.exceptions import SerializationError, DeserializationError
from rlp_cython.atomic import Atomic
class Text:
    """A sedes object for encoded text data of certain length.

    :param min_length: the minimal length in encoded characters, or `None` for
                       no lower limit
    :param max_length: the maximal length in encoded characters, or `None` for
                       no upper limit
    :param allow_empty: if true, the empty string is considered valid even
                        when a minimum length is otherwise required
    :param encoding: the codec used to convert between text and bytes
    """

    def __init__(self, min_length=None, max_length=None, allow_empty=False, encoding='utf8'):
        self.min_length = min_length or 0
        self.max_length = float('inf') if max_length is None else max_length
        self.allow_empty = allow_empty
        self.encoding = encoding

    @classmethod
    def fixed_length(cls, l, allow_empty=False):
        """Create a sedes for text data with exactly `l` encoded characters."""
        return cls(l, l, allow_empty=allow_empty)

    @classmethod
    def is_valid_type(cls, obj):
        return isinstance(obj, str)

    def is_valid_length(self, l):
        within_bounds = self.min_length <= l <= self.max_length
        return within_bounds or (self.allow_empty and l == 0)

    def serialize(self, obj):
        if not self.is_valid_type(obj):
            raise SerializationError('Object is not a serializable ({})'.format(type(obj)), obj)
        if not self.is_valid_length(len(obj)):
            raise SerializationError('Object has invalid length', obj)
        return obj.encode(self.encoding)

    def deserialize(self, serial, to_list = False):
        if not isinstance(serial, Atomic):
            m = 'Objects of type {} cannot be deserialized'
            raise DeserializationError(m.format(type(serial).__name__), serial)
        try:
            decoded = serial.decode(self.encoding)
        except UnicodeDecodeError as err:
            raise DeserializationError(str(err), serial)
        # Length is validated on the decoded text, not the raw bytes.
        if not self.is_valid_length(len(decoded)):
            raise DeserializationError('{} has invalid length'.format(type(serial)), serial)
        return decoded
text = Text() | /rlp-cython-2.1.7.tar.gz/rlp-cython-2.1.7/rlp/sedes/text.py | 0.858244 | 0.314787 | text.py | pypi |
Tutorial
========
Basics
------
There are two types of fundamental items one can encode in RLP:
1) Strings of bytes
2) Lists of other items
In this package, byte strings are represented either as Python strings or as
``bytearrays``. Lists can be any sequence, e.g. ``lists`` or ``tuples``. To
encode these kinds of objects, use :func:`rlp.encode`::
>>> from rlp_cython import encode
>>> encode('ethereum')
b'\x88ethereum'
>>> encode('')
b'\x80'
>>> encode('Lorem ipsum dolor sit amet, consetetur sadipscing elitr.')
b'\xb88Lorem ipsum dolor sit amet, consetetur sadipscing elitr.'
>>> encode([])
b'\xc0'
>>> encode(['this', ['is', ('a', ('nested', 'list', []))]])
b'\xd9\x84this\xd3\x82is\xcfa\xcd\x86nested\x84list\xc0'
Decoding is just as simple::
>>> from rlp_cython import decode
>>> decode(b'\x88ethereum')
b'ethereum'
>>> decode(b'\x80')
b''
>>> decode(b'\xc0')
[]
>>> decode(b'\xd9\x84this\xd3\x82is\xcfa\xcd\x86nested\x84list\xc0')
[b'this', [b'is', [b'a', [b'nested', b'list', []]]]]
Now, what if we want to encode a different object, say, an integer? Let's try::
>>> encode(1503)
b'\x82\x05\xdf'
>>> decode(b'\x82\x05\xdf')
b'\x05\xdf'
Oops, what happened? Encoding worked fine, but :func:`rlp.decode` refused to
give an integer back. The reason is that RLP is typeless. It doesn't know if the
encoded data represents a number, a string, or a more complicated object. It
only distinguishes between byte strings and lists. Therefore, *pyrlp* guesses
how to serialize the object into a byte string (here, in big endian notation).
When encoded however, the type information is lost and :func:`rlp.decode`
returned the result in its most generic form, as a string. Thus, what we need
to do is deserialize the result afterwards.
Sedes objects
-------------
Serialization and its counterpart, deserialization, are done by what we call
*sedes objects* (borrowing from the word "codec"). For integers, the sedes
:mod:`rlp.sedes.big_endian_int` is in charge. To decode our integer, we can
pass this sedes to :func:`rlp.decode`::
>>> from rlp_cython.sedes import big_endian_int
>>> decode(b'\x82\x05\xdf', big_endian_int)
1503
For unicode strings, there's the sedes :mod:`rlp.sedes.binary`, which uses UTF-8
to convert to and from byte strings::
>>> from rlp_cython.sedes import binary
>>> encode(u'Ðapp')
b'\x85\xc3\x90app'
>>> decode(b'\x85\xc3\x90app', binary)
b'\xc3\x90app'
>>> print(decode(b'\x85\xc3\x90app', binary).decode('utf-8'))
Ðapp
Lists are a bit more difficult as they can contain arbitrarily complex
combinations of types. Therefore, we need to create a sedes object specific for
each list type. As base class for this we can use
:class:`rlp.sedes.List`::
>>> from rlp_cython.sedes import List
>>> encode([5, 'fdsa', 0])
b'\xc7\x05\x84fdsa\x80'
>>> sedes = List([big_endian_int, binary, big_endian_int])
>>> decode(b'\xc7\x05\x84fdsa\x80', sedes)
(5, b'fdsa', 0)
Unsurprisingly, it is also possible to nest :class:`rlp.List` objects::
>>> inner = List([binary, binary])
>>> outer = List([inner, inner, inner])
>>> decode(encode(['asdf', 'fdsa']), inner)
(b'asdf', b'fdsa')
>>> decode(encode([['a1', 'a2'], ['b1', 'b2'], ['c1', 'c2']]), outer)
((b'a1', b'a2'), (b'b1', b'b2'), (b'c1', b'c2'))
What Sedes Objects Actually Are
-------------------------------
We saw how to use sedes objects, but what exactly are they? They are
characterized by providing the following three member functions:
- ``serializable(obj)``
- ``serialize(obj)``
- ``deserialize(serial)``
The latter two are used to convert between a Python object and its
representation as byte strings or sequences. The former one may be called by
:func:`rlp.encode` to infer which sedes object to use for a given object (see
:ref:`inference-section`).
For basic types, the sedes object is usually a module (e.g.
:mod:`rlp.sedes.big_endian_int` and :mod:`rlp.sedes.binary`). Instances of
:class:`rlp.sedes.List` provide the sedes interface too, as well as the
class :class:`rlp.Serializable` which is discussed in the following section.
Encoding Custom Objects
-----------------------
Often, we want to encode our own objects in RLP. Examples from the Ethereum
world are transactions, blocks or anything send over the Wire. With *pyrlp*,
this is as easy as subclassing :class:`rlp.Serializable`::
>>> import rlp
>>> class Transaction(rlp.Serializable):
... fields = (
... ('sender', binary),
... ('receiver', binary),
... ('amount', big_endian_int)
... )
The class attribute :attr:`~rlp.Serializable.fields` is a sequence of 2-tuples
defining the field names and the corresponding sedes. For each name an instance
attribute is created, that can conveniently be initialized with
:meth:`~rlp.Serializable.__init__`::
>>> tx1 = Transaction(b'me', b'you', 255)
>>> tx2 = Transaction(amount=255, sender=b'you', receiver=b'me')
>>> tx1.amount
255
At serialization, the field names are dropped and the object is converted to a
list, where the provided sedes objects are used to serialize the object
attributes::
>>> Transaction.serialize(tx1)
[b'me', b'you', b'\xff']
>>> tx1 == Transaction.deserialize([b'me', b'you', b'\xff'])
True
As we can see, each subclass of :class:`rlp.Serializable` implements the sedes
responsible for its instances. Therefore, we can use :func:`rlp.encode` and
:func:`rlp.decode` as expected::
>>> encode(tx1)
b'\xc9\x82me\x83you\x81\xff'
>>> decode(b'\xc9\x82me\x83you\x81\xff', Transaction) == tx1
True
.. _inference-section:
Sedes Inference
---------------
As we have seen, :func:`rlp.encode` (or, rather, :func:`rlp.infer_sedes`)
tries to guess a sedes capable of serializing the object before encoding. In
this process, it follows the following steps:
1) Check if the object's class is a sedes object (like every subclass of
:class:`rlp.Serializable`). If so, its class is the sedes.
2) Check if one of the entries in :attr:`rlp.sedes.sedes_list` can serialize
the object (via ``serializable(obj)``). If so, this is the sedes.
3) Check if the object is a sequence. If so, build a
   :class:`rlp.sedes.List` by recursively inferring a sedes for each of its
   elements.
4) If none of these steps was successful, sedes inference has failed.
If you have built your own basic sedes (e.g. for ``dicts`` or ``floats``), you
might want to hook in at step 2 and add it to :attr:`rlp.sedes.sedes_list`,
whereby it will automatically be used by :func:`rlp.encode`.
Further Reading
---------------
This was basically everything there is to know about this package. The technical
specification of RLP can be found either in the
`Ethereum wiki <https://github.com/ethereum/wiki/wiki/RLP>`_ or in Appendix B of
Gavin Wood's `Yellow Paper <http://gavwood.com/Paper.pdf>`_. For more detailed
information about this package, have a look at the :ref:`API-reference` or the
source code.
| /rlp-cython-2.1.7.tar.gz/rlp-cython-2.1.7/docs/tutorial.rst | 0.942062 | 0.718557 | tutorial.rst | pypi |
from collections import Iterable, Sequence
from .codec import consume_length_prefix, consume_payload
from .exceptions import DecodingError
from .atomic import Atomic
def decode_lazy(rlp, sedes=None, **sedes_kwargs):
    """Decode an RLP encoded object in a lazy fashion.

    If the encoded object is a bytestring, this function acts similar to
    :func:`rlp.decode`. If it is a list however, a :class:`LazyList` is
    returned instead. This object will decode the string lazily, avoiding
    both horizontal and vertical traversing as much as possible.

    The way `sedes` is applied depends on the decoded object: If it is a string
    `sedes` deserializes it as a whole; if it is a list, each element is
    deserialized individually. In both cases, `sedes_kwargs` are passed on.
    Note that, if a deserializer is used, only "horizontal" but not
    "vertical lazyness" can be preserved.

    :param rlp: the RLP string to decode
    :param sedes: an object implementing a method ``deserialize(code)`` which
                  is used as described above, or ``None`` if no
                  deserialization should be performed
    :param \*\*sedes_kwargs: additional keyword arguments that will be passed
                             to the deserializers
    :returns: either the already decoded and deserialized object (if encoded as
              a string) or an instance of :class:`rlp.LazyList`
    """
    item, end = consume_item_lazy(rlp, 0)
    # The top-level item must account for the entire input string.
    if end != len(rlp):
        raise DecodingError('RLP length prefix announced wrong length', rlp)
    if isinstance(item, LazyList):
        # Defer per-element deserialization to the lazy list itself.
        item.sedes = sedes
        item.sedes_kwargs = sedes_kwargs
        return item
    elif sedes:
        # Byte string payload: already fully decoded, deserialize eagerly.
        return sedes.deserialize(item, **sedes_kwargs)
    else:
        return item
def consume_item_lazy(rlp, start):
    """Read an item from an RLP string lazily.

    If the length prefix announces a string, the string is read; if it
    announces a list, a :class:`LazyList` is created.

    :param rlp: the rlp string to read from
    :param start: the position at which to start reading
    :returns: a tuple ``(item, end)`` where ``item`` is the read string or a
              :class:`LazyList` and ``end`` is the position of the first
              unprocessed byte.
    """
    # p: prefix position, t: announced type (bytes or list),
    # l: payload length, s: position right after the length prefix.
    p, t, l, s = consume_length_prefix(rlp, start)
    if t is bytes:
        # Byte strings are cheap to read, so consume them eagerly.
        item, _, end = consume_payload(rlp, p, s, bytes, l)
        return item, end
    else:
        assert t is list
        # Lists are wrapped lazily; the payload spans [s, s + l).
        return LazyList(rlp, s, s + l), s + l
class LazyList(Sequence):
    """A RLP encoded list which decodes itself when necessary.

    Both indexing with positive indices and iterating are supported.
    Getting the length with :func:`len` is possible as well but requires full
    horizontal encoding.

    :param rlp: the rlp string in which the list is encoded
    :param start: the position of the first payload byte of the encoded list
    :param end: the position of the last payload byte of the encoded list
    :param sedes: a sedes object which deserializes each element of the list,
                  or ``None`` for no deserialization
    :param \*\*sedes_kwargs: keyword arguments which will be passed on to the
                             deserializer
    """

    def __init__(self, rlp, start, end, sedes=None, **sedes_kwargs):
        self.rlp = rlp
        self.start = start
        self.end = end
        # Read position of the next not-yet-decoded element.
        self.index = start
        # Elements decoded so far, in order.
        self._elements = []
        # Cached length; ``None`` until the whole list has been traversed.
        self._len = None
        self.sedes = sedes
        self.sedes_kwargs = sedes_kwargs

    def next(self):
        """Decode and return the next element, advancing the read position."""
        if self.index == self.end:
            self._len = len(self._elements)
            raise StopIteration
        assert self.index < self.end
        item, end = consume_item_lazy(self.rlp, self.index)
        self.index = end
        if self.sedes:
            item = self.sedes.deserialize(item, **self.sedes_kwargs)
        self._elements.append(item)
        return item

    def __getitem__(self, i):
        if isinstance(i, slice):
            if i.step is not None:
                raise TypeError("Step not supported")
            start = i.start
            stop = i.stop
        else:
            start = i
            stop = i + 1
        if stop is None:
            # NOTE(review): for open-ended slices this bound is a byte offset
            # rather than an element count -- looks suspicious, but kept for
            # backwards compatibility; confirm against callers.
            stop = self.end - 1
        try:
            # Decode forward until element ``stop - 1`` is available.
            while len(self._elements) < stop:
                self.next()
        except StopIteration:
            assert self.index == self.end
            raise IndexError('Index %s out of range' % i)
        if isinstance(i, slice):
            return self._elements[start:stop]
        else:
            return self._elements[start]

    def __len__(self):
        # BUG FIX: compare against ``None`` instead of truthiness.  With
        # ``if not self._len`` an empty list (cached length 0) re-traversed
        # the encoding on every ``len()`` call.
        if self._len is None:
            try:
                while True:
                    self.next()
            except StopIteration:
                self._len = len(self._elements)
        return self._len
def peek(rlp, index, sedes=None):
    """Get a specific element from an rlp encoded nested list.

    This function uses :func:`rlp.decode_lazy` and, thus, decodes only the
    necessary parts of the string.

    Usage example::

        >>> import rlp
        >>> rlpdata = rlp.encode([1, 2, [3, [4, 5]]])
        >>> rlp.peek(rlpdata, 0, rlp.sedes.big_endian_int)
        1
        >>> rlp.peek(rlpdata, [2, 0], rlp.sedes.big_endian_int)
        3

    :param rlp: the rlp string
    :param index: the index of the element to peek at (can be a list for
                  nested data)
    :param sedes: a sedes used to deserialize the peeked at object, or `None`
                  if no deserialization should be performed
    :raises: :exc:`IndexError` if `index` is invalid (out of range or too many
             levels)
    """
    item = decode_lazy(rlp)
    indices = index if isinstance(index, Iterable) else [index]
    for position in indices:
        if isinstance(item, Atomic):
            # Reached a byte string but more indices remain.
            raise IndexError('Too many indices given')
        item = item[position]
    return sedes.deserialize(item) if sedes else item
class RLPException(Exception):
    """Common base class for all exceptions raised by this package."""
class EncodingError(RLPException):
    """Raised when RLP encoding fails.

    :ivar obj: the object that could not be encoded
    """

    def __init__(self, message, obj):
        super().__init__(message)
        self.obj = obj
class DecodingError(RLPException):
    """Raised when RLP decoding fails.

    :ivar rlp: the RLP string that could not be decoded
    """

    def __init__(self, message, rlp):
        super().__init__(message)
        self.rlp = rlp
class SerializationError(RLPException):
    """Raised when serialization fails.

    :ivar obj: the object that could not be serialized
    """

    def __init__(self, message, obj):
        super().__init__(message)
        self.obj = obj
class ListSerializationError(SerializationError):
    """Raised when serialization by a :class:`sedes.List` fails.

    :ivar element_exception: exception raised while serializing one of the
        elements, or `None` if the error is unrelated to a specific element
    :ivar index: index in the list that produced the error, or `None` if the
        error is unrelated to a specific element
    """

    def __init__(self, message=None, obj=None, element_exception=None, index=None):
        if message is None:
            # No explicit message: synthesize one from the failing element.
            assert index is not None
            assert element_exception is not None
            message = 'Serialization failed because of element at index {} ("{}")'.format(
                index, str(element_exception))
        super().__init__(message, obj)
        self.index = index
        self.element_exception = element_exception
class ObjectSerializationError(SerializationError):
    """Exception raised if serialization of a :class:`sedes.Serializable` object fails.

    :ivar sedes: the :class:`sedes.Serializable` that failed
    :ivar list_exception: exception raised by the underlying list sedes, or `None` if no such
        exception has been raised
    :ivar field: name of the field of the object that produced the error, or `None` if no field
        responsible for the error
    """

    def __init__(self, message=None, obj=None, sedes=None, list_exception=None):
        # When no explicit message is given, synthesize one from the
        # underlying list error and identify the offending field if possible.
        if message is None:
            assert list_exception is not None
            if list_exception.element_exception is None:
                # The list sedes itself failed; no single field to blame.
                field = None
                message = ('Serialization failed because of underlying list '
                           '("{}")'.format(str(list_exception)))
            else:
                assert sedes is not None
                # Map the failing list index back to the field name.
                field = sedes._meta.field_names[list_exception.index]
                message = ('Serialization failed because of field {} '
                           '("{}")'.format(field, str(list_exception.element_exception)))
        else:
            field = None
        super(ObjectSerializationError, self).__init__(message, obj)
        self.field = field
        self.list_exception = list_exception
class DeserializationError(RLPException):
    """Raised when deserialization of an RLP-decoded value fails.

    :ivar serial: the decoded RLP string that could not be deserialized
    """

    def __init__(self, message, serial):
        super().__init__(message)
        self.serial = serial
class ListDeserializationError(DeserializationError):
    """Exception raised if deserialization by a :class:`sedes.List` fails.

    :ivar element_exception: the exception that occurred during the deserialization of one of
                             the elements, or `None` if the error is unrelated to a specific
                             element
    :ivar index: the index in the list that produced the error or `None` if the error is
                 unrelated to a specific element
    """

    def __init__(self, message=None, serial=None, element_exception=None, index=None):
        if not message:
            assert index is not None
            assert element_exception is not None
            message = (
                'Deserialization failed because of element at index {} ("{}")'
                .format(index, str(element_exception))
            )
        super().__init__(message, serial)
        self.index = index
        self.element_exception = element_exception
class ObjectDeserializationError(DeserializationError):
    """Exception raised if deserialization by a :class:`sedes.Serializable` fails.

    :ivar sedes: the :class:`sedes.Serializable` that failed
    :ivar list_exception: exception raised by the underlying list sedes, or `None` if no such
                          exception has been raised
    :ivar field: name of the field of the object that produced the error, or `None` if no field
                 responsible for the error
    """

    def __init__(self, message=None, serial=None, sedes=None, list_exception=None):
        # Fix: `field` was previously only assigned inside the `if not message`
        # branch, so passing an explicit message raised UnboundLocalError at
        # `self.field = field` below.
        field = None
        if not message:
            assert list_exception is not None
            if list_exception.element_exception is None:
                message = ('Deserialization failed because of underlying list '
                           '("{}")'.format(str(list_exception)))
            else:
                assert sedes is not None
                # Map the failing list index back to the object's field name.
                field = sedes._meta.field_names[list_exception.index]
                message = ('Deserialization failed because of field {} '
                           '("{}")'.format(field, str(list_exception.element_exception)))
        super(ObjectDeserializationError, self).__init__(message, serial)
        self.sedes = sedes
        self.list_exception = list_exception
        self.field = field
from eth_utils import (
int_to_big_endian,
big_endian_to_int,
)
from rlp_cython.exceptions import DeserializationError, SerializationError
class BigEndianInt(object):
    """A sedes for big endian integers.

    :param l: the size of the serialized representation in bytes or `None` to
              use the shortest possible one
    """

    def __init__(self, l=None):
        self.l = l

    def serialize(self, obj):
        """Encode a non-negative integer as big endian bytes, zero-padded on
        the left when a fixed size was requested."""
        if isinstance(obj, bool) or not isinstance(obj, int):
            raise SerializationError('Can only serialize integers', obj)
        if self.l is not None and obj >= 256**self.l:
            raise SerializationError('Integer too large (does not fit in {} '
                                     'bytes)'.format(self.l), obj)
        if obj < 0:
            raise SerializationError('Cannot serialize negative integers', obj)
        encoded = int_to_big_endian(obj) if obj else b''
        if self.l is None:
            return encoded
        return b'\x00' * max(0, self.l - len(encoded)) + encoded

    def get_sede_identifier(self):
        return 0

    def deserialize(self, serial, to_list=False):
        """Decode a big endian byte string back into an integer, enforcing
        either the fixed size or minimal-length encoding."""
        if self.l is not None and len(serial) != self.l:
            raise DeserializationError('Invalid serialization (wrong size)',
                                       serial)
        if self.l is None and len(serial) > 0 and serial[0:1] == b'\x00':
            raise DeserializationError('Invalid serialization (not minimal '
                                       'length)', serial)
        # An empty serialization stands for zero.
        return big_endian_to_int(serial or b'\x00')
class FBigEndianInt(object):
    """A pass-through sedes for big endian integers.

    The actual integer encoding and decoding is delegated to the msgpack
    layer, so both ``serialize`` and ``deserialize`` return their argument
    unchanged.
    """

    def __init__(self):
        pass

    def get_sede_identifier(self):
        return 1

    def serialize(self, obj):
        # Integer serialization happens in msgpack; nothing to do here.
        return obj

    def deserialize(self, serial, to_list=False):
        # Integer deserialization happens in msgpack; nothing to do here.
        return serial
# Shared singleton instances of the integer sedes.
big_endian_int = BigEndianInt()
f_big_endian_int = FBigEndianInt()
import abc
import collections
import collections.abc
import copy
import enum
import re
from eth_utils import (
    to_dict,
    to_set,
    to_tuple,
)
from rlp_cython.exceptions import (
    ListSerializationError,
    ObjectSerializationError,
    ListDeserializationError,
    ObjectDeserializationError,
)
from .lists import (
    List,
)
class MetaBase:
    """Container for per-class field metadata, attached to Serializable
    subclasses as ``_meta`` by :class:`SerializableBase`.
    """
    fields = None  # tuple of (field_name, sedes) pairs, in declaration order
    field_names = None  # tuple of field names
    field_attrs = None  # private attribute names backing each field
    sedes = None  # combined List sedes used to (de)serialize all fields
def _get_duplicates(values):
counts = collections.Counter(values)
return tuple(
item
for item, num in counts.items()
if num > 1
)
def validate_args_and_kwargs(args, kwargs, arg_names, allow_missing=False):
    """Check that ``args`` and ``kwargs`` together cleanly cover ``arg_names``.

    :raises TypeError: if ``arg_names`` contains duplicates, a kwarg repeats a
        positionally-filled name, a kwarg is not in ``arg_names``, or (unless
        ``allow_missing``) a required name is absent from ``kwargs``.
    """
    repeated_names = _get_duplicates(arg_names)
    if repeated_names:
        raise TypeError("Duplicate argument names: {0}".format(sorted(repeated_names)))

    # Names consumed positionally must not be supplied again as kwargs.
    positionally_filled = set(arg_names[:len(args)])
    overlapping = positionally_filled.intersection(kwargs.keys())
    if overlapping:
        raise TypeError("Duplicate kwargs: {0}".format(sorted(overlapping)))

    unrecognized = set(kwargs.keys()).difference(arg_names)
    if unrecognized:
        raise TypeError("Unknown kwargs: {0}".format(sorted(unrecognized)))

    if not allow_missing:
        expected_from_kwargs = arg_names[len(args):]
        absent = set(expected_from_kwargs).difference(kwargs.keys())
        if absent:
            raise TypeError("Missing kwargs: {0}".format(sorted(absent)))
@to_tuple
def merge_kwargs_to_args(args, kwargs, arg_names, allow_missing=False):
    """Yield the full positional argument list: ``args`` first, followed by
    the remaining values pulled out of ``kwargs`` in ``arg_names`` order.
    """
    validate_args_and_kwargs(args, kwargs, arg_names, allow_missing=allow_missing)
    remaining_names = arg_names[len(args):]
    yield from args
    yield from (kwargs[arg_name] for arg_name in remaining_names)
@to_dict
def merge_args_to_kwargs(args, kwargs, arg_names, allow_missing=False):
    """Yield ``(name, value)`` pairs combining the explicit ``kwargs`` with
    the positional ``args`` matched up against ``arg_names``.
    """
    validate_args_and_kwargs(args, kwargs, arg_names, allow_missing=allow_missing)
    yield from kwargs.items()
    yield from zip(arg_names, args)
def _eq(left, right):
"""
Equality comparison that allows for equality between tuple and list types
with equivalent elements.
"""
if isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)):
return len(left) == len(right) and all(_eq(*pair) for pair in zip(left, right))
else:
return left == right
class ChangesetState(enum.Enum):
    """Lifecycle of a changeset: INITIALIZED -> OPEN -> CLOSED (one-way)."""
    INITIALIZED = 'INITIALIZED'
    OPEN = 'OPEN'
    CLOSED = 'CLOSED'
class ChangesetField:
    """Descriptor exposing a single field on a changeset instance.

    Reads fall back to the wrapped original object when the field has not
    been overridden; both reads and writes are only allowed while the
    changeset is in the OPEN state.
    """
    field = None

    def __init__(self, field):
        self.field = field

    def __get__(self, instance, type=None):
        if instance is None:
            # Accessed on the class itself.
            return self
        if instance.__state__ is not ChangesetState.OPEN:
            raise AttributeError("Changeset is not active. Attribute access not allowed")
        try:
            return instance.__diff__[self.field]
        except KeyError:
            # Not overridden in this changeset; read from the original object.
            return getattr(instance.__original__, self.field)

    def __set__(self, instance, value):
        if instance.__state__ is not ChangesetState.OPEN:
            raise AttributeError("Changeset is not active. Attribute access not allowed")
        instance.__diff__[self.field] = value
class BaseChangeset:
    """Mutable staging area over an immutable Serializable instance.

    Collects field overrides in ``__diff__`` while OPEN and materializes a
    new object of the original's type via :meth:`commit`/:meth:`build_rlp`.
    """
    # reference to the original Serializable instance.
    __original__ = None
    # the state of this fieldset. Initialized -> Open -> Closed
    __state__ = None
    # the field changes that have been made in this change
    __diff__ = None

    def __init__(self, obj, changes=None):
        self.__original__ = obj
        self.__state__ = ChangesetState.INITIALIZED
        self.__diff__ = changes or {}

    def commit(self):
        """Build the new object from the staged changes, then close."""
        obj = self.build_rlp()
        self.close()
        return obj

    def build_rlp(self):
        """Construct a new instance of the original's type with the staged
        changes applied on top of the original field values."""
        if self.__state__ == ChangesetState.OPEN:
            field_kwargs = {
                name: self.__diff__.get(name, self.__original__[name])
                for name
                in self.__original__._meta.field_names
            }
            return type(self.__original__)(**field_kwargs)
        else:
            # Fix: message previously said "Cannot open ..." (copied from open()).
            raise ValueError("Cannot build Changeset which is not in the OPEN state")

    def open(self):
        if self.__state__ == ChangesetState.INITIALIZED:
            self.__state__ = ChangesetState.OPEN
        else:
            raise ValueError("Cannot open Changeset which is not in the INITIALIZED state")

    def close(self):
        if self.__state__ == ChangesetState.OPEN:
            self.__state__ = ChangesetState.CLOSED
        else:
            # Fix: message previously said "Cannot open ... INITIALIZED state".
            raise ValueError("Cannot close Changeset which is not in the OPEN state")

    def __enter__(self):
        if self.__state__ == ChangesetState.INITIALIZED:
            self.open()
            return self
        else:
            raise ValueError("Cannot open Changeset which is not in the INITIALIZED state")

    def __exit__(self, exc_type, exc_value, traceback):
        if self.__state__ == ChangesetState.OPEN:
            self.close()
def Changeset(obj, changes):
    """Create a changeset class tailored to ``obj``'s fields and instantiate it.

    A :class:`ChangesetField` descriptor is generated for every field so the
    changeset exposes the same attribute names as the original object.
    """
    descriptors = {
        field_name: ChangesetField(field_name)
        for field_name in obj._meta.field_names
    }
    changeset_cls = type(
        "{0}Changeset".format(obj.__class__.__name__),
        (BaseChangeset,),
        descriptors,
    )
    return changeset_cls(obj, changes)
class BaseSerializable(collections.abc.Sequence):
    # Fix: inherit from collections.abc.Sequence. The collections.Sequence
    # alias was deprecated since Python 3.3 and removed in Python 3.10.
    """Sequence-like base implementation for RLP-serializable objects.

    Field values are stored in private attributes (``_meta.field_attrs``),
    made immutable on assignment, and exposed both as read-only properties
    and through the sequence protocol.
    """

    def __init__(self, *args, **kwargs):
        if kwargs:
            field_values = merge_kwargs_to_args(args, kwargs, self._meta.field_names)
        else:
            field_values = args
        if len(field_values) != len(self._meta.field_names):
            raise TypeError(
                'Argument count mismatch. expected {0} - got {1} - missing {2}'.format(
                    len(self._meta.field_names),
                    len(field_values),
                    ','.join(self._meta.field_names[len(field_values):]),
                )
            )
        for value, attr in zip(field_values, self._meta.field_attrs):
            setattr(self, attr, make_immutable(value))

    # Cached RLP encoding of this object, populated lazily by the codec.
    _cached_rlp = None

    def as_dict(self):
        """Return a plain dict mapping field names to field values."""
        return dict(
            (field, value)
            for field, value
            in zip(self._meta.field_names, self)
        )

    def __iter__(self):
        for attr in self._meta.field_attrs:
            yield getattr(self, attr)

    def __getitem__(self, idx):
        """Support integer, slice, and field-name indexing."""
        if isinstance(idx, int):
            attr = self._meta.field_attrs[idx]
            return getattr(self, attr)
        elif isinstance(idx, slice):
            field_slice = self._meta.field_attrs[idx]
            return tuple(getattr(self, field) for field in field_slice)
        elif isinstance(idx, str):
            return getattr(self, idx)
        else:
            raise IndexError("Unsupported type for __getitem__: {0}".format(type(idx)))

    def __len__(self):
        return len(self._meta.fields)

    def __eq__(self, other):
        # NOTE(review): equality is defined via hash equality, so two distinct
        # objects could in principle compare equal on a hash collision; kept
        # as-is to preserve existing semantics.
        return isinstance(other, Serializable) and hash(self) == hash(other)

    _hash_cache = None

    def __hash__(self):
        # Hash of the tuple of field values, computed once and cached.
        if self._hash_cache is None:
            self._hash_cache = hash(tuple(self))
        return self._hash_cache

    @classmethod
    def get_sede_identifier(cls):
        try:
            return cls._meta.sedes.get_sede_identifier()
        except AttributeError:
            return 0

    @classmethod
    def serialize(cls, obj):
        """Serialize ``obj`` via the class's combined field sedes."""
        try:
            return cls._meta.sedes.serialize(obj)
        except ListSerializationError as e:
            raise ObjectSerializationError(obj=obj, sedes=cls, list_exception=e)

    @classmethod
    def deserialize(cls, serial, to_list=False, **extra_kwargs):
        """Deserialize ``serial`` into a new instance of ``cls``."""
        try:
            values = cls._meta.sedes.deserialize(serial, to_list=to_list)
        except ListDeserializationError as e:
            raise ObjectDeserializationError(serial=serial, sedes=cls, list_exception=e)
        args_as_kwargs = merge_args_to_kwargs(values, {}, cls._meta.field_names)
        return cls(**args_as_kwargs, **extra_kwargs)

    def copy(self, *args, **kwargs):
        """Return a new instance with the given field overrides applied;
        unspecified fields are deep-copied from this instance."""
        missing_overrides = set(
            self._meta.field_names
        ).difference(
            kwargs.keys()
        ).difference(
            self._meta.field_names[:len(args)]
        )
        unchanged_kwargs = {
            key: copy.deepcopy(value)
            for key, value
            in self.as_dict().items()
            if key in missing_overrides
        }
        combined_kwargs = dict(**unchanged_kwargs, **kwargs)
        all_kwargs = merge_args_to_kwargs(args, combined_kwargs, self._meta.field_names)
        return type(self)(**all_kwargs)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, *args):
        return self.copy()

    # Flag temporarily flipped by internal machinery to permit field writes
    # through the generated properties (see _mk_field_property).
    _in_mutable_context = False

    def build_changeset(self, *args, **kwargs):
        """Return a Changeset staging the given field overrides on this object."""
        args_as_kwargs = merge_args_to_kwargs(
            args,
            kwargs,
            self._meta.field_names,
            allow_missing=True,
        )
        return Changeset(self, changes=args_as_kwargs)
def make_immutable(value):
    """Recursively convert lists into tuples; any other value is returned as-is."""
    if not isinstance(value, list):
        return value
    return tuple(make_immutable(item) for item in value)
@to_tuple
def _mk_field_attrs(field_names, extra_namespace):
    """For each field name, yield a private attribute name (one or more
    leading underscores) that does not collide with any existing name in
    the class namespace or with a previously generated attribute.
    """
    taken = set(field_names).union(extra_namespace)
    for name in field_names:
        candidate = name
        while True:
            candidate = '_' + candidate
            if candidate not in taken:
                taken.add(candidate)
                yield candidate
                break
def _mk_field_property(field, attr):
def field_fn_getter(self):
return getattr(self, attr)
def field_fn_setter(self, value):
if not self._in_mutable_context:
raise AttributeError("can't set attribute")
setattr(self, attr, value)
return property(field_fn_getter, field_fn_setter)
IDENTIFIER_REGEX = re.compile(r"^[^\d\W]\w*\Z", re.UNICODE)
def _is_valid_identifier(value):
# Source: https://stackoverflow.com/questions/5474008/regular-expression-to-confirm-whether-a-string-is-a-valid-identifier-in-python # noqa: E501
if not isinstance(value, str):
return False
return bool(IDENTIFIER_REGEX.match(value))
@to_set
def _get_class_namespace(cls):
    """Yield every attribute name declared on ``cls`` via its ``__dict__``
    or ``__slots__`` (either may be absent)."""
    yield from getattr(cls, '__dict__', {}).keys()
    yield from getattr(cls, '__slots__', ())
class SerializableBase(abc.ABCMeta):
    """Metaclass for :class:`Serializable`.

    Processes a subclass's ``fields`` declaration: validates field names
    (unique, identifier-shaped, a superset of parent fields), builds the
    ``_meta`` metadata object (field names, collision-free private attribute
    names, and the combined List sedes), and installs a read-only property
    for each field.
    """
    def __new__(cls, name, bases, attrs):
        super_new = super(SerializableBase, cls).__new__
        serializable_bases = tuple(b for b in bases if isinstance(b, SerializableBase))
        has_multiple_serializable_parents = len(serializable_bases) > 1
        is_serializable_subclass = any(serializable_bases)
        declares_fields = 'fields' in attrs
        if not is_serializable_subclass:
            # If this is the original creation of the `Serializable` class,
            # just create the class.
            return super_new(cls, name, bases, attrs)
        elif not declares_fields:
            if has_multiple_serializable_parents:
                raise TypeError(
                    "Cannot create subclass from multiple parent `Serializable` "
                    "classes without explicit `fields` declaration."
                )
            else:
                # This is just a vanilla subclass of a `Serializable` parent class.
                parent_serializable = serializable_bases[0]
                if hasattr(parent_serializable, '_meta'):
                    # Inherit the parent's fields; processing continues below.
                    fields = parent_serializable._meta.fields
                else:
                    # This is a subclass of `Serializable` which has no
                    # `fields`, likely intended for further subclassing.
                    return super_new(cls, name, bases, attrs)
        else:
            # ensure that the `fields` property is a tuple of tuples to ensure
            # immutability.
            fields = tuple(tuple(field) for field in attrs.pop('fields'))
        # split the fields into names and sedes
        field_names, sedes = zip(*fields)
        # check that field names are unique
        duplicate_field_names = _get_duplicates(field_names)
        if duplicate_field_names:
            raise TypeError(
                "The following fields are duplicated in the `fields` "
                "declaration: "
                "{0}".format(",".join(sorted(duplicate_field_names)))
            )
        # check that field names are valid identifiers
        invalid_field_names = {
            field_name
            for field_name
            in field_names
            if not _is_valid_identifier(field_name)
        }
        if invalid_field_names:
            raise TypeError(
                "The following field names are not valid python identifiers: {0}".format(
                    ",".join("`{0}`".format(item) for item in sorted(invalid_field_names))
                )
            )
        # extract all of the fields from parent `Serializable` classes.
        parent_field_names = {
            field_name
            for base in serializable_bases if hasattr(base, '_meta')
            for field_name in base._meta.field_names
        }
        # check that all fields from parent serializable classes are
        # represented on this class.
        missing_fields = parent_field_names.difference(field_names)
        if missing_fields:
            raise TypeError(
                "Subclasses of `Serializable` **must** contain a full superset "
                "of the fields defined in their parent classes.  The following "
                "fields are missing: "
                "{0}".format(",".join(sorted(missing_fields)))
            )
        # the actual field values are stored in separate *private* attributes.
        # This computes attribute names that don't conflict with other
        # attributes already present on the class.
        reserved_namespace = set(attrs.keys()).union(
            attr
            for base in bases
            for parent_cls in base.__mro__
            for attr in _get_class_namespace(parent_cls)
        )
        field_attrs = _mk_field_attrs(field_names, reserved_namespace)
        # construct the Meta object to store field information for the class
        meta_namespace = {
            'fields': fields,
            'field_attrs': field_attrs,
            'field_names': field_names,
            'sedes': List(sedes),
        }
        meta_base = attrs.pop('_meta', MetaBase)
        meta = type(
            'Meta',
            (meta_base,),
            meta_namespace,
        )
        attrs['_meta'] = meta
        # construct `property` attributes for read only access to the fields.
        field_props = tuple(
            (field, _mk_field_property(field, attr))
            for field, attr
            in zip(meta.field_names, meta.field_attrs)
        )
        return super_new(
            cls,
            name,
            bases,
            dict(
                field_props +
                tuple(attrs.items())
            ),
        )
class Serializable(BaseSerializable, metaclass=SerializableBase):
    """The base class for serializable objects.

    Subclasses declare a ``fields`` tuple of ``(name, sedes)`` pairs; the
    metaclass turns those into read-only properties and serialization hooks.
    """
    pass
from rlp_cython.exceptions import SerializationError, DeserializationError
from rlp_cython.atomic import Atomic
class Text:
    """A sedes object for encoded text data of certain length.

    :param min_length: the minimal length in encoded characters or `None` for no lower limit
    :param max_length: the maximal length in encoded characters or `None` for no upper limit
    :param allow_empty: if true, empty strings are considered valid even if
                        a minimum length is required otherwise
    """

    def __init__(self, min_length=None, max_length=None, allow_empty=False, encoding='utf8'):
        self.min_length = min_length or 0
        self.max_length = float('inf') if max_length is None else max_length
        self.allow_empty = allow_empty
        self.encoding = encoding

    @classmethod
    def fixed_length(cls, l, allow_empty=False):
        """Create a sedes for text data with exactly `l` encoded characters."""
        return cls(l, l, allow_empty=allow_empty)

    @classmethod
    def is_valid_type(cls, obj):
        return isinstance(obj, str)

    def is_valid_length(self, l):
        within_bounds = self.min_length <= l <= self.max_length
        empty_ok = self.allow_empty and l == 0
        return within_bounds or empty_ok

    def serialize(self, obj):
        """Encode a string of acceptable length to bytes."""
        if not self.is_valid_type(obj):
            raise SerializationError('Object is not a serializable ({})'.format(type(obj)), obj)
        if not self.is_valid_length(len(obj)):
            raise SerializationError('Object has invalid length', obj)
        return obj.encode(self.encoding)

    def deserialize(self, serial, to_list=False):
        """Decode bytes back to text, validating type, encoding and length."""
        if not isinstance(serial, Atomic):
            m = 'Objects of type {} cannot be deserialized'
            raise DeserializationError(m.format(type(serial).__name__), serial)
        try:
            decoded = serial.decode(self.encoding)
        except UnicodeDecodeError as err:
            raise DeserializationError(str(err), serial)
        if not self.is_valid_length(len(decoded)):
            raise DeserializationError('{} has invalid length'.format(type(serial)), serial)
        return decoded
# Default sedes instance accepting text of any length.
text = Text()
from src.rlpe.constants import *
from src.rlpe.Agents.RL_agents import rl_agent
import numpy as np
# helper function for flattening irregular nested tuples
def mixed_flatten(x):
    """Flatten an irregularly nested iterable into a flat list.

    Fix: strings and bytes are treated as atomic values. The previous
    implementation recursed into any object with ``__iter__``, and since a
    one-character string iterates to itself, any string element caused
    infinite recursion.
    """
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, (str, bytes)):
            result.extend(mixed_flatten(el))
        else:
            result.append(el)
    return result
# helper function for making a list of coordinates of interest
# make list of valid coords in environment within dist of the given loc
def get_nearby_coords(env, loc, dist):  # option for later: add an option to change definition of distance
    """List all in-bounds grid coordinates in the (2*dist+1)-square window
    centered on ``loc`` (i.e. within Chebyshev distance ``dist``)."""
    max_row = env.num_rows - 1
    max_col = env.num_columns - 1
    row, col = loc
    return [
        (i, j)
        for i in range(row - dist, row + dist + 1)
        for j in range(col - dist, col + dist + 1)
        if 0 <= i <= max_row and 0 <= j <= max_col
    ]
def sample_anticipated_policy(policy_dict, num_states_in_partial_policy):
    """Randomly sample ``num_states_in_partial_policy`` distinct states from
    the full policy and return the corresponding partial policy dict."""
    # Draw distinct positional indices into the policy dict.
    chosen_positions = set(
        np.random.choice(len(policy_dict), size=num_states_in_partial_policy, replace=False)
    )
    return {
        tuple(state): action
        for position, (state, action) in enumerate(policy_dict.items())
        if position in chosen_positions
    }
def is_interesting_state(state, passenger_origins, passenger_destinations):
    """Return True for a fully-fueled taxi located at the passenger's
    position or destination, with the passenger at a known origin.

    Assumed ``state`` layout (inferred from the indexing below — TODO
    confirm against the environment): [taxi_row, taxi_col, fuel,
    pass_row, pass_col, dest_row, dest_col, ...].
    ``passenger_destinations`` is currently unused (only referenced by the
    commented-out validity check).
    """
    taxi_location = [state[0], state[1]]
    fuel_level = state[2]
    passenger_location = [state[3], state[4]]
    passenger_destination = [state[5], state[6]]
    # NOTE(review): state[6] is already used as the destination column above;
    # passenger_status probably should read state[7] — verify. It is unused
    # in the active code path below, so behavior is unaffected for now.
    passenger_status = state[6]
    # A falsy fuel level (e.g. None) is treated the same as a full tank.
    fuel_is_full = (fuel_level == 100) or (not fuel_level)
    taxi_in_interesting_location = (
            (taxi_location[0] == passenger_location[0] and taxi_location[1] == passenger_location[1]) or (
            taxi_location[0] == passenger_destination[0] and taxi_location[1] == passenger_destination[1]))
    passenger_in_interesting_location = passenger_location in passenger_origins
    # valid_passenger_destination = ((passenger_destination[0] == passenger_location[0]) and (
    #         passenger_destination[1] == passenger_location[1]) and passenger_status > 2) or (
    #                                       passenger_destination[0] != passenger_location[0]) or (
    #                                       passenger_destination[1] != passenger_location[1])
    if fuel_is_full and taxi_in_interesting_location and passenger_in_interesting_location:  # and valid_passenger_destination:
        return True
    return False
def get_possible_passenger_origins(env):
    """Return the environment's predefined passenger pickup locations."""
    return env.passengers_locations
def get_possible_passenger_destinations(env):
    """Return the environment's predefined passenger drop-off locations
    (the same location set as the origins)."""
    return env.passengers_locations
def get_automatic_anticipated_policy_from_agent(env, agent_for_policy_generator, num_of_episodes,
                                                num_states_in_partial_policy):
    """Train an agent on ``env`` and sample a partial (anticipated) policy
    from its learned policy dictionary.
    """
    # create agent
    agent = rl_agent.create_agent(env, agent_for_policy_generator)
    # train the agent in the environment
    train_episode_reward_mean = rl_agent.run(agent, num_of_episodes, method=TRAIN)
    policy_dict = agent.policy_dict
    # Fix: sample_anticipated_policy takes (policy_dict, num_states_in_partial_policy);
    # the previous call also passed `env`, which raised a TypeError.
    automatic_anticipated_policy = sample_anticipated_policy(policy_dict, num_states_in_partial_policy)
    return automatic_anticipated_policy
# RLPipes
<img src="https://rlbase-data.s3.amazonaws.com/misc/assets/whitebgRLPipes+Logo.png" align="right" alt="logo" width="240" style = "border: none; float: right;">
 [](https://codecov.io/gh/Bishop-Laboratory/RLPipes)   
**RLPipes** is an upstream workflow for R-loop-mapping data.
The primary outputs of the pipeline are:
1. Coverage (.bw) tracks
2. Peaks (.broadpeak) files
3. Alignment (.bam) files
4. [RLSeq](https://github.com/Bishop-Laboratory/RLSeq) report (.html and .rda) files
Following RLPipes, the [RLSeq](https://github.com/Bishop-Laboratory/RLSeq) R
package can be used for more fine-grained downstream analysis.
## Install
The preferred installation method is `mamba` or `conda` (slower):
```shell
mamba create -n rlpipes -c bioconda -c conda-forge rlpipes
conda activate rlpipes
```
### Using `pip`
RLPipes can also be installed with `pip`. However, system dependencies will
still need to be installed. To accomplish this, do the following:
```shell
git clone https://github.com/Bishop-Laboratory/RLPipes.git
cd RLPipes/
conda install -c conda-forge mamba -y
mamba env create -f rlpipes.yml --force
conda activate rlpipes
python -m pip install -e .
```
## Basic Usage
To run RLPipes, you will need a `samples.csv` file that describes your samples.
Here is an example file provided for testing purposes:
|experiment|control |
|----------|----------|
|SRX113814 | |
|SRX1025890|SRX1025893|
|SRX1025899| |
The basic usage of RSeq follows a three-step process: **build**, **check** , and **run**.
### **Build**
`RLPipes build` generates a **config.json** file that controls the underlying `snakemake` workflow.
```shell
RLPipes build -m DRIP rlpipes_out/ tests/test_data/samples.csv
```
Output:
```shell
Success! RSeq has been initialized at the specified directory: rlpipes_out/
Run 'RLPipes check rlpipes_out/' to verify the configuration.
```
### **Check**
Verifies that the run will succeed and generates a plot of the workflow jobs.
```shell
RLPipes check rlpipes_out/
```
Output:
```shell
Success! The DAG has been generated successfully. You can view it here: rlpipes_out/dag.png
Run 'RLPipes run rlpipes_out/' to execute the workflow.
```
### **Run**
Executes the workflow rules.
```shell
RLPipes run rlpipes_out/
```
If multiple cores are available, they can be specified using the `--threads/-t` option.
```shell
RLPipes run -t 30 rlpipes_out/
```
## Usage Reference
Top-level usage:
```shell
Usage: RLPipes [OPTIONS] COMMAND [ARGS]...
RSeq: An R-loop mapping pipeline with built-in QC.
Options:
--version Show the version and exit.
--help Show this message and exit.
Commands:
build Configure an RSeq workflow.
check Validate an RSeq workflow.
run Execute an RSeq workflow.
```
### Build
```shell
Usage: RLPipes build [OPTIONS] RUN_DIR SAMPLES
Configure an RLPipes workflow.
RUN_DIR: Directory for RLPipes Execution. Will be created if it does not
exist.
SAMPLES: A CSV file with at least one column "experiment" that provides the
path to either local fastq files, bam files, or public sample accessions
(SRX or GSM). Input controls should be in the "control" column.
If providing paired-end fastq files, enter: "exp_1.fastq~exp_2.fastq".
Columns may also include "genome" and "mode" columns. These will override
the -g, -m, and -n options.
"genome" (-g/--genome) is not required if providing public data accessions.
Example #1: "RLPipes build -m DRIP outdir/ samples.csv"
samples.csv:
experiment
SRX113812
SRX113813
Example #2: "RLPipes build outdir/ samples.csv"
samples.csv:
experiment, control, genome, mode
qDRIP_siGL3_1.fq~qDRIP_siGL3_2.fq, , hg38, qDRIP
DRIPc_3T3.fq, Input_3T3.fq, mm10, DRIPc
Options:
-m, --mode TEXT The type of sequencing (e.g., "DRIP"). The available
options are currently: DRIP, DRIPc, qDRIP, sDRIP, ssDRIP,
R-ChIP, RR-ChIP, RDIP, S1-DRIP, DRIVE, RNH-CnR, and MapR
-g, --genome TEXT UCSC genome for samples (e.g., 'hg38'). Not required if
providing public data accessions.
-n, --name TEXT Sample names for use in output report. By default,
inferred from inputs.
--help Show this message and exit.
```
### Check
```shell
Usage: RLPipes check [OPTIONS] RUN_DIR
Validate an RLPipes workflow.
RUN_DIR: Directory configured with `RLPipes build` and ready for checking
and execution.
Options:
-s, --smargs TEXT Dict of arguments passed to the snakemake python API.
Default: "{'use_conda': True}". Read the snakemake
API reference for the full list of options.
-t, --threads INTEGER Number of threads to use. Default: 1
--bwamem2 Align with BWA-MEM2 instead of BWA. BWA MEM2 Needs >
                        70GB RAM available to build index, but shows > 3x
speed increase. Default: False.
  --macs2               Call peaks using macs2 instead of macs3
-G, --groupby TEXT Column(s) which identify biologically-meaningful
grouping(s) of samples (i.e., conditions). Can be
any column name from the `samples` CSV file. If using
public data accessions, it may also include "study".
NOTE: If --groupby is set and there R-loop-mapping
and expression samples within groups, expression-
matched analysis will be run. This can be disabled
with the --noexp flag.
Example #1: "RSeqCLI build outdir/ samples.csv
--groupcols tissue"
samples.csv:
experiment, mode, tissue
GSM1720615, DRIP, NT2
GSM1720616, DRIP, NT2
GSM1720619, DRIP, K562
Example #2: "RSeqCLI build outdir/ samples.csv
--groupby tissue"
samples.csv:
experiment, mode, tissue
GSM1720615, DRIP, NT2
GSM1720616, DRIP, NT2
GSM1720613, DRIPc, NT2
GSM1720614, DRIPc, NT2
GSM1720622, RNA-seq, NT2
GSM1720623, RNA-seq, NT2
--noexp If set, no expression-matched analysis will be
performed.
--noreport If set, RLSeq reports will not be generated.
--debug Run pipeline on subsampled number of reads (for
testing).
--tsv Obtain config from config.tsv file instead of
config.json.
--noaws If set, prefetch from SRA tools will be used to
download any public SRA data instead of AWS S3.
--help Show this message and exit.
```
### Run
```shell
Usage: RLPipes run [OPTIONS] RUN_DIR
Execute an RLPipes workflow.
RUN_DIR: Directory configured with `RLPipes build` and ready for checking
and execution.
Options:
-s, --smargs TEXT Dict of arguments passed to the snakemake python API.
Default: "{'use_conda': True}". Read the snakemake
API reference for the full list of options.
-t, --threads INTEGER Number of threads to use. Default: 1
--bwamem2 Align with BWA-MEM2 instead of BWA. BWA MEM2 Needs >
                        70GB RAM available to build index, but shows > 3x
speed increase. Default: False.
  --macs2               Call peaks using macs2 instead of macs3
-G, --groupby TEXT Column(s) which identify biologically-meaningful
grouping(s) of samples (i.e., conditions). Can be
any column name from the `samples` CSV file. If using
public data accessions, it may also include "study".
NOTE: If --groupby is set and there R-loop-mapping
and expression samples within groups, expression-
matched analysis will be run. This can be disabled
with the --noexp flag.
Example #1: "RSeqCLI build outdir/ samples.csv
--groupcols tissue"
samples.csv:
experiment, mode, tissue
GSM1720615, DRIP, NT2
GSM1720616, DRIP, NT2
GSM1720619, DRIP, K562
Example #2: "RSeqCLI build outdir/ samples.csv
--groupby tissue"
samples.csv:
experiment, mode, tissue
GSM1720615, DRIP, NT2
GSM1720616, DRIP, NT2
GSM1720613, DRIPc, NT2
GSM1720614, DRIPc, NT2
GSM1720622, RNA-seq, NT2
GSM1720623, RNA-seq, NT2
--noexp If set, no expression-matched analysis will be
performed.
--noreport If set, RLSeq reports will not be generated.
--debug Run pipeline on subsampled number of reads (for
testing).
--tsv Obtain config from config.tsv file instead of
config.json.
--help Show this message and exit.
```
| /rlpipes-0.9.3.tar.gz/rlpipes-0.9.3/README.md | 0.63341 | 0.896569 | README.md | pypi |
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random
import math
import copy
from torch import nn
from collections import namedtuple
from itertools import count
from torch.distributions import Categorical
from tensorboardX import SummaryWriter
# Pairs an action's log-probability with the critic's value estimate for
# the state in which the action was taken (used by the A2C loss).
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class Agent:
def __init__(self,
env,
net,
name="",
learning_rate=3e-4,
optimizer=optim.Adam,
discount=0.99,
eval_episodes_count=100,
logdir='',
dev=None):
global device
device = dev
self.name = name
self.learning_rate = learning_rate # alpha
self.optimizer = optimizer
self.discount = discount # gamma
self.eval_episodes_count = eval_episodes_count # number of episodes for evaluation
self.env = env
self.net = net(self.env.observation_space_n, self.env.action_space_n).to(device)
self.logdir = logdir
def train(self):
writer = SummaryWriter(logdir=self.logdir, comment=f"-{self.name}" if self.name else "")
ep_idx = 1
running_reward = 0
# infinite episodes until threshold is met off
while True:
ep_reward, step_rewards, saved_actions, entropy = self.run_episode()
running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
# returns
returns = self.calculate_returns(step_rewards)
returns = self.standardize_returns(returns)
# optimize policy_net
loss = self.optimize(returns, saved_actions, entropy)
# tensorboard metrics
writer.add_scalar("train/loss", loss, ep_idx)
writer.add_scalar("train/running_reward", running_reward, ep_idx)
# evaluate policy
if ep_idx % 500 == 0:
stop, avg_rewards = self.evaluate_policy(running_reward)
writer.add_scalar("train/avg_rewards", avg_rewards, ep_idx)
if stop:
break
ep_idx = ep_idx + 1
# save model
policy_name = self.name if self.name else "a2c"
torch.save(self.net.state_dict(), f"policies/{policy_name}")
writer.close()
def run_episode(self):
state = self.env.reset()
step_rewards = []
saved_actions = []
entropy = 0
# run a single episode
while True:
# choose an action
action, action_dist, dist_entropy = self.select_action(state, self.env.legal_actions, saved_actions)
# take a step in env
next_state, reward, done, _ = self.env.step(action)
# calculate entropy
entropy += dist_entropy
# accumulate rewards
step_rewards.append(reward)
state = next_state
if done:
return sum(step_rewards), step_rewards, saved_actions, entropy
def select_action(self, state, legal_actions, saved_actions):
mask = torch.zeros(self.env.action_space_n).index_fill(0, torch.LongTensor(legal_actions), 1)
action_dist, value = self.net(torch.Tensor(state).to(device), mask)
m = Categorical(action_dist)
action = m.sample()
saved_actions.append(SavedAction(m.log_prob(action), value.squeeze(0)))
return action.item(), action_dist, m.entropy()
def calculate_returns(self, step_rewards):
R = 0
returns = [] # list to save the true values
# calculate the true value using rewards returned from the environment
for r in step_rewards[::-1]:
# calculate the discounted value
R = r + R * self.discount
returns.insert(0, R)
return returns
def standardize_returns(self, returns):
# smallest positive number such that 1.0 + eps != 1.0
eps = np.finfo(np.float32).eps.item()
returns = torch.tensor(returns)
# calculate z-scores; standardize the distribution
return (returns - returns.mean()) / (returns.std() + eps)
def optimize(self, returns, saved_actions, entropy):
    """
    Calculates actor and critic loss and performs backprop.

    returns: discounted (possibly standardized) returns, one per step.
    saved_actions: SavedAction(log_prob, value) pairs from select_action.
    entropy: accumulated policy entropy, added as an exploration bonus.
    Returns the scalar loss tensor (after backward/step have run).
    """
    policy_losses = []  # list to save actor (policy) loss
    value_losses = []  # list to save critic (value) loss
    for (log_prob, value), R in zip(saved_actions, returns):
        # advantage = observed return minus the critic's estimate
        advantage = R - value.item()
        # calculate actor (policy) loss.
        # scale probabilities by advantage
        policy_losses.append(-log_prob * advantage)
        # calculate critic (value) loss using L1 smooth loss
        value_losses.append(F.smooth_l1_loss(value, torch.tensor([R])))
    # sum up all the values of policy_losses and value_losses;
    # the entropy bonus (weight 0.001) discourages premature convergence
    loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum() + 0.001 * entropy
    # reset gradients
    # NOTE(review): a fresh optimizer is constructed on every call, so stateful
    # optimizers (e.g. Adam moment estimates) restart each step — confirm intended.
    optimizer = self.optimizer(params=self.net.parameters(), lr=self.learning_rate)
    optimizer.zero_grad()
    # perform backprop; compute gradient
    loss.backward()
    # clip gradients element-wise to [-1, 1] for stability
    for param in self.net.parameters():
        param.grad.data.clamp_(-1, 1)
    # update net parameters
    optimizer.step()
    return loss
def evaluate_policy(self, running_reward):
    """Greedily evaluate the current policy over eval_episodes_count episodes.

    running_reward: the exponentially-smoothed training reward, compared
        against the environment's reward threshold.
    Returns (stop, avg_rewards): stop is True when both the running reward
    and the evaluation average reach env.spec.reward_threshold;
    avg_rewards is the mean per-episode reward over the evaluation runs.
    """
    rewards = []
    for _ in range(self.eval_episodes_count):
        state = self.env.reset()
        # removed unused local `ep_reward` (rewards are accumulated in `rewards`)
        while True:
            # mask illegal actions, then act greedily (argmax, no sampling)
            mask = torch.zeros(self.env.action_space_n).index_fill(0, torch.LongTensor(self.env.legal_actions), 1)
            action_dist, value = self.net(torch.Tensor(state).to(device), mask)
            action = torch.argmax(action_dist).item()
            next_state, reward, done, _ = self.env.step(action)
            rewards.append(reward)
            state = next_state
            if done:
                break
    avg_rewards = sum(rewards)/self.eval_episodes_count if self.eval_episodes_count > 0 else 0
    # stop training only once both thresholds are met
    running_reward_achieved = running_reward >= self.env.spec.reward_threshold
    avg_rewards_achieved = avg_rewards >= self.env.spec.reward_threshold
    return running_reward_achieved and avg_rewards_achieved, avg_rewards
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random
import math
import copy
import time
from collections import namedtuple
from itertools import count, compress
from tensorboardX import SummaryWriter
from prop.buffers.priority_replay_buffer import PrioritizedReplayBuffer
# A single environment step. `mask` is the legal-action indicator for `state`;
# `next_state` is None when the step terminated the episode.
Transition = namedtuple("Transition", ("state", "action", "next_state", "reward", "mask"))
class Agent:
def __init__(self,
             env,
             net,
             name="",
             double=True,
             learning_rate=3e-4,
             batch_size=128,
             optimizer=optim.Adam,
             loss_cutoff=0.1,
             max_std_dev=-1,
             epsilon_start=1,
             epsilon_end=0.1,
             epsilon_decay=1000,
             discount=0.99,
             target_net_update=5000,
             eval_episodes_count=1000,
             eval_every=1000,
             replay_buffer=PrioritizedReplayBuffer,
             replay_buffer_capacity=1000000,
             extra_metrics=None,
             logdir=None,
             dev=None):
    """(Double) DQN agent with a prioritized replay buffer.

    env: environment exposing observation_space_n, action_space_n,
        legal_actions, reset(), step() and spec.reward_threshold.
    net: network class; instantiated as net(obs_n, act_n) for both the
        policy network and the target network.
    optimizer: optimizer class (not instance); constructed per optimize() call.
    dev: torch device the networks and tensors are placed on.
    """
    # NOTE(review): the device is stored in a module-level global so helper
    # methods can reference it; constructing a second Agent with a different
    # `dev` overwrites it for all agents in this process.
    global device
    device = dev
    self.name = name
    self.double = double  # double q learning
    self.loss_cutoff = loss_cutoff  # training stops at loss_cutoff
    self.max_std_dev = max_std_dev  # max std deviation allowed to stop training; >= 0 to activate
    self.learning_rate = learning_rate  # alpha
    self.batch_size = batch_size
    self.optimizer = optimizer
    self.epsilon_start = epsilon_start  # start with 100% exploration
    self.epsilon_end = epsilon_end  # end with 10% exploration
    self.epsilon_decay = epsilon_decay  # higher value = slower decay
    self.discount = discount  # gamma
    self.target_net_update = target_net_update  # number of steps to update target network
    self.eval_episodes_count = eval_episodes_count  # number of episodes to evaluate
    self.eval_every = eval_every  # number of steps to run evaluations at
    self.replay_buffer = replay_buffer(replay_buffer_capacity)
    self.env = env
    self.policy_net = net(self.env.observation_space_n, self.env.action_space_n).to(device)  # what drives current actions; uses epsilon.
    self.target_net = net(self.env.observation_space_n, self.env.action_space_n).to(device)  # copied from policy net periodically; greedy.
    self.logdir = logdir
    # NOTE(review): `extra_metrics` is accepted but never stored or used here.
    # init target_net: start as an exact copy of the policy net, in eval mode
    self.target_net.load_state_dict(self.policy_net.state_dict())
    self.target_net.eval()
def train(self):
    """Main training loop.

    Repeats: collect one episode into the replay buffer with the current
    (epsilon-greedy) policy, sample a prioritized batch, optimize the
    policy net, periodically sync the target net and evaluate. Stops once
    recent mean loss, recent mean evaluation reward and (optionally) the
    reward std-dev all meet their thresholds, then saves the policy
    weights under policies/<name>.
    """
    writer = SummaryWriter(logdir=self.logdir, comment=f"-{self.name}" if self.name else "")
    steps = 1
    recent_loss = []
    recent_eval = []
    avg_rewards = 0
    while True:
        # fill replay buffer with one episode from the current policy (epsilon is used)
        self.load_replay_buffer(policy=self.policy_net, steps=steps)
        # sample transitions
        transitions, idxs, is_weights = self.replay_buffer.sample(self.batch_size)
        if len(transitions) < self.batch_size:
            continue
        # optimize policy_net
        loss = self.optimize(transitions, idxs, is_weights)
        # keep track of recent losses and truncate list to latest `eval_every` losses
        recent_loss.append(loss)
        recent_loss = recent_loss[-self.eval_every:]
        # tensorboard metrics
        epsilon = Agent.eps(self.epsilon_start, self.epsilon_end, self.epsilon_decay, steps)
        writer.add_scalar("env/epsilon", epsilon, steps)
        writer.add_scalar("env/replay_buffer", len(self.replay_buffer), steps)
        writer.add_scalar("train/loss", loss, steps)
        # update the target network, copying all weights and biases in policy_net to target_net
        if steps % self.target_net_update == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())
        # run evaluation
        if steps % self.eval_every == 0:
            avg_rewards, stddev = self.evaluate_policy(self.policy_net)
            writer.add_scalar("train/avg_rewards", avg_rewards, steps)
            writer.add_scalar("train/ep_rewards_std", stddev, steps)
            recent_eval.append(avg_rewards)
            recent_eval = recent_eval[-10:]
            # stop only when all three hold: low recent loss, high recent
            # evaluation reward and (if enabled) low reward std-dev
            loss_achieved = sum(recent_loss)/len(recent_loss) <= self.loss_cutoff
            avg_rewards_achieved = sum(recent_eval)/len(recent_eval) >= self.env.spec.reward_threshold
            std_dev_achieved = (self.max_std_dev < 0) or (self.max_std_dev >= 0 and stddev <= self.max_std_dev)
            if loss_achieved and avg_rewards_achieved and std_dev_achieved:
                break
        steps = steps + 1
    # save model
    policy_name = self.name if self.name else "dqn"
    torch.save(self.policy_net.state_dict(), f"policies/{policy_name}")
    writer.close()
@staticmethod
def eps(start, end, decay, steps):
    """Exponentially decayed epsilon: moves from `start` toward `end`.

    A larger `decay` means a slower approach to `end`.
    """
    decay_factor = math.exp(-1. * steps / decay)
    return end + (start - end) * decay_factor
@staticmethod
def legal_actions_to_mask(legal_actions, action_space_n):
    """Return a 0/1 list of length action_space_n with 1 at each legal index."""
    mask = [0 for _ in range(action_space_n)]
    for legal_idx in legal_actions:
        mask[legal_idx] = 1
    return mask
def load_replay_buffer(self, policy=None, episodes_count=1, steps=0):
    """ load replay buffer with episodes_count """
    # `steps` feeds the epsilon schedule used during action selection.
    for eps_idx in range(episodes_count):
        state = self.env.reset()
        while True:
            legal_actions = self.env.legal_actions
            action = self.select_action(
                policy=policy,
                state=state,
                epsilon=True,
                steps=steps,
                legal_actions=legal_actions).item()
            # perform action
            next_state, reward, done, _ = self.env.step(action)
            # insert into replay buffer
            mask = Agent.legal_actions_to_mask(legal_actions, self.env.action_space_n)
            # terminal transitions store next_state=None so the TD target
            # reduces to the immediate reward
            transition = Transition(state, action, next_state if not done else None, reward, mask)
            # set error of new transitions to a very high number so they get
            # sampled; the tree's current total priority serves as that number
            self.replay_buffer.push(self.replay_buffer.tree.total, transition)
            if done:
                break
            else:
                # transition
                state = next_state
def evaluate_policy(self, policy):
    """Run eval_episodes_count greedy episodes.

    Returns (mean, std) of the total reward per episode.
    """
    ep_rewards = []
    for _ in range(self.eval_episodes_count):
        # re-seed from the wall clock so evaluation episodes vary
        self.env.seed(time.time())
        state = self.env.reset()
        ep_reward = 0
        while True:
            legal_actions = self.env.legal_actions
            # greedy action: epsilon=False disables exploration
            action = self.select_action(
                policy=policy,
                state=state,
                epsilon=False,
                legal_actions=legal_actions).item()
            next_state, reward, done, _ = self.env.step(action)
            ep_reward += reward
            if done:
                ep_rewards.append(ep_reward)
                break
            else:
                state = next_state
    return np.mean(ep_rewards), np.std(ep_rewards)
def select_action(self, policy, state, epsilon=False, steps=None, legal_actions=()):
    """
    Select an action: random (among legal actions) with probability given
    by the epsilon schedule when `epsilon` is True, otherwise the greedy
    action produced by `policy` under the legal-action mask.

    Returns a 1x1 long tensor holding the action index.
    Raises ValueError if epsilon is True but steps is None.
    (Default for legal_actions changed from a mutable [] to an immutable
    tuple; behavior for callers is unchanged.)
    """
    if epsilon:
        if steps is None:
            raise ValueError(f"steps must be an integer. Got = {steps}")
        # pick a random number
        sample = random.random()
        # see what the dice rolls
        threshold = Agent.eps(self.epsilon_start, self.epsilon_end, self.epsilon_decay, steps)
        if sample <= threshold:
            # explore: uniformly pick one of the legal action indices.
            # (was range(action_space_n + 1) — an off-by-one; valid action
            # indices are 0 .. action_space_n - 1, as the mask below shows)
            action = random.choice([i for i in range(self.env.action_space_n) if i in legal_actions])
            return torch.tensor([[action]], device=device, dtype=torch.long)
    # greedy action
    with torch.no_grad():
        # index of highest value item returned from policy -> action
        state = torch.Tensor(state).to(device)
        mask = torch.zeros(self.env.action_space_n).index_fill(0, torch.LongTensor(legal_actions), 1)
        return policy(state, mask).argmax().view(1, 1)
def optimize(self, transitions, idxs, is_weights):
    """One gradient step on policy_net from a prioritized batch.

    transitions: sampled Transition tuples.
    idxs: their SumTree indices (needed to update priorities).
    is_weights: importance-sampling weights correcting the sampling bias.
    Returns the importance-weighted mean loss tensor.
    """
    # n transitions -> 1 transition with each attribute containing all the
    # data point values along its axis.
    # e.g. batch.action = list of all actions from each row
    batch = Transition(*zip(*transitions))
    # Compute state action values; the value of each action in batch according
    # to policy_net (feeding it a state and emitting an probability distribution).
    # These are the values that our current network think are right and we want to correct.
    state_action_values = self.state_action_values(batch)
    # compute expected state action values (reward + value of next state according to target_net)
    expected_state_action_values = self.expected_state_action_values(batch)
    # calculate difference between actual and expected action values;
    # reduction='none' keeps per-sample losses for the priority update below
    batch_loss = F.smooth_l1_loss(state_action_values, expected_state_action_values, reduction='none')
    # importance-sampling-weighted mean loss
    loss = (sum(batch_loss * torch.FloatTensor(is_weights).unsqueeze(1))/self.batch_size).squeeze()
    # update each sampled transition's priority to its new TD error
    for i in range(self.batch_size):
        self.replay_buffer.update(idxs[i], batch_loss[i].item())
    # optimizer
    # NOTE(review): a fresh optimizer is constructed on every call, so stateful
    # optimizers (e.g. Adam moment estimates) restart each step — confirm intended.
    optimizer = self.optimizer(params=self.policy_net.parameters(), lr=self.learning_rate)
    optimizer.zero_grad()
    # calculate gradients
    loss.backward()
    for param in self.policy_net.parameters():
        # clip gradients element-wise to [-1, 1]
        param.grad.data.clamp_(-1, 1)
    # optimize policy_net
    optimizer.step()
    return loss
def state_action_values(self, batch):
    """
    Compute Q(s_t, a): run policy_net over the batched states, then pick
    out, per row, the Q-value of the action that was actually taken.
    """
    # lists -> tensors on the target device
    states = torch.Tensor(batch.state).to(device)
    masks = torch.Tensor(batch.mask).to(device)
    actions = torch.Tensor(batch.action).to(device)
    # Q-values for every action of every state in the batch
    all_q_values = self.policy_net(states, masks)
    # select each row's taken-action column
    return all_q_values.gather(1, actions.unsqueeze(1).long())
def expected_state_action_values(self, batch):
    """
    Compute V(s_{t+1}) for all next states.
    Expected values of actions for non_final_next_states are computed based
    on the "older" target_net; selecting their best reward with max(1)[0].
    This is merged based on the mask, such that we'll have either the expected
    state value or 0 in case the state was final.
    """
    # a bool list indicating if next_state is final (s is not None)
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.bool)
    non_final_next_states = torch.Tensor([s for s in batch.next_state if s is not None]).to(device)
    # get legal actions for non final states; (i, v) -> (list of legal actions, non_final_state)
    next_mask = torch.Tensor([i for (i, v) in zip(list(batch.mask), non_final_mask.tolist()) if v]).to(device)
    # initialize next_state_values to zeros; final states keep value 0
    next_state_values = torch.zeros(self.batch_size).to(device)
    if len(non_final_next_states) > 0:
        if self.double:
            # double q learning: get actions from policy_net and get their values according to target_net; decoupling
            # action selection from evaluation reduces the bias imposed by max in single dqn.
            # next_state_actions: action selection according to policy_net; Q(st+1, a)
            next_state_actions = self.policy_net(non_final_next_states, next_mask).max(1)[1].unsqueeze(-1)
            # next_state_values: action evaluation according to target_net; max Q`(st+1, max Q(st+1, a) )
            next_state_values[non_final_mask] = self.target_net(non_final_next_states, next_mask).gather(1, next_state_actions).squeeze(-1)
        else:
            # max Q`(st+1, a)
            next_state_values[non_final_mask] = self.target_net(non_final_next_states, next_mask).max(1)[0].detach()
    # Compute the expected Q values
    # reward + max Q`(st+1, a) * discount
    reward_batch = torch.Tensor([[r] for r in batch.reward]).to(device)
    state_action_values = reward_batch + (next_state_values.unsqueeze(1) * self.discount)
    return state_action_values
import numpy
import random
import numpy as np
# stored as ( s, a, r, s_ ) in SumTree
class PrioritizedReplayBuffer:
    """Proportional prioritized experience replay backed by a SumTree.

    Priorities are (|td_error| + e) ** alpha. Sampling is stratified over
    the cumulative priority mass, and the importance-sampling correction
    exponent beta is annealed toward 1 over time.
    """

    def __init__(self, capacity, alpha=0.6, beta=0.4, beta_increment_per_sampling=0.001, e=0.01):
        self.tree = SumTree(capacity)
        self.alpha = alpha  # (0 - no prioritization, 1 - full prioritization)
        self.beta = beta  # importance sampling exponent; annealed toward 1
        self.beta_increment_per_sampling = beta_increment_per_sampling
        self.e = e  # keeps zero-error transitions at a small non-zero priority

    def _get_priority(self, error):
        """Map a TD error to a sampling priority: (|error| + e) ** alpha."""
        return (np.abs(error) + self.e) ** self.alpha

    def push(self, error, transition):
        """Insert a transition with a priority derived from its TD error."""
        self.tree.add(self._get_priority(error), transition)

    def sample(self, batch_size):
        """Draw batch_size transitions, stratified across the priority mass.

        Returns (transitions, tree indices, importance-sampling weights).
        """
        batch, idxs, priorities = [], [], []
        segment = self.tree.total / batch_size
        # anneal beta toward 1 (full bias correction)
        self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])
        for seg_idx in range(batch_size):
            low = segment * seg_idx
            high = segment * (seg_idx + 1)
            (idx, p, data) = self.tree.get(random.uniform(low, high))
            priorities.append(p)
            batch.append(data)
            idxs.append(idx)
        # tree.total is a numpy scalar, so this broadcasts over the list
        sampling_probabilities = priorities / self.tree.total
        is_weight = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)
        is_weight /= is_weight.max()
        return batch, idxs, is_weight

    def update(self, idx, error):
        """Re-prioritize the entry at tree index idx with a new TD error."""
        self.tree.update(idx, self._get_priority(error))

    def __len__(self):
        return self.tree.n_entries
# SumTree
# a binary tree data structure where the parent’s value is the sum of its children
class SumTree:
    # cursor for the next leaf to (over)write; wraps around at capacity
    write = 0

    def __init__(self, capacity):
        """Create a sum tree able to hold `capacity` (priority, data) pairs."""
        self.capacity = capacity
        # heap-style array: internal nodes first, the `capacity` leaves last
        self.tree = numpy.zeros(2 * capacity - 1)
        self.data = numpy.zeros(capacity, dtype=object)
        self.n_entries = 0

    def _propagate(self, idx, change):
        """Add `change` to every ancestor of node idx, up to the root."""
        parent = (idx - 1) // 2
        self.tree[parent] += change
        if parent != 0:
            self._propagate(parent, change)

    def _retrieve(self, idx, s):
        """Descend from idx to the leaf whose cumulative range contains s."""
        left = 2 * idx + 1
        right = left + 1
        if left >= len(self.tree):
            # reached a leaf
            return idx
        if s <= self.tree[left]:
            return self._retrieve(left, s)
        return self._retrieve(right, s - self.tree[left])

    @property
    def total(self):
        """Sum of all stored priorities (the root node)."""
        return self.tree[0]

    def add(self, p, data):
        """Store `data` with priority p, overwriting the oldest slot when full."""
        leaf = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(leaf, p)
        self.write += 1
        if self.write >= self.capacity:
            self.write = 0
        if self.n_entries < self.capacity:
            self.n_entries += 1

    def update(self, idx, p):
        """Set the priority of tree node idx and propagate the delta upward."""
        delta = p - self.tree[idx]
        self.tree[idx] = p
        self._propagate(idx, delta)

    def get(self, s):
        """Return (tree index, priority, data) for cumulative position s."""
        idx = self._retrieve(0, s)
        return (idx, self.tree[idx], self.data[idx - self.capacity + 1])
.. _make_agent:
.. this is a comment. see http://sphinx-doc.org/rest.html for markup instructions
Creating a New Agent
====================
This tutorial describes the standard RLPy :class:`~rlpy.Agents.Agent.Agent` interface,
and illustrates a brief example of creating a new learning agent.
.. Below taken directly from Agent.py
The Agent receives observations from the Domain and updates the
Representation accordingly.
In a typical Experiment, the Agent interacts with the Domain in discrete
timesteps.
At each Experiment timestep the Agent receives some observations from the Domain
which it uses to update the value function Representation of the Domain
(ie, on each call to its :func:`~rlpy.Agents.Agent.Agent.learn` function).
The Policy is used to select an action to perform.
This process (observe, update, act) repeats until some goal or fail state,
determined by the Domain, is reached. At this point the
Experiment determines
whether the agent starts over or has its current policy tested
(without any exploration).
.. note ::
You may want to review the namespace / inheritance / scoping
`rules in Python <https://docs.python.org/2/tutorial/classes.html>`_.
Requirements
------------
* Each learning agent must be a subclass of :class:`~rlpy.Agents.Agent.Agent`
and call
the :func:`~rlpy.Agents.Agent.Agent.__init__` function of the Agent superclass.
* Accordingly, each Agent must be instantiated with a Representation,
Policy, and Domain in the ``__init__()`` function
* Any randomization that occurs at object construction *MUST* occur in
the :func:`~rlpy.Agents.Agent.Agent.init_randomization` function,
which can be called by ``__init__()``.
* Any random calls should use ``self.random_state``, not ``random()`` or
``np.random()``, as this will ensure consistent seeded results during
experiments.
* After your agent is complete, you should define a unit test to ensure future
revisions do not alter behavior. See rlpy/tests for some examples.
REQUIRED Instance Variables
"""""""""""""""""""""""""""
---
REQUIRED Functions
""""""""""""""""""
:func:`~rlpy.Agents.Agent.Agent.learn` - called on every timestep (see documentation)
.. Note::
The Agent *MUST* call the (inherited) :func:`~rlpy.Agents.Agent.Agent.episodeTerminated`
function after learning if the transition led to a terminal state
(ie, ``learn()`` will return ``isTerminal=True``)
.. Note::
The ``learn()`` function *MUST* call the
:func:`~rlpy.Representations.Representation.Representation.pre_discover`
function at its beginning, and
:func:`~rlpy.Representations.Representation.Representation.post_discover`
at its end. This allows adaptive representations to add new features
(no effect on fixed ones).
Additional Information
----------------------
* As always, the agent can log messages using ``self.logger.info(<str>)``, see
the Python ``logger`` documentation
* You should log values assigned to custom parameters when ``__init__()`` is called.
* See :class:`~rlpy.Agents.Agent.Agent` for functions provided by the superclass.
Example: Creating the ``SARSA0`` Agent
--------------------------------------
In this example, we will create the standard SARSA learning agent (without
eligibility traces (i.e. the λ parameter = 0 always)).
This algorithm first computes the Temporal Difference Error,
essentially the difference between the prediction under the current
value function and what was actually observed
(see e.g. `Sutton and Barto's *Reinforcement Learning* (1998) <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node60.html>`_
or `Wikipedia <http://en.wikipedia.org/wiki/Temporal_difference_learning>`_).
It then updates the representation by summing the current function with
this TD error, weighted by a factor called the *learning rate*.
#. Create a new file in the current working directory, ``SARSA0.py``.
Add the header block at the top::
__copyright__ = "Copyright 2013, RLPy http://www.acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Ray N. Forcement"
from rlpy.Agents.Agent import Agent, DescentAlgorithm
        import numpy as np
#. Declare the class, create needed members variables (here a learning rate),
described above) and write a docstring description::
class SARSA0(DescentAlgorithm, Agent):
"""
Standard SARSA algorithm without eligibility trace (ie lambda=0)
"""
#. Copy the __init__ declaration from ``Agent`` and ``DescentAlgorithm``
in ``Agent.py``, and add needed parameters
(here the initial_learn_rate) and log them. (kwargs is a catch-all for
initialization parameters.) Then call the superclass constructor::
def __init__(self, policy, representation, discount_factor, initial_learn_rate=0.1, **kwargs):
super(SARSA0,self).__init__(policy=policy,
representation=representation, discount_factor=discount_factor, **kwargs)
self.logger.info("Initial learning rate:\t\t%0.2f" % initial_learn_rate)
#. Copy the learn() declaration and implement accordingly.
Here, compute the td-error, and use it to update
the value function estimate (by adjusting feature weights)::
def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):
# The previous state could never be terminal
# (otherwise the episode would have already terminated)
prevStateTerminal = False
# MUST call this at start of learn()
self.representation.pre_discover(s, prevStateTerminal, a, ns, terminal)
# Compute feature function values and next action to be taken
discount_factor = self.discount_factor # 'gamma' in literature
feat_weights = self.representation.weight_vec # Value function, expressed as feature weights
features_s = self.representation.phi(s, prevStateTerminal) # active feats in state
features = self.representation.phi_sa(s, prevStateTerminal, a, features_s) # active features or an (s,a) pair
features_prime_s= self.representation.phi(ns, terminal)
features_prime = self.representation.phi_sa(ns, terminal, na, features_prime_s)
nnz = np.count_nonzero(features_s) # Number of non-zero elements
# Compute td-error
td_error = r + np.dot(discount_factor * features_prime - features, feat_weights)
# Update value function (or if TD-learning diverges, take no action)
if nnz > 0:
feat_weights_old = feat_weights.copy()
feat_weights += self.learn_rate * td_error * features
if not np.all(np.isfinite(feat_weights)):
feat_weights = feat_weights_old
print("WARNING: TD-Learning diverged, theta reached infinity!")
# MUST call this at end of learn() - add new features to representation as required.
expanded = self.representation.post_discover(s, False, a, td_error, features_s)
# MUST call this at end of learn() - handle episode termination cleanup as required.
if terminal:
self.episodeTerminated()
.. note::
You can and should define helper functions in your agents as needed, and
arrange class hierarchy. (See eg TDControlAgent.py)
That's it! Now test the agent by creating a simple settings file on the domain of your choice.
An example experiment is given below:
.. literalinclude:: ../examples/tutorial/SARSA0_example.py
:language: python
:linenos:
What to do next?
----------------
In this Agent tutorial, we have seen how to
* Write a learning agent that inherits from the RLPy base ``Agent`` class
* Add the agent to RLPy and test it
Adding your component to RLPy
"""""""""""""""""""""""""""""
If you would like to add your component to RLPy, we recommend developing on the
development version (see :ref:`devInstall`).
Please use the following header at the top of each file::
__copyright__ = "Copyright 2013, RLPy http://www.acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Tim Beaver"
* Fill in the appropriate ``__author__`` name and ``__credits__`` as needed.
Note that RLPy requires the BSD 3-Clause license.
* If you installed RLPy in a writeable directory, the className of the new
agent can be added to
the ``__init__.py`` file in the ``Agents/`` directory.
(This allows other files to import the new agent).
* If available, please include a link or reference to the publication associated
with this implementation (and note differences, if any).
If you would like to add your new agent to the RLPy project, we recommend
you branch the project and create a pull request to the
`RLPy repository <https://bitbucket.org/rlpy/rlpy>`_.
You can also email the community list ``rlpy@mit.edu`` for comments or
questions. To subscribe `click here <http://mailman.mit.edu/mailman/listinfo/rlpy>`_.
| /rlpy-1.3.8.tar.gz/rlpy-1.3.8/doc/make_agent.rst | 0.961335 | 0.720067 | make_agent.rst | pypi |
.. _tutorial:
Getting Started
===============
This tutorial covers the most common type of experiment in reinforcement
learning: the control experiment. An agent is supposed to find a good policy
while interacting with the domain.
.. note::
If you don't use the developer verion of rlpy but installed the toolbox via pip
you can get the example scripts referenced in this tutorial as follows:
Download the latest RLPy package from https://pypi.python.org/pypi/rlpy and extract
the `examples` folder from the archive. In this folder you find several examples
of how to use RLPy.
First Run
---------
Begin by looking at the file `examples/tutorial/gridworld.py`:
.. literalinclude:: ../examples/tutorial/gridworld.py
:language: python
:linenos:
The file is an example for a reinforcement learning experiment. The main
components of such an experiment are the **domain**, `GridWorld` in this case,
the **agent** (`Q_Learning`), which uses the **policy** `eGreedy` and the
value function **representation** `Tabular`. The **experiment** `Experiment` is
in charge of the execution of the experiment by handling the interaction
between the agent and the domain as well as storing the results on disk (see
also :ref:`overview`).
The function `make_experiment` gets an id, which specifies the random seeds
and a path where the results are stored. It returns an instance of an
`Experiment` which is ready to run. In line 53, such an experiment is created
and then executed in line 54 by calling its `run` method. The three parameters
of `run` control the graphical output. The result are plotted in line 57 and
subsequently stored in line 58.
You can run the file by executing it with the ipython shell from the rlpy
root directory::
ipython examples/tutorial/gridworld.py
.. tip::
We recommend using the IPython shell. Compared to the standard
interpreter it provides color output and better help functions. It is more
comportable to work with in general. See the `Ipython homepage`_ for
details.
.. note::
If you want to use the standard python shell make sure the rlpy root
directory is in the python search path for modules. You can for example
use::
PYTHONPATH=. python examples/tutorial/gridworld.py
.. tip::
You can also use the IPython shell interactively and then run the script
from within the shell. To do this, first start the interactive python shell
with::
ipython
and then inside the ipython shell execute::
%run examples/tutorial/gridworld.py
This will not terminate the interpreter after running the file and allows
you to inspect the objects interactively afterwards (you can exit the shell
with CTRL + D).
.. _Ipython homepage: http://ipython.org
What Happens During a Control Experiment
-----------------------------------------
During an experiment, the agent performs a series of episodes, each of which
consists of a series of steps. Over the course of its lifetime, the agent
performs a total of `max_steps` learning steps, each of which consists of:
1. The agent choses an action given its (exploration) policy
2. The domain transitions to a new state
3. The agent observes the old and new state of the domain as well as the
reward for this transition and improves its policy based on this new
information
To track the performance of the agent, the quality of its current policy is
assessed `num_policy_checks` times during the experiment at uniformly spaced
intervals (and one more time right at the beginning).
At each policy check, the agent is allowed to interact with the domain
in what are called **performance runs**, with `checks_per_policy` runs
occuring in each. (Using these multiple samples helps smooth the resulting
performanace.)
During performance runs, the agent does not do any exploration but always
chooses actions optimal with respect to its value function.
Thus, each step in a performance run consists of:
1. The agent choses an action it thinks is optimal (e.g. greedy w.r.t. its
value function estimate)
2. The domain transitions to a new state
.. note::
No learning happens during performance runs. The total return for
each episode of performance runs is averaged to obtain a quality measure
of the agent's policy.
Graphical Output
----------------
While running the experiment you should see two windows, one showing the domain:
.. image:: gridworld_domain.png
:width: 400px
and one showing the value function:
.. image:: gridworld_valfun.png
:width: 400px
The Domain window is a visual representation of the domain (here, *GridWorld*)
and is useful in quickly judging or demonstrating the performance of an agent.
In this domain, the agent (triangle) has to move
from the start (blue) to the goal (green) location in the shortest distance possible,
while avoiding the pits (red). The agent receives -0.001 reward every step.
When it reaches the goal or a pit, it obtains a reward of +1.0 or -1.0 respectively, and the episode
is terminated.
The value function window shows the value function and the resulting policy. It
is shown because `visualize_learning=True`.
Notice how the policy gradually converges to the optimal, direct route which avoids pits.
After successive iterations, the agent learns the high (green) value of being in
states that lie along the optimal path, even though they offer no immediate reward.
It also learns the low (red) value of unimportant / undesirable states.
The set of possible actions in each grid is highlighted by arrows, where the size of arrows
correspond to the state-action value function :math:`Q(s,a)`.
The best action is shown in black. If the agent has not learned the optimal policy
in some grid cells, it has not explored enough to learn the correct
action. (This often happens in Row 2, Column 1 of this example, where the
correct action is `left`.)
The agent likely still performs well though, since such states do not lie along
the optimal route from the initial state `s0`; they are only rarely reached
either because of :math:`\epsilon`-greedy policy which choses random actions with
probability :math:`\epsilon=0.2`, or noise in the domain which takes a random
action despite the one commanded.
Most domains in RLPy have a visualization like `GridWorld` and often also a
graphical presentation of the policy or value function.
At the end of the experiment another window called *Performance* pops up and
shows a plot of the average return during each policy assessment.
.. image:: gridworld_performance.png
:width: 400px
As we can see the agent learns after about 500 steps to obtain on average a
reward of 0.7. The theoretically optimal reward for a single run is 0.99.
However, the noise in the domain causes the agent to take the commanded
action only 70% of the time (see the domain initialization in line 32);
thus the total reward is correspondingly lower on average.
In fact, the policy learned by the agent after 500 steps is the optimal one.
Console Outputs
---------------
During execution of `examples/tutorial/gridworld.py`, you should see in the
console window output similar to the following::
647: E[0:00:01]-R[0:00:15]: Return=+0.97, Steps=33, Features = 20
1000 >>> E[0:00:04]-R[0:00:37]: Return=+0.99, Steps=11, Features = 20
1810: E[0:00:05]-R[0:00:23]: Return=+0.98, Steps=19, Features = 20
Each part has a specific meaning:
.. image:: rlpy_output.png
:width: 90 %
Lines with `>>>` are the averaged results of a policy assessment.
Results of policy assessments are always shown. The outcome of learning
episodes is shown only every second. You might therefore see no output for
learning episodes if your computer is fast enough to do all learning steps
between two policy assessments in less than one second.
.. note::
Throughout these experiments, if you see error messages similar to:
``rlpy/Tools/transformations.py:1886: UserWarning: failed to import module
_transformations`` you may safely ignore them. They merely reflect that
configuration does not support all features of rlpy.
A Slightly More Challenging Domain: Inverted Pole Balancing
-----------------------------------------------------------
We will now look at how to run experiments in batch and how to analyze and
compare the performance of different methods on the same task. To this end, we
compare different value function representations on the Cart-Pole Balancing task
with an infinite track. The task is to keep a pole balanced upright. The pole
is mounted on a cart which we can either push to the left or right.
The experimental setup is specified in `examples/tutorial/infTrackCartPole_tabular.py` with
a tabular representation and in `examples/tutorial/infTrackCartPole_rbfs.py` with radial
basis functions (RBFs). The content of `infTrackCartPole_rbfs.py` is
.. literalinclude:: ../examples/tutorial/infTrackCartPole_rbfs.py
:language: python
:linenos:
Again, as the first GridWorld example, the main content of the file is a
`make_experiment` function which takes an id, a path and some more optional
parameters and returns an :class:`Experiment.Experiment` instance.
This is the standard format of
an RLPy experiment description and will allow us to run it in parallel on
several cores on one computer or even on a computing cluster with numerous
machines.
The content of `infTrackCartPole_tabular.py` is very similar but
differs in the definition of the representation parameter of the agent.
Compared to our first example,
the experiment is now executed by calling its :func:`Experiments.Experiment.run_from_commandline` method.
This is a wrapper around :func:`Experiments.Experiment.run` and allows to specify the options for
visualization during the execution with command line arguments. You can for
example run::
ipython examples/tutorial/infTrackCartPole_tabular.py -- -l -p
from the command line to run the experiment with visualization of the
performance runs steps, policy and value function.
.. note::
The ``--`` separator is only necessary when executing a script directly at start-up of
IPython. If you use the standard python interpreter or execute the file
from within IPython with `%run` you can omit the ``--``.
.. note::
As learning occurs, execution may appear to slow down; this is merely
because as the agent learns, it is able to balance the pendulum for a
greater number of steps, and so each episode takes longer.
.. image:: pendulum_learning.png
:width: 100 %
The value function (center), which plots pendulum angular rate against its angle, demonstrates
the highly undesirable states of a steeply inclined pendulum (near the horizontal) with high
angular velocity in the direction in which it is falling.
The policy (right) initially appears random, but converges to the shape shown, with distinct
black (counterclockwise torque action) and red (clockwise action) regions in the first and third
quadrants respectively, and a white stripe along the major diagonal between. This makes intuitive
sense; if the pendulum is left of center and/or moving counterclockwise (third quadrant), for example,
a corrective clockwise torque action should certainly be applied. The white stripe in between shows
that no torque should be applied to a balanced pendulum with no angular velocity, or if it lies off-center
but has angular velocity towards the balance point.
If you pass no command line
arguments, no visualization is shown and only the performance graph at the end
is produced. For an explanation of each command line argument type::
ipython examples/tutorial/infTrackCartPole_tabular.py -h
When we run the experiment with the tabular representation, we see that the
pendulum can be balanced sometimes, but not reliably.
In order to properly assess the quality of the learning algorithm using this
representation, we need to average over several independent learning sequences.
This means we need to execute the experiment with different seeds.
Running Experiments in Batch
----------------------------
The module :mod:`Tools.run` provides several functions that are helpful for
running experiments. The most important one is :func:`Tools.run.run`.
It allows us to run a specific experimental setup specified by a
`make_experiment` function in a file with multiple seeds in parallel. For
details see :func:`Tools.run.run`.
You find in `examples/tutorial/run_infTrackCartPole_batch.py` a short script with the
following content:
.. literalinclude:: ../examples/tutorial/run_infTrackCartPole_batch.py
:language: python
:linenos:
This script first runs the infinite track cartpole experiment with radial basis
functions ten times with seeds 1 to 10. Subsequently the same is done for the
experiment with tabular representation. Since we specified
`parallelization=joblib`, the joblib library is used to run the experiment in
parallel on all but one core of your computer.
You can execute this script with::
ipython examples/tutorial/run_infTrackCartPole_batch.py
.. note::
This might take a few minutes depending on your hardware, and you may see
minimal output during this time.
Analyzing Results
-----------------
Running experiments via :func:`Tools.run.run` automatically saves the results
to the specified path. If we run an :class:`Experiments.Experiment` instance
directly, we can store the results on disc with the
:func:`Experiments.Experiment.save` method. The outcomes are then stored in
the directory that is passed during initialization. The filename has the format
`XXX-results.json` where `XXX` is the id / seed of the experiment. The results
are stored in the JSON format that look for example like::
{"learning_steps": [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000],
"terminated": [1.0, 1.0, 1.0, 1.0, 0.9, 0.8, 0.3, 0.3, 0.0, 0.7, 0.0],
"return": [-1.0, -1.0, -1.0, -1.0, -0.9, -0.8, -0.3, -0.3, 0.0, -0.7, 0.0],
"learning_time": [0, 0.31999999999999995, 0.6799999999999998, 1.0099999999999998, 1.5599999999999996, 2.0300000000000002, 2.5300000000000002, 2.95, 3.3699999999999983, 3.7399999999999993, 4.11],
"num_features": [400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400],
"learning_episode": [0, 45, 71, 85, 99, 104, 110, 121, 136, 144, 152],
"discounted_return": [-0.6646429809896579, -0.529605466143065, -0.09102296558580342, -0.2085618862726307, -0.012117452394591856, -0.02237266958836346, -0.012851215851463843, -0.0026252190655709274, 0.0, -0.0647935684347749, 0.0],
"seed": 1,
"steps": [9.0, 14.1, 116.2, 49.3, 355.5, 524.2, 807.1, 822.4, 1000.0, 481.0, 1000.0]}
The measurements of each assessment of the learned policy is stored
sequentially under the corresponding name.
The module :mod:`Tools.results` provides a library of functions and classes that
simplify the analysis and visualization of results. See the the api documentation
for details.
To see the different effect of RBFs and tabular representation on the
performance of the algorithm, we will plot their average return for each policy
assessment. The script saved in `examples/tutorial/plot_result.py` shows us
how:
.. literalinclude:: ../examples/tutorial/plot_result.py
:language: python
:linenos:
First, we specify the directories where the results are
stored and give them a label, here *RBFs* and *Tabular*. Then we create an
instance of :class:`Tools.results.MultiExperimentResults` which loads all
corresponding results and lets us analyze and transform them. In line 7, we plot
the average return of each method over the number of learning steps done so far.
Finally, the plot is saved in `./Results/Tutorial/plot.pdf` in the lossless pdf
format. When we run the script, we get the following plot
.. image:: pendulum_plot.png
:width: 500px
The shaded areas in the plot indicate the standard error of the sampling mean.
We see that with radial basis functions the agent is able to perform perfectly
after 2000 learning steps, but with the tabular representation, it stays at a
level of -0.4 return per episode. Since the value function only matters around
the center (zero angle, zero velocity), radial basis functions can capture the
necessary form there much more easily and therefore speed up the learning
process.
Tuning Hyperparameters
-----------------------
The behavior of each component of an agent can be drastically modified by its
parameters (or hyperparameters, in contrast to the parameters of the value
function that are learned). The module :mod:`Tools.hypersearch` provides tools
for optimizing these parameters to get the best out of the algorithms.
We first need to specify what the hyperparameters for a specific experimental
setup are and what values they can possibly take. We therefore again look at
part of
`examples/tutorial/infTrackCartPole_rbfs.py`
.. literalinclude:: ../examples/tutorial/infTrackCartPole_rbfs.py
:language: python
:linenos:
:lines: 11-30
The variable `param_space` contains the definition of the space of
hyperparameters we are considering. As the `make_experiment` function, the
variable needs to have exactly this name. For details on what this definition has to
look like, we refer to `the documentation of hyperopt`_, the package we are
using for optimizing hyperparameters.
.. _the documentation of hyperopt: http://jaberg.github.io/hyperopt
For each hyperparameter (in this example `num_rbfs`, `resolution`, `lambda_`,
`boyan_N0` and `initial_alpha`), the `make_experiment` function has to have an
optional argument with the same name.
The script saved in `examples/tutorial/run_parametersearch.py` shows us
how to perform a quick search for good parameters
.. literalinclude:: ../examples/tutorial/run_parametersearch.py
:language: python
:linenos:
.. warning:: Running this script might take a while (approx. 5-30 min)
The :func:`Tools.hypersearch.find_hyperparameters` function is the most
important tools for finding good parameters. For details on how to use it see
its api documentation.
During the optimization, the results of an entire experimental run need
to be compressed into one target value. The parameter `objective` controls which
quantity to optimize. In this example, it is *maximize the reward*. We could
just take the return of the policy assessment with the most observations (the
final policy). However, this can lead to artifacts and causes all
hyperparameters that yield the same final performance to be considered equally
good, no matter how fast they reach this performance. Therefore, the target
value is computed as described below.
The target value is the weighted average over all measurements of the desired quantity
(e.g., the average return during each policy assessment).
The weights increase quadratically with the observation number, i.e., the
return achieved in the first policy assessment has weight 1, the second weight
4, then 9, 16, ... This weighting scheme makes the final performance most
important but also takes into account previous ones and therefore makes sure
that the convergence speed is reflected in the optimized value. This weighting
scheme has shown to be very robust in practice.
When we run the search, we obtain the following result:
{'initial_alpha': 0.3414408997566183,
'resolution': 21.0,
'num_rbfs': 6988.0,
'lambda\_': 0.38802888678400627,
'boyan_N0': 5781.602341902433}
.. note::
These parameters are not optimal. To obtain better ones, the number of
evaluations need to be increased to 50 - 100. Also, `trials_per_point=10`
makes the search more reliable. Be aware that 100 evaluations with 10
trials each result in 1000 experiment runs, which can take a very long time.
We can for example save these values by setting the default values in
`make_experiment` accordingly.
What to do next?
----------------
In this introduction, we have seen how to
* run a single experiment with visualizations for getting an intuition of a
domain and an agent
* run experiments in batch in parallel on multiple cores
* analyze and plot the results of experiments
* optimize hyperparameters.
We covered the basic tasks of working with rlpy. You can see more examples of
experiments in the `examples` directory. If you want to implement a new
algorithm or problem, have a look at the api documentation. Contributions to
rlpy of each flavor are always welcome!
Staying Connected
-----------------
Feel free to join the rlpy list, rlpy@mit.edu by `clicking here <http://mailman.mit.edu/mailman/listinfo/rlpy>`_.
This list is intended for open discussion about questions, potential improvements, etc.
.. epigraph::
The only real mistake is the one from which we learn nothing.
-- John Powell
| /rlpy-1.3.8.tar.gz/rlpy-1.3.8/doc/tutorial.rst | 0.945914 | 0.949106 | tutorial.rst | pypi |
.. _make_domain:
.. this is a comment. see http://sphinx-doc.org/rest.html for markup instructions
Creating a New Domain
=====================
This tutorial describes the standard RLPy
:class:`~rlpy.Domains.Domain.Domain` interface,
and illustrates a brief example of creating a new problem domain.
.. Below taken directly from Domain.py
The Domain controls the environment in which the
:class:`~rlpy.Agents.Agent.Agent` resides as well as the reward function the
Agent is subject to.
The Agent interacts with the Domain in discrete timesteps called
*episodes* (see :func:`~rlpy.Domains.Domain.Domain.step`).
At each step, the Agent informs the Domain what indexed action it wants to
perform. The Domain then calculates the effects this action has on the
environment and updates its internal state accordingly.
It also returns the new state (*ns*) to the agent, along with a reward/penalty, (*r*)
and whether or not the episode is over (*terminal*), in which case the agent
is reset to its initial state.
This process repeats until the Domain determines that the Agent has either
completed its goal or failed.
The :py:class:`~rlpy.Experiments.Experiment.Experiment` controls this cycle.
Because Agents are designed to be agnostic to the Domain that they are
acting within and the problem they are trying to solve, the Domain needs
to completely describe everything related to the task. Therefore, the
Domain must not only define the observations that the Agent receives,
but also the states it can be in, the actions that it can perform, and the
relationships between the three.
.. warning::
While each dimension of the state *s* is either *continuous* or *discrete*,
discrete dimensions are assumed to take nonnegative **integer** values
(ie, the index of the discrete state).
.. note ::
You may want to review the namespace / inheritance / scoping
`rules in Python <https://docs.python.org/2/tutorial/classes.html>`_.
Requirements
------------
* Each Domain must be a subclass of
:class:`~rlpy.Domains.Domain.Domain` and call the
:func:`~rlpy.Domains.Domain.Domain.__init__` function of the
Domain superclass.
* Any randomization that occurs at object construction *MUST* occur in
the :func:`~rlpy.Domains.Domain.Domain.init_randomization` function,
which can be called by ``__init__()``.
* Any random calls should use ``self.random_state``, not ``random()`` or
``np.random()``, as this will ensure consistent seeded results during
experiments.
* After your agent is complete, you should define a unit test to ensure future
revisions do not alter behavior. See rlpy/tests/test_domains for some examples.
REQUIRED Instance Variables
"""""""""""""""""""""""""""
The new Domain *MUST* set these variables *BEFORE* calling the
superclass ``__init__()`` function:
#. ``self.statespace_limits`` - Bounds on each dimension of the state space.
Each row corresponds to one dimension and has two elements [min, max].
Used for discretization of continuous dimensions.
#. ``self.continuous_dims`` - array of integers; each element is the index
(eg, row in ``statespace_limits`` above) of a continuous-valued dimension.
This array is empty if all states are discrete.
#. ``self.DimNames`` - array of strings, a name corresponding to each dimension
(eg one for each row in ``statespace_limits`` above)
#. ``self.episodeCap`` - integer, maximum number of steps before an episode
terminated (even if not in a terminal state).
#. ``actions_num`` - integer, the total number of possible actions (ie, the size
of the action space). This number **MUST** be a finite integer - continuous action
spaces are not currently supported.
#. ``discount_factor`` - float, the discount factor (gamma in literature)
by which rewards are reduced.
REQUIRED Functions
""""""""""""""""""
#. :func:`~rlpy.Domains.Domain.Domain.s0`,
(see linked documentation), which returns a (possibly random) state in the
domain, to be used at the start of an *episode*.
#. :func:`~rlpy.Domains.Domain.Domain.step`,
(see linked documentation), which returns the tuple ``(r,ns,terminal, pa)``
that results from taking action *a* from the current state (internal to the Domain).
* *r* is the reward obtained during the transition
* *ns* is the new state after the transition
* *terminal*, a boolean, is true if the new state *ns* is a terminal one to end the episode
* *pa*, an array of possible actions to take from the new state *ns*.
SPECIAL Functions
"""""""""""""""""
In many cases, the Domain will also override the functions:
#. :func:`~rlpy.Domains.Domain.Domain.isTerminal` - returns a boolean whether or
not the current (internal) state is terminal. Default is always return False.
#. :func:`~rlpy.Domains.Domain.Domain.possibleActions` - returns an array of
possible action indices, which often depend on the current state.
Default is to enumerate **every** possible action, regardless of current state.
OPTIONAL Functions
""""""""""""""""""
Optionally, define / override the following functions, used for visualization:
#. :func:`~rlpy.Domains.Domain.Domain.showDomain` - Visualization of domain based
on current internal state and an action, *a*.
Often the header will include an optional argument *s* to display instead
of the current internal state.
RLPy frequently uses `matplotlib <http://matplotlib.org/>`_
to accomplish this - see the example below.
#. :func:`~rlpy.Domains.Domain.Domain.showLearning` - Visualization of the "learning"
obtained so far on this domain, usually a value function plot and policy plot.
See the introductory tutorial for an example on :class:`~rlpy.Domains.Gridworld.GridWorld`
XX expectedStep(), XX
Additional Information
----------------------
* As always, the Domain can log messages using ``self.logger.info(<str>)``, see
Python ``logger`` doc.
* You should log values assigned to custom parameters when ``__init__()`` is called.
* See :class:`~rlpy.Domains.Domain.Domain` for functions
provided by the superclass, especially before defining
helper functions which might be redundant.
Example: Creating the ``ChainMDP`` Domain
-----------------------------------------------------------
In this example we will recreate the simple ``ChainMDP`` Domain, which consists
of *n* states that can only transition to *n-1* or *n+1*:
``s0 <-> s1 <-> ... <-> sn``
The goal is to reach state ``sn`` from ``s0``, after which the episode terminates.
The agent can select from two actions: left [0] and right [1] (it never remains in same state).
But the transitions are noisy, and the opposite of the desired action is taken
instead with some probability.
Note that the optimal policy is to always go right.
#. Create a new file in your current working directory, ``ChainMDPTut.py``.
Add the header block at the top::
__copyright__ = "Copyright 2013, RLPy http://www.acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Ray N. Forcement"
from rlpy.Tools import plt, mpatches, fromAtoB
from rlpy.Domains.Domain import Domain
import numpy as np
#. Declare the class, create needed members variables (here several objects to
be used for visualization and a few domain reward parameters), and write a
docstring description::
class ChainMDPTut(Domain):
"""
Tutorial Domain - nearly identical to ChainMDP.py
"""
#: Reward for each timestep spent in the goal region
GOAL_REWARD = 0
#: Reward for each timestep
STEP_REWARD = -1
#: Set by the domain = min(100,rows*cols)
episodeCap = 0
# Used for graphical normalization
MAX_RETURN = 1
# Used for graphical normalization
MIN_RETURN = 0
# Used for graphical shifting of arrows
SHIFT = .3
#:Used for graphical radius of states
RADIUS = .5
# Stores the graphical pathes for states so that we can later change their colors
circles = None
#: Number of states in the chain
chainSize = 0
# Y values used for drawing circles
Y = 1
#. Copy the __init__ declaration from ``Domain.py``, add needed parameters
(here the number of states in the chain, ``chainSize``), and log them.
Assign ``self.statespace_limits, self.episodeCap, self.continuous_dims, self.DimNames, self.actions_num,``
and ``self.discount_factor``.
Then call the superclass constructor::
def __init__(self, chainSize=2):
"""
:param chainSize: Number of states \'n\' in the chain.
"""
self.chainSize = chainSize
self.start = 0
self.goal = chainSize - 1
self.statespace_limits = np.array([[0,chainSize-1]])
self.episodeCap = 2*chainSize
self.continuous_dims = []
self.DimNames = ['State']
self.actions_num = 2
self.discount_factor = 0.9
super(ChainMDPTut,self).__init__()
#. Copy the ``step()`` and function declaration and implement it accordingly
to return the tuple (r,ns,isTerminal,possibleActions), and similarly for ``s0()``.
We want the agent to always start at state *[0]* to begin, and only achieves reward
and terminates when *s = [n-1]*::
def step(self,a):
s = self.state[0]
if a == 0: #left
ns = max(0,s-1)
if a == 1: #right
ns = min(self.chainSize-1,s+1)
self.state = np.array([ns])
terminal = self.isTerminal()
r = self.GOAL_REWARD if terminal else self.STEP_REWARD
return r, ns, terminal, self.possibleActions()
def s0(self):
self.state = np.array([0])
return self.state, self.isTerminal(), self.possibleActions()
#. In accordance with the above termination condition, override the ``isTerminal()``
function by copying its declaration from ``Domain.py``::
def isTerminal(self):
s = self.state
return (s[0] == self.chainSize - 1)
#. For debugging convenience, demonstration, and entertainment, create a domain
visualization by overriding the default (which is to do nothing).
With matplotlib, generally this involves first performing a check to see if
the figure object needs to be created (and adding objects accordingly),
otherwise merely updating existing plot objects based on the current ``self.state``
and action *a*::
def showDomain(self, a = 0):
#Draw the environment
s = self.state
s = s[0]
if self.circles is None: # We need to draw the figure for the first time
fig = plt.figure(1, (self.chainSize*2, 2))
ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1.)
ax.set_xlim(0, self.chainSize*2)
ax.set_ylim(0, 2)
ax.add_patch(mpatches.Circle((1+2*(self.chainSize-1), self.Y), self.RADIUS*1.1, fc="w")) #Make the last one double circle
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
self.circles = [mpatches.Circle((1+2*i, self.Y), self.RADIUS, fc="w") for i in np.arange(self.chainSize)]
for i in np.arange(self.chainSize):
ax.add_patch(self.circles[i])
if i != self.chainSize-1:
fromAtoB(1+2*i+self.SHIFT,self.Y+self.SHIFT,1+2*(i+1)-self.SHIFT, self.Y+self.SHIFT)
if i != self.chainSize-2: fromAtoB(1+2*(i+1)-self.SHIFT,self.Y-self.SHIFT,1+2*i+self.SHIFT, self.Y-self.SHIFT, 'r')
fromAtoB(.75,self.Y-1.5*self.SHIFT,.75,self.Y+1.5*self.SHIFT,'r',connectionstyle='arc3,rad=-1.2')
plt.show()
[p.set_facecolor('w') for p in self.circles]
self.circles[s].set_facecolor('k')
plt.draw()
.. note::
When first creating a matplotlib figure, you must call pl.show(); when
updating the figure on subsequent steps, use pl.draw().
That's it! Now test it by creating a simple settings file on the domain of your choice.
An example experiment is given below:
.. literalinclude:: ../examples/tutorial/ChainMDPTut_example.py
:language: python
:linenos:
What to do next?
----------------
In this Domain tutorial, we have seen how to
* Write a Domain that inherits from the RLPy base ``Domain`` class
* Override several base functions
* Create a visualization
* Add the Domain to RLPy and test it
Adding your component to RLPy
"""""""""""""""""""""""""""""
If you would like to add your component to RLPy, we recommend developing on the
development version (see :ref:`devInstall`).
Please use the following header template at the top of each file::
__copyright__ = "Copyright 2013, RLPy http://www.acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Tim Beaver"
Fill in the appropriate ``__author__`` name and ``__credits__`` as needed.
Note that RLPy requires the BSD 3-Clause license.
* If you installed RLPy in a writeable directory, the className of the new
domain can be added to
the ``__init__.py`` file in the ``Domains/`` directory.
(This allows other files to import the new domain).
* If available, please include a link or reference to the publication associated
with this implementation (and note differences, if any).
If you would like to add your new domain to the RLPy project, we recommend
you branch the project and create a pull request to the
`RLPy repository <https://bitbucket.org/rlpy/rlpy>`_.
You can also email the community list ``rlpy@mit.edu`` for comments or
questions. To subscribe `click here <http://mailman.mit.edu/mailman/listinfo/rlpy>`_.
| /rlpy-1.3.8.tar.gz/rlpy-1.3.8/doc/make_domain.rst | 0.956695 | 0.695209 | make_domain.rst | pypi |
.. _make_rep:
.. this is a comment. see http://sphinx-doc.org/rest.html for markup instructions
Creating a New Representation
=============================
This tutorial describes the standard RLPy
:class:`~rlpy.Representations.Representation.Representation` interface,
and illustrates a brief example of creating a new value function representation.
.. Below taken directly from Representation.py
The Representation is the approximation of the
value function associated with a :py:class:`~rlpy.Domains.Domain.Domain`,
usually in some lower-dimensional feature space.
The Agent receives observations from the Domain on each step and calls
its :func:`~rlpy.Agents.Agent.Agent.learn` function, which is responsible for updating the
Representation accordingly.
Agents can later query the Representation for the value of being in a state
*V(s)* or the value of taking an action in a particular state
( known as the Q-function, *Q(s,a)* ).
.. note::
At present, it is assumed that the Linear Function approximator
family of representations is being used.
.. note ::
You may want to review the namespace / inheritance / scoping
`rules in Python <https://docs.python.org/2/tutorial/classes.html>`_.
Requirements
------------
* Each Representation must be a subclass of
:class:`~rlpy.Representations.Representation.Representation` and call the
:func:`~rlpy.Representations.Representation.Representation.__init__` function
of the Representation superclass.
* Accordingly, each Representation must be instantiated with
a Domain in the ``__init__()`` function. Note that an optional
``discretization`` parameter may be used by discrete Representations
attempting to represent a value function over a continuous space.
It is ignored for discrete dimensions.
* Any randomization that occurs at object construction *MUST* occur in
the :func:`~rlpy.Representations.Representation.Representation.init_randomization`
function, which can be called by ``__init__()``.
* Any random calls should use ``self.random_state``, not ``random()`` or
``np.random()``, as this will ensure consistent seeded results during experiments.
* After your Representation is complete, you should define a unit test to ensure
future revisions do not alter behavior. See rlpy/tests/test_representations
for some examples.
REQUIRED Instance Variables
"""""""""""""""""""""""""""
The new Representation *MUST* set the variables *BEFORE* calling the
superclass ``__init__()`` function:
#. ``self.isDynamic`` - bool: True if this Representation can add or
remove features during execution
#. ``self.features_num`` - int: The (initial) number of features in the representation
REQUIRED Functions
""""""""""""""""""
The new Representation *MUST* define two functions:
#. :func:`~rlpy.Representations.Representation.Representation.phi_nonTerminal`,
(see linked documentation), which returns a vector of feature function
values associated with a particular state.
#. :func:`~rlpy.Representations.Representation.Representation.featureType`,
(see linked documentation), which returns the data type of the underlying
feature functions (eg "float" or "bool").
SPECIAL Functions
"""""""""""""""""
Representations whose feature functions may change over the course of execution
(termed **adaptive** or **dynamic** Representations) should override
one or both functions below as needed.
Note that ``self.isDynamic`` should = ``True``.
#. :func:`~rlpy.Representations.Representation.Representation.pre_discover`
#. :func:`~rlpy.Representations.Representation.Representation.post_discover`
Additional Information
----------------------
* As always, the Representation can log messages using ``self.logger.info(<str>)``, see
Python ``logger`` doc.
* You should log values assigned to custom parameters when ``__init__()`` is called.
* See :class:`~rlpy.Representations.Representation.Representation` for functions
provided by the superclass, especially before defining
helper functions which might be redundant.
Example: Creating the ``IncrementalTabular`` Representation
-----------------------------------------------------------
In this example we will recreate the simple :class:`~rlpy.Representations.IncrementalTabular.IncrementalTabular` Representation, which
merely creates a binary feature function f\ :sub:`d`\ () that is associated with each
discrete state ``d`` we have encountered so far.
f\ :sub:`d`\ (s) = 1 when *d=s*, 0 elsewhere, ie, the vector of feature
functions evaluated at *s* will have all zero elements except one.
Note that this is identical to the :class:`~rlpy.Representations.Tabular.Tabular`
Representation, except that feature functions are only created as needed, not
instantiated for every single state at the outset.
Though simple, neither the ``Tabular`` nor ``IncrementalTabular`` representations
generalize to nearby
states in the domain, and can be intractable to use on large domains (as there
are as many feature functions as there are states in the entire space).
Continuous dimensions of ``s`` (assumed to be bounded in this Representation)
are discretized.
#. Create a new file in your current working directory, ``IncrTabularTut.py``.
Add the header block at the top::
__copyright__ = "Copyright 2013, RLPy http://www.acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Ray N. Forcement"
from rlpy.Representations.Representation import Representation
import numpy as np
from copy import deepcopy
#. Declare the class, create needed members variables (here an optional hash
table to lookup feature function values previously computed), and write a
docstring description::
class IncrTabularTut(Representation):
"""
Tutorial representation: identical to IncrementalTabular
"""
hash = None
#. Copy the __init__ declaration from ``Representation.py``, add needed parameters
(here none), and log them.
Assign self.features_num and self.isDynamic, then
call the superclass constructor::
def __init__(self, domain, discretization=20):
self.hash = {}
self.features_num = 0
self.isDynamic = True
super(IncrTabularTut, self).__init__(domain, discretization)
#. Copy the ``phi_nonTerminal()`` function declaration and implement it accordingly
to return the vector of feature function values for a given state.
Here, lookup feature function values using self.hashState(s) provided by the
parent class.
Note here that self.hash should always contain hash_id if ``pre_discover()``
is called as required::
def phi_nonTerminal(self, s):
hash_id = self.hashState(s)
id = self.hash.get(hash_id)
F_s = np.zeros(self.features_num, bool)
if id is not None:
F_s[id] = 1
return F_s
#. Copy the ``featureType()`` function declaration and implement it accordingly
to return the datatype returned by each feature function.
Here, feature functions are binary, so the datatype is boolean::
def featureType(self):
return bool
#. Override parent functions as necessary; here we require a ``pre_discover()``
function to populate the hash table for each new encountered state::
def pre_discover(self, s, terminal, a, sn, terminaln):
return self._add_state(s) + self._add_state(sn)
#. Finally, define any needed helper functions::
def _add_state(self, s):
hash_id = self.hashState(s)
id = self.hash.get(hash_id)
if id is None:
#New State
self.features_num += 1
#New id = feature_num - 1
id = self.features_num - 1
self.hash[hash_id] = id
#Add a new element to the feature weight vector
self.addNewWeight()
return 1
return 0
def __deepcopy__(self, memo):
new_copy = IncrTabularTut(self.domain, self.discretization)
new_copy.hash = deepcopy(self.hash)
return new_copy
That's it! Now test your Representation by creating a simple settings file on the domain of your choice.
An example experiment is given below:
.. literalinclude:: ../examples/tutorial/IncrTabularTut_example.py
:language: python
:linenos:
What to do next?
----------------
In this Representation tutorial, we have seen how to
* Write an adaptive Representation that inherits from the RLPy
base ``Representation`` class
* Add the Representation to RLPy and test it
Adding your component to RLPy
"""""""""""""""""""""""""""""
If you would like to add your component to RLPy, we recommend developing on the
development version (see :ref:`devInstall`).
Please use the following header at the top of each file::
__copyright__ = "Copyright 2013, RLPy http://www.acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Tim Beaver"
* Fill in the appropriate ``__author__`` name and ``__credits__`` as needed.
Note that RLPy requires the BSD 3-Clause license.
* If you installed RLPy in a writeable directory, the className of the new
representation can be added to
the ``__init__.py`` file in the ``Representations/`` directory.
(This allows other files to import the new representation).
* If available, please include a link or reference to the publication associated
with this implementation (and note differences, if any).
If you would like to add your new representation to the RLPy project, we recommend
you branch the project and create a pull request to the
`RLPy repository <https://bitbucket.org/rlpy/rlpy>`_.
You can also email the community list ``rlpy@mit.edu`` for comments or
questions. To subscribe `click here <http://mailman.mit.edu/mailman/listinfo/rlpy>`_.
| /rlpy-1.3.8.tar.gz/rlpy-1.3.8/doc/make_rep.rst | 0.948549 | 0.698291 | make_rep.rst | pypi |
import rlpy
import numpy as np
from hyperopt import hp
# Hyperopt search space matching make_experiment's tunable keyword arguments.
param_space = {
    'kernel_resolution': hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold': hp.loguniform("discover_threshold", np.log(1e4), np.log(1e8)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1)),
}


def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=88044.,
        boyan_N0=64502,
        lambda_=0.43982644088,
        initial_learn_rate=0.920244401,
        kernel_resolution=11.6543336229):
    """
    Assemble an rlpy Experiment on BicycleRiding: KernelizediFDD features
    with an L-infinity triangle kernel, eGreedy exploration, and a
    Q-Learning agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (see ``param_space``).
    Returns the configured, unrun Experiment instance.
    """
    domain = rlpy.Domains.BicycleRiding()
    limits = domain.statespace_limits
    # Per-dimension kernel width: extent of each state dimension / resolution.
    kernel_width = (limits[:, 1] - limits[:, 0]) / kernel_resolution
    representation = rlpy.Representations.KernelizediFDD(
        domain,
        sparsify=1,
        kernel=rlpy.Representations.linf_triangle_kernel,
        kernel_args=[kernel_width],
        active_threshold=0.01,
        discover_threshold=discover_threshold,
        normalization=True,
        max_active_base_feat=10,
        max_base_feat_sim=0.5)
    policy = rlpy.Policies.eGreedy(representation, epsilon=0.1)
    agent = rlpy.Agents.Q_Learning(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt = {
        "exp_id": exp_id,
        "path": path,
        "max_steps": 150000,
        "num_policy_checks": 30,
        "checks_per_policy": 1,
        "domain": domain,
        "agent": agent,
    }
    return rlpy.Experiments.Experiment(**opt)


if __name__ == '__main__':
    from rlpy.Tools.run import run_profiled  # kept available for profiling runs
    experiment = make_experiment(1)
    experiment.run(visualize_learning=True,
                   visualize_performance=True)
    experiment.plot()
# experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/bicycle/kifdd_triangle.py | 0.443359 | 0.375449 | kifdd_triangle.py | pypi |
from rlpy.Domains import PuddleWorld
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(3), np.log(100)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e-2),
        np.log(1e1)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(1e-3), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=8.925,
        boyan_N0=840.,
        lambda_=0.5203,
        initial_learn_rate=.7512,
        kernel_resolution=26.4777):
    """
    Build an rlpy Experiment on PuddleWorld: KernelizediFDD features with
    an L-infinity triangle kernel, eGreedy exploration, and a Q-Learning
    agent with Boyan learn-rate decay.

    @param exp_id: experiment id (stored in the Experiment options).
    @param path: output directory template for results.
    @param discover_threshold: feature-discovery threshold for KernelizediFDD.
    @param boyan_N0: N0 constant of the Boyan learn-rate schedule.
    @param lambda_: eligibility-trace decay for Q-Learning.
    @param initial_learn_rate: initial learning rate of the agent.
    @param kernel_resolution: divisor of the per-dimension state-space
        extent; larger values give narrower kernels.
    Defaults are presumably tuned values found over ``param_space`` — confirm.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 40000
    opt["num_policy_checks"] = 20
    opt["checks_per_policy"] = 100
    active_threshold = 0.01
    max_base_feat_sim = 0.5
    sparsify = 1
    domain = PuddleWorld()
    opt["domain"] = domain
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (
        domain.statespace_limits[:,
                                 1] - domain.statespace_limits[:,
                                                               0]) / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=linf_triangle_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=True,
                                    max_active_base_feat=10, max_base_feat_sim=max_base_feat_sim)
    policy = eGreedy(representation, epsilon=0.1)
    # agent = SARSA(representation,policy,domain,initial_learn_rate=1.,
    # lambda_=0., learn_rate_decay_mode="boyan", boyan_N0=100)
    opt["agent"] = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment (id 1) when executed as a script.
if __name__ == '__main__':
    experiment = make_experiment(1)
    experiment.run()
    experiment.save()
experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/puddleworld/kifdd_triangle.py | 0.504639 | 0.351311 | kifdd_triangle.py | pypi |
from rlpy.Domains.PuddleWorld import PuddleGapWorld
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy, UniformRandom
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(3), np.log(100)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e-2),
        np.log(1e1)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(1e-3), np.log(1))}


def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=1e-20,
        boyan_N0=1589.56,
        lambda_=0.52738,
        initial_learn_rate=.0124409,
        kernel_resolution=8.5):
    """
    Build an rlpy Experiment on PuddleGapWorld: KernelizediFDD features
    with a Gaussian kernel (no normalization), eGreedy exploration and a
    Q-Learning agent using the "boyan_const" learn-rate schedule.

    Keyword arguments are the tunable hyperparameters (see ``param_space``).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 100000
    opt["num_policy_checks"] = 10
    opt["checks_per_policy"] = 100
    active_threshold = 0.01
    max_base_feat_sim = 0.7
    sparsify = 10
    domain = PuddleGapWorld()
    opt["domain"] = domain
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (domain.statespace_limits[:, 1]
                    - domain.statespace_limits[:, 0]) / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=gaussian_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=False,
                                    max_active_base_feat=100,
                                    max_base_feat_sim=max_base_feat_sim)
    policy = eGreedy(representation, epsilon=0.1)
    opt["agent"] = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan_const", boyan_N0=boyan_N0)
    return Experiment(**opt)


if __name__ == '__main__':
    experiment = make_experiment(1)
    experiment.run(visualize_performance=-1, visualize_learning=True)
experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/puddleworld/kifdd_gauss_gap.py | 0.529993 | 0.400808 | kifdd_gauss_gap.py | pypi |
from rlpy.Domains import PuddleWorld
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperopt search space matching make_experiment's tunable keyword arguments.
param_space = {
    'kernel_resolution': hp.loguniform("kernel_resolution", np.log(3), np.log(100)),
    'discover_threshold': hp.loguniform("discover_threshold", np.log(1e-2), np.log(1e1)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(1e-3), np.log(1)),
}


def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=.0807,
        boyan_N0=389.56,
        lambda_=0.52738,
        initial_learn_rate=.424409,
        kernel_resolution=8.567677):
    """
    Assemble an rlpy Experiment on PuddleWorld: KernelizediFDD features
    with a Gaussian kernel, eGreedy exploration and a Q-Learning agent
    with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (see ``param_space``).
    Returns the configured, unrun Experiment instance.
    """
    domain = PuddleWorld()
    limits = domain.statespace_limits
    # Per-dimension kernel width: extent of each state dimension / resolution.
    kernel_width = (limits[:, 1] - limits[:, 0]) / kernel_resolution
    representation = KernelizediFDD(
        domain,
        sparsify=1,
        kernel=gaussian_kernel,
        kernel_args=[kernel_width],
        active_threshold=0.01,
        discover_threshold=discover_threshold,
        normalization=True,
        max_active_base_feat=10,
        max_base_feat_sim=0.5)
    policy = eGreedy(representation, epsilon=0.1)
    agent = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt = {
        "path": path,
        "exp_id": exp_id,
        "max_steps": 40000,
        "num_policy_checks": 20,
        "checks_per_policy": 100,
        "domain": domain,
        "agent": agent,
    }
    return Experiment(**opt)


if __name__ == '__main__':
    experiment = make_experiment(1)
    experiment.run()
    experiment.save()
experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/puddleworld/kifdd_gauss.py | 0.50415 | 0.340567 | kifdd_gauss.py | pypi |
__author__ = "William Dabney"
from rlpy.Domains import GridWorld
from rlpy.Agents import Q_Learning
from rlpy.Representations import iFDDK, IndependentDiscretization
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import os
def make_experiment(exp_id=1, path="./Results/Temp"):
    """
    Each file specifying an experimental setup should contain a
    make_experiment function which returns an instance of the Experiment
    class with everything set up.

    Here: GridWorld (4x5 maze, 30% action noise), iFDDK representation on
    top of an IndependentDiscretization, eGreedy policy and a Q-Learning
    agent with Boyan learn-rate decay.

    @param exp_id: number used to seed the random number generators
    @param path: output directory where logs and results are stored
    """
    # Experiment variables
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 100000
    opt["num_policy_checks"] = 10
    # Logging
    # Domain:
    # MAZE = '/Domains/GridWorldMaps/1x3.txt'
    maze = os.path.join(GridWorld.default_map_dir, '4x5.txt')
    domain = GridWorld(maze, noise=0.3)
    opt["domain"] = domain
    # Representation
    discover_threshold = 1.  # relevance threshold for discovering feature conjunctions
    lambda_ = 0.3
    initial_learn_rate = 0.11
    boyan_N0 = 100
    initial_rep = IndependentDiscretization(domain)
    representation = iFDDK(domain, discover_threshold, initial_rep,
                           sparsify=True,
                           useCache=True, lazy=True,
                           lambda_=lambda_)
    # Policy
    policy = eGreedy(representation, epsilon=0.1)
    # Agent
    opt["agent"] = Q_Learning(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment with visualization when executed as a script.
if __name__ == '__main__':
    path = "./Results/Temp/{domain}/{agent}/{representation}/"
    experiment = make_experiment(1, path=path)
    experiment.run(visualize_steps=False,  # should each learning step be shown?
                   visualize_learning=True,  # show value function/policy during learning?
                   visualize_performance=True)  # show performance runs?
    experiment.plot()
    experiment.save()
experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/gridworld/q-ifddk.py | 0.666062 | 0.355048 | q-ifddk.py | pypi |
from rlpy.Domains.FiniteTrackCartPole import FiniteCartPoleBalanceOriginal, FiniteCartPoleBalanceModern
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e-1),
        np.log(1e3)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=.21,
        boyan_N0=37.,
        lambda_=.9,
        initial_learn_rate=.07,
        kernel_resolution=13.14):
    """
    Build an rlpy Experiment on FiniteCartPoleBalanceOriginal:
    KernelizediFDD features with an L-infinity triangle kernel, eGreedy
    exploration and a Q-Learning agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (defaults are
    presumably values found via hyperopt over ``param_space`` — confirm).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 30000
    opt["num_policy_checks"] = 20
    opt["checks_per_policy"] = 10
    active_threshold = 0.01
    max_base_feat_sim = 0.5
    sparsify = 1
    domain = FiniteCartPoleBalanceOriginal(good_reward=0.)
    opt["domain"] = domain
    # domain = FiniteCartPoleBalanceModern()
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \
        / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=linf_triangle_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=True,
                                    max_active_base_feat=10,
                                    max_base_feat_sim=max_base_feat_sim)
    policy = eGreedy(representation, epsilon=0.1)
    # agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
    # lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt["agent"] = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment (id 1) when executed as a script.
if __name__ == '__main__':
    from rlpy.Tools.run import run_profiled
    # run_profiled(make_experiment)
    experiment = make_experiment(1)
    experiment.run(visualize_learning=True)
    experiment.plot()
# experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/cartpole_orig/kifdd_triangle.py | 0.482673 | 0.377426 | kifdd_triangle.py | pypi |
from rlpy.Domains.FiniteTrackCartPole import FiniteCartPoleBalanceOriginal, FiniteCartPoleBalanceModern
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {'discretization': hp.quniform("discretization", 5, 50, 1),
               'discover_threshold':
               hp.loguniform(
                   "discover_threshold",
                   np.log(1e-1),
                   np.log(1e3)),
               'lambda_': hp.uniform("lambda_", 0., 1.),
               'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
               'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=77.,
        boyan_N0=11,
        lambda_=0.9,
        initial_learn_rate=.05,
        discretization=47):
    """
    Build an rlpy Experiment on FiniteCartPoleBalanceOriginal: iFDD
    representation on top of an IndependentDiscretization, eGreedy
    exploration and a Q-Learning agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (defaults are
    presumably values found via hyperopt over ``param_space`` — confirm).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 30000
    opt["num_policy_checks"] = 20
    opt["checks_per_policy"] = 10
    sparsify = 1
    domain = FiniteCartPoleBalanceOriginal(good_reward=0.)
    opt["domain"] = domain
    initial_rep = IndependentDiscretization(
        domain,
        discretization=discretization)
    representation = iFDD(domain, discover_threshold, initial_rep,
                          sparsify=sparsify,
                          discretization=discretization,
                          useCache=True,
                          iFDDPlus=True)
    policy = eGreedy(representation, epsilon=0.1)
    # agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
    # lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt["agent"] = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment (id 1) when executed as a script.
if __name__ == '__main__':
    from rlpy.Tools.run import run_profiled
    # run_profiled(make_experiment)
    experiment = make_experiment(1)
    experiment.run(visualize_learning=False, visualize_performance=True)
    experiment.plot()
# experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/cartpole_orig/ifdd.py | 0.443359 | 0.378402 | ifdd.py | pypi |
from rlpy.Domains.FiniteTrackCartPole import FiniteCartPoleBalanceOriginal, FiniteCartPoleBalanceModern
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e-1),
        np.log(1e3)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=.21,
        boyan_N0=37.,
        lambda_=.9,
        initial_learn_rate=.07,
        kernel_resolution=13.14):
    """
    Build an rlpy Experiment on FiniteCartPoleBalanceOriginal:
    KernelizediFDD features with a Gaussian kernel, eGreedy exploration
    and a Q-Learning agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (defaults are
    presumably values found via hyperopt over ``param_space`` — confirm).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 30000
    opt["num_policy_checks"] = 20
    opt["checks_per_policy"] = 10
    active_threshold = 0.01
    max_base_feat_sim = 0.5
    sparsify = 1
    domain = FiniteCartPoleBalanceOriginal(good_reward=0.)
    opt["domain"] = domain
    # domain = FiniteCartPoleBalanceModern()
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \
        / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=gaussian_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=True,
                                    max_active_base_feat=10,
                                    max_base_feat_sim=max_base_feat_sim)
    policy = eGreedy(representation, epsilon=0.1)
    # agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
    # lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt["agent"] = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment (id 1) when executed as a script.
if __name__ == '__main__':
    from rlpy.Tools.run import run_profiled
    # run_profiled(make_experiment)
    experiment = make_experiment(1)
    experiment.run(visualize_learning=True)
    experiment.plot()
# experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/cartpole_orig/kifdd_gauss.py | 0.489503 | 0.374648 | kifdd_gauss.py | pypi |
from rlpy.Domains import Swimmer
from rlpy.Agents import Q_Learning, SARSA
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Policies.SwimmerPolicy import SwimmerPolicy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e4),
        np.log(1e8)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}


def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=.05,
        boyan_N0=1885.42,
        lambda_=0.5879,
        initial_learn_rate=0.1,
        kernel_resolution=10.7920):
    """
    Build an rlpy Experiment on Swimmer: KernelizediFDD features with a
    Gaussian kernel (no normalization), the domain-specific SwimmerPolicy,
    and a SARSA agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (see ``param_space``).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 1000000
    opt["num_policy_checks"] = 10
    opt["checks_per_policy"] = 1
    active_threshold = 0.05
    max_base_feat_sim = 0.5
    sparsify = 10
    domain = Swimmer()
    opt["domain"] = domain
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \
        / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=gaussian_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=False,
                                    max_active_base_feat=100,
                                    max_base_feat_sim=max_base_feat_sim)
    # NOTE: uses the domain-specific SwimmerPolicy instead of eGreedy exploration.
    policy = SwimmerPolicy(representation)
    opt["agent"] = SARSA(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    return Experiment(**opt)


if __name__ == '__main__':
    from rlpy.Tools.run import run_profiled
    experiment = make_experiment(1)
    experiment.run(visualize_performance=1, visualize_learning=True)
    # Plot per-dimension state visitation counts gathered during learning.
    from rlpy.Tools import plt
    plt.figure()
    for i in range(9):
        plt.plot(experiment.state_counts_learn[i], label="Dim " + str(i))
plt.legend() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/swimmer/kifdd_triangle.py | 0.537041 | 0.386242 | kifdd_triangle.py | pypi |
from rlpy.Domains import InfCartPoleBalance
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperopt search space matching make_experiment's tunable keyword arguments.
param_space = {
    'kernel_resolution': hp.loguniform("kernel_resolution", np.log(3), np.log(100)),
    'discover_threshold': hp.loguniform("discover_threshold", np.log(1e-2), np.log(1e1)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(1e-3), np.log(1)),
}


def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=.02208,
        lambda_=0.6756,
        boyan_N0=480.72,
        initial_learn_rate=.2911,
        kernel_resolution=18.435):
    """
    Assemble an rlpy Experiment on InfCartPoleBalance: KernelizediFDD
    features with an L-infinity triangle kernel, eGreedy exploration and
    a Q-Learning agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (see ``param_space``).
    Returns the configured, unrun Experiment instance.
    """
    domain = InfCartPoleBalance()
    limits = domain.statespace_limits
    # Per-dimension kernel width: extent of each state dimension / resolution.
    kernel_width = (limits[:, 1] - limits[:, 0]) / kernel_resolution
    representation = KernelizediFDD(
        domain,
        sparsify=1,
        kernel=linf_triangle_kernel,
        kernel_args=[kernel_width],
        active_threshold=0.01,
        discover_threshold=discover_threshold,
        normalization=True,
        max_active_base_feat=10,
        max_base_feat_sim=0.5)
    policy = eGreedy(representation, epsilon=0.1)
    agent = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt = {
        "path": path,
        "exp_id": exp_id,
        "max_steps": 10000,
        "num_policy_checks": 20,
        "checks_per_policy": 10,
        "domain": domain,
        "agent": agent,
    }
    return Experiment(**opt)


if __name__ == '__main__':
    experiment = make_experiment(1)
    experiment.run()
experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/cartpole2d/kifdd_triangle.py | 0.5 | 0.331039 | kifdd_triangle.py | pypi |
from rlpy.Domains import InfCartPoleBalance
from rlpy.Agents import Greedy_GQ, SARSA, Q_Learning
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {'discretization': hp.quniform("discretization", 5, 40, 1),
               'discover_threshold':
               hp.loguniform(
                   "discover_threshold",
                   np.log(1e-2),
                   np.log(1e1)),
               #'lambda_': hp.uniform("lambda_", 0., 1.),
               'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
               'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(1e-3), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=0.013461679,
        lambda_=0.,
        boyan_N0=484.78006,
        initial_learn_rate=0.5651405,
        discretization=23.):
    """
    Build an rlpy Experiment on InfCartPoleBalance: iFDD representation
    (iFDD+ variant with kappa-discounted relevance) over an
    IndependentDiscretization, eGreedy exploration and a Greedy_GQ agent
    with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (defaults are
    presumably values found via hyperopt over ``param_space`` — confirm).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 50000
    opt["num_policy_checks"] = 20
    opt["checks_per_policy"] = 10
    sparsify = True
    kappa = 1e-7  # iFDDPlus = 1 - kappa interpolates between iFDD and iFDD+
    domain = InfCartPoleBalance()
    opt["domain"] = domain
    initial_rep = IndependentDiscretization(
        domain,
        discretization=discretization)
    representation = iFDD(domain, discover_threshold, initial_rep,
                          sparsify=sparsify,
                          discretization=discretization,
                          useCache=True,
                          iFDDPlus=1. - kappa)
    policy = eGreedy(representation, epsilon=0.1)
    opt["agent"] = Greedy_GQ(policy, representation,
                             discount_factor=domain.discount_factor,
                             lambda_=lambda_,
                             BetaCoef=1e-6,
                             initial_learn_rate=initial_learn_rate,
                             learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment (id 1) when executed as a script.
if __name__ == '__main__':
    experiment = make_experiment(1)
    experiment.run()
    experiment.save()
experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/cartpole2d/ggq-ifdd.py | 0.449876 | 0.360433 | ggq-ifdd.py | pypi |
from rlpy.Domains import InfCartPoleBalance
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(3), np.log(100)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e-2),
        np.log(1e1)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(1e-3), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=.01356,
        boyan_N0=235.,
        lambda_=0.6596,
        initial_learn_rate=.993,
        kernel_resolution=45.016):
    """
    Build an rlpy Experiment on InfCartPoleBalance: KernelizediFDD
    features with a Gaussian kernel, eGreedy exploration and a Q-Learning
    agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (defaults are
    presumably values found via hyperopt over ``param_space`` — confirm).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 10000
    opt["num_policy_checks"] = 20
    opt["checks_per_policy"] = 10
    active_threshold = 0.01
    max_base_feat_sim = 0.5
    sparsify = 1
    domain = InfCartPoleBalance()
    opt["domain"] = domain
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (
        domain.statespace_limits[:,
                                 1] - domain.statespace_limits[:,
                                                               0]) / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=gaussian_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=True,
                                    max_active_base_feat=10, max_base_feat_sim=max_base_feat_sim)
    policy = eGreedy(representation, epsilon=0.1)
    # agent = SARSA(representation,policy,domain,initial_learn_rate=1.,
    # lambda_=0., learn_rate_decay_mode="boyan", boyan_N0=100)
    opt["agent"] = Q_LEARNING(
        policy, representation,
        discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment (id 1) when executed as a script.
if __name__ == '__main__':
    experiment = make_experiment(1)
    experiment.run()
    experiment.save()
experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/cartpole2d/kifdd_gauss.py | 0.503662 | 0.343865 | kifdd_gauss.py | pypi |
from rlpy.Domains.HIVTreatment import HIVTreatment
from rlpy.Agents import Q_Learning
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e4),
        np.log(1e8)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=611850.81,
        boyan_N0=1885.42,
        lambda_=0.1879,
        initial_learn_rate=0.87831,
        kernel_resolution=14.7920):
    """
    Build an rlpy Experiment on HIVTreatment: KernelizediFDD features
    with an L-infinity triangle kernel, eGreedy exploration and a
    Q-Learning agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (defaults are
    presumably values found via hyperopt over ``param_space`` — confirm).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 150000
    opt["num_policy_checks"] = 30
    opt["checks_per_policy"] = 1
    active_threshold = 0.01
    max_base_feat_sim = 0.5
    sparsify = 1
    domain = HIVTreatment()
    opt["domain"] = domain
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \
        / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=linf_triangle_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=True,
                                    max_active_base_feat=10,
                                    max_base_feat_sim=max_base_feat_sim)
    policy = eGreedy(representation, epsilon=0.1)
    # agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
    # lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt["agent"] = Q_Learning(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment (id 1) when executed as a script.
if __name__ == '__main__':
    from rlpy.Tools.run import run_profiled
    # run_profiled(make_experiment)
    experiment = make_experiment(1)
    experiment.run(visualize_performance=1)
    # experiment.plot()
# experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/hiv/kifdd_triangle.py | 0.459804 | 0.403038 | kifdd_triangle.py | pypi |
from rlpy.Domains.HIVTreatment import HIVTreatment
from rlpy.Agents import Q_Learning
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e4),
        np.log(1e8)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=8948708.75,
        boyan_N0=627.12,
        lambda_=0.5433,
        initial_learn_rate=0.59812,
        kernel_resolution=24.340):
    """
    Build an rlpy Experiment on HIVTreatment: KernelizediFDD features
    with a Gaussian kernel, eGreedy exploration and a Q-Learning agent
    with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (defaults are
    presumably values found via hyperopt over ``param_space`` — confirm).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 150000
    opt["num_policy_checks"] = 30
    opt["checks_per_policy"] = 1
    active_threshold = 0.01
    max_base_feat_sim = 0.5
    sparsify = 1
    domain = HIVTreatment()
    opt["domain"] = domain
    # domain = FiniteCartPoleBalanceModern()
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \
        / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=gaussian_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=True,
                                    max_active_base_feat=10,
                                    max_base_feat_sim=max_base_feat_sim)
    policy = eGreedy(representation, epsilon=0.1)
    # agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
    # lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt["agent"] = Q_Learning(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Profile a single experiment build when executed as a script.
if __name__ == '__main__':
    from rlpy.Tools.run import run_profiled
    run_profiled(make_experiment)
    #experiment = make_experiment(1)
    # experiment.run(visualize_learning=True)
    # experiment.plot()
# experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/hiv/kifdd.py | 0.467818 | 0.394201 | kifdd.py | pypi |
from rlpy.Domains import HelicopterHover
from rlpy.Agents import Q_Learning
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
# Hyperparameter search space for hyperopt-based tuning of this experiment.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e4),
        np.log(1e8)),
    'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=1e6,
        boyan_N0=5e5,
        lambda_=0.5,
        initial_learn_rate=0.9,
        kernel_resolution=10):
    """
    Build an rlpy Experiment on HelicopterHover: KernelizediFDD features
    with an L-infinity triangle kernel, eGreedy exploration and a
    Q-Learning agent with Boyan learn-rate decay.

    Keyword arguments are the tunable hyperparameters (see ``param_space``).
    Returns the configured, unrun Experiment.
    """
    opt = {}
    opt["exp_id"] = exp_id
    opt["path"] = path
    opt["max_steps"] = 10000
    opt["num_policy_checks"] = 30
    opt["checks_per_policy"] = 1
    active_threshold = 0.01
    max_base_feat_sim = 0.5
    sparsify = 1
    domain = HelicopterHover()
    opt["domain"] = domain
    # domain = FiniteCartPoleBalanceModern()
    # Per-dimension kernel width: state-space extent divided by the resolution.
    kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \
        / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=sparsify,
                                    kernel=linf_triangle_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=active_threshold,
                                    discover_threshold=discover_threshold,
                                    normalization=True,
                                    max_active_base_feat=10,
                                    max_base_feat_sim=max_base_feat_sim)
    policy = eGreedy(representation, epsilon=0.1)
    # lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    opt["agent"] = Q_Learning(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
# Profile a single experiment build when executed as a script.
if __name__ == '__main__':
    from rlpy.Tools.run import run_profiled
    run_profiled(make_experiment)
    #experiment = make_experiment(1)
    # experiment.run(visualize_learning=True)
    # experiment.plot()
# experiment.save() | /rlpy-1.3.8.tar.gz/rlpy-1.3.8/examples/heli/kifdd.py | 0.46563 | 0.398699 | kifdd.py | pypi |
from rlpy.Domains.FiniteTrackCartPole import FiniteCartPoleBalanceOriginal, FiniteCartPoleBalanceModern
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperopt search space for tuning; keys match make_experiment's keyword args.
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e-1),
        np.log(1e2)),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
    exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
    discover_threshold=.21,
    boyan_N0=200.,
    initial_learn_rate=.1,
    kernel_resolution=13.14):
    """Build a modern cart-pole balancing experiment: Q-Learning over a
    KernelizediFDD value function with a Gaussian kernel.

    Returns a ready-to-run :class:`Experiment`.
    """
    domain = FiniteCartPoleBalanceModern()
    # Per-dimension kernel widths derived from the state-space extent.
    span = domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]
    kernel_width = span / kernel_resolution
    representation = KernelizediFDD(
        domain,
        sparsify=1,
        kernel=gaussian_kernel,
        kernel_args=[kernel_width],
        active_threshold=0.01,
        discover_threshold=discover_threshold,
        normalization=True,
        max_active_base_feat=10,
        max_base_feat_sim=0.5,
    )
    # Fully greedy policy (no exploration noise).
    policy = eGreedy(representation, epsilon=0.)
    agent = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=0.9, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    return Experiment(
        exp_id=exp_id,
        path=path,
        max_steps=100000,
        num_policy_checks=20,
        checks_per_policy=1,
        domain=domain,
        agent=agent,
    )
if __name__ == '__main__':
    # Run one seed with the learning visualization enabled, then plot.
    experiment = make_experiment(1)
    experiment.run(visualize_learning=True, visualize_performance=False)
    experiment.plot()
    # experiment.save()
from rlpy.Domains import BlocksWorld
from rlpy.Agents import Greedy_GQ
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperopt search space for tuning; keys match make_experiment's keyword args.
param_space = {'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
               'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
    exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
    lambda_=0.,
    boyan_N0=14.44946,
    initial_learn_rate=0.240155681):
    """Build a BlocksWorld experiment: Greedy-GQ over a tile-coding
    representation whose tilings are selected by a hand-chosen 0/1 matrix.

    Returns a ready-to-run :class:`Experiment`.
    """
    domain = BlocksWorld(blocks=6, noise=0.3)
    # Each row picks the subset of the 6 state dimensions joined in one tiling.
    mat = np.matrix("""1 1 1 0 0 0;
                       0 1 1 1 0 0;
                       0 0 1 1 1 0;
                       0 0 0 1 1 1;
                       0 0 1 0 1 1;
                       0 0 1 1 0 1;
                       1 0 1 1 0 0;
                       1 0 1 0 1 0;
                       1 0 0 1 1 0;
                       1 0 0 0 1 1;
                       1 0 1 0 0 1;
                       1 0 0 1 0 1;
                       1 1 0 1 0 0;
                       1 1 0 0 1 0;
                       1 1 0 0 0 1;
                       0 1 0 1 1 0;
                       0 1 0 0 1 1;
                       0 1 0 1 0 1;
                       0 1 1 0 1 0;
                       0 1 1 0 0 1""")
    representation = TileCoding(
        domain,
        memory=2000,
        num_tilings=[1] * mat.shape[0],
        resolution_matrix=mat * 6,
        safety="none",
    )
    policy = eGreedy(representation, epsilon=0.1)
    agent = Greedy_GQ(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    return Experiment(
        exp_id=exp_id,
        path=path,
        max_steps=100000,
        num_policy_checks=20,
        checks_per_policy=1,
        domain=domain,
        agent=agent,
    )
if __name__ == '__main__':
    # Run one seed of the experiment (profiling variant left commented out).
    from rlpy.Tools.run import run_profiled
    # run_profiled(make_experiment)
    experiment = make_experiment(1)
    experiment.run()
    # experiment.plot()
    # experiment.save()
from rlpy.Domains import PST
from rlpy.Agents import Greedy_GQ
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperopt search space for tuning; keys match make_experiment's keyword args.
param_space = { # 'discretization': hp.quniform("discretization", 5, 50, 1),
    'discover_threshold': hp.loguniform("discover_threshold",
                                        np.log(5e1), np.log(1e4)),
    #'lambda_': hp.uniform("lambda_", 0., 1.),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
    exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
    discover_threshold=960.51,
    lambda_=0.,
    boyan_N0=4206.,
    initial_learn_rate=.7457):
    """Build a persistent-surveillance (PST) experiment: Greedy-GQ over an
    iFDD representation grown on top of an independent discretization.

    Returns a ready-to-run :class:`Experiment`.
    """
    domain = PST(NUM_UAV=4)
    base_representation = IndependentDiscretization(domain)
    representation = iFDD(
        domain,
        discover_threshold,
        base_representation,
        sparsify=1,
        # discretization=discretization,
        useCache=True,
        iFDDPlus=1 - 1e-7,
    )
    policy = eGreedy(representation, epsilon=0.1)
    agent = Greedy_GQ(
        policy,
        representation,
        discount_factor=domain.discount_factor,
        BetaCoef=1e-6,
        lambda_=lambda_,
        initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan",
        boyan_N0=boyan_N0,
    )
    return Experiment(
        exp_id=exp_id,
        path=path,
        max_steps=500000,
        num_policy_checks=30,
        checks_per_policy=10,
        domain=domain,
        agent=agent,
    )
if __name__ == '__main__':
    # Profile a single construction/run of the experiment.
    from rlpy.Tools.run import run_profiled
    run_profiled(make_experiment)
    #experiment = make_experiment(1)
    # experiment.run()
    # experiment.plot()
    # experiment.save()
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import json
import os
import numpy as np
import glob
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
def _thousands(x, pos):
    """Render the tick value *x* as a thousands label, e.g. 5000 -> '5k'.

    *pos* is the tick position required by matplotlib's FuncFormatter; it is
    ignored here.
    """
    return f"{x * 1e-3:1.0f}k"
# Matplotlib tick formatter that renders axis values in thousands ('5k').
thousands_formatter = FuncFormatter(_thousands)
#: default labels for result quantities
default_labels = {
    "learning_steps": "Learning Steps",
    "return": "Average Return",
    "discounted_return": "Discounted Return",
    "learning_time": "Computation Time",
}
#: default colors used for plotting
default_colors = ["b", "g", "r", "c", "m", "y", "k", "purple"]
#: default markers used for plotting
default_markers = ["o", "v", "8", "s", "p", "*", "<", "h", "^", "H", "D", ">", "d"]
def thousand_format_xaxis():
    """Apply the thousands formatter ('5k' style) to the current x-axis."""
    axis = plt.gca().xaxis
    axis.set_major_formatter(thousands_formatter)
def load_single(filename):
    """Load one experiment result (a JSON document) from *filename*.

    Returns the parsed object, or ``None`` when the file does not exist.
    """
    if not os.path.exists(filename):
        return None
    with open(filename) as fh:
        return json.load(fh)
def get_all_result_paths(path, min_num=1):
    """
    scan all subdirectories of *path* (including itself) and collect those
    that contain at least *min_num* results.

    Returns the list of directories with results.
    """
    exp_paths = []
    # os.walk yields (dirname, subdirs, files) for every directory under path.
    for dirname, _subdirs, _files in os.walk(path):
        if contains_results(dirname, min_num):
            exp_paths.append(dirname)
    # Bug fix: the original built exp_paths but never returned it, so the
    # function always returned None.
    return exp_paths
def load_results(path):
    """Load every run of the experiment stored in *path*.

    Returns a dictionary mapping each run's seed to its result record.
    """
    pattern = os.path.join(path, "*-results.json")
    run_records = (load_single(fn) for fn in glob.glob(pattern))
    return {record["seed"]: record for record in run_records}
def contains_results(path, min_num=1):
    """Return whether *path* holds at least *min_num* '*-results.json' files."""
    pattern = os.path.join(path, "*-results.json")
    matches = glob.glob(pattern)
    return len(matches) >= min_num
def avg_quantity(results, quantity, pad=False):
    """
    returns the average and standard deviation and number of observations
    over all runs of a certain quantity.
    If pad is true, missing entries for runs with less entries are filled
    with the run's last available value.
    """
    length = max(len(v[quantity]) for v in results.values())
    mean = np.zeros(length)
    std = np.zeros(length)
    num = np.zeros(length, dtype="int")
    # last_values carries each run's most recent observation across indices,
    # which implements the padding behavior.
    last_values = {}
    for i in range(length):
        # First pass: accumulate the mean and count contributing runs.
        for k, v in results.items():
            if len(v[quantity]) > i:
                last_values[k] = v[quantity][i]
                num[i] += 1
            else:
                if pad:
                    # Reuse the run's last observed value and count it.
                    num[i] += 1
                else:
                    # Contribute 0 (no effect on the sum) without counting.
                    last_values[k] = 0.0
            mean[i] += last_values[k]
        if num[i] > 0:
            mean[i] /= num[i]
        # Second pass: accumulate squared deviations for the std.
        # Bug fix: the original incremented num[i] again here, doubling the
        # returned counts and the std divisor (underestimating the std and
        # corrupting SEM computations downstream).
        for k, v in results.items():
            if len(v[quantity]) > i:
                last_values[k] = v[quantity][i]
            elif not pad:
                last_values[k] = 0.0
            std[i] += (last_values[k] - mean[i]) ** 2
        if num[i] > 0:
            std[i] /= num[i]
            std[i] = np.sqrt(std[i])
    return mean, std, num
def first_close_to_final(x, y, min_rel_proximity=0.05):
    """
    returns the chronologically first value of x where
    y was within min_rel_proximity * |y[-1] - y[0]| of
    the final value of y, i.e., y[-1].

    Returns None when no entry gets that close (e.g. when y is constant).
    """
    # Bug fix: the original used (y[-1] - y[0]) without abs(), so for
    # decreasing quantities the threshold was negative and the comparison
    # below could never succeed.
    min_abs_proximity = abs(y[-1] - y[0]) * min_rel_proximity
    final_y = y[-1]
    for xi, yi in zip(x, y):
        if abs(yi - final_y) < min_abs_proximity:
            return xi
    return None
def add_first_close_entries(
    results, new_label="95_time", x="time", y="return", min_rel_proximity=0.05
):
    """
    adds an entry to each result for the x-quantity value at which the
    y-quantity first got within min_rel_proximity of its final value.

    *x* and *y* are quantity id strings indexing each run's record.
    returns nothing as the results are added in place
    """
    for v in results.values():
        # Bug fix: the original passed the label strings x and y themselves
        # to first_close_to_final instead of the per-run series v[x], v[y].
        v[new_label] = first_close_to_final(v[x], v[y], min_rel_proximity)
class MultiExperimentResults(object):
    """provides tools to analyze, compare, load and plot results of several
    different experiments each stored in a separate path"""
    def __init__(self, paths):
        """
        loads the data in paths
        paths is a dictionary which maps labels to directories
        alternatively, paths is a list, then the path itself is considered
        as the label
        """
        self.data = {}
        if isinstance(paths, list):
            # A bare list of paths: use each path string as its own label.
            paths = dict(list(zip(paths, paths)))
        for label, path in paths.items():
            self.data[label] = load_results(path)
    def plot_avg_sem(
        self,
        x,
        y,
        pad_x=False,
        pad_y=False,
        xbars=False,
        ybars=True,
        colors=None,
        markers=None,
        xerror_every=1,
        legend=True,
        **kwargs
    ):
        """
        plots quantity y over x (means and standard error of the mean).
        The quantities are specified by their id strings,
        i.e. "return" or "learning steps"
        ``pad_x, pad_y``: if not enough observations are present for some results,
        should they be filled with the value of the last available obervation?\n
        ``xbars, ybars``: show standard error of the mean for the respective
        quantity colors: dictionary which maps experiment keys to colors.\n
        ``markers``: dictionary which maps experiment keys to markers.
        ``xerror_exery``: show horizontal error bars only every .. observation.\n
        ``legend``: (Boolean) show legend below plot.\n
        Returns the figure handle of the created plot
        """
        # Base line style; caller kwargs override these defaults below.
        style = {"linewidth": 2, "alpha": 0.7, "linestyle": "-", "markersize": 7}
        # Assign colors/markers from the default cycles when not given.
        if colors is None:
            colors = dict(
                [
                    (l, default_colors[i % len(default_colors)])
                    for i, l in enumerate(self.data.keys())
                ]
            )
        if markers is None:
            markers = dict(
                [
                    (l, default_markers[i % len(default_markers)])
                    for i, l in enumerate(self.data.keys())
                ]
            )
        style.update(kwargs)
        # Track the global y-extent across experiments to set the view below.
        min_ = np.inf
        max_ = -np.inf
        fig = plt.figure()
        for label, results in list(self.data.items()):
            style["color"] = colors[label]
            style["marker"] = markers[label]
            # Mean and standard error of the mean for both axes.
            y_mean, y_std, y_num = avg_quantity(results, y, pad_y)
            y_sem = y_std / np.sqrt(y_num)
            x_mean, x_std, x_num = avg_quantity(results, x, pad_x)
            x_sem = x_std / np.sqrt(x_num)
            if xbars:
                plt.errorbar(
                    x_mean,
                    y_mean,
                    xerr=x_sem,
                    label=label,
                    ecolor="k",
                    errorevery=xerror_every,
                    **style
                )
            else:
                plt.plot(x_mean, y_mean, label=label, **style)
            if ybars:
                # Shaded SEM band around the mean curve.
                plt.fill_between(
                    x_mean,
                    y_mean - y_sem,
                    y_mean + y_sem,
                    alpha=0.3,
                    color=style["color"],
                )
                max_ = max(np.max(y_mean + y_sem), max_)
                min_ = min(np.min(y_mean - y_sem), min_)
            else:
                max_ = max(y_mean.max(), max_)
                min_ = min(y_mean.min(), min_)
        # adjust visible space: 10% margin above and below the data extent
        y_lim = [min_ - 0.1 * abs(max_ - min_), max_ + 0.1 * abs(max_ - min_)]
        if min_ != max_:
            plt.ylim(y_lim)
        # axis labels
        xlabel = default_labels[x] if x in default_labels else x
        ylabel = default_labels[y] if y in default_labels else y
        plt.xlabel(xlabel, fontsize=16)
        plt.ylabel(ylabel, fontsize=16)
        if legend:
            # Shrink the axes to make room for a legend below the plot.
            box = plt.gca().get_position()
            plt.gca().set_position(
                [box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8]
            )
            # NOTE(review): legend_handle is not used after creation.
            legend_handle = plt.legend(
                loc="upper center",
                bbox_to_anchor=(0.5, -0.15),
                fancybox=True,
                shadow=True,
                ncol=2,
            )
        return fig
def save_figure(figure, filename):
    """Write *figure* to *filename* with a transparent, tightly-cropped layout."""
    save_opts = {"transparent": True, "pad_inches": 0.1, "bbox_inches": "tight"}
    figure.savefig(filename, **save_opts)
import click
from rlpy.Domains.Domain import Domain
from rlpy.Experiments import Experiment
def get_experiment(
    domain_or_domain_selector,
    agent_selector,
    default_max_steps=1000,
    default_num_policy_checks=10,
    default_checks_per_policy=10,
    other_options=[],
):
    """Build a click command group that wraps an RLPy Experiment.

    ``domain_or_domain_selector`` is either a Domain instance or a callable
    returning one; ``agent_selector`` is a callable building the agent from
    (name, domain, max_steps, seed, **kwargs). ``other_options`` is a list
    of extra ``click.Option`` objects attached to the group.

    NOTE(review): the mutable default ``other_options=[]`` is safe here only
    because this function never mutates it, merely iterates.

    Returns the click group (with a ``train`` subcommand) ready to invoke.
    """
    @click.group()
    @click.option(
        "--agent", type=str, default=None, help="The name of agent you want to run"
    )
    @click.option("--seed", type=int, default=1, help="The problem to learn")
    @click.option(
        "--max-steps",
        type=int,
        default=default_max_steps,
        help="Total number of interactions",
    )
    @click.option(
        "--num-policy-checks",
        type=int,
        default=default_num_policy_checks,
        help="Total number of evaluation time",
    )
    @click.option(
        "--checks-per-policy",
        type=int,
        default=default_checks_per_policy,
        help="Number of evaluation per 1 evaluation time",
    )
    @click.option("--log-interval", type=int, default=10, help="Number of seconds")
    @click.option(
        "--log-dir",
        type=str,
        default="Results/Temp",
        help="The directory to be used for storing the logs",
    )
    @click.pass_context
    def experiment(
        ctx,
        agent,
        seed,
        max_steps,
        num_policy_checks,
        checks_per_policy,
        log_interval,
        log_dir,
        **kwargs,
    ):
        # Group callback: build domain and agent, stash the Experiment in
        # ctx.obj so subcommands (train) can retrieve it.
        if isinstance(domain_or_domain_selector, Domain):
            domain = domain_or_domain_selector
        else:
            domain = domain_or_domain_selector(**kwargs)
        agent = agent_selector(agent, domain, max_steps, seed, **kwargs)
        ctx.obj["experiment"] = Experiment(
            agent,
            domain,
            exp_id=seed,
            max_steps=max_steps,
            num_policy_checks=num_policy_checks,
            checks_per_policy=checks_per_policy,
            log_interval=log_interval,
            path=log_dir,
            **kwargs,
        )
    # Attach any caller-supplied CLI options to the group itself.
    for opt in other_options:
        if not isinstance(opt, click.Option):
            raise ValueError("Every item of agent_options must be click.Option!")
        experiment.params.append(opt)
    @experiment.command(help="Train the agent")
    @click.option(
        "--visualize-performance",
        "--show-performance",
        "-VP",
        default=0,
        type=int,
        help="The number of visualization steps during performance runs",
    )
    @click.option(
        "--visualize-learning",
        "--show-learning",
        "-VL",
        is_flag=True,
        help="Visualize of the learning status before each evaluation",
    )
    @click.option(
        "--visualize-steps",
        "--show-steps",
        "-VS",
        is_flag=True,
        help="Visualize all steps during learning",
    )
    @click.option("--plot-save", is_flag=True, help="Save the result figure")
    @click.option("--plot-show", is_flag=True, help="Show the result figure")
    @click.pass_context
    def train(
        ctx,
        visualize_performance,
        visualize_learning,
        visualize_steps,
        plot_save,
        plot_show,
    ):
        # Run the experiment built by the group callback, optionally plotting.
        exp = ctx.obj["experiment"]
        exp.run(visualize_performance, visualize_learning, visualize_steps)
        if plot_save or plot_show:
            exp.plot(save=plot_save, show=plot_show)
        exp.save()
    return experiment
def run_experiment(*args, **kwargs):
    """Build the CLI via :func:`get_experiment` and invoke it immediately."""
    cli = get_experiment(*args, **kwargs)
    cli(obj={})
from .MDPSolver import MDPSolver
from rlpy.Tools import hhmmss, deltaT, className, clock, l_norm
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class ValueIteration(MDPSolver):
    """Value Iteration MDP Solver.
    Args:
        job_id (int): Job ID number used for running multiple jobs on a cluster.
        representation (Representation): Representation used for the value function.
        domain (Domain): Domain (MDP) to solve.
        planning_time (int): Maximum amount of time in seconds allowed for planning. Defaults to inf (unlimited).
        convergence_threshold (float): Threshold for determining if the value function has converged.
        ns_samples (int): How many samples of the successor states to take.
        project_path (str): Output path for saving the results of running the MDPSolver on a domain.
        log_interval (int): Minimum number of seconds between displaying logged information.
        show (bool): Enable visualization?
    .. warning::
        THE CURRENT IMPLEMENTATION ASSUMES *DETERMINISTIC* TRANSITIONS:
        In other words, in each iteration, from each state, we only sample
        each possible action **once**. \n
        For stochastic domains, it is necessary to sample multiple times and
        use the average.
    """
    def solve(self):
        """Solve the domain MDP."""
        self.start_time = clock()  # Used to show the total time took the process
        bellmanUpdates = 0  # used to track the performance improvement.
        converged = False
        iteration = 0
        # Check for Tabular Representation
        if not self.IsTabularRepresentation():
            self.logger.error(
                "Value Iteration works only with a tabular representation."
            )
            return 0
        no_of_states = self.representation.agg_states_num
        # Full state-space sweeps until the weight vector stops changing
        # (L-infinity norm below the convergence threshold) or time runs out.
        while self.hasTime() and not converged:
            iteration += 1
            # Store the weight vector for comparison
            prev_weight_vec = self.representation.weight_vec.copy()
            # Sweep The State Space
            for i in range(no_of_states):
                s = self.representation.stateID2state(i)
                # Sweep through possible actions
                for a in self.domain.possibleActions(s):
                    # Check for available planning time
                    if not self.hasTime():
                        break
                    self.BellmanBackup(s, a, ns_samples=self.ns_samples)
                    bellmanUpdates += 1
                    # Create Log
                    if bellmanUpdates % self.log_interval == 0:
                        performance_return, _, _, _ = self.performanceRun()
                        self.logger.info(
                            "[%s]: BellmanUpdates=%d, Return=%0.4f"
                            % (
                                hhmmss(deltaT(self.start_time)),
                                bellmanUpdates,
                                performance_return,
                            )
                        )
            # check for convergence
            weight_vec_change = l_norm(
                prev_weight_vec - self.representation.weight_vec, np.inf
            )
            converged = weight_vec_change < self.convergence_threshold
            # log the stats
            # NOTE(review): log prefix says "PI" although this is value iteration.
            performance_return, performance_steps, performance_term, performance_discounted_return = (
                self.performanceRun()
            )
            self.logger.info(
                "PI #%d [%s]: BellmanUpdates=%d, ||delta-weight_vec||=%0.4f, Return=%0.4f, Steps=%d"
                % (
                    iteration,
                    hhmmss(deltaT(self.start_time)),
                    bellmanUpdates,
                    weight_vec_change,
                    performance_return,
                    performance_steps,
                )
            )
            # Show the domain and value function
            # NOTE(review): `a` and `s` here are whatever the final sweep
            # iteration left behind.
            if self.show:
                self.domain.show(a, s=s, representation=self.representation)
            # store stats
            self.result["bellman_updates"].append(bellmanUpdates)
            self.result["return"].append(performance_return)
            self.result["planning_time"].append(deltaT(self.start_time))
            self.result["num_features"].append(self.representation.features_num)
            self.result["steps"].append(performance_steps)
            self.result["terminated"].append(performance_term)
            self.result["discounted_return"].append(performance_discounted_return)
            self.result["iteration"].append(iteration)
        if converged:
            self.logger.info("Converged!")
        super(ValueIteration, self).solve()
from .MDPSolver import MDPSolver
from rlpy.Tools import className, deltaT, hhmmss, clock, l_norm
from copy import deepcopy
from rlpy.Policies import eGreedy
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class PolicyIteration(MDPSolver):
    """
    Policy Iteration MDP Solver.
    Args:
        job_id (int): Job ID number used for running multiple jobs on a cluster.
        representation (Representation): Representation used for the value function.
        domain (Domain): Domain (MDP) to solve.
        planning_time (int): Maximum amount of time in seconds allowed for planning. Defaults to inf (unlimited).
        convergence_threshold (float): Threshold for determining if the value function has converged.
        ns_samples (int): How many samples of the successor states to take.
        project_path (str): Output path for saving the results of running the MDPSolver on a domain.
        log_interval (int): Minimum number of seconds between displaying logged information.
        show (bool): Enable visualization?
        max_PE_iterations (int): Maximum number of Policy evaluation iterations to run.
    """
    def __init__(
        self,
        job_id,
        representation,
        domain,
        planning_time=np.inf,
        convergence_threshold=0.005,
        ns_samples=100,
        project_path=".",
        log_interval=5000,
        show=False,
        max_PE_iterations=10,
    ):
        super(PolicyIteration, self).__init__(
            job_id,
            representation,
            domain,
            planning_time,
            convergence_threshold,
            ns_samples,
            project_path,
            log_interval,
            show,
        )
        self.max_PE_iterations = max_PE_iterations
        # Running count of Bellman backups across evaluation and improvement.
        self.bellmanUpdates = 0
        self.logger.info("Max PE Iterations:\t%d" % self.max_PE_iterations)
    def policyEvaluation(self, policy):
        """
        Evaluate a given policy: this is done by applying the Bellman backup over all states until the change is less than
        a given threshold.
        Returns: convergence status as a boolean
        """
        converged = False
        policy_evaluation_iteration = 0
        while (
            not converged
            and self.hasTime()
            and policy_evaluation_iteration < self.max_PE_iterations
        ):
            policy_evaluation_iteration += 1
            # Sweep The State Space
            for i in range(0, self.representation.agg_states_num):
                # Check for solver time
                if not self.hasTime():
                    break
                # Map an state ID to state
                s = self.representation.stateID2state(i)
                # Skip terminal states and states with no possible action
                possible_actions = self.domain.possibleActions(s=s)
                if self.domain.isTerminal(s) or len(possible_actions) == 0:
                    continue
                # Apply Bellman Backup
                self.BellmanBackup(
                    s, policy.pi(s, False, possible_actions), self.ns_samples, policy
                )
                # Update number of backups
                self.bellmanUpdates += 1
                # Check for the performance
                if self.bellmanUpdates % self.log_interval == 0:
                    performance_return = self.performanceRun()[0]
                    self.logger.info(
                        "[%s]: BellmanUpdates=%d, Return=%0.4f"
                        % (
                            hhmmss(deltaT(self.start_time)),
                            self.bellmanUpdates,
                            performance_return,
                        )
                    )
            # check for convergence: L_infinity norm of the difference between the to the weight vector of representation
            # (the policy holds its own copy of the representation weights)
            weight_vec_change = l_norm(
                policy.representation.weight_vec - self.representation.weight_vec,
                np.inf,
            )
            converged = weight_vec_change < self.convergence_threshold
            # Log Status
            self.logger.info(
                "PE #%d [%s]: BellmanUpdates=%d, ||delta-weight_vec||=%0.4f"
                % (
                    policy_evaluation_iteration,
                    hhmmss(deltaT(self.start_time)),
                    self.bellmanUpdates,
                    weight_vec_change,
                )
            )
            # Show Plots
            if self.show:
                self.domain.show(
                    policy.pi(s, False, possible_actions), self.representation, s=s
                )
        return converged
    def policyImprovement(self, policy):
        """ Given a policy improve it by taking the greedy action in each state based on the value function
        Returns the new policy
        """
        policyChanges = 0
        i = 0
        while i < self.representation.agg_states_num and self.hasTime():
            s = self.representation.stateID2state(i)
            if not self.domain.isTerminal(s) and len(self.domain.possibleActions(s)):
                for a in self.domain.possibleActions(s):
                    if not self.hasTime():
                        break
                    self.BellmanBackup(s, a, self.ns_samples, policy)
                # Count states whose greedy action differs from the policy's.
                if policy.pi(
                    s, False, self.domain.possibleActions(s=s)
                ) != self.representation.bestAction(
                    s, False, self.domain.possibleActions(s=s)
                ):
                    policyChanges += 1
            i += 1
        # This will cause the policy to be copied over
        policy.representation.weight_vec = self.representation.weight_vec.copy()
        performance_return, performance_steps, performance_term, performance_discounted_return = (
            self.performanceRun()
        )
        self.logger.info(
            "PI #%d [%s]: BellmanUpdates=%d, Policy Change=%d, Return=%0.4f, Steps=%d"
            % (
                self.policy_improvement_iteration,
                hhmmss(deltaT(self.start_time)),
                self.bellmanUpdates,
                policyChanges,
                performance_return,
                performance_steps,
            )
        )
        # store stats
        self.result["bellman_updates"].append(self.bellmanUpdates)
        self.result["return"].append(performance_return)
        self.result["planning_time"].append(deltaT(self.start_time))
        self.result["num_features"].append(self.representation.features_num)
        self.result["steps"].append(performance_steps)
        self.result["terminated"].append(performance_term)
        self.result["discounted_return"].append(performance_discounted_return)
        self.result["policy_improvement_iteration"].append(
            self.policy_improvement_iteration
        )
        return policy, policyChanges
    def solve(self):
        """Solve the domain MDP."""
        self.bellmanUpdates = 0
        self.policy_improvement_iteration = 0
        self.start_time = clock()
        # Check for Tabular Representation
        if not self.IsTabularRepresentation():
            self.logger.error(
                "Policy Iteration works only with a tabular representation."
            )
            return 0
        # Initialize the policy
        policy = eGreedy(
            deepcopy(self.representation), epsilon=0, deterministic=True
        )  # Copy the representation so that the weight change during the evaluation does not change the policy
        # Setup the number of policy changes to 1 so the while loop starts
        policyChanges = True
        while policyChanges and deltaT(self.start_time) < self.planning_time:
            # Evaluate the policy
            # NOTE(review): the convergence flag is not used to stop the outer
            # loop; termination relies on policyChanges and the time budget.
            converged = self.policyEvaluation(policy)
            # Improve the policy
            self.policy_improvement_iteration += 1
            policy, policyChanges = self.policyImprovement(policy)
        super(PolicyIteration, self).solve()
from .MDPSolver import MDPSolver
from rlpy.Tools import deltaT, hhmmss, randSet, className, clock
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class TrajectoryBasedValueIteration(MDPSolver):
    """Trajectory Based Value Iteration MDP Solver.
    Args:
        job_id (int): Job ID number used for running multiple jobs on a cluster.
        representation (Representation): Representation used for the value function.
        domain (Domain): Domain (MDP) to solve.
        planning_time (int): Maximum amount of time in seconds allowed for planning. Defaults to inf (unlimited).
        convergence_threshold (float): Threshold for determining if the value function has converged.
        ns_samples (int): How many samples of the successor states to take.
        project_path (str): Output path for saving the results of running the MDPSolver on a domain.
        log_interval (int): Minimum number of seconds between displaying logged information.
        show (bool): Enable visualization?
        epsilon (float): Probability of taking a random action during each decision making.
    """
    # Probability of taking a random action during each decision making
    epsilon = None
    # step size parameter to adjust the weights. If the representation is
    # tabular you can set this to 1.
    alpha = 0.1
    # Minimum number of trajectories required for convergence in which the max
    # bellman error was below the threshold
    MIN_CONVERGED_TRAJECTORIES = 5
    def __init__(
        self,
        job_id,
        representation,
        domain,
        planning_time=np.inf,
        convergence_threshold=0.005,
        ns_samples=100,
        project_path=".",
        log_interval=500,
        show=False,
        epsilon=0.1,
    ):
        super(TrajectoryBasedValueIteration, self).__init__(
            job_id,
            representation,
            domain,
            planning_time,
            convergence_threshold,
            ns_samples,
            project_path,
            log_interval,
            show,
        )
        self.epsilon = epsilon
        # Tabular representations admit a full step size of 1.
        if className(representation) == "Tabular":
            self.alpha = 1
    def solve(self):
        """Solve the domain MDP."""
        # Used to show the total time took the process
        self.start_time = clock()
        bellmanUpdates = 0
        converged = False
        iteration = 0
        # Track the number of consequent trajectories with very small observed
        # BellmanError
        converged_trajectories = 0
        while self.hasTime() and not converged:
            # Generate a new episode e-greedy with the current values
            max_Bellman_Error = 0
            step = 0
            terminal = False
            s, terminal, p_actions = self.domain.s0()
            # e-greedy action selection on the current value estimates.
            a = (
                self.representation.bestAction(s, terminal, p_actions)
                if np.random.rand() > self.epsilon
                else randSet(p_actions)
            )
            while not terminal and step < self.domain.episodeCap and self.hasTime():
                # One-step lookahead target vs. current estimate.
                new_Q = self.representation.Q_oneStepLookAhead(s, a, self.ns_samples)
                phi_s = self.representation.phi(s, terminal)
                phi_s_a = self.representation.phi_sa(s, terminal, a, phi_s)
                old_Q = np.dot(phi_s_a, self.representation.weight_vec)
                bellman_error = new_Q - old_Q
                # print s, old_Q, new_Q, bellman_error
                self.representation.weight_vec += self.alpha * bellman_error * phi_s_a
                bellmanUpdates += 1
                step += 1
                # Discover features if the representation has the discover method
                discover_func = getattr(
                    self.representation, "discover", None
                )  # None is the default value if the discover is not an attribute
                if discover_func and callable(discover_func):
                    self.representation.discover(phi_s, bellman_error)
                max_Bellman_Error = max(max_Bellman_Error, abs(bellman_error))
                # Simulate new state and action on trajectory
                _, s, terminal, p_actions = self.domain.step(a)
                a = (
                    self.representation.bestAction(s, terminal, p_actions)
                    if np.random.rand() > self.epsilon
                    else randSet(p_actions)
                )
            # check for convergence: require several consecutive trajectories
            # whose max Bellman error stayed below the threshold.
            iteration += 1
            if max_Bellman_Error < self.convergence_threshold:
                converged_trajectories += 1
            else:
                converged_trajectories = 0
            performance_return, performance_steps, performance_term, performance_discounted_return = (
                self.performanceRun()
            )
            converged = converged_trajectories >= self.MIN_CONVERGED_TRAJECTORIES
            self.logger.info(
                "PI #%d [%s]: BellmanUpdates=%d, ||Bellman_Error||=%0.4f, Return=%0.4f, Steps=%d, Features=%d"
                % (
                    iteration,
                    hhmmss(deltaT(self.start_time)),
                    bellmanUpdates,
                    max_Bellman_Error,
                    performance_return,
                    performance_steps,
                    self.representation.features_num,
                )
            )
            if self.show:
                self.domain.show(a, representation=self.representation, s=s)
            # store stats
            self.result["bellman_updates"].append(bellmanUpdates)
            self.result["return"].append(performance_return)
            self.result["planning_time"].append(deltaT(self.start_time))
            self.result["num_features"].append(self.representation.features_num)
            self.result["steps"].append(performance_steps)
            self.result["terminated"].append(performance_term)
            self.result["discounted_return"].append(performance_discounted_return)
            self.result["iteration"].append(iteration)
        if converged:
            self.logger.info("Converged!")
        super(TrajectoryBasedValueIteration, self).solve()
from abc import ABC, abstractmethod
import numpy as np
import logging
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class Agent(ABC):
    """Learning Agent for obtaining good policies.

    The Agent receives observations from the Domain and incorporates their
    new information into the representation, policy, etc. as needed.

    In a typical Experiment, the Agent interacts with the Domain in discrete
    timesteps.  At each Experiment timestep the Agent receives some
    observations from the Domain which it uses to update the value function
    Representation of the Domain (ie, on each call to its
    :py:meth:`~rlpy.Agents.Agent.Agent.learn` function).  The Policy is used
    to select an action to perform.  This process (observe, update, act)
    repeats until some goal or fail state, determined by the Domain, is
    reached.  At this point the
    :py:class:`~rlpy.Experiments.Experiment.Experiment` determines whether the
    agent starts over or has its current policy tested (without any
    exploration).

    :py:class:`~rlpy.Agents.Agent.Agent` is a base class that provides the
    basic framework for all RL Agents.  It provides the methods and attributes
    that allow child classes to interact with the
    :py:class:`~rlpy.Domains.Domain.Domain`,
    :py:class:`~rlpy.Representations.Representation.Representation`,
    :py:class:`~rlpy.Policies.Policy.Policy`, and
    :py:class:`~rlpy.Experiments.Experiment.Experiment` classes within the
    RLPy library.

    .. note::
        All new agent implementations should inherit from this class.
    """

    # The Representation to be used by the Agent
    representation = None
    #: discount factor determining the optimal policy
    discount_factor = None
    #: The policy to be used by the agent
    policy = None
    #: The eligibility trace, which marks states as eligible for a learning
    #: update. Used by \ref Agents.SARSA.SARSA "SARSA" agent when the
    #: parameter lambda is set. See:
    #: http://www.incompleteideas.net/sutton/book/7/node1.html
    eligibility_trace = []
    #: A simple object that records the prints in a file
    logger = None
    #: number of seen episodes
    episode_count = 0
    # A seeded numpy random number generator
    random_state = None

    def __init__(self, policy, representation, discount_factor, seed=1, **kwargs):
        """initialization.

        :param policy: the :py:class:`~rlpy.Policies.Policy.Policy` to use
            when selecting actions.
        :param representation: the
            :py:class:`~rlpy.Representations.Representation.Representation`
            to use in learning the value function.
        :param discount_factor: the discount factor of the optimal policy
            which should be learned.
        :param seed: seed for this agent's private numpy random stream.
        """
        self.representation = representation
        self.policy = policy
        self.discount_factor = discount_factor
        self.logger = logging.getLogger("rlpy.Agents." + self.__class__.__name__)
        # a new stream of random numbers for each agent
        self.random_state = np.random.RandomState(seed=seed)

    def init_randomization(self):
        """
        Any stochastic behavior in __init__() is broken out into this function
        so that if the random seed is later changed (eg, by the Experiment),
        other member variables and functions are updated accordingly.
        """
        pass

    @abstractmethod
    def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):
        """
        This function receives observations of a single transition and
        learns from it.

        .. note::
            Each inheriting class (Agent) must implement this method.

        :param s: original state
        :param p_actions: possible actions in the original state
        :param a: action taken
        :param r: obtained reward
        :param ns: next state
        :param np_actions: possible actions in the next state
        :param na: action taken in the next state
        :param terminal: boolean indicating whether next state (ns) is terminal
        """
        # Bug fix: the original body was ``return NotImplementedError``, which
        # silently handed callers the exception *class* instead of signalling
        # that the method is unimplemented.
        raise NotImplementedError

    def episodeTerminated(self):
        """
        This function adjusts all necessary elements of the agent at the end of
        the episodes.

        .. note::
            Every agent must call this function at the end of the learning if
            the transition led to terminal state.
        """
        # Increase the number of episodes
        self.episode_count += 1

        # Set eligibility Traces to zero if it is end of the episode
        if hasattr(self, "eligibility_trace"):
            self.eligibility_trace = np.zeros_like(self.eligibility_trace)
class DescentAlgorithm(object):
    """
    Abstract base class that contains step-size control methods for (stochastic)
    descent algorithms such as TD Learning, Greedy-GQ etc.

    Designed as a cooperative mixin: concrete agents inherit from both
    ``DescentAlgorithm`` and :py:class:`~rlpy.Agents.Agent.Agent` (in that
    order), so ``self.representation``, ``self.logger`` and the
    ``super().episodeTerminated()`` call below resolve through the Agent side
    of the MRO.
    """

    # The initial learning rate. Note that initial_learn_rate should be set to
    # 1 for automatic learning rate; otherwise, initial_learn_rate will act as
    # a permanent upper-bound on learn_rate.
    initial_learn_rate = 0.1
    #: The learning rate
    learn_rate = 0
    #: The eligibility trace, which marks states as eligible for a learning
    #: update. Used by \ref Agents.SARSA.SARSA "SARSA" agent when the
    #: parameter lambda is set. See:
    #: http://www.incompleteideas.net/sutton/book/7/node1.html
    eligibility_trace = []
    #: A simple object that records the prints in a file
    logger = None
    #: Used by some learn_rate_decay modes
    episode_count = 0
    # Decay mode of learning rate. Options are determined by valid_decay_modes.
    learn_rate_decay_mode = "dabney"
    # Valid selections for the ``learn_rate_decay_mode``.
    valid_decay_modes = ["dabney", "boyan", "const", "boyan_const"]
    # The N0 parameter for boyan learning rate decay
    boyan_N0 = 1000

    def __init__(
        self,
        initial_learn_rate=0.1,
        learn_rate_decay_mode="dabney",
        boyan_N0=1000,
        **kwargs
    ):
        """
        :param initial_learn_rate: Initial learning rate to use (where applicable)

        .. warning::
            ``initial_learn_rate`` should be set to 1 for automatic learning rate;
            otherwise, initial_learn_rate will act as a permanent upper-bound on learn_rate.

        :param learn_rate_decay_mode: The learning rate decay mode (where applicable)
        :param boyan_N0: Initial Boyan rate parameter (when learn_rate_decay_mode='boyan')
        """
        self.initial_learn_rate = initial_learn_rate
        self.learn_rate = initial_learn_rate
        self.learn_rate_decay_mode = learn_rate_decay_mode.lower()
        self.boyan_N0 = boyan_N0

        # Note that initial_learn_rate should be set to 1 for automatic learning rate; otherwise,
        # initial_learn_rate will act as a permanent upper-bound on learn_rate.
        if self.learn_rate_decay_mode == "dabney":
            self.initial_learn_rate = 1.0
            self.learn_rate = 1.0

        # Cooperative: forwards remaining kwargs to the next class in the MRO.
        super(DescentAlgorithm, self).__init__(**kwargs)

    def updateLearnRate(
        self, phi, phi_prime, eligibility_trace, discount_factor, nnz, terminal
    ):
        """Computes a new learning rate (learn_rate) for the agent based on
        ``self.learn_rate_decay_mode``.

        :param phi: The feature vector evaluated at state (s) and action (a)
        :param phi_prime: The feature vector evaluated at the new state (ns) = (s') and action (na)
        :param eligibility_trace: Eligibility trace
        :param discount_factor: The discount factor for learning (gamma)
        :param nnz: The number of nonzero features
        :param terminal: Boolean that determines if the step is terminal or not
        """
        if self.learn_rate_decay_mode == "dabney":
            # We only update learn_rate if this step is non-terminal; else phi_prime becomes
            # zero and the dot product below becomes very large, creating a very
            # small learn_rate
            if not terminal:
                # Automatic learning rate: [Dabney W. 2012]
                # http://people.cs.umass.edu/~wdabney/papers/alphaBounds.pdf
                candid_learn_rate = np.dot(
                    discount_factor * phi_prime - phi, eligibility_trace
                )
                if candid_learn_rate < 0:
                    # learn_rate is monotonically non-increasing in this mode.
                    self.learn_rate = np.minimum(
                        self.learn_rate, -1.0 / candid_learn_rate
                    )
        elif self.learn_rate_decay_mode == "boyan":
            self.learn_rate = (
                self.initial_learn_rate
                * (self.boyan_N0 + 1.0)
                / (self.boyan_N0 + (self.episode_count + 1) ** 1.1)
            )
            # divide by l1 of the features; note that this method is only called if phi != 0
            self.learn_rate /= np.sum(np.abs(phi))
        elif self.learn_rate_decay_mode == "boyan_const":
            # New little change from not having +1 for episode count
            self.learn_rate = (
                self.initial_learn_rate
                * (self.boyan_N0 + 1.0)
                / (self.boyan_N0 + (self.episode_count + 1) ** 1.1)
            )
        elif self.learn_rate_decay_mode == "const":
            self.learn_rate = self.initial_learn_rate
        else:
            # Fix: Logger.warn is a deprecated alias of Logger.warning.
            self.logger.warning("Unrecognized decay mode ")

    def episodeTerminated(self):
        """
        This function adjusts all necessary elements of the agent at the end of
        the episodes.

        .. note::
            Every Agent must call this function at the end of the learning if the
            transition led to terminal state.
        """
        # Increase the number of episodes
        self.episode_count += 1
        # ``representation`` is provided by the Agent half of the mixin MRO.
        self.representation.episodeTerminated()
        super(DescentAlgorithm, self).episodeTerminated()
import numpy as np
from .Agent import Agent
from rlpy.Tools import solveLinear, regularize
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann"
class NaturalActorCritic(Agent):
    """
    the step-based Natural Actor Critic algorithm
    as described in algorithm 1 of

    Peters, J. & Schaal, S. Natural Actor-Critic.
    Neurocomputing 71, 1180-1190 (2008).
    """

    # minimum for the cosine of the current and last gradient
    min_cos = np.cos(np.pi / 180.0)

    def __init__(
        self,
        policy,
        representation,
        discount_factor,
        forgetting_rate,
        min_steps_between_updates,
        max_steps_between_updates,
        lambda_,
        learn_rate,
    ):
        """
        @param representation: function approximation used to approximate the
                               value function
        @param policy: parametrized stochastic policy that is an instance of
                       DifferentiablePolicy
        @param forgetting_rate: specifies the decay of previous statistics
                                after a policy update; 1 = forget all
                                0 = forget none
        @param min_steps_between_updates: minimum number of steps between
                                          two policy updates
        @param max_steps_between_updates
        @param lambda_: e-trace parameter lambda
        @param learn_rate: learning rate
        """
        self.samples_count = 0
        self.forgetting_rate = forgetting_rate
        # Joint feature length: critic features first, then one entry per
        # policy parameter (the compatible / score-function features).
        self.n = representation.features_num + len(policy.theta)
        self.representation = representation
        self.min_steps_between_updates = min_steps_between_updates
        self.max_steps_between_updates = max_steps_between_updates
        self.lambda_ = lambda_
        self.learn_rate = learn_rate
        self.steps_between_updates = 0

        # Running LSTD-style statistics: A @ param = b is solved below for the
        # critic parameters and the natural gradient estimate.
        self.b = np.zeros((self.n))
        self.A = np.zeros((self.n, self.n))
        # Scratch buffer reused for the outer product in learn().
        self.buf_ = np.zeros((self.n, self.n))
        # Eligibility trace over the joint feature vector.
        self.z = np.zeros((self.n))
        super(NaturalActorCritic, self).__init__(
            policy, representation, discount_factor
        )

    def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):
        # compute basis functions
        phi_s = np.zeros((self.n))
        phi_ns = np.zeros((self.n))
        k = self.representation.features_num
        # First k entries: critic features; remainder: policy score function.
        phi_s[:k] = self.representation.phi(s, False)
        phi_s[k:] = self.policy.dlogpi(s, a)
        phi_ns[:k] = self.representation.phi(ns, terminal)

        # update statistics
        self.z *= self.lambda_
        self.z += phi_s
        # einsum writes the outer product z (phi_s - gamma*phi_ns)^T into
        # buf_ and returns that same buffer, so this accumulates into A
        # without allocating a fresh (n, n) array per step.
        self.A += np.einsum(
            "i,j", self.z, phi_s - self.discount_factor * phi_ns, out=self.buf_
        )
        self.b += self.z * r
        if terminal:
            # Traces do not carry across episode boundaries.
            self.z[:] = 0.0
        self.steps_between_updates += 1
        self.logger.debug("Statistics updated")

        if self.steps_between_updates > self.min_steps_between_updates:
            A = regularize(self.A)
            # `time` (the solve duration reported by solveLinear) is unused here.
            param, time = solveLinear(A, self.b)

            # v = param[:k] # parameters of the value function representation
            w = param[k:]  # natural gradient estimate

            if (
                self._gradient_sane(w)
                or self.steps_between_updates > self.max_steps_between_updates
            ):
                # update policy
                self.policy.theta = self.policy.theta + self.learn_rate * w
                self.last_w = w
                self.logger.debug(
                    "Policy updated, norm of gradient {}".format(np.linalg.norm(w))
                )
                # forget statistics
                self.z *= 1.0 - self.forgetting_rate
                self.A *= 1.0 - self.forgetting_rate
                self.b *= 1.0 - self.forgetting_rate
                self.steps_between_updates = 0
        if terminal:
            self.episodeTerminated()

    def _gradient_sane(self, w):
        """
        checks the natural gradient estimate w for sanity
        """
        # NOTE(review): this returns True when the angle between w and the
        # previous gradient exceeds ~1 degree (cos < cos(1 deg)).  Peters &
        # Schaal's convergence criterion triggers the update when successive
        # estimates *align*; the inequality here looks inverted — confirm
        # against the intended algorithm before changing.
        if hasattr(self, "last_w"):
            cos = (
                np.dot(w, self.last_w) / np.linalg.norm(w) / np.linalg.norm(self.last_w)
            )
            return cos < self.min_cos
        else:
            # No previous gradient to compare against on the first update.
            return False | /rlpy3-2.0.0a0-cp36-cp36m-win_amd64.whl/rlpy/Agents/NaturalActorCritic.py | 0.781205 | 0.522568 | NaturalActorCritic.py | pypi
from .LSPI import LSPI
from .TDControlAgent import SARSA
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
# EXPERIMENTAL
class LSPI_SARSA(SARSA):
    """This agent uses SARSA for online learning and calls LSPI on sample_window"""

    def __init__(
        self,
        policy,
        representation,
        discount_factor,
        lspi_iterations=5,
        steps_between_LSPI=100,
        sample_window=100,
        tol_epsilon=1e-3,
        re_iterations=100,
        initial_learn_rate=0.1,
        lambda_=0,
        learn_rate_decay_mode="dabney",
        boyan_N0=1000,
    ):
        """
        :param policy: exploration policy used for online action selection.
        :param representation: value function approximation shared by SARSA and LSPI.
        :param discount_factor: discount factor (gamma) of the learned policy.
        :param lspi_iterations: number of policy-iteration sweeps per LSPI run.
        :param steps_between_LSPI: number of samples between periodic LSPI runs.
        :param sample_window: number of transitions LSPI keeps for its batch solve.
        :param tol_epsilon: LSPI weight-convergence tolerance.
        :param re_iterations: representation-expansion iterations inside LSPI.
        :param initial_learn_rate: initial SARSA step size.
        :param lambda_: SARSA eligibility-trace decay parameter.
        :param learn_rate_decay_mode: SARSA step-size decay schedule.
        :param boyan_N0: Boyan decay parameter (when that schedule is used).
        """
        super(LSPI_SARSA, self).__init__(
            policy,
            representation,
            discount_factor=discount_factor,
            lambda_=lambda_,
            initial_learn_rate=initial_learn_rate,
            learn_rate_decay_mode=learn_rate_decay_mode,
            boyan_N0=boyan_N0,
        )
        self.LSPI = LSPI(
            policy,
            representation,
            discount_factor,
            sample_window,
            steps_between_LSPI,
            lspi_iterations,
            tol_epsilon,
            re_iterations,
        )

    def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):
        """Iterative learning method for the agent.

        Args:
            s (ndarray):    The current state features
            p_actions (ndarray): The actions available in state s
            a (int):        The action taken by the agent in state s
            r (float):      The reward received by the agent for taking action a in state s
            ns (ndarray):   The next state features
            np_actions (ndarray): The actions available in state ns
            na (int):       The action taken by the agent in state ns
            terminal (bool): Whether or not ns is a terminal state
        """
        self.LSPI.process(s, a, r, ns, na, terminal)
        # Bug fix: the original condition was written as
        #     self.LSPI.samples_count + 1 % self.LSPI.steps_between_LSPI == 0
        # where `%` binds tighter than `+`, reducing it to
        # `samples_count + 1 == 0`, which is never true — so the periodic
        # LSPI representation expansion never ran.
        if (self.LSPI.samples_count + 1) % self.LSPI.steps_between_LSPI == 0:
            self.LSPI.representationExpansionLSPI()
        if terminal:
            self.episodeTerminated()
        else:
            # Delegate the per-step online update to SARSA.
            super(LSPI_SARSA, self).learn(
                s, p_actions, a, r, ns, np_actions, na, terminal
            )
from .Agent import Agent
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class BatchAgent(Agent):
    """An abstract class for batch agents.

    Transitions are accumulated into fixed-size numpy buffers of length
    ``max_window``; every ``max_window`` stored samples,
    :py:meth:`batch_learn` is invoked so a subclass can process the batch.
    """

    # Capacity of the sample buffers (one row per stored transition).
    max_window = 0
    samples_count = 0  # Number of samples gathered so far

    def __init__(self, policy, representation, discount_factor, max_window):
        """
        :param policy: the Policy used to select actions.
        :param representation: the Representation approximating the value function.
        :param discount_factor: discount factor of the learned policy.
        :param max_window: number of transitions to buffer between batch updates.
        """
        super(BatchAgent, self).__init__(
            policy, representation, discount_factor=discount_factor
        )
        self.max_window = max_window
        self.samples_count = 0

        # Take memory for stored values
        self.data_s = np.zeros((max_window, self.representation.state_space_dims))
        self.data_ns = np.zeros((max_window, self.representation.state_space_dims))
        self.data_a = np.zeros((max_window, 1), dtype=np.uint32)
        self.data_na = np.zeros((max_window, 1), dtype=np.uint32)
        self.data_r = np.zeros((max_window, 1))

    def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):
        """Iterative learning method for the agent.

        :param ndarray s: The current state features.
        :param ndarray p_actions: The actions available in state s.
        :param int a: The action taken by the agent in state s.
        :param float r: The reward received by the agent for taking action a in state s.
        :param ndarray ns: The next state features.
        :param ndarray np_actions: The actions available in state ns.
        :param int na: The action taken by the agent in state ns.
        :param bool terminal: Whether or not ns is a terminal state.
        """
        self.store_samples(s, a, r, ns, na, terminal)
        if terminal:
            self.episodeTerminated()
        # Trigger a batch update each time the buffer has been filled.
        if self.samples_count % self.max_window == 0:
            self.batch_learn()

    def batch_learn(self):
        # No-op in the base class; subclasses process the buffered batch here.
        pass

    def store_samples(self, s, a, r, ns, na, terminal):
        """Process one transition instance."""
        # Save samples
        # NOTE(review): samples_count is never wrapped to max_window, so these
        # writes raise IndexError once more than max_window transitions are
        # stored — presumably a subclass resets the counter; confirm.
        self.data_s[self.samples_count, :] = s
        self.data_a[self.samples_count] = a
        self.data_r[self.samples_count] = r
        self.data_ns[self.samples_count, :] = ns
        self.data_na[self.samples_count] = na
        self.samples_count += 1 | /rlpy3-2.0.0a0-cp36-cp36m-win_amd64.whl/rlpy/Agents/BatchAgent.py | 0.779028 | 0.297095 | BatchAgent.py | pypi
from .Agent import Agent, DescentAlgorithm
from rlpy.Tools import addNewElementForAllActions, count_nonzero
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
class TDControlAgent(DescentAlgorithm, Agent):
    """
    abstract class for the control variants of the classical linear TD-Learning.
    It is the parent of SARSA and Q-Learning

    All children must implement the _future_action function.
    """

    lambda_ = 0  #: lambda Parameter in SARSA [Sutton Book 1998]
    eligibility_trace = []  #: eligibility trace

    def __init__(self, policy, representation, discount_factor, lambda_=0, **kwargs):
        """
        :param policy: exploration policy used to pick actions.
        :param representation: linear function approximator for the Q-function.
        :param discount_factor: discount factor (gamma) of the learned policy.
        :param lambda_: eligibility-trace decay parameter; 0 disables traces.
        """
        # One trace entry per (feature, action) pair, matching phi_sa's layout.
        self.eligibility_trace = np.zeros(
            representation.features_num * representation.actions_num
        )
        self.lambda_ = lambda_
        super(TDControlAgent, self).__init__(
            policy=policy,
            representation=representation,
            discount_factor=discount_factor,
            **kwargs
        )

    def _future_action(self, ns, terminal, np_actions, ns_phi, na):
        """needs to be implemented by children"""
        pass

    def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):
        """Perform one linear TD(lambda) control update from a single transition.

        See :py:meth:`rlpy.Agents.Agent.Agent.learn` for the parameter meanings.
        """
        # The previous state could never be terminal
        # (otherwise the episode would have already terminated)
        prevStateTerminal = False
        # Give adaptive representations a chance to add features pre-update.
        self.representation.pre_discover(s, prevStateTerminal, a, ns, terminal)
        discount_factor = self.discount_factor
        # Note: weight_vec is the representation's own ndarray; the in-place
        # `+=` below therefore updates the representation directly.
        weight_vec = self.representation.weight_vec
        phi_s = self.representation.phi(s, prevStateTerminal)
        phi = self.representation.phi_sa(s, prevStateTerminal, a, phi_s)
        phi_prime_s = self.representation.phi(ns, terminal)
        na = self._future_action(
            ns, terminal, np_actions, phi_prime_s, na
        )  # here comes the difference between SARSA and Q-Learning
        phi_prime = self.representation.phi_sa(ns, terminal, na, phi_prime_s)
        nnz = count_nonzero(phi_s)  # Number of non-zero elements

        # Set eligibility traces:
        if self.lambda_:
            # If features were added since the last step, the stored trace is
            # shorter than phi; pad it (per action) before decaying.
            expanded = (
                len(phi) - len(self.eligibility_trace)
            ) // self.representation.actions_num
            if expanded > 0:
                # Correct the size of eligibility traces (pad with zeros for
                # new features)
                self.eligibility_trace = addNewElementForAllActions(
                    self.eligibility_trace,
                    self.representation.actions_num,
                    np.zeros((self.representation.actions_num, expanded)),
                )

            self.eligibility_trace *= discount_factor * self.lambda_
            self.eligibility_trace += phi

            # Set max to 1
            self.eligibility_trace[self.eligibility_trace > 1] = 1
        else:
            self.eligibility_trace = phi

        td_error = r + np.dot(discount_factor * phi_prime - phi, weight_vec)
        if nnz > 0:
            self.updateLearnRate(
                phi, phi_prime, self.eligibility_trace, discount_factor, nnz, terminal
            )
            weight_vec_old = weight_vec.copy()
            weight_vec += (
                self.learn_rate
                * self.representation.featureLearningRate()
                * td_error
                * self.eligibility_trace
            )
            if not np.all(np.isfinite(weight_vec)):
                # Bug fix: the original `weight_vec = weight_vec_old` only
                # rebound the local name, leaving the representation's array
                # with the diverged (non-finite) values.  Restore in place.
                weight_vec[:] = weight_vec_old
                print("WARNING: TD-Learning diverged, weight_vec reached infinity!")
        # Discover features if the representation has the discover method
        self.representation.post_discover(s, prevStateTerminal, a, td_error, phi_s)

        if terminal:
            # If THIS state is terminal:
            self.episodeTerminated()
class Q_Learning(TDControlAgent):
    """
    The off-policy variant known as Q-Learning
    """

    def _future_action(self, ns, terminal, np_actions, ns_phi, na):
        """Ignore ``na`` and bootstrap from the greedy (best-valued) action."""
        # Off-policy: the TD target always follows the action the current
        # value function ranks highest, regardless of the behavior policy.
        greedy_action = self.representation.bestAction(ns, terminal, np_actions, ns_phi)
        return greedy_action
class SARSA(TDControlAgent):
    """
    The on-policy variant known as SARSA.
    """

    def _future_action(self, ns, terminal, np_actions, ns_phi, na):
        """Bootstrap from ``na``, the action the agent will actually take next."""
        # On-policy: the TD target follows the behavior policy's own choice.
        return na
import numpy as np
import logging
from copy import deepcopy
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
class Domain(object):
    """
    The Domain controls the environment in which the
    :py:class:`~rlpy.Agents.Agent.Agent` resides as well as the reward function the
    Agent is subject to.

    The Agent interacts with the Domain in discrete timesteps called
    *episodes* (see :py:meth:`~rlpy.Domains.Domain.Domain.step`).
    At each step, the Agent informs the Domain what indexed action it wants to
    perform.  The Domain then calculates the effects this action has on the
    environment and updates its internal state accordingly.
    It also returns the new state to the agent, along with a reward/penalty,
    and whether or not the episode is over (thus resetting the agent to its
    initial state).

    This process repeats until the Domain determines that the Agent has either
    completed its goal or failed.
    The :py:class:`~rlpy.Experiments.Experiment.Experiment` controls this cycle.

    Because Agents are designed to be agnostic to the Domain that they are
    acting within and the problem they are trying to solve, the Domain needs
    to completely describe everything related to the task. Therefore, the
    Domain must not only define the observations that the Agent receives,
    but also the states it can be in, the actions that it can perform, and the
    relationships between the three.

    The Domain class is a base class that provides the basic framework for all
    Domains. It provides the methods and attributes that allow child classes
    to interact with the Agent and Experiment classes within the RLPy library.
    Domains should also provide methods that provide visualization of the
    Domain itself and of the Agent's learning
    (:py:meth:`~rlpy.Domains.Domain.Domain.showDomain` and
    :py:meth:`~rlpy.Domains.Domain.Domain.showLearning` respectively)

    All new domain implementations should inherit from
    :py:class:`~rlpy.Domains.Domain.Domain`.

    .. note::
        Though the state *s* can take on almost any value, if a dimension is not
        marked as 'continuous' then it is assumed to be integer.
    """

    def __init__(
        self,
        actions_num,
        statespace_limits,
        discount_factor=0.9,
        continuous_dims=None,
        episodeCap=None,
        random_state=None,
    ):
        """
        :param actions_num: The number of Actions the agent can perform
        :param statespace_limits: Limits of each dimension of the state space.
            Each row corresponds to one dimension and has two elements [min, max]
        :param discount_factor: The discount factor by which rewards are reduced
        :param continuous_dims: List of the continuous dimensions of the domain
        :param episodeCap: The cap used to bound each episode (return to state 0 after)
        :param random_state: A seeded numpy random number generator
        """
        self.actions_num = actions_num
        self.statespace_limits = statespace_limits
        self.discount_factor = float(discount_factor)
        if continuous_dims is None:
            # Fully discrete: |S| is the product of the per-dimension ranges.
            self.states_num = int(
                np.prod(self.statespace_limits[:, 1] - self.statespace_limits[:, 0])
            )
            self.continuous_dims = []
        else:
            self.states_num = np.inf
            self.continuous_dims = continuous_dims
        self.episodeCap = episodeCap
        if random_state is None:
            self.random_state = np.random.RandomState()
        else:
            self.random_state = random_state
        self.state_space_dims = self.statespace_limits.shape[0]

        # For discrete domains, limits should be extended by half on each side so that
        # the mapping becomes identical with continuous states.
        # The original limits will be saved in self.discrete_statespace_limits.
        self._extendDiscreteDimensions()
        self.logger = logging.getLogger("rlpy.Domains." + self.__class__.__name__)

    def init_randomization(self):
        """
        Any stochastic behavior in __init__() is broken out into this function
        so that if the random seed is later changed (eg, by the Experiment),
        other member variables and functions are updated accordingly.
        """
        pass

    def __str__(self):
        res = """{self.__class__}:
------------
Dimensions: {self.state_space_dims}
|S|: {self.states_num}
|A|: {self.actions_num}
Episode Cap:{self.episodeCap}
Gamma: {self.discount_factor}
""".format(
            self=self
        )
        return res

    def show(self, a=None, representation=None):
        """
        Shows a visualization of the current state of the domain and that of
        learning.

        See :py:meth:`~rlpy.Domains.Domain.Domain.showDomain()` and
        :py:meth:`~rlpy.Domains.Domain.Domain.showLearning()`,
        both called by this method.

        .. note::
            Some domains override this function to allow an optional *s*
            parameter to be passed, which overrides the *self.state* internal
            to the domain; however, not all have this capability.

        :param a: The action being performed
        :param representation: The learned value function
            :py:class:`~rlpy.Representation.Representation.Representation`.
        """
        # Visualization may consume random numbers; shield the domain's stream.
        self.saveRandomState()
        self.showDomain(a=a)
        self.showLearning(representation=representation)
        self.loadRandomState()

    def showDomain(self, a=0):
        """
        *Abstract Method:*

        Shows a visualization of the current state of the domain.

        :param a: The action being performed.
        """
        pass

    def showLearning(self, representation):
        """
        *Abstract Method:*

        Shows a visualization of the current learning,
        usually in the form of a gridded value function and policy.
        It is thus really only possible for 1 or 2-state domains.

        :param representation: the learned value function
            :py:class:`~rlpy.Representation.Representation.Representation`
            to generate the value function / policy plots.
        """
        pass

    def s0(self):
        """
        Begins a new episode and returns the initial observed state of the Domain.
        Sets self.state accordingly.

        :return: A numpy array that defines the initial domain state.
        """
        raise NotImplementedError("Children need to implement this method")

    def possibleActions(self, s=None):
        """
        The default version returns an enumeration of all actions [0, 1, 2...].
        We suggest overriding this method in your domain, especially if not all
        actions are available from all states.

        :param s: The state to query for possible actions
            (overrides self.state if ``s != None``)

        :return: A numpy array containing every possible action in the domain.

        .. note::
            *These actions must be integers*; internally they may be handled
            using other datatypes. See :py:meth:`~rlpy.Tools.GeneralTools.vec2id`
            and :py:meth:`~rlpy.Tools.GeneralTools.id2vec` for converting between
            integers and multidimensional quantities.
        """
        return np.arange(self.actions_num)

    # TODO: change 'a' to be 'aID' to make it clearer when we refer to
    # actions vs. integer IDs of actions? They aren't always interchangeable.
    def step(self, a):
        """
        *Abstract Method:*

        Performs the action *a* and updates the Domain state accordingly.
        Returns the reward/penalty the agent obtains for
        the state/action pair determined by *Domain.state* and the parameter
        *a*, the next state into which the agent has transitioned, and a
        boolean determining whether a goal or fail state has been reached.

        .. note::
            Domains often specify stochastic internal state transitions, such
            that the result of a (state,action) pair might vary on different
            calls (see also the :py:meth:`~rlpy.Domains.Domain.Domain.sampleStep`
            method).
            Be sure to look at unique noise parameters of each domain if you
            require deterministic transitions.

        :param a: The action to perform.

        .. warning::
            The action *a* **must** be an integer >= 0, and might better be
            called the "actionID". See the class description
            :py:class:`~rlpy.Domains.Domain.Domain` above.

        :return: The tuple (r, ns, t, p_actions) =
            (Reward [value], next observed state, isTerminal [boolean],
            possible actions in the next state)
        """
        raise NotImplementedError("Each domain needs to implement this method")

    def saveRandomState(self):
        """
        Stores the state of the the random generator.
        Using loadRandomState this state can be loaded.
        """
        self.random_state_backup = self.random_state.get_state()

    def loadRandomState(self):
        """
        Loads the random state stored in the self.random_state_backup
        """
        self.random_state.set_state(self.random_state_backup)

    def isTerminal(self):
        """
        Returns ``True`` if the current Domain.state is a terminal one, ie,
        one that ends the episode. This often results from either a failure
        or goal state being achieved.

        The default definition does not terminate.

        :return: ``True`` if the state is a terminal state, ``False`` otherwise.
        """
        return False

    def _extendDiscreteDimensions(self):
        """
        Offsets discrete dimensions by 0.5 so that binning works properly.

        .. warning::
            This code is used internally by the Domain base class.
            **It should only be called once**
        """
        # Store the original limits for other types of calculations
        self.discrete_statespace_limits = self.statespace_limits
        self.statespace_limits = self.statespace_limits.astype("float")
        for d in range(self.state_space_dims):
            if d not in self.continuous_dims:
                self.statespace_limits[d, 0] += -0.5
                self.statespace_limits[d, 1] += +0.5

    def sampleStep(self, a, num_samples):
        """
        Sample a set number of next states and rewards from the domain.
        This function is used when state transitions are stochastic;
        deterministic transitions will yield an identical result regardless
        of *num_samples*, since repeatedly sampling a (state,action) pair
        will always yield the same tuple (r,ns,terminal).
        See :py:meth:`~rlpy.Domains.Domain.Domain.step`.

        :param a: The action to attempt
        :param num_samples: The number of next states and rewards to be sampled.

        :return: A tuple of arrays ( S[], R[] ) where
            *S* is an array of next states,
            *R* is an array of rewards for those states.
        """
        next_states = []
        rewards = []
        # Remember the current state so every sample starts from it.
        s = self.state.copy()
        for i in range(num_samples):
            # Bug fix: step() returns a 4-tuple (r, ns, terminal, p_actions);
            # the original 3-name unpacking raised ValueError on every call.
            r, ns, terminal, p_actions = self.step(a)
            self.state = s.copy()
            next_states.append(ns)
            rewards.append(r)

        return np.array(next_states), np.array(rewards)

    def __copy__(self):
        # Shallow copy: shares all attribute objects with the original.
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        return result

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in list(self.__dict__.items()):
            # The logger holds handles that must not be duplicated; the copy
            # simply goes without one (matching prior behavior).
            # Bug fix: the original used `k is "logger"` — identity comparison
            # with a string literal, which is implementation-dependent.
            if k == "logger":
                continue
            # This block handles matplotlib transformNode objects,
            # which cannot be copied
            try:
                setattr(result, k, deepcopy(v, memo))
            except Exception:
                # Fall back to a frozen() snapshot; if that also fails, warn.
                try:
                    setattr(result, k, v.frozen())
                except Exception:
                    self.logger.warning(
                        "Could not copy attribute " + k + " when duplicating domain."
                    )
        return result
from .Domain import Domain
import numpy as np
from itertools import tee
import itertools
import os
try:
from tkinter import Tk, Canvas
except ImportError:
import warnings
warnings.warn("TkInter is not found for Pinball.")
from rlpy.Tools import __rlpy_location__
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = [
"Pierre-Luc Bacon", # author of the original version
"Austin Hays",
] # adapted for RLPy and TKinter
class Pinball(Domain):
    """
    The goal of this domain is to maneuver a small ball on a plate into a hole.
    The plate may contain obstacles which should be avoided.

    **STATE:**
        The state is given by a 4-dimensional vector, consisting of position and
        velocity of the ball.

    **ACTIONS:**
        There are 5 actions, standing for slanting the plate in x or y direction
        or a horizontal position of the plate.

    **REWARD:**
        Slanting the plate costs -4 reward in addition to -1 reward for each timestep.
        When the ball reaches the hole, the agent receives 10000 units of reward.

    **REFERENCE:**

    .. seealso::
        G.D. Konidaris and A.G. Barto:
        *Skill Discovery in Continuous Reinforcement Learning Domains using Skill Chaining.*
        Advances in Neural Information Processing Systems 22, pages 1015-1023, December 2009.
    """

    #: default location of config files shipped with rlpy
    default_config_dir = os.path.join(__rlpy_location__, "Domains", "PinballConfigs")

    def __init__(
        self,
        noise=0.1,
        episodeCap=1000,
        configuration=os.path.join(default_config_dir, "pinball_simple_single.cfg"),
    ):
        """
        :param noise: with probability noise, a uniformly random action is executed
        :param episodeCap: maximum length of an episode
        :param configuration: location of the configuration file
        """
        self.NOISE = noise
        self.configuration = configuration
        # Tk canvas; created lazily on the first call to showDomain().
        self.screen = None
        self.actions = [
            PinballModel.ACC_X,
            PinballModel.DEC_Y,
            PinballModel.DEC_X,
            PinballModel.ACC_Y,
            PinballModel.ACC_NONE,
        ]
        super().__init__(
            actions_num=len(self.actions),
            statespace_limits=np.array(
                [[0.0, 1.0], [0.0, 1.0], [-2.0, 2.0], [-2.0, 2.0]]
            ),
            # All four state dimensions (x, y, xdot, ydot) are continuous.
            # The previous value [4] indexed a non-existent 5th dimension.
            continuous_dims=[0, 1, 2, 3],
            episodeCap=episodeCap,
        )
        self.environment = PinballModel(
            self.configuration, random_state=self.random_state
        )

    def showDomain(self, a):
        """Render the ball and obstacles in a Tk window (created on demand)."""
        if self.screen is None:
            master = Tk()
            master.title("RLPY Pinball")
            self.screen = Canvas(master, width=500.0, height=500.0)
            self.screen.configure(background="LightGray")
            self.screen.pack()
            self.environment_view = PinballView(
                self.screen, 500.0, 500.0, self.environment
            )
        self.environment_view.blit()
        self.screen.pack()
        self.screen.update()

    def step(self, a):
        """Apply action ``a`` (possibly replaced by a random one with
        probability ``NOISE``) and advance the physics model.

        :returns: (reward, next state, terminal, possible actions)
        """
        s = self.state
        # Push the externally stored state back into the physics model.
        [
            self.environment.ball.position[0],
            self.environment.ball.position[1],
            self.environment.ball.xdot,
            self.environment.ball.ydot,
        ] = s
        if self.random_state.random_sample() < self.NOISE:
            # Random Move
            a = self.random_state.choice(self.possibleActions())
        reward = self.environment.take_action(a)
        self.environment._check_bounds()
        state = np.array(self.environment.get_state())
        self.state = state.copy()
        return reward, state, self.isTerminal(), self.possibleActions()

    def s0(self):
        """Reset the ball to the configured start position with zero velocity."""
        self.environment.ball.position[0], self.environment.ball.position[
            1
        ] = self.environment.start_pos
        self.environment.ball.xdot, self.environment.ball.ydot = 0.0, 0.0
        self.state = np.array(
            [
                self.environment.ball.position[0],
                self.environment.ball.position[1],
                self.environment.ball.xdot,
                self.environment.ball.ydot,
            ]
        )
        return self.state, self.isTerminal(), self.possibleActions()

    def possibleActions(self, s=0):
        """All 5 actions are available in every state."""
        return np.array(self.actions)

    def isTerminal(self):
        """The episode ends when the ball reaches the target hole."""
        return self.environment.episode_ended()
class BallModel(object):
    """Tracks the pinball's position and velocity.

    Moves the ball according to its current velocity and applies a fixed
    drag coefficient each step.
    """

    # Multiplicative velocity decay applied by add_drag().
    DRAG = 0.995

    def __init__(self, start_position, radius):
        """
        :param start_position: The initial position
        :type start_position: float
        :param radius: The ball radius
        :type radius: float
        """
        self.position = start_position
        self.radius = radius
        self.xdot = 0.0
        self.ydot = 0.0

    def add_impulse(self, delta_xdot, delta_ydot):
        """Change the ball's momentum, clipping each velocity to [-2, 2].

        :param delta_xdot: The change in velocity in the x direction
        :type delta_xdot: float
        :param delta_ydot: The change in velocity in the y direction
        :type delta_ydot: float
        """
        # Impulses are scaled down by a factor of 5 before being applied.
        self.xdot = self._clip(self.xdot + delta_xdot / 5)
        self.ydot = self._clip(self.ydot + delta_ydot / 5)

    def add_drag(self):
        """Apply a fixed amount of drag to the current velocity."""
        self.xdot *= self.DRAG
        self.ydot *= self.DRAG

    def step(self):
        """Advance the ball's position by one increment."""
        scale = self.radius / 20.0
        self.position[0] += self.xdot * scale
        self.position[1] += self.ydot * scale

    def _clip(self, val, low=-2, high=2):
        """Clamp ``val`` into the interval [low, high]."""
        return min(max(val, low), high)
class PinballObstacle(object):
    """ This class represents a single polygon obstacle in the
    pinball domain and detects when a :class:`BallModel` hits it.

    When a collision is detected, it also provides a way to
    compute the appropriate effect to apply on the ball.
    """

    def __init__(self, points):
        """
        :param points: A list of points defining the polygon
        :type points: list of lists
        """
        self.points = points
        # Axis-aligned bounding box, used as a cheap rejection test in collision().
        self.min_x = min(self.points, key=lambda pt: pt[0])[0]
        self.max_x = max(self.points, key=lambda pt: pt[0])[0]
        self.min_y = min(self.points, key=lambda pt: pt[1])[1]
        self.max_y = max(self.points, key=lambda pt: pt[1])[1]

        self._double_collision = False
        self._intercept = None

    def collision(self, ball):
        """ Determines if the ball hits this obstacle

        :param ball: An instance of :class:`BallModel`
        :type ball: :class:`BallModel`
        :returns: True if the ball overlaps one of the polygon's edges
        :rtype: bool
        """
        self._double_collision = False

        # Cheap bounding-box rejection before the per-edge tests.
        if ball.position[0] - ball.radius > self.max_x:
            return False
        if ball.position[0] + ball.radius < self.min_x:
            return False
        if ball.position[1] - ball.radius > self.max_y:
            return False
        if ball.position[1] + ball.radius < self.min_y:
            return False

        # Iterate over consecutive vertex pairs; the polygon is closed by
        # appending the first vertex at the end of the list.
        a, b = tee(np.vstack([np.array(self.points), self.points[0]]))
        next(b, None)
        intercept_found = False
        for pt_pair in zip(a, b):
            if self._intercept_edge(pt_pair, ball):
                if intercept_found:
                    # Ball has hit a corner: keep the more suitable edge.
                    self._intercept = self._select_edge(pt_pair, self._intercept, ball)
                    self._double_collision = True
                else:
                    self._intercept = pt_pair
                    intercept_found = True

        return intercept_found

    def collision_effect(self, ball):
        """ Based of the collision detection result triggered
        in :func:`PinballObstacle.collision`, compute the
        change in velocity.

        :param ball: An instance of :class:`BallModel`
        :type ball: :class:`BallModel`
        :returns: the new [xdot, ydot] velocity of the ball
        :rtype: list
        """
        if self._double_collision:
            # Corner hit: simply reverse the ball's velocity.
            return [-ball.xdot, -ball.ydot]

        # Normalize direction
        obstacle_vector = self._intercept[1] - self._intercept[0]
        if obstacle_vector[0] < 0:
            obstacle_vector = self._intercept[0] - self._intercept[1]

        # Reflect the velocity about the intercepted edge.
        velocity_vector = np.array([ball.xdot, ball.ydot])
        theta = self._angle(velocity_vector, obstacle_vector) - np.pi
        if theta < 0:
            theta += 2 * np.pi

        intercept_theta = self._angle([-1, 0], obstacle_vector)
        theta += intercept_theta

        if theta > 2 * np.pi:
            theta -= 2 * np.pi

        velocity = np.linalg.norm([ball.xdot, ball.ydot])

        return [velocity * np.cos(theta), velocity * np.sin(theta)]

    def _select_edge(self, intersect1, intersect2, ball):
        """ If the ball hits a corner, select one of two edges.

        :param intersect1: A pair of points defining an edge of the polygon
        :type intersect1: list of lists
        :param intersect2: A pair of points defining an edge of the polygon
        :type intersect2: list of lists
        :returns: The edge with the smallest angle with the velocity vector
        :rtype: list of lists
        """
        velocity = np.array([ball.xdot, ball.ydot])
        obstacle_vector1 = intersect1[1] - intersect1[0]
        obstacle_vector2 = intersect2[1] - intersect2[0]

        angle1 = self._angle(velocity, obstacle_vector1)
        if angle1 > np.pi:
            angle1 -= np.pi

        angle2 = self._angle(velocity, obstacle_vector2)
        # Bug fix: the original re-tested ``angle1`` here (already reduced
        # above, so the branch was dead) and ``angle2`` was never normalized
        # into [0, pi].
        if angle2 > np.pi:
            angle2 -= np.pi

        # Prefer the edge closest to perpendicular to the velocity.
        if np.abs(angle1 - np.pi / 2) < np.abs(angle2 - np.pi / 2):
            return intersect1
        return intersect2

    def _angle(self, v1, v2):
        """ Compute the angle difference between two vectors

        :param v1: The x,y coordinates of the vector
        :type: v1: list
        :param v2: The x,y coordinates of the vector
        :type: v2: list
        :returns: angle difference in [0, 2*pi)
        :rtype: float
        """
        angle_diff = np.arctan2(v1[0], v1[1]) - np.arctan2(v2[0], v2[1])
        if angle_diff < 0:
            angle_diff += 2 * np.pi
        return angle_diff

    def _intercept_edge(self, pt_pair, ball):
        """ Compute the projection on and edge and find out
        if it intercept with the ball.

        :param pt_pair: The pair of points defining an edge
        :type pt_pair: list of lists
        :param ball: An instance of :class:`BallModel`
        :type ball: :class:`BallModel`
        :returns: True if the ball has hit an edge of the polygon
        :rtype: bool
        """
        # Project the ball's center onto the edge, clamped to the segment.
        obstacle_edge = pt_pair[1] - pt_pair[0]
        difference = np.array(ball.position) - pt_pair[0]

        scalar_proj = difference.dot(obstacle_edge) / obstacle_edge.dot(obstacle_edge)
        if scalar_proj > 1.0:
            scalar_proj = 1.0
        elif scalar_proj < 0.0:
            scalar_proj = 0.0

        # Compute the distance to the closest point
        closest_pt = pt_pair[0] + obstacle_edge * scalar_proj
        obstacle_to_ball = ball.position - closest_pt
        distance = obstacle_to_ball.dot(obstacle_to_ball)

        if distance <= ball.radius * ball.radius:
            # A collision only if the ball is not already moving away
            velocity = np.array([ball.xdot, ball.ydot])
            ball_to_obstacle = closest_pt - ball.position

            angle = self._angle(ball_to_obstacle, velocity)
            if angle > np.pi:
                angle = 2 * np.pi - angle

            if angle > np.pi / 1.99:
                return False

            return True
        else:
            return False
class PinballModel(object):
    """ This class is a self-contained model of the pinball
    domain for reinforcement learning.

    It can be used either over RL-Glue through the :class:`PinballRLGlue`
    adapter or interactively with :class:`PinballView`.
    """

    # Action identifiers: accelerate/decelerate along x or y, or do nothing.
    ACC_X = 0
    ACC_Y = 1
    DEC_X = 2
    DEC_Y = 3
    ACC_NONE = 4

    # Reward constants: -1 per no-op step, -5 per thrust, +10000 on success.
    STEP_PENALTY = -1
    THRUST_PENALTY = -5
    END_EPISODE = 10000

    def __init__(self, configuration, random_state=np.random.RandomState()):
        """ Read a configuration file for Pinball and draw the domain to screen

        :param configuration: a configuration file containing the polygons,
            source(s) and target location.
        :type configuration: str
        :param random_state: RNG used to pick among multiple start positions.
            NOTE(review): this mutable default is created once at import time
            and shared by all instances that do not pass their own.
        """
        self.random_state = random_state
        # Per-action (delta_xdot, delta_ydot) impulses.
        self.action_effects = {
            self.ACC_X: (1, 0),
            self.ACC_Y: (0, 1),
            self.DEC_X: (-1, 0),
            self.DEC_Y: (0, -1),
            self.ACC_NONE: (0, 0),
        }

        # Set up the environment according to the configuration
        self.obstacles = []
        self.target_pos = []
        self.target_rad = 0.01

        ball_rad = 0.01
        start_pos = []
        with open(configuration) as fp:
            for line in fp.readlines():
                tokens = line.strip().split()
                if not len(tokens):
                    continue
                elif tokens[0] == "polygon":
                    # Remaining tokens are flat x y pairs; group them into
                    # (x, y) vertices for the obstacle polygon.
                    self.obstacles.append(
                        PinballObstacle(list(zip(*[iter(map(float, tokens[1:]))] * 2)))
                    )
                elif tokens[0] == "target":
                    self.target_pos = [float(tokens[1]), float(tokens[2])]
                    self.target_rad = float(tokens[3])
                elif tokens[0] == "start":
                    # Possibly several candidate start positions.
                    start_pos = list(zip(*[iter(map(float, tokens[1:]))] * 2))
                elif tokens[0] == "ball":
                    ball_rad = float(tokens[1])
        self.start_pos = start_pos[0]
        # Pick one of the configured start positions at random.
        a = self.random_state.randint(len(start_pos))
        self.ball = BallModel(list(start_pos[a]), ball_rad)

    def get_state(self):
        """ Access the current 4-dimensional state vector

        :returns: a list containing the x position, y position, xdot, ydot
        :rtype: list
        """
        return [
            self.ball.position[0],
            self.ball.position[1],
            self.ball.xdot,
            self.ball.ydot,
        ]

    def take_action(self, action):
        """ Take a step in the environment

        :param action: The action to apply over the ball
        :type action: int
        :returns: the reward obtained for this step
        :rtype: int
        """
        # The action's impulse is applied once, then the dynamics are
        # integrated over 20 sub-steps so collisions are detected reliably.
        for i in range(20):
            if i == 0:
                self.ball.add_impulse(*self.action_effects[action])

            self.ball.step()

            # Detect collisions
            ncollision = 0
            dxdy = np.array([0, 0])

            for obs in self.obstacles:
                if obs.collision(self.ball):
                    dxdy = dxdy + obs.collision_effect(self.ball)
                    ncollision += 1

            if ncollision == 1:
                # Single obstacle hit: take its reflected velocity.
                self.ball.xdot = dxdy[0]
                self.ball.ydot = dxdy[1]
                if i == 19:
                    # Extra step so the ball moves off the obstacle before
                    # the sub-step loop ends.
                    self.ball.step()
            elif ncollision > 1:
                # Hit several obstacles at once: bounce straight back.
                self.ball.xdot = -self.ball.xdot
                self.ball.ydot = -self.ball.ydot

            if self.episode_ended():
                return self.END_EPISODE

        self.ball.add_drag()
        self._check_bounds()

        if action == self.ACC_NONE:
            return self.STEP_PENALTY

        return self.THRUST_PENALTY

    def episode_ended(self):
        """ Find out if the ball reached the target

        :returns: True if the ball reached the target position
        :rtype: bool
        """
        return (
            np.linalg.norm(np.array(self.ball.position) - np.array(self.target_pos))
            < self.target_rad
        )

    def _check_bounds(self):
        """ Make sure that the ball stays within the environment """
        # Clamp positions slightly inside the unit square rather than exactly
        # on the border.
        if self.ball.position[0] > 1.0:
            self.ball.position[0] = 0.95
        if self.ball.position[0] < 0.0:
            self.ball.position[0] = 0.05
        if self.ball.position[1] > 1.0:
            self.ball.position[1] = 0.95
        if self.ball.position[1] < 0.0:
            self.ball.position[1] = 0.05
class PinballView(object):
    """ This class displays a :class:`PinballModel`

    This class is used in conjunction with the :func:`run_pinballview`
    function, acting as a *controller*.
    """

    def __init__(self, screen, width, height, model):
        """
        Changed from original PyGame implementation to work
        with Tkinter visualization.

        :param screen: Tk ``Canvas`` to draw on
        :param width: requested width. NOTE(review): ignored — the view is
            hard-coded to 500x500 below; confirm whether this is intentional.
        :param height: requested height (also ignored, see above)
        :param model: the :class:`PinballModel` to display
        """
        self.screen = screen
        self.width = 500.0
        self.height = 500.0
        self.model = model

        # Cached pixel position and radius of the ball.
        self.x, self.y = self._to_pixels(self.model.ball.position)
        self.rad = int(self.model.ball.radius * self.width)

        # RGB palettes; drawing below actually uses Tk color names.
        self.DARK_GRAY = [64, 64, 64]
        self.DARK_BLUE = [0, 0, 128]
        self.LIGHT_GRAY = [232, 232, 232]
        self.BALL_COLOR = [0, 0, 255]
        self.TARGET_COLOR = [255, 0, 0]

        # Draw the static obstacle polygons once.
        for obs in model.obstacles:
            coords_list = list(map(self._to_pixels, obs.points))
            chain = itertools.chain(*coords_list)
            coords = list(chain)
            self.screen.create_polygon(coords, fill="blue")
            self.screen.pack()

        # Draw the target hole and the (movable) ball.
        self.target_x, self.target_y = self._to_pixels(self.model.target_pos)
        self.target_rad = int(self.model.target_rad * self.width)
        _ = self.drawcircle(
            self.screen, self.target_x, self.target_y, self.target_rad, "red"
        )
        self.ball_id = self.drawcircle(self.screen, self.x, self.y, self.rad, "black")
        self.screen.pack()

    def drawcircle(self, canv, x, y, rad, color):
        """Draw a filled circle on ``canv`` and return its canvas item id."""
        return canv.create_oval(x - rad, y - rad, x + rad, y + rad, width=0, fill=color)

    def _to_pixels(self, pt):
        """ Converts from real units in the 0-1 range to pixel units

        :param pt: a point in real units
        :type pt: list
        :returns: the input point in pixel units
        :rtype: list
        """
        return [int(pt[0] * self.width), int(pt[1] * self.height)]

    def blit(self):
        """ Blit the ball onto the background surface """
        # Move the existing canvas item to the cached position, then refresh
        # the cache from the model for the next call.
        self.screen.coords(
            self.ball_id,
            self.x - self.rad,
            self.y - self.rad,
            self.x + self.rad,
            self.y + self.rad,
        )
        self.x, self.y = self._to_pixels(self.model.ball.position)
        self.screen.pack()
def run_pinballview(width, height, configuration):
    """
    Changed from original Pierre-Luc Bacon implementation to reflect
    the visualization changes in the PinballView Class.

    Runs one episode of Pinball under a uniformly random policy in a Tk
    window, until the target is reached.

    :param width: window width (the canvas itself is created at 500x500)
    :param height: window height
    :param configuration: path to a pinball configuration file
    """
    width, height = float(width), float(height)
    master = Tk()
    master.title("RLPY Pinball")
    screen = Canvas(master, width=500.0, height=500.0)
    screen.configure(background="LightGray")
    screen.pack()

    environment = PinballModel(configuration)
    environment_view = PinballView(screen, width, height, environment)

    actions = [
        PinballModel.ACC_X,
        PinballModel.DEC_Y,
        PinballModel.DEC_X,
        PinballModel.ACC_Y,
        PinballModel.ACC_NONE,
    ]
    done = False
    while not done:
        # Uniformly random action each step.
        user_action = np.random.choice(actions)
        environment_view.blit()
        if environment.episode_ended():
            done = True
        if environment.take_action(user_action) == environment.END_EPISODE:
            done = True
        environment_view.blit()
        screen.update()
import numpy as np
import itertools
from rlpy.Tools import plt, FONTSIZE, linearMap
from rlpy.Tools import __rlpy_location__, findElemArray1D, perms
import os
from .Domain import Domain
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class GridWorld(Domain):
    """
    The GridWorld domain simulates a path-planning problem for a mobile robot
    in an environment with obstacles. The goal of the agent is to
    navigate from the starting point to the goal state.
    The map is loaded from a text file filled with numbers showing the map with the following
    coding for each cell:

    * 0: empty
    * 1: blocked
    * 2: start
    * 3: goal
    * 4: pit

    **STATE:**
    The Row and Column corresponding to the agent's location. \n
    **ACTIONS:**
    Four cardinal directions: up, down, left, right (given that
    the destination is not blocked or out of range). \n
    **TRANSITION:**
    There is 30% probability of failure for each move, in which case the action
    is replaced with a random action at each timestep. Otherwise the move succeeds
    and the agent moves in the intended direction. \n
    **REWARD:**
    The reward on each step is -.001 , except for actions
    that bring the agent to the goal with reward of +1.\n
    """

    #: Reward constants
    GOAL_REWARD = +1
    PIT_REWARD = -1
    STEP_REWARD = -0.001
    # Used for graphical normalization
    MAX_RETURN = 1
    RMAX = MAX_RETURN
    # Used for graphical normalization
    MIN_RETURN = -1
    # Used for graphical shifting of arrows
    SHIFT = 0.1
    # Constants in the map
    EMPTY, BLOCKED, START, GOAL, PIT, AGENT = range(6)
    #: Up, Down, Left, Right
    ACTIONS = np.array([[-1, 0], [+1, 0], [0, -1], [0, +1]])
    # directory of maps shipped with rlpy
    DEFAULT_MAP_DIR = os.path.join(__rlpy_location__, "Domains", "GridWorldMaps")
    # Keys to access arrow figures
    ARROW_NAMES = ["UP", "DOWN", "LEFT", "RIGHT"]

    @classmethod
    def default_map(cls, name="4x5.txt"):
        """Return the absolute path of a map file shipped with rlpy."""
        return os.path.join(cls.DEFAULT_MAP_DIR, name)

    def _load_map(self, mapfile):
        """Load ``mapfile`` into ``self.map`` as a 2D uint8 array."""
        self.map = np.loadtxt(mapfile, dtype=np.uint8)
        if self.map.ndim == 1:
            # A single-row map parses as 1D; promote it to 2D.
            self.map = self.map[np.newaxis, :]

    def __init__(
        self,
        mapfile=os.path.join(DEFAULT_MAP_DIR, "4x5.txt"),
        noise=0.1,
        random_start=False,
        episodeCap=1000,
    ):
        """
        :param mapfile: path to the map text file
        :param noise: probability that the chosen action is replaced by a
            uniformly random possible action
        :param random_start: if True, sample uniformly among all START cells;
            otherwise always use the first one
        :param episodeCap: maximum number of steps per episode
        """
        self._load_map(mapfile)
        self.random_start = random_start
        #: Number of rows and columns of the map
        self.rows, self.cols = np.shape(self.map)
        super().__init__(
            actions_num=4,
            statespace_limits=np.array([[0, self.rows - 1], [0, self.cols - 1]]),
            # 2*W*H, small values can cause problem for some planning techniques
            episodeCap=episodeCap,
        )
        #: Movement noise
        self.noise = noise
        self.DimNames = ["Row", "Col"]
        self.state = self._sample_start()
        # map name for showing
        mapfname = os.path.basename(mapfile)
        dot_pos = mapfname.find(".")
        if dot_pos == -1:
            self.mapname = mapfname
        else:
            self.mapname = mapfname[:dot_pos]
        # Used for graphics to show the domain
        self.domain_fig, self.domain_ax, self.agent_fig = None, None, None
        self.vf_fig, self.vf_ax, self.vf_img = None, None, None
        self.arrow_figs = {}

    def _sample_start(self):
        """Pick the start cell (fixed or random among START cells)."""
        starts = np.argwhere(self.map == self.START)
        if self.random_start:
            idx = self.random_state.randint(len(starts))
        else:
            idx = 0
        self.start_state = starts[idx]
        return self.start_state.copy()

    def _show_map(self):
        """Draw the map image and a legend for the special cell types."""
        cmap = plt.get_cmap("GridWorld")
        self.domain_ax.imshow(
            self.map, cmap=cmap, interpolation="nearest", vmin=0, vmax=5
        )
        # Dummy zero-size plots so the legend shows one entry per cell type.
        self.domain_ax.plot([0.0], [0.0], color=cmap(1), label="Block")
        self.domain_ax.plot([0.0], [0.0], color=cmap(2), label="Start")
        self.domain_ax.plot([0.0], [0.0], color=cmap(3), label="Goal")
        self.domain_ax.plot([0.0], [0.0], color=cmap(4), label="Pit")
        self.domain_ax.legend(fontsize=12, loc="upper right", bbox_to_anchor=(1.2, 1.1))

    def _set_ticks(self, ax):
        """Put integer cell ticks along the top/left of ``ax``."""
        ax.get_xaxis().set_ticks_position("top")
        plt.xticks(np.arange(self.cols), fontsize=FONTSIZE)
        plt.yticks(np.arange(self.rows), fontsize=FONTSIZE)

    def showDomain(self, a=0, s=None):
        """Draw the map (once) and the agent's current position."""
        if s is None:
            s = self.state
        # Draw the environment
        if self.domain_fig is None:
            self.domain_fig = plt.figure("GridWorld: {}".format(self.mapname))
            ratio = self.rows / self.cols
            self.domain_ax = self.domain_fig.add_axes((0.08, 0.04, 0.86 * ratio, 0.86))
            self._show_map()
            self._set_ticks(self.domain_ax)
            self.agent_fig = self.domain_ax.plot(
                s[1], s[0], "k>", markersize=20.0 - self.cols
            )[0]
            self.domain_fig.show()
        # Redraw only the agent marker on subsequent calls.
        self.agent_fig.remove()
        self.agent_fig = self.domain_ax.plot(
            s[1], s[0], "k>", markersize=20.0 - self.cols
        )[0]
        self.domain_fig.canvas.draw()

    def _init_arrow(self, name, x, y):
        """Create the quiver object used to draw arrows for action ``name``."""
        arrow_ratio = 0.4
        Max_Ratio_ArrowHead_to_ArrowLength = 0.25
        ARROW_WIDTH = 0.5 * Max_Ratio_ArrowHead_to_ArrowLength / 5.0
        is_y = name in ["UP", "DOWN"]
        c = np.zeros(x.shape)
        c[0, 0] = 1
        # NOTE(review): the width is negated for vertical arrows — presumably
        # to compensate for the inverted y-axis of imshow; confirm against
        # matplotlib's quiver semantics.
        self.arrow_figs[name] = self.vf_ax.quiver(
            y,
            x,
            np.ones(x.shape),
            np.ones(x.shape),
            c,
            units="y" if is_y else "x",
            cmap="Actions",
            scale_units="height" if is_y else "width",
            scale=(self.rows if is_y else self.cols) / arrow_ratio,
            width=-ARROW_WIDTH if is_y else ARROW_WIDTH,
        )
        self.arrow_figs[name].set_clim(vmin=0, vmax=1)

    def showLearning(self, representation):
        """Visualize the value function and the greedy policy implied by
        ``representation``: the map image is colored by V and one arrow is
        drawn per possible action, highlighted when optimal."""
        if self.vf_ax is None:
            self.vf_fig = plt.figure("Value Function")
            self.vf_ax = self.vf_fig.add_subplot(1, 1, 1)
            cmap = plt.get_cmap("ValueFunction-New")
            self.vf_img = self.vf_ax.imshow(
                self.map,
                cmap=cmap,
                interpolation="nearest",
                vmin=self.MIN_RETURN,
                vmax=self.MAX_RETURN,
            )
            self.vf_ax.legend(fontsize=12, bbox_to_anchor=(1.3, 1.05))
            self._set_ticks(self.vf_ax)

            # Create quivers for each action. 4 in total
            xshifts = [-self.SHIFT, self.SHIFT, 0, 0]
            yshifts = [0, 0, -self.SHIFT, self.SHIFT]
            for name, xshift, yshift in zip(self.ARROW_NAMES, xshifts, yshifts):
                x = np.arange(self.rows) + xshift
                y = np.arange(self.cols) + yshift
                self._init_arrow(name, *np.meshgrid(x, y))
            self.vf_fig.show()
        V = np.zeros((self.rows, self.cols))
        # Boolean 3 dimensional array. The third axis indexes the action.
        # This mask is used to see in which cells what actions should exist.
        # Note the quiver grids produced by np.meshgrid are (cols, rows).
        Mask = np.ones((self.cols, self.rows, self.actions_num), dtype="bool")
        arrowSize = np.zeros((self.cols, self.rows, self.actions_num), dtype="float")
        # 0 = suboptimal action, 1 = optimal action
        arrowColors = np.zeros((self.cols, self.rows, self.actions_num), dtype="uint8")
        for r, c in itertools.product(range(self.rows), range(self.cols)):
            if self.map[r, c] == self.BLOCKED:
                V[r, c] = 0
            elif self.map[r, c] == self.GOAL:
                V[r, c] = self.MAX_RETURN
            elif self.map[r, c] == self.PIT:
                V[r, c] = self.MIN_RETURN
            elif self.map[r, c] == self.EMPTY or self.map[r, c] == self.START:
                s = np.array([r, c])
                As = self.possibleActions(s)
                terminal = self.isTerminal(s)
                Qs = representation.Qs(s, terminal)
                bestA = representation.bestActions(s, terminal, As)
                V[r, c] = max(Qs[As])
                Mask[c, r, As] = False
                arrowColors[c, r, bestA] = 1
                # NOTE(review): ``zip(As, Qs)`` pairs As with the first
                # len(As) entries of Qs, while ``Qs[As]`` above indexes by
                # action id — confirm representation.Qs's ordering contract.
                for a, Q in zip(As, Qs):
                    value = linearMap(Q, self.MIN_RETURN, self.MAX_RETURN, 0, 1)
                    arrowSize[c, r, a] = value
        # Show Value Function
        self.vf_img.set_data(V)
        # Show Policy for arrows
        for i, name in enumerate(self.ARROW_NAMES):
            flip = -1 if name in ["DOWN", "LEFT"] else 1
            # The per-action slices are laid out (cols, rows); build the zero
            # component with the same shape. The original used
            # np.zeros((rows, cols)), which breaks on non-square maps.
            zero = np.zeros_like(arrowSize[:, :, i])
            if name in ["UP", "DOWN"]:
                dx, dy = flip * arrowSize[:, :, i], zero
            else:
                dx, dy = zero, flip * arrowSize[:, :, i]
            dx = np.ma.masked_array(dx, mask=Mask[:, :, i])
            dy = np.ma.masked_array(dy, mask=Mask[:, :, i])
            c = np.ma.masked_array(arrowColors[:, :, i], mask=Mask[:, :, i])
            self.arrow_figs[name].set_UVC(dy, dx, c)
        self.vf_fig.canvas.draw()

    def _reward(self, next_state, _terminal):
        """Reward for arriving at ``next_state`` (goal/pit/step)."""
        if self.map[next_state[0], next_state[1]] == self.GOAL:
            return self.GOAL_REWARD
        elif self.map[next_state[0], next_state[1]] == self.PIT:
            return self.PIT_REWARD
        else:
            return self.STEP_REWARD

    def step(self, a):
        """Execute action ``a``; with probability ``noise`` a random possible
        action is executed instead. Moves into walls or off the map leave the
        state unchanged."""
        if self.random_state.random_sample() < self.noise:
            # Random Move
            a = self.random_state.choice(self.possibleActions())

        # Take action
        ns = self.state + self.ACTIONS[a]

        # Check bounds on state values
        if (
            ns[0] < 0
            or ns[0] == self.rows
            or ns[1] < 0
            or ns[1] == self.cols
            or self.map[ns[0], ns[1]] == self.BLOCKED
        ):
            ns = self.state.copy()
        else:
            # If in bounds, update the current state
            self.state = ns.copy()

        terminal = self.isTerminal()
        reward = self._reward(ns, terminal)

        return reward, ns, terminal, self.possibleActions()

    def s0(self):
        """Reset to a start cell and return (state, terminal, possibleActions)."""
        self.state = self._sample_start()
        return self.state, self.isTerminal(), self.possibleActions()

    def isTerminal(self, s=None):
        """A state is terminal iff it is a GOAL or PIT cell."""
        if s is None:
            s = self.state
        if self.map[int(s[0]), int(s[1])] == self.GOAL:
            return True
        if self.map[int(s[0]), int(s[1])] == self.PIT:
            return True
        return False

    def possibleActions(self, s=None):
        """Actions whose destination is on the map and not BLOCKED."""
        if s is None:
            s = self.state
        possibleA = np.array([], np.uint8)
        for a in range(self.actions_num):
            ns = s + self.ACTIONS[a]
            if (
                ns[0] < 0
                or ns[0] == self.rows
                or ns[1] < 0
                or ns[1] == self.cols
                or self.map[int(ns[0]), int(ns[1])] == self.BLOCKED
            ):
                continue
            possibleA = np.append(possibleA, [a])
        return possibleA

    def expectedStep(self, s, a):
        """Return the full transition model at ``(s, a)`` without sampling.

        :returns: tuple ``(p, r, ns, t, pa)`` over the k possible outcomes:

            * p: k-by-1 probability of each transition
            * r: k-by-1 rewards
            * ns: k-by-|s| next state
            * t: k-by-1 terminal values
            * pa: k-by-?? possible actions for each next state
        """
        actions = self.possibleActions(s)
        k = len(actions)
        # Make Probabilities: noise is split uniformly over the k possible
        # actions; the intended one additionally gets the 1-noise mass.
        intended_action_index = findElemArray1D(a, actions)
        p = np.ones((k, 1)) * self.noise / (k * 1.0)
        p[intended_action_index, 0] += 1 - self.noise
        # Make next states
        ns = np.tile(s, (k, 1)).astype(int)
        actions = self.ACTIONS[actions]
        ns += actions
        # Make next possible actions
        # NOTE(review): rows of pa can have different lengths; np.array on a
        # ragged list relies on object-dtype inference — confirm behavior on
        # the NumPy version in use.
        pa = np.array([self.possibleActions(sn) for sn in ns])
        # Make rewards
        r = np.ones((k, 1)) * self.STEP_REWARD
        # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``int`` is the documented replacement.
        goal = self.map[ns[:, 0].astype(int), ns[:, 1].astype(int)] == self.GOAL
        pit = self.map[ns[:, 0].astype(int), ns[:, 1].astype(int)] == self.PIT
        r[goal] = self.GOAL_REWARD
        r[pit] = self.PIT_REWARD
        # Make terminals
        t = np.zeros((k, 1), bool)
        t[goal] = True
        t[pit] = True
        return p, r, ns, t, pa

    def allStates(self):
        """Enumerate all states of the (fully discrete) grid, or None if any
        dimension is continuous and enumeration is impossible.

        The original condition was inverted (``> 0``), returning None for the
        fully discrete case this method exists to serve.
        """
        if len(self.continuous_dims) == 0:
            # Recall that discrete dimensions are assumed to be integer
            return (
                perms(
                    self.discrete_statespace_limits[:, 1]
                    - self.discrete_statespace_limits[:, 0]
                    + 1
                )
                + self.discrete_statespace_limits[:, 0]
            )
        else:
            return None
from rlpy.Tools import FONTSIZE, id2vec, plt
from .Domain import Domain
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class FlipBoard(Domain):
    """
    A domain based on the last puzzle of Doors and Rooms Game stage 5-3.

    The goal of the game is to get all elements of a 4x4 board
    to have value 1.

    The initial state is the following::

        1 0 0 0
        0 0 0 0
        0 1 0 0
        0 0 1 0

    **STATE:** a 4x4 array of binary values. \n
    **ACTION:** Invert the value of a given [Row, Col] (from 0->1 or 1->0).\n
    **TRANSITION:** Determinisically flip all elements of the board on the same
    row OR col of the action. \n
    **REWARD:** -1 per step. 0 when the board is solved [all ones]

    **REFERENCE:**

    .. seealso::
        `gameday inc. Doors and Rooms game <http://bit.ly/SYqdZI>`_
    """

    BOARD_SIZE = 4
    STEP_REWARD = -1
    # Visual Stuff
    domain_fig = None
    move_fig = None

    def __init__(self):
        # One action per board cell (16 for a 4x4 board); each state
        # dimension is a binary cell value.
        boards_num = self.BOARD_SIZE ** 2
        super().__init__(
            actions_num=boards_num,
            statespace_limits=np.tile([0, 1], (boards_num, 1)),
            discount_factor=1.0,
            episodeCap=min(100, boards_num),
        )

    def showDomain(self, a=0):
        """Render the board and mark the last action's cell with an 'x'."""
        s = self.state
        # Draw the environment
        if self.domain_fig is None:
            self.move_fig = plt.subplot(111)
            s = s.reshape((self.BOARD_SIZE, self.BOARD_SIZE))
            self.domain_fig = plt.imshow(
                s, cmap="FlipBoard", interpolation="nearest", vmin=0, vmax=1
            )
            plt.xticks(np.arange(self.BOARD_SIZE), fontsize=FONTSIZE)
            plt.yticks(np.arange(self.BOARD_SIZE), fontsize=FONTSIZE)
            # pl.tight_layout()
            a_row, a_col = id2vec(a, [self.BOARD_SIZE, self.BOARD_SIZE])
            self.move_fig = self.move_fig.plot(a_col, a_row, "kx", markersize=30.0)
            plt.show()
        a_row, a_col = id2vec(a, [self.BOARD_SIZE, self.BOARD_SIZE])
        # Remove the previous action marker before drawing the new one.
        self.move_fig.pop(0).remove()
        # print a_row,a_col
        # Instead of '>' you can use 'D', 'o'
        self.move_fig = plt.plot(a_col, a_row, "kx", markersize=30.0)
        s = s.reshape((self.BOARD_SIZE, self.BOARD_SIZE))
        self.domain_fig.set_data(s)
        plt.draw()

    def step(self, a):
        """Apply action ``a`` (a flat cell index): invert the chosen cell's
        whole row and column. The cell itself is flipped three times (row,
        column, then explicitly), i.e. once net."""
        ns = self.state.copy()
        ns = np.reshape(ns, (self.BOARD_SIZE, -1))
        a_row, a_col = id2vec(a, [self.BOARD_SIZE, self.BOARD_SIZE])
        ns[a_row, :] = np.logical_not(ns[a_row, :])
        ns[:, a_col] = np.logical_not(ns[:, a_col])
        ns[a_row, a_col] = not ns[a_row, a_col]
        # NOTE(review): isTerminal() reads self.state, which is still the
        # PRE-action state here (it is updated only below). The step that
        # solves the board therefore returns terminal=False with reward -1;
        # confirm whether the check should use ``ns`` instead.
        if self.isTerminal():
            terminal = True
            r = 0
        else:
            terminal = False
            r = self.STEP_REWARD
        ns = ns.flatten()
        self.state = ns.copy()
        return r, ns, terminal, self.possibleActions()

    def s0(self):
        """Reset to the fixed initial configuration shown in the class docstring."""
        self.state = np.array(
            [[1, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]], dtype="bool"
        ).flatten()
        return self.state, self.isTerminal(), self.possibleActions()

    def isTerminal(self):
        """The board is solved when every cell equals 1."""
        return np.count_nonzero(self.state) == self.BOARD_SIZE ** 2
from .Domain import Domain
import numpy as np
from scipy.integrate import odeint
from rlpy.Tools import plt
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann"
class HIVTreatment(Domain):
    """
    Simulation of HIV Treatment. The aim is to find an optimal drug schedule.

    **STATE:** The state contains concentrations of 6 different cells, in
    the order used by the dynamics in :func:`dsdt`:

    * T1: non-infected CD4+ T-lymphocytes [cells / ml]
    * T2: non-infected macrophages [cells / ml]
    * T1*: infected CD4+ T-lymphocytes [cells / ml]
    * T2*: infected macrophages [cells / ml]
    * V: number of free HI viruses [copies / ml]
    * E: number of cytotoxic T-lymphocytes [cells / ml]

    **ACTIONS:** The therapy consists of 2 drugs
    (reverse transcriptase inhibitor [RTI] and protease inhibitor [PI]) which
    are activated or not. The action space contains therefore of 4 actions:

    * *0*: none active
    * *1*: RTI active
    * *2*: PI active
    * *3*: RTI and PI active

    **REFERENCE:**

    .. seealso::
        Ernst, D., Stan, G., Gonc, J. & Wehenkel, L.
        Clinical data based optimal STI strategies for HIV:
        A reinforcement learning approach
        In Proceedings of the 45th IEEE Conference on Decision and Control (2006).
    """

    # Labels in the order of the actual state vector (matching dsdt's
    # decomposition and the steady state in s0). The previous ordering
    # ("T1", "T1*", "T2", "T2*", ...) mislabeled components 1 and 2.
    state_names = ("T1", "T2", "T1*", "T2*", "V", "E")
    # Rows: actions (none, RTI, PI, both); columns: (eps1, eps2) efficacies.
    actions = np.array([[0.0, 0.0], [0.7, 0.0], [0.0, 0.3], [0.7, 0.3]])
    dt = 5  #: measurement every 5 days
    #: only update the graphs in showDomain every x steps
    show_domain_every = 20

    def __init__(self, logspace=True):
        """
        :param logspace: whether observed states are in log10 space or not
        """
        self.logspace = logspace
        if logspace:
            statespace_limits = np.array([[-5, 8]] * 6)
        else:
            statespace_limits = np.array([[0.0, 1e8]] * 6)
        super().__init__(
            actions_num=4,
            statespace_limits=statespace_limits,
            episodeCap=200,  #: total of 1000 days with a measurement every 5 days
            discount_factor=0.98,
            continuous_dims=np.arange(6),
        )
        # store samples of current episode for drawing (6 state rows + action)
        self.episode_data = np.zeros((7, self.episodeCap + 1))

    def step(self, a):
        """Integrate the ODE over ``dt`` days with the drug efficacies of
        action ``a`` and return (reward, observation, terminal, actions)."""
        self.t += 1
        eps1, eps2 = self.actions[a]
        ns = odeint(dsdt, self.state, [0, self.dt], args=(eps1, eps2), mxstep=1000)[-1]
        T1, T2, T1s, T2s, V, E = ns
        # the reward function penalizes treatment because of side-effects
        reward = -0.1 * V - 2e4 * eps1 ** 2 - 2e3 * eps2 ** 2 + 1e3 * E
        self.state = ns.copy()
        if self.logspace:
            ns = np.log10(ns)

        # Record raw concentrations and the action for showDomain().
        self.episode_data[:-1, self.t] = self.state
        self.episode_data[-1, self.t - 1] = a
        return reward, ns, False, self.possibleActions()

    def possibleActions(self):
        """All four drug combinations are always available."""
        return np.arange(4)

    def s0(self):
        """Reset to the non-healthy steady state of the system."""
        self.t = 0
        self.episode_data[:] = np.nan
        # non-healthy stable state of the system
        s = np.array([163573.0, 5.0, 11945.0, 46.0, 63919.0, 24.0])
        self.state = s.copy()
        # Record the initial state for plotting. This was previously placed
        # after the early return below, making it unreachable in the default
        # logspace mode.
        self.episode_data[:-1, 0] = s
        if self.logspace:
            return np.log10(s), self.isTerminal(), self.possibleActions()
        return s, self.isTerminal(), self.possibleActions()

    def showDomain(self, a=0, s=None):
        """
        shows a live graph of each concentration
        """
        # only update the graph every couple of steps, otherwise it is
        # extremely slow
        if self.t % self.show_domain_every != 0 and not self.t >= self.episodeCap:
            return

        n = self.state_space_dims + 1
        names = list(self.state_names) + ["Action"]
        colors = ["b", "b", "b", "b", "r", "g", "k"]
        handles = getattr(self, "_state_graph_handles", None)
        fig = plt.figure("HIVTreatment", figsize=(12, 10))
        if handles is None:
            # First call: create one subplot (and line) per state dimension.
            handles = []
            f, axes = plt.subplots(n, sharex=True, num="HIVTreatment", figsize=(12, 10))
            f.subplots_adjust(hspace=0.1)
            for i in range(n):
                ax = axes[i]
                d = np.arange(self.episodeCap + 1) * 5
                ax.set_ylabel(names[i])
                ax.locator_params(tight=True, nbins=4)
                handles.append(ax.plot(d, self.episode_data[i], color=colors[i])[0])
            self._state_graph_handles = handles
            ax.set_xlabel("Days")
        for i in range(n):
            handles[i].set_ydata(self.episode_data[i])
            ax = handles[i].axes
            ax.relim()
            ax.autoscale_view()
        fig.canvas.draw()
        fig.canvas.flush_events()
def dsdt(s, t, eps1, eps2):
    """
    Time derivative of the HIV model state; time is measured in days.

    :param s: state vector (T1, T2, T1*, T2*, V, E)
    :param t: time (unused; the system is autonomous)
    :param eps1: RTI drug efficacy in [0, 1]
    :param eps2: PI drug efficacy in [0, 1]
    :returns: d(state)/dt as a 6-element numpy array
    """
    # model parameter constants
    lambda1, lambda2 = 1e4, 31.98  # production rates of T1 / T2
    d1 = d2 = 0.01                 # natural death rates
    f = 0.34                       # RTI efficacy reduction for T2 cells
    k1, k2 = 8e-7, 1e-4            # infection rates
    delta = 0.7                    # death rate of infected cells
    m1 = m2 = 1e-5                 # immune-induced clearance rates
    NT = 100.0                     # virions produced per infected cell
    c = 13.0                       # virus natural death rate
    rho1 = rho2 = 1.0              # virions lost per infection event
    lambdaE = 1                    # immune effector production rate
    bE, Kb = 0.3, 100              # max birth rate / saturation for E
    d_E, Kd = 0.25, 500            # max death rate / saturation for E
    deltaE = 0.1                   # natural death rate of E

    # decompose state
    T1, T2, T1s, T2s, V, E = s

    # New infections of each cell type (RTI reduces both, partially for T2).
    infection1 = (1.0 - eps1) * k1 * V * T1
    infection2 = (1.0 - f * eps1) * k2 * V * T2
    infected = T1s + T2s

    dT1 = lambda1 - d1 * T1 - infection1
    dT2 = lambda2 - d2 * T2 - infection2
    dT1s = infection1 - delta * T1s - m1 * E * T1s
    dT2s = infection2 - delta * T2s - m2 * E * T2s
    # Free virus: production (reduced by PI), clearance, and loss to infection.
    absorbed = ((1.0 - eps1) * rho1 * k1 * T1 + (1.0 - f * eps1) * rho2 * k2 * T2) * V
    dV = (1.0 - eps2) * NT * delta * infected - c * V - absorbed
    # Immune effectors: saturating birth/death driven by infected-cell load.
    dE = (
        lambdaE
        + bE * infected / (infected + Kb) * E
        - d_E * infected / (infected + Kd) * E
        - deltaE * E
    )

    return np.array([dT1, dT2, dT1s, dT2s, dV, dE])
# Prefer the compiled (Cython) implementation of ``dsdt`` when available;
# on success it replaces the pure-Python fallback defined above. On any
# failure the import error is printed and the slow version is kept.
try:
    from .HIVTreatment_dynamics import dsdt
except Exception as e:
    print(e)
    print(
        "Cython extension for HIVTreatment dynamics not available, expect slow runtime"
    )
from rlpy.Tools import plt, mpatches, fromAtoB
from .Domain import Domain
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class ChainMDP(Domain):
    """
    A simple Chain MDP.

    **STATE:** s0 <-> s1 <-> ... <-> sn \n
    **ACTIONS:** are left [0] and right [1], deterministic. \n

    .. note::

        The actions [left, right] are available in ALL states, but if
        left is selected in s0 or right in sn, then s remains unchanged.

    The task is to reach sn from s0, after which the episode terminates.

    .. note::

        Optimal policy is to always to go right.

    **REWARD:**
    -1 per step, 0 at goal (terminates)

    **REFERENCE:**

    .. seealso::

        Michail G. Lagoudakis, Ronald Parr, and L. Bartlett
        Least-squares policy iteration. Journal of Machine Learning Research
        (2003) Issue 4.
    """

    #: Reward for each timestep spent in the goal region
    GOAL_REWARD = 0
    #: Reward for each timestep
    STEP_REWARD = -1
    #: Episode length cap; overwritten in __init__ with 2 * chainSize
    episodeCap = 0
    # Used for graphical normalization
    MAX_RETURN = 1
    # Used for graphical normalization
    MIN_RETURN = 0
    # Used for graphical shifting of arrows
    SHIFT = 0.3
    # Used for graphical radius of states
    RADIUS = 0.5
    # Stores the graphical patches for states so that we can later change
    # their colors
    circles = None
    #: Number of states in the chain
    chainSize = 0
    # Y values used for drawing circles
    Y = 1

    def __init__(self, chainSize=2):
        """
        :param chainSize: Number of states 'n' in the chain.
        """
        self.chainSize = chainSize
        self.start = 0
        self.goal = chainSize - 1
        super().__init__(
            actions_num=2,
            statespace_limits=np.array([[0, chainSize - 1]]),
            episodeCap=2 * chainSize,
        )

    def showDomain(self, a=0):
        """Draw the chain as a row of circles and highlight the current state."""
        s = self.state
        s = s[0]
        if self.circles is None:
            # first call: build the static figure
            fig = plt.figure(1, (self.chainSize * 2, 2))
            ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1.0)
            ax.set_xlim(0, self.chainSize * 2)
            ax.set_ylim(0, 2)
            # Make the last one double circle (goal marker)
            ax.add_patch(
                mpatches.Circle(
                    (1 + 2 * (self.chainSize - 1), self.Y), self.RADIUS * 1.1, fc="w"
                )
            )
            ax.xaxis.set_visible(False)
            ax.yaxis.set_visible(False)
            self.circles = [
                mpatches.Circle((1 + 2 * i, self.Y), self.RADIUS, fc="w")
                for i in range(self.chainSize)
            ]
            for i in range(self.chainSize):
                ax.add_patch(self.circles[i])
                # forward (right) arrows above the circles
                if i != self.chainSize - 1:
                    fromAtoB(
                        1 + 2 * i + self.SHIFT,
                        self.Y + self.SHIFT,
                        1 + 2 * (i + 1) - self.SHIFT,
                        self.Y + self.SHIFT,
                    )
                # backward (left) arrows below the circles
                if i != self.chainSize - 2:
                    fromAtoB(
                        1 + 2 * (i + 1) - self.SHIFT,
                        self.Y - self.SHIFT,
                        1 + 2 * i + self.SHIFT,
                        self.Y - self.SHIFT,
                        "r",
                    )
            # self-loop arrow on the first state (left in s0 keeps s unchanged)
            fromAtoB(
                0.75,
                self.Y - 1.5 * self.SHIFT,
                0.75,
                self.Y + 1.5 * self.SHIFT,
                "r",
                connectionstyle="arc3,rad=-1.2",
            )
            plt.show()
        for p in self.circles:
            p.set_facecolor("w")
        self.circles[s].set_facecolor("k")
        plt.draw()

    def step(self, a):
        """
        Deterministically move left (a == 0) or right (a == 1), clipped to
        the chain's ends.

        :returns: (reward, next state, terminal flag, possible actions).
        :raises ValueError: if ``a`` is not a valid action.
        """
        s = self.state[0]
        if a == 0:  # left
            ns = max(0, s - 1)
        elif a == 1:  # right
            ns = min(self.chainSize - 1, s + 1)
        else:
            # previously an invalid action fell through and raised an
            # opaque UnboundLocalError; fail explicitly instead
            raise ValueError("Invalid action: {}".format(a))
        ns = np.array([ns])
        self.state = ns
        terminal = self.isTerminal()
        r = self.GOAL_REWARD if terminal else self.STEP_REWARD
        return r, ns, terminal, self.possibleActions()

    def s0(self):
        """Start every episode in the leftmost state s0."""
        self.state = np.array([0])
        return self.state, self.isTerminal(), self.possibleActions()

    def isTerminal(self):
        """The episode ends when the rightmost state sn is reached."""
        s = self.state
        return s[0] == self.chainSize - 1
from rlpy.Tools import plt, id2vec, bound_vec
import numpy as np
from .Domain import Domain
import os
from rlpy.Tools import __rlpy_location__, FONTSIZE
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "N. Kemal Ure"
class IntruderMonitoring(Domain):
    """
    Formulated as an MDP, the intruder monitoring task is to guard danger zones using cameras
    so that if an intruder moves to a danger zone, at least one camera is pointing at that location.

    All locations are on a 2-D grid.

    The episode is finished after 1000 steps.

    **STATE:** \n
    Location of: [ Agent_1, Agent_2, ... Agent n ] \n
    Location of: [ Intruder_1, Intruder_2, ... Intruder_m ]\n

    Where *n* is number of agents, *m* is number of intruders.

    **ACTIONS:**
    [Up, Down, Left, Right, Remain]^n (one action for each agent).

    **TRANSITION:**
    Each agent can move in 4 directions + stay still.
    There is no noise on any movements.
    Each intruder moves with a fixed policy (specified by the user)
    By Default, intruder policy is uniform random.

    Map of the world contains fixed number of danger zones. Maps are simple text files
    contained in the ``Domains/IntruderMonitoringMaps/`` directory.

    **REWARD:** \n
    -1 for every visit of an intruder to a danger zone with no camera present.

    The team receives a penalty whenever there is an intruder on a danger zone in the
    absence of an agent. The task is to allocate agents on the map so that intruders
    do not enter the danger zones without attendance of an agent.
    """

    map = None
    #: Number of Cooperating agents
    NUMBER_OF_AGENTS = 0
    #: Number of Intruders
    NUMBER_OF_INTRUDERS = 0
    NUMBER_OF_DANGER_ZONES = 0
    #: Penalty for each unattended intrusion
    INTRUSION_PENALTY = -1.0
    # Constants in the map
    EMPTY, INTRUDER, AGENT, DANGER = range(4)
    #: Actions: Up, Down, Left, Right, Null
    ACTIONS_PER_AGENT = np.array([[-1, 0], [+1, 0], [0, -1], [0, +1], [0, 0]])
    # Visual Variables
    domain_fig = None
    ally_fig = None
    intruder_fig = None
    #: directory with maps shipped with rlpy
    DEFAULT_MAP_DIR = os.path.join(
        __rlpy_location__, "Domains", "IntruderMonitoringMaps"
    )

    def __init__(self, mapname=os.path.join(DEFAULT_MAP_DIR, "4x4_2A_3I.txt")):
        """
        :param mapname: path to the map file defining agents, intruders and
            danger zones.
        """
        self.setupMap(mapname)
        self.state_space_dims = 2 * (self.NUMBER_OF_AGENTS + self.NUMBER_OF_INTRUDERS)
        # each agent/intruder contributes a (row, col) pair of dimensions
        _statespace_limits = np.vstack([[0, self.ROWS - 1], [0, self.COLS - 1]])
        statespace_limits = np.tile(
            _statespace_limits, ((self.NUMBER_OF_AGENTS + self.NUMBER_OF_INTRUDERS), 1)
        )
        self.ACTION_LIMITS = [5] * self.NUMBER_OF_AGENTS
        self.DimNames = []
        super().__init__(
            actions_num=5 ** self.NUMBER_OF_AGENTS,
            statespace_limits=statespace_limits,
            discount_factor=0.8,
            episodeCap=100,
        )

    def setupMap(self, mapname):
        """Load the map file and extract agent, intruder and danger-zone
        start locations from its cell codes."""
        # Load the map as an array
        self.map = np.loadtxt(mapname, dtype=np.uint8)
        if self.map.ndim == 1:
            self.map = self.map[np.newaxis, :]
        self.ROWS, self.COLS = np.shape(self.map)
        R, C = (self.map == self.AGENT).nonzero()
        self.agents_initial_locations = np.vstack([R, C]).T
        self.NUMBER_OF_AGENTS = len(self.agents_initial_locations)
        R, C = (self.map == self.INTRUDER).nonzero()
        self.intruders_initial_locations = np.vstack([R, C]).T
        self.NUMBER_OF_INTRUDERS = len(self.intruders_initial_locations)
        R, C = (self.map == self.DANGER).nonzero()
        self.danger_zone_locations = np.vstack([R, C]).T
        self.NUMBER_OF_DANGER_ZONES = len(self.danger_zone_locations)

    def step(self, a):
        """
        Move all intruders according to
        the :py:meth:`~rlpy.Domains.IntruderMonitoring.IntruderPolicy`, default
        uniform random action.
        Move all agents according to the selected action ``a``.
        Calculate the reward = Number of danger zones being violated by
        intruders while no agents are present (ie, intruder occupies a danger
        cell with no agent simultaneously occupying the cell).
        """
        s = self.state
        # Move all agents based on the taken action
        agents = np.array(s[: self.NUMBER_OF_AGENTS * 2].reshape(-1, 2))
        actions = id2vec(a, self.ACTION_LIMITS)
        actions = self.ACTIONS_PER_AGENT[actions]
        agents += actions
        # Generate actions for each intruder based on the function
        # IntruderPolicy()
        intruders = np.array(s[self.NUMBER_OF_AGENTS * 2 :].reshape(-1, 2))
        actions = [
            self.IntruderPolicy(intruders[i]) for i in range(self.NUMBER_OF_INTRUDERS)
        ]
        actions = self.ACTIONS_PER_AGENT[actions]
        intruders += actions
        # Put all info in one big vector
        ns = np.hstack((agents.ravel(), intruders.ravel()))
        # Saturate states so that if actions forced agents to move out of the
        # grid world they bound back
        ns = bound_vec(ns, self.discrete_statespace_limits)
        # Find agents and intruders after saturation
        agents = ns[: self.NUMBER_OF_AGENTS * 2].reshape(-1, 2)
        intruders = ns[self.NUMBER_OF_AGENTS * 2 :].reshape(-1, 2)
        # Reward Calculation: mark intruder cells, then clear cells covered
        # by an agent, and count remaining marks on danger zones
        map = np.zeros((self.ROWS, self.COLS), "bool")
        map[intruders[:, 0], intruders[:, 1]] = True
        map[agents[:, 0], agents[:, 1]] = False
        intrusion_counter = np.count_nonzero(
            map[self.danger_zone_locations[:, 0], self.danger_zone_locations[:, 1]]
        )
        r = intrusion_counter * self.INTRUSION_PENALTY
        # (a second, redundant bound_vec call was removed here; ns is
        # already saturated above)
        self.state = ns.copy()
        return r, ns, False, self.possibleActions()

    def s0(self):
        """Reset all agents and intruders to their map-defined start cells."""
        self.state = np.hstack(
            [
                self.agents_initial_locations.ravel(),
                self.intruders_initial_locations.ravel(),
            ]
        )
        return self.state.copy(), self.isTerminal(), self.possibleActions()

    def possibleActionsPerAgent(self, s_i):
        """
        Returns all possible actions for a single (2-D) agent state *s_i*
        (where the domain state s = [s_0, ... s_i ... s_NUMBER_OF_AGENTS])

        1. tile the [R,C] for all actions
        2. add all actions to the results
        3. Find feasible rows and add them as possible actions
        """
        tile_s = np.tile(s_i, [len(self.ACTIONS_PER_AGENT), 1])
        next_states = tile_s + self.ACTIONS_PER_AGENT
        next_states_rows = next_states[:, 0]
        next_states_cols = next_states[:, 1]
        possibleActions1 = np.logical_and(
            0 <= next_states_rows, next_states_rows < self.ROWS
        )
        possibleActions2 = np.logical_and(
            0 <= next_states_cols, next_states_cols < self.COLS
        )
        possibleActions, _ = (
            np.logical_and(possibleActions1, possibleActions2).reshape(-1, 1).nonzero()
        )
        return possibleActions

    def printDomain(self, s, a):
        """
        Debug helper: print the joint state ``s`` and joint action ``a``,
        advance the domain one step and print the resulting reward.

        .. warning::
            Advances ``self.state`` as a side effect.
        """
        print("--------------")
        for i in range(0, self.NUMBER_OF_AGENTS):
            s_a = s[i * 2 : i * 2 + 2]
            aa = id2vec(a, self.ACTION_LIMITS)
            print("Agent {} Location: {} Action {}".format(i, s_a, aa))
        offset = 2 * self.NUMBER_OF_AGENTS
        for i in range(0, self.NUMBER_OF_INTRUDERS):
            s_i = s[offset + i * 2 : offset + i * 2 + 2]
            print("Intruder", s_i)
        # Fix: step() takes only the action and returns a 4-tuple; the old
        # call ``self.step(s, a)`` raised a TypeError and unpacked 3 values.
        r, ns, terminal, p_actions = self.step(a)
        print("Reward ", r)

    def IntruderPolicy(self, s_i):
        """
        :param s_i: The state of a single agent
            (where the domain state s = [s_0, ... s_i ... s_NUMBER_OF_AGENTS]).
        :returns: a valid actions for the agent in state **s_i** to take.

        Default random action among possible.
        """
        return self.random_state.choice(self.possibleActionsPerAgent(s_i))

    def showDomain(self, a):
        """Render the map with agents (blue dots) and intruders (gray markers)."""
        s = self.state
        # Draw the environment
        fig = plt.figure("IntruderMonitoring")
        if self.domain_fig is None:
            self.domain_fig = plt.imshow(
                self.map,
                cmap="IntruderMonitoring",
                interpolation="nearest",
                vmin=0,
                vmax=3,
            )
            plt.xticks(np.arange(self.COLS), fontsize=FONTSIZE)
            plt.yticks(np.arange(self.ROWS), fontsize=FONTSIZE)
            plt.show()
        if self.ally_fig is not None:
            # remove the previous frame's markers before redrawing
            self.ally_fig.pop(0).remove()
            self.intruder_fig.pop(0).remove()
        s_ally = s[0 : self.NUMBER_OF_AGENTS * 2].reshape((-1, 2))
        s_intruder = s[self.NUMBER_OF_AGENTS * 2 :].reshape((-1, 2))
        self.ally_fig = plt.plot(
            s_ally[:, 1],
            s_ally[:, 0],
            "bo",
            markersize=30.0,
            alpha=0.7,
            markeredgecolor="k",
            markeredgewidth=2,
        )
        self.intruder_fig = plt.plot(
            s_intruder[:, 1],
            s_intruder[:, 0],
            "g>",
            color="gray",
            markersize=30.0,
            alpha=0.7,
            markeredgecolor="k",
            markeredgewidth=2,
        )
        fig.canvas.draw()
        fig.canvas.flush_events()
import itertools
import numpy as np
from rlpy.Tools import __rlpy_location__, plt, with_bold_fonts
import os
from .GridWorld import GridWorld
__license__ = "BSD 3-Clause"
__author__ = "Yuji Kanagawa"
class AnyRewardGridWorld(GridWorld):
    """The same as GridWorld, but you can set any reward for each cell.

    The map file stacks two equally-shaped integer grids: the upper half is
    the usual GridWorld layout and the lower half holds the per-cell reward.
    """

    # directory of maps shipped with rlpy
    DEFAULT_MAP_DIR = os.path.join(
        __rlpy_location__, "Domains", "AnyRewardGridWorldMaps"
    )

    def _load_map(self, mapfile):
        """Split the stacked layout/reward file into ``map`` and ``reward_map``."""
        map_and_reward = np.loadtxt(mapfile, dtype=np.int32)
        mshape = map_and_reward.shape
        # NOTE(review): this check implies cols * 2 == rows of the stacked
        # file, i.e. square maps only -- confirm whether that is intended.
        if mshape[1] * 2 != mshape[0]:
            raise ValueError("Invalid map with shape {}".format(mshape))
        col = mshape[0] // 2
        self.map = map_and_reward[:col]
        self.reward_map = map_and_reward[col:]

    def __init__(
        self,
        mapfile=os.path.join(DEFAULT_MAP_DIR, "6x6guided.txt"),
        noise=0.1,
        step_penalty=1.0,
        random_start=False,
        episodeCap=20,
    ):
        """
        :param mapfile: stacked layout/reward map file.
        :param noise: probability that a move is perturbed (see GridWorld).
        :param step_penalty: subtracted from the cell reward on every
            non-terminal step.
        :param random_start: start each episode in a random empty cell.
        :param episodeCap: maximum episode length.
        """
        super().__init__(
            mapfile=mapfile,
            noise=noise,
            random_start=random_start,
            episodeCap=episodeCap,
        )
        self.step_penalty = step_penalty

    def _reward(self, next_state, terminal):
        """Per-cell reward from the reward map, minus the step penalty on
        non-terminal transitions."""
        reward = self.reward_map[next_state[0], next_state[1]]
        if not terminal:
            reward -= self.step_penalty
        return reward

    def _rew_range(self):
        """Return (min, max) reward over empty cells, falling back to the
        global reward range when the map has no empty cell."""
        mi, ma = 1000, -1000
        for r, c in itertools.product(range(self.rows), range(self.cols)):
            if self.map[r, c] == self.EMPTY:
                mi = min(mi, self.reward_map[r, c])
                ma = max(ma, self.reward_map[r, c])
        # Fix: the previous fallback called min()/max() on a scalar cell
        # value (a TypeError) and depended on stale loop variables.
        if mi == 1000:
            mi = self.reward_map.min()
        if ma == -1000:
            ma = self.reward_map.max()
        return mi, ma

    def _show_numbers(self):
        """Overlay the per-cell reward values on the rendered map."""
        cmap = plt.get_cmap("ValueFunction-New")
        rw_min, rw_max = self._rew_range()
        for r, c in itertools.product(range(self.rows), range(self.cols)):
            if self.reward_map[r, c] == 0:
                continue
            raw_reward = self.reward_map[r, c]
            if self.map[r, c] == self.EMPTY:
                # color empty cells by their normalized reward
                if rw_max > rw_min:
                    reward = (self.reward_map[r, c] - rw_min) / (rw_max - rw_min)
                else:
                    reward = 0.7
                color = cmap(reward)
            elif self.map[r, c] in (self.GOAL, self.PIT):
                # Fix: was ``== self.GOAL or self.PIT``, which is always
                # truthy and made the final ``continue`` unreachable.
                color = "w"
            else:
                continue
            self.domain_ax.text(c - 0.2, r + 0.1, str(raw_reward), color=color)

    def _show_map(self):
        super()._show_map()
        with with_bold_fonts():
            self._show_numbers()
from .Domain import Domain
import numpy as np
from rlpy.Tools import mpl, plt, rk4, cartesian, colors
from rlpy.Policies.SwimmerPolicy import SwimmerPolicy
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann"
class Swimmer(Domain):
    """
    A swimmer consisting of a chain of d links connected by rotational joints.
    Each joint is actuated. The goal is to move the swimmer to a specified goal
    position.

    *States*:
        | 2 dimensions: position of nose relative to goal
        | d -1 dimensions: angles
        | 2 dimensions: velocity of the nose
        | d dimensions: angular velocities

    *Actions*:
        each joint torque is discretized in 3 values: -2, 0, 2

    .. note::
        adapted from Yuval Tassas swimmer implementation in Matlab available at
        http://www.cs.washington.edu/people/postdocs/tassa/code/

    .. seealso::
        Tassa, Y., Erez, T., & Smart, B. (2007).
        *Receding Horizon Differential Dynamic Programming.*
        In Advances in Neural Information Processing Systems.
    """

    #: integration time step
    dt = 0.03

    def __init__(self, d=3, k1=7.5, k2=0.3):
        """
        d:
            number of joints
        k1, k2:
            friction coefficients passed to the dynamics
            (presumably normal/tangential drag -- confirm against reference)
        """
        self.d = d
        self.k1 = k1
        self.k2 = k2
        # index of the link treated as the nose
        self.nose = 0
        self.masses = np.ones(d)
        self.lengths = np.ones(d)
        # moment of inertia of a uniform rod: m * l^2 / 12
        self.inertia = self.masses * self.lengths * self.lengths / 12.0
        self.goal = np.zeros(2)
        # reward function parameters
        self.cu = 0.04
        self.cx = 2.0
        # precomputed coupling matrices used by the dynamics (dsdt)
        Q = np.eye(self.d, k=1) - np.eye(self.d)
        Q[-1, :] = self.masses
        A = np.eye(self.d, k=1) + np.eye(self.d)
        A[-1, -1] = 0.0
        self.P = np.dot(np.linalg.inv(Q), A * self.lengths[None, :]) / 2
        # U maps the d-1 joint torques to the d links
        self.U = np.eye(self.d) - np.eye(self.d, k=-1)
        self.U = self.U[:, :-1]
        self.G = np.dot(self.P.T * self.masses[None, :], self.P)
        # indicator variables for angles in a state representation
        self.angles = np.zeros(2 + self.d * 2 + 1, dtype=np.bool)
        self.angles[2 : 2 + self.d - 1] = True
        self.angles[-self.d - 2 :] = True
        # all joint-torque combinations: 3^(d-1) discrete actions
        self.actions = cartesian((d - 1) * [[-2.0, 0.0, 2]])
        statespace_limits = np.array(
            [[-15, 15]] * 2
            + [[-np.pi, np.pi]] * (d - 1)
            + [[-2, 2]] * 2
            + [[-np.pi * 2, np.pi * 2]] * d
        )
        super().__init__(
            actions_num=len(self.actions),
            statespace_limits=statespace_limits,
            continuous_dims=np.arange(statespace_limits.shape[0]),
            episodeCap=1000,
            discount_factor=0.98,
        )
        self.swimmer_lines = None

    def s0(self):
        """Start with all angles/velocities zero and center of mass at (10, 0)."""
        self.theta = np.zeros(self.d)
        self.pos_cm = np.array([10, 0])
        self.v_cm = np.zeros(2)
        self.dtheta = np.zeros(self.d)
        return self.state, self.isTerminal(), self.possibleActions()

    @property
    def state(self):
        # observable state is the body-frame representation
        return np.hstack(self._body_coord())

    def isTerminal(self):
        # continuing task: never terminates
        return False

    def possibleActions(self, s=None):
        # all joint-torque combinations are always available
        return np.arange(self.actions_num)

    def showDomain(self, a=None):
        """Render the swimmer's links and the goal point."""
        if a is not None:
            a = self.actions[a]
        T = np.empty((self.d, 2))
        T[:, 0] = np.cos(self.theta)
        T[:, 1] = np.sin(self.theta)
        # link centers, then endpoints obtained by extending half a length
        R = np.dot(self.P, T)
        R1 = R - 0.5 * self.lengths[:, None] * T
        R2 = R + 0.5 * self.lengths[:, None] * T
        Rx = np.hstack([R1[:, 0], R2[:, 0]]) + self.pos_cm[0]
        Ry = np.hstack([R1[:, 1], R2[:, 1]]) + self.pos_cm[1]
        fig = plt.figure("Swimmer")
        if self.swimmer_lines is None:
            plt.plot(0.0, 0.0, "ro")
            self.swimmer_lines = plt.plot(Rx, Ry)[0]
            self.action_text = plt.text(-2, -8, str(a))
            plt.xlim(-5, 15)
            plt.ylim(-10, 10)
        else:
            self.swimmer_lines.set_data(Rx, Ry)
            self.action_text.set_text(str(a))
        fig.canvas.draw()
        fig.canvas.flush_events()

    def showLearning(self, representation):
        """Plot learned policy, a reference 'good' policy and the value
        function over a 2-D slice (dims 2 and 3) of the state space."""
        good_pol = SwimmerPolicy(representation=representation, epsilon=0)
        id1 = 2
        id2 = 3
        res = 200
        s = np.zeros(self.state_space_dims)
        l1 = np.linspace(
            self.statespace_limits[id1, 0], self.statespace_limits[id1, 1], res
        )
        l2 = np.linspace(
            self.statespace_limits[id2, 0], self.statespace_limits[id2, 1], res
        )
        pi = np.zeros((res, res), "uint8")
        good_pi = np.zeros((res, res), "uint8")
        V = np.zeros((res, res))
        for row, x1 in enumerate(l1):
            for col, x2 in enumerate(l2):
                s[id1] = x1
                s[id2] = x2
                # Array of Q-function evaluated at all possible actions at
                # state s
                Qs = representation.Qs(s, False)
                # Assign pi to be optimal action (which maximizes Q-function)
                maxQ = np.max(Qs)
                pi[row, col] = np.random.choice(np.arange(len(Qs))[Qs == maxQ])
                good_pi[row, col] = good_pol.pi(s, False, np.arange(self.actions_num))
                # Assign V to be the value of the Q-function under optimal
                # action
                V[row, col] = maxQ
        self._plot_policy(
            pi,
            title="Learned Policy",
            ylim=self.statespace_limits[id1],
            xlim=self.statespace_limits[id2],
        )
        self._plot_policy(
            good_pi,
            title="Good Policy",
            var="good_policy_fig",
            ylim=self.statespace_limits[id1],
            xlim=self.statespace_limits[id2],
        )
        self._plot_valfun(
            V, ylim=self.statespace_limits[id1], xlim=self.statespace_limits[id2]
        )
        if self.policy_fig is None or self.valueFunction_fig is None:
            plt.show()

    def _plot_policy(
        self, piMat, title="Policy", var="policy_fig", xlim=None, ylim=None
    ):
        """
        Render a 2-D policy matrix as an image; the image handle is cached
        on ``self`` under the attribute name ``var``.

        :returns: handle to the figure
        """
        if getattr(self, var, None) is None:
            plt.figure(title)
            # define the colormap
            cmap = plt.cm.jet
            # extract all colors from the .jet map
            cmaplist = [cmap(i) for i in range(cmap.N)]
            # force the first color entry to be grey
            cmaplist[0] = (0.5, 0.5, 0.5, 1.0)
            # create the new map
            cmap = cmap.from_list("Custom cmap", cmaplist, cmap.N)
            # define the bins and normalize
            bounds = np.linspace(0, self.actions_num, self.actions_num + 1)
            norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
            if xlim is not None and ylim is not None:
                extent = [xlim[0], xlim[1], ylim[0], ylim[1]]
            else:
                extent = [0, 1, 0, 1]
            self.__dict__[var] = plt.imshow(
                piMat,
                interpolation="nearest",
                origin="lower",
                cmap=cmap,
                norm=norm,
                extent=extent,
            )
            plt.title(title)
            plt.colorbar()
        plt.figure(title)
        self.__dict__[var].set_data(piMat)
        plt.draw()

    def _plot_valfun(self, VMat, xlim=None, ylim=None):
        """
        Render a 2-D value-function matrix as an image.

        :returns: handle to the figure
        """
        plt.figure("Value Function")
        plt.title("Value Function")
        if xlim is not None and ylim is not None:
            extent = [xlim[0], xlim[1], ylim[0], ylim[1]]
        else:
            extent = [0, 1, 0, 1]
        self.valueFunction_fig = plt.imshow(
            VMat,
            cmap="ValueFunction",
            interpolation="nearest",
            origin="lower",
            extent=extent,
        )
        norm = colors.Normalize(vmin=VMat.min(), vmax=VMat.max())
        self.valueFunction_fig.set_data(VMat)
        self.valueFunction_fig.set_norm(norm)
        plt.draw()

    def _body_coord(self):
        """
        transforms the current state into coordinates that are more
        reasonable for learning
        returns a 4-tupel consisting of:
        nose position, joint angles (d-1), nose velocity, angular velocities

        The nose position and nose velocities are referenced to the nose rotation.
        """
        cth = np.cos(self.theta)
        sth = np.sin(self.theta)
        M = self.P - 0.5 * np.diag(self.lengths)
        # stores the vector from the center of mass to the nose
        c2n = np.array([np.dot(M[self.nose], cth), np.dot(M[self.nose], sth)])
        # absolute position of nose
        T = -self.pos_cm - c2n - self.goal
        # rotating coordinate such that nose is axis-aligned (nose frame)
        # (no effect when \theta_{nose} = 0)
        c2n_x = np.array([cth[self.nose], sth[self.nose]])
        c2n_y = np.array([-sth[self.nose], cth[self.nose]])
        Tcn = np.array([np.sum(T * c2n_x), np.sum(T * c2n_y)])
        # velocity at each joint relative to center of mass velocity
        vx = -np.dot(M, sth * self.dtheta)
        vy = np.dot(M, cth * self.dtheta)
        # velocity at nose (world frame) relative to center of mass velocity
        v2n = np.array([vx[self.nose], vy[self.nose]])
        # rotating nose velocity to be in nose frame
        Vcn = np.array(
            [np.sum((self.v_cm + v2n) * c2n_x), np.sum((self.v_cm + v2n) * c2n_y)]
        )
        # angles should be in [-pi, pi]
        ang = np.mod(self.theta[1:] - self.theta[:-1] + np.pi, 2 * np.pi) - np.pi
        return Tcn, ang, Vcn, self.dtheta

    def step(self, a):
        """Integrate the dynamics one dt with RK4 under the chosen torques."""
        d = self.d
        a = self.actions[a]
        # pack the internal (world-frame) state for the integrator
        s = np.hstack((self.pos_cm, self.theta, self.v_cm, self.dtheta))
        ns = rk4(
            dsdt,
            s,
            [0, self.dt],
            a,
            self.P,
            self.inertia,
            self.G,
            self.U,
            self.lengths,
            self.masses,
            self.k1,
            self.k2,
        )[-1]
        # unpack the integrated state back into the internal variables
        self.theta = ns[2 : 2 + d]
        self.v_cm = ns[2 + d : 4 + d]
        self.dtheta = ns[4 + d :]
        self.pos_cm = ns[:2]
        return (self._reward(a), self.state, self.isTerminal(), self.possibleActions())

    def _dsdt(self, s, a):
        """ just a convenience function for testing and debugging, not really used"""
        return dsdt(
            s,
            0.0,
            a,
            self.P,
            self.inertia,
            self.G,
            self.U,
            self.lengths,
            self.masses,
            self.k1,
            self.k2,
        )

    def _reward(self, a):
        """
        penalizes the l2 distance to the goal (almost linearly) and
        a small penalty for torques coming from actions
        """
        xrel = self._body_coord()[0] - self.goal
        dist = np.sum(xrel ** 2)
        return -self.cx * dist / (np.sqrt(dist) + 1) - self.cu * np.sum(a ** 2)
def dsdt(s, t, a, P, I, G, U, lengths, masses, k1, k2):
    """
    time derivative of system dynamics

    :param s: packed state [pos_cm (2), theta (d), v_cm (2), dtheta (d)].
    :param t: time (unused; the dynamics are autonomous).
    :param a: joint torques (d - 1 values).
    :param P, G, U: coupling matrices precomputed in Swimmer.__init__.
    :param I: per-link moments of inertia.
    :param k1, k2: friction coefficients (presumably normal/tangential
        drag -- confirm against the Tassa et al. reference).
    :returns: np.ndarray, time derivative of ``s``.
    """
    d = len(a) + 1
    theta = s[2 : 2 + d]
    vcm = s[2 + d : 4 + d]
    dtheta = s[4 + d :]
    cth = np.cos(theta)
    sth = np.sin(theta)
    # link velocities relative to the center of mass
    rVx = np.dot(P, -sth * dtheta)
    rVy = np.dot(P, cth * dtheta)
    Vx = rVx + vcm[0]
    Vy = rVy + vcm[1]
    # per-link normal and tangential velocity components
    Vn = -sth * Vx + cth * Vy
    Vt = cth * Vx + sth * Vy
    # Euler-Lagrange terms for the angular accelerations
    EL1 = np.dot(
        (v1Mv2(-sth, G, cth) + v1Mv2(cth, G, sth)) * dtheta[None, :]
        + (v1Mv2(cth, G, -sth) + v1Mv2(sth, G, cth)) * dtheta[:, None],
        dtheta,
    )
    EL3 = np.diag(I) + v1Mv2(sth, G, sth) + v1Mv2(cth, G, cth)
    EL2 = (
        -k1
        * np.dot((v1Mv2(-sth, P.T, -sth) + v1Mv2(cth, P.T, cth)) * lengths[None, :], Vn)
        - k1 * np.power(lengths, 3) * dtheta / 12.0
        - k2
        * np.dot((v1Mv2(-sth, P.T, cth) + v1Mv2(cth, P.T, sth)) * lengths[None, :], Vt)
    )
    ds = np.zeros_like(s)
    # center of mass moves with its velocity; angles change with dtheta
    ds[:2] = vcm
    ds[2 : 2 + d] = dtheta
    # friction forces accelerate the center of mass
    ds[2 + d] = -(k1 * np.sum(-sth * Vn) + k2 * np.sum(cth * Vt)) / np.sum(masses)
    ds[3 + d] = -(k1 * np.sum(cth * Vn) + k2 * np.sum(sth * Vt)) / np.sum(masses)
    # solve the linear system for the angular accelerations
    ds[4 + d :] = np.linalg.solve(EL3, EL1 + EL2 + np.dot(U, a))
    return ds
def v1Mv2(v1, M, v2):
    """
    Compute ``diag(v1) . M . diag(v2)`` without building the diagonal
    matrices: scale the rows of M by v1, then the columns by v2.

    :returns: np.ndarray with same dimensions as M
    """
    row_scaled = v1.reshape(-1, 1) * M
    return row_scaled * v2.reshape(1, -1)
from .Domain import Domain
import numpy as np
import matplotlib.pyplot as plt
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann"
class PuddleWorld(Domain):
    """
    Implementation of the puddle world benchmark as described in references
    below.

    **STATE:** 2-dimensional vector, *s*, each dimension is continuous in [0,1]\n
    **ACTIONS:** [right, up, left, down] - NOTE it is not possible to loiter.\n
    **REWARD:** 0 for goal state, -1 for each step, and an additional penalty
    for passing near puddles.

    **REFERENCE:**

    .. seealso::
        Jong, N. & Stone, P.: Kernel-based models for reinforcement learning, ICML (2006)

    .. seealso::
        Sutton, R. S.: Generalization in Reinforcement Learning:
        Successful Examples Using Sparse Coarse Coding, NIPS(1996)
    """

    domain_fig = None
    valfun_fig = None
    polfun_fig = None

    # four moves of length 0.05: right, up, left, down
    actions = 0.05 * np.array([[1, 0], [0, 1], [-1, 0], [0, -1]], dtype=np.float)
    # two puddles, each defined as a line segment by its two endpoints
    puddles = np.array([[[0.1, 0.75], [0.45, 0.75]], [[0.45, 0.4], [0.45, 0.8]]])

    def __init__(self, noise_level=0.01, discount_factor=1.0):
        """
        :param noise_level: scale of the gaussian noise added to each move.
        :param discount_factor: MDP discount factor.
        """
        super().__init__(
            actions_num=len(self.actions),
            statespace_limits=np.array([[0.0, 1.0]] * 2),
            continuous_dims=np.arange(2),
            episodeCap=1000,
            discount_factor=discount_factor,
        )
        self.noise_level = noise_level
        # precompute the reward landscape on a 100x100 grid for rendering
        self.reward_map = np.zeros((100, 100))
        self.val_map = np.zeros((100, 100))
        self.pi_map = np.zeros((100, 100))
        a = np.zeros((2))
        for i, x in enumerate(np.linspace(0, 1, 100)):
            for j, y in enumerate(np.linspace(0, 1, 100)):
                a[0] = x
                a[1] = y
                self.reward_map[j, i] = self._reward(a)

    def s0(self):
        """Sample a uniform random non-terminal start state."""
        self.state = self.random_state.rand(2)
        while self.isTerminal():
            self.state = self.random_state.rand(2)
        return self.state.copy(), False, self.possibleActions()

    def isTerminal(self, s=None):
        # terminal when x + y > 1.9, i.e. near the top-right corner
        if s is None:
            s = self.state
        return s.sum() > 0.95 * 2

    def possibleActions(self, s=0):
        # all four moves are always available
        return np.arange(self.actions_num)

    def step(self, a):
        """Apply move ``a`` with gaussian noise, clipped to the unit square."""
        a = self.actions[a]
        # NOTE(review): a single scalar noise sample is added to BOTH
        # coordinates; per-dimension noise (randn(2)) may be intended --
        # confirm against the cited references.
        ns = self.state + a + self.random_state.randn() * self.noise_level
        # make sure we stay inside the [0,1]^2 region
        ns = np.minimum(ns, 1.0)
        ns = np.maximum(ns, 0.0)
        self.state = ns.copy()
        return self._reward(ns), ns, self.isTerminal(), self.possibleActions()

    def _reward(self, s):
        """Reward: 0 at the goal, otherwise -1 minus a penalty growing the
        deeper *s* lies inside a puddle (within 0.1 of a puddle segment)."""
        if self.isTerminal(s):
            return 0  # goal state reached
        reward = -1
        # compute puddle influence: project s onto each puddle segment
        d = self.puddles[:, 1, :] - self.puddles[:, 0, :]
        denom = (d ** 2).sum(axis=1)
        g = ((s - self.puddles[:, 0, :]) * d).sum(axis=1) / denom
        g = np.minimum(g, 1)
        g = np.maximum(g, 0)
        # distance from s to the closest point of each segment
        dists = np.sqrt(((self.puddles[:, 0, :] + g * d - s) ** 2).sum(axis=1))
        dists = dists[dists < 0.1]
        if len(dists):
            # the inner ``dists < 0.1`` filter is redundant (already applied)
            reward -= 400 * (0.1 - dists[dists < 0.1]).max()
        return reward

    def showDomain(self, a=None):
        """Render the reward landscape and mark the current state."""
        s = self.state
        # Draw the environment
        if self.domain_fig is None:
            self.domain_fig = plt.figure("Domain")
            self.reward_im = plt.imshow(
                self.reward_map, extent=(0, 1, 0, 1), origin="lower"
            )
            self.state_mark = plt.plot(s[0], s[1], "kd", markersize=20)
            plt.figure("Domain").canvas.draw()
            plt.figure("Domain").canvas.flush_events()
        else:
            self.domain_fig = plt.figure("Domain")
            self.state_mark[0].set_data([s[0]], [s[1]])
            plt.figure("Domain").canvas.draw()
            plt.figure("Domain").canvas.flush_events()

    def showLearning(self, representation):
        """Plot the value function and greedy policy on a 100x100 grid."""
        a = np.zeros((2))
        for i, x in enumerate(np.linspace(0, 1, 100)):
            for j, y in enumerate(np.linspace(0, 1, 100)):
                a[0] = x
                a[1] = y
                self.val_map[j, i] = representation.V(
                    a, self.isTerminal(a), self.possibleActions()
                )
                self.pi_map[j, i] = representation.bestAction(
                    a, self.isTerminal(a), self.possibleActions()
                )
        if self.valfun_fig is None:
            self.valfun_fig = plt.figure("Value Function")
            plt.clf()
            self.val_im = plt.imshow(self.val_map, extent=(0, 1, 0, 1), origin="lower")
            plt.colorbar()
        else:
            self.valfun_fig = plt.figure("Value Function")
            self.val_im.set_data(self.val_map)
            self.val_im.autoscale()
        plt.draw()
        if self.polfun_fig is None:
            self.polfun_fig = plt.figure("Policy")
            plt.clf()
            self.pol_im = plt.imshow(
                self.pi_map, extent=(0, 1, 0, 1), origin="lower", cmap="4Actions"
            )
        else:
            self.polfun_fig = plt.figure("Policy")
            self.pol_im.set_data(self.pi_map)
            self.pol_im.autoscale()
        plt.draw()
class PuddleGapWorld(PuddleWorld):
    """PuddleWorld variant with a horizontal band (0.6 <= y < 0.67) in which
    the puddle penalty is suppressed and the plain step reward applies."""

    def _reward(self, s):
        """Same as PuddleWorld, but force a reward of -1 inside the gap band."""
        reward = super()._reward(s)
        if 0.6 <= s[1] < 0.67:
            reward = -1
        return reward
from .Domain import Domain
import numpy as np
from itertools import product
from rlpy.Tools import plt
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann"
class BicycleBalancing(Domain):
"""
Simulation of balancing a bicycle.
**STATE:**
The state contains of 7 variables, 5 of which are observable.
* ``omega:`` angle from the vertical to the bicycle [rad]
* ``omega dot:`` angular velocity for omega [rad / s]
* ``theta:`` angle the handlebars are displaced from normal [rad]
* ``theta dot:`` angular velocity for theta [rad / s]
* ``psi:`` angle formed by bicycle frame and x-axis [rad]
[x_b: x-coordinate where the back tire touches the ground [m]]
[y_b: y-coordinate where the back tire touches the ground [m]]
*The state variables* x_b *and* y_b *are not observable.*
**ACTIONS:**
* T in {-2, 0, 2}: the torque applied to the handlebar
* d in {-.02, 0, .02}: displacement of the rider
i.e., 9 actions in total.
**REFERENCE:**
.. seealso::
Ernst, D., Geurts, P. & Wehenkel, L. Tree-Based Batch Mode Reinforcement Learning.
Journal of Machine Learning Research (2005) Issue 6
.. warning::
This domain is tested only marginally, use with a care.
"""
state_names = (
r"$\omega$",
r"$\dot{\omega}$",
r"$\theta$",
r"$\dot{\theta}$",
r"$\psi",
)
#: only update the graphs in showDomain every x steps
show_domain_every = 20
dt = 0.01 #: Frequency is ``1 / dt``.
def __init__(self):
self.actions = np.array(list(product([-2, 0, 2], [-0.02, 0.0, 0.02])))
smax = np.array([np.pi * 12 / 180, np.pi, np.pi * 80 / 180, np.pi, np.pi])
super().__init__(
actions_num=self.actions.shape[0],
statespace_limits=np.stack((-smax, smax), axis=1),
discount_factor=0.98,
continuous_dims=np.arange(5),
episodeCap=50000,
)
self.episode_data = np.zeros((6, self.episodeCap + 1))
self._state_graph_handles = None
def step(self, a):
self.t += 1
s = self.state
T, d = self.actions[a]
omega, domega, theta, dtheta, psi = s
v = 10 / 3.6
g = 9.82
d_CM = 0.3
c = 0.66
h = 0.94
M_c = 15.0
M_d = 1.7
M_p = 60.0
M = M_c + M_p
r = 0.34
dsigma = v / r
I = 13 / 3.0 * M_c * h ** 2 + M_p * (h + d_CM) ** 2
I_dc = M_d * r ** 2
I_dv = 3 / 2.0 * M_d * r ** 2
I_dl = M_d / 2 * r ** 2
l = 1.11
w = self.random_state.uniform(-0.02, 0.02)
phi = omega + np.arctan(d + w) / h
invr_f = np.abs(np.sin(theta)) / l
invr_b = np.abs(np.tan(theta)) / l
invr_CM = ((l - c) ** 2 + invr_b ** (-2)) ** (-0.5) if theta != 0.0 else 0.0
nomega = omega + self.dt * domega
ndomega = (
domega
+ self.dt
* (
M * h * g * np.sin(phi)
- np.cos(phi)
* (
I_dc * dsigma * dtheta
+ np.sign(theta)
* v ** 2
* (M_d * r * (invr_f + invr_b) + M * h * invr_CM)
)
)
/ I
)
out = theta + self.dt * dtheta
rad80 = (80 / 180) * np.pi
ntheta = out if abs(out) <= rad80 else np.sign(out) * rad80
ndtheta = (
dtheta + self.dt * (T - I_dv * dsigma * domega) / I_dl
if abs(out) <= rad80
else 0.0
)
npsi = psi + self.dt * np.sign(theta) * v * invr_b
# Where are these three lines from? Having a hard time finding them in
# the paper referenced
npsi = npsi % (2 * np.pi)
if npsi > np.pi:
npsi -= 2 * np.pi
ns = np.array([nomega, ndomega, ntheta, ndtheta, npsi])
self.state = ns
self.episode_data[:-1, self.t] = self.state
self.episode_data[-1, self.t - 1] = a
return self._reward(s), ns, self.isTerminal(), self.possibleActions()
def isTerminal(self):
s = self.state
omega = s[0]
return omega < -np.pi * 12.0 / 180 or omega > np.pi * 12.0 / 180.0
def _reward(self, s):
return -1.0 if self.isTerminal() else 0.0
def possibleActions(self):
return np.arange(9)
def s0(self):
# non-healthy stable state of the system
self.t = 0
s = np.zeros(5)
self.state = s
self.episode_data[:] = np.nan
self.episode_data[:-1, 0] = s
return s, self.isTerminal(), self.possibleActions()
    def showDomain(self, a=0, s=None):
        """
        shows a live graph of each observable dimension
        """
        # only update the graph every couple of steps, otherwise it is
        # extremely slow
        if self.t % self.show_domain_every != 0 and not self.t >= self.episodeCap:
            return
        # One subplot per state dimension plus one for the action row.
        n = self.state_space_dims + 1
        names = list(self.state_names) + ["Action"]
        colors = ["m", "c", "b", "r", "g", "k"]
        handles = self._state_graph_handles
        fig = plt.figure("Bicycle", figsize=(12, 10))
        if handles is None:
            # First call: build the shared-x subplots and one line per row.
            handles = []
            f, axes = plt.subplots(n, sharex=True, num="Bicycle", figsize=(12, 10))
            f.subplots_adjust(hspace=0.1)
            for i in range(n):
                ax = axes[i]
                d = np.arange(self.episodeCap + 1) * 5
                ax.set_ylabel(names[i])
                ax.locator_params(tight=True, nbins=4)
                handles.append(ax.plot(d, self.episode_data[i], color=colors[i])[0])
            self._state_graph_handles = handles
            ax.set_xlabel("Days")
        # Subsequent calls just refresh the line data and rescale the axes.
        for i in range(n):
            handles[i].set_ydata(self.episode_data[i])
            ax = handles[i].axes
            ax.relim()
            ax.autoscale_view()
        fig.canvas.draw()
        fig.canvas.flush_events()
class BicycleRiding(BicycleBalancing):
    """Bicycle task that rewards progress toward the goal heading.

    Same dynamics as BicycleBalancing, but instead of a pure failure
    penalty the agent earns reward proportional to the reduction of the
    heading angle psi over the last transition.
    """

    def _reward(self, s):
        # Falling over still yields the failure penalty.
        if self.isTerminal():
            return -1.0
        # Compare the previous heading (from s) with the current one
        # (self.state): positive reward when psi shrank.
        previous_psi = s[-1]
        current_psi = self.state[-1]
        return 0.1 * (previous_psi - current_psi)
from .CartPoleBase import CartPoleBase, StateIndex
import numpy as np
from rlpy.Tools import pl, plt
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
pi = np.pi
class FiniteTrackCartPole(CartPoleBase):
    """
    Finite Track Cart Pole.\n
    Inherits dynamics from ``CartPoleBase`` and utilizes four states - angular
    quantities of pendulum (position and velocity) and lateral quantities of
    the cart.
    Not only does this increase the state space relative to
    %InfTrackCartPole, but the cart must remain in a finite interval
    corresponding to a physical rail, which affects valid solutions/policies.
    **State** \n
    theta = Angular position of pendulum
    (relative to straight up at 0 rad), positive clockwise. \n
    thetaDot = Angular rate of pendulum. \n
    x = Linear position of the cart on its track (positive right). \n
    xDot = Linear velocity of the cart on its track.
    **Actions** \n
    Actions take the form of force applied to cart; \n
    Positive force acts to the right on the cart. \n
    Note the domain defaults in %CartPoleBase.
    .. warning::
        For \"Swing-Up\" tasks where the goal is to swing the pendulum from
        rest to vertical, Lagoudakis, Parr, and Bartlett's default [-2, 2] rad/s
        is unphysically slow; the Pendulum often saturates it.\n
        RLPy will issue truncation warnings if this is occurring.
    **Reference** \n
    For details, see:
        Michail G. Lagoudakis, Ronald Parr, and L. Bartlett
        Least-squares policy iteration. Journal of Machine Learning Research
        (2003) Issue 4.
    .. note::
        For full domain description, see: \n
        Wang, H., Tanaka, K., and Griffin, M. An approach to fuzzy control
        of nonlinear systems; Stability and design issues.
        IEEE Trans. on Fuzzy Systems, 4(1):14-23, 1996.
    """

    __author__ = ["Robert H. Klein"]

    #: Default limits on theta
    ANGLE_LIMITS = [-pi / 15.0, pi / 15.0]
    #: Default limits on pendulum rate
    ANGULAR_RATE_LIMITS = [-2.0, 2.0]
    #: m - Default limits on cart position [Per RL Community CartPole]
    POSITION_LIMITS = [-2.4, 2.4]
    #: m/s - Default limits on cart velocity [per RL Community CartPole]
    VELOCITY_LIMITS = [-6.0, 6.0]
    #: Newtons, N - Force values available as actions
    AVAIL_FORCE = np.array([-10, 10])
    #: kilograms, kg - Mass of the pendulum arm
    MASS_PEND = 0.1
    #: kilograms, kg - Mass of cart
    MASS_CART = 1.0
    #: meters, m - Physical length of the pendulum, meters (note the moment-arm lies at half this distance)
    LENGTH = 1.0
    # m - Length of moment-arm to center of mass (= half the pendulum length)
    MOMENT_ARM = LENGTH / 2
    # 1/kg - Used in dynamics computations, equal to 1 / (MASS_PEND +
    # MASS_CART)
    _ALPHA_MASS = 1.0 / (MASS_CART + MASS_PEND)
    #: seconds, s - Time between steps
    dt = 0.02
    #: Newtons, N - Maximum noise possible, uniformly distributed. Default 0.
    force_noise_max = 0.0

    def __init__(self, **kwargs):
        # Limits of each dimension of the state space.
        # Each row corresponds to one dimension and has two elements [min, max]
        super().__init__(
            statespace_limits=np.array(
                [
                    self.ANGLE_LIMITS,
                    self.ANGULAR_RATE_LIMITS,
                    self.POSITION_LIMITS,
                    self.VELOCITY_LIMITS,
                ]
            ),
            continuous_dims=[
                StateIndex.THETA,
                StateIndex.THETA_DOT,
                StateIndex.X,
                StateIndex.X_DOT,
            ],
            **kwargs
        )
        self.DimNames = ["Theta", "Thetadot", "X", "Xdot"]

    def step(self, a):
        """Apply force action ``a`` and integrate the 4-state dynamics."""
        s = self.state
        ns = self._stepFourState(s, a)
        self.state = ns.copy()
        terminal = self.isTerminal()  # automatically uses self.state
        reward = self._getReward(a)  # Automatically uses self.state
        possibleActions = self.possibleActions()
        return reward, ns, terminal, possibleActions

    def s0(self):
        # defined by children
        raise NotImplementedError

    def showLearning(self, representation):
        """
        ``xSlice`` and ``xDotSlice`` - the value of ``x`` and ``xDot``
        respectively, associated with the plotted value function and policy
        (which are each 2-D grids across ``theta`` and ``thetaDot``).
        """
        xSlice = 0.0  # value of x assumed when plotting V and pi
        xDotSlice = 0.0  # value of xDot assumed when plotting V and pi
        (thetas, theta_dots) = self._setup_learning(representation)
        pi = np.zeros((len(theta_dots), len(thetas)), "uint8")
        V = np.zeros((len(theta_dots), len(thetas)))
        for row, thetaDot in enumerate(theta_dots):
            for col, theta in enumerate(thetas):
                s = np.array([theta, thetaDot, xSlice, xDotSlice])
                terminal = self.isTerminal(s)
                # Array of Q-function evaluated at all possible actions at
                # state s
                Qs = representation.Qs(s, terminal)
                # Array of all possible actions at state s
                As = self.possibleActions(s=s)
                # If multiple optimal actions, pick one randomly.
                # BUG FIX: use the domain's seeded RNG (self.random_state)
                # rather than the global np.random, for reproducibility and
                # consistency with the rest of the package.
                a = self.random_state.choice(As[Qs.max() == Qs])
                # Assign pi to be an optimal action (which maximizes
                # Q-function)
                pi[row, col] = a
                # Assign V to be the value of the Q-function under optimal
                # action
                V[row, col] = max(Qs)
        self._plot_policy(pi)
        plt.title("Policy (Slice at x=0, xDot=0)")
        self._plot_valfun(V)
        plt.title("Value Function (Slice at x=0, xDot=0)")
        pl.draw()

    def showDomain(self, a=0):
        """
        Display the 4-d state of the cartpole and arrow indicating current
        force action (not including noise!).
        """
        fourState = self.state
        self._plot_state(fourState, a)
class FiniteCartPoleBalance(FiniteTrackCartPole):
    """
    **Goal**: keep the pendulum inside the upright goal region.

    A reward of 1 is received on every timestep spent inside the goal
    region and 0 elsewhere; this doubles as the terminal condition.
    Failure bounds match the RL-Community CartPole:
    ``theta`` in [-pi/15, pi/15] rad (i.e. [-12, 12] degrees) and
    ``x`` in [-2.4, 2.4] m.  The pendulum starts upright (``theta = 0``)
    with the cart centered (``x = 0``).

    **Reference** \n
    See `RL-Library CartPole <http://library.rl-community.org/wiki/CartPole>`_ \n
    Domain constants per RL Community / RL-Library
    `CartPole implementation <http://code.google.com/p/rl-library/wiki/CartpoleJava>`_
    """

    def __init__(self):
        super().__init__(discount_factor=0.999)

    def s0(self):
        """Start upright and centered: the all-zero state."""
        self.state = np.zeros(4)
        return self.state.copy(), self.isTerminal(), self.possibleActions()

    def _getReward(self, a, s=None):
        # +1 for every step with the pendulum inside the goal angle band;
        # there is no explicit penalty for failure.
        if s is None:
            s = self.state
        in_goal = -pi / 15 < s[StateIndex.THETA] < pi / 15
        return self.GOAL_REWARD if in_goal else 0

    def isTerminal(self, s=None):
        if s is None:
            s = self.state
        angle_ok = -pi / 15 < s[StateIndex.THETA] < pi / 15
        track_ok = -2.4 < s[StateIndex.X] < 2.4
        return not (angle_ok and track_ok)
class FiniteCartPoleBalanceOriginal(FiniteTrackCartPole):
    """
    Balancing task following Sutton & Barto's original pole-balancing
    formulation: ``good_reward`` while balanced, -1 on failure.

    **Reference** \n
    Sutton, Richard S., and Andrew G. Barto:
    Reinforcement learning: An introduction.
    Cambridge: MIT press, 1998.
    See :class:`Domains.FiniteTrackCartPole.FiniteCartPoleBalance` \n
    .. note::
        `original definition and code <http://webdocs.cs.ualberta.ca/~sutton/book/code/pole.c>`_
    """

    __author__ = "Christoph Dann"

    def __init__(self, good_reward=0.0):
        self.good_reward = good_reward
        super().__init__()

    def s0(self):
        """Start upright and centered: the all-zero state."""
        self.state = np.zeros(4)
        return self.state.copy(), self.isTerminal(), self.possibleActions()

    def _getReward(self, a, s=None):
        if s is None:
            s = self.state
        # -1 on failure, otherwise the (configurable) per-step reward.
        if self.isTerminal(s=s):
            return -1.0
        return self.good_reward

    def isTerminal(self, s=None):
        if s is None:
            s = self.state
        angle_ok = -np.pi / 15 < s[StateIndex.THETA] < np.pi / 15
        track_ok = -2.4 < s[StateIndex.X] < 2.4
        return not (angle_ok and track_ok)
class FiniteCartPoleBalanceModern(FiniteTrackCartPole):
    """
    A more realistic balancing variant: 3 actions (left, none, right)
    instead of the default 2, nonzero uniform force noise, and a slightly
    randomized start state.\n
    See :class:`Domains.FiniteTrackCartPole.FiniteCartPoleBalance`.
    """

    __author__ = "Christoph Dann"

    #: Newtons, N - Force values available as actions (Note we add a 0-force action)
    AVAIL_FORCE = np.array([-10.0, 0.0, 10.0])
    #: Newtons, N - Maximum noise possible, uniformly distributed
    force_noise_max = 1.0

    def s0(self):
        # Start with a small Gaussian perturbation of the pole angle.
        self.state = np.array([self.random_state.randn() * 0.01, 0.0, 0.0, 0.0])
        return self.state.copy(), self.isTerminal(), self.possibleActions()

    def _getReward(self, a, s=None):
        if s is None:
            s = self.state
        return -1.0 if self.isTerminal(s=s) else 0.0

    def isTerminal(self, s=None):
        if s is None:
            s = self.state
        angle_ok = -np.pi / 15 < s[StateIndex.THETA] < np.pi / 15
        track_ok = -2.4 < s[StateIndex.X] < 2.4
        return not (angle_ok and track_ok)
class FiniteCartPoleSwingUp(FiniteTrackCartPole):
    """
    **Goal**: swing the pendulum up from hanging (``theta = pi``) and keep
    it inside the narrow upright region [-pi/6, pi/6], earning +1 reward
    per step spent there and 0 elsewhere.  The cart starts at ``x = 0``;
    only leaving the track terminates the episode, so the expected optimum
    is to swing up and then hold, collapsing the problem to balancing with
    a tighter goal region.

    See parent class
    :class:`Domains.FiniteTrackCartPole.FiniteTrackCartPole` for more information.
    """

    #: Limit on pendulum angle (no termination, pendulum can make full cycle)
    ANGLE_LIMITS = [-pi, pi]
    # NOTE that L+P's rate limits [-2,2] are actually unphysically slow, and the
    # pendulum saturates them frequently when falling; more realistic to use 2*pi.

    def __init__(self):
        super().__init__()

    def s0(self):
        """Start with the pendulum hanging straight down at the track center."""
        self.state = np.array([pi, 0, 0, 0])
        return self.state.copy(), self.isTerminal(), self.possibleActions()

    def _getReward(self, a, s=None):
        if s is None:
            s = self.state
        in_goal = -pi / 6 < s[StateIndex.THETA] < pi / 6
        return self.GOAL_REWARD if in_goal else 0

    def isTerminal(self, s=None):
        if s is None:
            s = self.state
        # Only running off the track ends the episode; any angle is allowed.
        return not (-2.4 < s[StateIndex.X] < 2.4)
class FiniteCartPoleSwingUpFriction(FiniteCartPoleSwingUp):
    """
    Modifies ``CartPole`` dynamics to include friction. \n
    This domain is a child of :class:`Domains.FiniteTrackCartPole.FiniteCartPoleSwingUp`.
    """

    __author__ = "Christoph Dann"
    # TODO - needs reference

    #: Limit on pendulum angle (no termination, pendulum can make full cycle)
    ANGLE_LIMITS = [-pi, pi]
    #: Limits on pendulum rate
    ANGULAR_RATE_LIMITS = [-3.0, 3.0]
    #: m - Limits on cart position
    POSITION_LIMITS = [-2.4, 2.4]
    #: m/s - Limits on cart velocity
    VELOCITY_LIMITS = [-3.0, 3.0]
    MASS_CART = 0.5 # : kilograms, kg - Mass of cart
    MASS_PEND = 0.5 # : kilograms, kg - Mass of the pendulum arm
    #: meters, m - Physical length of the pendulum, meters (note the moment-arm lies at half this distance)
    LENGTH = 0.6
    # a friction coefficient
    A = 0.5
    #: seconds, s - Time between steps
    dt = 0.10
    #: Max number of steps per trajectory (reduced from default of 3000)
    episodeCap = 400
    # Friction coefficient between cart and ground
    B = 0.1

    def __init__(self):
        super().__init__()

    def _getReward(self, a, s=None):
        """Gaussian reward peaked where the pendulum tip is at its upright
        position; large penalty (-30) for leaving the track."""
        if s is None:
            s = self.state
        if not (self.POSITION_LIMITS[0] < s[StateIndex.X] < self.POSITION_LIMITS[1]):
            return -30
        # Cartesian position of the pendulum tip.
        pen_pos = np.array(
            [
                s[StateIndex.X] + self.LENGTH * np.sin(s[StateIndex.THETA]),
                self.LENGTH * np.cos(s[StateIndex.THETA]),
            ]
        )
        # Offset from the upright tip position above the track origin.
        diff = pen_pos - np.array([0, self.LENGTH])
        # diff[1] *= 1.5
        return np.exp(-0.5 * sum(diff ** 2) * self.A) - 0.5

    def _dsdt(self, s_aug, t):
        """Derivative of the augmented state [theta, thetaDot, x, xDot, a],
        delegating to ``_ode`` in its internal [x, dx, dtheta, theta]
        coordinates (theta measured from the opposite reference, hence the
        ``pi -`` and sign flips below)."""
        s = np.zeros((4))
        s[0] = s_aug[StateIndex.X]
        s[1] = s_aug[StateIndex.X_DOT]
        s[3] = pi - s_aug[StateIndex.THETA]
        s[2] = -s_aug[StateIndex.THETA_DOT]
        a = s_aug[4]
        ds = self._ode(s, t, a, self.MASS_PEND, self.LENGTH, self.MASS_CART, self.B)
        ds_aug = s_aug.copy()
        ds_aug[StateIndex.X] = ds[0]
        ds_aug[StateIndex.X_DOT] = ds[1]
        # NOTE(review): the forward transform above negates s[2] and maps
        # theta -> pi - theta, but the inverse mapping here copies ds[2] and
        # ds[3] without negation -- verify the sign conventions against the
        # integrator before relying on this.
        ds_aug[StateIndex.THETA_DOT] = ds[2]
        ds_aug[StateIndex.THETA] = ds[3]
        return ds_aug

    def _ode(self, s, t, a, m, l, M, b):
        """
        Equations of motion for the friction cart-pole.

        State layout: [x, dx, dtheta, theta]; ``a`` is the applied force,
        ``m``/``M`` pendulum/cart mass, ``l`` pendulum length, ``b`` the
        cart-ground friction coefficient.
        """
        # cdef double g, c3, s3
        s3 = np.sin(s[3])
        c3 = np.cos(s[3])
        g = self.ACCEL_G
        ds = np.zeros(4)
        ds[0] = s[1]
        # Cart acceleration (friction term -4*b*dx opposes motion).
        ds[1] = (
            2 * m * l * s[2] ** 2 * s3 + 3 * m * g * s3 * c3 + 4 * a - 4 * b * s[1]
        ) / (4 * (M + m) - 3 * m * c3 ** 2)
        # Angular acceleration of the pendulum.
        ds[2] = (
            -3 * m * l * s[2] ** 2 * s3 * c3
            - 6 * (M + m) * g * s3
            - 6 * (a - b * s[1]) * c3
        ) / (4 * l * (m + M) - 3 * m * l * c3 ** 2)
        ds[3] = s[2]
        return ds
from rlpy.Tools import plt, bound, fromAtoB
from rlpy.Tools import lines
from .Domain import Domain
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = ["Josh Joseph", "Alborz Geramifard"]
class MountainCar(Domain):
    """
    The goal is to drive an under accelerated car up to the hill.\n
    **STATE:** Position and velocity of the car [x, xdot] \n
    **ACTIONS:** [Acc backwards, Coast, Acc forward] \n
    **TRANSITIONS:** Move along the hill with some noise on the movement. \n
    **REWARD:** -1 per step and 0 at or beyond goal (``x-goal > 0``). \n
    There is optional noise on vehicle acceleration.
    **REFERENCE:**
    Based on `RL-Community Java Implementation <http://library.rl-community.org/wiki/Mountain_Car_(Java)>`_
    """

    XMIN = -1.2  # : Lower bound on domain position
    XMAX = 0.6  #: Upper bound on domain position
    XDOTMIN = -0.07  # : Lower bound on car velocity
    XDOTMAX = 0.07  #: Upper bound on car velocity
    INIT_STATE = np.array([-0.5, 0.0])  # : Initial car state
    STEP_REWARD = -1  # : Penalty for each step taken before reaching the goal
    GOAL_REWARD = 0  #: Reward for reach the goal.
    #: X-Position of the goal location (Should be at/near hill peak)
    GOAL = 0.5
    actions = [-1, 0, 1]
    #: Magnitude of noise (times accelerationFactor) in stochastic velocity changes
    noise = 0
    accelerationFactor = 0.001  # : Magnitude of acceleration action
    gravityFactor = -0.0025
    #: Hill peaks are generated as sinusoid; this is freq. of that sinusoid.
    #: The hill profile is y = sin(hillPeakFrequency * x) and is used both by
    #: the gravity term in step() and by the visualization.
    hillPeakFrequency = 3.0
    # Used for visual stuff:
    domain_fig = None
    valueFunction_fig = None
    policy_fig = None
    actionArrow = None
    X_discretization = 20
    XDot_discretization = 20
    CAR_HEIGHT = 0.2
    CAR_WIDTH = 0.1
    ARROW_LENGTH = 0.2

    def __init__(self, noise=0, discount_factor=0.9):
        """
        :param noise: Magnitude of noise (times accelerationFactor) in stochastic velocity changes
        """
        super().__init__(
            actions_num=3,
            statespace_limits=np.array(
                [[self.XMIN, self.XMAX], [self.XDOTMIN, self.XDOTMAX]]
            ),
            continuous_dims=[0, 1],
            discount_factor=discount_factor,
            episodeCap=10000,
        )
        self.noise = noise
        # Visual stuff:
        self.xTicks = np.linspace(0, self.X_discretization - 1, 5)
        self.xTicksLabels = np.linspace(self.XMIN, self.XMAX, 5)
        self.yTicks = np.linspace(0, self.XDot_discretization - 1, 5)
        self.yTicksLabels = np.linspace(self.XDOTMIN, self.XDOTMAX, 5)
        # Worst case: STEP_REWARD discounted over the whole episode.
        self.MIN_RETURN = (
            self.STEP_REWARD
            * (1 - discount_factor ** self.episodeCap)
            / (1 - discount_factor)
            if discount_factor != 1
            else self.STEP_REWARD * self.episodeCap
        )
        self.MAX_RETURN = 0
        self.DimNames = ["X", "Xdot"]

    def step(self, a):
        """
        Take acceleration action *a*, adding noise as specified in ``__init__()``.
        """
        position, velocity = self.state
        noise = (
            self.accelerationFactor * self.noise * 2 * (self.random_state.rand() - 0.5)
        )
        # Acceleration = noise + action force + gravity along the hill slope.
        velocity += (
            noise
            + self.actions[a] * self.accelerationFactor
            + np.cos(self.hillPeakFrequency * position) * self.gravityFactor
        )
        velocity = bound(velocity, self.XDOTMIN, self.XDOTMAX)
        position += velocity
        position = bound(position, self.XMIN, self.XMAX)
        if position <= self.XMIN and velocity < 0:
            velocity = 0  # Bump into wall
        terminal = self.isTerminal()
        r = self.GOAL_REWARD if terminal else self.STEP_REWARD
        ns = np.array([position, velocity])
        self.state = ns.copy()
        return r, ns, terminal, self.possibleActions()

    def s0(self):
        self.state = self.INIT_STATE.copy()
        return self.state.copy(), self.isTerminal(), self.possibleActions()

    def isTerminal(self):
        """
        :return: ``True`` if the car has reached or exceeded the goal position.
        """
        return self.state[0] > self.GOAL

    def showDomain(self, a):
        """
        Plot the car and an arrow indicating the direction of accelaration
        Parts of this code was adopted from Jose Antonio Martin H.
        <jamartinh@fdi.ucm.es> online source code
        """
        s = self.state
        pos, vel = s
        # BUG FIX: the hill profile previously hard-coded frequency 3 while
        # the dynamics in step() use self.hillPeakFrequency; use the
        # attribute everywhere so the drawing matches the physics.
        freq = self.hillPeakFrequency
        if self.domain_fig is None:  # Need to initialize the figure
            self.domain_fig = plt.figure("MountainCar")
            # plot mountain
            mountain_x = np.linspace(self.XMIN, self.XMAX, 1000)
            mountain_y = np.sin(freq * mountain_x)
            plt.gca().fill_between(
                mountain_x, min(mountain_y) - self.CAR_HEIGHT * 2, mountain_y, color="g"
            )
            plt.xlim([self.XMIN - 0.2, self.XMAX])
            plt.ylim(
                [
                    min(mountain_y) - self.CAR_HEIGHT * 2,
                    max(mountain_y) + self.CAR_HEIGHT * 2,
                ]
            )
            # plot car
            self.car = lines.Line2D([], [], linewidth=20, color="b", alpha=0.8)
            plt.gca().add_line(self.car)
            # Goal
            plt.plot(self.GOAL, np.sin(freq * self.GOAL), "yd", markersize=10.0)
            plt.axis("off")
            plt.gca().set_aspect("1")
            plt.show()
        car_middle_x = pos
        car_middle_y = np.sin(freq * pos)
        slope = np.arctan(freq * np.cos(freq * pos))
        car_back_x = car_middle_x - self.CAR_WIDTH * np.cos(slope) / 2.0
        car_front_x = car_middle_x + self.CAR_WIDTH * np.cos(slope) / 2.0
        car_back_y = car_middle_y - self.CAR_WIDTH * np.sin(slope) / 2.0
        car_front_y = car_middle_y + self.CAR_WIDTH * np.sin(slope) / 2.0
        self.car.set_data([car_back_x, car_front_x], [car_back_y, car_front_y])
        # Arrows
        if self.actionArrow is not None:
            self.actionArrow.remove()
            self.actionArrow = None
        if self.actions[a] > 0:
            self.actionArrow = fromAtoB(
                car_front_x,
                car_front_y,
                car_front_x + self.ARROW_LENGTH * np.cos(slope),
                car_front_y + self.ARROW_LENGTH * np.sin(slope),
                "k",
                "arc3,rad=0",
                0,
                0,
                "simple",
            )
        if self.actions[a] < 0:
            self.actionArrow = fromAtoB(
                car_back_x,
                car_back_y,
                car_back_x - self.ARROW_LENGTH * np.cos(slope),
                car_back_y - self.ARROW_LENGTH * np.sin(slope),
                "r",
                "arc3,rad=0",
                0,
                0,
                "simple",
            )
        self.domain_fig.canvas.draw()
        self.domain_fig.canvas.flush_events()

    def showLearning(self, representation):
        """Plot the greedy policy and value function over the discretized
        (x, xDot) grid."""
        pi = np.zeros((self.X_discretization, self.XDot_discretization), "uint8")
        V = np.zeros((self.X_discretization, self.XDot_discretization))
        if self.valueFunction_fig is None:
            self.valueFunction_fig = plt.figure("Value Function")
            self.valueFunction_im = plt.imshow(
                V,
                cmap="ValueFunction",
                interpolation="nearest",
                origin="lower",
                vmin=self.MIN_RETURN,
                vmax=self.MAX_RETURN,
            )
            plt.xticks(self.xTicks, self.xTicksLabels, fontsize=12)
            plt.yticks(self.yTicks, self.yTicksLabels, fontsize=12)
            plt.xlabel(r"$x$")
            plt.ylabel(r"$\dot x$")
            self.policy_fig = plt.figure("Policy")
            self.policy_im = plt.imshow(
                pi,
                cmap="MountainCarActions",
                interpolation="nearest",
                origin="lower",
                vmin=0,
                vmax=self.actions_num,
            )
            plt.xticks(self.xTicks, self.xTicksLabels, fontsize=12)
            plt.yticks(self.yTicks, self.yTicksLabels, fontsize=12)
            plt.xlabel(r"$x$")
            plt.ylabel(r"$\dot x$")
            plt.show()
        for row, xDot in enumerate(
            np.linspace(self.XDOTMIN, self.XDOTMAX, self.XDot_discretization)
        ):
            for col, x in enumerate(
                np.linspace(self.XMIN, self.XMAX, self.X_discretization)
            ):
                s = np.array([x, xDot])
                Qs = representation.Qs(s, False)
                As = self.possibleActions()
                pi[row, col] = representation.bestAction(s, False, As)
                V[row, col] = max(Qs)
        self.valueFunction_im.set_data(V)
        self.policy_im.set_data(pi)
        self.valueFunction_fig = plt.figure("Value Function")
        plt.draw()
        self.policy_fig = plt.figure("Policy")
        plt.draw()
from rlpy.Tools import plt, mpatches
import numpy as np
from .Domain import Domain
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = ["Alborz Geramifard", "Robert H. Klein"]
class FiftyChain(Domain):
    """
    Random start location, goal is to proceed to nearest reward. \n
    **STATE:** s0 <-> s1 <-> ... <-> s49 \n
    **ACTIONS:** left [0] or right [1] \n
    Actions succeed with probability .9, otherwise execute opposite action.
    Note that the actions [left, right] are available in ALL states, but if
    left is selected in s0 or right in s49, then s remains unchanged. \n
    .. note::
        The optimal policy is to always go to the nearest goal
    **REWARD:** of +1 at states 10 and 41 (indices 9 and 40). Reward is
    obtained when transition out of the reward state, not when first enter. \n
    Note that this class provides the
    function :py:meth`~rlpy.Domains.FiftyChain.L_inf_distance_to_V_star`, which
    accepts an arbitrary representation and returns the error between it and
    the optimal policy.
    The user can also enforce actions under the optimal policy (ignoring the
    agent's policy) by setting ``using_optimal_policy=True`` in FiftyChain.py.
    **REFERENCE:**
    .. seealso::
        Michail G. Lagoudakis, Ronald Parr, and L. Bartlett
        Least-squares policy iteration. Journal of Machine Learning Research
        (2003) Issue 4.
    """

    #: Reward for each timestep spent in the goal region
    GOAL_REWARD = 1
    #: Indices of states with rewards
    GOAL_STATES = [9, 40]
    # Used for graphical normalization
    MAX_RETURN = 2.5
    # Used for graphical normalization
    MIN_RETURN = 0
    # Used for graphical shifting of arrows
    SHIFT = 0.01
    # Used for graphical radius of states
    RADIUS = 0.05
    # Stores the graphical paths for states so that we can later change their
    # colors
    circles = None
    #: Number of states in the chain
    chainSize = 50
    # Y values used for drawing circles
    Y = 1
    #: Probability of taking the other (unselected) action
    p_action_failure = 0.1
    V_star = [
        0.25424059953210576,
        0.32237043339365301,
        0.41732244995544071,
        0.53798770416976416,
        0.69467264588670452,
        0.91307612341516964,
        1.1996067857970858,
        1.5914978718669359,
        2.1011316163482885,
        2.7509878207260079,
        2.2007902565808002,
        1.7606322052646419,
        1.4085057642117096,
        1.1268046113693631,
        0.90144368909548567,
        0.72115495127639073,
        0.5769239610211111,
        0.46153916881688833,
        0.36923133505350991,
        0.29538506804280829,
        0.23630805443424513,
        0.18904644354739669,
        0.15123715483791522,
        0.12098972387033219,
        0.096791779096267572,
        0.077433423277011526,
        0.064110827579671889,
        0.080577201155275072,
        0.10271844729124571,
        0.13354008685155827,
        0.17749076168535796,
        0.22641620289304287,
        0.29916005826456937,
        0.39326998437016564,
        0.52325275246999614,
        0.67438770340963006,
        0.90293435616054674,
        1.1704408409975584,
        1.5213965403184493,
        2.0462009513290296,
        2.7423074964894685,
        2.1938459971915725,
        1.7550767977532584,
        1.404061438202612,
        1.1232491505620894,
        0.89859932044966939,
        0.71887945635973116,
        0.57510356508778659,
        0.46008285207022837,
        0.36806628165617972,
    ]  # Array of optimal values at each state
    # The optimal policy for this domain.
    # NOTE: legacy attribute; the code below uses the instance attribute
    # ``self.optimal_policy`` instead.
    optimalPolicy = None
    # Should the domain only allow optimal actions
    using_optimal_policy = False
    # Plotting values
    domain_fig = None
    value_function_fig = None
    policy_fig = None
    V_star_line = None
    V_approx_line = None
    COLORS = ["g", "k"]
    LEFT = 0
    RIGHT = 1
    # Constants in the map

    def __init__(self):
        super().__init__(
            actions_num=2,
            statespace_limits=np.array([[0, self.chainSize - 1]]),
            # Set discount_factor to be 0.8 for this domain per L & P 2007
            discount_factor=0.8,
            episodeCap=50,
        )
        self.start = 0
        # To catch errors
        self.optimal_policy = np.array([-1 for dummy in range(0, self.chainSize)])
        self.storeOptimalPolicy()

    def storeOptimalPolicy(self):
        """
        Computes and stores the optimal policy on this particular chain.
        .. warning::
            This ONLY applies for the scenario where two states provide
            reward - this policy will be suboptimal for all other domains!
        """
        # Before the first goal: always move right toward it.
        self.optimal_policy[np.arange(self.GOAL_STATES[0])] = self.RIGHT
        goalStateIndices = np.arange(1, len(self.GOAL_STATES))
        for i in goalStateIndices:
            goalState1 = self.GOAL_STATES[i - 1]
            goalState2 = self.GOAL_STATES[i]
            # Split the gap at its midpoint: head back to the nearer goal.
            averageState = int(np.mean([goalState1, goalState2]))
            self.optimal_policy[np.arange(goalState1, averageState)] = self.LEFT
            self.optimal_policy[np.arange(averageState, goalState2)] = self.RIGHT
        # After the last goal: always move left back toward it.
        self.optimal_policy[np.arange(self.GOAL_STATES[-1], self.chainSize)] = self.LEFT

    def showDomain(self, a=0):
        s = self.state
        # Draw the environment
        fig = plt.figure("FiftyChain")
        if self.circles is None:
            self.domain_fig = plt.subplot(3, 1, 1)
            plt.figure(1, (self.chainSize * 2 / 10.0, 2))
            self.domain_fig.set_xlim(0, self.chainSize * 2 / 10.0)
            self.domain_fig.set_ylim(0, 2)
            # Make the last one double circle
            self.domain_fig.add_patch(
                mpatches.Circle(
                    (0.2 + 2 / 10.0 * (self.chainSize - 1), self.Y),
                    self.RADIUS * 1.1,
                    fc="w",
                )
            )
            self.domain_fig.xaxis.set_visible(False)
            self.domain_fig.yaxis.set_visible(False)
            self.circles = [
                mpatches.Circle((0.2 + 2 / 10.0 * i, self.Y), self.RADIUS, fc="w")
                for i in range(self.chainSize)
            ]
            for i in range(self.chainSize):
                self.domain_fig.add_patch(self.circles[i])
            plt.show()
        # Recolor: goals green, current state black, everything else white.
        for p in self.circles:
            p.set_facecolor("w")
        for p in self.GOAL_STATES:
            self.circles[p].set_facecolor("g")
        self.circles[s].set_facecolor("k")
        fig.canvas.draw()
        fig.canvas.flush_events()

    def showLearning(self, representation):
        allStates = np.arange(0, self.chainSize)
        X = np.arange(self.chainSize) * 2.0 / 10.0 - self.SHIFT
        Y = np.ones(self.chainSize) * self.Y
        DY = np.zeros(self.chainSize)
        DX = np.zeros(self.chainSize)
        C = np.zeros(self.chainSize)
        if self.value_function_fig is None:
            self.value_function_fig = plt.subplot(3, 1, 2)
            V = [
                representation.V(s, False, self.possibleActions(s=s)) for s in allStates
            ]
            # Note the comma below, since a tuple of line objects is returned
            self.V_approx_line, = self.value_function_fig.plot(
                allStates, V, "r-", linewidth=3
            )
            # BUG FIX: V_star was previously plotted twice (once unstyled just
            # above, once styled here), leaving a stray duplicate line artist
            # on the axes; plot it exactly once.
            self.V_star_line = self.value_function_fig.plot(
                allStates, self.V_star, "b--", linewidth=3
            )
            # Maximum value function is sum of all possible rewards
            plt.ylim([0, self.GOAL_REWARD * (len(self.GOAL_STATES) + 1)])
            self.policy_fig = plt.subplot(3, 1, 3)
            self.policy_fig.set_xlim(0, self.chainSize * 2 / 10.0)
            self.policy_fig.set_ylim(0, 2)
            self.arrows = plt.quiver(
                X,
                Y,
                DX,
                DY,
                C,
                cmap="fiftyChainActions",
                units="x",
                width=0.05,
                scale=0.008,
                alpha=0.8,
            )  # headwidth=.05, headlength = .03, headaxislength = .02)
            self.policy_fig.xaxis.set_visible(False)
            self.policy_fig.yaxis.set_visible(False)
        V = [representation.V(s, False, self.possibleActions(s=s)) for s in allStates]
        pi = [
            representation.bestAction(s, False, self.possibleActions(s=s))
            for s in allStates
        ]
        # pi = [self.optimal_policy[s] for s in allStates]
        DX = [(2 * a - 1) * self.SHIFT * 0.1 for a in pi]
        self.V_approx_line.set_ydata(V)
        self.arrows.set_UVC(DX, DY, pi)
        plt.draw()

    def step(self, a):
        s = self.state
        # With probability p_action_failure the opposite action executes.
        actionFailure = self.random_state.random_sample() < self.p_action_failure
        if a == self.LEFT or (a == self.RIGHT and actionFailure):  # left
            ns = max(0, self.state - 1)
        elif a == self.RIGHT or (a == self.LEFT and actionFailure):
            ns = min(self.chainSize - 1, self.state + 1)
        self.state = ns
        terminal = self.isTerminal()
        # Reward is granted on leaving a goal state, not on entering it.
        r = self.GOAL_REWARD if s in self.GOAL_STATES else 0
        return r, ns, terminal, self.possibleActions()

    def s0(self):
        self.state = self.random_state.randint(0, self.chainSize)
        return self.state, self.isTerminal(), self.possibleActions()

    def isTerminal(self):
        return False

    def possibleActions(self, s=None):
        if s is None:
            s = self.state
        if self.using_optimal_policy:
            return np.array([self.optimal_policy[s]])
        else:
            return np.arange(self.actions_num)

    def L_inf_distance_to_V_star(self, representation):
        """
        :param representation: An arbitrary learned representation of the
            value function.
        :return: the L-infinity distance between the parameter representation
            and the optimal one.
        """
        V = np.array(
            [
                representation.V(s, False, self.possibleActions(s=s))
                for s in range(self.chainSize)
            ]
        )
        return np.linalg.norm(V - self.V_star, np.inf)
from .game import Agent
from .game import Actions
from .game import Directions
from .util import manhattanDistance
from . import util
class GhostAgent(Agent):
    """Base class for ghosts: acts by sampling from the distribution over
    legal actions returned by ``getDistribution``."""

    def __init__(self, index):
        self.index = index

    def getAction(self, state):
        dist = self.getDistribution(state)
        if len(dist) == 0:
            # No legal moves available: stand still.
            return Directions.STOP
        return util.chooseFromDistribution(dist)

    def getDistribution(self, state):
        "Returns a Counter encoding a distribution over actions from the provided state."
        util.raiseNotDefined()
class RandomGhost(GhostAgent):
    "A ghost that chooses a legal action uniformly at random."

    def getDistribution(self, state):
        # Give every legal action equal weight, then normalize to sum to 1.
        dist = util.Counter()
        for action in state.getLegalActions(self.index):
            dist[action] = 1.0
        dist.normalize()
        return dist
class DirectionalGhost(GhostAgent):
    "A ghost that prefers to rush Pacman, or flee when scared."

    def __init__(self, index, prob_attack=0.8, prob_scaredFlee=0.8):
        self.index = index
        self.prob_attack = prob_attack
        self.prob_scaredFlee = prob_scaredFlee

    def getDistribution(self, state):
        # Read what we need from the game state.
        ghost_state = state.getGhostState(self.index)
        legal = state.getLegalActions(self.index)
        pos = state.getGhostPosition(self.index)
        is_scared = ghost_state.scaredTimer > 0
        # Scared ghosts move at half speed.
        speed = 0.5 if is_scared else 1
        vectors = [Actions.directionToVector(a, speed) for a in legal]
        successors = [(pos[0] + dx, pos[1] + dy) for dx, dy in vectors]
        pacman_pos = state.getPacmanPosition()
        # Evaluate each successor by its Manhattan distance to Pacman.
        scores = [manhattanDistance(p, pacman_pos) for p in successors]
        # Scared ghosts flee (maximize distance); otherwise chase (minimize).
        if is_scared:
            best_score = max(scores)
            best_prob = self.prob_scaredFlee
        else:
            best_score = min(scores)
            best_prob = self.prob_attack
        best_actions = [a for a, d in zip(legal, scores) if d == best_score]
        # Mix the policy: best actions share best_prob of the mass, the rest
        # is spread uniformly over all legal actions.
        dist = util.Counter()
        for a in best_actions:
            dist[a] = best_prob / len(best_actions)
        for a in legal:
            dist[a] += (1 - best_prob) / len(legal)
        dist.normalize()
        return dist
from .util import manhattanDistance
from .game import Grid
import os
import random
from functools import reduce
VISIBILITY_MATRIX_CACHE = {}
class Layout(object):
"""
A Layout manages the static information about the game board.
"""
    def __init__(self, layoutText):
        """Build the static board from ``layoutText``, a list of strings
        (one per board row; the width is taken from the first row)."""
        self.width = len(layoutText[0])
        self.height = len(layoutText)
        self.walls = Grid(self.width, self.height, False)
        self.food = Grid(self.width, self.height, False)
        self.capsules = []
        self.agentPositions = []
        self.numGhosts = 0
        # Populates the grids/lists above from the character map.
        self.processLayoutText(layoutText)
        self.layoutText = layoutText
        # self.initializeVisibilityMatrix()
def getNumGhosts(self):
return self.numGhosts
def initializeVisibilityMatrix(self):
global VISIBILITY_MATRIX_CACHE
if reduce(str.__add__, self.layoutText) not in VISIBILITY_MATRIX_CACHE:
from .game import Directions
vecs = [(-0.5, 0), (0.5, 0), (0, -0.5), (0, 0.5)]
dirs = [
Directions.NORTH,
Directions.SOUTH,
Directions.WEST,
Directions.EAST,
]
vis = Grid(
self.width,
self.height,
{
Directions.NORTH: set(),
Directions.SOUTH: set(),
Directions.EAST: set(),
Directions.WEST: set(),
Directions.STOP: set(),
},
)
for x in range(self.width):
for y in range(self.height):
if self.walls[x][y] == False:
for vec, direction in zip(vecs, dirs):
dx, dy = vec
nextx, nexty = x + dx, y + dy
while (nextx + nexty) != int(nextx) + int(
nexty
) or not self.walls[int(nextx)][int(nexty)]:
vis[x][y][direction].add((nextx, nexty))
nextx, nexty = x + dx, y + dy
self.visibility = vis
VISIBILITY_MATRIX_CACHE[reduce(str.__add__, self.layoutText)] = vis
else:
self.visibility = VISIBILITY_MATRIX_CACHE[
reduce(str.__add__, self.layoutText)
]
def isWall(self, pos):
x, col = pos
return self.walls[x][col]
def getRandomLegalPosition(self):
x = random.choice(list(range(self.width)))
y = random.choice(list(range(self.height)))
while self.isWall((x, y)):
x = random.choice(list(range(self.width)))
y = random.choice(list(range(self.height)))
return (x, y)
def getRandomCorner(self):
poses = [
(1, 1),
(1, self.height - 2),
(self.width - 2, 1),
(self.width - 2, self.height - 2),
]
return random.choice(poses)
def getFurthestCorner(self, pacPos):
poses = [
(1, 1),
(1, self.height - 2),
(self.width - 2, 1),
(self.width - 2, self.height - 2),
]
dist, pos = max([(manhattanDistance(p, pacPos), p) for p in poses])
return pos
def isVisibleFrom(self, ghostPos, pacPos, pacDirection):
row, col = [int(x) for x in pacPos]
return ghostPos in self.visibility[row][col][pacDirection]
def __str__(self):
return "\n".join(self.layoutText)
def deepCopy(self):
return Layout(self.layoutText[:])
def processLayoutText(self, layoutText):
"""
Coordinates are flipped from the input format to the (x,y) convention here
The shape of the maze. Each character
represents a different type of object.
% - Wall
. - Food
o - Capsule
G - Ghost
P - Pacman
Other characters are ignored.
"""
maxY = self.height - 1
for y in range(self.height):
for x in range(self.width):
layoutChar = layoutText[maxY - y][x]
self.processLayoutChar(x, y, layoutChar)
self.agentPositions.sort()
self.agentPositions = [(i == 0, pos) for i, pos in self.agentPositions]
def processLayoutChar(self, x, y, layoutChar):
if layoutChar == "%":
self.walls[x][y] = True
elif layoutChar == ".":
self.food[x][y] = True
elif layoutChar == "o":
self.capsules.append((x, y))
elif layoutChar == "P":
self.agentPositions.append((0, (x, y)))
elif layoutChar in ["G"]:
self.agentPositions.append((1, (x, y)))
self.numGhosts += 1
elif layoutChar in ["1", "2", "3", "4"]:
self.agentPositions.append((int(layoutChar), (x, y)))
self.numGhosts += 1
def getLayout(name, back=2):
    """Resolve ``name`` to a Layout, trying ./layouts first and then the
    bare name, retrying up to ``back`` parent directories."""
    if name.endswith(".lay"):
        candidates = ["layouts/" + name, name]
    else:
        candidates = ["layouts/" + name + ".lay", name + ".lay"]
    layout = tryToLoad(candidates[0])
    if layout is None:
        layout = tryToLoad(candidates[1])
    if layout is None and back >= 0:
        # Not found here: hop to the parent directory and retry, always
        # restoring the working directory afterwards.
        curdir = os.path.abspath(".")
        os.chdir("..")
        layout = getLayout(name, back - 1)
        os.chdir(curdir)
    return layout
def tryToLoad(fullname):
    """Return a Layout parsed from ``fullname``, or None if the file
    does not exist."""
    if not os.path.exists(fullname):
        return None
    # ``with`` guarantees the handle is closed even if parsing raises,
    # replacing the manual try/finally.
    with open(fullname) as f:
        return Layout([line.strip() for line in f])
from rlpy.Tools import className, discrete_sample
import numpy as np
import logging
from abc import ABC, abstractmethod
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class Policy(ABC):
    """Base class for mappings from states to discrete actions.

    A Policy consults the value function stored in its
    :py:class:`~rlpy.Representations.Representation.Representation` to pick
    the next discrete action for an :py:class:`~rlpy.Agents.Agent.Agent`;
    see :py:meth:`~rlpy.Policies.Policy.Policy.pi`.

    .. note::
        All new policy implementations should inherit from Policy.
    """

    DEBUG = False

    def __init__(self, representation, seed=1):
        """
        :param representation: the
            :py:class:`~rlpy.Representations.Representation.Representation`
            used to evaluate actions.
        :param seed: seed for this policy's private random stream.
        """
        self.representation = representation
        # Logger namespaced by the concrete policy class name.
        self.logger = logging.getLogger("rlpy.Policies." + self.__class__.__name__)
        # Private random stream so each policy is reproducible per seed.
        self.random_state = np.random.RandomState(seed=seed)

    def init_randomization(self):
        """
        Re-run any stochastic initialization after a seed change.

        Any stochastic behavior in __init__() is broken out into this
        function so that if the random seed is later changed (eg, by the
        Experiment), other member variables and functions are updated
        accordingly.
        """
        pass

    @abstractmethod
    def pi(self, s, terminal, p_actions):
        """
        *Abstract Method:* select an action given a state.

        :param s: The current state
        :param terminal: boolean, whether or not *s* is a terminal state.
        :param p_actions: a list / array of all possible actions in *s*.
        """
        raise NotImplementedError

    def turnOffExploration(self):
        """Disable exploration (e.g. epsilon=0 in epsilon-greedy); no-op
        by default."""
        pass

    def turnOnExploration(self):
        """
        Reverse a previous :py:meth:`turnOffExploration` (e.g. restore
        epsilon to its previous, possibly nonzero, value); no-op by default.
        """
        pass

    def printAll(self):
        """Print the class name and every instance attribute to console."""
        print(className(self))
        print("=======================================")
        # Renamed loop variables: the original shadowed the builtin
        # ``property``.
        for attr_name, attr_value in vars(self).items():
            print(attr_name, ": ", attr_value)
class DifferentiablePolicy(Policy):
    """A stochastic policy with differentiable log-probabilities, suitable
    for policy-gradient methods."""

    def pi(self, s, terminal, p_actions):
        """Sample an action from the policy's distribution at ``s``."""
        p = self.probabilities(s, terminal)
        return discrete_sample(p)

    @abstractmethod
    def dlogpi(self, s, a):
        """Derivative of the log probabilities of the policy."""
        # Fixed: the original ``return NotImplementedError`` silently
        # handed back the exception class instead of raising it.
        raise NotImplementedError

    def prob(self, s, a):
        """Probability of choosing action ``a`` given the state ``s``."""
        v = self.probabilities(s, False)
        return v[a]

    @property
    def theta(self):
        # Policy parameters are the representation's weight vector.
        return self.representation.weight_vec

    @theta.setter
    def theta(self, v):
        self.representation.weight_vec = v

    @abstractmethod
    def probabilities(self, s, terminal):
        """
        Return a vector of length num_actions containing the normalized
        probabilities for taking each action given the state ``s``.
        """
        # Fixed: raise instead of returning the exception class.
        raise NotImplementedError
from .Policy import Policy
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class eGreedy(Policy):
    """Greedy policy with epsilon-probability for uniformly random exploration.

    From a given state, it selects the action with the highest expected value
    (greedy with respect to value function), but with some probability
    ``epsilon``, takes a random action instead.  This explicitly balances the
    exploration/exploitation tradeoff, and ensures that in the limit of
    infinite samples, the agent will have explored the entire domain.
    """

    def __init__(
        self,
        representation,
        epsilon=0.1,
        deterministic=False,
        epsilon_decay=0.0,
        epsilon_min=0.0,
        seed=1,
    ):
        """
        :param representation: The representation that the agent uses.
        :param epsilon: Probability of selecting a random action instead of greedy.
        :param deterministic: Select an action deterministically among the best actions.
        :param epsilon_decay: if > 0, linearly decays epsilon on each ``pi`` call.
        :param epsilon_min: The minimum value of epsilon when epsilon_decay > 0.
        :param seed: Random seed used for action sampling.
        """
        self.epsilon = epsilon
        self.deterministic = deterministic
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        # Temporarily stores value of ``epsilon`` while exploration disabled
        self.old_epsilon = None
        super().__init__(representation, seed)

    def pi(self, s, terminal, p_actions):
        """Return an epsilon-greedy action among ``p_actions``."""
        coin = self.random_state.rand()
        eps = self.epsilon
        if self.epsilon_decay > 0 and self.epsilon > self.epsilon_min:
            # Clamp at epsilon_min: the previous unconditional subtraction
            # could overshoot below the documented minimum.
            self.epsilon = max(self.epsilon - self.epsilon_decay, self.epsilon_min)
        if coin < eps:
            return self.random_state.choice(p_actions)
        b_actions = self.representation.bestActions(s, terminal, p_actions)
        if self.deterministic:
            return b_actions[0]
        return self.random_state.choice(b_actions)

    def prob(self, s, terminal, p_actions):
        """Return this policy's action-probability vector at ``s``.

        NOTE(review): ``b_actions`` indexes into a vector of length
        ``len(p_actions)``; this assumes bestActions returns indices valid
        for that vector -- confirm when ``p_actions`` is a strict subset of
        the action space.
        """
        p = np.ones(len(p_actions)) / len(p_actions)
        p *= self.epsilon
        b_actions = self.representation.bestActions(s, terminal, p_actions)
        if self.deterministic:
            p[b_actions[0]] += 1 - self.epsilon
        else:
            p[b_actions] += (1 - self.epsilon) / len(b_actions)
        return p

    def turnOffExploration(self):
        """Remember the current epsilon and act fully greedily."""
        self.old_epsilon = self.epsilon
        self.epsilon = 0

    def turnOnExploration(self):
        """Restore the epsilon saved by :py:meth:`turnOffExploration`."""
        # Guard: restoring before turnOffExploration() would have set
        # epsilon to None.
        if self.old_epsilon is not None:
            self.epsilon = self.old_epsilon
from rlpy.Tools import perms
from .Representation import Representation
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class RBF(Representation):
    """
    Representation that uses a weighted sum of radial basis functions (RBFs)
    to reconstruct the value function. Each RBF has a mean and variance
    based on user-specified resolution_min, resolution_max, and grid_bins.
    See specification in __init__ below.
    """

    # Subset of state dimensions used by the representation (None = all)
    state_dimensions = None
    rbfs_mu = None  #: The mean of RBFs
    #: The std dev of the RBFs (uniformly selected between [0, dimension width]
    rbfs_sigma = None

    def __init__(
        self,
        domain,
        num_rbfs=None,
        state_dimensions=None,
        const_feature=True,
        resolution_min=2.0,
        resolution_max=None,
        seed=1,
        normalize=False,
        grid_bins=None,
        include_border=False,
    ):
        """
        :param domain: the :py:class`~rlpy.Domains.Domain.Domain` associated
            with the value function we want to learn.
        :param num_rbfs: (Optional) Number of RBFs to use for fnctn
            approximation. *THIS IS IGNORED* if grid_bins != None, and is
            instead determined by the resolution.
        :param state_dimensions: (Optional) Allows user to select subset of
            state dimensions for representation: ndarray.
        :param const_feature: Boolean, set true to allow for constant offset
        :param resolution_min: If ``grid_bins`` is specified, the standard
            deviation sigma of each RBF is given by the average with
            ``resolution_max``; otherwise it is selected uniform random in the
            range with resolution_max.
        :param resolution_max: If ``grid_bins`` is specified, the standard
            deviation sigma of each RBF is given by the average with
            ``resolution_min``; otherwise it is selected uniform random in the
            range with resolution_min.
        :param seed: To seed the random state generator when placing RBFs.
        :param normalize: (Boolean) If true, normalize returned feature
            function values phi(s) so that sum( phi(s) ) = 1.
        :param grid_bins: ndarray, an int for each dimension, determines
            discretization of each dimension.
        :param include_border: (Boolean) If true, adds an extra RBF to include
            the domain boundaries.
        """
        if resolution_max is None:
            resolution_max = resolution_min
        self.grid_bins = grid_bins
        self.resolution_max = resolution_max
        self.resolution_min = resolution_min
        self.num_rbfs = num_rbfs
        if state_dimensions is not None:
            self.dims = len(state_dimensions)
        else:  # just consider all dimensions
            state_dimensions = list(range(domain.state_space_dims))
            self.dims = domain.state_space_dims
        if self.grid_bins is not None:
            # uniform grid of rbfs
            self.rbfs_mu, self.num_rbfs = self._uniformRBFs(
                self.grid_bins, domain, include_border
            )
            # All grid RBFs share one sigma: the mean of the two resolutions.
            self.rbfs_sigma = (
                np.ones((self.num_rbfs, self.dims))
                * (self.resolution_max + self.resolution_min)
                / 2
            )
        self.const_feature = const_feature
        self.features_num = self.num_rbfs
        if const_feature:
            self.features_num += 1  # adds a constant 1 to each feature vector
        self.state_dimensions = state_dimensions
        self.normalize = normalize
        super(RBF, self).__init__(domain, seed=seed)
        self.init_randomization()

    def init_randomization(self):
        """Scatter RBF centers and widths at random; no-op for grid
        placement (``grid_bins`` given)."""
        if self.grid_bins is not None:
            return
        # uniformly scattered
        assert self.num_rbfs is not None
        self.rbfs_mu = np.zeros((self.num_rbfs, self.dims))
        self.rbfs_sigma = np.zeros((self.num_rbfs, self.dims))
        limits = self.domain.statespace_limits
        # Width of every state dimension.  The previous code used
        # ``limits[self.state_dimensions, 1]`` (the upper bounds only) as
        # "widths" -- wrong whenever a lower bound is nonzero -- and indexed
        # both it and the (num_rbfs, dims)-shaped arrays by the raw
        # dimension number, which breaks for subset state_dimensions.
        dim_widths = limits[:, 1] - limits[:, 0]
        for i in range(self.num_rbfs):
            for j, d in enumerate(self.state_dimensions):
                self.rbfs_mu[i, j] = self.random_state.uniform(
                    limits[d, 0],
                    limits[d, 1],
                )
                self.rbfs_sigma[i, j] = self.random_state.uniform(
                    dim_widths[d] / self.resolution_max,
                    dim_widths[d] / self.resolution_min,
                )

    def phi_nonTerminal(self, s):
        """Evaluate all (Gaussian) basis functions at state ``s``."""
        F_s = np.ones(self.features_num)
        if self.state_dimensions is not None:
            s = s[self.state_dimensions]
        exponent = np.sum(0.5 * ((s - self.rbfs_mu) / self.rbfs_sigma) ** 2, axis=1)
        if self.const_feature:
            # Last entry stays 1 as the constant offset feature.
            F_s[:-1] = np.exp(-exponent)
        else:
            F_s[:] = np.exp(-exponent)
        if self.normalize and F_s.sum() != 0.0:
            F_s /= F_s.sum()
        return F_s

    def _uniformRBFs(self, bins_per_dimension, domain, includeBorders=False):
        """
        :param bins_per_dimension: Determines the number of RBFs to place
            uniformly in each dimension, see example below.  Must support
            elementwise arithmetic (ndarray).
        :param includeBorders: (Boolean) If true, adds an extra RBF to include
            the domain boundaries.

        Positions RBF Centers uniformly across the state space.\n
        Returns the centers as RBFs-by-dims matrix and number of rbfs.
        Each row is a center of an RBF. \n
        Example: 2D domain where each dimension is in [0,3]
        with bins = [2,3], False => we get 1 center in the first dimension and
        2 centers in the second dimension, hence the combination is:\n
        1.5 1 \n
        1.5 2 \n
        with parameter [2,3], True => we get 3 center in the first dimension
        and 5 centers in the second dimension, hence the combination is: \n
        0 0 \n
        0 1 \n
        0 2 \n
        0 3 \n
        1.5 0 \n
        1.5 1 \n
        1.5 2 \n
        1.5 3 \n
        3 0 \n
        3 1 \n
        3 2 \n
        3 3 \n
        """
        dims = domain.state_space_dims
        if includeBorders:
            rbfs_num = np.prod(bins_per_dimension[:] + 1)
        else:
            rbfs_num = np.prod(bins_per_dimension[:] - 1)
        all_centers = []
        for d in range(dims):
            centers = np.linspace(
                domain.statespace_limits[d, 0],
                domain.statespace_limits[d, 1],
                bins_per_dimension[d] + 1,
            )
            if not includeBorders:
                centers = centers[1:-1]  # Exclude the beginning and ending
            all_centers.append(centers.tolist())
        # All cross-dimension combinations of the per-dimension centers:
        result = perms(all_centers)
        return result, rbfs_num

    def featureType(self):
        return float
from .Representation import Representation
import numpy as np
from rlpy.Tools.GeneralTools import addNewElementForAllActions
import matplotlib.pyplot as plt
try:
from .kernels import batch
except ImportError:
from .slow_kernels import batch
print("C-Extensions for kernels not available, expect slow runtime")
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
class LocalBases(Representation):
    """
    Abstract base class for representations built from local basis
    functions: one kernel evaluation per stored (center, width) pair.
    """

    #: centers of bases
    centers = None
    #: widths of bases
    widths = None

    def __init__(self, domain, kernel, normalization=False, seed=1, **kwargs):
        """
        :param domain: domain to learn on.
        :param kernel: function handle to use for kernel function evaluations.
        :param normalization: (Boolean) If true, normalize feature vector so
            that sum( phi(s) ) = 1.
        """
        # Look up the batch (vectorized) variant of the kernel by name.
        self.kernel = batch[kernel.__name__]
        self.normalization = normalization
        n_dims = domain.statespace_limits.shape[0]
        self.centers = np.zeros((0, n_dims))
        self.widths = np.zeros((0, n_dims))
        super(LocalBases, self).__init__(domain, seed=seed)

    def phi_nonTerminal(self, s):
        """Evaluate every kernel at ``s``, optionally l1-normalized."""
        feats = self.kernel(s, self.centers, self.widths)
        total = feats.sum()
        if self.normalization and total != 0.0:
            # normalize such that the vector has an l1 norm of 1
            feats /= total
        return feats

    def plot_2d_feature_centers(self, d1=None, d2=None):
        """
        Scatter-plot the feature centers projected onto dimensions ``d1``
        and ``d2``; purely visual.  If no dimensions are specified, the
        first two continuous dimensions are shown.
        """
        if d1 is None and d2 is None:
            # just take the first two dimensions
            d1, d2 = self.domain.continuous_dims[:2]
        plt.figure("Feature Dimensions {} and {}".format(d1, d2))
        for center in self.centers:
            plt.plot([center[d1]], [center[d2]], "r", marker="x")
        plt.draw()
class NonparametricLocalBases(LocalBases):
    """Local-basis representation that grows itself: a new kernel is added
    at any visited state not already well covered by existing features."""

    def __init__(self, domain, kernel, max_similarity=0.9, resolution=5, **kwargs):
        """
        :param domain: domain to learn on.
        :param kernel: function handle to use for kernel function evaluations.
        :param max_similarity: threshold to allow feature to be added to
            representation. Larger max_similarity makes it "easier" to add
            more features by permitting larger values of phi(s) before
            discarding. (An existing feature function in phi() with large value
            at phi(s) implies that it is very representative of the true
            function at *s*. i.e., the value of a feature in phi(s) is
            inversely related to the "similarity" of a potential new feature.
        :param resolution: to be used by the ``kernel()`` function, see parent.
            Determines *width* of basis functions, eg sigma in Gaussian basis.
        """
        self.max_similarity = max_similarity
        span = domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]
        self.common_width = span / resolution
        self.features_num = 0
        super(NonparametricLocalBases, self).__init__(domain, kernel, **kwargs)

    def pre_discover(self, s, terminal, a, sn, terminaln):
        """Add a kernel at ``s`` and/or ``sn`` when no existing feature is
        sufficiently similar; returns the number of features added."""
        saved_norm = self.normalization
        # Similarity is judged on raw (un-normalized) kernel values.
        self.normalization = False
        added = 0
        for state, is_terminal in ((s, terminal), (sn, terminaln)):
            if is_terminal:
                continue
            phi_state = self.phi_nonTerminal(state)
            if np.all(phi_state < self.max_similarity):
                self._add_feature(state)
                added += 1
        self.normalization = saved_norm
        return added

    def _add_feature(self, center):
        """Append one kernel centered at ``center`` and grow the weights."""
        self.features_num += 1
        self.centers = np.vstack((self.centers, center))
        self.widths = np.vstack((self.widths, self.common_width))
        # TODO if normalized, use Q estimate for center to fill weight_vec
        zero_column = np.zeros((self.domain.actions_num, 1))
        self.weight_vec = addNewElementForAllActions(
            self.weight_vec, self.domain.actions_num, zero_column
        )
class RandomLocalBases(LocalBases):
    """Local-basis representation with a fixed number of randomly placed
    kernels of random widths."""

    def __init__(
        self,
        domain,
        kernel,
        num=100,
        resolution_min=5,
        resolution_max=None,
        seed=1,
        **kwargs
    ):
        """
        :param domain: domain to learn on.
        :param kernel: function handle to use for kernel function evaluations.
        :param num: Fixed number of feature (kernel) functions to use in
            EACH dimension. (for a total of features_num=numDims * num)
        :param resolution_min: resolution selected uniform random, lower bound.
        :param resolution_max: resolution selected uniform random, upper bound.
        :param seed: the random seed to use when scattering basis functions.

        Randomly scatter ``num`` feature functions throughout the domain, with
        sigma / noise parameter selected uniform random between
        ``resolution_min`` and ``resolution_max``. NOTE these are
        sensitive to the choice of coordinate (scale with coordinate units).
        """
        # Match RBF's convention: an unspecified resolution_max collapses
        # the range to [resolution_min, resolution_min].  Previously a None
        # resolution_max raised a TypeError in init_randomization.
        if resolution_max is None:
            resolution_max = resolution_min
        self.features_num = num
        self.dim_widths = (
            domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]
        )
        self.resolution_max = resolution_max
        self.resolution_min = resolution_min
        super(RandomLocalBases, self).__init__(domain, kernel, seed=seed, **kwargs)
        self.centers = np.zeros((num, len(self.dim_widths)))
        self.widths = np.zeros((num, len(self.dim_widths)))
        self.init_randomization()

    def init_randomization(self):
        """Draw each center uniformly in the state space, each width
        uniformly in [width/resolution_max, width/resolution_min]."""
        for i in range(self.features_num):
            for d in range(len(self.dim_widths)):
                self.centers[i, d] = self.random_state.uniform(
                    self.domain.statespace_limits[d, 0],
                    self.domain.statespace_limits[d, 1],
                )
                self.widths[i, d] = self.random_state.uniform(
                    self.dim_widths[d] / self.resolution_max,
                    self.dim_widths[d] / self.resolution_min,
                )
from .Representation import Representation
import numpy as np
from copy import copy
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class IndependentDiscretizationCompactBinary(Representation):
    """
    Compact tabular representation with linearly independent basis functions.

    This representation is identical to IndependentDiscretization except when
    binary features exist in the state-space.  In such case the feature
    corresponding to the 0 value of a binary dimension is excluded, and an
    extra feature is added that activates only when all dimensions are binary
    and none of them are active.

    Based on preliminary mathematical formulation both this representation
    and the non-compact representation will have the same representational
    power in the limit.
    """

    def __init__(self, domain, discretization=20):
        # See superclass __init__ definition
        self.setBinsPerDimension(domain, discretization)
        # A dimension is "binary" when it is discrete with exactly 2 buckets.
        nontwobuckets_dims = np.where(self.bins_per_dim != 2)[0]
        self.nonbinary_dims = np.union1d(nontwobuckets_dims, domain.continuous_dims)
        self.binary_dims = np.setdiff1d(
            np.arange(domain.state_space_dims), self.nonbinary_dims
        )
        # One feature per bucket, minus the dropped 0-features of binary
        # dimensions, plus the single "all zeros" indicator feature.
        self.features_num = int(sum(self.bins_per_dim)) - len(self.binary_dims) + 1
        # Calculate the maximum id number per dimension
        temp_bin_number = copy(self.bins_per_dim)
        temp_bin_number[self.binary_dims] -= 1
        self.maxFeatureIDperDimension = np.cumsum(temp_bin_number) - 1
        super(IndependentDiscretizationCompactBinary, self).__init__(
            domain, discretization
        )

    def phi_nonTerminal(self, s):
        F_s = np.zeros(self.features_num, "bool")
        # Compute the active set once (the original evaluated it twice).
        active = self.activeInitialFeaturesCompactBinary(s)
        if len(active):
            F_s[active] = 1
        else:
            F_s[-1] = 1  # Activate the all-zero indicator feature
        return F_s

    def activeInitialFeaturesCompactBinary(self, s):
        """
        Same as :py:meth:`~rlpy.Representations.Representation.activeInitialFeatures`
        except that for binary dimensions (taking values 0,1) only the
        ``1`` value will have a corresponding feature; ``0`` is expressed by
        that feature being inactive.
        """
        bs = self.binState(s)
        zero_index = np.where(bs == 0)[0]
        # Has zero value and is binary dimension
        remove_index = np.intersect1d(zero_index, self.binary_dims)
        remain_index = np.setdiff1d(
            np.arange(self.domain.state_space_dims), remove_index
        )
        # Create a new bin vector where the number of bins are 1 for binary
        temp_bin_number = copy(self.bins_per_dim)
        temp_bin_number[self.binary_dims] -= 1
        # Because activation now is mapped to the first bin which is 0
        bs[self.binary_dims] -= 1
        shifts = np.hstack((0, np.cumsum(temp_bin_number)[:-1]))
        index = bs + shifts
        # Remove the corresponding features highlighted by remove_index
        return index[remain_index].astype("uint32")

    def getDimNumber(self, f):
        """ Returns the dimension number corresponding to feature ``f``. """
        dim = np.searchsorted(self.maxFeatureIDperDimension, f)
        return dim

    def featureType(self):
        return bool
from .Representation import Representation
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class IndependentDiscretization(Representation):
    """
    Creates a feature for each discrete bin in each dimension; the feature
    vector for a given state is comprised of binary features, where only the
    single feature in a particular dimension is 1, all others 0.
    I.e., in a particular state, the sum of all elements of a feature vector
    equals the number of dimensions in the state space.

    Note that this is the minimum number of binary features required to
    uniquely represent a state in a given finite discrete domain.
    """

    def __init__(self, domain, discretization=20):
        self.setBinsPerDimension(domain, discretization)
        self.features_num = int(sum(self.bins_per_dim))
        # Highest feature id belonging to each dimension.
        self.maxFeatureIDperDimension = np.cumsum(self.bins_per_dim) - 1
        super(IndependentDiscretization, self).__init__(domain, discretization)

    def phi_nonTerminal(self, s):
        """One active (True) feature per state dimension."""
        F_s = np.zeros(self.features_num, "bool")
        F_s[self.activeInitialFeatures(s)] = 1
        return F_s

    def getDimNumber(self, f):
        """Return the dimension number corresponding to feature ``f``."""
        dim = np.searchsorted(self.maxFeatureIDperDimension, f)
        return dim

    def getFeatureName(self, feat_id):
        """Return a readable "<dim name>=<index>" label for ``feat_id``,
        or None when the domain does not define DimNames."""
        if hasattr(self.domain, "DimNames"):
            dim = np.searchsorted(self.maxFeatureIDperDimension, feat_id)
            # Find the index of the feature in the corresponding dimension
            index_in_dim = feat_id
            if dim != 0:
                # NOTE(review): this yields a 1-based offset for dim > 0 but
                # a 0-based one for dim == 0 -- preserved as-is; confirm.
                index_in_dim = feat_id - self.maxFeatureIDperDimension[dim - 1]
            # Fixed: the original built this name, printed a debug line, and
            # never returned anything.
            return self.domain.DimNames[dim] + "=" + str(index_in_dim)
        return None

    def featureType(self):
        return bool
from .Representation import Representation
import numpy as np
from .iFDD import iFDD
from rlpy.Tools import className, plt
from copy import deepcopy
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class OMPTD(Representation):
"""OMP-TD implementation based on ICML 2012 paper of Wakefield and Parr.
This implementation assumes an initial representation exists and the bag
of features is the conjunctions of existing features.
OMP-TD uses iFDD to represents its features, yet its discovery method is
different; while iFDD looks at the fringe of the tree of expanded features,
OMPTD only looks through a predefined set of features.
The set of features used by OMPTD aside from the initial_features are
represented by self.expandedFeatures
"""
# Maximum number of features to be expanded on each iteration
maxBatchDiscovery = 0
batchThreshold = 0 # Minimum threshold to add features
# List of selected features. In this implementation initial features are
# selected initially by default
selectedFeatures = None
remainingFeatures = None # Array of remaining features
    def __init__(
        self,
        domain,
        initial_representation,
        discretization=20,
        maxBatchDiscovery=1,
        batchThreshold=0,
        bagSize=100000,
        sparsify=False,
    ):
        """
        :param domain: the :py:class`~rlpy.Domains.Domain.Domain` associated
            with the value function we want to learn.
        :param initial_representation: The initial set of features available.
            OMP-TD does not dynamically introduce any features of its own,
            instead it takes conjunctions of initial_representation feats until
            all permutations have been created or bagSize has been reached.
            OMP-TD uses an (ever-growing) subset, termed the "active" features.
        :param discretization: Number of bins used for each continuous dimension.
            For discrete dimensions, this parameter is ignored.
        :param maxBatchDiscovery: Maximum number of features to be expanded on
            each iteration
        :param batchThreshold: Minimum feature "relevance" required to add
            a feature to the active set.
        :param bagSize: The maximum number of features available for
            consideration.
        :param sparsify: (Boolean)
            See :py:class`~rlpy.Representations.iFDD.iFDD`.
        """
        self.selectedFeatures = []
        # This is dummy since omptd will not use ifdd in the online fashion
        self.iFDD_ONLINETHRESHOLD = 1
        self.maxBatchDiscovery = maxBatchDiscovery
        self.batchThreshold = batchThreshold
        self.initial_representation = initial_representation
        # The iFDD instance is used purely as a container/evaluator for the
        # conjunction features in the bag (not for online discovery).
        self.iFDD = iFDD(
            domain,
            self.iFDD_ONLINETHRESHOLD,
            initial_representation,
            sparsify=0,
            discretization=discretization,
            useCache=1,
        )
        self.bagSize = bagSize
        self.features_num = self.initial_representation.features_num
        self.isDynamic = True
        super(OMPTD, self).__init__(domain, discretization)
        # Enumerate all candidate conjunction features, up to bagSize.
        self.fillBag()
        self.totalFeatureSize = self.bagSize
        # Add initial features to the selected list
        self.selectedFeatures = list(range(self.initial_representation.features_num))
        # Array of indices of features that have not been selected yet
        self.remainingFeatures = np.arange(self.features_num, self.bagSize)
def phi_nonTerminal(self, s):
F_s = self.iFDD.phi_nonTerminal(s)
return F_s[self.selectedFeatures]
def show(self):
self.logger.info("Features:\t\t%d" % self.features_num)
self.logger.info("Remaining Bag Size:\t%d" % len(self.remainingFeatures))
def showBag(self):
"""
Displays the non-active features that OMP-TD can select from to add
to its representation.
"""
print("Remaining Items in the feature bag:")
for f in self.remainingFeatures:
print("%d: %s" % (f, str(sorted(list(self.iFDD.getFeature(f).f_set)))))
def calculateFullPhiNormalized(self, states):
"""
In general for OMPTD it is faster to cache the normalized feature matrix
at once. Note this is only valid if possible states do not change over
execution. (In the feature matrix, each column is a feature function,
each row is a state; thus the matrix has rows phi(s1)', phi(s2)', ...).
"""
p = len(states)
self.fullphi = np.empty((p, self.totalFeatureSize))
o_s = self.domain.state
for i, s in enumerate(states):
self.domain.state = s
if not self.domain.isTerminal(s):
self.fullphi[i, :] = self.iFDD.phi_nonTerminal(s)
self.domain.state = o_s
# Normalize features
for f in range(self.totalFeatureSize):
phi_f = self.fullphi[:, f]
norm_phi_f = np.linalg.norm(phi_f) # L2-Norm of phi_f
if norm_phi_f == 0:
norm_phi_f = 1 # This helps to avoid divide by zero
self.fullphi[:, f] = phi_f / norm_phi_f
    def batchDiscover(self, td_errors, phi, states):
        """
        Discover features using OMPTD:

        1. Compute the normalized feature matrix over ``states``.
        2. Calculate the inner product of each remaining bag feature with the
           TD-error vector (its "relevance").
        3. Move up to ``maxBatchDiscovery`` features whose relevance reaches
           ``batchThreshold`` from the bag into the selected set.

        :param td_errors: p-by-1 vector, error associated with each state
        :param phi: p-by-n matrix, vector-valued feature function evaluated at
            each state (unused here; relevances are computed from the cached
            normalized matrix instead).
        :param states: p-by-(statedimension) matrix, each state under test.
        :return: Boolean indicating whether the feature set was expanded.
        """
        if len(self.remainingFeatures) == 0:
            # No More features to Expand
            return False
        SHOW_RELEVANCES = 0  # Plot the relevances (debugging aid, off by default)
        # Cache self.fullphi: rows are states, columns are unit-norm features.
        self.calculateFullPhiNormalized(states)
        relevances = np.zeros(len(self.remainingFeatures))
        for i, f in enumerate(self.remainingFeatures):
            phi_f = self.fullphi[:, f]
            # |<phi_f, td_errors>| measures how well feature f explains the errors.
            relevances[i] = np.abs(np.dot(phi_f, td_errors))
        if SHOW_RELEVANCES:
            # NOTE(review): ``plt`` is presumably matplotlib.pyplot imported at
            # module level (not visible in this chunk) — confirm before enabling.
            e_vec = relevances.flatten()
            e_vec = e_vec[e_vec != 0]
            e_vec = np.sort(e_vec)
            plt.plot(e_vec, linewidth=3)
            plt.ioff()
            plt.show()
            plt.ion()
        # Sort based on relevances.
        # We want high to low hence the reverse: [::-1]
        sortedIndices = np.argsort(relevances)[::-1]
        max_relevance = relevances[sortedIndices[0]]
        # Add top <maxBatchDiscovery> features
        self.logger.debug("OMPTD Batch: Max Relevance = %0.3f" % max_relevance)
        added_feature = False
        to_be_deleted = []  # Record the indices of items to be removed
        for j in range(min(self.maxBatchDiscovery, len(relevances))):
            max_index = sortedIndices[j]
            f = self.remainingFeatures[max_index]
            relevance = relevances[max_index]
            # print "Inspecting %s" % str(list(self.iFDD.getFeature(f).f_set))
            if relevance >= self.batchThreshold:
                self.logger.debug(
                    "New Feature %d: %s, Relevance = %0.3f"
                    % (
                        self.features_num,
                        str(np.sort(list(self.iFDD.getFeature(f).f_set))),
                        relevances[max_index],
                    )
                )
                to_be_deleted.append(max_index)
                self.selectedFeatures.append(f)
                self.features_num += 1
                added_feature = True
            else:
                # Because the list is sorted, there is no use to look at the
                # others
                break
        # Remove the promoted features from the bag in one call.
        self.remainingFeatures = np.delete(self.remainingFeatures, to_be_deleted)
        return added_feature
def fillBag(self):
"""
Generates potential features by taking conjunctions of existing ones.
Adds these to the bag of features available to OMPTD in a breadth-first
fashion until the ``bagSize`` limit is reached.
"""
level_1_features = np.arange(self.initial_representation.features_num)
# We store the dimension corresponding to each feature so we avoid
# adding pairs of features in the same dimension
level_1_features_dim = {}
for i in range(self.initial_representation.features_num):
level_1_features_dim[i] = np.array(
[self.initial_representation.getDimNumber(i)]
)
# print i,level_1_features_dim[i]
level_n_features = np.array(level_1_features)
level_n_features_dim = deepcopy(level_1_features_dim)
new_id = self.initial_representation.features_num
self.logger.debug(
"Added %d size 1 features to the feature bag."
% (self.initial_representation.features_num)
)
# Loop over possible layers that conjunctions can be add. Notice that
# layer one was already built
for f_size in np.arange(2, self.domain.state_space_dims + 1):
added = 0
next_features = []
next_features_dim = {}
for f in level_1_features:
f_dim = level_1_features_dim[f][0]
for g in level_n_features:
g_dims = level_n_features_dim[g]
if not f_dim in g_dims:
# We pass inf to make sure iFDD will add the
# combination of these two features
added_new_feature = self.iFDD.inspectPair(f, g, np.inf)
if added_new_feature:
# print '%d: [%s,%s]' % (new_id, str(f),str(g))
next_features.append(new_id)
next_features_dim[new_id] = g_dims + f_dim
new_id += 1
added += 1
if new_id == self.bagSize:
self.logger.debug(
"Added %d size %d features to the feature bag."
% (added, f_size)
)
return
level_n_features = next_features
level_n_features_dim = next_features_dim
self.logger.debug(
"Added %d size %d features to the feature bag." % (added, f_size)
)
self.bagSize = new_id
def featureType(self):
return self.initial_representation.featureType() | /rlpy3-2.0.0a0-cp36-cp36m-win_amd64.whl/rlpy/Representations/OMPTD.py | 0.770033 | 0.465813 | OMPTD.py | pypi |
from .Representation import Representation
import numpy as np
from copy import deepcopy
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class IncrementalTabular(Representation):
    """
    Identical to the Tabular representation (i.e. one binary feature function
    f_{d}() per discrete state *d*, with f_{d}(s) = 1 iff d == s).

    UNLIKE *Tabular*, however, feature functions are created lazily: a feature
    is only allocated the first time its state is actually encountered, rather
    than instantiating one for every possible state up front.
    """

    # Mapping from hashed state -> feature index; populated lazily.
    hash = None

    def __init__(self, domain, discretization=20):
        self.hash = {}
        self.features_num = 0
        self.isDynamic = True
        super(IncrementalTabular, self).__init__(domain, discretization)

    def phi_nonTerminal(self, s):
        """Return the binary feature vector for state ``s`` (all zeros if
        ``s`` has never been seen)."""
        feature_vec = np.zeros(self.features_num, bool)
        idx = self.hash.get(self.hashState(s))
        if idx is not None:
            feature_vec[idx] = 1
        return feature_vec

    def pre_discover(self, s, terminal, a, sn, terminaln):
        """Register both the current and next state; return how many new
        features were created (0, 1, or 2)."""
        added = self._add_state(s)
        added += self._add_state(sn)
        return added

    def _add_state(self, s):
        """
        :param s: the (possibly un-cached) state to hash.

        If ``s`` is already cached, do nothing and return 0; otherwise assign
        it the next free feature index, grow the weight vector, and return 1.
        """
        key = self.hashState(s)
        if key in self.hash:
            return 0
        # New state: its feature id is the current feature count.
        self.hash[key] = self.features_num
        self.features_num += 1
        # Add a new element to the feature weight vector, theta
        self.addNewWeight()
        return 1

    def __deepcopy__(self, memo):
        clone = IncrementalTabular(self.domain, self.discretization)
        clone.hash = deepcopy(self.hash)
        return clone

    def featureType(self):
        """Features are binary."""
        return bool
import numpy as np
from .Representation import Representation
from itertools import combinations
from rlpy.Tools import addNewElementForAllActions, PriorityQueueWithNovelty
import matplotlib.pyplot as plt
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann <cdann@mit.edu>"
class KernelizedFeature(object):
    """
    A single kernel-based feature, defined by a center point, the state
    dimensions it regards, and a kernel function.

    :param center: data point used to generate the feature; the feature's
        output is maximal at this point.
    :param dim: list of state dimensions this feature regards.
    :param kernel: callable ``kernel(s, center, dim, *kernel_args)``.
    :param index: feature index; -1 for non-discovered features.
    :param base_ids: frozenset of ids of the 1-dim features this feature
        refines; defaults to ``{index}`` for a base feature.
    :param kernel_args: extra positional arguments forwarded to the kernel.
    """

    # feature index, -1 for non-discovered ones
    index = -1
    # relevance used to decide when to discover
    relevance = 0.0
    # list of dimensions that are regarded by this feature
    dim = []
    # center = data point used to generate the feature
    # center gives the highest output of this feature
    center = None

    # BUGFIX: default was a mutable list ([]); use an immutable tuple to avoid
    # accidental sharing between instances. It is only ever unpacked with *,
    # so behavior is unchanged.
    def __init__(self, center, dim, kernel, index=-1, base_ids=None, kernel_args=()):
        self.index = index
        self.kernel_args = kernel_args
        self.center = center
        self.dim = dim
        self.kernel = kernel
        if base_ids is None:
            self.base_ids = frozenset([self.index])
        else:
            self.base_ids = base_ids

    def __str__(self):
        # e.g. "{3, 5} s1=0.2, s2=1.4"
        res = "{" + ", ".join(sorted([str(i) for i in self.base_ids])) + "} "
        res += ", ".join(["s{}={:.3g}".format(d + 1, self.center[d]) for d in self.dim])
        return res

    def output(self, s):
        """Evaluate the kernel at state ``s`` against this feature's center."""
        return self.kernel(s, self.center, self.dim, *self.kernel_args)
class Candidate(object):
    """
    A candidate feature: the combination of two existing features,
    tracked by their indices while relevance statistics accumulate.
    """

    # running statistics used to compute relevance
    activation_count = 0.0
    td_error_sum = 0.0
    relevance = 0.0
    # indices of the two parent features
    idx1 = -1
    idx2 = -1

    def __init__(self, idx1, idx2):
        self.idx1, self.idx2 = idx1, idx2
class KernelizediFDD(Representation):
    """
    Kernelized version of iFDD: maintains a growing set of kernel features
    (see ``KernelizedFeature``) and incrementally combines pairs of existing
    features into refined ones when their accumulated relevance to the
    TD error exceeds ``discover_threshold``.
    """

    # list of KernelizedFeature instances, indexed by feature id
    features = []
    # maps (idx1, idx2) -> Candidate, for feature combinations under evaluation
    candidates = {}
    # contains a set for each feature indicating the ids of
    base_id_sets = set()
    # 1-dim features it refines
    base_feature_ids = []
    # largest candidate relevance observed since the last discovery
    max_relevance = 0.0

    def __init__(
        self,
        domain,
        kernel,
        active_threshold,
        discover_threshold,
        kernel_args=[],
        normalization=True,
        sparsify=True,
        max_active_base_feat=2,
        max_base_feat_sim=0.7,
    ):
        """
        :param kernel: callable ``kernel(s, center, dim, *kernel_args)``.
        :param active_threshold: minimum kernel output for a feature to count
            as active.
        :param discover_threshold: relevance level at which a candidate
            combination is promoted to a real feature.
        :param normalization: if True, feature vectors are normalized to sum 1.
        :param sparsify: suppress base features covered by active refined ones.
        """
        super(KernelizediFDD, self).__init__(domain)
        self.kernel = kernel
        self.kernel_args = kernel_args
        self.active_threshold = active_threshold
        self.discover_threshold = discover_threshold
        self.normalization = normalization
        self.sparsify = sparsify
        # feature ids ordered by refinement depth (number of base ids)
        self.sorted_ids = PriorityQueueWithNovelty()
        self.max_active_base_feat = max_active_base_feat
        self.max_base_feat_sim = max_base_feat_sim
        self.candidates = {}
        self.features = []
        # NOTE(review): this attribute is spelled ``base_features_ids`` while
        # the class-level attribute above is ``base_feature_ids`` — likely a
        # typo; neither appears to be read in this file. Confirm before fixing.
        self.base_features_ids = []
        self.max_relevance = 0.0

    def show_features(self):
        """Print all features, sorted by refinement depth, dimensions, center."""
        l = self.sorted_ids.toList()[:]
        key = lambda x: (
            len(self.features[x].base_ids),
            tuple(self.features[x].dim),
            tuple(self.features[x].center[self.features[x].dim]),
        )
        l.sort(key=key)
        for i in l:
            f = self.features[i]
            print("{:>5} {:>20}".format(i, f))

    def plot_1d_features(self, dimension_idx=None):
        """Creates a plot for each specified dimension of the state space and shows
        all 1-dimensional features in this dimension

        If no indices are passed, all dimensions are plotted

        dimension_idx: either a single dimension index (int) or a list of indices.
        """
        idx = dimension_idx
        if isinstance(idx, int):
            idx = [idx]
        elif idx is None:
            idx = self.domain.continuous_dims
        # sort features so that all features of one dimension are consecutive
        feat_list = list(range(self.features_num))
        key = lambda x: (
            len(self.features[x].base_ids),
            tuple(self.features[x].dim),
            tuple(self.features[x].center[self.features[x].dim]),
        )
        feat_list.sort(key=key)
        last_i = -1
        for k in feat_list:
            if len(self.features[k].dim) > 1:
                # list is sorted by depth: no more 1-d features follow
                break
            cur_i = self.features[k].dim[0]
            if cur_i != last_i:
                # entering a new dimension: finish the previous figure and
                # prepare the evaluation grid for the new one
                if last_i in idx:
                    plt.draw()
                if cur_i in idx:
                    xi = np.linspace(
                        self.domain.statespace_limits[cur_i, 0],
                        self.domain.statespace_limits[cur_i, 1],
                        200,
                    )
                    x = np.zeros((200, self.domain.statespace_limits.shape[0]))
                    x[:, cur_i] = xi
                    plt.figure("Feature Dimension {}".format(cur_i))
            if cur_i in idx:
                y = [self.features[k].output(xk) for xk in x]
                plt.plot(x, y, label="id {}".format(k))
            last_i = cur_i
        plt.draw()

    def plot_2d_features(self, d1=None, d2=None, n_lines=3):
        """
        plot contours of all 2-dimensional features covering
        dimension d1 and d2. For each feature, n_lines number of lines
        are shown.
        If no dimensions are specified, the first two continuous dimensions
        are shown.

        d1, d2: indices of dimensions to show
        n_lines: number of countour lines per feature (default: 3)
        """
        if d1 is None and d2 is None:
            # just take the first two dimensions
            idx = self.domain.continuous_dims[:2]
        else:
            idx = [d1, d2]
        idx.sort()
        feat_list = list(range(self.features_num))
        key = lambda x: (
            len(self.features[x].base_ids),
            tuple(self.features[x].dim),
            tuple(self.features[x].center[self.features[x].dim]),
        )
        feat_list.sort(key=key)
        last_i = -1
        last_j = -1
        for k in feat_list:
            if len(self.features[k].dim) < 2:
                continue
            elif len(self.features[k].dim) > 2:
                # sorted by depth: no more 2-d features follow
                break
            cur_i = self.features[k].dim[0]
            cur_j = self.features[k].dim[1]
            if cur_i != last_i or cur_j != last_j:
                # entering a new dimension pair: finish the previous figure
                # and build the evaluation mesh for the new one
                if last_i in idx and last_j in idx:
                    plt.draw()
                if cur_i in idx and cur_j in idx:
                    xi = np.linspace(
                        self.domain.statespace_limits[cur_i, 0],
                        self.domain.statespace_limits[cur_i, 1],
                        100,
                    )
                    xj = np.linspace(
                        self.domain.statespace_limits[cur_j, 0],
                        self.domain.statespace_limits[cur_j, 1],
                        100,
                    )
                    X, Y = np.meshgrid(xi, xj)
                    plt.figure("Feature Dimensions {} and {}".format(cur_i, cur_j))
            if cur_i in idx and cur_j in idx:
                Z = np.zeros_like(X)
                for m in range(100):
                    for n in range(100):
                        x = np.zeros(self.domain.statespace_limits.shape[0])
                        x[cur_i] = X[m, n]
                        x[cur_j] = Y[m, n]
                        Z[m, n] = self.features[k].output(x)
                plt.contour(X, Y, Z, n_lines)
            last_i = cur_i
            last_j = cur_j
        plt.draw()

    def plot_2d_feature_centers(self, d1=None, d2=None):
        """
        plot the centers of all 2-dimensional features covering
        dimension d1 and d2.
        If no dimensions are specified, the first two continuous dimensions
        are shown.

        d1, d2: indices of dimensions to show
        """
        if d1 is None and d2 is None:
            # just take the first two dimensions
            idx = self.domain.continuous_dims[:2]
        else:
            idx = [d1, d2]
        idx.sort()
        feat_list = list(range(self.features_num))
        key = lambda x: (
            len(self.features[x].base_ids),
            tuple(self.features[x].dim),
            tuple(self.features[x].center[self.features[x].dim]),
        )
        feat_list.sort(key=key)
        last_i = -1
        last_j = -1
        for k in feat_list:
            if len(self.features[k].dim) < 2:
                continue
            elif len(self.features[k].dim) > 2:
                break
            cur_i = self.features[k].dim[0]
            cur_j = self.features[k].dim[1]
            if cur_i != last_i or cur_j != last_j:
                if last_i in idx and last_j in idx:
                    plt.draw()
                if cur_i in idx and cur_j in idx:
                    plt.figure("Feature Dimensions {} and {}".format(cur_i, cur_j))
            if cur_i in idx and cur_j in idx:
                # mark the feature center with a red cross
                plt.plot(
                    [self.features[k].center[cur_i]],
                    [self.features[k].center[cur_j]],
                    "r",
                    marker="x",
                )
            last_i = cur_i
            last_j = cur_j
        plt.draw()

    def phi_nonTerminal(self, s):
        """
        Evaluate all features at state ``s``.

        With ``sparsify`` enabled, refined features suppress the base features
        they cover; with ``normalization``, the result is scaled to sum 1.
        """
        out = np.zeros(self.features_num)
        if not self.sparsify:
            for i in range(self.features_num):
                out[i] = self.features[i].output(s)
        else:
            # get all base feature values and check if they are activated
            active_bases = set([])
            # sorted_ids is ordered by depth; base features come last,
            # so iterate in reverse until the first compound feature
            for i in self.sorted_ids.toList()[::-1]:
                if len(self.features[i].base_ids) > 1:
                    break
                if self.features[i].output(s) >= self.active_threshold:
                    active_bases.add(i)
            base_vals = {k: 1.0 for k in active_bases}
            # iterate over the remaining compound features
            for i in self.sorted_ids.toList():
                if active_bases.issuperset(self.features[i].base_ids):
                    if self.sparsify > 1:
                        # stronger sparsification mode: activating a compound
                        # feature removes its base ids outright
                        out[i] = self.features[i].output(s)
                        if self.sparsify > 2 or out[i] >= self.active_threshold:
                            active_bases -= self.features[i].base_ids
                    else:
                        # soft mode: scale by the largest remaining base value
                        # and subtract the activation from each covered base
                        u = 0
                        for k in self.features[i].base_ids:
                            u = max(u, base_vals[k])
                        out[i] = self.features[i].output(s) * u
                        for k in self.features[i].base_ids:
                            base_vals[k] -= out[i]
                            if base_vals[k] < 0:
                                active_bases.remove(k)
        if self.normalization:
            summ = out.sum()
            if summ != 0:
                out /= out.sum()
        return out

    def phi_raw(self, s, terminal):
        """Return unnormalized, unsparsified feature outputs at ``s``."""
        assert terminal is False
        out = np.zeros(self.features_num)
        for i in range(self.features_num):
            out[i] = self.features[i].output(s)
        return out

    # @profile
    def post_discover(self, s, terminal, a, td_error, phi_s=None):
        """
        Update discovery statistics after observing a transition and add new
        features where warranted. Returns the number of features added.
        """
        if phi_s is None:
            phi_s = self.phi(s, terminal)
        phi_s_unnorm = self.phi_raw(s, terminal)
        discovered = 0
        Q = self.Qs(s, terminal, phi_s=phi_s).reshape(-1, 1)
        # indices of active features
        active_indices = list(np.where(phi_s_unnorm > self.active_threshold)[0])
        # "active indices", active_indices

        # gather all dimensions regarded by active features
        active_dimensions = np.zeros((len(s)), dtype="int")
        closest_neighbor = np.zeros((len(s)))
        for i in active_indices:
            for j in self.features[i].dim:
                active_dimensions[j] += 1
                closest_neighbor[j] = max(closest_neighbor[j], phi_s_unnorm[i])
        # add new base features for all dimension not regarded
        for j in range(len(s)):
            if active_dimensions[j] < self.max_active_base_feat and (
                closest_neighbor[j] < self.max_base_feat_sim or active_dimensions[j] < 1
            ):
                active_indices.append(self.add_base_feature(s, j, Q=Q))
                discovered += 1

        # update relevance statistics of all feature candidates
        if discovered:
            phi_s = self.phi(s, terminal)
        la = len(active_indices)
        # choose the cheaper iteration order: all candidates, or all
        # active-index pairs, depending on which collection is smaller
        if la * (la - 1) < len(self.candidates):
            for ind, cand in list(self.candidates.items()):
                g, h = ind
                rel = self.update_relevance_stat(cand, g, h, td_error, s, a, phi_s)
                self.max_relevance = max(rel, self.max_relevance)
                # add if relevance is high enough
                if rel > self.discover_threshold:
                    self.add_refined_feature(g, h, Q=Q)
                    discovered += 1
        else:
            # the result of both branches can be very different as this one
            # updates only combinations which are considered active.
            for g, h in combinations(active_indices, 2):
                # note: g, h are ordered as active_indices are ordered
                cand = self.candidates.get((g, h))
                if cand is None:
                    continue
                rel = self.update_relevance_stat(cand, g, h, td_error, s, a, phi_s)
                self.max_relevance = max(rel, self.max_relevance)
                # add if relevance is high enough
                if rel > self.discover_threshold:
                    self.add_refined_feature(g, h, Q=Q)
                    discovered += 1
        if discovered:
            # reset the running maximum after the feature set changed
            self.max_relevance = 0.0
        return discovered

    def update_relevance_stat(self, candidate, index1, index2, td_error, s, a, phi_s):
        """
        make sure that inputs are ordered, i.e., index1 <= index2!
        Accumulates the candidate's statistics and returns the relevance of
        the potential feature combination:
        |sum(phi1*phi2*delta)| / sqrt(sum((phi1*phi2)^2)).
        """
        candidate.td_error_sum += phi_s[index1] * phi_s[index2] * td_error
        candidate.activation_count += phi_s[index1] ** 2 * phi_s[index2] ** 2
        if candidate.activation_count == 0.0:
            return 0.0
        return np.abs(candidate.td_error_sum) / np.sqrt(candidate.activation_count)

    def add_base_feature(self, center, dim, Q):
        """
        adds a new 1-dimensional feature centered at ``center`` in dimension
        ``dim`` and returns its index. ``Q`` is used to initialize the new
        weights when normalization is enabled.
        """
        new_f = KernelizedFeature(
            center=center,
            dim=[dim],
            kernel_args=self.kernel_args,
            kernel=self.kernel,
            index=self.features_num,
        )
        self.features.append(new_f)
        self.base_id_sets.add(new_f.base_ids)
        # priority -1: base features sort after all compound features
        self.sorted_ids.push(-1, self.features_num)
        self.logger.debug("Added Feature {} {}".format(self.features_num, new_f))

        # add combinations with all existing features as candidates
        new_cand = {
            (f, self.features_num): Candidate(f, self.features_num)
            for f in range(self.features_num)
            if dim not in self.features[f].dim
        }
        self.candidates.update(new_cand)
        for f, _ in list(new_cand.keys()):
            self.base_id_sets.add(new_f.base_ids | self.features[f].base_ids)
        self.features_num += 1
        # add parameter dimension
        if self.normalization:
            self.weight_vec = addNewElementForAllActions(
                self.weight_vec, self.domain.actions_num, Q
            )
        else:
            self.weight_vec = addNewElementForAllActions(
                self.weight_vec, self.domain.actions_num
            )
        return self.features_num - 1

    def add_refined_feature(self, index1, index2, Q):
        """
        adds the combination of 2 existing features to the representation
        and returns the new feature's index. The new center averages the
        parents' centers per dimension; the new dimension set and base-id
        set are the unions of the parents'.
        """
        f1 = self.features[index1]
        f2 = self.features[index2]
        new_center = np.zeros_like(f1.center)
        # per-dimension average of the parent centers (cnt counts how many
        # parents regard each dimension; untouched dims divide by 1)
        cnt = np.zeros_like(f1.center)
        cnt[f1.dim] += 1
        cnt[f2.dim] += 1
        cnt[cnt == 0] = 1.0
        new_center[f1.dim] += f1.center[f1.dim]
        new_center[f2.dim] += f2.center[f2.dim]
        new_center /= cnt
        new_dim = list(frozenset(f1.dim) | frozenset(f2.dim))
        new_base_ids = f1.base_ids | f2.base_ids
        new_dim.sort()
        new_f = KernelizedFeature(
            center=new_center,
            dim=new_dim,
            kernel_args=self.kernel_args,
            kernel=self.kernel,
            index=self.features_num,
            base_ids=new_base_ids,
        )
        self.features.append(new_f)
        # Priority is the negative number of base ids
        self.sorted_ids.push(-len(new_f.base_ids), self.features_num)
        # assert(len(self.sorted_ids.toList()) == self.features_num + 1)
        self.base_id_sets.add(new_f.base_ids)
        del self.candidates[(index1, index2)]

        # add new candidates (skip combinations already represented and
        # those sharing a dimension with the new feature)
        new_cand = {
            (f, self.features_num): Candidate(f, self.features_num)
            for f in range(self.features_num)
            if (self.features[f].base_ids | new_base_ids) not in self.base_id_sets
            and len(frozenset(self.features[f].dim) & frozenset(new_dim)) == 0
        }
        for c, _ in list(new_cand.keys()):
            self.base_id_sets.add(new_base_ids | self.features[c].base_ids)
        self.candidates.update(new_cand)
        self.logger.debug(
            "Added refined feature {} {}".format(self.features_num, new_f)
        )
        self.logger.debug("{} candidates".format(len(self.candidates)))
        self.features_num += 1
        if self.normalization:
            self.weight_vec = addNewElementForAllActions(
                self.weight_vec, self.domain.actions_num, Q
            )
        else:
            self.weight_vec = addNewElementForAllActions(
                self.weight_vec, self.domain.actions_num
            )
        return self.features_num - 1
# Prefer the compiled (Cython) kernel implementations; fall back to the
# pure-Python versions when the C extension has not been built.
try:
    from .kernels import *
except ImportError:
    print("C-Extension for kernels not available, expect slow runtime")
    from .slow_kernels import *
import itertools
import numpy as np
class RLSA(object):
    """
    Class that contains the logic to apply the Run Length Smoothing Algorithm (RLSA) on an image.
    """

    @staticmethod
    def apply_rlsa(img, h_threshold=0, v_threshold=0, hf_threshold=0):
        """
        Method that applies the 'smear_line' algorithm first to every row, then to every column, combines these two
        results with the logical 'and' (so that only the white pixels on both images remain), and applies 'smear_line'
        horizontally again to the combined result.

        Parameters:
            img (numpy.array): Array representing the image (must be binary image with only 0s or 255s. 255 pixels will be smeared).
            h_threshold (int): First threshold to be used horizontally.
            v_threshold (int): Threshold to be used vertically.
            hf_threshold (int): Final threshold value to be used horizontally.

        Returns:
            numpy.array: Array after application of RLSA.
        """
        horizontal_smear = np.apply_along_axis(RLSA.__smear_line, 1, img, h_threshold)
        vertical_smear = np.apply_along_axis(RLSA.__smear_line, 0, img, v_threshold)
        # bitwise AND keeps 255 only where both passes kept 255
        combined_smear = horizontal_smear & vertical_smear
        return np.apply_along_axis(RLSA.__smear_line, 1, combined_smear, hf_threshold).astype("uint8")

    @staticmethod
    def __rle_encode(line):
        """
        Method that encodes the given line using the Run Length Encoding algorithm.

        Parameters:
            line (numpy.array): The line to be encoded.

        Returns:
            list: A list of tuples representing the encoded line in the form (value, frequency).
        """
        return [(pixel_value, len(list(group))) for pixel_value, group in itertools.groupby(line)]

    @staticmethod
    def __join_lines(line1, line2):
        """
        Method that joins two lines. Retained for backward compatibility; no
        longer used internally (the old per-group prepend made smearing
        quadratic in the line length).

        NOTE: mutates ``line2`` in place by inserting ``line1`` at its front.

        Parameters:
            line1 (list): 1D sequence.
            line2 (list): 1D sequence.

        Returns:
            list: line1 prepended to line2.
        """
        line2[0:0] = line1
        return line2

    @staticmethod
    def __smear_line(line, threshold):
        """
        Method that 'smears' the given line. 0's are changed into 255's in the final array
        if the number of consecutive 0's is less than or equal to (<=) the given 'threshold'.
        255's remain unchanged in the final array.

        Parameters:
            line (numpy.array): Binary 1D array of either 0 or 255.
            threshold (int): Threshold value used for the algorithm described above.

        Returns:
            list: Smeared line.
        """
        smeared_line = []
        for pixel_value, frequency in RLSA.__rle_encode(line):
            # runs of 0 longer than the threshold survive; everything else
            # (255 runs, and short 0 runs) becomes 255
            if pixel_value == 0 and frequency > threshold:
                smeared_line.extend([0] * frequency)
            else:
                smeared_line.extend([255] * frequency)
        return smeared_line
=======
RLScore
=======
RLScore - regularized least-squares machine learning algorithms package.
:Authors: `Tapio Pahikkala <http://staff.cs.utu.fi/~aatapa/>`_,
`Antti Airola <https://scholar.google.fi/citations?user=5CPOSr0AAAAJ>`_
:Email: firstname.lastname@utu.fi
:Homepage: `http://staff.cs.utu.fi/~aatapa/software/RLScore <http://staff.cs.utu.fi/~aatapa/software/RLScore>`_
:Version: 0.8.1
:License: `The MIT License <LICENCE.TXT>`_
:Date: August 22. 2018
.. contents::
Overview
========
RLScore is a machine learning software package for regularized kernel methods,
focusing especially on Regularized Least-Squares (RLS) based methods. The main
advantage of the RLS family of methods is that they admit a closed form solution, expressed as a system of linear equations.
This allows deriving highly efficient algorithms for RLS methods, based on matrix
algebraic optimization. Classical results include computational short-cuts for
multi-target learning, fast regularization path and leave-one-out
cross-validation. RLScore takes these results further by implementing a wide
variety of additional computational shortcuts for different types of cross-validation
strategies, single- and multi-target feature selection, multi-task and zero-shot
learning with Kronecker kernels, ranking, stochastic hill climbing based
clustering etc. The majority of the implemented methods are such that are not
available in any other software package.
For documentation, see project `home page <http://staff.cs.utu.fi/~aatapa/software/RLScore>`_.
Support for different tasks
===========================
- Regression and classification
- Regularized least-squares (RLS)
- multi-target learning
- regularization path
- leave-one-out cross-validation
- leave-pair-out cross-validation
- fast cross-validation with arbitrary hold-out sets
- Feature selection for regression and classification
- Greedy regularized least-squares (Greedy RLS)
- greedy forward selection; selects features based on leave-one-out error
- joint feature selection for multi-target problems
- Ranking
- Regularized least-squares ranking (GlobalRankRLS)
- minimizes magnitude preserving ranking error
- multi-target learning
- regularization path
- leave-pair-out cross-validation
- cross-validation with arbitrary hold-out sets
- Regularized least-squares ranking for query-structured data (QueryRankRLS)
- minimizes magnitude preserving ranking error, computed for each query separately
- multi-target learning
- regularization path
- leave-query-out cross-validation
- Pair-input data and zero-shot learning
- Learning with Kronecker product kernels
- Closed form solution for training models from complete data with labels for all pair-inputs available (KronRLS, TwoStepRLS)
- Leave-one-out and k-fold cross-validation algorithms for pair-input data (TwoStepRLS)
- Iterative training algorithm for pair-input data, where only a subset of pairwise labels are known (CGKronRLS)
- Clustering
- Unsupervised RLS methods, based on the maximum margin clustering principle
Software dependencies
=====================
RLScore is written in Python and thus requires a working
installation of Python 3.5 or newer. The package is also dependent on
the `NumPy <http://numpy.scipy.org/>`_ package for matrix
operations, and `SciPy <http://www.scipy.org/>`_ package for sparse
matrix implementations, and a c-compiler for building Cython extensions.
Citing RLScore
==============
RLScore is described in the following article:
`Rlscore: Regularized least-squares learners <http://jmlr.org/papers/v17/16-470.html>`_, Tapio Pahikkala and Antti Airola. Journal of Machine Learning Research, 17(221):1-5, 2016. BibTeX entry can be found `here <http://jmlr.org/papers/v17/16-470.bib>`_.
History
=======
Version 0.8.1 (2018.08.22):
- New tutorials for stacked (two-step) kernel ridge regression e.g. two-step RLS
- Many technical improvements for learning with pairwise data
- Requires Python 3.5 or newer due to the use of matrix product infix notation etc.
Version 0.8 (2017.08.17):
- Compatible with Python 3
- Last version still working properly with Python 2.7
Version 0.7 (2016.09.19):
- Tutorials available
- API documentation finished
- TwoStep-learning cross-validation methods available
- Unit testing extended
- Simplified internal structure of the package
Version 0.6 (2016.02.18):
- Major overhaul of learner interface, learners now trained directly when initialized
- TwoStep-learning method, better Kronecker learners
- Cythonization of leave-pair-out cross-validation
- Automated regularization parameter selection via cross-validation for RLS and RankRLS added
- Old documentation removed as out-of-date, new documentation and tutorials in preparation
Version 0.5.1 (2014.07.31):
- This is a work in progress version maintained in a github repository.
- The command line functionality is dropped and the main focus is shifted towards the library interface.
- The interface has been considerably simplified to ease the use of the library.
- Learning with tensor (Kronecker) product kernels considerably extended.
- Many learners now implemented with cython to improve speed.
- Support for a new type of interactive classification usable for image segmentation and various other tasks.
- Numerous internal changes in the software.
Version 0.5 (2012.06.19):
- CGRLS and CGRankRLS learners for conjugate gradient -based training of RLS/RankRLS on large and high-dimensional, but sparse data.
- CGRankRLS supports learning from pairwise preferences between data points in addition to learning from utility values.
- Library interface for Python. Code examples for almost all included learning algorithms.
- Support for learning with Kronecker kernels.
- Numerous internal changes in the software.
Version 0.4 (2010.04.14):
- A linear time greedy forward feature selection with leave-one-out criterion for RLS (greedy RLS) included.
- Example data and codes for basic use cases included in the distribution.
- Fixed a bug causing problems when reading/writing binary files in Windows.
- Modifications to the configuration file format.
- All command line interfaces other than rls_core.py removed.
Version 0.3 (2009.12.03):
- Major restructuring of the code to make the software more modular.
- Configuration files introduced for more flexible use of software.
- Evolutionary maximum-margin clustering included.
- Model file format changed.
Version 0.2.1 (2009.06.24):
- Fixed a bug causing one of the features to get ignored.
Version 0.2 (2009.03.13):
- Major overhaul of the file formats.
- RLScore now supports learning multiple tasks simultaneously.
- Reduced set approximation included for large scale learning.
Version 0.1.1 (2009.01.11):
- Fixed a bug causing a memory leak after training with sparse data and linear kernel.
Version 0.1 (2008.10.18):
- First public release.
Credits
=======
:Other Contributors:
`Michiel Stock <https://michielstock.github.io/>`_
provided code for fast cross-validation with stacked (two-step) kernel ridge regression (version 0.8.1)
`Evgeni Tsivtsivadze <http://learning-machines.com/>`_
participated in designing the version 0.1
| /rlscore-0.8.1.tar.gz/rlscore-0.8.1/README.rst | 0.950881 | 0.865167 | README.rst | pypi |
from gym.spaces import Dict
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.torch_utils import FLOAT_MIN
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class TorchActionMaskModel(TorchModelV2, nn.Module):
    """PyTorch model that handles simple discrete action masking.

    The observation space must be a Dict with an "action_mask" entry and an
    "observations" entry; the mask marks which actions are valid. The outputs
    are assumed to be logits for a single Categorical action distribution.
    Supporting more complex outputs (e.g. a tuple of several distributions)
    is possible but left as an exercise to the reader.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name, **kwargs):
        orig_space = getattr(obs_space, "original_space", obs_space)
        assert (
            isinstance(orig_space, Dict)
            and "action_mask" in orig_space.spaces
            and "observations" in orig_space.spaces
        )

        TorchModelV2.__init__(
            self, obs_space, action_space, num_outputs, model_config, name, **kwargs
        )
        nn.Module.__init__(self)

        # inner fully-connected net operating on the raw observations only
        self.internal_model = TorchFC(
            orig_space["observations"],
            action_space,
            num_outputs,
            model_config,
            name + "_internal",
        )

        # disable action masking --> will likely lead to invalid actions
        self.no_masking = model_config["custom_model_config"].get("no_masking", False)

    def forward(self, input_dict, state, seq_lens):
        obs = input_dict["obs"]
        # Compute the unmasked logits from the inner model.
        raw_logits, _ = self.internal_model({"obs": obs["observations"]})

        # If action masking is disabled, directly return unmasked logits.
        if self.no_masking:
            return raw_logits, state

        # Turn the 0/1 mask into an additive [0.0 || -inf]-style mask.
        inf_mask = torch.clamp(torch.log(obs["action_mask"]), min=FLOAT_MIN)
        return raw_logits + inf_mask, state

    def value_function(self):
        return self.internal_model.value_function()
import glob
import os
from typing import Tuple
import ray.tune
from ray import init
from ray.rllib.agents import ppo
from ray.rllib.env import PettingZooEnv
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.framework import try_import_torch
from ray.tune.logger import pretty_print
from ray.tune.registry import register_env
from rlskyjo.environment import skyjo_env
from rlskyjo.game.skyjo import SkyjoGame
from rlskyjo.models.action_mask_model import TorchActionMaskModel
from rlskyjo.utils import get_project_root
# RLlib helper that imports torch without hard-failing when it is unavailable.
torch, nn = try_import_torch()
def prepare_train() -> Tuple[ppo.PPOTrainer, PettingZooEnv, dict]:
    """Build the Skyjo PettingZoo env, register it with RLlib, and create a
    PPO trainer configured for multi-agent play.

    Returns:
        ``(trainer, env, ppo_config)`` — the config dict is returned as well
        because it is needed later to restore the trainer from a checkpoint.
        (The previous annotation claimed a 2-tuple; the function has always
        returned three values.)
    """
    env_name = "pettingzoo_skyjo"

    # get the Pettingzoo env
    def env_creator():
        env = skyjo_env.env(**skyjo_env.DEFAULT_CONFIG)
        return env

    register_env(env_name, lambda config: PettingZooEnv(env_creator()))
    ModelCatalog.register_custom_model("pa_model2", TorchActionMaskModel)
    # wrap the pettingzoo env in MultiAgent RLLib
    env = PettingZooEnv(env_creator())
    custom_config = {
        "env": env_name,
        "model": {
            "custom_model": "pa_model2",
        },
        "framework": "torch",
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        "num_gpus": int(torch.cuda.device_count()),
        # os.cpu_count() may return None; fall back to 1 so this stays an int.
        "num_workers": (os.cpu_count() or 1) - 1,
        "multiagent": {
            # One independent policy per agent, all over the same spaces.
            "policies": {
                name: (None, env.observation_space, env.action_space, {})
                for name in env.agents
            },
            "policy_mapping_fn": lambda agent_id: agent_id,
        },
    }
    # get trainer
    ppo_config = ppo.DEFAULT_CONFIG.copy()
    ppo_config.update(custom_config)
    trainer = ppo.PPOTrainer(config=ppo_config)
    return trainer, env, ppo_config
def train(trainer, max_steps=2e6):
    """Manually iterate ``trainer.train()`` until ``max_steps`` env steps."""
    iteration = 0
    finished = False
    while not finished:
        iteration += 1
        result = trainer.train()
        print(pretty_print(result))
        # stop training if the target train steps or reward are reached
        finished = result["timesteps_total"] >= max_steps
        if finished:
            print(
                "training done, because max_steps"
                f"{max_steps} {result['timesteps_total']} reached"
            )
    # manual test loop
    return trainer
def train_ray(ppo_config, timesteps_total: int = 10):
    """Train via ``ray.tune`` and checkpoint at the end of the run.

    Returns the tune analysis object describing the run.
    """
    stop_criteria = {"timesteps_total": timesteps_total}
    output_dir = os.path.join(get_project_root(), "models")
    return ray.tune.run(
        ppo.PPOTrainer,
        config=ppo_config,
        local_dir=output_dir,
        stop=stop_criteria,
        checkpoint_at_end=True,
    )
def load_ray(path, ppo_config):
    """Load a trained RLlib agent from the specified checkpoint path.

    Call this before testing a trained agent.

    :param path:
        Path pointing to the agent's saved checkpoint (only used for RLlib agents)
    :param ppo_config:
        dict config the trainer was originally built with
    """
    restored = ppo.PPOTrainer(config=ppo_config)
    restored.restore(path)
    return restored
def sample_trainer(trainer, env):
    """Greedy rollout of the trained policy with rendering, for inspection."""
    print("Finished training. Running manual test/inference loop.")
    obs = env.reset()
    done = {"__all__": False}
    # run one iteration until done
    for _ in range(10000):
        if done["__all__"]:
            print("game done")
            break
        # the MultiAgent obs dict contains exactly the agent whose turn it is
        agent = next(iter(obs))
        print(obs)
        agent_obs = obs[agent]
        env.render()
        # act greedily: argmax over the policy logits instead of sampling
        # (trainer.compute_single_action(obs, policy_id=agent) would sample)
        policy = trainer.get_policy(policy_id=agent)
        _, _, action_info = policy.compute_single_action(agent_obs)
        action = action_info["action_dist_inputs"].argmax()
        print("agent ", agent, " action ", SkyjoGame.render_action_explainer(action))
        obs, reward, done, _ = env.step({agent: action})
        # observations contain original observations and the action mask
        env.render()
    print(env.env.rewards)
def tune_training_loop(timesteps_total=10000):
    """train trainer and sample"""
    trainer, env, ppo_config = prepare_train()
    # train with ray.tune, checkpointing at the end of the run
    analysis = train_ray(ppo_config, timesteps_total=timesteps_total)
    # NOTE(review): `_checkpoints` is a private Tune attribute and may change
    # between Ray versions — confirm against the pinned Ray release.
    last_chpt_dir = analysis._checkpoints[-1]["local_dir"]
    checkpoint_pattern = os.path.join(last_chpt_dir, "**", "checkpoint-*")
    checkpoint_file = glob.glob(checkpoint_pattern, recursive=True)[0]
    trainer_trained = load_ray(checkpoint_file, ppo_config)
    # sample trainer
    sample_trainer(trainer_trained, env)
def manual_training_loop(timesteps_total=10000):
    """train trainer and sample"""
    trainer, env, _ppo_config = prepare_train()
    trained = train(trainer, max_steps=timesteps_total)
    sample_trainer(trained, env)
if __name__ == "__main__":
    # local_mode=True runs Ray serially in-process, which simplifies debugging.
    init(local_mode=True)
    tune_training_loop()
import math
from enum import IntEnum
from typing import Any
import torch
from tensordict import TensorDict
from torchrl.data import (
CompositeSpec,
DiscreteTensorSpec,
UnboundedContinuousTensorSpec,
)
from rlstack import Env
from rlstack.data import DataKeys, Device
class Action(IntEnum):
    """Enumeration of environment actions for readability."""
    HOLD = 0  # keep the current (non-)position unchanged
    BUY = 1  # enter a position at the current price
    SELL = 2  # exit the position; reward is the realized log-return
class AlgoTrading(Env):
    """An environment that mocks algotrading.

    An asset's price is simulated according to the equation
    ``y[k + 1] = (1 + km) * (1 + kc * sin(f * t)) * y[k]`` where
    ``km``, ``kc``, ``f``, and ``y[0]`` are all randomly sampled
    from their own independent uniform distributions, some of which
    are defined by values in ``config``.

    A policy must learn to hold, buy, or sell the asset based on the
    asset's change in price with respect to the previous day and with
    respect to the price at which the policy had previously bought the
    asset.

    This environment serves as a playground for different kinds of models.
    Feedforward models could specify view requirements to utilize aggregated
    metrics or sequence-based components, while recurrent models could accept
    the environment's observations as-is.
    """

    # Environment state that's reset when the environment is reset and is
    # updated when the environment is stepped.
    state: TensorDict

    def __init__(
        self,
        num_envs: int,
        /,
        *,
        config: dict[str, Any] | None = None,
        device: Device = "cpu",
    ) -> None:
        super().__init__(num_envs, config=config, device=device)
        self.max_horizon = 128
        # Observations: which actions are valid, whether we hold the asset,
        # and two log price-change features (vs. yesterday / vs. buy price).
        self.observation_spec = CompositeSpec(
            {
                "action_mask": DiscreteTensorSpec(
                    2, shape=torch.Size([3]), device=device, dtype=torch.bool
                ),
                "invested": DiscreteTensorSpec(
                    2, shape=torch.Size([1]), device=device, dtype=torch.long
                ),
                "LOG_CHANGE(price)": UnboundedContinuousTensorSpec(
                    1, device=device, dtype=torch.float32
                ),
                "LOG_CHANGE(price, position)": UnboundedContinuousTensorSpec(
                    1, device=device, dtype=torch.float32
                ),
            }
        )
        # One discrete action out of {HOLD, BUY, SELL}.
        self.action_spec = DiscreteTensorSpec(3, shape=torch.Size([1]), device=device)
        # Bounds of the uniform distributions the price-process parameters
        # are sampled from on each reset.
        self.f_bounds = self.config.get("f_bounds", math.pi)
        self.k_cyclic_bounds = self.config.get("k_cyclic_bounds", 0.05)
        self.k_market_bounds = self.config.get("k_market_bounds", 0.05)

    def reset(self, *, config: dict[str, Any] | None = None) -> TensorDict:
        """Resample price-process parameters for every env and return the
        initial observation. ``config`` may override the sampling bounds."""
        config = config or {}
        self.f_bounds = config.get("f_bounds", self.f_bounds)
        self.k_cyclic_bounds = config.get("k_cyclic_bounds", self.k_cyclic_bounds)
        self.k_market_bounds = config.get("k_market_bounds", self.k_market_bounds)
        # Per-env price-process parameters: frequency, cyclic gain, drift.
        f = torch.empty(self.num_envs, 1, device=self.device).uniform_(0, self.f_bounds)
        k_cyclic = torch.empty(self.num_envs, 1, device=self.device).uniform_(
            -self.k_cyclic_bounds, self.k_cyclic_bounds
        )
        k_market = torch.empty(self.num_envs, 1, device=self.device).uniform_(
            -self.k_market_bounds, self.k_market_bounds
        )
        # Random time offset so envs start at different phases of the cycle.
        t = torch.randint(0, 10, size=(self.num_envs, 1), device=self.device)
        price = torch.empty(self.num_envs, 1, device=self.device).uniform_(100, 10000)
        # Nothing is invested at the start, so only HOLD and BUY are valid.
        action_mask = torch.zeros(
            self.num_envs, 3, device=self.device, dtype=torch.bool
        )
        action_mask[:, Action.HOLD] = True
        action_mask[:, Action.BUY] = True
        action_mask[:, Action.SELL] = False
        self.state = TensorDict(
            {
                "action_mask": action_mask,
                "invested": torch.zeros(
                    self.num_envs, 1, device=self.device, dtype=torch.long
                ),
                # Price the asset was (notionally) bought at.
                "position": torch.zeros(
                    self.num_envs, 1, device=self.device, dtype=torch.float32
                ),
                "f": f,
                "k_cyclic": k_cyclic,
                "k_market": k_market,
                "t": t,
                "price": price,
                "LOG_CHANGE(price)": torch.zeros(
                    self.num_envs, 1, device=self.device, dtype=torch.float32
                ),
                "LOG_CHANGE(price, position)": torch.zeros(
                    self.num_envs, 1, device=self.device, dtype=torch.float32
                ),
            },
            batch_size=self.num_envs,
            device=self.device,
        )
        # Only the observable subset of the state is returned to the policy.
        return self.state.select(
            "action_mask",
            "invested",
            "LOG_CHANGE(price)",
            "LOG_CHANGE(price, position)",
        )

    def step(self, action: torch.Tensor) -> TensorDict:
        """Apply one batched action, advance the price process, and return
        the new observation together with per-env rewards."""
        old_price = self.state["price"].clone()
        reward = torch.zeros(self.num_envs, 1, device=self.device, dtype=torch.float32)
        # Handle buy actions: remember the entry price.
        buy_mask = (action == Action.BUY).flatten()
        self.state["invested"][buy_mask] = 1
        self.state["position"][buy_mask] = old_price[buy_mask]
        # Handle sell actions: realize the log-return since entry.
        sell_mask = (action == Action.SELL).flatten()
        self.state["invested"][sell_mask] = 0
        reward[sell_mask] = torch.log(old_price[sell_mask]) - torch.log(
            self.state["position"][sell_mask]
        )
        # Handle hold actions.
        hold_mask = (action == Action.HOLD).flatten()
        invested_mask = (self.state["invested"] == 1).flatten()
        not_invested_mask = ~invested_mask
        # Uninvested envs track the current price so LOG_CHANGE(price,
        # position) stays meaningful.
        self.state["position"][not_invested_mask] = old_price[not_invested_mask]
        # NOTE(review): the hold reward reuses the PREVIOUS step's
        # LOG_CHANGE(price) (it is only recomputed below, after rewards are
        # assigned) — confirm this one-step lag is intended.
        reward[invested_mask & hold_mask] = self.state["LOG_CHANGE(price)"][
            invested_mask & hold_mask
        ].clone()
        # Main environment state update: invested envs may HOLD/SELL,
        # uninvested envs may HOLD/BUY.
        self.state["action_mask"][invested_mask, Action.HOLD] = True
        self.state["action_mask"][invested_mask, Action.BUY] = False
        self.state["action_mask"][invested_mask, Action.SELL] = True
        self.state["action_mask"][not_invested_mask, Action.HOLD] = True
        self.state["action_mask"][not_invested_mask, Action.BUY] = True
        self.state["action_mask"][not_invested_mask, Action.SELL] = False
        # Advance the simulated price process by one tick.
        self.state["t"] += 1
        self.state["price"] *= (1 + self.state["k_market"]) * (
            1 + self.state["k_cyclic"] * torch.sin(self.state["t"] * self.state["f"])
        )
        self.state["LOG_CHANGE(price)"] = torch.log(self.state["price"]) - torch.log(
            old_price
        )
        self.state["LOG_CHANGE(price, position)"] = torch.log(
            self.state["price"]
        ) - torch.log(self.state["position"])
        obs = self.state.select(
            "action_mask",
            "invested",
            "LOG_CHANGE(price)",
            "LOG_CHANGE(price, position)",
        )
        return TensorDict(
            {DataKeys.OBS: obs, DataKeys.REWARDS: reward},
            batch_size=self.num_envs,
            device=self.device,
        )
import torch
import torch.nn as nn
from tensordict import TensorDict
from torchrl.data import CompositeSpec, TensorSpec, UnboundedContinuousTensorSpec
from rlstack import RecurrentModel
from rlstack.data import DataKeys
from rlstack.nn import MLP, get_activation
# Limits of the default float dtype; FINFO.min/.max act as -inf/+inf
# stand-ins when building additive action masks.
FINFO = torch.finfo()
class LazyLemur(RecurrentModel):
    """An LSTM model that maintains states across horizons.

    Args:
        observation_spec: Environment observation spec.
        action_spec: Environment action spec.
        invested_embed_dim: The size of the embedding to create for the
            environment observation indicating whether the policy is
            already invested in the asset.
        hidden_size: Hidden neurons within the LSTM.
        num_layers: Number of LSTM cells.
        hiddens: Hidden neurons for each layer in the feature and value
            function models.
        activation_fn: Activation function used by all components.
    """

    def __init__(
        self,
        observation_spec: TensorSpec,
        action_spec: TensorSpec,
        /,
        invested_embed_dim: int = 2,
        hidden_size: int = 64,
        num_layers: int = 1,
        hiddens: tuple[int, ...] = (64, 64),
        activation_fn: str = "relu",
    ) -> None:
        # Forward ALL constructor kwargs to the base class, matching the other
        # models in this example; previously `hiddens` and `activation_fn`
        # were omitted, leaving any config the base class records incomplete.
        super().__init__(
            observation_spec,
            action_spec,
            invested_embed_dim=invested_embed_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            hiddens=hiddens,
            activation_fn=activation_fn,
        )
        # Recurrent state carried between forward passes: one hidden and one
        # cell state per LSTM layer.
        self.state_spec = CompositeSpec(
            {
                DataKeys.HIDDEN_STATES: UnboundedContinuousTensorSpec(
                    shape=torch.Size([num_layers, hidden_size]),
                    device=action_spec.device,
                ),
                DataKeys.CELL_STATES: UnboundedContinuousTensorSpec(
                    shape=torch.Size([num_layers, hidden_size]),
                    device=action_spec.device,
                ),
            }
        )
        self.invested_embedding = nn.Embedding(2, invested_embed_dim)
        # Per-timestep input: invested embedding + two scalar log-change
        # features.
        self.lstm = nn.LSTM(
            invested_embed_dim + 2,
            hidden_size,
            num_layers=num_layers,
            batch_first=True,
        )
        self.feature_model = nn.Sequential(
            MLP(
                hidden_size,
                hiddens,
                activation_fn=activation_fn,
                norm_layer=nn.BatchNorm1d,
            ),
            get_activation(activation_fn),
        )
        # Near-zero init on the logits head keeps the initial policy close to
        # uniform over valid actions.
        feature_head = nn.Linear(hiddens[-1], 3)
        nn.init.uniform_(feature_head.weight, a=-1e-3, b=1e-3)
        nn.init.zeros_(feature_head.bias)
        self.feature_model.append(feature_head)
        self.vf_model = nn.Sequential(
            MLP(
                hidden_size,
                hiddens,
                activation_fn=activation_fn,
                norm_layer=nn.BatchNorm1d,
            ),
            get_activation(activation_fn),
            nn.Linear(hiddens[-1], 1),
        )
        # Cached value-function output from the most recent forward pass.
        self._value = None

    def forward(
        self, batch: TensorDict, states: TensorDict, /
    ) -> tuple[TensorDict, TensorDict]:
        """Run the LSTM over the batch; return masked logits and new states."""
        B, T = batch.shape[:2]
        x_invested = self.invested_embedding(
            batch[DataKeys.OBS, "invested"].flatten()
        ).reshape(B, T, -1)
        x = torch.cat(
            [
                x_invested,
                batch[DataKeys.OBS, "LOG_CHANGE(price, position)"],
                batch[DataKeys.OBS, "LOG_CHANGE(price)"],
            ],
            dim=-1,
        )
        # Incoming states are batch-first; torch's LSTM expects
        # (num_layers, B, hidden_size).
        h_0 = states[DataKeys.HIDDEN_STATES][:, 0, ...].permute(1, 0, 2).contiguous()
        c_0 = states[DataKeys.CELL_STATES][:, 0, ...].permute(1, 0, 2).contiguous()
        latents, (h_n, c_n) = self.lstm(x, (h_0, c_0))
        latents = latents.reshape(B * T, -1)
        features = self.feature_model(latents).reshape(-1, 1, 3)
        # Convert the 0/1 action mask to additive 0/-inf logit penalties.
        inf_mask = torch.clamp(
            torch.log(batch[DataKeys.OBS, "action_mask"]), min=FINFO.min, max=FINFO.max
        ).reshape(-1, 1, 3)
        masked_logits = features + inf_mask
        # Compute the value estimate exactly once. The original computed this
        # twice, wasting a forward pass and double-updating the BatchNorm
        # running statistics in training mode.
        self._value = self.vf_model(latents).reshape(-1, 1)
        return TensorDict(
            {"logits": masked_logits},
            batch_size=masked_logits.size(0),
            device=batch.device,
        ), TensorDict(
            {
                DataKeys.HIDDEN_STATES: h_n.permute(1, 0, 2),
                DataKeys.CELL_STATES: c_n.permute(1, 0, 2),
            },
            batch_size=batch.size(0),
        )

    def value_function(self) -> torch.Tensor:
        """Value estimate from the most recent :meth:`forward` call."""
        assert self._value is not None
        return self._value
import torch
import torch.nn as nn
from tensordict import TensorDict
from torchrl.data import TensorSpec
from rlstack import Model
from rlstack.data import DataKeys
from rlstack.nn import (
MLP,
SelfAttention,
SelfAttentionStack,
get_activation,
masked_avg,
)
from rlstack.views import ViewRequirement
# Limits of the default float dtype; FINFO.min/.max act as -inf/+inf
# stand-ins when building additive action masks.
FINFO = torch.finfo()
class AttentiveAlpaca(Model):
    """A model that applies self-attention to historical price changes
    to eventually construct logits used for sampling actions.

    The model eventually reduces the environment observation into a 1D
    tensor that's fed into feature and value function models that produce
    the model's outputs. The feature model and value function
    model share parameters since they share the same input from the
    feature vector created partly from the self-attention mechanism.

    Args:
        observation_spec: Environment observation spec.
        action_spec: Environment action spec.
        invested_embed_dim: The size of the embedding to create for the
            environment observation indicating whether the policy is
            already invested in the asset.
        price_embed_dim: The size of the embedding for historical price
            changes.
        seq_len: Number of historical price changes to use for the
            self-attention mechanism. This should always be less than
            the environment horizon used during training.
        num_heads: Number of attention heads to use per self-attention
            layer.
        num_layers: Number of self-attention layers to use.
        hiddens: Hidden neurons for each layer in the feature and value
            function models. The first element is also used as the number
            of hidden neurons in the self-attention mechanism.
        activation_fn: Activation function used by all components.
    """

    def __init__(
        self,
        observation_spec: TensorSpec,
        action_spec: TensorSpec,
        /,
        invested_embed_dim: int = 2,
        price_embed_dim: int = 8,
        seq_len: int = 4,
        num_heads: int = 4,
        num_layers: int = 2,
        hiddens: tuple[int, ...] = (64, 64),
        activation_fn: str = "relu",
    ) -> None:
        super().__init__(
            observation_spec,
            action_spec,
            invested_embed_dim=invested_embed_dim,
            price_embed_dim=price_embed_dim,
            seq_len=seq_len,
            num_heads=num_heads,
            num_layers=num_layers,
            hiddens=hiddens,
            activation_fn=activation_fn,
        )
        # Request the last `seq_len` price changes in addition to the default
        # most-recent-observation view.
        self.view_requirements[(DataKeys.OBS, "LOG_CHANGE(price)")] = ViewRequirement(
            shift=seq_len
        )
        self.invested_embedding = nn.Embedding(2, invested_embed_dim)
        self.price_embedding = nn.Linear(1, price_embed_dim)
        self.price_attention = SelfAttentionStack(
            SelfAttention(
                price_embed_dim,
                num_heads=num_heads,
                hidden_dim=hiddens[0],
                activation_fn=activation_fn,
                skip_kind="residual",
            ),
            num_layers,
            share_parameters=True,
        )
        self.feature_model = nn.Sequential(
            MLP(
                invested_embed_dim + 1 + price_embed_dim,
                hiddens,
                activation_fn=activation_fn,
                norm_layer=nn.BatchNorm1d,
            ),
            get_activation(activation_fn),
        )
        # Near-zero init on the logits head keeps the initial policy close to
        # uniform over valid actions.
        feature_head = nn.Linear(hiddens[-1], 3)
        nn.init.uniform_(feature_head.weight, a=-1e-3, b=1e-3)
        nn.init.zeros_(feature_head.bias)
        self.feature_model.append(feature_head)
        self.vf_model = nn.Sequential(
            MLP(
                invested_embed_dim + 1 + price_embed_dim,
                hiddens,
                activation_fn=activation_fn,
                norm_layer=nn.BatchNorm1d,
            ),
            get_activation(activation_fn),
            nn.Linear(hiddens[-1], 1),
        )
        # Cached value-function output from the most recent forward pass.
        self._value = None

    def forward(self, batch: TensorDict, /) -> TensorDict:
        """Attend over the price history and return masked action logits."""
        x_invested = self.invested_embedding(batch[DataKeys.OBS, "invested"].flatten())
        x_price = self.price_embedding(
            batch[DataKeys.OBS, "LOG_CHANGE(price)", DataKeys.INPUTS]
        )
        # Padding mask keeps attention off timesteps before the episode start.
        x_price = self.price_attention(
            x_price,
            key_padding_mask=batch[
                DataKeys.OBS, "LOG_CHANGE(price)", DataKeys.PADDING_MASK
            ],
        )
        # Average the attended sequence over valid (non-padded) steps only.
        x_price = masked_avg(
            x_price,
            mask=~batch[DataKeys.OBS, "LOG_CHANGE(price)", DataKeys.PADDING_MASK],
            dim=1,
            keepdim=False,
        )
        x = torch.cat(
            [
                x_invested,
                batch[DataKeys.OBS, "LOG_CHANGE(price, position)"],
                x_price,
            ],
            dim=-1,
        )
        features = self.feature_model(x).reshape(-1, 1, 3)
        # Convert the 0/1 action mask to additive 0/-inf logit penalties.
        inf_mask = torch.clamp(
            torch.log(batch[DataKeys.OBS, "action_mask"]), min=FINFO.min, max=FINFO.max
        ).reshape(-1, 1, 3)
        masked_logits = features + inf_mask
        self._value = self.vf_model(x)
        return TensorDict(
            {"logits": masked_logits},
            batch_size=batch.batch_size,
            device=batch.device,
        )

    def value_function(self) -> torch.Tensor:
        """Value estimate from the most recent :meth:`forward` call."""
        assert self._value is not None
        return self._value
import torch
import torch.nn as nn
from tensordict import TensorDict
from torchrl.data import TensorSpec
from rlstack import Model
from rlstack.data import DataKeys
from rlstack.nn import MLP, get_activation
from rlstack.views import ViewRequirement
# Limits of the default float dtype; FINFO.min/.max act as -inf/+inf
# stand-ins when building additive action masks.
FINFO = torch.finfo()
class MischievousMule(Model):
    """A model that aggregates historical price changes at different
    intervals to form a latent vector that's fed into other model
    components.

    The feature model and value function model share parameters since
    they share the same input from the feature vector created partly
    from the aggregation mechanism.

    Args:
        observation_spec: Environment observation spec.
        action_spec: Environment action spec.
        invested_embed_dim: The size of the embedding to create for the
            environment observation indicating whether the policy is
            already invested in the asset.
        seq_len: Number of historical price changes to use for the
            aggregation mechanism. Must be a multiple of 4 and should
            always be less than the environment horizon used during
            training.
        hiddens: Hidden neurons for each layer in the feature and value
            function models.
        activation_fn: Activation function used by all components.

    Raises:
        ValueError: If ``seq_len`` is not a multiple of 4.
    """

    def __init__(
        self,
        observation_spec: TensorSpec,
        action_spec: TensorSpec,
        /,
        invested_embed_dim: int = 2,
        seq_len: int = 4,
        hiddens: tuple[int, ...] = (128, 128),
        activation_fn: str = "relu",
    ) -> None:
        super().__init__(
            observation_spec,
            action_spec,
            invested_embed_dim=invested_embed_dim,
            seq_len=seq_len,
            hiddens=hiddens,
            activation_fn=activation_fn,
        )
        # Validate with a real exception rather than `assert` (asserts are
        # stripped under `python -O`). The original message also said
        # "factor of 4" where "multiple of 4" is what is checked.
        if seq_len % 4:
            raise ValueError("`seq_len` must be a multiple of 4 for this model.")
        self.seq_len = seq_len
        # Feedforward models use a default view requirement that passes
        # only the most recent observation to the model for inference.
        # We specify a view requirement on historical price changes by
        # adding a nested key to the default view requirement. This
        # keeps the default view requirement while also allowing the model
        # to use historical price changes as additional inputs.
        self.view_requirements[(DataKeys.OBS, "LOG_CHANGE(price)")] = ViewRequirement(
            shift=seq_len
        )
        self.invested_embedding = nn.Embedding(2, invested_embed_dim)
        # Input features: invested embedding + LOG_CHANGE(price, position)
        # + four aggregated price-change windows = invested_embed_dim + 5.
        self.feature_model = nn.Sequential(
            MLP(
                invested_embed_dim + 5,
                hiddens,
                activation_fn=activation_fn,
                norm_layer=nn.BatchNorm1d,
            ),
            get_activation(activation_fn),
        )
        # Near-zero init on the logits head keeps the initial policy close to
        # uniform over valid actions.
        feature_head = nn.Linear(hiddens[-1], 3)
        nn.init.uniform_(feature_head.weight, a=-1e-3, b=1e-3)
        nn.init.zeros_(feature_head.bias)
        self.feature_model.append(feature_head)
        self.vf_model = nn.Sequential(
            MLP(
                invested_embed_dim + 5,
                hiddens,
                activation_fn=activation_fn,
                norm_layer=nn.BatchNorm1d,
            ),
            get_activation(activation_fn),
            nn.Linear(hiddens[-1], 1),
        )
        # Cached value-function output from the most recent forward pass.
        self._value = None

    def forward(self, batch: TensorDict, /) -> TensorDict:
        """Aggregate the price history and return masked action logits."""
        x_invested = self.invested_embedding(batch[DataKeys.OBS, "invested"].flatten())
        x_price = batch[DataKeys.OBS, "LOG_CHANGE(price)", DataKeys.INPUTS]
        # Sum log changes over the first quarter, first half, last half, and
        # last quarter of the window (sums of log changes = log returns over
        # those sub-windows).
        x_price = torch.cat(
            [
                torch.sum(x_price[:, : (self.seq_len // 4), ...], dim=1),
                torch.sum(x_price[:, : (self.seq_len // 2), ...], dim=1),
                torch.sum(x_price[:, -(self.seq_len // 2) :, ...], dim=1),
                torch.sum(x_price[:, -(self.seq_len // 4) :, ...], dim=1),
            ],
            dim=-1,
        )
        x = torch.cat(
            [
                x_invested,
                batch[DataKeys.OBS, "LOG_CHANGE(price, position)"],
                x_price,
            ],
            dim=-1,
        )
        features = self.feature_model(x).reshape(-1, 1, 3)
        # Convert the 0/1 action mask to additive 0/-inf logit penalties.
        inf_mask = torch.clamp(
            torch.log(batch[DataKeys.OBS, "action_mask"]), min=FINFO.min, max=FINFO.max
        ).reshape(-1, 1, 3)
        masked_logits = features + inf_mask
        self._value = self.vf_model(x)
        return TensorDict(
            {"logits": masked_logits},
            batch_size=batch.batch_size,
            device=batch.device,
        )

    def value_function(self) -> torch.Tensor:
        """Value estimate from the most recent :meth:`forward` call."""
        assert self._value is not None
        return self._value
from ibapi import order_condition
from ibapi.object_implem import Object
from ibapi.utils import * # @UnusedWildImport
from ibapi.server_versions import * # @UnusedWildImport
from ibapi.order import OrderComboLeg
from ibapi.contract import ComboLeg
from ibapi.tag_value import TagValue
from ibapi.wrapper import DeltaNeutralContract
from ibapi.softdollartier import SoftDollarTier
logger = logging.getLogger(__name__)
class OrderDecoder(Object):
    """Decodes the wire fields of an open-order style message into the given
    ``contract``, ``order`` and ``orderState`` objects.

    Each ``decode*`` method consumes its field(s), in protocol order, from the
    ``fields`` iterator it is handed. Many decoders are gated on
    ``self.version`` (message version) or ``self.serverVersion`` because
    older servers/messages omit newer fields.
    """
    def __init__(self, contract, order, orderState, version, serverVersion):
        # Target objects populated in place by the decode* methods.
        self.contract = contract
        self.order = order
        self.orderState = orderState
        # Message and server versions gate which optional fields are present.
        self.version = version
        self.serverVersion = serverVersion
        # Presumably registers/orders the decode handlers — defined outside
        # this excerpt; confirm in the full class.
        self.discoverParams()
    # --- Per-field decoders, in wire order ---------------------------------
    # decode(type, fields[, SHOW_UNSET]) reads one field; SHOW_UNSET keeps the
    # protocol's empty/"unset" sentinel as the API's UNSET_* constants instead
    # of coercing it to 0 — TODO confirm against ibapi.utils.decode.
    def decodeOrderId(self, fields):
        self.order.orderId = decode(int, fields)
    def decodeContractFields(self, fields):
        self.contract.conId = decode(int, fields)
        self.contract.symbol = decode(str, fields)
        self.contract.secType = decode(str, fields)
        self.contract.lastTradeDateOrContractMonth = decode(str, fields)
        self.contract.strike = decode(float, fields)
        self.contract.right = decode(str, fields)
        if self.version >= 32:
            # multiplier and tradingClass were added in message version 32.
            self.contract.multiplier = decode(str, fields)
        self.contract.exchange = decode(str, fields)
        self.contract.currency = decode(str, fields)
        self.contract.localSymbol = decode(str, fields)
        if self.version >= 32:
            self.contract.tradingClass = decode(str, fields)
    def decodeAction(self, fields):
        self.order.action = decode(str, fields)
    def decodeTotalQuantity(self, fields):
        # Newer servers support fractional share quantities.
        if self.serverVersion >= MIN_SERVER_VER_FRACTIONAL_POSITIONS:
            self.order.totalQuantity = decode(float, fields)
        else:
            self.order.totalQuantity = decode(int, fields)
    def decodeOrderType(self, fields):
        self.order.orderType = decode(str, fields)
    def decodeLmtPrice(self, fields):
        if self.version < 29:
            self.order.lmtPrice = decode(float, fields)
        else:
            self.order.lmtPrice = decode(float, fields, SHOW_UNSET)
    def decodeAuxPrice(self, fields):
        if self.version < 30:
            self.order.auxPrice = decode(float, fields)
        else:
            self.order.auxPrice = decode(float, fields, SHOW_UNSET)
    def decodeTIF(self, fields):
        self.order.tif = decode(str, fields)
    def decodeOcaGroup(self, fields):
        self.order.ocaGroup = decode(str, fields)
    def decodeAccount(self, fields):
        self.order.account = decode(str, fields)
    def decodeOpenClose(self, fields):
        self.order.openClose = decode(str, fields)
    def decodeOrigin(self, fields):
        self.order.origin = decode(int, fields)
    def decodeOrderRef(self, fields):
        self.order.orderRef = decode(str, fields)
    def decodeClientId(self, fields):
        self.order.clientId = decode(int, fields)
    def decodePermId(self, fields):
        self.order.permId = decode(int, fields)
    def decodeOutsideRth(self, fields):
        self.order.outsideRth = decode(bool, fields)
    def decodeHidden(self, fields):
        self.order.hidden = decode(bool, fields)
    def decodeDiscretionaryAmt(self, fields):
        self.order.discretionaryAmt = decode(float, fields)
    def decodeGoodAfterTime(self, fields):
        self.order.goodAfterTime = decode(str, fields)
    def skipSharesAllocation(self, fields):
        # Field still present on the wire but no longer used; consume and drop.
        _sharesAllocation = decode(str, fields)  # deprecated
    def decodeFAParams(self, fields):
        # Financial Advisor allocation parameters.
        self.order.faGroup = decode(str, fields)
        self.order.faMethod = decode(str, fields)
        self.order.faPercentage = decode(str, fields)
        self.order.faProfile = decode(str, fields)
    def decodeModelCode(self, fields):
        if self.serverVersion >= MIN_SERVER_VER_MODELS_SUPPORT:
            self.order.modelCode = decode(str, fields)
    def decodeGoodTillDate(self, fields):
        self.order.goodTillDate = decode(str, fields)
    def decodeRule80A(self, fields):
        self.order.rule80A = decode(str, fields)
    def decodePercentOffset(self, fields):
        self.order.percentOffset = decode(float, fields, SHOW_UNSET)
    def decodeSettlingFirm(self, fields):
        self.order.settlingFirm = decode(str, fields)
    def decodeShortSaleParams(self, fields):
        self.order.shortSaleSlot = decode(int, fields)
        self.order.designatedLocation = decode(str, fields)
        if self.serverVersion == MIN_SERVER_VER_SSHORTX_OLD:
            # Old SSHORTX servers send a placeholder here; consume and ignore.
            decode(int, fields)
        elif self.version >= 23:
            self.order.exemptCode = decode(int, fields)
    def decodeAuctionStrategy(self, fields):
        self.order.auctionStrategy = decode(int, fields)
    def decodeBoxOrderParams(self, fields):
        self.order.startingPrice = decode(float, fields, SHOW_UNSET)
        self.order.stockRefPrice = decode(float, fields, SHOW_UNSET)
        self.order.delta = decode(float, fields, SHOW_UNSET)
    def decodePegToStkOrVolOrderParams(self, fields):
        self.order.stockRangeLower = decode(float, fields, SHOW_UNSET)
        self.order.stockRangeUpper = decode(float, fields, SHOW_UNSET)
    def decodeDisplaySize(self, fields):
        self.order.displaySize = decode(int, fields)
    def decodeBlockOrder(self, fields):
        self.order.blockOrder = decode(bool, fields)
    def decodeSweepToFill(self, fields):
        self.order.sweepToFill = decode(bool, fields)
    def decodeAllOrNone(self, fields):
        self.order.allOrNone = decode(bool, fields)
    def decodeMinQty(self, fields):
        self.order.minQty = decode(int, fields, SHOW_UNSET)
    def decodeOcaType(self, fields):
        self.order.ocaType = decode(int, fields)
    def decodeETradeOnly(self, fields):
        self.order.eTradeOnly = decode(bool, fields)
    def decodeFirmQuoteOnly(self, fields):
        self.order.firmQuoteOnly = decode(bool, fields)
    def decodeNbboPriceCap(self, fields):
        self.order.nbboPriceCap = decode(float, fields, SHOW_UNSET)
    def decodeParentId(self, fields):
        self.order.parentId = decode(int, fields)
    def decodeTriggerMethod(self, fields):
        self.order.triggerMethod = decode(int, fields)
    def decodeVolOrderParams(self, fields, readOpenOrderAttribs):
        # Volatility order parameters; `readOpenOrderAttribs` distinguishes
        # open-order messages (which carry extra delta-neutral attributes)
        # from other message types reusing this decoder.
        self.order.volatility = decode(float, fields, SHOW_UNSET)
        self.order.volatilityType = decode(int, fields)
        self.order.deltaNeutralOrderType = decode(str, fields)
        self.order.deltaNeutralAuxPrice = decode(float, fields, SHOW_UNSET)
        # The remaining delta-neutral fields are only on the wire when a
        # delta-neutral order type is set.
        if self.version >= 27 and self.order.deltaNeutralOrderType:
            self.order.deltaNeutralConId = decode(int, fields)
            if readOpenOrderAttribs:
                self.order.deltaNeutralSettlingFirm = decode(str, fields)
                self.order.deltaNeutralClearingAccount = decode(str, fields)
                self.order.deltaNeutralClearingIntent = decode(str, fields)
        if self.version >= 31 and self.order.deltaNeutralOrderType:
            if readOpenOrderAttribs:
                self.order.deltaNeutralOpenClose = decode(str, fields)
            self.order.deltaNeutralShortSale = decode(bool, fields)
            self.order.deltaNeutralShortSaleSlot = decode(int, fields)
            self.order.deltaNeutralDesignatedLocation = decode(str, fields)
        self.order.continuousUpdate = decode(bool, fields)
        self.order.referencePriceType = decode(int, fields)
    def decodeTrailParams(self, fields):
        self.order.trailStopPrice = decode(float, fields, SHOW_UNSET)
        if self.version >= 30:
            self.order.trailingPercent = decode(float, fields, SHOW_UNSET)
    def decodeBasisPoints(self, fields):
        self.order.basisPoints = decode(float, fields, SHOW_UNSET)
        self.order.basisPointsType = decode(int, fields, SHOW_UNSET)
    def decodeComboLegs(self, fields):
        # Combo (multi-leg) contract description plus per-leg order prices.
        self.contract.comboLegsDescrip = decode(str, fields)
        if self.version >= 29:
            comboLegsCount = decode(int, fields)
            if comboLegsCount > 0:
                self.contract.comboLegs = []
                for _ in range(comboLegsCount):
                    comboLeg = ComboLeg()
                    comboLeg.conId = decode(int, fields)
                    comboLeg.ratio = decode(int, fields)
                    comboLeg.action = decode(str, fields)
                    comboLeg.exchange = decode(str, fields)
                    comboLeg.openClose = decode(int, fields)
                    comboLeg.shortSaleSlot = decode(int, fields)
                    comboLeg.designatedLocation = decode(str, fields)
                    comboLeg.exemptCode = decode(int, fields)
                    self.contract.comboLegs.append(comboLeg)
            orderComboLegsCount = decode(int, fields)
            if orderComboLegsCount > 0:
                self.order.orderComboLegs = []
                for _ in range(orderComboLegsCount):
                    orderComboLeg = OrderComboLeg()
                    orderComboLeg.price = decode(float, fields, SHOW_UNSET)
                    self.order.orderComboLegs.append(orderComboLeg)
    def decodeSmartComboRoutingParams(self, fields):
        # Tag/value pairs for SMART combo routing.
        if self.version >= 26:
            smartComboRoutingParamsCount = decode(int, fields)
            if smartComboRoutingParamsCount > 0:
                self.order.smartComboRoutingParams = []
                for _ in range(smartComboRoutingParamsCount):
                    tagValue = TagValue()
                    tagValue.tag = decode(str, fields)
                    tagValue.value = decode(str, fields)
                    self.order.smartComboRoutingParams.append(tagValue)
    def decodeScaleOrderParams(self, fields):
        if self.version >= 20:
            self.order.scaleInitLevelSize = decode(int, fields, SHOW_UNSET)
            self.order.scaleSubsLevelSize = decode(int, fields, SHOW_UNSET)
        else:
            # Pre-v20 wire layout sent an extra (unsupported) component count.
            self.order.notSuppScaleNumComponents = decode(int, fields, SHOW_UNSET)
            self.order.scaleInitLevelSize = decode(int, fields, SHOW_UNSET)
        self.order.scalePriceIncrement = decode(float, fields, SHOW_UNSET)
        # The detailed scale parameters follow only for a set, positive
        # price increment.
        if self.version >= 28 and self.order.scalePriceIncrement != UNSET_DOUBLE \
            and self.order.scalePriceIncrement > 0.0:
            self.order.scalePriceAdjustValue = decode(float, fields, SHOW_UNSET)
            self.order.scalePriceAdjustInterval = decode(int, fields, SHOW_UNSET)
            self.order.scaleProfitOffset = decode(float, fields, SHOW_UNSET)
            self.order.scaleAutoReset = decode(bool, fields)
            self.order.scaleInitPosition = decode(int, fields, SHOW_UNSET)
            self.order.scaleInitFillQty = decode(int, fields, SHOW_UNSET)
            self.order.scaleRandomPercent = decode(bool, fields)
def decodeHedgeParams(self, fields):
if self.version >= 24:
self.order.hedgeType = decode(str, fields)
if self.order.hedgeType:
self.order.hedgeParam = decode(str, fields)
def decodeOptOutSmartRouting(self, fields):
if self.version >= 25:
self.order.optOutSmartRouting = decode(bool, fields)
def decodeClearingParams(self, fields):
self.order.clearingAccount = decode(str, fields)
self.order.clearingIntent = decode(str, fields)
def decodeNotHeld(self, fields):
if self.version >= 22:
self.order.notHeld = decode(bool, fields)
def decodeDeltaNeutral(self, fields):
if self.version >= 20:
deltaNeutralContractPresent = decode(bool, fields)
if deltaNeutralContractPresent:
self.contract.deltaNeutralContract = DeltaNeutralContract()
self.contract.deltaNeutralContract.conId = decode(int, fields)
self.contract.deltaNeutralContract.delta = decode(float, fields)
self.contract.deltaNeutralContract.price = decode(float, fields)
def decodeAlgoParams(self, fields):
if self.version >= 21:
self.order.algoStrategy = decode(str, fields)
if self.order.algoStrategy:
algoParamsCount = decode(int, fields)
if algoParamsCount > 0:
self.order.algoParams = []
for _ in range(algoParamsCount):
tagValue = TagValue()
tagValue.tag = decode(str, fields)
tagValue.value = decode(str, fields)
self.order.algoParams.append(tagValue)
def decodeSolicited(self, fields):
if self.version >= 33:
self.order.solicited = decode(bool, fields)
def decodeOrderStatus(self, fields):
self.orderState.status = decode(str, fields)
def decodeWhatIfInfoAndCommission(self, fields):
    """Read the what-if flag, order status, margin impact and commission fields.

    Fields are consumed strictly in wire order; do not reorder the decodes.
    """
    self.order.whatIf = decode(bool, fields)
    OrderDecoder.decodeOrderStatus(self, fields)
    # The "Before"/"Change" margin fields were added later and are only
    # present when the server supports the extended what-if fields.
    if self.serverVersion >= MIN_SERVER_VER_WHAT_IF_EXT_FIELDS:
        self.orderState.initMarginBefore = decode(str, fields)
        self.orderState.maintMarginBefore = decode(str, fields)
        self.orderState.equityWithLoanBefore = decode(str, fields)
        self.orderState.initMarginChange = decode(str, fields)
        self.orderState.maintMarginChange = decode(str, fields)
        self.orderState.equityWithLoanChange = decode(str, fields)
    self.orderState.initMarginAfter = decode(str, fields)
    self.orderState.maintMarginAfter = decode(str, fields)
    self.orderState.equityWithLoanAfter = decode(str, fields)
    # Commission values may be "unset" sentinels; SHOW_UNSET preserves that.
    self.orderState.commission = decode(float, fields, SHOW_UNSET)
    self.orderState.minCommission = decode(float, fields, SHOW_UNSET)
    self.orderState.maxCommission = decode(float, fields, SHOW_UNSET)
    self.orderState.commissionCurrency = decode(str, fields)
    self.orderState.warningText = decode(str, fields)
def decodeVolRandomizeFlags(self, fields):
    """Read the randomize-size and randomize-price flags (message version >= 34)."""
    if self.version < 34:
        return
    self.order.randomizeSize = decode(bool, fields)
    self.order.randomizePrice = decode(bool, fields)
def decodePegToBenchParams(self, fields):
    """Read pegged-to-benchmark parameters for PEG BENCH orders."""
    if self.serverVersion < MIN_SERVER_VER_PEGGED_TO_BENCHMARK:
        return
    # These fields are only sent for PEG BENCH order types.
    if self.order.orderType != "PEG BENCH":
        return
    self.order.referenceContractId = decode(int, fields)
    self.order.isPeggedChangeAmountDecrease = decode(bool, fields)
    self.order.peggedChangeAmount = decode(float, fields)
    self.order.referenceChangeAmount = decode(float, fields)
    self.order.referenceExchangeId = decode(str, fields)
def decodeConditions(self, fields):
    """Read the order's condition list plus its ignore-RTH/cancel flags."""
    if self.serverVersion < MIN_SERVER_VER_PEGGED_TO_BENCHMARK:
        return
    condition_count = decode(int, fields)
    if condition_count <= 0:
        return
    self.order.conditions = []
    for _ in range(condition_count):
        # Each condition is prefixed by a type code; order_condition.Create
        # picks the concrete subclass, which then decodes its own fields.
        cond = order_condition.Create(decode(int, fields))
        cond.decode(fields)
        self.order.conditions.append(cond)
    self.order.conditionsIgnoreRth = decode(bool, fields)
    self.order.conditionsCancelOrder = decode(bool, fields)
def decodeAdjustedOrderParams(self, fields):
    """Read the adjusted (adjustable-stop) order parameters."""
    if self.serverVersion < MIN_SERVER_VER_PEGGED_TO_BENCHMARK:
        return
    self.order.adjustedOrderType = decode(str, fields)
    self.order.triggerPrice = decode(float, fields)
    # Trail stop price and limit price offset sit in the middle of this
    # field group on the wire, so they are decoded here, in place.
    OrderDecoder.decodeStopPriceAndLmtPriceOffset(self, fields)
    self.order.adjustedStopPrice = decode(float, fields)
    self.order.adjustedStopLimitPrice = decode(float, fields)
    self.order.adjustedTrailingAmount = decode(float, fields)
    self.order.adjustableTrailingUnit = decode(int, fields)
def decodeStopPriceAndLmtPriceOffset(self, fields):
    """Read the trail stop price and limit price offset (wire order)."""
    self.order.trailStopPrice = decode(float, fields)
    self.order.lmtPriceOffset = decode(float, fields)
def decodeSoftDollarTier(self, fields):
    """Read name/value/display-name and build the order's SoftDollarTier."""
    if self.serverVersion < MIN_SERVER_VER_SOFT_DOLLAR_TIER:
        return
    tier_name = decode(str, fields)
    tier_value = decode(str, fields)
    tier_display_name = decode(str, fields)
    self.order.softDollarTier = SoftDollarTier(tier_name, tier_value, tier_display_name)
def decodeCashQty(self, fields):
    """Read the order's cash quantity (server-version gated)."""
    if self.serverVersion >= MIN_SERVER_VER_CASH_QTY:
        # decode(float, fields) — spacing normalized for consistency
        # with every other decode() call in this module.
        self.order.cashQty = decode(float, fields)
def decodeDontUseAutoPriceForHedge(self, fields):
    """Read the don't-use-auto-price-for-hedge flag (server-version gated)."""
    if self.serverVersion >= MIN_SERVER_VER_AUTO_PRICE_FOR_HEDGE:
        # decode(bool, fields) — spacing normalized for consistency
        # with every other decode() call in this module.
        self.order.dontUseAutoPriceForHedge = decode(bool, fields)
def decodeIsOmsContainers(self, fields):
    """Read the OMS-container flag (server-version gated)."""
    if self.serverVersion < MIN_SERVER_VER_ORDER_CONTAINER:
        return
    self.order.isOmsContainer = decode(bool, fields)
def decodeDiscretionaryUpToLimitPrice(self, fields):
    """Read the discretionary-up-to-limit-price flag (server-version gated)."""
    if self.serverVersion < MIN_SERVER_VER_D_PEG_ORDERS:
        return
    self.order.discretionaryUpToLimitPrice = decode(bool, fields)
def decodeAutoCancelDate(self, fields):
    """Read the order's auto-cancel date."""
    self.order.autoCancelDate = decode(str, fields)
def decodeFilledQuantity(self, fields):
    """Read the order's filled quantity."""
    self.order.filledQuantity = decode(float, fields)
def decodeRefFuturesConId(self, fields):
    """Read the reference futures contract id."""
    self.order.refFuturesConId = decode(int, fields)
def decodeAutoCancelParent(self, fields):
    """Read the auto-cancel-parent flag."""
    self.order.autoCancelParent = decode(bool, fields)
def decodeShareholder(self, fields):
    """Read the shareholder field."""
    self.order.shareholder = decode(str, fields)
def decodeImbalanceOnly(self, fields):
    """Read the imbalance-only flag."""
    self.order.imbalanceOnly = decode(bool, fields)
def decodeRouteMarketableToBbo(self, fields):
    """Read the route-marketable-to-BBO flag."""
    self.order.routeMarketableToBbo = decode(bool, fields)
def decodeParentPermId(self, fields):
    """Read the parent order's permanent id."""
    self.order.parentPermId = decode(int, fields)
def decodeCompletedTime(self, fields):
    """Read the completed time into the order state."""
    self.orderState.completedTime = decode(str, fields)
def decodeCompletedStatus(self, fields):
    """Read the completed status into the order state."""
    self.orderState.completedStatus = decode(str, fields)
def decodeUsePriceMgmtAlgo(self, fields):
    """Read the use-price-management-algo flag (server-version gated)."""
    if self.serverVersion < MIN_SERVER_VER_PRICE_MGMT_ALGO:
        return
    self.order.usePriceMgmtAlgo = decode(bool, fields)
This is the interface that will need to be overloaded by the customer so
that his/her code can receive info from the TWS/IBGW.
NOTE: the methods use type annotations to describe the types of the arguments.
This is used by the Decoder to dynamically and automatically decode the
received message into the given EWrapper method. This method can only be
used for the most simple messages, but it's still a huge helper.
Also this method currently automatically decodes a 'version' field in the
message. However, having a 'version' field is a legacy thing; newer
messages use the 'unified version': the agreed-upon min version of both
server and client.
"""
import logging
from ibapi.common import * # @UnusedWildImport
from ibapi.utils import * # @UnusedWildImport
from ibapi.contract import (Contract, ContractDetails, DeltaNeutralContract)
from ibapi.order import Order
from ibapi.order_state import OrderState
from ibapi.execution import Execution
from ibapi.ticktype import * # @UnusedWildImport
from ibapi.commission_report import CommissionReport
logger = logging.getLogger(__name__)
class EWrapper:
    """Callback interface that receives responses from TWS/IBGW.

    Override the methods you care about in a subclass; the Decoder invokes
    them as messages arrive. Every default implementation simply logs the
    answer via logAnswer().
    """

    def __init__(self):
        pass

    def logAnswer(self, fnName, fnParams):
        """Log a received callback (name + params), dropping 'self'."""
        if logger.isEnabledFor(logging.INFO):
            if 'self' in fnParams:
                prms = dict(fnParams)
                del prms['self']
            else:
                prms = fnParams
            logger.info("ANSWER %s %s", fnName, prms)

    def error(self, reqId: TickerId, errorCode: int, errorString: str):
        """This event is called when there is an error with the
        communication or when TWS wants to send a message to the client."""
        self.logAnswer(current_fn_name(), vars())
        logger.error("ERROR %s %s %s", reqId, errorCode, errorString)

    def winError(self, text: str, lastError: int):
        """Error reported by the TWS ActiveX control (Windows only)."""
        self.logAnswer(current_fn_name(), vars())

    def connectAck(self):
        """ callback signifying completion of successful connection """
        self.logAnswer(current_fn_name(), vars())

    def marketDataType(self, reqId: TickerId, marketDataType: int):
        """TWS sends a marketDataType(type) callback to the API, where
        type is set to Frozen or RealTime, to announce that market data has been
        switched between frozen and real-time. This notification occurs only
        when market data switches between real-time and frozen. The
        marketDataType( ) callback accepts a reqId parameter and is sent per
        every subscription because different contracts can generally trade on a
        different schedule."""
        self.logAnswer(current_fn_name(), vars())

    def tickPrice(self, reqId: TickerId, tickType: TickType, price: float,
                  attrib: TickAttrib):
        """Market data tick price callback. Handles all price related ticks."""
        self.logAnswer(current_fn_name(), vars())

    def tickSize(self, reqId: TickerId, tickType: TickType, size: int):
        """Market data tick size callback. Handles all size-related ticks."""
        self.logAnswer(current_fn_name(), vars())

    def tickSnapshotEnd(self, reqId: int):
        """When requesting market data snapshots, this market will indicate the
        snapshot reception is finished. """
        self.logAnswer(current_fn_name(), vars())

    def tickGeneric(self, reqId: TickerId, tickType: TickType, value: float):
        """Market data callback for generic (single-value) ticks."""
        self.logAnswer(current_fn_name(), vars())

    def tickString(self, reqId: TickerId, tickType: TickType, value: str):
        """Market data callback for string-valued ticks."""
        self.logAnswer(current_fn_name(), vars())

    def tickEFP(self, reqId: TickerId, tickType: TickType, basisPoints: float,
                formattedBasisPoints: str, totalDividends: float,
                holdDays: int, futureLastTradeDate: str, dividendImpact: float,
                dividendsToLastTradeDate: float):
        """ market data call back for Exchange for Physical
        tickerId - The request's identifier.
        tickType - The type of tick being received.
        basisPoints - Annualized basis points, which is representative of
            the financing rate that can be directly compared to broker rates.
        formattedBasisPoints - Annualized basis points as a formatted string
            that depicts them in percentage form.
        impliedFuture - The implied Futures price.
        holdDays - The number of hold days until the lastTradeDate of the EFP.
        futureLastTradeDate - The expiration date of the single stock future.
        dividendImpact - The dividend impact upon the annualized basis points
            interest rate.
        dividendsToLastTradeDate - The dividends expected until the expiration
            of the single stock future."""
        # FIX: the original called logAnswer() *before* the docstring (turning
        # the docstring into a dead string expression) and then again after
        # it, logging every EFP tick twice. Log exactly once.
        self.logAnswer(current_fn_name(), vars())

    def orderStatus(self, orderId: OrderId, status: str, filled: float,
                    remaining: float, avgFillPrice: float, permId: int,
                    parentId: int, lastFillPrice: float, clientId: int,
                    whyHeld: str, mktCapPrice: float):
        """This event is called whenever the status of an order changes. It is
        also fired after reconnecting to TWS if the client has any open orders.

        orderId: OrderId - The order ID that was specified previously in the
            call to placeOrder()
        status:str - The order status. Possible values include:
            PendingSubmit - indicates that you have transmitted the order, but have not yet received confirmation that it has been accepted by the order destination. NOTE: This order status is not sent by TWS and should be explicitly set by the API developer when an order is submitted.
            PendingCancel - indicates that you have sent a request to cancel the order but have not yet received cancel confirmation from the order destination. At this point, your order is not confirmed canceled. You may still receive an execution while your cancellation request is pending. NOTE: This order status is not sent by TWS and should be explicitly set by the API developer when an order is canceled.
            PreSubmitted - indicates that a simulated order type has been accepted by the IB system and that this order has yet to be elected. The order is held in the IB system until the election criteria are met. At that time the order is transmitted to the order destination as specified.
            Submitted - indicates that your order has been accepted at the order destination and is working.
            Cancelled - indicates that the balance of your order has been confirmed canceled by the IB system. This could occur unexpectedly when IB or the destination has rejected your order.
            Filled - indicates that the order has been completely filled.
            Inactive - indicates that the order has been accepted by the system (simulated orders) or an exchange (native orders) but that currently the order is inactive due to system, exchange or other issues.
        filled:int - Specifies the number of shares that have been executed.
            For more information about partial fills, see Order Status for Partial Fills.
        remaining:int - Specifies the number of shares still outstanding.
        avgFillPrice:float - The average price of the shares that have been executed. This parameter is valid only if the filled parameter value is greater than zero. Otherwise, the price parameter will be zero.
        permId:int - The TWS id used to identify orders. Remains the same over TWS sessions.
        parentId:int - The order ID of the parent order, used for bracket and auto trailing stop orders.
        lastFilledPrice:float - The last price of the shares that have been executed. This parameter is valid only if the filled parameter value is greater than zero. Otherwise, the price parameter will be zero.
        clientId:int - The ID of the client (or TWS) that placed the order. Note that TWS orders have a fixed clientId and orderId of 0 that distinguishes them from API orders.
        whyHeld:str - This field is used to identify an order held when TWS is trying to locate shares for a short sell. The value used to indicate this is 'locate'.
        """
        self.logAnswer(current_fn_name(), vars())

    def openOrder(self, orderId: OrderId, contract: Contract, order: Order,
                  orderState: OrderState):
        """This function is called to feed in open orders.

        orderID: OrderId - The order ID assigned by TWS. Use to cancel or
            update TWS order.
        contract: Contract - The Contract class attributes describe the contract.
        order: Order - The Order class gives the details of the open order.
        orderState: OrderState - The orderState class includes attributes Used
            for both pre and post trade margin and commission data."""
        self.logAnswer(current_fn_name(), vars())

    def openOrderEnd(self):
        """This is called at the end of a given request for open orders."""
        self.logAnswer(current_fn_name(), vars())

    def connectionClosed(self):
        """This function is called when TWS closes the sockets
        connection with the ActiveX control, or when TWS is shut down."""
        self.logAnswer(current_fn_name(), vars())

    def updateAccountValue(self, key: str, val: str, currency: str,
                           accountName: str):
        """ This function is called only when ReqAccountUpdates on
        EEClientSocket object has been called. """
        self.logAnswer(current_fn_name(), vars())

    def updatePortfolio(self, contract: Contract, position: float,
                        marketPrice: float, marketValue: float,
                        averageCost: float, unrealizedPNL: float,
                        realizedPNL: float, accountName: str):
        """This function is called only when reqAccountUpdates on
        EEClientSocket object has been called."""
        self.logAnswer(current_fn_name(), vars())

    def updateAccountTime(self, timeStamp: str):
        """Receives the last time account information was updated."""
        self.logAnswer(current_fn_name(), vars())

    def accountDownloadEnd(self, accountName: str):
        """This is called after a batch updateAccountValue() and
        updatePortfolio() is sent."""
        self.logAnswer(current_fn_name(), vars())

    def nextValidId(self, orderId: int):
        """ Receives next valid order id."""
        self.logAnswer(current_fn_name(), vars())

    def contractDetails(self, reqId: int, contractDetails: ContractDetails):
        """Receives the full contract's definitions. This method will return all
        contracts matching the requested via EEClientSocket::reqContractDetails.
        For example, one can obtain the whole option chain with it."""
        self.logAnswer(current_fn_name(), vars())

    def bondContractDetails(self, reqId: int, contractDetails: ContractDetails):
        """This function is called when reqContractDetails function
        has been called for bonds."""
        self.logAnswer(current_fn_name(), vars())

    def contractDetailsEnd(self, reqId: int):
        """This function is called once all contract details for a given
        request are received. This helps to define the end of an option
        chain."""
        self.logAnswer(current_fn_name(), vars())

    def execDetails(self, reqId: int, contract: Contract, execution: Execution):
        """This event is fired when the reqExecutions() functions is
        invoked, or when an order is filled. """
        self.logAnswer(current_fn_name(), vars())

    def execDetailsEnd(self, reqId: int):
        """This function is called once all executions have been sent to
        a client in response to reqExecutions()."""
        self.logAnswer(current_fn_name(), vars())

    def updateMktDepth(self, reqId: TickerId, position: int, operation: int,
                       side: int, price: float, size: int):
        """Returns the order book.

        tickerId -  the request's identifier
        position -  the order book's row being updated
        operation - how to refresh the row:
            0 = insert (insert this new order into the row identified by 'position')
            1 = update (update the existing order in the row identified by 'position')
            2 = delete (delete the existing order at the row identified by 'position').
        side -  0 for ask, 1 for bid
        price - the order's price
        size -  the order's size"""
        self.logAnswer(current_fn_name(), vars())

    def updateMktDepthL2(self, reqId: TickerId, position: int, marketMaker: str,
                         operation: int, side: int, price: float, size: int, isSmartDepth: bool):
        """Returns the order book.

        tickerId -  the request's identifier
        position -  the order book's row being updated
        marketMaker - the exchange holding the order
        operation - how to refresh the row:
            0 = insert (insert this new order into the row identified by 'position')
            1 = update (update the existing order in the row identified by 'position')
            2 = delete (delete the existing order at the row identified by 'position').
        side -  0 for ask, 1 for bid
        price - the order's price
        size -  the order's size
        isSmartDepth - is SMART Depth request"""
        self.logAnswer(current_fn_name(), vars())

    def updateNewsBulletin(self, msgId: int, msgType: int, newsMessage: str,
                           originExch: str):
        """ provides IB's bulletins
        msgId - the bulletin's identifier
        msgType - one of: 1 - Regular news bulletin 2 - Exchange no longer
            available for trading 3 - Exchange is available for trading
        message - the message
        origExchange -    the exchange where the message comes from.  """
        self.logAnswer(current_fn_name(), vars())

    def managedAccounts(self, accountsList: str):
        """Receives a comma-separated string with the managed account ids."""
        self.logAnswer(current_fn_name(), vars())

    def receiveFA(self, faData: FaDataType, cxml: str):
        """ receives the Financial Advisor's configuration available in the TWS

        faDataType - one of:
            Groups: offer traders a way to create a group of accounts and apply
                 a single allocation method to all accounts in the group.
            Profiles: let you allocate shares on an account-by-account basis
                using a predefined calculation value.
            Account Aliases: let you easily identify the accounts by meaningful
                 names rather than account numbers.
        faXmlData -  the xml-formatted configuration """
        self.logAnswer(current_fn_name(), vars())

    def historicalData(self, reqId: int, bar: BarData):
        """ returns the requested historical data bars

        reqId - the request's identifier
        date  - the bar's date and time (either as a yyyymmss hh:mm:ssformatted
             string or as system time according to the request)
        open  - the bar's open point
        high  - the bar's high point
        low   - the bar's low point
        close - the bar's closing point
        volume - the bar's traded volume if available
        count - the number of trades during the bar's timespan (only available
            for TRADES).
        WAP -   the bar's Weighted Average Price
        hasGaps  -indicates if the data has gaps or not. """
        self.logAnswer(current_fn_name(), vars())

    def historicalDataEnd(self, reqId: int, start: str, end: str):
        """ Marks the ending of the historical bars reception. """
        self.logAnswer(current_fn_name(), vars())

    def scannerParameters(self, xml: str):
        """ Provides the xml-formatted parameters available to create a market
        scanner.

        xml -   the xml-formatted string with the available parameters."""
        self.logAnswer(current_fn_name(), vars())

    def scannerData(self, reqId: int, rank: int, contractDetails: ContractDetails,
                    distance: str, benchmark: str, projection: str, legsStr: str):
        """ Provides the data resulting from the market scanner request.

        reqid - the request's identifier.
        rank -  the ranking within the response of this bar.
        contractDetails - the data's ContractDetails
        distance -      according to query.
        benchmark -     according to query.
        projection -    according to query.
        legStr - describes the combo legs when the scanner is returning EFP"""
        self.logAnswer(current_fn_name(), vars())

    def scannerDataEnd(self, reqId: int):
        """ Indicates the scanner data reception has terminated.

        reqId - the request's identifier"""
        self.logAnswer(current_fn_name(), vars())

    def realtimeBar(self, reqId: TickerId, time: int, open_: float, high: float, low: float, close: float,
                    volume: int, wap: float, count: int):
        """ Updates the real time 5 seconds bars

        reqId - the request's identifier
        bar.time  - start of bar in unix (or 'epoch') time
        bar.endTime - for synthetic bars, the end time (requires TWS v964). Otherwise -1.
        bar.open_  - the bar's open value
        bar.high  - the bar's high value
        bar.low   - the bar's low value
        bar.close - the bar's closing value
        bar.volume - the bar's traded volume if available
        bar.WAP   - the bar's Weighted Average Price
        bar.count - the number of trades during the bar's timespan (only available
            for TRADES)."""
        self.logAnswer(current_fn_name(), vars())

    def currentTime(self, time: int):
        """ Server's current time. This method will receive IB server's system
        time resulting after the invocation of reqCurrentTime. """
        self.logAnswer(current_fn_name(), vars())

    def fundamentalData(self, reqId: TickerId, data: str):
        """This function is called to receive fundamental
        market data. The appropriate market data subscription must be set
        up in Account Management before you can receive this data."""
        self.logAnswer(current_fn_name(), vars())

    def deltaNeutralValidation(self, reqId: int, deltaNeutralContract: DeltaNeutralContract):
        """Upon accepting a Delta-Neutral RFQ(request for quote), the
        server sends a deltaNeutralValidation() message with the DeltaNeutralContract
        structure. If the delta and price fields are empty in the original
        request, the confirmation will contain the current values from the
        server. These values are locked when the RFQ is processed and remain
        locked until the RFQ is canceled."""
        self.logAnswer(current_fn_name(), vars())

    def commissionReport(self, commissionReport: CommissionReport):
        """The commissionReport() callback is triggered as follows:
        - immediately after a trade execution
        - by calling reqExecutions()."""
        self.logAnswer(current_fn_name(), vars())

    def position(self, account: str, contract: Contract, position: float,
                 avgCost: float):
        """This event returns real-time positions for all accounts in
        response to the reqPositions() method."""
        self.logAnswer(current_fn_name(), vars())

    def positionEnd(self):
        """This is called once all position data for a given request are
        received and functions as an end marker for the position() data. """
        self.logAnswer(current_fn_name(), vars())

    def accountSummary(self, reqId: int, account: str, tag: str, value: str,
                       currency: str):
        """Returns the data from the TWS Account Window Summary tab in
        response to reqAccountSummary()."""
        self.logAnswer(current_fn_name(), vars())

    def accountSummaryEnd(self, reqId: int):
        """This method is called once all account summary data for a
        given request are received."""
        self.logAnswer(current_fn_name(), vars())

    def verifyMessageAPI(self, apiData: str):
        """ Deprecated Function """
        self.logAnswer(current_fn_name(), vars())

    def verifyCompleted(self, isSuccessful: bool, errorText: str):
        """Deprecated: completion callback of the verify flow."""
        self.logAnswer(current_fn_name(), vars())

    def verifyAndAuthMessageAPI(self, apiData: str, xyzChallange: str):
        """Deprecated: verify-and-auth challenge callback."""
        self.logAnswer(current_fn_name(), vars())

    def verifyAndAuthCompleted(self, isSuccessful: bool, errorText: str):
        """Deprecated: completion callback of the verify-and-auth flow."""
        self.logAnswer(current_fn_name(), vars())

    def displayGroupList(self, reqId: int, groups: str):
        """This callback is a one-time response to queryDisplayGroups().

        reqId - The requestId specified in queryDisplayGroups().
        groups - A list of integers representing visible group ID separated by
            the | character, and sorted by most used group first. This list will
             not change during TWS session (in other words, user cannot add a
            new group; sorting can change though)."""
        self.logAnswer(current_fn_name(), vars())

    def displayGroupUpdated(self, reqId: int, contractInfo: str):
        """This is sent by TWS to the API client once after receiving
        the subscription request subscribeToGroupEvents(), and will be sent
        again if the selected contract in the subscribed display group has
        changed.

        requestId - The requestId specified in subscribeToGroupEvents().
        contractInfo - The encoded value that uniquely represents the contract
            in IB. Possible values include:
            none = empty selection
            contractID@exchange = any non-combination contract.
                Examples: 8314@SMART for IBM SMART; 8314@ARCA for IBM @ARCA.
            combo = if any combo is selected. """
        self.logAnswer(current_fn_name(), vars())

    def positionMulti(self, reqId: int, account: str, modelCode: str,
                      contract: Contract, pos: float, avgCost: float):
        """same as position() except it can be for a certain
        account/model"""
        self.logAnswer(current_fn_name(), vars())

    def positionMultiEnd(self, reqId: int):
        """same as positionEnd() except it can be for a certain
        account/model"""
        self.logAnswer(current_fn_name(), vars())

    def accountUpdateMulti(self, reqId: int, account: str, modelCode: str,
                           key: str, value: str, currency: str):
        """same as updateAccountValue() except it can be for a certain
        account/model"""
        self.logAnswer(current_fn_name(), vars())

    def accountUpdateMultiEnd(self, reqId: int):
        """same as accountDownloadEnd() except it can be for a certain
        account/model"""
        self.logAnswer(current_fn_name(), vars())

    def tickOptionComputation(self, reqId: TickerId, tickType: TickType,
                              impliedVol: float, delta: float, optPrice: float, pvDividend: float,
                              gamma: float, vega: float, theta: float, undPrice: float):
        """This function is called when the market in an option or its
        underlier moves. TWS's option model volatilities, prices, and
        deltas, along with the present value of dividends expected on that
        options underlier are received."""
        self.logAnswer(current_fn_name(), vars())

    def securityDefinitionOptionParameter(self, reqId: int, exchange: str,
                                          underlyingConId: int, tradingClass: str, multiplier: str,
                                          expirations: SetOfString, strikes: SetOfFloat):
        """ Returns the option chain for an underlying on an exchange
        specified in reqSecDefOptParams There will be multiple callbacks to
        securityDefinitionOptionParameter if multiple exchanges are specified
        in reqSecDefOptParams

        reqId - ID of the request initiating the callback
        underlyingConId - The conID of the underlying security
        tradingClass -  the option trading class
        multiplier -    the option multiplier
        expirations - a list of the expiries for the options of this underlying
             on this exchange
        strikes - a list of the possible strikes for options of this underlying
             on this exchange """
        self.logAnswer(current_fn_name(), vars())

    def securityDefinitionOptionParameterEnd(self, reqId: int):
        """ Called when all callbacks to securityDefinitionOptionParameter are
        complete

        reqId - the ID used in the call to securityDefinitionOptionParameter """
        self.logAnswer(current_fn_name(), vars())

    def softDollarTiers(self, reqId: int, tiers: list):
        """ Called when receives Soft Dollar Tier configuration information

        reqId - The request ID used in the call to EEClient::reqSoftDollarTiers
        tiers - Stores a list of SoftDollarTier that contains all Soft Dollar
            Tiers information """
        self.logAnswer(current_fn_name(), vars())

    def familyCodes(self, familyCodes: ListOfFamilyCode):
        """ returns array of family codes """
        self.logAnswer(current_fn_name(), vars())

    def symbolSamples(self, reqId: int,
                      contractDescriptions: ListOfContractDescription):
        """ returns array of sample contract descriptions """
        self.logAnswer(current_fn_name(), vars())

    def mktDepthExchanges(self, depthMktDataDescriptions: ListOfDepthExchanges):
        """ returns array of exchanges which return depth to UpdateMktDepthL2"""
        self.logAnswer(current_fn_name(), vars())

    def tickNews(self, tickerId: int, timeStamp: int, providerCode: str, articleId: str, headline: str, extraData: str):
        """ returns news headlines"""
        self.logAnswer(current_fn_name(), vars())

    def smartComponents(self, reqId: int, smartComponentMap: SmartComponentMap):
        """returns exchange component mapping"""
        self.logAnswer(current_fn_name(), vars())

    def tickReqParams(self, tickerId: int, minTick: float, bboExchange: str, snapshotPermissions: int):
        """returns exchange map of a particular contract"""
        self.logAnswer(current_fn_name(), vars())

    def newsProviders(self, newsProviders: ListOfNewsProviders):
        """returns available, subscribed API news providers"""
        self.logAnswer(current_fn_name(), vars())

    def newsArticle(self, requestId: int, articleType: int, articleText: str):
        """returns body of news article"""
        self.logAnswer(current_fn_name(), vars())

    def historicalNews(self, requestId: int, time: str, providerCode: str, articleId: str, headline: str):
        """returns historical news headlines"""
        self.logAnswer(current_fn_name(), vars())

    def historicalNewsEnd(self, requestId: int, hasMore: bool):
        """signals end of historical news"""
        self.logAnswer(current_fn_name(), vars())

    def headTimestamp(self, reqId: int, headTimestamp: str):
        """returns earliest available data of a type of data for a particular contract"""
        self.logAnswer(current_fn_name(), vars())

    def histogramData(self, reqId: int, items: HistogramData):
        """returns histogram data for a contract"""
        self.logAnswer(current_fn_name(), vars())

    def historicalDataUpdate(self, reqId: int, bar: BarData):
        """returns updates in real time when keepUpToDate is set to True"""
        self.logAnswer(current_fn_name(), vars())

    def rerouteMktDataReq(self, reqId: int, conId: int, exchange: str):
        """returns reroute CFD contract information for market data request"""
        self.logAnswer(current_fn_name(), vars())

    def rerouteMktDepthReq(self, reqId: int, conId: int, exchange: str):
        """returns reroute CFD contract information for market depth request"""
        self.logAnswer(current_fn_name(), vars())

    def marketRule(self, marketRuleId: int, priceIncrements: ListOfPriceIncrements):
        """returns minimum price increment structure for a particular market rule ID"""
        self.logAnswer(current_fn_name(), vars())

    def pnl(self, reqId: int, dailyPnL: float, unrealizedPnL: float, realizedPnL: float):
        """returns the daily PnL for the account"""
        self.logAnswer(current_fn_name(), vars())

    def pnlSingle(self, reqId: int, pos: int, dailyPnL: float, unrealizedPnL: float, realizedPnL: float, value: float):
        """returns the daily PnL for a single position in the account"""
        self.logAnswer(current_fn_name(), vars())

    def historicalTicks(self, reqId: int, ticks: ListOfHistoricalTick, done: bool):
        """returns historical tick data when whatToShow=MIDPOINT"""
        self.logAnswer(current_fn_name(), vars())

    def historicalTicksBidAsk(self, reqId: int, ticks: ListOfHistoricalTickBidAsk, done: bool):
        """returns historical tick data when whatToShow=BID_ASK"""
        self.logAnswer(current_fn_name(), vars())

    def historicalTicksLast(self, reqId: int, ticks: ListOfHistoricalTickLast, done: bool):
        """returns historical tick data when whatToShow=TRADES"""
        self.logAnswer(current_fn_name(), vars())

    def tickByTickAllLast(self, reqId: int, tickType: int, time: int, price: float,
                          size: int, tickAttribLast: TickAttribLast, exchange: str,
                          specialConditions: str):
        """returns tick-by-tick data for tickType = "Last" or "AllLast" """
        self.logAnswer(current_fn_name(), vars())

    def tickByTickBidAsk(self, reqId: int, time: int, bidPrice: float, askPrice: float,
                         bidSize: int, askSize: int, tickAttribBidAsk: TickAttribBidAsk):
        """returns tick-by-tick data for tickType = "BidAsk" """
        self.logAnswer(current_fn_name(), vars())

    def tickByTickMidPoint(self, reqId: int, time: int, midPoint: float):
        """returns tick-by-tick data for tickType = "MidPoint" """
        self.logAnswer(current_fn_name(), vars())

    def orderBound(self, reqId: int, apiClientId: int, apiOrderId: int):
        """returns orderBound notification"""
        self.logAnswer(current_fn_name(), vars())

    def completedOrder(self, contract: Contract, order: Order, orderState: OrderState):
        """This function is called to feed in completed orders.

        contract: Contract - The Contract class attributes describe the contract.
        order: Order - The Order class gives the details of the completed order.
        orderState: OrderState - The orderState class includes completed order status details."""
        self.logAnswer(current_fn_name(), vars())

    def completedOrdersEnd(self):
        """This is called at the end of a given request for completed orders."""
        self.logAnswer(current_fn_name(), vars())
from toyrobot.models.orientation import Orientation
from toyrobot.models.robot import Robot
import logging
class CommandParser:
    """Validates toy-robot command lines and applies them to a table top.

    Supported commands: PLACE x,y,ORIENTATION, LEFT, RIGHT, MOVE, REPORT.
    Commands arrive pre-tokenized as a list, e.g. ["PLACE", "0,0,NORTH"].
    Invalid commands are logged and ignored; nothing is applied until the
    first valid PLACE puts a robot on the table.
    """

    def __init__(self):
        # Accepted command verbs (cmd[0]); anything else is rejected.
        self.valid_commands = ["PLACE", "LEFT", "RIGHT", "MOVE", "REPORT"]

    def _parse_place_cmd_string(self, cmd):
        # cmd[1] holds "x,y,orientation"; return its raw comma-split parts.
        return cmd[1].strip().split(",")

    def _get_place_cmd_elements(self, cmd):
        # Convert the raw PLACE argument into typed (int, int, str) values.
        x, y, orient = self._parse_place_cmd_string(cmd)
        return int(x), int(y), orient

    def is_valid_base_command(self, cmd):
        """Return True if cmd is non-empty and its verb is recognised."""
        if len(cmd) < 1:
            logging.error(f"{cmd} is empty")
            return False
        if cmd[0] not in self.valid_commands:
            logging.error(f"{cmd[0]} is not valid. Accepted commands are {self.valid_commands}")
            return False
        return True

    def is_valid_place_command(self, cmd):
        """Return True if cmd is a well-formed PLACE command.

        Requires an argument with exactly three comma-separated parts,
        numeric x and y, and an orientation among the Orientation members.
        """
        invalid_place_messages = (
            f"PLACE command requires position and orientation. (e.g PLACE 0,0,NORTH). "
            f"position has to be numeric, comma-separated with no spaces, "
            f"and valid orientations are {list(Orientation.__members__)}. {cmd} is not valid"
        )
        if len(cmd) < 2:
            logging.error(invalid_place_messages)
            return False
        position = self._parse_place_cmd_string(cmd)
        if len(position) != 3:
            logging.error(invalid_place_messages)
            return False
        x, y, orient = position
        # BUG FIX: the original tested `not y.isnumeric` (the bound method,
        # which is always truthy), so a non-numeric y was silently accepted.
        if (not x.isnumeric()) or (not y.isnumeric()):
            logging.error(invalid_place_messages)
            return False
        if orient not in Orientation.__members__:
            logging.error(invalid_place_messages)
            return False
        return True

    def apply_command(self, table_top, cmd):
        """Validate cmd and apply it to table_top's robot, if any."""
        if not self.is_valid_base_command(cmd):
            return
        base_cmd = cmd[0]
        # Ignore any other commands until the first robot is placed
        # (first valid PLACE command).
        if not table_top.has_robot():
            if base_cmd == "PLACE" and self.is_valid_place_command(cmd):
                x, y, orient = self._get_place_cmd_elements(cmd)
                if table_top.is_valid_position(x, y):
                    table_top.add_robot(Robot(x, y, orient))
            return
        active_robot = table_top.get_robot()
        if base_cmd == "PLACE":
            if self.is_valid_place_command(cmd):
                x, y, orient = self._get_place_cmd_elements(cmd)
                if table_top.is_valid_position(x, y):
                    active_robot.place(x, y, orient)
        elif base_cmd == "LEFT":
            active_robot.left()
        elif base_cmd == "RIGHT":
            active_robot.right()
        elif base_cmd == "MOVE":
            # Compute the prospective position; only commit it when the
            # robot would remain on the table.
            new_x, new_y = table_top.get_robot().move()
            if table_top.is_valid_position(new_x, new_y):
                active_robot.update_position(new_x, new_y)
        elif base_cmd == "REPORT":
            active_robot.report()
import json
import os
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
DIV_LINE_WIDTH = 50
# Global vars for tracking and labeling data at load time.
exp_idx = 0
units = dict()
def smooth_dataframe(dataframe, value, smooth):
    """In-place moving-window average of column ``value`` with window ``smooth``.

    Edge windows are renormalised by the number of in-range samples, so the
    smoothed column keeps the original length.
    """
    kernel = np.ones(smooth)
    series = np.asarray(dataframe[value])
    # Per-position window size, used to renormalise the partial edge windows.
    window_counts = np.convolve(np.ones(len(series)), kernel, 'same')
    dataframe[value] = np.convolve(series, kernel, 'same') / window_counts
def plot_data(data, xaxis='Epoch', value="AverageEpRet", condition="Condition1", smooth=1, **kwargs):
    """Plot ``value`` against ``xaxis`` for a list of experiment DataFrames.

    Args:
        data: list of pandas DataFrames, one per run (concatenated for seaborn).
        xaxis: x column; falls back to 'GradientSteps' then 'Epoch' when absent.
        value: y column to plot.
        condition: column used as the hue (one curve per condition).
        smooth: moving-average window width applied to ``value``; clamped to
            the shortest run so convolution never exceeds the data length.
        kwargs: forwarded to seaborn.lineplot.
    """
    # Clamp the smoothing window so it never exceeds the shortest run.
    maxlen = min([len(d) for d in data])
    if smooth > maxlen:
        print(f'Truncate smooth to {maxlen}')
        smooth = maxlen
    if smooth > 1:
        # Moving-window average: smoothed_y[t] = mean(y[t-k] ... y[t+k]),
        # where smooth is the width of that window (2k+1).
        for datum in data:
            smooth_dataframe(datum, value, smooth)
    if isinstance(data, list):
        data = pd.concat(data, ignore_index=True)
    if xaxis not in data:
        # Fall back to columns the training logs are known to contain.
        if 'GradientSteps' in data:
            xaxis = 'GradientSteps'
        else:
            xaxis = 'Epoch'
    sns.set(style="darkgrid", font_scale=1.5)
    # NOTE(review): ci='sd' is deprecated in seaborn >= 0.12 (use
    # errorbar='sd'); kept as-is for compatibility with the pinned version.
    sns.lineplot(data=data, x=xaxis, y=value, hue=condition, ci='sd', **kwargs)
    plt.legend(loc='best').set_draggable(True)
    # Formatting niceness: x-axis in scientific notation if max x is large.
    xscale = np.max(np.asarray(data[xaxis])) > 5e3
    if xscale:
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    plt.tight_layout(pad=0.5)
def get_datasets(logdir, condition=None):
    """
    Recursively look through logdir for output files produced by
    spinup.logx.Logger.

    Assumes that any file "progress.txt" is a valid hit. Each hit yields a
    DataFrame annotated with Performance/Unit/Condition1/Condition2 columns.
    """
    global exp_idx
    global units
    datasets = []
    for root, _, files in os.walk(logdir):
        if 'progress.txt' not in files:
            continue
        exp_name = None
        try:
            # BUG FIX: use a context manager so the config file handle is
            # always closed (the original opened it and never closed it).
            with open(os.path.join(root, 'config.json')) as config_file:
                config = json.load(config_file)
            if 'exp_name' in config:
                exp_name = config['exp_name']
        except (OSError, json.JSONDecodeError):
            # Narrowed from a bare except: only missing/unreadable/bad-JSON
            # config files are expected here.
            print('No file named config.json')
        condition1 = condition or exp_name or 'exp'
        condition2 = condition1 + '-' + str(exp_idx)
        exp_idx += 1
        # "units" numbers the runs that share a condition (e.g. random seeds).
        if condition1 not in units:
            units[condition1] = 0
        unit = units[condition1]
        units[condition1] += 1
        try:
            exp_data = pd.read_csv(os.path.join(root, 'progress.txt'), sep='\t')
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; any parse failure just skips the run.
            print('Could not read from %s' % os.path.join(root, 'progress.txt'))
            continue
        # Pick the best available performance column for this run.
        if 'NormalizedTestEpRet' in exp_data:
            performance = 'NormalizedTestEpRet'
        elif 'AverageTestEpRet' in exp_data:
            performance = 'AverageTestEpRet'
        elif 'AverageEpRet' in exp_data:
            performance = 'AverageEpRet'
        else:
            performance = None
        if performance is not None:
            exp_data.insert(len(exp_data.columns), 'Performance', exp_data[performance])
        exp_data.insert(len(exp_data.columns), 'Unit', unit)
        exp_data.insert(len(exp_data.columns), 'Condition1', condition1)
        exp_data.insert(len(exp_data.columns), 'Condition2', condition2)
        datasets.append(exp_data)
    return datasets
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
    """
    Collect datasets from every entry in all_logdirs.

    For each entry: 1) if it is a real directory (with a trailing separator),
    pull data from it directly; 2) otherwise treat it as a prefix and pull
    data from every sibling directory whose name contains that prefix.
    Entries can then be filtered by ``select``/``exclude`` substrings.
    """
    logdirs = []
    for logdir in all_logdirs:
        if osp.isdir(logdir) and logdir[-1] == os.sep:
            logdirs.append(logdir)
        else:
            basedir = osp.dirname(logdir)
            prefix = logdir.split(os.sep)[-1]
            matches = [osp.join(basedir, entry)
                       for entry in os.listdir(basedir) if prefix in entry]
            logdirs.extend(sorted(matches))
    # Selection rules: keep only logdirs containing all `select` substrings
    # and none of the `exclude` substrings.
    if select is not None:
        logdirs = [log for log in logdirs if all(s in log for s in select)]
    if exclude is not None:
        logdirs = [log for log in logdirs if not any(e in log for e in exclude)]
    # Show what will be plotted.
    print('Plotting from...\n' + '=' * DIV_LINE_WIDTH + '\n')
    for logdir in logdirs:
        print(logdir)
    print('\n' + '=' * DIV_LINE_WIDTH)
    # A legend, when given, must pair up one-to-one with the logdirs.
    assert not legend or (len(legend) == len(logdirs)), \
        "Must give a legend title for each set of experiments."
    data = []
    if legend:
        for log, leg in zip(logdirs, legend):
            data += get_datasets(log, leg)
    else:
        for log in logdirs:
            data += get_datasets(log)
    return data
def make_plots(all_logdirs, legend=None, xaxis=None, values=None, count=False,
               font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean'):
    """Load all matching logdirs and draw one figure per requested value.

    count: when True, plot every run separately (Condition2, one per seed)
        instead of averaging runs that share an exp_name (Condition1).
    estimator: name of a numpy reduction ('mean', 'max', ...) used for the
        main curve.
    NOTE(review): font_scale is accepted but never used in this body --
    confirm whether it should be forwarded to seaborn.
    """
    data = get_all_datasets(all_logdirs, legend, select, exclude)
    values = values if isinstance(values, list) else [values]
    condition = 'Condition2' if count else 'Condition1'
    estimator = getattr(np, estimator)  # choose what to show on main curve: mean? max? min?
    for value in values:
        plt.figure()
        plot_data(data, xaxis=xaxis, value=value, condition=condition, smooth=smooth, estimator=estimator)
    plt.show()
def main():
    """Command-line entry point for the results plotter."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('logdir', nargs='*')
    parser.add_argument('--legend', '-l', nargs='*')
    parser.add_argument('--xaxis', '-x', default='TotalEnvInteracts')
    parser.add_argument('--value', '-y', default='Performance', nargs='*')
    parser.add_argument('--count', action='store_true')
    parser.add_argument('--smooth', '-s', type=int, default=20)
    parser.add_argument('--select', nargs='*')
    parser.add_argument('--exclude', nargs='*')
    parser.add_argument('--est', default='mean')
    args = parser.parse_args()
    """
    Args:
        logdir (strings): As many log directories (or prefixes to log
            directories, which the plotter will autocomplete internally) as
            you'd like to plot from.

        legend (strings): Optional way to specify legend for the plot. The
            plotter legend will automatically use the ``exp_name`` from the
            config.json file, unless you tell it otherwise through this flag.
            This only works if you provide a name for each directory that
            will get plotted. (Note: this may not be the same as the number
            of logdir args you provide! Recall that the plotter looks for
            autocompletes of the logdir args: there may be more than one
            match for a given logdir prefix, and you will need to provide a
            legend string for each one of those matches---unless you have
            removed some of them as candidates via selection or exclusion
            rules (below).)

        xaxis (string): Pick what column from data is used for the x-axis.
            Defaults to ``TotalEnvInteracts``.

        value (strings): Pick what columns from data to graph on the y-axis.
            Submitting multiple values will produce multiple graphs. Defaults
            to ``Performance``, which is not an actual output of any algorithm.
            Instead, ``Performance`` refers to either ``AverageEpRet``, the
            correct performance measure for the on-policy algorithms, or
            ``AverageTestEpRet``, the correct performance measure for the
            off-policy algorithms. The plotter will automatically figure out
            which of ``AverageEpRet`` or ``AverageTestEpRet`` to report for
            each separate logdir.

        count: Optional flag. By default, the plotter shows y-values which
            are averaged across all results that share an ``exp_name``,
            which is typically a set of identical experiments that only vary
            in random seed. But if you'd like to see all of those curves
            separately, use the ``--count`` flag.

        smooth (int): Smooth data by averaging it over a fixed window. This
            parameter says how wide the averaging window will be.

        select (strings): Optional selection rule: the plotter will only show
            curves from logdirs that contain all of these substrings.

        exclude (strings): Optional exclusion rule: plotter will only show
            curves from logdirs that do not contain these substrings.
    """
    make_plots(args.logdir, args.legend, args.xaxis, args.value, args.count,
               smooth=args.smooth, select=args.select, exclude=args.exclude,
               estimator=args.est)
if __name__ == "__main__":
main() | /rlutils-python-0.0.3.tar.gz/rlutils-python-0.0.3/rlutils/plot.py | 0.479747 | 0.453141 | plot.py | pypi |
import atexit
import json
import os
import os.path as osp
import shutil
import time
import numpy as np
from rlutils.utils.serialization_utils import convert_json
from tensorboardX import SummaryWriter
DEFAULT_DATA_DIR = 'data'
FORCE_DATESTAMP = False
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def statistics_scalar(x, with_min_and_max=False):
    """
    Get mean/std (and optionally min/max) of an array of scalar samples.

    Args:
        x: An array containing samples of the scalar to produce statistics
            for.

        with_min_and_max (bool): If true, return min and max of x in
            addition to mean and std.

    Returns:
        (mean, std) or (mean, std, min, max), all as float32 scalars.
    """
    samples = np.array(x, dtype=np.float32)
    count = len(samples)
    mean = np.sum(samples) / count
    # Population standard deviation (divides by n, not n - 1).
    std = np.sqrt(np.sum((samples - mean) ** 2) / count)
    if not with_min_and_max:
        return mean, std
    minimum = np.min(samples) if count > 0 else np.inf
    maximum = np.max(samples) if count > 0 else -np.inf
    return mean, std, minimum, maximum
def colorize(string, color, bold=False, highlight=False):
    """
    Wrap ``string`` in ANSI escape codes for the given color.

    ``highlight`` shifts the color code into the background range (+10);
    ``bold`` appends the bold attribute.
    This function was originally written by John Schulman.
    """
    code = color2num[color] + (10 if highlight else 0)
    attrs = [str(code)]
    if bold:
        attrs.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attrs), string)
def setup_logger_kwargs(exp_name, seed=None, data_dir=None, datestamp=False):
    """
    Sets up the output_dir for a logger and returns a dict for logger kwargs.

    Layout (relative to data_dir):
        no seed, no datestamp:   exp_name
        seed, no datestamp:      exp_name/exp_name_s[seed]
        with datestamp:          YY-MM-DD_exp_name/YY-MM-DD_HH-MM-SS-exp_name_s[seed]
    When data_dir is None, output_dir is None as well. Datestamping can be
    forced globally via the module-level ``FORCE_DATESTAMP``.

    Args:
        exp_name (string): Name for experiment.

        seed (int): Seed for random number generators used by experiment.

        data_dir (string): Path to folder where results should be saved.

        datestamp (bool): Whether to include a date and timestamp in the
            name of the save directory.

    Returns:
        logger_kwargs, a dict containing output_dir and exp_name.
    """
    datestamp = datestamp or FORCE_DATESTAMP
    date_prefix = time.strftime("%Y-%m-%d_") if datestamp else ''
    relpath = date_prefix + exp_name
    if seed is not None:
        # Seed-specific subfolder inside the experiment directory.
        if datestamp:
            stamp = time.strftime("%Y-%m-%d_%H-%M-%S")
            subfolder = '%s-%s_s%s' % (stamp, exp_name, seed)
        else:
            subfolder = '%s_s%s' % (exp_name, seed)
        relpath = osp.join(relpath, subfolder)
    output_dir = osp.join(data_dir, relpath) if data_dir is not None else None
    return dict(output_dir=output_dir, exp_name=exp_name)
class Logger:
    """
    A general-purpose logger.

    Makes it easy to save diagnostics, hyperparameter configurations, the
    state of a training run, and the trained model.
    """

    def __init__(self, output_dir=None, output_fname='progress.txt', exp_name=None, tensorboard=False):
        """
        Initialize a Logger.

        Args:
            output_dir (string): A directory for saving results to. If
                ``None``, defaults to a temp directory of the form
                ``/tmp/experiments/somerandomnumber``.

            output_fname (string): Name for the tab-separated-value file
                containing metrics logged throughout a training run.
                Defaults to ``progress.txt``.

            exp_name (string): Experiment name. If you run multiple training
                runs and give them all the same ``exp_name``, the plotter
                will know to group them. (Use case: if you run the same
                hyperparameter configuration with multiple random seeds, you
                should give them all the same ``exp_name``.)

            tensorboard (bool): If true, mirror every dumped scalar to a
                tensorboardX SummaryWriter under ``output_dir/tensorboard``.
                NOTE(review): this path calls osp.join(self.output_dir, ...),
                which fails when output_dir is None -- confirm tensorboard is
                only ever requested together with an output_dir.
        """
        self.output_dir = output_dir
        if self.output_dir is not None:
            if osp.exists(self.output_dir):
                print("Warning: Log dir %s already exists! Storing info there anyway." % self.output_dir)
            else:
                os.makedirs(self.output_dir)
            self.output_file = open(osp.join(self.output_dir, output_fname), 'w')
            # Ensure the progress file is closed when the interpreter exits.
            atexit.register(self.output_file.close)
            print(colorize("Logging data to %s" % self.output_file.name, 'green', bold=True))
        else:
            self.output_file = None
        if tensorboard:
            tensorboard_dir = osp.join(self.output_dir, 'tensorboard')
            # Start from a clean tensorboard directory for each run.
            if os.path.exists(tensorboard_dir) and os.path.isdir(tensorboard_dir):
                shutil.rmtree(tensorboard_dir)
            self.tensorboard_file = SummaryWriter(logdir=tensorboard_dir)
        else:
            self.tensorboard_file = None
        self.first_row = True       # column set is frozen after the first dump
        self.log_headers = []       # ordered column names of the output file
        self.log_current_row = {}   # values staged for the next dump_tabular
        self.exp_name = exp_name
        self.num_epoch = 0          # fallback global step for tensorboard

    @staticmethod
    def log(msg, color='green'):
        """Print a colorized message to stdout."""
        print(colorize(msg, color, bold=True))

    def log_tabular(self, key, val):
        """
        Log a value of some diagnostic.

        Call this only once for each diagnostic quantity, each iteration.
        After using ``log_tabular`` to store values for each diagnostic,
        make sure to call ``dump_tabular`` to write them out to file and
        stdout (otherwise they will not get saved anywhere).
        """
        if self.first_row:
            self.log_headers.append(key)
        else:
            # After the first dump, the set of columns is fixed.
            assert key in self.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration" % key
        assert key not in self.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()" % key
        self.log_current_row[key] = val

    def save_config(self, config):
        """
        Log an experiment configuration.

        Call this once at the top of your experiment, passing in all important
        config vars as a dict. This will serialize the config to JSON, while
        handling anything which can't be serialized in a graceful way (writing
        as informative a string as possible).

        Example use:

        .. code-block:: python

            logger = EpochLogger(**logger_kwargs)
            logger.save_config(locals())
        """
        config_json = convert_json(config)
        if self.exp_name is not None:
            config_json['exp_name'] = self.exp_name
        output = json.dumps(config_json, separators=(',', ':\t'), indent=4, sort_keys=True)
        print(colorize('Saving config:\n', color='cyan', bold=True))
        print(output)
        if self.output_dir is not None:
            with open(osp.join(self.output_dir, "config.json"), 'w') as out:
                out.write(output)

    def dump_tabular(self):
        """
        Write all of the diagnostics from the current iteration.

        Writes both to stdout, and to the output file.
        """
        vals = []
        # Pretty-print an aligned two-column table to stdout.
        key_lens = [len(key) for key in self.log_headers]
        max_key_len = max(15, max(key_lens))
        keystr = '%' + '%d' % max_key_len
        fmt = "| " + keystr + "s | %15s |"
        n_slashes = 22 + max_key_len
        print("-" * n_slashes)
        for key in self.log_headers:
            val = self.log_current_row.get(key, "")
            valstr = "%8.3g" % val if hasattr(val, "__float__") else val
            print(fmt % (key, valstr))
            vals.append(val)
        print("-" * n_slashes, flush=True)
        if self.output_file is not None:
            # The header line is written only once, on the first dump.
            if self.first_row:
                self.output_file.write("\t".join(self.log_headers) + "\n")
            self.output_file.write("\t".join(map(str, vals)) + "\n")
            self.output_file.flush()
        if self.tensorboard_file is not None:
            self.num_epoch += 1
            data = dict(zip(self.log_headers, vals))
            # Use the logged Epoch as the global step when present, otherwise
            # fall back to an internal dump counter.
            epoch = data.pop('Epoch', self.num_epoch)
            for key, val in data.items():
                self.tensorboard_file.add_scalar(f'data/{key}', scalar_value=val, global_step=epoch)
        self.log_current_row.clear()
        self.first_row = False
class EpochLogger(Logger):
    """
    A variant of Logger tailored for tracking average values over epochs.

    Typical use case: there is some quantity which is calculated many times
    throughout an epoch, and at the end of the epoch, you would like to
    report the average / std / min / max value of that quantity.

    With an EpochLogger, each time the quantity is calculated, you would
    use

    .. code-block:: python

        epoch_logger.store(NameOfQuantity=quantity_value)

    to load it into the EpochLogger's state. Then at the end of the epoch, you
    would use

    .. code-block:: python

        epoch_logger.log_tabular(NameOfQuantity, **options)

    to record the desired values.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps diagnostic name -> list of values stored during this epoch.
        self.epoch_dict = dict()

    def store(self, **kwargs):
        """
        Save something into the epoch_logger's current state.

        Provide an arbitrary number of keyword arguments with numerical
        values.
        """
        for k, v in kwargs.items():
            if not (k in self.epoch_dict.keys()):
                self.epoch_dict[k] = []
            self.epoch_dict[k].append(v)

    def log_tabular(self, key, val=None, with_min_and_max=False, average_only=False):
        """
        Log a value or possibly the mean/std/min/max values of a diagnostic.

        Args:
            key (string): The name of the diagnostic. If you are logging a
                diagnostic whose state has previously been saved with
                ``store``, the key here has to match the key you used there.

            val: A value for the diagnostic. If you have previously saved
                values for this key via ``store``, do *not* provide a ``val``
                here.

            with_min_and_max (bool): If true, log min and max values of the
                diagnostic over the epoch.

            average_only (bool): If true, do not log the standard deviation
                of the diagnostic over the epoch.
        """
        if val is not None:
            super().log_tabular(key, val)
        else:
            v = self.epoch_dict[key]
            if len(v) == 0:
                # Nothing stored this epoch: emit NaNs so the column layout
                # stays consistent across epochs.
                super().log_tabular(key if average_only else 'Average' + key, np.nan)
                if not (average_only):
                    super().log_tabular('Std' + key, np.nan)
                if with_min_and_max:
                    super().log_tabular('Max' + key, np.nan)
                    super().log_tabular('Min' + key, np.nan)
            else:
                # Flatten stored arrays into one sample vector when needed.
                vals = np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape) > 0 else v
                # statistics_scalar returns (mean, std[, min, max]):
                # stats[2] is the minimum and stats[3] the maximum.
                stats = statistics_scalar(vals, with_min_and_max=with_min_and_max)
                super().log_tabular(key if average_only else 'Average' + key, stats[0])
                if not (average_only):
                    super().log_tabular('Std' + key, stats[1])
                if with_min_and_max:
                    super().log_tabular('Max' + key, stats[3])
                    super().log_tabular('Min' + key, stats[2])
            # Reset the per-epoch buffer once the diagnostic is reported.
            self.epoch_dict[key] = []

    def get_stats(self, key):
        """
        Lets an algorithm ask the logger for mean/std/min/max of a diagnostic.
        """
        v = self.epoch_dict[key]
        vals = np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape) > 0 else v
        return statistics_scalar(vals)

    def get(self, key):
        # Raw list of values stored for ``key`` during the current epoch.
        return self.epoch_dict[key]

    def dump_tabular(self):
        super(EpochLogger, self).dump_tabular()
        # Every stored diagnostic must have been consumed via log_tabular,
        # otherwise values would silently leak across epochs.
        for key in self.epoch_dict.keys():
            assert len(self.epoch_dict[key]) == 0, f'Key {key} is not called using log_tabular'
import torch.nn as nn
from .layers import EnsembleDense, SqueezeLayer
# Registry of activation names accepted by build_mlp / decode_activation.
str_to_activation = {
    'relu': nn.ReLU,
    'leaky_relu': nn.LeakyReLU,
    'tanh': nn.Tanh,
    'sigmoid': nn.Sigmoid,
    'softplus': nn.Softplus,
}


def decode_activation(activation):
    """Resolve an activation spec to an nn.Module class (or callable).

    Accepts a key of ``str_to_activation``, any callable (e.g. an nn.Module
    subclass), or None (mapped to nn.Identity).

    Raises:
        ValueError: for an unknown string, or a non-callable non-None value.
    """
    if isinstance(activation, str):
        try:
            return str_to_activation[activation]
        except KeyError:
            # BUG FIX: the original used dict.get and silently returned None
            # for a misspelled name, which later crashed with a confusing
            # "'NoneType' object is not callable".
            raise ValueError('unknown activation: %r' % (activation,)) from None
    if activation is None:
        return nn.Identity
    if callable(activation):
        return activation
    raise ValueError('activation must be a string or callable.')
def build_mlp(input_dim, output_dim, mlp_hidden, num_ensembles=None, num_layers=3,
              activation='relu', out_activation=None, squeeze=False, dropout=None,
              batch_norm=False):
    """Construct an MLP (or a batched ensemble of MLPs) as an nn.Sequential.

    Args:
        input_dim: size of the input features.
        output_dim: size of the output features.
        mlp_hidden: width of each hidden layer.
        num_ensembles: if not None, every layer is an EnsembleDense, so the
            network maps (num_ensembles, batch, input_dim) to
            (num_ensembles, batch, output_dim).
        num_layers: total number of dense layers (>= 1).
        activation: hidden-layer activation (string name, callable, or None).
        out_activation: optional activation applied after the final layer.
        squeeze: if True and output_dim == 1, append a SqueezeLayer that
            drops the trailing singleton dimension.
        dropout: optional dropout probability inserted after each hidden
            activation.
        batch_norm: not supported; must be False.

    Returns:
        nn.Sequential implementing the requested network.
    """
    assert not batch_norm, 'BatchNorm is not supported yet.'
    activation_fn = decode_activation(activation)
    output_activation_fn = decode_activation(out_activation)
    layers = []
    if num_layers == 1:
        # Single layer: straight input -> output projection, no hidden units.
        if num_ensembles is not None:
            layers.append(EnsembleDense(num_ensembles, input_dim, output_dim))
        else:
            layers.append(nn.Linear(input_dim, output_dim))
    else:
        # first layer
        if num_ensembles is not None:
            layers.append(EnsembleDense(num_ensembles, input_dim, mlp_hidden))
        else:
            layers.append(nn.Linear(input_dim, mlp_hidden))
        layers.append(activation_fn())
        if dropout is not None:
            layers.append(nn.Dropout(p=dropout))
        # intermediate layers
        for _ in range(num_layers - 2):
            if num_ensembles is not None:
                layers.append(EnsembleDense(num_ensembles, mlp_hidden, mlp_hidden))
            else:
                layers.append(nn.Linear(mlp_hidden, mlp_hidden))
            layers.append(activation_fn())
            if dropout is not None:
                layers.append(nn.Dropout(p=dropout))
        # final dense layer
        if num_ensembles is not None:
            layers.append(EnsembleDense(num_ensembles, mlp_hidden, output_dim))
        else:
            layers.append(nn.Linear(mlp_hidden, output_dim))
    if out_activation is not None:
        layers.append(output_activation_fn())
    if output_dim == 1 and squeeze is True:
        layers.append(SqueezeLayer(dim=-1))
    model = nn.Sequential(*layers)
    return model
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlutils.np.functional import inverse_softplus
class EnsembleDense(nn.Module):
    """A batched ensemble of independent fully-connected layers.

    Holds ``num_ensembles`` separate (in_features x out_features) weight
    matrices and applies them all with a single batched matmul. Maps
    (num_ensembles, batch, in_features) -> (num_ensembles, batch, out_features).
    """
    __constants__ = ['num_ensembles', 'in_features', 'out_features']
    in_features: int
    out_features: int
    weight: torch.Tensor

    def __init__(self, num_ensembles: int, in_features: int, out_features: int, bias: bool = True) -> None:
        super(EnsembleDense, self).__init__()
        self.num_ensembles = num_ensembles
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(num_ensembles, in_features, out_features))
        if bias:
            # Middle singleton dimension broadcasts over the batch.
            self.bias = nn.Parameter(torch.Tensor(num_ensembles, 1, out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Kaiming-uniform initialization, mirroring nn.Linear's default."""
        fan = self.in_features
        gain = nn.init.calculate_gain('leaky_relu', param=math.sqrt(5))
        std = gain / math.sqrt(fan)
        bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
        with torch.no_grad():
            nn.init.uniform_(self.weight, -bound, bound)
        if self.bias is not None:
            fan_in = self.in_features
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        out = torch.bmm(input, self.weight)
        # BUG FIX: the original added self.bias unconditionally, which raised
        # a TypeError when the layer was constructed with bias=False.
        if self.bias is not None:
            out = out + self.bias
        return out

    def extra_repr(self) -> str:
        return 'num_ensembles={}, in_features={}, out_features={}, bias={}'.format(
            self.num_ensembles, self.in_features, self.out_features, self.bias is not None
        )
class SqueezeLayer(nn.Module):
    """Module wrapper that squeezes a fixed dimension out of its input."""

    def __init__(self, dim=-1):
        super().__init__()
        self.dim = dim

    def forward(self, inputs):
        return inputs.squeeze(self.dim)
class LagrangeLayer(nn.Module):
    """A single learnable non-negative scalar (e.g. a Lagrange multiplier).

    The raw parameter is stored in pre-softplus space so that the exposed
    value softplus(log_alpha) is always positive; inverse_softplus maps the
    requested initial_value back to that raw parameterization.
    """

    def __init__(self, initial_value=0.):
        super(LagrangeLayer, self).__init__()
        # Unconstrained parameter; the name is kept from the original code
        # even though it holds an inverse-softplus value, not a log.
        self.log_alpha = nn.Parameter(data=torch.as_tensor(inverse_softplus(initial_value), dtype=torch.float32))

    def forward(self):
        # softplus keeps the returned multiplier strictly positive.
        return F.softplus(self.log_alpha)
class LambdaLayer(nn.Module):
    """Adapt an arbitrary callable into an nn.Module."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x):
        return self.function(x)
import copy
import rlutils.pytorch.utils as ptu
import torch
import torch.nn as nn
from rlutils.infra.runner import run_func_as_main, PytorchOffPolicyRunner
from rlutils.pytorch.functional import soft_update, compute_target_value, to_numpy_or_python_type
from rlutils.pytorch.nn import EnsembleMinQNet
from rlutils.pytorch.nn.functional import build_mlp
class TD3Agent(nn.Module):
    """Twin Delayed DDPG (TD3) agent for continuous control.

    Components: a deterministic tanh policy, an ensemble of Q networks whose
    minimum is used as the critic value, and target copies of both that are
    Polyak-averaged toward the online networks.
    """

    def __init__(self,
                 obs_spec,
                 act_spec,
                 num_q_ensembles=2,
                 policy_mlp_hidden=128,
                 policy_lr=3e-4,
                 q_mlp_hidden=256,
                 q_lr=3e-4,
                 tau=5e-3,
                 gamma=0.99,
                 actor_noise=0.1,
                 target_noise=0.2,
                 noise_clip=0.5
                 ):
        """
        Args:
            obs_spec: observation space spec; only flat (1-D) observations
                are supported.
            act_spec: action space spec; act_dim is taken from its shape.
            num_q_ensembles: number of Q networks in the critic ensemble.
            policy_mlp_hidden / q_mlp_hidden: hidden widths of the MLPs.
            policy_lr / q_lr: Adam learning rates for actor / critic.
            tau: Polyak averaging coefficient for the target networks.
            gamma: discount factor.
            actor_noise: std of Gaussian exploration noise.
            target_noise: std of target policy smoothing noise.
            noise_clip: clipping bound for the smoothing noise.
        """
        super(TD3Agent, self).__init__()
        self.obs_spec = obs_spec
        self.act_spec = act_spec
        self.act_dim = self.act_spec.shape[0]
        # NOTE(review): hard-coded action limit assumes actions are
        # normalized to [-1, 1] -- confirm the envs are wrapped accordingly.
        self.act_lim = 1.
        self.actor_noise = actor_noise
        self.target_noise = target_noise
        self.noise_clip = noise_clip
        self.tau = tau
        self.gamma = gamma
        if len(self.obs_spec.shape) == 1:  # 1D observation
            self.obs_dim = self.obs_spec.shape[0]
            # tanh output keeps actions inside [-act_lim, act_lim].
            self.policy_net = build_mlp(self.obs_dim, self.act_dim, mlp_hidden=policy_mlp_hidden, num_layers=3,
                                        out_activation='tanh').to(ptu.device)
            self.target_policy_net = copy.deepcopy(self.policy_net).to(ptu.device)
            self.q_network = EnsembleMinQNet(self.obs_dim, self.act_dim, q_mlp_hidden,
                                             num_ensembles=num_q_ensembles).to(ptu.device)
            self.target_q_network = copy.deepcopy(self.q_network).to(ptu.device)
        else:
            raise NotImplementedError
        self.policy_optimizer = torch.optim.Adam(params=self.policy_net.parameters(), lr=policy_lr)
        self.q_optimizer = torch.optim.Adam(params=self.q_network.parameters(), lr=q_lr)

    def set_logger(self, logger):
        # Logger is injected by the runner after construction.
        self.logger = logger

    def log_tabular(self):
        """Declare the diagnostics this agent reports each epoch."""
        self.logger.log_tabular('Q1Vals', with_min_and_max=True)
        self.logger.log_tabular('Q2Vals', with_min_and_max=True)
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossQ', average_only=True)

    def update_target(self):
        """Polyak-average online networks into their target copies."""
        soft_update(self.target_q_network, self.q_network, self.tau)
        soft_update(self.target_policy_net, self.policy_net, self.tau)

    def _compute_next_obs_q(self, next_obs):
        """Target Q value of next_obs under the smoothed target policy."""
        next_action = self.target_policy_net(next_obs)
        # Target policy smoothing: clipped Gaussian noise on the target action.
        epsilon = torch.randn_like(next_action) * self.target_noise
        epsilon = torch.clip(epsilon, -self.noise_clip, self.noise_clip)
        next_action = next_action + epsilon
        next_action = torch.clip(next_action, -self.act_lim, self.act_lim)
        next_q_value = self.target_q_network((next_obs, next_action), training=False)
        return next_q_value

    def _update_nets(self, obs, actions, next_obs, done, reward):
        """One gradient step on the Q ensemble; returns a diagnostics dict."""
        # compute target q (no gradients flow through the target networks)
        with torch.no_grad():
            next_q_value = self._compute_next_obs_q(next_obs)
            q_target = compute_target_value(reward, self.gamma, done, next_q_value)
        # q loss
        self.q_optimizer.zero_grad()
        q_values = self.q_network((obs, actions), training=True)  # (num_ensembles, None)
        q_values_loss = 0.5 * torch.square(torch.unsqueeze(q_target, dim=0) - q_values)
        # (num_ensembles, None)
        q_values_loss = torch.sum(q_values_loss, dim=0)  # (None,)
        # apply importance weights
        q_values_loss = torch.mean(q_values_loss)
        q_values_loss.backward()
        self.q_optimizer.step()
        info = dict(
            Q1Vals=q_values[0],
            Q2Vals=q_values[1],
            LossQ=q_values_loss,
        )
        return info

    def _update_actor(self, obs):
        """One gradient step on the deterministic policy (ascend on Q)."""
        # policy loss
        self.policy_optimizer.zero_grad()
        a = self.policy_net(obs)
        q = self.q_network((obs, a), training=False)
        policy_loss = -torch.mean(q, dim=0)
        policy_loss.backward()
        self.policy_optimizer.step()
        info = dict(
            LossPi=policy_loss,
        )
        return info

    def train_on_batch(self, data, **kwargs):
        """Train on one replay batch; actor/target updates are delayed and
        only run when data['update_target'] is truthy (TD3 policy delay)."""
        obs = data['obs']
        act = data['act']
        next_obs = data['next_obs']
        done = data['done']
        rew = data['rew']
        update_target = data['update_target']
        obs = torch.as_tensor(obs, dtype=torch.float32, device=ptu.device)
        act = torch.as_tensor(act, dtype=torch.float32, device=ptu.device)
        next_obs = torch.as_tensor(next_obs, dtype=torch.float32, device=ptu.device)
        done = torch.as_tensor(done, dtype=torch.float32, device=ptu.device)
        rew = torch.as_tensor(rew, dtype=torch.float32, device=ptu.device)
        info = self._update_nets(obs, act, next_obs, done, rew)
        if update_target:
            actor_info = self._update_actor(obs)
            info.update(actor_info)
            self.update_target()
        self.logger.store(**to_numpy_or_python_type(info))

    def act_batch_test(self, obs):
        """Deterministic (noise-free) actions for evaluation."""
        obs = torch.as_tensor(obs, dtype=torch.float32, device=ptu.device)
        with torch.no_grad():
            return self.policy_net(obs).cpu().numpy()

    def act_batch_explore(self, obs):
        """Actions with Gaussian exploration noise, clipped to the act limit."""
        obs = torch.as_tensor(obs, dtype=torch.float32, device=ptu.device)
        with torch.no_grad():
            pi_final = self.policy_net(obs)
            noise = torch.randn_like(pi_final) * self.actor_noise
            pi_final = pi_final + noise
            pi_final = torch.clip(pi_final, -self.act_lim, self.act_lim)
        return pi_final.cpu().numpy()
class Runner(PytorchOffPolicyRunner):
    """Off-policy training runner preconfigured for the TD3 agent."""

    @classmethod
    def main(cls,
             env_name,
             epochs=200,
             policy_mlp_hidden=256,
             policy_lr=1e-3,
             q_mlp_hidden=256,
             q_lr=1e-3,
             actor_noise=0.1,
             target_noise=0.2,
             noise_clip=0.5,
             tau=5e-3,
             gamma=0.99,
             seed=1,
             logger_path: str = None,
             **kwargs
             ):
        """Entry point: bundle the TD3 hyperparameters and launch training.

        Policy/target updates are delayed by a factor of 2 (policy_delay=2),
        the delayed-update schedule TD3 is named for.
        """
        agent_kwargs = dict(
            policy_mlp_hidden=policy_mlp_hidden,
            policy_lr=policy_lr,
            q_mlp_hidden=q_mlp_hidden,
            q_lr=q_lr,
            tau=tau,
            gamma=gamma,
            actor_noise=actor_noise,
            target_noise=target_noise,
            noise_clip=noise_clip
        )
        super(Runner, cls).main(env_name=env_name,
                                epochs=epochs,
                                policy_delay=2,
                                agent_cls=TD3Agent,
                                agent_kwargs=agent_kwargs,
                                seed=seed,
                                logger_path=logger_path,
                                **kwargs)
if __name__ == '__main__':
ptu.set_device('cuda')
run_func_as_main(Runner.main) | /rlutils-python-0.0.3.tar.gz/rlutils-python-0.0.3/rlutils/algos/pytorch/mf/td3.py | 0.882504 | 0.36727 | td3.py | pypi |
import copy
import rlutils.pytorch as rlu
import rlutils.pytorch.utils as ptu
import torch
from rlutils.infra.runner import PytorchOffPolicyRunner, run_func_as_main
from torch import nn
class SACAgent(nn.Module):
    """Soft Actor-Critic (SAC) agent in PyTorch.

    Owns a squashed-Gaussian policy, an ensemble Q-network plus a target copy
    updated by polyak averaging, and a learnable entropy temperature (alpha).
    Each component has its own Adam optimizer. Only 1-D observation spaces
    are supported.
    """

    def __init__(self,
                 obs_spec,
                 act_spec,
                 num_ensembles=2,
                 policy_mlp_hidden=128,
                 policy_lr=3e-4,
                 q_mlp_hidden=256,
                 q_lr=3e-4,
                 alpha=1.0,
                 alpha_lr=1e-3,
                 tau=5e-3,
                 gamma=0.99,
                 target_entropy=None,
                 ):
        """
        Args:
            obs_spec: observation space spec; must have a 1-D shape.
            act_spec: action space spec; act_dim is read from its shape.
            num_ensembles: number of Q networks in the ensemble.
            policy_mlp_hidden: hidden layer size of the policy MLP.
            policy_lr: learning rate of the policy optimizer.
            q_mlp_hidden: hidden layer size of the Q MLP.
            q_lr: learning rate of the Q optimizer.
            alpha: initial entropy temperature.
            alpha_lr: learning rate of the temperature optimizer.
            tau: polyak averaging coefficient for target updates.
            gamma: discount factor.
            target_entropy: entropy target; defaults to -act_dim when None.

        Raises:
            NotImplementedError: if the observation space is not 1-D.
        """
        super(SACAgent, self).__init__()
        self.obs_spec = obs_spec
        self.act_spec = act_spec
        self.act_dim = self.act_spec.shape[0]
        if len(self.obs_spec.shape) == 1:  # 1D observation
            self.obs_dim = self.obs_spec.shape[0]
            self.policy_net = rlu.nn.SquashedGaussianMLPActor(self.obs_dim, self.act_dim, policy_mlp_hidden)
            self.q_network = rlu.nn.EnsembleMinQNet(self.obs_dim, self.act_dim, q_mlp_hidden,
                                                    num_ensembles=num_ensembles)
            # Target network starts as an exact copy; updated via soft_update below.
            self.target_q_network = copy.deepcopy(self.q_network)
        else:
            raise NotImplementedError
        self.alpha_net = rlu.nn.LagrangeLayer(initial_value=alpha)
        self.policy_optimizer = torch.optim.Adam(params=self.policy_net.parameters(), lr=policy_lr)
        self.q_optimizer = torch.optim.Adam(params=self.q_network.parameters(), lr=q_lr)
        self.alpha_optimizer = torch.optim.Adam(params=self.alpha_net.parameters(), lr=alpha_lr)
        # Standard SAC heuristic: target entropy = -|A| unless overridden.
        self.target_entropy = -self.act_dim if target_entropy is None else target_entropy
        self.tau = tau
        self.gamma = gamma
        self.to(ptu.device)

    def set_logger(self, logger):
        # Logger is injected by the runner after construction.
        self.logger = logger

    def log_tabular(self):
        """Declare the statistics this agent reports each epoch."""
        # NOTE(review): assumes num_ensembles >= 2, matching Q1Vals/Q2Vals
        # recorded in _update_nets -- confirm when changing num_ensembles.
        self.logger.log_tabular('Q1Vals', with_min_and_max=True)
        self.logger.log_tabular('Q2Vals', with_min_and_max=True)
        self.logger.log_tabular('LogPi', average_only=True)
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossQ', average_only=True)
        self.logger.log_tabular('Alpha', average_only=True)
        self.logger.log_tabular('LossAlpha', average_only=True)

    def update_target(self):
        # Polyak-average online Q weights into the target Q network.
        rlu.functional.soft_update(self.target_q_network, self.q_network, self.tau)

    def _update_nets(self, obs, act, next_obs, done, rew):
        """ Sample a mini-batch from replay buffer and update the network

        Args:
            obs: (batch_size, ob_dim)
            act: (batch_size, act_dim)
            next_obs: (batch_size, ob_dim)
            done: (batch_size,)
            rew: (batch_size,)

        Returns: dict of diagnostic tensors (Q values, log-probs, losses, alpha)
        """
        with torch.no_grad():
            alpha = self.alpha_net()
            # policy_net((obs, deterministic_flag)) -> (action, log_prob, _, _)
            next_action, next_action_log_prob, _, _ = self.policy_net((next_obs, False))
            # training=False presumably returns the ensemble-min Q -- confirm in rlu.nn.
            target_q_values = self.target_q_network((next_obs, next_action),
                                                    training=False) - alpha * next_action_log_prob
            q_target = rew + self.gamma * (1.0 - done) * target_q_values
        # q loss
        q_values = self.q_network((obs, act), training=True)  # (num_ensembles, None)
        q_values_loss = 0.5 * torch.square(torch.unsqueeze(q_target, dim=0) - q_values)
        # (num_ensembles, None)
        q_values_loss = torch.sum(q_values_loss, dim=0)  # (None,)
        # apply importance weights
        q_values_loss = torch.mean(q_values_loss)
        self.q_optimizer.zero_grad()
        q_values_loss.backward()
        self.q_optimizer.step()
        # policy loss: maximize E[min Q - alpha * log pi]
        action, log_prob, _, _ = self.policy_net((obs, False))
        q_values_pi_min = self.q_network((obs, action), training=False)
        policy_loss = torch.mean(log_prob * alpha - q_values_pi_min)
        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()
        # temperature loss: drive E[log pi] toward -target_entropy
        alpha = self.alpha_net()
        alpha_loss = -torch.mean(alpha * (log_prob.detach() + self.target_entropy))
        self.alpha_optimizer.zero_grad()
        alpha_loss.backward()
        self.alpha_optimizer.step()
        info = dict(
            Q1Vals=q_values[0],
            Q2Vals=q_values[1],
            LogPi=log_prob,
            Alpha=alpha,
            LossQ=q_values_loss,
            LossAlpha=alpha_loss,
            LossPi=policy_loss,
        )
        return info

    def train_on_batch(self, data, **kwargs):
        """Run one update on a replay batch; `data` must contain 'update_target'."""
        update_target = data.pop('update_target')
        data = {key: torch.as_tensor(value, device=ptu.device) for key, value in data.items()}
        info = self._update_nets(**data)
        # Detach diagnostics to numpy before handing them to the logger.
        for key, item in info.items():
            info[key] = item.detach().cpu().numpy()
        self.logger.store(**info)
        if update_target:
            self.update_target()

    def act_batch_torch(self, obs, deterministic):
        """Select actions for a batch of observation tensors (no gradients)."""
        with torch.no_grad():
            pi_final = self.policy_net.select_action((obs, deterministic))
            return pi_final

    def act_batch_explore(self, obs):
        # Stochastic actions for environment interaction (numpy in / numpy out).
        obs = torch.as_tensor(obs, device=ptu.device)
        return self.act_batch_torch(obs, deterministic=False).cpu().numpy()

    def act_batch_test(self, obs):
        # Deterministic actions for evaluation (numpy in / numpy out).
        obs = torch.as_tensor(obs, device=ptu.device)
        return self.act_batch_torch(obs, deterministic=True).cpu().numpy()
class Runner(PytorchOffPolicyRunner):
    """Entry point that launches SAC training through the off-policy runner."""

    @classmethod
    def main(cls,
             env_name,
             epochs=100,
             # sac args
             policy_mlp_hidden=256,
             policy_lr=3e-4,
             q_mlp_hidden=256,
             q_lr=3e-4,
             alpha=0.2,
             tau=5e-3,
             gamma=0.99,
             seed=1,
             logger_path: str = None,
             **kwargs
             ):
        """Bundle SAC hyperparameters and delegate to the base runner."""
        # The temperature optimizer reuses the Q learning rate, and the
        # entropy target is left to the agent's default (-act_dim).
        sac_kwargs = {
            'policy_mlp_hidden': policy_mlp_hidden,
            'policy_lr': policy_lr,
            'q_mlp_hidden': q_mlp_hidden,
            'q_lr': q_lr,
            'alpha': alpha,
            'alpha_lr': q_lr,
            'tau': tau,
            'gamma': gamma,
            'target_entropy': None,
        }
        super(Runner, cls).main(
            env_name=env_name,
            epochs=epochs,
            agent_cls=SACAgent,
            agent_kwargs=sac_kwargs,
            policy_delay=1,
            seed=seed,
            logger_path=logger_path,
            **kwargs
        )
if __name__ == '__main__':
    # Fixed: removed dataset-dump residue that was fused onto this line.
    run_func_as_main(Runner.main)
import numpy as np
import rlutils.tf as rlu
import tensorflow as tf
from rlutils.infra.runner import TFOnPolicyRunner
class PPOAgent(tf.keras.Model):
    """Proximal Policy Optimization (clip variant) agent in TensorFlow."""

    def __init__(self, obs_spec, act_spec, mlp_hidden=64,
                 pi_lr=1e-3, vf_lr=1e-3, clip_ratio=0.2,
                 entropy_coef=0.001, target_kl=0.05,
                 train_pi_iters=80, train_vf_iters=80
                 ):
        """
        Args:
            obs_spec: observation space spec with a 1-D shape.
            act_spec: action space spec; an integer dtype selects a
                categorical policy, otherwise a diagonal-Gaussian policy.
            mlp_hidden: hidden layer size for the policy and value MLPs.
            pi_lr: policy learning rate.
            vf_lr: value-function learning rate.
            clip_ratio: PPO clipping parameter (epsilon).
            entropy_coef: entropy bonus coefficient.
            target_kl: early-stopping threshold on the mean approximate KL.
            train_pi_iters: max policy gradient steps per batch.
            train_vf_iters: value-function gradient steps per batch.
        """
        super(PPOAgent, self).__init__()
        obs_dim = obs_spec.shape[0]
        if act_spec.dtype == np.int32 or act_spec.dtype == np.int64:
            self.policy_net = rlu.nn.CategoricalActor(obs_dim=obs_dim, act_dim=act_spec.n, mlp_hidden=mlp_hidden)
        else:
            self.policy_net = rlu.nn.NormalActor(obs_dim=obs_dim, act_dim=act_spec.shape[0], mlp_hidden=mlp_hidden)
        self.pi_optimizer = tf.keras.optimizers.Adam(learning_rate=pi_lr)
        self.v_optimizer = tf.keras.optimizers.Adam(learning_rate=vf_lr)
        self.value_net = rlu.nn.build_mlp(input_dim=obs_dim, output_dim=1, squeeze=True, mlp_hidden=mlp_hidden)
        # Value net is trained via Keras train_on_batch with an MSE loss.
        self.value_net.compile(optimizer=self.v_optimizer, loss='mse')
        self.target_kl = target_kl
        self.clip_ratio = clip_ratio
        self.entropy_coef = entropy_coef
        self.train_pi_iters = train_pi_iters
        self.train_vf_iters = train_vf_iters
        self.logger = None

    def set_logger(self, logger):
        # Logger is injected by the runner after construction.
        self.logger = logger

    def log_tabular(self):
        """Declare the statistics this agent reports each epoch."""
        self.logger.log_tabular('PolicyLoss', average_only=True)
        self.logger.log_tabular('ValueLoss', average_only=True)
        self.logger.log_tabular('Entropy', average_only=True)
        self.logger.log_tabular('AvgKL', average_only=True)
        self.logger.log_tabular('StopIter', average_only=True)

    def get_pi_distribution(self, obs, deterministic=tf.convert_to_tensor(False)):
        # policy_net((obs, deterministic)) returns a tuple whose last element
        # is the action distribution object.
        return self.policy_net((obs, deterministic))[-1]

    def call(self, inputs, training=None, mask=None):
        """Keras forward pass: sample one action per observation."""
        pi_distribution = self.get_pi_distribution(inputs)
        pi_action = pi_distribution.sample()
        return pi_action

    @tf.function
    def act_batch_tf(self, obs):
        """Return (action, log_prob, value) tensors for a batch of observations."""
        pi_distribution = self.get_pi_distribution(obs)
        pi_action = pi_distribution.sample()
        log_prob = pi_distribution.log_prob(pi_action)
        v = self.value_net(obs)
        return pi_action, log_prob, v

    def act_batch(self, obs):
        # Numpy-facing wrapper around act_batch_tf.
        pi_action, log_prob, v = self.act_batch_tf(tf.convert_to_tensor(obs))
        return pi_action.numpy(), log_prob.numpy(), v.numpy()

    @tf.function
    def _update_policy_step(self, obs, act, adv, old_log_prob):
        """One clipped-surrogate policy gradient step; returns diagnostics."""
        # The print only fires during tf.function tracing, not per call.
        print(f'Tracing _update_policy_step with obs={obs}')
        with tf.GradientTape() as tape:
            distribution = self.get_pi_distribution(obs)
            entropy = tf.reduce_mean(distribution.entropy())
            log_prob = distribution.log_prob(act)
            negative_approx_kl = log_prob - old_log_prob
            approx_kl_mean = tf.reduce_mean(-negative_approx_kl)
            ratio = tf.exp(negative_approx_kl)
            surr1 = ratio * adv
            surr2 = tf.clip_by_value(ratio, 1.0 - self.clip_ratio, 1.0 + self.clip_ratio) * adv
            # PPO-clip objective plus an entropy bonus.
            policy_loss = -tf.reduce_mean(tf.minimum(surr1, surr2))
            loss = policy_loss - entropy * self.entropy_coef
        gradients = tape.gradient(loss, self.policy_net.trainable_variables)
        self.pi_optimizer.apply_gradients(zip(gradients, self.policy_net.trainable_variables))
        info = dict(
            PolicyLoss=policy_loss,
            Entropy=entropy,
            AvgKL=approx_kl_mean,
        )
        return info

    def train_on_batch(self, obs, act, ret, adv, logp):
        """Update the policy (with KL early stopping) and value net on one batch.

        NOTE(review): assumes train_pi_iters >= 1 and train_vf_iters >= 1;
        otherwise `info`, `i`, and `loss` below would be unbound -- confirm.
        """
        for i in range(self.train_pi_iters):
            info = self._update_policy_step(obs, act, adv, logp)
            if info['AvgKL'] > 1.5 * self.target_kl:
                self.logger.log('Early stopping at step %d due to reaching max kl.' % i)
                break
        self.logger.store(StopIter=i)
        for i in range(self.train_vf_iters):
            loss = self.value_net.train_on_batch(x=obs, y=ret)
        # only record the final result
        info['ValueLoss'] = loss
        self.logger.store(**rlu.functional.to_numpy_or_python_type(info))
class Runner(TFOnPolicyRunner):
    """Entry point that launches PPO training through the on-policy runner."""

    @classmethod
    def main(cls, env_name, mlp_hidden=256, clip_ratio=0.2, pi_lr=3e-4, vf_lr=1e-3,
             train_pi_iters=80, train_vf_iters=80,
             target_kl=0.05, entropy_coef=1e-3, **kwargs):
        """Bundle PPOAgent hyperparameters and delegate to TFOnPolicyRunner.main."""
        agent_kwargs = dict(
            mlp_hidden=mlp_hidden,
            pi_lr=pi_lr, vf_lr=vf_lr, clip_ratio=clip_ratio,
            entropy_coef=entropy_coef, target_kl=target_kl,
            train_pi_iters=train_pi_iters, train_vf_iters=train_vf_iters
        )
        # Fixed: removed dataset-dump residue that was fused onto the closing line.
        super(Runner, cls).main(
            env_name=env_name,
            agent_cls=PPOAgent,
            agent_kwargs=agent_kwargs,
            **kwargs
        )
import numpy as np
import rlutils.tf as rlu
import tensorflow as tf
import tensorflow_probability as tfp
from rlutils.infra.runner import TFOnPolicyRunner
class TRPOAgent(tf.keras.Model):
    """Natural policy gradient / TRPO agent in TensorFlow.

    The policy step solves a KL-constrained surrogate objective with
    conjugate gradients; `algo='trpo'` adds a backtracking line search on
    top of the plain natural-gradient step used by `algo='npg'`.
    """

    def __init__(self, obs_spec, act_spec, mlp_hidden=64,
                 delta=0.01, vf_lr=1e-3, damping_coeff=0.1, cg_iters=10, backtrack_iters=10,
                 backtrack_coeff=0.8, train_vf_iters=80, algo='npg'
                 ):
        """
        Args:
            obs_spec: observation space spec with a 1-D shape.
            act_spec: action space spec; an integer dtype selects a
                categorical policy, otherwise a diagonal-Gaussian policy.
            mlp_hidden: hidden layer size for the policy and value MLPs.
            delta: trust-region size (bound on the mean KL divergence).
            vf_lr: value-function learning rate.
            damping_coeff: damping added to the Hessian-vector product.
            cg_iters: number of conjugate-gradient iterations.
            backtrack_iters: max line-search steps (trpo only).
            backtrack_coeff: line-search shrink factor (trpo only).
            train_vf_iters: value-function gradient steps per batch.
            algo: 'npg' or 'trpo'.
        """
        super(TRPOAgent, self).__init__()
        obs_dim = obs_spec.shape[0]
        if act_spec.dtype == np.int32 or act_spec.dtype == np.int64:
            self.policy_net = rlu.nn.CategoricalActor(obs_dim=obs_dim, act_dim=act_spec.n, mlp_hidden=mlp_hidden)
        else:
            self.policy_net = rlu.nn.NormalActor(obs_dim=obs_dim, act_dim=act_spec.shape[0], mlp_hidden=mlp_hidden)
        self.v_optimizer = tf.keras.optimizers.Adam(learning_rate=vf_lr)
        self.value_net = rlu.nn.build_mlp(input_dim=obs_dim, output_dim=1, squeeze=True, mlp_hidden=mlp_hidden)
        # Value net is trained via Keras train_on_batch with an MSE loss.
        self.value_net.compile(optimizer=self.v_optimizer, loss='mse')
        self.delta = delta
        self.damping_coeff = damping_coeff
        self.cg_iters = cg_iters
        self.backtrack_iters = backtrack_iters
        self.backtrack_coeff = backtrack_coeff
        self.train_vf_iters = train_vf_iters
        self.algo = algo
        self.logger = None

    def set_logger(self, logger):
        # Logger is injected by the runner after construction.
        self.logger = logger

    def log_tabular(self):
        """Declare the statistics this agent reports each epoch."""
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossV', average_only=True)
        self.logger.log_tabular('KL', average_only=True)
        self.logger.log_tabular('DeltaLossPi', average_only=True)
        self.logger.log_tabular('DeltaLossV', average_only=True)
        self.logger.log_tabular('BacktrackIters', average_only=True)

    def get_pi_distribution(self, obs, deterministic=tf.convert_to_tensor(False)):
        # policy_net((obs, deterministic)) returns a tuple whose last element
        # is the action distribution object.
        return self.policy_net((obs, deterministic))[-1]

    def call(self, inputs, training=None, mask=None):
        """Keras forward pass: sample one action per observation."""
        pi_distribution = self.get_pi_distribution(inputs)
        pi_action = pi_distribution.sample()
        return pi_action

    @tf.function
    def act_batch_tf(self, obs):
        """Return (action, log_prob, value) tensors for a batch of observations."""
        pi_distribution = self.get_pi_distribution(obs)
        pi_action = pi_distribution.sample()
        log_prob = pi_distribution.log_prob(pi_action)
        v = self.value_net(obs)
        return pi_action, log_prob, v

    def act_batch(self, obs):
        # Numpy-facing wrapper around act_batch_tf.
        pi_action, log_prob, v = self.act_batch_tf(tf.convert_to_tensor(obs))
        return pi_action.numpy(), log_prob.numpy(), v.numpy()

    def _compute_kl(self, obs, old_pi):
        """Mean KL(current policy || old_pi) over the batch."""
        pi = self.get_pi_distribution(obs)
        kl_loss = tfp.distributions.kl_divergence(pi, old_pi)
        kl_loss = tf.reduce_mean(kl_loss)
        return kl_loss

    def _compute_loss_pi(self, obs, act, logp, adv):
        """Importance-sampled surrogate policy loss (no clipping)."""
        distribution = self.get_pi_distribution(obs)
        log_prob = distribution.log_prob(act)
        negative_approx_kl = log_prob - logp
        ratio = tf.exp(negative_approx_kl)
        surr1 = ratio * adv
        policy_loss = -tf.reduce_mean(surr1, axis=0)
        return policy_loss

    def _compute_gradient(self, obs, act, logp, adv):
        """Return (flattened policy gradient, surrogate loss)."""
        # compute pi gradients
        with tf.GradientTape() as tape:
            policy_loss = self._compute_loss_pi(obs, act, logp, adv)
        grads = tape.gradient(policy_loss, self.policy_net.trainable_variables)
        grads = rlu.functional.flat_vars(grads)
        # flat grads
        return grads, policy_loss

    def _hessian_vector_product(self, obs, p):
        """Damped Fisher/Hessian-vector product H @ p via double backprop on the KL."""
        # compute Hx
        old_pi = self.get_pi_distribution(obs)
        with tf.GradientTape() as t2:
            with tf.GradientTape() as t1:
                kl = self._compute_kl(obs, old_pi)
            inner_grads = t1.gradient(kl, self.policy_net.trainable_variables)
            # flat gradients
            inner_grads = rlu.functional.flat_vars(inner_grads)
            kl_v = tf.reduce_sum(inner_grads * p)
        grads = t2.gradient(kl_v, self.policy_net.trainable_variables)
        grads = rlu.functional.flat_vars(grads)
        _Avp = grads + p * self.damping_coeff
        return _Avp

    @tf.function
    def _conjugate_gradients(self, obs, b, nsteps, residual_tol=1e-10):
        # TODO: replace with tf.linalg.experimental.conjugate_gradient
        """Approximately solve H x = b with the conjugate-gradient method.

        The matrix H is never formed; _hessian_vector_product supplies H @ p.

        Args:
            obs: observation batch defining the KL curvature.
            b: flat right-hand-side vector (the policy gradient).
            nsteps: max number of CG iterations.
            residual_tol: early-exit threshold on the squared residual.

        Returns:
            Flat vector x approximating H^{-1} b.
        """
        print(f'Tracing _conjugate_gradients b={b}, nsteps={nsteps}')
        x = tf.zeros_like(b)
        r = tf.identity(b)
        p = tf.identity(b)
        rdotr = tf.tensordot(r, r, axes=1)
        for _ in tf.range(nsteps):
            _Avp = self._hessian_vector_product(obs, p)
            # compute conjugate gradient
            alpha = rdotr / tf.tensordot(p, _Avp, axes=1)
            x += alpha * p
            r -= alpha * _Avp
            new_rdotr = tf.tensordot(r, r, axes=1)
            betta = new_rdotr / rdotr
            p = r + betta * p
            rdotr = new_rdotr
            if rdotr < residual_tol:
                break
        return x

    def _compute_natural_gradient(self, obs, act, logp, adv):
        """Return (scaled natural-gradient step, surrogate loss before the step)."""
        print(f'Tracing _compute_natural_gradient with obs={obs}, act={act}, logp={logp}, adv={adv}')
        grads, policy_loss = self._compute_gradient(obs, act, logp, adv)
        x = self._conjugate_gradients(obs, grads, self.cg_iters)
        # Step size chosen so the quadratic KL approximation equals delta.
        alpha = tf.sqrt(2. * self.delta / (tf.tensordot(x, self._hessian_vector_product(obs, x),
                                                        axes=1) + 1e-8))
        return alpha * x, policy_loss

    def _set_and_eval(self, obs, act, logp, adv, old_params, old_pi, natural_gradient, step):
        """Write `old_params - step * natural_gradient` into the policy and
        return (KL to old_pi, surrogate loss) at those parameters."""
        new_params = old_params - natural_gradient * step
        rlu.functional.set_flat_trainable_variables(self.policy_net, new_params)
        loss_pi = self._compute_loss_pi(obs, act, logp, adv)
        kl_loss = self._compute_kl(obs, old_pi)
        return kl_loss, loss_pi

    @tf.function
    def _update_actor(self, obs, act, adv):
        """One natural-gradient (npg) or line-searched (trpo) policy update."""
        print(f'Tracing _update_actor with obs={obs}, act={act}, adv={adv}')
        old_params = rlu.functional.flat_vars(self.policy_net.trainable_variables)
        old_pi = self.get_pi_distribution(obs)
        logp = old_pi.log_prob(act)
        natural_gradient, pi_l_old = self._compute_natural_gradient(obs, act, logp, adv)
        if self.algo == 'npg':
            # npg has no backtracking or hard kl constraint enforcement
            kl, pi_l_new = self._set_and_eval(obs, act, logp, adv, old_params, old_pi,
                                              natural_gradient, step=1.)
            j = tf.constant(value=0, dtype=tf.int32)
        elif self.algo == 'trpo':
            # trpo augments npg with backtracking line search, hard kl
            pi_l_new = tf.zeros(shape=(), dtype=tf.float32)
            kl = tf.zeros(shape=(), dtype=tf.float32)
            for j in tf.range(self.backtrack_iters):
                steps = tf.pow(self.backtrack_coeff, tf.cast(j, dtype=tf.float32))
                kl, pi_l_new = self._set_and_eval(obs, act, logp, adv, old_params, old_pi,
                                                  natural_gradient, step=steps)
                if kl <= self.delta and pi_l_new <= pi_l_old:
                    tf.print('Accepting new params at step', j, 'of line search.')
                    break
                if j == self.backtrack_iters - 1:
                    tf.print('Line search failed! Keeping old params.')
                    kl, pi_l_new = self._set_and_eval(obs, act, logp, adv, old_params, old_pi,
                                                      natural_gradient, step=0.)
        info = dict(
            LossPi=pi_l_old, KL=kl,
            DeltaLossPi=(pi_l_new - pi_l_old),
            BacktrackIters=j
        )
        return info

    def train_on_batch(self, obs, act, ret, adv, logp):
        """Update the policy once and the value function train_vf_iters times.

        NOTE(review): assumes train_vf_iters >= 1, otherwise `loss_v` is unbound.
        The `logp` argument is unused here; log-probs are recomputed from the
        current policy inside _update_actor.
        """
        info = self._update_actor(obs, act, adv)
        # train the value network
        v_l_old = self.value_net.evaluate(x=obs, y=ret, verbose=False)
        for i in range(self.train_vf_iters):
            loss_v = self.value_net.train_on_batch(x=obs, y=ret)
        info['LossV'] = v_l_old
        info['DeltaLossV'] = loss_v - v_l_old
        # Log changes from update
        self.logger.store(**rlu.functional.to_numpy_or_python_type(info))
class Runner(TFOnPolicyRunner):
    """Entry point that launches TRPO/NPG training through the on-policy runner."""

    @classmethod
    def main(cls, env_name, mlp_hidden=128, delta=0.01, vf_lr=1e-3,
             train_vf_iters=80, damping_coeff=0.1, cg_iters=10, backtrack_iters=10,
             backtrack_coeff=0.8, algo='trpo', **kwargs):
        """Bundle TRPOAgent hyperparameters and delegate to TFOnPolicyRunner.main."""
        agent_kwargs = dict(
            mlp_hidden=mlp_hidden,
            delta=delta,
            vf_lr=vf_lr,
            train_vf_iters=train_vf_iters,
            damping_coeff=damping_coeff,
            cg_iters=cg_iters,
            backtrack_iters=backtrack_iters,
            backtrack_coeff=backtrack_coeff,
            algo=algo,
        )
        # Fixed: removed dataset-dump residue that was fused onto the closing line.
        super(Runner, cls).main(
            env_name=env_name,
            agent_cls=TRPOAgent,
            agent_kwargs=agent_kwargs,
            **kwargs
        )
import rlutils.tf as rlu
import tensorflow as tf
from rlutils.infra.runner import TFOffPolicyRunner
class TD3Agent(tf.keras.Model):
    """Twin Delayed DDPG (TD3) agent in TensorFlow.

    Deterministic policy with target-policy smoothing, an ensemble Q-network
    with a target copy, and delayed actor updates (driven by the caller via
    the 'update_target' flag in train_on_batch).
    """

    def __init__(self,
                 obs_spec,
                 act_spec,
                 num_q_ensembles=2,
                 policy_mlp_hidden=256,
                 policy_lr=3e-4,
                 q_mlp_hidden=256,
                 q_lr=3e-4,
                 tau=5e-3,
                 gamma=0.99,
                 actor_noise=0.1,
                 target_noise=0.2,
                 noise_clip=0.5,
                 out_activation='tanh'
                 ):
        """
        Args:
            obs_spec: observation space spec; must have a 1-D shape.
            act_spec: action space spec; act_lim is taken from high[0],
                so a symmetric box [-act_lim, act_lim] is assumed.
            num_q_ensembles: number of Q networks in the ensemble.
            policy_mlp_hidden: hidden layer size of the policy MLP.
            policy_lr: policy learning rate.
            q_mlp_hidden: hidden layer size of the Q MLP.
            q_lr: Q-network learning rate.
            tau: polyak averaging coefficient for target updates.
            gamma: discount factor.
            actor_noise: exploration noise std, as a fraction of act_lim.
            target_noise: target-smoothing noise std, as a fraction of act_lim.
            noise_clip: clip bound for the smoothing noise, as a fraction of act_lim.
            out_activation: 'tanh' or 'sin' output activation of the actor.

        Raises:
            ValueError: for an unknown out_activation.
            NotImplementedError: if the observation space is not 1-D.
        """
        super(TD3Agent, self).__init__()
        self.obs_spec = obs_spec
        self.act_spec = act_spec
        self.act_dim = self.act_spec.shape[0]
        self.act_lim = act_spec.high[0]
        # Noise hyperparameters are specified relative to the action limit.
        self.actor_noise = actor_noise * self.act_lim
        self.target_noise = target_noise * self.act_lim
        self.noise_clip = noise_clip * self.act_lim
        self.tau = tau
        self.gamma = gamma
        if out_activation == 'sin':
            out_activation = tf.sin
        elif out_activation == 'tanh':
            out_activation = tf.tanh
        else:
            raise ValueError('Unknown output activation function')
        if len(self.obs_spec.shape) == 1:  # 1D observation
            self.obs_dim = self.obs_spec.shape[0]
            self.policy_net = rlu.nn.DeterministicMLPActor(ob_dim=self.obs_dim, ac_dim=self.act_dim,
                                                           mlp_hidden=policy_mlp_hidden,
                                                           out_activation=out_activation)
            self.target_policy_net = rlu.nn.DeterministicMLPActor(ob_dim=self.obs_dim, ac_dim=self.act_dim,
                                                                  mlp_hidden=policy_mlp_hidden,
                                                                  out_activation=out_activation)
            # Targets start as exact copies of the online networks.
            rlu.functional.hard_update(self.target_policy_net, self.policy_net)
            self.q_network = rlu.nn.EnsembleMinQNet(self.obs_dim, self.act_dim, q_mlp_hidden,
                                                    num_ensembles=num_q_ensembles)
            self.target_q_network = rlu.nn.EnsembleMinQNet(self.obs_dim, self.act_dim, q_mlp_hidden,
                                                           num_ensembles=num_q_ensembles)
            rlu.functional.hard_update(self.target_q_network, self.q_network)
        else:
            raise NotImplementedError
        # NOTE(review): `lr` is a deprecated alias of `learning_rate` in
        # tf.keras optimizers; consider migrating.
        self.policy_optimizer = tf.keras.optimizers.Adam(lr=policy_lr)
        self.q_optimizer = tf.keras.optimizers.Adam(lr=q_lr)

    def set_logger(self, logger):
        # Logger is injected by the runner after construction.
        self.logger = logger

    def log_tabular(self):
        """Declare the statistics this agent reports each epoch."""
        for i in range(self.q_network.num_ensembles):
            self.logger.log_tabular(f'Q{i + 1}Vals', with_min_and_max=True)
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossQ', average_only=True)

    @tf.function
    def update_target_q(self):
        # Polyak-average online Q weights into the target Q network.
        rlu.functional.soft_update(self.target_q_network, self.q_network, self.tau)

    @tf.function
    def update_target_policy(self):
        # Polyak-average online policy weights into the target policy.
        rlu.functional.soft_update(self.target_policy_net, self.policy_net, self.tau)

    def _compute_next_obs_q(self, next_obs):
        """Target Q value of next_obs under the smoothed target policy."""
        next_action = self.target_policy_net(next_obs)
        # Target policy smoothing
        if self.target_noise > 0.:
            epsilon = tf.random.normal(shape=[tf.shape(next_obs)[0], self.act_dim]) * self.target_noise
            epsilon = tf.clip_by_value(epsilon, -self.noise_clip, self.noise_clip)
            next_action = next_action + epsilon
            next_action = tf.clip_by_value(next_action, -self.act_lim, self.act_lim)
        # Third tuple element True presumably selects the ensemble-min Q -- confirm in rlu.nn.
        next_q_value = self.target_q_network((next_obs, next_action, tf.constant(True)))
        return next_q_value

    @tf.function
    def _update_q_nets(self, obs, act, next_obs, done, rew):
        """One Q-network update step; also polyak-updates the target Q net."""
        print(f'Tracing _update_nets with obs={obs}, actions={act}')
        # compute target q
        next_q_value = self._compute_next_obs_q(next_obs)
        q_target = rlu.functional.compute_target_value(rew, self.gamma, done, next_q_value)
        # q loss
        with tf.GradientTape() as q_tape:
            q_tape.watch(self.q_network.trainable_variables)
            q_values = self.q_network((obs, act, tf.constant(False)))  # (num_ensembles, None)
            q_values_loss = 0.5 * tf.square(tf.expand_dims(q_target, axis=0) - q_values)
            # (num_ensembles, None)
            q_values_loss = tf.reduce_sum(q_values_loss, axis=0)  # (None,)
            # apply importance weights
            q_values_loss = tf.reduce_mean(q_values_loss)
        q_gradients = q_tape.gradient(q_values_loss, self.q_network.trainable_variables)
        self.q_optimizer.apply_gradients(zip(q_gradients, self.q_network.trainable_variables))
        self.update_target_q()
        info = dict(
            LossQ=q_values_loss,
        )
        for i in range(self.q_network.num_ensembles):
            info[f'Q{i + 1}Vals'] = q_values[i]
        return info

    @tf.function
    def _update_actor(self, obs):
        """One deterministic policy gradient step; also updates the target policy."""
        print(f'Tracing _update_actor with obs={obs}')
        # policy loss: maximize the (min-ensemble) Q of the policy's actions
        with tf.GradientTape() as policy_tape:
            policy_tape.watch(self.policy_net.trainable_variables)
            a = self.policy_net(obs)
            q = self.q_network((obs, a, tf.constant(True)))
            policy_loss = -tf.reduce_mean(q, axis=0)
        policy_gradients = policy_tape.gradient(policy_loss, self.policy_net.trainable_variables)
        self.policy_optimizer.apply_gradients(zip(policy_gradients, self.policy_net.trainable_variables))
        self.update_target_policy()
        info = dict(
            LossPi=policy_loss,
        )
        return info

    def train_on_batch(self, data, **kwargs):
        """Update Q nets every call; update the actor only when
        data['update_target'] is truthy (delayed policy updates)."""
        update_target = data.pop('update_target')
        obs = data['obs']
        info = self._update_q_nets(**data)
        if update_target:
            actor_info = self._update_actor(obs)
            info.update(actor_info)
        self.logger.store(**rlu.functional.to_numpy_or_python_type(info))

    @tf.function
    def act_batch_test_tf(self, obs):
        # Deterministic policy output for evaluation.
        return self.policy_net(obs)

    @tf.function
    def act_batch_explore_tf(self, obs):
        """Policy output plus clipped Gaussian exploration noise."""
        print('Tracing act_batch_explore')
        pi_final = self.policy_net(obs)
        noise = tf.random.normal(shape=[tf.shape(obs)[0], self.act_dim], dtype=tf.float32) * self.actor_noise
        pi_final_noise = pi_final + noise
        pi_final_noise = tf.clip_by_value(pi_final_noise, -self.act_lim, self.act_lim)
        return pi_final_noise

    def act_batch_test(self, obs):
        # Numpy-facing wrapper for evaluation actions.
        return self.act_batch_test_tf(tf.convert_to_tensor(obs)).numpy()

    def act_batch_explore(self, obs):
        # Numpy-facing wrapper for exploration actions.
        return self.act_batch_explore_tf(tf.convert_to_tensor(obs)).numpy()
class Runner(TFOffPolicyRunner):
    """Entry point that launches TD3 training through the off-policy runner."""

    @classmethod
    def main(cls,
             env_name,
             epochs=200,
             num_q_ensembles=2,
             policy_mlp_hidden=256,
             policy_lr=1e-3,
             q_mlp_hidden=256,
             q_lr=1e-3,
             actor_noise=0.1,
             target_noise=0.2,
             noise_clip=0.5,
             out_activation='sin',
             tau=5e-3,
             gamma=0.99,
             seed=1,
             logger_path: str = None,
             **kwargs
             ):
        """Bundle TD3Agent hyperparameters and delegate to TFOffPolicyRunner.main."""
        agent_kwargs = dict(
            num_q_ensembles=num_q_ensembles,
            policy_mlp_hidden=policy_mlp_hidden,
            policy_lr=policy_lr,
            q_mlp_hidden=q_mlp_hidden,
            q_lr=q_lr,
            tau=tau,
            gamma=gamma,
            actor_noise=actor_noise,
            target_noise=target_noise,
            noise_clip=noise_clip,
            out_activation=out_activation,
        )
        # Fixed: removed dataset-dump residue that was fused onto the closing line.
        super(Runner, cls).main(env_name=env_name,
                                epochs=epochs,
                                policy_delay=2,  # TD3's delayed policy updates
                                agent_cls=TD3Agent,
                                agent_kwargs=agent_kwargs,
                                seed=seed,
                                logger_path=logger_path,
                                **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.