code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def get_task_description(task_instance: task_eval.TaskEval) -> str:
"""Get the description for a task.
Args:
task_instance: Task instance
Returns:
Task description
"""
task_name = task_instance.__class__.__name__
try:
# First try to use get_instruction() if available
if hasattr(task_instance, 'get_instruction') and callable(getattr(task_instance, 'get_instruction')):
return task_instance.get_instruction()
# Fall back to goal property which is common in AndroidWorld tasks
elif hasattr(task_instance, 'goal'):
return task_instance.goal
else:
# If neither is available, use a default message with the task name
logger.warning(f"Task {task_name} has no get_instruction() method or goal property")
return f"Complete the '{task_name}' task"
except Exception as e:
logger.error(f"Error getting task description for {task_name}: {e}")
return f"Complete the '{task_name}' task" | Get the description for a task.
Args:
task_instance: Task instance
Returns:
Task description
| get_task_description | python | droidrun/droidrun | eval/utils/task_manager.py | https://github.com/droidrun/droidrun/blob/master/eval/utils/task_manager.py | MIT |
def check_task_success(env, task_instance: task_eval.TaskEval) -> bool:
"""Check if a task was completed successfully.
Args:
env: AndroidWorld environment
task_instance: Task instance
Returns:
True if task was successful, False otherwise
"""
task_name = task_instance.__class__.__name__
try:
# First try to use check_success() if available
if hasattr(task_instance, 'check_success') and callable(getattr(task_instance, 'check_success')):
return task_instance.check_success(env)
# Fall back to is_successful() which is common in some AndroidWorld tasks
elif hasattr(task_instance, 'is_successful') and callable(getattr(task_instance, 'is_successful')):
return task_instance.is_successful(env)
else:
# If neither is available, mark as failed
logger.warning(f"Task {task_name} has no check_success() or is_successful() method")
return False
except Exception as e:
logger.error(f"Error checking task success for {task_name}: {e}")
return False | Check if a task was completed successfully.
Args:
env: AndroidWorld environment
task_instance: Task instance
Returns:
True if task was successful, False otherwise
| check_task_success | python | droidrun/droidrun | eval/utils/task_manager.py | https://github.com/droidrun/droidrun/blob/master/eval/utils/task_manager.py | MIT |
def teardown_task(env, task_instance: task_eval.TaskEval) -> bool:
"""Tear down a task.
Args:
env: AndroidWorld environment
task_instance: Task instance
Returns:
True if teardown was successful, False otherwise
"""
task_name = task_instance.__class__.__name__
try:
if hasattr(task_instance, 'tear_down') and callable(getattr(task_instance, 'tear_down')):
task_instance.tear_down(env)
logger.info(f"Task {task_name} torn down using tear_down() method")
return True
elif hasattr(task_instance, 'teardown') and callable(getattr(task_instance, 'teardown')):
task_instance.teardown(env)
logger.info(f"Task {task_name} torn down using teardown() method")
return True
else:
logger.warning(f"Task {task_name} has no tear_down() or teardown() method")
return False
except Exception as e:
logger.error(f"Error during task teardown for {task_name}: {e}")
return False | Tear down a task.
Args:
env: AndroidWorld environment
task_instance: Task instance
Returns:
True if teardown was successful, False otherwise
| teardown_task | python | droidrun/droidrun | eval/utils/task_manager.py | https://github.com/droidrun/droidrun/blob/master/eval/utils/task_manager.py | MIT |
def __init__(self, path, target_column=None,
ndarray=True, **kwargs):
"""
:param str path:
The *path* represents a filesystem path or URL that's passed
on as the *filepath_or_buffer* argument to
:func:`read_table`.
:param str target_column:
The column in the table to load that represents the target
value. This column will not be part of the returned *data*.
If *target_column* is None, then the target return value
will be None as well.
:param kwargs:
All other keyword parameters are passed on to
:func:`pandas.io.parsers.read_table`. The most useful
options may be *usecols* to select which columns of the
table to use, *skiprows* to skip a certain number of rows at
the beginning and *nrows* to select a given number of rows
only.
"""
self.path = path
self.target_column = target_column
self.ndarray = ndarray
self.kwargs = kwargs |
:param str path:
The *path* represents a filesystem path or URL that's passed
on as the *filepath_or_buffer* argument to
:func:`read_table`.
:param str target_column:
The column in the table to load that represents the target
value. This column will not be part of the returned *data*.
If *target_column* is None, then the target return value
will be None as well.
:param kwargs:
All other keyword parameters are passed on to
:func:`pandas.io.parsers.read_table`. The most useful
options may be *usecols* to select which columns of the
table to use, *skiprows* to skip a certain number of rows at
the beginning and *nrows* to select a given number of rows
only.
| __init__ | python | ottogroup/palladium | palladium/dataset.py | https://github.com/ottogroup/palladium/blob/master/palladium/dataset.py | Apache-2.0 |
def __init__(self, url, sql, target_column=None, ndarray=True, **kwargs):
"""
:param str url:
The database *url* that'll be used to make a connection.
Format follows RFC-1738.
:param str sql:
SQL query to be executed or database table name.
:param str target_column:
The name of the column used as the target. (All other
columns are considered feature data.)
:param kwargs:
All other keyword parameters are passed on to
:func:`pandas.io.parsers.read_sql`.
"""
self.engine = create_engine(url)
self.sql = sql
self.target_column = target_column
self.ndarray = ndarray
self.kwargs = kwargs |
:param str url:
The database *url* that'll be used to make a connection.
Format follows RFC-1738.
:param str sql:
SQL query to be executed or database table name.
:param str target_column:
The name of the column used as the target. (All other
columns are considered feature data.)
:param kwargs:
All other keyword parameters are passed on to
:func:`pandas.io.parsers.read_sql`.
| __init__ | python | ottogroup/palladium | palladium/dataset.py | https://github.com/ottogroup/palladium/blob/master/palladium/dataset.py | Apache-2.0 |
def __init__(self,
impl,
update_cache_rrule,
):
"""
:param palladium.interfaces.DatasetLoader impl:
The underlying (decorated) dataset loader object.
:param dict update_cache_rrule:
Keyword arguments for a :class:`dateutil.rrule.rrule` that
determines when the cache will be updated. See
:class:`~palladium.util.RruleThread` for details.
"""
self.impl = impl
self.update_cache_rrule = update_cache_rrule |
:param palladium.interfaces.DatasetLoader impl:
The underlying (decorated) dataset loader object.
:param dict update_cache_rrule:
Keyword arguments for a :class:`dateutil.rrule.rrule` that
determines when the cache will be updated. See
:class:`~palladium.util.RruleThread` for details.
| __init__ | python | ottogroup/palladium | palladium/dataset.py | https://github.com/ottogroup/palladium/blob/master/palladium/dataset.py | Apache-2.0 |
def test_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Test a model.
Uses 'dataset_loader_test' and 'model_persister' from the
configuration to load a test dataset to test the accuracy of a trained
model with.
Usage:
pld-test [options]
Options:
-h --help Show this screen.
--model-version=<version> The version of the model to be tested. If
not specified, the newest model will be used.
"""
arguments = docopt(test_cmd.__doc__, argv=argv)
model_version = arguments['--model-version']
model_version = int(model_version) if model_version is not None else None
initialize_config(__mode__='fit')
test(model_version=model_version) | Test a model.
Uses 'dataset_loader_test' and 'model_persister' from the
configuration to load a test dataset to test the accuracy of a trained
model with.
Usage:
pld-test [options]
Options:
-h --help Show this screen.
--model-version=<version> The version of the model to be tested. If
not specified, the newest model will be used.
| test_cmd | python | ottogroup/palladium | palladium/eval.py | https://github.com/ottogroup/palladium/blob/master/palladium/eval.py | Apache-2.0 |
def fit_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Fit a model and save to database.
Will use 'dataset_loader_train', 'model', and 'model_perister' from
the configuration file, to load a dataset to train a model with, and
persist it.
Usage:
pld-fit [options]
Options:
-n --no-save Don't persist the fitted model to disk.
--no-activate Don't activate the fitted model.
--save-if-better-than=<k> Persist only if test score better than given
value.
-e --evaluate Evaluate fitted model on train and test set and
print out results.
-h --help Show this screen.
"""
arguments = docopt(fit_cmd.__doc__, argv=argv)
no_save = arguments['--no-save']
no_activate = arguments['--no-activate']
save_if_better_than = arguments['--save-if-better-than']
evaluate = arguments['--evaluate'] or bool(save_if_better_than)
if save_if_better_than is not None:
save_if_better_than = float(save_if_better_than)
initialize_config(__mode__='fit')
fit(
persist=not no_save,
activate=not no_activate,
evaluate=evaluate,
persist_if_better_than=save_if_better_than,
) | Fit a model and save to database.
Will use 'dataset_loader_train', 'model', and 'model_perister' from
the configuration file, to load a dataset to train a model with, and
persist it.
Usage:
pld-fit [options]
Options:
-n --no-save Don't persist the fitted model to disk.
--no-activate Don't activate the fitted model.
--save-if-better-than=<k> Persist only if test score better than given
value.
-e --evaluate Evaluate fitted model on train and test set and
print out results.
-h --help Show this screen.
| fit_cmd | python | ottogroup/palladium | palladium/fit.py | https://github.com/ottogroup/palladium/blob/master/palladium/fit.py | Apache-2.0 |
def admin_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Activate or delete models.
Models are usually made active right after fitting (see command
pld-fit). The 'activate' command allows you to explicitly set the
currently active model. Use 'pld-list' to get an overview of all
available models along with their version identifiers.
Deleting a model will simply remove it from the database.
Usage:
pld-admin activate <version> [options]
pld-admin delete <version> [options]
Options:
-h --help Show this screen.
"""
arguments = docopt(admin_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
if arguments['activate']:
activate(model_version=int(arguments['<version>']))
elif arguments['delete']:
delete(model_version=int(arguments['<version>'])) | Activate or delete models.
Models are usually made active right after fitting (see command
pld-fit). The 'activate' command allows you to explicitly set the
currently active model. Use 'pld-list' to get an overview of all
available models along with their version identifiers.
Deleting a model will simply remove it from the database.
Usage:
pld-admin activate <version> [options]
pld-admin delete <version> [options]
Options:
-h --help Show this screen.
| admin_cmd | python | ottogroup/palladium | palladium/fit.py | https://github.com/ottogroup/palladium/blob/master/palladium/fit.py | Apache-2.0 |
def grid_search_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Grid search parameters for the model.
Uses 'dataset_loader_train', 'model', and 'grid_search' from the
configuration to load a training dataset, and run a grid search on the
model using the grid of hyperparameters.
Usage:
pld-grid-search [options]
Options:
--save-results=<fname> Save results to CSV file
--persist-best Persist the best model from grid search
-h --help Show this screen.
"""
arguments = docopt(grid_search_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
grid_search(
save_results=arguments['--save-results'],
persist_best=arguments['--persist-best'],
) | Grid search parameters for the model.
Uses 'dataset_loader_train', 'model', and 'grid_search' from the
configuration to load a training dataset, and run a grid search on the
model using the grid of hyperparameters.
Usage:
pld-grid-search [options]
Options:
--save-results=<fname> Save results to CSV file
--persist-best Persist the best model from grid search
-h --help Show this screen.
| grid_search_cmd | python | ottogroup/palladium | palladium/fit.py | https://github.com/ottogroup/palladium/blob/master/palladium/fit.py | Apache-2.0 |
def __call__(self):
"""Loads the data and returns a tuple *(data, target)*, or
*(X, y)*.
:return:
A tuple *(data, target*).
*data* is a two dimensional numpy array with shape n x m
(one row per example).
*target* is a one dimensional array with n target values.
*target* may be ``None`` if there is no target value,
e.g. in an unsupervised learning setting.
:rtype: tuple
""" | Loads the data and returns a tuple *(data, target)*, or
*(X, y)*.
:return:
A tuple *(data, target*).
*data* is a two dimensional numpy array with shape n x m
(one row per example).
*target* is a one dimensional array with n target values.
*target* may be ``None`` if there is no target value,
e.g. in an unsupervised learning setting.
:rtype: tuple
| __call__ | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def __iter__(self):
"""
:return:
Tuples of train/test indices.
""" |
:return:
Tuples of train/test indices.
| __iter__ | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def fit(self, X, y=None):
"""Fit to data array *X* and possibly a target array *y*.
:return: self
""" | Fit to data array *X* and possibly a target array *y*.
:return: self
| fit | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def predict(self, X, **kw):
"""Predict classes for data array *X* with shape n x m.
Some models may accept additional keyword arguments.
:return:
A numpy array of length n with the predicted classes (for
classification problems) or numeric values (for regression
problems).
:raises:
May raise a :class:`PredictError` to indicate that some
condition made it impossible to deliver a prediction.
""" | Predict classes for data array *X* with shape n x m.
Some models may accept additional keyword arguments.
:return:
A numpy array of length n with the predicted classes (for
classification problems) or numeric values (for regression
problems).
:raises:
May raise a :class:`PredictError` to indicate that some
condition made it impossible to deliver a prediction.
| predict | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def read(self, version=None):
"""Returns a :class:`Model` instance.
:param str version:
*version* may be used to read a specific version of a model.
If *version* is ``None``, returns the active model.
:return:
The model object.
:raises:
LookupError if no model was available.
""" | Returns a :class:`Model` instance.
:param str version:
*version* may be used to read a specific version of a model.
If *version* is ``None``, returns the active model.
:return:
The model object.
:raises:
LookupError if no model was available.
| read | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def write(self, model):
"""Persists a :class:`Model` and returns a new version number.
It is the :class:`ModelPersister`'s responsibility to annotate
the 'version' information onto the model before it is saved.
The new model will initially be inactive. Use
:meth:`ModelPersister.activate` to activate the model.
:return:
The new model's version identifier.
""" | Persists a :class:`Model` and returns a new version number.
It is the :class:`ModelPersister`'s responsibility to annotate
the 'version' information onto the model before it is saved.
The new model will initially be inactive. Use
:meth:`ModelPersister.activate` to activate the model.
:return:
The new model's version identifier.
| write | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def activate(self, version):
"""Set the model with the given *version* to be the active
one.
Implies that any previously active model becomes inactive.
:param str version:
The *version* of the model that's activated.
:raises:
LookupError if no model with given *version* exists.
""" | Set the model with the given *version* to be the active
one.
Implies that any previously active model becomes inactive.
:param str version:
The *version* of the model that's activated.
:raises:
LookupError if no model with given *version* exists.
| activate | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def delete(self, version):
"""Delete the model with the given *version* from the
database.
:param str version:
The *version* of the model that's activated.
:raises:
LookupError if no model with given *version* exists.
""" | Delete the model with the given *version* from the
database.
:param str version:
The *version* of the model that's activated.
:raises:
LookupError if no model with given *version* exists.
| delete | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def list_models(self):
"""List metadata of all available models.
:return:
A list of dicts, with each dict containing information about
one of the available models. Each dict is guaranteed to
contain the ``version`` key, which is the same version
number that :meth:`ModelPersister.read` accepts for loading
specific models.
""" | List metadata of all available models.
:return:
A list of dicts, with each dict containing information about
one of the available models. Each dict is guaranteed to
contain the ``version`` key, which is the same version
number that :meth:`ModelPersister.read` accepts for loading
specific models.
| list_models | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def list_properties(self):
"""List properties of :class:`ModelPersister` itself.
:return:
A dictionary of key and value pairs, where both keys and
values are of type ``str``. Properties will usually include
``active-model`` and ``db-version`` entries.
""" | List properties of :class:`ModelPersister` itself.
:return:
A dictionary of key and value pairs, where both keys and
values are of type ``str``. Properties will usually include
``active-model`` and ``db-version`` entries.
| list_properties | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def upgrade(self, from_version=None, to_version=__version__):
"""Upgrade the underlying database to the latest version.
Newer versions of Palladium may require changes to the
:class:`ModelPersister`'s database. This method provides an
opportunity to run the necessary upgrade steps.
It's the :class:`ModelPersister`'s responsibility to keep
track of the Palladium version that was used to create and
upgrade its database, and thus to determine the upgrade steps
necessary.
""" | Upgrade the underlying database to the latest version.
Newer versions of Palladium may require changes to the
:class:`ModelPersister`'s database. This method provides an
opportunity to run the necessary upgrade steps.
It's the :class:`ModelPersister`'s responsibility to keep
track of the Palladium version that was used to create and
upgrade its database, and thus to determine the upgrade steps
necessary.
| upgrade | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def __call__(self, model, request):
"""
Use the model to run a prediction with the requested data.
:param model:
The :class:`~Model` instance to use for making predictions.
:param request:
A werkzeug ``request`` object. A dictionary with query
parameters is available at *request.values*.
:return:
A werkzeug ``response`` object. It is the
:class:`PredictService`'s responsiblity to return
appropriate status codes and data in case of error.
""" |
Use the model to run a prediction with the requested data.
:param model:
The :class:`~Model` instance to use for making predictions.
:param request:
A werkzeug ``request`` object. A dictionary with query
parameters is available at *request.values*.
:return:
A werkzeug ``response`` object. It is the
:class:`PredictService`'s responsiblity to return
appropriate status codes and data in case of error.
| __call__ | python | ottogroup/palladium | palladium/interfaces.py | https://github.com/ottogroup/palladium/blob/master/palladium/interfaces.py | Apache-2.0 |
def __init__(self, fit_func, predict_func,
fit_kwargs=None, predict_kwargs=None,
encode_labels=False):
"""
Instantiates a model with the given *fit_func* and
*predict_func* written in Julia.
:param str fit_func:
The dotted name of the Julia function to use for fitting.
The function must take as its first two arguments the *X*
and *y* arrays. All elements of the optional *fit_kwargs*
dictionary will be passed on to the Julia function as
keyword arguments. The return value of *fit_func* will be
used as the first argument to *predict_func*.
:param str predict_func:
Similar to *fit_func*, this is the dotted name of the Julia
function used for prediction. The first argument of this
function is the return value of *fit_func*. The second
argument is the *X* data array. All elements of the
optional *fit_kwargs* dictionary will be passed on to the
Julia function as keyword arguments. The return value of
*predict_func* is considered to be the target array *y*.
:param bool encode_labels:
If set to *True*, the *y* target array will be automatically
encoded using a :class:`sklearn.preprocessing.LabelEncoder`,
which is useful if you have string labels but your Julia
function only accepts numeric labels.
"""
self.fit_func = fit_func
self.predict_func = predict_func
self.encode_labels = encode_labels
self.fit_kwargs = fit_kwargs or {}
self.predict_kwargs = predict_kwargs or {} |
Instantiates a model with the given *fit_func* and
*predict_func* written in Julia.
:param str fit_func:
The dotted name of the Julia function to use for fitting.
The function must take as its first two arguments the *X*
and *y* arrays. All elements of the optional *fit_kwargs*
dictionary will be passed on to the Julia function as
keyword arguments. The return value of *fit_func* will be
used as the first argument to *predict_func*.
:param str predict_func:
Similar to *fit_func*, this is the dotted name of the Julia
function used for prediction. The first argument of this
function is the return value of *fit_func*. The second
argument is the *X* data array. All elements of the
optional *fit_kwargs* dictionary will be passed on to the
Julia function as keyword arguments. The return value of
*predict_func* is considered to be the target array *y*.
:param bool encode_labels:
If set to *True*, the *y* target array will be automatically
encoded using a :class:`sklearn.preprocessing.LabelEncoder`,
which is useful if you have string labels but your Julia
function only accepts numeric labels.
| __init__ | python | ottogroup/palladium | palladium/julia.py | https://github.com/ottogroup/palladium/blob/master/palladium/julia.py | Apache-2.0 |
def open(self, path, mode='r'):
"""Return a file handle
For normal files, the implementation is:
```python
return open(path, mode)
```
""" | Return a file handle
For normal files, the implementation is:
```python
return open(path, mode)
```
| open | python | ottogroup/palladium | palladium/persistence.py | https://github.com/ottogroup/palladium/blob/master/palladium/persistence.py | Apache-2.0 |
def exists(self, path):
"""Test whether a path exists
For normal files, the implementation is:
```python
return os.path.exists(path)
```
""" | Test whether a path exists
For normal files, the implementation is:
```python
return os.path.exists(path)
```
| exists | python | ottogroup/palladium | palladium/persistence.py | https://github.com/ottogroup/palladium/blob/master/palladium/persistence.py | Apache-2.0 |
def remove(self, path):
"""Remove a file
For normal files, the implementation is:
```python
os.remove(path)
```
""" | Remove a file
For normal files, the implementation is:
```python
os.remove(path)
```
| remove | python | ottogroup/palladium | palladium/persistence.py | https://github.com/ottogroup/palladium/blob/master/palladium/persistence.py | Apache-2.0 |
def __init__(self, path, io):
"""
:param str path:
The *path* template that I will use to store models,
e.g. ``/path/to/model-{version}``.
:param FileLikeIO io:
Used to access low level file handle operations.
"""
if '{version}' not in path:
raise ValueError(
"Your file persister path must have a {version} placeholder,"
"e.g., model-{version}.pickle."
)
self.path = path
self.io = io |
:param str path:
The *path* template that I will use to store models,
e.g. ``/path/to/model-{version}``.
:param FileLikeIO io:
Used to access low level file handle operations.
| __init__ | python | ottogroup/palladium | palladium/persistence.py | https://github.com/ottogroup/palladium/blob/master/palladium/persistence.py | Apache-2.0 |
def __init__(
self, url, poolclass=None, chunk_size=1024 ** 2 * 100,
table_postfix=''):
"""
:param str url:
The database *url* that'll be used to make a connection.
Format follows RFC-1738. I'll create a table ``models`` to
store the pickles in if it doesn't exist yet.
:param sqlalchemy.pool.Pool poolclass:
A class specifying DB connection behavior of the engine. If set to
None, the NullPool will be used.
:param int chunk_size:
The pickled contents of the model are stored inside the
database in chunks. The default size is 1024 ** 2 * 100
(100MB).
:param str table_postfix:
If *table_postfix* is provided, I will append it to the
table name of all tables used in this instance.
"""
if not poolclass:
poolclass = NullPool
engine = create_engine(url, poolclass=poolclass)
self.engine = engine
self.chunk_size = chunk_size
self.table_postfix = table_postfix
self.write_lock = Lock()
orms = self.create_orm_classes()
self.Property = orms['Property']
self.DBModel = orms['DBModel']
self.DBModelChunk = orms['DBModelChunk']
metadata = self.DBModel.metadata
metadata.bind = engine
metadata.create_all()
self.session = scoped_session(sessionmaker(bind=engine))
self._initialize_properties() |
:param str url:
The database *url* that'll be used to make a connection.
Format follows RFC-1738. I'll create a table ``models`` to
store the pickles in if it doesn't exist yet.
:param sqlalchemy.pool.Pool poolclass:
A class specifying DB connection behavior of the engine. If set to
None, the NullPool will be used.
:param int chunk_size:
The pickled contents of the model are stored inside the
database in chunks. The default size is 1024 ** 2 * 100
(100MB).
:param str table_postfix:
If *table_postfix* is provided, I will append it to the
table name of all tables used in this instance.
| __init__ | python | ottogroup/palladium | palladium/persistence.py | https://github.com/ottogroup/palladium/blob/master/palladium/persistence.py | Apache-2.0 |
def __init__(self,
impl,
update_cache_rrule=None,
check_version=True,
):
"""
:param ModelPersister impl:
The underlying (decorated) persister object.
:param dict update_cache_rrule:
Optional keyword arguments for a
:class:`dateutil.rrule.rrule` that determines when the cache
will be updated. See :class:`~palladium.util.RruleThread` for
details.
:param bool check_version:
If set to `True`, I will perform a check and only load a new
model from the storage if my cached version differs from
what's the current active version.
"""
self.impl = impl
self.update_cache_rrule = update_cache_rrule
self.check_version = check_version |
:param ModelPersister impl:
The underlying (decorated) persister object.
:param dict update_cache_rrule:
Optional keyword arguments for a
:class:`dateutil.rrule.rrule` that determines when the cache
will be updated. See :class:`~palladium.util.RruleThread` for
details.
:param bool check_version:
If set to `True`, I will perform a check and only load a new
model from the storage if my cached version differs from
what's the current active version.
| __init__ | python | ottogroup/palladium | palladium/persistence.py | https://github.com/ottogroup/palladium/blob/master/palladium/persistence.py | Apache-2.0 |
def make_ujson_response(obj, status_code=200):
"""Encodes the given *obj* to json and wraps it in a response.
:return:
A Flask response.
"""
json_encoded = ujson.encode(obj, ensure_ascii=False)
resp = make_response(json_encoded)
resp.mimetype = 'application/json'
resp.content_type = 'application/json; charset=utf-8'
resp.status_code = status_code
return resp | Encodes the given *obj* to json and wraps it in a response.
:return:
A Flask response.
| make_ujson_response | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def __init__(
self,
mapping,
params=(),
entry_point='/predict',
decorator_list_name='predict_decorators',
predict_proba=False,
unwrap_sample=False,
**kwargs
):
"""
:param mapping:
A list of query parameters and their type that should be
included in the request. These will be processed in the
:meth:`sample_from_data` method to construct a sample
that can be used for prediction. An example that expects
two request parameters called ``pos`` and ``neg`` that are
both of type str::
{ ...
'mapping': [('pos', 'str'), ('neg', 'str')]
... }
:param params:
Similarly to *mapping*, this is a list of name and type of
parameters that will be passed to the model's
:meth:`~palladium.interfaces.Model.predict` method as keyword
arguments.
:param predict_proba:
Instead of returning a single class (the default), when
*predict_proba* is set to true, the result will instead
contain a list of class probabilities.
:param unwrap_sample:
When working with text, scikit-learn and others will
sometimes expect the input to be a 1d array of strings
rather than a 2d array. Setting *unwrap_sample* to true
will use this representation.
"""
self.mapping = mapping
self.params = params
self.entry_point = entry_point
self.decorator_list_name = decorator_list_name
self.predict_proba = predict_proba
self.unwrap_sample = unwrap_sample
vars(self).update(kwargs) |
:param mapping:
A list of query parameters and their type that should be
included in the request. These will be processed in the
:meth:`sample_from_data` method to construct a sample
that can be used for prediction. An example that expects
two request parameters called ``pos`` and ``neg`` that are
both of type str::
{ ...
'mapping': [('pos', 'str'), ('neg', 'str')]
... }
:param params:
Similarly to *mapping*, this is a list of name and type of
parameters that will be passed to the model's
:meth:`~palladium.interfaces.Model.predict` method as keyword
arguments.
:param predict_proba:
Instead of returning a single class (the default), when
*predict_proba* is set to true, the result will instead
contain a list of class probabilities.
:param unwrap_sample:
When working with text, scikit-learn and others will
sometimes expect the input to be a 1d array of strings
rather than a 2d array. Setting *unwrap_sample* to true
will use this representation.
| __init__ | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def sample_from_data(self, model, data):
    """Build a numpy sample array from the request *data*.

    :param model:
        The :class:`~Model` instance to use for making predictions.
    :param data:
        A dict-like with the sample's data, typically retrieved from
        ``request.args`` or similar.
    """
    # Convert each configured query parameter using its declared type.
    converted = [
        self.types[type_name](data[key])
        for key, type_name in self.mapping
        ]
    if not self.unwrap_sample:
        return np.array(converted, dtype=object)
    # Unwrapped mode: a single value is returned as a 0-d array (used
    # e.g. for text input where estimators expect a 1-d array of str).
    assert len(converted) == 1
    return np.array(converted[0])
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the sample's data, typically retrieved from
``request.args`` or similar.
| sample_from_data | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def params_from_data(self, model, data):
    """Collect keyword arguments for ``model.predict`` from *data*.

    :param model:
        The :class:`~Model` instance to use for making predictions.
        Attributes on the model act as fallbacks for missing request
        parameters.
    :param data:
        A dict-like with the parameter data, typically retrieved
        from ``request.args`` or similar.
    """
    collected = {}
    for name, type_name in self.params:
        convert = self.types[type_name]
        if name in data:
            # Request value wins; coerce it to the declared type.
            collected[name] = convert(data[name])
        elif hasattr(model, name):
            # Fall back to a same-named attribute on the model.
            collected[name] = getattr(model, name)
    return collected
``model.predict`` from request *data*.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the parameter data, typically retrieved
from ``request.args`` or similar.
| params_from_data | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def response_from_prediction(self, y_pred, single=True):
    """Serialize the model's prediction *y_pred* into a JSON response.

    When *single* is true, only the first element of the prediction is
    returned (the common one-sample case).
    """
    as_list = y_pred.tolist()
    payload = {
        'metadata': get_metadata(),
        'result': as_list[0] if single else as_list,
    }
    return make_ujson_response(payload, status_code=200)
response.
| response_from_prediction | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def create_predict_function(
        route, predict_service, decorator_list_name, config):
    """Creates a predict function and registers it to
    the Flask app using the route decorator.

    :param str route:
        Path of the entry point.
    :param palladium.interfaces.PredictService predict_service:
        The predict service to be registered to this entry point.
    :param str decorator_list_name:
        The decorator list to be used for this predict service.  It is
        OK if there is no such entry in the active Palladium config.
    :param config:
        The active configuration mapping; ``model_persister`` is read
        from it at registration time.
    :return:
        A predict service function that will be used to process
        predict requests.
    """
    model_persister = config.get('model_persister')

    # ``endpoint=route`` makes the route string itself the Flask
    # endpoint name, so several predict services can be registered
    # under distinct routes without endpoint clashes.
    @app.route(route, methods=['GET', 'POST'], endpoint=route)
    @PluggableDecorator(decorator_list_name)
    def predict_func():
        # The persister and service are closed over; the current model
        # is resolved per-request inside ``predict``.
        return predict(model_persister, predict_service)

    return predict_func
the Flask app using the route decorator.
:param str route:
Path of the entry point.
:param palladium.interfaces.PredictService predict_service:
The predict service to be registered to this entry point.
:param str decorator_list_name:
The decorator list to be used for this predict service. It is
OK if there is no such entry in the active Palladium config.
:return:
A predict service function that will be used to process
predict requests.
| create_predict_function | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def devserver_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
    Serve the web API for development.

    Usage:
      pld-devserver [options]

    Options:
      -h --help               Show this screen.

      --host=<host>           The host to use [default: 0.0.0.0].

      --port=<port>           The port to use [default: 5000].

      --debug=<debug>         Whether or not to use debug mode [default: 0].
    """
    # NOTE: the docstring above is not just documentation -- docopt
    # parses it at runtime to build the CLI, so its wording must not be
    # edited casually.  The ``argv`` default is captured once at import
    # time (standard Python default-argument behaviour).
    arguments = docopt(devserver_cmd.__doc__, argv=argv)
    initialize_config()
    app.run(
        host=arguments['--host'],
        port=int(arguments['--port']),
        debug=int(arguments['--debug']),
        )
Usage:
pld-devserver [options]
Options:
-h --help Show this screen.
--host=<host> The host to use [default: 0.0.0.0].
--port=<port> The port to use [default: 5000].
--debug=<debug> Whether or not to use debug mode [default: 0].
| devserver_cmd | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def listen(self, io_in, io_out, io_err):
    """Consume *io_in* line by line and write predictions to *io_out*.

    A line consisting of the word ``exit`` stops the loop.  When a
    line cannot be processed, an empty JSON list is written to
    *io_out* and the error is reported on *io_err*.
    """
    for line in io_in:
        if line.strip().lower() == 'exit':
            break
        try:
            prediction = self.process_line(line)
        except Exception as exc:
            # Keep the output stream in lock-step with the input by
            # emitting a placeholder result, then report the failure.
            io_out.write('[]\n')
            message = (
                "Error while processing input row: {}"
                "{}: {}\n".format(line, type(exc), exc))
            io_err.write(message)
            io_err.flush()
        else:
            io_out.write(ujson.dumps(prediction.tolist()) + '\n')
            io_out.flush()
to output. In case of errors, the error stream will be used.
| listen | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def stream_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
    Start the streaming server, which listens to stdin, processes line
    by line, and returns predictions.

    The input should consist of a list of json objects, where each object
    will result in a prediction.  Each line is processed in a batch.

    Example input (must be on a single line):

    [{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7,
    "petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0,
    "petal length": 1.4, "petal width": 5}]

    Example output:

    ["Iris-virginica","Iris-setosa"]

    An input line with the word 'exit' will quit the streaming server.

    Usage:
      pld-stream [options]

    Options:
      -h --help                  Show this screen.
    """
    # The docstring doubles as the docopt CLI definition -- do not
    # reword it without checking docopt parsing.  No options beyond
    # --help exist, but calling docopt still validates argv.
    docopt(stream_cmd.__doc__, argv=argv)
    initialize_config()
    stream = PredictStream()
    # Blocks until stdin is exhausted or an 'exit' line is received.
    stream.listen(sys.stdin, sys.stdout, sys.stderr)
by line, and returns predictions.
The input should consist of a list of json objects, where each object
will result in a prediction. Each line is processed in a batch.
Example input (must be on a single line):
[{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7,
"petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0,
"petal length": 1.4, "petal width": 5}]
Example output:
["Iris-virginica","Iris-setosa"]
An input line with the word 'exit' will quit the streaming server.
Usage:
pld-stream [options]
Options:
-h --help Show this screen.
| stream_cmd | python | ottogroup/palladium | palladium/server.py | https://github.com/ottogroup/palladium/blob/master/palladium/server.py | Apache-2.0 |
def apply_kwargs(func, **kwargs):
    """Call *func* with only those of *kwargs* that it accepts.

    Keyword arguments whose names do not appear in *func*'s signature
    are silently dropped.

    :param func: The callable to invoke.
    :param kwargs: Candidate keyword arguments.
    :return: Whatever *func* returns.
    """
    accepted = signature(func).parameters
    # Filter once with a comprehension instead of a manual build loop.
    filtered = {name: kwargs[name] for name in accepted if name in kwargs}
    return func(**filtered)
| apply_kwargs | python | ottogroup/palladium | palladium/util.py | https://github.com/ottogroup/palladium/blob/master/palladium/util.py | Apache-2.0 |
def args_from_config(func):
    """Decorator that injects parameters from the configuration.

    For each parameter of *func* that the caller did not supply
    (neither positionally nor by keyword), the value is looked up by
    name in the active Palladium configuration and passed as a keyword
    argument.
    """
    func_args = signature(func).parameters

    @wraps(func)
    def wrapper(*args, **kwargs):
        config = get_config()
        for i, argname in enumerate(func_args):
            # Skip parameters already bound by the caller; positional
            # args fill parameters in declaration order.
            if len(args) > i or argname in kwargs:
                continue
            elif argname in config:
                kwargs[argname] = config[argname]
        try:
            # Dry-run the binding so a missing required argument fails
            # here with a configuration hint, not inside *func*.
            getcallargs(func, *args, **kwargs)
        except TypeError as exc:
            msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
            exc.args = (msg,)
            raise exc
        return func(*args, **kwargs)

    # Expose the undecorated function (also set by @wraps; kept for
    # explicitness / older Python compatibility).
    wrapper.__wrapped__ = func
    return wrapper
| args_from_config | python | ottogroup/palladium | palladium/util.py | https://github.com/ottogroup/palladium/blob/master/palladium/util.py | Apache-2.0 |
def session_scope(session):
    """Provide a transactional scope around a series of operations.

    Commits on success, rolls back on any error (re-raising it), and
    always closes the session.

    :param session:
        An SQLAlchemy-style session object exposing ``commit``,
        ``rollback`` and ``close``.
    """
    try:
        yield session
        session.commit()
    except BaseException:
        # Be explicit about catching *everything* (including
        # KeyboardInterrupt / GeneratorExit) so the transaction is
        # always rolled back; the previous bare ``except:`` behaved
        # the same but hid the intent.
        session.rollback()
        raise
    finally:
        session.close()
def __init__(self, func, rrule, sleep_between_checks=60):
    """
    :param callable func:
        The function that I will call periodically.

    :param rrule rrule:
        The :class:`dateutil.rrule.rrule` recurrence rule that
        defines when I will do the calls.  See the `python-dateutil
        docs <https://labix.org/python-dateutil>`_ for details on
        how to define rrules.

        For convenience, I will also accept a dict instead of a
        `rrule` instance, in which case I will instantiate an rrule
        using the dict contents as keyword parameters.

    :param int sleep_between_checks:
        Number of seconds to sleep before I check again if I should
        run the function *func*.
    """
    # daemon=True: the scheduler thread must not keep the interpreter
    # alive on shutdown.
    super(RruleThread, self).__init__(daemon=True)
    if isinstance(rrule, dict):
        rrule = self._rrule_from_dict(rrule)
    self.func = func
    self.rrule = rrule
    self.sleep_between_checks = sleep_between_checks
    # Timestamp of the last run; initialized to "now" so the first
    # scheduled occurrence is computed relative to thread creation.
    self.last_execution = datetime.now()
    # Flag checked by the run loop; clearing it stops the thread.
    self.alive = True
:param callable func:
The function that I will call periodically.
:param rrule rrule:
The :class:`dateutil.rrule.rrule` recurrence rule that
defines when I will do the calls. See the `python-dateutil
docs <https://labix.org/python-dateutil>`_ for details on
how to define rrules.
For convenience, I will also accept a dict instead of a
`rrule` instance, in which case I will instantiate an rrule
using the dict contents as keyword parameters.
:param int sleep_between_checks:
Number of seconds to sleep before I check again if I should
run the function *func*.
| __init__ | python | ottogroup/palladium | palladium/util.py | https://github.com/ottogroup/palladium/blob/master/palladium/util.py | Apache-2.0 |
def memory_usage_psutil():
    """Return the current process memory usage in MB.

    :return:
        Tuple ``(rss, vms)``: resident set size and virtual memory
        size of this process, both in mebibytes.
    """
    # Query memory_info() once instead of twice, and use the named
    # fields instead of opaque tuple indices [0]/[1].
    mem_info = psutil.Process(os.getpid()).memory_info()
    mib = float(2 ** 20)
    return mem_info.rss / mib, mem_info.vms / mib
| memory_usage_psutil | python | ottogroup/palladium | palladium/util.py | https://github.com/ottogroup/palladium/blob/master/palladium/util.py | Apache-2.0 |
def upgrade_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
    Upgrade the database to the latest version.

    Usage:
      pld-ugprade [options]

    Options:
      --from=<v>               Upgrade from a specific version, overriding
                               the version stored in the database.

      --to=<v>                 Upgrade to a specific version instead of the
                               latest version.

      -h --help                Show this screen.
    """
    # NOTE(review): the usage line spells the program name
    # "pld-ugprade" (typo for "pld-upgrade").  docopt parses this
    # docstring at runtime, so fixing the spelling is a behavioural
    # change -- confirm against the console_scripts entry point before
    # changing it.
    arguments = docopt(upgrade_cmd.__doc__, argv=argv)
    # 'fit' mode initializes the configuration with write access to the
    # persistence backend.
    initialize_config(__mode__='fit')
    upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
| upgrade_cmd | python | ottogroup/palladium | palladium/util.py | https://github.com/ottogroup/palladium/blob/master/palladium/util.py | Apache-2.0 |
def export_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
    Export a model from one model persister to another.

    The model persister to export to is supposed to be available in the
    configuration file under the 'model_persister_export' key.

    Usage:
      pld-export [options]

    Options:
      --version=<v>            Export a specific version rather than the active
                               one.

      --no-activate            Don't activate the exported model with the
                               'model_persister_export'.

      -h --help                Show this screen.
    """
    # The docstring above is parsed by docopt at runtime; keep its
    # wording in sync with the CLI contract.
    arguments = docopt(export_cmd.__doc__, argv=argv)
    model_version = export(
        model_version=arguments['--version'],
        activate=not arguments['--no-activate'],
        )
    logger.info("Exported model. New version number: {}".format(model_version))
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen.
| export_cmd | python | ottogroup/palladium | palladium/util.py | https://github.com/ottogroup/palladium/blob/master/palladium/util.py | Apache-2.0 |
def Partial(func, **kwargs):
    """Allow partially applied functions in the configuration.

    *func* may also be given as a dotted-name string, in which case it
    is resolved to the actual callable first.  The returned partial is
    wrapped so it carries the target's metadata.
    """
    target = resolve_dotted_name(func) if isinstance(func, str) else func
    applied = partial(target, **kwargs)
    update_wrapper(applied, target)
    return applied
configuration.
| Partial | python | ottogroup/palladium | palladium/util.py | https://github.com/ottogroup/palladium/blob/master/palladium/util.py | Apache-2.0 |
def test_upload(self, mocked_requests, persister):
    """ test upload of model and metadata """
    model = Dummy(name='mymodel')
    get_md_url = "%s/mymodel-metadata.json" % (self.base_url,)
    # Metadata does not exist yet (HEAD -> 404): the persister must
    # create it from scratch and assign version 1.
    mocked_requests.head(get_md_url, status_code=404)

    put_model_body = None

    def handle_put_model(request, context):
        # Capture the uploaded (gzipped, pickled) model payload so it
        # can be round-tripped below.
        nonlocal put_model_body
        put_model_body = request.body.read()
        return ''

    put_model_url = "%s/mymodel-1.pkl.gz" % (self.base_url,)
    put_model = mocked_requests.put(
        put_model_url,
        text=handle_put_model,
        status_code=201,
        )

    put_md_body = None

    def handle_put_md(request, context):
        # Capture the uploaded metadata JSON for inspection.
        nonlocal put_md_body
        put_md_body = request.body.read()
        return ''

    put_md_url = "%s/mymodel-metadata.json" % (self.base_url,)
    put_md = mocked_requests.put(
        put_md_url,
        text=handle_put_md,
        status_code=201,
        )

    persister.write(model)
    assert put_model.called
    assert put_md.called
    # Round-trip: decompressing and unpickling the uploaded body must
    # yield the original model; metadata lists exactly one model.
    assert pickle.loads(gzip.decompress(put_model_body)) == model
    assert len(json.loads(put_md_body.decode('utf-8'))['models']) == 1
    self.assert_auth_headers(mocked_requests)
def test_download(self, mocked_requests, persister):
    """ test download and activation of a model """
    model = Dummy(name='mymodel', __metadata__={})
    expected = model
    zipped_model = gzip.compress(pickle.dumps(expected))

    get_md_url = "%s/mymodel-metadata.json" % (self.base_url,)
    # Metadata exists and marks version 1 as the active model.
    mocked_requests.head(get_md_url, status_code=200)
    get_md = mocked_requests.get(
        get_md_url,
        json={"models": [{"version": 1}],
              "properties": {'active-model': 1}},
        status_code=200,
        )

    get_model_url = "%s/mymodel-1.pkl.gz" % (self.base_url,)
    mocked_requests.head(get_model_url, status_code=200)
    get_model = mocked_requests.get(
        get_model_url,
        content=zipped_model,
        status_code=200,
        )

    model = persister.read()
    assert get_md.called
    assert get_model.called
    # read() must fetch the active version's blob and unpickle it back
    # into an equal model object.
    assert model == expected
    self.assert_auth_headers(mocked_requests)
def test_delete(self, mocked_requests, persister):
    """ test deleting a model and metadata update """
    get_md_url = "%s/mymodel-metadata.json" % (self.base_url,)
    # Existing metadata with a single model (version 1) that is active.
    mocked_requests.head(get_md_url, status_code=200)
    mocked_requests.get(
        get_md_url,
        json={"models": [{"version": 1}],
              "properties": {'active-model': 1}},
        status_code=200,
        )

    put_md_body = None

    def handle_put_md(request, context):
        # Capture the re-written metadata so we can verify the model
        # entry was removed.
        nonlocal put_md_body
        put_md_body = request.body.read()
        return ''

    put_md_url = "%s/mymodel-metadata.json" % (self.base_url,)
    put_md = mocked_requests.put(
        put_md_url,
        text=handle_put_md,
        status_code=201,
        )

    delete_model_url = "%s/mymodel-1.pkl.gz" % (self.base_url,)
    delete_model = mocked_requests.delete(
        delete_model_url,
        status_code=200,
        )

    persister.delete(1)
    assert put_md.called
    assert delete_model.called
    # After deletion, the metadata's model list must be empty.
    assert len(json.loads(put_md_body.decode('utf-8'))['models']) == 0
    self.assert_auth_headers(mocked_requests)
def flask_app_test(request, config):
    """A Flask app whose URL-map state (_rules, _rules_by_endpoint,
    view_functions, _remap) is restored to its pre-test values after
    the test finishes.
    """
    from palladium.server import app
    url_map = app.url_map
    # Remember the original registries before swapping in copies.
    saved_rules = url_map._rules
    saved_rules_by_endpoint = url_map._rules_by_endpoint
    saved_view_functions = app.view_functions
    saved_remap = url_map._remap
    # Shallow-copy the mutable registries so the test can register new
    # routes without mutating the originals.
    url_map._rules = list(saved_rules)
    url_map._rules_by_endpoint = dict(saved_rules_by_endpoint)
    app.view_functions = dict(saved_view_functions)
    request.addfinalizer(
        lambda: _reset_url_map(
            app, saved_rules, saved_rules_by_endpoint,
            saved_view_functions, saved_remap))
    return app
_rules_by_end_point will be reset to the previous values after
running the test.
| flask_app_test | python | ottogroup/palladium | palladium/tests/__init__.py | https://github.com/ottogroup/palladium/blob/master/palladium/tests/__init__.py | Apache-2.0 |
def get_task(benchmark, env_id):
    """Look up the task with *env_id* inside *benchmark*.

    Returns None when the benchmark does not contain that environment.
    """
    for task in benchmark['tasks']:
        if task['env_id'] == env_id:
            return task
    return None
Return None if the benchmark doesn't have the env.
| get_task | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/benchmarks.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/benchmarks.py | MIT |
def find_task_for_env_id_in_any_benchmark(env_id):
    """Search every registered benchmark for a task with *env_id*.

    Returns the first matching ``(benchmark, task)`` pair, or
    ``(None, None)`` when no benchmark contains the environment.
    """
    for benchmark in _BENCHMARKS:
        match = next(
            (t for t in benchmark['tasks'] if t['env_id'] == env_id),
            None)
        if match is not None:
            return benchmark, match
    return None, None
def continuous_mlp_policy_tf_ddpg_benchmarks():
    """Run benchmarking experiments for Continuous MLP Policy on TF-DDPG."""
    iterate_experiments(
        continuous_mlp_policy,
        MuJoCo1M_ENV_SET,
        seeds=random.sample(range(100), 5))
def benchmark(exec_func=None, *, plot=True, auto=False):
    """Decorator for benchmark function.

    Args:
        exec_func (func): The experiment function.
        plot (bool): Whether the result of this run needs to be plotted.
            PNG files will be generated in sub folder /plot.
        auto (auto): Whether this is automatic benchmarking. JSON files
            will be generated in sub folder /auto.

    Returns:
        func: The wrapper function.

    """
    # Support both the bare ``@benchmark`` form and the parameterized
    # ``@benchmark(plot=..., auto=...)`` form: when called without a
    # function, return a partially-applied decorator.
    if exec_func is None:
        return functools.partial(benchmark, plot=plot, auto=auto)

    @functools.wraps(exec_func)
    def wrapper_func():
        """The wrapper function."""
        # pylint: disable=global-statement
        global _plot, _log_dir, _auto
        # _plot collects per-env axis labels while plotting is enabled;
        # None disables plot collection in iterate_experiments.
        _plot = {} if plot else None
        plt.close('all')
        _log_dir = _get_log_dir(exec_func.__name__)
        # Never clobber an earlier run: append a numeric suffix until
        # the directory name is unused.
        if os.path.exists(_log_dir):
            count = 1
            while os.path.exists(_log_dir + '_' + str(count)):
                count += 1
            _log_dir = _log_dir + '_' + str(count)
        if auto:
            _auto = auto
            auto_dir = os.path.join(_log_dir, 'auto')
            os.makedirs(auto_dir)
        exec_func()
        if plot:
            # One PNG per environment, aggregating all curves that
            # iterate_experiments drew into that figure.
            plot_dir = os.path.join(_log_dir, 'plot')
            os.makedirs(plot_dir)
            for env_id in _plot:
                plt.figure(env_id)
                plt.legend()
                plt.xlabel(_plot[env_id]['xlabel'])
                plt.ylabel(_plot[env_id]['ylabel'])
                plt.title(env_id)
                plt.savefig(plot_dir + '/' + env_id)
        if auto:
            _upload_to_gcp_storage(_log_dir)

    return wrapper_func
Args:
exec_func (func): The experiment function.
plot (bool): Whether the result of this run needs to be plotted.
PNG files will be generated in sub folder /plot.
auto (auto): Whether this is automatic benchmarking. JSON files
will be generated in sub folder /auto.
Returns:
func: The wrapper function.
| benchmark | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/helper.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/helper.py | MIT |
def iterate_experiments(func,
                        env_ids,
                        snapshot_config=None,
                        seeds=None,
                        xcolumn='TotalEnvSteps',
                        xlabel='Total Environment Steps',
                        ycolumn='Evaluation/AverageReturn',
                        ylabel='Average Return'):
    """Iterate experiments for benchmarking over env_ids and seeds.

    Args:
        env_ids (list[str]): List of environment ids.
        snapshot_config (garage.experiment.SnapshotConfig): The experiment
            configuration used by :class:`~Trainer` to create the
            :class:`~Snapshotter`.
        seeds (list[int]): List of seeds.
        func (func): The experiment function.
        xcolumn (str): Which column should be the JSON x axis.
        xlabel (str): Label name for x axis.
        ycolumn (str): Which column should be the JSON y axis.
        ylabel (str): Label name for y axis.
    """
    # Dashes read better than underscores in plot legends / file names.
    func_name = func.__name__.replace('_', '-')

    if seeds is None:
        seeds = random.sample(range(100), 4)

    for env_id in env_ids:
        task_ys = []

        if _plot is not None and env_id not in _plot:
            _plot[env_id] = {'xlabel': xlabel, 'ylabel': ylabel}

        for seed in seeds:
            exp_name = func_name + '_' + env_id + '_' + str(seed)
            sub_log_dir = os.path.join(_log_dir, exp_name)

            # Each trial gets a clean TF graph so state cannot leak
            # between seeds.
            tf.compat.v1.reset_default_graph()

            ctxt = dict(log_dir=sub_log_dir)
            if snapshot_config:
                ctxt.update(snapshot_config)

            func(ctxt, env_id=env_id, seed=seed)

            if _plot is not None or _auto:
                xs, ys = _read_csv(sub_log_dir, xcolumn, ycolumn)
                task_ys.append(ys)

        if _plot is not None or _auto:
            # NOTE(review): ``xs`` comes from the *last* seed's run;
            # this assumes all seeds log the same x values.
            ys_mean = np.array(task_ys).mean(axis=0)
            ys_std = np.array(task_ys).std(axis=0)

            if _plot is not None:
                plt.figure(env_id)
                plt.plot(xs, ys_mean, label=func_name)
                # Shade one standard deviation around the mean.
                plt.fill_between(xs, (ys_mean - ys_std), (ys_mean + ys_std),
                                 alpha=.1)
            if _auto:
                _export_to_json(env_id + '_' + func_name, xs, xlabel, ys_mean,
                                ylabel, ys_std)
Args:
env_ids (list[str]): List of environment ids.
snapshot_config (garage.experiment.SnapshotConfig): The experiment
configuration used by :class:`~Trainer` to create the
:class:`~Snapshotter`.
seeds (list[int]): List of seeds.
func (func): The experiment function.
xcolumn (str): Which column should be the JSON x axis.
xlabel (str): Label name for x axis.
ycolumn (str): Which column should be the JSON y axis.
ylabel (str): Label name for y axis.
| iterate_experiments | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/helper.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/helper.py | MIT |
def _get_log_dir(exec_func_name):
"""Get the log directory given the experiment name.
Args:
exec_func_name (str): The function name which runs benchmarks.
Returns:
str: Log directory.
"""
cwd = pathlib.Path.cwd()
return str(cwd.joinpath('data', 'local', 'benchmarks', exec_func_name)) | Get the log directory given the experiment name.
Args:
exec_func_name (str): The function name which runs benchmarks.
Returns:
str: Log directory.
| _get_log_dir | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/helper.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/helper.py | MIT |
def _read_csv(log_dir, xcolumn, ycolumn):
"""Read csv files and return xs and ys.
Args:
log_dir (str): Log directory for csv file.
xcolumn (str): Which column should be the JSON x axis.
ycolumn (str): Which column should be the JSON y axis.
Returns:
list: List of x axis points.
list: List of y axis points.
"""
xs, ys = [], []
with open(os.path.join(log_dir, 'progress.csv'), 'r') as csv_file:
for row in csv.DictReader(csv_file):
xs.append(float(row[xcolumn]))
ys.append(float(row[ycolumn]))
return xs, ys | Read csv files and return xs and ys.
Args:
log_dir (str): Log directory for csv file.
xcolumn (str): Which column should be the JSON x axis.
ycolumn (str): Which column should be the JSON y axis.
Returns:
list: List of x axis points.
list: List of y axis points.
| _read_csv | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/helper.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/helper.py | MIT |
def _export_to_json(json_name, xs, xlabel, ys, ylabel, ys_std):
    """Write one benchmark curve to ``<log_dir>/auto/<json_name>.json``.

    Args:
        json_name (str): The JSON file name.
        xs (list): List of x axis points.
        xlabel (str): Label name for x axis.
        ys (np.array): List of y axis points.
        ylabel (str): Label name for y axis.
        ys_std (np.array): Standard deviation of the y axis, used to
            derive the confidence-interval bounds.
    """
    payload = dict(
        x=xs,
        y=ys.tolist(),
        y_min=(ys - ys_std).tolist(),
        y_max=(ys + ys_std).tolist(),
        xlabel=xlabel,
        ylabel=ylabel,
        git_hash=_get_git_hash(),
    )
    json_path = os.path.join(_log_dir, 'auto', json_name + '.json')
    with open(json_path, 'w') as json_file:
        json.dump(payload, json_file)
Args:
json_name (str): The JSON file name.
xs (list): List of x axis points
xlabel (str): Label name for x axis.
ys (np.array): List of y axis points
ylabel (str): Label name for y axis.
ys_std (np.array): Standard deviation of y asis, used to calculate
upper and lower boundary for confidence interval.
| _export_to_json | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/helper.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/helper.py | MIT |
def _upload_to_gcp_storage(exec_dir):
    """Upload every file in *exec_dir*'s sub-folders to GCP storage.

    Args:
        exec_dir (str): The execution directory.
    """
    bucket = storage.Client().bucket('resl-garage-benchmarks')
    exec_name = os.path.basename(exec_dir)
    for entry in os.listdir(exec_dir):
        folder_path = os.path.join(exec_dir, entry)
        # Only one level of sub-folders is mirrored; top-level files
        # are skipped.
        if os.path.isfile(folder_path):
            continue
        remote_folder = os.path.join(exec_name, entry)
        for file_name in os.listdir(folder_path):
            file_path = os.path.join(folder_path, file_name)
            if os.path.isfile(file_path):
                blob = bucket.blob(os.path.join(remote_folder, file_name))
                blob.upload_from_filename(file_path)
Args:
exec_dir (str): The execution directory.
| _upload_to_gcp_storage | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/helper.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/helper.py | MIT |
def run(names):
    """Run selected benchmarks.

    Args:
        names (tuple): Benchmark names.

    Raises:
        BadParameter: if any run name is invalid or duplicated.
    """
    if not names:
        raise click.BadParameter('Empty names!')
    if len(set(names)) != len(names):
        raise click.BadParameter('Duplicate names!')
    options = _get_all_options()
    unknown = [name for name in names if name not in options]
    if unknown:
        raise click.BadParameter(
            'Invalid run name! Make sure every name can be found in '
            '`garage_benchmark list`!')
    for name in names:
        options[name]()
Args:
names (tuple): Benchmark names.
Raises:
BadParameter: if any run name is invalid or duplicated.
| run | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/run_benchmarks.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/run_benchmarks.py | MIT |
def _get_all_options():
    """Return a dict containing all benchmark options.

    Dict of (str: obj) representing benchmark name and its function object.

    Returns:
        dict: Benchmark options.
    """
    modules = (
        benchmark_algos,
        benchmark_policies,
        benchmark_baselines,
        benchmark_q_functions,
        benchmark_auto,
    )
    options = {}
    for module in modules:
        options.update(_get_runs_dict(module))
    return options
Dict of (str: obj) representing benchmark name and its function object.
Returns:
dict: Benchmark options.
| _get_all_options | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/run_benchmarks.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/run_benchmarks.py | MIT |
def _get_runs_dict(module):
"""Return a dict containing benchmark options of the module.
Dict of (str: obj) representing benchmark name and its function object.
Args:
module (object): Module object.
Returns:
dict: Benchmark options of the module.
"""
d = {}
for name, obj in inspect.getmembers(module):
if inspect.isfunction(obj) and name.endswith('benchmarks'):
d[name] = obj
return d | Return a dict containing benchmark options of the module.
Dict of (str: obj) representing benchmark name and its function object.
Args:
module (object): Module object.
Returns:
dict: Benchmark options of the module.
| _get_runs_dict | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/run_benchmarks.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/run_benchmarks.py | MIT |
def _echo_run_names(header, d):
    """Echo run names to the command line.

    Args:
        header (str): The header name.
        d (dict): The dict containing benchmark options.
    """
    click.echo('-----' + header + '-----')
    for run_name in d:
        click.echo(run_name)
    # Trailing blank line separates this section from the next.
    click.echo()
Args:
header (str): The header name.
d (dict): The dict containing benchmark options.
| _echo_run_names | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/run_benchmarks.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/run_benchmarks.py | MIT |
def ddpg_garage_tf(ctxt, env_id, seed):
    """Create garage TensorFlow DDPG model and training.

    Args:
        ctxt (ExperimentContext): The experiment configuration used by
            :class:`~Trainer` to create the :class:`~Snapshotter`.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.
    """
    deterministic.set_seed(seed)

    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))

        # Deterministic actor network; tanh output keeps actions in
        # the normalized action range.
        policy = ContinuousMLPPolicy(
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['policy_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.tanh)

        # OU noise on top of the deterministic policy for exploration.
        exploration_policy = AddOrnsteinUhlenbeckNoise(
            env.spec, policy, sigma=hyper_parameters['sigma'])

        qf = ContinuousMLPQFunction(
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['qf_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu)

        replay_buffer = PathBuffer(
            capacity_in_transitions=hyper_parameters['replay_buffer_size'])

        sampler = LocalSampler(agents=exploration_policy,
                               envs=env,
                               max_episode_length=env.spec.max_episode_length,
                               is_tf_worker=True,
                               worker_class=FragmentWorker)

        algo = DDPG(env_spec=env.spec,
                    policy=policy,
                    qf=qf,
                    replay_buffer=replay_buffer,
                    sampler=sampler,
                    steps_per_epoch=hyper_parameters['steps_per_epoch'],
                    policy_lr=hyper_parameters['policy_lr'],
                    qf_lr=hyper_parameters['qf_lr'],
                    target_update_tau=hyper_parameters['tau'],
                    n_train_steps=hyper_parameters['n_train_steps'],
                    discount=hyper_parameters['discount'],
                    min_buffer_size=int(1e4),
                    exploration_policy=exploration_policy,
                    policy_optimizer=tf.compat.v1.train.AdamOptimizer,
                    qf_optimizer=tf.compat.v1.train.AdamOptimizer)

        trainer.setup(algo, env)
        trainer.train(n_epochs=hyper_parameters['n_epochs'],
                      batch_size=hyper_parameters['n_exploration_steps'])
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| ddpg_garage_tf | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/ddpg_garage_tf.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/ddpg_garage_tf.py | MIT |
def her_garage_tf(ctxt, env_id, seed):
    """Create garage TensorFlow HER model and training.

    Args:
        ctxt (ExperimentContext): The experiment configuration used by
            :class:`~Trainer` to create the :class:`~Snapshotter`.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed all RNGs before any network construction so weight init is
    # reproducible for this trial.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = ContinuousMLPPolicy(
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['policy_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.tanh,
        )
        # Exploration wraps the deterministic actor with Ornstein-Uhlenbeck
        # (temporally correlated) action noise.
        exploration_policy = AddOrnsteinUhlenbeckNoise(
            env_spec=env.spec, policy=policy, sigma=hyper_parameters['sigma'])
        qf = ContinuousMLPQFunction(
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['qf_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu,
        )
        # HER buffer relabels stored transitions with achieved goals;
        # replay_k is the number of hindsight goals per real transition and
        # reward_fn recomputes rewards for the relabeled goals.
        replay_buffer = HERReplayBuffer(
            env_spec=env.spec,
            capacity_in_transitions=hyper_parameters['replay_buffer_size'],
            replay_k=4,
            reward_fn=env.compute_reward,
        )
        sampler = LocalSampler(agents=exploration_policy,
                               envs=env,
                               max_episode_length=env.spec.max_episode_length,
                               is_tf_worker=True,
                               worker_class=FragmentWorker)
        # HER is DDPG trained on the hindsight-relabeled replay buffer.
        algo = DDPG(
            env_spec=env.spec,
            policy=policy,
            qf=qf,
            replay_buffer=replay_buffer,
            sampler=sampler,
            steps_per_epoch=hyper_parameters['steps_per_epoch'],
            policy_lr=hyper_parameters['policy_lr'],
            qf_lr=hyper_parameters['qf_lr'],
            target_update_tau=hyper_parameters['tau'],
            n_train_steps=hyper_parameters['n_train_steps'],
            discount=hyper_parameters['discount'],
            exploration_policy=exploration_policy,
            policy_optimizer=tf.compat.v1.train.AdamOptimizer,
            qf_optimizer=tf.compat.v1.train.AdamOptimizer,
            buffer_batch_size=256,
        )
        trainer.setup(algo, env)
        trainer.train(n_epochs=hyper_parameters['n_epochs'],
                      batch_size=hyper_parameters['n_exploration_steps'])
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| her_garage_tf | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/her_garage_tf.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/her_garage_tf.py | MIT |
def ppo_garage_pytorch(ctxt, env_id, seed):
    """Create garage PyTorch PPO model and training.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    trainer = Trainer(ctxt)
    env = normalize(GymEnv(env_id))
    policy = PyTorch_GMP(env.spec,
                         hidden_sizes=(32, 32),
                         hidden_nonlinearity=torch.tanh,
                         output_nonlinearity=None)
    value_function = GaussianMLPValueFunction(env_spec=env.spec,
                                              hidden_sizes=(32, 32),
                                              hidden_nonlinearity=torch.tanh,
                                              output_nonlinearity=None)
    # Separate optimizer wrappers give policy and value function their own
    # minibatched Adam updates (10 epochs over each batch).
    policy_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=2.5e-4)),
                                        policy,
                                        max_optimization_epochs=10,
                                        minibatch_size=64)
    vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=2.5e-4)),
                                    value_function,
                                    max_optimization_epochs=10,
                                    minibatch_size=64)
    sampler = RaySampler(agents=policy,
                         envs=env,
                         max_episode_length=env.spec.max_episode_length)
    algo = PyTorch_PPO(env_spec=env.spec,
                       policy=policy,
                       value_function=value_function,
                       sampler=sampler,
                       policy_optimizer=policy_optimizer,
                       vf_optimizer=vf_optimizer,
                       discount=0.99,
                       gae_lambda=0.95,
                       center_adv=True,
                       lr_clip_range=0.2)
    trainer.setup(algo, env)
    trainer.train(n_epochs=hyper_parameters['n_epochs'],
                  batch_size=hyper_parameters['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| ppo_garage_pytorch | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/ppo_garage_pytorch.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/ppo_garage_pytorch.py | MIT |
def ppo_garage_tf(ctxt, env_id, seed):
    """Create garage TensorFlow PPO model and training.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = TF_GMP(
            env_spec=env.spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )
        # Learned Gaussian MLP baseline (first-order optimizer, no trust
        # region) rather than the linear-feature baseline used elsewhere.
        baseline = TF_GMB(
            env_spec=env.spec,
            hidden_sizes=(32, 32),
            use_trust_region=False,
            optimizer=FirstOrderOptimizer,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=3e-4,
            ),
        )
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = TF_PPO(env_spec=env.spec,
                      policy=policy,
                      baseline=baseline,
                      sampler=sampler,
                      discount=0.99,
                      gae_lambda=0.95,
                      center_adv=True,
                      lr_clip_range=0.2,
                      optimizer_args=dict(batch_size=32,
                                          max_optimization_epochs=10,
                                          learning_rate=3e-4,
                                          verbose=True))
        trainer.setup(algo, env)
        trainer.train(n_epochs=hyper_parameters['n_epochs'],
                      batch_size=hyper_parameters['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| ppo_garage_tf | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/ppo_garage_tf.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/ppo_garage_tf.py | MIT |
def td3_garage_pytorch(ctxt, env_id, seed):
    """Create garage PyTorch TD3 model and training.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Localtrainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    trainer = Trainer(ctxt)
    # Total environment steps over the whole run; used to schedule the
    # Gaussian exploration noise (here constant, since max_sigma == min_sigma).
    num_timesteps = hyper_parameters['n_epochs'] * hyper_parameters[
        'steps_per_epoch'] * hyper_parameters['batch_size']
    env = normalize(GymEnv(env_id))
    policy = DeterministicMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=hyper_parameters['policy_hidden_sizes'],
        hidden_nonlinearity=F.relu,
        output_nonlinearity=torch.tanh)
    exploration_policy = AddGaussianNoise(env.spec,
                                          policy,
                                          total_timesteps=num_timesteps,
                                          max_sigma=hyper_parameters['sigma'],
                                          min_sigma=hyper_parameters['sigma'])
    # Used for the initial uniform-random warm-up steps before the learned
    # policy takes over.
    uniform_random_policy = UniformRandomPolicy(env.spec)
    # Twin Q-functions: TD3 takes the minimum of the two targets to reduce
    # overestimation bias.
    qf1 = ContinuousMLPQFunction(
        env_spec=env.spec,
        hidden_sizes=hyper_parameters['qf_hidden_sizes'],
        hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(
        env_spec=env.spec,
        hidden_sizes=hyper_parameters['qf_hidden_sizes'],
        hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(
        capacity_in_transitions=hyper_parameters['replay_buffer_size'])
    sampler = LocalSampler(agents=exploration_policy,
                           envs=env,
                           max_episode_length=env.spec.max_episode_length,
                           worker_class=FragmentWorker)
    td3 = TD3(
        env_spec=env.spec,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        sampler=sampler,
        exploration_policy=exploration_policy,
        uniform_random_policy=uniform_random_policy,
        replay_buffer=replay_buffer,
        steps_per_epoch=hyper_parameters['steps_per_epoch'],
        policy_lr=hyper_parameters['policy_lr'],
        qf_lr=hyper_parameters['qf_lr'],
        target_update_tau=hyper_parameters['target_update_tau'],
        discount=hyper_parameters['discount'],
        grad_steps_per_env_step=hyper_parameters['grad_steps_per_env_step'],
        start_steps=hyper_parameters['start_steps'],
        min_buffer_size=hyper_parameters['min_buffer_size'],
        buffer_batch_size=hyper_parameters['buffer_batch_size'],
        policy_optimizer=torch.optim.Adam,
        qf_optimizer=torch.optim.Adam,
        policy_noise_clip=hyper_parameters['policy_noise_clip'],
        policy_noise=hyper_parameters['policy_noise'])
    # Move the algorithm's networks to GPU when one is available.
    prefer_gpu()
    td3.to()
    trainer.setup(td3, env)
    trainer.train(n_epochs=hyper_parameters['n_epochs'],
                  batch_size=hyper_parameters['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Localtrainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| td3_garage_pytorch | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/td3_garage_pytorch.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/td3_garage_pytorch.py | MIT |
def td3_garage_tf(ctxt, env_id, seed):
    """Create garage TensorFlow TD3 model and training.

    Args:
        ctxt (ExperimentContext): The experiment configuration used by
            :class:`~Trainer` to create the :class:`~Snapshotter`.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        # Total environment steps over the whole run; schedules the Gaussian
        # exploration noise (constant here, since max_sigma == min_sigma).
        num_timesteps = (hyper_parameters['n_epochs'] *
                         hyper_parameters['steps_per_epoch'] *
                         hyper_parameters['n_exploration_steps'])
        env = normalize(GymEnv(env_id))
        policy = ContinuousMLPPolicy(
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['policy_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.tanh)
        exploration_policy = AddGaussianNoise(
            env.spec,
            policy,
            total_timesteps=num_timesteps,
            max_sigma=hyper_parameters['sigma'],
            min_sigma=hyper_parameters['sigma'])
        # Twin Q-functions (TD3's clipped double-Q); distinct names keep their
        # TF variable scopes separate.
        qf = ContinuousMLPQFunction(
            name='ContinuousMLPQFunction',
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['qf_hidden_sizes'],
            action_merge_layer=0,
            hidden_nonlinearity=tf.nn.relu)
        qf2 = ContinuousMLPQFunction(
            name='ContinuousMLPQFunction2',
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['qf_hidden_sizes'],
            action_merge_layer=0,
            hidden_nonlinearity=tf.nn.relu)
        replay_buffer = PathBuffer(
            capacity_in_transitions=hyper_parameters['replay_buffer_size'])
        sampler = LocalSampler(agents=exploration_policy,
                               envs=env,
                               max_episode_length=env.spec.max_episode_length,
                               is_tf_worker=True,
                               worker_class=FragmentWorker)
        td3 = TD3(env.spec,
                  policy=policy,
                  qf=qf,
                  qf2=qf2,
                  replay_buffer=replay_buffer,
                  sampler=sampler,
                  steps_per_epoch=hyper_parameters['steps_per_epoch'],
                  policy_lr=hyper_parameters['policy_lr'],
                  qf_lr=hyper_parameters['qf_lr'],
                  target_update_tau=hyper_parameters['tau'],
                  n_train_steps=hyper_parameters['n_train_steps'],
                  discount=hyper_parameters['discount'],
                  min_buffer_size=hyper_parameters['min_buffer_size'],
                  buffer_batch_size=hyper_parameters['buffer_batch_size'],
                  exploration_policy=exploration_policy,
                  policy_optimizer=tf.compat.v1.train.AdamOptimizer,
                  qf_optimizer=tf.compat.v1.train.AdamOptimizer)
        trainer.setup(td3, env)
        trainer.train(n_epochs=hyper_parameters['n_epochs'],
                      batch_size=hyper_parameters['n_exploration_steps'])
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| td3_garage_tf | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/td3_garage_tf.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/td3_garage_tf.py | MIT |
def trpo_garage_pytorch(ctxt, env_id, seed):
    """Create garage PyTorch TRPO model and training.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    trainer = Trainer(ctxt)
    env = normalize(GymEnv(env_id))
    policy = PyTorch_GMP(env.spec,
                         hidden_sizes=hyper_parameters['hidden_sizes'],
                         hidden_nonlinearity=torch.tanh,
                         output_nonlinearity=None)
    value_function = GaussianMLPValueFunction(env_spec=env.spec,
                                              hidden_sizes=(32, 32),
                                              hidden_nonlinearity=torch.tanh,
                                              output_nonlinearity=None)
    sampler = LocalSampler(agents=policy,
                           envs=env,
                           max_episode_length=env.spec.max_episode_length)
    algo = PyTorch_TRPO(env_spec=env.spec,
                        policy=policy,
                        value_function=value_function,
                        sampler=sampler,
                        discount=hyper_parameters['discount'],
                        gae_lambda=hyper_parameters['gae_lambda'])
    trainer.setup(algo, env)
    trainer.train(n_epochs=hyper_parameters['n_epochs'],
                  batch_size=hyper_parameters['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| trpo_garage_pytorch | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/trpo_garage_pytorch.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/trpo_garage_pytorch.py | MIT |
def trpo_garage_tf(ctxt, env_id, seed):
    """Create garage TensorFlow TRPO model and training.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = GaussianMLPPolicy(
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['hidden_sizes'],
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = TRPO(env_spec=env.spec,
                    policy=policy,
                    baseline=baseline,
                    sampler=sampler,
                    discount=hyper_parameters['discount'],
                    gae_lambda=hyper_parameters['gae_lambda'],
                    max_kl_step=hyper_parameters['max_kl'])
        trainer.setup(algo, env)
        trainer.train(n_epochs=hyper_parameters['n_epochs'],
                      batch_size=hyper_parameters['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| trpo_garage_tf | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/trpo_garage_tf.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/trpo_garage_tf.py | MIT |
def vpg_garage_pytorch(ctxt, env_id, seed):
    """Create garage PyTorch VPG model and training.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    trainer = Trainer(ctxt)
    env = normalize(GymEnv(env_id))
    policy = PyTorch_GMP(env.spec,
                         hidden_sizes=hyper_parameters['hidden_sizes'],
                         hidden_nonlinearity=torch.tanh,
                         output_nonlinearity=None)
    value_function = GaussianMLPValueFunction(env_spec=env.spec,
                                              hidden_sizes=(32, 32),
                                              hidden_nonlinearity=torch.tanh,
                                              output_nonlinearity=None)
    # Separate optimizer wrappers give policy and value function their own
    # minibatched Adam updates (10 epochs over each batch).
    policy_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=2.5e-4)),
                                        policy,
                                        max_optimization_epochs=10,
                                        minibatch_size=64)
    vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=2.5e-4)),
                                    value_function,
                                    max_optimization_epochs=10,
                                    minibatch_size=64)
    sampler = RaySampler(agents=policy,
                         envs=env,
                         max_episode_length=env.spec.max_episode_length)
    algo = PyTorch_VPG(env_spec=env.spec,
                       policy=policy,
                       value_function=value_function,
                       sampler=sampler,
                       policy_optimizer=policy_optimizer,
                       vf_optimizer=vf_optimizer,
                       discount=hyper_parameters['discount'],
                       center_adv=hyper_parameters['center_adv'])
    trainer.setup(algo, env)
    trainer.train(n_epochs=hyper_parameters['n_epochs'],
                  batch_size=hyper_parameters['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| vpg_garage_pytorch | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/vpg_garage_pytorch.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/vpg_garage_pytorch.py | MIT |
def vpg_garage_tf(ctxt, env_id, seed):
    """Create garage TensorFlow VPG model and training.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = TF_GMP(
            env_spec=env.spec,
            hidden_sizes=hyper_parameters['hidden_sizes'],
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = TF_VPG(env_spec=env.spec,
                      policy=policy,
                      baseline=baseline,
                      sampler=sampler,
                      discount=hyper_parameters['discount'],
                      center_adv=hyper_parameters['center_adv'],
                      optimizer_args=dict(
                          learning_rate=hyper_parameters['learning_rate'], ))
        trainer.setup(algo, env)
        trainer.train(n_epochs=hyper_parameters['n_epochs'],
                      batch_size=hyper_parameters['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| vpg_garage_tf | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/algos/vpg_garage_tf.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/algos/vpg_garage_tf.py | MIT |
def continuous_mlp_baseline(ctxt, env_id, seed):
    """Create Continuous MLP Baseline on TF-PPO.

    Args:
        ctxt (ExperimentContext): The experiment configuration used by
            :class:`~Trainer` to create the :class:`~Snapshotter`.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        # NOTE(review): the 'policy_hidden_sizes' hyper-parameter is passed
        # as hidden_dim (a scalar recurrent size) — confirm it holds an int,
        # not a tuple of layer sizes.
        policy = GaussianLSTMPolicy(
            env_spec=env.spec,
            hidden_dim=hyper_params['policy_hidden_sizes'],
            hidden_nonlinearity=hyper_params['hidden_nonlinearity'],
        )
        baseline = ContinuousMLPBaseline(
            env_spec=env.spec,
            hidden_sizes=(64, 64),
        )
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = PPO(env_spec=env.spec,
                   policy=policy,
                   baseline=baseline,
                   sampler=sampler,
                   discount=hyper_params['discount'],
                   gae_lambda=hyper_params['gae_lambda'],
                   lr_clip_range=hyper_params['lr_clip_range'],
                   entropy_method=hyper_params['entropy_method'],
                   policy_ent_coeff=hyper_params['policy_ent_coeff'],
                   optimizer_args=dict(
                       batch_size=32,
                       max_optimization_epochs=10,
                       learning_rate=1e-3,
                   ),
                   center_adv=hyper_params['center_adv'],
                   stop_entropy_gradient=True)
        trainer.setup(algo, env)
        trainer.train(n_epochs=hyper_params['n_epochs'],
                      batch_size=hyper_params['n_exploration_steps'])
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| continuous_mlp_baseline | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/baselines/continuous_mlp_baseline.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/baselines/continuous_mlp_baseline.py | MIT |
def gaussian_cnn_baseline(ctxt, env_id, seed):
    """Create Gaussian CNN Baseline on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = CategoricalCNNPolicy(env_spec=env.spec,
                                      filters=params['conv_filters'],
                                      strides=params['conv_strides'],
                                      padding=params['conv_pad'],
                                      hidden_sizes=params['hidden_sizes'])
        # Baseline mirrors the policy's convolutional architecture so both
        # see the same visual features.
        baseline = GaussianCNNBaseline(
            env_spec=env.spec,
            filters=params['conv_filters'],
            strides=params['conv_strides'],
            padding=params['conv_pad'],
            hidden_sizes=params['hidden_sizes'],
            use_trust_region=params['use_trust_region'])
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = PPO(
            env_spec=env.spec,
            policy=policy,
            baseline=baseline,
            sampler=sampler,
            discount=0.99,
            gae_lambda=0.95,
            lr_clip_range=0.2,
            policy_ent_coeff=0.0,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )
        trainer.setup(algo, env)
        trainer.train(n_epochs=params['n_epochs'],
                      batch_size=params['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| gaussian_cnn_baseline | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/baselines/gaussian_cnn_baseline.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/baselines/gaussian_cnn_baseline.py | MIT |
def gaussian_mlp_baseline(ctxt, env_id, seed):
    """Create Gaussian MLP Baseline on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = GaussianMLPPolicy(
            env_spec=env.spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )
        # The component under benchmark: a learned Gaussian MLP baseline
        # trained with a first-order optimizer (no trust region).
        baseline = GaussianMLPBaseline(
            env_spec=env.spec,
            hidden_sizes=(64, 64),
            use_trust_region=False,
            optimizer=FirstOrderOptimizer,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = PPO(
            env_spec=env.spec,
            policy=policy,
            baseline=baseline,
            sampler=sampler,
            discount=0.99,
            gae_lambda=0.95,
            lr_clip_range=0.2,
            policy_ent_coeff=0.0,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )
        trainer.setup(algo, env)
        trainer.train(n_epochs=5, batch_size=2048)
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| gaussian_mlp_baseline | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/baselines/gaussian_mlp_baseline.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/baselines/gaussian_mlp_baseline.py | MIT |
def categorical_cnn_policy(ctxt, env_id, seed):
    """Create Categorical CNN Policy on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = CategoricalCNNPolicy(
            env_spec=env.spec,
            filters=hyper_params['conv_filters'],
            strides=hyper_params['conv_strides'],
            padding=hyper_params['conv_pad'],
            hidden_sizes=hyper_params['hidden_sizes'])
        # Baseline mirrors the policy's convolutional architecture.
        baseline = GaussianCNNBaseline(
            env_spec=env.spec,
            filters=hyper_params['conv_filters'],
            strides=hyper_params['conv_strides'],
            padding=hyper_params['conv_pad'],
            hidden_sizes=hyper_params['hidden_sizes'],
            use_trust_region=hyper_params['use_trust_region'])
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = PPO(env_spec=env.spec,
                   policy=policy,
                   baseline=baseline,
                   sampler=sampler,
                   discount=0.99,
                   gae_lambda=0.95,
                   lr_clip_range=0.2,
                   policy_ent_coeff=0.0,
                   optimizer_args=dict(
                       batch_size=32,
                       max_optimization_epochs=10,
                       learning_rate=1e-3,
                   ))
        trainer.setup(algo, env)
        trainer.train(n_epochs=hyper_params['n_epochs'],
                      batch_size=hyper_params['batch_size'])
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| categorical_cnn_policy | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/policies/categorical_cnn_policy.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/policies/categorical_cnn_policy.py | MIT |
def categorical_gru_policy(ctxt, env_id, seed):
    """Create Categorical GRU Policy on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = CategoricalGRUPolicy(
            env_spec=env.spec,
            hidden_dim=32,
            hidden_nonlinearity=tf.nn.tanh,
        )
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        # is_tf_worker=True wraps each sampler worker in a TF session, as
        # every other TF experiment in this suite does; it was missing here.
        sampler = LocalSampler(agents=policy,
                               envs=env,
                               max_episode_length=env.spec.max_episode_length,
                               is_tf_worker=True)
        algo = PPO(
            env_spec=env.spec,
            policy=policy,
            baseline=baseline,
            sampler=sampler,
            discount=0.99,
            gae_lambda=0.95,
            lr_clip_range=0.2,
            policy_ent_coeff=0.0,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )
        trainer.setup(algo, env)
        trainer.train(n_epochs=488, batch_size=2048)
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| categorical_gru_policy | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/policies/categorical_gru_policy.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/policies/categorical_gru_policy.py | MIT |
def categorical_lstm_policy(ctxt, env_id, seed):
    """Create Categorical LSTM Policy on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = CategoricalLSTMPolicy(
            env_spec=env.spec,
            hidden_dim=32,
            hidden_nonlinearity=tf.nn.tanh,
        )
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = PPO(
            env_spec=env.spec,
            policy=policy,
            baseline=baseline,
            sampler=sampler,
            discount=0.99,
            gae_lambda=0.95,
            lr_clip_range=0.2,
            policy_ent_coeff=0.0,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )
        trainer.setup(algo, env)
        trainer.train(n_epochs=488, batch_size=2048)
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| categorical_lstm_policy | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/policies/categorical_lstm_policy.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/policies/categorical_lstm_policy.py | MIT |
def categorical_mlp_policy(ctxt, env_id, seed):
    """Create Categorical MLP Policy on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the
            snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = CategoricalMLPPolicy(
            env_spec=env.spec,
            hidden_nonlinearity=tf.nn.tanh,
        )
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        # is_tf_worker=True gives each sampler worker a TF session.
        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)
        algo = PPO(env_spec=env.spec,
                   policy=policy,
                   baseline=baseline,
                   sampler=sampler,
                   discount=0.99,
                   gae_lambda=0.95,
                   lr_clip_range=0.2,
                   policy_ent_coeff=0.0,
                   optimizer_args=dict(
                       batch_size=32,
                       max_optimization_epochs=10,
                       learning_rate=1e-3,
                   ),
                   name='CategoricalMLPPolicyBenchmark')
        trainer.setup(algo, env)
        trainer.train(n_epochs=5, batch_size=2048)
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| categorical_mlp_policy | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/policies/categorical_mlp_policy.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/policies/categorical_mlp_policy.py | MIT |
def continuous_mlp_policy(ctxt, env_id, seed):
    """Create Continuous MLP Policy on TF-DDPG.

    Args:
        ctxt (ExperimentContext): The experiment configuration used by
            :class:`~Trainer` to create the :class:`~Snapshotter`.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    # Seed before constructing networks so weight init is reproducible.
    deterministic.set_seed(seed)
    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))
        policy = ContinuousMLPPolicy(
            env_spec=env.spec,
            name='ContinuousMLPPolicy',
            hidden_sizes=hyper_params['policy_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.tanh)
        # Exploration wraps the deterministic actor with Ornstein-Uhlenbeck
        # (temporally correlated) action noise.
        exploration_policy = AddOrnsteinUhlenbeckNoise(
            env.spec, policy, sigma=hyper_params['sigma'])
        qf = ContinuousMLPQFunction(
            env_spec=env.spec,
            hidden_sizes=hyper_params['qf_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu,
            name='ContinuousMLPQFunction')
        replay_buffer = PathBuffer(
            capacity_in_transitions=hyper_params['replay_buffer_size'])
        sampler = LocalSampler(agents=exploration_policy,
                               envs=env,
                               max_episode_length=env.spec.max_episode_length,
                               is_tf_worker=True,
                               worker_class=FragmentWorker)
        ddpg = DDPG(env_spec=env.spec,
                    policy=policy,
                    qf=qf,
                    replay_buffer=replay_buffer,
                    sampler=sampler,
                    steps_per_epoch=hyper_params['steps_per_epoch'],
                    policy_lr=hyper_params['policy_lr'],
                    qf_lr=hyper_params['qf_lr'],
                    target_update_tau=hyper_params['tau'],
                    n_train_steps=hyper_params['n_train_steps'],
                    discount=hyper_params['discount'],
                    min_buffer_size=int(1e4),
                    exploration_policy=exploration_policy,
                    policy_optimizer=tf.compat.v1.train.AdamOptimizer,
                    qf_optimizer=tf.compat.v1.train.AdamOptimizer)
        trainer.setup(ddpg, env)
        trainer.train(n_epochs=hyper_params['n_epochs'],
                      batch_size=hyper_params['n_exploration_steps'])
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| continuous_mlp_policy | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/policies/continuous_mlp_policy.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/policies/continuous_mlp_policy.py | MIT |
def gaussian_gru_policy(ctxt, env_id, seed):
    """Create Gaussian GRU Policy on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    deterministic.set_seed(seed)

    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))

        policy = GaussianGRUPolicy(
            env_spec=env.spec,
            hidden_dim=32,
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )

        baseline = GaussianMLPBaseline(
            env_spec=env.spec,
            hidden_sizes=(64, 64),
            use_trust_region=False,
            optimizer=FirstOrderOptimizer,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )

        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)

        algo = PPO(
            env_spec=env.spec,
            policy=policy,
            baseline=baseline,
            sampler=sampler,
            discount=0.99,
            gae_lambda=0.95,
            lr_clip_range=0.2,
            policy_ent_coeff=0.0,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )

        trainer.setup(algo, env)
        trainer.train(n_epochs=5, batch_size=2048)
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| gaussian_gru_policy | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/policies/gaussian_gru_policy.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/policies/gaussian_gru_policy.py | MIT |
def gaussian_lstm_policy(ctxt, env_id, seed):
    """Create Gaussian LSTM Policy on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    deterministic.set_seed(seed)

    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))

        policy = GaussianLSTMPolicy(
            env_spec=env.spec,
            hidden_dim=32,
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )

        baseline = GaussianMLPBaseline(
            env_spec=env.spec,
            hidden_sizes=(64, 64),
            use_trust_region=False,
            optimizer=FirstOrderOptimizer,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )

        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)

        algo = PPO(
            env_spec=env.spec,
            policy=policy,
            baseline=baseline,
            sampler=sampler,
            discount=0.99,
            gae_lambda=0.95,
            lr_clip_range=0.2,
            policy_ent_coeff=0.0,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )

        trainer.setup(algo, env)
        trainer.train(n_epochs=5, batch_size=2048)
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| gaussian_lstm_policy | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/policies/gaussian_lstm_policy.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/policies/gaussian_lstm_policy.py | MIT |
def gaussian_mlp_policy(ctxt, env_id, seed):
    """Create Gaussian MLP Policy on TF-PPO.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the snapshotter.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    deterministic.set_seed(seed)

    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))

        policy = GaussianMLPPolicy(
            env_spec=env.spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )

        baseline = GaussianMLPBaseline(
            env_spec=env.spec,
            hidden_sizes=(64, 64),
            use_trust_region=False,
            optimizer=FirstOrderOptimizer,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )

        sampler = RaySampler(agents=policy,
                             envs=env,
                             max_episode_length=env.spec.max_episode_length,
                             is_tf_worker=True)

        algo = PPO(
            env_spec=env.spec,
            policy=policy,
            baseline=baseline,
            sampler=sampler,
            discount=0.99,
            gae_lambda=0.95,
            lr_clip_range=0.2,
            policy_ent_coeff=0.0,
            optimizer_args=dict(
                batch_size=32,
                max_optimization_epochs=10,
                learning_rate=1e-3,
            ),
        )

        trainer.setup(algo, env)
        trainer.train(n_epochs=5, batch_size=2048)
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| gaussian_mlp_policy | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/policies/gaussian_mlp_policy.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/policies/gaussian_mlp_policy.py | MIT |
def continuous_mlp_q_function(ctxt, env_id, seed):
    """Create Continuous MLP QFunction on TF-DDPG.

    Args:
        ctxt (ExperimentContext): The experiment configuration used by
            :class:`~Trainer` to create the :class:`~Snapshotter`.
        env_id (str): Environment id of the task.
        seed (int): Random positive integer for the trial.

    """
    deterministic.set_seed(seed)

    with TFTrainer(ctxt) as trainer:
        env = normalize(GymEnv(env_id))

        policy = ContinuousMLPPolicy(
            env_spec=env.spec,
            name='ContinuousMLPPolicy',
            hidden_sizes=hyper_params['policy_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.tanh)

        # DDPG explores by adding Ornstein-Uhlenbeck noise to the
        # deterministic policy's actions.
        exploration_policy = AddOrnsteinUhlenbeckNoise(
            env.spec, policy, sigma=hyper_params['sigma'])

        qf = ContinuousMLPQFunction(
            env_spec=env.spec,
            hidden_sizes=hyper_params['qf_hidden_sizes'],
            hidden_nonlinearity=tf.nn.relu,
            name='ContinuousMLPQFunction')

        replay_buffer = PathBuffer(
            capacity_in_transitions=hyper_params['replay_buffer_size'])

        sampler = LocalSampler(agents=exploration_policy,
                               envs=env,
                               max_episode_length=env.spec.max_episode_length,
                               is_tf_worker=True,
                               worker_class=FragmentWorker)

        ddpg = DDPG(env_spec=env.spec,
                    policy=policy,
                    qf=qf,
                    replay_buffer=replay_buffer,
                    sampler=sampler,
                    steps_per_epoch=hyper_params['steps_per_epoch'],
                    policy_lr=hyper_params['policy_lr'],
                    qf_lr=hyper_params['qf_lr'],
                    target_update_tau=hyper_params['tau'],
                    n_train_steps=hyper_params['n_train_steps'],
                    discount=hyper_params['discount'],
                    min_buffer_size=int(1e4),
                    exploration_policy=exploration_policy,
                    policy_optimizer=tf.compat.v1.train.AdamOptimizer,
                    qf_optimizer=tf.compat.v1.train.AdamOptimizer)

        trainer.setup(ddpg, env)
        trainer.train(n_epochs=hyper_params['n_epochs'],
                      batch_size=hyper_params['n_exploration_steps'])
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
| continuous_mlp_q_function | python | rlworkgroup/garage | benchmarks/src/garage_benchmarks/experiments/q_functions/continuous_mlp_q_function.py | https://github.com/rlworkgroup/garage/blob/master/benchmarks/src/garage_benchmarks/experiments/q_functions/continuous_mlp_q_function.py | MIT |
def setup(self, algo, env):
    """Set up trainer for algorithm and environment.

    This method saves algo and env within trainer and creates a sampler.

    Note:
        After setup() is called all variables in session should have been
        initialized. setup() respects existing values in session so
        policy weights can be loaded before setup().

    Args:
        algo (RLAlgorithm): An algorithm instance. If this algo want to use
            samplers, it should have a `_sampler` field.
        env (Environment): An environment instance.

    """
    self._algo = algo
    self._env = env
    # Record the seed in effect at setup time so restore() can reproduce it.
    self._seed = get_seed()

    if hasattr(self._algo, '_sampler'):
        # pylint: disable=protected-access
        self._sampler = self._algo._sampler

    self._has_setup = True
This method saves algo and env within trainer and creates a sampler.
Note:
After setup() is called all variables in session should have been
initialized. setup() respects existing values in session so
policy weights can be loaded before setup().
Args:
algo (RLAlgorithm): An algorithm instance. If this algo want to use
samplers, it should have a `_sampler` field.
env (Environment): An environment instance.
| setup | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def obtain_episodes(self,
                    itr,
                    batch_size=None,
                    agent_update=None,
                    env_update=None):
    """Obtain one batch of episodes.

    Args:
        itr (int): Index of iteration (epoch).
        batch_size (int): Number of steps in batch. This is a hint that the
            sampler may or may not respect.
        agent_update (object): Value which will be passed into the
            `agent_update_fn` before doing sampling episodes. If a list is
            passed in, it must have length exactly `factory.n_workers`, and
            will be spread across the workers.
        env_update (object): Value which will be passed into the
            `env_update_fn` before sampling episodes. If a list is passed
            in, it must have length exactly `factory.n_workers`, and will
            be spread across the workers.

    Raises:
        ValueError: If the trainer was initialized without a sampler, or
            batch_size wasn't provided here or to train.

    Returns:
        EpisodeBatch: Batch of episodes.

    """
    if self._sampler is None:
        raise ValueError('trainer was not initialized with `sampler`. '
                         'the algo should have a `_sampler` field when'
                         '`setup()` is called')
    if batch_size is None and self._train_args.batch_size is None:
        raise ValueError(
            'trainer was not initialized with `batch_size`. '
            'Either provide `batch_size` to trainer.train, '
            ' or pass `batch_size` to trainer.obtain_samples.')

    if agent_update is None:
        # Default to the exploration policy's parameters when present,
        # otherwise fall back to the base policy.
        policy = getattr(self._algo, 'exploration_policy', None)
        if policy is None:
            # This field should exist, since self.make_sampler would have
            # failed otherwise.
            policy = self._algo.policy
        agent_update = policy.get_param_values()

    episodes = self._sampler.obtain_samples(
        itr, (batch_size or self._train_args.batch_size),
        agent_update=agent_update,
        env_update=env_update)
    self._stats.total_env_steps += sum(episodes.lengths)
    return episodes
Args:
itr (int): Index of iteration (epoch).
batch_size (int): Number of steps in batch. This is a hint that the
sampler may or may not respect.
agent_update (object): Value which will be passed into the
`agent_update_fn` before doing sampling episodes. If a list is
passed in, it must have length exactly `factory.n_workers`, and
will be spread across the workers.
env_update (object): Value which will be passed into the
`env_update_fn` before sampling episodes. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
Raises:
ValueError: If the trainer was initialized without a sampler, or
batch_size wasn't provided here or to train.
Returns:
EpisodeBatch: Batch of episodes.
| obtain_episodes | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def obtain_samples(self,
                   itr,
                   batch_size=None,
                   agent_update=None,
                   env_update=None):
    """Obtain one batch of samples.

    Args:
        itr (int): Index of iteration (epoch).
        batch_size (int): Number of steps in batch.
            This is a hint that the sampler may or may not respect.
        agent_update (object): Value which will be passed into the
            `agent_update_fn` before sampling episodes. If a list is passed
            in, it must have length exactly `factory.n_workers`, and will
            be spread across the workers.
        env_update (object): Value which will be passed into the
            `env_update_fn` before sampling episodes. If a list is passed
            in, it must have length exactly `factory.n_workers`, and will
            be spread across the workers.

    Raises:
        ValueError: Raised if the trainer was initialized without a
            sampler, or batch_size wasn't provided here or to train.

    Returns:
        list[dict]: One batch of samples.

    """
    # Thin wrapper: sample episodes, then flatten them to the legacy
    # list-of-dicts format.
    eps = self.obtain_episodes(itr, batch_size, agent_update, env_update)
    return eps.to_list()
Args:
itr (int): Index of iteration (epoch).
batch_size (int): Number of steps in batch.
This is a hint that the sampler may or may not respect.
agent_update (object): Value which will be passed into the
`agent_update_fn` before sampling episodes. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update (object): Value which will be passed into the
`env_update_fn` before sampling episodes. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
Raises:
ValueError: Raised if the trainer was initialized without a
sampler, or batch_size wasn't provided here
or to train.
Returns:
list[dict]: One batch of samples.
| obtain_samples | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def save(self, epoch):
    """Save snapshot of current batch.

    Args:
        epoch (int): Epoch.

    Raises:
        NotSetupError: if save() is called before the trainer is set up.

    """
    if not self._has_setup:
        raise NotSetupError('Use setup() to setup trainer before saving.')

    logger.log('Saving snapshot...')

    params = dict()
    # Save arguments needed to resume training exactly where it stopped.
    params['seed'] = self._seed
    params['train_args'] = self._train_args
    params['stats'] = self._stats
    # Save states (the pickled env/algo carry the actual weights).
    params['env'] = self._env
    params['algo'] = self._algo
    params['n_workers'] = self._n_workers
    params['worker_class'] = self._worker_class
    params['worker_args'] = self._worker_args

    self._snapshotter.save_itr_params(epoch, params)

    logger.log('Saved')
Args:
epoch (int): Epoch.
Raises:
NotSetupError: if save() is called before the trainer is set up.
| save | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def restore(self, from_dir, from_epoch='last'):
    """Restore experiment from snapshot.

    Args:
        from_dir (str): Directory of the pickle file
            to resume experiment from.
        from_epoch (str or int): The epoch to restore from.
            Can be 'first', 'last' or a number.
            Not applicable when snapshot_mode='last'.

    Returns:
        TrainArgs: Arguments for train().

    """
    saved = self._snapshotter.load(from_dir, from_epoch)

    self._seed = saved['seed']
    self._train_args = saved['train_args']
    self._stats = saved['stats']

    # Re-seed before re-building the algo/env so the run is reproducible.
    set_seed(self._seed)

    self.setup(env=saved['env'], algo=saved['algo'])

    n_epochs = self._train_args.n_epochs
    last_epoch = self._stats.total_epoch
    last_itr = self._stats.total_itr
    total_env_steps = self._stats.total_env_steps
    batch_size = self._train_args.batch_size
    store_episodes = self._train_args.store_episodes
    pause_for_plot = self._train_args.pause_for_plot

    fmt = '{:<20} {:<15}'
    logger.log('Restore from snapshot saved in %s' %
               self._snapshotter.snapshot_dir)
    logger.log(fmt.format('-- Train Args --', '-- Value --'))
    logger.log(fmt.format('n_epochs', n_epochs))
    logger.log(fmt.format('last_epoch', last_epoch))
    logger.log(fmt.format('batch_size', batch_size))
    logger.log(fmt.format('store_episodes', store_episodes))
    logger.log(fmt.format('pause_for_plot', pause_for_plot))
    logger.log(fmt.format('-- Stats --', '-- Value --'))
    logger.log(fmt.format('last_itr', last_itr))
    logger.log(fmt.format('total_env_steps', total_env_steps))

    # Resume from the epoch after the one that was snapshotted.
    self._train_args.start_epoch = last_epoch + 1
    # Return a copy so callers can tweak args without mutating our state.
    return copy.copy(self._train_args)
Args:
from_dir (str): Directory of the pickle file
to resume experiment from.
from_epoch (str or int): The epoch to restore from.
Can be 'first', 'last' or a number.
Not applicable when snapshot_mode='last'.
Returns:
TrainArgs: Arguments for train().
| restore | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def log_diagnostics(self, pause_for_plot=False):
    """Log diagnostics.

    Args:
        pause_for_plot (bool): Pause for plot.

    """
    logger.log('Time %.2f s' % (time.time() - self._start_time))
    logger.log('EpochTime %.2f s' % (time.time() - self._itr_start_time))
    tabular.record('TotalEnvSteps', self._stats.total_env_steps)
    logger.log(tabular)

    if self._plot:
        self._plotter.update_plot(self._algo.policy,
                                  self._algo.max_episode_length)
        if pause_for_plot:
            # Fixed prompt: the source dump fused an implicit string-literal
            # continuation into the message, leaving a spurious '" "'.
            input('Plotting evaluation run: Press Enter to continue...')
Args:
pause_for_plot (bool): Pause for plot.
| log_diagnostics | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def train(self,
          n_epochs,
          batch_size=None,
          plot=False,
          store_episodes=False,
          pause_for_plot=False):
    """Start training.

    Args:
        n_epochs (int): Number of epochs.
        batch_size (int or None): Number of environment steps in one batch.
        plot (bool): Visualize an episode from the policy after each epoch.
        store_episodes (bool): Save episodes in snapshot.
        pause_for_plot (bool): Pause for plot.

    Raises:
        NotSetupError: If train() is called before setup().

    Returns:
        float: The average return in last epoch cycle.

    """
    if not self._has_setup:
        raise NotSetupError(
            'Use setup() to setup trainer before training.')

    # Save arguments for restore
    self._train_args = TrainArgs(n_epochs=n_epochs,
                                 batch_size=batch_size,
                                 plot=plot,
                                 store_episodes=store_episodes,
                                 pause_for_plot=pause_for_plot,
                                 start_epoch=0)

    self._plot = plot
    self._start_worker()

    # Dump a JSON summary of the experiment next to the snapshots so the
    # run can be identified later.
    log_dir = self._snapshotter.snapshot_dir
    summary_file = os.path.join(log_dir, 'experiment.json')
    dump_json(summary_file, self)

    average_return = self._algo.train(self)
    self._shutdown_worker()

    return average_return
Args:
n_epochs (int): Number of epochs.
batch_size (int or None): Number of environment steps in one batch.
plot (bool): Visualize an episode from the policy after each epoch.
store_episodes (bool): Save episodes in snapshot.
pause_for_plot (bool): Pause for plot.
Raises:
NotSetupError: If train() is called before setup().
Returns:
float: The average return in last epoch cycle.
| train | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def step_epochs(self):
    """Step through each epoch.

    This function returns a magic generator. When iterated through, this
    generator automatically performs services such as snapshotting and log
    management. It is used inside train() in each algorithm.

    The generator initializes two variables: `self.step_itr` and
    `self.step_episode`. To use the generator, these two have to be
    updated manually in each epoch, as the example shows below.

    Yields:
        int: The next training epoch.

    Examples:
        for epoch in trainer.step_epochs():
            trainer.step_episode = trainer.obtain_samples(...)
            self.train_once(...)
            trainer.step_itr += 1

    """
    self._start_time = time.time()
    self.step_itr = self._stats.total_itr
    self.step_episode = None

    # Used by integration tests to ensure examples can run one epoch.
    n_epochs = int(
        os.environ.get('GARAGE_EXAMPLE_TEST_N_EPOCHS',
                       self._train_args.n_epochs))

    logger.log('Obtaining samples...')

    for epoch in range(self._train_args.start_epoch, n_epochs):
        self._itr_start_time = time.time()
        with logger.prefix('epoch #%d | ' % epoch):
            yield epoch
            # Snapshot after the algorithm has finished the epoch body.
            save_episode = (self.step_episode
                            if self._train_args.store_episodes else None)

            self._stats.last_episode = save_episode
            self._stats.total_epoch = epoch
            self._stats.total_itr = self.step_itr

            self.save(epoch)

            if self.enable_logging:
                self.log_diagnostics(self._train_args.pause_for_plot)
                logger.dump_all(self.step_itr)
                tabular.clear()
This function returns a magic generator. When iterated through, this
generator automatically performs services such as snapshotting and log
management. It is used inside train() in each algorithm.
The generator initializes two variables: `self.step_itr` and
`self.step_episode`. To use the generator, these two have to be
updated manually in each epoch, as the example shows below.
Yields:
int: The next training epoch.
Examples:
for epoch in trainer.step_epochs():
trainer.step_episode = trainer.obtain_samples(...)
self.train_once(...)
trainer.step_itr += 1
| step_epochs | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def resume(self,
           n_epochs=None,
           batch_size=None,
           plot=None,
           store_episodes=None,
           pause_for_plot=None):
    """Resume from restored experiment.

    This method provides the same interface as train().

    If not specified, an argument will default to the
    saved arguments from the last call to train().

    Args:
        n_epochs (int): Number of epochs.
        batch_size (int): Number of environment steps in one batch.
        plot (bool): Visualize an episode from the policy after each epoch.
        store_episodes (bool): Save episodes in snapshot.
        pause_for_plot (bool): Pause for plot.

    Raises:
        NotSetupError: If resume() is called before restore().

    Returns:
        float: The average return in last epoch cycle.

    """
    if self._train_args is None:
        raise NotSetupError('You must call restore() before resume().')

    # Override saved arguments only when the caller supplied a value.
    self._train_args.n_epochs = n_epochs or self._train_args.n_epochs
    self._train_args.batch_size = batch_size or self._train_args.batch_size

    if plot is not None:
        self._train_args.plot = plot
    if store_episodes is not None:
        self._train_args.store_episodes = store_episodes
    if pause_for_plot is not None:
        self._train_args.pause_for_plot = pause_for_plot

    average_return = self._algo.train(self)
    self._shutdown_worker()

    return average_return
This method provides the same interface as train().
If not specified, an argument will default to the
saved arguments from the last call to train().
Args:
n_epochs (int): Number of epochs.
batch_size (int): Number of environment steps in one batch.
plot (bool): Visualize an episode from the policy after each epoch.
store_episodes (bool): Save episodes in snapshot.
pause_for_plot (bool): Pause for plot.
Raises:
NotSetupError: If resume() is called before restore().
Returns:
float: The average return in last epoch cycle.
| resume | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def get_env_copy(self):
    """Get a copy of the environment.

    Returns:
        Environment: An environment instance, or None if no environment
            was set up.

    """
    if self._env:
        # Deep-copy via cloudpickle so the copy shares no mutable state
        # with the trainer's own environment.
        return cloudpickle.loads(cloudpickle.dumps(self._env))
    else:
        return None
Returns:
Environment: An environment instance.
| get_env_copy | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def __enter__(self):
    """Set self.sess as the default session.

    Returns:
        TFTrainer: This trainer.

    """
    # Only enter the session if it is not already the default one, and
    # remember that we did so __exit__ can undo it symmetrically.
    if tf.compat.v1.get_default_session() is not self.sess:
        self.sess.__enter__()
        self.sess_entered = True
    return self
Returns:
TFTrainer: This trainer.
| __enter__ | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def __exit__(self, exc_type, exc_val, exc_tb):
    """Leave session.

    Args:
        exc_type (str): Type.
        exc_val (object): Value.
        exc_tb (object): Traceback.

    """
    # Only leave the session if __enter__ actually entered it.
    if tf.compat.v1.get_default_session(
    ) is self.sess and self.sess_entered:
        self.sess.__exit__(exc_type, exc_val, exc_tb)
        self.sess_entered = False
Args:
exc_type (str): Type.
exc_val (object): Value.
exc_tb (object): Traceback.
| __exit__ | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def setup(self, algo, env):
    """Set up trainer and sessions for algorithm and environment.

    This method saves algo and env within trainer and creates a sampler,
    and initializes all uninitialized variables in session.

    Note:
        After setup() is called all variables in session should have been
        initialized. setup() respects existing values in session so
        policy weights can be loaded before setup().

    Args:
        algo (RLAlgorithm): An algorithm instance.
        env (Environment): An environment instance.

    """
    # Initialize TF variables before the base-class setup so that loaded
    # weights (already present in the session) are preserved.
    self.initialize_tf_vars()
    logger.log(self.sess.graph)
    super().setup(algo, env)
This method saves algo and env within trainer and creates a sampler,
and initializes all uninitialized variables in session.
Note:
After setup() is called all variables in session should have been
initialized. setup() respects existing values in session so
policy weights can be loaded before setup().
Args:
algo (RLAlgorithm): An algorithm instance.
env (Environment): An environment instance.
| setup | python | rlworkgroup/garage | src/garage/trainer.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/trainer.py | MIT |
def initialize_tf_vars(self):
    """Initialize all uninitialized variables in session.

    Variables that already hold values (e.g. weights loaded before
    setup()) are left untouched.
    """
    with tf.name_scope('initialize_tf_vars'):
        # report_uninitialized_variables returns byte strings of names.
        uninited_set = [
            e.decode() for e in self.sess.run(
                tf.compat.v1.report_uninitialized_variables())
        ]
        # Initialize only the variables that are actually uninitialized.
        self.sess.run(
            tf.compat.v1.variables_initializer([
                v for v in tf.compat.v1.global_variables()
                if v.name.split(':')[0] in uninited_set
            ]))
def get_step_type(cls, step_cnt, max_episode_length, done):
    """Determines the step type based on step cnt and done signal.

    Args:
        step_cnt (int): current step cnt of the environment.
        max_episode_length (int): maximum episode length.
        done (bool): the done signal returned by Environment.

    Returns:
        StepType: the step type.

    Raises:
        ValueError: if step_cnt is < 1. In this case a environment's
            `reset()` is likely not called yet and the step_cnt is None.

    """
    # Precedence: hitting the step limit counts as TIMEOUT even if the
    # environment also reported done.
    if max_episode_length is not None and step_cnt >= max_episode_length:
        return StepType.TIMEOUT
    elif done:
        return StepType.TERMINAL
    elif step_cnt == 1:
        return StepType.FIRST
    elif step_cnt < 1:
        raise ValueError('Expect step_cnt to be >= 1, but got {} '
                         'instead. Did you forget to call `reset('
                         ')`?'.format(step_cnt))
    else:
        return StepType.MID
Args:
step_cnt (int): current step cnt of the environment.
max_episode_length (int): maximum episode length.
done (bool): the done signal returned by Environment.
Returns:
StepType: the step type.
Raises:
ValueError: if step_cnt is < 1. In this case a environment's
`reset()` is likely not called yet and the step_cnt is None.
| get_step_type | python | rlworkgroup/garage | src/garage/_dtypes.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/_dtypes.py | MIT |
Subsets and Splits
Django Code with Docstrings
Filters Python code examples from Django repository that contain Django-related code, helping identify relevant code snippets for understanding Django framework usage patterns.
SQL Console for Shuu12121/python-treesitter-filtered-datasetsV2
Retrieves specific code examples from the Flask repository but doesn't provide meaningful analysis or patterns beyond basic data retrieval.
HTTPX Repo Code and Docstrings
Retrieves specific code examples from the httpx repository, which is useful for understanding how particular libraries are used but doesn't provide broader analytical insights about the dataset.
Requests Repo Docstrings & Code
Retrieves code examples with their docstrings and file paths from the requests repository, providing basic filtering but limited analytical value beyond finding specific code samples.
Quart Repo Docstrings & Code
Retrieves code examples with their docstrings from the Quart repository, providing basic code samples but offering limited analytical value for understanding broader patterns or relationships in the dataset.