index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
13,827
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunables_copy_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for deep copy of tunable objects and groups.
"""
from mlos_bench.tunables.covariant_group import CovariantTunableGroup
from mlos_bench.tunables.tunable import Tunable, TunableValue
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_copy_tunable_int(tunable_int: Tunable) -> None:
    """
    Verify that a copied Tunable compares equal to the original,
    and that mutating the copy does not affect the original.
    """
    duplicate = tunable_int.copy()
    assert duplicate == tunable_int
    # Mutate the copy only -- the original must remain untouched.
    duplicate.numerical_value += 200
    assert duplicate != tunable_int
def test_copy_tunable_groups(tunable_groups: TunableGroups) -> None:
    """
    Verify that a copied TunableGroups compares equal to the original,
    and that assigning into the copy does not mark the original as updated.
    """
    duplicate = tunable_groups.copy()
    assert duplicate == tunable_groups
    # Assign into the copy only.
    duplicate["vmSize"] = "Standard_B2ms"
    assert duplicate.is_updated()
    assert not tunable_groups.is_updated()
    assert duplicate != tunable_groups
def test_copy_covariant_group(covariant_group: CovariantTunableGroup) -> None:
    """
    Check if deep copy works for CovariantTunableGroup object.

    Mutating one tunable in the copy must mark only the copy as updated
    and make the two groups compare unequal.
    """
    covariant_group_copy = covariant_group.copy()
    assert covariant_group == covariant_group_copy
    tunable = next(iter(covariant_group.get_tunables()))
    new_value: TunableValue
    if tunable.is_categorical:
        # Pick any category other than the current one.
        new_value = next(x for x in tunable.categories if x != tunable.category)
    elif tunable.is_numerical:
        new_value = tunable.numerical_value + 1
    else:
        # Previously `new_value` was left unbound here, causing a confusing
        # NameError below; fail explicitly instead.
        raise ValueError(f"Tunable is neither categorical nor numerical: {tunable}")
    covariant_group_copy[tunable] = new_value
    assert covariant_group_copy.is_updated()
    assert not covariant_group.is_updated()
    assert covariant_group != covariant_group_copy
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,828
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/spaces/adapters/identity_adapter.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Contains the Identity (no-op) Space Adapter class.
"""
import ConfigSpace
import pandas as pd
from mlos_core.spaces.adapters.adapter import BaseSpaceAdapter
class IdentityAdapter(BaseSpaceAdapter):
    """Identity (no-op) SpaceAdapter class.

    Configurations pass through unchanged in both directions, and the
    target parameter space is the original one.

    Parameters
    ----------
    orig_parameter_space : ConfigSpace.ConfigurationSpace
        The original parameter space to explore.
    """

    @property
    def target_parameter_space(self) -> ConfigSpace.ConfigurationSpace:
        # No-op adapter: the target space *is* the original space.
        return self._orig_parameter_space

    def transform(self, configuration: pd.DataFrame) -> pd.DataFrame:
        # Identity mapping: hand the input back untouched.
        return configuration

    def inverse_transform(self, configurations: pd.DataFrame) -> pd.DataFrame:
        # Identity mapping: hand the input back untouched.
        return configurations
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,829
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A collection Service functions for mocking remote script execution.
"""
from typing import Any, Dict, Optional
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.remote_exec_type import SupportsRemoteExec
from mlos_bench.tests.services.remote.mock import mock_operation
class MockRemoteExecService(Service, SupportsRemoteExec):
    """
    Mock remote script execution service.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new instance of mock remote exec service.

        Parameters
        ----------
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration.
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            Parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        # Both remote exec operations are backed by the same mock callable.
        self.register({
            name: mock_operation
            for name in ("remote_exec", "get_remote_exec_results")
        })
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,830
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/optimizers/one_hot_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for one-hot encoding for certain optimizers.
"""
import pytest
import pandas as pd
import numpy as np
import numpy.typing as npt
import ConfigSpace as CS
from mlos_core.optimizers import SmacOptimizer
# pylint: disable=protected-access,redefined-outer-name
@pytest.fixture
def data_frame() -> pd.DataFrame:
    """
    Toy data frame corresponding to the `configuration_space` hyperparameters.
    The columns are deliberately *not* in alphabetic order.
    """
    columns = {
        'y': ['a', 'b', 'c'],
        'x': [0.1, 0.2, 0.3],
        'z': [1, 5, 8],
    }
    return pd.DataFrame(columns)
@pytest.fixture
def one_hot() -> npt.NDArray:
    """
    One-hot encoding of the `data_frame` above.
    The columns follow the order of the hyperparameters in `configuration_space`.
    """
    rows = [
        [0.1, 1.0, 0.0, 0.0, 1.0],
        [0.2, 0.0, 1.0, 0.0, 5.0],
        [0.3, 0.0, 0.0, 1.0, 8.0],
    ]
    return np.array(rows)
def test_to_1hot(configuration_space: CS.ConfigurationSpace,
                 data_frame: pd.DataFrame, one_hot: npt.NDArray) -> None:
    """
    Toy problem to test one-hot encoding.
    """
    opt = SmacOptimizer(parameter_space=configuration_space)
    encoded = opt._to_1hot(data_frame)
    assert encoded == pytest.approx(one_hot)
def test_from_1hot(configuration_space: CS.ConfigurationSpace,
                   data_frame: pd.DataFrame, one_hot: npt.NDArray) -> None:
    """
    Toy problem to test one-hot decoding.
    """
    opt = SmacOptimizer(parameter_space=configuration_space)
    decoded = opt._from_1hot(one_hot)
    assert decoded.to_dict() == data_frame.to_dict()
def test_round_trip(configuration_space: CS.ConfigurationSpace, data_frame: pd.DataFrame) -> None:
    """
    Round-trip test for one-hot-encoding and then decoding a data frame.
    """
    opt = SmacOptimizer(parameter_space=configuration_space)
    decoded = opt._from_1hot(opt._to_1hot(data_frame))
    # Floats compared approximately; categoricals and ints exactly.
    assert decoded.x.to_numpy() == pytest.approx(data_frame.x)
    assert (decoded.y == data_frame.y).all()
    assert (decoded.z == data_frame.z).all()
def test_round_trip_reverse(configuration_space: CS.ConfigurationSpace, one_hot: npt.NDArray) -> None:
    """
    Round-trip test for one-hot-decoding and then encoding of a numpy array.
    """
    opt = SmacOptimizer(parameter_space=configuration_space)
    encoded = opt._to_1hot(opt._from_1hot(one_hot))
    assert encoded == pytest.approx(one_hot)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,831
|
microsoft/MLOS
|
refs/heads/main
|
/doc/source/conf.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from logging import warning

import sphinx_rtd_theme

# sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../mlos_core/mlos_core'))
sys.path.insert(1, os.path.abspath('../../mlos_bench/mlos_bench'))


# -- Project information -----------------------------------------------------

project = 'MlosCore'
copyright = '2022, GSL'
author = 'GSL'

# The full version, including alpha/beta/rc tags.
# Overridden below from git metadata when setuptools_scm is available.
release = '0.1.0'

try:
    from setuptools_scm import get_version
    version = get_version(root='../..', relative_to=__file__)
    if version is not None:
        release = version
except ImportError:
    warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
    warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'nbsphinx',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'numpydoc',
    'matplotlib.sphinxext.plot_directive',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# generate autosummary even if no references
autosummary_generate = True

# but don't complain about missing stub files
# See Also: <https://stackoverflow.com/a/73294408>
numpydoc_class_members_toctree = False

autodoc_default_options = {
    'members': True,
    'undoc-members': True,
    # Don't generate documentation for some (non-private) functions that are more for internal implementation use.
    'exclude-members': 'mlos_bench.util.check_required_params'
}

# Generate the plots for the gallery
# plot_gallery = True

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# NOTE: this list was previously assigned twice, and the second assignment
# silently dropped '_templates' from the exclusions; merged into one list.
exclude_patterns = [
    '_build',
    '_templates',
    '**.ipynb_checkpoints',     # Jupyter backup files
]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- nbsphinx options for rendering notebooks -------------------------------
# nbsphinx_execute = 'never' # enable to stop nbsphinx from executing notebooks
nbsphinx_kernel_name = 'python3'
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,832
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Services for implementing Environments for mlos_bench.
"""
from mlos_bench.services.base_service import Service
from mlos_bench.services.base_fileshare import FileShareService
from mlos_bench.services.local.local_exec import LocalExecService
# Public API of the mlos_bench.services package.
__all__ = ['Service', 'FileShareService', 'LocalExecService']
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,833
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/local/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Local scheduler side Services for mlos_bench.
"""
from mlos_bench.services.local.local_exec import LocalExecService
# Public API of the mlos_bench.services.local package.
__all__ = ['LocalExecService']
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,834
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/remote/os_env.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
OS-level remote Environment on Azure.
"""
from typing import Optional
import logging
from mlos_bench.environments.base_environment import Environment
from mlos_bench.environments.status import Status
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.vm_provisioner_type import SupportsVMOps
from mlos_bench.tunables.tunable_groups import TunableGroups
_LOG = logging.getLogger(__name__)
class OSEnv(Environment):
    """
    OS Level Environment for a host.
    """

    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new environment for remote execution.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration. Each config must have at least the "tunable_params"
            and the "const_args" sections.
            `RemoteEnv` must also have at least some of the following parameters:
            {setup, run, teardown, wait_boot}
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of tunable parameters for *all* environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        super().__init__(name=name, config=config, global_config=global_config,
                         tunables=tunables, service=service)

        # TODO: Refactor this as "host" and "os" operations to accommodate SSH service.
        assert self._service is not None and isinstance(self._service, SupportsVMOps), \
            "RemoteEnv requires a service that supports host operations"
        self._host_service: SupportsVMOps = self._service

    def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
        """
        Check if the host is up and running; boot it, if necessary.

        Parameters
        ----------
        tunables : TunableGroups
            A collection of groups of tunable parameters along with the
            parameters' values.
        global_config : dict
            Free-format dictionary of global parameters of the environment
            that are not used in the optimization process.

        Returns
        -------
        is_success : bool
            True if operation is successful, false otherwise.
        """
        _LOG.info("OS set up: %s :: %s", self, tunables)
        if not super().setup(tunables, global_config):
            return False

        (boot_status, op_params) = self._host_service.vm_start(self._params)
        if boot_status.is_pending():
            # Block until the asynchronous start operation completes.
            (boot_status, _) = self._host_service.wait_vm_operation(op_params)

        self._is_ready = boot_status in {Status.SUCCEEDED, Status.READY}
        return self._is_ready

    def teardown(self) -> None:
        """
        Clean up and shut down the host without deprovisioning it.
        """
        _LOG.info("OS tear down: %s", self)
        (stop_status, op_params) = self._host_service.vm_stop(self._params)
        if stop_status.is_pending():
            # Block until the asynchronous stop operation completes.
            (stop_status, _) = self._host_service.wait_vm_operation(op_params)

        super().teardown()
        _LOG.debug("Final status of OS stopping: %s :: %s", self, stop_status)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,835
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/spaces/adapters/adapter.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Contains the BaseSpaceAdapter abstract class.
"""
from abc import ABCMeta, abstractmethod
import ConfigSpace
import pandas as pd
class BaseSpaceAdapter(metaclass=ABCMeta):
    """SpaceAdapter abstract class defining the basic interface.

    Parameters
    ----------
    orig_parameter_space : ConfigSpace.ConfigurationSpace
        The original parameter space to explore.
    """

    def __init__(self, *, orig_parameter_space: ConfigSpace.ConfigurationSpace):
        self._orig_parameter_space: ConfigSpace.ConfigurationSpace = orig_parameter_space
        self._random_state = orig_parameter_space.random

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return (f"{cls_name}(original_parameter_space={self.orig_parameter_space}, "
                f"target_parameter_space={self.target_parameter_space})")

    @property
    def orig_parameter_space(self) -> ConfigSpace.ConfigurationSpace:
        """
        Original (user-provided) parameter space to explore.
        """
        return self._orig_parameter_space

    @property
    @abstractmethod
    def target_parameter_space(self) -> ConfigSpace.ConfigurationSpace:
        """
        Target parameter space that is fed to the underlying optimizer.
        """
        pass    # pylint: disable=unnecessary-pass # pragma: no cover

    @abstractmethod
    def transform(self, configuration: pd.DataFrame) -> pd.DataFrame:
        """Translates a configuration, which belongs to the target parameter space, to the original parameter space.

        This method is called by the `suggest` method of the `BaseOptimizer` class.

        Parameters
        ----------
        configuration : pd.DataFrame
            Pandas dataframe with a single row. Column names are the parameter names of the target parameter space.

        Returns
        -------
        configuration : pd.DataFrame
            Pandas dataframe with a single row, containing the translated configuration.
            Column names are the parameter names of the original parameter space.
        """
        pass    # pylint: disable=unnecessary-pass # pragma: no cover

    @abstractmethod
    def inverse_transform(self, configurations: pd.DataFrame) -> pd.DataFrame:
        """Translates a configuration, which belongs to the original parameter space, to the target parameter space.

        This method is called by the `register` method of the `BaseOptimizer` class, and performs the inverse operation
        of `BaseSpaceAdapter.transform` method.

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters, which belong to the original parameter space.
            The columns are the parameter names the original parameter space and the rows are the configurations.

        Returns
        -------
        configurations : pd.DataFrame
            Dataframe of the translated configurations / parameters.
            The columns are the parameter names of the target parameter space and the rows are the configurations.
        """
        pass    # pylint: disable=unnecessary-pass # pragma: no cover
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,836
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/launcher.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A helper class to load the configuration files, parse the command line parameters,
and instantiate the main components of mlos_bench system.
It is used in `mlos_bench.run` module to run the benchmark/optimizer from the
command line.
"""
import logging
import argparse
from string import Template
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.util import BaseTypeVar
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.environments.base_environment import Environment
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.optimizers.mock_optimizer import MockOptimizer
from mlos_bench.optimizers.one_shot_optimizer import OneShotOptimizer
from mlos_bench.storage.base_storage import Storage
from mlos_bench.services.local.local_exec import LocalExecService
from mlos_bench.services.config_persistence import ConfigPersistenceService
_LOG_LEVEL = logging.INFO
_LOG_FORMAT = '%(asctime)s %(filename)s:%(lineno)d %(funcName)s %(levelname)s %(message)s'
logging.basicConfig(level=_LOG_LEVEL, format=_LOG_FORMAT)
_LOG = logging.getLogger(__name__)
class Launcher:
    # pylint: disable=too-few-public-methods,too-many-instance-attributes
    """
    Command line launcher for mlos_bench and mlos_core.
    """

    def __init__(self, description: str, long_text: str = ""):
        # Resolution order throughout: command-line argument first,
        # then the corresponding key from the main --config JSON file.
        _LOG.info("Launch: %s", description)
        parser = argparse.ArgumentParser(description=f"{description} : {long_text}")
        (args, args_rest) = self._parse_args(parser)

        # Bootstrap config loader: command line takes priority.
        self._config_loader = ConfigPersistenceService({"config_path": args.config_path or []})
        if args.config:
            config = self._config_loader.load_config(args.config, ConfigSchema.CLI)
            assert isinstance(config, Dict)
            config_path = config.get("config_path", [])
            if config_path and not args.config_path:
                # Reset the config loader with the paths from JSON file.
                self._config_loader = ConfigPersistenceService({"config_path": config_path})
        else:
            config = {}

        # Log level may be given as an int or as a symbolic level name.
        log_level = args.log_level or config.get("log_level", _LOG_LEVEL)
        try:
            log_level = int(log_level)
        except ValueError:
            # failed to parse as an int - leave it as a string and let logging
            # module handle whether it's an appropriate log name or not
            log_level = logging.getLevelName(log_level)
        logging.root.setLevel(log_level)
        log_file = args.log_file or config.get("log_file")
        if log_file:
            log_handler = logging.FileHandler(log_file)
            log_handler.setLevel(log_level)
            log_handler.setFormatter(logging.Formatter(_LOG_FORMAT))
            logging.root.addHandler(log_handler)

        self._parent_service = LocalExecService(parent=self._config_loader)

        # Merge global params: config files first, then extra command-line
        # key/value pairs, then any CLI-config keys not shadowed by argparse.
        self.global_config = self._load_config(
            config.get("globals", []) + (args.globals or []),
            (args.config_path or []) + config.get("config_path", []),
            args_rest,
            {key: val for (key, val) in config.items() if key not in vars(args)},
        )
        self.global_config = self._expand_vars(self.global_config)

        env_path = args.environment or config.get("environment")
        if not env_path:
            _LOG.error("No environment config specified.")
            parser.error("At least the Environment config must be specified." +
                         " Run `mlos_bench --help` and consult `README.md` for more info.")
        self.root_env_config = self._config_loader.resolve_path(env_path)

        self.environment: Environment = self._config_loader.load_environment(
            self.root_env_config, TunableGroups(), self.global_config, service=self._parent_service)
        _LOG.info("Init environment: %s", self.environment)

        # NOTE: Init tunable values *after* the Environment, but *before* the Optimizer
        self.tunables = self._init_tunable_values(
            args.random_init or config.get("random_init", False),
            config.get("random_seed") if args.random_seed is None else args.random_seed,
            config.get("tunable_values", []) + (args.tunable_values or [])
        )
        _LOG.info("Init tunables: %s", self.tunables)

        self.optimizer = self._load_optimizer(args.optimizer or config.get("optimizer"))
        _LOG.info("Init optimizer: %s", self.optimizer)

        self.storage = self._load_storage(args.storage or config.get("storage"))
        _LOG.info("Init storage: %s", self.storage)

        self.teardown = args.teardown or config.get("teardown", True)

    @staticmethod
    def _parse_args(parser: argparse.ArgumentParser) -> Tuple[argparse.Namespace, List[str]]:
        """
        Parse the command line arguments.

        Returns the parsed known arguments plus the list of unrecognized
        tokens (later consumed by `_try_parse_extra_args`).
        """
        parser.add_argument(
            '--config', required=False,
            help='Main JSON5 configuration file. Its keys are the same as the' +
                 ' command line options and can be overridden by the latter.\n' +
                 '\n' +
                 ' See the `mlos_bench/config/` tree at https://github.com/microsoft/MLOS/ ' +
                 ' for additional config examples for this and other arguments.')

        parser.add_argument(
            '--log_file', required=False,
            help='Path to the log file. Use stdout if omitted.')

        parser.add_argument(
            '--log_level', required=False, type=str,
            help=f'Logging level. Default is {logging.getLevelName(_LOG_LEVEL)}.' +
                 ' Set to DEBUG for debug, WARNING for warnings only.')

        parser.add_argument(
            '--config_path', nargs="+", required=False,
            help='One or more locations of JSON config files.')

        parser.add_argument(
            '--environment', required=False,
            help='Path to JSON file with the configuration of the benchmarking environment.')

        parser.add_argument(
            '--optimizer', required=False,
            help='Path to the optimizer configuration file. If omitted, run' +
                 ' a single trial with default (or specified in --tunable_values).')

        parser.add_argument(
            '--storage', required=False,
            help='Path to the storage configuration file.' +
                 ' If omitted, use the ephemeral in-memory SQL storage.')

        parser.add_argument(
            '--random_init', required=False, default=False,
            dest='random_init', action='store_true',
            help='Initialize tunables with random values. (Before applying --tunable_values).')

        parser.add_argument(
            '--random_seed', required=False, type=int,
            help='Seed to use with --random_init')

        parser.add_argument(
            '--tunable_values', nargs="+", required=False,
            help='Path to one or more JSON files that contain values of the tunable' +
                 ' parameters. This can be used for a single trial (when no --optimizer' +
                 ' is specified) or as default values for the first run in optimization.')

        parser.add_argument(
            '--globals', nargs="+", required=False,
            help='Path to one or more JSON files that contain additional' +
                 ' [private] parameters of the benchmarking environment.')

        parser.add_argument(
            '--no_teardown', required=False, default=None,
            dest='teardown', action='store_false',
            help='Disable teardown of the environment after the benchmark.')

        return parser.parse_known_args()

    @staticmethod
    def _try_parse_extra_args(cmdline: Iterable[str]) -> Dict[str, str]:
        """
        Helper function to parse global key/value pairs from the command line.

        Accepts both `--key value` and `--key=value` forms; raises
        ValueError on a dangling key or a value without a key.
        """
        _LOG.debug("Extra args: %s", cmdline)

        config = {}
        key = None
        for elem in cmdline:
            if elem.startswith("--"):
                if key is not None:
                    raise ValueError("Command line argument has no value: " + key)
                key = elem[2:]
                kv_split = key.split("=", 1)
                if len(kv_split) == 2:
                    # `--key=value` form: consume both parts at once.
                    config[kv_split[0].strip()] = kv_split[1]
                    key = None
            else:
                if key is None:
                    raise ValueError("Command line argument has no key: " + elem)
                config[key.strip()] = elem
                key = None

        if key is not None:
            # Trailing `--key` with no value.
            raise ValueError("Command line argument has no value: " + key)

        _LOG.debug("Parsed config: %s", config)
        return config

    def _load_config(self,
                     args_globals: Iterable[str],
                     config_path: Iterable[str],
                     args_rest: Iterable[str],
                     global_config: Dict[str, Any]) -> Dict[str, Any]:
        """
        Get key/value pairs of the global configuration parameters
        from the specified config files (if any) and command line arguments.

        Later sources override earlier ones: files, then extra CLI args.
        """
        for config_file in (args_globals or []):
            conf = self._config_loader.load_config(config_file, ConfigSchema.GLOBALS)
            assert isinstance(conf, dict)
            global_config.update(conf)
        global_config.update(Launcher._try_parse_extra_args(args_rest))
        if config_path:
            global_config["config_path"] = config_path
        return global_config

    def _expand_vars(self, value: Any) -> Any:
        """
        Expand dollar variables in the globals.
        Recurses into dicts and lists; non-string scalars pass through.
        NOTE: `self.global_config` must be set.
        """
        if isinstance(value, str):
            # safe_substitute leaves unknown $vars untouched instead of raising.
            return Template(value).safe_substitute(self.global_config)
        if isinstance(value, dict):
            return {key: self._expand_vars(val) for (key, val) in value.items()}
        if isinstance(value, list):
            return [self._expand_vars(val) for val in value]
        return value

    def _init_tunable_values(self, random_init: bool, seed: Optional[int],
                             args_tunables: Optional[Iterable[str]]) -> TunableGroups:
        """
        Initialize the tunables and load key/value pairs of the tunable values
        from given JSON files, if specified.
        """
        tunables = self.environment.tunable_params
        _LOG.debug("Init tunables: default = %s", tunables)

        if random_init:
            # Use a MockOptimizer purely as a seeded random value generator.
            tunables = MockOptimizer(
                tunables=tunables, service=None,
                config={"start_with_defaults": False, "seed": seed}).suggest()
            _LOG.debug("Init tunables: random = %s", tunables)

        if args_tunables is not None:
            # Explicit tunable values override the random initialization above.
            for data_file in args_tunables:
                values = self._config_loader.load_config(data_file, ConfigSchema.TUNABLE_VALUES)
                assert isinstance(values, Dict)
                tunables.assign(values)
                _LOG.debug("Init tunables: load %s = %s", data_file, tunables)

        return tunables

    def _load_optimizer(self, args_optimizer: Optional[str]) -> Optimizer:
        """
        Instantiate the Optimizer object from JSON config file, if specified
        in the --optimizer command line option. If config file not specified,
        create a one-shot optimizer to run a single benchmark trial.
        """
        if args_optimizer is None:
            return OneShotOptimizer(
                self.tunables, config=self.global_config, service=self._parent_service)
        optimizer = self._load(Optimizer, args_optimizer, ConfigSchema.OPTIMIZER)   # type: ignore[type-abstract]
        return optimizer

    def _load_storage(self, args_storage: Optional[str]) -> Storage:
        """
        Instantiate the Storage object from JSON file provided in the --storage
        command line parameter. If omitted, create an ephemeral in-memory SQL
        storage instead.
        """
        if args_storage is None:
            # pylint: disable=import-outside-toplevel
            from mlos_bench.storage.sql.storage import SqlStorage
            return SqlStorage(self.tunables, service=self._parent_service,
                              config={"drivername": "sqlite", "database": ":memory:"})
        storage = self._load(Storage, args_storage, ConfigSchema.STORAGE)   # type: ignore[type-abstract]
        return storage

    def _load(self, cls: Type[BaseTypeVar], json_file_name: str, schema_type: Optional[ConfigSchema]) -> BaseTypeVar:
        """
        Create a new instance of class `cls` from JSON configuration.

        Note: For abstract types, mypy will complain at the call site.
        Use "# type: ignore[type-abstract]" to suppress the warning.
        See Also: https://github.com/python/mypy/issues/4717
        """
        class_config = self._config_loader.load_config(json_file_name, schema_type)
        assert isinstance(class_config, Dict)
        ret = self._config_loader.build_generic(
            base_cls=cls,
            tunables=self.tunables,
            service=self._parent_service,
            config=class_config,
            global_config=self.global_config
        )
        assert isinstance(ret, cls)
        return ret
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,837
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for mock mlos_bench optimizer.
"""
import os
import sys
import pytest
from mlos_bench.util import path_join
from mlos_bench.optimizers.mlos_core_optimizer import MlosCoreOptimizer
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.tests import SEED
from mlos_core.optimizers.bayesian_optimizers.smac_optimizer import SmacOptimizer
OUTPUT_DIR_PATH_BASE = r'c:/temp' if sys.platform == 'win32' else '/tmp/'
def test_init_mlos_core_smac_opt_bad_trial_count(tunable_groups: TunableGroups) -> None:
    """
    Test invalid max_trials initialization of mlos_core SMAC optimizer.
    """
    # max_trials smaller than max_iterations is an invalid combination.
    opt_config = dict(optimizer_type='SMAC', max_trials=10, max_iterations=11, seed=SEED)
    with pytest.raises(AssertionError):
        opt = MlosCoreOptimizer(tunable_groups, opt_config)
        assert opt is None
def test_init_mlos_core_smac_opt_max_trials(tunable_groups: TunableGroups) -> None:
    """
    Test max_trials initialization of mlos_core SMAC optimizer.
    """
    opt_config = dict(optimizer_type='SMAC', max_iterations=123, seed=SEED)
    opt = MlosCoreOptimizer(tunable_groups, opt_config)
    # pylint: disable=protected-access
    assert isinstance(opt._opt, SmacOptimizer)
    # When not given explicitly, max_trials tracks the iteration count.
    assert opt._opt.base_optimizer.scenario.n_trials == opt_config['max_iterations']
def test_init_mlos_core_smac_absolute_output_directory(tunable_groups: TunableGroups) -> None:
    """
    Test absolute path output directory initialization of mlos_core SMAC optimizer.
    """
    output_dir = path_join(OUTPUT_DIR_PATH_BASE, 'test_output_dir')
    opt_config = dict(optimizer_type='SMAC', output_directory=output_dir, seed=SEED)
    opt = MlosCoreOptimizer(tunable_groups, opt_config)
    assert isinstance(opt, MlosCoreOptimizer)
    # pylint: disable=protected-access
    assert isinstance(opt._opt, SmacOptimizer)
    # Final portions of the path are generated by SMAC when run_name is not specified.
    assert path_join(str(opt._opt.base_optimizer.scenario.output_directory)).startswith(output_dir)
def test_init_mlos_core_smac_relative_output_directory(tunable_groups: TunableGroups) -> None:
    """
    Test relative path output directory initialization of mlos_core SMAC optimizer.
    """
    opt_config = dict(optimizer_type='SMAC', output_directory='test_output_dir', seed=SEED)
    opt = MlosCoreOptimizer(tunable_groups, opt_config)
    assert isinstance(opt, MlosCoreOptimizer)
    # pylint: disable=protected-access
    assert isinstance(opt._opt, SmacOptimizer)
    # A relative output directory is resolved against the current working dir.
    assert path_join(str(opt._opt.base_optimizer.scenario.output_directory)).startswith(
        path_join(os.getcwd(), 'test_output_dir'))
def test_init_mlos_core_smac_relative_output_directory_with_run_name(tunable_groups: TunableGroups) -> None:
    """
    Test relative path output directory initialization of mlos_core SMAC optimizer.
    """
    opt_config = dict(optimizer_type='SMAC', output_directory='test_output_dir',
                      run_name='test_run', seed=SEED)
    opt = MlosCoreOptimizer(tunable_groups, opt_config)
    assert isinstance(opt, MlosCoreOptimizer)
    # pylint: disable=protected-access
    assert isinstance(opt._opt, SmacOptimizer)
    # The explicit run_name becomes a subdirectory of the output directory.
    assert path_join(str(opt._opt.base_optimizer.scenario.output_directory)).startswith(
        path_join(os.getcwd(), 'test_output_dir', 'test_run'))
def test_init_mlos_core_smac_relative_output_directory_with_experiment_id(tunable_groups: TunableGroups) -> None:
    """
    Test relative path output directory initialization of mlos_core SMAC optimizer.
    """
    opt_config = dict(optimizer_type='SMAC', output_directory='test_output_dir', seed=SEED)
    global_config = dict(experiment_id='experiment_id')
    opt = MlosCoreOptimizer(tunable_groups, opt_config, global_config)
    assert isinstance(opt, MlosCoreOptimizer)
    # pylint: disable=protected-access
    assert isinstance(opt._opt, SmacOptimizer)
    # Without a run_name, the experiment_id from the global config is used.
    assert path_join(str(opt._opt.base_optimizer.scenario.output_directory)).startswith(
        path_join(os.getcwd(), 'test_output_dir', global_config['experiment_id']))
def test_init_mlos_core_smac_temp_output_directory(tunable_groups: TunableGroups) -> None:
    """
    Test random output directory initialization of mlos_core SMAC optimizer.
    """
    opt_config = dict(optimizer_type='SMAC', output_directory=None, seed=SEED)
    opt = MlosCoreOptimizer(tunable_groups, opt_config)
    assert isinstance(opt, MlosCoreOptimizer)
    # pylint: disable=protected-access
    assert isinstance(opt._opt, SmacOptimizer)
    # With output_directory=None, SMAC still ends up with some output dir.
    assert opt._opt.base_optimizer.scenario.output_directory is not None
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,838
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/remote/azure/conftest.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Configuration test fixtures for azure_services in mlos_bench.
"""
from unittest.mock import patch
import pytest
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.services.remote.azure import AzureAuthService, AzureVMService, AzureFileShareService
# pylint: disable=redefined-outer-name
@pytest.fixture
def config_persistence_service() -> ConfigPersistenceService:
    """
    Test fixture that provides a fresh ConfigPersistenceService instance.
    """
    return ConfigPersistenceService()
@pytest.fixture
def azure_auth_service(config_persistence_service: ConfigPersistenceService,
                       monkeypatch: pytest.MonkeyPatch) -> AzureAuthService:
    """
    Creates a dummy AzureAuthService for tests that require it.
    """
    auth_service = AzureAuthService(config={}, global_config={}, parent=config_persistence_service)
    # Stub out token acquisition so that no real Azure credentials are needed.
    monkeypatch.setattr(auth_service, "get_access_token", lambda: "TEST_TOKEN")
    return auth_service
@pytest.fixture
def azure_vm_service(azure_auth_service: AzureAuthService) -> AzureVMService:
    """
    Creates a dummy Azure VM service for tests that require it.
    """
    vm_config = {
        "deploymentTemplatePath": "services/remote/azure/arm-templates/azuredeploy-ubuntu-vm.jsonc",
        "deploymentName": "TEST_DEPLOYMENT",
        "subscription": "TEST_SUB",
        "resourceGroup": "TEST_RG",
        "deploymentTemplateParameters": {
            "location": "westus2",
        },
        "vmName": "test-vm",  # Should come from the upper-level config
        "pollInterval": 1,
        "pollTimeout": 2,
    }
    return AzureVMService(config=vm_config, global_config={}, parent=azure_auth_service)
@pytest.fixture
def azure_fileshare(config_persistence_service: ConfigPersistenceService) -> AzureFileShareService:
    """
    Creates a dummy AzureFileShareService for tests that require it.
    """
    fs_config = {
        "storageAccountName": "TEST_ACCOUNT_NAME",
        "storageFileShareName": "TEST_FS_NAME",
        "storageAccountKey": "TEST_ACCOUNT_KEY",
    }
    # Patch ShareClient so no real Azure file share connection is attempted.
    with patch("mlos_bench.services.remote.azure.azure_fileshare.ShareClient"):
        return AzureFileShareService(config=fs_config, global_config={},
                                     parent=config_persistence_service)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,839
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/optimizers/toy_optimization_loop_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Toy optimization loop to test the optimizers on mock benchmark environment.
"""
from typing import Tuple
import logging
import pytest
from mlos_core import config_to_dataframe
from mlos_core.optimizers.bayesian_optimizers.smac_optimizer import SmacOptimizer
from mlos_bench.optimizers.convert_configspace import tunable_values_to_configuration
from mlos_bench.environments.base_environment import Environment
from mlos_bench.environments.mock_env import MockEnv
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.optimizers.mock_optimizer import MockOptimizer
from mlos_bench.optimizers.mlos_core_optimizer import MlosCoreOptimizer
# For debugging purposes output some warnings which are captured with failed tests.
DEBUG = True
# Route test diagnostics through warning-level logging when debugging, so
# that the messages get captured and shown with failed tests.
logger = logging.warning if DEBUG else logging.debug
def _optimize(env: Environment, opt: Optimizer) -> Tuple[float, TunableGroups]:
    """
    Toy optimization loop.

    Repeatedly asks `opt` for a suggestion, runs it on `env`, and registers
    the resulting score until the optimizer reports convergence.

    Returns
    -------
    (best_score, best_tunables) : Tuple[float, TunableGroups]
        The best observation recorded by the optimizer.
    """
    assert opt.not_converged()
    while opt.not_converged():
        with env as env_context:
            tunables = opt.suggest()
            logger("tunables: %s", str(tunables))
            # pylint: disable=protected-access
            if isinstance(opt, MlosCoreOptimizer) and isinstance(opt._opt, SmacOptimizer):
                # For SMAC, also log the surrogate model's prediction for the
                # suggested config (debug output only; does not affect the loop).
                config = tunable_values_to_configuration(tunables)
                config_df = config_to_dataframe(config)
                logger("config: %s", str(config))
                try:
                    logger("prediction: %s", opt._opt.surrogate_predict(config_df))
                except RuntimeError:
                    # Best-effort logging only — presumably raised while the
                    # surrogate is not fitted yet (TODO confirm).
                    pass
            assert env_context.setup(tunables)
            (status, output) = env_context.run()
            assert status.is_succeeded()
            assert output is not None
            score = output['score']
            # Mock benchmark scores are expected to stay within this range.
            assert 60 <= score <= 120
            logger("score: %s", str(score))
            opt.register(tunables, status, score)
    (best_score, best_tunables) = opt.get_best_observation()
    assert isinstance(best_score, float) and isinstance(best_tunables, TunableGroups)
    return (best_score, best_tunables)
def test_mock_optimization_loop(mock_env_no_noise: MockEnv,
                                mock_opt: MockOptimizer) -> None:
    """
    Toy optimization loop with mock environment and optimizer.
    """
    (score, tunables) = _optimize(mock_env_no_noise, mock_opt)
    assert score == pytest.approx(64.9, 0.01)
    expected_values = {
        "vmSize": "Standard_B2ms",
        "idle": "halt",
        "kernel_sched_migration_cost_ns": 117025,
        "kernel_sched_latency_ns": 149827706,
    }
    assert tunables.get_param_values() == expected_values
def test_mock_optimization_loop_no_defaults(mock_env_no_noise: MockEnv,
                                            mock_opt_no_defaults: MockOptimizer) -> None:
    """
    Toy optimization loop with mock environment and optimizer,
    starting from a random (non-default) configuration.
    """
    (score, tunables) = _optimize(mock_env_no_noise, mock_opt_no_defaults)
    assert score == pytest.approx(60.97, 0.01)
    expected_values = {
        "vmSize": "Standard_B2s",
        "idle": "halt",
        "kernel_sched_migration_cost_ns": 49122,
        "kernel_sched_latency_ns": 234760738,
    }
    assert tunables.get_param_values() == expected_values
def test_flaml_optimization_loop(mock_env_no_noise: MockEnv,
                                 flaml_opt: MlosCoreOptimizer) -> None:
    """
    Toy optimization loop with mock environment and FLAML optimizer.
    """
    (score, tunables) = _optimize(mock_env_no_noise, flaml_opt)
    assert score == pytest.approx(60.15, 0.01)
    expected_values = {
        "vmSize": "Standard_B2s",
        "idle": "halt",
        "kernel_sched_migration_cost_ns": 50132,
        "kernel_sched_latency_ns": 22674895,
    }
    assert tunables.get_param_values() == expected_values
# NOTE: previously skipped with reason "SMAC is not deterministic";
# re-enable the skip mark if the expected values below start to flake.
def test_smac_optimization_loop(mock_env_no_noise: MockEnv,
                                smac_opt: MlosCoreOptimizer) -> None:
    """
    Toy optimization loop with mock environment and SMAC optimizer.
    """
    (score, tunables) = _optimize(mock_env_no_noise, smac_opt)
    expected_values = {
        "vmSize": "Standard_B2s",
        "idle": "mwait",
        "kernel_sched_migration_cost_ns": 319025,
        "kernel_sched_latency_ns": 499339615,
    }
    assert score == pytest.approx(73.59, 0.01)
    assert tunables.get_param_values() == expected_values
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,840
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunable_comparison_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for checking tunable comparisons.
"""
import pytest
from mlos_bench.tunables.covariant_group import CovariantTunableGroup
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_tunable_int_value_lt(tunable_int: Tunable) -> None:
    """
    Tests that the __lt__ operator works as expected.
    """
    larger = tunable_int.copy()
    larger.numerical_value += 1
    assert tunable_int.numerical_value < larger.numerical_value
    assert tunable_int < larger
def test_tunable_int_name_lt(tunable_int: Tunable) -> None:
    """
    Tests that the __lt__ operator works as expected.
    """
    renamed = tunable_int.copy()
    # Force a lexicographically smaller name on the copy.
    renamed._name = "aaa"  # pylint: disable=protected-access
    assert renamed < tunable_int
def test_tunable_categorical_value_lt(tunable_categorical: Tunable) -> None:
    """
    Tests that the __lt__ operator works as expected.
    """
    other = tunable_categorical.copy()
    # Pick any non-null category value different from the current one.
    new_value = next(
        x for x in tunable_categorical.categories
        if x != tunable_categorical.category and x is not None
    )
    assert tunable_categorical.category is not None
    other.category = new_value
    if tunable_categorical.category < new_value:
        assert tunable_categorical < other
    elif tunable_categorical.category > new_value:
        assert tunable_categorical > other
def test_tunable_categorical_lt_null() -> None:
    """
    Tests that the __lt__ operator works as expected.
    """
    cat_config = {
        "type": "categorical",
        "values": ["floof", "fuzz"],
        "default": "floof",
    }
    dog_config = {
        "type": "categorical",
        "values": [None, "doggo"],
        "default": None,
    }
    tunable_cat = Tunable(name="same-name", config=cat_config)
    tunable_dog = Tunable(name="same-name", config=dog_config)
    # The tunable with the null current value sorts first.
    assert tunable_dog < tunable_cat
def test_tunable_lt_same_name_different_type() -> None:
    """
    Tests that the __lt__ operator works as expected.
    """
    tunable_cat = Tunable(name="same-name", config={
        "type": "categorical",
        "values": ["floof", "fuzz"],
        "default": "floof",
    })
    tunable_int = Tunable(name="same-name", config={
        "type": "int",
        "range": [1, 3],
        "default": 2,
    })
    # Ties on the name are broken by the tunable's type.
    assert tunable_cat < tunable_int
def test_tunable_lt_different_object(tunable_int: Tunable) -> None:
    """
    Tests that the __lt__ operator works as expected.
    """
    # Comparing a Tunable against a foreign type yields False...
    assert (tunable_int < "foo") is False
    # ...but the reflected comparison is not supported and raises.
    with pytest.raises(TypeError):
        assert "foo" < tunable_int  # type: ignore[operator]
def test_tunable_group_ne_object(tunable_groups: TunableGroups) -> None:
    """
    Tests that the __eq__ operator works as expected with other objects.
    """
    # A TunableGroups instance never equals an unrelated object.
    assert tunable_groups != "foo"
def test_covariant_group_ne_object(covariant_group: CovariantTunableGroup) -> None:
    """
    Tests that the __eq__ operator works as expected with other objects.
    """
    # A CovariantTunableGroup instance never equals an unrelated object.
    assert covariant_group != "foo"
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,841
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/config_persistence.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Helper functions to load, instantiate, and serialize Python objects
that encapsulate benchmark environments, tunable parameters, and
service functions.
"""
import os
import sys
import json # For logging only
import logging
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
import json5 # To read configs with comments and other JSON5 syntax features
from jsonschema import ValidationError, SchemaError
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.environments.base_environment import Environment
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.config_loader_type import SupportsConfigLoading
from mlos_bench.tunables.tunable import TunableValue
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import instantiate_from_config, merge_parameters, path_join, preprocess_dynamic_configs, BaseTypeVar
if sys.version_info < (3, 10):
from importlib_resources import files
else:
from importlib.resources import files
_LOG = logging.getLogger(__name__)
class ConfigPersistenceService(Service, SupportsConfigLoading):
    """
    Collection of methods to deserialize the Environment, Service, and TunableGroups objects.
    """
    # Path to the config files bundled inside the mlos_bench package,
    # normalized to forward slashes for cross-platform consistency.
    BUILTIN_CONFIG_PATH = str(files("mlos_bench.config").joinpath("")).replace("\\", "/")
    def __init__(self,
                 config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new instance of config persistence service.

        Parameters
        ----------
        config : dict
            Free-format dictionary that contains parameters for the service.
            (E.g., root path for config files, etc.)
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            An optional parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        # Ordered list of directories to search when resolving relative paths.
        self._config_path: List[str] = self.config.get("config_path", [])
        self._config_loader_service = self
        # The built-in package configs are always searched last, as a fallback.
        if self.BUILTIN_CONFIG_PATH not in self._config_path:
            self._config_path.append(self.BUILTIN_CONFIG_PATH)
        # Register methods that we want to expose to the Environment objects.
        self.register([
            self.resolve_path,
            self.load_config,
            self.prepare_class_load,
            self.build_service,
            self.build_environment,
            self.load_services,
            self.load_environment,
            self.load_environment_list,
        ])
def resolve_path(self, file_path: str,
extra_paths: Optional[Iterable[str]] = None) -> str:
"""
Prepend the suitable `_config_path` to `path` if the latter is not absolute.
If `_config_path` is `None` or `path` is absolute, return `path` as is.
Parameters
----------
file_path : str
Path to the input config file.
extra_paths : Iterable[str]
Additional directories to prepend to the list of search paths.
Returns
-------
path : str
An actual path to the config or script.
"""
path_list = list(extra_paths or []) + self._config_path
_LOG.debug("Resolve path: %s in: %s", file_path, path_list)
if not os.path.isabs(file_path):
for path in path_list:
full_path = path_join(path, file_path, abs_path=True)
if os.path.exists(full_path):
_LOG.debug("Path resolved: %s", full_path)
return full_path
_LOG.debug("Path not resolved: %s", file_path)
return file_path
    def load_config(self,
                    json_file_name: str,
                    schema_type: Optional[ConfigSchema],
                    ) -> Dict[str, Any]:
        """
        Load JSON config file. Search for a file relative to `_config_path`
        if the input path is not absolute.
        This method is exported to be used as a service.

        Parameters
        ----------
        json_file_name : str
            Path to the input config file.
        schema_type : Optional[ConfigSchema]
            The schema type to validate the config against.
            If None, no schema validation is performed.

        Returns
        -------
        config : Union[dict, List[dict]]
            Free-format dictionary that contains the configuration.
            NOTE(review): despite the `Dict[str, Any]` annotation, json5 can
            also yield a list here (hence the `no-any-return` ignore below);
            confirm against the callers, e.g. `load_environment_list`.

        Raises
        ------
        ValueError
            If the config fails validation against the given schema.
        """
        json_file_name = self.resolve_path(json_file_name)
        _LOG.info("Load config: %s", json_file_name)
        with open(json_file_name, mode='r', encoding='utf-8') as fh_json:
            config = json5.load(fh_json)  # JSON5 allows comments, trailing commas, etc.
        if schema_type is not None:
            try:
                schema_type.validate(config)
            except (ValidationError, SchemaError) as ex:
                _LOG.error("Failed to validate config %s against schema type %s at %s",
                           json_file_name, schema_type.name, schema_type.value)
                raise ValueError(f"Failed to validate config {json_file_name} against " +
                                 f"schema type {schema_type.name} at {schema_type.value}") from ex
            if isinstance(config, dict) and config.get("$schema"):
                # Remove $schema attributes from the config after we've validated
                # them to avoid passing them on to other objects
                # (e.g. SqlAlchemy based storage initializers).
                # NOTE: we only do this for internal schemas.
                # Other configs that get loaded may need the schema field
                # (e.g. Azure ARM templates).
                del config["$schema"]
        return config # type: ignore[no-any-return]
    def prepare_class_load(self, config: Dict[str, Any],
                           global_config: Optional[Dict[str, Any]] = None,
                           parent_args: Optional[Dict[str, TunableValue]] = None) -> Tuple[str, Dict[str, Any]]:
        """
        Extract the class instantiation parameters from the configuration.
        Mix-in the global parameters and resolve the local file system paths,
        where it is required.

        NOTE: Mutates `config["config"]` in place (setdefault / merge / path
        resolution below all write into the nested dict).

        Parameters
        ----------
        config : dict
            Configuration of the optimizer.
        global_config : dict
            Global configuration parameters (optional).
        parent_args : Dict[str, TunableValue]
            An optional reference of the parent CompositeEnv's const_args used to
            expand dynamic config parameters from.

        Returns
        -------
        (class_name, class_config) : (str, dict)
            Name of the class to instantiate and its configuration.

        Raises
        ------
        ValueError
            If a "resolve_config_property_paths" entry is neither a string
            nor a list/tuple of strings.
        """
        class_name = config["class"]
        class_config = config.setdefault("config", {})
        # Replace any appearance of "$param_name" in the const_arg values with
        # the value from the parent CompositeEnv.
        # Note: we could consider expanding this feature to additional config
        # sections in the future, but for now only use it in const_args.
        if class_name.startswith("mlos_bench.environments."):
            const_args = class_config.get("const_args", {})
            preprocess_dynamic_configs(dest=const_args, source=parent_args)
        merge_parameters(dest=class_config, source=global_config)
        # Resolve local filesystem paths for the config properties listed in
        # "resolve_config_property_paths" (single strings or lists of strings).
        for key in set(class_config).intersection(config.get("resolve_config_property_paths", [])):
            if isinstance(class_config[key], str):
                class_config[key] = self.resolve_path(class_config[key])
            elif isinstance(class_config[key], (list, tuple)):
                class_config[key] = [self.resolve_path(path) for path in class_config[key]]
            else:
                raise ValueError(f"Parameter {key} must be a string or a list")
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Instantiating: %s with config:\n%s",
                       class_name, json.dumps(class_config, indent=2))
        return (class_name, class_config)
def build_generic(self, *,
base_cls: Type[BaseTypeVar],
tunables: TunableGroups,
service: Service,
config: Dict[str, Any],
global_config: Optional[Dict[str, Any]] = None) -> BaseTypeVar:
"""
Generic instantiation of mlos_bench objects like Storage and Optimizer
that depend on Service and TunableGroups.
A class *MUST* have a constructor that takes four named arguments:
(tunables, config, global_config, service)
Parameters
----------
base_cls : ClassType
A base class of the object to instantiate.
tunables : TunableGroups
Tunable parameters of the environment. We need them to validate the
configurations of merged-in experiments and restored/pending trials.
service: Service
An optional service object (e.g., providing methods to load config files, etc.)
config : dict
Configuration of the class to instantiate, as loaded from JSON.
global_config : dict
Global configuration parameters (optional).
Returns
-------
inst : Any
A new instance of the `cls` class.
"""
tunables_path = config.get("include_tunables")
if tunables_path is not None:
tunables = self._load_tunables(tunables_path, tunables)
(class_name, class_config) = self.prepare_class_load(config, global_config)
inst = instantiate_from_config(base_cls, class_name,
tunables=tunables,
config=class_config,
global_config=global_config,
service=service)
_LOG.info("Created: %s %s", base_cls.__name__, inst)
return inst
def build_environment(self, # pylint: disable=too-many-arguments
config: Dict[str, Any],
tunables: TunableGroups,
global_config: Optional[Dict[str, Any]] = None,
parent_args: Optional[Dict[str, TunableValue]] = None,
service: Optional[Service] = None) -> Environment:
"""
Factory method for a new environment with a given config.
Parameters
----------
config : dict
A dictionary with three mandatory fields:
"name": Human-readable string describing the environment;
"class": FQN of a Python class to instantiate;
"config": Free-format dictionary to pass to the constructor.
tunables : TunableGroups
A (possibly empty) collection of groups of tunable parameters for
all environments.
global_config : dict
Global parameters to add to the environment config.
parent_args : Dict[str, TunableValue]
An optional reference of the parent CompositeEnv's const_args used to
expand dynamic config parameters from.
service: Service
An optional service object (e.g., providing methods to
deploy or reboot a VM, etc.).
Returns
-------
env : Environment
An instance of the `Environment` class initialized with `config`.
"""
env_name = config["name"]
(env_class, env_config) = self.prepare_class_load(config, global_config, parent_args)
env_services_path = config.get("include_services")
if env_services_path is not None:
service = self.load_services(env_services_path, global_config, service)
env_tunables_path = config.get("include_tunables")
if env_tunables_path is not None:
tunables = self._load_tunables(env_tunables_path, tunables)
_LOG.debug("Creating env: %s :: %s", env_name, env_class)
env = Environment.new(env_name=env_name, class_name=env_class,
config=env_config, global_config=global_config,
tunables=tunables, service=service)
_LOG.info("Created env: %s :: %s", env_name, env)
return env
def _build_standalone_service(self, config: Dict[str, Any],
global_config: Optional[Dict[str, Any]] = None,
parent: Optional[Service] = None) -> Service:
"""
Factory method for a new service with a given config.
Parameters
----------
config : dict
A dictionary with two mandatory fields:
"class": FQN of a Python class to instantiate;
"config": Free-format dictionary to pass to the constructor.
global_config : dict
Global parameters to add to the service config.
parent: Service
An optional reference of the parent service to mix in.
Returns
-------
svc : Service
An instance of the `Service` class initialized with `config`.
"""
(svc_class, svc_config) = self.prepare_class_load(config, global_config)
service = Service.new(svc_class, svc_config, global_config, parent)
_LOG.info("Created service: %s", service)
return service
def _build_composite_service(self, config_list: Iterable[Dict[str, Any]],
global_config: Optional[Dict[str, Any]] = None,
parent: Optional[Service] = None) -> Service:
"""
Factory method for a new service with a given config.
Parameters
----------
config_list : a list of dict
A list where each element is a dictionary with 2 mandatory fields:
"class": FQN of a Python class to instantiate;
"config": Free-format dictionary to pass to the constructor.
global_config : dict
Global parameters to add to the service config.
parent: Service
An optional reference of the parent service to mix in.
Returns
-------
svc : Service
An instance of the `Service` class that is a combination of all
services from the list plus the parent mix-in.
"""
service = Service()
if parent:
service.register(parent.export())
for config in config_list:
service.register(self._build_standalone_service(
config, global_config, service).export())
if _LOG.isEnabledFor(logging.DEBUG):
_LOG.debug("Created mix-in service:\n%s", "\n".join(
f' "{key}": {val}' for (key, val) in service.export().items()))
return service
def build_service(self,
config: Dict[str, Any],
global_config: Optional[Dict[str, Any]] = None,
parent: Optional[Service] = None) -> Service:
"""
Factory method for a new service with a given config.
Parameters
----------
config : dict
A dictionary with 2 mandatory fields:
"class": FQN of a Python class to instantiate;
"config": Free-format dictionary to pass to the constructor.
global_config : dict
Global parameters to add to the service config.
parent: Service
An optional reference of the parent service to mix in.
Returns
-------
svc : Service
An instance of the `Service` class that is a combination of all
services from the list plus the parent mix-in.
"""
if _LOG.isEnabledFor(logging.DEBUG):
_LOG.debug("Build service from config:\n%s",
json.dumps(config, indent=2))
assert isinstance(config, dict)
config_list: List[Dict[str, Any]]
if "class" not in config:
# Top level config is a simple object with a list of services
config_list = config["services"]
else:
# Top level config is a single service
if parent is None:
return self._build_standalone_service(config, global_config)
config_list = [config]
return self._build_composite_service(config_list, global_config, parent)
def load_environment(self, # pylint: disable=too-many-arguments
json_file_name: str,
tunables: TunableGroups,
global_config: Optional[Dict[str, Any]] = None,
parent_args: Optional[Dict[str, TunableValue]] = None,
service: Optional[Service] = None) -> Environment:
"""
Load and build new environment from the config file.
Parameters
----------
json_file_name : str
The environment JSON configuration file.
tunables : TunableGroups
A (possibly empty) collection of tunables to add to the environment.
global_config : dict
Global parameters to add to the environment config.
parent_args : Dict[str, TunableValue]
An optional reference of the parent CompositeEnv's const_args used to
expand dynamic config parameters from.
service : Service
An optional reference of the parent service to mix in.
Returns
-------
env : Environment
A new benchmarking environment.
"""
config = self.load_config(json_file_name, ConfigSchema.ENVIRONMENT)
assert isinstance(config, dict)
return self.build_environment(config, tunables, global_config, parent_args, service)
    def load_environment_list(self,  # pylint: disable=too-many-arguments
                              json_file_name: str,
                              tunables: TunableGroups,
                              global_config: Optional[Dict[str, Any]] = None,
                              parent_args: Optional[Dict[str, TunableValue]] = None,
                              service: Optional[Service] = None) -> List[Environment]:
        """
        Load and build a list of environments from the config file.

        Parameters
        ----------
        json_file_name : str
            The environment JSON configuration file.
            Can contain either one environment or a list of environments.
        tunables : TunableGroups
            A (possibly empty) collection of tunables to add to the environment.
        global_config : dict
            Global parameters to add to the environment config.
        parent_args : Dict[str, TunableValue]
            An optional reference of the parent CompositeEnv's const_args used to
            expand dynamic config parameters from.
        service : Service
            An optional reference of the parent service to mix in.

        Returns
        -------
        env : List[Environment]
            A list of new benchmarking environments.
        """
        config = self.load_config(json_file_name, ConfigSchema.ENVIRONMENT)
        # NOTE(review): build_environment() is invoked once on the whole config and the
        # result wrapped in a one-element list; presumably a config carrying multiple
        # environments expands inside build_environment() -- confirm with its callers.
        return [
            self.build_environment(config, tunables, global_config, parent_args, service)
        ]
def load_services(self, json_file_names: Iterable[str],
global_config: Optional[Dict[str, Any]] = None,
parent: Optional[Service] = None) -> Service:
"""
Read the configuration files and bundle all service methods
from those configs into a single Service object.
Parameters
----------
json_file_names : list of str
A list of service JSON configuration files.
global_config : dict
Global parameters to add to the service config.
parent : Service
An optional reference of the parent service to mix in.
Returns
-------
service : Service
A collection of service methods.
"""
_LOG.info("Load services: %s parent: %s",
json_file_names, parent.__class__.__name__)
service = Service({}, global_config, parent)
for fname in json_file_names:
config = self.load_config(fname, ConfigSchema.SERVICE)
service.register(self.build_service(config, global_config, service).export())
return service
def _load_tunables(self, json_file_names: Iterable[str],
parent: TunableGroups) -> TunableGroups:
"""
Load a collection of tunable parameters from JSON files into the parent
TunableGroup.
This helps allow standalone environment configs to reference
overlapping tunable groups configs but still allow combining them into
a single instance that each environment can reference.
Parameters
----------
json_file_names : list of str
A list of JSON files to load.
parent : TunableGroups
A (possibly empty) collection of tunables to add to the new collection.
Returns
-------
tunables : TunableGroup
The larger collection of tunable parameters.
"""
_LOG.info("Load tunables: '%s'", json_file_names)
tunables = parent.copy()
for fname in json_file_names:
config = self.load_config(fname, ConfigSchema.TUNABLE_PARAMS)
assert isinstance(config, dict)
tunables.merge(TunableGroups(config))
return tunables
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,842
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/optimizers/mock_optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Mock optimizer for mlos_bench.
"""
import random
import logging
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable import Tunable, TunableValue
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.services.base_service import Service
_LOG = logging.getLogger(__name__)
class MockOptimizer(Optimizer):
    """
    Mock optimizer to test the Environment API.

    Produces seeded (hence reproducible) random suggestions and tracks the
    best score/config observed among succeeded trials.
    """
    def __init__(self,
                 tunables: TunableGroups,
                 config: dict,
                 global_config: Optional[dict] = None,
                 service: Optional[Service] = None):
        super().__init__(tunables, config, global_config, service)
        rnd = random.Random(config.get("seed", 42))
        # Per-tunable-type generators of random values; each lambda captures `rnd`
        # so all suggestions are drawn from one seeded stream.
        self._random: Dict[str, Callable[[Tunable], TunableValue]] = {
            "categorical": lambda tunable: rnd.choice(tunable.categories),
            "float": lambda tunable: rnd.uniform(*tunable.range),
            "int": lambda tunable: rnd.randint(*tunable.range),
        }
        self._best_config: Optional[TunableGroups] = None
        self._best_score: Optional[float] = None
    def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]],
                      status: Optional[Sequence[Status]] = None) -> bool:
        """
        Warm up the optimizer with the results of prior trials.
        Returns False if the base class rejects the data, True otherwise.
        """
        if not super().bulk_register(configs, scores, status):
            return False
        if status is None:
            # Assume success for all trials when no statuses were supplied.
            status = [Status.SUCCEEDED] * len(configs)
        for (params, score, trial_status) in zip(configs, scores, status):
            tunables = self._tunables.copy().assign(params)
            self.register(tunables, trial_status, None if score is None else float(score))
            self._iter -= 1  # Do not advance the iteration counter during warm-up.
        if _LOG.isEnabledFor(logging.DEBUG):
            (score, _) = self.get_best_observation()
            _LOG.debug("Warm-up end: %s = %s", self.target, score)
        return True
    def suggest(self) -> TunableGroups:
        """
        Generate the next (random) suggestion.
        """
        tunables = self._tunables.copy()
        if self._start_with_defaults:
            # First trial uses the default values; randomize from then on.
            _LOG.info("Use default values for the first trial")
            self._start_with_defaults = False
        else:
            for (tunable, _group) in tunables:
                tunable.value = self._random[tunable.type](tunable)
        _LOG.info("Iteration %d :: Suggest: %s", self._iter, tunables)
        return tunables
    def register(self, tunables: TunableGroups, status: Status,
                 score: Optional[Union[float, dict]] = None) -> Optional[float]:
        """
        Record a trial result, update the best observation, and advance the
        iteration counter. Returns the score as registered by the base class.
        """
        registered_score = super().register(tunables, status, score)
        # Keep the minimum registered score among succeeded trials.
        # NOTE(review): assumes super().register() returns a minimization-oriented
        # score (see the `_opt_sign` adjustment below) -- confirm in base_optimizer.
        if status.is_succeeded() and (
            self._best_score is None or (registered_score is not None and registered_score < self._best_score)
        ):
            self._best_score = registered_score
            self._best_config = tunables.copy()
        self._iter += 1
        return registered_score
    def get_best_observation(self) -> Union[Tuple[float, TunableGroups], Tuple[None, None]]:
        """
        Return (best score, best config), or (None, None) if nothing succeeded yet.
        The score is converted back to the user's optimization direction via `_opt_sign`.
        """
        if self._best_score is None:
            return (None, None)
        assert self._best_config is not None
        return (self._best_score * self._opt_sign, self._best_config)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,843
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/storage/trial_telemetry_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for saving and restoring the telemetry data.
"""
from datetime import datetime, timedelta
from typing import Any, List, Optional, Tuple
import pytest
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.storage.base_storage import Storage
# pylint: disable=redefined-outer-name
@pytest.fixture
def telemetry_data() -> List[Tuple[datetime, str, Any]]:
    """
    Mock telemetry records for a trial: two timestamps, one second apart,
    each carrying the same three metrics.

    Returns
    -------
    List[Tuple[datetime, str, str]]
        A list of (timestamp, metric_id, metric_value)
    """
    ts_start = datetime.utcnow()
    ts_next = ts_start + timedelta(seconds=1)
    records = [
        (ts_start, "cpu_load", 10.1),
        (ts_start, "memory", 20),
        (ts_start, "setup", "prod"),
        (ts_next, "cpu_load", 30.1),
        (ts_next, "memory", 40),
        (ts_next, "setup", "prod"),
    ]
    return sorted(records)
def _telemetry_str(data: List[Tuple[datetime, str, Any]]
) -> List[Tuple[datetime, str, Optional[str]]]:
"""
Convert telemetry values to strings.
"""
return [(ts, key, None if val is None else str(val)) for (ts, key, val) in data]
def test_update_telemetry(exp_storage_memory_sql: Storage.Experiment,
                          tunable_groups: TunableGroups,
                          telemetry_data: List[Tuple[datetime, str, Any]]) -> None:
    """
    Check that update_telemetry() stores data retrievable via load_telemetry().
    """
    new_trial = exp_storage_memory_sql.new_trial(tunable_groups)
    trial_id = new_trial.trial_id
    # A fresh trial has no telemetry yet.
    assert exp_storage_memory_sql.load_telemetry(trial_id) == []
    new_trial.update_telemetry(Status.RUNNING, telemetry_data)
    expected = _telemetry_str(telemetry_data)
    assert exp_storage_memory_sql.load_telemetry(trial_id) == expected
def test_update_telemetry_twice(exp_storage_memory_sql: Storage.Experiment,
                                tunable_groups: TunableGroups,
                                telemetry_data: List[Tuple[datetime, str, Any]]) -> None:
    """
    Make sure update_telemetry() call is idempotent.
    """
    new_trial = exp_storage_memory_sql.new_trial(tunable_groups)
    # Repeating the same update must not duplicate the stored records.
    for _ in range(3):
        new_trial.update_telemetry(Status.RUNNING, telemetry_data)
    assert exp_storage_memory_sql.load_telemetry(new_trial.trial_id) == _telemetry_str(telemetry_data)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,844
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/storage/base_storage.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Base interface for saving and restoring the benchmark data.
"""
import logging
from abc import ABCMeta, abstractmethod
from datetime import datetime
from types import TracebackType
from typing import Optional, Union, List, Tuple, Dict, Iterator, Type, Any
from typing_extensions import Literal
from mlos_bench.environments.status import Status
from mlos_bench.services.base_service import Service
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import get_git_info
_LOG = logging.getLogger(__name__)
class Storage(metaclass=ABCMeta):
    # pylint: disable=too-few-public-methods,too-many-instance-attributes
    """
    An abstract interface between the benchmarking framework
    and storage systems (e.g., SQLite or MLFLow).
    """
    def __init__(self,
                 tunables: TunableGroups,
                 config: Dict[str, Any],
                 global_config: Optional[dict] = None,
                 service: Optional[Service] = None):
        """
        Create a new storage object.

        Parameters
        ----------
        tunables : TunableGroups
            Tunable parameters of the environment. We need them to validate the
            configurations of merged-in experiments and restored/pending trials.
        config : dict
            Free-format key/value pairs of configuration parameters.
        global_config : dict
            Global parameters to mix into the storage config (optional).
        service : Service
            Parent service reference (optional).
        """
        _LOG.debug("Storage config: %s", config)
        # Keep private copies so later mutations by the caller do not leak in.
        self._tunables = tunables.copy()
        self._service = service
        self._config = config.copy()
        self._global_config = global_config or {}
    @abstractmethod
    def experiment(self, *,
                   experiment_id: str,
                   trial_id: int,
                   root_env_config: str,
                   description: str,
                   opt_target: str) -> 'Storage.Experiment':
        """
        Create a new experiment in the storage.
        We need the `opt_target` parameter here to know what metric to retrieve
        when we load the data from previous trials. Later we will replace it with
        full metadata about the optimization direction, multiple objectives, etc.

        Parameters
        ----------
        experiment_id : str
            Unique identifier of the experiment.
        trial_id : int
            Starting number of the trial.
        root_env_config : str
            A path to the root JSON configuration file of the benchmarking environment.
        description : str
            Human-readable description of the experiment.
        opt_target : str
            Name of metric we're optimizing for.

        Returns
        -------
        experiment : Storage.Experiment
            An object that allows to update the storage with
            the results of the experiment and related data.
        """
    class Experiment(metaclass=ABCMeta):
        """
        Base interface for storing the results of the experiment.
        This class is instantiated in the `Storage.experiment()` method.
        Intended to be used as a context manager (see `__enter__`/`__exit__`).
        """
        def __init__(self, tunables: TunableGroups, experiment_id: str, root_env_config: str):
            self._tunables = tunables.copy()
            self._experiment_id = experiment_id
            # Record the git provenance of the root environment config for reproducibility.
            (self._git_repo, self._git_commit, self._root_env_config) = get_git_info(root_env_config)
        def __enter__(self) -> 'Storage.Experiment':
            """
            Enter the context of the experiment.
            Override the `_setup` method to add custom context initialization.
            """
            _LOG.debug("Starting experiment: %s", self)
            self._setup()
            return self
        def __exit__(self, exc_type: Optional[Type[BaseException]],
                     exc_val: Optional[BaseException],
                     exc_tb: Optional[TracebackType]) -> Literal[False]:
            """
            End the context of the experiment.
            Override the `_teardown` method to add custom context teardown logic.
            """
            is_ok = exc_val is None
            if is_ok:
                _LOG.debug("Finishing experiment: %s", self)
            else:
                assert exc_type and exc_val
                _LOG.warning("Finishing experiment: %s", self,
                             exc_info=(exc_type, exc_val, exc_tb))
            self._teardown(is_ok)
            return False  # Do not suppress exceptions
        def __repr__(self) -> str:
            return self._experiment_id
        def _setup(self) -> None:
            """
            Create a record of the new experiment or find an existing one in the storage.
            This method is called by `Storage.Experiment.__enter__()`.
            Default implementation is a no-op; subclasses may override.
            """
        def _teardown(self, is_ok: bool) -> None:
            """
            Finalize the experiment in the storage.
            This method is called by `Storage.Experiment.__exit__()`.
            Default implementation is a no-op; subclasses may override.

            Parameters
            ----------
            is_ok : bool
                True if there were no exceptions during the experiment, False otherwise.
            """
        @abstractmethod
        def merge(self, experiment_ids: List[str]) -> None:
            """
            Merge in the results of other (compatible) experiments trials.
            Used to help warm up the optimizer for this experiment.

            Parameters
            ----------
            experiment_ids : List[str]
                List of IDs of the experiments to merge in.
            """
        @abstractmethod
        def load_config(self, config_id: int) -> Dict[str, Any]:
            """
            Load tunable values for a given config ID.
            """
        @abstractmethod
        def load_telemetry(self, trial_id: int) -> List[Tuple[datetime, str, Any]]:
            """
            Retrieve the telemetry data for a given trial.

            Parameters
            ----------
            trial_id : int
                Trial ID.

            Returns
            -------
            metrics : List[Tuple[datetime, str, Any]]
                Telemetry data.
            """
        @abstractmethod
        def load(self, opt_target: Optional[str] = None) -> Tuple[List[dict], List[Optional[float]], List[Status]]:
            """
            Load (tunable values, benchmark scores, status) to warm-up the optimizer.
            This call returns data from ALL merged-in experiments and attempts
            to impute the missing tunable values.
            """
        @abstractmethod
        def pending_trials(self) -> Iterator['Storage.Trial']:
            """
            Return an iterator over the pending trial runs for this experiment.
            """
        @abstractmethod
        def new_trial(self, tunables: TunableGroups,
                      config: Optional[Dict[str, Any]] = None) -> 'Storage.Trial':
            """
            Create a new experiment run in the storage.

            Parameters
            ----------
            tunables : TunableGroups
                Tunable parameters of the experiment.
            config : dict
                Key/value pairs of additional non-tunable parameters of the trial.

            Returns
            -------
            trial : Storage.Trial
                An object that allows to update the storage with
                the results of the experiment trial run.
            """
    class Trial(metaclass=ABCMeta):
        """
        Base interface for storing the results of a single run of the experiment.
        This class is instantiated in the `Storage.Experiment.trial()` method.
        """
        def __init__(self, *,
                     tunables: TunableGroups, experiment_id: str, trial_id: int,
                     config_id: int, opt_target: str, config: Optional[Dict[str, Any]] = None):
            self._tunables = tunables
            self._experiment_id = experiment_id
            self._trial_id = trial_id
            self._config_id = config_id
            self._opt_target = opt_target
            self._config = config or {}
        def __repr__(self) -> str:
            return f"{self._experiment_id}:{self._trial_id}"
        @property
        def trial_id(self) -> int:
            """
            ID of the current trial.
            """
            return self._trial_id
        @property
        def config_id(self) -> int:
            """
            ID of the current trial configuration.
            """
            return self._config_id
        @property
        def tunables(self) -> TunableGroups:
            """
            Tunable parameters of the current trial.
            """
            return self._tunables
        def config(self, global_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
            """
            Produce a copy of the global configuration updated
            with the parameters of the current trial.
            The original `self._config` is not modified.
            """
            config = self._config.copy()
            config.update(global_config or {})
            config["experiment_id"] = self._experiment_id
            config["trial_id"] = self._trial_id
            return config
        @abstractmethod
        def update(self, status: Status, timestamp: datetime,
                   metrics: Optional[Union[Dict[str, float], float]] = None
                   ) -> Optional[Dict[str, float]]:
            """
            Update the storage with the results of the experiment.
            Subclasses should call this base implementation (via `super()`) for
            logging, validation, and metrics normalization.

            Parameters
            ----------
            status : Status
                Status of the experiment run.
            timestamp: datetime
                Timestamp of the status and metrics.
            metrics : Optional[Union[Dict[str, float], float]]
                One or several metrics of the experiment run.
                Must contain the optimization target if the status is SUCCEEDED.

            Returns
            -------
            metrics : Optional[Dict[str, float]]
                Same as `metrics`, but always in the dict format.
            """
            _LOG.info("Store trial: %s :: %s %s", self, status, metrics)
            if isinstance(metrics, dict) and self._opt_target not in metrics:
                _LOG.warning("Trial %s :: opt.target missing: %s", self, self._opt_target)
                # raise ValueError(
                #     f"Optimization target '{self._opt_target}' is missing from {metrics}")
            # A scalar metric is wrapped in a dict keyed by the optimization target.
            return {self._opt_target: metrics} if isinstance(metrics, (float, int)) else metrics
        @abstractmethod
        def update_telemetry(self, status: Status,
                             metrics: List[Tuple[datetime, str, Any]]) -> None:
            """
            Save the experiment's telemetry data and intermediate status.
            Subclasses should call this base implementation (via `super()`) for logging.

            Parameters
            ----------
            status : Status
                Current status of the trial.
            metrics : List[Tuple[datetime, str, Any]]
                Telemetry data.
            """
            _LOG.info("Store telemetry: %s :: %s %d records", self, status, len(metrics))
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,845
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for mock mlos_bench optimizer.
"""
import pytest
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.optimizers.mlos_core_optimizer import MlosCoreOptimizer
from mlos_bench.tests import SEED
# pylint: disable=redefined-outer-name
@pytest.fixture
def llamatune_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
    """
    Test fixture: an mlos_core SMAC optimizer wrapped in the LlamaTune space adapter.
    """
    opt_config = {
        "space_adapter_type": "LLAMATUNE",
        "space_adapter_config": {
            "num_low_dims": 1,
        },
        "optimization_target": "score",
        "max_iterations": 10,
        "optimizer_type": "SMAC",
        "seed": SEED,
        # "start_with_defaults": False,
    }
    return MlosCoreOptimizer(tunables=tunable_groups, service=None, config=opt_config)
@pytest.fixture
def mock_scores() -> list:
    """
    Fake benchmark scores to feed the optimizers under test.
    """
    scores = [88.88, 66.66, 99.99]
    return scores
def test_llamatune_optimizer(llamatune_opt: MlosCoreOptimizer, mock_scores: list) -> None:
    """
    Make sure that llamatune+smac optimizer initializes and works correctly.
    Registers each mock score and checks that the best observation is the minimum.
    """
    for score in mock_scores:
        assert llamatune_opt.not_converged()
        tunables = llamatune_opt.suggest()
        # FIXME: The optimizer's suggestions are not deterministic, so we can't
        # check the tunables here.
        # NOTE(review): the original comment said "Emukit", but the fixture config
        # above selects SMAC -- the comment may be stale; confirm.
        llamatune_opt.register(tunables, Status.SUCCEEDED, score)
    # `score` from the loop is rebound here to the best (lowest) observed score.
    (score, _tunables) = llamatune_opt.get_best_observation()
    assert score == pytest.approx(66.66, 0.01)
if __name__ == '__main__':
    # Allow running this test module directly (e.g., to attach a debugger):
    pytest.main(["-vv", "-n1", "-k", "test_llamatune_optimizer", __file__])
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,846
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/local/local_exec_python_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for LocalExecService to run Python scripts locally.
"""
from typing import Any, Dict
import json
import pytest
from mlos_bench.tunables.tunable import TunableValue
from mlos_bench.services.local.local_exec import LocalExecService
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.util import path_join
# pylint: disable=redefined-outer-name
@pytest.fixture
def local_exec_service() -> LocalExecService:
    """
    Test fixture: a LocalExecService backed by a ConfigPersistenceService parent.
    """
    config_service = ConfigPersistenceService()
    return LocalExecService(parent=config_service)
def test_run_python_script(local_exec_service: LocalExecService) -> None:
    """
    Run a Python script using a local_exec service.
    End-to-end check: write tunable params and metadata JSON files into a temp
    dir, run the kernel-config generator script there, and verify its output.
    """
    # All paths are relative to the temp dir used as the script's cwd.
    input_file = "./input-params.json"
    meta_file = "./input-params-meta.json"
    output_file = "./config-kernel.sh"
    # Tunable parameters to save in JSON
    params: Dict[str, TunableValue] = {
        "sched_migration_cost_ns": 40000,
        "sched_granularity_ns": 800000,
    }
    # Tunable parameters metadata
    params_meta: Dict[str, Any] = {
        "sched_migration_cost_ns": {"name_prefix": "/proc/sys/kernel/"},
        "sched_granularity_ns": {"name_prefix": "/proc/sys/kernel/"},
    }
    with local_exec_service.temp_dir_context() as temp_dir:
        with open(path_join(temp_dir, input_file), "wt", encoding="utf-8") as fh_input:
            json.dump(params, fh_input)
        with open(path_join(temp_dir, meta_file), "wt", encoding="utf-8") as fh_meta:
            json.dump(params_meta, fh_meta)
        script_path = local_exec_service.config_loader_service.resolve_path(
            "environments/os/linux/runtime/scripts/local/generate_kernel_config_script.py")
        # The params are also passed as environment variables to the script.
        (return_code, _stdout, stderr) = local_exec_service.local_exec([
            f"{script_path} {input_file} {meta_file} {output_file}"
        ], cwd=temp_dir, env=params)
        assert stderr.strip() == ""
        assert return_code == 0
        # assert stdout.strip() == ""
        with open(path_join(temp_dir, output_file), "rt", encoding="utf-8") as fh_output:
            assert [ln.strip() for ln in fh_output.readlines()] == [
                'echo "40000" > /proc/sys/kernel/sched_migration_cost_ns',
                'echo "800000" > /proc/sys/kernel/sched_granularity_ns',
            ]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,847
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/config/environments/os/linux/boot/scripts/local/generate_grub_config.py
|
#!/usr/bin/env python3
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Helper script to generate GRUB config from tunable parameters JSON.
Run: `./generate_grub_config.py ./input-boot-params.json ./output-grub.cfg`
"""
import json
import argparse
def _main(fname_input: str, fname_output: str) -> None:
with open(fname_input, "rt", encoding="utf-8") as fh_tunables, \
open(fname_output, "wt", encoding="utf-8", newline="") as fh_config:
for (key, val) in json.load(fh_tunables).items():
line = f'GRUB_CMDLINE_LINUX_DEFAULT="${{GRUB_CMDLINE_LINUX_DEFAULT}} {key}={val}"'
fh_config.write(line + "\n")
print(line)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate GRUB config from tunable parameters JSON.")
parser.add_argument("input", help="JSON file with tunable parameters.")
parser.add_argument("output", help="Output shell script to configure GRUB.")
args = parser.parse_args()
_main(args.input, args.output)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,848
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/types/local_exec_type.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Protocol interface for Service types that provide helper functions to run
scripts and commands locally on the scheduler side.
"""
from typing import Iterable, Mapping, Optional, Tuple, Union, Protocol, runtime_checkable
import tempfile
import contextlib
from mlos_bench.tunables.tunable import TunableValue
@runtime_checkable
class SupportsLocalExec(Protocol):
    """
    Protocol interface for a collection of methods to run scripts and commands
    in an external process on the node acting as the scheduler. Can be useful
    for data processing due to reduced dependency management complications vs
    the target environment.
    Used in LocalEnv and provided by LocalExecService.
    """
    def local_exec(self, script_lines: Iterable[str],
                   env: Optional[Mapping[str, TunableValue]] = None,
                   cwd: Optional[str] = None,
                   return_on_error: bool = False) -> Tuple[int, str, str]:
        """
        Execute the script lines from `script_lines` in a local process.

        Parameters
        ----------
        script_lines : Iterable[str]
            Lines of the script to run locally.
            Treat every line as a separate command to run.
        env : Mapping[str, Union[int, float, str]]
            Environment variables (optional).
        cwd : str
            Work directory to run the script at.
            If omitted, use `temp_dir` or create a temporary dir.
        return_on_error : bool
            If True, stop running script lines on first non-zero return code.
            The default is False.

        Returns
        -------
        (return_code, stdout, stderr) : (int, str, str)
            A 3-tuple of return code, stdout, and stderr of the script process.
        """
    def temp_dir_context(self, path: Optional[str] = None) -> Union[tempfile.TemporaryDirectory, contextlib.nullcontext]:
        """
        Create a temp directory or use the provided path.

        Parameters
        ----------
        path : str
            A path to the temporary directory. Create a new one if None.

        Returns
        -------
        temp_dir_context : Union[tempfile.TemporaryDirectory, contextlib.nullcontext]
            Temporary directory context to use in the `with` clause.
            (A nullcontext wrapping `path` when a path is provided.)
        """
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,849
|
microsoft/MLOS
|
refs/heads/main
|
/conftest.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Provides some pytest configuration overrides for both modules.
"""
# Note: This file is named conftest.py so that pytest picks it up automatically
# without the need to adjust PYTHONPATH or sys.path as much.
import os
from warnings import warn
import pytest
def pytest_configure(config: pytest.Config) -> None:  # pylint: disable=unused-argument
    """
    Add some additional (global) configuration steps for pytest.
    """
    # Workaround some issues loading emukit in certain environments.
    if os.environ.get('DISPLAY', None):
        import matplotlib   # pylint: disable=import-outside-toplevel
        # Force the non-interactive 'agg' backend so no GUI connection is attempted.
        matplotlib.rcParams['backend'] = 'agg'
        warn(UserWarning('DISPLAY environment variable is set, which can cause problems in some setups (e.g. WSL). '
                         + f'Adjusting matplotlib backend to "{matplotlib.rcParams["backend"]}" to compensate.'))
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,850
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/remote/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.services.remote.
Used to make mypy happy about multiple conftest.py modules.
"""
from .mock.mock_fileshare_service import MockFileShareService
from .mock.mock_remote_exec_service import MockRemoteExecService
from .mock.mock_vm_service import MockVMService
# Explicit public API of this test subpackage: the mock remote services
# re-exported above for convenient importing by the test modules.
__all__ = [
    'MockFileShareService',
    'MockRemoteExecService',
    'MockVMService',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,851
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/optimizers/opt_bulk_register_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for mock mlos_bench optimizer.
"""
from typing import Optional, List
import pytest
from mlos_bench.environments.status import Status
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.optimizers.mock_optimizer import MockOptimizer
from mlos_bench.optimizers.mlos_core_optimizer import MlosCoreOptimizer
# pylint: disable=redefined-outer-name
@pytest.fixture
def mock_configs() -> List[dict]:
    """
    Mock configurations of earlier experiments.
    """
    # (vmSize, idle, kernel_sched_migration_cost_ns, kernel_sched_latency_ns) tuples.
    rows = [
        ('Standard_B4ms', 'halt', 50000, 1000000),
        ('Standard_B4ms', 'halt', 40000, 2000000),
        ('Standard_B4ms', 'mwait', 100000, 3000000),
        ('Standard_B2s', 'mwait', 200000, 4000000),
    ]
    return [
        {
            'vmSize': vm_size,
            'idle': idle,
            'kernel_sched_migration_cost_ns': migration_cost,
            'kernel_sched_latency_ns': latency,
        }
        for (vm_size, idle, migration_cost, latency) in rows
    ]
@pytest.fixture
def mock_configs_str(mock_configs: List[dict]) -> List[dict]:
    """
    Same as `mock_config` above, but with all values converted to strings.
    (This can happen when we retrieve the data from storage).
    """
    stringified = []
    for config in mock_configs:
        stringified.append({key: str(val) for (key, val) in config.items()})
    return stringified
@pytest.fixture
def mock_scores() -> List[Optional[float]]:
    """
    Mock benchmark results from earlier experiments (None = no result recorded).
    """
    scores: List[Optional[float]] = [None, 88.88, 66.66, 99.99]
    return scores
@pytest.fixture
def mock_status() -> List[Status]:
    """
    Mock status values for earlier experiments: the first run failed,
    the remaining three succeeded.
    """
    return [Status.FAILED] + [Status.SUCCEEDED] * 3
def _test_opt_update_min(opt: Optimizer, configs: List[dict],
                         scores: List[float], status: Optional[List[Status]] = None) -> None:
    """
    Bulk-register prior results and check that the optimizer finds the
    configuration with the *minimal* score.
    """
    opt.bulk_register(configs, scores, status)
    (best_score, best_tunables) = opt.get_best_observation()
    assert best_score == pytest.approx(66.66, 0.01)
    assert best_tunables is not None
    expected_params = {
        "vmSize": "Standard_B4ms",
        "idle": "mwait",
        "kernel_sched_migration_cost_ns": 100000,
        "kernel_sched_latency_ns": 3000000,
    }
    assert best_tunables.get_param_values() == expected_params
def _test_opt_update_max(opt: Optimizer, configs: List[dict],
                         scores: List[float], status: Optional[List[Status]] = None) -> None:
    """
    Bulk-register prior results and check that the optimizer finds the
    configuration with the *maximal* score.
    """
    opt.bulk_register(configs, scores, status)
    (best_score, best_tunables) = opt.get_best_observation()
    assert best_score == pytest.approx(99.99, 0.01)
    assert best_tunables is not None
    expected_params = {
        "vmSize": "Standard_B2s",
        "idle": "mwait",
        "kernel_sched_migration_cost_ns": 200000,
        "kernel_sched_latency_ns": 4000000,
    }
    assert best_tunables.get_param_values() == expected_params
def test_update_mock_min(mock_opt: MockOptimizer, mock_configs: List[dict],
                         mock_scores: List[float], mock_status: List[Status]) -> None:
    """
    Bulk update of the mock optimizer on the minimization problem.
    """
    _test_opt_update_min(mock_opt, mock_configs, mock_scores, mock_status)
    # The first suggestion after the bulk load must *NOT* be the default config:
    suggestion = mock_opt.suggest().get_param_values()
    assert suggestion == {
        "vmSize": "Standard_B4ms",
        "idle": "halt",
        "kernel_sched_migration_cost_ns": 13111,
        "kernel_sched_latency_ns": 796233790,
    }
def test_update_mock_min_str(mock_opt: MockOptimizer, mock_configs_str: List[dict],
                             mock_scores: List[float], mock_status: List[Status]) -> None:
    """
    Bulk update of the mock optimizer with all-string configuration data
    (as retrieved from storage).
    """
    _test_opt_update_min(mock_opt, mock_configs_str,
                         scores=mock_scores, status=mock_status)
def test_update_mock_max(mock_opt_max: MockOptimizer, mock_configs: List[dict],
                         mock_scores: List[float], mock_status: List[Status]) -> None:
    """
    Bulk update of the mock optimizer on the maximization problem.
    """
    _test_opt_update_max(mock_opt_max, mock_configs,
                         scores=mock_scores, status=mock_status)
def test_update_flaml(flaml_opt: MlosCoreOptimizer, mock_configs: List[dict],
                      mock_scores: List[float], mock_status: List[Status]) -> None:
    """
    Bulk update of the FLAML optimizer (minimization).
    """
    _test_opt_update_min(flaml_opt, mock_configs,
                         scores=mock_scores, status=mock_status)
def test_update_flaml_max(flaml_opt_max: MlosCoreOptimizer, mock_configs: List[dict],
                          mock_scores: List[float], mock_status: List[Status]) -> None:
    """
    Bulk update of the FLAML optimizer (maximization).
    """
    _test_opt_update_max(flaml_opt_max, mock_configs,
                         scores=mock_scores, status=mock_status)
def test_update_smac(smac_opt: MlosCoreOptimizer, mock_configs: List[dict],
                     mock_scores: List[float], mock_status: List[Status]) -> None:
    """
    Bulk update of the SMAC optimizer (minimization).
    """
    _test_opt_update_min(smac_opt, mock_configs,
                         scores=mock_scores, status=mock_status)
def test_update_smac_max(smac_opt_max: MlosCoreOptimizer, mock_configs: List[dict],
                         mock_scores: List[float], mock_status: List[Status]) -> None:
    """
    Bulk update of the SMAC optimizer (maximization).
    """
    _test_opt_update_max(smac_opt_max, mock_configs,
                         scores=mock_scores, status=mock_status)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,852
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/environments/local/local_env_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for LocalEnv benchmark environment.
"""
import pytest
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.tests.environments.local import create_local_env, check_local_env_success
def test_local_env(tunable_groups: TunableGroups) -> None:
    """
    Produce benchmark and telemetry data in a local script and read it.
    """
    script = [
        "echo 'metric,value' > output.csv",
        "echo 'latency,10' >> output.csv",
        "echo 'throughput,66' >> output.csv",
        "echo 'score,0.9' >> output.csv",
    ]
    local_env = create_local_env(tunable_groups, {
        "run": script,
        "read_results_file": "output.csv",
    })
    # Values are parsed from the CSV, so they come back as floats.
    expected = {"latency": 10.0, "throughput": 66.0, "score": 0.9}
    check_local_env_success(local_env, tunable_groups,
                            expected_results=expected,
                            expected_telemetry=[])
def test_local_env_results_no_header(tunable_groups: TunableGroups) -> None:
    """
    Fail if the results file is missing the expected CSV header.
    """
    config = {
        "run": [
            # Note: no 'metric,value' header line in the output.
            "echo 'latency,10' > output.csv",
            "echo 'throughput,66' >> output.csv",
            "echo 'score,0.9' >> output.csv",
        ],
        "read_results_file": "output.csv",
    }
    local_env = create_local_env(tunable_groups, config)
    with local_env as env_context:
        assert env_context.setup(tunable_groups)
        with pytest.raises(ValueError):
            env_context.run()
def test_local_env_wide(tunable_groups: TunableGroups) -> None:
    """
    Produce benchmark data in wide (one row per run) format and read it.
    """
    config = {
        "run": [
            "echo 'latency,throughput,score' > output.csv",
            "echo '10,66,0.9' >> output.csv",
        ],
        "read_results_file": "output.csv",
    }
    local_env = create_local_env(tunable_groups, config)
    check_local_env_success(
        local_env, tunable_groups,
        expected_results={"latency": 10, "throughput": 66, "score": 0.9},
        expected_telemetry=[],
    )
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,853
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/optimizers/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.optimizers.
Used to make mypy happy about multiple conftest.py modules.
"""
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,854
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/remote/vm_env.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
"Remote" VM Environment.
"""
from typing import Optional
import logging
from mlos_bench.environments.base_environment import Environment
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.vm_provisioner_type import SupportsVMOps
from mlos_bench.tunables.tunable_groups import TunableGroups
_LOG = logging.getLogger(__name__)
class VMEnv(Environment):
    """
    "Remote" VM environment.
    Delegates all VM lifecycle operations (provision, wait, deprovision)
    to a service that implements the SupportsVMOps protocol.
    """
    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new environment for VM operations.
        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration. Each config must have at least the "tunable_params"
            and the "const_args" sections.
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of tunable parameters for *all* environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        super().__init__(name=name, config=config, global_config=global_config, tunables=tunables, service=service)
        # This environment is useless without a service that can perform VM operations.
        assert self._service is not None and isinstance(self._service, SupportsVMOps), \
            "VMEnv requires a service that supports VM operations"
        # Narrowed alias of self._service, typed for VM operations.
        self._vm_service: SupportsVMOps = self._service
    def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
        """
        Check if VM is ready. (Re)provision and start it, if necessary.
        Parameters
        ----------
        tunables : TunableGroups
            A collection of groups of tunable parameters along with the
            parameters' values. VMEnv tunables are variable parameters that,
            together with the VMEnv configuration, are sufficient to provision
            and start a VM.
        global_config : dict
            Free-format dictionary of global parameters of the environment
            that are not used in the optimization process.
        Returns
        -------
        is_success : bool
            True if operation is successful, false otherwise.
        """
        _LOG.info("VM set up: %s :: %s", self, tunables)
        if not super().setup(tunables, global_config):
            return False
        (status, params) = self._vm_service.vm_provision(self._params)
        if status.is_pending():
            # Provisioning is asynchronous: block until the deployment completes.
            (status, _) = self._vm_service.wait_vm_deployment(True, params)
        self._is_ready = status.is_succeeded()
        return self._is_ready
    def teardown(self) -> None:
        """
        Shut down the VM and release it.
        """
        _LOG.info("VM tear down: %s", self)
        (status, params) = self._vm_service.vm_deprovision(self._params)
        if status.is_pending():
            # Deprovisioning is asynchronous: block until it completes.
            (status, _) = self._vm_service.wait_vm_deployment(False, params)
        super().teardown()
        _LOG.debug("Final status of VM deprovisioning: %s :: %s", self, status)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,855
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/storage/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Interfaces to the storage backends for OS Autotune.
"""
from mlos_bench.storage.base_storage import Storage
# Public API of the storage subpackage: the abstract Storage interface.
__all__ = [
    'Storage',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,856
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for Tunable to ConfigSpace conversion.
"""
import pytest
from ConfigSpace import UniformIntegerHyperparameter
from ConfigSpace import UniformFloatHyperparameter
from ConfigSpace import CategoricalHyperparameter
from ConfigSpace import ConfigurationSpace
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.optimizers.convert_configspace import _tunable_to_hyperparameter
from mlos_bench.optimizers.convert_configspace import tunable_groups_to_configspace
# pylint: disable=redefined-outer-name
@pytest.fixture
def configuration_space() -> ConfigurationSpace:
    """
    A test fixture that produces a mock ConfigurationSpace object
    matching the tunable_groups fixture.
    Returns
    -------
    configuration_space : ConfigurationSpace
        A new ConfigurationSpace object for testing.
    """
    # Categorical parameters get a list of choices; numerical ones a (low, high) range.
    spaces = ConfigurationSpace(space={
        "vmSize": ["Standard_B2s", "Standard_B2ms", "Standard_B4ms"],
        "idle": ["halt", "mwait", "noidle"],
        "kernel_sched_migration_cost_ns": (-1, 500000),
        "kernel_sched_latency_ns": (0, 1000000000),
    })
    # Defaults must match the tunable_groups fixture.
    defaults = {
        "vmSize": "Standard_B4ms",
        "idle": "halt",
        "kernel_sched_migration_cost_ns": -1,
        "kernel_sched_latency_ns": 2000000,
    }
    for (param_name, default_value) in defaults.items():
        spaces[param_name].default_value = default_value
    return spaces
def _cmp_tunable_hyperparameter_categorical(
        tunable: Tunable, cs_param: CategoricalHyperparameter) -> None:
    """
    Verify that a categorical Tunable matches its ConfigSpace Hyperparameter.
    """
    assert isinstance(cs_param, CategoricalHyperparameter)
    assert set(tunable.categories) == set(cs_param.choices)
    assert tunable.value == cs_param.default_value
def _cmp_tunable_hyperparameter_int(
        tunable: Tunable, cs_param: UniformIntegerHyperparameter) -> None:
    """
    Verify that an integer Tunable matches its ConfigSpace Hyperparameter.
    """
    assert isinstance(cs_param, UniformIntegerHyperparameter)
    assert tuple(tunable.range) == (cs_param.lower, cs_param.upper)
    assert tunable.value == cs_param.default_value
def _cmp_tunable_hyperparameter_float(
        tunable: Tunable, cs_param: UniformFloatHyperparameter) -> None:
    """
    Verify that a float Tunable matches its ConfigSpace Hyperparameter.
    """
    assert isinstance(cs_param, UniformFloatHyperparameter)
    assert tuple(tunable.range) == (cs_param.lower, cs_param.upper)
    assert tunable.value == cs_param.default_value
def test_tunable_to_hyperparameter_categorical(tunable_categorical: Tunable) -> None:
    """
    Check the conversion of Tunable to CategoricalHyperparameter.
    """
    _cmp_tunable_hyperparameter_categorical(
        tunable_categorical, _tunable_to_hyperparameter(tunable_categorical))
def test_tunable_to_hyperparameter_int(tunable_int: Tunable) -> None:
    """
    Check the conversion of Tunable to UniformIntegerHyperparameter.
    """
    _cmp_tunable_hyperparameter_int(
        tunable_int, _tunable_to_hyperparameter(tunable_int))
def test_tunable_to_hyperparameter_float(tunable_float: Tunable) -> None:
    """
    Check the conversion of Tunable to UniformFloatHyperparameter.
    """
    _cmp_tunable_hyperparameter_float(
        tunable_float, _tunable_to_hyperparameter(tunable_float))
# Dispatch table: maps a Tunable.type string to the comparison helper
# that validates its ConfigSpace Hyperparameter counterpart.
_CMP_FUNC = {
    "int": _cmp_tunable_hyperparameter_int,
    "float": _cmp_tunable_hyperparameter_float,
    "categorical": _cmp_tunable_hyperparameter_categorical
}
def test_tunable_groups_to_hyperparameters(tunable_groups: TunableGroups) -> None:
    """
    Check the conversion of TunableGroups to ConfigurationSpace.
    Make sure that the corresponding Tunable and Hyperparameter objects match.
    """
    configspace = tunable_groups_to_configspace(tunable_groups)
    for (tunable, _group) in tunable_groups:
        hyperparameter = configspace[tunable.name]
        assert hyperparameter.default_value == tunable.value
        # Pick the type-specific comparison from the dispatch table.
        _CMP_FUNC[tunable.type](tunable, hyperparameter)
def test_tunable_groups_to_configspace(
        tunable_groups: TunableGroups, configuration_space: ConfigurationSpace) -> None:
    """
    Check the conversion of the entire TunableGroups collection
    to a single ConfigurationSpace object.
    """
    converted = tunable_groups_to_configspace(tunable_groups)
    assert converted == configuration_space
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,857
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/optimizers/conftest.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Test fixtures for mlos_bench optimizers.
"""
import pytest
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.optimizers.mock_optimizer import MockOptimizer
from mlos_bench.optimizers.mlos_core_optimizer import MlosCoreOptimizer
from mlos_bench.tests import SEED
@pytest.fixture
def mock_opt_no_defaults(tunable_groups: TunableGroups) -> MockOptimizer:
    """
    Test fixture for MockOptimizer that ignores the initial configuration.
    """
    config = {
        "optimization_target": "score",
        "optimization_direction": "min",
        "max_iterations": 5,
        # Skip the default config as the first suggestion.
        "start_with_defaults": False,
        "seed": SEED,
    }
    return MockOptimizer(tunables=tunable_groups, service=None, config=config)
@pytest.fixture
def mock_opt(tunable_groups: TunableGroups) -> MockOptimizer:
    """
    Test fixture for MockOptimizer (minimization).
    """
    config = {
        "optimization_target": "score",
        "optimization_direction": "min",
        "max_iterations": 5,
        "seed": SEED,
    }
    return MockOptimizer(tunables=tunable_groups, service=None, config=config)
@pytest.fixture
def mock_opt_max(tunable_groups: TunableGroups) -> MockOptimizer:
    """
    Test fixture for MockOptimizer (maximization).
    """
    config = {
        "optimization_target": "score",
        "optimization_direction": "max",
        "max_iterations": 10,
        "seed": SEED,
    }
    return MockOptimizer(tunables=tunable_groups, service=None, config=config)
@pytest.fixture
def flaml_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
    """
    Test fixture for mlos_core FLAML optimizer (minimization).
    """
    config = {
        "optimization_target": "score",
        "optimization_direction": "min",
        "max_iterations": 15,
        "optimizer_type": "FLAML",
        "seed": SEED,
    }
    return MlosCoreOptimizer(tunables=tunable_groups, service=None, config=config)
@pytest.fixture
def flaml_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
    """
    Test fixture for mlos_core FLAML optimizer (maximization).
    """
    config = {
        "optimization_target": "score",
        "optimization_direction": "max",
        "max_iterations": 15,
        "optimizer_type": "FLAML",
        "seed": SEED,
    }
    return MlosCoreOptimizer(tunables=tunable_groups, service=None, config=config)
# FIXME: SMAC's RF model can be non-deterministic at low iterations, which are
# normally calculated as a percentage of the max_iterations and number of
# tunable dimensions, so for now we set the initial random samples equal to the
# number of iterations and control them with a seed.
# Used as both max_iterations and n_random_init in the SMAC fixtures below.
SMAC_ITERATIONS = 10
@pytest.fixture
def smac_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
    """
    Test fixture for mlos_core SMAC optimizer (minimization).
    """
    config = {
        "optimization_target": "score",
        "optimization_direction": "min",
        "max_iterations": SMAC_ITERATIONS,
        "optimizer_type": "SMAC",
        "seed": SEED,
        "output_directory": None,
        # Keep SMAC deterministic: all iterations are seeded random init samples.
        "n_random_init": SMAC_ITERATIONS,
        "max_ratio": 1.0,
    }
    return MlosCoreOptimizer(tunables=tunable_groups, service=None, config=config)
@pytest.fixture
def smac_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer:
    """
    Test fixture for mlos_core SMAC optimizer (maximization).
    """
    config = {
        "optimization_target": "score",
        "optimization_direction": "max",
        "max_iterations": SMAC_ITERATIONS,
        "optimizer_type": "SMAC",
        "seed": SEED,
        "output_directory": None,
        # Keep SMAC deterministic: all iterations are seeded random init samples.
        "n_random_init": SMAC_ITERATIONS,
        "max_ratio": 1.0,
    }
    return MlosCoreOptimizer(tunables=tunable_groups, service=None, config=config)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,858
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/optimizers/convert_configspace.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Functions to convert TunableGroups to ConfigSpace for use with the mlos_core optimizers.
"""
import logging
from typing import Optional
from ConfigSpace.hyperparameters import Hyperparameter
from ConfigSpace import UniformIntegerHyperparameter
from ConfigSpace import UniformFloatHyperparameter
from ConfigSpace import CategoricalHyperparameter
from ConfigSpace import ConfigurationSpace, Configuration
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
_LOG = logging.getLogger(__name__)
def _tunable_to_hyperparameter(
        tunable: Tunable, group_name: Optional[str] = None, cost: int = 0) -> Hyperparameter:
    """
    Convert a single Tunable to an equivalent ConfigSpace Hyperparameter object.
    Parameters
    ----------
    tunable : Tunable
        An mlos_bench Tunable object.
    group_name : str
        Human-readable id of the CovariantTunableGroup this Tunable belongs to.
    cost : int
        Cost to change this parameter (comes from the corresponding CovariantTunableGroup).
    Returns
    -------
    hyperparameter : Hyperparameter
        A ConfigSpace Hyperparameter object that corresponds to the Tunable.
    Raises
    ------
    TypeError
        If the Tunable's type is not one of "categorical", "int", or "float".
    """
    # Attach group/cost info so the optimizer can reason about change costs.
    meta = {"group": group_name, "cost": cost}
    if tunable.type == "categorical":
        return CategoricalHyperparameter(
            tunable.name, choices=tunable.categories,
            default_value=tunable.default, meta=meta)
    if tunable.type in ("int", "float"):
        # Both numerical kinds share the same constructor signature.
        param_class = (UniformIntegerHyperparameter if tunable.type == "int"
                       else UniformFloatHyperparameter)
        return param_class(
            tunable.name, lower=tunable.range[0], upper=tunable.range[1],
            default_value=tunable.default, meta=meta)
    raise TypeError(f"Undefined Parameter Type: {tunable.type}")
def tunable_groups_to_configspace(tunables: TunableGroups, seed: Optional[int] = None) -> ConfigurationSpace:
    """
    Convert TunableGroups to hyperparameters in ConfigurationSpace.
    Parameters
    ----------
    tunables : TunableGroups
        A collection of tunable parameters.
    seed : Optional[int]
        Random seed to use.
    Returns
    -------
    configspace : ConfigurationSpace
        A new ConfigurationSpace instance that corresponds to the input TunableGroups.
    """
    space = ConfigurationSpace(seed=seed)
    hyperparams = []
    for (tunable, group) in tunables:
        # Each tunable carries the cost of changing its covariant group.
        hyperparams.append(
            _tunable_to_hyperparameter(tunable, group.name, group.get_current_cost()))
    space.add_hyperparameters(hyperparams)
    return space
def tunable_values_to_configuration(tunables: TunableGroups) -> Configuration:
    """
    Converts a TunableGroups current values to a ConfigSpace Configuration.
    Parameters
    ----------
    tunables : TunableGroups
        The TunableGroups to take the current value from.
    Returns
    -------
    Configuration
        A ConfigSpace Configuration.
    """
    values = {tunable.name: tunable.value for (tunable, _group) in tunables}
    return Configuration(tunable_groups_to_configspace(tunables), values=values)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,859
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunable_accessors_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for accessing values to the individual parameters within tunable groups.
"""
import pytest
from mlos_bench.tunables.covariant_group import CovariantTunableGroup
from mlos_bench.tunables.tunable import Tunable
def test_categorical_access_to_numerical_tunable(tunable_int: Tunable) -> None:
    """
    Accessing a numerical tunable via the categorical API must raise.
    """
    with pytest.raises(ValueError):
        _ = tunable_int.category
    with pytest.raises(AssertionError):
        _ = tunable_int.categories
def test_numerical_access_to_categorical_tunable(tunable_categorical: Tunable) -> None:
    """
    Accessing a categorical tunable via the numerical API must raise.
    """
    with pytest.raises(ValueError):
        _ = tunable_categorical.numerical_value
    with pytest.raises(AssertionError):
        _ = tunable_categorical.range
def test_covariant_group_repr(covariant_group: CovariantTunableGroup) -> None:
    """
    The string representation of a covariant group starts with its name.
    """
    expected_prefix = str(covariant_group.name) + ":"
    assert repr(covariant_group).startswith(expected_prefix)
def test_covariant_group_tunables(covariant_group: CovariantTunableGroup) -> None:
    """
    Every member of a covariant group is a Tunable instance.
    """
    assert all(isinstance(tunable, Tunable)
               for tunable in covariant_group.get_tunables())
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,860
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/util.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Various helper functions for mlos_bench.
"""
# NOTE: This has to be placed in the top-level mlos_bench package to avoid circular imports.
import os
import json
import logging
import importlib
import subprocess
from typing import Any, Dict, Iterable, Mapping, Optional, Tuple, Type, TypeVar, TYPE_CHECKING, Union
_LOG = logging.getLogger(__name__)
if TYPE_CHECKING:
from mlos_bench.environments.base_environment import Environment
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.services.base_service import Service
from mlos_bench.storage.base_storage import Storage
# BaseTypeVar is a generic with a constraint of the three base classes.
BaseTypeVar = TypeVar("BaseTypeVar", "Environment", "Optimizer", "Service", "Storage")
BaseTypes = Union["Environment", "Optimizer", "Service", "Storage"]
def preprocess_dynamic_configs(*, dest: dict, source: Optional[dict] = None) -> dict:
    """
    Replace all "$name" string values in the destination config with the
    corresponding value from the source config. Values with no match in
    the source config are left untouched; dest is modified in place.
    Parameters
    ----------
    dest : dict
        Destination config.
    source : Optional[dict]
        Source config.
    Returns
    -------
    dest : dict
        A reference to the destination config after the preprocessing.
    """
    source = source or {}
    for (key, val) in dest.items():
        if not isinstance(val, str):
            continue
        # A "$name" value is a reference into the source config.
        if val.startswith("$") and val[1:] in source:
            dest[key] = source[val[1:]]
    return dest
def merge_parameters(*, dest: dict, source: Optional[dict] = None,
                     required_keys: Optional[Iterable[str]] = None) -> dict:
    """
    Merge the source config dict into the destination config.
    Pick from the source configs *ONLY* the keys that are already present
    in the destination config (required_keys are pulled in even if absent).
    Parameters
    ----------
    dest : dict
        Destination config (modified in place).
    source : Optional[dict]
        Source config.
    required_keys : Optional[Iterable[str]]
        An optional list of keys that must be present in the destination config.
    Returns
    -------
    dest : dict
        A reference to the destination config after the merge.
    Raises
    ------
    ValueError
        If a required key is missing from both dest and source.
    """
    if source is None:
        source = {}
    # Overwrite only the keys dest already declares.
    for key in dest.keys() & source.keys():
        dest[key] = source[key]
    for key in (required_keys or []):
        if key in dest:
            continue
        if key not in source:
            raise ValueError("Missing required parameter: " + key)
        dest[key] = source[key]
    return dest
def path_join(*args: str, abs_path: bool = False) -> str:
    """
    Joins the path components and normalizes the path.

    Parameters
    ----------
    args : str
        Path components.
    abs_path : bool
        If True, the path is converted to be absolute.

    Returns
    -------
    str
        Joined path, normalized, with forward slashes (also on Windows).
    """
    joined = os.path.join(*args)
    result = os.path.abspath(joined) if abs_path else joined
    # Canonicalize separators so paths compare equal across platforms.
    return os.path.normpath(result).replace("\\", "/")
def prepare_class_load(config: dict,
                       global_config: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]:
    """
    Extract the class instantiation parameters from the configuration.

    Merges matching keys of the global config into the class config
    (see `merge_parameters`) before returning.

    Parameters
    ----------
    config : dict
        Configuration of the optimizer. Must contain a "class" key;
        a "config" sub-dict is created in place if absent.
    global_config : dict
        Global configuration parameters (optional).

    Returns
    -------
    (class_name, class_config) : (str, dict)
        Name of the class to instantiate and its configuration.
    """
    class_name = config["class"]
    # setdefault() ensures the "config" section exists and is shared with the caller's dict.
    class_config = config.setdefault("config", {})
    merge_parameters(dest=class_config, source=global_config)
    # Guard the json.dumps() call: it is only worth the cost when DEBUG logging is on.
    if _LOG.isEnabledFor(logging.DEBUG):
        _LOG.debug("Instantiating: %s with config:\n%s",
                   class_name, json.dumps(class_config, indent=2))
    return (class_name, class_config)
def get_class_from_name(class_name: str) -> type:
    """
    Gets the class from the fully qualified name.

    Parameters
    ----------
    class_name : str
        Fully qualified class name, e.g., "pkg.module.ClassName".

    Returns
    -------
    type
        Class object.
    """
    # Split "pkg.module.Class" into the module path and the class identifier.
    # Importing the module triggers any package-level factory registration.
    module_name, _, class_id = class_name.rpartition(".")
    cls = getattr(importlib.import_module(module_name), class_id)
    assert isinstance(cls, type)
    return cls
# FIXME: Technically, this should return a type "class_name" derived from "base_class".
def instantiate_from_config(base_class: Type[BaseTypeVar], class_name: str,
                            *args: Any, **kwargs: Any) -> BaseTypeVar:
    """
    Factory method for a new class instantiated from config.

    Parameters
    ----------
    base_class : type
        Base type of the class to instantiate.
        Currently it's one of {Environment, Service, Optimizer}.
    class_name : str
        FQN of a Python class to instantiate, e.g.,
        "mlos_bench.environments.remote.VMEnv".
        Must be derived from the `base_class`.
    args : list
        Positional arguments to pass to the constructor.
    kwargs : dict
        Keyword arguments to pass to the constructor.

    Returns
    -------
    inst : Union[Environment, Service, Optimizer, Storage]
        An instance of the `class_name` class.

    Raises
    ------
    AssertionError
        If the resolved class is not a subclass of `base_class`.
    """
    impl = get_class_from_name(class_name)
    _LOG.info("Instantiating: %s :: %s", class_name, impl)
    # Check the subclass relationship *before* construction so a bad config
    # fails fast instead of running an unrelated class's __init__.
    assert issubclass(impl, base_class)
    ret: BaseTypeVar = impl(*args, **kwargs)
    assert isinstance(ret, base_class)
    return ret
def check_required_params(config: Mapping[str, Any], required_params: Iterable[str]) -> None:
    """
    Check if all required parameters are present in the configuration.
    Raise ValueError if any of the parameters are missing.

    Parameters
    ----------
    config : dict
        Free-format dictionary with the configuration
        of the service or benchmarking environment.
    required_params : Iterable[str]
        A collection of identifiers of the parameters that must be present
        in the configuration.
    """
    # Collect all missing identifiers at once so the error reports them together.
    missing_params = {name for name in required_params if name not in config}
    if missing_params:
        raise ValueError(
            "The following parameters must be provided in the configuration"
            + f" or as command line arguments: {missing_params}")
def get_git_info(path: str = __file__) -> Tuple[str, str, str]:
    """
    Get the git repository, commit hash, and local path of the given file.

    Parameters
    ----------
    path : str
        Path to the file in git repository.

    Returns
    -------
    (git_repo, git_commit, git_path) : Tuple[str, str, str]
        Git repository URL, last commit hash, and relative file path.
    """
    dirname = os.path.dirname(path)

    def _git(*cmd: str) -> str:
        # Run a git query anchored at the target file's directory.
        return subprocess.check_output(
            ["git", "-C", dirname, *cmd], text=True).strip()

    git_repo = _git("remote", "get-url", "origin")
    git_commit = _git("rev-parse", "HEAD")
    git_root = _git("rev-parse", "--show-toplevel")
    _LOG.debug("Current git branch: %s %s", git_repo, git_commit)
    # Report the path relative to the repo root, with forward slashes on all platforms.
    rel_path = os.path.relpath(os.path.abspath(path), os.path.abspath(git_root))
    return (git_repo, git_commit, rel_path.replace("\\", "/"))
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,861
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/remote/azure/azure_services_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.services.remote.azure.azure_services
"""
from unittest.mock import MagicMock, patch
import pytest
from mlos_bench.environments.status import Status
from mlos_bench.services.remote.azure.azure_services import AzureVMService
# pylint: disable=missing-function-docstring
# pylint: disable=too-many-arguments
@pytest.mark.parametrize(
    ("operation_name", "accepts_params"), [
        ("vm_start", True),
        ("vm_stop", True),
        ("vm_deprovision", True),
        ("vm_restart", True),
    ])
@pytest.mark.parametrize(
    ("http_status_code", "operation_status"), [
        (200, Status.SUCCEEDED),
        (202, Status.PENDING),
        (401, Status.FAILED),
        (404, Status.FAILED),
    ])
@patch("mlos_bench.services.remote.azure.azure_services.requests")
def test_vm_operation_status(mock_requests: MagicMock, azure_vm_service: AzureVMService, operation_name: str,
                             accepts_params: bool, http_status_code: int, operation_status: Status) -> None:
    """
    Check that each VM operation maps the Azure REST HTTP status code
    of the POST response to the expected benchmark `Status` value.
    """
    mock_response = MagicMock()
    mock_response.status_code = http_status_code
    # All VM operations issue a POST to the Azure REST API; stub its response.
    mock_requests.post.return_value = mock_response
    operation = getattr(azure_vm_service, operation_name)
    if accepts_params:
        status, _ = operation({"vmName": "test-vm"})
    else:
        status, _ = operation()
    assert status == operation_status
@patch("mlos_bench.services.remote.azure.azure_services.time.sleep")
@patch("mlos_bench.services.remote.azure.azure_services.requests")
def test_wait_vm_operation_ready(mock_requests: MagicMock, mock_sleep: MagicMock, azure_vm_service: AzureVMService) -> None:
    """
    Wait on a VM operation that completes: the service should poll the async
    results URL, sleep for the configured poll interval, and report success.
    """
    # Mock response header
    async_url = "DUMMY_ASYNC_URL"
    retry_after = 12345
    params = {
        "asyncResultsUrl": async_url,
        "vmName": "test-vm",
        "pollInterval": retry_after,
    }
    mock_status_response = MagicMock(status_code=200)
    mock_status_response.json.return_value = {
        "status": "Succeeded",
    }
    mock_requests.get.return_value = mock_status_response
    status, _ = azure_vm_service.wait_vm_operation(params)
    # Verify the poll target URL and the sleep interval actually used.
    assert (async_url, ) == mock_requests.get.call_args[0]
    assert (retry_after, ) == mock_sleep.call_args[0]
    assert status.is_succeeded()
@patch("mlos_bench.services.remote.azure.azure_services.requests")
def test_wait_vm_operation_timeout(mock_requests: MagicMock, azure_vm_service: AzureVMService) -> None:
    """
    A VM operation that never leaves the "InProgress" state must eventually
    be reported as TIMED_OUT by `wait_vm_operation`.
    """
    # Mock response header
    params = {
        "asyncResultsUrl": "DUMMY_ASYNC_URL",
        "vmName": "test-vm",
        "pollInterval": 1
    }
    mock_status_response = MagicMock(status_code=200)
    # Always report "InProgress" so the polling loop runs until its timeout.
    mock_status_response.json.return_value = {
        "status": "InProgress",
    }
    mock_requests.get.return_value = mock_status_response
    (status, _) = azure_vm_service.wait_vm_operation(params)
    assert status == Status.TIMED_OUT
@pytest.mark.parametrize(
    ("http_status_code", "operation_status"), [
        (200, Status.SUCCEEDED),
        (202, Status.PENDING),
        (401, Status.FAILED),
        (404, Status.FAILED),
    ])
@patch("mlos_bench.services.remote.azure.azure_services.requests")
def test_remote_exec_status(mock_requests: MagicMock, azure_vm_service: AzureVMService,
                            http_status_code: int, operation_status: Status) -> None:
    """
    Check that `remote_exec` maps the HTTP status code of the Azure REST
    POST response to the expected benchmark `Status` value.
    """
    script = ["command_1", "command_2"]
    mock_response = MagicMock()
    mock_response.status_code = http_status_code
    mock_requests.post.return_value = mock_response
    status, _ = azure_vm_service.remote_exec(script, config={}, env_params={})
    assert status == operation_status
@patch("mlos_bench.services.remote.azure.azure_services.requests")
def test_remote_exec_headers_output(mock_requests: MagicMock, azure_vm_service: AzureVMService) -> None:
    """
    Check that `remote_exec` extracts the Azure async-operation URL from the
    response headers and serializes the script and env params into the
    expected RunShellScript payload.
    """
    async_url_key = "asyncResultsUrl"
    async_url_value = "DUMMY_ASYNC_URL"
    script = ["command_1", "command_2"]
    mock_response = MagicMock()
    # 202 Accepted responses carry the polling URL in the Azure-AsyncOperation header.
    mock_response.status_code = 202
    mock_response.headers = {
        "Azure-AsyncOperation": async_url_value
    }
    mock_requests.post.return_value = mock_response
    _, cmd_output = azure_vm_service.remote_exec(script, config={}, env_params={
        "param_1": 123,
        "param_2": "abc",
    })
    assert async_url_key in cmd_output
    assert cmd_output[async_url_key] == async_url_value
    # env_params must be converted into the Azure {"name": ..., "value": ...} list format.
    assert mock_requests.post.call_args[1]["json"] == {
        "commandId": "RunShellScript",
        "script": script,
        "parameters": [
            {"name": "param_1", "value": 123},
            {"name": "param_2", "value": "abc"}
        ]
    }
@pytest.mark.parametrize(
    ("operation_status", "wait_output", "results_output"), [
        (Status.SUCCEEDED, {
            "properties": {
                "output": [
                    {"message": "DUMMY_STDOUT_STDERR"},
                ]
            }
        }, [
            {"message": "DUMMY_STDOUT_STDERR"},
        ]),
        (Status.PENDING, {}, {}),
        (Status.FAILED, {}, {}),
    ])
def test_get_remote_exec_results(azure_vm_service: AzureVMService, operation_status: Status,
                                 wait_output: dict, results_output: dict) -> None:
    """
    Check that `get_remote_exec_results` unwraps the script output from the
    wait-operation payload only when the operation has succeeded; otherwise
    it passes the (empty) output through unchanged.
    """
    params = {"asyncResultsUrl": "DUMMY_ASYNC_URL"}
    mock_wait_vm_operation = MagicMock()
    mock_wait_vm_operation.return_value = (operation_status, wait_output)
    # Use setattr() instead of plain attribute assignment to replace the bound
    # method with a mock without upsetting static type checkers.
    setattr(azure_vm_service, "wait_vm_operation", mock_wait_vm_operation)
    status, cmd_output = azure_vm_service.get_remote_exec_results(params)
    assert status == operation_status
    assert cmd_output == results_output
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,862
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/local/local_exec_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for the service to run the scripts locally.
"""
import sys
import pytest
import pandas
from mlos_bench.services.local.local_exec import LocalExecService, split_cmdline
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.util import path_join
# pylint: disable=redefined-outer-name
# -- Ignore pylint complaints about pytest references to
# `local_exec_service` fixture as both a function and a parameter.
def test_split_cmdline() -> None:
    """
    Test splitting a commandline into subcommands.

    Shell operators (&&, ||, |, >, ;, parentheses) must each become their own
    single-element token list, separating the plain-token subcommands.
    """
    cmdline = ". env.sh && (echo hello && echo world | tee > /tmp/test || echo foo && echo $var; true)"
    assert list(split_cmdline(cmdline)) == [
        ['.', 'env.sh'],
        ['&&'],
        ['('],
        ['echo', 'hello'],
        ['&&'],
        ['echo', 'world'],
        ['|'],
        ['tee'],
        ['>'],
        ['/tmp/test'],
        ['||'],
        ['echo', 'foo'],
        ['&&'],
        ['echo', '$var'],
        [';'],
        ['true'],
        [')'],
    ]
@pytest.fixture
def local_exec_service() -> LocalExecService:
    """
    Test fixture for LocalExecService.

    Parented to a ConfigPersistenceService so that config-path resolution works.
    """
    return LocalExecService(parent=ConfigPersistenceService())
def test_resolve_script(local_exec_service: LocalExecService) -> None:
    """
    Test local script resolution logic with complex subcommand names.

    Only the script path inside the compound command should be expanded to an
    absolute path; the other tokens must pass through untouched.
    """
    script = "os/linux/runtime/scripts/local/generate_kernel_config_script.py"
    script_abspath = local_exec_service.config_loader_service.resolve_path(script)
    orig_cmdline = f". env.sh && {script}"
    expected_cmdline = f". env.sh && {script_abspath}"
    subcmds_tokens = split_cmdline(orig_cmdline)
    # pylint: disable=protected-access
    # Resolve each subcommand separately, then stitch the tokens back together.
    subcmds_tokens = [local_exec_service._resolve_cmdline_script_path(subcmd_tokens) for subcmd_tokens in subcmds_tokens]
    cmdline_tokens = [token for subcmd_tokens in subcmds_tokens for token in subcmd_tokens]
    expanded_cmdline = " ".join(cmdline_tokens)
    assert expanded_cmdline == expected_cmdline
def test_run_script(local_exec_service: LocalExecService) -> None:
    """
    Run a script locally and check the results.

    Verifies return code, stdout content, and empty stderr.
    """
    # `echo` should work on all platforms
    (return_code, stdout, stderr) = local_exec_service.local_exec(["echo hello"])
    assert return_code == 0
    assert stdout.strip() == "hello"
    assert stderr.strip() == ""
def test_run_script_multiline(local_exec_service: LocalExecService) -> None:
    """
    Run a multiline script locally and check the results.

    Both lines should execute in order within a single invocation.
    """
    # `echo` should work on all platforms
    (return_code, stdout, stderr) = local_exec_service.local_exec([
        "echo hello",
        "echo world"
    ])
    assert return_code == 0
    assert stdout.strip().split() == ["hello", "world"]
    assert stderr.strip() == ""
def test_run_script_multiline_env(local_exec_service: LocalExecService) -> None:
    """
    Run a multiline script locally and pass the environment variables to it.

    Uses both Unix ($var) and Windows (%var%) syntax; each platform expands
    only its own form and echoes the other back literally.
    """
    # `echo` should work on all platforms
    (return_code, stdout, stderr) = local_exec_service.local_exec([
        r"echo $var",   # Unix shell
        r"echo %var%"   # Windows cmd
    ], env={"var": "VALUE", "int_var": 10})
    assert return_code == 0
    if sys.platform == 'win32':
        # cmd leaves "$var" as-is and expands "%var%".
        assert stdout.strip().split() == ["$var", "VALUE"]
    else:
        # The Unix shell expands "$var" and leaves "%var%" as-is.
        assert stdout.strip().split() == ["VALUE", "%var%"]
    assert stderr.strip() == ""
def test_run_script_read_csv(local_exec_service: LocalExecService) -> None:
    """
    Run a script locally and read the resulting CSV file.

    The script writes the CSV into a temp directory used as the working dir.
    """
    with local_exec_service.temp_dir_context() as temp_dir:
        (return_code, stdout, stderr) = local_exec_service.local_exec([
            "echo 'col1,col2'> output.csv",  # No space before '>' to make it work on Windows
            "echo '111,222' >> output.csv",
            "echo '333,444' >> output.csv",
        ], cwd=temp_dir)
        assert return_code == 0
        assert stdout.strip() == ""
        assert stderr.strip() == ""
        data = pandas.read_csv(path_join(temp_dir, "output.csv"))
        if sys.platform == 'win32':
            # Workaround for Python's subprocess module on Windows adding a
            # space inbetween the col1,col2 arg and the redirect symbol which
            # cmd poorly interprets as being part of the original string arg.
            # Without this, we get "col2 " as the second column name.
            data.rename(str.rstrip, axis='columns', inplace=True)
        assert all(data.col1 == [111, 333])
        assert all(data.col2 == [222, 444])
def test_run_script_write_read_txt(local_exec_service: LocalExecService) -> None:
    """
    Write data a temp location and run a script that updates it there.

    Pre-seeds a file, appends to it via the script, then verifies the combined contents.
    """
    with local_exec_service.temp_dir_context() as temp_dir:
        input_file = "input.txt"
        # Seed the file before the script runs so we can check the append order.
        with open(path_join(temp_dir, input_file), "wt", encoding="utf-8") as fh_input:
            fh_input.write("hello\n")
        (return_code, stdout, stderr) = local_exec_service.local_exec([
            f"echo 'world' >> {input_file}",
            f"echo 'test' >> {input_file}",
        ], cwd=temp_dir)
        assert return_code == 0
        assert stdout.strip() == ""
        assert stderr.strip() == ""
        with open(path_join(temp_dir, input_file), "rt", encoding="utf-8") as fh_input:
            assert fh_input.read().split() == ["hello", "world", "test"]
def test_run_script_fail(local_exec_service: LocalExecService) -> None:
    """
    Try to run a non-existent command.

    The service should report a non-zero return code rather than raising.
    """
    (return_code, stdout, _stderr) = local_exec_service.local_exec(["foo_bar_baz hello"])
    assert return_code != 0
    assert stdout.strip() == ""
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,863
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/optimizers/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Interfaces and wrapper classes for optimizers to be used in Autotune.
"""
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.optimizers.mock_optimizer import MockOptimizer
from mlos_bench.optimizers.one_shot_optimizer import OneShotOptimizer
from mlos_bench.optimizers.mlos_core_optimizer import MlosCoreOptimizer
# Public API of the mlos_bench optimizers package.
__all__ = [
    'Optimizer',
    'MockOptimizer',
    'OneShotOptimizer',
    'MlosCoreOptimizer',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,864
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/schemas/services/test_services_schemas.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for service schema validation.
"""
from os import path
from typing import Any, Dict, List
import pytest
from mlos_core.tests import get_all_concrete_subclasses
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.services.base_service import Service
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.services.local.temp_dir_context import TempDirContextService
from mlos_bench.tests import try_resolve_class_name
from mlos_bench.tests.config.schemas import (get_schema_test_cases,
check_test_case_against_schema,
check_test_case_config_with_extra_param)
# General testing strategy:
# - hand code a set of good/bad configs (useful to test editor schema checking)
# - enumerate and try to check that we've covered all the cases
# - for each config, load and validate against expected schema
# All hand-written good/bad service config files under ./test-cases/.
TEST_CASES = get_schema_test_cases(path.join(path.dirname(__file__), "test-cases"))
# Dynamically enumerate some of the cases we want to make sure we cover.
# Service classes that are never instantiated directly from a config file,
# so they don't need dedicated schema test cases.
NON_CONFIG_SERVICE_CLASSES = {
    ConfigPersistenceService,  # configured thru the launcher cli args
    TempDirContextService,  # ABCMeta abstract class, but no good way to test that dynamically in Python.
}
# FQNs of all concrete Service subclasses that must have schema test coverage.
expected_service_class_names = [subclass.__module__ + "." + subclass.__name__
                                for subclass
                                in get_all_concrete_subclasses(Service, pkg_name='mlos_bench')
                                if subclass not in NON_CONFIG_SERVICE_CLASSES]
assert expected_service_class_names
# Do the full cross product of all the test cases and all the Service types.
@pytest.mark.parametrize("test_case_subtype", sorted(TEST_CASES.by_subtype))
@pytest.mark.parametrize("service_class", expected_service_class_names)
def test_case_coverage_mlos_bench_service_type(test_case_subtype: str, service_class: str) -> None:
    """
    Checks to see if there is a given type of test case for the given mlos_bench Service type.
    """
    for test_case in TEST_CASES.by_subtype[test_case_subtype].values():
        config_list: List[Dict[str, Any]]
        if not isinstance(test_case.config, dict):
            continue    # type: ignore[unreachable]
        # A config without a top-level "class" is a list-of-services wrapper.
        if "class" not in test_case.config:
            config_list = test_case.config["services"]
        else:
            config_list = [test_case.config]
        for config in config_list:
            if try_resolve_class_name(config.get("class")) == service_class:
                return
    raise NotImplementedError(
        f"Missing test case for subtype {test_case_subtype} for service class {service_class}")
# Now we actually perform all of those validation tests.
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_path))
def test_service_configs_against_schema(test_case_name: str) -> None:
    """
    Checks that the service config validates against the schema.

    Both "good" and "bad" cases are covered: each is checked for the expected outcome.
    """
    check_test_case_against_schema(TEST_CASES.by_path[test_case_name], ConfigSchema.SERVICE)
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_type["good"]))
def test_service_configs_with_extra_param(test_case_name: str) -> None:
    """
    Checks that the service config fails to validate if extra params are present in certain places.

    Only "good" (otherwise valid) configs are used so the failure is attributable
    to the injected extra parameter alone.
    """
    check_test_case_config_with_extra_param(TEST_CASES.by_type["good"][test_case_name], ConfigSchema.SERVICE)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,865
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/environments/base_env_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for base environment class functionality.
"""
import pytest
from mlos_bench.environments.base_environment import Environment
# Sample group definitions for Environment._expand_groups():
# list values are spliced into the result; plain string values are inserted as-is.
_GROUPS = {
    "group": ["a", "b"],
    "list": ["c", "d"],
    "str": "efg",
    "empty": [],
    "other": ["h", "i", "j"],
}
# pylint: disable=protected-access
def test_expand_groups() -> None:
    """
    Check the dollar variable expansion for tunable groups.

    "$list" splices its elements in place, "$str" inserts the string as one
    element, and "$empty" contributes nothing.
    """
    assert Environment._expand_groups(
        ["begin", "$list", "$empty", "$str", "end"],
        _GROUPS) == ["begin", "c", "d", "efg", "end"]
def test_expand_groups_empty_input() -> None:
    """
    Make sure an empty group stays empty (no spurious expansion output).
    """
    assert Environment._expand_groups([], _GROUPS) == []
def test_expand_groups_empty_list() -> None:
    """
    Make sure an empty group expansion works properly:
    a reference to an empty group produces an empty result.
    """
    assert not Environment._expand_groups(["$empty"], _GROUPS)
def test_expand_groups_unknown() -> None:
    """
    Make sure we fail on unknown $GROUP names expansion
    (a KeyError, rather than silently dropping the reference).
    """
    with pytest.raises(KeyError):
        Environment._expand_groups(["$list", "$UNKNOWN", "$str", "end"], _GROUPS)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,866
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/config/environments/os/linux/boot/scripts/local/create_new_grub_cfg.py
|
#!/usr/bin/env python3
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Python script to parse through JSON and create new config file.
This script will be run in the SCHEDULER.
NEW_CFG will need to be copied over to the VM (/etc/default/grub.d).
"""
import json

# Input: tunable boot-time parameter values produced by the benchmarking framework.
JSON_CONFIG_FILE = "config-boot-time.json"
# Output: GRUB drop-in config fragment (to be copied to /etc/default/grub.d on the VM).
NEW_CFG = "zz-mlos-boot-params.cfg"

with open(JSON_CONFIG_FILE, 'r', encoding='UTF-8') as fh_json, \
     open(NEW_CFG, 'w', encoding='UTF-8') as fh_config:
    for key, val in json.load(fh_json).items():
        # Each line appends one "key=val" pair to GRUB_CMDLINE_LINUX_DEFAULT,
        # preserving whatever earlier config fragments already set.
        fh_config.write('GRUB_CMDLINE_LINUX_DEFAULT="$'
                        f'{{GRUB_CMDLINE_LINUX_DEFAULT}} {key}={val}"\n')
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,867
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunables_str_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests to make sure we always produce a string representation
of a TunableGroup in canonical form.
"""
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_tunable_groups_str(tunable_groups: TunableGroups) -> None:
    """
    Check that we produce the same string representation of TunableGroups,
    regardless of the order in which we declare the covariant groups and
    tunables within each covariant group.
    """
    # Same as `tunable_groups` (defined in the `conftest.py` file), but in different order:
    # groups listed kernel/boot/provision instead of the fixture's declaration order,
    # and tunables within "kernel" swapped as well.
    tunables_other = TunableGroups({
        "kernel": {
            "cost": 1,
            "params": {
                "kernel_sched_latency_ns": {
                    "type": "int",
                    "default": 2000000,
                    "range": [0, 1000000000]
                },
                "kernel_sched_migration_cost_ns": {
                    "type": "int",
                    "default": -1,
                    "range": [-1, 500000],
                    "special": [-1]
                }
            }
        },
        "boot": {
            "cost": 300,
            "params": {
                "idle": {
                    "type": "categorical",
                    "default": "halt",
                    "values": ["halt", "mwait", "noidle"]
                }
            }
        },
        "provision": {
            "cost": 1000,
            "params": {
                "vmSize": {
                    "type": "categorical",
                    "default": "Standard_B4ms",
                    "values": ["Standard_B2s", "Standard_B2ms", "Standard_B4ms"]
                }
            }
        },
    })
    # The canonical string form must not depend on declaration order.
    assert str(tunable_groups) == str(tunables_other)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,868
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/spaces/adapters/llamatune.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Implementation of LlamaTune space adapter.
"""
from typing import Dict, Optional
from warnings import warn
import ConfigSpace
import numpy as np
import numpy.typing as npt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from mlos_core.spaces.adapters.adapter import BaseSpaceAdapter
class LlamaTuneAdapter(BaseSpaceAdapter): # pylint: disable=too-many-instance-attributes
"""
Implementation of LlamaTune, a set of parameter space transformation techniques,
aimed at improving the sample-efficiency of the underlying optimizer.
"""
DEFAULT_NUM_LOW_DIMS = 16
"""Default number of dimensions in the low-dimensional search space, generated by HeSBO projection"""
DEFAULT_SPECIAL_PARAM_VALUE_BIASING_PERCENTAGE = .2
"""Default percentage of bias for each special parameter value"""
DEFAULT_MAX_UNIQUE_VALUES_PER_PARAM = 10000
"""Default number of (max) unique values of each parameter, when space discretization is used"""
    def __init__(self, *,
                 orig_parameter_space: ConfigSpace.ConfigurationSpace,
                 num_low_dims: int = DEFAULT_NUM_LOW_DIMS,
                 special_param_values: Optional[dict] = None,
                 max_unique_values_per_param: Optional[int] = DEFAULT_MAX_UNIQUE_VALUES_PER_PARAM,
                 use_approximate_reverse_mapping: bool = False):
        """
        Create a space adapter that employs LlamaTune's techniques.

        Parameters
        ----------
        orig_parameter_space : ConfigSpace.ConfigurationSpace
            The original (user-provided) parameter space to optimize.
            Must have more dimensions than `num_low_dims`.
        num_low_dims: int
            Number of dimensions used in the low-dimensional parameter search space.
        special_param_values: Optional[dict]
            Dictionary of special parameter values
            (validated by `_validate_special_param_values`; presumably each special
            value is biased during sampling — see
            DEFAULT_SPECIAL_PARAM_VALUE_BIASING_PERCENTAGE — TODO confirm).
        max_unique_values_per_param: Optional[int]:
            Number of unique values per parameter. Used to discretize the parameter space.
            If `None` space discretization is disabled.
        use_approximate_reverse_mapping: bool
            If True, configurations that were not suggested by the optimizer can
            still be registered via an approximate pseudo-inverse projection.
        """
        super().__init__(orig_parameter_space=orig_parameter_space)
        if num_low_dims >= len(orig_parameter_space):
            raise ValueError("Number of target config space dimensions should be less than those of original config space.")
        # Validate input special param values dict
        special_param_values = special_param_values or {}
        self._validate_special_param_values(special_param_values)
        # Create low-dimensional parameter search space
        self._construct_low_dim_space(num_low_dims, max_unique_values_per_param)
        # Initialize config values scaler: from (-1, 1) to (0, 1) range
        config_scaler = MinMaxScaler(feature_range=(0, 1))
        ones_vector = np.ones(len(list(self.orig_parameter_space.values())))
        config_scaler.fit([-ones_vector, ones_vector])
        self._config_scaler = config_scaler
        # Generate random mapping from low-dimensional space to original config space:
        # each original dimension gets one low dimension index and a random +/-1 sign.
        num_orig_dims = len(list(self.orig_parameter_space.values()))
        self._h_matrix = self._random_state.choice(range(num_low_dims), num_orig_dims)
        self._sigma_vector = self._random_state.choice([-1, 1], num_orig_dims)
        # Used to retrieve the low-dim point, given the high-dim one
        self._suggested_configs: Dict[ConfigSpace.Configuration, ConfigSpace.Configuration] = {}
        # Computed lazily, only if/when the approximate reverse mapping is needed.
        self._pinv_matrix: npt.NDArray
        self._use_approximate_reverse_mapping = use_approximate_reverse_mapping
    @property
    def target_parameter_space(self) -> ConfigSpace.ConfigurationSpace:
        """Get the (low-dimensional) parameter space, which is explored by the underlying optimizer."""
        return self._target_config_space
    def inverse_transform(self, configurations: pd.DataFrame) -> pd.DataFrame:
        """
        Map configurations in the original (high-dimensional) space back to their
        low-dimensional counterparts in the target space.

        Previously suggested configurations are looked up in a cache; for unseen
        ones, an approximate pseudo-inverse mapping is used if enabled, otherwise
        a ValueError is raised.
        """
        target_configurations = []
        for (_, config) in configurations.iterrows():
            configuration = ConfigSpace.Configuration(self.orig_parameter_space, values=config.to_dict())
            target_config = self._suggested_configs.get(configuration, None)
            # NOTE: HeSBO is a non-linear projection method, and does not inherently support inverse projection
            # To (partly) support this operation, we keep track of the suggested low-dim point(s) along with the
            # respective high-dim point; this way we can retrieve the low-dim point, from its high-dim counterpart.
            if target_config is None:
                # Inherently it is not supported to register points, which were not suggested by the optimizer.
                if configuration == self.orig_parameter_space.get_default_configuration():
                    # Default configuration should always be registerable.
                    pass
                elif not self._use_approximate_reverse_mapping:
                    raise ValueError(f"{repr(configuration)}\n" "The above configuration was not suggested by the optimizer. "
                                     "Approximate reverse mapping is currently disabled; thus *only* configurations suggested "
                                     "previously by the optimizer can be registered.")
                # ...yet, we try to support that by implementing an approximate reverse mapping using pseudo-inverse matrix.
                if getattr(self, '_pinv_matrix', None) is None:
                    # Lazily compute the pseudo-inverse on first use.
                    self._try_generate_approx_inverse_mapping()
                # Perform approximate reverse mapping
                # NOTE: applying special value biasing is not possible
                vector = self._config_scaler.inverse_transform([configuration.get_array()])[0]
                target_config_vector = self._pinv_matrix.dot(vector)
                target_config = ConfigSpace.Configuration(self.target_parameter_space, vector=target_config_vector)
            target_configurations.append(target_config)
        return pd.DataFrame(target_configurations, columns=list(self.target_parameter_space.keys()))
def transform(self, configuration: pd.DataFrame) -> pd.DataFrame:
    """Project a single low-dimensional configuration into the original
    (high-dimensional) parameter space, and remember the association so the
    point can later be recovered by `inverse_transform`.

    Parameters
    ----------
    configuration : pd.DataFrame
        A single-row dataframe of low-dimensional parameter values.

    Returns
    -------
    pd.DataFrame
        A single-row dataframe with values in the original parameter space.

    Raises
    ------
    ValueError
        If the input dataframe does not contain exactly one row.
    """
    num_rows = len(configuration)
    if num_rows != 1:
        raise ValueError("Configuration dataframe must contain exactly 1 row. "
                         f"Found {num_rows} rows.")
    low_dim_values = configuration.iloc[0].to_dict()
    low_dim_config = ConfigSpace.Configuration(self.target_parameter_space, values=low_dim_values)
    high_dim_values = self._transform(low_dim_values)
    high_dim_config = ConfigSpace.Configuration(self.orig_parameter_space, values=high_dim_values)
    # Add to inverse dictionary -- needed for registering the performance later
    self._suggested_configs[high_dim_config] = low_dim_config
    return pd.DataFrame([high_dim_values.values()], columns=list(self.orig_parameter_space.keys()))
def _construct_low_dim_space(self, num_low_dims: int, max_unique_values_per_param: Optional[int]) -> None:
    """Constructs the low-dimensional parameter (potentially discretized) search space.

    Parameters
    ----------
    num_low_dims : int
        Number of dimensions used in the low-dimensional parameter search space.

    max_unique_values_per_param: Optional[int]:
        Number of unique values per parameter. Used to discretize the parameter space.
        If `None` space discretization is disabled.
    """
    # Define target space parameters
    q_scaler = None
    if max_unique_values_per_param is None:
        # Continuous case: each low-dim parameter spans [-1, 1] directly.
        hyperparameters = [
            ConfigSpace.UniformFloatHyperparameter(name=f'dim_{idx}', lower=-1, upper=1)
            for idx in range(num_low_dims)
        ]
    else:
        # Currently supported optimizers do not support defining a discretized space (like ConfigSpace does using `q` kwarg).
        # Thus, to support space discretization, we define the low-dimensional space using integer hyperparameters.
        # We also employ a scaler, which scales suggested values to [-1, 1] range, used by HeSBO projection.
        hyperparameters = [
            ConfigSpace.UniformIntegerHyperparameter(name=f'dim_{idx}', lower=1, upper=max_unique_values_per_param)
            for idx in range(num_low_dims)
        ]
        # Initialize quantized values scaler: from [0, max_unique_values_per_param] to (-1, 1) range
        q_scaler = MinMaxScaler(feature_range=(-1, 1))
        ones_vector = np.ones(num_low_dims)
        max_value_vector = ones_vector * max_unique_values_per_param
        q_scaler.fit([ones_vector, max_value_vector])
    # Set to None when discretization is disabled; checked later in _transform().
    self._q_scaler = q_scaler
    # Construct low-dimensional parameter search space
    config_space = ConfigSpace.ConfigurationSpace(name=self.orig_parameter_space.name)
    config_space.random = self._random_state  # use same random state as in original parameter space
    config_space.add_hyperparameters(hyperparameters)
    self._target_config_space = config_space
def _transform(self, configuration: dict) -> dict:
    """Projects a low-dimensional point (configuration) to the high-dimensional original parameter space,
    and then biases the resulting parameter values towards their special value(s) (if any).

    Parameters
    ----------
    configuration : dict
        Configuration in the low-dimensional space.

    Returns
    -------
    configuration : dict
        Projected configuration in the high-dimensional original search space.
    """
    original_parameters = list(self.orig_parameter_space.values())
    low_dim_config_values = list(configuration.values())

    if self._q_scaler is not None:
        # Scale parameter values from [1, max_value] to [-1, 1]
        low_dim_config_values = self._q_scaler.transform([low_dim_config_values])[0]

    # Project low-dim point to original parameter space
    # using the HeSBO sparse projection (sigma signs + h index mapping).
    original_config_values = [
        self._sigma_vector[idx] * low_dim_config_values[self._h_matrix[idx]]
        for idx in range(len(original_parameters))
    ]
    # Scale parameter values to [0, 1]
    original_config_values = self._config_scaler.transform([original_config_values])[0]

    original_config = {}
    for param, norm_value in zip(original_parameters, original_config_values):
        # Clip value to force it to fall in [0, 1]
        # NOTE: HeSBO projection ensures that theoretically but due to
        # floating point ops nuances this is not always guaranteed
        value = max(0., min(1., norm_value))  # pylint: disable=redefined-loop-name

        if isinstance(param, ConfigSpace.CategoricalHyperparameter):
            index = int(value * len(param.choices))  # truncate integer part
            index = max(0, min(len(param.choices) - 1, index))
            # NOTE: potential rounding here would be unfair to first & last values
            orig_value = param.choices[index]
        elif isinstance(param, ConfigSpace.hyperparameters.NumericalHyperparameter):
            if param.name in self._special_param_values_dict:
                # Shift the normalized value towards the parameter's special value(s).
                value = self._special_param_value_scaler(param, value)

            orig_value = param._transform(value)  # pylint: disable=protected-access
            # Clamp to the parameter's declared bounds.
            orig_value = max(param.lower, min(param.upper, orig_value))
        else:
            raise NotImplementedError("Only Categorical, Integer, and Float hyperparameters are currently supported.")

        original_config[param.name] = orig_value

    return original_config
def _special_param_value_scaler(self, param: ConfigSpace.UniformIntegerHyperparameter, input_value: float) -> float:
    """Biases the special value(s) of this parameter, by shifting the normalized `input_value` towards those.

    Parameters
    ----------
    param: ConfigSpace.UniformIntegerHyperparameter
        Parameter of the original parameter space.

    input_value: float
        Normalized value for this parameter, as suggested by the underlying optimizer.

    Returns
    -------
    biased_value: float
        Normalized value after special value(s) biasing is applied.
    """
    special_values_list = self._special_param_values_dict[param.name]

    # Check if input value corresponds to some special value
    # by walking the cumulative biasing-percentage intervals.
    perc_sum = 0.
    ret: float
    for special_value, biasing_perc in special_values_list:
        perc_sum += biasing_perc
        if input_value < perc_sum:
            # Input falls in this special value's interval -- return its normalized form.
            ret = param._inverse_transform(special_value)  # pylint: disable=protected-access
            return ret

    # Scale input value uniformly to non-special values
    ret = param._inverse_transform(  # pylint: disable=protected-access
        param._transform_scalar((input_value - perc_sum) / (1 - perc_sum)))  # pylint: disable=protected-access
    return ret
# pylint: disable=too-complex,too-many-branches
def _validate_special_param_values(self, special_param_values_dict: dict) -> None:
    """Checks that the user-provided dict of special parameter values is valid.
    And assigns it to the corresponding attribute.

    Parameters
    ----------
    special_param_values_dict: dict
        User-provided dict of special parameter values.

    Raises
    ------
    ValueError: if dictionary key, valid, or structure is invalid.
    NotImplementedError: if special value is defined for a non-integer parameter
    """
    error_prefix = "Validation of special parameter values dict failed."

    all_parameters = list(self.orig_parameter_space.keys())
    sanitized_dict = {}

    for param, value in special_param_values_dict.items():
        if param not in all_parameters:
            raise ValueError(error_prefix + f"Parameter '{param}' does not exist.")

        hyperparameter = self.orig_parameter_space[param]
        if not isinstance(hyperparameter, ConfigSpace.UniformIntegerHyperparameter):
            raise NotImplementedError(error_prefix + f"Parameter '{param}' is not supported. "
                                      "Only Integer Hyperparameters are currently supported.")

        # Normalize every accepted input shape to a list of (value, biasing %) tuples.
        if isinstance(value, int):
            # User specifies a single special value -- default biasing percentage is used
            tuple_list = [(value, self.DEFAULT_SPECIAL_PARAM_VALUE_BIASING_PERCENTAGE)]
        elif isinstance(value, tuple) and [type(v) for v in value] == [int, float]:
            # User specifies both special value and biasing percentage
            tuple_list = [value]  # type: ignore[list-item]
        elif isinstance(value, list) and value:
            if all(isinstance(t, int) for t in value):
                # User specifies list of special values
                tuple_list = [(v, self.DEFAULT_SPECIAL_PARAM_VALUE_BIASING_PERCENTAGE) for v in value]
            elif all(isinstance(t, tuple) and [type(v) for v in t] == [int, float] for t in value):
                # User specifies list of tuples; each tuple defines the special value and the biasing percentage
                tuple_list = value
            else:
                raise ValueError(error_prefix + f"Invalid format in value list for parameter '{param}'. "
                                 f"Special value list should contain either integers, or (special value, biasing %) tuples.")
        else:
            raise ValueError(error_prefix + f"Invalid format for parameter '{param}'. Dict value should be "
                             "an int, a (int, float) tuple, a list of integers, or a list of (int, float) tuples.")

        # Are user-specified special values valid?
        if not all(hyperparameter.lower <= v <= hyperparameter.upper for v, _ in tuple_list):
            raise ValueError(error_prefix + f"One (or more) special values are outside of parameter '{param}' value domain.")
        # Are user-provided special values unique?
        if len(set(v for v, _ in tuple_list)) != len(tuple_list):
            raise ValueError(error_prefix + f"One (or more) special values are defined more than once for parameter '{param}'.")
        # Are biasing percentages valid?
        if not all(0 < perc < 1 for _, perc in tuple_list):
            raise ValueError(error_prefix + f"One (or more) biasing percentages for parameter '{param}' are invalid: "
                             "i.e., fall outside (0, 1) range.")

        total_percentage = sum(perc for _, perc in tuple_list)
        if total_percentage >= 1.:
            raise ValueError(error_prefix + f"Total special values percentage for parameter '{param}' surpass 100%.")
        # ... and reasonable?
        if total_percentage >= 0.5:
            warn(f"Total special values percentage for parameter '{param}' exceeds 50%.", UserWarning)

        sanitized_dict[param] = tuple_list

    self._special_param_values_dict = sanitized_dict
def _try_generate_approx_inverse_mapping(self) -> None:
    """Tries to generate an approximate reverse mapping: i.e., from high-dimensional space to the low-dimensional one.
    Reverse mapping is generated using the pseudo-inverse matrix, of original HeSBO projection matrix.
    This mapping can be potentially used to register configurations that were *not* previously suggested by the optimizer.

    NOTE: This method is experimental, and there is currently no guarantee that it works as expected.

    Raises
    ------
    RuntimeError: if reverse mapping computation fails.
    """
    from scipy.linalg import pinv, LinAlgError  # pylint: disable=import-outside-toplevel

    warn("Trying to register a configuration that was not previously suggested by the optimizer. " +
         "This inverse configuration transformation is typically not supported. " +
         "However, we will try to register this configuration using an *experimental* method.", UserWarning)

    orig_space_num_dims = len(list(self.orig_parameter_space.values()))
    target_space_num_dims = len(list(self.target_parameter_space.values()))

    # Construct dense projection matrix from sparse repr
    # (row -> high-dim parameter, col -> low-dim parameter).
    proj_matrix = np.zeros(shape=(orig_space_num_dims, target_space_num_dims))
    for row, col in enumerate(self._h_matrix):
        proj_matrix[row][col] = self._sigma_vector[row]

    # Compute pseudo-inverse matrix
    try:
        self._pinv_matrix = pinv(proj_matrix)
    except LinAlgError as err:
        raise RuntimeError(f"Unable to generate reverse mapping using pseudo-inverse matrix: {repr(err)}") from err
    assert self._pinv_matrix.shape == (target_space_num_dims, orig_space_num_dims)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,869
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for checking tunable definition rules.
"""
import json5 as json
import pytest
from mlos_bench.tunables.tunable import Tunable
def test_categorical_required_params() -> None:
    """
    Check that required parameters are present for categorical tunables.
    """
    # "values" key is intentionally misspelled to trigger validation failure.
    tunable_config = json.loads("""
    {
        "type": "categorical",
        "values_missing": ["foo", "bar", "foo"],
        "default": "foo"
    }
    """)
    with pytest.raises(ValueError):
        Tunable(name='test', config=tunable_config)
def test_categorical_wrong_params() -> None:
    """
    Disallow range param for categorical tunables.
    """
    # "range" is only valid for numerical tunables.
    tunable_config = json.loads("""
    {
        "type": "categorical",
        "values": ["foo", "bar", "foo"],
        "range": [0, 1],
        "default": "foo"
    }
    """)
    with pytest.raises(ValueError):
        Tunable(name='test', config=tunable_config)
def test_categorical_disallow_special_values() -> None:
    """
    Disallow special values for categorical values.
    """
    # "special" is only valid for numerical tunables.
    tunable_config = json.loads("""
    {
        "type": "categorical",
        "values": ["foo", "bar", "foo"],
        "special": ["baz"],
        "default": "foo"
    }
    """)
    with pytest.raises(ValueError):
        Tunable(name='test', config=tunable_config)
def test_categorical_tunable_disallow_repeats() -> None:
    """
    Disallow duplicate values in categorical tunables.
    """
    # "foo" appears twice in the values list.
    duplicate_values_config = {
        "type": "categorical",
        "values": ["foo", "bar", "foo"],
        "default": "foo",
    }
    with pytest.raises(ValueError):
        Tunable(name='test', config=duplicate_values_config)
@pytest.mark.parametrize("tunable_type", ["int", "float"])
def test_numerical_tunable_disallow_null_default(tunable_type: str) -> None:
    """
    Disallow null values as default for numerical tunables.
    """
    null_default_config = {
        "type": tunable_type,
        "range": [0, 10],
        "default": None,
    }
    with pytest.raises(ValueError):
        Tunable(name=f'test_{tunable_type}', config=null_default_config)
@pytest.mark.parametrize("tunable_type", ["int", "float"])
def test_numerical_tunable_disallow_out_of_range(tunable_type: str) -> None:
    """
    Disallow out of range values as default for numerical tunables.
    """
    # Default of 11 falls outside the [0, 10] range.
    out_of_range_config = {
        "type": tunable_type,
        "range": [0, 10],
        "default": 11,
    }
    with pytest.raises(ValueError):
        Tunable(name=f'test_{tunable_type}', config=out_of_range_config)
@pytest.mark.parametrize("tunable_type", ["int", "float"])
def test_numerical_tunable_wrong_params(tunable_type: str) -> None:
    """
    Disallow values param for numerical tunables.
    """
    # "values" is only valid for categorical tunables.
    wrong_params_config = {
        "type": tunable_type,
        "range": [0, 10],
        "values": ["foo", "bar"],
        "default": 0,
    }
    with pytest.raises(ValueError):
        Tunable(name=f'test_{tunable_type}', config=wrong_params_config)
@pytest.mark.parametrize("tunable_type", ["int", "float"])
def test_numerical_tunable_required_params(tunable_type: str) -> None:
    """
    Disallow null values param for numerical tunables.
    """
    # "range" key is intentionally misspelled to trigger validation failure.
    tunable_config = json.loads(f"""
    {{
        "type": "{tunable_type}",
        "range_missing": [0, 10],
        "default": 0
    }}
    """)
    with pytest.raises(ValueError):
        Tunable(name=f'test_{tunable_type}', config=tunable_config)
@pytest.mark.parametrize("tunable_type", ["int", "float"])
def test_numerical_tunable_invalid_range(tunable_type: str) -> None:
    """
    Disallow invalid range param for numerical tunables.
    """
    # A range must have exactly two elements.
    tunable_config = json.loads(f"""
    {{
        "type": "{tunable_type}",
        "range": [0, 10, 7],
        "default": 0
    }}
    """)
    with pytest.raises(AssertionError):
        Tunable(name=f'test_{tunable_type}', config=tunable_config)
@pytest.mark.parametrize("tunable_type", ["int", "float"])
def test_numerical_tunable_reversed_range(tunable_type: str) -> None:
    """
    Disallow reverse range param for numerical tunables.
    """
    # Lower bound (10) is greater than upper bound (0).
    tunable_config = json.loads(f"""
    {{
        "type": "{tunable_type}",
        "range": [10, 0],
        "default": 0
    }}
    """)
    with pytest.raises(ValueError):
        Tunable(name=f'test_{tunable_type}', config=tunable_config)
def test_bad_type() -> None:
    """
    Disallow bad types.
    """
    # "foo" is not a recognized tunable type.
    tunable_config = json.loads("""
    {
        "type": "foo",
        "range": [0, 10],
        "default": 0
    }
    """)
    with pytest.raises(ValueError):
        Tunable(name='test_bad_type', config=tunable_config)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,870
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/services/test_load_service_config_examples.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for loading service config examples.
"""
import logging
from typing import List
import pytest
from mlos_bench.tests.config import locate_config_examples
from mlos_bench.config.schemas.config_schemas import ConfigSchema
from mlos_bench.services.base_service import Service
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.util import path_join
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)
# Get the set of configs to test.
CONFIG_TYPE = "services"
def filter_configs(configs_to_filter: List[str]) -> List[str]:
    """If necessary, filter out json files that aren't for the module we're testing.

    Parameters
    ----------
    configs_to_filter : List[str]
        Paths of discovered config examples.

    Returns
    -------
    List[str]
        The same list object with non-service configs (e.g., ARM templates) removed.
    """
    # NOTE: The previous implementation called list.remove() while iterating the
    # same list, which can silently skip adjacent matching entries.
    # Filter into a new list first, then update the original in place so any
    # callers holding a reference to the input list still see the result.
    filtered = [
        config_path for config_path in configs_to_filter
        if not config_path.endswith("arm-templates/azuredeploy-ubuntu-vm.jsonc")
    ]
    configs_to_filter[:] = filtered
    return configs_to_filter
# Discover the built-in service config examples once at collection time;
# fail fast if none were found (otherwise the parametrized test would be empty).
configs = filter_configs(locate_config_examples(path_join(ConfigPersistenceService.BUILTIN_CONFIG_PATH, CONFIG_TYPE)))
assert configs
@pytest.mark.parametrize("config_path", configs)
def test_load_service_config_examples(config_loader_service: ConfigPersistenceService, config_path: str) -> None:
    """Tests loading a config example."""
    # Load (and schema-validate) the example config file.
    config = config_loader_service.load_config(config_path, ConfigSchema.SERVICE)
    # Make an instance of the class based on the config.
    service_inst = config_loader_service.build_service(
        config=config,
        parent=config_loader_service,
    )
    assert service_inst is not None
    assert isinstance(service_inst, Service)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,871
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/conftest.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Test fixtures for mlos_bench config loader tests.
"""
import sys
import pytest
from mlos_bench.services.config_persistence import ConfigPersistenceService
if sys.version_info < (3, 10):
from importlib_resources import files
else:
from importlib.resources import files
@pytest.fixture
def config_loader_service() -> ConfigPersistenceService:
    """Config loader service fixture."""
    # Resolve the tests' config directory via importlib resources so the
    # fixture works whether the package is installed or run from source.
    return ConfigPersistenceService(config={
        "config_path": [
            files("mlos_bench.tests.config"),
        ]
    })
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,872
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/base_fileshare.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Base class for remote file shares.
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Optional
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.fileshare_type import SupportsFileShareOps
_LOG = logging.getLogger(__name__)
class FileShareService(Service, SupportsFileShareOps, metaclass=ABCMeta):
    """
    An abstract base of all file shares.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new file share with a given config.

        Parameters
        ----------
        config : dict
            Free-format dictionary that contains the file share configuration.
            It will be passed as a constructor parameter of the class
            specified by `class_name`.
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            Parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        # Expose the fileshare operations as service mixin methods.
        self.register([
            self.download,
            self.upload,
        ])

    @abstractmethod
    def download(self, remote_path: str, local_path: str, recursive: bool = True) -> None:
        """
        Downloads contents from a remote share path to a local path.

        Parameters
        ----------
        remote_path : str
            Path to download from the remote file share, a file if recursive=False
            or a directory if recursive=True.
        local_path : str
            Path to store the downloaded content to.
        recursive : bool
            If False, ignore the subdirectories;
            if True (the default), download the entire directory tree.
        """
        # Base implementation only logs; subclasses perform the actual transfer
        # and are expected to call super() for consistent logging.
        _LOG.info("Download from File Share %s recursively: %s -> %s",
                  "" if recursive else "non", remote_path, local_path)

    @abstractmethod
    def upload(self, local_path: str, remote_path: str, recursive: bool = True) -> None:
        """
        Uploads contents from a local path to remote share path.

        Parameters
        ----------
        local_path : str
            Path to the local directory to upload contents from.
        remote_path : str
            Path in the remote file share to store the uploaded content to.
        recursive : bool
            If False, ignore the subdirectories;
            if True (the default), upload the entire directory tree.
        """
        # Base implementation only logs; subclasses perform the actual transfer
        # and are expected to call super() for consistent logging.
        _LOG.info("Upload to File Share %s recursively: %s -> %s",
                  "" if recursive else "non", local_path, remote_path)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,873
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/optimizers/random_optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Contains the RandomOptimizer class.
"""
from typing import Optional
import pandas as pd
from mlos_core.optimizers.optimizer import BaseOptimizer
class RandomOptimizer(BaseOptimizer):
    """Optimizer class that produces random suggestions.
    Useful for baseline comparison against Bayesian optimizers.

    Parameters
    ----------
    parameter_space : ConfigSpace.ConfigurationSpace
        The parameter space to optimize.
    """

    def _register(self, configurations: pd.DataFrame, scores: pd.Series,
                  context: Optional[pd.DataFrame] = None) -> None:
        """Registers the given configurations and scores.

        Doesn't do anything on the RandomOptimizer except storing configurations for logging.

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.

        scores : pd.Series
            Scores from running the configurations. The index is the same as the index of the configurations.

        context : None
            Not Yet Implemented.
        """
        if context is not None:
            raise NotImplementedError()
        # Random search has no model to update, so registration is a no-op here.
        # should we pop them from self.pending_observations?

    def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:
        """Suggests a new configuration.

        Sampled at random using ConfigSpace.

        Parameters
        ----------
        context : None
            Not Yet Implemented.

        Returns
        -------
        configuration : pd.DataFrame
            Pandas dataframe with a single row. Column names are the parameter names.
        """
        if context is not None:
            # not sure how that works here?
            raise NotImplementedError()
        # Draw a single random sample from the parameter space.
        return pd.DataFrame(dict(self.optimizer_parameter_space.sample_configuration()), index=[0])

    def register_pending(self, configurations: pd.DataFrame,
                         context: Optional[pd.DataFrame] = None) -> None:
        # Pending-observation tracking is not supported for random search.
        raise NotImplementedError()
        # self._pending_observations.append((configurations, context))
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,874
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Mock local services for testing purposes.
"""
from .mock_local_exec_service import MockLocalExecService
__all__ = [
'MockLocalExecService',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,875
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Contains the wrapper class for SMAC Bayesian optimizers.
See Also: <https://automl.github.io/SMAC3/main/index.html>
"""
from logging import warning
from pathlib import Path
from typing import Dict, List, Optional, Union, TYPE_CHECKING
from tempfile import TemporaryDirectory
import ConfigSpace
import numpy.typing as npt
import pandas as pd
from mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer
from mlos_core.spaces.adapters.adapter import BaseSpaceAdapter
from mlos_core.spaces.adapters.identity_adapter import IdentityAdapter
class SmacOptimizer(BaseBayesianOptimizer):
"""Wrapper class for SMAC based Bayesian optimization.
Parameters
----------
parameter_space : ConfigSpace.ConfigurationSpace
The parameter space to optimize.
space_adapter : BaseSpaceAdapter
The space adapter class to employ for parameter space transformations.
seed : Optional[int]
By default SMAC uses a known seed (0) to keep results reproducible.
However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.
run_name : Optional[str]
Name of this run. This is used to easily distinguish across different runs.
If set to `None` (default), SMAC will generate a hash from metadata.
output_directory : Optional[str]
The directory where SMAC output will saved. If set to `None` (default), a temporary dir will be used.
max_trials : int
Maximum number of trials (i.e., function evaluations) to be run. Defaults to 100.
Note that modifying this value directly affects the value of `n_random_init`, if latter is set to `None`.
n_random_init : Optional[int]
Number of points evaluated at start to bootstrap the optimizer.
Default depends on max_trials and number of parameters and max_ratio.
Note: it can sometimes be useful to set this to 1 when pre-warming the
optimizer from historical data.
See Also: mlos_bench.optimizer.bulk_register
max_ratio : Optional[int]
Maximum ratio of max_trials to be random configurations to be evaluated
at start to bootstrap the optimizer.
Useful if you want to explicitly control the number of random
configurations evaluated at start.
use_default_config: bool
Whether to use the default config for the first trial after random initialization.
n_random_probability: float
Probability of choosing to evaluate a random configuration during optimization.
Defaults to `0.1`. Setting this to a higher value favors exploration over exploitation.
"""
def __init__(self, *, # pylint: disable=too-many-locals
parameter_space: ConfigSpace.ConfigurationSpace,
space_adapter: Optional[BaseSpaceAdapter] = None,
seed: Optional[int] = 0,
run_name: Optional[str] = None,
output_directory: Optional[str] = None,
max_trials: int = 100,
n_random_init: Optional[int] = None,
max_ratio: Optional[float] = None,
use_default_config: bool = False,
n_random_probability: float = 0.1):
super().__init__(
parameter_space=parameter_space,
space_adapter=space_adapter,
)
# Declare at the top because we need it in __del__/cleanup()
self._temp_output_directory: Optional[TemporaryDirectory] = None
# pylint: disable=import-outside-toplevel
from smac import HyperparameterOptimizationFacade as Optimizer_Smac
from smac import Scenario
from smac.intensifier.abstract_intensifier import AbstractIntensifier
from smac.main.config_selector import ConfigSelector
from smac.random_design.probability_design import ProbabilityRandomDesign
from smac.runhistory import TrialInfo
# Store for TrialInfo instances returned by .ask()
self.trial_info_map: Dict[ConfigSpace.Configuration, TrialInfo] = {}
# The default when not specified is to use a known seed (0) to keep results reproducible.
# However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.
# https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario
seed = -1 if seed is None else seed
# Create temporary directory for SMAC output (if none provided)
if output_directory is None:
# pylint: disable=consider-using-with
try:
self._temp_output_directory = TemporaryDirectory(ignore_cleanup_errors=True) # Argument added in Python 3.10
except TypeError:
self._temp_output_directory = TemporaryDirectory()
output_directory = self._temp_output_directory.name
if n_random_init is not None:
assert isinstance(n_random_init, int) and n_random_init >= 0
if n_random_init == max_trials and use_default_config:
# Increase max budgeted trials to account for use_default_config.
max_trials += 1
scenario: Scenario = Scenario(
self.optimizer_parameter_space,
name=run_name,
output_directory=Path(output_directory),
deterministic=True,
use_default_config=use_default_config,
n_trials=max_trials,
seed=seed or -1, # if -1, SMAC will generate a random seed internally
n_workers=1, # Use a single thread for evaluating trials
)
intensifier: AbstractIntensifier = Optimizer_Smac.get_intensifier(scenario, max_config_calls=1)
config_selector: ConfigSelector = Optimizer_Smac.get_config_selector(scenario, retrain_after=1)
# TODO: When bulk registering prior configs to rewarm the optimizer,
# there is a way to inform SMAC's initial design that we have
# additional_configs and can set n_configs == 0.
# Additionally, we may want to consider encoding those values into the
# runhistory when prewarming the optimizer so that the initial design
# doesn't reperform random init.
# See Also: #488
initial_design_args: Dict[str, Union[list, int, float, Scenario]] = {
'scenario': scenario,
# Workaround a bug in SMAC that sets a default arg to a mutable
# value that can cause issues when multiple optimizers are
# instantiated with the use_default_config option within the same
# process that use different ConfigSpaces so that the second
# receives the default config from both as an additional config.
'additional_configs': []
}
if n_random_init is not None:
initial_design_args['n_configs'] = n_random_init
if n_random_init > 0.25 * max_trials and max_ratio is None:
warning(
'Number of random initial configurations (%d) is ' +
'greater than 25%% of max_trials (%d). ' +
'Consider setting max_ratio to avoid SMAC overriding n_random_init.',
n_random_init,
max_trials,
)
if max_ratio is not None:
assert isinstance(max_ratio, float) and 0.0 <= max_ratio <= 1.0
initial_design_args['max_ratio'] = max_ratio
# Use the default InitialDesign from SMAC.
# (currently SBOL instead of LatinHypercube due to better uniformity
# for initial sampling which results in lower overall samples required)
initial_design = Optimizer_Smac.get_initial_design(**initial_design_args) # type: ignore[arg-type]
# initial_design = LatinHypercubeInitialDesign(**initial_design_args) # type: ignore[arg-type]
# Workaround a bug in SMAC that doesn't pass the seed to the random
# design when generated a random_design for itself via the
# get_random_design static method when random_design is None.
assert isinstance(n_random_probability, float) and n_random_probability >= 0
random_design = ProbabilityRandomDesign(probability=n_random_probability, seed=scenario.seed)
self.base_optimizer = Optimizer_Smac(
scenario,
SmacOptimizer._dummy_target_func,
initial_design=initial_design,
intensifier=intensifier,
random_design=random_design,
config_selector=config_selector,
overwrite=True,
logging_level=False, # Use the existing logger
)
def __del__(self) -> None:
# Best-effort attempt to clean up, in case the user forgets to call .cleanup()
self.cleanup()
@property
def n_random_init(self) -> int:
"""
Gets the number of random samples to use to initialize the optimizer's search space sampling.
Note: This may not be equal to the value passed to the initializer, due to logic present in the SMAC.
See Also: max_ratio
Returns
-------
int
The number of random samples used to initialize the optimizer's search space sampling.
"""
# pylint: disable=protected-access
return self.base_optimizer._initial_design._n_configs
@staticmethod
def _dummy_target_func(config: ConfigSpace.Configuration, seed: int = 0) -> None:
"""Dummy target function for SMAC optimizer.
Since we only use the ask-and-tell interface, this is never called.
Parameters
----------
config : ConfigSpace.Configuration
Configuration to evaluate.
seed : int
Random seed to use for the target function. Not actually used.
"""
# NOTE: Providing a target function when using the ask-and-tell interface is an imperfection of the API
# -- this planned to be fixed in some future release: https://github.com/automl/SMAC3/issues/946
raise RuntimeError('This function should never be called.')
def _register(self, configurations: pd.DataFrame, scores: pd.Series, context: Optional[pd.DataFrame] = None) -> None:
"""Registers the given configurations and scores.
Parameters
----------
configurations : pd.DataFrame
Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
scores : pd.Series
Scores from running the configurations. The index is the same as the index of the configurations.
context : pd.DataFrame
Not Yet Implemented.
"""
from smac.runhistory import StatusType, TrialInfo, TrialValue # pylint: disable=import-outside-toplevel
if context is not None:
raise NotImplementedError()
# Register each trial (one-by-one)
for config, score in zip(self._to_configspace_configs(configurations), scores.tolist()):
# Retrieve previously generated TrialInfo (returned by .ask()) or create new TrialInfo instance
info: TrialInfo = self.trial_info_map.get(config, TrialInfo(config=config, seed=self.base_optimizer.scenario.seed))
value: TrialValue = TrialValue(cost=score, time=0.0, status=StatusType.SUCCESS)
self.base_optimizer.tell(info, value, save=False)
# Save optimizer once we register all configs
self.base_optimizer.optimizer.save()
def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:
"""Suggests a new configuration.
Parameters
----------
context : pd.DataFrame
Not Yet Implemented.
Returns
-------
configuration : pd.DataFrame
Pandas dataframe with a single row. Column names are the parameter names.
"""
if TYPE_CHECKING:
from smac.runhistory import TrialInfo # pylint: disable=import-outside-toplevel
if context is not None:
raise NotImplementedError()
trial: TrialInfo = self.base_optimizer.ask()
trial.config.is_valid_configuration()
self.optimizer_parameter_space.check_configuration(trial.config)
assert trial.config.config_space == self.optimizer_parameter_space
self.trial_info_map[trial.config] = trial
config_df = pd.DataFrame([trial.config], columns=list(self.optimizer_parameter_space.keys()))
return config_df
def register_pending(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> None:
raise NotImplementedError()
    def surrogate_predict(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:
        """Predict the mean objective value for each configuration using SMAC's surrogate model.

        Only valid once the initial design has been fully evaluated and the
        surrogate model has been trained; raises RuntimeError otherwise.

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
        context : pd.DataFrame
            Not Yet Implemented.

        Returns
        -------
        npt.NDArray
            1-D array of mean surrogate predictions, one per input row.
        """
        from smac.utils.configspace import convert_configurations_to_array  # pylint: disable=import-outside-toplevel
        if context is not None:
            raise NotImplementedError()
        # Space adapters (other than the identity one) transform the configs,
        # so the surrogate's inputs would not match the caller's space.
        if self._space_adapter and not isinstance(self._space_adapter, IdentityAdapter):
            raise NotImplementedError()
        # pylint: disable=protected-access
        if len(self._observations) <= self.base_optimizer._initial_design._n_configs:
            raise RuntimeError(
                'Surrogate model can make predictions *only* after all initial points have been evaluated ' +
                f'{len(self._observations)} <= {self.base_optimizer._initial_design._n_configs}')
        if self.base_optimizer._config_selector._model is None:
            raise RuntimeError('Surrogate model is not yet trained')
        configs: npt.NDArray = convert_configurations_to_array(self._to_configspace_configs(configurations))
        # predict() returns (means, variances); we only expose the means.
        mean_predictions, _ = self.base_optimizer._config_selector._model.predict(configs)
        return mean_predictions.reshape(-1,)
def acquisition_function(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:
if context is not None:
raise NotImplementedError()
if self._space_adapter:
raise NotImplementedError()
# pylint: disable=protected-access
if self.base_optimizer._config_selector._acquisition_function is None:
raise RuntimeError('Acquisition function is not yet initialized')
configs: list = self._to_configspace_configs(configurations)
return self.base_optimizer._config_selector._acquisition_function(configs).reshape(-1,)
def cleanup(self) -> None:
if self._temp_output_directory is not None:
self._temp_output_directory.cleanup()
self._temp_output_directory = None
def _to_configspace_configs(self, configurations: pd.DataFrame) -> List[ConfigSpace.Configuration]:
"""Convert a dataframe of configurations to a list of ConfigSpace configurations.
Parameters
----------
configurations : pd.DataFrame
Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
Returns
-------
configurations : list
List of ConfigSpace configurations.
"""
return [
ConfigSpace.Configuration(self.optimizer_parameter_space, values=config.to_dict())
for (_, config) in configurations.iterrows()
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,876
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/storage/trial_config_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for saving and retrieving additional parameters of pending trials.
"""
from mlos_bench.storage.base_storage import Storage
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_exp_trial_pending(exp_storage_memory_sql: Storage.Experiment,
                           tunable_groups: TunableGroups) -> None:
    """
    Schedule a trial and check that it is pending and has the right configuration.
    """
    config = {"location": "westus2", "num_repeats": 100}
    trial = exp_storage_memory_sql.new_trial(tunable_groups, config)
    # Exactly one trial must be pending, and it must be the one we just created.
    (pending,) = list(exp_storage_memory_sql.pending_trials())
    assert pending.trial_id == trial.trial_id
    assert pending.tunables == tunable_groups
    expected_config = {
        "location": "westus2",
        "num_repeats": "100",
        "experiment_id": "Test-001",
        "trial_id": 1,
    }
    assert pending.config() == expected_config
def test_exp_trial_configs(exp_storage_memory_sql: Storage.Experiment,
                           tunable_groups: TunableGroups) -> None:
    """
    Start multiple trials with two different configs and check that
    we store only two config objects in the DB.
    """
    def _spawn_trials(config: TunableGroups) -> list:
        # Three trials: two sharing the same config instance,
        # plus one from a copy with equal values but a different identity.
        return [
            exp_storage_memory_sql.new_trial(config),
            exp_storage_memory_sql.new_trial(config),
            exp_storage_memory_sql.new_trial(config.copy()),  # Same values, different instance
        ]

    config1 = tunable_groups.copy().assign({'idle': 'mwait'})
    trials1 = _spawn_trials(config1)
    assert trials1[0].config_id == trials1[1].config_id == trials1[2].config_id

    config2 = tunable_groups.copy().assign({'idle': 'halt'})
    trials2 = _spawn_trials(config2)
    assert trials2[0].config_id == trials2[1].config_id == trials2[2].config_id

    assert trials1[0].config_id != trials2[0].config_id

    pending_ids = [
        pending.config_id for pending in exp_storage_memory_sql.pending_trials()
    ]
    assert len(pending_ids) == 6
    assert len(set(pending_ids)) == 2
    assert set(pending_ids) == {trials1[0].config_id, trials2[0].config_id}
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,877
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/script_env.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Base scriptable benchmark environment.
"""
import abc
import re
from typing import Dict, Iterable, Optional
from mlos_bench.environments.base_environment import Environment
from mlos_bench.services.base_service import Service
from mlos_bench.tunables.tunable_groups import TunableGroups
class ScriptEnv(Environment, metaclass=abc.ABCMeta):
    """
    Base Environment that runs scripts for setup/run/teardown.
    """

    # Characters that cannot appear in a shell environment variable name.
    _RE_INVALID = re.compile(r"[^a-zA-Z0-9_]")

    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new environment for script execution.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration. Each config must have at least the `tunable_params`
            and the `const_args` sections. It must also have at least one of
            the following parameters: {`setup`, `run`, `teardown`}.
            Additional parameters:
                * `shell_env_params` - an array of parameters to pass to the script
                  as shell environment variables, and
                * `shell_env_params_rename` - a dictionary of {to: from} mappings
                  of the script parameters. If not specified, replace all
                  non-alphanumeric characters with underscores.
            If neither `shell_env_params` nor `shell_env_params_rename` are specified,
            pass *all* parameters to the script.
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of tunable parameters for *all* environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        super().__init__(name=name, config=config, global_config=global_config,
                         tunables=tunables, service=service)
        # Scripts for each lifecycle stage; any of them may be absent.
        self._script_setup = self.config.get("setup")
        self._script_run = self.config.get("run")
        self._script_teardown = self.config.get("teardown")
        self._shell_env_params: Optional[Iterable[str]] = self.config.get("shell_env_params")
        self._shell_env_params_rename: Dict[str, str] = self.config.get("shell_env_params_rename", {})

    def _get_env_params(self) -> Dict[str, str]:
        """
        Get the *shell* environment parameters to be passed to the script.

        Returns
        -------
        env_params : Dict[str, str]
            Parameters to pass as *shell* environment variables into the script.
            This is usually a subset of `_params` with some possible conversions.
        """
        # Build a {shell_var_name: param_name} mapping first, then resolve values.
        rename: Dict[str, str]
        if self._shell_env_params is not None:
            # Pass only the explicitly listed params (plus any extra renames).
            rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
            rename.update(self._shell_env_params_rename)
        elif self._shell_env_params_rename:
            # Only the rename map is specified - pass exactly those params.
            rename = dict(self._shell_env_params_rename)
        else:
            # Neither option is specified - pass *all* params, sanitizing their names.
            rename = {self._RE_INVALID.sub("_", key): key for key in self._params}
        return {env_name: str(self._params[param_name]) for (env_name, param_name) in rename.items()}
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,878
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/storage/sql/schema.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
DB schema definition.
"""
import logging
from typing import List, Any
from sqlalchemy import (
Engine, MetaData, Dialect, create_mock_engine,
Table, Column, Sequence, Integer, String, DateTime,
PrimaryKeyConstraint, ForeignKeyConstraint, UniqueConstraint,
)
_LOG = logging.getLogger(__name__)
# This class is internal to SqlStorage and is mostly a struct
# for all DB tables, so it's ok to disable the warnings.
# pylint: disable=too-many-instance-attributes
class _DDL:
    """
    Collects the DDL statements emitted by SQLAlchemy's mock engine
    so that `DbSchema.__str__()` can render the schema as SQL text.
    """

    def __init__(self, dialect: Dialect):
        self._dialect = dialect
        self.statements: List[str] = []

    def __call__(self, sql: Any, *_args: Any, **_kwargs: Any) -> None:
        # Compile each DDL element into a SQL string for the target dialect.
        self.statements.append(str(sql.compile(dialect=self._dialect)))

    def __repr__(self) -> str:
        joined = ";\n".join(self.statements)
        return joined + ";" if joined else ""
class DbSchema:
    """
    A class to define and create the DB schema.
    """

    def __init__(self, engine: Engine):
        """
        Declare the SQLAlchemy schema for the database.

        Note: this only *declares* the tables in metadata;
        call `.create()` to actually materialize them in the target DB.
        """
        _LOG.info("Create the DB schema for: %s", engine)
        self._engine = engine
        self._meta = MetaData()
        # Registry of experiments; one row per experiment.
        self.experiment = Table(
            "experiment",
            self._meta,
            Column("exp_id", String(255), nullable=False),
            Column("description", String(1024)),
            Column("root_env_config", String(1024), nullable=False),
            # Git coordinates of the root environment config, for reproducibility.
            Column("git_repo", String(1024), nullable=False),
            Column("git_commit", String(40), nullable=False),
            PrimaryKeyConstraint("exp_id"),
        )
        # A workaround for SQLAlchemy issue with autoincrement in DuckDB:
        if engine.dialect.name == "duckdb":
            seq_config_id = Sequence('seq_config_id')
            col_config_id = Column("config_id", Integer, seq_config_id,
                                   server_default=seq_config_id.next_value(),
                                   nullable=False, primary_key=True)
        else:
            col_config_id = Column("config_id", Integer, nullable=False,
                                   primary_key=True, autoincrement=True)
        # Deduplicated storage of tunable configurations, keyed by a content hash.
        self.config = Table(
            "config",
            self._meta,
            col_config_id,
            Column("config_hash", String(64), nullable=False, unique=True),
        )
        # One row per trial, i.e., a single run of a config within an experiment.
        self.trial = Table(
            "trial",
            self._meta,
            Column("exp_id", String(255), nullable=False),
            Column("trial_id", Integer, nullable=False),
            Column("config_id", Integer, nullable=False),
            Column("ts_start", DateTime, nullable=False, default="now"),
            Column("ts_end", DateTime),
            # Should match the text IDs of `mlos_bench.environments.Status` enum:
            Column("status", String(16), nullable=False),
            PrimaryKeyConstraint("exp_id", "trial_id"),
            ForeignKeyConstraint(["exp_id"], [self.experiment.c.exp_id]),
            ForeignKeyConstraint(["config_id"], [self.config.c.config_id]),
        )
        # Values of the tunable parameters of the experiment,
        # fixed for a particular trial config.
        self.config_param = Table(
            "config_param",
            self._meta,
            Column("config_id", Integer, nullable=False),
            Column("param_id", String(255), nullable=False),
            Column("param_value", String(255)),
            PrimaryKeyConstraint("config_id", "param_id"),
            ForeignKeyConstraint(["config_id"], [self.config.c.config_id]),
        )
        # Values of additional non-tunable parameters of the trial,
        # e.g., scheduled execution time, VM name / location, number of repeats, etc.
        self.trial_param = Table(
            "trial_param",
            self._meta,
            Column("exp_id", String(255), nullable=False),
            Column("trial_id", Integer, nullable=False),
            Column("param_id", String(255), nullable=False),
            Column("param_value", String(255)),
            PrimaryKeyConstraint("exp_id", "trial_id", "param_id"),
            ForeignKeyConstraint(["exp_id", "trial_id"],
                                 [self.trial.c.exp_id, self.trial.c.trial_id]),
        )
        # Final results (metrics) of a completed trial; one row per metric.
        self.trial_result = Table(
            "trial_result",
            self._meta,
            Column("exp_id", String(255), nullable=False),
            Column("trial_id", Integer, nullable=False),
            Column("metric_id", String(255), nullable=False),
            Column("metric_value", String(255)),
            PrimaryKeyConstraint("exp_id", "trial_id", "metric_id"),
            ForeignKeyConstraint(["exp_id", "trial_id"],
                                 [self.trial.c.exp_id, self.trial.c.trial_id]),
        )
        # Time-series telemetry collected while a trial is running.
        self.trial_telemetry = Table(
            "trial_telemetry",
            self._meta,
            Column("exp_id", String(255), nullable=False),
            Column("trial_id", Integer, nullable=False),
            Column("ts", DateTime, nullable=False, default="now"),
            Column("metric_id", String(255), nullable=False),
            Column("metric_value", String(255)),
            UniqueConstraint("exp_id", "trial_id", "ts", "metric_id"),
            ForeignKeyConstraint(["exp_id", "trial_id"],
                                 [self.trial.c.exp_id, self.trial.c.trial_id]),
        )
        _LOG.debug("Schema: %s", self._meta)

    def create(self) -> 'DbSchema':
        """
        Create the DB schema.

        Returns
        -------
        self : DbSchema
            Self-reference, for chaining.
        """
        _LOG.info("Create the DB schema")
        self._meta.create_all(self._engine)
        return self

    def __repr__(self) -> str:
        """
        Produce a string with all SQL statements required to create the schema
        from scratch in current SQL dialect.
        That is, return a collection of CREATE TABLE statements and such.
        NOTE: this method is quite heavy! We use it only once at startup
        to log the schema, and if the logging level is set to DEBUG.

        Returns
        -------
        sql : str
            A multi-line string with SQL statements to create the DB schema from scratch.
        """
        # Compile against a mock engine so no statements actually execute.
        ddl = _DDL(self._engine.dialect)
        mock_engine = create_mock_engine(self._engine.url, executor=ddl)
        self._meta.create_all(mock_engine, checkfirst=False)
        return str(ddl)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,879
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/storage/sql/trial.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Saving and updating benchmark data using SQLAlchemy backend.
"""
import logging
from datetime import datetime
from typing import List, Optional, Tuple, Union, Dict, Any
from sqlalchemy import Engine
from sqlalchemy.exc import IntegrityError
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.storage.base_storage import Storage
from mlos_bench.storage.sql.schema import DbSchema
_LOG = logging.getLogger(__name__)
class Trial(Storage.Trial):
    """
    Store the results of a single run of the experiment in SQL database.
    """

    def __init__(self, *,
                 engine: Engine, schema: DbSchema, tunables: TunableGroups,
                 experiment_id: str, trial_id: int, config_id: int,
                 opt_target: str, config: Optional[Dict[str, Any]] = None):
        super().__init__(
            tunables=tunables,
            experiment_id=experiment_id,
            trial_id=trial_id,
            config_id=config_id,
            opt_target=opt_target,
            config=config,
        )
        self._engine = engine
        self._schema = schema

    def update(self, status: Status, timestamp: datetime,
               metrics: Optional[Union[Dict[str, float], float]] = None
               ) -> Optional[Dict[str, float]]:
        # Let the base class validate/normalize the metrics first.
        metrics = super().update(status, timestamp, metrics)
        with self._engine.begin() as conn:
            try:
                # Mark the trial as finished, but only if it is not already in a
                # terminal state (guards against double updates).
                cur_status = conn.execute(
                    self._schema.trial.update().where(
                        self._schema.trial.c.exp_id == self._experiment_id,
                        self._schema.trial.c.trial_id == self._trial_id,
                        self._schema.trial.c.status.notin_(
                            ['SUCCEEDED', 'CANCELED', 'FAILED', 'TIMED_OUT']),
                    ).values(
                        status=status.name,
                        ts_end=timestamp,
                    )
                )
                # rowcount of -1 means the driver does not report affected rows.
                if cur_status.rowcount not in {1, -1}:
                    _LOG.warning("Trial %s :: update failed: %s", self, status)
                    raise RuntimeError(
                        f"Failed to update the status of the trial {self} to {status}." +
                        f" ({cur_status.rowcount} rows)")
                if metrics:
                    # Bulk-insert one row per result metric.
                    conn.execute(self._schema.trial_result.insert().values([
                        {
                            "exp_id": self._experiment_id,
                            "trial_id": self._trial_id,
                            "metric_id": key,
                            "metric_value": None if val is None else str(val),
                        }
                        for (key, val) in metrics.items()
                    ]))
            except Exception:
                # NOTE(review): `engine.begin()` already rolls back on exception,
                # so this explicit rollback looks redundant (but harmless) - verify.
                conn.rollback()
                raise
        return metrics

    def update_telemetry(self, status: Status, metrics: List[Tuple[datetime, str, Any]]) -> None:
        super().update_telemetry(status, metrics)
        # NOTE: Not every SQLAlchemy dialect supports `Insert.on_conflict_do_nothing()`
        # and we need to keep `.update_telemetry()` idempotent; hence a loop instead of
        # a bulk upsert.
        # See Also: comments in <https://github.com/microsoft/MLOS/pull/466>
        for (timestamp, key, val) in metrics:
            # One transaction per data point so a duplicate doesn't abort the rest.
            with self._engine.begin() as conn:
                try:
                    conn.execute(self._schema.trial_telemetry.insert().values(
                        exp_id=self._experiment_id,
                        trial_id=self._trial_id,
                        ts=timestamp,
                        metric_id=key,
                        metric_value=None if val is None else str(val),
                    ))
                except IntegrityError as ex:
                    # Re-run over the same data: the record is already stored.
                    _LOG.warning("Record already exists: %s :: %s", (timestamp, key, val), ex)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,880
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/mock_env.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Scheduler-side environment to mock the benchmark results.
"""
import random
import logging
from typing import Dict, Optional, Tuple
import numpy
from mlos_bench.services.base_service import Service
from mlos_bench.environments.status import Status
from mlos_bench.environments.base_environment import Environment
from mlos_bench.tunables import Tunable, TunableGroups
_LOG = logging.getLogger(__name__)
class MockEnv(Environment):
    """
    Scheduler-side environment to mock the benchmark results.
    """

    _NOISE_VAR = 0.2
    """Variance of the Gaussian noise added to the benchmark value."""

    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new environment that produces mock benchmark data.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the benchmark environment configuration.
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
            Optional arguments are `seed`, `range`, and `metrics`.
        tunables : TunableGroups
            A collection of tunable parameters for *all* environments.
        service: Service
            An optional service object. Not used by this class.
        """
        super().__init__(name=name, config=config, global_config=global_config,
                         tunables=tunables, service=service)
        # No seed => deterministic (noise-free) output.
        seed = self.config.get("seed")
        self._random = None if seed is None else random.Random(seed)
        self._range = self.config.get("range")
        self._metrics = self.config.get("metrics", ["score"])
        self._is_ready = True

    def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:
        """
        Produce mock benchmark data for one experiment.

        Returns
        -------
        (status, output) : (Status, dict)
            A pair of (Status, output) values, where `output` is a dict
            with the results or None if the status is not COMPLETED.
            The keys of the `output` dict are the names of the metrics
            specified in the config; by default it's just one metric
            named "score". All output metrics have the same value.
        """
        (status, _) = result = super().run()
        if not status.is_ready():
            return result
        # Score = mean squared normalized value of all tunables: a simple
        # convex function with values in [0, 1].
        normalized_values = [
            self._normalized(tunable) for (tunable, _group) in self._tunable_params
        ]
        score = numpy.mean(numpy.square(normalized_values))
        # Perturb with Gaussian noise (if seeded), then clip back into [0, 1]
        # and rescale to the configured output range, if any.
        noise = self._random.gauss(0, self._NOISE_VAR) if self._random else 0
        score = numpy.clip(score + noise, 0, 1)
        if self._range:
            score = self._range[0] + score * (self._range[1] - self._range[0])
        return (Status.SUCCEEDED, {metric: score for metric in self._metrics})

    @staticmethod
    def _normalized(tunable: Tunable) -> float:
        """
        Get the NORMALIZED value of a tunable,
        i.e., map its current value onto the [0, 1] range.
        """
        if tunable.is_categorical:
            normalized = (tunable.categories.index(tunable.category) /
                          float(len(tunable.categories) - 1))
        elif tunable.is_numerical:
            normalized = ((tunable.numerical_value - tunable.range[0]) /
                          float(tunable.range[1] - tunable.range[0]))
        else:
            raise ValueError("Invalid parameter type: " + tunable.type)
        # Explicitly clip the value in case of numerical errors.
        ret: float = numpy.clip(normalized, 0, 1)
        return ret
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,881
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/spaces/converters/flaml.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Contains space converters for FLAML.
"""
from typing import Dict
import sys
import ConfigSpace
import numpy as np
import flaml.tune
import flaml.tune.sample
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
FlamlDomain: TypeAlias = flaml.tune.sample.Domain
FlamlSpace: TypeAlias = Dict[str, flaml.tune.sample.Domain]
def configspace_to_flaml_space(config_space: ConfigSpace.ConfigurationSpace) -> Dict[str, FlamlDomain]:
    """Converts a ConfigSpace.ConfigurationSpace to dict.

    Parameters
    ----------
    config_space : ConfigSpace.ConfigurationSpace
        Input configuration space.

    Returns
    -------
    flaml_space : dict
        A dictionary of flaml.tune.sample.Domain objects keyed by parameter name.
    """
    # (hyperparameter type, log-scale?) -> flaml domain factory.
    numeric_factories = {
        (ConfigSpace.UniformIntegerHyperparameter, False): flaml.tune.randint,
        (ConfigSpace.UniformIntegerHyperparameter, True): flaml.tune.lograndint,
        (ConfigSpace.UniformFloatHyperparameter, False): flaml.tune.uniform,
        (ConfigSpace.UniformFloatHyperparameter, True): flaml.tune.loguniform,
    }

    def _convert(parameter: ConfigSpace.hyperparameters.Hyperparameter) -> FlamlDomain:
        if isinstance(parameter, ConfigSpace.UniformFloatHyperparameter):
            # FIXME: upper isn't included in the range
            return numeric_factories[(type(parameter), parameter.log)](parameter.lower, parameter.upper)
        if isinstance(parameter, ConfigSpace.UniformIntegerHyperparameter):
            # +1 because flaml's randint/lograndint range excludes the upper bound.
            return numeric_factories[(type(parameter), parameter.log)](parameter.lower, parameter.upper + 1)
        if isinstance(parameter, ConfigSpace.CategoricalHyperparameter):
            if len(np.unique(parameter.probabilities)) > 1:
                raise ValueError("FLAML doesn't support categorical parameters with non-uniform probabilities.")
            return flaml.tune.choice(parameter.choices)    # TODO: set order?
        raise ValueError(f"Type of parameter {parameter} ({type(parameter)}) not supported.")

    return {param.name: _convert(param) for param in config_space.values()}
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,882
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/schemas/globals/test_globals_schemas.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for CLI schema validation.
"""
from os import path
import pytest
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.tests.config.schemas import get_schema_test_cases, check_test_case_against_schema
# General testing strategy:
# - hand code a set of good/bad configs (useful to test editor schema checking)
# - for each config, load and validate against expected schema
TEST_CASES = get_schema_test_cases(path.join(path.dirname(__file__), "test-cases"))


# Now we actually perform all of those validation tests.
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_path))
def test_globals_configs_against_schema(test_case_name: str) -> None:
    """
    Checks that the CLI config validates against the schema.
    """
    test_case = TEST_CASES.by_path[test_case_name]
    check_test_case_against_schema(test_case, ConfigSchema.GLOBALS)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,883
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/remote/azure/azure_services.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A collection Service functions for managing VMs on Azure.
"""
import json
import time
import logging
from typing import Any, Callable, Dict, Iterable, Optional, Tuple
import requests
from mlos_bench.environments.status import Status
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.authenticator_type import SupportsAuth
from mlos_bench.services.types.remote_exec_type import SupportsRemoteExec
from mlos_bench.services.types.vm_provisioner_type import SupportsVMOps
from mlos_bench.util import check_required_params, merge_parameters
_LOG = logging.getLogger(__name__)
class AzureVMService(Service, SupportsVMOps, SupportsRemoteExec):
    """
    Helper methods to manage VMs on Azure.

    Wraps the Azure Resource Manager and Compute REST APIs: deploying an ARM
    template, starting/stopping/restarting/deallocating a VM, running remote
    shell commands, and polling the resulting long-running (async) operations.
    """

    # Default polling parameters; can be overridden via config
    # ("pollInterval", "pollTimeout", "requestTimeout").
    _POLL_INTERVAL = 4  # seconds
    _POLL_TIMEOUT = 300  # seconds
    _REQUEST_TIMEOUT = 5  # seconds

    # Azure Resources Deployment REST API as described in
    # https://docs.microsoft.com/en-us/rest/api/resources/deployments
    _URL_DEPLOY = (
        "https://management.azure.com" +
        "/subscriptions/{subscription}" +
        "/resourceGroups/{resource_group}" +
        "/providers/Microsoft.Resources" +
        "/deployments/{deployment_name}" +
        "?api-version=2022-05-01"
    )

    # Azure Compute REST API calls as described in
    # https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines

    # From: https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines/start
    _URL_START = (
        "https://management.azure.com" +
        "/subscriptions/{subscription}" +
        "/resourceGroups/{resource_group}" +
        "/providers/Microsoft.Compute" +
        "/virtualMachines/{vm_name}" +
        "/start" +
        "?api-version=2022-03-01"
    )

    # From: https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines/power-off
    _URL_STOP = (
        "https://management.azure.com" +
        "/subscriptions/{subscription}" +
        "/resourceGroups/{resource_group}" +
        "/providers/Microsoft.Compute" +
        "/virtualMachines/{vm_name}" +
        "/powerOff" +
        "?api-version=2022-03-01"
    )

    # From: https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines/deallocate
    _URL_DEPROVISION = (
        "https://management.azure.com" +
        "/subscriptions/{subscription}" +
        "/resourceGroups/{resource_group}" +
        "/providers/Microsoft.Compute" +
        "/virtualMachines/{vm_name}" +
        "/deallocate" +
        "?api-version=2022-03-01"
    )

    # From: https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines/restart
    _URL_REBOOT = (
        "https://management.azure.com" +
        "/subscriptions/{subscription}" +
        "/resourceGroups/{resource_group}" +
        "/providers/Microsoft.Compute" +
        "/virtualMachines/{vm_name}" +
        "/restart" +
        "?api-version=2022-03-01"
    )

    # From: https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines/run-command
    _URL_REXEC_RUN = (
        "https://management.azure.com" +
        "/subscriptions/{subscription}" +
        "/resourceGroups/{resource_group}" +
        "/providers/Microsoft.Compute" +
        "/virtualMachines/{vm_name}" +
        "/runCommand" +
        "?api-version=2022-03-01"
    )

    def __init__(self,
                 config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new instance of Azure services proxy.

        Parameters
        ----------
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration.
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            Parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        check_required_params(
            self.config, {
                "subscription",
                "resourceGroup",
                "deploymentName",
                "deploymentTemplatePath",
                "deploymentTemplateParameters",
            }
        )
        # Register methods that we want to expose to the Environment objects.
        self.register([
            self.wait_vm_deployment,
            self.wait_vm_operation,
            self.vm_provision,
            self.vm_start,
            self.vm_stop,
            self.vm_deprovision,
            self.vm_restart,
            self.remote_exec,
            self.get_remote_exec_results,
        ])
        # These parameters can come from command line as strings, so conversion is needed.
        self._poll_interval = float(self.config.get("pollInterval", self._POLL_INTERVAL))
        self._poll_timeout = float(self.config.get("pollTimeout", self._POLL_TIMEOUT))
        self._request_timeout = float(self.config.get("requestTimeout", self._REQUEST_TIMEOUT))
        # TODO: Provide external schema validation?
        template = self.config_loader_service.load_config(
            self.config['deploymentTemplatePath'], schema_type=None)
        assert template is not None and isinstance(template, dict)
        self._deploy_template = template
        # Deployment parameters from config, overlaid with matching global parameters.
        self._deploy_params = merge_parameters(
            dest=self.config['deploymentTemplateParameters'].copy(), source=global_config)

    def _get_headers(self) -> dict:
        """
        Get the headers for the REST API calls.

        Requires the parent service chain to provide `SupportsAuth`
        for the bearer token.
        """
        assert self._parent is not None and isinstance(self._parent, SupportsAuth), \
            "Authorization service not provided. Include service-auth.jsonc?"
        return {"Authorization": "Bearer " + self._parent.get_access_token()}

    @staticmethod
    def _extract_arm_parameters(json_data: dict) -> dict:
        """
        Extract parameters from the ARM Template REST response JSON.

        Returns
        -------
        parameters : dict
            Flat dictionary of parameters and their values.
            Parameters whose "value" is absent or None are skipped.
        """
        return {
            key: val.get("value")
            for (key, val) in json_data.get("properties", {}).get("parameters", {}).items()
            if val.get("value") is not None
        }

    def _azure_vm_post_helper(self, params: dict, url: str) -> Tuple[Status, dict]:
        """
        General pattern for performing an action on an Azure VM via its REST API.

        Parameters
        ----------
        params: dict
            Flat dictionary of (key, value) pairs of tunable parameters.
        url: str
            REST API url for the target to perform on the Azure VM.
            Should be a url that we intend to POST to.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result.
            Status is one of {PENDING, SUCCEEDED, FAILED}
            Result will have a value for 'asyncResultsUrl' if status is PENDING,
            and 'pollInterval' if suggested by the API.
        """
        _LOG.debug("Request: POST %s", url)
        response = requests.post(url, headers=self._get_headers(), timeout=self._request_timeout)
        _LOG.debug("Response: %s", response)
        # Logical flow for async operations based on:
        # https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/async-operations
        if response.status_code == 200:
            # Operation completed synchronously.
            return (Status.SUCCEEDED, params.copy())
        elif response.status_code == 202:
            # Accepted: the operation continues asynchronously; capture the
            # URL to poll for results (Azure-AsyncOperation takes precedence).
            result = params.copy()
            if "Azure-AsyncOperation" in response.headers:
                result["asyncResultsUrl"] = response.headers.get("Azure-AsyncOperation")
            elif "Location" in response.headers:
                result["asyncResultsUrl"] = response.headers.get("Location")
            if "Retry-After" in response.headers:
                result["pollInterval"] = float(response.headers["Retry-After"])
            return (Status.PENDING, result)
        else:
            _LOG.error("Response: %s :: %s", response, response.text)
            # _LOG.error("Bad Request:\n%s", response.request.body)
            return (Status.FAILED, {})

    def _check_vm_operation_status(self, params: dict) -> Tuple[Status, dict]:
        """
        Checks the status of a pending operation on an Azure VM.

        Parameters
        ----------
        params: dict
            Flat dictionary of (key, value) pairs of tunable parameters.
            Must have the "asyncResultsUrl" key to get the results.
            If the key is not present, return Status.PENDING.

        Returns
        -------
        result : (Status, dict)
            A pair of Status and result.
            Status is one of {PENDING, RUNNING, SUCCEEDED, FAILED}
            Result is info on the operation runtime if SUCCEEDED, otherwise {}.
        """
        url = params.get("asyncResultsUrl")
        if url is None:
            return Status.PENDING, {}
        try:
            response = requests.get(url, headers=self._get_headers(), timeout=self._request_timeout)
        except requests.exceptions.ReadTimeout:
            _LOG.warning("Request timed out: %s", url)
            # A poll timeout is treated as "still running" so the caller keeps polling.
            # return Status.TIMED_OUT, {}
            return Status.RUNNING, {}
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Response: %s\n%s", response,
                       json.dumps(response.json(), indent=2)
                       if response.content else "")
        if response.status_code == 200:
            output = response.json()
            status = output.get("status")
            if status == "InProgress":
                return Status.RUNNING, {}
            elif status == "Succeeded":
                return Status.SUCCEEDED, output
        _LOG.error("Response: %s :: %s", response, response.text)
        return Status.FAILED, {}

    def wait_vm_deployment(self, is_setup: bool, params: dict) -> Tuple[Status, dict]:
        """
        Waits for a pending operation on an Azure VM to resolve to SUCCEEDED or FAILED.
        Return TIMED_OUT when timing out.

        Parameters
        ----------
        is_setup : bool
            If True, wait for VM being deployed; otherwise, wait for successful deprovisioning.
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict)
            A pair of Status and result.
            Status is one of {PENDING, SUCCEEDED, FAILED, TIMED_OUT}
            Result is info on the operation runtime if SUCCEEDED, otherwise {}.
        """
        _LOG.info("Wait for %s to %s", params["deploymentName"],
                  "provision" if is_setup else "deprovision")
        # Keep polling while the deployment is still PENDING.
        return self._wait_while(self._check_deployment, Status.PENDING, params)

    def wait_vm_operation(self, params: dict) -> Tuple[Status, dict]:
        """
        Waits for a pending operation on an Azure VM to resolve to SUCCEEDED or FAILED.
        Return TIMED_OUT when timing out.

        Parameters
        ----------
        params: dict
            Flat dictionary of (key, value) pairs of tunable parameters.
            Must have the "asyncResultsUrl" key to get the results.
            If the key is not present, return Status.PENDING.

        Returns
        -------
        result : (Status, dict)
            A pair of Status and result.
            Status is one of {PENDING, SUCCEEDED, FAILED, TIMED_OUT}
            Result is info on the operation runtime if SUCCEEDED, otherwise {}.
        """
        _LOG.info("Wait for operation on VM %s", params["vmName"])
        # Keep polling while the operation is still RUNNING.
        return self._wait_while(self._check_vm_operation_status, Status.RUNNING, params)

    def _wait_while(self, func: Callable[[dict], Tuple[Status, dict]],
                    loop_status: Status, params: dict) -> Tuple[Status, dict]:
        """
        Invoke `func` periodically while the status is equal to `loop_status`.
        Return TIMED_OUT when timing out.

        Parameters
        ----------
        func : a function
            A function that takes `params` and returns a pair of (Status, {})
        loop_status: Status
            Steady state status - keep polling `func` while it returns `loop_status`.
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.
            NOTE(review): "deploymentName" is required here even for plain VM
            operations; in practice it always comes from self.config (it is a
            required config parameter checked in __init__).

        Returns
        -------
        result : (Status, dict)
            A pair of Status and result.
        """
        config = merge_parameters(
            dest=self.config.copy(), source=params, required_keys=["deploymentName"])
        poll_period = params.get("pollInterval", self._poll_interval)
        _LOG.debug("Wait for %s status %s :: poll %.2f timeout %d s",
                   config["deploymentName"], loop_status, poll_period, self._poll_timeout)
        ts_timeout = time.time() + self._poll_timeout
        poll_delay = poll_period
        while True:
            # Wait for the suggested time first then check status
            ts_start = time.time()
            if ts_start >= ts_timeout:
                break
            if poll_delay > 0:
                _LOG.debug("Sleep for: %.2f of %.2f s", poll_delay, poll_period)
                time.sleep(poll_delay)
            (status, output) = func(params)
            if status != loop_status:
                return status, output
            ts_end = time.time()
            # Subtract the time spent in `func` so polls stay ~poll_period apart.
            poll_delay = poll_period - ts_end + ts_start
        _LOG.warning("Request timed out: %s", params)
        return (Status.TIMED_OUT, {})

    def _check_deployment(self, params: dict) -> Tuple[Status, dict]:
        """
        Check if Azure deployment exists.
        Return SUCCEEDED if true, PENDING otherwise.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.
            Merged over `self.config` to resolve the subscription, resource
            group, and deployment name.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {SUCCEEDED, PENDING, FAILED}
        """
        config = merge_parameters(
            dest=self.config.copy(),
            source=params,
            required_keys=[
                "subscription",
                "resourceGroup",
                "deploymentName",
            ]
        )
        _LOG.info("Check deployment: %s", config["deploymentName"])
        url = self._URL_DEPLOY.format(
            subscription=config["subscription"],
            resource_group=config["resourceGroup"],
            deployment_name=config["deploymentName"],
        )
        # HEAD request: only the status code matters (204 = exists, 404 = not yet).
        response = requests.head(url, headers=self._get_headers(), timeout=self._request_timeout)
        _LOG.debug("Response: %s", response)
        if response.status_code == 204:
            return (Status.SUCCEEDED, {})
        elif response.status_code == 404:
            return (Status.PENDING, {})
        _LOG.error("Response: %s :: %s", response, response.text)
        return (Status.FAILED, {})

    def vm_provision(self, params: dict) -> Tuple[Status, dict]:
        """
        Check if Azure VM is ready. Deploy a new VM, if necessary.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.
            VMEnv tunables are variable parameters that, together with the
            VMEnv configuration, are sufficient to provision a VM.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is the input `params` plus the
            parameters extracted from the response JSON, or {} if the status is FAILED.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
        config = merge_parameters(dest=self.config.copy(), source=params)
        _LOG.info("Deploy: %s :: %s", config["deploymentName"], params)
        params = merge_parameters(dest=self._deploy_params.copy(), source=params)
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Deploy: %s merged params ::\n%s",
                       config["deploymentName"], json.dumps(params, indent=2))
        url = self._URL_DEPLOY.format(
            subscription=config["subscription"],
            resource_group=config["resourceGroup"],
            deployment_name=config["deploymentName"],
        )
        # Only pass parameters that the ARM template actually declares.
        json_req = {
            "properties": {
                "mode": "Incremental",
                "template": self._deploy_template,
                "parameters": {
                    key: {"value": val} for (key, val) in params.items()
                    if key in self._deploy_template.get("parameters", {})
                }
            }
        }
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Request: PUT %s\n%s", url, json.dumps(json_req, indent=2))
        response = requests.put(url, json=json_req,
                                headers=self._get_headers(), timeout=self._request_timeout)
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Response: %s\n%s", response,
                       json.dumps(response.json(), indent=2)
                       if response.content else "")
        else:
            _LOG.info("Response: %s", response)
        if response.status_code == 200:
            # 200 = deployment already exists; nothing new to extract.
            return (Status.PENDING, config)
        elif response.status_code == 201:
            # 201 = new deployment created; pull the resolved ARM parameters.
            output = self._extract_arm_parameters(response.json())
            if _LOG.isEnabledFor(logging.DEBUG):
                _LOG.debug("Extracted parameters:\n%s", json.dumps(output, indent=2))
            params.update(output)
            params.setdefault("asyncResultsUrl", url)
            params.setdefault("deploymentName", config["deploymentName"])
            return (Status.PENDING, params)
        else:
            _LOG.error("Response: %s :: %s", response, response.text)
            # _LOG.error("Bad Request:\n%s", response.request.body)
            return (Status.FAILED, {})

    def vm_start(self, params: dict) -> Tuple[Status, dict]:
        """
        Start the VM on Azure.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
        config = merge_parameters(
            dest=self.config.copy(),
            source=params,
            required_keys=[
                "subscription",
                "resourceGroup",
                "vmName",
            ]
        )
        _LOG.info("Start VM: %s :: %s", config["vmName"], params)
        return self._azure_vm_post_helper(config, self._URL_START.format(
            subscription=config["subscription"],
            resource_group=config["resourceGroup"],
            vm_name=config["vmName"],
        ))

    def vm_stop(self, params: dict) -> Tuple[Status, dict]:
        """
        Stops the VM on Azure by initiating a graceful shutdown.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
        config = merge_parameters(
            dest=self.config.copy(),
            source=params,
            required_keys=[
                "subscription",
                "resourceGroup",
                "vmName",
            ]
        )
        _LOG.info("Stop VM: %s", config["vmName"])
        return self._azure_vm_post_helper(config, self._URL_STOP.format(
            subscription=config["subscription"],
            resource_group=config["resourceGroup"],
            vm_name=config["vmName"],
        ))

    def vm_deprovision(self, params: dict) -> Tuple[Status, dict]:
        """
        Deallocates the VM on Azure by shutting it down then releasing the compute resources.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always the same as input `params`.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
        config = merge_parameters(
            dest=self.config.copy(),
            source=params,
            required_keys=[
                "subscription",
                "resourceGroup",
                "deploymentName",
            ]
        )
        _LOG.info("Deprovision: %s", config["deploymentName"])
        # TODO: Properly deprovision all resources specified in the ARM template.
        # Currently only stops the VM (if one is named) instead of deallocating it.
        if "vmName" in config:
            return self.vm_stop(params)
        return (Status.SUCCEEDED, config)

    def vm_restart(self, params: dict) -> Tuple[Status, dict]:
        """
        Reboot the VM on Azure by initiating a graceful shutdown.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
        config = merge_parameters(
            dest=self.config.copy(),
            source=params,
            required_keys=[
                "subscription",
                "resourceGroup",
                "vmName",
            ]
        )
        _LOG.info("Reboot VM: %s", config["vmName"])
        return self._azure_vm_post_helper(config, self._URL_REBOOT.format(
            subscription=config["subscription"],
            resource_group=config["resourceGroup"],
            vm_name=config["vmName"],
        ))

    def remote_exec(self, script: Iterable[str], config: dict,
                    env_params: dict) -> Tuple[Status, dict]:
        """
        Run a command on Azure VM.

        Parameters
        ----------
        script : Iterable[str]
            A list of lines to execute as a script on a remote VM.
        config : dict
            Flat dictionary of (key, value) pairs of the Environment parameters.
            They usually come from `const_args` and `tunable_params`
            properties of the Environment.
        env_params : dict
            Parameters to pass as *shell* environment variables into the script.
            This is usually a subset of `config` with some possible conversions.

        Returns
        -------
        result : (Status, dict)
            A pair of Status and result.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
        config = merge_parameters(
            dest=self.config.copy(),
            source=config,
            required_keys=[
                "subscription",
                "resourceGroup",
                "vmName",
            ]
        )
        if _LOG.isEnabledFor(logging.INFO):
            _LOG.info("Run a script on VM: %s\n  %s", config["vmName"], "\n  ".join(script))
        json_req = {
            "commandId": "RunShellScript",
            "script": list(script),
            "parameters": [{"name": key, "value": val} for (key, val) in env_params.items()]
        }
        url = self._URL_REXEC_RUN.format(
            subscription=config["subscription"],
            resource_group=config["resourceGroup"],
            vm_name=config["vmName"],
        )
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Request: POST %s\n%s", url, json.dumps(json_req, indent=2))
        response = requests.post(
            url, json=json_req, headers=self._get_headers(), timeout=self._request_timeout)
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Response: %s\n%s", response,
                       json.dumps(response.json(), indent=2)
                       if response.content else "")
        else:
            _LOG.info("Response: %s", response)
        if response.status_code == 200:
            # TODO: extract the results from JSON response
            return (Status.SUCCEEDED, config)
        elif response.status_code == 202:
            return (Status.PENDING, {
                **config,
                "asyncResultsUrl": response.headers.get("Azure-AsyncOperation")
            })
        else:
            _LOG.error("Response: %s :: %s", response, response.text)
            # _LOG.error("Bad Request:\n%s", response.request.body)
            return (Status.FAILED, {})

    def get_remote_exec_results(self, config: dict) -> Tuple[Status, dict]:
        """
        Get the results of the asynchronously running command.

        Parameters
        ----------
        config : dict
            Flat dictionary of (key, value) pairs of tunable parameters.
            Must have the "asyncResultsUrl" key to get the results.
            If the key is not present, return Status.PENDING.

        Returns
        -------
        result : (Status, dict)
            A pair of Status and result.
            Status is one of {PENDING, SUCCEEDED, FAILED, TIMED_OUT}
        """
        _LOG.info("Check the results on VM: %s", config.get("vmName"))
        (status, result) = self.wait_vm_operation(config)
        if status.is_succeeded():
            # On success, return only the command's output section of the response.
            return (status, result.get("properties", {}).get("output", {}))
        else:
            return (status, result)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,884
|
microsoft/MLOS
|
refs/heads/main
|
/doc/source/_templates/numpydoc_docstring.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
{{index}}
{{summary}}
{{extended_summary}}
{{parameters}}
{{returns}}
{{yields}}
{{other_parameters}}
{{attributes}}
{{raises}}
{{warns}}
{{warnings}}
{{see_also}}
{{notes}}
{{references}}
{{examples}}
{{methods}}
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,885
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/storage/sql/experiment.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Saving and restoring the benchmark data using SQLAlchemy.
"""
import logging
import hashlib
from datetime import datetime
from typing import Optional, Tuple, List, Dict, Iterator, Any
from sqlalchemy import Engine, Connection, Table, column, func
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.storage.base_storage import Storage
from mlos_bench.storage.sql.schema import DbSchema
from mlos_bench.storage.sql.trial import Trial
_LOG = logging.getLogger(__name__)
class Experiment(Storage.Experiment):
"""
Logic for retrieving and storing the results of a single experiment.
"""
def __init__(self, *,
engine: Engine,
schema: DbSchema,
tunables: TunableGroups,
experiment_id: str,
trial_id: int,
root_env_config: str,
description: str,
opt_target: str):
super().__init__(tunables, experiment_id, root_env_config)
self._engine = engine
self._schema = schema
self._trial_id = trial_id
self._description = description
self._opt_target = opt_target
def _setup(self) -> None:
super()._setup()
with self._engine.begin() as conn:
# Get git info and the last trial ID for the experiment.
# pylint: disable=not-callable
exp_info = conn.execute(
self._schema.experiment.select().with_only_columns(
self._schema.experiment.c.git_repo,
self._schema.experiment.c.git_commit,
self._schema.experiment.c.root_env_config,
func.max(self._schema.trial.c.trial_id).label("trial_id"),
).join(
self._schema.trial,
self._schema.trial.c.exp_id == self._schema.experiment.c.exp_id,
isouter=True
).where(
self._schema.experiment.c.exp_id == self._experiment_id,
).group_by(
self._schema.experiment.c.git_repo,
self._schema.experiment.c.git_commit,
self._schema.experiment.c.root_env_config,
)
).fetchone()
if exp_info is None:
_LOG.info("Start new experiment: %s", self._experiment_id)
# It's a new experiment: create a record for it in the database.
conn.execute(self._schema.experiment.insert().values(
exp_id=self._experiment_id,
description=self._description,
git_repo=self._git_repo,
git_commit=self._git_commit,
root_env_config=self._root_env_config,
))
else:
if exp_info.trial_id is not None:
self._trial_id = exp_info.trial_id + 1
_LOG.info("Continue experiment: %s last trial: %s resume from: %d",
self._experiment_id, exp_info.trial_id, self._trial_id)
if exp_info.git_commit != self._git_commit:
_LOG.warning("Experiment %s git expected: %s %s",
self, exp_info.git_repo, exp_info.git_commit)
def merge(self, experiment_ids: List[str]) -> None:
_LOG.info("Merge: %s <- %s", self._experiment_id, experiment_ids)
raise NotImplementedError()
def load_config(self, config_id: int) -> Dict[str, Any]:
with self._engine.connect() as conn:
return self._get_params(conn, self._schema.config_param, config_id=config_id)
def load_telemetry(self, trial_id: int) -> List[Tuple[datetime, str, Any]]:
with self._engine.connect() as conn:
cur_telemetry = conn.execute(
self._schema.trial_telemetry.select().where(
self._schema.trial_telemetry.c.exp_id == self._experiment_id,
self._schema.trial_telemetry.c.trial_id == trial_id
).order_by(
self._schema.trial_telemetry.c.ts,
self._schema.trial_telemetry.c.metric_id,
)
)
return [(row.ts, row.metric_id, row.metric_value)
for row in cur_telemetry.fetchall()]
def load(self, opt_target: Optional[str] = None) -> Tuple[List[dict], List[Optional[float]], List[Status]]:
opt_target = opt_target or self._opt_target
(configs, scores, status) = ([], [], [])
with self._engine.connect() as conn:
cur_trials = conn.execute(
self._schema.trial.select().with_only_columns(
self._schema.trial.c.trial_id,
self._schema.trial.c.config_id,
self._schema.trial.c.status,
self._schema.trial_result.c.metric_value,
).join(
self._schema.trial_result, (
(self._schema.trial.c.exp_id == self._schema.trial_result.c.exp_id) &
(self._schema.trial.c.trial_id == self._schema.trial_result.c.trial_id)
), isouter=True
).where(
self._schema.trial.c.exp_id == self._experiment_id,
self._schema.trial.c.status.in_(['SUCCEEDED', 'FAILED', 'TIMED_OUT']),
(self._schema.trial_result.c.metric_id.is_(None) |
(self._schema.trial_result.c.metric_id == opt_target)),
).order_by(
self._schema.trial.c.trial_id.asc(),
)
)
for trial in cur_trials.fetchall():
tunables = self._get_params(
conn, self._schema.config_param, config_id=trial.config_id)
configs.append(tunables)
scores.append(None if trial.metric_value is None else float(trial.metric_value))
status.append(Status[trial.status])
return (configs, scores, status)
@staticmethod
def _get_params(conn: Connection, table: Table, **kwargs: Any) -> Dict[str, Any]:
cur_params = conn.execute(table.select().where(*[
column(key) == val for (key, val) in kwargs.items()]))
return {row.param_id: row.param_value for row in cur_params.fetchall()}
@staticmethod
def _save_params(conn: Connection, table: Table,
params: Dict[str, Any], **kwargs: Any) -> None:
conn.execute(table.insert(), [
{
**kwargs,
"param_id": key,
"param_value": None if val is None else str(val)
}
for (key, val) in params.items()
])
def pending_trials(self) -> Iterator[Storage.Trial]:
_LOG.info("Retrieve pending trials for: %s", self._experiment_id)
with self._engine.connect() as conn:
cur_trials = conn.execute(self._schema.trial.select().where(
self._schema.trial.c.exp_id == self._experiment_id,
self._schema.trial.c.ts_end.is_(None)
))
for trial in cur_trials.fetchall():
tunables = self._get_params(
conn, self._schema.config_param,
config_id=trial.config_id)
config = self._get_params(
conn, self._schema.trial_param,
exp_id=self._experiment_id, trial_id=trial.trial_id)
yield Trial(
engine=self._engine,
schema=self._schema,
# Reset .is_updated flag after the assignment:
tunables=self._tunables.copy().assign(tunables).reset(),
experiment_id=self._experiment_id,
trial_id=trial.trial_id,
config_id=trial.config_id,
opt_target=self._opt_target,
config=config,
)
def _get_config_id(self, conn: Connection, tunables: TunableGroups) -> int:
"""
Get the config ID for the given tunables. If the config does not exist,
create a new record for it.
"""
config_hash = hashlib.sha256(str(tunables).encode('utf-8')).hexdigest()
cur_config = conn.execute(self._schema.config.select().where(
self._schema.config.c.config_hash == config_hash
)).fetchone()
if cur_config is not None:
return int(cur_config.config_id) # mypy doesn't know it's always int
# Config not found, create a new one:
config_id: int = conn.execute(self._schema.config.insert().values(
config_hash=config_hash)).inserted_primary_key[0]
self._save_params(
conn, self._schema.config_param,
{tunable.name: tunable.value for (tunable, _group) in tunables},
config_id=config_id)
return config_id
    def new_trial(self, tunables: TunableGroups,
                  config: Optional[Dict[str, Any]] = None) -> Storage.Trial:
        """
        Create and persist a new trial of this experiment in PENDING state.

        Parameters
        ----------
        tunables : TunableGroups
            Tunable values to associate with the new trial.
        config : Optional[Dict[str, Any]]
            Additional (non-tunable) trial parameters to store, if any.

        Returns
        -------
        trial : Storage.Trial
            The newly created trial record.
        """
        _LOG.debug("Create trial: %s:%d", self._experiment_id, self._trial_id)
        # engine.begin() opens a transaction that commits on normal exit.
        with self._engine.begin() as conn:
            try:
                # Deduplicate the tunable values and get their config ID.
                config_id = self._get_config_id(conn, tunables)
                conn.execute(self._schema.trial.insert().values(
                    exp_id=self._experiment_id,
                    trial_id=self._trial_id,
                    config_id=config_id,
                    # NOTE(review): datetime.utcnow() produces a naive timestamp
                    # and is deprecated in Python 3.12; consider
                    # datetime.now(timezone.utc) - confirm DB schema expectations.
                    ts_start=datetime.utcnow(),
                    status='PENDING',
                ))
                if config is not None:
                    # Extra per-trial parameters go into a separate table.
                    self._save_params(
                        conn, self._schema.trial_param, config,
                        exp_id=self._experiment_id, trial_id=self._trial_id)
                trial = Trial(
                    engine=self._engine,
                    schema=self._schema,
                    tunables=tunables,
                    experiment_id=self._experiment_id,
                    trial_id=self._trial_id,
                    config_id=config_id,
                    opt_target=self._opt_target,
                    config=config,
                )
                # Only bump the trial counter after a successful insert.
                self._trial_id += 1
                return trial
            except Exception:
                # NOTE(review): engine.begin() already rolls back on exception;
                # this explicit rollback looks redundant - confirm and simplify.
                conn.rollback()
                raise
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,886
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/config/environments/os/linux/runtime/scripts/local/generate_kernel_config_script.py
|
#!/usr/bin/env python3
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Helper script to generate a script to update kernel parameters from tunables JSON.
Run: `./generate_kernel_config_script.py ./kernel-params.json ./kernel-params-meta.json ./config-kernel.sh`
"""
import json
import argparse
def _main(fname_input: str, fname_meta: str, fname_output: str) -> None:
with open(fname_input, "rt", encoding="utf-8") as fh_tunables:
tunables_data = json.load(fh_tunables)
with open(fname_meta, "rt", encoding="utf-8") as fh_meta:
tunables_meta = json.load(fh_meta)
with open(fname_output, "wt", encoding="utf-8", newline="") as fh_config:
for (key, val) in tunables_data.items():
meta = tunables_meta.get(key, {})
name_prefix = meta.get("name_prefix", "")
line = f'echo "{val}" > {name_prefix}{key}'
fh_config.write(line + "\n")
print(line)
if __name__ == "__main__":
    # Command-line entry point: three positional file paths.
    arg_parser = argparse.ArgumentParser(
        description="generate a script to update kernel parameters from tunables JSON.")
    arg_parser.add_argument("input", help="JSON file with tunable parameters.")
    arg_parser.add_argument("meta", help="JSON file with tunable parameters metadata.")
    arg_parser.add_argument("output", help="Output shell script to configure Linux kernel.")
    cli_args = arg_parser.parse_args()
    _main(cli_args.input, cli_args.meta, cli_args.output)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,887
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/optimizers/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Optimizer tests.
Note: this file is required so that mypy doesn't complain about overlapping conftest.py modules.
"""
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,888
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A collection of Service functions for mocking local exec.
"""
import logging
from typing import Any, Dict, Iterable, Mapping, Optional, Tuple, TYPE_CHECKING
from mlos_bench.services.base_service import Service
from mlos_bench.services.local.temp_dir_context import TempDirContextService
from mlos_bench.services.types.local_exec_type import SupportsLocalExec
if TYPE_CHECKING:
from mlos_bench.tunables.tunable import TunableValue
_LOG = logging.getLogger(__name__)
class MockLocalExecService(TempDirContextService, SupportsLocalExec):
    """
    Mock implementation of the local exec service for unit tests.

    Registers a no-op ``local_exec`` that always reports success
    with empty stdout/stderr.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        super().__init__(config, global_config, parent)
        # Expose local_exec() through the Service method registry.
        self.register([self.local_exec])

    def local_exec(self, script_lines: Iterable[str],
                   env: Optional[Mapping[str, "TunableValue"]] = None,
                   cwd: Optional[str] = None,
                   return_on_error: bool = False) -> Tuple[int, str, str]:
        """
        Pretend to execute the given script locally; do nothing.

        Returns
        -------
        (return_code, stdout, stderr) : Tuple[int, str, str]
            Always ``(0, "", "")``.
        """
        return (0, "", "")
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,889
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/optimizers/bayesian_optimizers/bayesian_optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Contains the wrapper classes for base Bayesian optimizers.
"""
from abc import ABCMeta, abstractmethod
from typing import Optional
import pandas as pd
import numpy.typing as npt
from mlos_core.optimizers.optimizer import BaseOptimizer
class BaseBayesianOptimizer(BaseOptimizer, metaclass=ABCMeta):
    """Abstract base class defining the interface for Bayesian optimization."""

    @abstractmethod
    def surrogate_predict(self, configurations: pd.DataFrame,
                          context: Optional[pd.DataFrame] = None) -> npt.NDArray:
        """Obtain a prediction from this Bayesian optimizer's surrogate model
        for the given configuration(s).

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters.
            The columns are parameter names and the rows are the configurations.
        context : pd.DataFrame
            Not Yet Implemented.
        """

    @abstractmethod
    def acquisition_function(self, configurations: pd.DataFrame,
                             context: Optional[pd.DataFrame] = None) -> npt.NDArray:
        """Invokes the acquisition function from this Bayesian optimizer
        for the given configuration.

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters.
            The columns are parameter names and the rows are the configurations.
        context : pd.DataFrame
            Not Yet Implemented.
        """
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,890
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/types/vm_provisioner_type.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Protocol interface for VM provisioning operations.
"""
from typing import Tuple, Protocol, runtime_checkable, TYPE_CHECKING
if TYPE_CHECKING:
from mlos_bench.environments.status import Status
@runtime_checkable
class SupportsVMOps(Protocol):
    """
    Protocol interface for VM provisioning operations.

    Implementations are provider-specific (e.g., Azure); each operation
    returns a (Status, result) pair.
    """
    def vm_provision(self, params: dict) -> Tuple["Status", dict]:
        """
        Check if VM is ready. Deploy a new VM, if necessary.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.
            VMEnv tunables are variable parameters that, together with the
            VMEnv configuration, are sufficient to provision a VM.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
    def wait_vm_deployment(self, is_setup: bool, params: dict) -> Tuple["Status", dict]:
        """
        Waits for a pending deployment operation on a VM to resolve to
        SUCCEEDED or FAILED. Return TIMED_OUT when timing out.

        Parameters
        ----------
        is_setup : bool
            If True, wait for VM being deployed; otherwise, wait for successful deprovisioning.
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict)
            A pair of Status and result.
            Status is one of {PENDING, SUCCEEDED, FAILED, TIMED_OUT}
            Result is info on the operation runtime if SUCCEEDED, otherwise {}.
        """
    def vm_start(self, params: dict) -> Tuple["Status", dict]:
        """
        Start a VM.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
    def vm_stop(self, params: dict) -> Tuple["Status", dict]:
        """
        Stops the VM by initiating a graceful shutdown.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
    def vm_restart(self, params: dict) -> Tuple["Status", dict]:
        """
        Restarts the VM by initiating a graceful shutdown.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
    def vm_deprovision(self, params: dict) -> Tuple["Status", dict]:
        """
        Deallocates the VM by shutting it down then releasing the compute resources.

        Parameters
        ----------
        params : dict
            Flat dictionary of (key, value) pairs of tunable parameters.

        Returns
        -------
        result : (Status, dict={})
            A pair of Status and result. The result is always {}.
            Status is one of {PENDING, SUCCEEDED, FAILED}
        """
    def wait_vm_operation(self, params: dict) -> Tuple["Status", dict]:
        """
        Waits for a pending operation on a VM to resolve to SUCCEEDED or FAILED.
        Return TIMED_OUT when timing out.

        Parameters
        ----------
        params: dict
            Flat dictionary of (key, value) pairs of tunable parameters.
            Must have the "asyncResultsUrl" key to get the results.
            If the key is not present, return Status.PENDING.

        Returns
        -------
        result : (Status, dict)
            A pair of Status and result.
            Status is one of {PENDING, SUCCEEDED, FAILED, TIMED_OUT}
            Result is info on the operation runtime if SUCCEEDED, otherwise {}.
        """
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,891
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunable_slice_references_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for unique references to tunables when they're loaded multiple times.
"""
import json5 as json
import pytest
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_duplicate_merging_tunable_groups(tunable_groups_config: dict) -> None:
    """
    Merging an identical copy of a TunableGroups must be a no-op, and the two
    instances must remain independent (changing one does not affect the other).
    """
    parent = TunableGroups(tunable_groups_config)
    # Simulate a second load of the very same config from disk.
    duplicate = TunableGroups(tunable_groups_config)
    (first_tunable, first_group) = next(iter(parent))
    (dup_tunable, dup_group) = next(iter(duplicate))
    assert first_tunable == dup_tunable
    assert first_group == dup_group
    # Merge before making any changes - everything should stay equal.
    snapshot = parent.copy()
    parent = parent.merge(duplicate)
    assert first_group == dup_group
    assert parent == duplicate
    assert parent == snapshot
    (retry_tunable, retry_group) = next(iter(parent))
    assert first_tunable == retry_tunable
    assert first_group == retry_group
    # Change one value to prove the two instances are separate copies.
    if first_tunable.is_categorical:
        first_tunable.category = [
            x for x in first_tunable.categories if x != first_tunable.category][0]
    elif first_tunable.is_numerical:
        first_tunable.numerical_value += 1
    assert first_tunable != dup_tunable
    assert first_group != dup_group
    assert parent != duplicate
    # Merging again is still OK since only a value changed.
    snapshot = parent.copy()
    parent = parent.merge(duplicate)
    # The parent must be unaffected by the second merge.
    assert first_tunable != dup_tunable
    assert first_group != dup_group
    assert parent != duplicate
    assert parent == snapshot
def test_overlapping_group_merge_tunable_groups(tunable_groups_config: dict) -> None:
    """
    Merging a group with the same name but different parameters/values
    must be rejected with a ValueError.
    """
    parent = TunableGroups(tunable_groups_config)
    # Same group name as in the parent config, but a different parameter
    # name and different values - this conflict must be detected.
    conflicting_json = """
    {
        "boot": {
            "cost": 300,
            "params": {
                "noidle": {
                    "description": "(different) idling method",
                    "type": "categorical",
                    "default": "nomwait",
                    "values": ["nohalt", "nomwait", "idle"]
                }
            }
        }
    }
    """
    conflicting = TunableGroups(json.loads(conflicting_json))
    with pytest.raises(ValueError):
        parent.merge(conflicting)
def test_bad_extended_merge_tunable_group(tunable_groups_config: dict) -> None:
    """
    Merging a differently-named group that redefines an existing parameter
    name must be rejected with a ValueError.
    """
    parent = TunableGroups(tunable_groups_config)
    # New group name, but the parameter name already exists in the parent.
    conflicting_json = """
    {
        "new-group": {
            "cost": 300,
            "params": {
                "idle": {
                    "type": "categorical",
                    "description": "Idling method",
                    "default": "mwait",
                    "values": ["halt", "mwait", "noidle"]
                }
            }
        }
    }
    """
    conflicting = TunableGroups(json.loads(conflicting_json))
    with pytest.raises(ValueError):
        parent.merge(conflicting)
def test_good_extended_merge_tunable_group(tunable_groups_config: dict) -> None:
    """
    Merging a new group with a previously unknown parameter must succeed
    and make the new parameter reachable from the merged groups.
    """
    parent = TunableGroups(tunable_groups_config)
    # A brand-new group with a brand-new parameter - no conflicts expected.
    extension_json = """
    {
        "new-group": {
            "cost": 300,
            "params": {
                "new-param": {
                    "type": "int",
                    "default": 0,
                    "range": [0, 10]
                }
            }
        }
    }
    """
    extension = TunableGroups(json.loads(extension_json))
    assert "new-param" not in parent
    assert "new-param" in extension
    parent = parent.merge(extension)
    assert "new-param" in parent
    (new_tunable, new_group) = parent.get_tunable("new-param")
    assert new_tunable.name == "new-param"
    assert new_group.name == "new-group"
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,892
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/spaces/spaces_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_core.spaces
"""
# pylint: disable=missing-function-docstring
from abc import ABCMeta, abstractmethod
from typing import Any, Callable, List, NoReturn, Union
import numpy as np
import numpy.typing as npt
import pytest
import scipy
import ConfigSpace as CS
from ConfigSpace.hyperparameters import NormalIntegerHyperparameter
import flaml.tune.sample
from mlos_core.spaces.converters.flaml import configspace_to_flaml_space, FlamlDomain, FlamlSpace
OptimizerSpace = Union[FlamlSpace, CS.ConfigurationSpace]
OptimizerParam = Union[FlamlDomain, CS.hyperparameters.Hyperparameter]
def assert_is_uniform(arr: npt.NDArray) -> None:
    """Assert that the sample in ``arr`` is (approximately) uniformly distributed."""
    _unique_vals, value_counts = np.unique(arr, return_counts=True)
    excess_kurtosis = scipy.stats.kurtosis(arr)
    # Chi-square goodness-of-fit on the raw counts ...
    _count_stat, count_p_value = scipy.stats.chisquare(value_counts)
    # ... and on the normalized frequencies.
    rel_freq = value_counts / len(arr)
    assert np.isclose(rel_freq.sum(), 1)
    _freq_stat, freq_p_value = scipy.stats.chisquare(rel_freq)
    # A uniform distribution has excess kurtosis of -6/5.
    assert np.isclose(excess_kurtosis, -1.2, atol=.1)
    assert count_p_value > .3
    assert freq_p_value > .5
def assert_is_log_uniform(arr: npt.NDArray, base: float = np.e) -> None:
    """Assert that ``arr`` is log-uniformly distributed in the given base."""
    # Taking logs of a log-uniform sample yields a uniform one.
    log_vals = np.log(arr) / np.log(base)
    assert_is_uniform(log_vals)
def test_is_uniform() -> None:
    """Sanity-check the uniformity assertion helper on a known-uniform sample."""
    np.random.seed(42)
    sample = np.random.uniform(1, 20, 1000)
    assert_is_uniform(sample)
def test_is_log_uniform() -> None:
    """Sanity-check the log-uniformity helper on a known log-uniform sample."""
    np.random.seed(42)
    sample = np.exp(np.random.uniform(np.log(1), np.log(20), 1000))
    assert_is_log_uniform(sample)
def invalid_conversion_function(*args: Any) -> NoReturn:
    """
    Dummy placeholder used as the default conversion function in the base
    test class (keeps pylint happy); it must never actually be invoked.
    """
    raise NotImplementedError('subclass must override conversion_function')
class BaseConversion(metaclass=ABCMeta):
    """
    Base class for testing optimizer space conversions.

    Subclasses supply the conversion function and space-specific helpers;
    the ``test_*`` methods here are shared across all conversion backends.
    """
    # Subclasses override this with the actual converter (e.g., a staticmethod).
    conversion_function: Callable[..., OptimizerSpace] = invalid_conversion_function

    @abstractmethod
    def sample(self, config_space: OptimizerSpace, n_samples: int = 1) -> OptimizerParam:
        """
        Sample from the given configuration space.

        Parameters
        ----------
        config_space : CS.ConfigurationSpace
            Configuration space to sample from.
        n_samples : int, optional
            Number of samples to use, by default 1.
        """
    @abstractmethod
    def get_parameter_names(self, config_space: OptimizerSpace) -> List[str]:
        """
        Get the parameter names from the given configuration space.

        Parameters
        ----------
        config_space : CS.ConfigurationSpace
            Configuration space.
        """
    @abstractmethod
    def categorical_counts(self, points: npt.NDArray) -> npt.NDArray:
        """
        Get the counts of each categorical value in the given points.

        Parameters
        ----------
        points : np.array
            Counts of each categorical value.
        """
    @abstractmethod
    def test_dimensionality(self) -> None:
        """
        Check that the dimensionality of the converted space is correct.
        """
    def test_unsupported_hyperparameter(self) -> None:
        # Normal-distributed hyperparameters are not supported by the converters.
        input_space = CS.ConfigurationSpace()
        input_space.add_hyperparameter(NormalIntegerHyperparameter("a", 2, 1))
        with pytest.raises(ValueError, match="NormalIntegerHyperparameter"):
            self.conversion_function(input_space)
    def test_continuous_bounds(self) -> None:
        # Samples from the converted space must respect the original bounds.
        input_space = CS.ConfigurationSpace()
        input_space.add_hyperparameter(CS.UniformFloatHyperparameter("a", lower=100, upper=200))
        input_space.add_hyperparameter(CS.UniformIntegerHyperparameter("b", lower=-10, upper=-5))
        converted_space = self.conversion_function(input_space)
        assert self.get_parameter_names(converted_space) == ["a", "b"]
        point = self.sample(converted_space)
        assert 100 <= point[0] <= 200
        assert -10 <= point[1] <= -5
    def test_uniform_samples(self) -> None:
        # Conversion must preserve the uniform distribution of both
        # float and integer hyperparameters.
        input_space = CS.ConfigurationSpace()
        input_space.add_hyperparameter(CS.UniformFloatHyperparameter("a", lower=1, upper=5))
        input_space.add_hyperparameter(CS.UniformIntegerHyperparameter("c", lower=1, upper=20))
        converted_space = self.conversion_function(input_space)
        np.random.seed(42)
        uniform, integer_uniform = self.sample(converted_space, n_samples=1000).T
        # uniform float
        assert_is_uniform(uniform)
        # Check that we get both ends of the sampled range returned to us.
        assert input_space['c'].lower in integer_uniform
        assert input_space['c'].upper in integer_uniform
        # integer uniform
        assert_is_uniform(integer_uniform)
    def test_uniform_categorical(self) -> None:
        # With two equally-weighted categories, ~50/50 split is expected.
        input_space = CS.ConfigurationSpace()
        input_space.add_hyperparameter(CS.CategoricalHyperparameter("c", choices=["foo", "bar"]))
        converted_space = self.conversion_function(input_space)
        points = self.sample(converted_space, n_samples=100)
        counts = self.categorical_counts(points)
        assert 35 < counts[0] < 65
        assert 35 < counts[1] < 65
    def test_weighted_categorical(self) -> None:
        raise NotImplementedError('subclass must override')
    def test_log_int_spaces(self) -> None:
        raise NotImplementedError('subclass must override')
    def test_log_float_spaces(self) -> None:
        raise NotImplementedError('subclass must override')
class TestFlamlConversion(BaseConversion):
    """
    Tests for ConfigSpace to Flaml parameter conversions.
    """
    # FlamlSpace is a dict of flaml.tune.sample.Domain objects, keyed by name.
    conversion_function = staticmethod(configspace_to_flaml_space)

    def sample(self, config_space: FlamlSpace, n_samples: int = 1) -> npt.NDArray:  # type: ignore[override]
        assert isinstance(config_space, dict)
        assert isinstance(next(iter(config_space.values())), flaml.tune.sample.Domain)
        # One row per sample, one column per parameter (hence the transpose).
        ret: npt.NDArray = np.array([domain.sample(size=n_samples) for domain in config_space.values()]).T
        return ret
    def get_parameter_names(self, config_space: FlamlSpace) -> List[str]:  # type: ignore[override]
        assert isinstance(config_space, dict)
        ret: List[str] = list(config_space.keys())
        return ret
    def categorical_counts(self, points: npt.NDArray) -> npt.NDArray:
        _vals, counts = np.unique(points, return_counts=True)
        assert isinstance(counts, np.ndarray)
        return counts
    def test_dimensionality(self) -> None:
        # One flaml domain per input hyperparameter.
        input_space = CS.ConfigurationSpace()
        input_space.add_hyperparameter(CS.UniformIntegerHyperparameter("a", lower=1, upper=10))
        input_space.add_hyperparameter(CS.CategoricalHyperparameter("b", choices=["bof", "bum"]))
        input_space.add_hyperparameter(CS.CategoricalHyperparameter("c", choices=["foo", "bar"]))
        output_space = configspace_to_flaml_space(input_space)
        assert len(output_space) == 3
    def test_weighted_categorical(self) -> None:
        # Flaml does not support weighted categoricals - conversion must fail.
        np.random.seed(42)
        input_space = CS.ConfigurationSpace()
        input_space.add_hyperparameter(CS.CategoricalHyperparameter("c", choices=["foo", "bar"], weights=[0.9, 0.1]))
        with pytest.raises(ValueError, match="non-uniform"):
            configspace_to_flaml_space(input_space)
    @pytest.mark.skip(reason="FIXME: flaml sampling is non-log-uniform")
    def test_log_int_spaces(self) -> None:
        np.random.seed(42)
        # integer is supported
        input_space = CS.ConfigurationSpace()
        input_space.add_hyperparameter(CS.UniformIntegerHyperparameter("d", lower=1, upper=20, log=True))
        converted_space = configspace_to_flaml_space(input_space)
        # test log integer sampling
        integer_log_uniform = self.sample(converted_space, n_samples=1000)
        integer_log_uniform = np.array(integer_log_uniform).ravel()
        # FIXME: this fails - flaml is calling np.random.uniform() on base 10
        # logs of the bounds as expected but for some reason the resulting
        # samples are more skewed towards the lower end of the range
        # See Also: https://github.com/microsoft/FLAML/issues/1104
        assert_is_log_uniform(integer_log_uniform, base=10)
    def test_log_float_spaces(self) -> None:
        np.random.seed(42)
        # continuous is supported
        input_space = CS.ConfigurationSpace()
        input_space.add_hyperparameter(CS.UniformFloatHyperparameter("b", lower=1, upper=5, log=True))
        converted_space = configspace_to_flaml_space(input_space)
        # test log float sampling
        float_log_uniform = self.sample(converted_space, n_samples=1000)
        float_log_uniform = np.array(float_log_uniform).ravel()
        assert_is_log_uniform(float_log_uniform)
if __name__ == '__main__':
    # For attaching a debugger while running a single test interactively:
    pytest.main(["-vv", "-k", "test_log_int_spaces", __file__])
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,893
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/setup.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Setup instructions for the mlos_bench package.
"""
from logging import warning
from itertools import chain
from typing import Dict, List
from setuptools import setup, find_packages
from _version import _VERSION # pylint: disable=import-private-name
# Prefer the git-derived version from setuptools_scm; fall back to the
# static version in _version.py when it is unavailable or fails.
try:
    from setuptools_scm import get_version
    version = get_version(root='..', relative_to=__file__)
    if version is not None:
        _VERSION = version  # noqa: F811
except ImportError:
    warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
    warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
extra_requires: Dict[str, List[str]] = {    # pylint: disable=consider-using-namedtuple-or-dataclass
    # Additional tools for extra functionality.
    'azure': ['azure-storage-file-share'],
    'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],
    'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],
    'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],
    'storage-sql-sqlite': ['sqlalchemy'],   # sqlite3 comes with python, so we don't need to install it.
    # Transitive extra_requires from mlos-core.
    'flaml': ['flaml[blendsearch]'],
    'smac': ['smac'],
}
# construct special 'full' extra that adds requirements for all built-in
# backend integrations and additional extra features.
extra_requires['full'] = list(set(chain(*extra_requires.values())))
extra_requires['full-tests'] = extra_requires['full'] + [
    'pytest',
    'pytest-forked',
    'pytest-xdist',
    'pytest-cov',
    'pytest-local-badge',
]
# pylint: disable=duplicate-code
MODULE_BASE_NAME = 'mlos_bench'
setup(
    name='mlos-bench',
    version=_VERSION,
    # Ship only the package itself; tests are excluded from the distribution.
    packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
    package_data={
        '': ['py.typed', '**/*.pyi'],
        # Bundle the built-in config tree (JSON configs and helper scripts).
        'mlos_bench': [
            'config/**/*.md',
            'config/**/*.jsonc',
            'config/**/*.json',
            'config/**/*.py',
            'config/**/*.sh',
            'config/**/*.cmd',
            'config/**/*.ps1',
        ],
    },
    entry_points={
        'console_scripts': [
            'mlos_bench = mlos_bench.run:_main',
        ],
    },
    install_requires=[
        'mlos-core==' + _VERSION,
        'requests',
        'json5',
        'jsonschema>=4.18.0', 'referencing>=0.29.1',
        'importlib_resources;python_version<"3.10"',
    ] + extra_requires['storage-sql-sqlite'],   # NOTE: For now sqlite is a fallback storage backend, so we always install it.
    extras_require=extra_requires,
    author='Microsoft',
    author_email='mlos-maintainers@service.microsoft.com',
    description=('MLOS Bench Python interface for benchmark automation and optimization.'),
    license='MIT',
    keywords='',
    url='https://aka.ms/mlos-core',
    python_requires='>=3.8',
)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,894
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tunables/tunable.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tunable parameter definition.
"""
import copy
import collections
import logging
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, TypedDict, Union
_LOG = logging.getLogger(__name__)
"""A tunable parameter value type alias."""
TunableValue = Union[int, float, Optional[str]]
class TunableDict(TypedDict, total=False):
    """
    A typed dict for tunable parameters.

    Mostly used for mypy type checking.

    These are the types expected to be received from the json config.
    """
    # One of "int", "float", or "categorical" (see Tunable._DTYPE).
    type: str
    description: Optional[str]
    default: TunableValue
    # Categorical tunables only: the list of allowed values.
    values: Optional[List[Optional[str]]]
    # Numerical tunables only: a (min, max) pair.
    range: Optional[Union[Sequence[int], Sequence[float]]]
    # Numerical tunables only: out-of-range values of special significance.
    special: Optional[Union[List[int], List[str]]]
    # Free-form metadata passed through from the config.
    meta: Dict[str, Any]
class Tunable:  # pylint: disable=too-many-instance-attributes
    """
    A tunable parameter definition and its current value.

    Supports int, float, and categorical parameter types (see ``_DTYPE``).
    """
    # Maps tunable types to their corresponding Python types by name.
    _DTYPE: Dict[str, Type] = {
        "int": int,
        "float": float,
        "categorical": str,
    }
    def __init__(self, name: str, config: TunableDict):
        """
        Create an instance of a new tunable parameter.

        Parameters
        ----------
        name : str
            Human-readable identifier of the tunable parameter.
        config : dict
            Python dict that represents a Tunable (e.g., deserialized from JSON)

        Raises
        ------
        ValueError
            If the config is inconsistent (via ``_sanity_check()``).
        """
        self._name = name
        self._type = config["type"]  # required
        self._description = config.get("description")
        self._default = config["default"]  # required
        self._values = config.get("values")
        self._meta: Dict[str, Any] = config.get("meta", {})
        self._range: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None
        config_range = config.get("range")
        if config_range is not None:
            # Normalize the 2-element sequence from JSON into a tuple.
            assert len(config_range) == 2, f"Invalid range: {config_range}"
            config_range = (config_range[0], config_range[1])
            self._range = config_range
        self._special = config.get("special")
        # New tunables start at their default value.
        self._current_value = self._default
        self._sanity_check()
def _sanity_check(self) -> None:
"""
Check if the status of the Tunable is valid, and throw ValueError if it is not.
"""
if self.is_categorical:
if not (self._values and isinstance(self._values, collections.abc.Iterable)):
raise ValueError("Must specify values for the categorical type")
if self._range is not None:
raise ValueError("Range must be None for the categorical type")
if len(set(self._values)) != len(self._values):
raise ValueError("Values must be unique for the categorical type")
if self._special is not None:
raise ValueError("Special values must be None for the categorical type")
elif self.is_numerical:
if self._values is not None:
raise ValueError("Values must be None for the numerical type")
if not self._range or len(self._range) != 2 or self._range[0] >= self._range[1]:
raise ValueError(f"Invalid range: {self._range}")
else:
raise ValueError(f"Invalid parameter type: {self._type}")
if not self.is_valid(self.default):
raise ValueError(f"Invalid default value: {self.default}")
def __repr__(self) -> str:
"""
Produce a human-readable version of the Tunable (mostly for logging).
Returns
-------
string : str
A human-readable version of the Tunable.
"""
return f"{self._name}={self._current_value}"
def __eq__(self, other: object) -> bool:
"""
Check if two Tunable objects are equal.
Parameters
----------
other : Tunable
A tunable object to compare to.
Returns
-------
is_equal : bool
True if the Tunables correspond to the same parameter and have the same value and type.
NOTE: ranges and special values are not currently considered in the comparison.
"""
if not isinstance(other, Tunable):
return False
return bool(
self._name == other._name and
self._type == other._type and
self._current_value == other._current_value
)
def __lt__(self, other: object) -> bool: # pylint: disable=too-many-return-statements
"""
Compare the two Tunable objects. We mostly need this to create a canonical list
of tunable objects when hashing a TunableGroup.
Parameters
----------
other : Tunable
A tunable object to compare to.
Returns
-------
is_less : bool
True if the current Tunable is less then the other one, False otherwise.
"""
if not isinstance(other, Tunable):
return False
if self._name < other._name:
return True
if self._name == other._name and self._type < other._type:
return True
if self._name == other._name and self._type == other._type:
if self.is_numerical:
assert self._current_value is not None
assert other._current_value is not None
return bool(float(self._current_value) < float(other._current_value))
# else: categorical
if self._current_value is None:
return True
if other._current_value is None:
return False
return bool(str(self._current_value) < str(other._current_value))
return False
def copy(self) -> "Tunable":
"""
Deep copy of the Tunable object.
Returns
-------
tunable : Tunable
A new Tunable object that is a deep copy of the original one.
"""
return copy.deepcopy(self)
@property
def default(self) -> TunableValue:
"""
Get the default value of the tunable.
"""
return self._default
@property
def value(self) -> TunableValue:
"""
Get the current value of the tunable.
"""
return self._current_value
@value.setter
def value(self, value: TunableValue) -> TunableValue:
"""
Set the current value of the tunable.
"""
# We need this coercion for the values produced by some optimizers
# (e.g., scikit-optimize) and for data restored from certain storage
# systems (where values can be strings).
try:
if self.is_categorical and value is None:
coerced_value = None
else:
coerced_value = self._DTYPE[self._type](value)
except Exception:
_LOG.error("Impossible conversion: %s %s <- %s %s",
self._type, self._name, type(value), value)
raise
if self._type == "int" and isinstance(value, float) and value != coerced_value:
_LOG.error("Loss of precision: %s %s <- %s %s",
self._type, self._name, type(value), value)
raise ValueError(f"Loss of precision: {self._name}={value}")
if not self.is_valid(coerced_value):
_LOG.error("Invalid assignment: %s %s <- %s %s",
self._type, self._name, type(value), value)
raise ValueError(f"Invalid value for the tunable: {self._name}={value}")
self._current_value = coerced_value
return self._current_value
def update(self, value: TunableValue) -> bool:
"""
Assign the value to the tunable. Return True if it is a new value, False otherwise.
Parameters
----------
value : Union[int, float, str]
Value to assign.
Returns
-------
is_updated : bool
True if the new value is different from the previous one, False otherwise.
"""
prev_value = self._current_value
self.value = value
return prev_value != self._current_value
def is_valid(self, value: TunableValue) -> bool:
"""
Check if the value can be assigned to the tunable.
Parameters
----------
value : Union[int, float, str]
Value to validate.
Returns
-------
is_valid : bool
True if the value is valid, False otherwise.
"""
if self.is_categorical and self._values:
return value in self._values
elif self.is_numerical and self._range:
if isinstance(value, (int, float)):
# TODO: allow special values outside of range?
return bool(self._range[0] <= value <= self._range[1]) # or value == self._default
else:
raise ValueError(f"Invalid value type for tunable {self}: {value}={type(value)}")
else:
raise ValueError(f"Invalid parameter type: {self._type}")
@property
def category(self) -> Optional[str]:
"""
Get the current value of the tunable as a number.
"""
if self.is_categorical:
return None if self._current_value is None else str(self._current_value)
else:
raise ValueError("Cannot get categorical values for a numerical tunable.")
@category.setter
def category(self, new_value: Optional[str]) -> Optional[str]:
"""
Set the current value of the tunable.
"""
assert self.is_categorical
assert isinstance(new_value, (str, type(None)))
self.value = new_value
return self.value
@property
def numerical_value(self) -> Union[int, float]:
"""
Get the current value of the tunable as a number.
"""
assert self._current_value is not None
if self._type == "int":
return int(self._current_value)
elif self._type == "float":
return float(self._current_value)
else:
raise ValueError("Cannot get numerical value for a categorical tunable.")
@numerical_value.setter
def numerical_value(self, new_value: Union[int, float]) -> Union[int, float]:
"""
Set the current numerical value of the tunable.
"""
# We need this coercion for the values produced by some optimizers
# (e.g., scikit-optimize) and for data restored from certain storage
# systems (where values can be strings).
assert self.is_numerical
self.value = new_value
return self.value
@property
def name(self) -> str:
"""
Get the name / string ID of the tunable.
"""
return self._name
@property
def type(self) -> str:
"""
Get the data type of the tunable.
Returns
-------
type : str
Data type of the tunable - one of {'int', 'float', 'categorical'}.
"""
return self._type
@property
def dtype(self) -> Type:
"""
Get the actual Python data type of the tunable.
This is useful for bulk conversions of the input data.
Returns
-------
dtype : type
Data type of the tunable - one of {int, float, str}.
"""
return self._DTYPE[self._type]
@property
def is_categorical(self) -> bool:
"""
Check if the tunable is categorical.
Returns
-------
is_categorical : bool
True if the tunable is categorical, False otherwise.
"""
return self._type == "categorical"
@property
def is_numerical(self) -> bool:
"""
Check if the tunable is an integer or float.
Returns
-------
is_int : bool
True if the tunable is an integer or float, False otherwise.
"""
return self._type in {"int", "float"}
@property
def range(self) -> Union[Tuple[int, int], Tuple[float, float]]:
"""
Get the range of the tunable if it is numerical, None otherwise.
Returns
-------
range : (number, number)
A 2-tuple of numbers that represents the range of the tunable.
Numbers can be int or float, depending on the type of the tunable.
"""
assert self.is_numerical
assert self._range is not None
return self._range
@property
def categories(self) -> List[Optional[str]]:
"""
Get the list of all possible values of a categorical tunable.
Return None if the tunable is not categorical.
Returns
-------
values : List[str]
List of all possible values of a categorical tunable.
"""
assert self.is_categorical
assert self._values is not None
return self._values
@property
def meta(self) -> Dict[str, Any]:
"""
Get the tunable's metadata. This is a free-form dictionary that can be used to
store any additional information about the tunable (e.g., the unit information).
"""
return self._meta
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,895
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tunable Environments for mlos_bench.
"""
from mlos_bench.environments.status import Status
from mlos_bench.environments.base_environment import Environment
from mlos_bench.environments.mock_env import MockEnv
from mlos_bench.environments.remote.remote_env import RemoteEnv
from mlos_bench.environments.local.local_env import LocalEnv
from mlos_bench.environments.local.local_fileshare_env import LocalFileShareEnv
from mlos_bench.environments.composite_env import CompositeEnv
# The public API of the mlos_bench.environments package.
__all__ = [
    'Status',
    'Environment',
    'MockEnv',
    'RemoteEnv',
    'LocalEnv',
    'LocalFileShareEnv',
    'CompositeEnv',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,896
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/config/schemas/config_schemas.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A simple class for describing where to find different config schemas and validating configs against them.
"""
import logging
from enum import Enum
from os import path, walk, environ
from typing import Dict, Iterator, Mapping
import json # schema files are pure json - no comments
import jsonschema
from referencing import Registry, Resource
from referencing.jsonschema import DRAFT202012
from mlos_bench.util import path_join
_LOG = logging.getLogger(__name__)
# The path to find all config schemas.
CONFIG_SCHEMA_DIR = path_join(path.dirname(__file__), abs_path=True)
# Allow skipping schema validation for tight dev cycle changes.
# It is used in `ConfigSchema.validate()` method below.
# NOTE: this may cause pytest to fail if it's expecting exceptions
# to be raised for invalid configs.
_VALIDATION_ENV_FLAG = 'MLOS_BENCH_SKIP_SCHEMA_VALIDATION'
_SKIP_VALIDATION = (environ.get(_VALIDATION_ENV_FLAG, 'false').lower()
in {'true', 'y', 'yes', 'on', '1'})
# Note: we separate out the SchemaStore from a class method on ConfigSchema
# because of issues with mypy/pylint and non-Enum-member class members.
class SchemaStore(Mapping):
    """
    A simple class for storing schemas and subschemas for the validator to reference.

    Schemas are loaded lazily from CONFIG_SCHEMA_DIR on first access and are
    indexed both by file path and by their "$id".
    """

    # A class member mapping of schema id to schema object.
    _SCHEMA_STORE: Dict[str, dict] = {}
    # Cached referencing.Registry built from _SCHEMA_STORE.
    _REGISTRY: Registry = Registry()

    def __len__(self) -> int:
        # NOTE(review): __len__ and __iter__ do not trigger the lazy load,
        # so they report empty until __getitem__ or registry is used -- confirm intended.
        return self._SCHEMA_STORE.__len__()

    def __iter__(self) -> Iterator:
        return self._SCHEMA_STORE.__iter__()

    def __getitem__(self, key: str) -> dict:
        """Gets the schema object for the given key."""
        if not self._SCHEMA_STORE:
            self._load_schemas()
        return self._SCHEMA_STORE[key]

    @classmethod
    def _load_schemas(cls) -> None:
        """Loads all schemas and subschemas into the schema store for the validator to reference."""
        if cls._SCHEMA_STORE:
            return  # Already loaded.
        for root, _, files in walk(CONFIG_SCHEMA_DIR):
            for file_name in files:
                if not file_name.endswith(".json"):
                    continue
                file_path = path_join(root, file_name)
                # Skip empty placeholder files.
                if path.getsize(file_path) == 0:
                    continue
                with open(file_path, mode="r", encoding="utf-8") as schema_file:
                    schema = json.load(schema_file)
                    cls._SCHEMA_STORE[file_path] = schema
                    # Let the schema be referenced by its id as well.
                    assert "$id" in schema
                    assert schema["$id"] not in cls._SCHEMA_STORE
                    cls._SCHEMA_STORE[schema["$id"]] = schema

    @classmethod
    def _load_registry(cls) -> None:
        """Also store them in a Registry object for referencing by recent versions of jsonschema."""
        if not cls._SCHEMA_STORE:
            cls._load_schemas()
        cls._REGISTRY = Registry().with_resources([
            (url, Resource.from_contents(schema, default_specification=DRAFT202012))
            for url, schema in cls._SCHEMA_STORE.items()
        ])

    @property
    def registry(self) -> Registry:
        """Returns a Registry object with all the schemas loaded."""
        if not self._REGISTRY:
            self._load_registry()
        return self._REGISTRY


# Module-level singleton used by ConfigSchema.validate().
SCHEMA_STORE = SchemaStore()
class ConfigSchema(Enum):
    """
    An enum to help describe schema types and help validate configs against them.

    Each member's value is the path of its JSON schema file under CONFIG_SCHEMA_DIR.
    """

    CLI = path_join(CONFIG_SCHEMA_DIR, "cli/cli-schema.json")
    GLOBALS = path_join(CONFIG_SCHEMA_DIR, "cli/globals-schema.json")
    ENVIRONMENT = path_join(CONFIG_SCHEMA_DIR, "environments/environment-schema.json")
    OPTIMIZER = path_join(CONFIG_SCHEMA_DIR, "optimizers/optimizer-schema.json")
    SERVICE = path_join(CONFIG_SCHEMA_DIR, "services/service-schema.json")
    STORAGE = path_join(CONFIG_SCHEMA_DIR, "storage/storage-schema.json")
    TUNABLE_PARAMS = path_join(CONFIG_SCHEMA_DIR, "tunables/tunable-params-schema.json")
    TUNABLE_VALUES = path_join(CONFIG_SCHEMA_DIR, "tunables/tunable-values-schema.json")

    @property
    def schema(self) -> dict:
        """Gets the schema object for this type."""
        schema = SCHEMA_STORE[self.value]
        assert schema
        return schema

    def validate(self, config: dict) -> None:
        """
        Validates the given config against this schema.

        Parameters
        ----------
        config : dict
            The config to validate.

        Raises
        ------
        jsonschema.exceptions.ValidationError
        jsonschema.exceptions.SchemaError
        """
        # Allow skipping validation (e.g., for a tight dev loop) via the env flag.
        if _SKIP_VALIDATION:
            _LOG.warning("%s is set - skip schema validation", _VALIDATION_ENV_FLAG)
        else:
            jsonschema.Draft202012Validator(
                schema=self.schema,
                registry=SCHEMA_STORE.registry,  # type: ignore[call-arg]
            ).validate(config)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,897
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/base_service.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Base class for the service mix-ins.
"""
import json
import logging
from typing import Any, Callable, Dict, List, Optional, Union
from mlos_bench.services.types.config_loader_type import SupportsConfigLoading
from mlos_bench.util import instantiate_from_config
_LOG = logging.getLogger(__name__)
class Service:
    """
    An abstract base of all environment services.

    Services expose their methods as "mix-ins" via register()/export() so that
    a child service inherits the callable methods of its parent.
    """

    @classmethod
    def new(cls,
            class_name: str,
            config: Optional[Dict[str, Any]] = None,
            global_config: Optional[Dict[str, Any]] = None,
            parent: Optional["Service"] = None) -> "Service":
        """
        Factory method for a new service with a given config.

        Parameters
        ----------
        class_name: str
            FQN of a Python class to instantiate, e.g.,
            "mlos_bench.services.remote.azure.AzureVMService".
            Must be derived from the `Service` class.
        config : dict
            Free-format dictionary that contains the service configuration.
            It will be passed as a constructor parameter of the class
            specified by `class_name`.
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            A parent service that can provide mixin functions.

        Returns
        -------
        svc : Service
            An instance of the `Service` class initialized with `config`.
        """
        assert issubclass(cls, Service)
        return instantiate_from_config(cls, class_name, config, global_config, parent)

    def __init__(self,
                 config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional["Service"] = None):
        """
        Create a new service with a given config.

        Parameters
        ----------
        config : dict
            Free-format dictionary that contains the service configuration.
            It will be passed as a constructor parameter of the class
            specified by `class_name`.
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            An optional parent service that can provide mixin functions.
        """
        self.config = config or {}
        self._parent = parent
        self._services: Dict[str, Callable] = {}
        if parent:
            # Inherit all mix-in methods exported by the parent service.
            self.register(parent.export())
        # NOTE(review): this attribute is only *assigned* when the parent
        # supports config loading; otherwise accessing the property below
        # raises AttributeError -- confirm that is the intended contract.
        self._config_loader_service: SupportsConfigLoading
        if parent and isinstance(parent, SupportsConfigLoading):
            self._config_loader_service = parent
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Service: %s Config:\n%s", self, json.dumps(self.config, indent=2))
            _LOG.debug("Service: %s Globals:\n%s", self, json.dumps(global_config or {}, indent=2))
            _LOG.debug("Service: %s Parent mixins: %s", self,
                       [] if parent is None else list(parent._services.keys()))

    def __repr__(self) -> str:
        return self.__class__.__name__

    @property
    def config_loader_service(self) -> SupportsConfigLoading:
        """
        Return a config loader service.

        Returns
        -------
        config_loader_service : SupportsConfigLoading
            A config loader service.
        """
        return self._config_loader_service

    def register(self, services: Union[Dict[str, Callable], List[Callable]]) -> None:
        """
        Register new mix-in services.

        Parameters
        ----------
        services : dict or list
            A dictionary of string -> function pairs.
        """
        if not isinstance(services, dict):
            # Key a plain list of callables by their function names.
            services = {svc.__name__: svc for svc in services}
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Service: %s Add methods: %s",
                       self.__class__.__name__, list(services.keys()))
        self._services.update(services)
        # Inject the registered callables as attributes of this instance
        # so they can be invoked as if they were methods.
        self.__dict__.update(self._services)

    def export(self) -> Dict[str, Callable]:
        """
        Return a dictionary of functions available in this service.

        Returns
        -------
        services : dict
            A dictionary of string -> function pairs.
        """
        return self._services
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,898
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Helper functions for config example loading tests.
"""
from typing import List
import os
from mlos_bench.util import path_join
def locate_config_examples(config_examples_dir: str) -> List[str]:
    """Locates all config examples in the given directory.

    Recursively walks `config_examples_dir` and collects all files with a
    ``.json`` or ``.jsonc`` extension.

    Parameters
    ----------
    config_examples_dir: str
        Path to the directory containing config examples.

    Returns
    -------
    config_examples: List[str]
        List of paths to config examples.
    """
    assert os.path.isdir(config_examples_dir)
    config_examples = []
    for root, _, files in os.walk(config_examples_dir):
        for file in files:
            # str.endswith accepts a tuple of suffixes - one call instead of two.
            if file.endswith((".json", ".jsonc")):
                config_examples.append(path_join(root, file))
    return config_examples
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,899
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A wrapper for mlos_core optimizers for mlos_bench.
"""
import logging
import os
from typing import Optional, Sequence, Tuple, Union
import pandas as pd
from mlos_core.optimizers import BaseOptimizer, OptimizerType, OptimizerFactory, SpaceAdapterType, DEFAULT_OPTIMIZER_TYPE
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.optimizers.convert_configspace import tunable_groups_to_configspace
from mlos_bench.services.base_service import Service
from mlos_bench.util import path_join
_LOG = logging.getLogger(__name__)
class MlosCoreOptimizer(Optimizer):
    """
    A wrapper class for the mlos_core optimizers.
    """

    def __init__(self,
                 tunables: TunableGroups,
                 config: dict,
                 global_config: Optional[dict] = None,
                 service: Optional[Service] = None):
        super().__init__(tunables, config, global_config, service)
        seed = config.get("seed")
        seed = None if seed is None else int(seed)
        # Convert mlos_bench tunables into a ConfigSpace for mlos_core.
        space = tunable_groups_to_configspace(tunables, seed)
        _LOG.debug("ConfigSpace: %s", space)
        opt_type = getattr(OptimizerType, self._config.pop(
            'optimizer_type', DEFAULT_OPTIMIZER_TYPE.name))
        if opt_type == OptimizerType.SMAC:
            # If output_directory is specified, turn it into an absolute path.
            if 'output_directory' not in self._config:
                self._config['output_directory'] = 'smac_output'
                _LOG.info(
                    "No output_directory was specified for SMAC optimizer. Defaulting to '%s'.",
                    self._config['output_directory'])
            output_directory = self._config.get('output_directory')
            if output_directory is not None:
                if not os.path.isabs(output_directory):
                    self._config['output_directory'] = path_join(os.getcwd(), output_directory)
            else:
                _LOG.warning("SMAC optimizer output_directory was null. SMAC will use a temporary directory.")
            # Make sure max_trials >= max_iterations.
            if 'max_trials' not in self._config:
                self._config['max_trials'] = self._max_iter
            assert int(self._config['max_trials']) >= self._max_iter, \
                f"max_trials {self._config.get('max_trials')} <= max_iterations {self._max_iter}"
            if 'run_name' not in self._config and self.experiment_id:
                self._config['run_name'] = self.experiment_id
        # Optional space adapter (e.g., for dimensionality reduction).
        space_adapter_type = self._config.pop('space_adapter_type', None)
        space_adapter_config = self._config.pop('space_adapter_config', {})
        if space_adapter_type is not None:
            space_adapter_type = getattr(SpaceAdapterType, space_adapter_type)
        self._opt: BaseOptimizer = OptimizerFactory.create(
            parameter_space=space,
            optimizer_type=opt_type,
            optimizer_kwargs=self._config,
            space_adapter_type=space_adapter_type,
            space_adapter_kwargs=space_adapter_config,
        )

    def __repr__(self) -> str:
        return f"{super().__repr__()}({self._opt.__class__.__name__})"

    def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]],
                      status: Optional[Sequence[Status]] = None) -> bool:
        """
        Warm up the underlying mlos_core optimizer with data from past trials.
        Returns True if the base class accepted the data for registration.
        """
        if not super().bulk_register(configs, scores, status):
            return False
        df_configs = self._to_df(configs)  # Impute missing values, if necessary
        # Flip the sign for minimization/maximization as configured.
        df_scores = pd.Series(scores, dtype=float) * self._opt_sign
        if status is not None:
            df_status = pd.Series(status)
            # Penalize failed trials and keep only the completed ones.
            df_scores[df_status != Status.SUCCEEDED] = float("inf")
            df_status_completed = df_status.apply(Status.is_completed)
            df_configs = df_configs[df_status_completed]
            df_scores = df_scores[df_status_completed]
        # External data can have incorrect types (e.g., all strings).
        for (tunable, _group) in self._tunables:
            df_configs[tunable.name] = df_configs[tunable.name].astype(tunable.dtype)
        self._opt.register(df_configs, df_scores)
        if _LOG.isEnabledFor(logging.DEBUG):
            (score, _) = self.get_best_observation()
            _LOG.debug("Warm-up end: %s = %s", self.target, score)
        return True

    def _to_df(self, configs: Sequence[dict]) -> pd.DataFrame:
        """
        Select from past trials only the columns required in this experiment and
        impute default values for the tunables that are missing in the dataframe.

        Parameters
        ----------
        configs : Sequence[dict]
            Sequence of dicts with past trials data.

        Returns
        -------
        df_configs : pd.DataFrame
            A dataframe with past trials data, with missing values imputed.
        """
        df_configs = pd.DataFrame(configs)
        tunables_names = self._tunables.get_param_values().keys()
        missing_cols = set(tunables_names).difference(df_configs.columns)
        for (tunable, _group) in self._tunables:
            if tunable.name in missing_cols:
                df_configs[tunable.name] = tunable.default
            else:
                # NOTE(review): in-place fillna on a column selection may not
                # propagate in newer pandas versions -- verify.
                df_configs[tunable.name].fillna(tunable.default, inplace=True)
        # By default, hyperparameters in ConfigurationSpace are sorted by name:
        df_configs = df_configs[sorted(tunables_names)]
        _LOG.debug("Loaded configs:\n%s", df_configs)
        return df_configs

    def suggest(self) -> TunableGroups:
        """
        Ask the mlos_core optimizer for the next configuration to try
        and return it as a copy of the tunables with new values assigned.
        """
        if self._start_with_defaults:
            _LOG.info("Use default values for the first trial")
        df_config = self._opt.suggest(defaults=self._start_with_defaults)
        self._start_with_defaults = False
        _LOG.info("Iteration %d :: Suggest:\n%s", self._iter, df_config)
        return self._tunables.copy().assign(df_config.loc[0].to_dict())

    def register(self, tunables: TunableGroups, status: Status,
                 score: Optional[Union[float, dict]] = None) -> Optional[float]:
        """
        Register the result of a single trial with the mlos_core optimizer.
        Returns the (sign-adjusted) score from the base class.
        """
        score = super().register(tunables, status, score)  # With _opt_sign applied
        if status.is_completed():
            # By default, hyperparameters in ConfigurationSpace are sorted by name:
            df_config = pd.DataFrame(dict(sorted(tunables.get_param_values().items())), index=[0])
            _LOG.debug("Score: %s Dataframe:\n%s", score, df_config)
            self._opt.register(df_config, pd.Series([score], dtype=float))
        self._iter += 1
        return score

    def get_best_observation(self) -> Union[Tuple[float, TunableGroups], Tuple[None, None]]:
        """
        Return the best (score, tunables) pair observed so far,
        or (None, None) if nothing has been registered yet.
        """
        df_config = self._opt.get_best_observation()
        if len(df_config) == 0:
            return (None, None)
        params = df_config.iloc[0].to_dict()
        _LOG.debug("Best observation: %s", params)
        score = params.pop("score") * self._opt_sign  # mlos_core always uses the `score` column
        return (score, self._tunables.copy().assign(params))
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,900
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.
Used to make mypy happy about multiple conftest.py modules.
"""
from typing import Optional
from mlos_bench.util import get_class_from_name
# A common seed to use to avoid tracking down race conditions and intermingling
# issues of seeds across tests that run in non-deterministic parallel orders.
SEED = 42
# import numpy as np
# np.random.seed(SEED)
def try_resolve_class_name(class_name: Optional[str]) -> Optional[str]:
    """
    Gets the full class name from the given name or None on error.
    """
    if class_name is None:
        return None
    result: Optional[str] = None
    try:
        the_cls = get_class_from_name(class_name)
        result = the_cls.__module__ + "." + the_cls.__name__
    except (ValueError, AttributeError, ModuleNotFoundError, ImportError):
        # Unresolvable name: report it as None.
        pass
    return result
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,901
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/storage/test_load_storage_config_examples.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for loading storage config examples.
"""
import logging
from typing import List
import pytest
from mlos_bench.tests.config import locate_config_examples
from mlos_bench.config.schemas.config_schemas import ConfigSchema
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.storage.base_storage import Storage
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import get_class_from_name, path_join
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)
# Get the set of configs to test.
CONFIG_TYPE = "storage"
def filter_configs(configs_to_filter: List[str]) -> List[str]:
    """If necessary, filter out json files that aren't for the module we're testing."""
    # Currently a pass-through: all discovered storage configs are applicable.
    return configs_to_filter
configs = filter_configs(locate_config_examples(path_join(ConfigPersistenceService.BUILTIN_CONFIG_PATH, CONFIG_TYPE)))
assert configs
@pytest.mark.parametrize("config_path", configs)
def test_load_storage_config_examples(config_loader_service: ConfigPersistenceService, config_path: str) -> None:
    """Tests loading a config example.

    Loads the given storage config, resolves its class, and builds an
    instance of it via the config loader service.
    """
    config = config_loader_service.load_config(config_path, ConfigSchema.STORAGE)
    assert isinstance(config, dict)
    # Skip schema loading that would require a database connection for this test.
    config["config"]["lazy_schema_create"] = True
    cls = get_class_from_name(config["class"])
    assert issubclass(cls, Storage)
    # Make an instance of the class based on the config.
    storage_inst = config_loader_service.build_generic(
        base_cls=Storage,  # type: ignore[type-abstract]
        tunables=TunableGroups(),
        config=config,
        service=config_loader_service,
    )
    assert storage_inst is not None
    assert isinstance(storage_inst, cls)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,902
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
mlos_bench is a framework to help automate benchmarking and
OS/application parameter autotuning.
"""
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,903
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/remote/mock/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Mock remote services for testing purposes.
"""
from typing import Any, Tuple
from mlos_bench.environments.status import Status
def mock_operation(*_args: Any, **_kwargs: Any) -> Tuple[Status, dict]:
    """
    Mock VM operation that always succeeds, ignoring all arguments.

    Returns
    -------
    result : (Status, dict)
        A pair of Status and result, always (SUCCEEDED, {}).
    """
    status = Status.SUCCEEDED
    result: dict = {}
    return (status, result)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,904
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/optimizers/mock_opt_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for mock mlos_bench optimizer.
"""
import pytest
from mlos_bench.environments.status import Status
from mlos_bench.optimizers.mock_optimizer import MockOptimizer
# pylint: disable=redefined-outer-name
@pytest.fixture
def mock_configurations_no_defaults() -> list:
    """
    A list of 2-tuples of (tunable_values, score) to test the optimizers.
    """
    def _values(vm_size: str, migration_cost_ns: int, latency_ns: int) -> dict:
        # All configurations share the same "idle" mode; only these three vary.
        return {
            "vmSize": vm_size,
            "idle": "halt",
            "kernel_sched_migration_cost_ns": migration_cost_ns,
            "kernel_sched_latency_ns": latency_ns,
        }
    return [
        (_values("Standard_B4ms", 13111, 796233790), 88.88),
        (_values("Standard_B2ms", 117025, 149827706), 66.66),
        (_values("Standard_B4ms", 354784, 795285932), 99.99),
    ]
@pytest.fixture
def mock_configurations(mock_configurations_no_defaults: list) -> list:
    """
    A list of 2-tuples of (tunable_values, score) to test the optimizers.

    Same as `mock_configurations_no_defaults`, but with the default
    configuration prepended as the first iteration.
    """
    default_config = ({
        "vmSize": "Standard_B4ms",
        "idle": "halt",
        "kernel_sched_migration_cost_ns": -1,
        "kernel_sched_latency_ns": 2000000,
    }, 88.88)
    return [default_config, *mock_configurations_no_defaults]
def _optimize(mock_opt: MockOptimizer, mock_configurations: list) -> float:
    """
    Run several iterations of the optimizer and return the best score.
    """
    for (expected_values, iteration_score) in mock_configurations:
        assert mock_opt.not_converged()
        suggestion = mock_opt.suggest()
        # The mock optimizer is deterministic, so suggestions must match exactly.
        assert suggestion.get_param_values() == expected_values
        mock_opt.register(suggestion, Status.SUCCEEDED, iteration_score)
    (best_score, _best_tunables) = mock_opt.get_best_observation()
    assert best_score is not None
    assert isinstance(best_score, float)
    return best_score
def test_mock_optimizer(mock_opt: MockOptimizer, mock_configurations: list) -> None:
    """
    Make sure that mock optimizer produces consistent suggestions.
    """
    best_score = _optimize(mock_opt, mock_configurations)
    # Minimization mode: the smallest registered score wins.
    assert best_score == pytest.approx(66.66, 0.01)
def test_mock_optimizer_no_defaults(mock_opt_no_defaults: MockOptimizer,
                                    mock_configurations_no_defaults: list) -> None:
    """
    Make sure that mock optimizer produces consistent suggestions.
    """
    best_score = _optimize(mock_opt_no_defaults, mock_configurations_no_defaults)
    # Minimization mode: the smallest registered score wins.
    assert best_score == pytest.approx(66.66, 0.01)
def test_mock_optimizer_max(mock_opt_max: MockOptimizer, mock_configurations: list) -> None:
    """
    Check the maximization mode of the mock optimizer.
    """
    best_score = _optimize(mock_opt_max, mock_configurations)
    # Maximization mode: the largest registered score wins.
    assert best_score == pytest.approx(99.99, 0.01)
def test_mock_optimizer_register_fail(mock_opt: MockOptimizer) -> None:
    """
    Check the input acceptance conditions for Optimizer.register().
    """
    tunables = mock_opt.suggest()
    # Valid combinations: score with SUCCEEDED, no score with FAILED.
    mock_opt.register(tunables, Status.SUCCEEDED, 10)
    mock_opt.register(tunables, Status.FAILED)
    # A SUCCEEDED status must come with a score...
    with pytest.raises(ValueError):
        mock_opt.register(tunables, Status.SUCCEEDED, None)
    # ...and a FAILED status must not have one.
    with pytest.raises(ValueError):
        mock_opt.register(tunables, Status.FAILED, 10)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,905
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/util_git_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for get_git_info utility function.
"""
import re
from mlos_bench.util import get_git_info
def test_get_git_info() -> None:
    """
    Check that we can retrieve git info about the current repository correctly.
    """
    repo_url, commit_sha, relative_path = get_git_info(__file__)
    assert "mlos" in repo_url.lower()
    # A full git commit hash is 40 hex digits.
    assert re.match(r"[0-9a-f]{40}", commit_sha) is not None
    assert relative_path == "mlos_bench/mlos_bench/tests/util_git_test.py"
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,906
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/optimizers/flaml_optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Contains the FlamlOptimizer class.
"""
from typing import Dict, NamedTuple, Optional, Union
from warnings import warn
import ConfigSpace
import numpy as np
import pandas as pd
from mlos_core.optimizers.optimizer import BaseOptimizer
from mlos_core.spaces.adapters.adapter import BaseSpaceAdapter
class EvaluatedSample(NamedTuple):
    """A named tuple representing a sample that has been evaluated."""
    # The configuration (dict of parameter values) that was evaluated.
    config: dict
    # The score obtained by evaluating that configuration.
    score: float
class FlamlOptimizer(BaseOptimizer):
    """Wrapper class for FLAML Optimizer: A fast library for AutoML and tuning.

    Parameters
    ----------
    parameter_space : ConfigSpace.ConfigurationSpace
        The parameter space to optimize.
    space_adapter : BaseSpaceAdapter
        The space adapter class to employ for parameter space transformations.
    low_cost_partial_config : dict
        A dictionary from a subset of controlled dimensions to the initial low-cost values.
        More info: https://microsoft.github.io/FLAML/docs/FAQ#about-low_cost_partial_config-in-tune
    seed : Optional[int]
        If provided, calls np.random.seed() with the provided value to set the seed globally at init.
    """
    def __init__(self, *,
                 parameter_space: ConfigSpace.ConfigurationSpace,
                 space_adapter: Optional[BaseSpaceAdapter] = None,
                 low_cost_partial_config: Optional[dict] = None,
                 seed: Optional[int] = None):
        super().__init__(
            parameter_space=parameter_space,
            space_adapter=space_adapter,
        )
        # Per upstream documentation, it is recommended to set the seed for
        # flaml at the start of its operation globally.
        if seed is not None:
            np.random.seed(seed)
        # Delayed import so that FLAML's converter is only loaded when this optimizer is used.
        # pylint: disable=import-outside-toplevel
        from mlos_core.spaces.converters.flaml import configspace_to_flaml_space, FlamlDomain
        # The optimizer's parameter space converted into FLAML's own domain format.
        self.flaml_parameter_space: Dict[str, FlamlDomain] = configspace_to_flaml_space(self.optimizer_parameter_space)
        self.low_cost_partial_config = low_cost_partial_config
        # All previously registered samples, keyed by configuration.
        # Used to warm-start each fresh FLAML run in _get_next_config().
        self.evaluated_samples: Dict[ConfigSpace.Configuration, EvaluatedSample] = {}
        # Set by _target_function() when FLAML proposes a previously unseen config.
        self._suggested_config: Optional[dict]
    def _register(self, configurations: pd.DataFrame, scores: pd.Series,
                  context: Optional[pd.DataFrame] = None) -> None:
        """Registers the given configurations and scores.

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
        scores : pd.Series
            Scores from running the configurations. The index is the same as the index of the configurations.
        context : None
            Not Yet Implemented.
        """
        if context is not None:
            raise NotImplementedError()
        for (_, config), score in zip(configurations.iterrows(), scores):
            cs_config: ConfigSpace.Configuration = ConfigSpace.Configuration(
                self.optimizer_parameter_space, values=config.to_dict())
            if cs_config in self.evaluated_samples:
                # Note: a duplicate registration overwrites the previously stored score.
                warn(f"Configuration {config} was already registered", UserWarning)
            self.evaluated_samples[cs_config] = EvaluatedSample(config=config.to_dict(), score=score)
    def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:
        """Suggests a new configuration.

        Obtained by running a fresh FLAML instance warm-started with all
        previously registered samples (see `_get_next_config()`).

        Parameters
        ----------
        context : None
            Not Yet Implemented.

        Returns
        -------
        configuration : pd.DataFrame
            Pandas dataframe with a single row. Column names are the parameter names.
        """
        if context is not None:
            raise NotImplementedError()
        config: dict = self._get_next_config()
        return pd.DataFrame(config, index=[0])
    def register_pending(self, configurations: pd.DataFrame,
                         context: Optional[pd.DataFrame] = None) -> None:
        """Pending observations are not supported by the FLAML wrapper."""
        raise NotImplementedError()
    def _target_function(self, config: dict) -> Union[dict, None]:
        """Configuration evaluation function called by FLAML optimizer.

        FLAML may suggest the same configuration multiple times (due to its warm-start mechanism).
        Once FLAML suggests an unseen configuration, we store it, and stop the optimization process.

        Parameters
        ----------
        config: dict
            Next configuration to be evaluated, as suggested by FLAML.
            This config is stored internally and is returned to user, via `.suggest()` method.

        Returns
        -------
        result: Union[dict, None]
            Dictionary with a single key, `score`, if config already evaluated; `None` otherwise.
        """
        cs_config: ConfigSpace.Configuration = ConfigSpace.Configuration(self.optimizer_parameter_space, values=config)
        if cs_config in self.evaluated_samples:
            # Replay the known score so FLAML can continue its search.
            return {'score': self.evaluated_samples[cs_config].score}
        # First unseen configuration: capture it and stop the FLAML run.
        self._suggested_config = config
        return None  # Returning None stops the process
    def _get_next_config(self) -> dict:
        """Warm-starts a new instance of FLAML, and returns a recommended, unseen new configuration.

        Since FLAML does not provide an ask-and-tell interface, we need to create a new instance of FLAML
        each time we get asked for a new suggestion. This is suboptimal performance-wise, but works.
        To do so, we use any previously evaluated configurations to bootstrap FLAML (i.e., warm-start).
        For more info: https://microsoft.github.io/FLAML/docs/Use-Cases/Tune-User-Defined-Function#warm-start

        Returns
        -------
        result: dict
            The next (previously unseen) configuration to evaluate,
            as captured by `_target_function()`.

        Raises
        ------
        RuntimeError: if FLAML did not suggest a previously unseen configuration.
        """
        from flaml import tune  # pylint: disable=import-outside-toplevel
        # Parse evaluated configs to format used by FLAML
        points_to_evaluate: list = []
        evaluated_rewards: list = []
        if len(self.evaluated_samples) > 0:
            evaluated_samples_list: list = [(s.config, s.score) for s in self.evaluated_samples.values()]
            points_to_evaluate, evaluated_rewards = list(zip(*evaluated_samples_list))
        # Warm start FLAML optimizer
        self._suggested_config = None
        # num_samples allows one sample beyond the warm-start points;
        # _target_function() stops the run at the first unseen config.
        tune.run(
            self._target_function,
            config=self.flaml_parameter_space,
            mode='min',
            metric='score',
            points_to_evaluate=list(points_to_evaluate),
            evaluated_rewards=list(evaluated_rewards),
            num_samples=len(points_to_evaluate) + 1,
            low_cost_partial_config=self.low_cost_partial_config,
            verbose=0,
        )
        if self._suggested_config is None:
            raise RuntimeError('FLAML did not produce a suggestion')
        return self._suggested_config  # type: ignore[unreachable]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,907
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/local/local_exec.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Helper functions to run scripts and commands locally on the scheduler side.
"""
import errno
import logging
import os
import shlex
import subprocess
import sys
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, TYPE_CHECKING
from mlos_bench.services.base_service import Service
from mlos_bench.services.local.temp_dir_context import TempDirContextService
from mlos_bench.services.types.local_exec_type import SupportsLocalExec
if TYPE_CHECKING:
from mlos_bench.tunables.tunable import TunableValue
_LOG = logging.getLogger(__name__)
def split_cmdline(cmdline: str) -> Iterable[List[str]]:
    """
    A single command line may contain multiple commands separated by
    special characters (e.g., &&, ||, etc.) so further split the
    commandline into an array of subcommand arrays.

    Parameters
    ----------
    cmdline: str
        The commandline to split.

    Yields
    ------
    Iterable[List[str]]
        A list of subcommands or separators, each one a list of tokens.
        Can be rejoined as a flattened array.
    """
    lexer = shlex.shlex(cmdline, posix=True, punctuation_chars=True)
    lexer.whitespace_split = True
    pending: List[str] = []
    for token in lexer:
        if token[0] in lexer.punctuation_chars:
            # Hit a separator: flush any accumulated subcommand first,
            # then yield the separator itself as its own token list.
            if pending:
                yield pending
                pending = []
            yield [token]
        else:
            pending.append(token)
    # Yield the trailing subcommand, if any.
    if pending:
        yield pending
class LocalExecService(TempDirContextService, SupportsLocalExec):
    """
    Collection of methods to run scripts and commands in an external process
    on the node acting as the scheduler. Can be useful for data processing
    due to reduced dependency management complications vs the target environment.
    """
    def __init__(self,
                 config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new instance of a service to run scripts locally.

        Parameters
        ----------
        config : dict
            Free-format dictionary that contains parameters for the service.
            (E.g., root path for config files, etc.)
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            An optional parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        # Expose local_exec() as a service mixin method.
        self.register([self.local_exec])
    def local_exec(self, script_lines: Iterable[str],
                   env: Optional[Mapping[str, "TunableValue"]] = None,
                   cwd: Optional[str] = None,
                   return_on_error: bool = False) -> Tuple[int, str, str]:
        """
        Execute the script lines from `script_lines` in a local process.

        Parameters
        ----------
        script_lines : Iterable[str]
            Lines of the script to run locally.
            Treat every line as a separate command to run.
        env : Mapping[str, Union[int, float, str]]
            Environment variables (optional).
        cwd : str
            Work directory to run the script at.
            If omitted, use `temp_dir` or create a temporary dir.
        return_on_error : bool
            If True, stop running script lines on first non-zero return code.
            The default is False.

        Returns
        -------
        (return_code, stdout, stderr) : (int, str, str)
            A 3-tuple of return code, stdout, and stderr of the script process.
            The return code is that of the last line executed (0 if no lines);
            stdout/stderr are the concatenated outputs of all lines run.
        """
        (return_code, stdout_list, stderr_list) = (0, [], [])
        with self.temp_dir_context(cwd) as temp_dir:
            _LOG.debug("Run in directory: %s", temp_dir)
            # Run each line as a separate command, accumulating its output.
            for line in script_lines:
                (return_code, stdout, stderr) = self._local_exec_script(line, env, temp_dir)
                stdout_list.append(stdout)
                stderr_list.append(stderr)
                if return_code != 0 and return_on_error:
                    break
        stdout = "".join(stdout_list)
        stderr = "".join(stderr_list)
        _LOG.debug("Run: stdout:\n%s", stdout)
        _LOG.debug("Run: stderr:\n%s", stderr)
        return (return_code, stdout, stderr)
    def _resolve_cmdline_script_path(self, subcmd_tokens: List[str]) -> List[str]:
        """
        Resolves local script path (first token) in the (sub)command line
        tokens to its full path.

        Parameters
        ----------
        subcmd_tokens : List[str]
            The previously split tokens of the subcmd.

        Returns
        -------
        List[str]
            A modified sub command line with the script paths resolved.
        """
        script_path = self.config_loader_service.resolve_path(subcmd_tokens[0])
        # Special case check for lone `.` which means both `source` and
        # "current directory" (which isn't executable) in posix shells.
        if os.path.exists(script_path) and os.path.isfile(script_path):
            # If the script exists, use it.
            subcmd_tokens[0] = os.path.abspath(script_path)
            # Also check if it is a python script and prepend the currently
            # executing python executable path to avoid requiring
            # executable mode bits or a shebang.
            if script_path.strip().lower().endswith(".py"):
                subcmd_tokens.insert(0, sys.executable)
        return subcmd_tokens
    def _local_exec_script(self, script_line: str,
                           env_params: Optional[Mapping[str, "TunableValue"]],
                           cwd: str) -> Tuple[int, str, str]:
        """
        Execute the script from `script_path` in a local process.

        Parameters
        ----------
        script_line : str
            Line of the script to run in the local process.
        env_params : Mapping[str, Union[int, float, str]]
            Environment variables.
        cwd : str
            Work directory to run the script at.

        Returns
        -------
        (return_code, stdout, stderr) : (int, str, str)
            A 3-tuple of return code, stdout, and stderr of the script process.
        """
        # Split the command line into set of subcmd tokens.
        # For each subcmd, perform path resolution fixups for any scripts being executed.
        subcmds = split_cmdline(script_line)
        subcmds = [self._resolve_cmdline_script_path(subcmd) for subcmd in subcmds]
        # Finally recombine all of the fixed up subcmd tokens into the original.
        cmd = [token for subcmd in subcmds for token in subcmd]
        # Stringify all tunable values for use as environment variables.
        env: Dict[str, str] = {}
        if env_params:
            env = {key: str(val) for (key, val) in env_params.items()}
        if sys.platform == 'win32':
            # A hack to run Python on Windows with env variables set:
            env_copy = os.environ.copy()
            env_copy["PYTHONPATH"] = ""
            env_copy.update(env)
            env = env_copy
        try:
            if sys.platform != 'win32':
                # With shell=True on POSIX, subprocess only treats the first list
                # element as the command, so re-join the tokens into one string.
                cmd = [" ".join(cmd)]
            _LOG.info("Run: %s", cmd)
            proc = subprocess.run(cmd, env=env or None, cwd=cwd, shell=True,
                                  text=True, check=False, capture_output=True)
            _LOG.debug("Run: return code = %d", proc.returncode)
            return (proc.returncode, proc.stdout, proc.stderr)
        except FileNotFoundError as ex:
            _LOG.warning("File not found: %s", cmd, exc_info=ex)
        return (errno.ENOENT, "", "File not found")
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,908
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/launcher_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests to check the main CLI launcher.
"""
import os
import re
from typing import List
import pytest
from mlos_bench.services.local.local_exec import LocalExecService
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.util import path_join
# pylint: disable=redefined-outer-name
@pytest.fixture
def root_path() -> str:
    """
    Root path of mlos_bench project.
    """
    # Three levels up from this test file: tests -> mlos_bench -> mlos_bench -> repo root.
    this_dir = os.path.dirname(__file__)
    return path_join(this_dir, "../../..", abs_path=True)
@pytest.fixture
def local_exec_service() -> LocalExecService:
    """
    Test fixture for LocalExecService.
    """
    # Parent service resolves config/script paths relative to these directories.
    config_loader = ConfigPersistenceService({
        "config_path": [
            "mlos_bench/config",
            "mlos_bench/examples",
        ]
    })
    return LocalExecService(parent=config_loader)
def _launch_main_app(root_path: str, local_exec_service: LocalExecService,
                     cli_config: str, re_expected: List[str]) -> None:
    """
    Run mlos_bench command-line application with given config
    and check the results in the log.

    Parameters
    ----------
    root_path : str
        Root of the repository; used as the working directory for the run.
    local_exec_service : LocalExecService
        Service used to launch the application in a subprocess.
    cli_config : str
        Extra command-line arguments to pass to `run.py`.
    re_expected : List[str]
        Regex patterns that must each match some log line, in order.
    """
    with local_exec_service.temp_dir_context() as temp_dir:
        # Test developers note: for local debugging,
        # uncomment the following line to use a known file path that can be examined:
        # temp_dir = '/tmp'
        log_path = path_join(temp_dir, "mock-test.log")
        (return_code, _stdout, _stderr) = local_exec_service.local_exec(
            [f"./mlos_bench/mlos_bench/run.py {cli_config} --log_file '{log_path}'"],
            cwd=root_path)
        assert return_code == 0
        try:
            # Match the expected patterns sequentially: each log line that matches
            # the current pattern advances to the next pattern.
            iter_expected = iter(re_expected)
            re_log = re.compile(next(iter_expected))
            with open(log_path, "rt", encoding="utf-8") as fh_out:
                for line in fh_out:
                    if re_log.match(line):
                        re_log = re.compile(next(iter_expected))
            # Log file exhausted before all patterns were matched.
            assert False, f"Pattern not found: '{re_log.pattern}'"
        except StopIteration:
            pass  # Success: all patterns found
# Regex fragment matching the timestamp prefix of mlos_bench log lines.
_RE_DATE = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}"
def test_launch_main_app_bench(root_path: str, local_exec_service: LocalExecService) -> None:
    """
    Run mlos_bench command-line application with mock benchmark config
    and check the results in the log.
    """
    _launch_main_app(
        root_path, local_exec_service,
        "--config mlos_bench/mlos_bench/tests/config/cli/mock-bench.jsonc",
        [
            # Expect a single log line reporting the mock environment's best score.
            f"^{_RE_DATE} run\\.py:\\d+ " +
            r"_optimize INFO Env: Mock environment best score: 65\.67\d+\s*$",
        ]
    )
def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecService) -> None:
    """
    Run mlos_bench command-line application with mock optimization config
    and check the results in the log.
    """
    _launch_main_app(
        root_path, local_exec_service,
        "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --max_iterations 3",
        [
            # Iteration 1: Expect first value to be the baseline
            f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " +
            r"register DEBUG Score: 65\.67\d+ Dataframe:\s*$",
            # Iteration 2: The result may not always be deterministic
            f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " +
            r"register DEBUG Score: \d+\.\d+ Dataframe:\s*$",
            # Iteration 3: non-deterministic (depends on the optimizer)
            f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " +
            r"register DEBUG Score: \d+\.\d+ Dataframe:\s*$",
            # Final result: baseline is the optimum for the mock environment
            f"^{_RE_DATE} run\\.py:\\d+ " +
            r"_optimize INFO Env: Mock environment best score: 65\.67\d+\s*$",
        ]
    )
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,909
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/optimizers/optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Contains the BaseOptimizer abstract class.
"""
import collections
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple
import ConfigSpace
import numpy as np
import numpy.typing as npt
import pandas as pd
from mlos_core import config_to_dataframe
from mlos_core.spaces.adapters.adapter import BaseSpaceAdapter
class BaseOptimizer(metaclass=ABCMeta):
"""
Optimizer abstract base class defining the basic interface.
"""
    def __init__(self, *,
                 parameter_space: ConfigSpace.ConfigurationSpace,
                 space_adapter: Optional[BaseSpaceAdapter] = None):
        """
        Create a new instance of the base optimizer.

        Parameters
        ----------
        parameter_space : ConfigSpace.ConfigurationSpace
            The parameter space to optimize.
        space_adapter : BaseSpaceAdapter
            The space adapter class to employ for parameter space transformations.

        Raises
        ------
        ValueError
            If the space adapter was built for a different parameter space.
        """
        self.parameter_space: ConfigSpace.ConfigurationSpace = parameter_space
        # The space the underlying optimizer operates in: the adapter's target
        # space if an adapter is given, otherwise the original parameter space.
        self.optimizer_parameter_space: ConfigSpace.ConfigurationSpace = \
            parameter_space if space_adapter is None else space_adapter.target_parameter_space
        if space_adapter is not None and space_adapter.orig_parameter_space != parameter_space:
            raise ValueError("Given parameter space differs from the one given to space adapter")
        self._space_adapter: Optional[BaseSpaceAdapter] = space_adapter
        # (configurations, scores, context) triplets registered so far.
        self._observations: List[Tuple[pd.DataFrame, pd.Series, Optional[pd.DataFrame]]] = []
        # Whether context was supplied on the first register() call;
        # subsequent calls must be consistent with it.
        self._has_context: Optional[bool] = None
        self._pending_observations: List[Tuple[pd.DataFrame, Optional[pd.DataFrame]]] = []
def __repr__(self) -> str:
return f"{self.__class__.__name__}(space_adapter={self.space_adapter})"
    @property
    def space_adapter(self) -> Optional[BaseSpaceAdapter]:
        """Get the space adapter instance (if any) used for parameter space transformations."""
        return self._space_adapter
    def register(self, configurations: pd.DataFrame, scores: pd.Series,
                 context: Optional[pd.DataFrame] = None) -> None:
        """Wrapper method, which employs the space adapter (if any), before registering the configurations and scores.

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
        scores : pd.Series
            Scores from running the configurations. The index is the same as the index of the configurations.
        context : pd.DataFrame
            Not Yet Implemented.
        """
        # Do some input validation.
        # XOR check: once the first call establishes whether context is used,
        # all subsequent calls must agree.
        assert self._has_context is None or self._has_context ^ (context is None), \
            "Context must always be added or never be added."
        assert len(configurations) == len(scores), \
            "Mismatched number of configurations and scores."
        if context is not None:
            assert len(configurations) == len(context), \
                "Mismatched number of configurations and context."
        assert configurations.shape[1] == len(self.parameter_space.values()), \
            "Mismatched configuration shape."
        # Record the observation in the *original* parameter space,
        # before any adapter transformation below.
        self._observations.append((configurations, scores, context))
        self._has_context = context is not None
        if self._space_adapter:
            # Map configurations into the optimizer's (target) space.
            configurations = self._space_adapter.inverse_transform(configurations)
            assert configurations.shape[1] == len(self.optimizer_parameter_space.values()), \
                "Mismatched configuration shape after inverse transform."
        return self._register(configurations, scores, context)
    @abstractmethod
    def _register(self, configurations: pd.DataFrame, scores: pd.Series,
                  context: Optional[pd.DataFrame] = None) -> None:
        """Registers the given configurations and scores.

        Subclasses implement the actual (optimizer-specific) registration;
        input validation and space-adapter handling are done in `register()`.

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
        scores : pd.Series
            Scores from running the configurations. The index is the same as the index of the configurations.
        context : pd.DataFrame
            Not Yet Implemented.
        """
        pass    # pylint: disable=unnecessary-pass # pragma: no cover
    def suggest(self, context: Optional[pd.DataFrame] = None, defaults: bool = False) -> pd.DataFrame:
        """
        Wrapper method, which employs the space adapter (if any), after suggesting a new configuration.

        Parameters
        ----------
        context : pd.DataFrame
            Not Yet Implemented.
        defaults : bool
            Whether or not to return the default config instead of an optimizer guided one.
            By default, use the one from the optimizer.

        Returns
        -------
        configuration : pd.DataFrame
            Pandas dataframe with a single row. Column names are the parameter names.
        """
        if defaults:
            # Use the default of the *original* space, mapped back into the
            # optimizer space so the adapter transform below still applies.
            configuration = config_to_dataframe(self.parameter_space.get_default_configuration())
            if self.space_adapter is not None:
                configuration = self.space_adapter.inverse_transform(configuration)
        else:
            configuration = self._suggest(context)
            assert len(configuration) == 1, \
                "Suggest must return a single configuration."
            assert len(configuration.columns) == len(self.optimizer_parameter_space.values()), \
                "Suggest returned a configuration with the wrong number of parameters."
        if self._space_adapter:
            # Map the suggestion from the optimizer space back to the original space.
            configuration = self._space_adapter.transform(configuration)
            assert len(configuration.columns) == len(self.parameter_space.values()), \
                "Space adapter transformed configuration with the wrong number of parameters."
        return configuration
    @abstractmethod
    def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:
        """Suggests a new configuration.

        Subclasses implement the actual (optimizer-specific) suggestion logic;
        space-adapter handling is done in `suggest()`.

        Parameters
        ----------
        context : pd.DataFrame
            Not Yet Implemented.

        Returns
        -------
        configuration : pd.DataFrame
            Pandas dataframe with a single row. Column names are the parameter names.
        """
        pass    # pylint: disable=unnecessary-pass # pragma: no cover
    @abstractmethod
    def register_pending(self, configurations: pd.DataFrame,
                         context: Optional[pd.DataFrame] = None) -> None:
        """Registers the given configurations as "pending".

        That is it say, it has been suggested by the optimizer, and an experiment trial has been started.
        This can be useful for executing multiple trials in parallel, retry logic, etc.

        Parameters
        ----------
        configurations : pd.DataFrame
            Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
        context : pd.DataFrame
            Not Yet Implemented.
        """
        pass    # pylint: disable=unnecessary-pass # pragma: no cover
def get_observations(self) -> pd.DataFrame:
    """Returns all registered observations as a single dataframe.

    Returns
    -------
    observations : pd.DataFrame
        Dataframe of observations. The columns are parameter names and "score" for the score, each row is an observation.

    Raises
    ------
    ValueError
        If no observations have been registered yet.
    """
    if not self._observations:
        raise ValueError("No observations registered yet.")
    configs = pd.concat([obs_config for obs_config, _, _ in self._observations])
    scores = pd.concat([obs_score for _, obs_score, _ in self._observations])
    try:
        contexts = pd.concat([obs_context for _, _, obs_context in self._observations
                              if obs_context is not None])
    except ValueError:
        # No observation carried a context.
        contexts = None
    configs["score"] = scores
    if contexts is not None:
        # configs = pd.concat([configs, contexts], axis=1)
        # Not reachable for now
        raise NotImplementedError()  # pragma: no cover
    return configs
def get_best_observation(self) -> pd.DataFrame:
    """Returns the best (lowest-score) observation so far as a dataframe.

    Returns
    -------
    best_observation : pd.DataFrame
        Dataframe with a single row containing the best observation. The columns are parameter names and "score" for the score.

    Raises
    ------
    ValueError
        If no observations have been registered yet.
    """
    if not self._observations:
        raise ValueError("No observations registered yet.")
    return self.get_observations().nsmallest(1, columns='score')
def cleanup(self) -> None:
    """Cleanup the optimizer.

    Optional hook for subclasses to release resources; the base
    implementation is a no-op.
    """
    pass    # pylint: disable=unnecessary-pass # pragma: no cover
def _from_1hot(self, config: npt.NDArray) -> pd.DataFrame:
    """
    Convert a one-hot-encoded numpy array back into a DataFrame,
    restoring categoricals and ints to their proper columns.
    """
    df_dict = collections.defaultdict(list)
    for row in range(config.shape[0]):
        col = 0
        for param in self.optimizer_parameter_space.values():
            if isinstance(param, ConfigSpace.CategoricalHyperparameter):
                # Find the single "hot" column within this parameter's span.
                for (offset, choice) in enumerate(param.choices):
                    if config[row][col + offset] == 1:
                        df_dict[param.name].append(choice)
                        break
                col += len(param.choices)
            else:
                value = config[row][col]
                # Integer parameters come back as floats from the 1-hot array.
                if isinstance(param, ConfigSpace.UniformIntegerHyperparameter):
                    value = int(value)
                df_dict[param.name].append(value)
                col += 1
    return pd.DataFrame(df_dict)
def _to_1hot(self, config: pd.DataFrame) -> npt.NDArray:
    """
    Convert a pandas DataFrame (or single-config Series) to a
    one-hot-encoded numpy array.
    """
    n_rows = config.shape[0] if config.ndim > 1 else 1
    # Each categorical expands to one column per choice; everything else is one column.
    n_cols = sum(
        len(param.choices) if isinstance(param, ConfigSpace.CategoricalHyperparameter) else 1
        for param in self.optimizer_parameter_space.values()
    )
    one_hot = np.zeros((n_rows, n_cols), dtype=np.float32)
    for row in range(n_rows):
        col = 0
        for param in self.optimizer_parameter_space.values():
            # A DataFrame is indexed by (row, column); a Series only by label position.
            if config.ndim > 1:
                value = config.iloc[row, config.columns.get_loc(param.name)]
            else:
                value = config.iloc[config.index.get_loc(param.name)]
            if isinstance(param, ConfigSpace.CategoricalHyperparameter):
                one_hot[row][col + param.choices.index(value)] = 1
                col += len(param.choices)
            else:
                one_hot[row][col] = value
                col += 1
    return one_hot
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,910
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/local/temp_dir_context.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Helper functions to work with temp files locally on the scheduler side.
"""
import abc
import logging
from contextlib import nullcontext
from tempfile import TemporaryDirectory
from typing import Any, Dict, Optional, Union
from mlos_bench.services.base_service import Service
_LOG = logging.getLogger(__name__)
class TempDirContextService(Service, metaclass=abc.ABCMeta):
    """
    A *base* service class that provides a context manager for creating
    temporary directories for local scripts.

    It is inherited by LocalExecService and MockLocalExecService.
    This class is not supposed to be used as a standalone service.
    """

    def __init__(self,
                 config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new instance of a service that provides temporary directory context
        for local exec service.

        Parameters
        ----------
        config : dict
            Free-format dictionary that contains parameters for the service.
            (E.g., root path for config files, etc.)
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            An optional parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        # Optional fixed directory to use instead of a fresh TemporaryDirectory.
        self._temp_dir = self.config.get("temp_dir")
        self.register([self.temp_dir_context])

    def temp_dir_context(self, path: Optional[str] = None) -> Union[TemporaryDirectory, nullcontext]:
        """
        Create a temp directory or use the provided path.

        Parameters
        ----------
        path : str
            A path to the temporary directory. Create a new one if None.

        Returns
        -------
        temp_dir_context : TemporaryDirectory
            Temporary directory context to use in the `with` clause.
        """
        # If either an explicit path or a configured temp_dir exists, just wrap it
        # (do not delete it on exit); otherwise create a self-cleaning directory.
        if path is not None or self._temp_dir is not None:
            return nullcontext(path or self._temp_dir)
        return TemporaryDirectory()
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,911
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/types/fileshare_type.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Protocol interface for file share operations.
"""
from typing import Protocol, runtime_checkable
@runtime_checkable
class SupportsFileShareOps(Protocol):
    """
    Protocol interface for file share operations (download/upload).
    """

    def download(self, remote_path: str, local_path: str, recursive: bool = True) -> None:
        """
        Downloads contents from a remote share path to a local path.

        Parameters
        ----------
        remote_path : str
            Path to download from the remote file share, a file if recursive=False
            or a directory if recursive=True.
        local_path : str
            Path to store the downloaded content to.
        recursive : bool
            If False, ignore the subdirectories;
            if True (the default), download the entire directory tree.
        """

    def upload(self, local_path: str, remote_path: str, recursive: bool = True) -> None:
        """
        Uploads contents from a local path to remote share path.

        Parameters
        ----------
        local_path : str
            Path to the local directory to upload contents from.
        remote_path : str
            Path in the remote file share to store the uploaded content to.
        recursive : bool
            If False, ignore the subdirectories;
            if True (the default), upload the entire directory tree.
        """
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,912
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunables_assign_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for assigning values to the individual parameters within tunable groups.
"""
import json5 as json
import pytest
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_tunables_assign_unknown_param(tunable_groups: TunableGroups) -> None:
    """
    Make sure that bulk assignment fails for parameters
    that don't exist in the TunableGroups object.
    """
    values_with_unknown_params = {
        "vmSize": "Standard_B2ms",
        "idle": "mwait",
        "UnknownParam_1": 1,
        "UnknownParam_2": "invalid-value",
    }
    with pytest.raises(KeyError):
        tunable_groups.assign(values_with_unknown_params)
def test_tunables_assign_invalid_categorical(tunable_groups: TunableGroups) -> None:
    """
    Assigning a value outside the allowed set of a categorical tunable must fail.
    """
    with pytest.raises(ValueError):
        tunable_groups.assign({"vmSize": "InvalidSize"})
def test_tunables_assign_invalid_range(tunable_groups: TunableGroups) -> None:
    """
    Assigning an out-of-range value to a numerical tunable must fail.
    """
    with pytest.raises(ValueError):
        tunable_groups.assign({"kernel_sched_migration_cost_ns": -2})
def test_tunables_assign_coerce_str(tunable_groups: TunableGroups) -> None:
    """
    Check the conversion from strings when assigning to an integer parameter.
    """
    tunable_groups.assign({"kernel_sched_migration_cost_ns": "10000"})
    # BUGFIX: the test previously had no assertion and only verified that
    # assign() did not raise. Verify the string was actually coerced to an int.
    assert tunable_groups["kernel_sched_migration_cost_ns"] == 10000
def test_tunables_assign_coerce_str_range_check(tunable_groups: TunableGroups) -> None:
    """
    A string that parses to an out-of-range int must still fail the range check.
    """
    with pytest.raises(ValueError):
        tunable_groups.assign({"kernel_sched_migration_cost_ns": "5500000"})
def test_tunables_assign_coerce_str_invalid(tunable_groups: TunableGroups) -> None:
    """
    A non-integer string ("1.1") assigned to an integer tunable must fail.
    """
    with pytest.raises(ValueError):
        tunable_groups.assign({"kernel_sched_migration_cost_ns": "1.1"})
def test_tunable_assign_str_to_numerical(tunable_int: Tunable) -> None:
    """
    Assigning a non-numeric string to numerical_value must fail.
    """
    with pytest.raises(ValueError):
        tunable_int.numerical_value = "foo"  # type: ignore[assignment]
def test_tunable_assign_int_to_numerical_value(tunable_int: Tunable) -> None:
    """
    A whole-number float assigned to an int tunable coerces to int.
    """
    tunable_int.numerical_value = 10.0
    assert tunable_int.numerical_value == 10
def test_tunable_assign_float_to_numerical_value(tunable_float: Tunable) -> None:
    """
    A float assigned to a float tunable is stored unchanged.
    """
    tunable_float.numerical_value = 0.1
    assert tunable_float.numerical_value == 0.1
def test_tunable_assign_str_to_int(tunable_int: Tunable) -> None:
    """
    A numeric string assigned to .value of an int tunable coerces to int.
    """
    tunable_int.value = "10"
    assert tunable_int.value == 10  # type: ignore[comparison-overlap]
def test_tunable_assign_str_to_float(tunable_float: Tunable) -> None:
    """
    A numeric string assigned to .value of a float tunable coerces to float.
    """
    tunable_float.value = "0.5"
    assert tunable_float.value == 0.5  # type: ignore[comparison-overlap]
def test_tunable_assign_float_to_int(tunable_int: Tunable) -> None:
    """
    A whole-number float assigned to .value of an int tunable coerces to int.
    """
    tunable_int.value = 10.0
    assert tunable_int.value == 10
def test_tunable_assign_float_to_int_fail(tunable_int: Tunable) -> None:
    """
    A fractional float assigned to an int tunable must fail the coercion.
    """
    with pytest.raises(ValueError):
        tunable_int.value = 10.1
def test_tunable_assign_null_to_categorical() -> None:
    """
    Checks that we can use null/None in categorical tunables.
    """
    json_config = """
    {
        "name": "categorical_test",
        "type": "categorical",
        "values": ["foo", null],
        "default": "foo"
    }
    """
    config = json.loads(json_config)
    categorical_tunable = Tunable(name='categorical_test', config=config)
    assert categorical_tunable
    assert categorical_tunable.category == "foo"
    # None must round-trip as a real null, not as the string 'None'.
    categorical_tunable.value = None
    assert categorical_tunable.value is None
    assert categorical_tunable.value != 'None'
    assert categorical_tunable.category is None
def test_tunable_assign_null_to_int(tunable_int: Tunable) -> None:
    """
    Checks that we can't use null/None in integer tunables.
    """
    with pytest.raises(TypeError):
        tunable_int.value = None
    with pytest.raises(TypeError):
        tunable_int.numerical_value = None  # type: ignore[assignment]
def test_tunable_assign_null_to_float(tunable_float: Tunable) -> None:
    """
    Checks that we can't use null/None in float tunables.
    """
    with pytest.raises(TypeError):
        tunable_float.value = None
    with pytest.raises(TypeError):
        tunable_float.numerical_value = None  # type: ignore[assignment]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,913
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/optimizers/base_optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Base class for an interface between the benchmarking framework
and mlos_core optimizers.
"""
import logging
from typing import Dict, Optional, Sequence, Tuple, Union
from abc import ABCMeta, abstractmethod
from distutils.util import strtobool # pylint: disable=deprecated-module
from mlos_bench.services.base_service import Service
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
_LOG = logging.getLogger(__name__)
class Optimizer(metaclass=ABCMeta):     # pylint: disable=too-many-instance-attributes
    """
    An abstract interface between the benchmarking framework and mlos_core optimizers.
    """

    def __init__(self,
                 tunables: TunableGroups,
                 config: dict,
                 global_config: Optional[dict] = None,
                 service: Optional[Service] = None):
        """
        Create a new optimizer for the given configuration space defined by the tunables.

        Parameters
        ----------
        tunables : TunableGroups
            The tunables to optimize.
        config : dict
            Free-format key/value pairs of configuration parameters to pass to the optimizer.
        global_config : Optional[dict]
            Free-format dictionary of global parameters (e.g., `experiment_id`).
        global_config : Optional[dict]
        service : Optional[Service]
        """
        _LOG.info("Create optimizer for: %s", tunables)
        _LOG.debug("Optimizer config: %s", config)
        # Copy the config so that the .pop() calls below don't mutate the caller's dict.
        self._config = config.copy()
        self._global_config = global_config or {}
        self._tunables = tunables
        self._service = service
        experiment_id = self._global_config.get('experiment_id')
        self.experiment_id = str(experiment_id).strip() if experiment_id else None
        self._iter = 1
        # If False, use the optimizer to suggest the initial configuration;
        # if True (default), use the already initialized values for the first iteration.
        self._start_with_defaults: bool = bool(
            strtobool(str(self._config.pop('start_with_defaults', True))))
        self._max_iter = int(self._config.pop('max_iterations', 100))
        self._opt_target = str(self._config.pop('optimization_target', 'score'))
        # Scores are always *minimized* internally: use sign -1 when maximizing.
        self._opt_sign = {"min": 1, "max": -1}[self._config.pop('optimization_direction', 'min')]

    def __repr__(self) -> str:
        opt_direction = 'min' if self._opt_sign > 0 else 'max'
        return f"{self.__class__.__name__}:{opt_direction}({self._opt_target})(config={self._config})"

    @property
    def target(self) -> str:
        """
        The name of the target metric to optimize.
        """
        return self._opt_target

    @property
    def supports_preload(self) -> bool:
        """
        Return True if the optimizer supports pre-loading the data from previous experiments.
        """
        return True

    @abstractmethod
    def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]],
                      status: Optional[Sequence[Status]] = None) -> bool:
        """
        Pre-load the optimizer with the bulk data from previous experiments.

        Parameters
        ----------
        configs : Sequence[dict]
            Records of tunable values from other experiments.
        scores : Sequence[float]
            Benchmark results from experiments that correspond to `configs`.
        status : Optional[Sequence[float]]
            Status of the experiments that correspond to `configs`.

        Returns
        -------
        is_not_empty : bool
            True if there is data to register, false otherwise.
        """
        _LOG.info("Warm-up the optimizer with: %d configs, %d scores, %d status values",
                  len(configs or []), len(scores or []), len(status or []))
        if len(configs or []) != len(scores or []):
            raise ValueError("Numbers of configs and scores do not match.")
        if status is not None and len(configs or []) != len(status or []):
            raise ValueError("Numbers of configs and status values do not match.")
        has_data = bool(configs and scores)
        if has_data and self._start_with_defaults:
            # Prior data supersedes default-first initialization.
            _LOG.info("Prior data exists - do *NOT* use the default initialization.")
            self._start_with_defaults = False
        return has_data

    @abstractmethod
    def suggest(self) -> TunableGroups:
        """
        Generate the next suggestion.

        Returns
        -------
        tunables : TunableGroups
            The next configuration to benchmark.
            These are the same tunables we pass to the constructor,
            but with the values set to the next suggestion.
        """

    @abstractmethod
    def register(self, tunables: TunableGroups, status: Status,
                 score: Optional[Union[float, Dict[str, float]]] = None) -> Optional[float]:
        """
        Register the observation for the given configuration.

        Parameters
        ----------
        tunables : TunableGroups
            The configuration that has been benchmarked.
            Usually it's the same config that the `.suggest()` method returned.
        status : Status
            Final status of the experiment (e.g., SUCCEEDED or FAILED).
        score : Union[float, Dict[str, float]]
            A scalar or a dict with the final benchmark results.
            None if the experiment was not successful.

        Returns
        -------
        value : float
            The scalar benchmark score extracted (and possibly transformed) from the dataframe that's being minimized.
        """
        _LOG.info("Iteration %d :: Register: %s = %s score: %s",
                  self._iter, tunables, status, score)
        # Exactly one of (success, missing score) must hold: a succeeded trial
        # must have a score and a failed one must not.
        if status.is_succeeded() == (score is None):  # XOR
            raise ValueError("Status and score must be consistent.")
        return self._get_score(status, score)

    def _get_score(self, status: Status, score: Optional[Union[float, Dict[str, float]]]) -> Optional[float]:
        """
        Extract a scalar benchmark score from the dataframe.
        Change the sign if we are maximizing.

        Parameters
        ----------
        status : Status
            Final status of the experiment (e.g., SUCCEEDED or FAILED).
        score : Union[float, Dict[str, float]]
            A scalar or a dict with the final benchmark results.
            None if the experiment was not successful.

        Returns
        -------
        score : float
            A scalar benchmark score to be used as a primary target for MINIMIZATION.
        """
        if not status.is_completed():
            return None
        if status.is_succeeded():
            assert score is not None
            if isinstance(score, dict):
                score = score[self._opt_target]
            return float(score) * self._opt_sign
        # Completed-but-failed trials get the worst possible score for minimization.
        assert score is None
        return float("inf")

    def not_converged(self) -> bool:
        """
        Return True if not converged, False otherwise.
        Base implementation just checks the iteration count.
        """
        return self._iter <= self._max_iter

    @abstractmethod
    def get_best_observation(self) -> Union[Tuple[float, TunableGroups], Tuple[None, None]]:
        """
        Get the best observation so far.

        Returns
        -------
        (value, tunables) : Tuple[float, TunableGroups]
            The best value and the corresponding configuration.
            (None, None) if no successful observation has been registered yet.
        """
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,914
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/environments/test_load_environment_config_examples.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for loading environment config examples.
"""
import logging
from typing import List
import pytest
from mlos_bench.tests.config import locate_config_examples
from mlos_bench.config.schemas.config_schemas import ConfigSchema
from mlos_bench.environments.base_environment import Environment
from mlos_bench.environments.composite_env import CompositeEnv
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import path_join
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)

# Get the set of configs to test.
CONFIG_TYPE = "environments"    # subdirectory of the built-in configs to scan
def filter_configs(configs_to_filter: List[str]) -> List[str]:
    """If necessary, filter out json files that aren't for the module we're testing."""
    # Tunables-only files are handled by a separate test module.
    return [
        config_path
        for config_path in configs_to_filter
        if not config_path.endswith("-tunables.jsonc")
    ]
# Discover all the environment config examples shipped with mlos_bench
# and sanity-check that the list is non-empty.
configs = filter_configs(locate_config_examples(path_join(ConfigPersistenceService.BUILTIN_CONFIG_PATH, CONFIG_TYPE)))
assert configs
@pytest.mark.parametrize("config_path", configs)
def test_load_environment_config_examples(config_loader_service: ConfigPersistenceService, config_path: str) -> None:
    """Tests loading an environment config example."""
    for env in load_environment_config_examples(config_loader_service, config_path):
        assert env is not None
        assert isinstance(env, Environment)
def load_environment_config_examples(config_loader_service: ConfigPersistenceService, config_path: str) -> List[Environment]:
    """
    Loads an environment config example.

    Parameters
    ----------
    config_loader_service : ConfigPersistenceService
        Service used to load the config files and construct the environments.
    config_path : str
        Path to the environment config example to load.

    Returns
    -------
    List[Environment]
        The environment(s) instantiated from the config file.
    """
    # Make sure that any "required_args" are provided.
    global_config = {
        "experiment_id": "test",
        "trial_id": 1,
        "mountPoint": "/mnt/tmp",
        # FIXME: The setup ubuntu configs currently use these values in their mounting scripts.
        # We should abstract that out so those details are only needed when a service that uses those is used.
        "storageAccountName": "foo",
        "storageAccountKey": "bar",
        "storageFileShareName": "baz",
        # Assign some values to variadic tunables and required parameters present in the config examples.
        "vmName": "vmTestName",
        "tunable_params_map": {
            "linux-runtime": ["linux-scheduler", "linux-swap"],
            "linux-boot": ["linux-kernel-boot"],
            "provision": ["azure-vm"],
            "redis": ["redis"],
        }
    }
    # Make sure we have the required services for the envs being used.
    mock_service_configs = [
        "services/local/mock/mock_local_exec_service.jsonc",
        "services/remote/mock/mock_fileshare_service.jsonc",
        "services/remote/mock/mock_vm_service.jsonc",
        "services/remote/mock/mock_remote_exec_service.jsonc",
    ]
    tunable_groups = TunableGroups()    # base tunable groups that all others get built on
    for mock_service_config_path in mock_service_configs:
        mock_service_config = config_loader_service.load_config(mock_service_config_path, ConfigSchema.SERVICE)
        config_loader_service.register(config_loader_service.build_service(
            config=mock_service_config, parent=config_loader_service).export())
    envs = config_loader_service.load_environment_list(
        config_path, tunable_groups, global_config, service=config_loader_service)
    return envs
# Repeat the discovery for the composite (root) environment config examples.
composite_configs = filter_configs(locate_config_examples(path_join(
    ConfigPersistenceService.BUILTIN_CONFIG_PATH, "environments/root/")))
assert composite_configs
@pytest.mark.parametrize("config_path", composite_configs)
def test_load_composite_env_config_examples(config_loader_service: ConfigPersistenceService, config_path: str) -> None:
    """
    Tests loading a composite env config example.

    Verifies that each child env's tunables are aliased (same objects) by the
    composite env, so a change in one is visible in the other.
    """
    envs = load_environment_config_examples(config_loader_service, config_path)
    assert len(envs) == 1
    assert isinstance(envs[0], CompositeEnv)
    composite_env: CompositeEnv = envs[0]
    for child_env in composite_env.children:
        assert child_env is not None
        assert isinstance(child_env, Environment)
        assert child_env.tunable_params is not None
        checked_child_env_groups = set()
        for (child_tunable, child_group) in child_env.tunable_params:
            # Lookup that tunable in the composite env.
            assert child_tunable in composite_env.tunable_params
            (composite_tunable, composite_group) = composite_env.tunable_params.get_tunable(child_tunable)
            assert child_tunable is composite_tunable   # Check that the tunables are the same object.
            if child_group.name not in checked_child_env_groups:
                assert child_group is composite_group
                checked_child_env_groups.add(child_group.name)
            # Check that when we change a child env, it's value is reflected in the composite env as well.
            # That is to say, they refer to the same objects, despite having potentially been loaded from separate configs.
            if child_tunable.is_categorical:
                old_cat_value = child_tunable.category
                assert child_tunable.value == old_cat_value
                assert child_group[child_tunable] == old_cat_value
                assert composite_env.tunable_params[child_tunable] == old_cat_value
                # Pick any other category and assign it through the child tunable.
                new_cat_value = [x for x in child_tunable.categories if x != old_cat_value][0]
                child_tunable.category = new_cat_value
                assert child_env.tunable_params[child_tunable] == new_cat_value
                assert composite_env.tunable_params[child_tunable] == child_tunable.category
            elif child_tunable.is_numerical:
                old_num_value = child_tunable.numerical_value
                assert child_tunable.value == old_num_value
                assert child_group[child_tunable] == old_num_value
                assert composite_env.tunable_params[child_tunable] == old_num_value
                # Bump the value through the child tunable and check the alias.
                child_tunable.numerical_value += 1
                assert child_env.tunable_params[child_tunable] == old_num_value + 1
                assert composite_env.tunable_params[child_tunable] == child_tunable.numerical_value
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,915
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/storage/exp_load_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for the storage subsystem.
"""
from datetime import datetime
import pytest
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.storage.base_storage import Storage
def test_exp_load_empty(exp_storage_memory_sql: Storage.Experiment) -> None:
    """
    Loading experimental data from empty storage must yield empty results.
    """
    configs, scores, status = exp_storage_memory_sql.load()
    assert not configs
    assert not scores
    assert not status
def test_exp_pending_empty(exp_storage_memory_sql: Storage.Experiment) -> None:
    """
    Empty storage must report no pending trials.
    """
    assert not list(exp_storage_memory_sql.pending_trials())
def test_exp_trial_pending(exp_storage_memory_sql: Storage.Experiment,
                           tunable_groups: TunableGroups) -> None:
    """
    Start a trial and check that it is reported as pending.
    """
    new_trial = exp_storage_memory_sql.new_trial(tunable_groups)
    (pending,) = exp_storage_memory_sql.pending_trials()
    assert pending.trial_id == new_trial.trial_id
    assert pending.tunables == tunable_groups
def test_exp_trial_pending_many(exp_storage_memory_sql: Storage.Experiment,
                                tunable_groups: TunableGroups) -> None:
    """
    Start THREE trials and check that all three are pending.
    """
    config1 = tunable_groups.copy().assign({'idle': 'mwait'})
    config2 = tunable_groups.copy().assign({'idle': 'noidle'})
    trial_ids = {
        exp_storage_memory_sql.new_trial(config1).trial_id,
        exp_storage_memory_sql.new_trial(config2).trial_id,
        exp_storage_memory_sql.new_trial(config2).trial_id,  # Submit same config twice
    }
    pending_ids = {pending.trial_id for pending in exp_storage_memory_sql.pending_trials()}
    assert len(pending_ids) == 3
    assert trial_ids == pending_ids
def test_exp_trial_pending_fail(exp_storage_memory_sql: Storage.Experiment,
                                tunable_groups: TunableGroups) -> None:
    """
    Start a trial, fail it, and check that it is NOT pending.
    """
    failed_trial = exp_storage_memory_sql.new_trial(tunable_groups)
    failed_trial.update(Status.FAILED, datetime.utcnow())
    assert not list(exp_storage_memory_sql.pending_trials())
def test_exp_trial_success(exp_storage_memory_sql: Storage.Experiment,
                           tunable_groups: TunableGroups) -> None:
    """
    Start a trial, finish it successfully, and check that it is NOT pending.
    """
    good_trial = exp_storage_memory_sql.new_trial(tunable_groups)
    good_trial.update(Status.SUCCEEDED, datetime.utcnow(), 99.9)
    assert not list(exp_storage_memory_sql.pending_trials())
def test_exp_trial_update_twice(exp_storage_memory_sql: Storage.Experiment,
                                tunable_groups: TunableGroups) -> None:
    """
    Updating the status of an already-finalized trial must raise an error.
    """
    trial = exp_storage_memory_sql.new_trial(tunable_groups)
    trial.update(Status.FAILED, datetime.utcnow())
    with pytest.raises(RuntimeError):
        trial.update(Status.SUCCEEDED, datetime.utcnow(), 99.9)
def test_exp_trial_pending_3(exp_storage_memory_sql: Storage.Experiment,
                             tunable_groups: TunableGroups) -> None:
    """
    Start THREE trials, let one succeed, another one fail and keep one not updated.
    Check that one is still pending and the other two can be loaded into the optimizer.
    """
    score = 99.9
    trial_fail = exp_storage_memory_sql.new_trial(tunable_groups)
    trial_succ = exp_storage_memory_sql.new_trial(tunable_groups)
    trial_pend = exp_storage_memory_sql.new_trial(tunable_groups)
    trial_fail.update(Status.FAILED, datetime.utcnow())
    trial_succ.update(Status.SUCCEEDED, datetime.utcnow(), score)
    # Only the never-updated trial should remain pending.
    (pending,) = exp_storage_memory_sql.pending_trials()
    assert pending.trial_id == trial_pend.trial_id
    # The two completed trials should be loadable, in submission order.
    (configs, scores, status) = exp_storage_memory_sql.load()
    assert len(configs) == 2
    assert scores == [None, score]
    assert status == [Status.FAILED, Status.SUCCEEDED]
    assert tunable_groups.copy().assign(configs[0]).reset() == trial_fail.tunables
    assert tunable_groups.copy().assign(configs[1]).reset() == trial_succ.tunables
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,916
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/conftest.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Common fixtures for mock TunableGroups and Environment objects.
"""
from typing import Any, Dict
import json5 as json
import pytest
from mlos_bench.tests import SEED
from mlos_bench.environments.mock_env import MockEnv
from mlos_bench.tunables.covariant_group import CovariantTunableGroup
from mlos_bench.tunables.tunable_groups import TunableGroups
# pylint: disable=redefined-outer-name
# -- Ignore pylint complaints about pytest references to
# `tunable_groups` fixture as both a function and a parameter.
TUNABLE_GROUPS_JSON = """
{
"provision": {
"cost": 1000,
"params": {
"vmSize": {
"description": "Azure VM size",
"type": "categorical",
"default": "Standard_B4ms",
"values": ["Standard_B2s", "Standard_B2ms", "Standard_B4ms"]
}
}
},
"boot": {
"cost": 300,
"params": {
"idle": {
"description": "Idling method",
"type": "categorical",
"default": "halt",
"values": ["halt", "mwait", "noidle"]
}
}
},
"kernel": {
"cost": 1,
"params": {
"kernel_sched_migration_cost_ns": {
"description": "Cost of migrating the thread to another core",
"type": "int",
"default": -1,
"range": [-1, 500000],
"special": [-1]
},
"kernel_sched_latency_ns": {
"description": "Initial value for the scheduler period",
"type": "int",
"default": 2000000,
"range": [0, 1000000000]
}
}
}
}
"""
@pytest.fixture
def tunable_groups_config() -> Dict[str, Any]:
    """
    Fixture that parses the mock tunable groups JSON5 into a config dict.
    """
    config = json.loads(TUNABLE_GROUPS_JSON)
    assert isinstance(config, dict)
    return config
@pytest.fixture
def tunable_groups(tunable_groups_config: dict) -> TunableGroups:
    """
    A test fixture that produces a mock TunableGroups.

    Returns
    -------
    tunable_groups : TunableGroups
        A fresh TunableGroups object (reset to defaults) for testing.
    """
    groups = TunableGroups(tunable_groups_config)
    groups.reset()
    return groups
@pytest.fixture
def covariant_group(tunable_groups: TunableGroups) -> CovariantTunableGroup:
    """
    Test fixture that extracts the first CovariantTunableGroup
    from the `tunable_groups` fixture.

    Returns
    -------
    CovariantTunableGroup
    """
    (_name, group) = next(iter(tunable_groups))
    return group
@pytest.fixture
def mock_env(tunable_groups: TunableGroups) -> MockEnv:
    """
    Test fixture for a MockEnv with a fixed RNG seed (noisy scores).
    """
    config = {
        "tunable_params": ["provision", "boot", "kernel"],
        "seed": SEED,
        "range": [60, 120],
        "metrics": ["score"],
    }
    return MockEnv(name="Test Env", config=config, tunables=tunable_groups)
@pytest.fixture
def mock_env_no_noise(tunable_groups: TunableGroups) -> MockEnv:
    """
    Test fixture for a MockEnv without a seed (deterministic scores).
    """
    config = {
        "tunable_params": ["provision", "boot", "kernel"],
        "range": [60, 120],
        "metrics": ["score", "other_score"],
    }
    return MockEnv(name="Test Env No Noise", config=config, tunables=tunable_groups)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,917
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/composite_env.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Composite benchmark environment.
"""
import logging
from datetime import datetime
from types import TracebackType
from typing import Any, Dict, List, Optional, Tuple, Type
from typing_extensions import Literal
from mlos_bench.services.base_service import Service
from mlos_bench.environments.status import Status
from mlos_bench.environments.base_environment import Environment
from mlos_bench.tunables.tunable_groups import TunableGroups
_LOG = logging.getLogger(__name__)
class CompositeEnv(Environment):
    """
    Composite benchmark environment.

    Aggregates several child environments, runs them in order, and merges
    their tunables, run metrics, and telemetry.
    """

    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new environment with a given config.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the environment
            configuration. Must have a "children" section.
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of groups of tunable parameters for *all* environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        super().__init__(name=name, config=config, global_config=global_config,
                         tunables=tunables, service=service)
        # By default, the Environment includes only the tunables explicitly specified
        # in the "tunable_params" section of the config. `CompositeEnv`, however, must
        # retain all tunables from its children environments plus the ones that come
        # from the "include_tunables".
        tunables = tunables.copy() if tunables else TunableGroups()
        _LOG.debug("Build composite environment '%s' START: %s", self, tunables)
        self._children: List[Environment] = []
        self._child_contexts: List[Environment] = []
        # To support trees of composite environments (e.g. for multiple VM experiments),
        # each CompositeEnv gets a copy of the original global config and adjusts it with
        # the `const_args` specific to it.
        global_config = (global_config or {}).copy()
        for (key, val) in self._const_args.items():
            global_config.setdefault(key, val)
        # Children can come either from external config files ("include_children")
        # or from inline configs ("children"); both kinds are merged the same way.
        for child_config_file in config.get("include_children", []):
            for env in self._config_loader_service.load_environment_list(
                    child_config_file, tunables, global_config, self._const_args, self._service):
                self._add_child(env, tunables)
        for child_config in config.get("children", []):
            env = self._config_loader_service.build_environment(
                child_config, tunables, global_config, self._const_args, self._service)
            self._add_child(env, tunables)
        _LOG.debug("Build composite environment '%s' END: %s", self, self._tunable_params)
        if not self._children:
            raise ValueError("At least one child environment must be present")

    def __enter__(self) -> Environment:
        """Enter all child contexts first, then the parent context."""
        self._child_contexts = [env.__enter__() for env in self._children]
        return super().__enter__()

    def __exit__(self, ex_type: Optional[Type[BaseException]],
                 ex_val: Optional[BaseException],
                 ex_tb: Optional[TracebackType]) -> Literal[False]:
        """
        Exit the children in reverse order, giving every child a chance to
        clean up even if one of them raises; re-raise the last exception.
        """
        ex_throw = None
        for env in reversed(self._children):
            try:
                env.__exit__(ex_type, ex_val, ex_tb)
            # pylint: disable=broad-exception-caught
            except Exception as ex:
                _LOG.error("Exception while exiting child environment '%s': %s", env, ex)
                ex_throw = ex
        self._child_contexts = []
        super().__exit__(ex_type, ex_val, ex_tb)
        if ex_throw:
            raise ex_throw
        return False

    @property
    def children(self) -> List[Environment]:
        """
        Return the list of child environments.
        """
        return self._children

    def pprint(self, indent: int = 4, level: int = 0) -> str:
        """
        Pretty-print the environment and its children.

        Parameters
        ----------
        indent : int
            Number of spaces to indent the output at each level. Default is 4.
        level : int
            Current level of indentation. Default is 0.

        Returns
        -------
        pretty : str
            Pretty-printed environment configuration.
        """
        return super().pprint(indent, level) + '\n' + '\n'.join(
            child.pprint(indent, level + 1) for child in self._children)

    def _add_child(self, env: Environment, tunables: TunableGroups) -> None:
        """
        Add a new child environment to the composite environment.
        This method is called from the constructor only.
        """
        _LOG.debug("Merge tunables: '%s' <- '%s' :: %s", self, env, env.tunable_params)
        self._children.append(env)
        # Merge into both this environment's tunables and the shared accumulator
        # that subsequent children are built against.
        self._tunable_params.merge(env.tunable_params)
        tunables.merge(env.tunable_params)

    def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
        """
        Set up the children environments.

        Parameters
        ----------
        tunables : TunableGroups
            A collection of tunable parameters along with their values.
        global_config : dict
            Free-format dictionary of global parameters of the environment
            that are not used in the optimization process.

        Returns
        -------
        is_success : bool
            True if all children setup() operations are successful,
            false otherwise.
        """
        assert self._in_context
        self._is_ready = super().setup(tunables, global_config) and all(
            env_context.setup(tunables, global_config) for env_context in self._child_contexts)
        return self._is_ready

    def teardown(self) -> None:
        """
        Tear down the children environments. This method is idempotent,
        i.e., calling it several times is equivalent to a single call.
        The environments are being torn down in the reverse order.
        """
        assert self._in_context
        for env_context in reversed(self._child_contexts):
            env_context.teardown()
        super().teardown()

    def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:
        """
        Submit a new experiment to the environment.
        Return the result of the *last* child environment if successful,
        or the status of the last failed environment otherwise.

        Returns
        -------
        (status, output) : (Status, dict)
            A pair of (Status, output) values, where `output` is a dict
            with the results or None if the status is not COMPLETED.
            If run script is a benchmark, then the score is usually expected to
            be in the `score` field.
        """
        _LOG.info("Run: %s", self._children)
        (status, metrics) = super().run()
        if not status.is_ready():
            return (status, metrics)
        # Run each child in order, merging metrics; abort on the first failure.
        joint_metrics = {}
        for env_context in self._child_contexts:
            _LOG.debug("Child env. run: %s", env_context)
            (status, metrics) = env_context.run()
            _LOG.debug("Child env. run results: %s :: %s %s", env_context, status, metrics)
            if not status.is_good():
                _LOG.info("Run failed: %s :: %s", self, status)
                return (status, None)
            joint_metrics.update(metrics or {})
        _LOG.info("Run completed: %s :: %s %s", self, status, joint_metrics)
        return (status, joint_metrics)

    def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:
        """
        Check the status of the benchmark environment.

        Returns
        -------
        (benchmark_status, telemetry) : (Status, list)
            A pair of (benchmark status, telemetry) values.
            `telemetry` is a list (maybe empty) of (timestamp, metric, value) triplets.
        """
        (status, telemetry) = super().status()
        if not status.is_ready():
            return (status, telemetry)
        joint_telemetry = []
        final_status = None
        for env_context in self._child_contexts:
            (status, telemetry) = env_context.status()
            _LOG.debug("Child env. status: %s :: %s", env_context, status)
            joint_telemetry.extend(telemetry)
            # Remember the *first* not-good child status; otherwise report
            # the status of the last child checked.
            if not status.is_good() and final_status is None:
                final_status = status
        final_status = final_status or status
        _LOG.info("Final status: %s :: %s", self, final_status)
        return (final_status, joint_telemetry)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,918
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/local/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Local Environments for mlos_bench.
"""
from mlos_bench.environments.local.local_env import LocalEnv
from mlos_bench.environments.local.local_fileshare_env import LocalFileShareEnv
__all__ = [
'LocalEnv',
'LocalFileShareEnv',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,919
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/schemas/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Common tests for config schemas and their validation and test cases.
"""
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, Set
import os
import json5
import jsonschema
import pytest
from mlos_bench.config.schemas.config_schemas import ConfigSchema
from mlos_bench.tests.config import locate_config_examples
# A dataclass to make pylint happy.
@dataclass
class SchemaTestType:
    """
    Describes one category of schema test cases ("good"/"bad")
    together with the set of subtypes it may contain.
    """

    test_case_type: str
    test_case_subtypes: Set[str]

    def __hash__(self) -> int:
        # Hash on the type name alone so instances can be used as dict/set keys.
        return hash(self.test_case_type)
# The different type of schema test cases we expect to have.
# Maps the test case type name ("good"/"bad") to its SchemaTestType descriptor;
# this drives both the directory-layout check and test-case enumeration below.
_SCHEMA_TEST_TYPES = {x.test_case_type: x for x in (
    SchemaTestType(test_case_type='good', test_case_subtypes={'full', 'partial'}),
    SchemaTestType(test_case_type='bad', test_case_subtypes={'invalid', 'unhandled'}),
)}
@dataclass
class SchemaTestCaseInfo():
    """
    Basic info about a single schema test case: the parsed config,
    the file it came from, and its type/subtype classification.
    """

    config: Dict[str, Any]
    test_case_file: str
    test_case_type: str
    test_case_subtype: str

    def __hash__(self) -> int:
        # The source file path uniquely identifies a test case.
        return hash(self.test_case_file)
def check_schema_dir_layout(test_cases_root: str) -> None:
    """
    Makes sure the test-cases directory layout matches what we expect,
    so we aren't missing any extra configs or test cases.
    """
    for type_dir in os.listdir(test_cases_root):
        if type_dir == 'README.md':
            continue
        schema_test_type = _SCHEMA_TEST_TYPES.get(type_dir)
        if schema_test_type is None:
            raise NotImplementedError(f"Unhandled test case type: {type_dir}")
        for subtype_dir in os.listdir(os.path.join(test_cases_root, type_dir)):
            if subtype_dir == 'README.md':
                continue
            if subtype_dir not in schema_test_type.test_case_subtypes:
                raise NotImplementedError(f"Unhandled test case subtype {subtype_dir} for test case type {type_dir}")
@dataclass
class TestCases:
"""
A container for test cases by type.
"""
by_path: Dict[str, SchemaTestCaseInfo]
by_type: Dict[str, Dict[str, SchemaTestCaseInfo]]
by_subtype: Dict[str, Dict[str, SchemaTestCaseInfo]]
def get_schema_test_cases(test_cases_root: str) -> TestCases:
    """
    Loads all schema test cases found under the given root directory,
    indexed by path, by type, and by subtype.
    """
    test_cases = TestCases(
        by_path={},
        by_type={type_name: {} for type_name in _SCHEMA_TEST_TYPES},
        by_subtype={subtype: {}
                    for schema_test_type in _SCHEMA_TEST_TYPES.values()
                    for subtype in schema_test_type.test_case_subtypes})
    check_schema_dir_layout(test_cases_root)
    # Note: we sort the test cases so that we can deterministically test them in parallel.
    for (test_case_type, schema_test_type) in _SCHEMA_TEST_TYPES.items():
        for test_case_subtype in schema_test_type.test_case_subtypes:
            subtype_root = os.path.join(test_cases_root, test_case_type, test_case_subtype)
            for test_case_file in locate_config_examples(subtype_root):
                with open(test_case_file, mode='r', encoding='utf-8') as test_case_fh:
                    try:
                        test_case_info = SchemaTestCaseInfo(
                            config=json5.load(test_case_fh),
                            test_case_file=test_case_file,
                            test_case_type=test_case_type,
                            test_case_subtype=test_case_subtype,
                        )
                        test_cases.by_path[test_case_info.test_case_file] = test_case_info
                        test_cases.by_type[test_case_info.test_case_type][test_case_info.test_case_file] = test_case_info
                        test_cases.by_subtype[test_case_info.test_case_subtype][test_case_info.test_case_file] = test_case_info
                    except Exception as ex:
                        # Surface which file failed, keeping the original parse error chained.
                        raise RuntimeError("Failed to load test case: " + test_case_file) from ex
    # Sanity check: there must be test cases of every kind.
    assert test_cases
    assert len(test_cases.by_type["good"]) > 0
    assert len(test_cases.by_type["bad"]) > 0
    assert len(test_cases.by_subtype) > 2
    return test_cases
def check_test_case_against_schema(test_case: SchemaTestCaseInfo, schema_type: ConfigSchema) -> None:
    """
    Checks the given test case against the given schema.

    Parameters
    ----------
    test_case : SchemaTestCaseInfo
        Schema test case to check.
    schema_type : ConfigSchema
        Schema to check against, e.g., ENVIRONMENT or SERVICE.

    Raises
    ------
    NotImplementedError
        If test case is not known.
    """
    if test_case.test_case_type == "good":
        # A "good" config must validate cleanly.
        schema_type.validate(test_case.config)
        return
    if test_case.test_case_type == "bad":
        # A "bad" config must be rejected by the schema.
        with pytest.raises(jsonschema.ValidationError):
            schema_type.validate(test_case.config)
        return
    raise NotImplementedError(f"Unknown test case type: {test_case.test_case_type}")
def check_test_case_config_with_extra_param(test_case: SchemaTestCaseInfo, schema_type: ConfigSchema) -> None:
    """
    Checks that the config fails to validate if extra params are present in certain places.
    """
    config = deepcopy(test_case.config)
    schema_type.validate(config)
    # An unknown top-level property must be rejected.
    extra_outer_attr = "extra_outer_attr"
    config[extra_outer_attr] = "should not be here"
    with pytest.raises(jsonschema.ValidationError):
        schema_type.validate(config)
    del config[extra_outer_attr]
    # An unknown property inside the "config" section must also be rejected.
    config["config"] = config.get("config") or {}
    extra_config_attr = "extra_config_attr"
    config["config"][extra_config_attr] = "should not be here"
    with pytest.raises(jsonschema.ValidationError):
        schema_type.validate(config)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,920
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/types/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Service types for implementing declaring Service behavior for Environments to use in mlos_bench.
"""
from mlos_bench.services.types.config_loader_type import SupportsConfigLoading
from mlos_bench.services.types.fileshare_type import SupportsFileShareOps
from mlos_bench.services.types.vm_provisioner_type import SupportsVMOps
from mlos_bench.services.types.local_exec_type import SupportsLocalExec
from mlos_bench.services.types.remote_exec_type import SupportsRemoteExec
__all__ = [
'SupportsConfigLoading',
'SupportsFileShareOps',
'SupportsVMOps',
'SupportsLocalExec',
'SupportsRemoteExec',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,921
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/config/environments/apps/redis/scripts/local/process_redis_results.py
|
#!/usr/bin/env python3
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Script for post-processing redis-benchmark results.
"""
import argparse
import pandas as pd
def _main(input_file: str, output_file: str) -> None:
"""
Re-shape Redis benchmark CSV results from wide to long.
"""
df_wide = pd.read_csv(input_file)
# Format the results from wide to long
# The target is columns of metric and value to act as key-value pairs.
df_long = (
df_wide
.melt(id_vars=["test"])
.assign(metric=lambda df: df["test"] + "_" + df["variable"])
.drop(columns=["test", "variable"])
.loc[:, ["metric", "value"]]
)
# Add a default `score` metric to the end of the dataframe.
df_long = pd.concat([
df_long,
pd.DataFrame({"metric": ["score"], "value": [df_long.value[df_long.index.max()]]})
])
df_long.to_csv(output_file, index=False)
print(f"Converted: {input_file} -> {output_file}")
# print(df_long)
if __name__ == "__main__":
    # CLI entry point: convert a downloaded redis-benchmark CSV into the
    # long-format CSV that the benchmarking framework consumes.
    parser = argparse.ArgumentParser(description="Post-process Redis benchmark results.")
    parser.add_argument("input", help="Redis benchmark results (downloaded from a remote VM).")
    parser.add_argument("output", help="Converted Redis benchmark data" +
                        " (to be consumed by OS Autotune framework).")
    args = parser.parse_args()
    _main(args.input, args.output)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,922
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/cli/test_load_cli_config_examples.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for loading storage config examples.
"""
from typing import List
import logging
import pytest
from mlos_bench.tests.config import locate_config_examples
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.util import path_join
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)
# Get the set of configs to test.
# Subdirectory of the built-in config tree to enumerate examples from.
CONFIG_TYPE = "cli"
def filter_configs(configs_to_filter: List[str]) -> List[str]:
    """If necessary, filter out json files that aren't for the module we're testing."""
    # Currently a pass-through: every CLI config example is relevant.
    return configs_to_filter
# Enumerate the built-in CLI config examples once at import time;
# there must be at least one, or the parametrized tests below would be empty.
configs = filter_configs(locate_config_examples(path_join(ConfigPersistenceService.BUILTIN_CONFIG_PATH, CONFIG_TYPE)))
assert configs
@pytest.mark.parametrize("config_path", configs)
def test_load_cli_config_examples(config_loader_service: ConfigPersistenceService, config_path: str) -> None:
    """Tests loading a config example."""
    config = config_loader_service.load_config(config_path, ConfigSchema.CLI)
    assert isinstance(config, dict)
    # Prepend the CLI config's own search paths to the loader, preserving their
    # relative order (reverse + insert(0) keeps the first path highest priority).
    if config_paths := config.get("config_path"):
        assert isinstance(config_paths, list)
        config_paths.reverse()
        for path in config_paths:
            config_loader_service._config_path.insert(0, path)    # pylint: disable=protected-access
    # Foreach arg that references another file, see if we can at least load that too.
    args_to_skip = {
        "config_path",      # handled above
        "globals",          # we don't commit globals to the repo generally, so skip testing them
        "log_file",
        "log_level",
        "experiment_id",
        "trial_id",
        "teardown",
    }
    for arg in config:
        if arg in args_to_skip:
            continue
        # Each remaining arg points at a sub-config file; load it against its schema.
        if arg == "environment":
            sub_config = config_loader_service.load_config(config[arg], ConfigSchema.ENVIRONMENT)
            assert isinstance(sub_config, dict)
        elif arg == "optimizer":
            sub_config = config_loader_service.load_config(config[arg], ConfigSchema.OPTIMIZER)
            assert isinstance(sub_config, dict)
        elif arg == "storage":
            sub_config = config_loader_service.load_config(config[arg], ConfigSchema.STORAGE)
            assert isinstance(sub_config, dict)
        elif arg == "tunable_values":
            # "tunable_values" is a *list* of config files.
            for path in config[arg]:
                sub_config = config_loader_service.load_config(path, ConfigSchema.TUNABLE_VALUES)
                assert isinstance(sub_config, dict)
        else:
            # Fail loudly on any CLI arg this test doesn't know how to check.
            raise NotImplementedError(f"Unhandled arg {arg} in config {config_path}")
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,923
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/schemas/environments/test_environment_schemas.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for environment schema validation.
"""
from os import path
import pytest
from mlos_core.tests import get_all_concrete_subclasses
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.environments.base_environment import Environment
from mlos_bench.environments.composite_env import CompositeEnv
from mlos_bench.environments.script_env import ScriptEnv
from mlos_bench.tests import try_resolve_class_name
from mlos_bench.tests.config.schemas import (get_schema_test_cases,
check_test_case_against_schema,
check_test_case_config_with_extra_param)
# General testing strategy:
# - hand code a set of good/bad configs (useful to test editor schema checking)
# - enumerate and try to check that we've covered all the cases
# - for each config, load and validate against expected schema
# All environment schema test cases live next to this file under "test-cases/".
TEST_CASES = get_schema_test_cases(path.join(path.dirname(__file__), "test-cases"))
# Dynamically enumerate some of the cases we want to make sure we cover.
NON_CONFIG_ENV_CLASSES = {
    ScriptEnv   # ScriptEnv is ABCMeta abstract, but there's no good way to test that dynamically in Python.
}
# Every concrete Environment subclass (except the abstract ones above)
# is expected to have schema test cases.
expected_environment_class_names = [subclass.__module__ + "." + subclass.__name__
                                    for subclass
                                    in get_all_concrete_subclasses(Environment, pkg_name='mlos_bench')
                                    if subclass not in NON_CONFIG_ENV_CLASSES]
assert expected_environment_class_names
COMPOSITE_ENV_CLASS_NAME = CompositeEnv.__module__ + "." + CompositeEnv.__name__
# "Leaf" environments are all concrete Environments except the CompositeEnv.
expected_leaf_environment_class_names = [subclass_name for subclass_name in expected_environment_class_names
                                         if subclass_name != COMPOSITE_ENV_CLASS_NAME]
# Do the full cross product of all the test cases and all the Environment types.
@pytest.mark.parametrize("test_case_subtype", sorted(TEST_CASES.by_subtype))
@pytest.mark.parametrize("env_class", expected_environment_class_names)
def test_case_coverage_mlos_bench_environment_type(test_case_subtype: str, env_class: str) -> None:
    """
    Checks to see if there is a given type of test case for the given mlos_bench Environment type.
    """
    covered = any(
        try_resolve_class_name(test_case.config.get("class")) == env_class
        for test_case in TEST_CASES.by_subtype[test_case_subtype].values())
    if not covered:
        raise NotImplementedError(
            f"Missing test case for subtype {test_case_subtype} for Environment class {env_class}")
# Now we actually perform all of those validation tests.
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_path))
def test_environment_configs_against_schema(test_case_name: str) -> None:
    """
    Checks that the environment config validates against the schema.
    """
    test_case = TEST_CASES.by_path[test_case_name]
    check_test_case_against_schema(test_case, ConfigSchema.ENVIRONMENT)
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_type["good"]))
def test_environment_configs_with_extra_param(test_case_name: str) -> None:
    """
    Checks that the environment config fails to validate if extra params are present in certain places.
    """
    test_case = TEST_CASES.by_type["good"][test_case_name]
    check_test_case_config_with_extra_param(test_case, ConfigSchema.ENVIRONMENT)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,924
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/_version.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Version number for the mlos_core package.
"""
# NOTE: This should be managed by bumpversion.
_VERSION = '0.1.0'
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,925
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/remote/azure/azure_fileshare_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.services.remote.azure.azure_fileshare
"""
import os
from unittest.mock import MagicMock, Mock, patch, call
from mlos_bench.services.remote.azure.azure_fileshare import AzureFileShareService
# pylint: disable=missing-function-docstring
# pylint: disable=too-many-arguments
# pylint: disable=unused-argument
@patch("mlos_bench.services.remote.azure.azure_fileshare.open")
@patch("mlos_bench.services.remote.azure.azure_fileshare.os.makedirs")
def test_download_file(mock_makedirs: MagicMock, mock_open: MagicMock, azure_fileshare: AzureFileShareService) -> None:
    """
    Download a single remote file: the local folder must be created and the
    target file opened for binary writing at the expected local path.
    """
    filename = "test.csv"
    remote_folder = "a/remote/folder"
    local_folder = "some/local/folder"
    # FIX: the paths must interpolate the actual file name; the `filename`
    # variable was previously unused and the paths contained a literal placeholder.
    remote_path = f"{remote_folder}/{filename}"
    local_path = f"{local_folder}/{filename}"
    mock_share_client = azure_fileshare._share_client  # pylint: disable=protected-access
    with patch.object(mock_share_client, "get_file_client") as mock_get_file_client, \
         patch.object(mock_share_client, "get_directory_client") as mock_get_directory_client:
        # The remote path is a file, not a directory.
        mock_get_directory_client.return_value = Mock(exists=Mock(return_value=False))
        azure_fileshare.download(remote_path, local_path)
    mock_get_file_client.assert_called_with(remote_path)
    mock_makedirs.assert_called_with(
        local_folder,
        exist_ok=True,
    )
    open_path, open_mode = mock_open.call_args.args
    assert os.path.abspath(local_path) == os.path.abspath(open_path)
    assert open_mode == "wb"
def make_dir_client_returns(remote_folder: str) -> dict:
    """
    Build a mapping of remote path -> mocked directory client for a small
    mock file share tree:

        <remote_folder>/
            a_folder/
                a_file_2.csv
            a_file_1.csv

    Directory paths get a client whose exists() is True and that can list its
    children; file paths get a client whose exists() is False.
    """
    def _dir_client(children: list) -> Mock:
        return Mock(exists=Mock(return_value=True),
                    list_directories_and_files=Mock(return_value=children))

    def _file_client() -> Mock:
        return Mock(exists=Mock(return_value=False))

    return {
        remote_folder: _dir_client([
            {"name": "a_folder", "is_directory": True},
            {"name": "a_file_1.csv", "is_directory": False},
        ]),
        f"{remote_folder}/a_folder": _dir_client([
            {"name": "a_file_2.csv", "is_directory": False},
        ]),
        f"{remote_folder}/a_file_1.csv": _file_client(),
        f"{remote_folder}/a_folder/a_file_2.csv": _file_client(),
    }
@patch("mlos_bench.services.remote.azure.azure_fileshare.open")
@patch("mlos_bench.services.remote.azure.azure_fileshare.os.makedirs")
def test_download_folder_non_recursive(mock_makedirs: MagicMock,
                                       mock_open: MagicMock,
                                       azure_fileshare: AzureFileShareService) -> None:
    """
    Non-recursive folder download must fetch only the top-level file
    and never descend into sub-folders.
    """
    remote_folder = "a/remote/folder"
    local_folder = "some/local/folder"
    dir_client_returns = make_dir_client_returns(remote_folder)
    mock_share_client = azure_fileshare._share_client  # pylint: disable=protected-access
    with patch.object(mock_share_client, "get_directory_client") as mock_get_directory_client, \
         patch.object(mock_share_client, "get_file_client") as mock_get_file_client:
        # Serve the mocked directory clients for the mock share tree.
        mock_get_directory_client.side_effect = dir_client_returns.__getitem__
        azure_fileshare.download(remote_folder, local_folder, recursive=False)
    mock_get_file_client.assert_called_with(
        f"{remote_folder}/a_file_1.csv",
    )
    mock_get_directory_client.assert_has_calls([
        call(remote_folder),
        call(f"{remote_folder}/a_file_1.csv"),
    ], any_order=True)
@patch("mlos_bench.services.remote.azure.azure_fileshare.open")
@patch("mlos_bench.services.remote.azure.azure_fileshare.os.makedirs")
def test_download_folder_recursive(mock_makedirs: MagicMock, mock_open: MagicMock, azure_fileshare: AzureFileShareService) -> None:
    """
    Recursive download should walk the entire remote tree and fetch both the
    top-level file and the file inside the subfolder.
    """
    remote_folder = "a/remote/folder"
    local_folder = "some/local/folder"
    dir_client_returns = make_dir_client_returns(remote_folder)
    mock_share_client = azure_fileshare._share_client  # pylint: disable=protected-access
    with patch.object(mock_share_client, "get_directory_client") as mock_get_directory_client, \
         patch.object(mock_share_client, "get_file_client") as mock_get_file_client:
        # Route each requested remote path to its canned directory client.
        mock_get_directory_client.side_effect = dir_client_returns.__getitem__
        azure_fileshare.download(remote_folder, local_folder, recursive=True)
        expected_files = [
            f"{remote_folder}/a_file_1.csv",
            f"{remote_folder}/a_folder/a_file_2.csv",
        ]
        mock_get_file_client.assert_has_calls(
            [call(path) for path in expected_files], any_order=True)
        mock_get_directory_client.assert_has_calls(
            [call(remote_folder), call(f"{remote_folder}/a_folder")] +
            [call(path) for path in expected_files],
            any_order=True)
@patch("mlos_bench.services.remote.azure.azure_fileshare.open")
@patch("mlos_bench.services.remote.azure.azure_fileshare.os.path.isdir")
def test_upload_file(mock_isdir: MagicMock, mock_open: MagicMock, azure_fileshare: AzureFileShareService) -> None:
    """
    Uploading a single local file should open it for binary read and push it
    through the file client at the matching remote path.
    """
    filename = "test.csv"
    remote_folder = "a/remote/folder"
    local_folder = "some/local/folder"
    # BUG FIX: the paths previously contained the literal text "(unknown)" and
    # never used `filename`; interpolate the file name as intended.
    remote_path = f"{remote_folder}/{filename}"
    local_path = f"{local_folder}/{filename}"
    mock_share_client = azure_fileshare._share_client  # pylint: disable=protected-access
    # Pretend the local path is a plain file so the single-file branch runs.
    mock_isdir.return_value = False
    with patch.object(mock_share_client, "get_file_client") as mock_get_file_client:
        azure_fileshare.upload(local_path, remote_path)
        mock_get_file_client.assert_called_with(remote_path)
        open_path, open_mode = mock_open.call_args.args
        assert os.path.abspath(local_path) == os.path.abspath(open_path)
        assert open_mode == "rb"
class MyDirEntry:
    # pylint: disable=too-few-public-methods
    """Minimal stand-in for os.DirEntry: just a name and a directory flag."""

    def __init__(self, name: str, is_a_dir: bool):
        self.name = name
        self.is_a_dir = is_a_dir

    def is_dir(self) -> bool:
        """Mirror os.DirEntry.is_dir() by returning the stored flag."""
        return self.is_a_dir
def make_scandir_returns(local_folder: str) -> dict:
    """
    Canned os.scandir results for a fake local tree: the top folder holds one
    subfolder and one file, and the subfolder holds a single file.
    """
    top_entries = [
        MyDirEntry("a_folder", True),
        MyDirEntry("a_file_1.csv", False),
    ]
    sub_entries = [
        MyDirEntry("a_file_2.csv", False),
    ]
    return {
        local_folder: top_entries,
        f"{local_folder}/a_folder": sub_entries,
    }
def make_isdir_returns(local_folder: str) -> dict:
    """
    Canned os.path.isdir results matching the tree from make_scandir_returns:
    the top folder and its subfolder are directories, the two CSVs are files.
    """
    dirs = (local_folder, f"{local_folder}/a_folder")
    files = (f"{local_folder}/a_file_1.csv", f"{local_folder}/a_folder/a_file_2.csv")
    result = {path: True for path in dirs}
    result.update({path: False for path in files})
    return result
def process_paths(input_path: str) -> str:
    """
    Normalize a path for dictionary lookups: strip the current working
    directory prefix (only when the input is already absolute) and convert
    Windows separators to forward slashes.
    """
    cwd = os.getcwd()
    result = input_path
    # os.path.abspath is idempotent, so equality means the input was absolute.
    if input_path == os.path.abspath(input_path):
        result = input_path[len(cwd) + 1:]
    return result.replace("\\", "/")
@patch("mlos_bench.services.remote.azure.azure_fileshare.open")
@patch("mlos_bench.services.remote.azure.azure_fileshare.os.path.isdir")
@patch("mlos_bench.services.remote.azure.azure_fileshare.os.scandir")
def test_upload_directory_non_recursive(mock_scandir: MagicMock,
                                        mock_isdir: MagicMock,
                                        mock_open: MagicMock,
                                        azure_fileshare: AzureFileShareService) -> None:
    """
    Non-recursive upload of a directory should push only the top-level file
    and skip the nested subfolder entirely.
    """
    remote_folder = "a/remote/folder"
    local_folder = "some/local/folder"
    scandir_returns = make_scandir_returns(local_folder)
    isdir_returns = make_isdir_returns(local_folder)
    # Both patched os helpers key off normalized (cwd-stripped, unix-sep) paths.
    mock_scandir.side_effect = lambda path: scandir_returns[process_paths(path)]
    mock_isdir.side_effect = lambda path: isdir_returns[process_paths(path)]
    mock_share_client = azure_fileshare._share_client  # pylint: disable=protected-access
    with patch.object(mock_share_client, "get_file_client") as mock_get_file_client:
        azure_fileshare.upload(local_folder, remote_folder, recursive=False)
        mock_get_file_client.assert_called_with(f"{remote_folder}/a_file_1.csv")
@patch("mlos_bench.services.remote.azure.azure_fileshare.open")
@patch("mlos_bench.services.remote.azure.azure_fileshare.os.path.isdir")
@patch("mlos_bench.services.remote.azure.azure_fileshare.os.scandir")
def test_upload_directory_recursive(mock_scandir: MagicMock,
                                    mock_isdir: MagicMock,
                                    mock_open: MagicMock,
                                    azure_fileshare: AzureFileShareService) -> None:
    """
    Recursive upload should walk the whole local tree and push both the
    top-level file and the file inside the subfolder.
    """
    remote_folder = "a/remote/folder"
    local_folder = "some/local/folder"
    scandir_returns = make_scandir_returns(local_folder)
    isdir_returns = make_isdir_returns(local_folder)
    # Both patched os helpers key off normalized (cwd-stripped, unix-sep) paths.
    mock_scandir.side_effect = lambda path: scandir_returns[process_paths(path)]
    mock_isdir.side_effect = lambda path: isdir_returns[process_paths(path)]
    mock_share_client = azure_fileshare._share_client  # pylint: disable=protected-access
    with patch.object(mock_share_client, "get_file_client") as mock_get_file_client:
        azure_fileshare.upload(local_folder, remote_folder, recursive=True)
        expected_uploads = [
            f"{remote_folder}/a_file_1.csv",
            f"{remote_folder}/a_folder/a_file_2.csv",
        ]
        mock_get_file_client.assert_has_calls(
            [call(path) for path in expected_uploads], any_order=True)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,926
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/remote/azure/azure_fileshare.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A collection FileShare functions for interacting with Azure File Shares.
"""
import os
import logging
from typing import Any, Dict, Optional, Set
from azure.storage.fileshare import ShareClient
from azure.core.exceptions import ResourceNotFoundError
from mlos_bench.services.base_service import Service
from mlos_bench.services.base_fileshare import FileShareService
from mlos_bench.util import check_required_params
_LOG = logging.getLogger(__name__)
class AzureFileShareService(FileShareService):
    """
    Helper methods for interacting with Azure File Share
    """

    _SHARE_URL = "https://{account_name}.file.core.windows.net/{fs_name}"

    def __init__(self,
                 config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new file share Service for Azure environments with a given config.

        Parameters
        ----------
        config : dict
            Free-format dictionary that contains the file share configuration.
            It will be passed as a constructor parameter of the class
            specified by `class_name`.
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            Parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        check_required_params(
            self.config, {
                "storageAccountName",
                "storageFileShareName",
                "storageAccountKey",
            }
        )
        self._share_client = ShareClient.from_share_url(
            AzureFileShareService._SHARE_URL.format(
                account_name=self.config["storageAccountName"],
                fs_name=self.config["storageFileShareName"],
            ),
            credential=self.config["storageAccountKey"],
        )

    def download(self, remote_path: str, local_path: str, recursive: bool = True) -> None:
        """
        Download a file or a directory tree from the Azure file share.

        Parameters
        ----------
        remote_path : str
            Path of the file or directory on the remote file share.
        local_path : str
            Local path to store the downloaded content at.
        recursive : bool
            If False, skip remote subdirectories;
            if True (the default), download the entire directory tree.

        Raises
        ------
        FileNotFoundError
            If `remote_path` does not exist on the file share.
        """
        super().download(remote_path, local_path, recursive)
        dir_client = self._share_client.get_directory_client(remote_path)
        if dir_client.exists():
            # Remote path is a directory: mirror it locally and walk its contents.
            os.makedirs(local_path, exist_ok=True)
            for content in dir_client.list_directories_and_files():
                name = content["name"]
                local_target = f"{local_path}/{name}"
                remote_target = f"{remote_path}/{name}"
                if recursive or not content["is_directory"]:
                    self.download(remote_target, local_target, recursive)
        else:  # Must be a file
            # Ensure parent folders exist
            folder, _ = os.path.split(local_path)
            if folder:  # Guard: os.makedirs("") raises when local_path is a bare file name.
                os.makedirs(folder, exist_ok=True)
            file_client = self._share_client.get_file_client(remote_path)
            try:
                data = file_client.download_file()
                with open(local_path, "wb") as output_file:
                    _LOG.debug("Download file: %s -> %s", remote_path, local_path)
                    data.readinto(output_file)  # type: ignore[no-untyped-call]
            except ResourceNotFoundError as ex:
                # Translate into non-Azure exception:
                # BUG FIX: the message was missing the f-prefix and rendered
                # the literal text "{remote_path}" instead of the path.
                raise FileNotFoundError(f"Cannot download: {remote_path}") from ex

    def upload(self, local_path: str, remote_path: str, recursive: bool = True) -> None:
        """
        Upload a file or a directory tree to the Azure file share.

        Parameters
        ----------
        local_path : str
            Path to the local file or directory to upload.
        remote_path : str
            Path in the remote file share to store the uploaded content to.
        recursive : bool
            If False, ignore the subdirectories;
            if True (the default), upload the entire directory tree.
        """
        super().upload(local_path, remote_path, recursive)
        self._upload(local_path, remote_path, recursive, set())

    def _upload(self, local_path: str, remote_path: str, recursive: bool, seen: Set[str]) -> None:
        """
        Upload contents from a local path to an Azure file share.
        This method is called from `.upload()` above. We need it to avoid exposing
        the `seen` parameter and to make `.upload()` match the base class' virtual
        method.

        Parameters
        ----------
        local_path : str
            Path to the local directory to upload contents from, either a file or directory.
        remote_path : str
            Path in the remote file share to store the uploaded content to.
        recursive : bool
            If False, ignore the subdirectories;
            if True (the default), upload the entire directory tree.
        seen: Set[str]
            Helper set for keeping track of visited directories to break circular paths.
        """
        # Track absolute paths so symlink loops do not recurse forever.
        local_path = os.path.abspath(local_path)
        if local_path in seen:
            _LOG.warning("Loop in directories, skipping '%s'", local_path)
            return
        seen.add(local_path)
        if os.path.isdir(local_path):
            dir_client = self._share_client.get_directory_client(remote_path)
            if not dir_client.exists():
                dir_client.create_directory()
            for entry in os.scandir(local_path):
                name = entry.name
                local_target = f"{local_path}/{name}"
                remote_target = f"{remote_path}/{name}"
                if recursive or not entry.is_dir():
                    self._upload(local_target, remote_target, recursive, seen)
        else:
            # Ensure parent folders exist
            folder, _ = os.path.split(remote_path)
            self._remote_makedirs(folder)
            file_client = self._share_client.get_file_client(remote_path)
            with open(local_path, "rb") as file_data:
                _LOG.debug("Upload file: %s -> %s", local_path, remote_path)
                file_client.upload_file(file_data)

    def _remote_makedirs(self, remote_path: str) -> None:
        """
        Create remote directories for the entire path.
        Succeeds even some or all directories along the path already exist.

        Parameters
        ----------
        remote_path : str
            Path in the remote file share to create.
        """
        path = ""
        # Create each ancestor in turn; empty segments (e.g. from "//") are skipped.
        for folder in remote_path.replace("\\", "/").split("/"):
            if not folder:
                continue
            path += folder + "/"
            dir_client = self._share_client.get_directory_client(path)
            if not dir_client.exists():
                dir_client.create_directory()
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.