index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
13,547
|
mperrin/jwst
|
refs/heads/master
|
/jwst/tests/compare_outputs.py
|
import copy
from datetime import datetime
import os
from difflib import unified_diff
from io import StringIO
from ci_watson.artifactory_helpers import (
get_bigdata,
BigdataError,
generate_upload_schema,
)
from astropy.io import fits
from astropy.io.fits import FITSDiff, HDUDiff
# Date stamp used to group Artifactory upload directories; evaluated once
# at import time, e.g. "2024-01-31".
TODAYS_DATE = datetime.now().strftime("%Y-%m-%d")
def compare_outputs(outputs, raise_error=True, ignore_keywords=None,
                    ignore_hdus=None, ignore_fields=None, rtol=0.0, atol=0.0,
                    input_path=None, docopy=True, results_root=None,
                    verbose=True):
    """
    Compare output with "truth" using appropriate
    diff routine; namely:

    * ``fitsdiff`` for FITS file comparisons.
    * ``unified_diff`` for ASCII products.

    Only after all elements of ``outputs`` have been
    processed will the method report any success or failure, with
    failure of any one comparison *not* preventing the rest of the
    comparisons to be performed.

    Parameters
    ----------
    outputs : list of tuple or dict
        This list defines what outputs from running the test will be
        compared. Three distinct types of values as list elements
        are supported:

        * 2-tuple : ``(test output filename, truth filename)``
        * 3-tuple : ``(test output filename, truth filename, HDU names)``
        * dict : ``{'files': (output, truth), 'pars': {key: val}}``

        If filename contains extension such as ``[hdrtab]``,
        it will be interpreted as specifying comparison of just that HDU.

    raise_error : bool
        Raise ``AssertionError`` if difference is found.

    ignore_keywords : list of str or `None`
        List of FITS header keywords to be ignored by
        ``FITSDiff`` and ``HDUDiff``.

    ignore_hdus : list of str or `None`
        List of FITS HDU names to ignore by ``FITSDiff``.
        This is only available for ``astropy>=3.1``.

    ignore_fields : list of str or `None`
        List FITS table column names to be ignored by
        ``FITSDiff`` and ``HDUDiff``.

    rtol, atol : float
        Relative and absolute tolerance to be used by
        ``FITSDiff`` and ``HDUDiff``.

    input_path : list or tuple or `None`
        A series of sub-directory names under :func:`get_bigdata_root`
        that leads to the path of the 'truth' files to be compared
        against. If not provided, it assumes that 'truth' is in the
        working directory. For example, with :func:`get_bigdata_root`
        pointing to ``/grp/test_data``, a file at::

            /grp/test_data/pipeline/dev/ins/test_1/test_a.py

        would require ``input_path`` of::

            ["pipeline", "dev", "ins", "test_1"]

    docopy : bool
        If `True`, 'truth' will be copied to output directory before
        comparison is done.

    results_root : str or `None`
        If not `None`, for every failed comparison, the test output
        is automatically renamed to the given 'truth' in the output
        directory and :func:`generate_upload_schema` will be called
        to generate a JSON scheme for Artifactory upload.
        If you do not need this functionality, use ``results_root=None``.

    verbose : bool
        Print extra info to screen.

    Returns
    -------
    creature_report : str
        Report from FITS or ASCII comparator.
        This is part of error message if ``raise_error=True``.

    Examples
    --------
    There are multiple use cases for this method, specifically
    related to how ``outputs`` are defined upon calling this method.
    The specification of the ``outputs`` can be any combination of the
    following patterns:

    1. 2-tuple inputs::

           outputs = [('file1.fits', 'file1_truth.fits')]

       This definition indicates that ``file1.fits`` should be compared
       as a whole with ``file1_truth.fits``.

    2. 2-tuple inputs with extensions::

           outputs = [('file1.fits[hdrtab]', 'file1_truth.fits[hdrtab]')]

       This definition indicates that only the HDRTAB extension from
       ``file1.fits`` will be compared to the HDRTAB extension from
       ``file1_truth.fits``.

    3. 3-tuple inputs::

           outputs = [('file1.fits', 'file1_truth.fits', ['primary', 'sci'])]

       This definition indicates that only the PRIMARY and SCI extensions
       should be compared between the two files. This creates a temporary
       ``HDUList`` object comprising only the given extensions for comparison.

    4. Dictionary of inputs and parameters::

           outputs = [{'files': ('file1.fits', 'file1_truth.fits'),
                       'pars': {'ignore_keywords': ['ROOTNAME']}}]

       This definition indicates that ROOTNAME will be ignored during
       the comparison between the files specified in ``'files'``.
       Any input parameter for ``FITSDiff`` or ``HDUDiff`` can be specified
       as part of the ``'pars'`` dictionary.
       In addition, the input files listed in ``'files'`` can also include
       an extension specification, such as ``[hdrtab]``, to limit the
       comparison to just that extension.

    This example from an actual test definition demonstrates
    how multiple input definitions can be used at the same time::

        outputs = [
            ('jw99999_nircam_f140m-maskbar_psfstack.fits',
             'jw99999_nircam_f140m-maskbar_psfstack_ref.fits'
             ),
            ('jw9999947001_02102_00002_nrcb3_a3001_crfints.fits',
             'jw9999947001_02102_00002_nrcb3_a3001_crfints_ref.fits'
             ),
            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',
                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits'),
             'pars': {'ignore_hdus': ['HDRTAB']}
             },
            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',
                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits',
                       ['primary', 'sci', 'dq']),
             'pars': {'rtol': 0.000001}
             },
            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits[hdrtab]',
                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits[hdrtab]'),
             'pars': {'ignore_keywords': ['NAXIS1', 'TFORM*'],
                      'ignore_fields': ['COL1', 'COL2']}
             }]

    .. note:: Each ``outputs`` entry in the list gets interpreted and processed
              separately.

    """
    __tracebackhide__ = True

    # Normalize the optional list arguments.  Using ``None`` sentinels
    # instead of mutable ``[]`` defaults ensures repeated calls can never
    # share (and accidentally mutate) a single default list object.
    if ignore_keywords is None:
        ignore_keywords = []
    if ignore_hdus is None:
        ignore_hdus = []
    if ignore_fields is None:
        ignore_fields = []
    if input_path is None:
        input_path = []

    default_kwargs = {'rtol': rtol, 'atol': atol,
                      'ignore_keywords': ignore_keywords,
                      'ignore_fields': ignore_fields,
                      'ignore_hdus': ignore_hdus}

    all_okay = True
    creature_report = ''
    updated_outputs = []  # To track outputs for Artifactory JSON schema

    for entry in outputs:
        diff_kwargs = copy.deepcopy(default_kwargs)
        extn_list = None
        num_entries = len(entry)

        # Dissect the entry into (actual, desired[, extension list]) plus
        # any per-entry diff parameter overrides.
        if isinstance(entry, dict):
            entry_files = entry['files']
            actual = entry_files[0]
            desired = entry_files[1]
            if len(entry_files) > 2:
                extn_list = entry_files[2]
            diff_kwargs.update(entry.get('pars', {}))
        elif num_entries == 2:
            actual, desired = entry
        elif num_entries == 3:
            actual, desired, extn_list = entry
        else:
            all_okay = False
            creature_report += '\nERROR: Cannot handle entry {}\n'.format(
                entry)
            continue

        # Split off a trailing ``[extname]`` specifier, if present.
        # It is mutually exclusive with an explicit extension list.
        # TODO: Use regex?
        if actual.endswith(']'):
            if extn_list is not None:
                all_okay = False
                creature_report += (
                    '\nERROR: Ambiguous extension requirements '
                    'for {} ({})\n'.format(actual, extn_list))
                continue
            actual_name, actual_extn = actual.split('[')
            actual_extn = actual_extn.replace(']', '')
        else:
            actual_name = actual
            actual_extn = None

        if desired.endswith(']'):
            if extn_list is not None:
                all_okay = False
                creature_report += (
                    '\nERROR: Ambiguous extension requirements '
                    'for {} ({})\n'.format(desired, extn_list))
                continue
            desired_name, desired_extn = desired.split('[')
            desired_extn = desired_extn.replace(']', '')
        else:
            desired_name = desired
            desired_extn = None

        actual = os.path.abspath(actual)

        # Get "truth" image
        try:
            os.makedirs('truth', exist_ok=True)
            os.chdir('truth')
            try:
                # Restore the working directory even when ``get_bigdata``
                # raises; previously a failed download left the process
                # stranded inside 'truth' for all subsequent entries.
                desired = get_bigdata(*input_path, desired_name,
                                      docopy=docopy)
                desired = os.path.abspath(desired)
            finally:
                os.chdir('..')
        except BigdataError:
            all_okay = False
            creature_report += '\nERROR: Cannot find {} in {}\n'.format(
                desired_name, input_path)
            continue

        if desired_extn is not None:
            desired_name = desired
            desired = "{}[{}]".format(desired, desired_extn)

        if verbose:
            print("\nComparing:\n {}\n {}".format(actual, desired))

        if actual.endswith('.fits') and desired.endswith('.fits'):
            # Build HDULists for comparison based on user-specified extensions
            if extn_list is not None:
                with fits.open(actual) as f_act:
                    with fits.open(desired) as f_des:
                        actual_hdu = fits.HDUList(
                            [f_act[extn] for extn in extn_list])
                        actual_hdu.filename = lambda: os.path.basename(actual)
                        desired_hdu = fits.HDUList(
                            [f_des[extn] for extn in extn_list])
                        desired_hdu.filename = lambda: os.path.basename(desired)
                        fdiff = FITSDiff(actual_hdu, desired_hdu,
                                         **diff_kwargs)
                        creature_report += '\na: {}\nb: {}\n'.format(
                            actual, desired)  # diff report only gives hash
            # Working with FITS files...
            else:
                fdiff = FITSDiff(actual, desired, **diff_kwargs)

            creature_report += fdiff.report()

            if not fdiff.identical:
                all_okay = False
                # Only keep track of failed results which need to
                # be used to replace the truth files (if OK).
                updated_outputs.append((actual, desired))

        elif actual_extn is not None or desired_extn is not None:
            if 'ignore_hdus' in diff_kwargs:  # pragma: no cover
                diff_kwargs.pop('ignore_hdus')  # Not applicable

            # Specific element of FITS file specified
            with fits.open(actual_name) as f_act:
                with fits.open(desired_name) as f_des:
                    actual_hdu = f_act[actual_extn]
                    desired_hdu = f_des[desired_extn]
                    fdiff = HDUDiff(actual_hdu, desired_hdu, **diff_kwargs)

            creature_report += 'a: {}\nb: {}\n'.format(actual, desired)
            creature_report += fdiff.report()

            if not fdiff.identical:
                all_okay = False
                # Only keep track of failed results which need to
                # be used to replace the truth files (if OK).
                updated_outputs.append((actual_name, desired_name))

        else:
            # ASCII-based diff
            with open(actual) as afile:
                actual_lines = afile.readlines()
            with open(desired) as dfile:
                desired_lines = dfile.readlines()

            udiff = unified_diff(actual_lines, desired_lines,
                                 fromfile=actual, tofile=desired)

            udiffIO = StringIO()
            udiffIO.writelines(udiff)
            udiff_report = udiffIO.getvalue()
            udiffIO.close()

            if len(udiff_report) == 0:
                creature_report += ('\na: {}\nb: {}\nNo differences '
                                    'found.\n'.format(actual, desired))
            else:
                all_okay = False
                creature_report += udiff_report
                # Only keep track of failed results which need to
                # be used to replace the truth files (if OK).
                updated_outputs.append((actual, desired))

    if not all_okay and results_root is not None:  # pragma: no cover
        schema_pattern, tree, testname = generate_upload_params(
            results_root, updated_outputs, verbose=verbose)
        generate_upload_schema(schema_pattern, tree, testname)

    if not all_okay and raise_error:
        raise AssertionError(os.linesep + creature_report)

    return creature_report
def generate_upload_params(results_root, updated_outputs, verbose=True):
    """
    Build the pattern, target, and test name for
    :func:`generate_upload_schema`.

    On Jenkins CI, the ``BUILD_TAG`` and ``BUILD_MATRIX_SUFFIX``
    environment variables are folded into a meaningful Artifactory
    target path; both are optional for local runs, where the current
    user name is substituted instead. The test name is derived from
    the current working directory.

    Parameters
    ----------
    results_root : str
        See :func:`compare_outputs` for more info.

    updated_outputs : list
        List containing tuples of ``(actual, desired)`` of failed
        test output comparison to be processed.

    verbose : bool
        Print extra info to screen.

    Returns
    -------
    schema_pattern, tree, testname
        Analogous to ``pattern``, ``target``, and ``testname`` that are
        passed into :func:`generate_upload_schema`, respectively.

    """
    import getpass

    # The current working directory name doubles as the test name.
    testname = os.path.split(os.path.abspath(os.curdir))[1]

    # Meaningful test dir from build info; fall back to a user-based tag
    # when not running under CI.
    # TODO: Organize results by day test was run. Could replace with git-hash
    whoami = getpass.getuser() or 'nobody'
    user_tag = 'NOT_CI_{}'.format(whoami)
    build_tag = os.environ.get('BUILD_TAG', user_tag)
    build_matrix_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', '0')
    subdir = '{}_{}_{}'.format(TODAYS_DATE, build_tag, build_matrix_suffix)
    tree = os.path.join(results_root, subdir, testname) + os.sep

    # Every failed test output becomes part of the upload pattern.
    schema_pattern = [test_result for test_result, _truth in updated_outputs]

    if verbose:
        for test_result, truth in updated_outputs:
            print("\nFailed comparison:")
            print("  {}".format(test_result))
            print("  {}".format(truth))

    return schema_pattern, tree, testname
|
{"/jwst/tests_nightly/general/miri/test_sloperpipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_image2pipeline_2b.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/niriss/test_nis_wfss_spec2.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_miri_image_detector1.py": ["/jwst/stpipe/__init__.py"], "/jwst/flatfield/flat_field_step.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_spec2pipelines.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_miri_steps_single.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py", "/jwst/master_background/__init__.py"], "/jwst/tests_nightly/general/nircam/test_wfs_combine.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/niriss/test_niriss_steps.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_tso3.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_nirspec_masterbackground.py": ["/jwst/stpipe/__init__.py"], "/jwst/regtest/test_nirspec_image2.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_calwebb_spec2_nrs_msa.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_steps.py": ["/jwst/tests/base_classes.py"], "/jwst/pipeline/linear_pipeline.py": ["/jwst/stpipe/__init__.py", "/jwst/flatfield/flat_field_step.py"], "/jwst/tests_nightly/general/nircam/test_nircam_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/fgs/test_fgs_sloper_1.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_fgs_guider.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/miri/test_miri_spec2pipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/fgs/test_guider_pipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_nircam_steps.py": ["/jwst/tests/base_classes.py"], 
"/jwst/tests_nightly/general/nircam/test_image2pipeline_2.py": ["/jwst/tests/base_classes.py"], "/jwst/tests/base_classes.py": ["/jwst/tests/compare_outputs.py"], "/jwst/master_background/tests/test_nirspec_corrections.py": ["/jwst/master_background/nirspec_corrections.py"], "/jwst/datamodels/tests/test_fits.py": ["/jwst/datamodels/util.py"], "/jwst/tests_nightly/general/fgs/test_fgs_image2_1.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_mirilrs2_slitless.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/master_background/__init__.py"], "/jwst/stpipe/tests/test_pipeline.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/niriss/test_niriss_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_fs_spec3.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_nrc_image3_1.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/stpipe/__init__.py": ["/jwst/stpipe/linear_pipeline.py"], "/jwst/wfs_combine/wfs_combine_step.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/miri/test_miri_steps.py": ["/jwst/tests/base_classes.py", "/jwst/rscd/__init__.py"], "/jwst/tests_nightly/general/niriss/test_tso3.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_mrs_spec3.py": ["/jwst/tests/base_classes.py"], "/jwst/ami/ami_analyze.py": ["/jwst/ami/nrm_model.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_msa_spec3.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_pipelines.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/regtest/test_nirspec_mos_spec2.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nircam/test_coron3_1.py": 
["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_image2pipeline_1.py": ["/jwst/tests/base_classes.py"]}
|
13,548
|
mperrin/jwst
|
refs/heads/master
|
/jwst/tests_nightly/general/nircam/test_coron3_1.py
|
import pytest
from jwst.tests.base_classes import BaseJWSTTest, raw_from_asn
from jwst.pipeline import Coron3Pipeline
@pytest.mark.bigdata
class TestCoron3Pipeline(BaseJWSTTest):
    """Regression tests for the calwebb_coron3 pipeline on NIRCam data."""

    rtol = 0.00001
    atol = 0.00001
    input_loc = 'nircam'
    ref_loc = ['test_coron3', 'truth']

    def test_coron3_1(self):
        """Regression test of calwebb_coron3 pipeline.

        Test is performed on NIRCam simulated data.
        """
        asn_name = 'jw99999-a3001_20170327t121212_coron3_001_asn.json'
        override_psfmask = 'jwst_nircam_psfmask_somb.fits'

        # get a local copy of the inputs
        asn_file = self.get_data('test_coron3', asn_name)
        psfmask_file = self.get_data('test_coron3', override_psfmask)
        for file in raw_from_asn(asn_file):
            self.get_data('test_coron3', file)

        pipe = Coron3Pipeline()
        pipe.align_refs.override_psfmask = psfmask_file
        pipe.outlier_detection.resample_data = False
        pipe.run(asn_file)

        # Build a *new* list instead of ``self.ignore_keywords += [...]``:
        # augmented assignment on an attribute inherited from the class
        # mutates the shared list in place, leaking these extra keywords
        # into every other test using the same base class.
        self.ignore_keywords = self.ignore_keywords + ['NAXIS1', 'TFORM*']
        # Copy rather than alias, so later mutation of one list cannot
        # silently change the other.
        self.ignore_fields = list(self.ignore_keywords)

        outputs = [(  # Compare psfstack product
                    'jw99999-a3001_t1_nircam_f140m-maskbar_psfstack.fits',
                    'jw99999-a3001_t1_nircam_f140m-maskbar_psfstack_ref.fits'
                    ),
                   (  # Compare psfalign product
                    'jw9999947001_02102_00001_nrcb3_a3001_psfalign.fits',
                    'jw99999-a3001_t1_nircam_f140m-maskbar_psfalign_ref.fits'
                    ),
                   (  # Compare psfsub product
                    'jw9999947001_02102_00001_nrcb3_a3001_psfsub.fits',
                    'jw9999947001_02102_00001_nrcb3_psfsub_ref.fits'
                    ),
                   (  # Compare level-2c products
                    'jw9999947001_02102_00001_nrcb3_a3001_crfints.fits',
                    'jw9999947001_02102_00001_nrcb3_a3001_crfints_ref.fits'
                    ),
                   (
                    'jw9999947001_02102_00002_nrcb3_a3001_crfints.fits',
                    'jw9999947001_02102_00002_nrcb3_a3001_crfints_ref.fits'
                    ),
                   (  # Compare i2d product
                    'jw99999-a3001_t1_nircam_f140m-maskbar_i2d.fits',
                    'jw99999-a3001_t1_nircam_f140m-maskbar_i2d_ref.fits'
                    )
                   ]
        self.compare_outputs(outputs)
|
{"/jwst/tests_nightly/general/miri/test_sloperpipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_image2pipeline_2b.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/niriss/test_nis_wfss_spec2.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_miri_image_detector1.py": ["/jwst/stpipe/__init__.py"], "/jwst/flatfield/flat_field_step.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_spec2pipelines.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_miri_steps_single.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py", "/jwst/master_background/__init__.py"], "/jwst/tests_nightly/general/nircam/test_wfs_combine.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/niriss/test_niriss_steps.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_tso3.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_nirspec_masterbackground.py": ["/jwst/stpipe/__init__.py"], "/jwst/regtest/test_nirspec_image2.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_calwebb_spec2_nrs_msa.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_steps.py": ["/jwst/tests/base_classes.py"], "/jwst/pipeline/linear_pipeline.py": ["/jwst/stpipe/__init__.py", "/jwst/flatfield/flat_field_step.py"], "/jwst/tests_nightly/general/nircam/test_nircam_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/fgs/test_fgs_sloper_1.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_fgs_guider.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/miri/test_miri_spec2pipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/fgs/test_guider_pipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_nircam_steps.py": ["/jwst/tests/base_classes.py"], 
"/jwst/tests_nightly/general/nircam/test_image2pipeline_2.py": ["/jwst/tests/base_classes.py"], "/jwst/tests/base_classes.py": ["/jwst/tests/compare_outputs.py"], "/jwst/master_background/tests/test_nirspec_corrections.py": ["/jwst/master_background/nirspec_corrections.py"], "/jwst/datamodels/tests/test_fits.py": ["/jwst/datamodels/util.py"], "/jwst/tests_nightly/general/fgs/test_fgs_image2_1.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_mirilrs2_slitless.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/master_background/__init__.py"], "/jwst/stpipe/tests/test_pipeline.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/niriss/test_niriss_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_fs_spec3.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_nrc_image3_1.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/stpipe/__init__.py": ["/jwst/stpipe/linear_pipeline.py"], "/jwst/wfs_combine/wfs_combine_step.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/miri/test_miri_steps.py": ["/jwst/tests/base_classes.py", "/jwst/rscd/__init__.py"], "/jwst/tests_nightly/general/niriss/test_tso3.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_mrs_spec3.py": ["/jwst/tests/base_classes.py"], "/jwst/ami/ami_analyze.py": ["/jwst/ami/nrm_model.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_msa_spec3.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_pipelines.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/regtest/test_nirspec_mos_spec2.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nircam/test_coron3_1.py": 
["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_image2pipeline_1.py": ["/jwst/tests/base_classes.py"]}
|
13,549
|
mperrin/jwst
|
refs/heads/master
|
/jwst/tests_nightly/general/miri/test_image2pipeline_1.py
|
import pytest
from jwst.pipeline import Image2Pipeline
from jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs
from jwst.tests.base_classes import BaseJWSTTest
@pytest.mark.bigdata
class TestImage2Pipeline(BaseJWSTTest):
    """Regression tests for the calwebb_image2 pipeline on MIRI data."""

    input_loc = 'miri'
    ref_loc = ['test_image2pipeline', 'truth']

    def test_image2pipeline1(self):
        """
        Regression test of calwebb_image2 pipeline performed on MIRI data.
        """
        # Fetch the rate file and collect the pipeline configuration files.
        input_file = self.get_data(
            'test_image2pipeline',
            'jw00001001001_01101_00001_mirimage_rate.fits')
        collect_pipeline_cfgs('cfgs')

        # Run the level-2 imaging pipeline, saving all results to disk.
        Image2Pipeline.call(input_file,
                            config_file='cfgs/calwebb_image2.cfg',
                            save_results=True)

        # Compare the calibrated and resampled products against the truth
        # files, restricting each comparison to the listed extensions.
        cal_extns = ['primary', 'sci', 'err', 'dq', 'area']
        i2d_extns = ['primary', 'sci', 'wht', 'con']
        outputs = [
            ('jw00001001001_01101_00001_mirimage_cal.fits',
             'jw00001001001_01101_00001_mirimage_cal_ref.fits',
             cal_extns),
            ('jw00001001001_01101_00001_mirimage_i2d.fits',
             'jw00001001001_01101_00001_mirimage_i2d_ref.fits',
             i2d_extns),
        ]
        self.compare_outputs(outputs)
|
{"/jwst/tests_nightly/general/miri/test_sloperpipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_image2pipeline_2b.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/niriss/test_nis_wfss_spec2.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_miri_image_detector1.py": ["/jwst/stpipe/__init__.py"], "/jwst/flatfield/flat_field_step.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_spec2pipelines.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_miri_steps_single.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py", "/jwst/master_background/__init__.py"], "/jwst/tests_nightly/general/nircam/test_wfs_combine.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/niriss/test_niriss_steps.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_tso3.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_nirspec_masterbackground.py": ["/jwst/stpipe/__init__.py"], "/jwst/regtest/test_nirspec_image2.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_calwebb_spec2_nrs_msa.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_steps.py": ["/jwst/tests/base_classes.py"], "/jwst/pipeline/linear_pipeline.py": ["/jwst/stpipe/__init__.py", "/jwst/flatfield/flat_field_step.py"], "/jwst/tests_nightly/general/nircam/test_nircam_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/fgs/test_fgs_sloper_1.py": ["/jwst/tests/base_classes.py"], "/jwst/regtest/test_fgs_guider.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/miri/test_miri_spec2pipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/fgs/test_guider_pipeline.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_nircam_steps.py": ["/jwst/tests/base_classes.py"], 
"/jwst/tests_nightly/general/nircam/test_image2pipeline_2.py": ["/jwst/tests/base_classes.py"], "/jwst/tests/base_classes.py": ["/jwst/tests/compare_outputs.py"], "/jwst/master_background/tests/test_nirspec_corrections.py": ["/jwst/master_background/nirspec_corrections.py"], "/jwst/datamodels/tests/test_fits.py": ["/jwst/datamodels/util.py"], "/jwst/tests_nightly/general/fgs/test_fgs_image2_1.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_mirilrs2_slitless.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/master_background/__init__.py"], "/jwst/stpipe/tests/test_pipeline.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/niriss/test_niriss_steps_single.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_fs_spec3.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nircam/test_nrc_image3_1.py": ["/jwst/tests/base_classes.py", "/jwst/stpipe/__init__.py"], "/jwst/stpipe/__init__.py": ["/jwst/stpipe/linear_pipeline.py"], "/jwst/wfs_combine/wfs_combine_step.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/miri/test_miri_steps.py": ["/jwst/tests/base_classes.py", "/jwst/rscd/__init__.py"], "/jwst/tests_nightly/general/niriss/test_tso3.py": ["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_mrs_spec3.py": ["/jwst/tests/base_classes.py"], "/jwst/ami/ami_analyze.py": ["/jwst/ami/nrm_model.py"], "/jwst/tests_nightly/general/nirspec/test_nirspec_msa_spec3.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/nirspec/test_pipelines.py": ["/jwst/stpipe/__init__.py", "/jwst/tests/base_classes.py"], "/jwst/regtest/test_nirspec_mos_spec2.py": ["/jwst/stpipe/__init__.py"], "/jwst/tests_nightly/general/nircam/test_coron3_1.py": 
["/jwst/tests/base_classes.py"], "/jwst/tests_nightly/general/miri/test_image2pipeline_1.py": ["/jwst/tests/base_classes.py"]}
|
13,560
|
enginoid/django-debug-toolbar-requests
|
refs/heads/master
|
/debug_toolbar_requests/models.py
|
from debug_toolbar_requests.utils import timedelta_with_milliseconds
class ResponseTimer(object):
    """Holds the wall-clock timing and result of a single HTTP response."""

    def __init__(self, start_time=None, end_time=None, response=None):
        # All three fields may be filled in later by the panel's hooks.
        self.start_time, self.end_time, self.response = (
            start_time, end_time, response)

    @property
    def duration(self):
        """Elapsed time between start and end, as a timedelta subclass."""
        elapsed = self.end_time - self.start_time
        return timedelta_with_milliseconds(seconds=elapsed)

    @property
    def request(self):
        """The request object attached to the stored response."""
        return self.response.request
|
{"/debug_toolbar_requests/models.py": ["/debug_toolbar_requests/utils.py"], "/debug_toolbar_requests/panel.py": ["/debug_toolbar_requests/models.py"]}
|
13,561
|
enginoid/django-debug-toolbar-requests
|
refs/heads/master
|
/debug_toolbar_requests/utils.py
|
from datetime import timedelta
class timedelta_with_milliseconds(timedelta):
    """A ``timedelta`` that can report its sub-second part in milliseconds."""

    def milliseconds(self):
        """Return the microseconds component rounded to whole milliseconds."""
        micros = self.microseconds
        return int(round(micros / 1000.0))
|
{"/debug_toolbar_requests/models.py": ["/debug_toolbar_requests/utils.py"], "/debug_toolbar_requests/panel.py": ["/debug_toolbar_requests/models.py"]}
|
13,562
|
enginoid/django-debug-toolbar-requests
|
refs/heads/master
|
/setup.py
|
from setuptools import setup

# Read the long description with a context manager so the file handle is
# closed deterministically (the original bare ``open(...).read()`` leaked
# the handle until garbage collection).
with open('README.rst') as readme:
    long_description = readme.read()

setup(
    name='django-debug-toolbar-requests',
    version='0.0.3',
    description=('A django-debug-toolbar panel for HTTP requests made with '
                 'the `requests` library.'),
    long_description=long_description,
    author='Fred Jonsson',
    author_email='fridrik@pyth.net',
    # NOTE(review): original URL said 'enginous', which does not match the
    # 'enginoid' account this package is published under -- typo fixed.
    url='https://github.com/enginoid/django-debug-toolbar-requests',
    license='BSD',
    packages=['debug_toolbar_requests'],
    package_data={'debug_toolbar_requests': ['templates/*.html']},
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        'Development Status :: 1 - Planning',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
{"/debug_toolbar_requests/models.py": ["/debug_toolbar_requests/utils.py"], "/debug_toolbar_requests/panel.py": ["/debug_toolbar_requests/models.py"]}
|
13,563
|
enginoid/django-debug-toolbar-requests
|
refs/heads/master
|
/debug_toolbar_requests/panel.py
|
from functools import partial
from pprint import pformat
from threading import local
import time
import requests
import requests.defaults
from django.utils.translation import ugettext_lazy as _, ngettext
from django.template.defaultfilters import truncatechars
from debug_toolbar.panels import DebugPanel
# Retain, because it won't be retrievable after monkey-patching.
from debug_toolbar_requests.models import ResponseTimer
# Saved before RequestsDebugPanel.__init__ rebinds requests.models.Request
# to its TrackedRequest subclass.
original_thread_class = requests.models.Request
class RequestsDebugPanel(DebugPanel):
"""
A panel to display HTTP requests made by the `requests` library.
"""
name = 'Requests'
template = 'debug_toolbar/panels/requests.html'
has_content = True
def receive_response(self, index, response):
    """Record the completion time and response object for request *index*."""
    timer = self.thread_locals.response_timers[index]
    timer.end_time = time.time()
    timer.response = response
def receive_request(self, index, request):
    """Record the wall-clock start time for request *index*."""
    timers = self.thread_locals.response_timers
    timers[index].start_time = time.time()
def __init__(self, *args, **kwargs):
    """Install the panel and monkey-patch ``requests``' ``Request`` class.

    Every ``Request`` constructed after this point registers hooks that
    report its start time and its response back to this panel instance.
    """
    super(RequestsDebugPanel, self).__init__(*args, **kwargs)
    # Timers are stored per-thread so concurrent Django requests do not
    # interleave their entries.
    self.thread_locals = local()
    self.thread_locals.response_timers = []
    debug_panel = self

    class TrackedRequest(original_thread_class):
        # Subclass of the pre-patch Request class; each instance reserves
        # a timer slot and wires the panel's callbacks to its hooks.
        def __init__(self, *args, **kwargs):
            super(TrackedRequest, self).__init__(*args, **kwargs)
            response_timer = ResponseTimer()
            # Hooks refer to the timer by index, so responses arriving
            # out of order still land in the right slot.
            next_index = len(debug_panel.thread_locals.response_timers)
            debug_panel.thread_locals.response_timers.append(response_timer)
            self.register_hook('pre_request',
                hook=partial(debug_panel.receive_request, next_index))
            self.register_hook('response',
                hook=partial(debug_panel.receive_response, next_index))

    # Rebind every known alias of Request to the tracking subclass.
    # TODO: in the interest of forward-compatibility, can this be done
    # more safely dynamically; e.g. by looking for use of the `Request`
    # object in all package modules?
    requests.models.Request = TrackedRequest
    requests.Request = TrackedRequest
    requests.sessions.Request = TrackedRequest
def nav_title(self):
    """Label shown in the toolbar's navigation sidebar."""
    label = _('HTTP Requests')
    return label
def title(self):
    """Heading shown on the panel's detail view."""
    heading = _('HTTP Requests')
    return heading
def nav_subtitle(self):
    """Summarize how many HTTP requests were captured on this thread."""
    count = len(self.thread_locals.response_timers)
    return ngettext("%d request", "%d requests", count) % count
def url(self):
    """This panel has no dedicated URL; return an empty string."""
    return ''
def process_response(self, _request, _response): # unused params
response_timers = self.thread_locals.response_timers
for response_timer in response_timers:
# Tack template-specific information on to the response timer
# objects to save some boilerplate in the template.
response = response_timer.response
response_timer.response.template_items = (
(_("URL"), response.url),
(_("Status"), u"{code} {reason}".format(
code=response.status_code, reason=response.reason)),
(_("Headers"), pformat(response.headers)),
(_("Body"), truncatechars(response.text, 1024)),
)
request = response_timer.request
response_timer.request.template_items = (
(_("URL"), request.url),
(_("Method"), request.method),
(_("Headers"), pformat(request.headers)),
(_("Parameters"), request.params),
# TODO: it would be nice to get the actual raw body
(_("Data"), request.data),
(_("Files"), request.files),
)
# TODO: this desperately needs tests
# TODO: the browser replay functionality calls for extraction
# into its own module.
def check_browser_compatible_headers(request):
# We only have access to the resulting headers. To verify
# that the standard `requests` headers are being sent (which
# themselves are browser-compatible), we check that the
# headers sent are exactly equivalent to the default headers
# sent by `requests`.
# As an exception, we can also support a request if it only
# adds a `Content-Type` header to the defaults sent by
# `requests`. However, we only support that header if it
# contains one of the two encodings supported by HTML4.
browser_supported_enctypes = (
# automatically sent by browser for every POST form
'application/x-www-form-urlencoded',
# sent by POST forms with `enctype` set to this
'multipart/form-data'
)
headers = request.headers.copy() # don't corrupt the original
header_name = 'Content-Type'
content_type_header = headers.get(header_name, '')
for enctype in browser_supported_enctypes:
# `startswith` is used because we might have a trailing
# semicolon: multipart/form-data; boundary=foobar
if content_type_header.startswith(enctype):
# TODO: need much safer parsing for this, find header lib
# TODO: also matches 'multipart/form-data-foo`
# TODO: messy
del headers[header_name]
return headers == requests.defaults.defaults['base_headers']
# The template displays a button in-browser allowing the user to
# repeat the call. Because this is done through a form, we cannot
# allow this for some more complex requests. Multiple conditions
# are required to determine this, and they are kept in a dict
# instead of a serial condition for traceability (for debugging,
# or to show why request can't be displayed in the template).
response_timer.request.browser_repeatability_conditions = dict(
is_get_or_post = request.method in ('GET', 'POST'),
# The browser can't send its own headers. We must ensure
# that the headers sent only use headers that won't make
# the meaning of the request semantically different, or
# headers that we can support using forms (e.g. 'enctype'
# can emulate some values of the'Content-Type' header.)
has_browser_compatible_headers = check_browser_compatible_headers(request),
# Can't repeat GET requests with anything in the body. The
# browser will just tack it on to the URL instead of using
# a GET body. (Not that GET bodies have semantic meaning in
# HTTP, but people still do strange things.)
is_not_get_with_body = any((
(request.method == 'POST'),
((not request.data) and (not request.files)),
)),
# In POST requests, you can send multipart and non-multipart
# data separately. Once browser forms have an encoding of
# `multipart/form-data`, however, every parameter will be
# sent as multipart data.
is_not_data_and_files = not (request.data and request.files),
# For POST bodies, the browser only do key-value bodies and
# not other payloads, such as strings.
is_key_value_body = isinstance(request.data, dict),
)
response_timer.request.is_browser_repeatable = all(
response_timer.request.browser_repeatability_conditions.values()
)
self.record_stats({
'response_timers': response_timers,
})
|
{"/debug_toolbar_requests/models.py": ["/debug_toolbar_requests/utils.py"], "/debug_toolbar_requests/panel.py": ["/debug_toolbar_requests/models.py"]}
|
13,576
|
goodwilrv/FlaskBlog
|
refs/heads/master
|
/FlaskBlog/model.py
|
from FlaskBlog import db,login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session user id to a User row."""
    return User.query.get(int(user_id))
class User(db.Model):
    """Application user account; one-to-many with Post via the `posts`
    relationship (Post gets a `author` backref)."""
    id = db.Column(db.Integer,primary_key=True)
    username = db.Column(db.String(20),unique=True,nullable=False)
    email = db.Column(db.String(20),unique=True,nullable=False)
    # Filename of the avatar image, not the image itself.
    image_file = db.Column(db.String(20),unique=False,nullable=False,default='default.jpg')
    # bcrypt hash (60 chars), never the plain-text password.
    password = db.Column(db.String(60),nullable=False)
    posts = db.relationship('Post',backref='author',lazy=True)

    def __repr__(self):
        return f"User('{self.username}','{self.email}','{self.image_file}')"
class Post(db.Model):
    """A blog post authored by a User (see `user_id` foreign key)."""
    id = db.Column(db.Integer,primary_key=True)
    title = db.Column(db.String(100),nullable=False)
    # Pass the callable (not utcnow()) so the timestamp is taken per insert.
    date_posted = db.Column(db.DateTime,nullable=False, default=datetime.utcnow)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer,db.ForeignKey(User.id),nullable=False)

    def __repr__(self):
        # Bug fix: previously labelled the repr "User(...)" — a copy-paste
        # from the User model; a Post must identify itself as a Post.
        return f"Post('{self.title}','{self.date_posted}')"
|
{"/FlaskBlog/routes.py": ["/FlaskBlog/model.py"]}
|
13,577
|
goodwilrv/FlaskBlog
|
refs/heads/master
|
/FlaskBlog/routes.py
|
from flask import render_template, url_for, flash, redirect
from FlaskBlog.model import User, Post
from FlaskBlog.forms import RegistrationForm, LoginForm
from FlaskBlog import app,db, bcrypt
from flask_login import UserMixin, login_user, current_user, logout_user
# Placeholder posts rendered on the home page; real Post rows are not
# queried yet.
posts = [
    {
        'author':'Gautam Kumar',
        'title':'Blog Post 1',
        'content':'First Post content',
        'date_posted':'May 28, 2020'
    },
    {
        'author':'Manisha Bhagat',
        'title':'Blog Post 2',
        'content':'second Post content',
        'date_posted':'May 29, 2020'
    }
]
@app.route('/')
@app.route('/home')
def home():
    """Render the home page with the (currently hard-coded) post list."""
    return render_template('home.html',posts=posts)
@app.route('/about')
def about():
    """Render the static about page."""
    return render_template('about.html',title = 'About Flask Title')
@app.route('/register',methods=['GET','POST'])
def register():
    """Create a new account; already-authenticated users are sent home."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    if form.validate_on_submit():
        # Store only the bcrypt hash, never the plain-text password.
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = User(username = form.username.data, email =form.email.data,password=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash('Your account has been Created, you can now login','success')
        #flash(f'Account Created for {form.username.data}!','success')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register',form=form)
@app.route('/login',methods=['GET','POST'])
def login():
    """Authenticate by email + bcrypt-checked password; sets the session
    via Flask-Login on success."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(user.password,form.password.data):
            login_user(user,remember=form.remember.data)
            return redirect(url_for('home'))
        else:
            flash('Login Unsuccessful, Please check UserName and Password','danger')
    return render_template('login.html', title='Login',form=form)
@app.route('/logout')
def logout():
    """Clear the Flask-Login session and return to the home page."""
    logout_user()
    return redirect(url_for('home'))
|
{"/FlaskBlog/routes.py": ["/FlaskBlog/model.py"]}
|
13,698
|
FrancoCuevas444/TimeTable-Generator
|
refs/heads/master
|
/TimeTableModule.py
|
from SubjectEntryModule import *
from openpyxl import *
from openpyxl.styles import *
class TimeTable:
    """Render a list of SubjectEntry objects as a weekly timetable in an
    openpyxl workbook: days across columns B-F, time slots down the rows."""

    def __init__(self, subjectList, step=30):
        # step: slot granularity in minutes (one worksheet row per slot).
        self.subjectList = subjectList
        self.step = step
        # Earliest/latest time covered by any subject; anchors row math.
        self.baseTimeRange = TimeRange.findTimeRange(subjectList)

    def rowsFromTimeRange(self, timeRange):
        """Worksheet row numbers spanned by `timeRange` (row 1 holds the
        day headers, so data starts at row 2)."""
        firstRow = 2 + (Time.distance(self.baseTimeRange.initial, timeRange.initial) // self.step)
        rows = [i + firstRow for i in range(len(timeRange)//self.step)]
        return rows

    def getCells(self, day, timeRange):
        """Cell coordinates (e.g. 'B3', 'B4') for `day` over `timeRange`."""
        column = day.getColumn()
        rows = self.rowsFromTimeRange(timeRange)
        cells = [column+str(row) for row in rows]
        return cells

    def addSubject(self, ws, subject):
        """Merge and fill the cells for one subject on each of its days."""
        for day in subject.days:
            cells = self.getCells(day, subject.timeRange)
            ws.merge_cells("{}:{}".format(cells[0], cells[-1]))
            for cell in cells:
                ws[cell] = "{}\n{}".format(subject.name, str(subject.module))
                ws[cell].style = "subjectStyle"

    def generateTable(self, filename):
        """Build the workbook (style, headers, time marks, subjects) and
        save it to `filename`."""
        wb = Workbook()
        ws = wb.active
        self.initStyle(wb)
        self.addDays(ws)
        self.addTimeMarks(ws)
        for subject in self.subjectList:
            self.addSubject(ws, subject)
        #self.setTableBorder(ws)
        wb.save(filename)

    def setTableBorder(self, ws):
        """Draw a thick outline around the whole table (currently unused;
        see the commented-out call in generateTable)."""
        thickSide = Side(style="thick", color="000000")
        boldBorder = Border(right=thickSide, left=thickSide, top=thickSide, bottom=thickSide)
        cellsRange = "A1:F{}".format(1+len(self.baseTimeRange)//self.step)
        rows = ws[cellsRange]
        for row in rows:
            row[0].border = Border(left=thickSide)
            row[-1].border = Border(right=thickSide)
        for cell in rows[0]:
            cell.border = Border(top=thickSide)

    def addDays(self, ws):
        """Write the day headers into row 1 and widen columns A-F."""
        ws["B1"] = "Lunes"
        ws["C1"] = "Martes"
        ws["D1"] = "Miércoles"
        ws["E1"] = "Jueves"
        ws["F1"] = "Viernes"
        for colName in ["A","B","C","D","E","F"]:
            col = ws.column_dimensions[colName]
            col.width = 18

    def addTimeMarks(self, ws):
        """Label column A with one '{start} - {end}' range per slot row."""
        timeList = TimeRange.timeRangeArray(self.baseTimeRange, self.step)
        i = 0
        for row in ws.iter_rows(min_row=2, max_col=1, max_row=len(timeList)):
            for cell in row:
                cell.value = str(TimeRange(timeList[i],timeList[i+1]))
                i+=1

    def initStyle(self, wb):
        """Register the shared 'subjectStyle' (centered, bordered, wrapped)
        used by addSubject."""
        centered = Alignment(horizontal="center", vertical="center", wrapText=True)
        mediumSide = Side(style="medium", color="000000")
        border = Border(left=mediumSide, right=mediumSide, top=mediumSide, bottom=mediumSide)
        subjectStyle = NamedStyle(name="subjectStyle", border=border, alignment=centered)
        wb.add_named_style(subjectStyle)
|
{"/TimeTableModule.py": ["/SubjectEntryModule.py"], "/TimeTableGenerator.py": ["/SubjectEntryModule.py", "/TimeTableModule.py", "/SubjectsPermutations.py", "/TimeModule.py"], "/SubjectEntryModule.py": ["/TimeModule.py"]}
|
13,699
|
FrancoCuevas444/TimeTable-Generator
|
refs/heads/master
|
/TimeTableGenerator.py
|
from SubjectEntryModule import *
from TimeTableModule import *
from SubjectsPermutations import *
from TimeModule import TimeRange
import sys
VERSION = 0.1
# Main entry of the program
def main():
    """CLI entry point: read args (json filename, initial time, final time),
    enumerate subject combinations, and write one .xlsx per conflict-free
    combination that fits inside the requested time window."""
    #Reading arguments from console for: JSON filename, initial time and final time
    if(len(sys.argv) < 4):
        print("Incorrect number of parameters!")
        print("You must have: json filename, initial time and final time as parameters")
        exit()
    jsonName = sys.argv[1]
    initialTime = Time.fromString(sys.argv[2])
    finalTime = Time.fromString(sys.argv[3])
    #Welcome message
    welcomeMessage(jsonName, initialTime, finalTime)
    #Counter for amount of tables generated
    table_number = 0
    myTimeRange = TimeRange(initialTime, finalTime)
    #Calculating possible combinations of subjects
    tableOfChoices = generateTableFromJSON("Resources/{}".format(jsonName))
    while(not allTrueInTable(tableOfChoices)):
        #Generate one possible combination
        choices = generateNext(tableOfChoices)
        #Grab the info for that combination
        subjects = SubjectEntry.parseJSONWithChoices("Resources/{}".format(jsonName), choices)
        #Checking if the list of subjects is compatible, and generating the table if true
        if(checkSubjectList(subjects, False)):
            myTimeTable = TimeTable(subjects)
            if(TimeRange.timeRangesInside(myTimeTable.baseTimeRange, myTimeRange)):
                myTimeTable.generateTable("Generated Tables/timeTable{}.xlsx".format(table_number))
                table_number += 1
    #Output result
    print("{} tables were generated.".format(table_number))
#Generate a list of lists of Choices from the json file
def generateTableFromJSON(filepath):
    """Parse the subjects JSON and build the permutation table.

    Returns a list of rows, one per subject; each row holds a Choice for
    every alternative schedule key (all keys except the "name"/"module"
    metadata entries), preserving JSON order.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filepath, encoding="utf-8-sig") as myFile:
        myJSON = json.load(myFile)
    return [
        [Choice(subject, choice)
         for choice in myJSON[subject]
         if choice not in ("name", "module")]
        for subject in myJSON
    ]
#Checks if the subject list doesn't have any overlapping subjects (overlapping timerange and same day)
def checkSubjectList(subjectList, printing=True):
    """Return True when no two subjects conflict; optionally print each
    conflicting pair together with the days they share."""
    overlaps = SubjectEntry.subjectListOverlap(subjectList)
    if not overlaps:
        return True
    if printing:
        print("The following subjects are overlaping: ")
        for first, second in overlaps:
            shared = [str(day) for day in SubjectEntry.daysInCommon(first, second)]
            print("{} ({}) and {} ({}) with days in common: {}".format(
                first.name, first.timeRange, second.name, second.timeRange, shared))
    return False
#Simple welcome message
def welcomeMessage(jsonfile, initial, final):
    """Print the startup banner and the current run configuration."""
    print("=============================================")
    print(" TimeTable Generator ver. {}".format(VERSION))
    print("=============================================")
    print("Current settings:")
    print(" - JSON filename: {}".format(jsonfile))
    print(" - Initial Time: {}".format(initial))
    print(" - Final Time: {}".format(final))
    print("\nGenerating...\n")
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
{"/TimeTableModule.py": ["/SubjectEntryModule.py"], "/TimeTableGenerator.py": ["/SubjectEntryModule.py", "/TimeTableModule.py", "/SubjectsPermutations.py", "/TimeModule.py"], "/SubjectEntryModule.py": ["/TimeModule.py"]}
|
13,700
|
FrancoCuevas444/TimeTable-Generator
|
refs/heads/master
|
/SubjectsPermutations.py
|
class Choice:
    """One schedule option (`code`) for a subject (`name`); `visited` is the
    odometer flag used by generateNext to enumerate permutations."""
    def __init__(self, name, code):
        self.name = name
        self.code = code
        # Not yet consumed in the current enumeration sweep.
        self.visited = False
def generateNext(table):
    """Return the next permutation from `table` as {subject name: option code}.

    `table` is a list of rows of Choice objects; the `visited` flags act as
    odometer state across calls: a fresh row yields its first option, a row
    whose right-hand rows are exhausted advances to its next unvisited
    option (resetting everything to its right), and any other row repeats
    its current (last visited) option.
    """
    nextPerm = {}
    for i in range(len(table)):
        if(allFalseInList(table[i])):
            # Fresh row: take its first option and mark it.
            nextPerm[table[i][0].name] = table[i][0].code
            table[i][0].visited = True
        elif(allTrueInTable(table[i+1:])):
            # Everything to the right is exhausted: advance this row and
            # reset the rows after it for a new sweep.
            setAllFalse(table[(i+1):])
            firstFalse = firstFalseIndex(table[i])
            nextPerm[table[i][firstFalse].name] = table[i][firstFalse].code
            table[i][firstFalse].visited = True
        else:
            # Keep this row's current option unchanged.
            lastTrue = lastTrueIndex(table[i])
            nextPerm[table[i][lastTrue].name] = table[i][lastTrue].code
    return nextPerm
def allFalseInList(choicesList):
    """True when no choice in the row has been visited (empty rows count)."""
    return all(not entry.visited for entry in choicesList)
def allTrueInTable(table):
    """True when every choice in every row has been visited (an empty table
    is vacuously exhausted)."""
    return all(entry.visited for row in table for entry in row)
def setAllFalse(table):
    """Reset the visited flag on every choice in every row (in place)."""
    for row in table:
        for entry in row:
            entry.visited = False
def firstFalseIndex(choicesList):
    """Index of the first unvisited choice, or None when all are visited."""
    return next(
        (idx for idx, entry in enumerate(choicesList) if not entry.visited),
        None,
    )
def lastTrueIndex(choicesList):
    """Index of the last visited choice, assuming the visited flags form a
    True-prefix; falls back to the final index when every flag is set."""
    for idx, successor in enumerate(choicesList[1:]):
        if not successor.visited:
            return idx
    return len(choicesList) - 1
|
{"/TimeTableModule.py": ["/SubjectEntryModule.py"], "/TimeTableGenerator.py": ["/SubjectEntryModule.py", "/TimeTableModule.py", "/SubjectsPermutations.py", "/TimeModule.py"], "/SubjectEntryModule.py": ["/TimeModule.py"]}
|
13,701
|
FrancoCuevas444/TimeTable-Generator
|
refs/heads/master
|
/TimeModule.py
|
import copy
from enum import IntEnum
# Weekdays (Monday..Friday) as an IntEnum with spreadsheet helpers.
class Days(IntEnum):
    """Spanish-named weekdays; values 0-4 map to worksheet columns B-F."""
    LUNES = 0
    MARTES = 1
    MIERCOLES = 2
    JUEVES = 3
    VIERNES = 4

    def __str__(self):
        # Human-readable Spanish day name for this member.
        return ("Lunes", "Martes", "Miércoles", "Jueves", "Viernes")[self.value]

    def getColumn(self):
        # Worksheet column letter (column A is reserved for time marks).
        return ("B", "C", "D", "E", "F")[self.value]

    def fromString(string):
        # Called as Days.fromString(...); unknown names yield None, matching
        # the original if/elif chain.
        lookup = {
            "Lunes": Days.LUNES,
            "Martes": Days.MARTES,
            "Miércoles": Days.MIERCOLES,
            "Jueves": Days.JUEVES,
            "Viernes": Days.VIERNES,
        }
        return lookup.get(string)

    def fromStringArray(stringArr):
        # Vectorised fromString, preserving input order.
        return [Days.fromString(name) for name in stringArr]
class Time:
    """A wall-clock time (hours, minutes) with ordering, formatting and
    in-place minute arithmetic.

    NOTE: defines __eq__ without __hash__, so instances are unhashable —
    same as the original design.
    """

    def __init__(self, hours, minutes):
        self.hours = hours
        self.minutes = minutes

    def fromString(timeString):
        # Called as Time.fromString("HH:MM").
        hours, minutes = timeString.split(':')
        return Time(int(hours), int(minutes))

    def __str__(self):
        # Minutes are zero-padded to two digits; hours print as-is.
        return "{}:{:02d}".format(self.hours, self.minutes)

    def add(self, minutes):
        # Advance this time in place, carrying overflow into hours.
        carry, self.minutes = divmod(self.minutes + minutes, 60)
        self.hours += carry

    def isLater(self, otherTime):
        if self.hours != otherTime.hours:
            return self.hours > otherTime.hours
        return self.minutes > otherTime.minutes

    def isEqual(self, otherTime):
        return (self.hours, self.minutes) == (otherTime.hours, otherTime.minutes)

    def distance(time1, time2):
        # Absolute difference in minutes; called as Time.distance(a, b).
        earlier, later = (time1, time2) if time1 <= time2 else (time2, time1)
        return (later.hours - earlier.hours) * 60 + (later.minutes - earlier.minutes)

    def __lt__(self, otherTime):
        return not self.isLater(otherTime) and not self.isEqual(otherTime)

    def __le__(self, otherTime):
        return not self.isLater(otherTime)

    def __eq__(self, otherTime):
        return self.isEqual(otherTime)

    def __ge__(self, otherTime):
        return self.isLater(otherTime) or self.isEqual(otherTime)

    def __gt__(self, otherTime):
        return self.isLater(otherTime)
class TimeRange:
    """An [initial, final] span of Time objects with overlap helpers."""

    def __init__(self, initial, final):
        self.initial = initial
        self.final = final

    def __str__(self):
        return "{} - {}".format(str(self.initial), str(self.final))

    def __len__(self):
        # Span length in minutes.
        return Time.distance(self.initial, self.final)

    def timeRangeArray(timeRange, step=30):
        # Called as TimeRange.timeRangeArray(...): Time marks from initial
        # to final (inclusive), every `step` minutes.  Deep copies keep the
        # caller's Time objects untouched by the in-place add().
        marks = []
        cursor = copy.deepcopy(timeRange.initial)
        while cursor <= timeRange.final:
            marks.append(copy.deepcopy(cursor))
            cursor.add(step)
        return marks

    def rangesOverlap(firstRange, secondRange):
        # True when either range has an endpoint falling inside the other.
        # The four clauses are kept verbatim: they differ from the textbook
        # "a.start < b.end and b.start < a.end" test for degenerate
        # zero-length ranges.
        return ((firstRange.initial >= secondRange.initial) and (firstRange.initial < secondRange.final)
                or (firstRange.final > secondRange.initial) and (firstRange.final <= secondRange.final)
                or (secondRange.initial >= firstRange.initial) and (secondRange.initial < firstRange.final)
                or (secondRange.final > firstRange.initial) and (secondRange.final <= firstRange.final))

    def findTimeRange(subjectList):
        # Smallest range covering every subject's timeRange; assumes a
        # non-empty list (indexes element 0), as the original did.
        lowest = subjectList[0].timeRange.initial
        highest = subjectList[0].timeRange.final
        for subject in subjectList:
            if subject.timeRange.initial < lowest:
                lowest = subject.timeRange.initial
            if subject.timeRange.final > highest:
                highest = subject.timeRange.final
        return TimeRange(lowest, highest)

    #True if second range contains first
    def timeRangesInside(first, second):
        return (first.initial >= second.initial) and (first.final <= second.final)
|
{"/TimeTableModule.py": ["/SubjectEntryModule.py"], "/TimeTableGenerator.py": ["/SubjectEntryModule.py", "/TimeTableModule.py", "/SubjectsPermutations.py", "/TimeModule.py"], "/SubjectEntryModule.py": ["/TimeModule.py"]}
|
13,702
|
FrancoCuevas444/TimeTable-Generator
|
refs/heads/master
|
/SubjectEntryModule.py
|
from TimeModule import *
import json
import io
class SubjectModule(IntEnum):
    """Kind of course session: lecture, practice, mixed, or office hours."""
    TEORICO = 0
    PRACTICO = 1
    TEOPRA = 2
    CONSULTA = 3

    def __str__(self):
        return ("Teórico", "Práctico", "Teórico/Práctico", "Consulta")[self.value]

    def fromString(moduleName):
        # Called as SubjectModule.fromString(...).  Note the JSON spelling
        # "Teo-Pra" intentionally differs from the __str__ rendering
        # "Teórico/Práctico"; unknown names yield None as before.
        lookup = {
            "Teórico": SubjectModule.TEORICO,
            "Práctico": SubjectModule.PRACTICO,
            "Teo-Pra": SubjectModule.TEOPRA,
            "Consulta": SubjectModule.CONSULTA,
        }
        return lookup.get(moduleName)
class SubjectEntry:
    """One schedulable course offering: name, session kind (SubjectModule),
    time range and the weekdays it meets."""

    def __init__(self, name, module, timeRange, days):
        self.name = name
        self.module = module
        self.timeRange = timeRange
        self.days = days

    def printEntry(self):
        """Dump this entry to stdout for debugging."""
        print("-------------------------")
        print("Subject : {}\nInitial Time : {}\nFinal Time : {}\nDays : {}".format(self.name, self.timeRange.initial, self.timeRange.final, [str(day) for day in self.days]))
        print("-------------------------")

    def parseJSON(filepath):
        """Called as SubjectEntry.parseJSON(...): read every subject from a
        JSON file where each subject object carries its schedule fields
        ("initialTime", "finalTime", "days") directly."""
        allSubjects = []
        myfile = open(filepath, encoding="utf-8-sig")
        myJSON = json.load(myfile)
        for subject in myJSON:
            subJSON = myJSON[subject]
            newSubjectEntry = SubjectEntry(subJSON["name"],
                SubjectModule.fromString(subJSON["module"]),
                TimeRange(Time.fromString(subJSON["initialTime"]),
                    Time.fromString(subJSON["finalTime"])),
                Days.fromStringArray(subJSON["days"]))
            allSubjects.append(newSubjectEntry)
        return allSubjects

    def parseJSONWithChoices(filepath, choices):
        """Like parseJSON, but each subject's schedule is read from the
        option keyed by choices[subject] (see SubjectsPermutations)."""
        allSubjects = []
        myfile = open(filepath, encoding="utf-8-sig")
        myJSON = json.load(myfile)
        for subject in myJSON:
            subJSON = myJSON[subject]
            newSubjectEntry = SubjectEntry(subJSON["name"],
                SubjectModule.fromString(subJSON["module"]),
                TimeRange(Time.fromString(subJSON[choices[subject]]["initialTime"]),
                    Time.fromString(subJSON[choices[subject]]["finalTime"])),
                Days.fromStringArray(subJSON[choices[subject]]["days"]))
            allSubjects.append(newSubjectEntry)
        return allSubjects

    def daysInCommon(firstSubject, secondSubject):
        """Weekdays on which both subjects meet (order unspecified)."""
        return list(set(firstSubject.days).intersection(secondSubject.days))

    def subjectsOverlap(firstSubject, secondSubject):
        """True when the two subjects share a day and their time ranges overlap."""
        return (SubjectEntry.daysInCommon(firstSubject, secondSubject) != []) and TimeRange.rangesOverlap(firstSubject.timeRange, secondSubject.timeRange)

    def subjectListOverlap(subjectList):
        """All conflicting pairs among `subjectList`, each as a 2-item list."""
        overlapingList = []
        for i in range(len(subjectList) - 1):
            for j in range(i+1, len(subjectList)):
                if(SubjectEntry.subjectsOverlap(subjectList[i], subjectList[j])):
                    overlapingList.append([subjectList[i], subjectList[j]])
        return overlapingList
|
{"/TimeTableModule.py": ["/SubjectEntryModule.py"], "/TimeTableGenerator.py": ["/SubjectEntryModule.py", "/TimeTableModule.py", "/SubjectsPermutations.py", "/TimeModule.py"], "/SubjectEntryModule.py": ["/TimeModule.py"]}
|
13,703
|
sebastian-philipp/test-rook-orchestrator
|
refs/heads/master
|
/test_rook.py
|
import json
import pytest
import requests
from fixtures import _orch_exec, _wait_for_condition, _service_exist, _ceph_exec, ceph_cluster, \
get_pods, pods_started, dashboard_url, dashboard_token_header
def test_status(ceph_cluster):
    """`orchestrator status` runs without error on a fresh cluster."""
    _orch_exec('status')
def test_service_ls(ceph_cluster):
    """`service ls` reports at least two services (mon + mgr expected)."""
    svs = json.loads(_orch_exec('service ls --format=json'))
    assert len(svs) >= 2
def test_device_ls(ceph_cluster):
    """`device ls` sees at least two devices across the nodes."""
    svs = json.loads(_orch_exec('device ls --format=json'))
    assert len(svs) >= 2
def test_mon_update(ceph_cluster):
    """Scaling mons to 3 eventually yields three mon pods."""
    assert len(get_pods(labels='app=rook-ceph-mon')) < 3
    _orch_exec('mon update 3')
    # Two checkpoints in order to keep the timeouts low:
    _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-mon')) >= 2)
    _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-mon')) == 3)
def test_osd_create(ceph_cluster):
    """Creating OSDs on two nodes brings up osd pods and the osd service."""
    assert 'osd' not in _orch_exec('service ls')
    #_orch_exec('osd create kubic-1:vdb --encrypted=true')
    #_orch_exec('osd create kubic-2:vdb --osds-per-device=2')
    _orch_exec('osd create kubic-1:vdb')
    _orch_exec('osd create kubic-2:vdb')
    # Two checkpoints in order to keep the timeouts reasonable:
    _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-osd')) >= 1, timeout=180)
    _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-osd')) >= 2, timeout=120)
    #_wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-osd')) >= 3, timeout=120)
    _wait_for_condition(lambda: _service_exist('osd'))
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-osd'))
def test_nfs(ceph_cluster):
    """Add and remove an NFS gateway; requires OSDs (pool backing)."""
    assert _service_exist('osd')
    if not 'nfs-ganesha' in _ceph_exec('osd pool ls'):
        _ceph_exec("osd pool create nfs-ganesha 64")
    assert not _service_exist('nfs')
    # TODO: test update_nfs_count
    _orch_exec("nfs add mynfs nfs-ganesha mynfs")
    _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-nfs')) >= 1, timeout=120)
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-nfs'), timeout=60)
    _wait_for_condition(lambda: _service_exist('nfs'))
    _orch_exec("nfs rm mynfs")
    _wait_for_condition(lambda: not _service_exist('nfs'))
    _wait_for_condition(lambda: not get_pods(labels='app=rook-ceph-nfs'))
def test_mds(ceph_cluster):
    """Creating/removing a fs volume starts and stops the MDS pods."""
    assert not _service_exist('mds')
    _ceph_exec('fs volume create myname')
    _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-mds')) == 2)
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-mds'))
    _wait_for_condition(lambda: _service_exist('mds'))
    _ceph_exec('fs volume rm myname')
    _wait_for_condition(lambda: not _service_exist('mds'))
    _wait_for_condition(lambda: not get_pods(labels='app=rook-ceph-mds'))
#@pytest.mark.skip(reason="needs image rebuild")
def test_rgw(ceph_cluster):
    """Add and remove an RGW instance via the orchestrator."""
    assert not _service_exist('rgw')
    _orch_exec("rgw add myrgw")
    _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-rgw')) >= 1, timeout=60)
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-rgw'))
    _wait_for_condition(lambda: _service_exist('rgw'))
    _orch_exec("rgw rm myrgw")
    _wait_for_condition(lambda: not _service_exist('rgw'))
    _wait_for_condition(lambda: not get_pods(labels='app=rook-ceph-rgw'))
def test_dashboard(ceph_cluster):
    """The dashboard API answers an authenticated /api/summary request."""
    url = f'{dashboard_url()}/api/summary'
    headers = dashboard_token_header(dashboard_url())
    requests.get(url, verify=False, headers=headers).raise_for_status()
|
{"/test_rook.py": ["/fixtures.py"]}
|
13,704
|
sebastian-philipp/test-rook-orchestrator
|
refs/heads/master
|
/fixtures.py
|
import base64
import time
from subprocess import check_output, CalledProcessError
from typing import List
import requests
import yaml
from kubernetes import client, config
#from kubetest import utils, objects
#from kubetest.client import TestClient
from kubernetes.client import V1Pod, V1Service, V1ServicePort, V1Secret
from pytest import fixture
# Ceph container image pinned into the CephCluster manifests.
ceph_image = 'ceph/daemon-base:latest-master'
#ceph_image = '192.168.122.1:5000/ceph/ceph:latest'
def download_rook_manifests():
    """Fetch the upstream rook example manifests, patch them for this test
    environment, and write each one next to this file as <name>.yaml."""
    def change_flexvolume(text):
        # Patch every YAML document: point the operator at the kubelet
        # flexvolume dir, and pin the ceph image / allow unsupported
        # versions on cephVersion specs.  Non-matching docs pass through.
        yamls = list(yaml.safe_load_all(text))
        for y in yamls:
            try:
                if y['metadata']['name'] == 'rook-ceph-operator':
                    flex = dict(name='FLEXVOLUME_DIR_PATH', value="/var/lib/kubelet/volumeplugins")
                    y['spec']['template']['spec']['containers'][0]['env'].append(flex)
            except (KeyError, TypeError):
                pass
            try:
                y['spec']['cephVersion']['allowUnsupported'] = True
                y['spec']['cephVersion']['image'] = ceph_image
            except (KeyError, TypeError):
                pass
        return yaml.safe_dump_all(yamls)

    def download(name):
        url = 'https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/{}.yaml'.format(name)
        r = requests.get(url)
        r.raise_for_status()
        with open(name + '.yaml', 'w') as f:
            f.write(change_flexvolume(r.text))

    for name in ['common', 'operator', 'cluster-minimal', 'toolbox', 'dashboard-external-https']:
        download(name)
# @fixture(scope='module')
def rook_operator():
    """Deploy the rook operator (idempotent) and wait until the operator,
    agent and discover pods are all running."""
    download_rook_manifests()
    if not get_pods(labels='app=rook-ceph-operator'):
        check_output('./deploy-rook-operator.sh')
    _wait_for_condition(lambda: get_pods(labels='app=rook-ceph-operator'), 240)
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-operator'), 240)
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-agent'), 240)
    _wait_for_condition(lambda: pods_started(labels='app=rook-discover'), 240)
@fixture(scope='module')
def ceph_cluster():
    """Module-scoped pytest fixture: bring up a minimal ceph cluster (plus
    toolbox and external dashboard service), yield, then tear it all down."""
    config.load_kube_config()
    rook_operator()
    check_output('kubectl apply -f cluster-minimal.yaml', shell=True)
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-mon'), 240)
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-mgr'), 240)
    check_output('kubectl apply -f toolbox.yaml', shell=True)  # now depends on running cluster.
    _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-tools'), 240)
    _wait_for_condition(lambda: _service_exist('mon'))
    _wait_for_condition(lambda: _service_exist('mgr'))
    check_output('kubectl apply -f dashboard-external-https.yaml', shell=True)
    yield None
    check_output('./undeploy-rook-ceph.sh')
def _service_exist(name):
    """True when `name` appears in the orchestrator `service ls` output;
    a failed toolbox invocation counts as "not there"."""
    try:
        listing = _orch_exec('service ls')
    except CalledProcessError:
        return False
    return name in listing
def _orch_exec(cmd):
    """Run `ceph orchestrator <cmd>` inside the toolbox pod."""
    return _ceph_exec('orchestrator ' + cmd)
def _ceph_exec(cmd):
    """Run `ceph <cmd>` inside the toolbox pod."""
    return _toolbox_exec('ceph ' + cmd)
def _toolbox_exec(cmd):
    """Execute `cmd` inside the rook-ceph-tools pod via kubectl; the outer
    (60s) and inner (30s) `timeout`s guard against hung kubectl/commands."""
    return check_output(f"""timeout 60 kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{{.items[0].metadata.name}}') -- timeout 30 {cmd}""", shell=True).decode('utf-8')
def dashboard_url():
    """Dashboard URL via the external service's first port, addressed at
    the node currently hosting a mgr pod."""
    service: V1Service = client.CoreV1Api().read_namespaced_service('rook-ceph-mgr-dashboard-external-https', 'rook-ceph')
    ports: List[V1ServicePort] = service.spec.ports
    mgr = get_pods(labels='app=rook-ceph-mgr')[0]
    return f'https://{mgr.status.host_ip}:{ports[0].node_port}'
def dashboard_password():
    """Admin password from the dashboard secret (base64-decoded)."""
    s: V1Secret = client.CoreV1Api().read_namespaced_secret('rook-ceph-dashboard-password', 'rook-ceph')
    return base64.b64decode(s.data['password']).decode('utf-8')
def dashboard_token_header(url):
    """Log in to the dashboard as admin and return the Authorization
    header to use on subsequent API calls."""
    r = requests.post(f'{url}/api/auth', json={'username': 'admin', 'password': dashboard_password()}, verify=False)
    return {'Authorization': f"Bearer {r.json()['token']}"}
def _wait_for_condition(condition, timeout=30):
max_time = time.time() + timeout
while True:
if time.time() >= max_time:
raise TimeoutError(
'timed out ({}s) while waiting for condition {}'
.format(timeout, str(condition))
)
if condition():
break
time.sleep(1)
def get_pods(namespace='rook-ceph', fields: str=None, labels: str=None) -> List[V1Pod]:
    """List pods in `namespace`, optionally filtered by field/label selectors."""
    return client.CoreV1Api().list_namespaced_pod(
        namespace=namespace,
        **_field_labels_kwargs(fields, labels)
    ).items
def _field_labels_kwargs(fields, labels):
kwargs = {}
if fields:
kwargs['field_selector'] = fields
if labels:
kwargs['label_selector'] = labels
return kwargs
def containers_started(p: V1Pod):
    """True when every container in the pod reports a running start time;
    any missing status attribute (AttributeError/TypeError) yields False,
    keeping the original best-effort behaviour."""
    try:
        statuses = p.status.container_statuses
        return all(cs.state.running.started_at is not None for cs in statuses)
    except (AttributeError, TypeError):
        return False
def pods_started(namespace='rook-ceph', fields: str=None, labels: str=None):
    """True when at least one matching pod exists and every one of them is
    fully started (all containers running)."""
    pods = get_pods(namespace, fields=fields, labels=labels)
    return bool(pods) and all(containers_started(p) for p in pods)
if __name__ == '__main__':
    # Manual smoke test: print dashboard info and hit the summary endpoint.
    config.load_kube_config()
    print(dashboard_url())
    print(dashboard_password())
    url = f'{dashboard_url()}/api/summary'
    headers = dashboard_token_header(dashboard_url())#
    requests.get(url, verify=False, headers=headers).raise_for_status()
|
{"/test_rook.py": ["/fixtures.py"]}
|
13,728
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/test/test_cargo_lock_parser.py
|
import unittest
import os
import sys
# Make the project root importable when the tests run from the test/ dir.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0,path)
import cargo_lock_parser as parser
from cargo_lock_parser import LockRoot, LockDependency, LockPackage, LockFile
class TestCargoLockParser(unittest.TestCase):
    """Exercises cargo_lock_parser against the checked-in Cargo.lock.

    The expected names/versions/counts below are pinned to that fixture;
    regenerating the lock file requires updating these constants.
    """

    def setUp(self):
        # Expected values copied by hand from the fixture Cargo.lock.
        self.path = os.path.join(path,"test")
        self.root = LockRoot()
        self.root.name = "webvr_traits"
        self.root.version = "0.0.1"
        self.depend1 = LockDependency()
        self.depend1.name = "ipc-channel"
        self.depend1.version = "0.7.0"
        self.depend1.source = "(registry+https://github.com/rust-lang/crates.io-index)"
        self.package = LockPackage()
        self.package.name = "servo"
        self.package.version = "0.0.1"
        self.package.source = ""
        self.depend2 = LockDependency()
        self.depend2.name = "android_injected_glue"
        self.depend2.version = "0.2.1"
        self.depend2.source = "(git+https://github.com/mmatyas/android-rs-injected-glue)"

    def test_lock_file_parse(self):
        """Parse Cargo.lock and spot-check root, packages and dependencies."""
        file = parser.lock_file_parse("Cargo.lock")
        self.assertEqual(type(file),LockFile)
        root = file.root
        self.assertEqual(root.name,self.root.name)
        self.assertEqual(root.version,self.root.version)
        self.assertEqual(len(root.dependencies),5)
        self.assertEqual(root.dependencies[0].name,self.depend1.name)
        self.assertEqual(root.dependencies[0].version,self.depend1.version)
        self.assertEqual(root.dependencies[0].source,self.depend1.source)
        self.assertEqual(len(file.packages),319)
        pkg = file.packages["servo"]
        self.assertEqual(pkg.name,self.package.name)
        self.assertEqual(pkg.version,self.package.version)
        self.assertEqual(pkg.source,self.package.source)
        self.assertFalse(pkg.upgrade_available)
        self.assertEqual(len(pkg.dependencies),18)
        dpd = pkg.dependencies[0]
        self.assertEqual(dpd.name,self.depend2.name)
        self.assertEqual(dpd.version,self.depend2.version)
        self.assertEqual(dpd.source,self.depend2.source)
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,729
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/setup.py
|
# Packaging metadata for the servo-dependency-tool.
# NOTE(review): `install_requires` is a setuptools keyword; plain
# distutils.core.setup does not support it (distutils warns about unknown
# options and ignores them), so the gitpython/github3.py requirements are
# likely not enforced by this script — consider `from setuptools import
# setup` instead. TODO confirm against the project's packaging workflow.
from distutils.core import setup
setup(
    name='servo-dependency-tool',
    description='Tool for automatically upgrading Cargo dependencies.',
    install_requires=['gitpython','github3.py'],
)
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,730
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/crates_io_checker.py
|
from git import Repo, Remote
import git
import os
import platform
import json
import repo_management
CRATES = "crates.io-index"
depend = {}
# Delete repo and files when done
def cleanup(path=None):
    """Remove the local crates.io-index clone.

    Improvement: the previous implementation built platform-specific shell
    commands (``rmdir /S /Q`` on Windows, ``rm -rf`` elsewhere) and ran
    them through ``os.system``. ``shutil.rmtree`` is cross-platform, does
    not spawn a shell, and is not vulnerable to shell metacharacters in
    the path.

    :param path: directory to delete. Defaults to the module-level CRATES
        clone, so existing ``cleanup()`` callers are unaffected.
    """
    import shutil  # local import keeps the module's top-level imports unchanged

    target = CRATES if path is None else path
    # ignore_errors matches the old best-effort behaviour of os.system,
    # which also ignored failures (e.g. the directory not existing).
    shutil.rmtree(target, ignore_errors=True)
# Check for upgrades for input packages
def check_upgrade(package, registry=None):
    """Check whether a newer version of *package* is known to the index.

    Side effect: ``package.version`` is bumped to the latest known version
    (even when it is already current, matching the original behaviour).

    :param package: object with ``name`` and ``version`` attributes.
    :param registry: optional mapping of package name -> list of version
        dicts (newest last). Defaults to the module-level ``depend`` cache,
        so existing single-argument callers are unaffected.
    :return: True if the latest version differs from the current one.
    """
    catalog = depend if registry is None else registry
    if package.name not in catalog:
        # Package was never loaded into the cache (lookup failed earlier).
        print("Package not found")
        return False
    # Compare input version vs latest version; entries are newest-last.
    current = package.version
    latest = catalog[package.name][-1]["vers"]
    print("Checking for '%s' upgrades... current= %s, latest= %s" % (package.name, current, latest))
    package.version = latest
    return current != latest
# Read dependency information from crates.io-index file and store in dict
def read_file(path):
    """Parse a crates.io-index file (one JSON object per line) and cache
    the decoded entries in the global ``depend`` dict, keyed by the file's
    base name (which is the crate name in the index layout).

    A ``None`` path (package not found upstream) is silently ignored.
    """
    if path is None:
        return
    with open(path, 'r') as handle:
        entries = [json.loads(raw) for raw in handle]
    depend[os.path.basename(path)] = entries
# Check if file is in the current path
def check_folder(name, path):
    """Return the full path of *name* inside directory *path*.

    A missing directory is treated the same as a missing file: both
    return None.
    """
    try:
        entries = os.listdir(path)
    except FileNotFoundError:
        return None
    if name in entries:
        return os.path.join(path, name)
    return None
# Check if a package exists in crates.io-index
def check_package(package):
    """Locate the index file for *package* inside the crates.io-index clone.

    The index shards files by name length (mirrored by this lookup):
    1- and 2-character names sit directly under "1/" or "2/", 3-character
    names under "3/<first letter>/", and longer names are nested under
    successive two-character segments of the name (e.g. "se/rv/servo").

    :param package: object with a ``name`` attribute (e.g. LockPackage).
    :return: path of the index file, or None if the package is not found.
    """
    file = None
    pack = package.name
    if len(pack) > 3:
        # Two-character chunks of the name are the candidate directory names.
        split = [pack[i:i+2] for i in range(0, len(pack), 2)]
        path = os.path.join(CRATES, split[0])
        i = 0
        # Descend one chunk at a time, checking each level for the file,
        # until it is found, the directory tree ends, or the chunks run out.
        while file is None:
            i += 1
            if os.path.exists(path):
                file = check_folder(pack, path)
            else:
                # path doesn't exist
                break
            try:
                path = os.path.join(path, split[i])
            except IndexError:
                # No more chunks to descend into.
                break
    else:
        if len(pack) == 3:
            file = check_folder(pack, os.path.join(CRATES, "3", pack[0]))
        else:
            # 1- and 2-character names live under "1/" or "2/" directly.
            file = check_folder(pack, os.path.join(CRATES, str(len(pack))))
    if file is None:
        print("Package '%s' Not Found" % pack)
        return
    else:
        # print("Found package '%s'"%pack)
        return file
def clone_crates():
    """Clone the crates.io-index repository into the CRATES directory.

    If a clone already exists, ``Repo.clone_from`` raises
    ``GitCommandError`` and we fall back to pulling the latest master of
    the existing clone instead.
    """
    try:
        print("Cloning crates.io-index repository...(This may take a while)") # Git submodules may avoid this
        repo = Repo.clone_from("https://github.com/rust-lang/crates.io-index.git", CRATES)
    except git.exc.GitCommandError:
        # crates.io-index repo already exists
        git_path = os.path.abspath(os.path.join(os.path.dirname((CRATES)),CRATES))
        repo_management.pull(git_path)
def check(p):
    """Check a single package for an available upgrade.

    Side effects: loads the package's index file into the global ``depend``
    cache, bumps ``p.version`` to the latest known version (done inside
    check_upgrade) and sets ``p.upgrade_available`` accordingly.

    :param p: package object with ``name``/``version``/``upgrade_available``.
    """
    f = check_package(p)
    read_file(f)
    p.upgrade_available = check_upgrade(p)
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,731
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/test/test_cargo_toml_updater.py
|
import unittest
import os
import sys
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0,path)
import cargo_toml_updater as updater
from cargo_lock_parser import lock_file_parse
class TestCargoTOMLUpdater(unittest.TestCase):
    """Tests for cargo_toml_updater.toml_file_update.

    Relies on Cargo.lock and Cargo.toml fixture files being present in the
    current working directory.
    """
    def setUp(self):
        """Parse the lock fixture and mark a few packages as upgradable so
        the updater has something to rewrite; remember the pristine
        Cargo.toml so tearDown can restore it."""
        self.lock_file = lock_file_parse("Cargo.lock")
        self.lock_file.packages["servo"].upgrade_available = True
        self.lock_file.packages["toml"].upgrade_available = True
        self.lock_file.packages["rustc-serialize"].upgrade_available = True
        with open("Cargo.toml",'r') as f:
            self.original = f.read()
    def test_toml_file_update(self):
        """Running the updater should change the Cargo.toml contents."""
        updater.toml_file_update("Cargo.toml",self.lock_file)
        with open("Cargo.toml",'r') as f:
            update = f.read()
        self.assertNotEqual(self.original,update)
    def tearDown(self):
        # Restore the pristine Cargo.toml after each test.
        with open("Cargo.toml",'w') as f:
            f.write(self.original)
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,732
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/repo_management.py
|
from git import Repo, Remote
from github3 import login
import git
import traceback
# Function that takes the local git clone directory path and the new branch name as parameters
# and create a new branch in both the local repository and remote
def create_new_branch(path, branch_name):
    """Create *branch_name* at the current HEAD and push it to origin.

    :param path: path of the local git clone.
    :param branch_name: name of the branch to create.
    """
    repo = Repo(path)
    new_branch = repo.create_head(branch_name)
    # NOTE(review): bare attribute access — it does not move HEAD or commit
    # anything; verify whether it was intended to do something.
    new_branch.commit
    repo.git.push("origin", branch_name)
# Function that pushes changes to the current branch of the remote repository (should be the newly created branch).
def push(path, branch_name, message):
    """Stage, commit, and push all local changes to origin/<branch_name>.

    :param path: path of the local git clone.
    :param branch_name: branch to check out and push to.
    :param message: commit message.

    Any failure (checkout, commit, push) is caught and its traceback
    printed rather than propagated to the caller.
    """
    try:
        repo = Repo(path)
        print('Currently on branch: %s' % repo.head.ref)
        repo.git.checkout(branch_name)
        print('Switched to branch: %s' % repo.head.ref)
        print(repo.git.add("."))
        print(repo.git.commit(m=message))
        repo.git.push("origin", branch_name)
        print('')
        print(repo.git.status())
    except Exception:
        traceback.print_exc()
# Function that pulls everything from the master branch of the the remote repository to the local git clone.
def pull(path):
    """Pull the master branch from origin into the local clone at *path*.

    Improvement: removed the unused local ``s`` that captured the pull
    result without ever being read.

    Any failure is caught and its traceback printed rather than
    propagated, matching the module's other functions.
    """
    try:
        repo = git.Repo(path)
        origin = repo.remotes.origin
        # only pulls the master branch
        origin.pull("master")
        # print(repo.git.status())
    except Exception:
        traceback.print_exc()
# Function that opens a pull request against Servo's github repository from a particular branch on a fork.
def pull_request(username, password, title, base, head, body=None):
    """Open a pull request on the Servo/servo GitHub repository.

    :param str username: GitHub username used to authenticate.
    :param str password: GitHub password used to authenticate.
    :param str title: (required) The title of the pull request.
    :param str base: (required), The branch of the servo repo which you want the changes pulled into. e.g., 'master'
    :param str head: (required), The place where your changes are implemented. e.g. 'qiufengyu21:master'
    :param str body: (optional), The contents of the pull request.
    """
    # Login to the forked repo
    gh = login(username, password)
    # Create a Repository instance of servo (with owner Servo and repo name servo)
    repo = gh.repository("Servo", "servo")
    # Now create the pull request
    repo.create_pull(title, base, head, body)
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,733
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/test/test_crates_io_checker.py
|
import unittest
import os
import sys
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0,path)
import crates_io_checker as crates
from cargo_lock_parser import LockPackage
class TestCratesIOChecker(unittest.TestCase):
    """Tests for crates_io_checker against an in-memory registry and the
    cargo_test fixture directory under test/."""

    def setUp(self):
        self.path = os.path.join(path,"test")
        # A package that does not exist in the fake registry below.
        self.package = LockPackage()
        self.package.name = "unittest"
        self.package.version = "1.0"
        self.package.source = "src"
        # Same crate at the older of the two registry versions.
        self.package1 = LockPackage()
        self.package1.name = "servo"
        self.package1.version = "1.0"
        # Bug fix: this assignment (and the one for package2 below)
        # previously wrote to self.package instead of the package being
        # configured, leaving package1/package2 with empty sources.
        self.package1.source = "test"
        # Same crate already at the latest registry version.
        self.package2 = LockPackage()
        self.package2.name = "servo"
        self.package2.version = "2.0"
        self.package2.source = "test"
        # Install a fake registry and point the checker at the fixture dir.
        crates.depend = {"servo":[{"name":"servo","vers":"1.0","deps":[]},{"name":"servo","vers":"2.0","deps":[]}]}
        crates.CRATES = "cargo_test"

    def test_check_upgrade(self):
        """Upgrade is reported only when a newer version exists."""
        self.assertFalse(crates.check_upgrade(self.package))
        self.assertTrue(crates.check_upgrade(self.package1))
        self.assertFalse(crates.check_upgrade(self.package2))

    def test_read_file(self):
        """Reading an index file populates the depend cache by crate name."""
        self.assertIsNone(crates.read_file(None))
        self.assertFalse('testing' in crates.depend.keys())
        read = crates.check_folder("testing",os.path.join(self.path,crates.CRATES,"te","st"))
        self.assertIsNotNone(read)
        crates.read_file(read)
        self.assertTrue('testing' in crates.depend.keys())

    def test_check_folder(self):
        """check_folder returns the joined path only for existing files."""
        self.assertIsNone(crates.check_folder("test.txt",self.path))
        self.assertEqual(os.path.join(self.path,"test_crates_io_checker.py"),crates.check_folder("test_crates_io_checker.py",self.path))

    def test_check_package(self):
        """check_package finds crates present in the fixture index only."""
        test = LockPackage()
        test.name = "testing"
        self.assertIsNotNone(crates.check_package(test))
        self.assertIsNone(crates.check_package(self.package))

    def test_check(self):
        """End-to-end check sets upgrade_available correctly."""
        crates.check(self.package1)
        self.assertTrue(self.package1.upgrade_available)
        crates.check(self.package2)
        self.assertFalse(self.package2.upgrade_available)
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,734
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/servo_dependency_tool.py
|
# Servo Dependency Tool
#
# Updates all Cargo dependencies in a Servo checkout to their latest
# crates.io versions, then pushes the changes to a new branch and opens a
# pull request.
#
# Authors:
#     Chris Brown (dcbrow10@ncsu.edu)
#     Bradford Ingersoll (bingers@ncsu.edu)
#     Qiufeng Yu (qyu4@ncsu.edu)
import datetime
import getpass
import os
import shutil
import cargo_lock_parser
import cargo_toml_updater
import crates_io_checker
import repo_management
import run_cargo_update
#
# Main
#
# Perform a "git pull" on the parent directory (the Servo checkout this
# tool lives inside).
git_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
print('Performing git pull inside "%s"...' % git_path)
print('')
repo_management.pull(git_path)
# Create a new, timestamped branch before making any updates
branch_name = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_crate_update")
print('Creating new branch: %s...' % branch_name)
print('')
repo_management.create_new_branch('..', branch_name)
# Edit .gitignore to add this servo-dependency-tool directory
with open(os.path.join(git_path, '.gitignore'), "r") as f:
    tool_ignored = False
    for line in f:
        # Bug fix: lines read from a file keep their trailing newline, so
        # the previous exact equality check ('line == ...') could never
        # match and the ignore entry was re-appended on every run. Strip
        # the line and accept either spelling (the entry written below is
        # '/servo-dependency-tool').
        if line.strip() in ('servo-dependency-tool/', '/servo-dependency-tool'):
            tool_ignored = True
if not tool_ignored:
    print('Adding /servo-dependency-tool to .gitignore...')
    print('')
    with open(os.path.join(git_path, '.gitignore'), "a") as f:
        f.write('\n')
        f.write('# Servo Dependency Tool\n')
        f.write('/servo-dependency-tool')
# Check for existence of Cargo.lock file and parse it
# NOTE(review): if no Cargo.lock exists in git_path, lock_file is never
# assigned and the code below raises NameError — confirm whether a
# friendlier error is wanted here.
for filename in os.listdir(git_path):
    if filename == "Cargo.lock":
        print('Parsing Cargo.lock file...')
        print('')
        lock_file = cargo_lock_parser.lock_file_parse(os.path.join(git_path, filename))
# Ignore hyper dependencies per Josh Matthews: "Can't update hyper without additional work"
# Do so by removing it from the collection
package_names_to_ignore = []
for package_name in lock_file.packages:
    if package_name.startswith('hyper'):
        package_names_to_ignore.append(package_name)
for name in package_names_to_ignore:
    print('Removing %s from packages to update...' % name)
    del lock_file.packages[name]
print('')
# Run crates_io_checker which determines the latest version for all packages in lock_file.packages
crates_io_checker.clone_crates()
print('Checking crates.io for latest version of each package...')
print('')
for package_name in lock_file.packages:
    crates_io_checker.check(lock_file.packages[package_name])
# Remove the cloned crates.io-index. We do this here
# so that the upcoming directory tree traversal won't
# go into the crates.io-index folders.
print('Removing local clone of crates.io...')
print('')
shutil.rmtree('crates.io-index')
# Loop through directory tree
# For each instance of Cargo.toml, call toml_file_update to update
# the version numbers for each dependency
print('Iterating through all Cargo.toml files and updating their dependency versions...')
print('')
for root, dirs, files in os.walk(git_path):
    if 'servo-dependency-tool' in dirs:
        dirs.remove('servo-dependency-tool')  # Don't visit this tool's directory
    for filename in files:
        if filename.lower() == "cargo.toml":
            toml_file_path = os.path.join(root, filename)
            cargo_toml_updater.toml_file_update(toml_file_path, lock_file)
# "Delete" Cargo.lock to avoid conflicts (rename to Cargo.lock.bak)
print('Making a backup (Cargo.lock.bak) of the current Cargo.lock before updating...')
print('')
os.rename(os.path.join(git_path, 'Cargo.lock'), os.path.join(git_path, 'Cargo.lock.bak'))
# Loop through the packages again and call run_cargo_update
# to run the appropriate update command.
print('Running appropriate cargo update command for each package...')
print('')
for package_name in lock_file.packages:
    if lock_file.packages[package_name].upgrade_available:
        run_cargo_update.run_update(git_path, lock_file.packages[package_name])
# Push the updates to origin/branch_name
print('Pushing changes to new branch...')
print('')
repo_management.push(git_path, branch_name, 'Updated dependencies')
# Pull request on master
print('Initiating pull request...')
gh_username = input('GitHub Username: ')
gh_password = getpass.getpass('GitHub Password: ')
title = 'Updated dependencies in Cargo.toml files to latest versions'
desc = 'Updated all Cargo.toml files with the latest versions found on crates.io for all dependencies and ran \
"./mach cargo-update -p <package_name> for each'
repo_management.pull_request(gh_username, gh_password, title, 'master', gh_username + ':' + branch_name, desc)
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,735
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/cargo_lock_parser.py
|
# Cargo.lock Parser
#
# This script parses a Cargo.lock file and converts it into a
# LockFile object. A LockFile object contains all of the information about
# the [root] and the [[package]] sections within a Cargo.lock file.
# NOTE: Must be placed in the same folder as the Cargo.lock file
#
# From our Initial Steps requirement:
# "write code that takes a Cargo.lock file as input and determines
# the list of crate names and versions that are dependencies"
import re
# Object representing the root. A Cargo.lock file will always have one [[root]]
class LockRoot:
    def __init__(self):
        self.name = ""          # crate name from the [root] section
        self.version = ""       # crate version from the [root] section
        self.dependencies = []  # list of LockDependency


# Object representing a dependency. Each [[root]] and [[package]] within a Cargo.lock file can have >= 0 dependencies
class LockDependency:
    def __init__(self):
        self.name = ""
        self.version = ""
        self.source = ""  # registry/git source; empty when absent


# Object representing a [[package]] within the Cargo.lock file. A Cargo.lock file can have >= 0 packages
class LockPackage:
    def __init__(self):
        self.name = ""
        self.version = ""
        self.upgrade_available = False  # set later by the crates.io checker
        self.source = ""
        self.dependencies = []  # list of LockDependency


# Object representing an entire Cargo.lock file
class LockFile:
    def __init__(self):
        self.root = LockRoot()
        self.packages = {}  # dictionary: package name -> LockPackage


def _parse_dependency(line):
    """Parse one quoted dependency line ("name version [source]") into a
    LockDependency. Shared by root and package parsing (previously
    duplicated)."""
    parts = re.findall(r'"(.*?)"', line)[0].split(' ')
    dep = LockDependency()
    dep.name = parts[0]     # All dependencies should have a name
    dep.version = parts[1]  # All dependencies should have a version
    if len(parts) == 3:     # A third field, when present, is the source
        dep.source = parts[2]
    return dep


# Method to parse the passed file (a Cargo.lock file)
# and return the lock_file object
def lock_file_parse(fname):
    """Parse *fname* (a Cargo.lock file) into a LockFile.

    Bug fix: the last [[package]] was only recorded when a [metadata]
    section followed it; files without [metadata] silently dropped their
    final package. The package is now also flushed after the loop.

    :param fname: path of the Cargo.lock file to parse.
    :return: populated LockFile instance.
    """
    with open(fname, 'r') as fp:
        lock_file = LockFile()
        lock_package_to_add = LockPackage()  # package currently being built
        in_root = True  # lines are in the [root] section until [[package]]
        for line in fp:
            stripped = line.strip()
            if not stripped:
                continue
            if in_root:
                if stripped.startswith('name'):
                    lock_file.root.name = re.findall(r'"(.*?)"', line)[0]
                elif stripped.startswith('version'):
                    lock_file.root.version = re.findall(r'"(.*?)"', line)[0]
                elif stripped.startswith('"'):  # quoted lines are dependencies
                    lock_file.root.dependencies.append(_parse_dependency(line))
                elif stripped == "[[package]]":  # End of Root
                    in_root = False
            else:
                if stripped == "[[package]]":
                    # New package header: flush the one built so far, if any.
                    if lock_package_to_add.name != "":
                        lock_file.packages[lock_package_to_add.name] = lock_package_to_add
                        lock_package_to_add = LockPackage()
                elif stripped.startswith('name'):
                    lock_package_to_add.name = re.findall(r'"(.*?)"', line)[0]
                elif stripped.startswith('version'):
                    lock_package_to_add.version = re.findall(r'"(.*?)"', line)[0]
                elif stripped.startswith('source'):
                    lock_package_to_add.source = re.findall(r'"(.*?)"', line)[0]
                elif stripped.startswith('[metadata]'):
                    lock_file.packages[lock_package_to_add.name] = lock_package_to_add  # add the last entry
                    break
                elif stripped.startswith('"'):  # quoted lines are dependencies
                    lock_package_to_add.dependencies.append(_parse_dependency(line))
        # Flush the final package when the file has no [metadata] section.
        # (Idempotent if it was already added at the [metadata] break.)
        if lock_package_to_add.name != "":
            lock_file.packages[lock_package_to_add.name] = lock_package_to_add
    return lock_file  # return the lock_file object
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,736
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/cargo_toml_updater.py
|
# Cargo.toml File Updater
#
# This script parses a cargo.toml file and updates all of the version numbers
# to the version numbers from the lock_file object
import re
# Method to update the passed file (a Cargo.toml file)
def toml_file_update(fname, lock_file):
    """Rewrite *fname* in place, bumping the version of every dependency
    that lock_file marks as upgradable.

    Only lines inside a [dependencies]-style section are touched; a line
    naming something that is not a known package ends the section.
    """
    with open(fname, 'r+') as handle:
        inside_deps = False
        original_lines = handle.readlines()
        handle.seek(0)
        handle.truncate()
        for text in original_lines:
            stripped = text.strip()
            if stripped:
                if stripped.startswith('['):
                    # Section header: track whether we are in a
                    # [dependencies] / [*-dependencies] block.
                    inside_deps = stripped.endswith('dependencies]')
                elif inside_deps:
                    name = text.split(' ')[0]
                    if name in lock_file.packages:  # Check if package exists
                        entry = lock_file.packages[name]
                        if entry.upgrade_available:  # Check if upgrade was found
                            if len(text.split(' ')) == 3:
                                # Line with format: <package> = "<version>"
                                text = re.sub(r'"(.*?)"', '"' + entry.version + '"', text)
                            elif 'version = "' in text:
                                text = re.sub(r'version = "(.*?)"', 'version = "' + entry.version + '"', text)
                    else:
                        inside_deps = False
            handle.write(text)
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,737
|
chbrown13/servo-dependency-tool
|
refs/heads/master
|
/run_cargo_update.py
|
import os
import subprocess
def _update_args(git_path, spec):
    """Build the update command for *spec* (package name, optionally
    'name:version'): prefer the repo's ./mach wrapper, else plain cargo."""
    if os.path.isfile(os.path.join(git_path, 'mach')):
        mach_path = git_path + '/mach'
        return [mach_path, 'cargo-update', '-p', spec]
    # Otherwise use default cargo update command
    cargo_bin_path = os.path.expanduser('~/.cargo/bin/cargo')
    return [cargo_bin_path, 'update', '-p', spec]


def _run_update_command(args):
    """Run the command, echo its stderr, and return the stderr text."""
    print('This may take a moment...')
    print(args)
    cmd_out, cmd_err = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    text = cmd_err.decode('utf-8')
    print(text)
    return text


def run_update(git_path, pkg):
    """Run cargo/mach update for *pkg*, retrying with an explicit
    'name:version' spec when the bare name is ambiguous.

    Improvement: the original duplicated the command-building and
    subprocess-running code for the retry path; both are now shared
    helpers with identical console output.

    :param git_path: path of the repository checkout.
    :param pkg: package object with ``name`` and ``version`` attributes.
    """
    print("Running update for %s" % pkg.name)
    err_text = _run_update_command(_update_args(git_path, pkg.name))
    if 'is ambiguous.' in err_text:  # If failure due to ambiguity, use precise version
        print('Specifying version %s...' % pkg.version)
        _run_update_command(_update_args(git_path, pkg.name + ':' + pkg.version))
|
{"/test/test_cargo_lock_parser.py": ["/cargo_lock_parser.py"], "/crates_io_checker.py": ["/repo_management.py"], "/test/test_cargo_toml_updater.py": ["/cargo_toml_updater.py", "/cargo_lock_parser.py"], "/test/test_crates_io_checker.py": ["/crates_io_checker.py", "/cargo_lock_parser.py"], "/servo_dependency_tool.py": ["/cargo_lock_parser.py", "/cargo_toml_updater.py", "/crates_io_checker.py", "/repo_management.py", "/run_cargo_update.py"]}
|
13,743
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/pipeline.py
|
"""
Author: David Walshe
Date: 08/04/2020
"""
from __future__ import annotations
from collections import Counter
from typing import TYPE_CHECKING
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.pipeline import Pipeline as ImblearnPipeline
from imblearn.under_sampling import NearMiss
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline as SklearnPipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler, LabelEncoder
from src.config import RANDOM_STATE
if TYPE_CHECKING:
import numpy as np
from pandas import DataFrame
class CustomColumnTransformer(ColumnTransformer):
    """
    Custom ColumnTransformer to allow easy feature extraction
    """
    def get_feature_names_from_ohe_step(self) -> np.ndarray:
        """
        Helper method to access internal step feature names.

        NOTE(review): assumes this transformer was built with a "cp" step
        whose pipeline contains an "ohe" OneHotEncoder step (as created by
        PreprocessPipelineFactory.X_preprocess_pipeline); raises KeyError
        otherwise.

        :return: The feature names after the OneHotEncoder step of the Pipeline.
        """
        return self.named_transformers_['cp'].named_steps["ohe"].get_feature_names()
class PipelineLabelEncoder(TransformerMixin):
    """
    Label encoder usable inside an sklearn Pipeline.

    The stock LabelEncoder accepts only a single array and therefore does
    not match the (X, y) transformer signature Pipelines expect; this
    wrapper adds the dummy ``y`` parameter and returns the encoded labels
    as a column vector.
    """

    def __init__(self):
        """Create the wrapped LabelEncoder."""
        self.encoder = LabelEncoder()

    def fit(self, X: DataFrame, y: DataFrame = None):
        """Fit the wrapped encoder on *X*; *y* is ignored.

        :param X: dataset whose labels are to be learned.
        :param y: dummy parameter for Pipeline compatibility.
        :return: self, to allow chaining.
        """
        self.encoder.fit(X)
        return self

    def transform(self, X: DataFrame, y: DataFrame = None) -> np.ndarray:
        """Encode *X* and return the labels as an (n, 1) column vector.

        :param X: dataset to encode.
        :param y: dummy parameter for Pipeline compatibility.
        """
        encoded = self.encoder.transform(X)
        return encoded.reshape(-1, 1)

    @property
    def classes_(self):
        """Classes learned by the wrapped encoder."""
        return self.encoder.classes_
class PreprocessPipelineFactory:
    """Factory of pre-processing pipelines for the KDD cup99 dataset."""

    def X_preprocess_pipeline(self, category_variables: list, numeric_variables: list) -> CustomColumnTransformer:
        """Build the X-side pre-processing transformer.

        :param category_variables: categorical column names of X.
        :param numeric_variables: numerical column names of X.
        :return: CustomColumnTransformer that one-hot encodes the
            categorical columns and standard-scales the numerical ones.
        """
        steps = [
            ("cp", self._category_step, category_variables),
            ("sp", self._scaler_step, numeric_variables),
        ]
        return CustomColumnTransformer(transformers=steps, remainder="drop", n_jobs=-1)

    def y_preprocess_pipeline(self, variables: tuple = (0,)):
        """Build the y-side pre-processing transformer.

        :param variables: column indexes to encode. Default= (0, )
        :return: ColumnTransformer that label-encodes the target column.
        """
        steps = [("lep", self._label_encoder_step, variables)]
        return ColumnTransformer(transformers=steps, remainder="drop", n_jobs=-1)

    @property
    def _category_step(self) -> SklearnPipeline:
        """Single-step pipeline wrapping a OneHotEncoder."""
        return SklearnPipeline([("ohe", OneHotEncoder())])

    @property
    def _scaler_step(self) -> SklearnPipeline:
        """Single-step pipeline wrapping a StandardScaler."""
        return SklearnPipeline([("ss", StandardScaler())])

    @property
    def _label_encoder_step(self) -> SklearnPipeline:
        """Single-step pipeline wrapping the custom PipelineLabelEncoder."""
        return SklearnPipeline([("le", PipelineLabelEncoder())])
class SamplingPipelineFactory:
    """Factory for the over/under-sampling pipeline that rebalances y."""

    def __init__(self, y, max_sample_limit=1000, k_neighbors=5):
        """
        :param y: the y dataset of output labels (expects a "signature" column).
        :param max_sample_limit: cap applied to over-represented classes.
        :param k_neighbors: neighbour count used by SMOTE and NearMiss.
        """
        self.k_neighbors = k_neighbors
        class_counts = Counter(y["signature"])
        # Classes above the cap get under-sampled down to the cap.
        self.under_sampling_strategy = {
            label: max_sample_limit
            for label, count in class_counts.items()
            if count > max_sample_limit
        }
        # Classes with too few samples for SMOTE's k neighbours get
        # randomly over-sampled first.
        self.ros_sampling_strategy = {
            label: k_neighbors * 20
            for label, count in class_counts.items()
            if count <= k_neighbors
        }

    def sampling_pipeline(self) -> ImblearnPipeline:
        """Assemble the ROS -> NearMiss -> SMOTE sampling pipeline.

        :return: A constructed sampling pipeline.
        """
        pipeline_steps = [
            ("ros", self.random_over_sampling_step),
            ("nm", self.near_miss_step),
            ("smt", self.smote_pipeline_step),
        ]
        return ImblearnPipeline(steps=pipeline_steps)

    @property
    def random_over_sampling_step(self) -> RandomOverSampler:
        """Random over-sampler for the rarest classes."""
        return RandomOverSampler(
            sampling_strategy=self.ros_sampling_strategy,
            random_state=RANDOM_STATE,
        )

    @property
    def near_miss_step(self) -> NearMiss:
        """NearMiss under-sampler that caps the majority classes."""
        return NearMiss(
            sampling_strategy=self.under_sampling_strategy,
            n_neighbors=self.k_neighbors,
            n_jobs=-1,
        )

    @property
    def smote_pipeline_step(self) -> SMOTE:
        """SMOTE synthesiser for the minority classes."""
        return SMOTE(
            k_neighbors=self.k_neighbors,
            random_state=RANDOM_STATE,
            n_jobs=-1,
        )
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,744
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/data.py
|
"""
Author: David Walshe
Date: 03/04/2020
"""
# Imports
import logging
from json import load
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_kddcup99
from src.config import RANDOM_STATE
from src.logger_config import setup_logger
logger = setup_logger(logging.getLogger(__name__))
class LabelManager:
    """
    Reads the JSON data-configuration file and exposes column-name
    helpers for the X and y feature sets, including filtering by
    discrete/continuous dtype.
    """

    def __init__(self, config_file="data.json"):
        """
        :param config_file: path of the JSON data-configuration file.
        """
        self._data_info = self._read_data_info(config_file=config_file)
        self._X_column_names = None
        self._y_column_name = None

    @staticmethod
    def _read_data_info(config_file) -> dict:
        """Load *config_file* and return its JSON contents as a dict."""
        with open(config_file) as fh:
            return load(fp=fh)

    @property
    def info(self):
        """The raw configuration dictionary."""
        return self._data_info

    def _get_column_names(self, key: str) -> list:
        """Names of every feature listed under *key* ("X" or "y")."""
        return [entry.get("name") for entry in self.info.get(key)]

    @property
    def X_column_names(self):
        """Feature names of the X set (computed once, then cached)."""
        if self._X_column_names is None:
            self._X_column_names = self._get_column_names(key="X")
        return self._X_column_names

    @property
    def y_column_name(self):
        """Feature names of the y set (computed once, then cached)."""
        if self._y_column_name is None:
            self._y_column_name = self._get_column_names(key="y")
        return self._y_column_name

    @property
    def X_y_column_names(self) -> tuple:
        """The X column names and the y column name, as a tuple."""
        return self.X_column_names, self.y_column_name

    def get_variable_on_dtype(self, key: str, dtype: str) -> list:
        """Feature names under *key* whose declared dtype equals *dtype*.

        :param key: the data set to search in — "X" or "y".
        :param dtype: the dtype to search for — "discrete" or "continuous".
        :return: feature names matching the dtype.
        """
        return [entry.get("name") for entry in self.info.get(key) if entry.get("dtype") == dtype]

    @property
    def X_discrete(self):
        """X feature names declared with the "discrete" dtype."""
        return self.get_variable_on_dtype(key="X", dtype="discrete")

    @property
    def X_continuous(self):
        """X feature names declared with the "continuous" dtype."""
        return self.get_variable_on_dtype(key="X", dtype="continuous")
class DataRetriever:
    """Fetches the KDD cup99 dataset via sklearn and exposes it as
    labelled pandas DataFrames (X, y), with optional duplicate-row
    removal and lazy caching."""

    def __init__(self, label_manager: LabelManager):
        """
        :param label_manager: LabelManager supplying the X/y column names.
        """
        self.label_manager = label_manager
        self._X = None  # lazily-fetched feature DataFrame
        self._y = None  # lazily-fetched target DataFrame

    def _remove_duplicate_rows(self):
        """
        Helper function to remove duplicates for the dataset.
        """
        # Merge (X, y) before reduction.
        dataset = pd.concat([self.X, self.y], axis=1, join="outer")
        orig_size = dataset.shape[0]
        logger.info(f"Step - Original dataset record count: {orig_size}")
        # Reduce the merge dataset by removing duplicates.
        dataset.drop_duplicates(inplace=True)
        logger.info(f"Step - Dataset record count with duplicates removed: {dataset.shape[0]}")
        logger.info(
            f"Step - Dataset records reduced by {round(100 - ((dataset.shape[0] / orig_size) * 100), 2)}%")
        # Reassign X and y with the reduced dataset. The last column of the
        # merged frame is the target; everything before it belongs to X.
        self._X = pd.DataFrame(data=dataset.iloc[:, :-1].values, columns=self.label_manager.X_column_names)
        self._y = pd.DataFrame(data=dataset.iloc[:, -1].values.reshape(-1, 1),
                               columns=self.label_manager.y_column_name)

    def X_y_dataset(self, remove_duplicates: bool = False, full_dataset: bool = True, force: bool = False) -> np.array:
        """
        Helper function to create the dataset, including the dependant "target" variable.
        :param remove_duplicates: Flag to decide whether duplicates should be reduced using Dataframe.drop_duplicates
        :param full_dataset: Flag to decide if full dataset or only 10% should be retrieved.
        :param force: Flag to force re-retrieval of X and y from source or used locally stored (X, y) from previous call.
        :return: The dataset as (X, y).
        """
        # Lazy init: fetch only when not cached yet or explicitly forced.
        if self._X is None or self._y is None or force is True:
            logger.info(f"Step - Only 10% of Dataset: {(not full_dataset)}")
            data, target = fetch_kddcup99(return_X_y=True, percent10=(not full_dataset), random_state=RANDOM_STATE)
            # Targets come back 1-D; reshape to a column vector for the frame.
            target = np.array(target).reshape(-1, 1)
            self._X = pd.DataFrame(data=data, columns=self.label_manager.X_column_names)
            self._y = pd.DataFrame(data=target, columns=self.label_manager.y_column_name)
            if remove_duplicates:
                self._remove_duplicate_rows()
        return self._X, self._y

    @property
    def X(self):
        """
        Returns X features set. Builds it using lazy initialisation if it has not already been assigned.
        :return: Feature set X
        """
        # Lazy init
        if self._X is None:
            self.X_y_dataset()
        return self._X

    @property
    def y(self):
        """
        Returns y features set. Builds it using lazy initialisation if it has not already been assigned.
        :return: Feature set y
        """
        # Lazy init
        if self._y is None:
            self.X_y_dataset()
        return self._y
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,745
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/preprocess.py
|
"""
Author: David Walshe
Date: 09/04/2020
"""
import numpy as np
import pandas as pd
from src.data import LabelManager
from src.pipeline import PreprocessPipelineFactory
from src.utils import refactor_names, refactor_byte_name
class Preprocess:
    """Applies the X and y pre-processing pipelines and records the y label encoding."""

    def __init__(self):
        # Label-encoder class list for y; populated by y_pre_process().
        self._signature_keys = None

    def X_pre_process(self, X, pipeline_factory: PreprocessPipelineFactory, **kwargs):
        """
        Perform pre-processing on X.
        :param X: The set of input features.
        :param pipeline_factory: The pipeline factory to obtain the X pre-processing pipeline from.
        :param kwargs: The kwargs for the pipeline to use. Expects the keys
            "category_variables" and "numeric_variables" (lists of column names).
        :return: The processed dataset X.
        """
        X_preprocess_pipeline = pipeline_factory.X_preprocess_pipeline(**kwargs)
        _X = X_preprocess_pipeline.fit_transform(X)
        # Rebuild readable column names: one-hot-encoded columns first,
        # numeric passthrough columns appended after.
        names = X_preprocess_pipeline.get_feature_names_from_ohe_step()
        feature_names = refactor_names(names, kwargs["category_variables"])
        feature_names = np.append(feature_names, kwargs["numeric_variables"])
        # Densify (sparse pipeline output) before wrapping in a DataFrame.
        _X = self._convert_to_array(_X)
        X = pd.DataFrame(data=_X, columns=feature_names)
        return X

    def y_pre_process(self, y, pipeline_factory: PreprocessPipelineFactory):
        """
        Perform pre-processing on y.
        :param y: The set of output labels.
        :param pipeline_factory: The pipeline factory to obtain the y pre-processing pipeline from.
        :return: The processed dataset y as a single-column "signature" DataFrame.
        """
        y_preprocess_pipeline = pipeline_factory.y_preprocess_pipeline()
        y = y_preprocess_pipeline.fit_transform(y)
        y = self._convert_to_array(y)
        y = y.ravel()
        y = pd.DataFrame(data=y, columns=["signature"])
        # Keep the fitted label-encoder classes so encoded labels can be
        # mapped back to class names later (see y_classes).
        self._signature_keys = y_preprocess_pipeline.named_transformers_['lep'].named_steps["le"].classes_
        return y

    def X_y_pre_process(self, X, y, label_manager: LabelManager) -> tuple:
        """
        Helper method that manages the preprocessing of both X and y datasets and returns the post processed data.
        :param X: The input dataset of features.
        :param y: The output dataset of labels.
        :param label_manager: The label manager object from main.
        :return: (X, y) datasets after being processed.
        """
        pipeline_factory = PreprocessPipelineFactory()
        X = self.X_pre_process(X, pipeline_factory,
                               category_variables=label_manager.X_discrete,
                               numeric_variables=label_manager.X_continuous)
        y = self.y_pre_process(y, pipeline_factory)
        return X, y

    @staticmethod
    def _convert_to_array(dataset: pd.Series):
        """
        Attempts to convert a pandas Series object into a numpy array.
        :param dataset: A Series object to transform
        :return: The converted dataset.
        """
        # Anything that is not already an ndarray (e.g. a scipy sparse
        # matrix from the OHE step) is densified via .toarray().
        if type(dataset) is not np.ndarray:
            dataset = dataset.toarray()
        return dataset

    @property
    def y_classes(self):
        """
        The y class names, keyed by their encoded integer label.
        :return: Dict of encoded label -> human-readable class name.
        """
        return {key: refactor_byte_name(value) for key, value in enumerate(self._signature_keys)}
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,746
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/utils.py
|
"""
Author: David Walshe
Date: 09/04/2020
"""
import pandas as pd
def refactor_names(names, features):
    """
    Rewrite one-hot-encoded feature names ("x<i>_<value>") into
    human-readable "[<feature>] <value>" labels.

    :param names: Encoder output names, e.g. "x0_b'tcp'".
    :param features: Original categorical feature names, indexed by the
        encoder's column number.
    :return: The ``names`` list, rewritten in place.
    """
    for i, feature in enumerate(features):
        prefix = f"x{i}_"
        for j, name in enumerate(names):
            # Bug fix: match the full "x<i>_" prefix instead of find("x<i>"),
            # which falsely matched e.g. "x1" inside "x10_..."/"x11_..." names.
            if name.startswith(prefix):
                names[j] = refactor_byte_name(name.replace(prefix, f"[{feature}] ", 1))
    return names


def refactor_byte_name(name):
    """
    Strip the b'...' bytes-literal wrapper that fetch_kddcup99 leaves on
    categorical values, e.g. "b'tcp'" -> "tcp".

    :param name: The raw (possibly bytes-repr) value.
    :return: The cleaned string.
    """
    name = str(name)
    name = name.replace("b'", "")
    name = name.replace("'", "")
    return name
def ravel_y(y):
    """
    Flatten a single-column DataFrame of labels into a 1-D numpy array.

    Non-DataFrame inputs are returned unchanged (they are assumed to be
    already flat).

    :param y: The label container.
    :return: A 1-D ndarray when given a DataFrame, otherwise ``y`` as-is.
    """
    # Robustness fix: isinstance() also accepts DataFrame subclasses,
    # which the previous exact type() comparison silently skipped.
    if isinstance(y, pd.DataFrame):
        y = y.to_numpy().ravel()
    return y
def change_label_to_class(label):
    """
    Map an encoded integer label back to its KDD Cup '99 class name.

    :param label: The integer class label produced by the label encoder.
    :return: The class name string, or "N/A" for an unknown label.
    """
    return {
        0: "back",
        1: "buffer_overflow",
        2: "ftp_write",
        3: "guess_passwd",
        4: "imap",
        5: "ipsweep",
        6: "land",
        # Bug fix: was misspelt "loadmodulde"; the KDD'99 class is "loadmodule".
        7: "loadmodule",
        8: "multihop",
        9: "neptune",
        10: "nmap",
        11: "normal",
        12: "perl",
        13: "phf",
        14: "pod",
        15: "portsweep",
        16: "rootkit",
        17: "satan",
        18: "smurf",
        19: "spy",
        20: "teardrop",
        21: "warezclient",
        22: "warezmaster"
    }.get(label, "N/A")
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,747
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/main.py
|
"""
Author: David Walshe
Date: 03/04/2020
"""
import logging
from collections import Counter
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Pre-processing
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import src.plotting as myPlt
from src.config import RANDOM_STATE, FULL_DATA_SET, TUNING
from src.data import DataRetriever, LabelManager
from src.evaluate import ModelEvaluator, ModelTuner
from src.logger_config import setup_logger
from src.pipeline import SamplingPipelineFactory
from src.preprocess import Preprocess
from src.timer import Timer
mpl.rcParams['figure.dpi'] = 200
logger = setup_logger(logging.getLogger(__name__))
def get_y_distribution(y):
    """
    Build a per-class distribution table (label, class name, count, percentage) for y.

    NOTE(review): relies on the module-level ``preprocess`` object created in the
    __main__ block below for the label -> class-name mapping; calling this before
    that object exists raises NameError — consider passing it in explicitly.

    :param y: DataFrame with a "signature" column of encoded class labels.
    :return: DataFrame sorted by descending percentage.
    """
    distribution = pd.DataFrame(columns=["label", "class", "count", "percentage"])
    counter = Counter(y["signature"])
    for index, (k, v) in enumerate(counter.items()):
        # Share of the whole label set held by this class.
        per = v / len(y) * 100
        distribution.loc[index] = {"label": k, "class": preprocess.y_classes.get(k, k), "count": v, "percentage": per}
    return distribution.sort_values("percentage", ascending=False)
def print_optimiser_results(model_tuner: ModelTuner):
    """Dump a tuner's best parameters, best score and full CV results to stdout."""
    for result in (model_tuner.best_parameters, model_tuner.best_score, model_tuner.cv_results):
        print(result)
if __name__ == '__main__':
    logger.info("Start")
    # ==============================================================================================================
    # ==============================================================================================================
    # Setup
    # ==============================================================================================================
    # ==============================================================================================================
    timer = Timer()
    logger.info("Stage - Data Retrieval BEGIN")
    label_manager = LabelManager(config_file="data.json")
    data_retriever = DataRetriever(label_manager=label_manager)
    # ==============================================================================================================
    # ==============================================================================================================
    # Get Raw Data.
    # ==============================================================================================================
    # ==============================================================================================================
    # Bug fix: this log line was an unlabelled "Stage - BEGIN".
    logger.info("Stage - Raw Data Retrieval BEGIN")
    raw_X, raw_y = data_retriever.X_y_dataset(remove_duplicates=False, full_dataset=FULL_DATA_SET)
    logger.info(f"Stage - Data Retrieval END {timer.time_stage('Data Retrieval')}")
    myPlt.plot_value_counts(raw_y, title="y Distribution (Raw)")
    # ==============================================================================================================
    # ==============================================================================================================
    # Get Raw Data with duplicates removed
    # ==============================================================================================================
    # ==============================================================================================================
    # Bug fix: this log line was an unlabelled "Stage - BEGIN".
    logger.info("Stage - Deduplicated Data Retrieval BEGIN")
    X, y = data_retriever.X_y_dataset(remove_duplicates=True, full_dataset=FULL_DATA_SET, force=True)
    logger.info(f"Stage - Data Retrieval END {timer.time_stage('Data Retrieval')}")
    myPlt.plot_value_counts(y, title="y Distribution (Duplicates removed)")
    myPlt.plot_value_counts_compare(y1=raw_y, y2=y)
    myPlt.plot_value_counts_compare(y1=raw_y, y2=y, level="Max")
    myPlt.plot_value_counts_compare(y1=raw_y, y2=y, level="Mid")
    myPlt.plot_value_counts_compare(y1=raw_y, y2=y, level="Min")
    # ==============================================================================================================
    # ==============================================================================================================
    # Preprocess raw data
    # ==============================================================================================================
    # ==============================================================================================================
    logger.info("Stage - Preprocess BEGIN")
    preprocess = Preprocess()
    X, y = preprocess.X_y_pre_process(X, y, label_manager)
    logger.info(f"Stage - Preprocess END {timer.time_stage('Preprocessing')}")
    # ==============================================================================================================
    # ==============================================================================================================
    # Principle Component Analysis
    # ==============================================================================================================
    # ==============================================================================================================
    logger.info("Stage - PCA BEGIN")
    # Keep the un-projected features so each PCA experiment starts fresh.
    X_backup = X
    # ==============================================================================================================
    # ==============================================================================================================
    # PCA with 3 Components
    # ==============================================================================================================
    # ==============================================================================================================
    logger.info("Stage - PCA 3 Component BEGIN")
    X = X_backup
    pca = PCA(n_components=3)
    X = pca.fit_transform(X)
    # Kept for inspection of how much variance each axis explains.
    explained_variance_3d_df = pd.DataFrame(pca.explained_variance_ratio_.reshape(1, -1),
                                            columns=["X-axis", "Y-axis", "Z-axis"])
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y["signature"], marker='o', cmap=plt.cm.get_cmap('tab20', 23))
    ax.set_title("PCA Analysis (3 Components)")
    ax.set_xlabel('Component 1')
    ax.set_ylabel('Component 2')
    ax.set_zlabel('Component 3')
    plt.show()
    # Bug fix: END message said "Step" where the BEGIN said "Stage".
    logger.info(f"Stage - PCA 3 Component END {timer.time_stage('PCA 3C')}")
    # ==============================================================================================================
    # ==============================================================================================================
    # PCA with 2 Components
    # ==============================================================================================================
    # ==============================================================================================================
    logger.info("Stage - PCA 2 Component BEGIN")
    X = X_backup
    pca = PCA(n_components=2)
    X = pca.fit_transform(X)
    # Kept for inspection of how much variance each axis explains.
    explained_variance_2d_df = pd.DataFrame(pca.explained_variance_ratio_.reshape(1, -1), columns=["X-axis", "Y-axis"])
    myPlt.show_pca_plot(X, y, title="PCA Analysis (2 Components)")
    # Bug fix: END message said "Step" where the BEGIN said "Stage".
    logger.info(f"Stage - PCA 2 Component END {timer.time_stage('PCA 2C')}")
    # ==============================================================================================================
    # ==============================================================================================================
    # PCA N-Component Selection
    # ==============================================================================================================
    # ==============================================================================================================
    logger.info("Step - PCA Variance Graph BEGIN")
    X = X_backup
    pca = PCA().fit(X)
    # Cumulative explained variance -> choose n_components at the elbow.
    pca_selection = np.cumsum(pca.explained_variance_ratio_)
    plt.plot(pca_selection)
    plt.title("PCA Variance Graph")
    plt.xlabel('Number of Components')
    plt.ylabel('Cumulative Explained Variance')
    plt.show()
    logger.info(f"Step - PCA Variance Graph {timer.time_stage('PCA Graph')}")
    # ==============================================================================================================
    # ==============================================================================================================
    # PCA with 20 Components
    # ==============================================================================================================
    # ==============================================================================================================
    # Bug fix: this section logged "PCA 2 Component" and re-used the
    # 'PCA 2C' timer-stage key although it projects onto 20 components.
    logger.info("Stage - PCA 20 Component BEGIN")
    X = X_backup
    pca = PCA(n_components=20)
    X = pca.fit_transform(X)
    logger.info(f"Stage - PCA 20 Component END {timer.time_stage('PCA 20C')}")
    logger.info("Stage - PCA END")
    # ==============================================================================================================
    # ==============================================================================================================
    # Test/Train Split
    # ==============================================================================================================
    # ==============================================================================================================
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=.20, random_state=RANDOM_STATE
    )
    timer.time_stage("Train Test Split")
    # ==============================================================================================================
    # ==============================================================================================================
    # Sampling
    # ==============================================================================================================
    # ==============================================================================================================
    logger.info("Stage - Sampling BEGIN")
    # Cap per-class sample counts relative to the dataset size in use.
    if FULL_DATA_SET:
        max_sample_limit = 100_000
    else:
        max_sample_limit = 10_000
    sampling_pipeline = SamplingPipelineFactory(y_train, max_sample_limit=max_sample_limit).sampling_pipeline()
    prior_y_train = y_train
    X_train, y_train = sampling_pipeline.fit_resample(X_train, y_train)
    # Plot sampling vs original
    myPlt.plot_value_counts_compare(y1=prior_y_train, y2=y_train, title="Original Vs Sampled Training Sets (y)",
                                    label1="Original Data", label2="Sampled Data")
    logger.info(f"Stage - Sampling END {timer.time_stage('Sampling')}")
    if TUNING:
        # ==============================================================================================================
        # ==============================================================================================================
        # Hyper-parameter Tuning
        # ==============================================================================================================
        # ==============================================================================================================
        # Random Forest.
        # ===
        # Parameters are tuned greedily, one at a time, each search re-using
        # the best values found by the previous one.
        rf_params_estimators = [
            {
                "n_estimators": [100, 250, 500],
            }
        ]
        model_tuner_estimator = ModelTuner(y_classes=preprocess.y_classes, tuning_params=rf_params_estimators,
                                           model_type="RF")
        model_tuner_estimator.run_model_optimisation(X_train=X_train, y_train=y_train)
        print_optimiser_results(model_tuner_estimator)
        # --------------------------------------------------------------------------------------------------------------
        rf_params_criterion = [
            {
                "criterion": ["gini", "entropy"],
                "n_estimators": [model_tuner_estimator.best_parameters["n_estimators"]]
            }
        ]
        model_tuner_criterion = ModelTuner(y_classes=preprocess.y_classes, tuning_params=rf_params_criterion,
                                           model_type="RF")
        model_tuner_criterion.run_model_optimisation(X_train=X_train, y_train=y_train)
        print_optimiser_results(model_tuner_criterion)
        # --------------------------------------------------------------------------------------------------------------
        rf_params_max_features = [
            {
                "max_features": ["auto", "sqrt", "log2", None],
                "criterion": [model_tuner_criterion.best_parameters["criterion"]],
                "n_estimators": [model_tuner_criterion.best_parameters["n_estimators"]]
            }
        ]
        model_tuner_max_features = ModelTuner(y_classes=preprocess.y_classes, tuning_params=rf_params_max_features,
                                              model_type="RF")
        model_tuner_max_features.run_model_optimisation(X_train=X_train, y_train=y_train)
        print_optimiser_results(model_tuner_max_features)
        # --------------------------------------------------------------------------------------------------------------
        rf_params_oob = [
            {
                "oob_score": [True, False],
                "max_features": [model_tuner_max_features.best_parameters["max_features"]],
                "criterion": [model_tuner_max_features.best_parameters["criterion"]],
                "n_estimators": [model_tuner_max_features.best_parameters["n_estimators"]]
            }
        ]
        model_tuner_oob = ModelTuner(y_classes=preprocess.y_classes, tuning_params=rf_params_oob,
                                     model_type="RF")
        model_tuner_oob.run_model_optimisation(X_train=X_train, y_train=y_train)
        print_optimiser_results(model_tuner_oob)
        # --------------------------------------------------------------------------------------------------------------
        # KNN
        # ===
        knn_params_neigbors = [
            {
                "n_neighbors": [3, 5, 7, 10],
            }
        ]
        model_tuner_neighors = ModelTuner(y_classes=preprocess.y_classes, tuning_params=knn_params_neigbors,
                                          model_type="KNN")
        model_tuner_neighors.run_model_optimisation(X_train=X_train, y_train=y_train)
        print_optimiser_results(model_tuner_neighors)
        # --------------------------------------------------------------------------------------------------------------
        knn_params_p = [
            {
                "p": [1, 2],
                "n_neighbors": [model_tuner_neighors.best_parameters["n_neighbors"]]
            }
        ]
        model_tuner_p = ModelTuner(y_classes=preprocess.y_classes, tuning_params=knn_params_p, model_type="KNN")
        model_tuner_p.run_model_optimisation(X_train=X_train, y_train=y_train)
        print_optimiser_results(model_tuner_p)
        # --------------------------------------------------------------------------------------------------------------
        knn_params_weights = [
            {
                "weights": ["uniform", "distance"],
                "p": [model_tuner_p.best_parameters["p"]],
                "n_neighbors": [model_tuner_p.best_parameters["n_neighbors"]]
            }
        ]
        model_tuner_weights = ModelTuner(y_classes=preprocess.y_classes, tuning_params=knn_params_weights,
                                         model_type="KNN")
        model_tuner_weights.run_model_optimisation(X_train=X_train, y_train=y_train)
        print_optimiser_results(model_tuner_weights)
        # --------------------------------------------------------------------------------------------------------------
        knn_params_algorithm = [
            {
                "algorithm": ["ball_tree", "kd_tree", "brute"],
                "leaf_size": [20, 30, 40, 50],
                "weights": [model_tuner_weights.best_parameters["weights"]],
                "p": [model_tuner_weights.best_parameters["p"]],
                "n_neighbors": [model_tuner_weights.best_parameters["n_neighbors"]]
            }
        ]
        model_tuner_algorithm = ModelTuner(y_classes=preprocess.y_classes, tuning_params=knn_params_algorithm,
                                           model_type="KNN")
        model_tuner_algorithm.run_model_optimisation(X_train=X_train, y_train=y_train)
        print_optimiser_results(model_tuner_algorithm)
        # --------------------------------------------------------------------------------------------------------------
    else:
        # ==============================================================================================================
        # ==============================================================================================================
        # Testing Model Performance
        # ==============================================================================================================
        # ==============================================================================================================
        model_evaluator = ModelEvaluator(y_classes=preprocess.y_classes)
        model_evaluator.run_model_evaluation(X_train, y_train, X_test, y_test)
        model_evaluator.show_confusion_matrices()
        model_evaluator.plot_results()
        model_evaluator.save_results()
    timer.time_script()
    stages, times = timer.plot_data
    myPlt.plot_model_build_time(stages=stages, times=times)
    logger.info("Complete")
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,748
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/plotting.py
|
"""
Author: David Walshe
Date: 09/04/2020
"""
from collections import Counter
from itertools import zip_longest
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from src.utils import change_label_to_class
def get_colors():
    """
    Load the plotting palette from colors.json in the working directory.

    :return: The colour values as a list, in file order.
    """
    import json
    with open("colors.json") as fh:
        colors = json.load(fh)
    return list(colors.values())
def plot_2d_space(X, y, label='Classes'):
    """
    Scatter-plot a 2-D feature space, one colour per class in y.

    :param X: 2-D array of samples (uses columns 0 and 1).
    :param y: 1-D array-like of class labels, parallel to X's rows.
    :param label: Title for the plot.
    """
    colors = get_colors()
    # Bug fix: the loop variables previously shadowed both the `label`
    # parameter (so plt.title() showed the last class instead of the title)
    # and the `colors` list. zip_longest pads with None if the palette runs out.
    for cls, color in zip_longest(np.unique(y), colors):
        plt.scatter(
            X[y == cls, 0],
            X[y == cls, 1],
            c=color, label=cls, marker="o"
        )
    plt.title(label)
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()
def plot_value_counts(y, title="Count (target)"):
    """
    Bar-plot the instance count of each class in y.

    :param y: Labels as a DataFrame with a "signature" column, or any
        array-like that can be wrapped into one.
    :param title: Title for the plot.
    """
    # Robustness fix: isinstance() also accepts DataFrame subclasses,
    # which the previous exact type() comparison would re-wrap.
    if not isinstance(y, pd.DataFrame):
        y = pd.DataFrame(y, columns=["signature"])
    target_count = y["signature"].value_counts()
    target_count.plot.bar(title=title)
    plt.xlabel("Classes")
    plt.ylabel("Instance Count")
    plt.show()
def plot_model_build_time(stages, times):
    """
    Bar-plot the time spent in each pipeline stage, with a total-time box.

    Y-axis tick labels switch from plain seconds to "m:ss" once the axis
    scale exceeds a minute.

    :param stages: Stage names (x-axis categories).
    :param times: Per-stage durations in seconds, parallel to ``stages``.
    """
    import math
    fig, ax = plt.subplots()
    ax.bar(stages, times)
    plt.xticks(stages, stages)
    # Scale the axis to ~20 ticks, rounded up to whole seconds.
    max_time = math.ceil(max(times))
    tick_scale = math.ceil(max_time / 20)
    max_time += tick_scale
    tick_values = list(range(0, max_time, tick_scale))
    # Bug fix: seconds are now zero-padded ("1:05" instead of "1:5");
    # the unused enumerate() index was also dropped.
    plt.yticks(tick_values,
               [i if max_time < 60 else f"{int(i / 60)}:{i % 60:02d}" for i in tick_values])
    plt.setp(ax.get_xticklabels(), rotation=30, ha="right")
    total_time = sum(times)
    if max_time > 60:
        total_time = f"{round(total_time / 60)}m {round(total_time % 60)}s"
        plt.ylabel("Minutes")
    else:
        plt.ylabel("Seconds")
    plt.xlabel("Stages")
    textstr = f"Total Time: {total_time}"
    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    # place a text box in upper left in axes coords
    ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,
            verticalalignment='top', bbox=props)
    plt.show()
def show_pca_plot(X, y, title="PCA Component Plot"):
    """
    Scatter-plot the first two PCA components, coloured by class label.

    :param X: 2-D array of PCA-transformed samples (uses columns 0 and 1).
    :param y: DataFrame with a "signature" column used for point colours.
    :param title: Title for the plot.
    """
    # 23 discrete colours — one per KDD class label.
    plt.scatter(X[:, 0], X[:, 1],
                c=y["signature"], edgecolor='none', alpha=0.5,
                cmap=plt.cm.get_cmap('tab20', 23))
    plt.xlabel('Component 1')
    plt.ylabel('Component 2')
    plt.title(title)
    plt.colorbar()
    plt.show()
def plot_value_counts_compare(y1, y2, level="All", title="Raw Vs Reduced Datasets (All)",
                              label1="Raw Data", label2="Reduced Data"):
    """
    Plot side-by-side per-class count bars for two label sets.

    :param y1: First label set (DataFrame with a "signature" column); also
        decides which classes fall into each occurrence band.
    :param y2: Second label set (DataFrame with a "signature" column).
    :param level: "Max", "Mid" or "Min" restricts the plot to high-, mid-
        or low-occurrence classes; any other value (e.g. "All") keeps all.
    :param title: Plot title (overridden when a Max/Mid/Min band is chosen).
    :param label1: Legend label for y1's bars.
    :param label2: Legend label for y2's bars.
    """
    y1_count = Counter(y1["signature"])
    y2_count = Counter(y2["signature"])
    # Keys to drop from the plot for the selected occurrence band.
    ks = []
    # Band thresholds (instance counts in y1).
    mx = 10000
    mn = 900
    for key, value in y1_count.items():
        if level == "Max":
            title = f"Raw Vs Reduced Datasets (High Occurrence)"
            # Drop everything at or below the high-band threshold.
            if value <= mx:
                ks.append(key)
        elif level == "Mid":
            title = f"Raw Vs Reduced Datasets (Mid Occurrence)"
            # Keep only classes with mn < count <= mx.
            if value > mx or value <= mn:
                ks.append(key)
        elif level == "Min":
            title = f"Raw Vs Reduced Datasets (Low Occurrence)"
            # Drop everything above the low-band threshold.
            if value > mn:
                ks.append(key)
        else:
            # "All" (or any unknown level): no filtering needed.
            break
    if len(ks) > 0:
        for k in ks:
            y1_count.pop(k)
    keys = list(y1_count.keys())
    y1_group = [y1_count[key] for key in keys]
    y2_group = [y2_count[key] for key in keys]
    # Encoded integer labels are mapped back to class names for the axis.
    if type(keys[0]) is int:
        keys = [change_label_to_class(key) for key in keys]
    x = np.arange(len(keys))  # the label locations
    width = 0.35  # the width of the bars
    fig, ax = plt.subplots()
    ax.bar(x - width / 2, y1_group, width, label=label1)
    ax.bar(x + width / 2, y2_group, width, label=label2)
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('Instance Occurrences')
    ax.set_xlabel('Classes')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(keys)
    ax.legend()
    fig.tight_layout()
    plt.xticks(rotation=90)
    plt.show()
def compare_average_results_plots(result_metrics: dict):
    """
    Plot RF-vs-KNN comparisons of the averaged result metrics.

    Percentage-scaled metrics and 0-1-scaled metrics are drawn as two
    separate grouped bar charts so each chart keeps a sensible y-range.

    :param result_metrics: Dict with "RF" and "KNN" keys, each mapping
        averaged-metric names to values.
    """
    rf_results = result_metrics["RF"]
    knn_results = result_metrics["KNN"]
    # Metrics expressed as percentages (0-100).
    percentage_result_keys = ['Avg Accuracy(a)', 'Avg Precision(p)', 'Avg Sensitivity(r)', 'Avg Specificity']
    x = np.arange(len(percentage_result_keys))  # the label locations
    rf_per_data = [rf_results[label] for label in percentage_result_keys]
    knn_per_data = [knn_results[label] for label in percentage_result_keys]
    results_plotter(rf_per_data, knn_per_data, keys=percentage_result_keys, x=x)
    # Metrics expressed on a 0-1 scale.
    float_result_keys = ['Avg F-Score', 'Avg TPR', 'Avg TNR', 'Avg FPR', 'Avg FNR']
    x = np.arange(len(float_result_keys))  # the label locations
    rf_float_data = [rf_results[label] for label in float_result_keys]
    knn_float_data = [knn_results[label] for label in float_result_keys]
    results_plotter(rf_float_data, knn_float_data, keys=float_result_keys, x=x, scale="0-1")
def compare_class_results(rf_result_metric: dict, knn_result_metric, title="No Title"):
    """
    Plot a per-class, side-by-side comparison of one metric for RF vs KNN.

    :param rf_result_metric: Mapping of class name -> metric value for Random Forest.
    :param knn_result_metric: Mapping of class name -> metric value for KNN.
    :param title: Title for the resulting plot.
    """
    class_names = list(rf_result_metric.keys())
    positions = np.arange(len(class_names))  # the label locations
    rf_values = [rf_result_metric[name] for name in class_names]
    knn_values = [knn_result_metric[name] for name in class_names]
    results_plotter(rf_values, knn_values, keys=class_names, x=positions, title=title, rotate=True)
def results_plotter(rf_data, knn_data, keys, x, scale="%",
                    title="Average Metric Comparison Between KNN and Random Forests", rotate=False):
    """
    Render a grouped bar chart comparing Random Forest and KNN results.

    :param rf_data: Metric values for the Random Forest model.
    :param knn_data: Metric values for the KNN model.
    :param keys: X-axis tick labels, one per bar group.
    :param x: Bar-group positions (np.arange of len(keys)).
    :param scale: Unit shown in the y-axis label ("%" or "0-1").
    :param title: Plot title.
    :param rotate: Rotate x tick labels 90 degrees when True (useful for many classes).
    """
    width = 0.35  # the width of the bars
    fig, ax = plt.subplots()
    # Bug fix: the legend labels were swapped — rf_data was labelled "KNN"
    # and knn_data was labelled "RF".
    ax.bar(x - width / 2, rf_data, width, label="RF")
    ax.bar(x + width / 2, knn_data, width, label="KNN")
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel(f'Output Value ({scale})')
    ax.set_xlabel('Metrics')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(keys)
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    fig.tight_layout()
    if rotate:
        plt.xticks(rotation=90)
    plt.show()
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,749
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/config.py
|
"""
Author: David Walshe
Date: 11/04/2020
Desc: Helper file to control all global variables in the model Pipeline.
"""
# Sets up configuration for a pipeline run.
# =========================================
RANDOM_STATE = 0
FULL_DATA_SET = False
TUNING = False
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,750
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/evaluate.py
|
"""
Author: David Walshe
Date: 12/04/2020
"""
import logging
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score, roc_curve, auc
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelBinarizer
from src.config import RANDOM_STATE
from src.logger_config import setup_logger
from src.plotting import compare_average_results_plots, compare_class_results
from src.timer import Timer
from src.utils import ravel_y
logger = setup_logger(logging.getLogger(__name__))
timer = Timer()
class ConfusionMatrix:
"""
Helper class for dealing with Confusion matrix data.
"""
def __init__(self, name, y_test, y_pred, class_names):
    """
    Class constructor.
    :param name: The name of the model containing this confusion matrix.
    :param y_test: The y_test dataset.
    :param y_pred: The model predictions on y
    :param class_names: The class names of the y labels.
    """
    self._name = name
    self._class_names = class_names
    # Fixed 23-label axis so every KDD class gets a row/column even when it
    # is absent from y_test or y_pred.
    self.matrix = confusion_matrix(y_test, y_pred, labels=[x for x in range(23)])
    # Per-class metric dicts, filled by calculate_results() below.
    self.scores = []
    self.calculate_results(class_names)
def calculate_results(self, names):
    """
    Derive per-class TP/TN/FP/FN and the metrics built on them, walking
    the confusion-matrix diagonal (one entry per class).

    :param names: Class names indexed by encoded label.
    """
    total = np.sum(self.matrix)
    for row, col in zip(range(0, self.matrix.shape[0]), range(0, self.matrix.shape[1])):
        TP = self.matrix[row, col]
        FP = np.sum(self.matrix[:, col]) - TP
        FN = np.sum(self.matrix[row, :]) - TP
        # Bug fix: TN is every sample outside this class's row and column,
        # i.e. total - TP - FP - FN. The previous np.trace(...) - TP value
        # only counted the other classes' diagonal hits and ignored their
        # off-diagonal misclassifications, inflating/deflating TN and every
        # metric derived from it.
        TN = total - TP - FP - FN
        self.scores.append({
            "Class": names[row],
            "TP": TP,
            "TN": TN,
            "FP": FP,
            "FN": FN,
            "Accuracy": self.calculate_accuracy(TP=TP, TN=TN, FP=FP, FN=FN),
            "Precision": self.calculate_precision(TP=TP, FP=FP),
            "Sensitivity": self.calculate_recall(TP=TP, FN=FN),
            "Specificity": self.calculate_specificity(TN=TN, FP=FP),
            "F-Score": self.calculate_f_score(TP=TP, FP=FP, FN=FN),
            "TPR": self.calculate_recall(TP=TP, FN=FN) / 100,
            "TNR": self.calculate_specificity(TN=TN, FP=FP) / 100,
            "FPR": self.calculate_fpr(TN=TN, FP=FP),
            "FNR": self.calculate_fnr(TP=TP, FN=FN)
        })
@staticmethod
def calculate_accuracy(TP, TN, FP, FN):
"""
Helper method to generate the accuracy metric for a given class.
:param TP: True Positives (Good)
:param TN: True Negatives (Good)
:param FP: False Positives (Bad)
:param FN: False Positives (Bad)
:return: The accuracy metric for the given class described by the passed params.
"""
return round(((TP + TN) / (TP + FP + TN + FN)) * 100, 2)
# TPR / Sensitivity
@staticmethod
def calculate_recall(TP, FN):
"""
Helper method to generate the recall(a.k.a sensitivity) metric for a given class.
:param TP: True Positives (Good)
:param FN: False Positives (Bad)
:return: The recall(a.k.a sensitivity) metric for the given class described by the passed params.
"""
return round((TP / (TP + FN)) * 100, 2)
@staticmethod
def calculate_specificity(TN, FP):
"""
Helper method to generate the specificity metric for a given class.
:param TN: True Negatives (Good)
:param FP: False Positives (Bad)
:return: The specificity metric for the given class described by the passed params.
"""
return round((TN / (FP + TN)) * 100, 2)
@staticmethod
def calculate_precision(TP, FP):
"""
Helper method to generate the precision metric for a given class.
:param TP: True Positives (Good)
:param FP: False Positives (Bad)
:return: The precision metric for the given class described by the passed params.
"""
return round((TP / (TP + FP)) * 100, 2)
def calculate_f_score(self, TP, FP, FN):
"""
Helper method to generate the F-Score metric for a given class.
:param TP: True Positives (Good)
:param FP: False Positives (Bad)
:param FN: False Positives (Bad)
:return: The F-Score metric for the given class described by the passed params.
"""
r = self.calculate_recall(TP, FN)
p = self.calculate_precision(TP, FP)
return round(((2 * r * p) / (r + p)) / 100, 2)
def calculate_fpr(self, TN, FP):
"""
Helper method to generate the FPR (False Positive Rate) metric for a given class.
:param TN: True Negatives (Good)
:param FP: False Positives (Bad)
:return: The FPR metric for the given class described by the passed params.
"""
return round((100 - self.calculate_specificity(TN=TN, FP=FP)) / 100, 2)
def calculate_fnr(self, TP, FN):
"""
Helper method to generate the FNR (False Negative Rate) metric for a given class.
:param TP: True Positives (Good)
:param FN: False Positives (Bad)
:return: The FNR metric for the given class described by the passed params.
"""
return round((100 - self.calculate_recall(TP=TP, FN=FN)) / 100, 2)
@property
def class_names(self) -> list:
return self._class_names
@property
def name(self) -> str:
return self._name
@property
def data(self) -> pd.DataFrame:
"""
Retrieves the confusion matrix metric scores as a dataframe with NaN filled with 0's.
:return: The confusion matrix data as a Dataframe with NaN's filled with 0's.
"""
return pd.DataFrame(self.scores).fillna(0)
@property
def matrix_normalised(self) -> np.ndarray:
"""
Creates a normalised version of the confusion matrix from 0 -> 1.
:return: A numpy array containing the normalised values of the confusion matrix.
"""
division_matrix = np.sum(self.matrix, axis=1)
division_matrix_inverse = division_matrix.reshape(-1, 1)
confusion_matrix_normalised = (self.matrix / division_matrix_inverse[None, :])
confusion_matrix_normalised = np.nan_to_num(confusion_matrix_normalised, posinf=0.0, neginf=0.0)
return confusion_matrix_normalised
def show(self):
"""
Helper method to show the confusion matrix as a heatmap plot.
"""
cm_norm = self.matrix_normalised
plt.subplots(figsize=(20, 10))
ax: plt.Axes = sns.heatmap(cm_norm[0], cmap="YlOrRd", linewidths=0.5, annot=True,
xticklabels=self.class_names,
yticklabels=self.class_names,
fmt=".2f", cbar=False)
ax.set_title(f"Confusion matrix ({self.name})")
ax.set_xlabel("Predictions")
ax.set_ylabel("Real Values")
plt.show()
@property
def f_scores(self) -> dict:
df = self.data[["Class", "F-Score"]]
return dict(zip(df['Class'], df['F-Score']))
@property
def sensitivities(self) -> dict:
df = self.data[["Class", "Sensitivity"]]
return dict(zip(df['Class'], df['Sensitivity']))
@property
def precisions(self) -> dict:
df = self.data[["Class", "Precision"]]
return dict(zip(df['Class'], df['Precision']))
class Model:
    """
    Wrapper around a single classifier (RF/KNN) that tracks its predictions,
    lazily builds its confusion matrix and offers ROC reporting helpers.
    """

    def __init__(self, name, model, class_names):
        """
        Class constructor.

        :param name: The textual name of this model (RF/KNN).
        :param model: The underlying classifier instance.
        :param class_names: Mapping of numeric y label -> class name.
        """
        self.name = name
        self._model = model
        self._class_names = class_names
        self._y_pred = None
        self._confusion_matrix = None

    def fit(self, X, y):
        """
        Fit the wrapped model on the (X, y) training data.

        :param X: The X dataset of features.
        :param y: The y dataset of output labels.
        """
        logger.info(f"Step - Fitting {self.name} model BEGIN")
        self._model.fit(X, ravel_y(y))
        logger.info(f"Step - Fitting {self.name} model END {timer.time_stage(f'Fitting Model {self.name}')}")

    def predict(self, X_test, force=False):
        """
        Predict y values for X_test, caching the result between calls.

        :param X_test: The dataset of input features.
        :param force: Flag to force re-computation of y_pred if previously calculated.
        :return: The y predictions for X_test input features.
        """
        logger.info("Step - Prediction BEGIN")
        needs_run = self._y_pred is None or force is True
        if needs_run:
            self._y_pred = self._model.predict(X_test)
        else:
            logger.info("Step - Prediction already evaluated")
        logger.info(f"Step - Prediction END {timer.time_stage(f'y Prediction {self.name}')}")
        return self._y_pred

    def confusion_matrix(self, y_test):
        """
        Lazily build and return the ConfusionMatrix for this model.

        :param y_test: The y output labels to use to construct the confusion matrix.
        :return: The confusion matrix, or None if predict() has not run yet.
        """
        if self._confusion_matrix is not None:
            return self._confusion_matrix
        if self._y_pred is None:
            logger.info("y_pred has not been evaluated yet. Call 'predict(X_test)'")
            return self._confusion_matrix
        self._confusion_matrix = ConfusionMatrix(name=self.name,
                                                 y_test=y_test,
                                                 y_pred=self._y_pred,
                                                 class_names=self.class_names)
        return self._confusion_matrix

    def multi_class_roc_auc_score(self, y_test, average="macro"):
        """
        Return the ROC AUC score for the multiclass problem.

        :param y_test: The y output labels to use.
        :param average: The averaging metric to use.
        :return: The ROC Area Under the Curve score.
        """
        binarizer = LabelBinarizer()
        binarizer.fit(y_test)
        binarized_test = binarizer.transform(y_test)
        binarized_pred = binarizer.transform(self._y_pred)
        score = roc_auc_score(binarized_test, binarized_pred, average=average)
        logger.info(f"Step - {self.name} ROC AUC Score: {score}")
        return score

    def plot_multi_class_roc_curve(self, y_test: pd.DataFrame):
        """
        Plot per-class ROC curves. Experimental and not verified the correct
        data is being output.

        :param y_test: The test set of output labels.
        :return: A tuple of FPR, TPR and ROC_AUC.
        """
        binarizer = LabelBinarizer()
        binarizer.fit(y_test)
        binarized_test = binarizer.transform(y_test)
        class_names = [self.get_class_for_label(label) for label in binarizer.classes_]
        binarized_pred = binarizer.transform(self._y_pred)
        fpr, tpr, roc_auc = {}, {}, {}
        for index in range(len(class_names)):
            fpr[index], tpr[index], _ = roc_curve(binarized_test[:, index], binarized_pred[:, index])
            roc_auc[index] = auc(fpr[index], tpr[index])
        self._plot_roc_curve(fpr, tpr, roc_auc, class_names)
        return fpr, tpr, roc_auc

    @staticmethod
    def _plot_roc_curve(fpr, tpr, roc_auc, class_names):
        """
        Draw one ROC panel per class on a 5x5 grid.

        :param fpr: False positive rate per class index.
        :param tpr: True positive rate per class index.
        :param roc_auc: ROC Area under the curve score per class index.
        :param class_names: The output label's class names.
        """
        fig, axs = plt.subplots(5, 5)
        cells = product([0, 1, 2, 3, 4], [0, 1, 2, 3, 4])
        for (row, col), idx in zip(cells, range(len(class_names))):
            print(f"{row}, {col}, {idx}, {class_names[idx]}")
            line_width = 2
            panel = axs[row, col]
            panel.plot(fpr[idx], tpr[idx], color='darkorange',
                       lw=line_width, label='ROC curve (area = %0.2f)' % roc_auc[idx])
            panel.plot([0, 1], [0, 1], color='navy', lw=line_width, linestyle='--')
            panel.set_title(class_names[idx])
        plt.title("ROC Curve Analysis")
        plt.xlabel("TPR")
        plt.ylabel("FPR")
        plt.show()

    @property
    def model(self):
        """The wrapped classifier instance."""
        return self._model

    def get_class_for_label(self, label) -> str:
        """
        Get a class name from its numeric label.

        :param label: The numeric label to match.
        :return: The class name matching the label passed.
        """
        return self._class_names.get(label)

    @property
    def class_names(self) -> list:
        """
        List of all class names, ordered by numeric label 0..n-1.

        :return: A list of all class names.
        """
        return [self._class_names.get(index) for index in range(len(self._class_names))]

    @property
    def get_classes_size(self) -> int:
        """
        The amount of classes.

        :return: The amount of classes.
        """
        return len(self.class_names)

    @property
    def matrix(self) -> ConfusionMatrix:
        """
        Getter accessor for confusion matrix.

        :return: The confusion matrix for this model (None until built).
        """
        return self._confusion_matrix
class ModelEvaluator:
    """
    Class to manage evaluation of the models passed.

    Owns one RandomForest and one KNN ``Model`` wrapper and drives fitting,
    cross-validated scoring, prediction and result reporting for both.
    """

    def __init__(self, y_classes):
        """
        Class constructor.

        :param y_classes: The class names for y output labels.
        """
        self.y_classes = y_classes
        self.y_test = None
        # NOTE(review): max_features="auto" was removed in scikit-learn >= 1.3
        # ("sqrt" is the classifier equivalent) - confirm the pinned version.
        self.rf_model = Model("RF", RandomForestClassifier(n_estimators=100,
                                                           criterion="gini",
                                                           max_features="auto",
                                                           random_state=RANDOM_STATE,
                                                           n_jobs=-1), y_classes)
        self.knn_model = Model("KNN", KNeighborsClassifier(n_neighbors=10,
                                                           p=1,
                                                           weights="distance",
                                                           algorithm="kd_tree",
                                                           n_jobs=-1), y_classes)

    @staticmethod
    def _eval_model(model: "Model", X, y) -> None:
        """
        Evaluates the passed model with (X, y) datasets using repeated
        stratified k-fold cross-validation scored on macro F1.

        :param model: The model to validate (RF/KNN).
        :param X: The input feature dataset.
        :param y: The output labels to predict.
        """
        logger.info(f"Step - Evaluating {model.name} model BEGIN")
        cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=RANDOM_STATE)
        cross_val_score(model.model, X, ravel_y(y), scoring='f1_macro', cv=cv, verbose=10, n_jobs=-1)
        logger.info(f"Step - Evaluating {model.name} model {timer.time_stage(f'{model.name} Evaluation')} END")

    def run_model_evaluation(self, X_train, y_train, X_test, y_test):
        """
        Fits and evaluates all internal models, then caches their predictions.

        :param X_train: The training set of input features.
        :param y_train: The training set of output labels.
        :param X_test: The test set of input features.
        :param y_test: The test set of output labels.
        """
        self.y_test = y_test
        logger.info("Stage - Model Analysis BEGIN")
        for classifier in (self.knn_model, self.rf_model):
            classifier.fit(X_train, y_train)
            self._eval_model(classifier, X=X_test, y=y_test)
            classifier.predict(X_test=X_test)
        logger.info("Stage - Model Analysis END")

    @property
    def rf(self):
        """The RandomForest model wrapper."""
        return self.rf_model

    @property
    def knn(self):
        """The KNN model wrapper."""
        return self.knn_model

    def show_confusion_matrices(self):
        """
        Helper method to show both models confusion matrices.
        """
        self.rf.confusion_matrix(y_test=self.y_test).show()
        self.knn.confusion_matrix(y_test=self.y_test).show()

    def show_roc_curves(self):
        """
        Helper method to show both ROC curve collections for both models. Experimental.
        """
        self.rf.plot_multi_class_roc_curve(y_test=self.y_test)
        self.knn.plot_multi_class_roc_curve(y_test=self.y_test)

    def roc_auc_scores(self):
        """
        Helper method to show ROC AUC metric. Experimental.
        """
        self.rf.multi_class_roc_auc_score(y_test=self.y_test)
        self.knn.multi_class_roc_auc_score(y_test=self.y_test)

    def save_results(self):
        """
        Saves the metric data for both models to csv files for import to Excel.
        """
        self.rf.matrix.data.to_csv("rf_results_averages.csv", index=False)
        pd.DataFrame(self.rf.matrix.matrix).to_csv("rf_confusion_matrix.csv", index=False)
        pd.DataFrame(self.rf.matrix.matrix_normalised[0, :, :]).to_csv("rf_confusion_matrix_normalised.csv",
                                                                       index=False)
        self.knn.matrix.data.to_csv("knn_results_averages.csv", index=False)
        # BUG FIX: the two lines below previously read self.rf, so the KNN csv
        # files silently contained the RandomForest matrices.
        pd.DataFrame(self.knn.matrix.matrix).to_csv("knn_confusion_matrix.csv", index=False)
        pd.DataFrame(self.knn.matrix.matrix_normalised[0, :, :]).to_csv("knn_confusion_matrix_normalised.csv",
                                                                        index=False)

    def show_result_overview(self):
        """
        Helper method to show pretty print format of metric results to the console for both models.

        :return: The metric results of the model.
        """
        from pprint import pprint
        knn_result = {key: round(value, 2) for key, value in self._get_result_overview(self.knn).items()}
        rf_result = {key: round(value, 2) for key, value in self._get_result_overview(self.rf).items()}
        results = {
            "KNN": knn_result,
            "RF": rf_result
        }
        pprint(results)
        return results

    @staticmethod
    def _get_result_overview(model: "Model"):
        """
        Returns a dict with the passed models result metric data, averaged over classes.

        :param model: The model to extract data from (RF/KNN)
        :return: A dict with all the metric data contained.
        """
        return {
            "Avg F-Score": model.matrix.data["F-Score"].mean(),
            "Avg Accuracy(a)": model.matrix.data["Accuracy"].mean(),
            "Avg Precision(p)": model.matrix.data["Precision"].mean(),
            "Avg Sensitivity(r)": model.matrix.data["Sensitivity"].mean(),
            "Avg Specificity": model.matrix.data["Specificity"].mean(),
            "Avg TPR": model.matrix.data["TPR"].mean(),
            "Avg TNR": model.matrix.data["TNR"].mean(),
            "Avg FPR": model.matrix.data["FPR"].mean(),
            "Avg FNR": model.matrix.data["FNR"].mean(),
        }

    def plot_results(self):
        """Plot per-class and averaged comparison charts for RF vs KNN."""
        compare_class_results(self.rf.matrix.sensitivities, self.knn.matrix.sensitivities,
                              "Random Forest Vs KNN Sensitivity Comparision")
        compare_class_results(self.rf.matrix.precisions, self.knn.matrix.precisions,
                              "Random Forest Vs KNN Precision Comparision")
        compare_class_results(self.rf.matrix.f_scores, self.knn.matrix.f_scores,
                              "Random Forest Vs KNN F-Score Comparision")
        compare_average_results_plots(self.show_result_overview())
class ModelTuner(ModelEvaluator):
    """
    Helper class for performing GridSearch hyper-parameter analysis on one of
    the evaluator's models.
    """

    def __init__(self, y_classes, tuning_params: list, model_type: str):
        """
        Class constructor.

        :param y_classes: The class names of the y numeric labels.
        :param tuning_params: The tuning parameters to use.
        :param model_type: The model type (RF/KNN)
        """
        super().__init__(y_classes=y_classes)
        self.tuning_params = tuning_params
        self.model_type = model_type
        # Populated by _run_grid_search().
        self._grid_search = None
        self._cv_results = None
        self._best_score = None
        self._best_parameters = None

    @property
    def grid_search(self):
        """The fitted GridSearchCV object (None before optimisation runs)."""
        return self._grid_search

    @property
    def best_score(self):
        """Best cross-validated score found by the grid search."""
        return self._best_score

    @property
    def best_parameters(self):
        """Parameter combination that produced the best score."""
        return self._best_parameters

    @property
    def cv_results(self):
        """Raw cv_results_ dict from the grid search."""
        return self._cv_results

    @property
    def classifier(self):
        """The underlying estimator matching model_type, or None if unknown."""
        lookup = {
            "RF": self.rf_model.model,
            "KNN": self.knn_model.model,
        }
        return lookup.get(self.model_type.upper(), None)

    def run_model_optimisation(self, X_train, y_train):
        """
        Fits and runs grid search using X_train and y_train.

        :param X_train: The training input set of features.
        :param y_train: The training output set of labels.
        """
        logger.info("Stage - Model Optimisation BEGIN")
        labels = ravel_y(y_train)
        estimator = self.classifier
        estimator.fit(X_train, labels)
        self._run_grid_search(estimator, X=X_train, y=labels)
        logger.info("Stage - Model Optimisation END")

    def _run_grid_search(self, model, X, y):
        """
        Private helper method to run GridSearch and record results.

        :param model: The model to run through GridSearch.
        :param X: The X training set of features.
        :param y: The y training set of output labels.
        """
        searcher = GridSearchCV(estimator=model,
                                param_grid=self.tuning_params,
                                scoring="f1_macro",
                                cv=4,
                                verbose=10,
                                n_jobs=-1)
        self._grid_search = searcher.fit(X, y)
        self._cv_results = searcher.cv_results_
        self._best_score = searcher.best_score_
        self._best_parameters = searcher.best_params_
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,751
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/timer.py
|
"""
Author: David Walshe
Date: 05/04/2020
"""
from collections import OrderedDict
from datetime import datetime as dt
class Singleton(type):
    """Metaclass that caches and reuses one instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # First call constructs the instance; later calls return the cached one.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Timer(metaclass=Singleton):
    """
    Process-wide (singleton) stop-watch recording how long each named stage
    of the pipeline takes.
    """

    def __init__(self):
        print("Timer Started")
        # Wall-clock references for the whole script and the current stage.
        self.initial_start_time = dt.now()
        self.section_start_time = dt.now()
        self.delta = None
        # stage name -> elapsed seconds, kept in insertion order for plotting.
        self.time_log = OrderedDict()

    def time_stage(self, stage):
        """
        Close the current stage, record its duration and restart the stage clock.

        :param stage: Name under which the elapsed time is logged.
        :return: A formatted "Time Taken - MM:SS.mmm" string.
        """
        end_time = dt.now()
        self.delta = end_time - self.section_start_time
        tdelta = self._print_time(stage=stage)
        self.time_log.update({stage: self.delta.total_seconds()})
        self._reset()
        return tdelta

    def time_script(self):
        """Measure the elapsed time since the Timer was created, then reset."""
        end_time = dt.now()
        self.delta = end_time - self.initial_start_time
        # The formatted string is intentionally discarded here.
        self._print_time(stage="END")
        self._reset()

    def _reset(self):
        """Restart the per-stage clock."""
        self.section_start_time = dt.now()

    def _print_time(self, stage):
        """
        Format the current delta as "Time Taken - MM:SS.mmm".

        BUG FIX: minutes/seconds previously used round(), so e.g. a 90.5s
        delta rendered as "02:30" instead of "01:30", and milliseconds could
        round up to 1000 (four digits). Truncate instead of rounding.
        (`stage` is accepted for interface compatibility but unused.)
        """
        total = self.delta.total_seconds()
        minutes = int(total // 60)
        seconds = int(total % 60)
        milliseconds = int(((total % 60) % 1) * 1000)
        tdelta = f"Time Taken - {minutes:02d}:{seconds:02d}.{milliseconds:03d}"
        return tdelta

    @property
    def plot_data(self):
        """(stage names, elapsed seconds) tuple suitable for plotting."""
        return list(self.time_log.keys()), list(self.time_log.values())
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,752
|
DavidWalshe93/Network-Anomoly-Classification
|
refs/heads/master
|
/src/logger_config.py
|
"""
Author: David Walshe
Date: 12/04/2020
"""
import logging
from src.timer import Singleton
class SetupLogger(metaclass=Singleton):
    """
    Singleton holding the shared console and file logging handlers so every
    module logger attaches to the same pair.
    """

    def __init__(self):
        # Shared format for both handlers.
        self.formatter = logging.Formatter("%(asctime)s - [%(levelname)s] - %(message)s")
        self._c_handler = logging.StreamHandler()
        self._f_handler = logging.FileHandler("run.log", mode="w")
        for handler in (self._c_handler, self._f_handler):
            handler.setLevel(logging.INFO)
            handler.setFormatter(self.formatter)

    @property
    def c_handler(self):
        """The shared console (stream) handler."""
        return self._c_handler

    @property
    def f_handler(self):
        """The shared file handler writing to run.log."""
        return self._f_handler
def setup_logger(logger):
    """
    Helper function to setup a logger with a file and console handler.

    :param logger: A logger from the source file looking to create the logger.
    :return: A logger with the correct file and console handler bound.
    """
    shared = SetupLogger()
    for handler in (shared.c_handler, shared.f_handler):
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
|
{"/src/pipeline.py": ["/src/config.py"], "/src/data.py": ["/src/config.py", "/src/logger_config.py"], "/src/preprocess.py": ["/src/data.py", "/src/pipeline.py", "/src/utils.py"], "/main.py": ["/src/plotting.py", "/src/config.py", "/src/data.py", "/src/evaluate.py", "/src/logger_config.py", "/src/pipeline.py", "/src/preprocess.py", "/src/timer.py"], "/src/plotting.py": ["/src/utils.py"], "/src/evaluate.py": ["/src/config.py", "/src/logger_config.py", "/src/plotting.py", "/src/timer.py", "/src/utils.py"], "/src/logger_config.py": ["/src/timer.py"]}
|
13,763
|
huleos/django_course
|
refs/heads/master
|
/portfolio/migrations/0004_auto_20160617_1943.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-17 19:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9: relaxes PortfolioItem.categories so the
    # M2M relation may be left blank. Generated migrations should not be
    # hand-edited beyond comments.
    dependencies = [
        ('portfolio', '0003_auto_20160603_0140'),
    ]
    operations = [
        migrations.AlterField(
            model_name='portfolioitem',
            name='categories',
            field=models.ManyToManyField(blank=True, to='portfolio.Category'),
        ),
    ]
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,764
|
huleos/django_course
|
refs/heads/master
|
/portfolio/forms.py
|
from django import forms
from portfolio.models import PortfolioItem
class PortfolioForm(forms.ModelForm):
    """
    ModelForm for creating/updating PortfolioItem with an optional
    user-editable slug, gated by the `edit_slug` checkbox.
    """

    class Meta:
        model = PortfolioItem
        fields = ['categories', 'title', 'edit_slug', 'slug', 'image', 'body', 'status']

    title = forms.CharField(
        required=True,
        error_messages={'required': 'Please enter your name'},
        help_text='A valid name, please.',
        widget=forms.TextInput({ 'placeholder': 'Your name'})
    )
    slug = forms.SlugField(required=False)
    edit_slug = forms.BooleanField(required=False, initial=False)

    def clean_slug(self):
        """
        Require a slug only when `edit_slug` is ticked.

        BUG FIX: clean_<field>() must return the cleaned value; the previous
        implementation returned None on success, silently blanking the slug.
        """
        slug = self.cleaned_data.get('slug')
        if self.cleaned_data.get('edit_slug') and not slug:
            raise forms.ValidationError('Slug is required!')
        return slug
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,765
|
huleos/django_course
|
refs/heads/master
|
/portfolio/migrations/0003_auto_20160603_0140.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-03 01:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0002_auto_20160603_0119'),
]
operations = [
migrations.AlterField(
model_name='portfolioitem',
name='status',
field=models.CharField(choices=[('draft', 'Draft'), ('published', 'Published'), ('deleted', 'Deleted')], default='draft', max_length=50),
),
]
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,766
|
huleos/django_course
|
refs/heads/master
|
/portfolio/serializers.py
|
from rest_framework import serializers
from portfolio.models import PortfolioItem
class PortfolioSerializer(serializers.ModelSerializer):
    """DRF serializer for PortfolioItem, exposing its absolute URL."""
    # Read-only URL sourced from the model's get_absolute_url() method.
    url = serializers.URLField(source='get_absolute_url')

    class Meta:
        model = PortfolioItem
        fields = ('id', 'author', 'title', 'slug', 'body', 'status', 'url')
        # Slug is generated/validated elsewhere, not settable via the API.
        read_only_fields = ('slug',)
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,767
|
huleos/django_course
|
refs/heads/master
|
/core/urls.py
|
from django.conf.urls import url
from core.views import NewsletterView
urlpatterns = [
    # Newsletter signup endpoint, handled by NewsletterView.
    url(r'^newsletter/$', NewsletterView.as_view(), name='newsletter'),
]
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,768
|
huleos/django_course
|
refs/heads/master
|
/blog/views.py
|
from django.views.generic import TemplateView, DetailView
from blog.models import Post
class BlogView(TemplateView):
    # Static blog listing page; note there is no get_context_data here, so
    # any post data is presumably supplied by the template - TODO confirm.
    template_name = 'blog/list.html'
class DetailPost(DetailView):
    # Renders a single Post, resolved from the slug captured by the URLconf.
    template_name = 'blog/post.html'
    model = Post
    context_object_name = 'post'
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,769
|
huleos/django_course
|
refs/heads/master
|
/blog/admin.py
|
from django.contrib import admin
from blog.models import Post, Category
class PostAdmin(admin.ModelAdmin):
    # Use the two-pane horizontal selector widget for the categories M2M.
    filter_horizontal = ['categories']
# Register the blog models with the default admin site.
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,770
|
huleos/django_course
|
refs/heads/master
|
/core/forms.py
|
from django import forms
class NewsletterForm(forms.Form):
    """Simple newsletter signup form: both fields are mandatory."""
    name = forms.CharField(required=True)
    email = forms.EmailField(required=True)
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,771
|
huleos/django_course
|
refs/heads/master
|
/portfolio/views.py
|
from django.core.urlresolvers import reverse_lazy
from django.views.generic import (
DetailView,
ListView,
CreateView,
UpdateView,
DeleteView,
TemplateView,
)
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from portfolio.models import PortfolioItem
from blog.models import Post
from django.db.models import Q
from portfolio.forms import PortfolioForm
class PortfolioView(TemplateView):
    """Landing page combining the first three posts and portfolio items."""
    template_name = 'portfolio/index.html'

    def get_context_data(self, **kwargs):
        """Add the first three posts and draft/published portfolio items."""
        context = super(PortfolioView, self).get_context_data(**kwargs)
        context['posts'] = Post.objects.all()[:3]
        # NOTE(review): draft items are surfaced alongside published ones -
        # confirm drafts are meant to appear on this page.
        context['portfolios'] = PortfolioItem.objects.filter(Q(status='draft') | Q(status='published'))[:3]
        return context
class DetailPortfolio(DetailView):
    # Single portfolio item, resolved from the slug captured by the URLconf.
    template_name = 'portfolio/item.html'
    model = PortfolioItem
    context_object_name = 'portfolio'
class ListPortfolio(ListView):
    # Paginated list of every portfolio item, one item per page.
    template_name = 'portfolio/portfolio-items.html'
    model = PortfolioItem
    context_object_name = 'portfolios'
    paginate_by = 1
    # Newest first, ties broken by title; applied on top of `queryset`.
    ordering = ('-date_create', 'title')
    queryset = PortfolioItem.objects.all()
@method_decorator(login_required, name='dispatch')
class CreatePortfolio(CreateView):
    # Login-required create form using the custom PortfolioForm.
    # NOTE(review): no success_url set - relies on the model defining
    # get_absolute_url; confirm PortfolioItem provides it.
    template_name = 'portfolio/create-portfolio.html'
    model = PortfolioItem
    form_class = PortfolioForm
@method_decorator(login_required, name='dispatch')
class UpdatePortfolio(UpdateView):
    # Login-required edit form; uses auto-generated fields rather than
    # PortfolioForm, so the edit_slug/slug handling does not apply here.
    template_name = 'portfolio/create-portfolio.html'
    model = PortfolioItem
    fields = ['title', 'status', 'body', 'image', 'categories',]
@method_decorator(login_required, name='dispatch')
class DeletePortfolio(DeleteView):
    # Login-required delete confirmation; redirects to the list on success.
    template_name = 'portfolio/delete-portfolio.html'
    model = PortfolioItem
    context_object_name = 'portfolio'
    success_url = reverse_lazy('portfolio-list')
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,772
|
huleos/django_course
|
refs/heads/master
|
/blog/urls.py
|
from django.conf.urls import url
from blog.views import BlogView, DetailPost
urlpatterns = [
    # Blog landing page.
    url(r'^$', BlogView.as_view(), name='blog-list'),
    # Individual post, looked up by slug.
    url(r'^(?P<slug>[-\w]+)/$', DetailPost.as_view(), name='blog-post')
]
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,773
|
huleos/django_course
|
refs/heads/master
|
/blog/models.py
|
from __future__ import unicode_literals
from django.db import models
from django.utils.text import slugify
from django.core.urlresolvers import reverse
class Post(models.Model):
    """Blog post with an auto-generated slug and M2M categories."""
    author = models.ForeignKey('auth.User', on_delete=models.SET_NULL, blank=True, null=True)
    categories = models.ManyToManyField('Category')
    title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=300, blank=True, default='')
    body = models.TextField()
    date_create = models.DateTimeField(auto_now_add=True)
    date_update = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        # Python 2-style display name (Django 1.9 era project).
        return self.title

    def save(self, *args, **kwargs):
        # Generate the slug from the title on first save only.
        if self.id is None:
            self.slug = slugify(self.title)
        # BUG FIX: accept and forward Django's save() options (force_insert,
        # using, update_fields, ...); the previous `save(self)` signature
        # raised TypeError for any caller passing them.
        return super(Post, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('blog-post', kwargs={'slug': self.slug})
class Category(models.Model):
    # Simple named label used to tag posts (see Post.categories).
    name = models.CharField(max_length=255)
    def __unicode__(self):
        return self.name
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,774
|
huleos/django_course
|
refs/heads/master
|
/portfolio/urls.py
|
# URL routes for the portfolio app: CRUD views, a DRF router-backed API,
# and a catch-all slug route for item detail pages.
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from portfolio.views_api import PortfolioViewSet
from portfolio.views import (
    DetailPortfolio,
    ListPortfolio,
    CreatePortfolio,
    UpdatePortfolio,
    DeletePortfolio,
    PortfolioView,
)
router = DefaultRouter()
router.register('portfolios', PortfolioViewSet)
urlpatterns = [
    url(r'^$', PortfolioView.as_view(), name='portfolio-index'),
    url(r'^create/$', CreatePortfolio.as_view(), name='create-portfolio'),
    url(r'^update/(?P<pk>[\d]+)/$', UpdatePortfolio.as_view(), name='update-portfolio'),
    url(r'^delete/(?P<pk>[\d]+)/$', DeletePortfolio.as_view(), name='delete-portfolio'),
    url(r'^list/$', ListPortfolio.as_view(), name='portfolio-list'),
    # NOTE(review): this slug pattern also matches the single segment 'api/',
    # so a request to the bare api root hits DetailPortfolio before the
    # include below -- consider moving the api/ route above it. Verify.
    url(r'^(?P<slug>[-\w]+)/$', DetailPortfolio.as_view(), name='portfolio-item'),
    url(r'^api/', include(router.urls), name='portfolio-api')
]
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,775
|
huleos/django_course
|
refs/heads/master
|
/portfolio/admin.py
|
from django.db import models
from django.contrib import admin
from portfolio.models import PortfolioItem, Category
def make_published(modeladmin, request, queryset):
    # Admin bulk action: set status='published' on all selected items
    # in a single UPDATE query.
    queryset.update(status='published')
make_published.short_description = 'Mark as published'
class PortfolioAdmin(admin.ModelAdmin):
    """Admin configuration for PortfolioItem with a TinyMCE-backed editor."""
    class Media:
        # Load TinyMCE from the CDN plus the local init script for the body field.
        js = ('//cdn.tinymce.com/4/tinymce.min.js', '/static/js/vendor/tinymce.js')
    list_display = ('title', 'author', 'date_create', 'slug', 'status')
    list_editable = ('author', 'slug', 'status')
    list_filter = ('title', 'date_create')
    fieldsets = (
        (None, {
            'fields': ('author',)
        }),
        ('Portfolio details',{
            'fields': (('title', 'slug',), 'status', 'body', 'image', 'categories')
        }),
        ('Dates',{
            'fields': (('date_create', 'date_update'),)
        })
    )
    filter_horizontal = ['categories']
    search_fields = ('title', 'body', 'author__email')
    save_on_top = True
    # auto_now/auto_now_add fields are not editable, so expose them read-only.
    readonly_fields = ('date_create', 'date_update')
    actions = (make_published,)
    ordering = ('-date_create', 'title')
admin.site.register(PortfolioItem, PortfolioAdmin)
admin.site.register(Category)
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,776
|
huleos/django_course
|
refs/heads/master
|
/portfolio/views_api.py
|
from rest_framework import viewsets
from rest_framework.permissions import IsAdminUser
from portfolio.serializers import PortfolioSerializer
from portfolio.models import PortfolioItem
class PortfolioViewSet(viewsets.ModelViewSet):
    # Full CRUD API endpoint for portfolio items; restricted to admin users.
    serializer_class = PortfolioSerializer
    queryset = PortfolioItem.objects.all()
    # NOTE(review): search_fields/filter_fields only take effect when the
    # matching DRF filter backends are enabled in settings -- confirm.
    search_fields = ('title', 'body')
    filter_fields = ('status', 'title', 'date_create')
    permission_classes = (IsAdminUser,)
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,777
|
huleos/django_course
|
refs/heads/master
|
/portfolio/migrations/0002_auto_20160603_0119.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-03 01:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the `status` workflow field to
    # PortfolioItem and alters the `image` field's definition.
    dependencies = [
        ('portfolio', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='portfolioitem',
            name='status',
            field=models.CharField(default='draft', max_length=50),
        ),
        migrations.AlterField(
            model_name='portfolioitem',
            name='image',
            field=models.ImageField(upload_to='portfolio/'),
        ),
    ]
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,778
|
huleos/django_course
|
refs/heads/master
|
/core/templatetags/core_tags.py
|
from django import template
from core.forms import NewsletterForm
register = template.Library()
@register.inclusion_tag('core/newsletter_widget.html', takes_context=True)
def newsletter_widget(context):
    """Render the newsletter signup widget.

    Reuses a bound 'form' from the template context when one is present
    (e.g. to redisplay validation errors); otherwise supplies a fresh,
    unbound NewsletterForm.
    """
    return {'form': context.get('form', NewsletterForm())}
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,779
|
huleos/django_course
|
refs/heads/master
|
/portfolio/models.py
|
from __future__ import unicode_literals
from django.db import models
from django.utils.text import slugify
from django.core.urlresolvers import reverse
# Lifecycle states for a portfolio item; used as choices for the `status`
# field below and targeted by the admin 'make_published' bulk action.
STATUSES = (
    ('draft', 'Draft'),
    ('published', 'Published'),
    ('deleted', 'Deleted')
)
class PortfolioItem(models.Model):
    """A portfolio entry with an auto-generated slug and a publication status."""
    author = models.ForeignKey('auth.User', on_delete=models.SET_NULL, blank=True, null=True)
    categories = models.ManyToManyField('Category', blank=True)
    title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=300, blank=True, default='')
    image = models.ImageField(upload_to='portfolio/', blank=True, null=True)
    body = models.TextField()
    status = models.CharField(max_length=50, default='draft', choices=STATUSES)
    date_create = models.DateTimeField(auto_now_add=True)
    date_update = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return self.title
    def save(self, *args, **kwargs):
        # Generate the slug from the title only on the initial insert so
        # that later manual slug edits are preserved.
        if self.id is None:
            self.slug = slugify(self.title)
        return super(PortfolioItem, self).save(*args, **kwargs)
    def get_absolute_url(self):
        # Resolves to the 'portfolio-item' route declared in portfolio/urls.py.
        return reverse('portfolio-item', kwargs={'slug': self.slug})
class Category(models.Model):
    # Simple named label used to tag portfolio items (see PortfolioItem.categories).
    name = models.CharField(max_length=255)
    def __unicode__(self):
        return self.name
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,780
|
huleos/django_course
|
refs/heads/master
|
/core/views.py
|
from django.views.generic import TemplateView, FormView
from core.forms import NewsletterForm
from django.core.mail import EmailMultiAlternatives
class HomeView(TemplateView):
    # Static landing page; no extra context beyond the template.
    template_name = 'core/index.html'
class NewsletterView(FormView):
    # Newsletter signup page backed by NewsletterForm.
    template_name = 'core/newsletter.html'
    form_class = NewsletterForm
    success_url = '/thanks/'
    def form_valid(self, form):
        # NOTE(review): nothing is done with the valid form here even though
        # EmailMultiAlternatives is imported at module level -- presumably the
        # send/subscribe step is still TODO; confirm intent.
        return super(NewsletterView, self).form_valid(form)
|
{"/portfolio/forms.py": ["/portfolio/models.py"], "/portfolio/serializers.py": ["/portfolio/models.py"], "/core/urls.py": ["/core/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/portfolio/views.py": ["/portfolio/models.py", "/blog/models.py", "/portfolio/forms.py"], "/blog/urls.py": ["/blog/views.py"], "/portfolio/urls.py": ["/portfolio/views_api.py", "/portfolio/views.py"], "/portfolio/admin.py": ["/portfolio/models.py"], "/portfolio/views_api.py": ["/portfolio/serializers.py", "/portfolio/models.py"], "/core/templatetags/core_tags.py": ["/core/forms.py"], "/core/views.py": ["/core/forms.py"]}
|
13,781
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Scheduler-side Environment to run scripts locally
and upload/download data to the shared storage.
"""
import logging
from datetime import datetime
from string import Template
from typing import Any, Dict, List, Generator, Iterable, Mapping, Optional, Tuple
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.local_exec_type import SupportsLocalExec
from mlos_bench.services.types.fileshare_type import SupportsFileShareOps
from mlos_bench.environments.status import Status
from mlos_bench.environments.local.local_env import LocalEnv
from mlos_bench.tunables.tunable import TunableValue
from mlos_bench.tunables.tunable_groups import TunableGroups
_LOG = logging.getLogger(__name__)
class LocalFileShareEnv(LocalEnv):
    """
    Scheduler-side Environment that runs scripts locally
    and uploads/downloads data to the shared file storage.
    """
    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new application environment with a given config.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration. Each config must have at least the "tunable_params"
            and the "const_args" sections.
            `LocalFileShareEnv` must also have at least some of the following
            parameters: {setup, upload, run, download, teardown,
            dump_params_file, read_results_file}
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of tunable parameters for *all* environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        super().__init__(name=name, config=config, global_config=global_config, tunables=tunables, service=service)
        # The single service object must implement both capabilities:
        # local execution (used by the inherited LocalEnv behavior) ...
        assert self._service is not None and isinstance(self._service, SupportsLocalExec), \
            "LocalEnv requires a service that supports local execution"
        self._local_exec_service: SupportsLocalExec = self._service
        # ... and file-share upload/download for exchanging data remotely.
        assert self._service is not None and isinstance(self._service, SupportsFileShareOps), \
            "LocalEnv requires a service that supports file upload/download operations"
        self._file_share_service: SupportsFileShareOps = self._service
        # Pre-parse the from/to path templates once at construction time.
        self._upload = self._template_from_to("upload")
        self._download = self._template_from_to("download")
    def _template_from_to(self, config_key: str) -> List[Tuple[Template, Template]]:
        """
        Convert a list of {"from": "...", "to": "..."} to a list of pairs
        of string.Template objects so that we can plug in self._params into it later.
        """
        return [
            (Template(d['from']), Template(d['to']))
            for d in self.config.get(config_key, [])
        ]
    @staticmethod
    def _expand(from_to: Iterable[Tuple[Template, Template]],
                params: Mapping[str, TunableValue]) -> Generator[Tuple[str, str], None, None]:
        """
        Substitute $var parameters in from/to path templates.
        Return a generator of (str, str) pairs of paths.
        """
        # safe_substitute() leaves unknown $vars in place rather than raising.
        return (
            (path_from.safe_substitute(params), path_to.safe_substitute(params))
            for (path_from, path_to) in from_to
        )
    def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
        """
        Run setup scripts locally and upload the scripts and data to the shared storage.

        Parameters
        ----------
        tunables : TunableGroups
            A collection of tunable OS and application parameters along with their
            values. In a local environment these could be used to prepare a config
            file on the scheduler prior to transferring it to the remote environment,
            for instance.
        global_config : dict
            Free-format dictionary of global parameters of the environment
            that are not used in the optimization process.

        Returns
        -------
        is_success : bool
            True if operation is successful, false otherwise.
        """
        self._is_ready = super().setup(tunables, global_config)
        if self._is_ready:
            assert self._temp_dir is not None
            params = self._get_env_params()
            # Expose the local working directory to the path templates as $PWD.
            params["PWD"] = self._temp_dir
            for (path_from, path_to) in self._expand(self._upload, params):
                self._file_share_service.upload(self._config_loader_service.resolve_path(
                    path_from, extra_paths=[self._temp_dir]), path_to)
        return self._is_ready
    def _download_files(self, ignore_missing: bool = False) -> None:
        """
        Download files from the shared storage.

        Parameters
        ----------
        ignore_missing : bool
            If True, log a warning and continue when a file cannot be
            downloaded. If False (the default), re-raise the error after
            logging the warning.
        """
        assert self._temp_dir is not None
        params = self._get_env_params()
        # Expose the local working directory to the path templates as $PWD.
        params["PWD"] = self._temp_dir
        for (path_from, path_to) in self._expand(self._download, params):
            try:
                self._file_share_service.download(
                    path_from, self._config_loader_service.resolve_path(
                        path_to, extra_paths=[self._temp_dir]))
            except FileNotFoundError as ex:
                _LOG.warning("Cannot download: %s", path_from)
                if not ignore_missing:
                    raise ex
    def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:
        """
        Download benchmark results from the shared storage
        and run post-processing scripts locally.

        Returns
        -------
        (status, output) : (Status, dict)
            A pair of (Status, output) values, where `output` is a dict
            with the results or None if the status is not COMPLETED.
            If run script is a benchmark, then the score is usually expected to
            be in the `score` field.
        """
        # Missing files are fatal here: a failed download aborts the run.
        self._download_files()
        return super().run()
    def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:
        # Best-effort refresh: missing files are tolerated during status polls.
        self._download_files(ignore_missing=True)
        return super().status()
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,782
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunable_group_indexing_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for checking the indexing rules for tunable groups.
"""
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_tunable_group_indexing(tunable_groups: TunableGroups, tunable_categorical: Tunable) -> None:
    """
    Check that various types of indexing work for the tunable group:
    membership tests, lookup, and get/set by both name and Tunable object,
    on the TunableGroups as well as on its covariant group.
    """
    # Check that the "in" operator works.
    assert tunable_categorical in tunable_groups
    assert tunable_categorical.name in tunable_groups
    # NOTE: we reassign the tunable_categorical here since they come from
    # different fixtures so are technically different objects.
    (tunable_categorical, covariant_group) = tunable_groups.get_tunable(tunable_categorical.name)
    assert tunable_groups.get_tunable(tunable_categorical)[0] == tunable_categorical
    assert tunable_categorical in covariant_group
    assert tunable_categorical.name in covariant_group
    # Check that we can lookup that tunable by name or tunable object in the covariant group.
    assert covariant_group.get_tunable(tunable_categorical) == tunable_categorical
    assert covariant_group.get_tunable(tunable_categorical.name) == tunable_categorical
    # Reset the value on the tunable using the tunable.
    tunable_categorical.value = tunable_categorical.default
    # Check that we can index by name or tunable object.
    assert tunable_groups[tunable_categorical] == tunable_categorical.value
    assert tunable_groups[tunable_categorical.name] == tunable_categorical.value
    assert covariant_group[tunable_categorical] == tunable_categorical.value
    assert covariant_group[tunable_categorical.name] == tunable_categorical.value
    # Check that we can assign a new value by index.
    # (pick any category other than the current value)
    new_value = [x for x in tunable_categorical.categories if x != tunable_categorical.value][0]
    tunable_groups[tunable_categorical] = new_value
    assert tunable_groups[tunable_categorical] == new_value
    assert tunable_groups[tunable_categorical.name] == new_value
    assert covariant_group[tunable_categorical] == new_value
    assert covariant_group[tunable_categorical.name] == new_value
    assert tunable_categorical.value == new_value
    assert tunable_categorical.value != tunable_categorical.default
    # Check that we can assign a new value by name.
    tunable_groups[tunable_categorical] = tunable_categorical.default
    assert tunable_categorical.value == tunable_categorical.default
    assert tunable_groups[tunable_categorical] == tunable_categorical.value
    assert tunable_groups[tunable_categorical.name] == tunable_categorical.value
    assert covariant_group[tunable_categorical] == tunable_categorical.value
    assert covariant_group[tunable_categorical.name] == tunable_categorical.value
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,783
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
No-op optimizer for mlos_bench that proposes a single configuration.
"""
import logging
from typing import Optional
from mlos_bench.services.base_service import Service
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.optimizers.mock_optimizer import MockOptimizer
_LOG = logging.getLogger(__name__)
class OneShotOptimizer(MockOptimizer):
    """
    Mock optimizer that proposes a single configuration and returns.
    Explicit configs (partial or full) are possible using configuration files.
    """
    # TODO: Add support for multiple explicit configs (i.e., FewShot or Manual Optimizer) - #344
    def __init__(self,
                 tunables: TunableGroups,
                 config: dict,
                 global_config: Optional[dict] = None,
                 service: Optional[Service] = None):
        super().__init__(tunables, config, global_config, service)
        _LOG.info("Run a single iteration for: %s", self._tunables)
        self._max_iter = 1  # Always run for just one iteration.
    @property
    def supports_preload(self) -> bool:
        # A one-shot run ignores any previously recorded experiment data.
        return False
    def suggest(self) -> TunableGroups:
        """Return a copy of the current tunables as the single suggestion."""
        _LOG.info("Suggest: %s", self._tunables)
        return self._tunables.copy()
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,784
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/remote/remote_env.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Remotely executed benchmark/script environment.
"""
import logging
from typing import Dict, Iterable, Optional, Tuple
from mlos_bench.environments.status import Status
from mlos_bench.environments.script_env import ScriptEnv
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.remote_exec_type import SupportsRemoteExec
from mlos_bench.services.types.vm_provisioner_type import SupportsVMOps
from mlos_bench.tunables.tunable_groups import TunableGroups
_LOG = logging.getLogger(__name__)
class RemoteEnv(ScriptEnv):
    """
    Environment to run benchmarks and scripts on a remote host.
    """
    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new environment for remote execution.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration. Each config must have at least the "tunable_params"
            and the "const_args" sections.
            `RemoteEnv` must also have at least some of the following parameters:
            {setup, run, teardown, wait_boot}
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of tunable parameters for *all* environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        super().__init__(name=name, config=config, global_config=global_config,
                         tunables=tunables, service=service)
        # When True, setup() first boots the remote host and waits for it.
        self._wait_boot = self.config.get("wait_boot", False)
        assert self._service is not None and isinstance(self._service, SupportsRemoteExec), \
            "RemoteEnv requires a service that supports remote execution operations"
        self._remote_exec_service: SupportsRemoteExec = self._service
        # TODO: Refactor this as "host" and "os" operations to accommodate SSH service.
        assert self._service is not None and isinstance(self._service, SupportsVMOps), \
            "RemoteEnv requires a service that supports host operations"
        self._host_service: SupportsVMOps = self._service
    def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
        """
        Check if the environment is ready and set up the application
        and benchmarks on a remote host.

        Parameters
        ----------
        tunables : TunableGroups
            A collection of tunable OS and application parameters along with their
            values. Setting these parameters should not require an OS reboot.
        global_config : dict
            Free-format dictionary of global parameters of the environment
            that are not used in the optimization process.

        Returns
        -------
        is_success : bool
            True if operation is successful, false otherwise.
        """
        if not super().setup(tunables, global_config):
            return False
        if self._wait_boot:
            _LOG.info("Wait for the remote environment to start: %s", self)
            # Start the host; if the operation is asynchronous (PENDING),
            # block until it finishes before proceeding.
            (status, params) = self._host_service.vm_start(self._params)
            if status.is_pending():
                (status, _) = self._host_service.wait_vm_operation(params)
            if not status.is_succeeded():
                return False
        if self._script_setup:
            _LOG.info("Set up the remote environment: %s", self)
            (status, _) = self._remote_exec(self._script_setup)
            _LOG.info("Remote set up complete: %s :: %s", self, status)
            self._is_ready = status.is_succeeded()
        else:
            # No setup script configured -- nothing more to do.
            self._is_ready = True
        return self._is_ready
    def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:
        """
        Runs the run script on the remote environment.

        This can be used to, for instance, submit a new experiment to the
        remote application environment by (re)configuring an application and
        launching the benchmark, or run a script that collects the results.

        Returns
        -------
        (status, output) : (Status, dict)
            A pair of (Status, output) values, where `output` is a dict
            with the results or None if the status is not COMPLETED.
            If run script is a benchmark, then the score is usually expected to
            be in the `score` field.
        """
        _LOG.info("Run script remotely on: %s", self)
        # Keep both the unpacked status and the full (status, output) pair:
        # if the parent run fails or there is no run script, return it as-is.
        (status, _) = result = super().run()
        if not (status.is_ready() and self._script_run):
            return result
        result = self._remote_exec(self._script_run)
        _LOG.info("Remote run complete: %s :: %s", self, result)
        return result
    def teardown(self) -> None:
        """
        Clean up and shut down the remote environment.
        """
        if self._script_teardown:
            _LOG.info("Remote teardown: %s", self)
            (status, _) = self._remote_exec(self._script_teardown)
            _LOG.info("Remote teardown complete: %s :: %s", self, status)
        super().teardown()
    def _remote_exec(self, script: Iterable[str]) -> Tuple[Status, Optional[dict]]:
        """
        Run a script on the remote host.

        Parameters
        ----------
        script : [str]
            List of commands to be executed on the remote host.

        Returns
        -------
        result : (Status, dict)
            A pair of Status and dict with the benchmark/script results.
            Status is one of {PENDING, SUCCEEDED, FAILED, TIMED_OUT}
        """
        env_params = self._get_env_params()
        _LOG.debug("Submit script: %s with %s", self, env_params)
        (status, output) = self._remote_exec_service.remote_exec(
            script, config=self._params, env_params=env_params)
        _LOG.debug("Script submitted: %s %s :: %s", self, status, output)
        # Submission may complete asynchronously; fetch the final results.
        if status in {Status.PENDING, Status.SUCCEEDED}:
            (status, output) = self._remote_exec_service.get_remote_exec_results(output)
        # TODO: extract the results from `output`.
        _LOG.debug("Status: %s :: %s", status, output)
        return (status, output)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,785
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/spaces/adapters/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Basic initializer module for the mlos_core space adapters.
"""
from enum import Enum
from typing import Optional, TypeVar
import ConfigSpace
from mlos_core.spaces.adapters.identity_adapter import IdentityAdapter
from mlos_core.spaces.adapters.llamatune import LlamaTuneAdapter
__all__ = [
'IdentityAdapter',
'LlamaTuneAdapter',
]
class SpaceAdapterType(Enum):
    """Enumerate supported MlosCore space adapters."""
    # Each member's value is the adapter class itself, so that
    # SpaceAdapterFactory can instantiate it via `space_adapter_type.value(...)`.
    IDENTITY = IdentityAdapter
    """A no-op adapter will be used"""
    LLAMATUNE = LlamaTuneAdapter
    """An instance of LlamaTuneAdapter class will be used"""
# To make mypy happy, we need to define a type variable for each optimizer type.
# https://github.com/python/mypy/issues/12952
# ConcreteSpaceAdapter = TypeVar('ConcreteSpaceAdapter', *[member.value for member in SpaceAdapterType])
# To address this, we add a test for complete coverage of the enum.
ConcreteSpaceAdapter = TypeVar(
    'ConcreteSpaceAdapter',
    IdentityAdapter,
    LlamaTuneAdapter,
)
class SpaceAdapterFactory:
    """Simple factory class for creating BaseSpaceAdapter-derived objects"""
    # pylint: disable=too-few-public-methods
    @staticmethod
    def create(*,
               parameter_space: ConfigSpace.ConfigurationSpace,
               space_adapter_type: SpaceAdapterType = SpaceAdapterType.IDENTITY,
               space_adapter_kwargs: Optional[dict] = None) -> ConcreteSpaceAdapter:
        """
        Create a new space adapter instance, given the parameter space and potential
        space adapter options.

        Parameters
        ----------
        parameter_space : ConfigSpace.ConfigurationSpace
            Input configuration space.
        space_adapter_type : Optional[SpaceAdapterType]
            Space adapter class to be used alongside the optimizer.
        space_adapter_kwargs : Optional[dict]
            Optional arguments passed in SpaceAdapter class constructor.

        Returns
        -------
        space_adapter : ConcreteSpaceAdapter
            Instance of concrete space adapter (e.g., None, LlamaTuneAdapter, etc.)
        """
        # Defensive: callers may pass None explicitly despite the non-Optional
        # annotation; treat it the same as the IDENTITY default.
        if space_adapter_type is None:
            space_adapter_type = SpaceAdapterType.IDENTITY
        if space_adapter_kwargs is None:
            space_adapter_kwargs = {}
        # The enum member's value is the adapter class; instantiate it.
        space_adapter: ConcreteSpaceAdapter = space_adapter_type.value(
            orig_parameter_space=parameter_space,
            **space_adapter_kwargs
        )
        return space_adapter
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,786
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.services.
Used to make mypy happy about multiple conftest.py modules.
"""
# Re-export the mock service implementations so tests can import them
# directly from this package (e.g. `from ...tests.services import MockVMService`).
from .local import MockLocalExecService
from .remote import MockFileShareService, MockRemoteExecService, MockVMService
__all__ = [
    'MockLocalExecService',
    'MockFileShareService',
    'MockRemoteExecService',
    'MockVMService',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,787
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tunables/tunable_groups.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
TunableGroups definition.
"""
import copy
from typing import Dict, Generator, Iterable, Mapping, Optional, Tuple, Union
from mlos_bench.tunables.tunable import Tunable, TunableValue
from mlos_bench.tunables.covariant_group import CovariantTunableGroup
class TunableGroups:
    """
    A collection of covariant groups of tunable parameters.

    Maintains both the groups themselves and a flat index from each tunable
    parameter name to the group it belongs to, for O(1) lookups.
    """

    def __init__(self, config: Optional[dict] = None):
        """
        Create a new group of tunable parameters.

        Parameters
        ----------
        config : dict
            Python dict of serialized representation of the covariant tunable groups.
        """
        if config is None:
            config = {}
        self._index: Dict[str, CovariantTunableGroup] = {}  # Index (Tunable id -> CovariantTunableGroup)
        self._tunable_groups: Dict[str, CovariantTunableGroup] = {}
        for (name, group_config) in config.items():
            self._add_group(CovariantTunableGroup(name, group_config))

    def __eq__(self, other: object) -> bool:
        """
        Check if two TunableGroups are equal.

        Parameters
        ----------
        other : TunableGroups
            A tunable groups object to compare to.

        Returns
        -------
        is_equal : bool
            True if two TunableGroups are equal.
        """
        if not isinstance(other, TunableGroups):
            return False
        return bool(self._tunable_groups == other._tunable_groups)

    def copy(self) -> "TunableGroups":
        """
        Deep copy of the TunableGroups object.

        Returns
        -------
        tunables : TunableGroups
            A new instance of the TunableGroups object
            that is a deep copy of the original one.
        """
        return copy.deepcopy(self)

    def _add_group(self, group: CovariantTunableGroup) -> None:
        """
        Add a CovariantTunableGroup to the current collection.

        Note: non-overlapping groups are expected to be added to the collection.

        Parameters
        ----------
        group : CovariantTunableGroup
        """
        assert group.name not in self._tunable_groups, f"Duplicate covariant tunable group name {group.name} in {self}"
        self._tunable_groups[group.name] = group
        # Keep the flat tunable-name index in sync with the group collection.
        for tunable in group.get_tunables():
            if tunable.name in self._index:
                raise ValueError(f"Duplicate Tunable {tunable.name} from group {group.name} in {self}")
            self._index[tunable.name] = group

    def merge(self, tunables: "TunableGroups") -> "TunableGroups":
        """
        Merge the two collections of covariant tunable groups.

        Unlike the dict `update` method, this method does not modify the
        original when overlapping keys are found.
        It is expected be used to merge the tunable groups referenced by a
        standalone Environment config into a parent CompositeEnvironment,
        for instance.
        This allows self contained, potentially overlapping, but also
        overridable configs to be composed together.

        Parameters
        ----------
        tunables : TunableGroups
            A collection of covariant tunable groups.

        Returns
        -------
        self : TunableGroups
            Self-reference for chaining.
        """
        # pylint: disable=protected-access
        # Check that covariant groups are unique, else throw an error.
        for group in tunables._tunable_groups.values():
            if group.name not in self._tunable_groups:
                self._add_group(group)
            else:
                # Check that there's no overlap in the tunables.
                # But allow for differing current values.
                if not self._tunable_groups[group.name].equals_defaults(group):
                    # BUGFIX: the second string was missing the f-prefix, so the
                    # placeholders were emitted verbatim in the error message.
                    raise ValueError(f"Overlapping covariant tunable group name {group.name} " +
                                     f"in {self._tunable_groups[group.name]} and {tunables}")
        return self

    def __repr__(self) -> str:
        """
        Produce a human-readable version of the TunableGroups (mostly for logging).

        Returns
        -------
        string : str
            A human-readable version of the TunableGroups.
        """
        # Sort groups by decreasing cost, then by name, for a stable output.
        return "{ " + ", ".join(
            f"{group.name}::{tunable}"
            for group in sorted(self._tunable_groups.values(), key=lambda g: (-g.cost, g.name))
            for tunable in sorted(group._tunables.values())) + " }"

    def __contains__(self, tunable: Union[str, Tunable]) -> bool:
        """
        Checks if the given name/tunable is in this tunable group.
        """
        name: str = tunable.name if isinstance(tunable, Tunable) else tunable
        return name in self._index

    def __getitem__(self, tunable: Union[str, Tunable]) -> TunableValue:
        """
        Get the current value of a single tunable parameter.
        """
        name: str = tunable.name if isinstance(tunable, Tunable) else tunable
        return self._index[name][name]

    def __setitem__(self, tunable: Union[str, Tunable], tunable_value: Union[TunableValue, Tunable]) -> TunableValue:
        """
        Update the current value of a single tunable parameter.
        """
        # Use double index to make sure we set the is_updated flag of the group
        name: str = tunable.name if isinstance(tunable, Tunable) else tunable
        value: TunableValue = tunable_value.value if isinstance(tunable_value, Tunable) else tunable_value
        self._index[name][name] = value
        return self._index[name][name]

    def __iter__(self) -> Generator[Tuple[Tunable, CovariantTunableGroup], None, None]:
        """
        An iterator over all tunables in the group.

        Returns
        -------
        [(tunable, group), ...] : iter(Tunable, CovariantTunableGroup)
            An iterator over all tunables in all groups. Each element is a 2-tuple
            of an instance of the Tunable parameter and covariant group it belongs to.
        """
        return ((group.get_tunable(name), group) for (name, group) in self._index.items())

    def get_tunable(self, tunable: Union[str, Tunable]) -> Tuple[Tunable, CovariantTunableGroup]:
        """
        Access the entire Tunable (not just its value) and its covariant group.
        Throw KeyError if the tunable is not found.

        Parameters
        ----------
        tunable : Union[str, Tunable]
            Name of the tunable parameter.

        Returns
        -------
        (tunable, group) : (Tunable, CovariantTunableGroup)
            A 2-tuple of an instance of the Tunable parameter and covariant group it belongs to.
        """
        name: str = tunable.name if isinstance(tunable, Tunable) else tunable
        group = self._index[name]
        return (group.get_tunable(name), group)

    def get_covariant_group_names(self) -> Iterable[str]:
        """
        Get the names of all covariance groups in the collection.

        Returns
        -------
        group_names : [str]
            IDs of the covariant tunable groups.
        """
        return self._tunable_groups.keys()

    def subgroup(self, group_names: Iterable[str]) -> "TunableGroups":
        """
        Select the covariance groups from the current set and create a new
        TunableGroups object that consists of those covariance groups.

        Note: The new TunableGroup will include *references* (not copies) to
        original ones, so each will get updated together.
        This is often desirable to support the use case of multiple related
        Environments (e.g. Local vs Remote) using the same set of tunables
        within a CompositeEnvironment.

        Parameters
        ----------
        group_names : list of str
            IDs of the covariant tunable groups.

        Returns
        -------
        tunables : TunableGroups
            A collection of covariant tunable groups.
        """
        # pylint: disable=protected-access
        tunables = TunableGroups()
        for name in group_names:
            if name not in self._tunable_groups:
                raise KeyError(f"Unknown covariant group name '{name}' in tunable group {self}")
            tunables._add_group(self._tunable_groups[name])
        return tunables

    def get_param_values(self, group_names: Optional[Iterable[str]] = None,
                         into_params: Optional[Dict[str, TunableValue]] = None) -> Dict[str, TunableValue]:
        """
        Get the current values of the tunables that belong to the specified covariance groups.

        Parameters
        ----------
        group_names : list of str or None
            IDs of the covariant tunable groups.
            Select parameters from all groups if omitted.
        into_params : dict
            An optional dict to copy the parameters and their values into.

        Returns
        -------
        into_params : dict
            Flat dict of all parameters and their values from given covariance groups.
        """
        if group_names is None:
            group_names = self.get_covariant_group_names()
        if into_params is None:
            into_params = {}
        for name in group_names:
            into_params.update(self._tunable_groups[name].get_tunable_values_dict())
        return into_params

    def is_updated(self, group_names: Optional[Iterable[str]] = None) -> bool:
        """
        Check if any of the given covariant tunable groups has been updated.

        Parameters
        ----------
        group_names : list of str or None
            IDs of the (covariant) tunable groups. Check all groups if omitted.

        Returns
        -------
        is_updated : bool
            True if any of the specified tunable groups has been updated, False otherwise.
        """
        # NOTE(review): an explicitly-passed empty list falls back to *all*
        # groups (truthiness check, not an `is None` check) -- confirm intended.
        return any(self._tunable_groups[name].is_updated()
                   for name in (group_names or self.get_covariant_group_names()))

    def reset(self, group_names: Optional[Iterable[str]] = None) -> "TunableGroups":
        """
        Clear the update flag of given covariant groups.

        Parameters
        ----------
        group_names : list of str or None
            IDs of the (covariant) tunable groups. Reset all groups if omitted.

        Returns
        -------
        self : TunableGroups
            Self-reference for chaining.
        """
        for name in (group_names or self.get_covariant_group_names()):
            self._tunable_groups[name].reset_is_updated()
        return self

    def assign(self, param_values: Mapping[str, TunableValue]) -> "TunableGroups":
        """
        In-place update the values of the tunables from the dictionary
        of (key, value) pairs.

        Parameters
        ----------
        param_values : Mapping[str, TunableValue]
            Dictionary mapping Tunable parameter names to new values.

        Returns
        -------
        self : TunableGroups
            Self-reference for chaining.
        """
        for key, value in param_values.items():
            self[key] = value
        return self
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,788
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunable_group_update_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for checking the is_updated flag for tunable groups.
"""
from mlos_bench.tunables.tunable_groups import TunableGroups
# New values for the tunables; the tests below assert that assigning them
# flips the is_updated flag of the "kernel" covariant group only.
_TUNABLE_VALUES = {
    "kernel_sched_migration_cost_ns": 8888,
    "kernel_sched_latency_ns": 9999,
}
def test_tunable_group_update(tunable_groups: TunableGroups) -> None:
    """
    Assigning new values to the tunables must raise the is_updated flag.
    """
    # .assign() returns self, so the check can be chained directly.
    assert tunable_groups.assign(_TUNABLE_VALUES).is_updated()
def test_tunable_group_update_twice(tunable_groups: TunableGroups) -> None:
    """
    Re-assigning the same values after a reset must *not* raise the is_updated flag.
    """
    # First assignment marks the groups as updated.
    assert tunable_groups.assign(_TUNABLE_VALUES).is_updated()
    # A reset clears the flag.
    assert not tunable_groups.reset().is_updated()
    # Assigning identical values again is a no-op and leaves the flag cleared.
    assert not tunable_groups.assign(_TUNABLE_VALUES).is_updated()
def test_tunable_group_update_kernel(tunable_groups: TunableGroups) -> None:
    """
    Only the covariant group that was actually touched gets the is_updated flag.
    """
    tunable_groups.assign(_TUNABLE_VALUES)
    assert tunable_groups.is_updated()
    # Both assigned parameters belong to the "kernel" group ...
    assert tunable_groups.is_updated(["kernel"])
    # ... so the remaining groups must stay clean.
    assert not tunable_groups.is_updated(["boot", "provision"])
def test_tunable_group_update_boot(tunable_groups: TunableGroups) -> None:
    """
    The is_updated flag must be set only for the covariant group that changed.
    """
    # Kernel parameters change: "boot" must remain untouched.
    tunable_groups.assign(_TUNABLE_VALUES)
    assert tunable_groups.is_updated()
    assert not tunable_groups.is_updated(["boot"])
    # After a reset, touching a boot parameter flips only the "boot" group.
    tunable_groups.reset()
    tunable_groups["idle"] = "mwait"
    assert tunable_groups.is_updated()
    assert tunable_groups.is_updated(["boot"])
    assert not tunable_groups.is_updated(["kernel"])
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,789
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/optimizers/optimizer_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for Bayesian Optimizers.
"""
from copy import deepcopy
from typing import List, Optional, Type
import logging
import pytest
import pandas as pd
import numpy as np
import numpy.typing as npt
import ConfigSpace as CS
from mlos_core.optimizers import (
OptimizerType, ConcreteOptimizer, OptimizerFactory, BaseOptimizer)
from mlos_core.optimizers.bayesian_optimizers import BaseBayesianOptimizer, SmacOptimizer
from mlos_core.spaces.adapters import SpaceAdapterType
from mlos_core.tests import get_all_concrete_subclasses
# Module-level logger; DEBUG level helps diagnose optimizer test failures.
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)
@pytest.mark.parametrize(('optimizer_class', 'kwargs'), [
    *[(member.value, {}) for member in OptimizerType],
])
def test_create_optimizer_and_suggest(configuration_space: CS.ConfigurationSpace,
                                      optimizer_class: Type[BaseOptimizer], kwargs: Optional[dict]) -> None:
    """
    Smoke test: instantiate each optimizer and pull a single suggestion from it.
    """
    optimizer = optimizer_class(parameter_space=configuration_space, **(kwargs or {}))
    assert optimizer is not None
    assert optimizer.parameter_space is not None
    suggestion = optimizer.suggest()
    assert suggestion is not None
    # The repr should identify the concrete optimizer class.
    assert repr(optimizer).startswith(optimizer_class.__name__)
    # register_pending() is not implemented yet for any of the optimizers.
    with pytest.raises(NotImplementedError):
        optimizer.register_pending(suggestion)
@pytest.mark.parametrize(('optimizer_class', 'kwargs'), [
    *[(member.value, {}) for member in OptimizerType],
])
def test_basic_interface_toy_problem(configuration_space: CS.ConfigurationSpace,
                                     optimizer_class: Type[BaseOptimizer], kwargs: Optional[dict]) -> None:
    """
    Toy problem to test the optimizers.

    Runs a short suggest/register loop on a 1-D objective and checks the
    shapes and columns of suggestions, best observation, and history.
    """
    max_iterations = 20
    if kwargs is None:
        kwargs = {}
    if optimizer_class == OptimizerType.SMAC.value:
        # SMAC sets the initial random samples as a percentage of the max iterations, which defaults to 100.
        # To avoid having to train more than 25 model iterations, we set a lower number of max iterations.
        kwargs['max_trials'] = max_iterations * 2
    def objective(x: pd.Series) -> npt.ArrayLike:   # pylint: disable=invalid-name
        # Forrester-style 1-D test function; only the 'x' column matters.
        ret: npt.ArrayLike = (6 * x - 2)**2 * np.sin(12 * x - 4)
        return ret
    # Emukit doesn't allow specifying a random state, so we set the global seed.
    np.random.seed(42)
    optimizer = optimizer_class(parameter_space=configuration_space, **kwargs)
    # Querying results before any registration must fail loudly.
    with pytest.raises(ValueError, match="No observations"):
        optimizer.get_best_observation()
    with pytest.raises(ValueError, match="No observations"):
        optimizer.get_observations()
    for _ in range(max_iterations):
        suggestion = optimizer.suggest()
        assert isinstance(suggestion, pd.DataFrame)
        assert (suggestion.columns == ['x', 'y', 'z']).all()
        # check that suggestion is in the space
        configuration = CS.Configuration(optimizer.parameter_space, suggestion.iloc[0].to_dict())
        # Raises an error if outside of configuration space
        configuration.is_valid_configuration()
        observation = objective(suggestion['x'])
        assert isinstance(observation, pd.Series)
        optimizer.register(suggestion, observation)
    best_observation = optimizer.get_best_observation()
    assert isinstance(best_observation, pd.DataFrame)
    assert (best_observation.columns == ['x', 'y', 'z', 'score']).all()
    assert best_observation['score'].iloc[0] < -5
    all_observations = optimizer.get_observations()
    assert isinstance(all_observations, pd.DataFrame)
    assert all_observations.shape == (20, 4)
    assert (all_observations.columns == ['x', 'y', 'z', 'score']).all()
    # It would be better to put this into bayesian_optimizer_test but then we'd have to refit the model
    if isinstance(optimizer, BaseBayesianOptimizer):
        pred_best = optimizer.surrogate_predict(best_observation[['x', 'y', 'z']])
        assert pred_best.shape == (1,)
        pred_all = optimizer.surrogate_predict(all_observations[['x', 'y', 'z']])
        assert pred_all.shape == (20,)
@pytest.mark.parametrize(('optimizer_type'), [
    # Enumerate all supported Optimizers
    # *[member for member in OptimizerType],
    *list(OptimizerType),
])
def test_concrete_optimizer_type(optimizer_type: OptimizerType) -> None:
    """
    Every optimizer type must be admissible for the ConcreteOptimizer TypeVar.
    """
    # pylint: disable=no-member
    constraints = ConcreteOptimizer.__constraints__  # type: ignore[attr-defined]
    assert optimizer_type.value in constraints
@pytest.mark.parametrize(('optimizer_type', 'kwargs'), [
    # Default optimizer
    (None, {}),
    # Enumerate all supported Optimizers
    *[(member, {}) for member in OptimizerType],
    # Optimizer with non-empty kwargs argument
])
def test_create_optimizer_with_factory_method(configuration_space: CS.ConfigurationSpace,
                                              optimizer_type: Optional[OptimizerType], kwargs: Optional[dict]) -> None:
    """
    Test that we can create an optimizer via the OptimizerFactory.
    """
    factory_args: dict = {
        'parameter_space': configuration_space,
        'optimizer_kwargs': kwargs if kwargs is not None else {},
    }
    # Omitting optimizer_type exercises the factory's default choice.
    if optimizer_type is not None:
        factory_args['optimizer_type'] = optimizer_type
    optimizer = OptimizerFactory.create(**factory_args)
    assert optimizer is not None
    assert optimizer.parameter_space is not None
    suggestion = optimizer.suggest()
    assert suggestion is not None
    if optimizer_type is not None:
        # The repr should identify the concrete optimizer class.
        assert repr(optimizer).startswith(optimizer_type.value.__name__)
@pytest.mark.parametrize(('optimizer_type', 'kwargs'), [
    # Enumerate all supported Optimizers
    *[(member, {}) for member in OptimizerType],
    # Optimizer with non-empty kwargs argument
    (OptimizerType.SMAC, {
        # Test with default config.
        'use_default_config': True,
        # 'n_random_init': 10,
    }),
])
def test_optimizer_with_llamatune(optimizer_type: OptimizerType, kwargs: Optional[dict]) -> None:
    """
    Toy problem to test the optimizers with llamatune space adapter.

    Runs the same optimizer twice -- once on the raw 2-D space and once
    through the LlamaTune space adapter projecting to 1-D -- and compares
    the best scores found by each.
    """
    # pylint: disable=too-complex
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-locals
    num_iters = 50
    if kwargs is None:
        kwargs = {}
    def objective(point: pd.DataFrame) -> pd.Series:
        # Best value can be reached by tuning a 1-dimensional search space
        ret: pd.Series = np.sin(point['x'] * point['y'])
        assert ret.hasnans is False
        return ret
    input_space = CS.ConfigurationSpace(seed=1234)
    # Add two continuous inputs
    input_space.add_hyperparameter(CS.UniformFloatHyperparameter(name='x', lower=0, upper=3))
    input_space.add_hyperparameter(CS.UniformFloatHyperparameter(name='y', lower=0, upper=3))
    # Initialize an optimizer that uses LlamaTune space adapter
    space_adapter_kwargs = {
        "num_low_dims": 1,
        "special_param_values": None,
        "max_unique_values_per_param": None,
    }
    # Make some adjustments to the kwargs for the optimizer and LlamaTuned
    # optimizer for debug/testing.
    # if optimizer_type == OptimizerType.SMAC:
    #    # Allow us to override the number of random init samples.
    #    kwargs['max_ratio'] = 1.0
    optimizer_kwargs = deepcopy(kwargs)
    llamatune_optimizer_kwargs = deepcopy(kwargs)
    # if optimizer_type == OptimizerType.SMAC:
    #    optimizer_kwargs['n_random_init'] = 20
    #    llamatune_optimizer_kwargs['n_random_init'] = 10
    llamatune_optimizer: BaseOptimizer = OptimizerFactory.create(
        parameter_space=input_space,
        optimizer_type=optimizer_type,
        optimizer_kwargs=llamatune_optimizer_kwargs,
        space_adapter_type=SpaceAdapterType.LLAMATUNE,
        space_adapter_kwargs=space_adapter_kwargs,
    )
    # Initialize an optimizer that uses the original space
    optimizer: BaseOptimizer = OptimizerFactory.create(
        parameter_space=input_space,
        optimizer_type=optimizer_type,
        optimizer_kwargs=optimizer_kwargs,
    )
    assert optimizer is not None
    assert llamatune_optimizer is not None
    assert optimizer.optimizer_parameter_space != llamatune_optimizer.optimizer_parameter_space
    llamatune_n_random_init = 0
    opt_n_random_init = int(kwargs.get('n_random_init', 0))
    if optimizer_type == OptimizerType.SMAC:
        assert isinstance(optimizer, SmacOptimizer)
        assert isinstance(llamatune_optimizer, SmacOptimizer)
        opt_n_random_init = optimizer.n_random_init
        llamatune_n_random_init = llamatune_optimizer.n_random_init
    for i in range(num_iters):
        # Place to set a breakpoint for when the optimizer is done with random init.
        if llamatune_n_random_init and i > llamatune_n_random_init:
            _LOG.debug("LlamaTuned Optimizer is done with random init.")
        if opt_n_random_init and i >= opt_n_random_init:
            _LOG.debug("Optimizer is done with random init.")
        # loop for optimizer
        suggestion = optimizer.suggest()
        observation = objective(suggestion)
        optimizer.register(suggestion, observation)
        # loop for llamatune-optimizer
        suggestion = llamatune_optimizer.suggest()
        _x, _y = suggestion['x'].iloc[0], suggestion['y'].iloc[0]
        assert _x == pytest.approx(_y, rel=1e-3) or _x + _y == pytest.approx(3., rel=1e-3)  # optimizer explores 1-dimensional space
        observation = objective(suggestion)
        llamatune_optimizer.register(suggestion, observation)
    # Retrieve best observations
    best_observation = optimizer.get_best_observation()
    llamatune_best_observation = llamatune_optimizer.get_best_observation()
    for best_obv in (best_observation, llamatune_best_observation):
        assert isinstance(best_obv, pd.DataFrame)
        assert (best_obv.columns == ['x', 'y', 'score']).all()
    # LlamaTune's optimizer score should be better (i.e., lower) than the plain optimizer's one, or close to it
    assert best_observation['score'].iloc[0] > llamatune_best_observation['score'].iloc[0] or \
        best_observation['score'].iloc[0] + 1e-3 > llamatune_best_observation['score'].iloc[0]
    # Retrieve and check all observations
    for all_obvs in (optimizer.get_observations(), llamatune_optimizer.get_observations()):
        assert isinstance(all_obvs, pd.DataFrame)
        assert all_obvs.shape == (num_iters, 3)
        assert (all_obvs.columns == ['x', 'y', 'score']).all()
    # .surrogate_predict method not currently implemented if space adapter is employed
    if isinstance(llamatune_optimizer, BaseBayesianOptimizer):
        with pytest.raises(NotImplementedError):
            llamatune_optimizer.surrogate_predict(llamatune_best_observation[['x', 'y']])
# Dynamically determine all of the optimizers we have implemented.
# Note: these must be sorted.
# Used to parametrize test_optimizer_type_defs below; the assert guards
# against the discovery helper silently returning an empty list.
optimizer_subclasses: List[Type[BaseOptimizer]] = get_all_concrete_subclasses(BaseOptimizer,    # type: ignore[type-abstract]
                                                                              pkg_name='mlos_core')
assert optimizer_subclasses
@pytest.mark.parametrize(('optimizer_class'), optimizer_subclasses)
def test_optimizer_type_defs(optimizer_class: Type[BaseOptimizer]) -> None:
    """
    Every concrete optimizer implementation must be registered in the OptimizerType enum.
    """
    assert optimizer_class in {member.value for member in OptimizerType}
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,790
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/environments/composite_env_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for composite environment.
"""
import pytest
from mlos_bench.environments.composite_env import CompositeEnv
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.services.config_persistence import ConfigPersistenceService
# pylint: disable=redefined-outer-name
@pytest.fixture
def composite_env(tunable_groups: TunableGroups) -> CompositeEnv:
    """
    Test fixture for CompositeEnv.

    Builds a composite with three MockEnv children: two that pull parent
    const_args via variable substitution and one with its own vmName.
    """
    return CompositeEnv(
        name="Composite Test Environment",
        config={
            "tunable_params": ["provision", "boot"],
            "const_args": {
                "vm_server_name": "Mock Server VM",
                "vm_client_name": "Mock Client VM",
                "someConst": "root"
            },
            "children": [
                {
                    "name": "Mock Client Environment 1",
                    "class": "mlos_bench.environments.mock_env.MockEnv",
                    "config": {
                        "tunable_params": ["provision"],
                        "const_args": {
                            # "$name" syntax pulls the value from the parent's const_args.
                            "vmName": "$vm_client_name",
                            "EnvId": 1,
                        },
                        "required_args": ["vmName", "someConst"],
                        "range": [60, 120],
                        "metrics": ["score"],
                    }
                },
                {
                    "name": "Mock Server Environment 2",
                    "class": "mlos_bench.environments.mock_env.MockEnv",
                    "config": {
                        "tunable_params": ["boot"],
                        "const_args": {
                            "vmName": "$vm_server_name",
                            "EnvId": 2,
                        },
                        "required_args": ["vmName"],
                        "range": [60, 120],
                        "metrics": ["score"],
                    }
                },
                {
                    "name": "Mock Control Environment 3",
                    "class": "mlos_bench.environments.mock_env.MockEnv",
                    "config": {
                        "tunable_params": ["boot"],
                        "const_args": {
                            "vmName": "Mock Control VM",
                            "EnvId": 3,
                        },
                        "required_args": ["vmName", "vm_server_name", "vm_client_name"],
                        "range": [60, 120],
                        "metrics": ["score"],
                    }
                }
            ]
        },
        tunables=tunable_groups,
        service=ConfigPersistenceService({}),
    )
def test_composite_env_params(composite_env: CompositeEnv) -> None:
    """
    Check that the const_args from the parent environment get propagated to the children.
    NOTE: Variables flow down to the children via required_args and const_args.
    """
    expected_params = [
        {
            "vmName": "Mock Client VM",     # const_args from the parent thru variable substitution
            "EnvId": 1,                     # const_args from the child
            "vmSize": "Standard_B4ms",      # tunable_params from the parent
            "someConst": "root",            # pulled in from parent via required_args
        },
        {
            "vmName": "Mock Server VM",     # const_args from the parent
            "EnvId": 2,                     # const_args from the child
            "idle": "halt",                 # tunable_params from the parent
            # "someConst" not required, so not passed from the parent
        },
        {
            "vmName": "Mock Control VM",    # const_args from the parent
            "EnvId": 3,                     # const_args from the child
            "idle": "halt",                 # tunable_params from the parent
            "vm_client_name": "Mock Client VM",
            "vm_server_name": "Mock Server VM",
        },
    ]
    for child, params in zip(composite_env.children, expected_params):
        assert child.parameters == params
def test_composite_env_setup(composite_env: CompositeEnv, tunable_groups: TunableGroups) -> None:
    """
    Check that the child environments pick up the new tunable values on setup.
    """
    tunable_groups.assign({
        "vmSize": "Standard_B2s",
        "idle": "mwait",
        "kernel_sched_migration_cost_ns": 100000,
    })
    with composite_env as env_context:
        assert env_context.setup(tunable_groups)
    expected_params = [
        {
            "vmName": "Mock Client VM",     # const_args from the parent
            "EnvId": 1,                     # const_args from the child
            "vmSize": "Standard_B2s",       # tunable_params from the parent
            "someConst": "root",            # pulled in from parent via required_args
        },
        {
            "vmName": "Mock Server VM",     # const_args from the parent
            "EnvId": 2,                     # const_args from the child
            "idle": "mwait",                # tunable_params from the parent
            # "someConst" not required, so not passed from the parent
        },
        {
            "vmName": "Mock Control VM",    # const_args from the parent
            "EnvId": 3,                     # const_args from the child
            "idle": "mwait",                # tunable_params from the parent
            "vm_client_name": "Mock Client VM",
            "vm_server_name": "Mock Server VM",
        },
    ]
    for child, params in zip(composite_env.children, expected_params):
        assert child.parameters == params
@pytest.fixture
def nested_composite_env(tunable_groups: TunableGroups) -> CompositeEnv:
    """
    Test fixture for CompositeEnv.

    Builds a composite whose children are themselves CompositeEnvs, each
    wrapping a single MockEnv, to exercise nested parameter propagation.
    """
    return CompositeEnv(
        name="Composite Test Environment",
        config={
            "tunable_params": ["provision", "boot"],
            "const_args": {
                "vm_server_name": "Mock Server VM",
                "vm_client_name": "Mock Client VM",
                "someConst": "root"
            },
            "children": [
                {
                    "name": "Nested Composite Client Environment 1",
                    "class": "mlos_bench.environments.composite_env.CompositeEnv",
                    "config": {
                        "tunable_params": ["provision"],
                        "const_args": {
                            # "$name" syntax pulls the value from the parent's const_args.
                            "vmName": "$vm_client_name",
                            "EnvId": 1,
                        },
                        "required_args": ["vmName", "EnvId", "someConst", "vm_server_name"],
                        "children": [
                            {
                                "name": "Mock Client Environment 1",
                                "class": "mlos_bench.environments.mock_env.MockEnv",
                                "config": {
                                    "tunable_params": ["provision"],
                                    # TODO: Might be nice to include a "^" or "*" option
                                    # here to indicate that all required_args from
                                    # the parent should be included here too in
                                    # order to reduce duplication.
                                    "required_args": ["vmName", "EnvId", "someConst", "vm_server_name"],
                                    "range": [60, 120],
                                    "metrics": ["score"],
                                }
                            },
                            # ...
                        ],
                    },
                },
                {
                    "name": "Nested Composite Server Environment 2",
                    "class": "mlos_bench.environments.composite_env.CompositeEnv",
                    "config": {
                        "tunable_params": ["boot"],
                        "const_args": {
                            "vmName": "$vm_server_name",
                            "EnvId": 2,
                        },
                        "required_args": ["vmName", "EnvId", "vm_client_name"],
                        "children": [
                            {
                                "name": "Mock Server Environment 2",
                                "class": "mlos_bench.environments.mock_env.MockEnv",
                                "config": {
                                    "tunable_params": ["boot"],
                                    "required_args": ["vmName", "EnvId", "vm_client_name"],
                                    "range": [60, 120],
                                    "metrics": ["score"],
                                }
                            },
                            # ...
                        ],
                    },
                },
            ]
        },
        tunables=tunable_groups,
        service=ConfigPersistenceService({}),
    )
def test_nested_composite_env_params(nested_composite_env: CompositeEnv) -> None:
    """
    Check that the const_args from the parent environment get propagated to the children.
    NOTE: Variables flow down to the children via required_args and const_args.
    """
    expected_params = [
        {
            "vmName": "Mock Client VM",     # const_args from the parent thru variable substitution
            "EnvId": 1,                     # const_args from the child
            "vmSize": "Standard_B4ms",      # tunable_params from the parent
            "someConst": "root",            # pulled in from parent via required_args
            "vm_server_name": "Mock Server VM",
        },
        {
            "vmName": "Mock Server VM",     # const_args from the parent
            "EnvId": 2,                     # const_args from the child
            "idle": "halt",                 # tunable_params from the parent
            # "someConst" not required, so not passed from the parent
            "vm_client_name": "Mock Client VM",
        },
    ]
    for child, params in zip(nested_composite_env.children, expected_params):
        # Each child is itself a composite wrapping a single MockEnv.
        assert isinstance(child, CompositeEnv)
        assert child.children[0].parameters == params
def test_nested_composite_env_setup(nested_composite_env: CompositeEnv, tunable_groups: TunableGroups) -> None:
    """
    Check that the child environments update their tunable parameters.
    """
    # Assign new tunable values before setting up the environment tree.
    tunable_groups.assign({
        "vmSize": "Standard_B2s",
        "idle": "mwait",
        "kernel_sched_migration_cost_ns": 100000,
    })
    with nested_composite_env as env_context:
        assert env_context.setup(tunable_groups)
    # After setup, the grandchildren should reflect the newly assigned tunables.
    client_env = nested_composite_env.children[0]
    assert isinstance(client_env, CompositeEnv)
    assert client_env.children[0].parameters == {
        "vmName": "Mock Client VM",  # const_args from the parent
        "EnvId": 1,  # const_args from the child
        "vmSize": "Standard_B2s",  # tunable_params from the parent
        "someConst": "root",  # pulled in from parent via required_args
        "vm_server_name": "Mock Server VM",
    }
    server_env = nested_composite_env.children[1]
    assert isinstance(server_env, CompositeEnv)
    assert server_env.children[0].parameters == {
        "vmName": "Mock Server VM",  # const_args from the parent
        "EnvId": 2,  # const_args from the child
        "idle": "mwait",  # tunable_params from the parent
        # "someConst": "root"  # not required, so not passed from the parent
        "vm_client_name": "Mock Client VM",
    }
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,791
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/spaces/adapters/llamatune_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for LlamaTune space adapter.
"""
# pylint: disable=missing-function-docstring
from typing import Any, Dict, Iterator, List, Set
import pytest
import ConfigSpace as CS
import pandas as pd
from mlos_core.spaces.adapters import LlamaTuneAdapter
def construct_parameter_space(
    n_continuous_params: int = 0,
    n_integer_params: int = 0,
    n_categorical_params: int = 0,
    seed: int = 1234,
) -> CS.ConfigurationSpace:
    """
    Helper function to construct an instance of `ConfigSpace.ConfigurationSpace`.

    Parameters
    ----------
    n_continuous_params : int
        Number of `UniformFloatHyperparameter`s (named ``cont_<i>``) to add.
    n_integer_params : int
        Number of `UniformIntegerHyperparameter`s (named ``int_<i>``) to add.
    n_categorical_params : int
        Number of `CategoricalHyperparameter`s (named ``str_<i>``) to add.
    seed : int
        Seed for the configuration space's random sampler.

    Returns
    -------
    CS.ConfigurationSpace
        A seeded configuration space with the requested hyperparameters.
    """
    input_space = CS.ConfigurationSpace(seed=seed)
    for idx in range(n_continuous_params):
        input_space.add_hyperparameter(
            CS.UniformFloatHyperparameter(name=f'cont_{idx}', lower=0, upper=64))
    for idx in range(n_integer_params):
        input_space.add_hyperparameter(
            CS.UniformIntegerHyperparameter(name=f'int_{idx}', lower=-1, upper=256))
    for idx in range(n_categorical_params):
        input_space.add_hyperparameter(
            # NOTE: Use a distinct comprehension variable (`choice_idx`) so it
            # does not shadow the outer loop's `idx` (the parameter index).
            CS.CategoricalHyperparameter(
                name=f'str_{idx}',
                choices=[f'option_{choice_idx}' for choice_idx in range(5)]))
    return input_space
# Parametrize over target-space sizes and original spaces of various parameter
# types, each 1.5x or 4x larger than the target space.
@pytest.mark.parametrize(('num_target_space_dims', 'param_space_kwargs'), ([
    (num_target_space_dims, param_space_kwargs)
    for num_target_space_dims in (2, 4)
    for num_orig_space_factor in (1.5, 4)
    for param_space_kwargs in (
        {'n_continuous_params': int(num_target_space_dims * num_orig_space_factor)},
        {'n_integer_params': int(num_target_space_dims * num_orig_space_factor)},
        {'n_categorical_params': int(num_target_space_dims * num_orig_space_factor)},
        # Mix of all three types
        {
            'n_continuous_params': int(num_target_space_dims * num_orig_space_factor / 3),
            'n_integer_params': int(num_target_space_dims * num_orig_space_factor / 3),
            'n_categorical_params': int(num_target_space_dims * num_orig_space_factor / 3),
        },
    )
]))
def test_num_low_dims(num_target_space_dims: int, param_space_kwargs: dict) -> None: # pylint: disable=too-many-locals
    """
    Tests LlamaTune's low-to-high space projection method.

    Samples configs in the low-dim target space, projects them up to the
    original space, and checks that the round trip (inverse projection)
    recovers the sampled config.
    """
    input_space = construct_parameter_space(**param_space_kwargs)
    # Number of target parameter space dimensions should be fewer than those of the original space
    with pytest.raises(ValueError):
        LlamaTuneAdapter(
            orig_parameter_space=input_space,
            num_low_dims=len(list(input_space.keys()))
        )
    # Enable only low-dimensional space projections
    adapter = LlamaTuneAdapter(
        orig_parameter_space=input_space,
        num_low_dims=num_target_space_dims,
        special_param_values=None,
        max_unique_values_per_param=None
    )
    sampled_configs = adapter.target_parameter_space.sample_configuration(size=100)
    for sampled_config in sampled_configs: # pylint: disable=not-an-iterable # (false positive)
        # Transform low-dim config to high-dim point/config
        sampled_config_df = pd.DataFrame([sampled_config.values()], columns=list(sampled_config.keys()))
        orig_config_df = adapter.transform(sampled_config_df)
        # High-dim (i.e., original) config should be valid
        orig_config = CS.Configuration(input_space, values=orig_config_df.iloc[0].to_dict())
        input_space.check_configuration(orig_config)
        # Transform high-dim config back to low-dim
        target_config_df = adapter.inverse_transform(orig_config_df)
        # Sampled config and this should be the same
        target_config = CS.Configuration(adapter.target_parameter_space, values=target_config_df.iloc[0].to_dict())
        assert target_config == sampled_config
    # Try inverse projection (i.e., high-to-low) for previously unseen configs
    unseen_sampled_configs = adapter.target_parameter_space.sample_configuration(size=25)
    for unseen_sampled_config in unseen_sampled_configs: # pylint: disable=not-an-iterable # (false positive)
        if unseen_sampled_config in sampled_configs: # pylint: disable=unsupported-membership-test # (false positive)
            continue
        unseen_sampled_config_df = pd.DataFrame([unseen_sampled_config.values()], columns=list(unseen_sampled_config.keys()))
        # Without approximate reverse mapping, unseen configs cannot be inverted.
        with pytest.raises(ValueError):
            _ = adapter.inverse_transform(unseen_sampled_config_df) # pylint: disable=redefined-variable-type
def test_special_parameter_values_validation() -> None:
    """
    Tests LlamaTune's validation process of user-provided special parameter values dictionary.

    Exercises the three failure modes: unsupported hyperparameter types
    (NotImplementedError), malformed dict structures (ValueError), and
    invalid biasing percentages (ValueError).
    """
    input_space = CS.ConfigurationSpace(seed=1234)
    input_space.add_hyperparameter(
        CS.CategoricalHyperparameter(name='str', choices=[f'choice_{idx}' for idx in range(5)]))
    input_space.add_hyperparameter(
        CS.UniformFloatHyperparameter(name='cont', lower=-1, upper=100))
    input_space.add_hyperparameter(
        CS.UniformIntegerHyperparameter(name='int', lower=0, upper=100))
    # Only UniformIntegerHyperparameters are currently supported
    with pytest.raises(NotImplementedError):
        special_param_values_dict_1 = {'str': 'choice_1'}
        LlamaTuneAdapter(
            orig_parameter_space=input_space,
            num_low_dims=2,
            special_param_values=special_param_values_dict_1,
            max_unique_values_per_param=None,
        )
    with pytest.raises(NotImplementedError):
        special_param_values_dict_2 = {'cont': -1}
        LlamaTuneAdapter(
            orig_parameter_space=input_space,
            num_low_dims=2,
            special_param_values=special_param_values_dict_2,
            max_unique_values_per_param=None,
        )
    # Special value should belong to parameter value domain
    with pytest.raises(ValueError, match='value domain'):
        special_param_values_dict = {'int': -1}
        LlamaTuneAdapter(
            orig_parameter_space=input_space,
            num_low_dims=2,
            special_param_values=special_param_values_dict,
            max_unique_values_per_param=None,
        )
    # Invalid dicts; ValueError should be thrown
    invalid_special_param_values_dicts: List[Dict[str, Any]] = [
        {'int-Q': 0},  # parameter does not exist
        {'int': {0: 0.2}},  # invalid definition
        {'int': 0.2},  # invalid parameter value
        {'int': (0.4, 0)},  # (biasing %, special value) instead of (special value, biasing %)
        {'int': [0, 0]},  # duplicate special values
        {'int': []},  # empty list
        {'int': [{0: 0.2}]},  # list element is neither a value nor a (value, pct) tuple
        {'int': [(0.4, 0), (1, 0.7)]},  # first tuple is inverted; second is correct
        {'int': [(0, 0.1), (0, 0.2)]},  # duplicate special values
    ]
    for spv_dict in invalid_special_param_values_dicts:
        with pytest.raises(ValueError):
            LlamaTuneAdapter(
                orig_parameter_space=input_space,
                num_low_dims=2,
                special_param_values=spv_dict,
                max_unique_values_per_param=None,
            )
    # Biasing percentage of special value(s) are invalid
    invalid_special_param_values_dicts = [
        {'int': (0, 1.1)},  # >1 probability
        {'int': (0, 0)},  # Zero probability
        {'int': (0, -0.1)},  # Negative probability
        {'int': (0, 20)},  # 2,000% instead of 20%
        {'int': [0, 1, 2, 3, 4, 5]},  # default biasing is 20%; 6 values * 20% > 100%
        {'int': [(0, 0.4), (1, 0.7)]},  # combined probability >100%
        {'int': [(0, -0.4), (1, 0.7)]},  # probability for value 0 is invalid.
    ]
    for spv_dict in invalid_special_param_values_dicts:
        with pytest.raises(ValueError):
            LlamaTuneAdapter(
                orig_parameter_space=input_space,
                num_low_dims=2,
                special_param_values=spv_dict,
                max_unique_values_per_param=None,
            )
def gen_random_configs(adapter: LlamaTuneAdapter, num_configs: int) -> Iterator[CS.Configuration]:
    """Yield `num_configs` random configs, projected up to the adapter's original space."""
    low_dim_configs = adapter.target_parameter_space.sample_configuration(size=num_configs)
    for low_dim_config in low_dim_configs:
        # Transform each low-dim sample into a config in the high-dim (original) space.
        low_dim_df = pd.DataFrame([low_dim_config.values()], columns=list(low_dim_config.keys()))
        high_dim_df = adapter.transform(low_dim_df)
        yield CS.Configuration(adapter.orig_parameter_space, values=high_dim_df.iloc[0].to_dict())
def test_special_parameter_values_biasing() -> None: # pylint: disable=too-complex
    """
    Tests LlamaTune's special parameter values biasing methodology.

    Checks that special values occur at (approximately) their configured
    biasing rate across randomly sampled configurations.
    """
    input_space = CS.ConfigurationSpace(seed=1234)
    input_space.add_hyperparameter(
        CS.UniformIntegerHyperparameter(name='int_1', lower=0, upper=100))
    input_space.add_hyperparameter(
        CS.UniformIntegerHyperparameter(name='int_2', lower=0, upper=100))
    num_configs = 400
    bias_percentage = LlamaTuneAdapter.DEFAULT_SPECIAL_PARAM_VALUE_BIASING_PERCENTAGE
    # Tolerance: observed counts may fall up to 20% below the expected rate.
    eps = 0.2
    # Single parameter; single special value
    # All four spellings below are equivalent ways to declare the same bias.
    special_param_value_dicts: List[Dict[str, Any]] = [
        {'int_1': 0},
        {'int_1': (0, bias_percentage)},
        {'int_1': [0]},
        {'int_1': [(0, bias_percentage)]}
    ]
    for spv_dict in special_param_value_dicts:
        adapter = LlamaTuneAdapter(
            orig_parameter_space=input_space,
            num_low_dims=1,
            special_param_values=spv_dict,
            max_unique_values_per_param=None,
        )
        special_value_occurrences = sum(
            1 for config in gen_random_configs(adapter, num_configs) if config['int_1'] == 0)
        assert (1 - eps) * int(num_configs * bias_percentage) <= special_value_occurrences
    # Single parameter; multiple special values
    special_param_value_dicts = [
        {'int_1': [0, 1]},
        {'int_1': [(0, bias_percentage), (1, bias_percentage)]}
    ]
    for spv_dict in special_param_value_dicts:
        adapter = LlamaTuneAdapter(
            orig_parameter_space=input_space,
            num_low_dims=1,
            special_param_values=spv_dict,
            max_unique_values_per_param=None,
        )
        special_values_occurrences = {0: 0, 1: 0}
        for config in gen_random_configs(adapter, num_configs):
            if config['int_1'] == 0:
                special_values_occurrences[0] += 1
            elif config['int_1'] == 1:
                special_values_occurrences[1] += 1
        assert (1 - eps) * int(num_configs * bias_percentage) <= special_values_occurrences[0]
        assert (1 - eps) * int(num_configs * bias_percentage) <= special_values_occurrences[1]
    # Multiple parameters; multiple special values; different biasing percentage
    spv_dict = {
        'int_1': [(0, bias_percentage), (1, bias_percentage / 2)],
        'int_2': [(2, bias_percentage / 2), (100, bias_percentage * 1.5)]
    }
    adapter = LlamaTuneAdapter(
        orig_parameter_space=input_space,
        num_low_dims=1,
        special_param_values=spv_dict,
        max_unique_values_per_param=None,
    )
    special_values_instances: Dict[str, Dict[int, int]] = {
        'int_1': {0: 0, 1: 0},
        'int_2': {2: 0, 100: 0},
    }
    for config in gen_random_configs(adapter, num_configs):
        if config['int_1'] == 0:
            special_values_instances['int_1'][0] += 1
        elif config['int_1'] == 1:
            special_values_instances['int_1'][1] += 1
        if config['int_2'] == 2:
            special_values_instances['int_2'][2] += 1
        elif config['int_2'] == 100:
            special_values_instances['int_2'][100] += 1
    # Each special value should appear at least (1 - eps) times its expected count.
    assert (1 - eps) * int(num_configs * bias_percentage) <= special_values_instances['int_1'][0]
    assert (1 - eps) * int(num_configs * bias_percentage / 2) <= special_values_instances['int_1'][1]
    assert (1 - eps) * int(num_configs * bias_percentage / 2) <= special_values_instances['int_2'][2]
    assert (1 - eps) * int(num_configs * bias_percentage * 1.5) <= special_values_instances['int_2'][100]
def test_max_unique_values_per_param() -> None:
    """
    Tests LlamaTune's parameter values discretization implementation.

    With `max_unique_values_per_param` set, each original-space parameter
    should take at most that many distinct values across sampled configs.
    """
    # Define config space with a mix of different parameter types
    input_space = CS.ConfigurationSpace(seed=1234)
    input_space.add_hyperparameter(
        CS.UniformFloatHyperparameter(name='cont_1', lower=0, upper=5))
    input_space.add_hyperparameter(
        CS.UniformFloatHyperparameter(name='cont_2', lower=1, upper=100))
    input_space.add_hyperparameter(
        CS.UniformIntegerHyperparameter(name='int_1', lower=1, upper=10))
    input_space.add_hyperparameter(
        CS.UniformIntegerHyperparameter(name='int_2', lower=0, upper=2048))
    input_space.add_hyperparameter(
        CS.CategoricalHyperparameter(name='str_1', choices=['on', 'off']))
    input_space.add_hyperparameter(
        CS.CategoricalHyperparameter(name='str_2', choices=[f'choice_{idx}' for idx in range(10)]))
    # Restrict the number of unique parameter values
    num_configs = 200
    for max_unique_values_per_param in (5, 25, 100):
        adapter = LlamaTuneAdapter(
            orig_parameter_space=input_space,
            num_low_dims=3,
            special_param_values=None,
            max_unique_values_per_param=max_unique_values_per_param,
        )
        # Keep track of unique values generated for each parameter
        unique_values_dict: Dict[str, set] = {param: set() for param in list(input_space.keys())}
        for config in gen_random_configs(adapter, num_configs):
            for param, value in config.items():
                unique_values_dict[param].add(value)
        # Ensure that their number is less than the maximum number allowed
        for _, unique_values in unique_values_dict.items():
            assert len(unique_values) <= max_unique_values_per_param
# Parametrize over target-space sizes and original spaces of various parameter
# types, each 1.5x or 4x larger than the target space.
@pytest.mark.parametrize(('num_target_space_dims', 'param_space_kwargs'), ([
    (num_target_space_dims, param_space_kwargs)
    for num_target_space_dims in (2, 4)
    for num_orig_space_factor in (1.5, 4)
    for param_space_kwargs in (
        {'n_continuous_params': int(num_target_space_dims * num_orig_space_factor)},
        {'n_integer_params': int(num_target_space_dims * num_orig_space_factor)},
        {'n_categorical_params': int(num_target_space_dims * num_orig_space_factor)},
        # Mix of all three types
        {
            'n_continuous_params': int(num_target_space_dims * num_orig_space_factor / 3),
            'n_integer_params': int(num_target_space_dims * num_orig_space_factor / 3),
            'n_categorical_params': int(num_target_space_dims * num_orig_space_factor / 3),
        },
    )
]))
def test_approx_inverse_mapping(num_target_space_dims: int, param_space_kwargs: dict) -> None: # pylint: disable=too-many-locals
    """
    Tests LlamaTune's approximate high-to-low space projection method, using pseudo-inverse.
    """
    input_space = construct_parameter_space(**param_space_kwargs)
    # Enable low-dimensional space projection, but disable reverse mapping
    adapter = LlamaTuneAdapter(
        orig_parameter_space=input_space,
        num_low_dims=num_target_space_dims,
        special_param_values=None,
        max_unique_values_per_param=None,
        use_approximate_reverse_mapping=False,
    )
    sampled_config = input_space.sample_configuration() # size=1)
    # Without reverse mapping, inverting an unseen high-dim config must fail.
    with pytest.raises(ValueError):
        sampled_config_df = pd.DataFrame([sampled_config.values()], columns=list(sampled_config.keys()))
        _ = adapter.inverse_transform(sampled_config_df)
    # Enable low-dimensional space projection *and* reverse mapping
    adapter = LlamaTuneAdapter(
        orig_parameter_space=input_space,
        num_low_dims=num_target_space_dims,
        special_param_values=None,
        max_unique_values_per_param=None,
        use_approximate_reverse_mapping=True,
    )
    # Warning should be printed the first time
    sampled_config = input_space.sample_configuration() # size=1)
    with pytest.warns(UserWarning):
        sampled_config_df = pd.DataFrame([sampled_config.values()], columns=list(sampled_config.keys()))
        target_config_df = adapter.inverse_transform(sampled_config_df)
        # Low-dim (i.e., target) config should be valid
        target_config = CS.Configuration(adapter.target_parameter_space, values=target_config_df.iloc[0].to_dict())
        adapter.target_parameter_space.check_configuration(target_config)
    # Test inverse transform with 100 random configs
    for _ in range(100):
        sampled_config = input_space.sample_configuration() # size=1)
        sampled_config_df = pd.DataFrame([sampled_config.values()], columns=list(sampled_config.keys()))
        target_config_df = adapter.inverse_transform(sampled_config_df)
        # Low-dim (i.e., target) config should be valid
        target_config = CS.Configuration(adapter.target_parameter_space, values=target_config_df.iloc[0].to_dict())
        adapter.target_parameter_space.check_configuration(target_config)
# Parametrize over target-space sizes and both spellings of special-value
# biasing (bare values with default percentage vs. explicit (value, pct) tuples).
@pytest.mark.parametrize(('num_low_dims', 'special_param_values', 'max_unique_values_per_param'), ([
    (num_low_dims, special_param_values, max_unique_values_per_param)
    for num_low_dims in (8, 16)
    for special_param_values in (
        {'int_1': -1, 'int_2': -1, 'int_3': -1, 'int_4': [-1, 0]},
        {'int_1': (-1, 0.1), 'int_2': -1, 'int_3': (-1, 0.3), 'int_4': [(-1, 0.1), (0, 0.2)]},
    )
    for max_unique_values_per_param in (50, 250)
]))
def test_llamatune_pipeline(num_low_dims: int, special_param_values: dict, max_unique_values_per_param: int) -> None:
    """
    Tests LlamaTune space adapter when all components are active.

    Combines space projection, special-value biasing, and value discretization,
    and checks the round trip plus the statistical properties of the samples.
    """
    # pylint: disable=too-many-locals
    # Define config space with a mix of different parameter types
    input_space = construct_parameter_space(n_continuous_params=10, n_integer_params=10, n_categorical_params=5)
    adapter = LlamaTuneAdapter(
        orig_parameter_space=input_space,
        num_low_dims=num_low_dims,
        special_param_values=special_param_values,
        max_unique_values_per_param=max_unique_values_per_param,
    )
    # Per-parameter counters for how often each special value shows up.
    special_value_occurrences = {
        param: {special_value: 0 for special_value, _ in tuples_list}
        for param, tuples_list in adapter._special_param_values_dict.items() # pylint: disable=protected-access
    }
    unique_values_dict: Dict[str, Set] = {param: set() for param in input_space.keys()}
    num_configs = 1000
    for config in adapter.target_parameter_space.sample_configuration(size=num_configs): # pylint: disable=not-an-iterable
        # Transform low-dim config to high-dim point/config
        sampled_config_df = pd.DataFrame([config.values()], columns=list(config.keys()))
        orig_config_df = adapter.transform(sampled_config_df)
        # High-dim (i.e., original) config should be valid
        orig_config = CS.Configuration(input_space, values=orig_config_df.iloc[0].to_dict())
        input_space.check_configuration(orig_config)
        # Transform high-dim config back to low-dim
        target_config_df = adapter.inverse_transform(orig_config_df)
        # Sampled config and this should be the same
        target_config = CS.Configuration(adapter.target_parameter_space, values=target_config_df.iloc[0].to_dict())
        assert target_config == config
        for param, value in orig_config.items():
            # Keep track of special value occurrences
            if param in special_value_occurrences:
                if value in special_value_occurrences[param]:
                    special_value_occurrences[param][value] += 1
            # Keep track of unique values generated for each parameter
            unique_values_dict[param].add(value)
    # Ensure that occurrences of special values do not significantly deviate from expected
    eps = 0.2
    for param, tuples_list in adapter._special_param_values_dict.items(): # pylint: disable=protected-access
        for value, bias_percentage in tuples_list:
            assert (1 - eps) * int(num_configs * bias_percentage) <= special_value_occurrences[param][value]
    # Ensure that number of unique values is less than the maximum number allowed
    for _, unique_values in unique_values_dict.items():
        assert len(unique_values) <= max_unique_values_per_param
# Parametrize over target-space sizes and original spaces of various parameter
# types, each 1.5x or 4x larger than the target space.
@pytest.mark.parametrize(('num_target_space_dims', 'param_space_kwargs'), ([
    (num_target_space_dims, param_space_kwargs)
    for num_target_space_dims in (2, 4)
    for num_orig_space_factor in (1.5, 4)
    for param_space_kwargs in (
        {'n_continuous_params': int(num_target_space_dims * num_orig_space_factor)},
        {'n_integer_params': int(num_target_space_dims * num_orig_space_factor)},
        {'n_categorical_params': int(num_target_space_dims * num_orig_space_factor)},
        # Mix of all three types
        {
            'n_continuous_params': int(num_target_space_dims * num_orig_space_factor / 3),
            'n_integer_params': int(num_target_space_dims * num_orig_space_factor / 3),
            'n_categorical_params': int(num_target_space_dims * num_orig_space_factor / 3),
        },
    )
]))
def test_deterministic_behavior_for_same_seed(num_target_space_dims: int, param_space_kwargs: dict) -> None:
    """
    Tests LlamaTune's space adapter deterministic behavior when given same seed in the input parameter space.
    """
    def generate_target_param_space_configs(seed: int) -> List[CS.Configuration]:
        # Build a freshly-seeded input space so each call is independent.
        input_space = construct_parameter_space(**param_space_kwargs, seed=seed)
        # Init adapter and sample points in the low-dim space
        adapter = LlamaTuneAdapter(
            orig_parameter_space=input_space,
            num_low_dims=num_target_space_dims,
            special_param_values=None,
            max_unique_values_per_param=None,
            use_approximate_reverse_mapping=False,
        )
        sample_configs: List[CS.Configuration] = adapter.target_parameter_space.sample_configuration(size=100)
        return sample_configs
    # Same seed => identical samples; different seed => different samples.
    assert generate_target_param_space_configs(42) == generate_target_param_space_configs(42)
    assert generate_target_param_space_configs(1234) != generate_target_param_space_configs(42)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,792
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/spaces/adapters/identity_adapter_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for Identity space adapter.
"""
# pylint: disable=missing-function-docstring
import ConfigSpace as CS
import pandas as pd
from mlos_core.spaces.adapters import IdentityAdapter
def test_identity_adapter() -> None:
    """
    Tests identity adapter.

    Both `transform` and `inverse_transform` should be no-ops: the adapter's
    target space is the original space itself.
    """
    input_space = CS.ConfigurationSpace(seed=1234)
    input_space.add_hyperparameter(
        CS.UniformIntegerHyperparameter(name='int_1', lower=0, upper=100))
    input_space.add_hyperparameter(
        CS.UniformFloatHyperparameter(name='float_1', lower=0, upper=100))
    input_space.add_hyperparameter(
        CS.CategoricalHyperparameter(name='str_1', choices=['on', 'off']))
    adapter = IdentityAdapter(orig_parameter_space=input_space)
    num_configs = 10
    for sampled_config in input_space.sample_configuration(size=num_configs): # pylint: disable=not-an-iterable # (false positive)
        # inverse_transform should return the input unchanged.
        sampled_config_df = pd.DataFrame([sampled_config.values()], columns=list(sampled_config.keys()))
        target_config_df = adapter.inverse_transform(sampled_config_df)
        assert target_config_df.equals(sampled_config_df)
        target_config = CS.Configuration(adapter.target_parameter_space, values=target_config_df.iloc[0].to_dict())
        assert target_config == sampled_config
        # transform should also return its input unchanged.
        orig_config_df = adapter.transform(target_config_df)
        assert orig_config_df.equals(sampled_config_df)
        orig_config = CS.Configuration(adapter.orig_parameter_space, values=orig_config_df.iloc[0].to_dict())
        assert orig_config == sampled_config
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,793
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/optimizers/conftest.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Test fixtures for mlos_bench optimizers.
"""
import pytest
import ConfigSpace as CS
@pytest.fixture
def configuration_space() -> CS.ConfigurationSpace:
    """
    Test fixture to produce a config space with all types of hyperparameters.
    """
    # Fixed seed keeps the space's sampling reproducible across test runs.
    config_space = CS.ConfigurationSpace(seed=1234)
    # One hyperparameter of each supported kind, added in a deliberate order:
    # continuous 'x' in [0, 1], categorical 'y' in {a, b, c}, discrete 'z' in [0, 10].
    config_space.add_hyperparameter(CS.UniformFloatHyperparameter(name='x', lower=0, upper=1))
    config_space.add_hyperparameter(CS.CategoricalHyperparameter(name='y', choices=["a", "b", "c"]))
    config_space.add_hyperparameter(CS.UniformIntegerHyperparameter(name='z', lower=0, upper=10))
    return config_space
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,794
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/optimizers/bayesian_optimizers/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Basic initializer module for the mlos_core Bayesian optimizers.
"""
from mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer
from mlos_core.optimizers.bayesian_optimizers.smac_optimizer import SmacOptimizer
# Public API of the mlos_core Bayesian optimizers package.
__all__ = [
    'BaseBayesianOptimizer',
    'SmacOptimizer',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,795
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/remote/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Remote Tunable Environments for mlos_bench.
"""
from mlos_bench.environments.remote.remote_env import RemoteEnv
from mlos_bench.environments.remote.os_env import OSEnv
from mlos_bench.environments.remote.vm_env import VMEnv
# Public API of the mlos_bench remote environments package.
__all__ = [
    'RemoteEnv',
    'OSEnv',
    'VMEnv',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,796
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/remote/azure/azure_auth.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A collection Service functions for managing VMs on Azure.
"""
import datetime
import json
import logging
import subprocess
from typing import Any, Dict, Optional
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.authenticator_type import SupportsAuth
_LOG = logging.getLogger(__name__)
class AzureAuthService(Service, SupportsAuth):
    """
    Helper methods to get access to Azure services.
    """
    # Default threshold (seconds): renew the token when it expires sooner than this.
    _REQ_INTERVAL = 300 # = 5 min
    def __init__(self,
                 config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new instance of Azure authentication services proxy.
        Parameters
        ----------
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration.
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            Parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        # Register methods that we want to expose to the Environment objects.
        self.register([self.get_access_token])
        # This parameter can come from command line as strings, so conversion is needed.
        self._req_interval = float(self.config.get("tokenRequestInterval", self._REQ_INTERVAL))
        # Placeholder values that force a token renewal on the first call:
        # the expiration timestamp is "now", so ts_diff below is ~0 < _req_interval.
        self._access_token = "RENEW *NOW*"
        self._token_expiration_ts = datetime.datetime.now() # Typically, some future timestamp.
    def get_access_token(self) -> str:
        """
        Get the access token from Azure CLI, if expired.
        Returns
        -------
        access_token : str
            A (possibly cached) Azure access token string.
        """
        # Renew only when the cached token has less than `_req_interval` seconds left.
        ts_diff = (self._token_expiration_ts - datetime.datetime.now()).total_seconds()
        _LOG.debug("Time to renew the token: %.2f sec.", ts_diff)
        if ts_diff < self._req_interval:
            _LOG.debug("Request new accessToken")
            # TODO: Use azure-identity SDK and a key vault instead of `az` CLI.
            res = json.loads(subprocess.check_output(
                'az account get-access-token', shell=True, text=True))
            self._token_expiration_ts = datetime.datetime.fromisoformat(res["expiresOn"])
            self._access_token = res["accessToken"]
            _LOG.info("Got new accessToken. Expiration time: %s", self._token_expiration_ts)
        return self._access_token
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,797
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tunables/covariant_group.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tunable parameter definition.
"""
import copy
from typing import Dict, Iterable, Union
from mlos_bench.tunables.tunable import Tunable, TunableValue
class CovariantTunableGroup:
    """
    A set of tunable parameters that are updated together.

    Reconfiguring any single parameter of the group incurs the same
    (covariant) experiment cost as reconfiguring all of them.
    """

    def __init__(self, name: str, config: dict):
        """
        Instantiate a group of covariant tunable parameters.

        Parameters
        ----------
        name : str
            Human-readable identifier of the tunable parameters group.
        config : dict
            Python dict that represents a CovariantTunableGroup
            (e.g., deserialized from JSON).
        """
        # A freshly created group is considered "updated" until explicitly reset.
        self._is_updated = True
        self._name = name
        self._cost = int(config.get("cost", 0))
        self._tunables: Dict[str, Tunable] = {}
        for (param_name, param_config) in config.get("params", {}).items():
            self._tunables[param_name] = Tunable(param_name, param_config)

    @property
    def name(self) -> str:
        """
        Name (i.e., a string id) of the covariant group.

        Returns
        -------
        name : str
        """
        return self._name

    @property
    def cost(self) -> int:
        """
        Constant cost of changing any value in the covariant group.
        Use `get_current_cost()` for the cost given the group update status.

        Returns
        -------
        cost : int
        """
        return self._cost

    def copy(self) -> "CovariantTunableGroup":
        """
        Produce a deep copy of this CovariantTunableGroup.

        Returns
        -------
        group : CovariantTunableGroup
            A new, independent instance with the same state.
        """
        return copy.deepcopy(self)

    def __eq__(self, other: object) -> bool:
        """
        Compare two CovariantTunableGroup objects for equality of
        name, cost, update status, and tunables.

        Parameters
        ----------
        other : CovariantTunableGroup
            A covariant tunable group object to compare to.

        Returns
        -------
        is_equal : bool
            True if the two objects are equal, False otherwise.
        """
        if not isinstance(other, CovariantTunableGroup):
            return False
        # TODO: May need to provide logic to relax the equality check on the
        # tunables (e.g. "compatible" vs. "equal").
        return (
            self._name == other._name
            and self._cost == other._cost
            and self._is_updated == other._is_updated
            and self._tunables == other._tunables
        )

    def equals_defaults(self, other: "CovariantTunableGroup") -> bool:
        """
        Check whether the other CovariantTunableGroup has the same *metadata*,
        ignoring the current values of the two groups' Tunables.

        Parameters
        ----------
        other : CovariantTunableGroup
            A covariant tunable group object to compare to.

        Returns
        -------
        are_equal : bool
            True if the two groups' metadata match, False otherwise.
        """
        # NOTE: May be worth considering to implement this check without copies.
        lhs = self.copy()
        rhs = other.copy()
        for group in (lhs, rhs):
            group.restore_defaults()
            group.reset_is_updated()
        return lhs == rhs

    def restore_defaults(self) -> None:
        """
        Reset every tunable parameter in the group to its default value.
        """
        for tunable in self._tunables.values():
            # Mark the group updated only if some value actually changes.
            if tunable.value != tunable.default:
                self._is_updated = True
            tunable.value = tunable.default

    def reset_is_updated(self) -> None:
        """
        Clear the update flag, i.e., declare that re-running an experiment
        with the current values of this group incurs no extra cost.
        """
        self._is_updated = False

    def is_updated(self) -> bool:
        """
        Check if any of the tunable values in the group has been updated.

        Returns
        -------
        is_updated : bool
            True if any value has been updated, False otherwise.
        """
        return self._is_updated

    def get_current_cost(self) -> int:
        """
        Cost of the experiment given the current tunable values.

        Returns
        -------
        cost : int
            Cost of the experiment, or 0 if parameters have not been updated.
        """
        if self._is_updated:
            return self._cost
        return 0

    def get_names(self) -> Iterable[str]:
        """
        Names of all tunables in the group.
        """
        return self._tunables.keys()

    def get_tunable_values_dict(self) -> Dict[str, TunableValue]:
        """
        Current values of all tunables in the group, keyed by name.

        Returns
        -------
        tunables : Dict[str, TunableValue]
        """
        values: Dict[str, TunableValue] = {}
        for (tunable_name, tunable) in self._tunables.items():
            values[tunable_name] = tunable.value
        return values

    def __repr__(self) -> str:
        """
        Human-readable version of the CovariantTunableGroup (mostly for logging).

        Returns
        -------
        string : str
        """
        return f"{self._name}: {self._tunables}"

    def get_tunable(self, tunable: Union[str, Tunable]) -> Tunable:
        """
        Access the entire Tunable in the group (not just its value).
        Raises KeyError if the tunable is not a member of the group.

        Parameters
        ----------
        tunable : str
            Name of the tunable parameter (or a Tunable to look up by name).

        Returns
        -------
        Tunable
            An instance of the Tunable parameter.
        """
        if isinstance(tunable, Tunable):
            return self._tunables[tunable.name]
        return self._tunables[tunable]

    def get_tunables(self) -> Iterable[Tunable]:
        """
        The set of Tunable objects in this CovariantTunableGroup.

        Returns
        -------
        Iterable[Tunable]
        """
        return self._tunables.values()

    def __contains__(self, tunable: Union[str, Tunable]) -> bool:
        key = tunable.name if isinstance(tunable, Tunable) else tunable
        return key in self._tunables

    def __getitem__(self, tunable: Union[str, Tunable]) -> TunableValue:
        return self.get_tunable(tunable).value

    def __setitem__(self, tunable: Union[str, Tunable], tunable_value: Union[TunableValue, Tunable]) -> TunableValue:
        if isinstance(tunable_value, Tunable):
            new_value: TunableValue = tunable_value.value
        else:
            new_value = tunable_value
        # `update()` reports whether the value actually changed.
        self._is_updated |= self.get_tunable(tunable).update(new_value)
        return new_value
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,798
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/environments/local/local_env_vars_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for passing shell environment variables into LocalEnv scripts.
"""
import sys
import pytest
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.tests.environments.local import create_local_env, check_local_env_success
def _run_local_env(tunable_groups: TunableGroups, shell_subcmd: str, expected: dict) -> None:
    """
    Run a LocalEnv that echoes selected shell environment variables into a CSV
    file and check that the parsed results match `expected`.
    """
    env_config = {
        "const_args": {
            "const_arg": 111,     # Passed into "shell_env_params"
            "other_arg": 222,     # NOT passed into "shell_env_params"
        },
        "tunable_params": ["kernel"],
        "shell_env_params": [
            "const_arg",                  # From "const_arg"
            "kernel_sched_latency_ns",    # From "tunable_params"
        ],
        "run": [
            "echo const_arg,other_arg,unknown_arg,kernel_sched_latency_ns > output.csv",
            f"echo {shell_subcmd} >> output.csv",
        ],
        "read_results_file": "output.csv",
    }
    local_env = create_local_env(tunable_groups, env_config)
    check_local_env_success(local_env, tunable_groups, expected, [])
@pytest.mark.skipif(sys.platform == 'win32', reason="sh-like shell only")
def test_local_env_vars_shell(tunable_groups: TunableGroups) -> None:
    """
    Check that LocalEnv can set shell environment variables in sh-like shell.
    """
    # Variables not exported via "shell_env_params" stay undefined and
    # expand to empty strings, which read back as NaN from the CSV.
    expected = {
        "const_arg": 111,                       # From "const_args"
        "other_arg": float("NaN"),              # Not included in "shell_env_params"
        "unknown_arg": float("NaN"),            # Unknown/undefined variable
        "kernel_sched_latency_ns": 2000000,     # From "tunable_params"
    }
    shell_subcmd = "$const_arg,$other_arg,$unknown_arg,$kernel_sched_latency_ns"
    _run_local_env(tunable_groups, shell_subcmd, expected)
@pytest.mark.skipif(sys.platform != 'win32', reason="Windows only")
def test_local_env_vars_windows(tunable_groups: TunableGroups) -> None:
    """
    Check that LocalEnv can set shell environment variables on Windows / cmd shell.
    """
    # cmd.exe leaves undefined %var% references unexpanded, so the literal
    # placeholder text comes back in the CSV.
    expected = {
        "const_arg": 111,                           # From "const_args"
        "other_arg": r"%other_arg%",                # Not included in "shell_env_params"
        "unknown_arg": r"%unknown_arg%",            # Unknown/undefined variable
        "kernel_sched_latency_ns": 2000000,         # From "tunable_params"
    }
    shell_subcmd = r"%const_arg%,%other_arg%,%unknown_arg%,%kernel_sched_latency_ns%"
    _run_local_env(tunable_groups, shell_subcmd, expected)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,799
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/status.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Enum for the status of the benchmark/environment.
"""
import enum
class Status(enum.Enum):
"""
Enum for the status of the benchmark/environment.
"""
UNKNOWN = 0
PENDING = 1
READY = 2
RUNNING = 3
SUCCEEDED = 4
CANCELED = 5
FAILED = 6
TIMED_OUT = 7
def is_good(self) -> bool:
"""
Check if the status of the benchmark/environment is good.
"""
return self in {
Status.PENDING,
Status.READY,
Status.RUNNING,
Status.SUCCEEDED,
}
def is_completed(self) -> bool:
"""
Check if the status of the benchmark/environment is
one of {SUCCEEDED, FAILED, TIMED_OUT}.
"""
return self in {
Status.SUCCEEDED,
Status.FAILED,
Status.TIMED_OUT,
}
def is_pending(self) -> bool:
"""
Check if the status of the benchmark/environment is PENDING.
"""
return self == Status.PENDING
def is_ready(self) -> bool:
"""
Check if the status of the benchmark/environment is READY.
"""
return self == Status.READY
def is_succeeded(self) -> bool:
"""
Check if the status of the benchmark/environment is SUCCEEDED.
"""
return self == Status.SUCCEEDED
def is_failed(self) -> bool:
"""
Check if the status of the benchmark/environment is FAILED.
"""
return self == Status.FAILED
def is_canceled(self) -> bool:
"""
Check if the status of the benchmark/environment is CANCELED.
"""
return self == Status.CANCELED
def is_timed_out(self) -> bool:
"""
Check if the status of the benchmark/environment is TIMED_OUT.
"""
return self == Status.FAILED
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,800
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/conftest.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Test fixtures for individual Tunable objects.
"""
import pytest
from mlos_bench.tunables.tunable import Tunable
# pylint: disable=redefined-outer-name
# -- Ignore pylint complaints about pytest references to
# `tunable_groups` fixture as both a function and a parameter.
@pytest.fixture
def tunable_categorical() -> Tunable:
    """
    Test fixture that produces a categorical Tunable object.

    Returns
    -------
    tunable : Tunable
        An instance of a categorical Tunable.
    """
    config = {
        "description": "Azure VM size",
        "type": "categorical",
        "default": "Standard_B4ms",
        "values": ["Standard_B2s", "Standard_B2ms", "Standard_B4ms"],
    }
    return Tunable("vmSize", config)
@pytest.fixture
def tunable_int() -> Tunable:
    """
    Test fixture that produces an integer Tunable object with a limited range.

    Returns
    -------
    tunable : Tunable
        An instance of an integer Tunable.
    """
    config = {
        "description": "Cost of migrating the thread to another core",
        "type": "int",
        "default": 40000,
        "range": [-1, 500000],
        "special": [-1],    # Out-of-range sentinel value allowed explicitly.
    }
    return Tunable("kernel_sched_migration_cost_ns", config)
@pytest.fixture
def tunable_float() -> Tunable:
    """
    Test fixture that produces a float Tunable object with a limited range.

    Returns
    -------
    tunable : Tunable
        An instance of a float Tunable.
    """
    config = {
        "description": "Probability of spontaneous VM shutdown",
        "type": "float",
        "default": 0.01,
        "range": [0, 1],
    }
    return Tunable("chaos_monkey_prob", config)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,801
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/config_persistence_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for configuration persistence service.
"""
import os
import sys
import pytest
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.services.config_persistence import ConfigPersistenceService
if sys.version_info < (3, 9):
from importlib_resources import files
else:
from importlib.resources import files
# pylint: disable=redefined-outer-name
@pytest.fixture
def config_persistence_service() -> ConfigPersistenceService:
    """
    Test fixture for ConfigPersistenceService.
    """
    config_path = [
        "./non-existent-dir/test/foo/bar",  # Non-existent config path
        str(files("mlos_bench.tests.config").joinpath("")),  # Test configs (relative to mlos_bench/tests)
        # Shouldn't be necessary since we automatically add this.
        # str(files("mlos_bench.config").joinpath("")),  # Stock configs
    ]
    return ConfigPersistenceService({"config_path": config_path})
def test_resolve_stock_path(config_persistence_service: ConfigPersistenceService) -> None:
    """
    Check that a stock (built-in) config file resolves via `config_path`.
    """
    # pylint: disable=protected-access
    assert config_persistence_service._config_path is not None
    assert ConfigPersistenceService.BUILTIN_CONFIG_PATH in config_persistence_service._config_path
    rel_path = "storage/in-memory.jsonc"
    resolved = config_persistence_service.resolve_path(rel_path)
    assert resolved.endswith(rel_path)
    assert os.path.exists(resolved)
    # The resolved file must live under the built-in config directory.
    common = os.path.commonpath([ConfigPersistenceService.BUILTIN_CONFIG_PATH, resolved])
    assert os.path.samefile(ConfigPersistenceService.BUILTIN_CONFIG_PATH, common)
def test_resolve_path(config_persistence_service: ConfigPersistenceService) -> None:
    """
    Check that a file somewhere in `config_path` actually resolves.
    """
    rel_path = "tunable-values/tunable-values-example.jsonc"
    resolved = config_persistence_service.resolve_path(rel_path)
    assert resolved.endswith(rel_path)
    assert os.path.exists(resolved)
def test_resolve_path_fail(config_persistence_service: ConfigPersistenceService) -> None:
    """
    Check that a non-existent file resolves to itself, without using `config_path`.
    """
    rel_path = "foo/non-existent-config.json"
    resolved = config_persistence_service.resolve_path(rel_path)
    assert not os.path.exists(resolved)
    assert resolved == rel_path
def test_load_config(config_persistence_service: ConfigPersistenceService) -> None:
    """
    Check that a config file located relative to `config_path` loads successfully.
    """
    tunables_data = config_persistence_service.load_config(
        "tunable-values/tunable-values-example.jsonc", ConfigSchema.TUNABLE_VALUES)
    assert tunables_data is not None
    assert isinstance(tunables_data, dict)
    assert len(tunables_data) >= 1
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,802
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tunables/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tunables classes for Environments in mlos_bench.
"""
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
__all__ = [
'Tunable',
'TunableGroups',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,803
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/local/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.services.local.
Used to make mypy happy about multiple conftest.py modules.
"""
from .mock import MockLocalExecService
__all__ = [
'MockLocalExecService',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,804
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test_optimizer_schemas.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for optimizer schema validation.
"""
from os import path
from typing import Optional
import pytest
from mlos_core.optimizers import OptimizerType
from mlos_core.spaces.adapters import SpaceAdapterType
from mlos_core.tests import get_all_concrete_subclasses
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.tests import try_resolve_class_name
from mlos_bench.tests.config.schemas import (get_schema_test_cases,
check_test_case_against_schema,
check_test_case_config_with_extra_param)
# General testing strategy:
# - hand code a set of good/bad configs (useful to test editor schema checking)
# - enumerate and try to check that we've covered all the cases
# - for each config, load and validate against expected schema
# Enumerate the hand-written good/bad configs under the "test-cases" directory.
TEST_CASES = get_schema_test_cases(path.join(path.dirname(__file__), "test-cases"))
# Dynamically enumerate some of the cases we want to make sure we cover.
expected_mlos_bench_optimizer_class_names = [subclass.__module__ + "." + subclass.__name__
                                             for subclass in get_all_concrete_subclasses(Optimizer,  # type: ignore[type-abstract]
                                                                                         pkg_name='mlos_bench')]
# Sanity check of the test setup itself: the enumeration must not be empty.
assert expected_mlos_bench_optimizer_class_names
# Also make sure that we check for configs where the optimizer_type or space_adapter_type are left unspecified (None).
expected_mlos_core_optimizer_types = list(OptimizerType) + [None]
assert expected_mlos_core_optimizer_types
expected_mlos_core_space_adapter_types = list(SpaceAdapterType) + [None]
assert expected_mlos_core_space_adapter_types
# Do the full cross product of all the test cases and all the optimizer types.
@pytest.mark.parametrize("test_case_subtype", sorted(TEST_CASES.by_subtype))
@pytest.mark.parametrize("mlos_bench_optimizer_type", expected_mlos_bench_optimizer_class_names)
def test_case_coverage_mlos_bench_optimizer_type(test_case_subtype: str, mlos_bench_optimizer_type: str) -> None:
    """
    Ensure there is a test case of the given subtype for the given mlos_bench optimizer type.
    """
    covered = any(
        try_resolve_class_name(test_case.config.get("class")) == mlos_bench_optimizer_type
        for test_case in TEST_CASES.by_subtype[test_case_subtype].values()
    )
    if not covered:
        raise NotImplementedError(
            f"Missing test case for subtype {test_case_subtype} for Optimizer class {mlos_bench_optimizer_type}")
# Being a little lazy for the moment and relaxing the requirement that we have
# a subtype test case for each optimizer and space adapter combo.
@pytest.mark.parametrize("test_case_type", sorted(TEST_CASES.by_type))
# @pytest.mark.parametrize("test_case_subtype", sorted(TEST_CASES.by_subtype))
@pytest.mark.parametrize("mlos_core_optimizer_type", expected_mlos_core_optimizer_types)
def test_case_coverage_mlos_core_optimizer_type(test_case_type: str,
                                                mlos_core_optimizer_type: Optional[OptimizerType]) -> None:
    """
    Ensure there is a test case of the given type for the given mlos_core optimizer type.
    """
    expected_name = mlos_core_optimizer_type.name if mlos_core_optimizer_type is not None else None
    for test_case in TEST_CASES.by_type[test_case_type].values():
        # Only MlosCoreOptimizer configs carry an "optimizer_type" setting.
        if try_resolve_class_name(test_case.config.get("class")) \
                != "mlos_bench.optimizers.mlos_core_optimizer.MlosCoreOptimizer":
            continue
        config = test_case.config.get("config")
        found_name = config.get("optimizer_type", None) if config else None
        if found_name == expected_name:
            return
    raise NotImplementedError(
        f"Missing test case for type {test_case_type} for MlosCore Optimizer type {mlos_core_optimizer_type}")
@pytest.mark.parametrize("test_case_type", sorted(TEST_CASES.by_type))
# @pytest.mark.parametrize("test_case_subtype", sorted(TEST_CASES.by_subtype))
@pytest.mark.parametrize("mlos_core_space_adapter_type", expected_mlos_core_space_adapter_types)
def test_case_coverage_mlos_core_space_adapter_type(test_case_type: str,
                                                    mlos_core_space_adapter_type: Optional[SpaceAdapterType]) -> None:
    """
    Ensure there is a test case of the given type for the given mlos_core space adapter type.
    """
    expected_name = mlos_core_space_adapter_type.name if mlos_core_space_adapter_type is not None else None
    for test_case in TEST_CASES.by_type[test_case_type].values():
        # Only MlosCoreOptimizer configs carry a "space_adapter_type" setting.
        if try_resolve_class_name(test_case.config.get("class")) \
                != "mlos_bench.optimizers.mlos_core_optimizer.MlosCoreOptimizer":
            continue
        config = test_case.config.get("config")
        found_name = config.get("space_adapter_type", None) if config else None
        if found_name == expected_name:
            return
    raise NotImplementedError(
        f"Missing test case for type {test_case_type} for SpaceAdapter type {mlos_core_space_adapter_type}")
# Now we actually perform all of those validation tests.
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_path))
def test_optimizer_configs_against_schema(test_case_name: str) -> None:
    """
    Validate the given optimizer test-case config against the optimizer schema.
    """
    test_case = TEST_CASES.by_path[test_case_name]
    check_test_case_against_schema(test_case, ConfigSchema.OPTIMIZER)
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_type["good"]))
def test_optimizer_configs_with_extra_param(test_case_name: str) -> None:
    """
    Check that an otherwise-good optimizer config fails to validate when
    extra params are injected in certain places.
    """
    test_case = TEST_CASES.by_type["good"][test_case_name]
    check_test_case_config_with_extra_param(test_case, ConfigSchema.OPTIMIZER)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,805
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/types/config_loader_type.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Protocol interface for helper functions to lookup and load configs.
"""
from typing import Dict, List, Iterable, Optional, Union, Protocol, runtime_checkable, TYPE_CHECKING
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.tunables.tunable import TunableValue
# Avoids circular import issues.
if TYPE_CHECKING:
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.services.base_service import Service
from mlos_bench.environments.base_environment import Environment
@runtime_checkable
class SupportsConfigLoading(Protocol):
    """
    Protocol interface for helper functions to lookup and load configs.

    Methods here are stubs (docstring-only bodies); concrete Service
    implementations provide the behavior.
    """

    def resolve_path(self, file_path: str,
                     extra_paths: Optional[Iterable[str]] = None) -> str:
        """
        Prepend the suitable `_config_path` to `path` if the latter is not absolute.
        If `_config_path` is `None` or `path` is absolute, return `path` as is.

        Parameters
        ----------
        file_path : str
            Path to the input config file.
        extra_paths : Iterable[str]
            Additional directories to prepend to the list of search paths.

        Returns
        -------
        path : str
            An actual path to the config or script.
        """

    def load_config(self, json_file_name: str, schema_type: Optional[ConfigSchema]) -> Union[dict, List[dict]]:
        """
        Load JSON config file. Search for a file relative to `_config_path`
        if the input path is not absolute.
        This method is exported to be used as a service.

        Parameters
        ----------
        json_file_name : str
            Path to the input config file.
        schema_type : Optional[ConfigSchema]
            The schema type to validate the config against.

        Returns
        -------
        config : Union[dict, List[dict]]
            Free-format dictionary that contains the configuration.
        """

    def build_environment(self,  # pylint: disable=too-many-arguments
                          config: dict,
                          tunables: "TunableGroups",
                          global_config: Optional[dict] = None,
                          parent_args: Optional[Dict[str, TunableValue]] = None,
                          service: Optional["Service"] = None) -> "Environment":
        """
        Factory method for a new environment with a given config.

        Parameters
        ----------
        config : dict
            A dictionary with three mandatory fields:
            "name": Human-readable string describing the environment;
            "class": FQN of a Python class to instantiate;
            "config": Free-format dictionary to pass to the constructor.
        tunables : TunableGroups
            A (possibly empty) collection of groups of tunable parameters for
            all environments.
        global_config : Optional[dict]
            Global parameters to add to the environment config.
        parent_args : Optional[Dict[str, TunableValue]]
            An optional reference of the parent CompositeEnv's const_args used to
            expand dynamic config parameters from.
        service: Optional[Service]
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).

        Returns
        -------
        env : Environment
            An instance of the `Environment` class initialized with `config`.
        """

    def load_environment_list(  # pylint: disable=too-many-arguments
            self,
            json_file_name: str,
            tunables: "TunableGroups",
            global_config: Optional[dict] = None,
            parent_args: Optional[Dict[str, TunableValue]] = None,
            service: Optional["Service"] = None) -> List["Environment"]:
        """
        Load and build a list of environments from the config file.

        Parameters
        ----------
        json_file_name : str
            The environment JSON configuration file.
            Can contain either one environment or a list of environments.
        tunables : TunableGroups
            A (possibly empty) collection of tunables to add to the environment.
        global_config : Optional[dict]
            Global parameters to add to the environment config.
        parent_args : Optional[Dict[str, TunableValue]]
            An optional reference of the parent CompositeEnv's const_args used to
            expand dynamic config parameters from.
        service : Optional[Service]
            An optional reference of the parent service to mix in.

        Returns
        -------
        env : List[Environment]
            A list of new benchmarking environments.
        """
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,806
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/tunables/tunable_group_subgroup_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for `TunableGroup.subgroup()` method.
"""
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_tunable_group_subgroup(tunable_groups: TunableGroups) -> None:
    """
    Check that subgroup() returns only the requested selection of tunable parameters.
    """
    subgroup = tunable_groups.subgroup(["provision"])
    expected = {'vmSize': 'Standard_B4ms'}
    assert subgroup.get_param_values() == expected
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,807
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/storage/sql/storage.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Saving and restoring the benchmark data in SQL database.
"""
import logging
from typing import Optional
from sqlalchemy import URL, create_engine
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.services.base_service import Service
from mlos_bench.storage.base_storage import Storage
from mlos_bench.storage.sql.schema import DbSchema
from mlos_bench.storage.sql.experiment import Experiment
_LOG = logging.getLogger(__name__)
class SqlStorage(Storage):
    """
    An implementation of the Storage interface using SQLAlchemy backend.
    """

    def __init__(self,
                 tunables: TunableGroups,
                 config: dict,
                 global_config: Optional[dict] = None,
                 service: Optional[Service] = None):
        super().__init__(tunables, config, global_config, service)
        lazy_schema_create = self._config.pop("lazy_schema_create", False)
        self._log_sql = self._config.pop("log_sql", False)
        # Remaining config keys become SQLAlchemy URL components.
        self._url = URL.create(**self._config)
        self._repr = f"{self._url.get_backend_name()}:{self._url.database}"
        _LOG.info("Connect to the database: %s", self)
        self._engine = create_engine(self._url, echo=self._log_sql)
        self._db_schema: DbSchema
        if not lazy_schema_create:
            # BUGFIX: previously `assert self._schema`, which relied on the
            # assert's side effect to create the schema eagerly; asserts are
            # stripped under `python -O`, so trigger the property explicitly.
            _ = self._schema
        else:
            _LOG.info("Using lazy schema create for database: %s", self)

    @property
    def _schema(self) -> DbSchema:
        """Lazily create the DB schema upon first access."""
        if not hasattr(self, '_db_schema'):
            self._db_schema = DbSchema(self._engine).create()
            if _LOG.isEnabledFor(logging.DEBUG):
                _LOG.debug("DDL statements:\n%s", self._db_schema)
        return self._db_schema

    def __repr__(self) -> str:
        return self._repr

    def experiment(self, *,
                   experiment_id: str,
                   trial_id: int,
                   root_env_config: str,
                   description: str,
                   opt_target: str) -> Storage.Experiment:
        """
        Create a new experiment context backed by this SQL storage.
        """
        return Experiment(
            engine=self._engine,
            schema=self._schema,
            tunables=self._tunables,
            experiment_id=experiment_id,
            trial_id=trial_id,
            root_env_config=root_env_config,
            description=description,
            opt_target=opt_target,
        )
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,808
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/environments/local/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.environments.local.
Used to make mypy happy about multiple conftest.py modules.
"""
from datetime import datetime
from typing import Any, Dict, List, Tuple
import pytest
from mlos_bench.environments.local.local_env import LocalEnv
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.services.local.local_exec import LocalExecService
from mlos_bench.tunables.tunable_groups import TunableGroups
def create_local_env(tunable_groups: TunableGroups, config: Dict[str, Any]) -> LocalEnv:
"""
Create a LocalEnv with the given configuration.
Parameters
----------
tunable_groups : TunableGroups
Tunable parameters (usually come from a fixture).
config : Dict[str, Any]
Environment configuration.
Returns
-------
local_env : LocalEnv
A new instance of the local environment.
"""
return LocalEnv(name="TestLocalEnv", config=config, tunables=tunable_groups,
service=LocalExecService(parent=ConfigPersistenceService()))
def check_local_env_success(local_env: LocalEnv,
tunable_groups: TunableGroups,
expected_results: Dict[str, float],
expected_telemetry: List[Tuple[datetime, str, Any]]) -> None:
"""
Set up a local environment and run a test experiment there.
Parameters
----------
tunable_groups : TunableGroups
Tunable parameters (usually come from a fixture).
local_env : LocalEnv
A local environment to query for the results.
expected_results : Dict[str, float]
Expected results of the benchmark.
expected_telemetry : List[Tuple[datetime, str, Any]]
Expected telemetry data of the benchmark.
"""
with local_env as env_context:
assert env_context.setup(tunable_groups)
(status, data) = env_context.run()
assert status.is_succeeded()
assert data == pytest.approx(expected_results, nan_ok=True)
(status, telemetry) = env_context.status()
assert status.is_good()
assert telemetry == pytest.approx(expected_telemetry, nan_ok=True)
def check_local_env_fail_telemetry(local_env: LocalEnv, tunable_groups: TunableGroups) -> None:
"""
Set up a local environment and run a test experiment there;
Make sure the environment `.status()` call fails.
Parameters
----------
tunable_groups : TunableGroups
Tunable parameters (usually come from a fixture).
local_env : LocalEnv
A local environment to query for the results.
"""
with local_env as env_context:
assert env_context.setup(tunable_groups)
(status, _data) = env_context.run()
assert status.is_succeeded()
with pytest.raises(ValueError):
env_context.status()
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,809
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/environments/mock_env_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for mock benchmark environment.
"""
import pytest
from mlos_bench.environments.mock_env import MockEnv
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_mock_env_default(mock_env: MockEnv, tunable_groups: TunableGroups) -> None:
"""
Check the default values of the mock environment.
"""
with mock_env as env_context:
assert env_context.setup(tunable_groups)
(status, data) = env_context.run()
assert status.is_succeeded()
assert data is not None
assert data["score"] == pytest.approx(73.97, 0.01)
# Second time, results should differ because of the noise.
(status, data) = env_context.run()
assert status.is_succeeded()
assert data is not None
assert data["score"] == pytest.approx(72.92, 0.01)
def test_mock_env_no_noise(mock_env_no_noise: MockEnv, tunable_groups: TunableGroups) -> None:
"""
Check the default values of the mock environment.
"""
with mock_env_no_noise as env_context:
assert env_context.setup(tunable_groups)
for _ in range(10):
# Noise-free results should be the same every time.
(status, data) = env_context.run()
assert status.is_succeeded()
assert data is not None
assert data["score"] == pytest.approx(75.0, 0.01)
@pytest.mark.parametrize(('tunable_values', 'expected_score'), [
({
"vmSize": "Standard_B2ms",
"idle": "halt",
"kernel_sched_migration_cost_ns": 250000
}, 66.4),
({
"vmSize": "Standard_B4ms",
"idle": "halt",
"kernel_sched_migration_cost_ns": 40000
}, 74.06),
])
def test_mock_env_assign(mock_env: MockEnv, tunable_groups: TunableGroups,
tunable_values: dict, expected_score: float) -> None:
"""
Check the benchmark values of the mock environment after the assignment.
"""
with mock_env as env_context:
tunable_groups.assign(tunable_values)
assert env_context.setup(tunable_groups)
(status, data) = env_context.run()
assert status.is_succeeded()
assert data is not None
assert data["score"] == pytest.approx(expected_score, 0.01)
@pytest.mark.parametrize(('tunable_values', 'expected_score'), [
({
"vmSize": "Standard_B2ms",
"idle": "halt",
"kernel_sched_migration_cost_ns": 250000
}, 67.5),
({
"vmSize": "Standard_B4ms",
"idle": "halt",
"kernel_sched_migration_cost_ns": 40000
}, 75.1),
])
def test_mock_env_no_noise_assign(mock_env_no_noise: MockEnv,
tunable_groups: TunableGroups,
tunable_values: dict, expected_score: float) -> None:
"""
Check the benchmark values of the noiseless mock environment after the assignment.
"""
with mock_env_no_noise as env_context:
tunable_groups.assign(tunable_values)
assert env_context.setup(tunable_groups)
for _ in range(10):
# Noise-free environment should produce the same results every time.
(status, data) = env_context.run()
assert status.is_succeeded()
assert data is not None
assert data["score"] == pytest.approx(expected_score, 0.01)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,810
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/types/authenticator_type.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Protocol interface for authentication for the cloud services.
"""
from typing import Protocol, runtime_checkable
@runtime_checkable
class SupportsAuth(Protocol):
"""
Protocol interface for authentication for the cloud services.
"""
# pylint: disable=too-few-public-methods
def get_access_token(self) -> str:
"""
Get the access token for cloud services.
Returns
-------
access_token : str
Access token.
"""
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,811
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/optimizers/bayesian_optimizers_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for Bayesian Optimizers.
"""
from typing import Optional, Type
import pytest
import pandas as pd
import ConfigSpace as CS
from mlos_core.optimizers import BaseOptimizer, OptimizerType
from mlos_core.optimizers.bayesian_optimizers import BaseBayesianOptimizer
@pytest.mark.parametrize(('optimizer_class', 'kwargs'), [
*[(member.value, {}) for member in OptimizerType],
])
def test_context_not_implemented_error(configuration_space: CS.ConfigurationSpace,
optimizer_class: Type[BaseOptimizer], kwargs: Optional[dict]) -> None:
"""
Make sure we raise exceptions for the functionality that has not been implemented yet.
"""
if kwargs is None:
kwargs = {}
optimizer = optimizer_class(parameter_space=configuration_space, **kwargs)
suggestion = optimizer.suggest()
scores = pd.DataFrame({'score': [1]})
# test context not implemented errors
with pytest.raises(NotImplementedError):
optimizer.register(suggestion, scores['score'], context=pd.DataFrame([["something"]]))
with pytest.raises(NotImplementedError):
optimizer.suggest(context=pd.DataFrame([["something"]]))
if isinstance(optimizer, BaseBayesianOptimizer):
with pytest.raises(NotImplementedError):
optimizer.surrogate_predict(suggestion, context=pd.DataFrame([["something"]]))
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,812
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/storage/conftest.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Test fixtures for mlos_bench storage.
"""
import pytest
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.storage.base_storage import Storage
from mlos_bench.storage.sql.storage import SqlStorage
@pytest.fixture
def exp_storage_memory_sql(tunable_groups: TunableGroups) -> Storage.Experiment:
"""
Test fixture for in-memory SQLite3 storage.
"""
storage = SqlStorage(
tunables=tunable_groups,
service=None,
config={
"drivername": "sqlite",
"database": ":memory:",
}
)
# pylint: disable=unnecessary-dunder-call
return storage.experiment(
experiment_id="Test-001",
trial_id=1,
root_env_config="environment.jsonc",
description="pytest experiment",
opt_target="score",
).__enter__()
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,813
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/types/remote_exec_type.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Protocol interface for Service types that provide helper functions to run
scripts on a remote host OS.
"""
from typing import Iterable, Tuple, Protocol, runtime_checkable, TYPE_CHECKING
if TYPE_CHECKING:
from mlos_bench.environments.status import Status
@runtime_checkable
class SupportsRemoteExec(Protocol):
"""
Protocol interface for Service types that provide helper functions to run
scripts on a remote host OS.
"""
def remote_exec(self, script: Iterable[str], config: dict,
env_params: dict) -> Tuple["Status", dict]:
"""
Run a command on remote host OS.
Parameters
----------
script : Iterable[str]
A list of lines to execute as a script on a remote VM.
config : dict
Flat dictionary of (key, value) pairs of parameters.
They usually come from `const_args` and `tunable_params`
properties of the Environment.
env_params : dict
Parameters to pass as *shell* environment variables into the script.
This is usually a subset of `config` with some possible conversions.
Returns
-------
result : (Status, dict)
A pair of Status and result.
Status is one of {PENDING, SUCCEEDED, FAILED}
"""
def get_remote_exec_results(self, config: dict) -> Tuple["Status", dict]:
"""
Get the results of the asynchronously running command.
Parameters
----------
config : dict
Flat dictionary of (key, value) pairs of tunable parameters.
Must have the "asyncResultsUrl" key to get the results.
If the key is not present, return Status.PENDING.
Returns
-------
result : (Status, dict)
A pair of Status and result.
Status is one of {PENDING, SUCCEEDED, FAILED, TIMED_OUT}
"""
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,814
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Common functions for mlos_core Optimizer tests.
"""
import sys
from importlib import import_module
from pkgutil import walk_packages
from typing import List, Optional, Set, Type, TypeVar
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
T = TypeVar('T')
def get_all_submodules(pkg: TypeAlias) -> List[str]:
"""
Imports all submodules for a package and returns their names.
Useful for dynamically enumerating subclasses.
"""
submodules = []
for _, submodule_name, _ in walk_packages(pkg.__path__, prefix=f"{pkg.__name__}.", onerror=lambda x: None):
submodules.append(submodule_name)
return submodules
def _get_all_subclasses(cls: Type[T]) -> Set[Type[T]]:
"""
Gets the set of all of the subclasses of the given class.
Useful for dynamically enumerating expected test cases.
"""
return set(cls.__subclasses__()).union(
s for c in cls.__subclasses__() for s in _get_all_subclasses(c))
def get_all_concrete_subclasses(cls: Type[T], pkg_name: Optional[str] = None) -> List[Type[T]]:
"""
Gets a sorted list of all of the concrete subclasses of the given class.
Useful for dynamically enumerating expected test cases.
Note: For abstract types, mypy will complain at the call site.
Use "# type: ignore[type-abstract]" to suppress the warning.
See Also: https://github.com/python/mypy/issues/4717
"""
if pkg_name is not None:
pkg = import_module(pkg_name)
submodules = get_all_submodules(pkg)
assert submodules
return sorted([subclass for subclass in _get_all_subclasses(cls) if not getattr(subclass, "__abstractmethods__", None)],
key=lambda c: (c.__module__, c.__name__))
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,815
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/environments/include_tunables_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Test the selection of tunables / tunable groups for the environment.
"""
from mlos_bench.environments.mock_env import MockEnv
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.tunables.tunable_groups import TunableGroups
def test_one_group(tunable_groups: TunableGroups) -> None:
"""
Make sure only one tunable group is available to the environment.
"""
env = MockEnv(
name="Test Env",
config={"tunable_params": ["provision"]},
tunables=tunable_groups
)
assert env.tunable_params.get_param_values() == {
"vmSize": "Standard_B4ms",
}
def test_two_groups(tunable_groups: TunableGroups) -> None:
"""
Make sure only the selected tunable groups are available to the environment.
"""
env = MockEnv(
name="Test Env",
config={"tunable_params": ["provision", "kernel"]},
tunables=tunable_groups
)
assert env.tunable_params.get_param_values() == {
"vmSize": "Standard_B4ms",
"kernel_sched_migration_cost_ns": -1,
"kernel_sched_latency_ns": 2000000,
}
def test_two_groups_setup(tunable_groups: TunableGroups) -> None:
"""
Make sure only the selected tunable groups are available to the environment,
the set is not changed after calling the `.setup()` method.
"""
env = MockEnv(
name="Test Env",
config={
"tunable_params": ["provision", "kernel"],
"const_args": {
"const_param1": 10,
"const_param2": "foo",
},
},
tunables=tunable_groups
)
expected_params = {
"vmSize": "Standard_B4ms",
"kernel_sched_migration_cost_ns": -1,
"kernel_sched_latency_ns": 2000000,
}
assert env.tunable_params.get_param_values() == expected_params
with env as env_context:
assert env_context.setup(tunable_groups)
# Make sure the set of tunables does not change after the setup:
assert env.tunable_params.get_param_values() == expected_params
assert env.parameters == {
**expected_params,
"const_param1": 10,
"const_param2": "foo",
}
def test_zero_groups_implicit(tunable_groups: TunableGroups) -> None:
"""
Make sure that no tunable groups are available to the environment by default.
"""
env = MockEnv(
name="Test Env",
config={},
tunables=tunable_groups
)
assert env.tunable_params.get_param_values() == {}
def test_zero_groups_explicit(tunable_groups: TunableGroups) -> None:
"""
Make sure that no tunable groups are available to the environment
when explicitly specifying an empty list of tunable_params.
"""
env = MockEnv(
name="Test Env",
config={"tunable_params": []},
tunables=tunable_groups
)
assert env.tunable_params.get_param_values() == {}
def test_zero_groups_implicit_setup(tunable_groups: TunableGroups) -> None:
"""
Make sure that no tunable groups are available to the environment by default
and it does not change after the setup.
"""
env = MockEnv(
name="Test Env",
config={
"const_args": {
"const_param1": 10,
"const_param2": "foo",
},
},
tunables=tunable_groups
)
assert env.tunable_params.get_param_values() == {}
with env as env_context:
assert env_context.setup(tunable_groups)
# Make sure the set of tunables does not change after the setup:
assert env.tunable_params.get_param_values() == {}
assert env.parameters == {
"const_param1": 10,
"const_param2": "foo",
}
def test_loader_level_include() -> None:
"""
Make sure only the selected tunable groups are available to the environment,
the set is not changed after calling the `.setup()` method.
"""
env_json = {
"class": "mlos_bench.environments.mock_env.MockEnv",
"name": "Test Env",
"include_tunables": [
"environments/os/linux/boot/linux-boot-tunables.jsonc"
],
"config": {
"tunable_params": ["linux-kernel-boot"],
"const_args": {
"const_param1": 10,
"const_param2": "foo",
},
},
}
loader = ConfigPersistenceService({
"config_path": [
"mlos_bench/config",
"mlos_bench/examples",
]
})
env = loader.build_environment(config=env_json, tunables=TunableGroups())
expected_params = {
"align_va_addr": "on",
"idle": "halt",
"ima.ahash_bufsize": 4096,
"noautogroup": "",
"nohugevmalloc": "",
"nohalt": "",
"nohz": "",
"no-kvmapf": "",
"nopvspin": "",
}
assert env.tunable_params.get_param_values() == expected_params
expected_params["align_va_addr"] = "off"
tunables = env.tunable_params.copy().assign({"align_va_addr": "off"})
with env as env_context:
assert env_context.setup(tunables)
# Make sure the set of tunables does not change after the setup:
assert env.parameters == {
**expected_params,
"const_param1": 10,
"const_param2": "foo",
}
assert env.tunable_params.get_param_values() == expected_params
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,816
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Basic initializer module for the mlos_core package.
"""
import ConfigSpace
import pandas as pd
def config_to_dataframe(config: ConfigSpace.Configuration) -> pd.DataFrame:
"""Converts a ConfigSpace config to a DataFrame
Parameters
----------
config : ConfigSpace.Configuration
The config to convert.
Returns
-------
pd.DataFrame
A DataFrame with a single row, containing the config's parameters.
"""
return pd.DataFrame([dict(config)])
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,817
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/config/schemas/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A module for managing config schemas and their validation.
"""
from mlos_bench.config.schemas.config_schemas import ConfigSchema, CONFIG_SCHEMA_DIR
__all__ = [
'ConfigSchema',
'CONFIG_SCHEMA_DIR',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,818
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/tests/spaces/adapters/space_adapter_factory_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for space adapter factory.
"""
# pylint: disable=missing-function-docstring
from typing import List, Optional, Type
import pytest
import ConfigSpace as CS
from mlos_core.spaces.adapters import SpaceAdapterFactory, SpaceAdapterType, ConcreteSpaceAdapter
from mlos_core.spaces.adapters.adapter import BaseSpaceAdapter
from mlos_core.spaces.adapters.identity_adapter import IdentityAdapter
from mlos_core.tests import get_all_concrete_subclasses
@pytest.mark.parametrize(('space_adapter_type'), [
# Enumerate all supported SpaceAdapters
# *[member for member in SpaceAdapterType],
*list(SpaceAdapterType),
])
def test_concrete_optimizer_type(space_adapter_type: SpaceAdapterType) -> None:
"""
Test that all optimizer types are listed in the ConcreteOptimizer constraints.
"""
# pylint: disable=no-member
assert space_adapter_type.value in ConcreteSpaceAdapter.__constraints__ # type: ignore[attr-defined]
@pytest.mark.parametrize(('space_adapter_type', 'kwargs'), [
# Default space adapter
(None, {}),
# Enumerate all supported Optimizers
*[(member, {}) for member in SpaceAdapterType],
])
def test_create_space_adapter_with_factory_method(space_adapter_type: Optional[SpaceAdapterType], kwargs: Optional[dict]) -> None:
# Start defining a ConfigurationSpace for the Optimizer to search.
input_space = CS.ConfigurationSpace(seed=1234)
# Add a single continuous input dimension between 0 and 1.
input_space.add_hyperparameter(CS.UniformFloatHyperparameter(name='x', lower=0, upper=1))
# Add a single continuous input dimension between 0 and 1.
input_space.add_hyperparameter(CS.UniformFloatHyperparameter(name='y', lower=0, upper=1))
# Adjust some kwargs for specific space adapters
if space_adapter_type is SpaceAdapterType.LLAMATUNE:
if kwargs is None:
kwargs = {}
kwargs.setdefault('num_low_dims', 1)
space_adapter: BaseSpaceAdapter
if space_adapter_type is None:
space_adapter = SpaceAdapterFactory.create(parameter_space=input_space)
else:
space_adapter = SpaceAdapterFactory.create(
parameter_space=input_space,
space_adapter_type=space_adapter_type,
space_adapter_kwargs=kwargs,
)
if space_adapter_type is None or space_adapter_type is SpaceAdapterType.IDENTITY:
assert isinstance(space_adapter, IdentityAdapter)
else:
assert space_adapter is not None
assert space_adapter.orig_parameter_space is not None
myrepr = repr(space_adapter)
assert myrepr.startswith(space_adapter_type.value.__name__), \
f"Expected {space_adapter_type.value.__name__} but got {myrepr}"
# Dynamically determine all of the optimizers we have implemented.
# Note: these must be sorted.
space_adapter_subclasses: List[Type[BaseSpaceAdapter]] = \
get_all_concrete_subclasses(BaseSpaceAdapter, pkg_name='mlos_core') # type: ignore[type-abstract]
assert space_adapter_subclasses
@pytest.mark.parametrize(('space_adapter_class'), space_adapter_subclasses)
def test_space_adapter_type_defs(space_adapter_class: Type[BaseSpaceAdapter]) -> None:
"""
Test that all space adapter classes are listed in the SpaceAdapterType enum.
"""
space_adapter_type_classes = {space_adapter_type.value for space_adapter_type in SpaceAdapterType}
assert space_adapter_class in space_adapter_type_classes
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,819
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/environments/local/local_env_telemetry_test.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Unit tests for telemetry and status of LocalEnv benchmark environment.
"""
from datetime import datetime, timedelta
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.tests.environments.local import (
create_local_env, check_local_env_success, check_local_env_fail_telemetry
)
def test_local_env_telemetry(tunable_groups: TunableGroups) -> None:
"""
Produce benchmark and telemetry data in a local script and read it.
"""
ts1 = datetime.utcnow()
ts1 -= timedelta(microseconds=ts1.microsecond) # Round to a second
ts2 = ts1 + timedelta(minutes=1)
time_str1 = ts1.strftime("%Y-%m-%d %H:%M:%S")
time_str2 = ts2.strftime("%Y-%m-%d %H:%M:%S")
local_env = create_local_env(tunable_groups, {
"run": [
"echo 'metric,value' > output.csv",
"echo 'latency,4.1' >> output.csv",
"echo 'throughput,512' >> output.csv",
"echo 'score,0.95' >> output.csv",
"echo '-------------------'", # This output does not go anywhere
"echo 'timestamp,metric,value' > telemetry.csv",
f"echo {time_str1},cpu_load,0.65 >> telemetry.csv",
f"echo {time_str1},mem_usage,10240 >> telemetry.csv",
f"echo {time_str2},cpu_load,0.8 >> telemetry.csv",
f"echo {time_str2},mem_usage,20480 >> telemetry.csv",
],
"read_results_file": "output.csv",
"read_telemetry_file": "telemetry.csv",
})
check_local_env_success(
local_env, tunable_groups,
expected_results={
"latency": 4.1,
"throughput": 512.0,
"score": 0.95,
},
expected_telemetry=[
(ts1, "cpu_load", 0.65),
(ts1, "mem_usage", 10240.0),
(ts2, "cpu_load", 0.8),
(ts2, "mem_usage", 20480.0),
],
)
def test_local_env_telemetry_no_header(tunable_groups: TunableGroups) -> None:
"""
Read the telemetry data with no header.
"""
ts1 = datetime.utcnow()
ts1 -= timedelta(microseconds=ts1.microsecond) # Round to a second
ts2 = ts1 + timedelta(minutes=1)
time_str1 = ts1.strftime("%Y-%m-%d %H:%M:%S")
time_str2 = ts2.strftime("%Y-%m-%d %H:%M:%S")
local_env = create_local_env(tunable_groups, {
"run": [
f"echo {time_str1},cpu_load,0.65 > telemetry.csv",
f"echo {time_str1},mem_usage,10240 >> telemetry.csv",
f"echo {time_str2},cpu_load,0.8 >> telemetry.csv",
f"echo {time_str2},mem_usage,20480 >> telemetry.csv",
],
"read_telemetry_file": "telemetry.csv",
})
check_local_env_success(
local_env, tunable_groups,
expected_results={},
expected_telemetry=[
(ts1, "cpu_load", 0.65),
(ts1, "mem_usage", 10240.0),
(ts2, "cpu_load", 0.8),
(ts2, "mem_usage", 20480.0),
],
)
def test_local_env_telemetry_wrong_header(tunable_groups: TunableGroups) -> None:
    """
    Reject telemetry data whose header row has unexpected column names.
    """
    # Truncate to whole seconds so the strftime round-trip is lossless.
    start = datetime.utcnow().replace(microsecond=0)
    later = start + timedelta(minutes=1)
    fmt = "%Y-%m-%d %H:%M:%S"
    (time_str1, time_str2) = (start.strftime(fmt), later.strftime(fmt))
    bad_config = {
        "run": [
            # Error: the data is correct, but the header has unexpected column names
            "echo 'ts,metric_name,metric_value' > telemetry.csv",
            f"echo {time_str1},cpu_load,0.65 >> telemetry.csv",
            f"echo {time_str1},mem_usage,10240 >> telemetry.csv",
            f"echo {time_str2},cpu_load,0.8 >> telemetry.csv",
            f"echo {time_str2},mem_usage,20480 >> telemetry.csv",
        ],
        "read_telemetry_file": "telemetry.csv",
    }
    check_local_env_fail_telemetry(
        create_local_env(tunable_groups, bad_config), tunable_groups)
def test_local_env_telemetry_invalid(tunable_groups: TunableGroups) -> None:
    """
    Reject telemetry data that has too many columns per row.
    """
    # Truncate to whole seconds so the strftime round-trip is lossless.
    start = datetime.utcnow().replace(microsecond=0)
    later = start + timedelta(minutes=1)
    fmt = "%Y-%m-%d %H:%M:%S"
    (time_str1, time_str2) = (start.strftime(fmt), later.strftime(fmt))
    bad_config = {
        "run": [
            # Error: too many columns
            f"echo {time_str1},EXTRA,cpu_load,0.65 > telemetry.csv",
            f"echo {time_str1},EXTRA,mem_usage,10240 >> telemetry.csv",
            f"echo {time_str2},EXTRA,cpu_load,0.8 >> telemetry.csv",
            f"echo {time_str2},EXTRA,mem_usage,20480 >> telemetry.csv",
        ],
        "read_telemetry_file": "telemetry.csv",
    }
    check_local_env_fail_telemetry(
        create_local_env(tunable_groups, bad_config), tunable_groups)
def test_local_env_telemetry_invalid_ts(tunable_groups: TunableGroups) -> None:
    """
    Reject telemetry data whose first column is not a valid timestamp.
    """
    bad_config = {
        "run": [
            # Error: field 1 must be a timestamp
            "echo 1,cpu_load,0.65 > telemetry.csv",
            "echo 2,mem_usage,10240 >> telemetry.csv",
            "echo 3,cpu_load,0.8 >> telemetry.csv",
            "echo 4,mem_usage,20480 >> telemetry.csv",
        ],
        "read_telemetry_file": "telemetry.csv",
    }
    check_local_env_fail_telemetry(
        create_local_env(tunable_groups, bad_config), tunable_groups)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,820
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/local/local_env.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Scheduler-side benchmark environment to run scripts locally.
"""
import json
import logging
import sys
from datetime import datetime
from tempfile import TemporaryDirectory
from contextlib import nullcontext
from types import TracebackType
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type, Union
from typing_extensions import Literal
import pandas
from mlos_bench.environments.status import Status
from mlos_bench.environments.base_environment import Environment
from mlos_bench.environments.script_env import ScriptEnv
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.local_exec_type import SupportsLocalExec
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import path_join
_LOG = logging.getLogger(__name__)
class LocalEnv(ScriptEnv):
    # pylint: disable=too-many-instance-attributes
    """
    Scheduler-side Environment that runs scripts locally.
    """

    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new environment for local execution.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration. Each config must have at least the "tunable_params"
            and the "const_args" sections.
            `LocalEnv` must also have at least some of the following parameters:
            {setup, run, teardown, dump_params_file, read_results_file}
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of tunable parameters for *all* environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        super().__init__(name=name, config=config, global_config=global_config,
                         tunables=tunables, service=service)
        # Local execution is mandatory for this environment type.
        assert self._service is not None and isinstance(self._service, SupportsLocalExec), \
            "LocalEnv requires a service that supports local execution"
        self._local_exec_service: SupportsLocalExec = self._service
        # Temp dir is created in __enter__ and released in __exit__.
        self._temp_dir: Optional[str] = None
        self._temp_dir_context: Union[TemporaryDirectory, nullcontext, None] = None
        # Optional file names (resolved relative to the temp dir) used to
        # exchange data with the locally executed scripts.
        self._dump_params_file: Optional[str] = self.config.get("dump_params_file")
        self._dump_meta_file: Optional[str] = self.config.get("dump_meta_file")
        self._read_results_file: Optional[str] = self.config.get("read_results_file")
        self._read_telemetry_file: Optional[str] = self.config.get("read_telemetry_file")

    def __enter__(self) -> Environment:
        """
        Enter the benchmarking context and create the working (temp) directory.
        """
        assert self._temp_dir is None and self._temp_dir_context is None
        # If "temp_dir" is set in the config, the service may reuse that path
        # (via nullcontext) instead of creating a throwaway TemporaryDirectory.
        self._temp_dir_context = self._local_exec_service.temp_dir_context(self.config.get("temp_dir"))
        self._temp_dir = self._temp_dir_context.__enter__()
        return super().__enter__()

    def __exit__(self, ex_type: Optional[Type[BaseException]],
                 ex_val: Optional[BaseException],
                 ex_tb: Optional[TracebackType]) -> Literal[False]:
        """
        Exit the context of the benchmarking environment.
        """
        assert not (self._temp_dir is None or self._temp_dir_context is None)
        # Release the temp dir before delegating to the base class.
        self._temp_dir_context.__exit__(ex_type, ex_val, ex_tb)
        self._temp_dir = None
        self._temp_dir_context = None
        return super().__exit__(ex_type, ex_val, ex_tb)

    def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
        """
        Check if the environment is ready and set up the application
        and benchmarks, if necessary.

        Parameters
        ----------
        tunables : TunableGroups
            A collection of tunable OS and application parameters along with their
            values. In a local environment these could be used to prepare a config
            file on the scheduler prior to transferring it to the remote environment,
            for instance.
        global_config : dict
            Free-format dictionary of global parameters of the environment
            that are not used in the optimization process.

        Returns
        -------
        is_success : bool
            True if operation is successful, false otherwise.
        """
        if not super().setup(tunables, global_config):
            return False
        _LOG.info("Set up the environment locally: '%s' at %s", self, self._temp_dir)
        assert self._temp_dir is not None
        if self._dump_params_file:
            fname = path_join(self._temp_dir, self._dump_params_file)
            _LOG.debug("Dump tunables to file: %s", fname)
            with open(fname, "wt", encoding="utf-8") as fh_tunables:
                # json.dump(self._params, fh_tunables)  # Tunables *and* const_args
                json.dump(self._tunable_params.get_param_values(), fh_tunables)
        if self._dump_meta_file:
            fname = path_join(self._temp_dir, self._dump_meta_file)
            _LOG.debug("Dump tunables metadata to file: %s", fname)
            with open(fname, "wt", encoding="utf-8") as fh_meta:
                # Only tunables that actually carry metadata are dumped.
                json.dump({
                    tunable.name: tunable.meta
                    for (tunable, _group) in self._tunable_params if tunable.meta
                }, fh_meta)
        if self._script_setup:
            return_code = self._local_exec(self._script_setup, self._temp_dir)
            self._is_ready = bool(return_code == 0)
        else:
            # No setup script: the environment is considered ready as-is.
            self._is_ready = True
        return self._is_ready

    def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:
        """
        Run a script in the local scheduler environment.

        Returns
        -------
        (status, output) : (Status, dict)
            A pair of (Status, output) values, where `output` is a dict
            with the results or None if the status is not COMPLETED.
            If run script is a benchmark, then the score is usually expected to
            be in the `score` field.
        """
        (status, _) = result = super().run()
        if not status.is_ready():
            return result
        assert self._temp_dir is not None
        if self._script_run:
            return_code = self._local_exec(self._script_run, self._temp_dir)
            if return_code != 0:
                return (Status.FAILED, None)
        # FIXME: We should not be assuming that the only output file type is a CSV.
        if not self._read_results_file:
            _LOG.debug("Not reading the data at: %s", self)
            return (Status.SUCCEEDED, {})
        data = self._normalize_columns(pandas.read_csv(
            self._config_loader_service.resolve_path(
                self._read_results_file, extra_paths=[self._temp_dir]),
            index_col=False,
        ))
        _LOG.debug("Read data:\n%s", data)
        # Accept either a long (metric,value per row) or a wide (one row) format.
        if list(data.columns) == ["metric", "value"]:
            _LOG.info("Local results have (metric,value) header and %d rows: assume long format", len(data))
            # Pivot long format into a single wide row.
            data = pandas.DataFrame([data.value.to_list()], columns=data.metric.to_list())
        elif len(data) == 1:
            _LOG.info("Local results have 1 row: assume wide format")
        else:
            raise ValueError(f"Invalid data format: {data}")
        data_dict = data.iloc[-1].to_dict()
        _LOG.info("Local run complete: %s ::\n%s", self, data_dict)
        return (Status.SUCCEEDED, data_dict)

    @staticmethod
    def _normalize_columns(data: pandas.DataFrame) -> pandas.DataFrame:
        """
        Strip trailing spaces from column names (Windows only).
        """
        # Windows cmd interpretation of > redirect symbols can leave trailing spaces in
        # the final column, which leads to misnamed columns.
        # For now, we simply strip trailing spaces from column names to account for that.
        if sys.platform == 'win32':
            data.rename(str.rstrip, axis='columns', inplace=True)
        return data

    def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:
        """
        Check the status of the environment and read the telemetry CSV, if any.

        Returns
        -------
        (status, telemetry) : (Status, list)
            Environment status and a list of (timestamp, metric, value) triplets
            (empty if no telemetry file is configured or available yet).
        """
        (status, _) = super().status()
        if not (self._is_ready and self._read_telemetry_file):
            return (status, [])
        assert self._temp_dir is not None
        try:
            fname = self._config_loader_service.resolve_path(
                self._read_telemetry_file, extra_paths=[self._temp_dir])
            # FIXME: We should not be assuming that the only output file type is a CSV.
            data = self._normalize_columns(
                pandas.read_csv(fname, index_col=False, parse_dates=[0]))
            expected_col_names = ["timestamp", "metric", "value"]
            if len(data.columns) != len(expected_col_names):
                raise ValueError(f'Telemetry data must have columns {expected_col_names}')
            if list(data.columns) != expected_col_names:
                # Assume no header - this is ok for telemetry data.
                # Re-read the file, treating the first row as data.
                data = pandas.read_csv(
                    fname, index_col=False, parse_dates=[0], names=expected_col_names)
        except FileNotFoundError as ex:
            # The script may not have produced telemetry yet: not an error.
            _LOG.warning("Telemetry CSV file not found: %s :: %s", self._read_telemetry_file, ex)
            return (status, [])
        _LOG.debug("Read telemetry data:\n%s", data)
        col_dtypes: Mapping[int, Type] = {0: datetime}
        # Convert pandas Timestamps back to plain datetime objects for callers.
        return (status, [
            (pandas.Timestamp(ts).to_pydatetime(), metric, value)
            for (ts, metric, value) in data.to_records(index=False, column_dtypes=col_dtypes)
        ])

    def teardown(self) -> None:
        """
        Clean up the local environment.
        """
        if self._script_teardown:
            _LOG.info("Local teardown: %s", self)
            return_code = self._local_exec(self._script_teardown)
            _LOG.info("Local teardown complete: %s :: %s", self, return_code)
        super().teardown()

    def _local_exec(self, script: Iterable[str], cwd: Optional[str] = None) -> int:
        """
        Execute a script locally in the scheduler environment.

        Parameters
        ----------
        script : Iterable[str]
            Lines of the script to run locally.
            Treat every line as a separate command to run.
        cwd : Optional[str]
            Work directory to run the script at.

        Returns
        -------
        return_code : int
            Return code of the script. 0 if successful.
        """
        env_params = self._get_env_params()
        _LOG.info("Run script locally on: %s at %s with env %s", self, cwd, env_params)
        (return_code, _stdout, stderr) = self._local_exec_service.local_exec(
            script, env=env_params, cwd=cwd)
        if return_code != 0:
            _LOG.warning("ERROR: Local script returns code %d stderr:\n%s", return_code, stderr)
        return return_code
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,821
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/services/remote/azure/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Azure-specific benchmark environments for mlos_bench.
"""
from mlos_bench.services.remote.azure.azure_auth import AzureAuthService
from mlos_bench.services.remote.azure.azure_services import AzureVMService
from mlos_bench.services.remote.azure.azure_fileshare import AzureFileShareService
# Public API of the Azure services subpackage (for `import *` and docs).
__all__ = [
    'AzureAuthService',
    'AzureVMService',
    'AzureFileShareService',
]
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,822
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/mlos_core/optimizers/__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Basic initializer module for the mlos_core optimizers.
"""
from enum import Enum
from typing import Optional, TypeVar
import ConfigSpace
from mlos_core.optimizers.optimizer import BaseOptimizer
from mlos_core.optimizers.random_optimizer import RandomOptimizer
from mlos_core.optimizers.bayesian_optimizers.smac_optimizer import SmacOptimizer
from mlos_core.optimizers.flaml_optimizer import FlamlOptimizer
from mlos_core.spaces.adapters import SpaceAdapterType, SpaceAdapterFactory
__all__ = [
'SpaceAdapterType',
'OptimizerFactory',
'BaseOptimizer',
'RandomOptimizer',
'FlamlOptimizer',
'SmacOptimizer',
]
class OptimizerType(Enum):
    """Enumerate supported MlosCore optimizers."""
    # Each member's value is the optimizer class itself, so
    # `optimizer_type.value(...)` constructs an instance directly
    # (see OptimizerFactory.create below).
    RANDOM = RandomOptimizer
    """An instance of RandomOptimizer class will be used"""
    FLAML = FlamlOptimizer
    """An instance of FlamlOptimizer class will be used"""
    SMAC = SmacOptimizer
    """An instance of SmacOptimizer class will be used"""
# To make mypy happy, we need to define a type variable for each optimizer type.
# https://github.com/python/mypy/issues/12952
# ConcreteOptimizer = TypeVar('ConcreteOptimizer', *[member.value for member in OptimizerType])
# To address this, we add a test for complete coverage of the enum.
# NOTE: this list must be kept in sync with the OptimizerType enum above.
ConcreteOptimizer = TypeVar(
    'ConcreteOptimizer',
    RandomOptimizer,
    FlamlOptimizer,
    SmacOptimizer,
)
# Optimizer used by OptimizerFactory.create when the caller does not pick one.
DEFAULT_OPTIMIZER_TYPE = OptimizerType.FLAML
class OptimizerFactory:
    """Simple factory class for creating BaseOptimizer-derived objects"""
    # pylint: disable=too-few-public-methods

    @staticmethod
    def create(*,
               parameter_space: ConfigSpace.ConfigurationSpace,
               optimizer_type: OptimizerType = DEFAULT_OPTIMIZER_TYPE,
               optimizer_kwargs: Optional[dict] = None,
               space_adapter_type: SpaceAdapterType = SpaceAdapterType.IDENTITY,
               space_adapter_kwargs: Optional[dict] = None) -> ConcreteOptimizer:
        """
        Create a new optimizer instance, given the parameter space, optimizer type,
        and potential optimizer options.

        Parameters
        ----------
        parameter_space : ConfigSpace.ConfigurationSpace
            Input configuration space.
        optimizer_type : OptimizerType
            Optimizer class as defined by Enum.
        optimizer_kwargs : Optional[dict]
            Optional arguments passed in Optimizer class constructor.
        space_adapter_type : Optional[SpaceAdapterType]
            Space adapter class to be used alongside the optimizer.
        space_adapter_kwargs : Optional[dict]
            Optional arguments passed in SpaceAdapter class constructor.

        Returns
        -------
        optimizer : ConcreteOptimizer
            Instance of concrete optimizer class
            (e.g., RandomOptimizer, FlamlOptimizer, SmacOptimizer, etc.).
        """
        # Build the space adapter first, then hand it to the optimizer.
        # `or {}` maps a None kwargs argument to an empty dict.
        space_adapter = SpaceAdapterFactory.create(
            parameter_space=parameter_space,
            space_adapter_type=space_adapter_type,
            space_adapter_kwargs=space_adapter_kwargs or {},
        )
        # The enum member's value is the optimizer class itself.
        optimizer: ConcreteOptimizer = optimizer_type.value(
            parameter_space=parameter_space,
            space_adapter=space_adapter,
            **(optimizer_kwargs or {})
        )
        return optimizer
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,823
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A collection Service functions for mocking file share ops.
"""
import logging
from typing import Any, Dict, Optional
from mlos_bench.services.base_service import Service
from mlos_bench.services.base_fileshare import FileShareService
from mlos_bench.services.types.fileshare_type import SupportsFileShareOps
_LOG = logging.getLogger(__name__)
class MockFileShareService(FileShareService, SupportsFileShareOps):
    """
    A collection Service functions for mocking file share ops.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """Register the no-op download/upload methods with the base service."""
        super().__init__(config, global_config, parent)
        self.register([self.download, self.upload])

    def download(self, remote_path: str, local_path: str, recursive: bool = True) -> None:
        """No-op mock download: accepts the arguments and does nothing."""

    def upload(self, local_path: str, remote_path: str, recursive: bool = True) -> None:
        """No-op mock upload: accepts the arguments and does nothing."""
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,824
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/environments/base_environment.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
A hierarchy of benchmark environments.
"""
import abc
import json
import logging
from datetime import datetime
from types import TracebackType
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TYPE_CHECKING, Union
from typing_extensions import Literal
from mlos_bench.environments.status import Status
from mlos_bench.services.base_service import Service
from mlos_bench.tunables.tunable import TunableValue
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import instantiate_from_config, merge_parameters
if TYPE_CHECKING:
from mlos_bench.services.types.config_loader_type import SupportsConfigLoading
_LOG = logging.getLogger(__name__)
class Environment(metaclass=abc.ABCMeta):
    # pylint: disable=too-many-instance-attributes
    """
    An abstract base of all benchmark environments.
    """

    @classmethod
    def new(cls,
            *,
            env_name: str,
            class_name: str,
            config: dict,
            global_config: Optional[dict] = None,
            tunables: Optional[TunableGroups] = None,
            service: Optional[Service] = None,
            ) -> "Environment":
        """
        Factory method for a new environment with a given config.

        Parameters
        ----------
        env_name: str
            Human-readable name of the environment.
        class_name: str
            FQN of a Python class to instantiate, e.g.,
            "mlos_bench.environments.remote.VMEnv".
            Must be derived from the `Environment` class.
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration. It will be passed as a constructor parameter of
            the class specified by `name`.
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of groups of tunable parameters for all environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).

        Returns
        -------
        env : Environment
            An instance of the `Environment` class initialized with `config`.
        """
        assert issubclass(cls, Environment)
        return instantiate_from_config(
            cls,
            class_name,
            name=env_name,
            config=config,
            global_config=global_config,
            tunables=tunables,
            service=service
        )

    def __init__(self,
                 *,
                 name: str,
                 config: dict,
                 global_config: Optional[dict] = None,
                 tunables: Optional[TunableGroups] = None,
                 service: Optional[Service] = None):
        """
        Create a new environment with a given config.

        Parameters
        ----------
        name: str
            Human-readable name of the environment.
        config : dict
            Free-format dictionary that contains the benchmark environment
            configuration. Each config must have at least the "tunable_params"
            and the "const_args" sections.
        global_config : dict
            Free-format dictionary of global parameters (e.g., security credentials)
            to be mixed in into the "const_args" section of the local config.
        tunables : TunableGroups
            A collection of groups of tunable parameters for all environments.
        service: Service
            An optional service object (e.g., providing methods to
            deploy or reboot a VM, etc.).
        """
        self.name = name
        self.config = config
        self._service = service
        self._is_ready = False
        # Guard flag: setup/run/status/teardown must happen inside `with env:`.
        self._in_context = False
        self._const_args = config.get("const_args", {})
        if tunables is None:
            _LOG.warning("No tunables provided for %s. Tunable inheritance across composite environments may be broken.", name)
            tunables = TunableGroups()
        # Expand `$group` references using the global tunable_params_map.
        groups = self._expand_groups(
            config.get("tunable_params", []),
            (global_config or {}).get("tunable_params_map", {}))
        _LOG.debug("Tunable groups for: '%s' :: %s", name, groups)
        self._tunable_params = tunables.subgroup(groups)
        # If a parameter comes from the tunables, do not require it in the const_args or globals
        req_args = (
            set(config.get("required_args", [])) -
            set(self._tunable_params.get_param_values().keys())
        )
        merge_parameters(dest=self._const_args, source=global_config, required_keys=req_args)
        self._params = self._combine_tunables(self._tunable_params)
        _LOG.debug("Parameters for '%s' :: %s", name, self._params)
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Config for: '%s'\n%s",
                       name, json.dumps(self.config, indent=2))

    @staticmethod
    def _expand_groups(groups: Iterable[str],
                       groups_exp: Dict[str, Union[str, Sequence[str]]]) -> List[str]:
        """
        Expand `$tunable_group` into actual names of the tunable groups.

        Parameters
        ----------
        groups : List[str]
            Names of the groups of tunables, maybe with `$` prefix (subject to expansion).
        groups_exp : dict
            A dictionary that maps dollar variables for tunable groups to the lists
            of actual tunable groups IDs.

        Returns
        -------
        groups : List[str]
            A flat list of tunable groups IDs for the environment.
        """
        res: List[str] = []
        for grp in groups:
            if grp[:1] == "$":
                tunable_group_name = grp[1:]
                if tunable_group_name not in groups_exp:
                    raise KeyError(f"Expected tunable group name ${tunable_group_name} undefined in {groups_exp}")
                add_groups = groups_exp[tunable_group_name]
                # A mapping value may be a single group name or a list of names.
                res += [add_groups] if isinstance(add_groups, str) else add_groups
            else:
                res.append(grp)
        return res

    @property
    def _config_loader_service(self) -> "SupportsConfigLoading":
        """Config loader of the service attached to this environment."""
        assert self._service is not None
        return self._service.config_loader_service

    def __enter__(self) -> 'Environment':
        """
        Enter the environment's benchmarking context.
        """
        _LOG.debug("Environment START :: %s", self)
        assert not self._in_context
        self._in_context = True
        return self

    def __exit__(self, ex_type: Optional[Type[BaseException]],
                 ex_val: Optional[BaseException],
                 ex_tb: Optional[TracebackType]) -> Literal[False]:
        """
        Exit the context of the benchmarking environment.
        """
        if ex_val is None:
            _LOG.debug("Environment END :: %s", self)
        else:
            assert ex_type and ex_val
            _LOG.warning("Environment END :: %s", self, exc_info=(ex_type, ex_val, ex_tb))
        assert self._in_context
        self._in_context = False
        return False  # Do not suppress exceptions

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        return f"{self.__class__.__name__} :: '{self.name}'"

    def pprint(self, indent: int = 4, level: int = 0) -> str:
        """
        Pretty-print the environment configuration.
        For composite environments, print all children environments as well.

        Parameters
        ----------
        indent : int
            Number of spaces to indent the output. Default is 4.
        level : int
            Current level of indentation. Default is 0.

        Returns
        -------
        pretty : str
            Pretty-printed environment configuration.
            Default output is the same as `__repr__`.
        """
        return f'{" " * indent * level}{repr(self)}'

    def _combine_tunables(self, tunables: TunableGroups) -> Dict[str, TunableValue]:
        """
        Plug tunable values into the base config. If the tunable group is unknown,
        ignore it (it might belong to another environment). This method should
        never mutate the original config or the tunables.

        Parameters
        ----------
        tunables : TunableGroups
            A collection of groups of tunable parameters
            along with the parameters' values.

        Returns
        -------
        params : Dict[str, Union[int, float, str]]
            Free-format dictionary that contains the new environment configuration.
        """
        return tunables.get_param_values(
            group_names=list(self._tunable_params.get_covariant_group_names()),
            into_params=self._const_args.copy())

    @property
    def tunable_params(self) -> TunableGroups:
        """
        Get the configuration space of the given environment.

        Returns
        -------
        tunables : TunableGroups
            A collection of covariant groups of tunable parameters.
        """
        return self._tunable_params

    @property
    def parameters(self) -> Dict[str, TunableValue]:
        """
        Key/value pairs of all environment parameters (i.e., `const_args` and `tunable_params`).
        Note that before `.setup()` is called, all tunables will be set to None.

        Returns
        -------
        parameters : Dict[str, TunableValue]
            Key/value pairs of all environment parameters (i.e., `const_args` and `tunable_params`).
        """
        return self._params

    def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
        """
        Set up a new benchmark environment, if necessary. This method must be
        idempotent, i.e., calling it several times in a row should be
        equivalent to a single call.

        Parameters
        ----------
        tunables : TunableGroups
            A collection of tunable parameters along with their values.
        global_config : dict
            Free-format dictionary of global parameters of the environment
            that are not used in the optimization process.

        Returns
        -------
        is_success : bool
            True if operation is successful, false otherwise.
        """
        _LOG.info("Setup %s :: %s", self, tunables)
        assert isinstance(tunables, TunableGroups)
        # Make sure we create a context before invoking setup/run/status/teardown
        assert self._in_context
        # Assign new values to the environment's tunable parameters:
        groups = list(self._tunable_params.get_covariant_group_names())
        self._tunable_params.assign(tunables.get_param_values(groups))
        # Write to the log whether the environment needs to be reset.
        # (Derived classes still have to check `self._tunable_params.is_updated()`).
        is_updated = self._tunable_params.is_updated()
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Env '%s': Tunable groups reset = %s :: %s", self, is_updated, {
                name: self._tunable_params.is_updated([name])
                for name in self._tunable_params.get_covariant_group_names()
            })
        else:
            _LOG.info("Env '%s': Tunable groups reset = %s", self, is_updated)
        # Combine tunables, const_args, and global config into `self._params`:
        self._params = self._combine_tunables(tunables)
        merge_parameters(dest=self._params, source=global_config)
        if _LOG.isEnabledFor(logging.DEBUG):
            _LOG.debug("Combined parameters:\n%s", json.dumps(self._params, indent=2))
        return True

    def teardown(self) -> None:
        """
        Tear down the benchmark environment. This method must be idempotent,
        i.e., calling it several times in a row should be equivalent to a
        single call.
        """
        _LOG.info("Teardown %s", self)
        # Make sure we create a context before invoking setup/run/status/teardown
        assert self._in_context
        self._is_ready = False

    def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:
        """
        Execute the run script for this environment.
        For instance, this may start a new experiment, download results, reconfigure
        the environment, etc. Details are configurable via the environment config.

        Returns
        -------
        (status, output) : (Status, dict)
            A pair of (Status, output) values, where `output` is a dict
            with the results or None if the status is not COMPLETED.
            If run script is a benchmark, then the score is usually expected to
            be in the `score` field.
        """
        # Make sure we create a context before invoking setup/run/status/teardown
        assert self._in_context
        (status, _) = self.status()
        return (status, None)

    def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:
        """
        Check the status of the benchmark environment.

        Returns
        -------
        (benchmark_status, telemetry) : (Status, list)
            A pair of (benchmark status, telemetry) values.
            `telemetry` is a list (maybe empty) of (timestamp, metric, value) triplets.
        """
        # Make sure we create a context before invoking setup/run/status/teardown
        assert self._in_context
        if self._is_ready:
            return (Status.READY, [])
        _LOG.warning("Environment not ready: %s", self)
        return (Status.PENDING, [])
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,825
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_bench/mlos_bench/run.py
|
#!/usr/bin/env python3
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
OS Autotune main optimization loop.
Note: this script is also available as a CLI tool via pip under the name "mlos_bench".
See `--help` output for details.
"""
import json
import logging
from datetime import datetime
from typing import Optional, Tuple, Dict, Any
from mlos_bench.launcher import Launcher
from mlos_bench.optimizers.base_optimizer import Optimizer
from mlos_bench.environments.base_environment import Environment
from mlos_bench.storage.base_storage import Storage
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
_LOG = logging.getLogger(__name__)
def _main() -> None:
    """Entry point: construct the launcher and run the optimization loop once."""
    launcher = Launcher("mlos_bench", "Systems autotuning and benchmarking tool")
    final_result = _optimize(
        env=launcher.environment,
        opt=launcher.optimizer,
        storage=launcher.storage,
        root_env_config=launcher.root_env_config,
        global_config=launcher.global_config,
        do_teardown=launcher.teardown,
    )
    _LOG.info("Final result: %s", final_result)
def _optimize(*,
              env: Environment,
              opt: Optimizer,
              storage: Storage,
              root_env_config: str,
              global_config: Dict[str, Any],
              do_teardown: bool) -> Tuple[Optional[float], Optional[TunableGroups]]:
    """
    Main optimization loop.

    Parameters
    ----------
    env : Environment
        benchmarking environment to run the optimization on.
    opt : Optimizer
        An interface to mlos_core optimizers.
    storage : Storage
        A storage system to persist the experiment data.
    root_env_config : str
        A path to the root JSON configuration file of the benchmarking environment.
    global_config : dict
        Global configuration parameters.
    do_teardown : bool
        If True, teardown the environment at the end of the experiment

    Returns
    -------
    (best_score, best_config) : Tuple[Optional[float], Optional[TunableGroups]]
        The best score and the corresponding tunable values reported by
        `opt.get_best_observation()` (may be None if nothing was recorded).
    """
    # pylint: disable=too-many-locals
    if _LOG.isEnabledFor(logging.INFO):
        _LOG.info("Root Environment:\n%s", env.pprint())
    experiment_id = global_config["experiment_id"].strip()
    # trial_id defaults to 1 (fresh experiment); config_id <= 0 means
    # "no pre-selected config" -- the optimizer suggests all configs.
    trial_id = int(global_config.get("trial_id", 1))
    config_id = int(global_config.get("config_id", -1))
    # Start new or resume the existing experiment. Verify that the
    # experiment configuration is compatible with the previous runs.
    # If the `merge` config parameter is present, merge in the data
    # from other experiments and check for compatibility.
    with env as env_context, storage.experiment(experiment_id=experiment_id,
                                                trial_id=trial_id,
                                                root_env_config=root_env_config,
                                                description=env.name,
                                                opt_target=opt.target) as exp:
        _LOG.info("Experiment: %s Env: %s Optimizer: %s", exp, env, opt)
        if opt.supports_preload:
            # Load (tunable values, benchmark scores) to warm-up the optimizer.
            # `.load()` returns data from ALL merged-in experiments and attempts
            # to impute the missing tunable values.
            (configs, scores, status) = exp.load()
            opt.bulk_register(configs, scores, status)
            # Complete any pending trials.
            for trial in exp.pending_trials():
                _run(env_context, opt, trial, global_config)
        else:
            _LOG.warning("Skip pending trials and warm-up: %s", opt)
        # Now run new trials until the optimizer is done.
        while opt.not_converged():
            tunables = opt.suggest()
            if config_id > 0:
                # One-shot override: run the explicitly requested config first;
                # config_id is reset below so later iterations use suggestions.
                tunable_values = exp.load_config(config_id)
                tunables.assign(tunable_values)
                _LOG.info("Load config from storage: %d", config_id)
                if _LOG.isEnabledFor(logging.DEBUG):
                    _LOG.debug("Config %d ::\n%s",
                               config_id, json.dumps(tunable_values, indent=2))
                config_id = -1
            trial = exp.new_trial(tunables)
            _run(env_context, opt, trial, global_config)
        # Teardown happens inside the `with` block: env_context is only
        # guaranteed to be usable while the environment context is open.
        if do_teardown:
            env_context.teardown()
    (best_score, best_config) = opt.get_best_observation()
    _LOG.info("Env: %s best score: %s", env, best_score)
    return (best_score, best_config)
def _run(env_context: Environment, opt: Optimizer,
         trial: Storage.Trial, global_config: Dict[str, Any]) -> None:
    """
    Run a single trial: set up the environment, execute the benchmark,
    and record the status/results in storage and with the optimizer.

    Parameters
    ----------
    env_context : Environment
        Benchmarking environment context to run the optimization on.
    opt : Optimizer
        An interface to mlos_core optimizers.
    trial : Storage.Trial
        Storage record of the trial being run; receives status and results.
    global_config : dict
        Global configuration parameters.
    """
    _LOG.info("Trial: %s", trial)
    if not env_context.setup(trial.tunables, trial.config(global_config)):
        _LOG.warning("Setup failed: %s :: %s", env_context, trial.tunables)
        # FIXME: Use the actual timestamp from the environment.
        trial.update(Status.FAILED, datetime.utcnow())
        # Register the failure (no score) so the optimizer learns from it too.
        opt.register(trial.tunables, Status.FAILED)
        return
    (status, results) = env_context.run()  # Block and wait for the final result.
    _LOG.info("Results: %s :: %s\n%s", trial.tunables, status, results)
    # In async mode (TODO), poll the environment for status and telemetry
    # and update the storage with the intermediate results.
    (_, telemetry) = env_context.status()
    # Use the status from `.run()` as it is the final status of the experiment.
    # TODO: Use the `.status()` output in async mode.
    trial.update_telemetry(status, telemetry)
    # FIXME: Use the actual timestamp from the benchmark.
    trial.update(status, datetime.utcnow(), results)
    opt.register(trial.tunables, status, results)
# Script entry point (per the module docstring, also installed as the
# `mlos_bench` console command via pip).
if __name__ == "__main__":
    _main()
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
13,826
|
microsoft/MLOS
|
refs/heads/main
|
/mlos_core/setup.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Setup instructions for the mlos_core package.
"""
from itertools import chain
from logging import warning
from typing import Dict, List
from setuptools import setup, find_packages
from _version import _VERSION # pylint: disable=import-private-name
# Prefer a live version derived from git metadata (via setuptools_scm)
# when available; otherwise keep the static _VERSION from _version.py.
try:
    from setuptools_scm import get_version
    version = get_version(root='..', relative_to=__file__)
    if version is not None:
        _VERSION = version  # noqa: F811
except ImportError:
    # setuptools_scm is an optional build-time dependency.
    warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
    # get_version() raises LookupError when no git metadata can be found
    # (e.g. building from an sdist tarball outside a git checkout).
    warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
# Optional feature sets: each key is an "extra" users can install,
# e.g. `pip install mlos-core[smac]`.
extra_requires: Dict[str, List[str]] = {    # pylint: disable=consider-using-namedtuple-or-dataclass
    'flaml': ['flaml[blendsearch]'],
    'smac': ['smac>=2.0.0'],    # NOTE: Major refactoring on SMAC starting from v2.0.0
}
# construct special 'full' extra that adds requirements for all built-in
# backend integrations and additional extra features.
# Deduplicate via a set, then sort so the generated package metadata is
# reproducible: bare `list(set(...))` ordering varies between interpreter
# runs due to string hash randomization.
extra_requires['full'] = sorted(set(chain(*extra_requires.values())))
# 'full-tests' additionally pulls in the test-runner toolchain.
extra_requires['full-tests'] = extra_requires['full'] + [
    'pytest',
    'pytest-forked',
    'pytest-xdist',
    'pytest-cov',
    'pytest-local-badge',
]
# pylint: disable=duplicate-code
# Import name of the package; used below to exclude its test tree from the wheel.
MODULE_BASE_NAME = 'mlos_core'
setup(
    name='mlos-core',
    version=_VERSION,
    # Ship only the library packages -- exclude mlos_core.tests and subpackages.
    packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
    package_data={
        # Include the PEP 561 marker and stub files so type checkers
        # pick up the package's annotations.
        '': ['py.typed', '**/*.pyi'],
    },
    install_requires=[
        'scikit-learn>=1.2',
        'joblib>=1.1.1',    # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
        'scipy>=1.3.2',
        'numpy>=1.24',
        'pandas>=1.0.3',
        'ConfigSpace>=0.7.1',
    ],
    # Optional feature sets defined above (flaml, smac, full, full-tests).
    extras_require=extra_requires,
    author='Microsoft',
    author_email='mlos-maintainers@service.microsoft.com',
    description=('MLOS Core Python interface for parameter optimization.'),
    license='MIT',
    keywords='',
    url='https://aka.ms/mlos-core',
    python_requires='>=3.8',
)
|
{"/mlos_bench/mlos_bench/tests/services/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/__init__.py", "/mlos_bench/mlos_bench/tests/services/remote/__init__.py"], "/mlos_bench/mlos_bench/tests/services/local/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py"], "/mlos_bench/mlos_bench/tests/services/remote/__init__.py": ["/mlos_bench/mlos_bench/tests/services/remote/mock/mock_fileshare_service.py", "/mlos_bench/mlos_bench/tests/services/remote/mock/mock_remote_exec_service.py"], "/mlos_bench/mlos_bench/tests/services/local/mock/__init__.py": ["/mlos_bench/mlos_bench/tests/services/local/mock/mock_local_exec_service.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.