text stringlengths 81 112k |
|---|
Helper routine to easily test if the schedule is valid
def check_schedule():
    """Helper routine to easily test if the schedule is valid"""
    # A validator returning a truthy value signals a problem, so the
    # schedule is valid only if every validator stays quiet.
    all_items = prefetch_schedule_items()
    if any(validator(all_items)
           for validator, _type, _msg in SCHEDULE_ITEM_VALIDATORS):
        return False
    all_slots = prefetch_slots()
    if any(validator(all_slots)
           for validator, _type, _msg in SLOT_VALIDATORS):
        return False
    return True
Helper routine to report issues with the schedule
def validate_schedule():
    """Helper routine to report issues with the schedule"""
    # Collect the message of every validator that flags a problem, first
    # for the schedule items, then for the slots.
    all_items = prefetch_schedule_items()
    errors = [msg for validator, _type, msg in SCHEDULE_ITEM_VALIDATORS
              if validator(all_items)]
    all_slots = prefetch_slots()
    errors.extend(msg for validator, _type, msg in SLOT_VALIDATORS
                  if validator(all_slots))
    return errors
Change the form depending on whether we're adding or
editing the slot.
def get_form(self, request, obj=None, **kwargs):
    """Change the form depending on whether we're adding or
    editing the slot."""
    adding = obj is None
    if adding:
        # No existing Slot to edit, so use the dedicated "add" form.
        kwargs['form'] = SlotAdminAddForm
    return super(SlotAdmin, self).get_form(request, obj, **kwargs)
Return the menus from the cache or generate them if needed.
def get_cached_menus():
    """Return the menus from the cache or generate them if needed."""
    cached_items = cache.get(CACHE_KEY)
    if cached_items is not None:
        # Cache hit: rebuild the Menu wrapper around the cached items.
        return Menu(cached_items)
    # Cache miss: generate the menu and store its items for next time.
    menu = generate_menu()
    cache.set(CACHE_KEY, menu.items)
    return menu
If argument is not a string, return it.
Otherwise import the dotted name and return that.
def maybe_obj(str_or_obj):
    """If argument is not a string, return it.
    Otherwise import the dotted name and return that.
    """
    if not isinstance(str_or_obj, six.string_types):
        return str_or_obj
    parts = str_or_obj.split(".")
    # Import the longest importable prefix of the dotted name. __import__
    # returns the top-level package, so walk attributes afterwards.
    mod = None
    dotted = None
    for part in parts:
        dotted = part if dotted is None else "%s.%s" % (dotted, part)
        try:
            mod = __import__(dotted)
        except ImportError:
            if mod is None:
                # Not even the first component imported: re-raise.
                raise
            break
    result = mod
    for attr in parts[1:]:
        result = getattr(result, attr)
    return result
Generate a new list of menus.
def generate_menu():
    """Generate a new list of menus."""
    # Deep-copy the static configuration so dynamic menu functions
    # cannot mutate the settings themselves.
    root_menu = Menu(list(copy.deepcopy(settings.WAFER_MENUS)))
    for menu_func in settings.WAFER_DYNAMIC_MENUS:
        # Entries may be dotted names; resolve to a callable first.
        maybe_obj(menu_func)(root_menu)
    return root_menu
Try to get locked the file
- the function will wait until the file is unlocked if 'wait' was defined as locktype
- the function will raise AlreadyLocked exception if 'lock' was defined as locktype
def lock(self):
    '''
    Try to acquire a lock on the lock file.
    - the function will wait until the file is unlocked if 'wait' was defined as locktype
    - the function will raise AlreadyLocked exception if 'lock' was defined as locktype
    '''
    # Open (and truncate) the lock file; the fd is kept on the instance so
    # the flock is held for the lifetime of this object.
    self.__fd = open(self.__lockfile, "w")
    # Get it locked
    if self.__locktype == "wait":
        # Blocking acquire: sleeps until any other holder releases the lock.
        fcntl.flock(self.__fd.fileno(), fcntl.LOCK_EX)
    elif self.__locktype == "lock":
        # Non-blocking attempt: flock raises IOError immediately if another
        # process holds the lock, which we surface as AlreadyLocked.
        try:
            fcntl.flock(self.__fd.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
        except IOError:
            raise AlreadyLocked("File is already locked")
Makes a handler class to use inside the basic Python HTTP server.
state_token is the expected state token.
done_function is a function that is called, with the code passed to it.
def _make_handler(state_token, done_function):
    '''
    Makes a handler class to use inside the basic python HTTP server.
    state_token is the expected state token.
    done_function is a function that is called, with the code passed to it.
    '''
    class LocalServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def error_response(self, msg):
            # logging does not accept arbitrary keyword arguments; for
            # %(name)s-style formatting the values must be passed as a
            # single dict argument (the old kwargs form raised TypeError).
            logging.warn(
                'Error response: %(msg)s. %(path)s',
                {'msg': msg, 'path': self.path})
            self.send_response(400)
            self.send_header('Content-type', 'text/plain')
            self.end_headers()
            self.wfile.write(msg)
        def do_GET(self):
            parsed = urlparse.urlparse(self.path)
            if len(parsed.query) == 0 or parsed.path != '/callback':
                self.error_response(
                    'We encountered a problem with your request.')
                return
            params = urlparse.parse_qs(parsed.query)
            # Use .get so a request missing the 'state' parameter is
            # reported as an error instead of raising KeyError here.
            if params.get('state') != [state_token]:
                self.error_response(
                    'Attack detected: state tokens did not match!')
                return
            # Likewise for 'code': zero occurrences must hit the error
            # branch, not crash the handler.
            if len(params.get('code', [])) != 1:
                self.error_response('Wrong number of "code" query parameters.')
                return
            self.send_response(200)
            self.send_header('Content-type', 'text/plain')
            self.end_headers()
            self.wfile.write(
                "courseraoauth2client: we have captured Coursera's response "
                "code. Feel free to close this browser window now and return "
                "to your terminal. Thanks!")
            done_function(params['code'][0])
    return LocalServerHandler
Loads configuration from the file system.
def configuration():
    'Loads configuration from the file system.'
    # Built-in defaults; any of the files read below may override them.
    defaults = '''
[oauth2]
hostname = localhost
port = 9876
api_endpoint = https://api.coursera.org
auth_endpoint = https://accounts.coursera.org/oauth2/v1/auth
token_endpoint = https://accounts.coursera.org/oauth2/v1/token
verify_tls = True
token_cache_base = ~/.coursera
[manage_graders]
client_id = NS8qaSX18X_Eu0pyNbLsnA
client_secret = bUqKqGywnGXEJPFrcd4Jpw
scopes = view_profile manage_graders
[manage_research_exports]
client_id = sDHC8Nfp-b1XMbzZx8Wa4w
client_secret = pgD4adDd7lm-ksfG7UazUA
scopes = view_profile manage_research_exports
'''
    parser = ConfigParser.SafeConfigParser()
    parser.readfp(io.BytesIO(defaults))
    # Later files win: system-wide, then per-user, then working directory.
    parser.read([
        '/etc/coursera/courseraoauth2client.cfg',
        os.path.expanduser('~/.coursera/courseraoauth2client.cfg'),
        'courseraoauth2client.cfg',
    ])
    return parser
Reads the local fs cache for pre-authorized access tokens
def _load_token_cache(self):
    'Reads the local fs cache for pre-authorized access tokens'
    try:
        logging.debug('About to read from local file cache file %s',
                      self.token_cache_file)
        with open(self.token_cache_file, 'rb') as f:
            fs_cached = cPickle.load(f)
        if self._check_token_cache_type(fs_cached):
            logging.debug('Loaded from file system: %s', fs_cached)
            return fs_cached
        else:
            logging.warn('Found unexpected value in cache. %s',
                         fs_cached)
            return None
    except IOError:
        # Missing cache file is the normal first-run case.
        logging.debug(
            'Did not find file: %s on the file system.',
            self.token_cache_file)
        return None
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any other failure (corrupt pickle,
        # permissions, ...) is logged and treated as a cache miss.
        logging.info(
            'Encountered exception loading from the file system.',
            exc_info=True)
        return None
Write out to the filesystem a cache of the OAuth2 information.
def _save_token_cache(self, new_cache):
    'Write out to the filesystem a cache of the OAuth2 information.'
    logging.debug('Looking to write to local authentication cache...')
    if not self._check_token_cache_type(new_cache):
        # Refuse to persist malformed values so a later load can trust
        # anything it successfully unpickles.
        logging.error('Attempt to save a bad value: %s', new_cache)
        return
    try:
        logging.debug('About to write to fs cache file: %s',
                      self.token_cache_file)
        with open(self.token_cache_file, 'wb') as f:
            cPickle.dump(new_cache, f, protocol=cPickle.HIGHEST_PROTOCOL)
            logging.debug('Finished dumping cache_value to fs cache file.')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate; caching is best-effort, so other errors are only logged.
        logging.exception(
            'Could not successfully cache OAuth2 secrets on the file '
            'system.')
Checks the cache_value for appropriate type correctness.
Pass strict=True for strict validation to ensure the latest types are
being written.
Returns True if the type is correct, False otherwise.
def _check_token_cache_type(self, cache_value):
'''
Checks the cache_value for appropriate type correctness.
Pass strict=True for strict validation to ensure the latest types are
being written.
Returns true is correct type, False otherwise.
'''
def check_string_value(name):
return (
isinstance(cache_value[name], str) or
isinstance(cache_value[name], unicode)
)
def check_refresh_token():
if 'refresh' in cache_value:
return check_string_value('refresh')
else:
return True
return (
isinstance(cache_value, dict) and
'token' in cache_value and
'expires' in cache_value and
check_string_value('token') and
isinstance(cache_value['expires'], float) and
check_refresh_token()
) |
Stands up a new localhost http server and retrieves new OAuth2 access
tokens from the Coursera OAuth2 server.
def _authorize_new_tokens(self):
    '''
    Stands up a new localhost http server and retrieves new OAuth2 access
    tokens from the Coursera OAuth2 server.
    '''
    logging.info('About to request new OAuth2 tokens from Coursera.')
    # Attempt to request new tokens from Coursera via the browser.
    state_token = uuid.uuid4().hex
    authorization_url = self._build_authorizaton_url(state_token)
    sys.stdout.write(
        'Please visit the following URL to authorize this app:\n')
    sys.stdout.write('\t%s\n\n' % authorization_url)
    if _platform == 'darwin':
        # OS X -- leverage the 'open' command present on all modern macs
        sys.stdout.write(
            'Mac OS X detected; attempting to auto-open the url '
            'in your default browser...\n')
        try:
            subprocess.check_call(['open', authorization_url])
        except:
            # logging does not accept arbitrary keyword arguments; pass
            # the url as a positional %-arg (the old `url=` kwarg raised
            # TypeError, masking the real failure).
            logging.exception('Could not call `open %s`.',
                              authorization_url)
    if self.local_webserver_port is not None:
        # Boot up a local webserver to retrieve the response.
        server_address = ('', self.local_webserver_port)
        code_holder = CodeHolder()
        local_server = BaseHTTPServer.HTTPServer(
            server_address,
            _make_handler(state_token, code_holder))
        while not code_holder.has_code():
            local_server.handle_request()
        coursera_code = code_holder.code
    else:
        # No local server configured: fall back to manual copy/paste.
        coursera_code = raw_input('Please enter the code from Coursera: ')
    form_data = {
        'code': coursera_code,
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'redirect_uri': self._redirect_uri,
        'grant_type': 'authorization_code',
    }
    return self._request_tokens_from_token_endpoint(form_data)
Exchanges a refresh token for an access token
def _exchange_refresh_tokens(self):
'Exchanges a refresh token for an access token'
if self.token_cache is not None and 'refresh' in self.token_cache:
# Attempt to use the refresh token to get a new access token.
refresh_form = {
'grant_type': 'refresh_token',
'refresh_token': self.token_cache['refresh'],
'client_id': self.client_id,
'client_secret': self.client_secret,
}
try:
tokens = self._request_tokens_from_token_endpoint(refresh_form)
tokens['refresh'] = self.token_cache['refresh']
return tokens
except OAuth2Exception:
logging.exception(
'Encountered an exception during refresh token flow.')
return None |
function to determine if each select field needs a create button or not
def foreignkey(element, exceptions):
    '''
    function to determine if each select field needs a create button or not
    '''
    field_dict = element.field.__dict__
    label = field_dict['label']
    try:
        label = unicode(label)
    except NameError:
        # Python 3 has no `unicode` builtin; the label is already text.
        pass
    # Empty labels and explicitly excluded labels never get a button.
    if not label or label in exceptions:
        return False
    # Only fields backed by a queryset (i.e. model choice fields) do.
    return "_queryset" in field_dict
Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them
def deserialize_by_field(value, field):
    """
    Some types get serialized to JSON, as strings.
    If we know what they are supposed to be, we can deserialize them
    """
    # Order matters: the most specific field type is checked first, and
    # only the first match is applied (mirrors an if/elif chain).
    parsers = (
        (forms.DateTimeField, parse_datetime),
        (forms.DateField, parse_date),
        (forms.TimeField, parse_time),
    )
    for field_cls, parser in parsers:
        if isinstance(field, field_cls):
            return parser(value)
    return value
Combined hyperprior for the kernel, noise kernel and (if present) mean function.
def hyperprior(self):
    """Combined hyperprior for the kernel, noise kernel and (if present) mean function.
    """
    combined = self.k.hyperprior * self.noise_k.hyperprior
    if self.mu is None:
        return combined
    return combined * self.mu.hyperprior
Combined fixed hyperparameter flags for the kernel, noise kernel and (if present) mean function.
def fixed_params(self):
    """Combined fixed hyperparameter flags for the kernel, noise kernel and (if present) mean function.
    """
    flags = CombinedBounds(self.k.fixed_params, self.noise_k.fixed_params)
    if self.mu is None:
        return flags
    return CombinedBounds(flags, self.mu.fixed_params)
Combined hyperparameters for the kernel, noise kernel and (if present) mean function.
def params(self):
    """Combined hyperparameters for the kernel, noise kernel and (if present) mean function.
    """
    combined = CombinedBounds(self.k.params, self.noise_k.params)
    if self.mu is None:
        return combined
    return CombinedBounds(combined, self.mu.params)
Combined names for the hyperparameters for the kernel, noise kernel and (if present) mean function.
def param_names(self):
    """Combined names for the hyperparameters for the kernel, noise kernel and (if present) mean function.
    """
    names = CombinedBounds(self.k.param_names, self.noise_k.param_names)
    if self.mu is None:
        return names
    return CombinedBounds(names, self.mu.param_names)
Combined free hyperparameters for the kernel, noise kernel and (if present) mean function.
def free_params(self):
    """Combined free hyperparameters for the kernel, noise kernel and (if present) mean function.
    """
    free = CombinedBounds(self.k.free_params, self.noise_k.free_params)
    if self.mu is None:
        return free
    return CombinedBounds(free, self.mu.free_params)
Set the free parameters. Note that this bypasses enforce_bounds.
def free_params(self, value):
    """Set the free parameters. Note that this bypasses enforce_bounds.
    """
    value = scipy.asarray(value, dtype=float)
    # Any change to the hyperparameters invalidates the cached kernel matrix.
    self.K_up_to_date = False
    # Slice the flat vector into the kernel, noise-kernel and (optional)
    # mean-function segments, in that fixed order.
    n_k = self.k.num_free_params
    n_noise = self.noise_k.num_free_params
    self.k.free_params = value[:n_k]
    self.noise_k.free_params = value[n_k:n_k + n_noise]
    if self.mu is not None:
        self.mu.free_params = value[n_k + n_noise:]
Combined free hyperparameter bounds for the kernel, noise kernel and (if present) mean function.
def free_param_bounds(self):
    """Combined free hyperparameter bounds for the kernel, noise kernel and (if present) mean function.
    """
    bounds = CombinedBounds(
        self.k.free_param_bounds, self.noise_k.free_param_bounds
    )
    if self.mu is None:
        return bounds
    return CombinedBounds(bounds, self.mu.free_param_bounds)
Combined free hyperparameter names for the kernel, noise kernel and (if present) mean function.
def free_param_names(self):
    """Combined free hyperparameter names for the kernel, noise kernel and (if present) mean function.
    """
    names = CombinedBounds(
        self.k.free_param_names, self.noise_k.free_param_names
    )
    if self.mu is None:
        return names
    return CombinedBounds(names, self.mu.free_param_names)
Add data to the training data set of the GaussianProcess instance.
Parameters
----------
X : array, (`M`, `D`)
`M` input values of dimension `D`.
y : array, (`M`,)
`M` target values.
err_y : array, (`M`,) or scalar float, optional
Non-negative values only. Error given as standard deviation) in the
`M` target values. If `err_y` is a scalar, the data set is taken to
be homoscedastic (constant error). Otherwise, the length of `err_y`
must equal the length of `y`. Default value is 0 (noiseless
observations).
n : array, (`M`, `D`) or scalar float, optional
Non-negative integer values only. Degree of derivative for each
target. If `n` is a scalar it is taken to be the value for all
points in `y`. Otherwise, the length of n must equal the length of
`y`. Default value is 0 (observation of target value). If
non-integer values are passed, they will be silently rounded.
T : array, (`M`, `N`), optional
Linear transformation to get from latent variables to data in the
argument `y`. When `T` is passed the argument `y` holds the
transformed quantities `y=TY(X)` where `y` are the observed values
of the transformed quantities, `T` is the transformation matrix and
`Y(X)` is the underlying (untransformed) values of the function to
be fit that enter into the transformation. When `T` is `M`-by-`N`
and `y` has `M` elements, `X` and `n` will both be `N`-by-`D`.
Default is None (no transformation).
Raises
------
ValueError
Bad shapes for any of the inputs, negative values for `err_y` or `n`.
def add_data(self, X, y, err_y=0, n=0, T=None):
    """Add data to the training data set of the GaussianProcess instance.
    Parameters
    ----------
    X : array, (`M`, `D`)
        `M` input values of dimension `D`.
    y : array, (`M`,)
        `M` target values.
    err_y : array, (`M`,) or scalar float, optional
        Non-negative values only. Error (given as standard deviation) in the
        `M` target values. If `err_y` is a scalar, the data set is taken to
        be homoscedastic (constant error). Otherwise, the length of `err_y`
        must equal the length of `y`. Default value is 0 (noiseless
        observations).
    n : array, (`M`, `D`) or scalar float, optional
        Non-negative integer values only. Degree of derivative for each
        target. If `n` is a scalar it is taken to be the value for all
        points in `y`. Otherwise, the length of n must equal the length of
        `y`. Default value is 0 (observation of target value). If
        non-integer values are passed, they will be silently rounded.
    T : array, (`M`, `N`), optional
        Linear transformation to get from latent variables to data in the
        argument `y`. When `T` is passed the argument `y` holds the
        transformed quantities `y=TY(X)` where `y` are the observed values
        of the transformed quantities, `T` is the transformation matrix and
        `Y(X)` is the underlying (untransformed) values of the function to
        be fit that enter into the transformation. When `T` is `M`-by-`N`
        and `y` has `M` elements, `X` and `n` will both be `N`-by-`D`.
        Default is None (no transformation).
    Raises
    ------
    ValueError
        Bad shapes for any of the inputs, negative values for `err_y` or `n`.
    """
    # Verify y has only one non-trivial dimension:
    y = scipy.atleast_1d(scipy.asarray(y, dtype=float))
    if len(y.shape) != 1:
        raise ValueError(
            "Training targets y must have only one dimension with length "
            "greater than one! Shape of y given is %s" % (y.shape,)
        )
    # Handle scalar error or verify shape of array error matches shape of y:
    # (iter() raising TypeError is used to detect a scalar.)
    try:
        iter(err_y)
    except TypeError:
        err_y = err_y * scipy.ones_like(y, dtype=float)
    else:
        err_y = scipy.asarray(err_y, dtype=float)
        if err_y.shape != y.shape:
            raise ValueError(
                "When using array-like err_y, shape must match shape of y! "
                "Shape of err_y given is %s, shape of y given is %s." % (err_y.shape, y.shape)
            )
    if (err_y < 0).any():
        raise ValueError("All elements of err_y must be non-negative!")
    # Handle scalar training input or convert array input into 2d.
    X = scipy.atleast_2d(scipy.asarray(X, dtype=float))
    # Correct single-dimension inputs:
    # For a 1-D problem, a flat input arrives as a (1, M) row; transpose it
    # to the expected (M, 1) column of points.
    if self.num_dim == 1 and X.shape[0] == 1:
        X = X.T
    if T is None and X.shape != (len(y), self.num_dim):
        raise ValueError(
            "Shape of training inputs must be (len(y), k.num_dim)! X given "
            "has shape %s, shape of y is %s and num_dim=%d." % (X.shape, y.shape, self.num_dim)
        )
    # Handle scalar derivative orders or verify shape of array derivative
    # orders matches shape of y:
    try:
        iter(n)
    except TypeError:
        # Scalar n: broadcast the same derivative order to every point/dim.
        n = n * scipy.ones_like(X, dtype=int)
    else:
        n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
        # Correct single-dimension inputs:
        if self.num_dim == 1 and n.shape[1] != 1:
            n = n.T
        if n.shape != X.shape:
            raise ValueError(
                "When using array-like n, shape must be (len(y), k.num_dim)! "
                "Shape of n given is %s, shape of y given is %s and num_dim=%d."
                % (n.shape, y.shape, self.num_dim)
            )
    if (n < 0).any():
        raise ValueError("All elements of n must be non-negative integers!")
    # Handle transform:
    # If earlier data was transformed but the new data is not, pad the new
    # data with an identity transform so the stored T stays consistent.
    if T is None and self.T is not None:
        T = scipy.eye(len(y))
    if T is not None:
        T = scipy.atleast_2d(scipy.asarray(T, dtype=float))
        if T.ndim != 2:
            raise ValueError("T must have exactly 2 dimensions!")
        if T.shape[0] != len(y):
            raise ValueError(
                "T must have as many rows are there are elements in y!"
            )
        if T.shape[1] != X.shape[0]:
            raise ValueError(
                "There must be as many columns in T as there are rows in X!"
            )
        # Conversely, retrofit an identity transform onto previously-stored
        # untransformed data before appending the new transform.
        if self.T is None and self.X is not None:
            self.T = scipy.eye(len(self.y))
        if self.T is None:
            self.T = T
        else:
            # New quadrature points are independent of the old ones, so the
            # combined transform is block-diagonal.
            self.T = scipy.linalg.block_diag(self.T, T)
    if self.X is None:
        self.X = X
    else:
        self.X = scipy.vstack((self.X, X))
    self.y = scipy.append(self.y, y)
    self.err_y = scipy.append(self.err_y, err_y)
    if self.n is None:
        self.n = n
    else:
        self.n = scipy.vstack((self.n, n))
    # The cached kernel matrix no longer reflects the data set.
    self.K_up_to_date = False
Condense duplicate points using a transformation matrix.
This is useful if you have multiple non-transformed points at the same
location or multiple transformed points that use the same quadrature
points.
Won't change the GP if all of the rows of [X, n] are unique. Will create
a transformation matrix T if necessary. Note that the order of the
points in [X, n] will be arbitrary after this operation.
If there are any transformed quantities (i.e., `self.T` is not None), it
will also remove any quadrature points for which all of the weights are
zero (even if all of the rows of [X, n] are unique).
def condense_duplicates(self):
    """Condense duplicate points using a transformation matrix.
    This is useful if you have multiple non-transformed points at the same
    location or multiple transformed points that use the same quadrature
    points.
    Won't change the GP if all of the rows of [X, n] are unique. Will create
    a transformation matrix T if necessary. Note that the order of the
    points in [X, n] will be arbitrary after this operation.
    If there are any transformed quantities (i.e., `self.T` is not None), it
    will also remove any quadrature points for which all of the weights are
    zero (even if all of the rows of [X, n] are unique).
    """
    # unique_rows collapses identical (X, n) rows; `inv` maps each original
    # row index to its position in `unique`.
    unique, inv = unique_rows(
        scipy.hstack((self.X, self.n)),
        return_inverse=True
    )
    # Only proceed if there is anything to be gained:
    if len(unique) != len(self.X):
        if self.T is None:
            self.T = scipy.eye(len(self.y))
        # Merge the weight columns of T that pointed at duplicate rows:
        new_T = scipy.zeros((len(self.y), unique.shape[0]))
        for j in xrange(0, len(inv)):
            new_T[:, inv[j]] += self.T[:, j]
        self.T = new_T
        # unique holds [X | n] side by side; split it back apart.
        self.n = unique[:, self.X.shape[1]:]
        self.X = unique[:, :self.X.shape[1]]
    # Also remove any points which don't enter into the calculation:
    if self.T is not None:
        # Find the columns of T which actually enter in:
        # Recall that T is (n, n_Q), X is (n_Q, n_dim).
        good_cols = (self.T != 0.0).any(axis=0)
        self.T = self.T[:, good_cols]
        self.X = self.X[good_cols, :]
        self.n = self.n[good_cols, :]
    # NOTE(review): K_up_to_date is not reset here even though the stored
    # data changed — confirm callers refit/invalidate afterwards.
Remove outliers from the GP with very simplistic outlier detection.
Removes points that are more than `thresh` * `err_y` away from the GP
mean. Note that this is only very rough in that it ignores the
uncertainty in the GP mean at any given point. But you should only be
using this as a rough way of removing bad channels, anyways!
Returns the values that were removed and a boolean array indicating
where the removed points were.
Parameters
----------
thresh : float, optional
The threshold as a multiplier times `err_y`. Default is 3 (i.e.,
throw away all 3-sigma points).
**predict_kwargs : optional kwargs
All additional kwargs are passed to :py:meth:`predict`. You can, for
instance, use this to make it use MCMC to evaluate the mean. (If you
don't use MCMC, then the current value of the hyperparameters is
used.)
Returns
-------
X_bad : array
Input values of the bad points.
y_bad : array
Bad values.
err_y_bad : array
Uncertainties on the bad values.
n_bad : array
Derivative order of the bad values.
bad_idxs : array
Array of booleans with the original shape of X with True wherever a
point was taken to be bad and subsequently removed.
T_bad : array
Transformation matrix of returned points. Only returned if
:py:attr:`T` is not None for the instance.
def remove_outliers(self, thresh=3, **predict_kwargs):
    """Remove outliers from the GP with very simplistic outlier detection.
    Removes points that are more than `thresh` * `err_y` away from the GP
    mean. Note that this is only very rough in that it ignores the
    uncertainty in the GP mean at any given point. But you should only be
    using this as a rough way of removing bad channels, anyways!
    Returns the values that were removed and a boolean array indicating
    where the removed points were.
    Parameters
    ----------
    thresh : float, optional
        The threshold as a multiplier times `err_y`. Default is 3 (i.e.,
        throw away all 3-sigma points).
    **predict_kwargs : optional kwargs
        All additional kwargs are passed to :py:meth:`predict`. You can, for
        instance, use this to make it use MCMC to evaluate the mean. (If you
        don't use MCMC, then the current value of the hyperparameters is
        used.)
    Returns
    -------
    X_bad : array
        Input values of the bad points.
    y_bad : array
        Bad values.
    err_y_bad : array
        Uncertainties on the bad values.
    n_bad : array
        Derivative order of the bad values.
    bad_idxs : array
        Array of booleans with the original shape of X with True wherever a
        point was taken to be bad and subsequently removed.
    T_bad : array
        Transformation matrix of returned points. Only returned if
        :py:attr:`T` is not None for the instance.
    """
    # Predict the GP mean at the training points themselves (applying the
    # stored transform so it is comparable to the observed y).
    mean = self.predict(
        self.X, n=self.n, noise=False, return_std=False,
        output_transform=self.T, **predict_kwargs
    )
    # Deviation from the mean, normalized by the reported uncertainty.
    deltas = scipy.absolute(mean - self.y) / self.err_y
    # Points with zero uncertainty divide by zero above; force them to be
    # kept rather than letting inf/nan flag them.
    deltas[self.err_y == 0] = 0
    bad_idxs = (deltas >= thresh)
    good_idxs = ~bad_idxs
    # Pull out the old values so they can be returned:
    y_bad = self.y[bad_idxs]
    err_y_bad = self.err_y[bad_idxs]
    if self.T is not None:
        T_bad = self.T[bad_idxs, :]
        # NOTE(review): this keeps columns where *every* bad row is
        # non-zero (.all), whereas condense_duplicates keeps columns where
        # *any* row is non-zero (.any) — confirm the asymmetry is intended.
        non_zero_cols = (T_bad != 0).all(axis=0)
        T_bad = T_bad[:, non_zero_cols]
        X_bad = self.X[non_zero_cols, :]
        n_bad = self.n[non_zero_cols, :]
    else:
        X_bad = self.X[bad_idxs, :]
        n_bad = self.n[bad_idxs, :]
    # Delete the offending points:
    if self.T is None:
        self.X = self.X[good_idxs, :]
        self.n = self.n[good_idxs, :]
    else:
        # Drop the bad rows of T, then drop quadrature points that no
        # longer contribute to any remaining row.
        self.T = self.T[good_idxs, :]
        non_zero_cols = (self.T != 0).all(axis=0)
        self.T = self.T[:, non_zero_cols]
        self.X = self.X[non_zero_cols, :]
        self.n = self.n[non_zero_cols, :]
    self.y = self.y[good_idxs]
    self.err_y = self.err_y[good_idxs]
    # Data set changed, so the cached kernel matrix is stale.
    self.K_up_to_date = False
    if self.T is None:
        return (X_bad, y_bad, err_y_bad, n_bad, bad_idxs)
    else:
        return (X_bad, y_bad, err_y_bad, n_bad, bad_idxs, T_bad)
r"""Optimize the hyperparameters by maximizing the log-posterior.
Leaves the :py:class:`GaussianProcess` instance in the optimized state.
If :py:func:`scipy.optimize.minimize` is not available (i.e., if your
:py:mod:`scipy` version is older than 0.11.0) then :py:func:`fmin_slsqp`
is used independent of what you set for the `method` keyword.
If :py:attr:`use_hyper_deriv` is True the optimizer will attempt to use
the derivatives of the log-posterior with respect to the hyperparameters
to speed up the optimization. Note that only the squared exponential
covariance kernel supports hyperparameter derivatives at present.
Parameters
----------
method : str, optional
The method to pass to :py:func:`scipy.optimize.minimize`.
Refer to that function's docstring for valid options. Default
is 'SLSQP'. See note above about behavior with older versions of
:py:mod:`scipy`.
opt_kwargs : dict, optional
Dictionary of extra keywords to pass to
:py:func:`scipy.optimize.minimize`. Refer to that function's
docstring for valid options. Default is: {}.
verbose : bool, optional
Whether or not the output should be verbose. If True, the entire
:py:class:`Result` object from :py:func:`scipy.optimize.minimize` is
printed. If False, status information is only printed if the
`success` flag from :py:func:`minimize` is False. Default is False.
random_starts : non-negative int, optional
Number of times to randomly perturb the starting guesses
(distributed according to the hyperprior) in order to seek the
global minimum. If None, then `num_proc` random starts will be
performed. Default is None (do number of random starts equal to the
number of processors allocated). Note that for `random_starts` != 0,
the initial state of the hyperparameters is not actually used.
num_proc : non-negative int or None, optional
Number of processors to use with random starts. If 0, processing is
not done in parallel. If None, all available processors are used.
Default is None (use all available processors).
max_tries : int, optional
Number of times to run through the random start procedure if a
solution is not found. Default is to only go through the procedure
once.
def optimize_hyperparameters(self, method='SLSQP', opt_kwargs={},
                             verbose=False, random_starts=None,
                             num_proc=None, max_tries=1):
    r"""Optimize the hyperparameters by maximizing the log-posterior.
    Leaves the :py:class:`GaussianProcess` instance in the optimized state.
    If :py:func:`scipy.optimize.minimize` is not available (i.e., if your
    :py:mod:`scipy` version is older than 0.11.0) then :py:func:`fmin_slsqp`
    is used independent of what you set for the `method` keyword.
    If :py:attr:`use_hyper_deriv` is True the optimizer will attempt to use
    the derivatives of the log-posterior with respect to the hyperparameters
    to speed up the optimization. Note that only the squared exponential
    covariance kernel supports hyperparameter derivatives at present.
    Parameters
    ----------
    method : str, optional
        The method to pass to :py:func:`scipy.optimize.minimize`.
        Refer to that function's docstring for valid options. Default
        is 'SLSQP'. See note above about behavior with older versions of
        :py:mod:`scipy`.
    opt_kwargs : dict, optional
        Dictionary of extra keywords to pass to
        :py:func:`scipy.optimize.minimize`. Refer to that function's
        docstring for valid options. Default is: {}.
    verbose : bool, optional
        Whether or not the output should be verbose. If True, the entire
        :py:class:`Result` object from :py:func:`scipy.optimize.minimize` is
        printed. If False, status information is only printed if the
        `success` flag from :py:func:`minimize` is False. Default is False.
    random_starts : non-negative int, optional
        Number of times to randomly perturb the starting guesses
        (distributed according to the hyperprior) in order to seek the
        global minimum. If None, then `num_proc` random starts will be
        performed. Default is None (do number of random starts equal to the
        number of processors allocated). Note that for `random_starts` != 0,
        the initial state of the hyperparameters is not actually used.
    num_proc : non-negative int or None, optional
        Number of processors to use with random starts. If 0, processing is
        not done in parallel. If None, all available processors are used.
        Default is None (use all available processors).
    max_tries : int, optional
        Number of times to run through the random start procedure if a
        solution is not found. Default is to only go through the procedure
        once.
    """
    # Copy opt_kwargs so the caller's dict (and the mutable default) is
    # never modified in place.
    if opt_kwargs is None:
        opt_kwargs = {}
    else:
        opt_kwargs = dict(opt_kwargs)
    if 'method' in opt_kwargs:
        method = opt_kwargs['method']
        if self.verbose:
            warnings.warn(
                "Key 'method' is present in opt_kwargs, will override option "
                "specified with method kwarg.",
                RuntimeWarning
            )
    else:
        opt_kwargs['method'] = method
    if num_proc is None:
        num_proc = multiprocessing.cpu_count()
    param_ranges = scipy.asarray(self.free_param_bounds, dtype=float)
    # Replace unbounded variables with something big:
    param_ranges[scipy.where(scipy.isnan(param_ranges[:, 0])), 0] = -1e16
    param_ranges[scipy.where(scipy.isnan(param_ranges[:, 1])), 1] = 1e16
    param_ranges[scipy.where(scipy.isinf(param_ranges[:, 0])), 0] = -1e16
    param_ranges[scipy.where(scipy.isinf(param_ranges[:, 1])), 1] = 1e16
    if random_starts == 0:
        # No random starts: run a single optimization from the current
        # hyperparameter state, serially.
        num_proc = 0
        param_samples = [self.free_params[:]]
    else:
        if random_starts is None:
            random_starts = max(num_proc, 1)
        # Distribute random guesses according to the hyperprior:
        param_samples = self.hyperprior.random_draw(size=random_starts).T
        # Drop the entries corresponding to fixed hyperparameters.
        param_samples = param_samples[:, ~self.fixed_params]
    if 'bounds' not in opt_kwargs:
        opt_kwargs['bounds'] = param_ranges
    if self.use_hyper_deriv:
        # Tell minimize the objective also returns the gradient.
        opt_kwargs['jac'] = True
    trial = 0
    res_min = None
    # Repeat the (possibly parallel) multi-start optimization until one
    # start converges or max_tries is exhausted.
    while trial < max_tries and res_min is None:
        if trial >= 1:
            if self.verbose:
                warnings.warn(
                    "No solutions found on trial %d, retrying random starts." % (trial - 1,),
                    RuntimeWarning
                )
            # Produce a new initial guess:
            if random_starts != 0:
                param_samples = self.hyperprior.random_draw(size=random_starts).T
                param_samples = param_samples[:, ~self.fixed_params]
        trial += 1
        if num_proc > 1:
            pool = InterruptiblePool(processes=num_proc)
            map_fun = pool.map
        else:
            map_fun = map
        try:
            res = map_fun(
                _OptimizeHyperparametersEval(self, opt_kwargs),
                param_samples
            )
        finally:
            # Always release worker processes, even if a start raised.
            if num_proc > 1:
                pool.close()
        # Filter out the failed convergences:
        res = [r for r in res if r is not None]
        try:
            # Best start = smallest objective (negative log-posterior).
            res_min = min(res, key=lambda r: r.fun)
            if scipy.isnan(res_min.fun) or scipy.isinf(res_min.fun):
                res_min = None
        except ValueError:
            # `res` was empty: no start converged on this trial.
            res_min = None
    if res_min is None:
        raise ValueError(
            "Optimizer failed to find a valid solution. Try changing the "
            "parameter bounds, picking a new initial guess or increasing the "
            "number of random starts."
        )
    self.update_hyperparameters(res_min.x)
    if verbose:
        print("Got %d completed starts, optimal result is:" % (len(res),))
        print(res_min)
        print("\nLL\t%.3g" % (-1 * res_min.fun))
        for v, l in zip(res_min.x, self.free_param_names):
            # Strip backslashes (LaTeX escapes) from the parameter name;
            # this is the Python 2 str.translate signature.
            print("%s\t%.3g" % (l.translate(None, '\\'), v))
    if not res_min.success:
        warnings.warn(
            "Optimizer %s reports failure, selected hyperparameters are "
            "likely NOT optimal. Status: %d, Message: '%s'. Try adjusting "
            "bounds, initial guesses or the number of random starts used."
            % (
                method,
                res_min.status,
                res_min.message
            ),
            RuntimeWarning
        )
    bounds = scipy.asarray(self.free_param_bounds)
    # Augment the bounds a little bit to catch things that are one step away:
    if ((res_min.x <= 1.001 * bounds[:, 0]).any() or
            (res_min.x >= 0.999 * bounds[:, 1]).any()):
        warnings.warn(
            "Optimizer appears to have hit/exceeded the bounds. Bounds are:\n"
            "%s\n, solution is:\n%s. Try adjusting bounds, initial guesses "
            "or the number of random starts used."
            % (str(bounds), str(res_min.x),)
        )
    return (res_min, len(res))
Predict the mean and covariance at the inputs `Xstar`.
The order of the derivative is given by `n`. The keyword `noise` sets
whether or not noise is included in the prediction.
Parameters
----------
Xstar : array, (`M`, `D`)
`M` test input values of dimension `D`.
n : array, (`M`, `D`) or scalar, non-negative int, optional
Order of derivative to predict (0 is the base quantity). If `n` is
scalar, the value is used for all points in `Xstar`. If non-integer
values are passed, they will be silently rounded. Default is 0
(return base quantity).
noise : bool, optional
Whether or not noise should be included in the covariance. Default
is False (no noise in covariance).
return_std : bool, optional
Set to True to compute and return the standard deviation for the
predictions, False to skip this step. Default is True (return tuple
of (`mean`, `std`)).
return_cov : bool, optional
Set to True to compute and return the full covariance matrix for the
predictions. This overrides the `return_std` keyword. If you want
both the standard deviation and covariance matrix pre-computed, use
the `full_output` keyword.
full_output : bool, optional
Set to True to return the full outputs in a dictionary with keys:
================= ===========================================================================
mean mean of GP at requested points
std standard deviation of GP at requested points
cov covariance matrix for values of GP at requested points
samp random samples of GP at requested points (only if `return_samples` is True)
mean_func mean function of GP (only if `return_mean_func` is True)
cov_func covariance of mean function of GP (zero if not using MCMC)
std_func standard deviation of mean function of GP (zero if not using MCMC)
mean_without_func mean of GP minus mean function of GP
cov_without_func covariance matrix of just the GP portion of the fit
std_without_func standard deviation of just the GP portion of the fit
================= ===========================================================================
return_samples : bool, optional
Set to True to compute and return samples of the GP in addition to
computing the mean. Only done if `full_output` is True. Default is
False.
num_samples : int, optional
Number of samples to compute. If using MCMC this is the number of
samples per MCMC sample, if using present values of hyperparameters
this is the number of samples actually returned. Default is 1.
samp_kwargs : dict, optional
Additional keywords to pass to :py:meth:`draw_sample` if
`return_samples` is True. Default is {}.
return_mean_func : bool, optional
Set to True to return the evaluation of the mean function in
addition to computing the mean of the process itself. Only done if
`full_output` is True and `self.mu` is not None. Default is False.
use_MCMC : bool, optional
Set to True to use :py:meth:`predict_MCMC` to evaluate the
prediction marginalized over the hyperparameters.
full_MC : bool, optional
Set to True to compute the mean and covariance matrix using Monte
Carlo sampling of the posterior. The samples will also be returned
if full_output is True. The sample mean and covariance will be
evaluated after filtering through `rejection_func`, so conditional
means and covariances can be computed. Default is False (do not use
full sampling).
rejection_func : callable, optional
Any samples where this function evaluates False will be rejected,
where it evaluates True they will be kept. Default is None (no
rejection). Only has an effect if `full_MC` is True.
ddof : int, optional
The degree of freedom correction to use when computing the covariance
matrix when `full_MC` is True. Default is 1 (unbiased estimator).
output_transform : array, (`L`, `M`), optional
Matrix to use to transform the output vector of length `M` to one of
length `L`. This can, for instance, be used to compute integrals.
**kwargs : optional kwargs
All additional kwargs are passed to :py:meth:`predict_MCMC` if
`use_MCMC` is True.
Returns
-------
mean : array, (`M`,)
Predicted GP mean. Only returned if `full_output` is False.
std : array, (`M`,)
Predicted standard deviation, only returned if `return_std` is True, `return_cov` is False and `full_output` is False.
cov : array, (`M`, `M`)
Predicted covariance matrix, only returned if `return_cov` is True and `full_output` is False.
full_output : dict
Dictionary with fields for mean, std, cov and possibly random samples and the mean function. Only returned if `full_output` is True.
Raises
------
ValueError
If `n` is not consistent with the shape of `Xstar` or is not entirely
composed of non-negative integers.
def predict(self, Xstar, n=0, noise=False, return_std=True, return_cov=False,
            full_output=False, return_samples=False, num_samples=1,
            samp_kwargs=None, return_mean_func=False, use_MCMC=False,
            full_MC=False, rejection_func=None, ddof=1, output_transform=None,
            **kwargs):
    """Predict the mean and covariance at the inputs `Xstar`.

    The order of the derivative is given by `n`. The keyword `noise` sets
    whether or not noise is included in the prediction.

    Parameters
    ----------
    Xstar : array, (`M`, `D`)
        `M` test input values of dimension `D`.
    n : array, (`M`, `D`) or scalar, non-negative int, optional
        Order of derivative to predict (0 is the base quantity). If `n` is
        scalar, the value is used for all points in `Xstar`. If non-integer
        values are passed, they will be silently truncated towards zero.
        Default is 0 (return base quantity).
    noise : bool, optional
        Whether or not noise should be included in the covariance. Default
        is False (no noise in covariance).
    return_std : bool, optional
        Set to True to compute and return the standard deviation for the
        predictions, False to skip this step. Default is True (return tuple
        of (`mean`, `std`)).
    return_cov : bool, optional
        Set to True to compute and return the full covariance matrix for the
        predictions. This overrides the `return_std` keyword. If you want
        both the standard deviation and covariance matrix pre-computed, use
        the `full_output` keyword.
    full_output : bool, optional
        Set to True to return the full outputs in a dictionary with keys:

        ================= ===========================================================================
        mean              mean of GP at requested points
        std               standard deviation of GP at requested points
        cov               covariance matrix for values of GP at requested points
        samp              random samples of GP at requested points (only if `return_samples` is True)
        mean_func         mean function of GP (only if `return_mean_func` is True)
        cov_func          covariance of mean function of GP (zero if not using MCMC)
        std_func          standard deviation of mean function of GP (zero if not using MCMC)
        mean_without_func mean of GP minus mean function of GP
        cov_without_func  covariance matrix of just the GP portion of the fit
        std_without_func  standard deviation of just the GP portion of the fit
        ================= ===========================================================================
    return_samples : bool, optional
        Set to True to compute and return samples of the GP in addition to
        computing the mean. Only done if `full_output` is True. Default is
        False.
    num_samples : int, optional
        Number of samples to compute. If using MCMC this is the number of
        samples per MCMC sample, if using present values of hyperparameters
        this is the number of samples actually returned. Default is 1.
    samp_kwargs : dict, optional
        Additional keywords to pass to :py:meth:`draw_sample` if
        `return_samples` is True. Default is None (no additional keywords).
    return_mean_func : bool, optional
        Set to True to return the evaluation of the mean function in
        addition to computing the mean of the process itself. Only done if
        `full_output` is True and `self.mu` is not None. Default is False.
    use_MCMC : bool, optional
        Set to True to use :py:meth:`predict_MCMC` to evaluate the
        prediction marginalized over the hyperparameters.
    full_MC : bool, optional
        Set to True to compute the mean and covariance matrix using Monte
        Carlo sampling of the posterior. The samples will also be returned
        if full_output is True. The sample mean and covariance will be
        evaluated after filtering through `rejection_func`, so conditional
        means and covariances can be computed. Default is False (do not use
        full sampling).
    rejection_func : callable, optional
        Any samples where this function evaluates False will be rejected,
        where it evaluates True they will be kept. Default is None (no
        rejection). Only has an effect if `full_MC` is True.
    ddof : int, optional
        The degree of freedom correction to use when computing the covariance
        matrix when `full_MC` is True. Default is 1 (unbiased estimator).
    output_transform : array, (`L`, `M`), optional
        Matrix to use to transform the output vector of length `M` to one of
        length `L`. This can, for instance, be used to compute integrals.
    **kwargs : optional kwargs
        All additional kwargs are passed to :py:meth:`predict_MCMC` if
        `use_MCMC` is True.

    Returns
    -------
    mean : array, (`M`,)
        Predicted GP mean. Only returned if `full_output` is False.
    std : array, (`M`,)
        Predicted standard deviation, only returned if `return_std` is True,
        `return_cov` is False and `full_output` is False.
    cov : array, (`M`, `M`)
        Predicted covariance matrix, only returned if `return_cov` is True
        and `full_output` is False.
    full_output : dict
        Dictionary with fields for mean, std, cov and possibly random samples
        and the mean function. Only returned if `full_output` is True.

    Raises
    ------
    ValueError
        If `n` is not consistent with the shape of `Xstar` or is not entirely
        composed of non-negative integers.
    """
    # None-sentinel instead of a mutable default dict so a single dict is
    # not shared between calls:
    if samp_kwargs is None:
        samp_kwargs = {}
    if use_MCMC:
        res = self.predict_MCMC(
            Xstar,
            n=n,
            noise=noise,
            return_std=return_std or full_output,
            return_cov=return_cov or full_output,
            return_samples=full_output and (return_samples or rejection_func),
            return_mean_func=full_output and return_mean_func,
            num_samples=num_samples,
            samp_kwargs=samp_kwargs,
            full_MC=full_MC,
            rejection_func=rejection_func,
            ddof=ddof,
            output_transform=output_transform,
            **kwargs
        )
        if full_output:
            return res
        elif return_cov:
            return (res['mean'], res['cov'])
        elif return_std:
            return (res['mean'], res['std'])
        else:
            return res['mean']
    else:
        # Process Xstar. (NumPy is used directly here: SciPy's top-level
        # NumPy aliases such as scipy.atleast_2d were removed in SciPy 1.12.)
        Xstar = numpy.atleast_2d(numpy.asarray(Xstar, dtype=float))
        # Handle 1d x case where array is passed in:
        if self.num_dim == 1 and Xstar.shape[0] == 1:
            Xstar = Xstar.T
        if Xstar.shape[1] != self.num_dim:
            raise ValueError(
                "Second dimension of Xstar must be equal to self.num_dim! "
                "Shape of Xstar given is %s, num_dim is %d."
                % (Xstar.shape, self.num_dim)
            )
        # Process the optional output transformation matrix T:
        if output_transform is not None:
            output_transform = numpy.atleast_2d(numpy.asarray(output_transform, dtype=float))
            if output_transform.ndim != 2:
                raise ValueError(
                    "output_transform must have exactly 2 dimensions! Shape "
                    "of output_transform given is %s."
                    % (output_transform.shape,)
                )
            if output_transform.shape[1] != Xstar.shape[0]:
                raise ValueError(
                    "output_transform must have the same number of columns "
                    "as the number of rows in Xstar! Shape of output_transform "
                    "given is %s, shape of Xstar is %s."
                    % (output_transform.shape, Xstar.shape,)
                )
        # Process n: scalars are broadcast to the shape of Xstar, arrays
        # must match Xstar's shape exactly.
        try:
            iter(n)
        except TypeError:
            n = n * numpy.ones(Xstar.shape, dtype=int)
        else:
            n = numpy.atleast_2d(numpy.asarray(n, dtype=int))
            if self.num_dim == 1 and n.shape[0] == 1:
                n = n.T
            if n.shape != Xstar.shape:
                raise ValueError(
                    "When using array-like n, shape must match shape of Xstar! "
                    "Shape of n given is %s, shape of Xstar given is %s."
                    % (n.shape, Xstar.shape)
                )
        if (n < 0).any():
            raise ValueError("All elements of n must be non-negative integers!")
        # Make sure K, L, alpha and the log-likelihood reflect the current
        # hyperparameters before predicting:
        self.compute_K_L_alpha_ll()
        Kstar = self.compute_Kij(self.X, Xstar, self.n, n)
        if noise:
            Kstar = Kstar + self.compute_Kij(self.X, Xstar, self.n, n, noise=True)
        if self.T is not None:
            Kstar = self.T.dot(Kstar)
        mean = Kstar.T.dot(self.alpha)
        if self.mu is not None:
            # Add the explicit mean function back onto the GP prediction:
            mean_func = numpy.atleast_2d(self.mu(Xstar, n)).T
            mean += mean_func
        if output_transform is not None:
            mean = output_transform.dot(mean)
            if return_mean_func and self.mu is not None:
                mean_func = output_transform.dot(mean_func)
        mean = mean.ravel()
        if return_mean_func and self.mu is not None:
            mean_func = mean_func.ravel()
        if return_std or return_cov or full_output or full_MC:
            # Standard GP posterior covariance: Kss - v^T v with v = L^{-1} Ks:
            v = scipy.linalg.solve_triangular(self.L, Kstar, lower=True)
            Kstarstar = self.compute_Kij(Xstar, None, n, None)
            if noise:
                Kstarstar = Kstarstar + self.compute_Kij(Xstar, None, n, None, noise=True)
            covariance = Kstarstar - v.T.dot(v)
            if output_transform is not None:
                covariance = output_transform.dot(covariance.dot(output_transform.T))
            if return_samples or full_MC:
                samps = self.draw_sample(
                    Xstar, n=n, num_samp=num_samples, mean=mean,
                    cov=covariance, **samp_kwargs
                )
                if rejection_func:
                    good_samps = []
                    for samp in samps.T:
                        if rejection_func(samp):
                            good_samps.append(samp)
                    if len(good_samps) == 0:
                        raise ValueError("Did not get any good samples!")
                    samps = numpy.asarray(good_samps, dtype=float).T
                if full_MC:
                    # Replace the analytic moments with (possibly rejection-
                    # filtered) sample moments:
                    mean = numpy.mean(samps, axis=1)
                    covariance = numpy.cov(samps, rowvar=1, ddof=ddof)
            std = numpy.sqrt(numpy.diagonal(covariance))
            if full_output:
                out = {
                    'mean': mean,
                    'std': std,
                    'cov': covariance
                }
                if return_samples or full_MC:
                    out['samp'] = samps
                if return_mean_func and self.mu is not None:
                    out['mean_func'] = mean_func
                    # The mean function is deterministic when not
                    # marginalizing over hyperparameters, so its covariance
                    # is identically zero here:
                    out['cov_func'] = numpy.zeros(
                        (len(mean_func), len(mean_func)),
                        dtype=float
                    )
                    out['std_func'] = numpy.zeros_like(mean_func)
                    out['mean_without_func'] = mean - mean_func
                    out['cov_without_func'] = covariance
                    out['std_without_func'] = std
                return out
            else:
                if return_cov:
                    return (mean, covariance)
                elif return_std:
                    return (mean, std)
                else:
                    return mean
        else:
            return mean
Plots the Gaussian process using the current hyperparameters. Only for num_dim <= 2.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`), optional
The values to evaluate the Gaussian process at. If None, then 100
points between the minimum and maximum of the data's X are used for
a univariate Gaussian process and a 50x50 grid is used for a
bivariate Gaussian process. Default is None (use 100 points between
min and max).
n : int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
ax : axis instance, optional
Axis to plot the result on. If no axis is passed, one is created.
If the string 'gca' is passed, the current axis (from plt.gca())
is used. If X_dim = 2, the axis must be 3d.
envelopes : list of float, optional
+/-n*sigma envelopes to plot. Default is [1, 3].
base_alpha : float, optional
Alpha value to use for +/-1*sigma envelope. All other envelopes `env`
are drawn with `base_alpha`/`env`. Default is 0.375.
return_prediction : bool, optional
If True, the predicted values are also returned. Default is False.
return_std : bool, optional
If True, the standard deviation is computed and returned along with
the mean when `return_prediction` is True. Default is True.
full_output : bool, optional
Set to True to return the full outputs in a dictionary with keys:
==== ==========================================================================
mean mean of GP at requested points
std standard deviation of GP at requested points
cov covariance matrix for values of GP at requested points
samp random samples of GP at requested points (only if `return_sample` is True)
==== ==========================================================================
plot_kwargs : dict, optional
The entries in this dictionary are passed as kwargs to the plotting
command used to plot the mean. Use this to, for instance, change the
color, line width and line style.
**kwargs : extra arguments for predict, optional
Extra arguments that are passed to :py:meth:`predict`.
Returns
-------
ax : axis instance
The axis instance used.
mean : :py:class:`Array`, (`M`,)
Predicted GP mean. Only returned if `return_prediction` is True and `full_output` is False.
std : :py:class:`Array`, (`M`,)
Predicted standard deviation, only returned if `return_prediction` and `return_std` are True and `full_output` is False.
full_output : dict
Dictionary with fields for mean, std, cov and possibly random samples. Only returned if `return_prediction` and `full_output` are True.
def plot(self, X=None, n=0, ax=None, envelopes=None, base_alpha=0.375,
         return_prediction=False, return_std=True, full_output=False,
         plot_kwargs=None, **kwargs):
    """Plots the Gaussian process using the current hyperparameters. Only for num_dim <= 2.

    Parameters
    ----------
    X : array-like (`M`,) or (`M`, `num_dim`), optional
        The values to evaluate the Gaussian process at. If None, then 100
        points between the minimum and maximum of the data's X are used for
        a univariate Gaussian process and a 50x50 grid is used for a
        bivariate Gaussian process. Default is None (use 100 points between
        min and max).
    n : int or list, optional
        The order of derivative to compute. For num_dim=1, this must be an
        int. For num_dim=2, this must be a list of ints of length 2.
        Default is 0 (don't take derivative).
    ax : axis instance, optional
        Axis to plot the result on. If no axis is passed, one is created.
        If the string 'gca' is passed, the current axis (from plt.gca())
        is used. If X_dim = 2, the axis must be 3d.
    envelopes : list of float, optional
        +/-n*sigma envelopes to plot. Default is None (plot the [1, 3]
        envelopes).
    base_alpha : float, optional
        Alpha value to use for +/-1*sigma envelope. All other envelopes `env`
        are drawn with `base_alpha`/`env`. Default is 0.375.
    return_prediction : bool, optional
        If True, the predicted values are also returned. Default is False.
    return_std : bool, optional
        If True, the standard deviation is computed and returned along with
        the mean when `return_prediction` is True. Default is True.
    full_output : bool, optional
        Set to True to return the full outputs in a dictionary with keys:

        ==== ==========================================================================
        mean mean of GP at requested points
        std  standard deviation of GP at requested points
        cov  covariance matrix for values of GP at requested points
        samp random samples of GP at requested points (only if `return_sample` is True)
        ==== ==========================================================================
    plot_kwargs : dict, optional
        The entries in this dictionary are passed as kwargs to the plotting
        command used to plot the mean. Use this to, for instance, change the
        color, line width and line style.
    **kwargs : extra arguments for predict, optional
        Extra arguments that are passed to :py:meth:`predict`.

    Returns
    -------
    ax : axis instance
        The axis instance used.
    mean : :py:class:`Array`, (`M`,)
        Predicted GP mean. Only returned if `return_prediction` is True and
        `full_output` is False.
    std : :py:class:`Array`, (`M`,)
        Predicted standard deviation, only returned if `return_prediction`
        and `return_std` are True and `full_output` is False.
    full_output : dict
        Dictionary with fields for mean, std, cov and possibly random
        samples. Only returned if `return_prediction` and `full_output` are
        True.
    """
    # None-sentinels instead of mutable defaults so a single list/dict is
    # not shared between calls:
    if envelopes is None:
        envelopes = [1, 3]
    if plot_kwargs is None:
        plot_kwargs = {}
    if self.num_dim > 2:
        raise ValueError("Plotting is not supported for num_dim > 2!")
    # NumPy is used directly here: SciPy's top-level NumPy aliases such as
    # scipy.linspace were removed in SciPy 1.12.
    if self.num_dim == 1:
        if X is None:
            X = numpy.linspace(self.X.min(), self.X.max(), 100)
    elif self.num_dim == 2:
        if X is None:
            # Build a flattened 50x50 evaluation grid over the data range:
            x1 = numpy.linspace(self.X[:, 0].min(), self.X[:, 0].max(), 50)
            x2 = numpy.linspace(self.X[:, 1].min(), self.X[:, 1].max(), 50)
            X1, X2 = numpy.meshgrid(x1, x2)
            X1 = X1.flatten()
            X2 = X2.flatten()
            X = numpy.hstack((numpy.atleast_2d(X1).T, numpy.atleast_2d(X2).T))
        else:
            X1 = numpy.asarray(X[:, 0]).flatten()
            X2 = numpy.asarray(X[:, 1]).flatten()
    # Only pay for the standard deviation when it is actually needed:
    if envelopes or (return_prediction and (return_std or full_output)):
        out = self.predict(X, n=n, full_output=True, **kwargs)
        mean = out['mean']
        std = out['std']
    else:
        mean = self.predict(X, n=n, return_std=False, **kwargs)
        std = None
    if self.num_dim == 1:
        univariate_envelope_plot(
            X,
            mean,
            std,
            ax=ax,
            base_alpha=base_alpha,
            envelopes=envelopes,
            **plot_kwargs
        )
    elif self.num_dim == 2:
        if ax is None:
            f = plt.figure()
            ax = f.add_subplot(111, projection='3d')
        elif ax == 'gca':
            ax = plt.gca()
        # NOTE(review): the mean surface is drawn with `plot_kwargs` while
        # the envelope surfaces are drawn with `kwargs` (which were also
        # forwarded to predict above). This asymmetry is preserved from the
        # original implementation — confirm it is intentional.
        if 'linewidths' not in kwargs:
            kwargs['linewidths'] = 0
        ax.plot_trisurf(X1, X2, mean, **plot_kwargs)
        for i in envelopes:
            kwargs.pop('alpha', base_alpha)
            ax.plot_trisurf(X1, X2, mean - std, alpha=base_alpha / i, **kwargs)
            ax.plot_trisurf(X1, X2, mean + std, alpha=base_alpha / i, **kwargs)
    if return_prediction:
        if full_output:
            return (ax, out)
        elif return_std:
            return (ax, out['mean'], out['std'])
        else:
            # BUG FIX: previously returned out['mean'], but `out` is never
            # assigned when envelopes is empty and neither return_std nor
            # full_output is set, causing a NameError. `mean` is always
            # defined and equals out['mean'] whenever `out` exists.
            return (ax, mean)
    else:
        return ax
Draw a sample evaluated at the given points `Xstar`.
Note that this function draws samples from the GP given the current
values for the hyperparameters (which may be in a nonsense state if you
just created the instance or called a method that performs MCMC sampling).
If you want to draw random samples from MCMC output, use the
`return_samples` and `full_output` keywords to :py:meth:`predict`.
Parameters
----------
Xstar : array, (`M`, `D`)
`M` test input values of dimension `D`.
n : array, (`M`, `D`) or scalar, non-negative int, optional
Derivative order to evaluate at. Default is 0 (evaluate value).
noise : bool, optional
Whether or not to include the noise components of the kernel in the
sample. Default is False (no noise in samples).
num_samp : Positive int, optional
Number of samples to draw. Default is 1. Cannot be used in
conjunction with `rand_vars`: If you pass both `num_samp` and
`rand_vars`, `num_samp` will be silently ignored.
rand_vars : array, (`M`, `P`), optional
Vector of random variables :math:`u` to use in constructing the
sample :math:`y_* = f_* + Lu`, where :math:`K=LL^T`. If None,
values will be produced using
:py:func:`numpy.random.multivariate_normal`. This allows you to use
pseudo/quasi random numbers generated by an external routine. Note
that, when `method` is 'eig', the eigenvalues are in *ascending*
order.
Default is None (use :py:func:`multivariate_normal` directly).
rand_type : {'standard normal', 'uniform'}, optional
Type of distribution the inputs are given with.
* 'standard normal': Standard (`mu` = 0, `sigma` = 1) normal
distribution (this is the default)
* 'uniform': Uniform distribution on [0, 1). In this case
the required Gaussian variables are produced with inversion.
diag_factor : float, optional
Number (times machine epsilon) added to the diagonal of the
covariance matrix prior to computing its Cholesky decomposition.
This is necessary as sometimes the decomposition will fail because,
to machine precision, the matrix appears to not be positive definite.
If you are getting errors from :py:func:`scipy.linalg.cholesky`, try
increasing this an order of magnitude at a time. This parameter only
has an effect when using rand_vars. Default value is 1e3.
method : {'cholesky', 'eig'}, optional
Method to use for constructing the matrix square root. Default is
'cholesky' (use lower-triangular Cholesky decomposition).
* 'cholesky': Perform Cholesky decomposition on the covariance
matrix: :math:`K=LL^T`, use :math:`L` as the matrix square
root.
* 'eig': Perform an eigenvalue decomposition on the covariance
matrix: :math:`K=Q \\Lambda Q^{-1}`, use :math:`Q\\Lambda^{1/2}`
as the matrix square root.
num_eig : int or None, optional
Number of eigenvalues to compute. Can range from 1 to `M` (the
number of test points). If it is None, then all eigenvalues are
computed. Default is None (compute all eigenvalues). This keyword
only has an effect if `method` is 'eig'.
mean : array, (`M`,), optional
If you have pre-computed the mean and covariance matrix, then you
can simply pass them in with the `mean` and `cov` keywords to save
on having to call :py:meth:`predict`.
cov : array, (`M`, `M`), optional
If you have pre-computed the mean and covariance matrix, then you
can simply pass them in with the `mean` and `cov` keywords to save
on having to call :py:meth:`predict`.
modify_sign : {None, 'left value', 'right value', 'left slope', 'right slope', 'left concavity', 'right concavity'}, optional
If None (the default), the eigenvectors as returned by
:py:func:`scipy.linalg.eigh` are used without modification. To
modify the sign of the eigenvectors (necessary for some advanced use
cases), set this kwarg to one of the following:
* 'left value': forces the first value of each eigenvector to be
positive.
* 'right value': forces the last value of each eigenvector to be
positive.
* 'left slope': forces the slope to be positive at the start of
each eigenvector.
* 'right slope': forces the slope to be positive at the end of
each eigenvector.
* 'left concavity': forces the second derivative to be positive
at the start of each eigenvector.
* 'right concavity': forces the second derivative to be positive
at the end of each eigenvector.
**kwargs : optional kwargs
All extra keyword arguments are passed to :py:meth:`predict` when
evaluating the mean and covariance matrix of the GP.
Returns
-------
samples : :py:class:`Array` (`M`, `P`) or (`M`, `num_samp`)
Samples evaluated at the `M` points.
Raises
------
ValueError
If rand_type or method is invalid.
def draw_sample(self, Xstar, n=0, num_samp=1, rand_vars=None,
                rand_type='standard normal', diag_factor=1e3,
                method='cholesky', num_eig=None, mean=None, cov=None,
                modify_sign=None, **kwargs):
    """Draw a sample evaluated at the given points `Xstar`.

    Note that this function draws samples from the GP given the current
    values for the hyperparameters (which may be in a nonsense state if you
    just created the instance or called a method that performs MCMC sampling).
    If you want to draw random samples from MCMC output, use the
    `return_samples` and `full_output` keywords to :py:meth:`predict`.

    Parameters
    ----------
    Xstar : array, (`M`, `D`)
        `M` test input values of dimension `D`.
    n : array, (`M`, `D`) or scalar, non-negative int, optional
        Derivative order to evaluate at. Default is 0 (evaluate value).
    num_samp : Positive int, optional
        Number of samples to draw. Default is 1. Cannot be used in
        conjunction with `rand_vars`: If you pass both `num_samp` and
        `rand_vars`, `num_samp` will be silently ignored.
    rand_vars : array, (`M`, `P`), optional
        Vector of random variables :math:`u` to use in constructing the
        sample :math:`y_* = f_* + Lu`, where :math:`K=LL^T`. If None,
        values will be produced using
        :py:func:`numpy.random.multivariate_normal`. This allows you to use
        pseudo/quasi random numbers generated by an external routine. Note
        that, when `method` is 'eig', the eigenvalues are in *ascending*
        order.
        Default is None (use :py:func:`multivariate_normal` directly).
    rand_type : {'standard normal', 'uniform'}, optional
        Type of distribution the inputs are given with.

        * 'standard normal': Standard (`mu` = 0, `sigma` = 1) normal
          distribution (this is the default)
        * 'uniform': Uniform distribution on [0, 1). In this case
          the required Gaussian variables are produced with inversion.
    diag_factor : float, optional
        Number (times machine epsilon) added to the diagonal of the
        covariance matrix prior to computing its Cholesky decomposition.
        This is necessary as sometimes the decomposition will fail because,
        to machine precision, the matrix appears to not be positive definite.
        If you are getting errors from :py:func:`scipy.linalg.cholesky`, try
        increasing this an order of magnitude at a time. This parameter only
        has an effect when using rand_vars. Default value is 1e3.
    method : {'cholesky', 'eig'}, optional
        Method to use for constructing the matrix square root. Default is
        'cholesky' (use lower-triangular Cholesky decomposition).

        * 'cholesky': Perform Cholesky decomposition on the covariance
          matrix: :math:`K=LL^T`, use :math:`L` as the matrix square
          root.
        * 'eig': Perform an eigenvalue decomposition on the covariance
          matrix: :math:`K=Q \\Lambda Q^{-1}`, use :math:`Q\\Lambda^{1/2}`
          as the matrix square root.
    num_eig : int or None, optional
        Number of eigenvalues to compute. Can range from 1 to `M` (the
        number of test points). If it is None, then all eigenvalues are
        computed. Default is None (compute all eigenvalues). This keyword
        only has an effect if `method` is 'eig'.
    mean : array, (`M`,), optional
        If you have pre-computed the mean and covariance matrix, then you
        can simply pass them in with the `mean` and `cov` keywords to save
        on having to call :py:meth:`predict`.
    cov : array, (`M`, `M`), optional
        If you have pre-computed the mean and covariance matrix, then you
        can simply pass them in with the `mean` and `cov` keywords to save
        on having to call :py:meth:`predict`.
    modify_sign : {None, 'left value', 'right value', 'left slope', 'right slope', 'left concavity', 'right concavity'}, optional
        If None (the default), the eigenvectors as returned by
        :py:func:`scipy.linalg.eigh` are used without modification. To
        modify the sign of the eigenvectors (necessary for some advanced use
        cases), set this kwarg to one of the following:

        * 'left value': forces the first value of each eigenvector to be
          positive.
        * 'right value': forces the last value of each eigenvector to be
          positive.
        * 'left slope': forces the slope to be positive at the start of
          each eigenvector.
        * 'right slope': forces the slope to be positive at the end of
          each eigenvector.
        * 'left concavity': forces the second derivative to be positive
          at the start of each eigenvector.
        * 'right concavity': forces the second derivative to be positive
          at the end of each eigenvector.
    **kwargs : optional kwargs
        All extra keyword arguments are passed to :py:meth:`predict` when
        evaluating the mean and covariance matrix of the GP.

    Returns
    -------
    samples : :py:class:`Array` (`M`, `P`) or (`M`, `num_samp`)
        Samples evaluated at the `M` points.

    Raises
    ------
    ValueError
        If rand_type or method is invalid.
    """
    # All of the input processing for Xstar and n will be done in here:
    if mean is None or cov is None:
        out = self.predict(Xstar, n=n, full_output=True, **kwargs)
        mean = out['mean']
        cov = out['cov']
    if rand_vars is None and method != 'eig':
        # Fast path: let numpy draw directly from the multivariate normal.
        try:
            return numpy.random.multivariate_normal(mean, cov, num_samp).T
        except numpy.linalg.LinAlgError as e:
            if self.verbose:
                warnings.warn(
                    "Failure when drawing from MVN! Falling back on eig. "
                    "Exception was:\n%s"
                    % (e,),
                    RuntimeWarning
                )
            method = 'eig'
    # Clamp num_eig to the valid range [1, len(mean)]:
    if num_eig is None or num_eig > len(mean):
        num_eig = len(mean)
    elif num_eig < 1:
        num_eig = 1
    if rand_vars is None:
        rand_vars = numpy.random.standard_normal((num_eig, num_samp))
    valid_types = ('standard normal', 'uniform')
    if rand_type not in valid_types:
        raise ValueError(
            "rand_type %s not recognized! Valid options are: %s."
            % (rand_type, valid_types,)
        )
    if rand_type == 'uniform':
        # Convert uniform draws to standard normal via inverse CDF:
        rand_vars = scipy.stats.norm.ppf(rand_vars)
    if method == 'cholesky':
        # Jitter the diagonal so the matrix is numerically positive definite.
        # (NumPy is used for eye/diag/sqrt/atleast_2d: SciPy's top-level
        # NumPy aliases were removed in SciPy 1.12.)
        L = scipy.linalg.cholesky(
            cov + diag_factor * sys.float_info.epsilon * numpy.eye(cov.shape[0]),
            lower=True,
            check_finite=False
        )
    elif method == 'eig':
        # TODO: Add support for specifying cutoff eigenvalue!
        # Not technically lower triangular, but we'll keep the name L.
        # subset_by_index replaces the eigvals keyword that was removed from
        # scipy.linalg.eigh in SciPy 1.14; it selects the num_eig largest
        # eigenvalues, returned in ascending order as before.
        eig, Q = scipy.linalg.eigh(
            cov + diag_factor * sys.float_info.epsilon * numpy.eye(cov.shape[0]),
            subset_by_index=[len(mean) - num_eig, len(mean) - 1]
        )
        if modify_sign is not None:
            # Flip eigenvectors so the requested feature (value/slope/
            # concavity at an end) is positive:
            if modify_sign == 'left value':
                modify_mask = (Q[0, :] < 0.0)
            elif modify_sign == 'right value':
                modify_mask = (Q[-1, :] < 0.0)
            elif modify_sign == 'left slope':
                modify_mask = ((Q[1, :] - Q[0, :]) < 0.0)
            elif modify_sign == 'right slope':
                modify_mask = ((Q[-1, :] - Q[-2, :]) < 0.0)
            elif modify_sign == 'left concavity':
                modify_mask = ((Q[2, :] - 2 * Q[1, :] + Q[0, :]) < 0.0)
            elif modify_sign == 'right concavity':
                modify_mask = ((Q[-1, :] - 2 * Q[-2, :] + Q[-3, :]) < 0.0)
            else:
                raise ValueError(
                    "modify_sign %s not recognized!" % (modify_sign,)
                )
            Q[:, modify_mask] *= -1.0
        Lam_1_2 = numpy.diag(numpy.sqrt(eig))
        L = Q.dot(Lam_1_2)
    else:
        raise ValueError("method %s not recognized!" % (method,))
    return numpy.atleast_2d(mean).T + L.dot(rand_vars[:num_eig, :])
r"""Update the kernel's hyperparameters to the new parameters.
This will call :py:meth:`compute_K_L_alpha_ll` to update the state
accordingly.
Note that if this method crashes and the `hyper_deriv_handling` keyword
was used, it may leave :py:attr:`use_hyper_deriv` in the wrong state.
Parameters
----------
new_params : :py:class:`Array` or other Array-like, length dictated by kernel
New parameters to use.
hyper_deriv_handling : {'default', 'value', 'deriv'}, optional
Determines what to compute and return. If 'default' and
:py:attr:`use_hyper_deriv` is True then the negative log-posterior
and the negative gradient of the log-posterior with respect to the
hyperparameters is returned. If 'default' and
:py:attr:`use_hyper_deriv` is False or 'value' then only the negative
log-posterior is returned. If 'deriv' then only the negative gradient
of the log-posterior with respect to the hyperparameters is returned.
exit_on_bounds : bool, optional
If True, the method will automatically exit if the hyperparameters
are impossible given the hyperprior, without trying to update the
internal state. This is useful during MCMC sampling and optimization.
Default is True (don't perform update for impossible hyperparameters).
inf_on_error : bool, optional
If True, the method will return `scipy.inf` if the hyperparameters
produce a linear algebra error upon trying to update the Gaussian
process. Default is True (catch errors and return infinity).
Returns
-------
-1*ll : float
The updated log posterior.
-1*ll_deriv : array of float, (`num_params`,)
The gradient of the log posterior. Only returned if
:py:attr:`use_hyper_deriv` is True or `hyper_deriv_handling` is set
to 'deriv'.
def update_hyperparameters(self, new_params, hyper_deriv_handling='default', exit_on_bounds=True, inf_on_error=True):
    r"""Update the kernel's hyperparameters to the new parameters.

    This will call :py:meth:`compute_K_L_alpha_ll` to update the state
    accordingly.

    Note that if this method crashes and the `hyper_deriv_handling` keyword
    was used, it may leave :py:attr:`use_hyper_deriv` in the wrong state.

    Parameters
    ----------
    new_params : :py:class:`Array` or other Array-like, length dictated by kernel
        New parameters to use. The first ``len(self.k.free_params)`` entries
        belong to the covariance kernel, the next
        ``len(self.noise_k.free_params)`` entries to the noise kernel and the
        remainder (if a mean function is in use) to the mean function.
    hyper_deriv_handling : {'default', 'value', 'deriv'}, optional
        Determines what to compute and return. If 'default' and
        :py:attr:`use_hyper_deriv` is True then the negative log-posterior
        and the negative gradient of the log-posterior with respect to the
        hyperparameters is returned. If 'default' and
        :py:attr:`use_hyper_deriv` is False or 'value' then only the negative
        log-posterior is returned. If 'deriv' then only the negative gradient
        of the log-posterior with respect to the hyperparameters is returned.
    exit_on_bounds : bool, optional
        If True, the method will automatically exit if the hyperparameters
        are impossible given the hyperprior, without trying to update the
        internal state. This is useful during MCMC sampling and optimization.
        Default is True (don't perform update for impossible hyperparameters).
    inf_on_error : bool, optional
        If True, the method will return `scipy.inf` if the hyperparameters
        produce a linear algebra error upon trying to update the Gaussian
        process. Default is True (catch errors and return infinity).

    Returns
    -------
    -1*ll : float
        The updated log posterior.
    -1*ll_deriv : array of float, (`num_params`,)
        The gradient of the log posterior. Only returned if
        :py:attr:`use_hyper_deriv` is True or `hyper_deriv_handling` is set
        to 'deriv'.
    """
    # Remember the original setting so it can be restored on every exit
    # path (normal return, handled error and re-raised error):
    use_hyper_deriv = self.use_hyper_deriv
    # Temporarily force derivative computation off/on as requested:
    if hyper_deriv_handling == 'value':
        self.use_hyper_deriv = False
    elif hyper_deriv_handling == 'deriv':
        self.use_hyper_deriv = True
    # Distribute new_params over the covariance kernel, the noise kernel
    # and (optionally) the mean function, in that order:
    self.k.set_hyperparams(new_params[:len(self.k.free_params)])
    self.noise_k.set_hyperparams(
        new_params[len(self.k.free_params):len(self.k.free_params) + len(self.noise_k.free_params)]
    )
    if self.mu is not None:
        self.mu.set_hyperparams(
            new_params[len(self.k.free_params) + len(self.noise_k.free_params):]
        )
    # Invalidate the cached factorization so it is recomputed below:
    self.K_up_to_date = False
    try:
        if exit_on_bounds:
            # Bail out early (before the expensive factorization) when the
            # hyperprior assigns zero probability to these parameters:
            if scipy.isinf(self.hyperprior(self.params)):
                raise GPImpossibleParamsError("Impossible values for params!")
        self.compute_K_L_alpha_ll()
    except Exception as e:
        if inf_on_error:
            # Impossible parameters are expected during sampling and
            # optimization; only warn about other (unexpected) failures:
            if not isinstance(e, GPImpossibleParamsError) and self.verbose:
                warnings.warn(
                    "Unhandled exception when updating GP! Exception was:\n%s\n"
                    "State of params is: %s"
                    % (traceback.format_exc(), str(self.free_params[:]))
                )
            self.use_hyper_deriv = use_hyper_deriv
            # Return +inf (and a zero gradient where one is expected) so
            # callers treat this point as infinitely improbable:
            if use_hyper_deriv and hyper_deriv_handling == 'default':
                return (scipy.inf, scipy.zeros(len(self.free_params)))
            elif hyper_deriv_handling == 'deriv':
                return scipy.zeros(len(self.free_params))
            else:
                return scipy.inf
        else:
            # Restore the flag before propagating the failure:
            self.use_hyper_deriv = use_hyper_deriv
            raise e
    self.use_hyper_deriv = use_hyper_deriv
    # Success: return the negative log-posterior and/or its gradient
    # (negated so minimizers can consume the result directly):
    if use_hyper_deriv and hyper_deriv_handling == 'default':
        return (-1.0 * self.ll, -1.0 * self.ll_deriv)
    elif hyper_deriv_handling == 'deriv':
        return -1.0 * self.ll_deriv
    else:
        return -1.0 * self.ll
r"""Compute `K`, `L`, `alpha` and log-likelihood according to the first part of Algorithm 2.1 in R&W.
Computes `K` and the noise portion of `K` using :py:meth:`compute_Kij`,
computes `L` using :py:func:`scipy.linalg.cholesky`, then computes
`alpha` as `L.T\\(L\\y)`.
Only does the computation if :py:attr:`K_up_to_date` is False --
otherwise leaves the existing values.
def compute_K_L_alpha_ll(self):
    r"""Compute `K`, `L`, `alpha` and log-likelihood according to the first part of Algorithm 2.1 in R&W.

    Computes `K` and the noise portion of `K` using :py:meth:`compute_Kij`,
    computes `L` using :py:func:`scipy.linalg.cholesky`, then computes
    `alpha` as `L.T\\(L\\y)`.

    Only does the computation if :py:attr:`K_up_to_date` is False --
    otherwise leaves the existing values.
    """
    if not self.K_up_to_date:
        y = self.y
        err_y = self.err_y
        self.K = self.compute_Kij(self.X, None, self.n, None, noise=False)
        # If the noise kernel is meant to be strictly diagonal, it should
        # yield a diagonal noise_K:
        if isinstance(self.noise_k, ZeroKernel):
            self.noise_K = scipy.zeros((self.X.shape[0], self.X.shape[0]))
        elif isinstance(self.noise_k, DiagonalNoiseKernel):
            self.noise_K = self.noise_k.params[0]**2.0 * scipy.eye(self.X.shape[0])
        else:
            self.noise_K = self.compute_Kij(self.X, None, self.n, None, noise=True)
        K = self.K
        noise_K = self.noise_K
        if self.T is not None:
            KnK = self.T.dot(K + noise_K).dot(self.T.T)
        else:
            KnK = K + noise_K
        # Add the quoted y errors plus a small diagonal fudge factor to keep
        # the Cholesky factorization numerically stable:
        K_tot = (
            KnK +
            scipy.diag(err_y**2.0) +
            self.diag_factor * sys.float_info.epsilon * scipy.eye(len(y))
        )
        self.L = scipy.linalg.cholesky(K_tot, lower=True)
        # Need to make the mean-subtracted y that appears in the expression
        # for alpha:
        if self.mu is not None:
            mu_alph = self.mu(self.X, self.n)
            if self.T is not None:
                mu_alph = self.T.dot(mu_alph)
            y_alph = self.y - mu_alph
        else:
            y_alph = self.y
        self.alpha = scipy.linalg.cho_solve((self.L, True), scipy.atleast_2d(y_alph).T)
        self.ll = (
            -0.5 * scipy.atleast_2d(y_alph).dot(self.alpha) -
            scipy.log(scipy.diag(self.L)).sum() -
            0.5 * len(y) * scipy.log(2.0 * scipy.pi)
        )[0, 0]
        # Apply hyperpriors:
        self.ll += self.hyperprior(self.params)
        if self.use_hyper_deriv:
            warnings.warn("Use of hyperparameter derivatives is experimental!")
            # Only compute for the free parameters, since that is what we
            # want to optimize:
            self.ll_deriv = scipy.zeros(len(self.free_params))
            # Combine the kernel and noise kernel so we only need one loop:
            if isinstance(self.noise_k, ZeroKernel):
                knk = self.k
            elif isinstance(self.noise_k, DiagonalNoiseKernel):
                knk = self.k
                # Handle DiagonalNoiseKernel specially: its single parameter
                # contributes 2*sigma_n*I to dK/dtheta and lives in the slot
                # directly after the covariance kernel's free parameters.
                if not self.noise_k.fixed_params[0]:
                    dK_dtheta_i = 2.0 * self.noise_k.params[0] * scipy.eye(len(y))
                    self.ll_deriv[len(self.k.free_params)] = 0.5 * (
                        self.alpha.T.dot(dK_dtheta_i.dot(self.alpha)) -
                        scipy.trace(scipy.linalg.cho_solve((self.L, True), dK_dtheta_i))
                    )
            else:
                knk = self.k + self.noise_k
            # Get the indices of the free params in knk.params:
            free_param_idxs = scipy.arange(0, len(knk.params), dtype=int)[~knk.fixed_params]
            # Handle the kernel and noise kernel:
            for i, pi in enumerate(free_param_idxs):
                dK_dtheta_i = self.compute_Kij(
                    self.X, None, self.n, None, k=knk, hyper_deriv=pi
                )
                if self.T is not None:
                    dK_dtheta_i = self.T.dot(dK_dtheta_i).dot(self.T.T)
                self.ll_deriv[i] = 0.5 * (
                    self.alpha.T.dot(dK_dtheta_i.dot(self.alpha)) -
                    scipy.trace(scipy.linalg.cho_solve((self.L, True), dK_dtheta_i))
                )
            # Handle the mean function:
            if self.mu is not None:
                # The free hyperparameters are ordered (kernel, noise kernel,
                # mean function) -- see update_hyperparameters' slicing -- so
                # the mean function's block starts after ALL kernel AND noise
                # free parameters. BUG FIX: the previous offset of
                # len(knk.free_params) collided with the DiagonalNoiseKernel
                # slot written above, because knk is just self.k in that
                # branch. (Assumes ZeroKernel contributes no free params,
                # consistent with the slicing in update_hyperparameters.)
                mu_offset = len(self.k.free_params) + len(self.noise_k.free_params)
                # Get the indices of the free params in self.mu.params:
                free_param_idxs = scipy.arange(0, len(self.mu.params), dtype=int)[~self.mu.fixed_params]
                for i, pi in enumerate(free_param_idxs):
                    dmu_dtheta_i = scipy.atleast_2d(self.mu(self.X, self.n, hyper_deriv=pi)).T
                    if self.T is not None:
                        dmu_dtheta_i = self.T.dot(dmu_dtheta_i)
                    self.ll_deriv[i + mu_offset] = dmu_dtheta_i.T.dot(self.alpha)
            # Handle the hyperprior:
            # Get the indices of the free params in self.params:
            free_param_idxs = scipy.arange(0, len(self.params), dtype=int)[~self.fixed_params]
            for i, pi in enumerate(free_param_idxs):
                self.ll_deriv[i] += self.hyperprior(self.params, hyper_deriv=pi)
        self.K_up_to_date = True
r"""Compute covariance matrix between datasets `Xi` and `Xj`.
Specify the orders of derivatives at each location with the `ni`, `nj`
arrays. The `include_noise` flag is passed to the covariance kernel to
indicate whether noise is to be included (i.e., for evaluation of
:math:`K+\sigma I` versus :math:`K_*`).
If `Xj` is None, the symmetric matrix :math:`K(X, X)` is formed.
Note that type and dimension checking is NOT performed, as it is assumed
the data are from inside the instance and have hence been sanitized by
:py:meth:`add_data`.
Parameters
----------
Xi : array, (`M`, `D`)
`M` input values of dimension `D`.
Xj : array, (`P`, `D`)
`P` input values of dimension `D`.
ni : array, (`M`, `D`), non-negative integers
`M` derivative orders with respect to the `Xi` coordinates.
nj : array, (`P`, `D`), non-negative integers
`P` derivative orders with respect to the `Xj` coordinates.
noise : bool, optional
If True, uses the noise kernel, otherwise uses the regular kernel.
Default is False (use regular kernel).
hyper_deriv : None or non-negative int, optional
Index of the hyperparameter to compute the first derivative with
respect to. If None, no derivatives are taken. Default is None (no
hyperparameter derivatives).
k : :py:class:`~gptools.kernel.core.Kernel` instance, optional
The covariance kernel to used. Overrides `noise` if present.
Returns
-------
Kij : array, (`M`, `P`)
Covariance matrix between `Xi` and `Xj`.
def compute_Kij(self, Xi, Xj, ni, nj, noise=False, hyper_deriv=None, k=None):
    r"""Form the covariance matrix between the points in `Xi` and `Xj`.

    The desired derivative orders at each point are given by `ni` and `nj`.
    When `Xj` is None the symmetric matrix :math:`K(X, X)` is formed (with
    `nj` taken equal to `ni`).

    Note that type and dimension checking is NOT performed, as it is assumed
    the data are from inside the instance and have hence been sanitized by
    :py:meth:`add_data`.

    Parameters
    ----------
    Xi : array, (`M`, `D`)
        `M` input values of dimension `D`.
    Xj : array, (`P`, `D`) or None
        `P` input values of dimension `D`, or None to evaluate at `Xi`.
    ni : array, (`M`, `D`), non-negative integers
        `M` derivative orders with respect to the `Xi` coordinates.
    nj : array, (`P`, `D`), non-negative integers
        `P` derivative orders with respect to the `Xj` coordinates. Ignored
        when `Xj` is None.
    noise : bool, optional
        If True, uses the noise kernel, otherwise uses the regular kernel.
        Default is False (use regular kernel).
    hyper_deriv : None or non-negative int, optional
        Index of the hyperparameter to compute the first derivative with
        respect to. If None, no derivatives are taken. Default is None (no
        hyperparameter derivatives).
    k : :py:class:`~gptools.kernel.core.Kernel` instance, optional
        The covariance kernel to use. Overrides `noise` if present.

    Returns
    -------
    Kij : array, (`M`, `P`)
        Covariance matrix between `Xi` and `Xj`.
    """
    # Pick the kernel when the caller did not supply one explicitly:
    if k is None:
        k = self.noise_k if noise else self.k
    # A missing Xj means "covariance of Xi with itself":
    symmetric = Xj is None
    if symmetric:
        Xj = Xi
        nj = ni
    num_i = Xi.shape[0]
    num_j = Xj.shape[0]
    # Evaluate the kernel on the full cartesian product of rows in a single
    # vectorized call: the left arguments repeat each Xi row num_j times
    # while the right arguments cycle through all of Xj for each Xi row.
    # TODO: This does not exploit symmetry in the Xj is None case, but it is
    # still vastly faster than the double-for-loop implementation for which
    # using symmetry would be easy.
    left_X = scipy.repeat(Xi, num_j, axis=0)
    left_n = scipy.repeat(ni, num_j, axis=0)
    right_X = scipy.tile(Xj, (num_i, 1))
    right_n = scipy.tile(nj, (num_i, 1))
    flat_K = k(
        left_X,
        right_X,
        left_n,
        right_n,
        hyper_deriv=hyper_deriv,
        symmetric=symmetric
    )
    # Fold the flat result back into matrix form:
    return scipy.reshape(flat_K, (num_i, -1))
Compute the log likelihood over the (free) parameter space.
Parameters
----------
bounds : 2-tuple or list of 2-tuples with length equal to the number of free parameters
Bounds on the range to use for each of the parameters. If a single
2-tuple is given, it will be used for each of the parameters.
num_pts : int or list of ints with length equal to the number of free parameters
If a single int is given, it will be used for each of the parameters.
Returns
-------
ll_vals : :py:class:`Array`
The log likelihood for each of the parameter possibilities.
param_vals : List of :py:class:`Array`
The parameter values used.
def compute_ll_matrix(self, bounds, num_pts):
    """Compute the log likelihood over the (free) parameter space.

    On exit the hyperparameters are restored to the values they had when
    the method was called.

    Parameters
    ----------
    bounds : 2-tuple or list of 2-tuples with length equal to the number of free parameters
        Bounds on the range to use for each of the parameters. If a single
        2-tuple is given, it will be used for each of the parameters.
    num_pts : int or list of ints with length equal to the number of free parameters
        Number of points to evaluate for each parameter. If a single int is
        given, it will be used for each of the parameters.

    Returns
    -------
    ll_vals : :py:class:`Array`
        The log likelihood for each of the parameter possibilities.
    param_vals : List of :py:class:`Array`
        The parameter values used.

    Raises
    ------
    ValueError
        If `bounds` does not have shape (n, 2), or if `num_pts` is a
        sequence whose length does not match the number of free parameters.
    """
    # Remember the current values so they can be restored afterwards:
    present_free_params = self.free_params[:]
    bounds = scipy.atleast_2d(scipy.asarray(bounds, dtype=float))
    if bounds.shape[1] != 2:
        raise ValueError("Argument bounds must have shape (n, 2)!")
    # If bounds is a single tuple, repeat it for each free parameter:
    if bounds.shape[0] == 1:
        bounds = scipy.tile(bounds, (len(present_free_params), 1))
    # If num_pts is a single value, use it for all of the parameters:
    try:
        iter(num_pts)
    except TypeError:
        num_pts = num_pts * scipy.ones(bounds.shape[0], dtype=int)
    else:
        num_pts = scipy.asarray(num_pts, dtype=int)
        if len(num_pts) != len(present_free_params):
            raise ValueError(
                "Length of num_pts must match the number of free parameters!"
            )
    # Form arrays to evaluate parameters over. (range replaces the Python
    # 2-only xrange so this also works without a compat shim on Python 3.)
    param_vals = [
        scipy.linspace(bounds[j, 0], bounds[j, 1], num_pts[j])
        for j in range(len(present_free_params))
    ]
    ll_vals = self._compute_ll_matrix(0, param_vals, num_pts)
    # Reset the parameters to what they were before:
    self.update_hyperparameters(scipy.asarray(present_free_params, dtype=float))
    return (ll_vals, param_vals)
Recursive helper function for compute_ll_matrix.
Parameters
----------
idx : int
The index of the parameter for this layer of the recursion to
work on. `idx` == len(`num_pts`) is the base case that terminates
the recursion.
param_vals : List of :py:class:`Array`
List of arrays of parameter values. Entries in the slots 0:`idx` are
set to scalars by the previous levels of recursion.
num_pts : :py:class:`Array`
The numbers of points for each parameter.
Returns
-------
vals : :py:class:`Array`
The log likelihood for each of the parameter possibilities at lower
levels.
def _compute_ll_matrix(self, idx, param_vals, num_pts):
"""Recursive helper function for compute_ll_matrix.
Parameters
----------
idx : int
The index of the parameter for this layer of the recursion to
work on. `idx` == len(`num_pts`) is the base case that terminates
the recursion.
param_vals : List of :py:class:`Array`
List of arrays of parameter values. Entries in the slots 0:`idx` are
set to scalars by the previous levels of recursion.
num_pts : :py:class:`Array`
The numbers of points for each parameter.
Returns
-------
vals : :py:class:`Array`
The log likelihood for each of the parameter possibilities at lower
levels.
"""
if idx >= len(num_pts):
# Base case: All entries in param_vals should be scalars:
return -1.0 * self.update_hyperparameters(
scipy.asarray(param_vals, dtype=float)
)
else:
# Recursive case: call _compute_ll_matrix for each entry in param_vals[idx]:
vals = scipy.zeros(num_pts[idx:], dtype=float)
for k in xrange(0, len(param_vals[idx])):
specific_param_vals = list(param_vals)
specific_param_vals[idx] = param_vals[idx][k]
vals[k] = self._compute_ll_matrix(
idx + 1,
specific_param_vals,
num_pts
)
return vals |
Produce samples from the posterior for the hyperparameters using MCMC.
Returns the sampler created, because storing it stops the GP from being
pickleable. To add more samples to a previous sampler, pass the sampler
instance in the `sampler` keyword.
Parameters
----------
nwalkers : int, optional
The number of walkers to use in the sampler. Should be on the order
of several hundred. Default is 200.
nsamp : int, optional
Number of samples (per walker) to take. Default is 500.
burn : int, optional
This keyword only has an effect on the corner plot produced when
`plot_posterior` is True and the flattened chain plot produced
when `plot_chains` is True. To perform computations with burn-in,
see :py:meth:`compute_from_MCMC`. The number of samples to discard
at the beginning of the chain. Default is 0.
thin : int, optional
This keyword only has an effect on the corner plot produced when
`plot_posterior` is True and the flattened chain plot produced
when `plot_chains` is True. To perform computations with thinning,
see :py:meth:`compute_from_MCMC`. Every `thin`-th sample is kept.
Default is 1.
num_proc : int or None, optional
Number of processors to use. If None, all available processors are
used. Default is None (use all available processors).
sampler : :py:class:`Sampler` instance
The sampler to use. If the sampler already has samples, the most
recent sample will be used as the starting point. Otherwise a
random sample from the hyperprior will be used.
plot_posterior : bool, optional
If True, a corner plot of the posterior for the hyperparameters
will be generated. Default is False.
plot_chains : bool, optional
If True, a plot showing the history and autocorrelation of the
chains will be produced.
sampler_type : str, optional
The type of sampler to use. Valid options are "ensemble" (affine-
invariant ensemble sampler) and "pt" (parallel-tempered ensemble
sampler).
ntemps : int, optional
Number of temperatures to use with the parallel-tempered ensemble
sampler.
sampler_a : float, optional
Scale of the proposal distribution.
plot_kwargs : additional keywords, optional
Extra arguments to pass to :py:func:`~gptools.utils.plot_sampler`.
def sample_hyperparameter_posterior(self, nwalkers=200, nsamp=500, burn=0,
                                    thin=1, num_proc=None, sampler=None,
                                    plot_posterior=False,
                                    plot_chains=False, sampler_type='ensemble',
                                    ntemps=20, sampler_a=2.0, **plot_kwargs):
    """Produce samples from the posterior for the hyperparameters using MCMC.

    Returns the sampler created, because storing it stops the GP from being
    pickleable. To add more samples to a previous sampler, pass the sampler
    instance in the `sampler` keyword.

    Parameters
    ----------
    nwalkers : int, optional
        The number of walkers to use in the sampler. Should be on the order
        of several hundred. Default is 200.
    nsamp : int, optional
        Number of samples (per walker) to take. Default is 500.
    burn : int, optional
        This keyword only has an effect on the corner plot produced when
        `plot_posterior` is True and the flattened chain plot produced
        when `plot_chains` is True. To perform computations with burn-in,
        see :py:meth:`compute_from_MCMC`. The number of samples to discard
        at the beginning of the chain. Default is 0.
    thin : int, optional
        This keyword only has an effect on the corner plot produced when
        `plot_posterior` is True and the flattened chain plot produced
        when `plot_chains` is True. To perform computations with thinning,
        see :py:meth:`compute_from_MCMC`. Every `thin`-th sample is kept.
        Default is 1.
    num_proc : int or None, optional
        Number of processors to use. If None, all available processors are
        used. Default is None (use all available processors).
    sampler : :py:class:`Sampler` instance, optional
        The sampler to use. If the sampler already has samples, the most
        recent sample will be used as the starting point. Otherwise a
        random sample from the hyperprior will be used.
    plot_posterior : bool, optional
        If True, a corner plot of the posterior for the hyperparameters
        will be generated. Default is False.
    plot_chains : bool, optional
        If True, a plot showing the history of the chains will be produced.
    sampler_type : str, optional
        The type of sampler to use. Valid options are "ensemble" (affine-
        invariant ensemble sampler) and "pt" (parallel-tempered ensemble
        sampler, not implemented yet). Default is "ensemble".
    ntemps : int, optional
        Number of temperatures to use with the parallel-tempered ensemble
        sampler. Currently unused, since that sampler is not implemented.
    sampler_a : float, optional
        Scale of the proposal distribution.
    plot_kwargs : additional keywords, optional
        Extra arguments to pass to :py:func:`~gptools.utils.plot_sampler`.

    Returns
    -------
    sampler : :py:class:`Sampler` instance
        The sampler that was run.

    Raises
    ------
    NotImplementedError
        If `sampler_type` is 'pt' (not finished yet) or any other
        unsupported value.
    """
    if num_proc is None:
        num_proc = multiprocessing.cpu_count()
    # Needed for emcee to do it right:
    if num_proc == 0:
        num_proc = 1
    ndim = len(self.free_params)
    if sampler is None:
        if sampler_type == 'ensemble':
            sampler = emcee.EnsembleSampler(
                nwalkers,
                ndim,
                _ComputeLnProbEval(self),
                threads=num_proc,
                a=sampler_a
            )
        elif sampler_type == 'pt':
            # TODO: Add support for the parallel-tempered ensemble sampler.
            # (The dead PTSampler construction that used to follow this
            # raise referenced undefined names logl/logp and was removed.)
            raise NotImplementedError("PTSampler not done yet!")
        else:
            raise NotImplementedError(
                "Sampler type %s not supported!" % (sampler_type,)
            )
    else:
        sampler.a = sampler_a
    if sampler.chain.size == 0:
        # Fresh sampler: draw the starting positions from the hyperprior,
        # keeping only the columns for the free parameters:
        theta0 = self.hyperprior.random_draw(size=nwalkers).T
        theta0 = theta0[:, ~self.fixed_params]
    else:
        # Start from the stopping point of the previous chain:
        theta0 = sampler.chain[:, -1, :]
    sampler.run_mcmc(theta0, nsamp)
    if plot_posterior or plot_chains:
        # burn/thin only affect the plots, not the stored chain:
        flat_trace = sampler.chain[:, burn::thin, :]
        flat_trace = flat_trace.reshape((-1, flat_trace.shape[2]))
        if plot_posterior and plot_chains:
            plot_sampler(
                sampler,
                labels=['$%s$' % (l,) for l in self.free_param_names],
                burn=burn,
                **plot_kwargs
            )
        else:
            if plot_posterior:
                triangle.corner(
                    flat_trace,
                    plot_datapoints=False,
                    labels=['$%s$' % (l,) for l in self.free_param_names]
                )
            if plot_chains:
                # One panel per free parameter, all walkers overplotted,
                # with the burn-in cutoff marked:
                f = plt.figure()
                for k in range(0, ndim):
                    a = f.add_subplot(ndim, 1, k + 1)
                    for chain in sampler.chain[:, :, k]:
                        a.plot(chain)
                    a.set_xlabel('sample')
                    a.set_ylabel('$%s$' % (self.free_param_names[k],))
                    a.set_title('$%s$ all chains' % (self.free_param_names[k],))
                    a.axvline(burn, color='r', linewidth=3, ls='--')
    # Print a summary of the sampler:
    print("MCMC parameter summary:")
    print("param\tmean\t95% posterior interval")
    mean, ci_l, ci_u = summarize_sampler(sampler, burn=burn)
    names = self.free_param_names[:]
    for n, m, l, u in zip(names, mean, ci_l, ci_u):
        print("%s\t%4.4g\t[%4.4g, %4.4g]" % (n, m, l, u))
    return sampler
Compute desired quantities from MCMC samples of the hyperparameter posterior.
The return will be a list with a number of rows equal to the number of
hyperparameter samples. The columns depend on the state of the boolean
flags, but will be some subset of (mean, stddev, cov, samples), in that
order. Samples will be the raw output of :py:meth:`draw_sample`, so you
will need to remember to convert to an array and flatten if you want to
work with a single sample.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
n : non-negative int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
return_mean : bool, optional
If True, the mean will be computed at each hyperparameter sample.
Default is True (compute mean).
return_std : bool, optional
If True, the standard deviation will be computed at each
hyperparameter sample. Default is True (compute stddev).
return_cov : bool, optional
If True, the covariance matrix will be computed at each
hyperparameter sample. Default is True (compute stddev).
return_samples : bool, optional
If True, random sample(s) will be computed at each hyperparameter
sample. Default is False (do not compute samples).
num_samples : int, optional
Compute this many samples if `return_sample` is True. Default is 1.
noise : bool, optional
If True, noise is included in the predictions and samples. Default
is False (do not include noise).
samp_kwargs : dict, optional
If `return_sample` is True, the contents of this dictionary will be
passed as kwargs to :py:meth:`draw_sample`.
sampler : :py:class:`Sampler` instance or None, optional
:py:class:`Sampler` instance that has already been run to the extent
desired on the hyperparameter posterior. If None, a new sampler will
be created with :py:meth:`sample_hyperparameter_posterior`. In this
case, all extra kwargs will be passed on, allowing you to set the
number of samples, etc. Default is None (create sampler).
flat_trace : array-like (`nsamp`, `ndim`) or None, optional
Flattened trace with samples of the free hyperparameters. If present,
overrides `sampler`. This allows you to use a sampler other than the
ones from :py:mod:`emcee`, or to specify arbitrary values you wish
to evaluate the curve at. Note that this WILL be thinned and burned
according to the following two kwargs. "Flat" refers to the fact
that you must have combined all chains into a single one. Default is
None (use `sampler`).
burn : int, optional
The number of samples to discard at the beginning of the chain.
Default is 0.
thin : int, optional
Every `thin`-th sample is kept. Default is 1.
num_proc : int, optional
The number of processors to use for evaluation. This is used both
when calling the sampler and when evaluating the Gaussian process.
If None, the number of available processors will be used. If zero,
evaluation will proceed in parallel. Default is to use all available
processors.
**kwargs : extra optional kwargs
All additional kwargs are passed to
:py:meth:`sample_hyperparameter_posterior`.
Returns
-------
out : dict
A dictionary having some or all of the fields 'mean', 'std', 'cov'
and 'samp'. Each entry is a list of array-like. The length of this
list is equal to the number of hyperparameter samples used, and the
entries have the following shapes:
==== ====================
mean (`M`,)
std (`M`,)
cov (`M`, `M`)
samp (`M`, `num_samples`)
==== ====================
def compute_from_MCMC(self, X, n=0, return_mean=True, return_std=True,
                      return_cov=False, return_samples=False,
                      return_mean_func=False, num_samples=1, noise=False,
                      samp_kwargs=None, sampler=None, flat_trace=None, burn=0,
                      thin=1, **kwargs):
    """Compute desired quantities from MCMC samples of the hyperparameter posterior.

    The return will be a dictionary of lists, each with a number of entries
    equal to the number of hyperparameter samples used. Samples will be the
    raw output of :py:meth:`draw_sample`, so you will need to remember to
    convert to an array and flatten if you want to work with a single
    sample.

    Parameters
    ----------
    X : array-like (`M`,) or (`M`, `num_dim`)
        The values to evaluate the Gaussian process at.
    n : non-negative int or list, optional
        The order of derivative to compute. For num_dim=1, this must be an
        int. For num_dim=2, this must be a list of ints of length 2.
        Default is 0 (don't take derivative).
    return_mean : bool, optional
        If True, the mean will be computed at each hyperparameter sample.
        Default is True (compute mean).
    return_std : bool, optional
        If True, the standard deviation will be computed at each
        hyperparameter sample. Default is True (compute stddev).
    return_cov : bool, optional
        If True, the covariance matrix will be computed at each
        hyperparameter sample. Default is False (do not compute covariance).
    return_samples : bool, optional
        If True, random sample(s) will be computed at each hyperparameter
        sample. Default is False (do not compute samples).
    return_mean_func : bool, optional
        If True and a mean function is in use (:py:attr:`mu` is not None),
        the mean function's contribution (and the results with/without it)
        is also returned. Default is False.
    num_samples : int, optional
        Compute this many samples if `return_samples` is True. Default is 1.
    noise : bool, optional
        If True, noise is included in the predictions and samples. Default
        is False (do not include noise).
    samp_kwargs : dict, optional
        If `return_samples` is True, the contents of this dictionary will be
        passed as kwargs to :py:meth:`draw_sample`. Default is None
        (treated as an empty dict).
    sampler : :py:class:`Sampler` instance or None, optional
        :py:class:`Sampler` instance that has already been run to the extent
        desired on the hyperparameter posterior. If None, a new sampler will
        be created with :py:meth:`sample_hyperparameter_posterior`. In this
        case, all extra kwargs will be passed on, allowing you to set the
        number of samples, etc. Default is None (create sampler).
    flat_trace : array-like (`nsamp`, `ndim`) or None, optional
        Flattened trace with samples of the free hyperparameters. If present,
        overrides `sampler`. This allows you to use a sampler other than the
        ones from :py:mod:`emcee`, or to specify arbitrary values you wish
        to evaluate the curve at. Note that this WILL be thinned and burned
        according to the following two kwargs. "Flat" refers to the fact
        that you must have combined all chains into a single one. Default is
        None (use `sampler`).
    burn : int, optional
        The number of samples to discard at the beginning of the chain.
        Default is 0.
    thin : int, optional
        Every `thin`-th sample is kept. Default is 1.
    num_proc : int, optional
        The number of processors to use for evaluation. This is used both
        when calling the sampler and when evaluating the Gaussian process.
        If None, the number of available processors will be used. Default
        is to use all available processors.
    **kwargs : extra optional kwargs
        All additional kwargs are passed to
        :py:meth:`sample_hyperparameter_posterior`.

    Returns
    -------
    out : dict
        A dictionary having some or all of the fields 'mean', 'std', 'cov'
        and 'samp'. Each entry is a list of array-like. The length of this
        list is equal to the number of hyperparameter samples used, and the
        entries have the following shapes:

        ==== ====================
        mean (`M`,)
        std  (`M`,)
        cov  (`M`, `M`)
        samp (`M`, `num_samples`)
        ==== ====================
    """
    # None sentinel avoids the shared-mutable-default pitfall while keeping
    # the old call signature working:
    if samp_kwargs is None:
        samp_kwargs = {}
    output_transform = kwargs.pop('output_transform', None)
    if flat_trace is None:
        if sampler is None:
            sampler = self.sample_hyperparameter_posterior(burn=burn, **kwargs)
            # If we create the sampler, we need to make sure we clean up its
            # pool:
            try:
                sampler.pool.close()
            except AttributeError:
                # This will occur if only one thread is used.
                pass
        flat_trace = sampler.chain[:, burn::thin, :]
        flat_trace = flat_trace.reshape((-1, flat_trace.shape[2]))
    else:
        flat_trace = flat_trace[burn::thin, :]
    # get (not pop) is deliberate: num_proc must also reach
    # sample_hyperparameter_posterior through **kwargs above.
    num_proc = kwargs.get('num_proc', multiprocessing.cpu_count())
    if num_proc > 1:
        pool = InterruptiblePool(processes=num_proc)
        map_fun = pool.map
    else:
        map_fun = map
    try:
        # BUG FIX: the result must be materialized with list(). On Python 3
        # the builtin map returns a one-shot iterator, but `res` is
        # traversed once per requested output below -- an iterator would
        # silently come up empty after the first pass.
        res = list(map_fun(
            _ComputeGPWrapper(
                self,
                X,
                n,
                return_mean,
                return_std,
                return_cov,
                return_samples,
                return_mean_func,
                num_samples,
                noise,
                samp_kwargs,
                output_transform
            ),
            flat_trace
        ))
    finally:
        if num_proc > 1:
            pool.close()
    out = dict()
    if return_mean:
        out['mean'] = [r['mean'] for r in res if r is not None]
    if return_std:
        out['std'] = [r['std'] for r in res if r is not None]
    if return_cov:
        out['cov'] = [r['cov'] for r in res if r is not None]
    if return_samples:
        out['samp'] = [r['samp'] for r in res if r is not None]
    if return_mean_func and self.mu is not None:
        out['mean_func'] = [r['mean_func'] for r in res if r is not None]
        out['cov_func'] = [r['cov_func'] for r in res if r is not None]
        out['std_func'] = [r['std_func'] for r in res if r is not None]
        out['mean_without_func'] = [r['mean_without_func'] for r in res if r is not None]
        out['cov_without_func'] = [r['cov_without_func'] for r in res if r is not None]
        out['std_without_func'] = [r['std_without_func'] for r in res if r is not None]
    return out
Compute desired quantities from MCMC samples of the hyperparameter posterior.
The return will be a list with a number of rows equal to the number of
hyperparameter samples. The columns will contain the covariance length
scale function.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
n : non-negative int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
sampler : :py:class:`Sampler` instance or None, optional
:py:class:`Sampler` instance that has already been run to the extent
desired on the hyperparameter posterior. If None, a new sampler will
be created with :py:meth:`sample_hyperparameter_posterior`. In this
case, all extra kwargs will be passed on, allowing you to set the
number of samples, etc. Default is None (create sampler).
flat_trace : array-like (`nsamp`, `ndim`) or None, optional
Flattened trace with samples of the free hyperparameters. If present,
overrides `sampler`. This allows you to use a sampler other than the
ones from :py:mod:`emcee`, or to specify arbitrary values you wish
to evaluate the curve at. Note that this WILL be thinned and burned
according to the following two kwargs. "Flat" refers to the fact
that you must have combined all chains into a single one. Default is
None (use `sampler`).
burn : int, optional
The number of samples to discard at the beginning of the chain.
Default is 0.
thin : int, optional
Every `thin`-th sample is kept. Default is 1.
num_proc : int, optional
The number of processors to use for evaluation. This is used both
when calling the sampler and when evaluating the Gaussian process.
If None, the number of available processors will be used. If zero,
evaluation will proceed in parallel. Default is to use all available
processors.
**kwargs : extra optional kwargs
All additional kwargs are passed to
:py:meth:`sample_hyperparameter_posterior`.
Returns
-------
out : array of float
Length scale function at the indicated points.
def compute_l_from_MCMC(self, X, n=0, sampler=None, flat_trace=None, burn=0, thin=1, **kwargs):
    """Compute the covariance length scale function from MCMC samples of the hyperparameter posterior.

    The return will be a list with a number of rows equal to the number of
    hyperparameter samples. The columns will contain the covariance length
    scale function.

    Parameters
    ----------
    X : array-like (`M`,) or (`M`, `num_dim`)
        The values to evaluate the Gaussian process at.
    n : non-negative int or list, optional
        The order of derivative to compute. For num_dim=1, this must be an
        int. For num_dim=2, this must be a list of ints of length 2.
        Default is 0 (don't take derivative).
    sampler : :py:class:`Sampler` instance or None, optional
        :py:class:`Sampler` instance that has already been run to the extent
        desired on the hyperparameter posterior. If None, a new sampler will
        be created with :py:meth:`sample_hyperparameter_posterior`. In this
        case, all extra kwargs will be passed on, allowing you to set the
        number of samples, etc. Default is None (create sampler).
    flat_trace : array-like (`nsamp`, `ndim`) or None, optional
        Flattened trace with samples of the free hyperparameters. If present,
        overrides `sampler`. This allows you to use a sampler other than the
        ones from :py:mod:`emcee`, or to specify arbitrary values you wish
        to evaluate the curve at. Note that this WILL be thinned and burned
        according to the following two kwargs. "Flat" refers to the fact
        that you must have combined all chains into a single one. Default is
        None (use `sampler`).
    burn : int, optional
        The number of samples to discard at the beginning of the chain.
        Default is 0.
    thin : int, optional
        Every `thin`-th sample is kept. Default is 1.
    num_proc : int, optional
        The number of processors to use for evaluation. This is used both
        when calling the sampler and when evaluating the Gaussian process.
        If None, the number of available processors will be used. If zero
        or one, evaluation will proceed serially. Default is to use all
        available processors.
    **kwargs : extra optional kwargs
        All additional kwargs are passed to
        :py:meth:`sample_hyperparameter_posterior`.

    Returns
    -------
    out : list of array of float
        Length scale function at the indicated points, one entry per
        hyperparameter sample.
    """
    if flat_trace is None:
        if sampler is None:
            sampler = self.sample_hyperparameter_posterior(burn=burn, **kwargs)
            # If we create the sampler, we need to make sure we clean up
            # its pool:
            try:
                sampler.pool.close()
            except AttributeError:
                # This will occur if only one thread is used.
                pass
        flat_trace = sampler.chain[:, burn::thin, :]
        flat_trace = flat_trace.reshape((-1, flat_trace.shape[2]))
    else:
        flat_trace = flat_trace[burn::thin, :]
    num_proc = kwargs.get('num_proc', multiprocessing.cpu_count())
    if num_proc > 1:
        pool = InterruptiblePool(processes=num_proc)
        try:
            res = pool.map(_ComputeLWrapper(self, X, n), flat_trace)
        finally:
            pool.close()
    else:
        # Materialize the map: on Python 3, a bare map() is a lazy iterator,
        # which would make the serial return type differ from the parallel
        # branch (pool.map returns a list).
        res = list(map(_ComputeLWrapper(self, X, n), flat_trace))
    return res
Make a prediction using MCMC samples.
This is essentially a convenient wrapper of :py:meth:`compute_from_MCMC`,
designed to act more or less interchangeably with :py:meth:`predict`.
Computes the mean of the GP posterior marginalized over the
hyperparameters using iterated expectations. If `return_std` is True,
uses the law of total variance to compute the variance of the GP
posterior marginalized over the hyperparameters. If `return_cov` is True,
uses the law of total covariance to compute the entire covariance of the
GP posterior marginalized over the hyperparameters. If both `return_cov`
and `return_std` are True, then both the covariance matrix and standard
deviation array will be returned.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
ddof : int, optional
The degree of freedom correction to use when computing the variance.
Default is 1 (standard Bessel correction for unbiased estimate).
return_std : bool, optional
If True, the standard deviation is also computed. Default is True.
full_MC : bool, optional
Set to True to compute the mean and covariance matrix using Monte
Carlo sampling of the posterior. The samples will also be returned
if full_output is True. Default is False (don't use full sampling).
rejection_func : callable, optional
Any samples where this function evaluates False will be rejected,
where it evaluates True they will be kept. Default is None (no
rejection). Only has an effect if `full_MC` is True.
**kwargs : optional kwargs
All additional kwargs are passed directly to
:py:meth:`compute_from_MCMC`.
def predict_MCMC(self, X, ddof=1, full_MC=False, rejection_func=None, **kwargs):
    """Make a prediction using MCMC samples.

    This is essentially a convenient wrapper of :py:meth:`compute_from_MCMC`,
    designed to act more or less interchangeably with :py:meth:`predict`.

    Computes the mean of the GP posterior marginalized over the
    hyperparameters using iterated expectations. If `return_std` is True,
    uses the law of total variance to compute the variance of the GP
    posterior marginalized over the hyperparameters. If `return_cov` is True,
    uses the law of total covariance to compute the entire covariance of the
    GP posterior marginalized over the hyperparameters. If both `return_cov`
    and `return_std` are True, then both the covariance matrix and standard
    deviation array will be returned.

    Parameters
    ----------
    X : array-like (`M`,) or (`M`, `num_dim`)
        The values to evaluate the Gaussian process at.
    ddof : int, optional
        The degree of freedom correction to use when computing the variance.
        Default is 1 (standard Bessel correction for unbiased estimate).
    return_std : bool, optional
        If True, the standard deviation is also computed. Default is True.
    full_MC : bool, optional
        Set to True to compute the mean and covariance matrix using Monte
        Carlo sampling of the posterior. The samples will also be returned
        if `return_samples` is True. Default is False (don't use full
        sampling).
    rejection_func : callable, optional
        Any samples where this function evaluates False will be rejected,
        where it evaluates True they will be kept. Default is None (no
        rejection). Only has an effect if `full_MC` is True.
    **kwargs : optional kwargs
        All additional kwargs are passed directly to
        :py:meth:`compute_from_MCMC`.

    Returns
    -------
    out : dict
        Dictionary with (at least) key 'mean'; 'std'/'cov'/'samp' and the
        '*_func'/'*_without_func' keys are added depending on the flags and
        on what :py:meth:`compute_from_MCMC` returned.
    """
    return_std = kwargs.get('return_std', True)
    return_cov = kwargs.get('return_cov', False)
    if full_MC:
        # Full Monte Carlo: pull raw samples and compute the moments here.
        kwargs['return_mean'] = False
        kwargs['return_std'] = False
        kwargs['return_cov'] = False
        kwargs['return_samples'] = True
    else:
        kwargs['return_mean'] = True
    return_samples = kwargs.get('return_samples', True)
    res = self.compute_from_MCMC(X, **kwargs)
    out = {}
    if return_samples:
        samps = scipy.asarray(scipy.hstack(res['samp']))
    if full_MC:
        if rejection_func:
            good_samps = []
            for samp in samps.T:
                if rejection_func(samp):
                    good_samps.append(samp)
            if len(good_samps) == 0:
                raise ValueError("Did not get any good samples!")
            samps = scipy.asarray(good_samps, dtype=float).T
        mean = scipy.mean(samps, axis=1)
        cov = scipy.cov(samps, rowvar=1, ddof=ddof)
        std = scipy.sqrt(scipy.diagonal(cov))
    else:
        # Iterated expectations for the mean; law of total (co)variance for
        # the spread.
        means = scipy.asarray(res['mean'])
        mean = scipy.mean(means, axis=0)
        # TODO: Allow use of robust estimators!
        if 'cov' in res:
            covs = scipy.asarray(res['cov'])
            cov = scipy.mean(covs, axis=0) + scipy.cov(means, rowvar=0, ddof=ddof)
            std = scipy.sqrt(scipy.diagonal(cov))
        elif 'std' in res:
            vars_ = scipy.asarray(scipy.asarray(res['std']))**2
            std = scipy.sqrt(scipy.mean(vars_, axis=0) +
                             scipy.var(means, axis=0, ddof=ddof))
        # NOTE(review): if neither 'cov' nor 'std' is in res while return_std
        # or return_cov is requested, `std`/`cov` below are unbound and raise
        # NameError — confirm compute_from_MCMC always supplies one of them.
        if 'mean_func' in res:
            mean_funcs = scipy.asarray(res['mean_func'])
            cov_funcs = scipy.asarray(res['cov_func'])
            mean_func = scipy.mean(mean_funcs, axis=0)
            cov_func = scipy.mean(cov_funcs, axis=0) + scipy.cov(mean_funcs, rowvar=0, ddof=ddof)
            std_func = scipy.sqrt(scipy.diagonal(cov_func))
            mean_without_funcs = scipy.asarray(res['mean_without_func'])
            cov_without_funcs = scipy.asarray(res['cov_without_func'])
            mean_without_func = scipy.mean(mean_without_funcs, axis=0)
            cov_without_func = (
                scipy.mean(cov_without_funcs, axis=0) +
                scipy.cov(mean_without_funcs, rowvar=0, ddof=ddof)
            )
            std_without_func = scipy.sqrt(scipy.diagonal(cov_without_func))
            out['mean_func'] = mean_func
            out['cov_func'] = cov_func
            out['std_func'] = std_func
            out['mean_without_func'] = mean_without_func
            out['cov_without_func'] = cov_without_func
            out['std_without_func'] = std_without_func
    out['mean'] = mean
    if return_samples:
        out['samp'] = samps
    if return_std or return_cov:
        out['std'] = std
    if return_cov:
        out['cov'] = cov
    return out
Build an argparse argument parser to parse the command line.
def build_parser():
    """Build the top-level argparse parser for the command line.

    Returns an ``argparse.ArgumentParser`` with the global options
    (``-c/--config`` and logging flags) plus one subparser per subcommand.
    Each subcommand registers a ``func`` default which ``main()`` dispatches
    to after parsing.
    """
    parser = argparse.ArgumentParser(
        description="""Coursera OAuth2 client CLI. This tool
        helps users of the Coursera App Platform to programmatically access
        Coursera APIs.""",
        epilog="""Please file bugs on github at:
        https://github.com/coursera/courseraoauth2client/issues. If you
        would like to contribute to this tool's development, check us out at:
        https://github.com/coursera/courseraoauth2client""")
    parser.add_argument('-c', '--config', help='the configuration file to use')
    utils.add_logging_parser(parser)
    # We support multiple subcommands. These subcommands have their own
    # subparsers. Each subcommand should set a default value for the 'func'
    # option. We then call the parsed 'func' function, and execution carries on
    # from there.
    subparsers = parser.add_subparsers()
    commands.config.parser(subparsers)
    commands.version.parser(subparsers)
    return parser
Boots up the command line tool
def main():
    """Boot up the command line tool: parse args, set up logging, dispatch.

    Returns the subcommand's return value, or exits with status 1 on an
    unexpected error.
    """
    logging.captureWarnings(True)
    args = build_parser().parse_args()
    # Configure logging
    args.setup_logging(args)
    # Dispatch into the appropriate subcommand function.
    try:
        return args.func(args)
    except SystemExit:
        raise
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt;
        # narrow it so Ctrl-C propagates normally.
        logging.exception('Problem when running command. Sorry!')
        sys.exit(1)
Add sponsor menu links.
def sponsor_menu(
        root_menu, menu="sponsors", label=_("Sponsors"),
        sponsors_item=_("Our sponsors"),
        packages_item=_("Sponsorship packages")):
    """Populate the named menu with one link per sponsor, plus optional
    links to the sponsors overview and sponsorship packages pages."""
    root_menu.add_menu(menu, label, items=[])
    sponsors = (
        Sponsor.objects.all()
        .order_by('packages', 'order', 'id')
        .prefetch_related('packages'))
    for sponsor in sponsors:
        symbols = sponsor.symbols()
        if symbols:
            entry = u"» %s %s" % (sponsor.name, symbols)
        else:
            entry = u"» %s" % (sponsor.name,)
        with menu_logger(logger, "sponsor %r" % (sponsor.name,)):
            root_menu.add_item(entry, sponsor.get_absolute_url(), menu=menu)
    if sponsors_item:
        with menu_logger(logger, "sponsors page link"):
            root_menu.add_item(sponsors_item, reverse("wafer_sponsors"), menu)
    if packages_item:
        with menu_logger(logger, "sponsorship package page link"):
            root_menu.add_item(
                packages_item, reverse("wafer_sponsorship_packages"), menu)
This filter is useful to execute an object method or get an object
attribute dynamically. It takes into account that the `atrib` param
can contain underscores.
def objectatrib(instance, atrib):
    '''
    Template filter helper: resolve a dotted (or double-underscored)
    attribute path against an object, executing methods or reading
    attributes/keys dynamically at each step.

    Fixes over the original: the dead `atribs = []` assignment is removed,
    and `isinstance` replaces `type(obj) == dict` so dict subclasses
    (OrderedDict, defaultdict, ...) are looked up by key as intended.
    '''
    atribs = atrib.replace("__", ".").split(".")
    obj = instance
    for atrib in atribs:
        if isinstance(obj, dict):
            # Mappings are indexed by key.
            result = obj[atrib]
        else:
            # Prefer calling a method; fall back to the raw attribute.
            try:
                result = getattr(obj, atrib)()
            except Exception:
                result = getattr(obj, atrib)
        obj = result
    return result
Renders the field.
def as_widget(self, widget=None, attrs=None, only_initial=False):
    """
    Render the field's widget, merging form-level widget attrs and the
    CSS classes configured on the field, the form and the widget itself.
    """
    attrs = attrs or {}
    attrs.update(self.form.get_widget_attrs(self))
    # Field-level classes take precedence over the form-wide default.
    css_classes = getattr(
        self.field, 'widget_css_classes',
        getattr(self.form, 'widget_css_classes', None))
    if css_classes:
        attrs['class'] = css_classes
    extra_classes = self.form.fields[self.name].widget.attrs.get('class', None)
    if extra_classes:
        current = attrs.get('class', None)
        attrs['class'] = (current + ' ' + extra_classes) if current else extra_classes
    return super(NgBoundField, self).as_widget(widget, attrs, only_initial)
During form initialization, some widgets have to be replaced by a counterpart suitable to
be rendered the AngularJS way.
def convert_widgets(self):
    """
    Replace each field's widget with its AngularJS-aware counterpart,
    for those fields that provide one via ``get_converted_widget``.
    """
    for field in self.base_fields.values():
        try:
            replacement = field.get_converted_widget()
        except AttributeError:
            # Field has no converter — leave its widget untouched.
            continue
        if replacement:
            field.widget = replacement
Convert an epoch date to a tuple in format ("yyyy-mm-dd","hh:mm:ss")
Example: "1023456427" -> ("2002-06-07","15:27:07")
Parameters:
- `timestamp`: date in epoch format
def epochdate(timestamp):
    '''
    Convert an epoch date to a tuple in format ("yyyy-mm-dd","hh:mm:ss")
    Example: "1023456427" -> ("2002-06-07","15:27:07")
    Parameters:
    - `timestamp`: date in epoch format (interpreted in local time)
    '''
    parts = datetime.fromtimestamp(float(timestamp)).timetuple()
    date_str = "%d-%02d-%02d" % (parts.tm_year, parts.tm_mon, parts.tm_mday)
    time_str = "%02d:%02d:%02d" % (parts.tm_hour, parts.tm_min, parts.tm_sec)
    return (date_str, time_str)
Analyze the given object looking for special information, right now it returns:
- Application name
- Model name
def model_inspect(obj):
'''
Analize itself looking for special information, right now it returns:
- Application name
- Model name
'''
# Prepare the information object
info = {}
if hasattr(obj, '_meta'):
info['verbose_name'] = getattr(obj._meta, 'verbose_name', None)
else:
info['verbose_name'] = None
# Get info from the object
if hasattr(obj, 'model') and obj.model:
model = obj.model
else:
model = obj.__class__
namesp = str(model)
namesp = namesp.replace("<class ", "").replace(">", "").replace("'", "").split(".")
# Remember information
info['appname'] = namesp[-3]
info['modelname'] = namesp[-1]
info['model'] = model
# Return the info
return info |
This method is created to return the path to upload files. This path must be
different from any other to avoid problems.
def upload_path(instance, filename):
    '''
    Return a unique upload path for the given file, built from the model
    name, the current date and a randomized filename, so uploads never
    collide with each other.
    '''
    # Model name of the owning instance.
    model_name = model_inspect(instance)['modelname']
    # Zero-padded date components for the directory layout.
    year, month, day = datetime.now().strftime("%Y-%m-%d").split("-")
    # Split the extension off; everything before the last dot is the stem.
    pieces = filename.split(".")
    stem = "".join(pieces[:-1])
    extension = pieces[-1]
    # Random decimal tail keeps filenames unique.
    rand_tail = str(random.random()).split(".")[1]
    final_name = "%s%s.%s" % (stem, rand_tail, extension)
    return "/".join([model_name, year, month, day, final_name])
for string 'get_FIELD_NAME_display' return 'FIELD_NAME'
def remove_getdisplay(field_name):
    '''
    For a string 'get_FIELD_NAME_display' return 'FIELD_NAME';
    any other string is returned unchanged.
    '''
    prefix = 'get_'
    suffix = '_display'
    if field_name.startswith(prefix) and field_name.endswith(suffix):
        field_name = field_name[len(prefix):-len(suffix)]
    return field_name
JSONEncoder_newdefault is a wrapper capable of encoding several extra kinds
Usage:
from codenerix.helpers import JSONEncoder_newdefault
JSONEncoder_newdefault()
def JSONEncoder_newdefault(kind=['uuid', 'datetime', 'time', 'decimal']):
    '''
    Monkey-patch json.JSONEncoder.default so the encoder can serialize
    several extra kinds of objects (UUID, datetime, struct_time, Decimal).

    Usage:
        from codenerix.helpers import JSONEncoder_newdefault
        JSONEncoder_newdefault()

    Parameters:
    - `kind`: list of type names to enable; only the listed kinds get the
      special handling below.

    NOTE(review): calling this more than once chains wrappers (each call
    captures the previous default) — harmless but wasteful; confirm it is
    only invoked once at startup.
    '''
    # Keep a reference to the default installed right now, so unhandled
    # types still fall through to the original behavior.
    JSONEncoder_olddefault = json.JSONEncoder.default
    def JSONEncoder_wrapped(self, o):
        '''
        Serialize the extra types listed in `kind`; fall back to the
        previously installed default for everything else.
        '''
        if ('uuid' in kind) and isinstance(o, UUID):
            return str(o)
        if ('datetime' in kind) and isinstance(o, datetime):
            return str(o)
        if ('time' in kind) and isinstance(o, time.struct_time):
            return datetime.fromtimestamp(time.mktime(o))
        if ('decimal' in kind) and isinstance(o, decimal.Decimal):
            return str(o)
        return JSONEncoder_olddefault(self, o)
    json.JSONEncoder.default = JSONEncoder_wrapped
Update context with context_processors from settings
Usage:
from codenerix.helpers import context_processors_update
context_processors_update(context, self.request)
def context_processors_update(context, request):
    '''
    Update `context` with the output of every context processor declared in
    settings.TEMPLATES and return it.

    Usage:
        from codenerix.helpers import context_processors_update
        context_processors_update(context, self.request)
    '''
    for template in settings.TEMPLATES:
        # 'OPTIONS' and 'context_processors' are optional keys in Django's
        # TEMPLATES setting; entries without them are simply skipped instead
        # of raising KeyError.
        for context_processor in template.get('OPTIONS', {}).get('context_processors', []):
            # Split "pkg.module.func" into module path and callable name.
            path, name = context_processor.rsplit('.', 1)
            processor = getattr(importlib.import_module(path), name, None)
            if processor:
                context.update(processor(request))
    return context
Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip.
def append(self, filename_in_zip, file_contents):
    '''
    Appends a file with name filename_in_zip and contents of
    file_contents to the in-memory zip. Returns self for chaining.
    '''
    # Set the file pointer to the end of the file. The previous
    # seek(-1, SEEK_END) raised ValueError on an empty buffer (the very
    # first append) and landed one byte short otherwise.
    self.in_memory_zip.seek(0, io.SEEK_END)
    # Get a handle to the in-memory zip in append mode
    zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False)
    # Write the file to the in-memory zip
    zf.writestr(filename_in_zip, file_contents)
    # Mark the files as having been created on Windows so that
    # Unix permissions are not inferred as 0000
    for zfile in zf.filelist:
        zfile.create_system = 0
    # Close the ZipFile
    zf.close()
    # Rewind the file
    self.in_memory_zip.seek(0)
    return self
Writes the in-memory zip to a file.
def writetofile(self, filename):
    '''Writes the in-memory zip to a file.

    Opens the target in binary mode: ZIP data is bytes, and the previous
    text-mode "w" open would fail (Python 3) or corrupt the archive on
    Windows. The `with` block also guarantees the handle is closed.
    '''
    with open(filename, "wb") as f:
        f.write(self.read())
Returns the corresponding url from the sponsors images
def sponsor_image_url(sponsor, name):
    """Returns the corresponding url from the sponsors images.

    Returns '' when no file matches. Multiple matches are resolved by
    always using the first one.
    """
    # Single query: the previous exists()+first() pair filtered twice and
    # hit the database twice for the same result.
    sponsor_file = sponsor.files.filter(name=name).first()
    if sponsor_file is not None:
        return sponsor_file.item.url
    return ''
returns the corresponding url from the tagged image list.
def sponsor_tagged_image(sponsor, tag):
    """Returns the corresponding url from the tagged image list.

    Returns '' when no file carries the tag; the first match wins.
    """
    # Single query instead of the previous exists()+first() double lookup.
    tagged = sponsor.files.filter(tag_name=tag).first()
    if tagged is not None:
        return tagged.tagged_file.item.url
    return ''
Check to see if the currently logged in user belongs to a specific
group. Requires the Django authentication contrib app and middleware.
Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
{% ifusergroup Admins Clients Sellers %} ... {% else %} ... {% endifusergroup %}
def ifusergroup(parser, token):
    """ Check to see if the currently logged in user belongs to a specific
    group. Requires the Django authentication contrib app and middleware.
    Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
    {% ifusergroup Admins Clients Sellers %} ... {% else %} ... {% endifusergroup %}
    """
    tokensp = token.split_contents()
    # The original wrapped slicing in try/except ValueError, but slicing a
    # list never raises ValueError, so the "at least 1 argument" error could
    # never fire. Check the argument count explicitly instead.
    if len(tokensp) < 2:
        raise template.TemplateSyntaxError("Tag 'ifusergroup' requires at least 1 argument.")
    groups = tokensp[1:]
    nodelist_true = parser.parse(('else', 'endifusergroup'))
    token = parser.next_token()
    if token.contents == 'else':
        nodelist_false = parser.parse(('endifusergroup',))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    return GroupCheckNode(groups, nodelist_true, nodelist_false)
Gets a handle for use with other vSphere Guest API functions. The guest library
handle provides a context for accessing information about the virtual machine.
Virtual machine statistics and state data are associated with a particular guest library
handle, so using one handle does not affect the data associated with another handle.
def OpenHandle(self):
    '''Gets a handle for use with other vSphere Guest API functions. The guest library
    handle provides a context for accessing information about the virtual machine.
    Virtual machine statistics and state data are associated with a particular guest library
    handle, so using one handle does not affect the data associated with another handle.'''
    if hasattr(self, 'handle'):
        return self.handle
    handle = c_void_p()
    ret = vmGuestLib.VMGuestLib_OpenHandle(byref(handle))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    # Cache the handle: the hasattr fast-path above and CloseHandle's
    # `del self.handle` both assume it is stored, but the original never
    # assigned it, so every call opened a fresh handle.
    self.handle = handle
    return handle
Releases a handle acquired with VMGuestLib_OpenHandle
def CloseHandle(self):
    '''Release a handle previously acquired with VMGuestLib_OpenHandle;
    a no-op when no handle is currently held.'''
    if not hasattr(self, 'handle'):
        return
    status = vmGuestLib.VMGuestLib_CloseHandle(self.handle.value)
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    del self.handle
Updates information about the virtual machine. This information is associated with
the VMGuestLibHandle.
VMGuestLib_UpdateInfo requires similar CPU resources to a system call and
therefore can affect performance. If you are concerned about performance, minimize
the number of calls to VMGuestLib_UpdateInfo.
If your program uses multiple threads, each thread must use a different handle.
Otherwise, you must implement a locking scheme around update calls. The vSphere
Guest API does not implement internal locking around access with a handle.
def UpdateInfo(self):
    '''Refresh the virtual-machine statistics associated with this handle.
    Costs roughly a system call, so call it sparingly. With multiple
    threads, use a separate handle per thread (or lock around updates);
    the vSphere Guest API does no internal locking per handle.'''
    status = vmGuestLib.VMGuestLib_UpdateInfo(self.handle.value)
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
Retrieves the VMSessionID for the current session. Call this function after calling
VMGuestLib_UpdateInfo. If VMGuestLib_UpdateInfo has never been called,
VMGuestLib_GetSessionId returns VMGUESTLIB_ERROR_NO_INFO.
def GetSessionId(self):
    '''Return the VMSessionID for the current session. Only valid after
    UpdateInfo() has run at least once; otherwise the library reports
    VMGUESTLIB_ERROR_NO_INFO.'''
    session = c_void_p()
    status = vmGuestLib.VMGuestLib_GetSessionId(self.handle.value, byref(session))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return session
Retrieves the upperlimit of processor use in MHz available to the virtual
machine. For information about setting the CPU limit, see "Limits and
Reservations" on page 14.
def GetCpuLimitMHz(self):
    '''Return the upper limit of processor use in MHz available to the
    virtual machine (see "Limits and Reservations" in the vSphere docs).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetCpuLimitMHz(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the minimum processing power in MHz reserved for the virtual
machine. For information about setting a CPU reservation, see "Limits and
Reservations" on page 14.
def GetCpuReservationMHz(self):
    '''Return the minimum processing power in MHz reserved for the
    virtual machine (see "Limits and Reservations" in the vSphere docs).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetCpuReservationMHz(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the number of CPU shares allocated to the virtual machine. For
information about how an ESX server uses CPU shares to manage virtual
machine priority, see the vSphere Resource Management Guide.
def GetCpuShares(self):
    '''Return the number of CPU shares allocated to the virtual machine
    (used by ESX to prioritize VMs; see the vSphere Resource Management
    Guide).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetCpuShares(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the number of milliseconds that the virtual machine was in a
ready state (able to transition to a run state), but was not scheduled to run.
def GetCpuStolenMs(self):
    '''Return the number of milliseconds the VM was ready to run but
    was not scheduled on a physical CPU.'''
    value = c_uint64()
    status = vmGuestLib.VMGuestLib_GetCpuStolenMs(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the number of milliseconds during which the virtual machine
has used the CPU. This value includes the time used by the guest
operating system and the time used by virtualization code for tasks for this
virtual machine. You can combine this value with the elapsed time
(VMGuestLib_GetElapsedMs) to estimate the effective virtual machine
CPU speed. This value is a subset of elapsedMs.
def GetCpuUsedMs(self):
    '''Return the number of milliseconds of CPU the VM has used (guest OS
    time plus virtualization overhead for this VM). A subset of
    GetElapsedMs(); combine the two to estimate effective CPU speed.'''
    value = c_uint64()
    status = vmGuestLib.VMGuestLib_GetCpuUsedMs(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the number of milliseconds that have passed in the virtual
machine since it last started running on the server. The count of elapsed
time restarts each time the virtual machine is powered on, resumed, or
migrated using VMotion. This value counts milliseconds, regardless of
whether the virtual machine is using processing power during that time.
You can combine this value with the CPU time used by the virtual machine
(VMGuestLib_GetCpuUsedMs) to estimate the effective virtual machine
CPU speed. cpuUsedMs is a subset of this value.
def GetElapsedMs(self):
    '''Return the milliseconds elapsed since the VM last started running
    on the server (reset on power-on, resume, or VMotion), counted
    regardless of CPU activity. GetCpuUsedMs() is a subset of this value;
    combine the two to estimate effective CPU speed.'''
    value = c_uint64()
    status = vmGuestLib.VMGuestLib_GetElapsedMs(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostCpuUsedMs(self):
    '''Undocumented host CPU-used counter (ms) exposed by the Guest API.'''
    value = c_uint64()
    status = vmGuestLib.VMGuestLib_GetHostCpuUsedMs(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostMemKernOvhdMB(self):
    '''Undocumented host kernel-overhead memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostMemKernOvhdMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostMemMappedMB(self):
    '''Undocumented host mapped-memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostMemMappedMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostMemPhysFreeMB(self):
    '''Undocumented host free-physical-memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostMemPhysFreeMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostMemPhysMB(self):
    '''Undocumented host physical-memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostMemPhysMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostMemSharedMB(self):
    '''Undocumented host shared-memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostMemSharedMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostMemSwappedMB(self):
    '''Undocumented host swapped-memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostMemSwappedMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostMemUnmappedMB(self):
    '''Undocumented host unmapped-memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostMemUnmappedMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostMemUsedMB(self):
    '''Undocumented host used-memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostMemUsedMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetHostNumCpuCores(self):
    '''Undocumented host CPU-core-count counter.'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostNumCpuCores(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the speed of the ESX system's physical CPU in MHz.
def GetHostProcessorSpeed(self):
    '''Return the speed of the ESX system's physical CPU in MHz.'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetHostProcessorSpeed(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the amount of memory the virtual machine is actively using its
estimated working set size.
def GetMemActiveMB(self):
    '''Return the amount of memory the VM is actively using — its
    estimated working set size (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemActiveMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the amount of memory that has been reclaimed from this virtual
machine by the vSphere memory balloon driver (also referred to as the
"vmmemctl" driver).
def GetMemBalloonedMB(self):
    '''Return the amount of memory reclaimed from this VM by the vSphere
    memory balloon ("vmmemctl") driver, in MB.'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemBalloonedMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetMemBalloonMaxMB(self):
    '''Undocumented balloon-maximum memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemBalloonMaxMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Undocumented.
def GetMemBalloonTargetMB(self):
    '''Undocumented balloon-target memory counter (MB).'''
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemBalloonTargetMB(self.handle.value, byref(value))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value.value
Retrieves the upper limit of memory that is available to the virtual
machine. For information about setting a memory limit, see "Limits and
Reservations" on page 14.
def GetMemLimitMB(self):
    """Return the upper limit of memory (MB) available to the virtual
    machine. See "Limits and Reservations" in the Guest SDK documentation
    for how to configure a memory limit."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemLimitMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Undocumented.
def GetMemLLSwappedMB(self):
    """Return the LL-swapped counter in MB (undocumented in the Guest SDK)."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemLLSwappedMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the amount of memory that is allocated to the virtual machine.
Memory that is ballooned, swapped, or has never been accessed is
excluded.
def GetMemMappedMB(self):
    """Return the amount of memory (MB) allocated to the virtual machine,
    excluding memory that is ballooned, swapped, or has never been
    accessed."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemMappedMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the amount of "overhead" memory associated with this virtual
machine that is currently consumed on the host system. Overhead
memory is additional memory that is reserved for data structures required
by the virtualization layer.
def GetMemOverheadMB(self):
    """Return the "overhead" memory (MB) for this virtual machine currently
    consumed on the host — additional memory reserved for data structures
    required by the virtualization layer."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemOverheadMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the minimum amount of memory that is reserved for the virtual
machine. For information about setting a memory reservation, see "Limits
and Reservations" on page 14.
def GetMemReservationMB(self):
    """Return the minimum amount of memory (MB) reserved for the virtual
    machine. See "Limits and Reservations" in the Guest SDK documentation
    for how to configure a memory reservation."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemReservationMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the amount of physical memory associated with this virtual
machine that is copy-on-write (COW) shared on the host.
def GetMemSharedMB(self):
    """Return the amount of this virtual machine's physical memory (MB)
    that is copy-on-write (COW) shared on the host."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemSharedMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the estimated amount of physical memory on the host saved
from copy-on-write (COW) shared guest physical memory.
def GetMemSharedSavedMB(self):
    """Return the estimated amount of host physical memory (MB) saved by
    copy-on-write (COW) sharing of guest physical memory."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemSharedSavedMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the number of memory shares allocated to the virtual machine.
For information about how an ESX server uses memory shares to manage
virtual machine priority, see the vSphere Resource Management Guide.
def GetMemShares(self):
    """Return the number of memory shares allocated to the virtual machine.
    See the vSphere Resource Management Guide for how ESX uses memory
    shares to prioritize virtual machines."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemShares(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the amount of memory that has been reclaimed from this virtual
machine by transparently swapping guest memory to disk.
def GetMemSwappedMB(self):
    """Return the amount of memory (MB) reclaimed from this virtual machine
    by transparently swapping guest memory to disk."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemSwappedMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Undocumented.
def GetMemSwapTargetMB(self):
    """Return the swap-target counter in MB (undocumented in the Guest SDK)."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemSwapTargetMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the size of the target memory allocation for this virtual machine.
def GetMemTargetSizeMB(self):
    """Return the size (MB) of the target memory allocation for this
    virtual machine."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemTargetSizeMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Retrieves the estimated amount of physical host memory currently
consumed for this virtual machine's physical memory.
def GetMemUsedMB(self):
    """Return the estimated amount of host physical memory (MB) currently
    consumed for this virtual machine's physical memory."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemUsedMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Undocumented.
def GetMemZippedMB(self):
    """Return the zipped-memory counter in MB (undocumented in the Guest SDK)."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemZippedMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Undocumented.
def GetMemZipSavedMB(self):
    """Return the zip-saved counter in MB (undocumented in the Guest SDK)."""
    value = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemZipSavedMB(self.handle.value, byref(value))
    if status == VMGUESTLIB_ERROR_SUCCESS:
        return value.value
    raise VMGuestLibException(status)
Evaluate a B-, M- or I-spline with the specified internal knots, order and coefficients.
`deg` boundary knots are appended at both sides of the domain.
The zeroth order basis functions are modified to ensure continuity at the
right-hand boundary.
Note that the I-splines include the :math:`i=0` case in order to have a "DC
offset". This way your functions do not have to start at zero. If you want
to not include this, simply set the first coefficient in `C` to zero.
Parameters
----------
t_int : array of float, (`M`,)
The internal knot locations. Must be monotonically increasing (this is checked; a ValueError is raised otherwise).
C : array of float, (`M + deg - 1`,)
The coefficients applied to the basis functions.
deg : nonnegative int
The polynomial degree to use.
x : array of float, (`N`,)
The locations to evaluate the spline at.
cov_C : array of float, (`M + deg - 1`,) or (`M + deg - 1`, `M + deg - 1`), optional
The covariance matrix of the coefficients. If a 1d array is passed, this
is treated as the variance. If None, then the uncertainty is not
computed.
M_spline : bool, optional
If True, compute the M-spline instead of the B-spline. M-splines are
normalized to integrate to unity, as opposed to B-splines which sum to
unity at all points. Default is False (compute B-spline).
I_spline : bool, optional
If True, compute the I-spline instead of the B-spline. Note that this
will override `M_spline`. I-splines are the integrals of the M-splines,
and hence ensure curves are monotonic if all coefficients are of the
same sign. Note that the I-splines returned will be of polynomial degree
`deg` (i.e., the integral of what is returned from calling the function
with `deg=deg-1` and `M_spline=True`). Default is False (compute B-spline
or M-spline).
n : int, optional
The derivative order to compute. Default is 0. If `n>d`, all zeros are
returned (i.e., the discontinuities are not included).
Returns
-------
`y` or (`y`, `cov_y`): The values (and possibly uncertainties) of the spline
at the specified locations.
def spev(t_int, C, deg, x, cov_C=None, M_spline=False, I_spline=False, n=0):
    """Evaluate a B-, M- or I-spline with the specified internal knots, order and coefficients.

    `deg` boundary knots are appended at both sides of the domain.
    The zeroth order basis functions are modified to ensure continuity at the
    right-hand boundary.

    Note that the I-splines include the :math:`i=0` case in order to have a "DC
    offset". This way your functions do not have to start at zero. If you want
    to not include this, simply set the first coefficient in `C` to zero.

    Parameters
    ----------
    t_int : array of float, (`M`,)
        The internal knot locations. Must be monotonically increasing (this
        is checked; a ValueError is raised otherwise).
    C : array of float, (`M + deg - 1`,)
        The coefficients applied to the basis functions.
    deg : nonnegative int
        The polynomial degree to use.
    x : array of float, (`N`,)
        The locations to evaluate the spline at.
    cov_C : array of float, (`M + deg - 1`,) or (`M + deg - 1`, `M + deg - 1`), optional
        The covariance matrix of the coefficients. If a 1d array is passed, this
        is treated as the variance. If None, then the uncertainty is not
        computed.
    M_spline : bool, optional
        If True, compute the M-spline instead of the B-spline. M-splines are
        normalized to integrate to unity, as opposed to B-splines which sum to
        unity at all points. Default is False (compute B-spline).
    I_spline : bool, optional
        If True, compute the I-spline instead of the B-spline. Note that this
        will override `M_spline`. I-splines are the integrals of the M-splines,
        and hence ensure curves are monotonic if all coefficients are of the
        same sign. Note that the I-splines returned will be of polynomial degree
        `deg` (i.e., the integral of what is returned from calling the function
        with `deg=deg-1` and `M_spline=True`). Default is False (compute
        B-spline or M-spline).
    n : int, optional
        The derivative order to compute. Default is 0. If `n>d`, all zeros are
        returned (i.e., the discontinuities are not included).

    Returns
    -------
    `y` or (`y`, `cov_y`): The values (and possibly uncertainties) of the spline
    at the specified locations.

    Raises
    ------
    ValueError
        If the knots are not in increasing order, or if `C` has the wrong
        length for the given knots and degree.
    """
    # The array helpers previously reached through the top-level ``scipy``
    # namespace (scipy.asarray, scipy.sort, ...) were deprecated aliases of
    # numpy functions and have been removed from SciPy; use numpy directly.
    # Local import so the function is self-contained.
    import numpy as np
    C = np.asarray(C, dtype=float)
    t_int = np.asarray(t_int, dtype=float)
    # Accept plain Python sequences for x as well as arrays.
    x = np.asarray(x, dtype=float)
    if (t_int != np.sort(t_int)).any():
        raise ValueError("Knots must be in increasing order!")
    if n > deg:
        # Derivatives beyond the polynomial degree are identically zero
        # (the delta-function discontinuities are not represented).
        return np.zeros_like(x, dtype=float)
    if I_spline:
        # I_{i,k} = int_L^x M_{i,k}(u)du, so just take the derivative of the
        # underlying M-spline. Discarding the first coefficient dumps the "DC
        # offset" term.
        if cov_C is not None:
            cov_C = np.asarray(cov_C)
            if cov_C.ndim == 1:
                cov_C = cov_C[1:]
            elif cov_C.ndim == 2:
                cov_C = cov_C[1:, 1:]
        if n > 0:
            return spev(
                t_int, C[1:], deg - 1, x,
                cov_C=cov_C, M_spline=True, I_spline=False, n=n - 1
            )
        M_spline = True
    if n > 0:
        # Express the derivative as a degree-(deg - 1) spline with adjusted
        # coefficients and recurse for higher derivative orders.
        if M_spline:
            t = np.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
            C = (deg + 1.0) * (
                C[1:] / (t[deg + 2:len(t_int) + 2 * deg] - t[1:len(t_int) + deg - 1]) -
                C[:-1] / (t[deg + 1:len(t_int) + 2 * deg - 1] - t[:len(t_int) + deg - 2])
            )
        else:
            C = C[1:] - C[:-1]
        # NOTE(review): cov_C is passed through untransformed here, so
        # derivative uncertainties do not reflect the coefficient mapping
        # above — confirm whether that is intentional.
        return spev(
            t_int, C, deg - 1, x,
            cov_C=cov_C, M_spline=True, I_spline=False, n=n - 1
        )
    if len(C) != len(t_int) + deg - 1:
        raise ValueError("Length of C must be equal to M + deg - 1!")
    # Append the external knots directly at the boundary:
    t = np.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
    # Compute the different orders:
    B = np.zeros((deg + 1, len(t) - 1, len(x)))
    # NOTE: The first dimension is indexed by deg, and is zero-indexed.
    # Zeroth order: constant function
    d = 0
    for i in range(deg, deg + len(t_int) - 2 + 1):
        # The second condition contains a hack to make the basis functions
        # continuous at the right-hand edge.
        mask = (t[i] <= x) & (
            (x < t[i + 1]) | ((i == deg + len(t_int) - 2) & (x == t[-1]))
        )
        B[d, i, mask] = 1.0 / (t[i + 1] - t[i]) if M_spline else 1.0
    # Loop over other orders (Cox-de Boor recursion):
    for d in range(1, deg + 1):
        for i in range(deg - d, deg + len(t_int) - 2 + 1):
            if t[i + d] != t[i]:
                v = (x - t[i]) * B[d - 1, i, :]
                if not M_spline:
                    v /= t[i + d] - t[i]
                B[d, i, :] += v
            if t[i + d + 1] != t[i + 1]:
                v = (t[i + d + 1] - x) * B[d - 1, i + 1, :]
                if not M_spline:
                    v /= t[i + d + 1] - t[i + 1]
                B[d, i, :] += v
            if M_spline and ((t[i + d] != t[i]) or (t[i + d + 1] != t[i + 1])):
                B[d, i, :] *= (d + 1) / (d * (t[i + d + 1] - t[i]))
    B = B[deg, 0:len(C), :].T
    # Now compute the I-splines, if needed:
    if I_spline:
        I = np.zeros_like(B)
        for i in range(0, len(C)):
            for m in range(i, len(C)):
                I[:, i] += (t[m + deg + 1] - t[m]) * B[:, m] / (deg + 1.0)
        B = I
    y = B.dot(C)
    if cov_C is not None:
        cov_C = np.asarray(cov_C)
        # If there are no covariances, promote cov_C to a diagonal matrix
        if cov_C.ndim == 1:
            cov_C = np.diag(cov_C)
        cov_y = B.dot(cov_C).dot(B.T)
        return (y, cov_y)
    else:
        return y
Build an argparse argument parser to parse the command line.
def parser(subparsers):
    """Register the 'version' subcommand on *subparsers* and return its parser."""
    version_parser = subparsers.add_parser(
        'version',
        help="Output the version of %(prog)s to the console.")
    # Dispatch to command_version when this subcommand is selected.
    version_parser.set_defaults(func=command_version)
    return version_parser
Find the specified Crispy FormHelper and instantiate it.
Handy when you are crispyifying other apps' forms.
def wafer_form_helper(context, helper_name):
    """Instantiate the Crispy FormHelper named by the dotted path *helper_name*,
    passing it the current request.

    Handy when you are crispyifying other apps' forms.
    """
    request = context.request
    module_name, class_name = helper_name.rsplit('.', 1)
    # Import the module lazily if it has not been loaded yet.
    if module_name not in sys.modules:
        __import__(module_name)
    helper_class = getattr(sys.modules[module_name], class_name)
    return helper_class(request=request)
Add page menus.
def page_menus(root_menu):
    """Add menu entries for all pages flagged for menu inclusion.

    Pages with a single-element path become top-level items; deeper pages
    are attached under the submenu named by the first path element. A bad
    menu item is logged and skipped rather than aborting menu generation.
    """
    for page in Page.objects.filter(include_in_menu=True):
        path = page.get_path()
        menu = path[0] if len(path) > 1 else None
        try:
            root_menu.add_item(page.name, page.get_absolute_url(), menu=menu)
        except MenuError as e:
            # Use lazy %-style logging args instead of eager interpolation:
            # the message is only formatted if the record is emitted.
            logger.error("Bad menu item %r for page with slug %r.",
                         e, page.slug)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.