metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "parse_pipeline_graph.py",
"repo_name": "CMB-S4/spt3g_software",
"repo_path": "spt3g_software_extracted/spt3g_software-master/core/python/parse_pipeline_graph.py",
"type": "Python"
}
|
import json
def uniquify_list(seq):
    """Return the items of *seq* in first-seen order with duplicates removed.

    Uses list membership (not a set) so unhashable items are supported.
    """
    unique = []
    for item in seq:
        if item in unique:
            continue
        unique.append(item)
    return unique
def frame_type_str_to_color(s):
    """Map a frame-type name to a matplotlib single-letter color code.

    Parameters
    ----------
    s : str
        Frame type name, e.g. 'Scan', 'Map', 'Calibration'.

    Returns
    -------
    str
        A one-character matplotlib color specifier.

    Raises
    ------
    ValueError
        If `s` is not a recognized frame type.  (Previously this was an
        ``assert``, which is silently stripped when Python runs with -O.)
    """
    cmap = {'Timepoint': 'm',
            'Housekeeping': 'y',
            'Scan': 'r',
            'Map': 'g',
            'InstrumentStatus': 'k',
            'Wiring': 'c',
            'Calibration': 'b',
            'EndProcessing': 'k',
            'None': 'k',
            }
    if s not in cmap:
        raise ValueError("unknown frame type: %r" % (s,))
    return cmap[s]
def plot_frame_processing_info(g3_pipeline):
    """Visualize how frames flow through the modules of a pipeline.

    Parses the JSON graph info reported by ``g3_pipeline.GetGraphInfo()``
    and plots, for each frame, its path through the pipeline modules over
    (virtual) time, color-coded by frame type.

    Parameters
    ----------
    g3_pipeline :
        Pipeline object exposing ``GetGraphInfo()``.  The JSON payload is
        expected to contain 'Processing List' (rows of
        [time, module, frame, type]) and 'Module List' (rows of
        [position, name]).
    """
    import matplotlib.pyplot as plt
    plt.clf()
    parsed_output = json.loads(g3_pipeline.GetGraphInfo())
    # zip() returns an iterator on Python 3; materialize it so the
    # transposed columns can be indexed (the old code crashed here on py3).
    proc_lst_trans = list(zip(*parsed_output['Processing List']))
    ptime = proc_lst_trans[0]
    pmod = proc_lst_trans[1]
    pframe = proc_lst_trans[2]
    ptype = proc_lst_trans[3]
    n_frames = max(pframe) + 1
    n_times = max(ptime) + 1
    n_mods = max(pmod) + 1
    # Build the legend from proxy artists: empty plots carrying the labels.
    unique_ptypes = uniquify_list(ptype)
    colors = [frame_type_str_to_color(t) for t in unique_ptypes]
    labels = unique_ptypes
    for c, l in zip(colors, labels):
        plt.plot([], [], color=c, label=l, linewidth=3)
    plt.legend(labels)
    # Per-frame state: last module position and last (virtual) time seen.
    frame_mod_pos = {}
    frame_prev_time = {}
    # Plot the data flow.
    for i in range(len(ptime)):
        if pframe[i] not in frame_mod_pos:
            frame_mod_pos[pframe[i]] = pmod[i] - 1
        prev_mod = frame_mod_pos[pframe[i]]
        if pframe[i] in frame_prev_time:
            prev_time = frame_prev_time[pframe[i]]
        else:
            prev_time = ptime[i]
        cur_mod = pmod[i]
        cur_time = ptime[i]
        frame_id = pframe[i]
        # Offset each frame's track slightly so concurrent frames do not
        # overlap visually.
        x_pos = lambda mod: mod - float(frame_id + 1) / (3 * float(n_frames + 1)) + .66
        # Dashed vertical segment: frame waiting at the same module.
        plt.plot([x_pos(prev_mod), x_pos(prev_mod)],
                 [prev_time, cur_time],
                 color=frame_type_str_to_color(ptype[i]),
                 linestyle='--'
                 )
        # Solid segment: frame advancing to its next module.
        plt.plot([x_pos(prev_mod), x_pos(cur_mod)],
                 [cur_time, cur_time + 1],
                 color=frame_type_str_to_color(ptype[i]),
                 linewidth=2.0,
                 marker='.'
                 )
        frame_mod_pos[pframe[i]] = cur_mod
        frame_prev_time[pframe[i]] = cur_time + 1
    # Module boundary lines and x-axis labels.
    mod_x_ticks = []
    mod_x_labels = []
    module_stripper = lambda mods_name: mods_name[mods_name.rfind('.') + 1:]
    for p in parsed_output["Module List"]:
        plt.axvline(p[0], linewidth=2, linestyle='-.', color='k')
        mod_x_ticks.append(p[0] + .5)
        mod_x_labels.append(module_stripper(p[1]))
    plt.xticks(mod_x_ticks, mod_x_labels, rotation=-7)
    # Cosmetic formatting: hide y ticks and invert the time axis so time
    # increases downward.
    plt.gca().get_yaxis().set_ticks([])
    plt.ylim(n_times + .5, -0.5)
    plt.xlim(0, n_mods)
    plt.ylabel("Time\n$\\Longleftarrow$")
    plt.xlabel("Module $\\Longrightarrow$")
    plt.show()
|
CMB-S4REPO_NAMEspt3g_softwarePATH_START.@spt3g_software_extracted@spt3g_software-master@core@python@parse_pipeline_graph.py@.PATH_END.py
|
{
"filename": "elementstyling.py",
"repo_name": "AishwaryaC26/RIS-Vis",
"repo_path": "RIS-Vis_extracted/RIS-Vis-main/app/elementstyling.py",
"type": "Python"
}
|
### Styling for all elements ###
# Shared inline-style dictionaries (CSS properties in camelCase) applied to
# UI components elsewhere in the app.  All spacing uses a uniform 25px gutter.

# Uniform margin on all four sides.
MARGIN_STYLE = {'margin': '25px'}
# Left/right gutters for half-width cards.
CARD_HALF_WIDTH_LEFT = {'marginLeft': '25px'}
CARD_HALF_WIDTH_RIGHT = {'marginRight': '25px'}
# Half-width cards with vertical spacing above and below.
CARD_HALF_WIDTH_LEFT_DOWNUP = {'marginLeft': '25px', 'marginTop': '25px', 'marginBottom': '25px'}
CARD_HALF_WIDTH_RIGHT_DOWNUP = {'marginRight': '25px', 'marginTop': '25px', 'marginBottom': '25px'}
# Half-width cards with spacing below only.
CARD_HALF_WIDTH_LEFT_DOWN = {'marginLeft': '25px', 'marginBottom': '25px'}
CARD_HALF_WIDTH_RIGHT_DOWN = {'marginRight': '25px', 'marginBottom': '25px'}
# Vertical spacing only (no horizontal gutter).
CARD_HALF_WIDTH_DOWNUP = {'marginTop': '25px', 'marginBottom': '25px'}
# Dark dropdown with white text.
DROPDOWN_STYLING = {"backgroundColor": "#303030", "color": "white"}
# Transparent image container with a small margin.
IMG_STYLING = {"backgroundColor": "transparent", "margin": "5px"}
|
AishwaryaC26REPO_NAMERIS-VisPATH_START.@RIS-Vis_extracted@RIS-Vis-main@app@elementstyling.py@.PATH_END.py
|
{
"filename": "axisman_util.py",
"repo_name": "simonsobs/sotodlib",
"repo_path": "sotodlib_extracted/sotodlib-master/sotodlib/core/axisman_util.py",
"type": "Python"
}
|
"""
Various AxisManager utility functions
"""
import numpy as np
from sotodlib.core import AxisManager
class RestrictionException(Exception):
    """Raised when an AxisManager cannot be restricted as requested
    (e.g. no samples fall inside the requested time window)."""
def restrict_to_times(am, t0, t1, in_place=False):
    """
    Restrict *am* to the samples whose timestamps lie in [t0, t1).

    Raises RestrictionException when no timestamps fall in the window.
    """
    mask = (am.timestamps >= t0) & (am.timestamps < t1)
    if not np.any(mask):
        raise RestrictionException()
    hits = np.where(mask)[0]
    start = hits[0] + am.samps.offset
    stop = hits[-1] + am.samps.offset + 1
    return am.restrict('samps', (start, stop), in_place=in_place)
def dict_to_am(d, skip_bad_types=False):
    """
    Attempts to convert a dictionary into an AxisManager. This can be used on
    dicts containing basic types such as (str, int, float) along with numpy
    arrays. The AxisManager will not have any structure such as "axes", but
    this is useful if you want to nest semi-arbitrary data such as the "meta"
    dict into an AxisManager.

    Args
    -----
    d : dict
        Dict to convert to an AxisManager.
    skip_bad_types : bool
        If True, will skip any value that is not a str, int, float, or
        np.ndarray. If False, this will raise an error for invalid types.
        Applies recursively to nested dicts.
    """
    allowed_types = (str, int, float, np.ndarray)
    am = AxisManager()
    for k, v in d.items():
        if isinstance(v, dict):
            # Propagate skip_bad_types so nested dicts honor the same policy.
            # (Previously the recursive call always used the default False,
            # so bad values inside sub-dicts raised even with
            # skip_bad_types=True.)
            am.wrap(k, dict_to_am(v, skip_bad_types=skip_bad_types))
        elif isinstance(v, allowed_types):
            am.wrap(k, v)
        elif not skip_bad_types:
            raise ValueError(
                f"Key {k} is of type {type(v)} which cannot be wrapped by an "
                "axismanager")
    return am
|
simonsobsREPO_NAMEsotodlibPATH_START.@sotodlib_extracted@sotodlib-master@sotodlib@core@axisman_util.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/coloraxis/colorbar/tickfont/__init__.py",
"type": "Python"
}
|
# Validator re-exports for plotly's layout.coloraxis.colorbar.tickfont
# namespace (auto-generated module layout used across the validators tree).
import sys
from typing import TYPE_CHECKING

if sys.version_info < (3, 7) or TYPE_CHECKING:
    # Eager imports: required on Python < 3.7 (no module-level __getattr__,
    # PEP 562) and kept for static type checkers.
    from ._weight import WeightValidator
    from ._variant import VariantValidator
    from ._textcase import TextcaseValidator
    from ._style import StyleValidator
    from ._size import SizeValidator
    from ._shadow import ShadowValidator
    from ._lineposition import LinepositionValidator
    from ._family import FamilyValidator
    from ._color import ColorValidator
else:
    # Lazy imports via PEP 562 (__getattr__/__dir__) — submodules are only
    # imported when first accessed.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._weight.WeightValidator",
            "._variant.VariantValidator",
            "._textcase.TextcaseValidator",
            "._style.StyleValidator",
            "._size.SizeValidator",
            "._shadow.ShadowValidator",
            "._lineposition.LinepositionValidator",
            "._family.FamilyValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@coloraxis@colorbar@tickfont@__init__.py@.PATH_END.py
|
{
"filename": "_odrpack.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/odr/_odrpack.py",
"type": "Python"
}
|
"""
Python wrappers for Orthogonal Distance Regression (ODRPACK).
Notes
=====
* Array formats -- FORTRAN stores its arrays in memory column first, i.e., an
array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
efficiency and convenience, the input and output arrays of the fitting
function (and its Jacobians) are passed to FORTRAN without transposition.
Therefore, where the ODRPACK documentation says that the X array is of shape
(N, M), it will be passed to the Python function as an array of shape (M, N).
If M==1, the 1-D case, then nothing matters; if M>1, then your
Python functions will be dealing with arrays that are indexed in reverse of
the ODRPACK documentation. No real issue, but watch out for your indexing of
the Jacobians: the i,jth elements (@f_i/@x_j) evaluated at the nth
observation will be returned as jacd[j, i, n]. Except for the Jacobians, it
really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,
you can always use the transpose() function from SciPy explicitly.
* Examples -- See the accompanying file test/test.py for examples of how to set
up fits of your own. Some are taken from the User's Guide; some are from
other sources.
* Models -- Some common models are instantiated in the accompanying module
models.py . Contributions are welcome.
Credits
=======
* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.
Robert Kern
robert.kern@gmail.com
"""
import os
from threading import Lock
import numpy as np
from warnings import warn
from scipy.odr import __odrpack
# Public API of this module; 'odr_error'/'odr_stop' are backward-compatibility
# aliases defined below.
__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop',
           'Data', 'RealData', 'Model', 'Output', 'ODR',
           'odr_error', 'odr_stop']

# The low-level fitting routine from the compiled extension module.
odr = __odrpack.odr

# NOTE(review): presumably guards concurrent calls into the non-reentrant
# ODRPACK core; its use site is elsewhere in this module.
ODR_LOCK = Lock()
class OdrWarning(UserWarning):
    """
    Warning indicating that the data passed into
    ODR will cause problems when passed into 'odr'
    that the user should be aware of.
    """
    # A warning (not an exception): the fit is still attempted.
    pass
class OdrError(Exception):
    """
    Exception indicating an error in fitting.

    This is raised by `~scipy.odr.odr` if an error occurs during fitting.
    """
    # Registered with the extension module via __odrpack._set_exceptions.
    pass
class OdrStop(Exception):
    """
    Exception stopping fitting.

    You can raise this exception in your objective function to tell
    `~scipy.odr.odr` to stop fitting.
    """
    # Registered with the extension module via __odrpack._set_exceptions.
    pass
# Backwards compatibility: old lowercase names kept as aliases.
odr_error = OdrError
odr_stop = OdrStop

# Tell the extension module which Python exception classes to raise.
__odrpack._set_exceptions(OdrError, OdrStop)
def _conv(obj, dtype=None):
""" Convert an object to the preferred form for input to the odr routine.
"""
if obj is None:
return obj
else:
if dtype is None:
obj = np.asarray(obj)
else:
obj = np.asarray(obj, dtype)
if obj.shape == ():
# Scalar.
return obj.dtype.type(obj)
else:
return obj
def _report_error(info):
""" Interprets the return code of the odr routine.
Parameters
----------
info : int
The return code of the odr routine.
Returns
-------
problems : list(str)
A list of messages about why the odr() routine stopped.
"""
stopreason = ('Blank',
'Sum of squares convergence',
'Parameter convergence',
'Both sum of squares and parameter convergence',
'Iteration limit reached')[info % 5]
if info >= 5:
# questionable results or fatal error
I = (info//10000 % 10,
info//1000 % 10,
info//100 % 10,
info//10 % 10,
info % 10)
problems = []
if I[0] == 0:
if I[1] != 0:
problems.append('Derivatives possibly not correct')
if I[2] != 0:
problems.append('Error occurred in callback')
if I[3] != 0:
problems.append('Problem is not full rank at solution')
problems.append(stopreason)
elif I[0] == 1:
if I[1] != 0:
problems.append('N < 1')
if I[2] != 0:
problems.append('M < 1')
if I[3] != 0:
problems.append('NP < 1 or NP > N')
if I[4] != 0:
problems.append('NQ < 1')
elif I[0] == 2:
if I[1] != 0:
problems.append('LDY and/or LDX incorrect')
if I[2] != 0:
problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
if I[3] != 0:
problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
if I[4] != 0:
problems.append('LWORK and/or LIWORK too small')
elif I[0] == 3:
if I[1] != 0:
problems.append('STPB and/or STPD incorrect')
if I[2] != 0:
problems.append('SCLB and/or SCLD incorrect')
if I[3] != 0:
problems.append('WE incorrect')
if I[4] != 0:
problems.append('WD incorrect')
elif I[0] == 4:
problems.append('Error in derivatives')
elif I[0] == 5:
problems.append('Error occurred in callback')
elif I[0] == 6:
problems.append('Numerical error detected')
return problems
else:
return [stopreason]
class Data:
    """
    The data to fit.

    Parameters
    ----------
    x : array_like
        Observed data for the independent variable of the regression
    y : array_like, optional
        If array-like, observed data for the dependent variable of the
        regression. A scalar input implies that the model to be used on
        the data is implicit.
    we : array_like, optional
        If `we` is a scalar, then that value is used for all data points (and
        all dimensions of the response variable).
        If `we` is a rank-1 array of length q (the dimensionality of the
        response variable), then this vector is the diagonal of the covariant
        weighting matrix for all data points.
        If `we` is a rank-1 array of length n (the number of data points), then
        the i'th element is the weight for the i'th response variable
        observation (single-dimensional only).
        If `we` is a rank-2 array of shape (q, q), then this is the full
        covariant weighting matrix broadcast to each observation.
        If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
        diagonal of the covariant weighting matrix for the i'th observation.
        If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
        full specification of the covariant weighting matrix for each
        observation.
        If the fit is implicit, then only a positive scalar value is used.
    wd : array_like, optional
        If `wd` is a scalar, then that value is used for all data points
        (and all dimensions of the input variable). If `wd` = 0, then the
        covariant weighting matrix for each observation is set to the identity
        matrix (so each dimension of each observation has the same weight).
        If `wd` is a rank-1 array of length m (the dimensionality of the input
        variable), then this vector is the diagonal of the covariant weighting
        matrix for all data points.
        If `wd` is a rank-1 array of length n (the number of data points), then
        the i'th element is the weight for the ith input variable observation
        (single-dimensional only).
        If `wd` is a rank-2 array of shape (m, m), then this is the full
        covariant weighting matrix broadcast to each observation.
        If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
        diagonal of the covariant weighting matrix for the ith observation.
        If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
        full specification of the covariant weighting matrix for each
        observation.
    fix : array_like of ints, optional
        The `fix` argument is the same as ifixx in the class ODR. It is an
        array of integers with the same shape as data.x that determines which
        input observations are treated as fixed. One can use a sequence of
        length m (the dimensionality of the input observations) to fix some
        dimensions for all observations. A value of 0 fixes the observation,
        a value > 0 makes it free.
    meta : dict, optional
        Free-form dictionary for metadata.

    Notes
    -----
    Each argument is attached to the member of the instance of the same name.
    The structures of `x` and `y` are described in the Model class docstring.
    If `y` is an integer, then the Data instance can only be used to fit with
    implicit models where the dimensionality of the response is equal to the
    specified value of `y`.

    The `we` argument weights the effect a deviation in the response variable
    has on the fit. The `wd` argument weights the effect a deviation in the
    input variable has on the fit. To handle multidimensional inputs and
    responses easily, the structure of these arguments has the n'th
    dimensional axis first. These arguments heavily use the structured
    arguments feature of ODRPACK to conveniently and flexibly support all
    options. See the ODRPACK User's Guide for a full explanation of how these
    weights are used in the algorithm. Basically, a higher value of the weight
    for a particular data point makes a deviation at that point more
    detrimental to the fit.
    """
    def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None):
        # `x` is mandatory and must coerce to an ndarray; the remaining
        # arguments may legitimately stay None.
        self.x = _conv(x)
        if not isinstance(self.x, np.ndarray):
            raise ValueError("Expected an 'ndarray' of data for 'x', "
                             f"but instead got data of type '{type(self.x).__name__}'")
        self.y = _conv(y)
        self.we = _conv(we)
        self.wd = _conv(wd)
        self.fix = _conv(fix)
        # `meta` must always be assigned (even when empty) because
        # __getattr__ below consults it for unknown attributes.
        self.meta = {} if meta is None else meta

    def set_meta(self, **kwds):
        """ Update the metadata dictionary with the keywords and data provided
        by keywords.

        Examples
        --------
        ::

            data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay")
        """
        self.meta.update(kwds)

    def __getattr__(self, attr):
        """ Dispatch attribute access to the metadata dictionary.
        """
        # Only reached for attributes not found normally; `attr != "meta"`
        # prevents infinite recursion if meta has not been assigned yet.
        if attr != "meta" and attr in self.meta:
            return self.meta[attr]
        else:
            raise AttributeError(f"'{attr}' not in metadata")
class RealData(Data):
    """
    The data, with weightings as actual standard deviations and/or
    covariances.

    Parameters
    ----------
    x : array_like
        Observed data for the independent variable of the regression
    y : array_like, optional
        If array-like, observed data for the dependent variable of the
        regression. A scalar input implies that the model to be used on
        the data is implicit.
    sx : array_like, optional
        Standard deviations of `x`.
        `sx` are standard deviations of `x` and are converted to weights by
        dividing 1.0 by their squares.
    sy : array_like, optional
        Standard deviations of `y`.
        `sy` are standard deviations of `y` and are converted to weights by
        dividing 1.0 by their squares.
    covx : array_like, optional
        Covariance of `x`
        `covx` is an array of covariance matrices of `x` and are converted to
        weights by performing a matrix inversion on each observation's
        covariance matrix.
    covy : array_like, optional
        Covariance of `y`
        `covy` is an array of covariance matrices and are converted to
        weights by performing a matrix inversion on each observation's
        covariance matrix.
    fix : array_like, optional
        The argument and member fix is the same as Data.fix and ODR.ifixx:
        It is an array of integers with the same shape as `x` that
        determines which input observations are treated as fixed. One can
        use a sequence of length m (the dimensionality of the input
        observations) to fix some dimensions for all observations. A value
        of 0 fixes the observation, a value > 0 makes it free.
    meta : dict, optional
        Free-form dictionary for metadata.

    Notes
    -----
    The weights `wd` and `we` are computed from provided values as follows:

    `sx` and `sy` are converted to weights by dividing 1.0 by their squares.
    For example, ``wd = 1./np.power(`sx`, 2)``.

    `covx` and `covy` are arrays of covariance matrices and are converted to
    weights by performing a matrix inversion on each observation's covariance
    matrix. For example, ``we[i] = np.linalg.inv(covy[i])``.

    These arguments follow the same structured argument conventions as wd and
    we only restricted by their natures: `sx` and `sy` can't be rank-3, but
    `covx` and `covy` can be.

    Only set *either* `sx` or `covx` (not both). Setting both will raise an
    exception. Same with `sy` and `covy`.
    """
    def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None,
                 fix=None, meta=None):
        if (sx is not None) and (covx is not None):
            raise ValueError("cannot set both sx and covx")
        if (sy is not None) and (covy is not None):
            raise ValueError("cannot set both sy and covy")
        # Set flags for __getattr__: record whether the lazily computed
        # `wd`/`we` should come from the standard deviations or the
        # covariances.
        self._ga_flags = {}
        if sx is not None:
            self._ga_flags['wd'] = 'sx'
        else:
            self._ga_flags['wd'] = 'covx'
        if sy is not None:
            self._ga_flags['we'] = 'sy'
        else:
            self._ga_flags['we'] = 'covy'
        self.x = _conv(x)
        if not isinstance(self.x, np.ndarray):
            raise ValueError("Expected an 'ndarray' of data for 'x', "
                             f"but instead got data of type '{type(self.x).__name__}'")
        self.y = _conv(y)
        self.sx = _conv(sx)
        self.sy = _conv(sy)
        self.covx = _conv(covx)
        self.covy = _conv(covy)
        self.fix = _conv(fix)
        self.meta = {} if meta is None else meta

    def _sd2wt(self, sd):
        """ Convert standard deviation to weights.
        """
        return 1./np.power(sd, 2)

    def _cov2wt(self, cov):
        """ Convert covariance matrix(-ices) to weights.
        """
        from scipy.linalg import inv
        if len(cov.shape) == 2:
            # A single matrix, broadcast over all observations.
            return inv(cov)
        else:
            # One covariance matrix per observation, stacked along the
            # last axis; invert each slice independently.
            weights = np.zeros(cov.shape, float)
            for i in range(cov.shape[-1]):  # n
                weights[:,:,i] = inv(cov[:,:,i])
            return weights

    def __getattr__(self, attr):
        # `wd` and `we` are never stored on the instance: they are computed
        # on demand from whichever of (sx, covx) / (sy, covy) was supplied.
        # All other unknown attributes fall through to the metadata dict,
        # as in Data.__getattr__.
        if attr not in ('wd', 'we'):
            if attr != "meta" and attr in self.meta:
                return self.meta[attr]
            else:
                raise AttributeError(f"'{attr}' not in metadata")
        else:
            lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx),
                          ('wd', 'covx'): (self._cov2wt, self.covx),
                          ('we', 'sy'): (self._sd2wt, self.sy),
                          ('we', 'covy'): (self._cov2wt, self.covy)}
            func, arg = lookup_tbl[(attr, self._ga_flags[attr])]
            if arg is not None:
                return func(*(arg,))
            else:
                return None
class Model:
    """
    The Model class stores information about the function you wish to fit.

    It stores the function itself, at the least, and optionally stores
    functions which compute the Jacobians used during fitting. Also, one
    can provide a function that will provide reasonable starting values
    for the fit parameters possibly given the set of data.

    Parameters
    ----------
    fcn : function
        fcn(beta, x) --> y
    fjacb : function
        Jacobian of fcn wrt the fit parameters beta.
        fjacb(beta, x) --> @f_i(x,B)/@B_j
    fjacd : function
        Jacobian of fcn wrt the (possibly multidimensional) input
        variable.
        fjacd(beta, x) --> @f_i(x,B)/@x_j
    extra_args : tuple, optional
        If specified, `extra_args` should be a tuple of extra
        arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called
        by `apply(fcn, (beta, x) + extra_args)`
    estimate : array_like of rank-1
        Provides estimates of the fit parameters from the data
        estimate(data) --> estbeta
    implicit : boolean
        If TRUE, specifies that the model
        is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit
        against
    meta : dict, optional
        freeform dictionary of metadata for the model

    Notes
    -----
    Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and
    return a NumPy array. The `estimate` object takes an instance of the
    Data class.

    Here are the rules for the shapes of the argument and return
    arrays of the callback functions:

    `x`
        if the input data is single-dimensional, then `x` is rank-1
        array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)``
        If the input data is multi-dimensional, then `x` is a rank-2 array;
        i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``.
        In all cases, it has the same shape as the input data array passed to
        `~scipy.odr.odr`. `m` is the dimensionality of the input data,
        `n` is the number of observations.
    `y`
        if the response variable is single-dimensional, then `y` is a
        rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``.
        If the response variable is multi-dimensional, then `y` is a rank-2
        array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape =
        (q, n)`` where `q` is the dimensionality of the response variable.
    `beta`
        rank-1 array of length `p` where `p` is the number of parameters;
        i.e. ``beta = array([B_1, B_2, ..., B_p])``
    `fjacb`
        if the response variable is multi-dimensional, then the
        return array's shape is ``(q, p, n)`` such that ``fjacb(x,beta)[l,k,i] =
        d f_l(X,B)/d B_k`` evaluated at the ith data point. If ``q == 1``, then
        the return array is only rank-2 and with shape ``(p, n)``.
    `fjacd`
        as with fjacb, only the return array's shape is ``(q, m, n)``
        such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data
        point. If ``q == 1``, then the return array's shape is ``(m, n)``. If
        ``m == 1``, the shape is (q, n). If `m == q == 1`, the shape is ``(n,)``.
    """
    def __init__(self, fcn, fjacb=None, fjacd=None,
                 extra_args=None, estimate=None, implicit=0, meta=None):
        self.fcn = fcn
        self.fjacb = fjacb
        self.fjacd = fjacd
        # Normalize extra_args to a tuple so it can be concatenated onto
        # the (beta, x) argument tuple when the callbacks are invoked.
        if extra_args is not None:
            extra_args = tuple(extra_args)
        self.extra_args = extra_args
        self.estimate = estimate
        self.implicit = implicit
        self.meta = meta if meta is not None else {}

    def set_meta(self, **kwds):
        """ Update the metadata dictionary with the keywords and data provided
        here.

        Examples
        --------
        set_meta(name="Exponential", equation="y = a exp(b x) + c")
        """
        self.meta.update(kwds)

    def __getattr__(self, attr):
        """ Dispatch attribute access to the metadata.
        """
        # Only reached for attributes not found normally; `attr != "meta"`
        # prevents infinite recursion before meta is assigned.
        if attr != "meta" and attr in self.meta:
            return self.meta[attr]
        else:
            raise AttributeError(f"'{attr}' not in metadata")
class Output:
    """
    Container for the results of a single ODR run.

    Attributes
    ----------
    beta : ndarray
        Estimated parameter values, of shape (q,).
    sd_beta : ndarray
        Standard deviations of the estimated parameters, of shape (p,).
    cov_beta : ndarray
        Covariance matrix of the estimated parameters, of shape (p,p).
        Note that this `cov_beta` is not scaled by the residual variance
        `res_var`, whereas `sd_beta` is. This means
        ``np.sqrt(np.diag(output.cov_beta * output.res_var))`` is the same
        result as `output.sd_beta`.
    delta : ndarray, optional
        Array of estimated errors in input variables, of same shape as `x`.
    eps : ndarray, optional
        Array of estimated errors in response variables, of same shape as `y`.
    xplus : ndarray, optional
        Array of ``x + delta``.
    y : ndarray, optional
        Array ``y = fcn(x + delta)``.
    res_var : float, optional
        Residual variance.
    sum_square : float, optional
        Sum of squares error.
    sum_square_delta : float, optional
        Sum of squares of delta error.
    sum_square_eps : float, optional
        Sum of squares of eps error.
    inv_condnum : float, optional
        Inverse condition number (cf. ODRPACK UG p. 77).
    rel_error : float, optional
        Relative error in function values computed within fcn.
    work : ndarray, optional
        Final work array.
    work_ind : dict, optional
        Indices into work for drawing out values (cf. ODRPACK UG p. 83).
    info : int, optional
        Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38).
    stopreason : list of str, optional
        `info` interpreted into English.

    Notes
    -----
    Constructed from the tuple returned by `~scipy.odr.odr`. The attributes
    listed as "optional" above exist only when `~scipy.odr.odr` was run
    with ``full_output=1``.
    """
    def __init__(self, output):
        # The first three entries are always present.
        self.beta, self.sd_beta, self.cov_beta = output[:3]
        if len(output) == 4:
            # Full output: the fourth entry is a dict of extra results.
            # Expose each key directly as an instance attribute.
            self.__dict__.update(output[3])
            self.stopreason = _report_error(self.info)

    def pprint(self):
        """ Pretty-print important results.
        """
        print('Beta:', self.beta)
        print('Beta Std Error:', self.sd_beta)
        print('Beta Covariance:', self.cov_beta)
        # `info` (and friends) exist only for full_output runs.
        if hasattr(self, 'info'):
            print('Residual Variance:', self.res_var)
            print('Inverse Condition #:', self.inv_condnum)
            print('Reason(s) for Halting:')
            for reason in self.stopreason:
                print(f'  {reason}')
class ODR:
"""
The ODR class gathers all information and coordinates the running of the
main fitting routine.
Members of instances of the ODR class have the same names as the arguments
to the initialization routine.
Parameters
----------
data : Data class instance
instance of the Data class
model : Model class instance
instance of the Model class
Other Parameters
----------------
beta0 : array_like of rank-1
a rank-1 sequence of initial parameter values. Optional if
model provides an "estimate" function to estimate these values.
delta0 : array_like of floats of rank-1, optional
a (double-precision) float array to hold the initial values of
the errors in the input variables. Must be same shape as data.x
ifixb : array_like of ints of rank-1, optional
sequence of integers with the same length as beta0 that determines
which parameters are held fixed. A value of 0 fixes the parameter,
a value > 0 makes the parameter free.
ifixx : array_like of ints with same shape as data.x, optional
an array of integers with the same shape as data.x that determines
which input observations are treated as fixed. One can use a sequence
of length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
job : int, optional
an integer telling ODRPACK what tasks to perform. See p. 31 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_job post-initialization for a more readable interface.
iprint : int, optional
an integer telling ODRPACK what to print. See pp. 33-34 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_iprint post-initialization for a more readable interface.
errfile : str, optional
string with the filename to print ODRPACK errors to. If the file already
exists, an error will be thrown. The `overwrite` argument can be used to
prevent this. *Do Not Open This File Yourself!*
rptfile : str, optional
string with the filename to print ODRPACK summaries to. If the file
already exists, an error will be thrown. The `overwrite` argument can be
used to prevent this. *Do Not Open This File Yourself!*
ndigit : int, optional
integer specifying the number of reliable digits in the computation
of the function.
taufac : float, optional
float specifying the initial trust region. The default value is 1.
The initial trust region is equal to taufac times the length of the
first computed Gauss-Newton step. taufac must be less than 1.
sstol : float, optional
float specifying the tolerance for convergence based on the relative
change in the sum-of-squares. The default value is eps**(1/2) where eps
is the smallest value such that 1 + eps > 1 for double precision
computation on the machine. sstol must be less than 1.
partol : float, optional
float specifying the tolerance for convergence based on the relative
change in the estimated parameters. The default value is eps**(2/3) for
explicit models and ``eps**(1/3)`` for implicit models. partol must be less
than 1.
maxit : int, optional
integer specifying the maximum number of iterations to perform. For
first runs, maxit is the total number of iterations performed and
defaults to 50. For restarts, maxit is the number of additional
iterations to perform and defaults to 10.
stpb : array_like, optional
sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute
finite difference derivatives wrt the parameters.
stpd : optional
array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative
step sizes to compute finite difference derivatives wrt the input
variable errors. If stpd is a rank-1 array with length m (the
dimensionality of the input variable), then the values are broadcast to
all observations.
sclb : array_like, optional
sequence (``len(stpb) == len(beta0)``) of scaling factors for the
parameters. The purpose of these scaling factors are to scale all of
the parameters to around unity. Normally appropriate scaling factors
are computed if this argument is not specified. Specify them yourself
if the automatic procedure goes awry.
scld : array_like, optional
array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling
factors for the *errors* in the input variables. Again, these factors
are automatically computed if you do not provide them. If scld.shape ==
(m,), then the scaling factors are broadcast to all observations.
work : ndarray, optional
array to hold the double-valued working data for ODRPACK. When
restarting, takes the value of self.output.work.
iwork : ndarray, optional
array to hold the integer-valued working data for ODRPACK. When
restarting, takes the value of self.output.iwork.
overwrite : bool, optional
If it is True, output files defined by `errfile` and `rptfile` are
overwritten. The default is False.
Attributes
----------
data : Data
The data for this fit
model : Model
The model used in fit
output : Output
An instance if the Output class containing all of the returned
data from an invocation of ODR.run() or ODR.restart()
"""
def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,
ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,
ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,
stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None,
overwrite=False):
self.data = data
self.model = model
if beta0 is None:
if self.model.estimate is not None:
self.beta0 = _conv(self.model.estimate(self.data))
else:
raise ValueError(
"must specify beta0 or provide an estimator with the model"
)
else:
self.beta0 = _conv(beta0)
if ifixx is None and data.fix is not None:
ifixx = data.fix
if overwrite:
# remove output files for overwriting.
if rptfile is not None and os.path.exists(rptfile):
os.remove(rptfile)
if errfile is not None and os.path.exists(errfile):
os.remove(errfile)
self.delta0 = _conv(delta0)
# These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit
# platforms.
# XXX: some other FORTRAN compilers may not agree.
self.ifixx = _conv(ifixx, dtype=np.int32)
self.ifixb = _conv(ifixb, dtype=np.int32)
self.job = job
self.iprint = iprint
self.errfile = errfile
self.rptfile = rptfile
self.ndigit = ndigit
self.taufac = taufac
self.sstol = sstol
self.partol = partol
self.maxit = maxit
self.stpb = _conv(stpb)
self.stpd = _conv(stpd)
self.sclb = _conv(sclb)
self.scld = _conv(scld)
self.work = _conv(work)
self.iwork = _conv(iwork)
self.output = None
self._check()
    def _check(self):
        """ Check the inputs for consistency, but don't bother checking things
        that the builtin function odr will check.

        Raises OdrError on any inconsistency between data shapes, the model
        kind (implicit/explicit), and the outputs of the model callables.
        """
        x_s = list(self.data.x.shape)
        if isinstance(self.data.y, np.ndarray):
            # Response data supplied -> must be an explicit model.
            y_s = list(self.data.y.shape)
            if self.model.implicit:
                raise OdrError("an implicit model cannot use response data")
        else:
            # implicit model with q == self.data.y
            y_s = [self.data.y, x_s[-1]]
            if not self.model.implicit:
                raise OdrError("an explicit model needs response data")
            # Switch ODRPACK into implicit-ODR mode.
            self.set_job(fit_type=1)
        if x_s[-1] != y_s[-1]:
            raise OdrError("number of observations do not match")
        # n observations, m input dimensions, q responses, p fit parameters.
        n = x_s[-1]
        if len(x_s) == 2:
            m = x_s[0]
        else:
            m = 1
        if len(y_s) == 2:
            q = y_s[0]
        else:
            q = 1
        p = len(self.beta0)
        # permissible output array shapes (singleton axes may be squeezed)
        fcn_perms = [(q, n)]
        fjacd_perms = [(q, m, n)]
        fjacb_perms = [(q, p, n)]
        if q == 1:
            fcn_perms.append((n,))
            fjacd_perms.append((m, n))
            fjacb_perms.append((p, n))
        if m == 1:
            fjacd_perms.append((q, n))
        if p == 1:
            fjacb_perms.append((q, n))
        if m == q == 1:
            fjacd_perms.append((n,))
        if p == q == 1:
            fjacb_perms.append((n,))
        # try evaluating the supplied functions to make sure they provide
        # sensible outputs
        arglist = (self.beta0, self.data.x)
        if self.model.extra_args is not None:
            arglist = arglist + self.model.extra_args
        res = self.model.fcn(*arglist)
        if res.shape not in fcn_perms:
            # NOTE(review): debug prints reach stdout just before the
            # exception is raised; kept as-is to preserve behavior.
            print(res.shape)
            print(fcn_perms)
            raise OdrError(f"fcn does not output {y_s}-shaped array")
        if self.model.fjacd is not None:
            res = self.model.fjacd(*arglist)
            if res.shape not in fjacd_perms:
                raise OdrError(
                    f"fjacd does not output {repr((q, m, n))}-shaped array")
        if self.model.fjacb is not None:
            res = self.model.fjacb(*arglist)
            if res.shape not in fjacb_perms:
                raise OdrError(
                    f"fjacb does not output {repr((q, p, n))}-shaped array")
        # check shape of delta0
        if self.delta0 is not None and self.delta0.shape != self.data.x.shape:
            raise OdrError(
                f"delta0 is not a {repr(self.data.x.shape)}-shaped array")
        if self.data.x.size == 0:
            warn("Empty data detected for ODR instance. "
                 "Do not expect any fitting to occur",
                 OdrWarning, stacklevel=3)
def _gen_work(self):
""" Generate a suitable work array if one does not already exist.
"""
n = self.data.x.shape[-1]
p = self.beta0.shape[0]
if len(self.data.x.shape) == 2:
m = self.data.x.shape[0]
else:
m = 1
if self.model.implicit:
q = self.data.y
elif len(self.data.y.shape) == 2:
q = self.data.y.shape[0]
else:
q = 1
if self.data.we is None:
ldwe = ld2we = 1
elif len(self.data.we.shape) == 3:
ld2we, ldwe = self.data.we.shape[1:]
else:
we = self.data.we
ldwe = 1
ld2we = 1
if we.ndim == 1 and q == 1:
ldwe = n
elif we.ndim == 2:
if we.shape == (q, q):
ld2we = q
elif we.shape == (q, n):
ldwe = n
if self.job % 10 < 2:
# ODR not OLS
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p +
2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q)
else:
# OLS not ODR
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p +
5*q + q*(p+m) + ldwe*ld2we*q)
if isinstance(self.work, np.ndarray) and self.work.shape == (lwork,)\
and self.work.dtype.str.endswith('f8'):
# the existing array is fine
return
else:
self.work = np.zeros((lwork,), float)
def set_job(self, fit_type=None, deriv=None, var_calc=None,
del_init=None, restart=None):
"""
Sets the "job" parameter is a hopefully comprehensible way.
If an argument is not specified, then the value is left as is. The
default value from class initialization is for all of these options set
to 0.
Parameters
----------
fit_type : {0, 1, 2} int
0 -> explicit ODR
1 -> implicit ODR
2 -> ordinary least-squares
deriv : {0, 1, 2, 3} int
0 -> forward finite differences
1 -> central finite differences
2 -> user-supplied derivatives (Jacobians) with results
checked by ODRPACK
3 -> user-supplied derivatives, no checking
var_calc : {0, 1, 2} int
0 -> calculate asymptotic covariance matrix and fit
parameter uncertainties (V_B, s_B) using derivatives
recomputed at the final solution
1 -> calculate V_B and s_B using derivatives from last iteration
2 -> do not calculate V_B and s_B
del_init : {0, 1} int
0 -> initial input variable offsets set to 0
1 -> initial offsets provided by user in variable "work"
restart : {0, 1} int
0 -> fit is not a restart
1 -> fit is a restart
Notes
-----
The permissible values are different from those given on pg. 31 of the
ODRPACK User's Guide only in that one cannot specify numbers greater than
the last value for each variable.
If one does not supply functions to compute the Jacobians, the fitting
procedure will change deriv to 0, finite differences, as a default. To
initialize the input variable offsets by yourself, set del_init to 1 and
put the offsets into the "work" variable correctly.
"""
if self.job is None:
job_l = [0, 0, 0, 0, 0]
else:
job_l = [self.job // 10000 % 10,
self.job // 1000 % 10,
self.job // 100 % 10,
self.job // 10 % 10,
self.job % 10]
if fit_type in (0, 1, 2):
job_l[4] = fit_type
if deriv in (0, 1, 2, 3):
job_l[3] = deriv
if var_calc in (0, 1, 2):
job_l[2] = var_calc
if del_init in (0, 1):
job_l[1] = del_init
if restart in (0, 1):
job_l[0] = restart
self.job = (job_l[0]*10000 + job_l[1]*1000 +
job_l[2]*100 + job_l[3]*10 + job_l[4])
def set_iprint(self, init=None, so_init=None,
iter=None, so_iter=None, iter_step=None, final=None, so_final=None):
""" Set the iprint parameter for the printing of computation reports.
If any of the arguments are specified here, then they are set in the
iprint member. If iprint is not set manually or with this method, then
ODRPACK defaults to no printing. If no filename is specified with the
member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to
print to stdout in addition to the specified filename by setting the
so_* arguments to this function, but one cannot specify to print to
stdout but not a file since one can do that by not specifying a rptfile
filename.
There are three reports: initialization, iteration, and final reports.
They are represented by the arguments init, iter, and final
respectively. The permissible values are 0, 1, and 2 representing "no
report", "short report", and "long report" respectively.
The argument iter_step (0 <= iter_step <= 9) specifies how often to make
the iteration report; the report will be made for every iter_step'th
iteration starting with iteration one. If iter_step == 0, then no
iteration report is made, regardless of the other arguments.
If the rptfile is None, then any so_* arguments supplied will raise an
exception.
"""
if self.iprint is None:
self.iprint = 0
ip = [self.iprint // 1000 % 10,
self.iprint // 100 % 10,
self.iprint // 10 % 10,
self.iprint % 10]
# make a list to convert iprint digits to/from argument inputs
# rptfile, stdout
ip2arg = [[0, 0], # none, none
[1, 0], # short, none
[2, 0], # long, none
[1, 1], # short, short
[2, 1], # long, short
[1, 2], # short, long
[2, 2]] # long, long
if (self.rptfile is None and
(so_init is not None or
so_iter is not None or
so_final is not None)):
raise OdrError(
"no rptfile specified, cannot output to stdout twice")
iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]]
if init is not None:
iprint_l[0] = init
if so_init is not None:
iprint_l[1] = so_init
if iter is not None:
iprint_l[2] = iter
if so_iter is not None:
iprint_l[3] = so_iter
if final is not None:
iprint_l[4] = final
if so_final is not None:
iprint_l[5] = so_final
if iter_step in range(10):
# 0..9
ip[2] = iter_step
ip[0] = ip2arg.index(iprint_l[0:2])
ip[1] = ip2arg.index(iprint_l[2:4])
ip[3] = ip2arg.index(iprint_l[4:6])
self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3]
    def run(self):
        """ Run the fitting routine with all of the information given and with ``full_output=1``.

        Returns
        -------
        output : Output instance
            This object is also assigned to the attribute .output .
        """ # noqa: E501
        # Mandatory positional arguments of the odr() core.
        args = (self.model.fcn, self.beta0, self.data.y, self.data.x)
        kwds = {'full_output': 1}
        # Optional attributes forwarded to odr() whenever they are set.
        kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile',
                 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb',
                 'stpd', 'sclb', 'scld', 'work', 'iwork']
        # NOTE(review): assumes self.job is not None when delta0 is supplied
        # (set_job() normally guarantees this) -- confirm.
        if self.delta0 is not None and (self.job // 10000) % 10 == 0:
            # delta0 provided and fit is not a restart
            # Seed the head of the work array with the initial input-variable
            # offsets; ODRPACK reads delta from there.
            self._gen_work()
            d0 = np.ravel(self.delta0)
            self.work[:len(d0)] = d0
        # set the kwds from other objects explicitly
        if self.model.fjacb is not None:
            kwds['fjacb'] = self.model.fjacb
        if self.model.fjacd is not None:
            kwds['fjacd'] = self.model.fjacd
        if self.data.we is not None:
            kwds['we'] = self.data.we
        if self.data.wd is not None:
            kwds['wd'] = self.data.wd
        if self.model.extra_args is not None:
            kwds['extra_args'] = self.model.extra_args
        # implicitly set kwds from self's members
        for attr in kwd_l:
            obj = getattr(self, attr)
            if obj is not None:
                kwds[attr] = obj
        # Serialise calls to the compiled core via the module-level lock.
        with ODR_LOCK:
            self.output = Output(odr(*args, **kwds))
        return self.output
    def restart(self, iter=None):
        """ Restarts the run with iter more iterations.

        Parameters
        ----------
        iter : int, optional
            ODRPACK's default for the number of new iterations is 10.

        Returns
        -------
        output : Output instance
            This object is also assigned to the attribute .output .
        """
        if self.output is None:
            raise OdrError("cannot restart: run() has not been called before")
        # Flag the restart digit in job and resume from the saved ODRPACK
        # work arrays of the previous invocation.
        self.set_job(restart=1)
        self.work = self.output.work
        self.iwork = self.output.iwork
        self.maxit = iter
        return self.run()
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@odr@_odrpack.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "dr-guangtou/unagi",
"repo_path": "unagi_extracted/unagi-master/unagi/test/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
|
dr-guangtouREPO_NAMEunagiPATH_START.@unagi_extracted@unagi-master@unagi@test@__init__.py@.PATH_END.py
|
{
"filename": "check_massif_log.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/common/check_massif_log.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import re
import sys
from collections import defaultdict
def parse_massif_out(filename):
    """
    Extract the allocation data from the massif output file and compile it
    into a dictionary mapping each field name (e.g. ``mem_heap_B``) to the
    list of its per-snapshot integer values.
    """
    with open(filename, "r") as fh:
        text = fh.read()
    summary = defaultdict(list)
    # Each snapshot record runs from "snapshot=" up to its "heap_tree" field.
    for record in re.findall("snapshot=(.*?)heap_tree", text, flags=re.DOTALL):
        # Drop the first two tokens (snapshot number and separator line).
        for field in record.split()[2:]:
            key, value = field.split("=")
            summary[key].append(int(value))
    return summary
def is_unbounded_growth(summary, max_allowed_alloc, start_from_middle):
    """
    Check whether the heap allocation is growing without bound.

    Parameters
    ----------
    summary : dict
        Mapping produced by ``parse_massif_out``; its "mem_heap_B" entry
        holds the per-snapshot heap sizes in bytes.
    max_allowed_alloc : float
        Maximum allowed spread between the trimmed mean and the maximum
        heap usage, in MB.
    start_from_middle : bool
        If True, only the second half of the snapshots is analysed, so the
        start-up allocation ramp is ignored.

    Returns
    -------
    bool
        True if the memory spread exceeds ``max_allowed_alloc``.
    """
    totals = summary["mem_heap_B"]
    if len(totals) < 5:
        print("Error: Not enough snapshots")
        return False
    # Measure difference between mean and maximum memory usage
    processed_snapshot = totals[len(totals) // 2 :] if start_from_middle else totals
    # Bug fix: sort a copy. The previous in-place sort reordered the
    # caller's summary["mem_heap_B"] list when start_from_middle was False.
    processed_snapshot = sorted(processed_snapshot, reverse=True)
    # Remove 5% of the max value which will be treated as outlier
    num_max_min_dropout = math.ceil(0.05 * len(processed_snapshot))
    start = num_max_min_dropout
    end = len(processed_snapshot) - num_max_min_dropout
    trimmed = processed_snapshot[start:end]
    mem_heap_avg = sum(trimmed) / len(trimmed)
    mem_heap_max = max(trimmed)
    # Compute change in allocation rate (bytes -> MB)
    memory_allocation_delta_mb = (mem_heap_max - mem_heap_avg) / 1e6
    print(
        "Change in memory allocation: %f MB, MAX ALLOWED: %f MB"
        % (memory_allocation_delta_mb, max_allowed_alloc)
    )
    return memory_allocation_delta_mb > max_allowed_alloc
if __name__ == "__main__":
    # FIXME turn to proper argument handling
    # argv[1]: massif output file; argv[2]: allowed allocation delta in MB;
    # optional argv[3] "--start-from-middle" skips the start-up ramp.
    summary = parse_massif_out(sys.argv[1])
    max_allowed_alloc = float(sys.argv[2])
    start_from_middle = (len(sys.argv) == 4) and (sys.argv[3] == "--start-from-middle")
    # Exit status 1 signals unbounded growth to the calling test harness.
    if is_unbounded_growth(summary, max_allowed_alloc, start_from_middle):
        sys.exit(1)
    else:
        sys.exit(0)
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@common@check_massif_log.py@.PATH_END.py
|
{
"filename": "cooling_model_reader.py",
"repo_name": "cylammarco/WDPhotTools",
"repo_path": "WDPhotTools_extracted/WDPhotTools-main/src/WDPhotTools/cooling_model_reader.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Handling the formatting of different cooling models"""
import io
import glob
import os
import numpy as np
from scipy.interpolate import CloughTocher2DInterpolator
from scipy.interpolate import RBFInterpolator
class CoolingModelReader(object):
"""A reader object to handle the input of different cooling models"""
    def __init__(self):
        """Initialise the model catalogue and the interpolator placeholders."""
        super(CoolingModelReader, self).__init__()
        # Directory of this file; cooling-track data live alongside it.
        self.this_file = os.path.dirname(os.path.abspath(__file__))
        # Mapping of internal model identifier -> literature reference.
        self.model_list = {
            "montreal_co_da_20": "Bedard et al. 2020 CO DA",
            "montreal_co_db_20": "Bedard et al. 2020 CO DB",
            "lpcode_he_da_07": "Panei et al. 2007 He DA",
            "lpcode_he_da_09": "Althaus et al. 2009 He DA",
            "lpcode_co_da_07": "Panei et al. 2007 CO DA",
            "lpcode_co_da_10_z001": "Renedo et al. 2010 CO DA Z=0.01",
            "lpcode_co_da_10_z0001": "Renedo et al. 2010 CO DA Z=0.001",
            "lpcode_co_da_15_z00003": "Althaus et al. 2015 DA Z=0.00003",
            "lpcode_co_da_15_z0001": "Althaus et al. 2015 DA Z=0.0001",
            "lpcode_co_da_15_z0005": "Althaus et al. 2015 DA Z=0.0005",
            "lpcode_co_db_17_z00005": "Althaus et al. 2017 DB Y=0.4",
            "lpcode_co_db_17_z0001": "Althaus et al. 2017 DB Y=0.4",
            "lpcode_co_db_17": "Camisassa et al. 2017 DB",
            "lpcode_one_da_07": "Althaus et al. 2007 ONe DA",
            "lpcode_one_da_19": "Camisassa et al. 2019 ONe DA",
            "lpcode_one_db_19": "Camisassa et al. 2019 ONe DB",
            "lpcode_da_22": "Althaus et al. 2013 He DA, "
            + "Camisassa et al. 2016 CO DA, Camisassa et al. 2019 ONe DA",
            "lpcode_db_22": "Camisassa et al. 2017 CO DB, "
            + "Camisassa et al. 2019 ONe DB",
            "basti_co_da_10": "Salaris et al. 2010 CO DA",
            "basti_co_db_10": "Salaris et al. 2010 CO DB",
            "basti_co_da_10_nps": "Salaris et al. 2010 CO DA, "
            + "no phase separation",
            "basti_co_db_10_nps": "Salaris et al. 2010 CO DB, "
            + "no phase separation",
            "mesa_one_da_18": "Lauffer et al. 2018 ONe DA",
            "mesa_one_db_18": "Lauffer et al. 2018 ONe DB",
        }
        # Models usable for WDs below 0.5 solar masses; None means
        # "no model selected for this mass range".
        self.low_mass_cooling_model_list = [
            "montreal_co_da_20",
            "montreal_co_db_20",
            "lpcode_he_da_07",
            "lpcode_co_da_07",
            "lpcode_he_da_09",
            "lpcode_da_22",
            None,
        ]
        # Models usable between 0.5 and 1.0 solar masses.
        # NOTE(review): "lpcode_co_da_17_y04" is listed here but has no
        # entry in model_list above -- confirm whether it is supported.
        self.intermediate_mass_cooling_model_list = [
            "montreal_co_da_20",
            "montreal_co_db_20",
            "lpcode_co_da_10_z001",
            "lpcode_co_da_10_z0001",
            "lpcode_co_da_15_z00003",
            "lpcode_co_da_15_z0001",
            "lpcode_co_da_15_z0005",
            "lpcode_co_db_17_z0001",
            "lpcode_co_db_17_z00005",
            "lpcode_co_da_17_y04",
            "lpcode_co_db_17",
            "lpcode_da_22",
            "lpcode_db_22",
            "basti_co_da_10",
            "basti_co_db_10",
            "basti_co_da_10_nps",
            "basti_co_db_10_nps",
            None,
        ]
        # Models usable above 1.0 solar masses.
        self.high_mass_cooling_model_list = [
            "montreal_co_da_20",
            "montreal_co_db_20",
            "lpcode_one_da_07",
            "lpcode_one_da_19",
            "lpcode_one_db_19",
            "lpcode_da_22",
            "lpcode_db_22",
            "basti_co_da_10",
            "basti_co_db_10",
            "basti_co_da_10_nps",
            "basti_co_db_10_nps",
            "mesa_one_da_18",
            "mesa_one_db_18",
            None,
        ]
        # Default to montreal_co_da_20
        self.cooling_models = {
            "low_mass_cooling_model": "montreal_co_da_20",
            "intermediate_mass_cooling_model": "montreal_co_da_20",
            "high_mass_cooling_model": "montreal_co_da_20",
        }
        # Placeholders for the interpolation products (presumably filled
        # by later compute/interpolate methods -- defined elsewhere).
        self.mass = None
        self.age = None
        self.luminosity = None
        self.cooling_model_grid = None
        self.cooling_interpolator = None
        self.cooling_rate_interpolator = None
        self.dLdt = None
def list_cooling_model(self, print_to_screen=True):
"""
Print the formatted list of available cooling models.
Parameters
----------
print_to_screen: bool (Default: True)
Set to True to print the list of cooling models to screen.
Returns
-------
model_list:
The names and references of the cooling models.
"""
if print_to_screen:
for i in self.model_list.items():
print(f"Model: {i[0]}, Reference: {i[1]}")
return self.model_list.items()
def list_cooling_parameters(self, model, print_to_screen=True):
"""
Print the formatted list of parameters available for the specified
cooling models.
Parameters
----------
model: str
Name of the cooling model as in the `model_list`.
print_to_screen: bool (Default: True)
Set to True to print the cooling model parameters to screen.
Returns
-------
mass:
WD mass available in the specified model.
column_names:
Available parameters in the specified model.
column_units:
Unites of the parameters in the specified model.
"""
mass, _, column_names, column_units = self.get_cooling_model(model)
if print_to_screen:
print("Available WD mass: {mass}")
for i, j in zip(column_names.items(), column_units.items()):
print(f"Parameter: {i[1]}, Column Name: {i[0]}, Unit: {j[1]}")
return mass, column_names.items(), column_units.items()
def get_cooling_model(self, model, mass_range="all"):
"""
Choose the specified cooling model for the chosen mass range.
Parameters
----------
model: str
Name of the cooling model as in the `model_list`.
mass_range: str (Default: 'all')
The mass range in which the cooling model should return.
The ranges are defined as <0.5, 0.5-1.0 and >1.0 solar masses.
"""
if model in ["montreal_co_da_20", "montreal_co_db_20"]:
(
mass,
cooling_model,
column_names,
column_units,
) = self._bedard20_formatter(model, mass_range)
elif model in ["lpcode_he_da_07", "lpcode_co_da_07"]:
(
mass,
cooling_model,
column_names,
column_units,
) = self._panei07_formatter(model)
elif model == "lpcode_he_da_09":
(
mass,
cooling_model,
column_names,
column_units,
) = self._althaus09_formatter(mass_range)
elif model in ["lpcode_co_db_17_z00005", "lpcode_co_db_17_z0001"]:
(
mass,
cooling_model,
column_names,
column_units,
) = self._althaus17_formatter(model, mass_range)
elif model in ["lpcode_co_da_10_z001", "lpcode_co_da_10_z0001"]:
(
mass,
cooling_model,
column_names,
column_units,
) = self._renedo10_formatter(model)
elif model in [
"lpcode_co_da_15_z00003",
"lpcode_co_da_15_z0001",
"lpcode_co_da_15_z0005",
]:
(
mass,
cooling_model,
column_names,
column_units,
) = self._althaus15_formatter(model)
elif model == "lpcode_co_db_17":
(
mass,
cooling_model,
column_names,
column_units,
) = self._camisassa17_formatter()
elif model in [
"basti_co_da_10",
"basti_co_db_10",
"basti_co_da_10_nps",
"basti_co_db_10_nps",
]:
(
mass,
cooling_model,
column_names,
column_units,
) = self._salaris10_formatter(model, mass_range)
elif model == "lpcode_one_da_07":
(
mass,
cooling_model,
column_names,
column_units,
) = self._althaus07_formatter()
elif model in ["lpcode_one_da_19", "lpcode_one_db_19"]:
(
mass,
cooling_model,
column_names,
column_units,
) = self._camisassa19_formatter(model)
elif model in ["mesa_one_da_18", "mesa_one_db_18"]:
(
mass,
cooling_model,
column_names,
column_units,
) = self._lauffer18_formatter(model)
elif model == "lpcode_da_22":
(
mass,
cooling_model,
column_names,
column_units,
) = self._lpcode22_da_formatter()
elif model == "lpcode_db_22":
(
mass,
cooling_model,
column_names,
column_units,
) = self._lpcode22_db_formatter()
elif model is None:
mass = np.array(())
cooling_model = np.array(())
column_names = {}
column_units = {}
else:
raise ValueError("Invalid model name.")
return mass, cooling_model, column_names, column_units
    def _althaus07_formatter(self):
        """
        A formatter to load the Althaus et al. 2007 WD cooling model

        Returns the per-track masses, the structured cooling tracks, and the
        column name/unit dictionaries for the files under
        ``wd_cooling/althaus07``.
        """
        filelist = glob.glob(
            os.path.join(self.this_file, "wd_cooling/althaus07/*.dat")
        )
        # Prepare the array column dtype
        column_key = np.array(
            (
                "lum",
                "logg",
                "B-V",
                "V-R",
                "V-K",
                "R-I",
                "J-H",
                "H-K",
                "V-I",
                "U-V",
                "BC",
                "dmag_v",
                "age",
            )
        )
        # Human-readable labels matching column_key, index for index.
        column_key_formatted = np.array(
            (
                "Luminosity",
                "log(g)",
                r"$B-V$",
                r"$V-R$",
                r"$V-K$",
                r"$R-I$",
                r"$J-H$",
                r"$H-K$",
                r"$V-I$",
                r"$U-V$",
                "$Bolometric Correction$",
                r"$V$",
                "$log(Age)$",
            )
        )
        column_key_unit = np.array(
            [r"L$_{\odot}$", "(cgs)"] + ["mag"] * 10 + ["(yr)"]
        )
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name
        # NOTE(review): takes the first 3 characters of the trailing
        # underscore-delimited field and divides by 1e5 -- confirm this
        # matches the actual file-name convention of the data files.
        mass = (
            np.array([i.split("_")[-1][:3] for i in filelist]).astype(
                np.float64
            )
            / 100000.0
        )
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, skiprows=1, dtype=dtype)
            # Convert the luminosity into erg/s (10**log(L/Lsun) * Lsun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age to yr (file stores log(age))
            cooling_model[i]["age"] = 10.0 ** cooling_model[i]["age"]
        return mass, cooling_model, column_names, column_units
    def _althaus09_formatter(self, mass_range="all"):
        """
        A formatter to load the Althaus et al. 2009 WD cooling model

        Parameters
        ----------
        mass_range: str (Default: 'all')
            The mass range in which the cooling model should return.
            The ranges are defined as <0.5, 0.5-1.0 and >1.0 solar masses.
            Only 'all' and 'low' are supported by this formatter.
        """
        filelist = glob.glob(
            os.path.join(self.this_file, "wd_cooling/althaus09/z.*")
        )
        # Prepare the array column dtype
        column_key = np.array(
            (
                "Teff",
                "logg",
                "lum",
                "age",
                "BC",
                "M_V",
                "U",
                "B",
                "V",
                "R",
                "I",
                "J",
                "H",
                "K",
                "L",
                "U-B",
                "B-V",
                "V-R",
                "V-K",
                "V-I",
                "R-I",
                "J-H",
                "H-K",
                "K-L",
            )
        )
        # Human-readable labels matching column_key, index for index.
        column_key_formatted = np.array(
            (
                r"T$_{\mathrm{eff}}$",
                "log(g)",
                "Luminosity",
                "$log(Age)$",
                "$Bolometric Correction$",
                r"$V$",
                r"$U$",
                r"$B$",
                r"$V$",
                r"$R$",
                r"$I$",
                r"$J$",
                r"$H$",
                r"$K$",
                r"$L$",
                r"$U-B$",
                r"$B-V$",
                r"$V-R$",
                r"$V-K$",
                r"$V-I$",
                r"$R-I$",
                r"$J-H$",
                r"$H-K$",
                r"$K-L$",
            )
        )
        column_key_unit = np.array(
            ["K", r"(cm/s$^2$)", r"L$_{\odot}$", "(yr)"] + ["mag"] * 20
        )
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name
        # (second-to-last dot-delimited field, scaled by 1e-5)
        mass = (
            np.array([i.split(".")[-2] for i in filelist]).astype(np.float64)
            / 100000.0
        )
        if mass_range == "all":
            pass
        elif mass_range == "low":
            # Keep only tracks below 0.5 solar masses.
            mask_low = mass < 0.5
            mass = mass[mask_low]
            filelist = np.array(filelist)[mask_low]
        else:
            raise ValueError(
                "Unknown mass range requested. Please choose "
                "from 'all' or 'low' for althaus09 models."
            )
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, dtype=dtype)
            # Convert the luminosity into erg/s (10**log(L/Lsun) * Lsun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age to yr (file stores Gyr)
            cooling_model[i]["age"] *= 1e9
        return mass, cooling_model, column_names, column_units
    def _althaus15_formatter(self, model):
        """
        A formatter to load the Althaus et al. 2015 WD cooling model

        Parameters
        ----------
        model: str
            Name of the cooling model as in the `model_list`; selects the
            metallicity (Z=0.00003 / 0.0001 / 0.0005) track directory.
        """
        # Z=0.00003 models
        if model == "lpcode_co_da_15_z00003":
            filelist = glob.glob(
                os.path.join(
                    self.this_file, "wd_cooling/althaus15/Z=3d-5/*.trk"
                )
            )
        # Z=0.0001 models
        if model == "lpcode_co_da_15_z0001":
            filelist = glob.glob(
                os.path.join(
                    self.this_file, "wd_cooling/althaus15/Z=1d-4/*.trk"
                )
            )
        # Z=0.0005 models
        if model == "lpcode_co_da_15_z0005":
            filelist = glob.glob(
                os.path.join(
                    self.this_file, "wd_cooling/althaus15/Z=5d-4/*.trk"
                )
            )
        # Prepare the array column dtype
        column_key = np.array(
            (
                "lum",
                "Teff",
                "Tc",
                "Roc",
                "Hc",
                "Hec",
                "Con_s",
                "Con_c",
                "age",
                "mass",
                "mdot",
                "model_no",
                "Lpp",
                "Lcno",
                "LHe",
                "LCC",
                "dSdt",
                "Lnu",
                "MHtot",
                "HeBuf",
                "mass_Hfc",
                "mass_Hefc",
                "logg",
                "Rsun",
                "LH",
                "ps",
            )
        )
        # Human-readable labels matching column_key, index for index.
        column_key_formatted = np.array(
            (
                "Luminosity",
                r"log(T$_{\mathrm{eff}})$",
                r"T$_{\mathrm{c}}$",
                r"$\rho_c$",
                r"X$_c$",
                r"Y$_c$",
                "Outer Convective Zone",
                "Inner Convective Zone",
                "log(Age)",
                "Mass",
                "log(Rate of Change of Mass)",
                "Model Number",
                r"log($L_{PP}$)",
                r"log($L_{CNO}$)",
                r"log($L_{He}$)",
                r"log($L_{CC}$)",
                r"$\int\frac{\D{S}}{\D{t}}$",
                r"log($L_{\nu}$)",
                r"log(M$_{H, tot}$)",
                r"log(Mass$_{\mathrm{He Buffer}}$)",
                r"log(Mass$_{\mathrm{H-free Core}}$)",
                r"log(Mass$_{\mathrm{He-free Core}}$)",
                "log(g)",
                r"Radius",
                "Latent Heat",
                "Phase Separation",
            )
        )
        # NOTE(review): the unit listed for the "age" column is
        # "($10^6$ K)" -- this looks like it should be a time unit
        # (the code below converts log(age) in Myr to yr); confirm.
        column_key_unit = np.array(
            [
                r"L$_{\odot}$",
                "(K)",
                r"($10^6$ K)",
                r"(g/cm$^3$)",
                "",
                "",
                "%",
                "%",
                "($10^6$ K)",
                r"M$_\odot$",
                r"(M$_\odot$ / yr)",
                "",
            ]
            + [r"L$_{\odot}$"] * 4
            + ["", r"L$_{\odot}$"]
            + [r"M$_{\odot}$"] * 4
            + [r"(cm/s$2^$)", r"R$_{\odot}$"]
            + ["erg/s"] * 2
        )
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name
        # (last 5 characters of the stem, scaled by 1e-5)
        mass = (
            np.array([i.split(".")[-2][-5:] for i in filelist]).astype(
                np.float64
            )
            / 100000.0
        )
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, skiprows=2, dtype=dtype)
            # Convert the luminosity into erg/s (10**log(L/Lsun) * Lsun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age to yr (file stores log(age/Myr)), then
            # re-zero so ages count from the start of the cooling track.
            cooling_model[i]["age"] = 10.0 ** cooling_model[i]["age"] * 1e6
            cooling_model[i]["age"] -= min(cooling_model[i]["age"])
        return mass, cooling_model, column_names, column_units
    def _althaus17_formatter(self, model, mass_range="all"):
        """
        A formatter to load the Althaus et al. 2017 WD cooling model

        Parameters
        ----------
        model: str
            Name of the cooling model as in the `model_list`.
        mass_range: str (Default: 'all')
            The mass range in which the cooling model should return.
            The ranges are defined as <0.5, 0.5-1.0 and >1.0 solar masses.
        """
        # Y=0.4, Z=0.001 models
        if model == "lpcode_co_db_17_z00005":
            filelist = glob.glob(
                os.path.join(self.this_file, "wd_cooling/althaus17/*d4.trk")
            )
        # Y=0.4, Z=0.0005 models
        if model == "lpcode_co_db_17_z0001":
            filelist = glob.glob(
                os.path.join(self.this_file, "wd_cooling/althaus17/*d3.trk")
            )
        # Prepare the array column dtype
        column_key = np.array(
            (
                "lum",
                "Teff",
                "Tc",
                "Roc",
                "Hc",
                "Hec",
                "Con_s",
                "Con_c",
                "age",
                "mass",
                "mdot",
                "model_no",
                "Lpp",
                "Lcno",
                "LHe",
                "LCC",
                "dSdt",
                "Lnu",
                "MHtot",
                "HeBuf",
                "mass_Hfc",
                "mass_Hefc",
                "logg",
                "Rsun",
                "LH",
                "ps",
            )
        )
        # Human-readable labels matching column_key, index for index.
        column_key_formatted = np.array(
            (
                "Luminosity",
                r"log(T$_{\mathrm{eff}})$",
                r"T$_{\mathrm{c}}$",
                r"$\rho_c$",
                r"X$_c$",
                r"Y$_c$",
                "Outer Convective Zone",
                "Inner Convective Zone",
                "log(Age)",
                "Mass",
                "log(Rate of Change of Mass)",
                "Model Number",
                r"log($L_{PP}$)",
                r"log($L_{CNO}$)",
                r"log($L_{He}$)",
                r"log($L_{CC}$)",
                r"$\int\frac{\D{S}}{\D{t}}$",
                r"log($L_{\nu}$)",
                r"log(M$_{H, tot}$)",
                r"Mass$_{\mathrm{He Buffer}}$",
                r"Mass$_{\mathrm{H-free Core}}$",
                r"Mass$_{\mathrm{He-free Core}}$",
                "log(g)",
                "Radius",
                "Latent Heat",
                "Phase Separation",
            )
        )
        # NOTE(review): the unit listed for the "age" column is
        # "($10^6$ K)" -- likely should be a time unit; confirm.
        column_key_unit = np.array(
            [
                r"L$_{\odot}$",
                "(K)",
                r"($10^6$ K)",
                r"(g/cm$^3$)",
                "",
                "",
                "%",
                "%",
                "($10^6$ K)",
                r"M$_\odot$",
                r"(M$_\odot$ / yr)",
                "",
            ]
            + [r"L$_{\odot}$"] * 4
            + ["", r"L$_{\odot}$"]
            + [r"M$_{\odot}$"] * 4
            + [r"(cm/s$^2$)", r"R$_{\odot}$"]
            + ["erg/s"] * 2
        )
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name
        # (used only to build the mass-range masks below)
        mass = np.array(
            [i.split(os.sep)[-1].split("_")[0] for i in filelist]
        ).astype(np.float64)
        # The returned WD mass is read from the track itself (first row of
        # the "mass" column), not from the file name.
        wd_mass = np.zeros_like(mass)
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, skiprows=1, dtype=dtype)
            # Convert the luminosity into erg/s (10**log(L/Lsun) * Lsun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age to yr and re-zero on the track start.
            wd_mass[i] = cooling_model[i]["mass"][0]
            cooling_model[i]["age"] = 10.0 ** cooling_model[i]["age"] * 1e6
            cooling_model[i]["age"] -= min(cooling_model[i]["age"])
        if mass_range == "all":
            pass
        elif mass_range == "low":
            mask_low = mass < 0.5
            wd_mass = wd_mass[mask_low]
            cooling_model = cooling_model[mask_low]
        elif mass_range == "intermediate":
            mask_intermediate = (mass >= 0.5) & (mass <= 1.0)
            wd_mass = wd_mass[mask_intermediate]
            cooling_model = cooling_model[mask_intermediate]
        else:
            raise ValueError(
                "Unknown mass range requested. Please choose from"
                "'all', 'low' or 'intermediate' for althaus17 models."
            )
        return wd_mass, cooling_model, column_names, column_units
    def _bedard20_formatter(self, model, mass_range="all"):
        """
        A formatter to load the Bedard et al. 2020 WD cooling model from
        http://www.astro.umontreal.ca/~bergeron/CoolingModels/

        The thick and thin models are for DA and DB WD, respectively.

        Parameters
        ----------
        model: str
            Name of the cooling model as in the `model_list`.
        mass_range: str (Default: 'all')
            The mass range in which the cooling model should return.
            The ranges are defined as <0.5, 0.5-1.0 and >1.0 solar masses.
        """
        # DA models
        if model == "montreal_co_da_20":
            filelist = glob.glob(
                os.path.join(self.this_file, "wd_cooling/bedard20/*thick*")
            )
        # DB models
        if model == "montreal_co_db_20":
            filelist = glob.glob(
                os.path.join(self.this_file, "wd_cooling/bedard20/*thin*")
            )
        # Prepare the array column dtype
        column_key = np.array(
            (
                "step",
                "Teff",
                "logg",
                "r",
                "age",
                "lum",
                "logTc",
                "logPc",
                "logrhoc",
                "MxM",
                "logqx",
                "lumnu",
                "logH",
                "logHe",
                "logC",
                "logO",
            )
        )
        # Human-readable labels matching column_key, index for index.
        column_key_formatted = np.array(
            (
                "Step",
                r"T$_{\mathrm{eff}}$",
                "log(g)",
                "Radius",
                "Age",
                "Luminosity",
                r"log(T$_{\mathrm{c}}$)",
                r"log(P$_{\mathrm{c}}$)",
                r"log($\rho_c$)",
                "Mass Fraction of Crystallisation",
                "Location of The Crystallization Front",
                r"$L_{\nu}$",
                r"log(Mass Fraction$_{H}$",
                r"log(Mass Fraction$_{He}$",
                r"log(Mass Fraction$_{C}$",
                r"log(Mass Fraction$_{O}$",
            )
        )
        column_key_unit = np.array(
            [
                "",
                "K",
                r"(cm/s$^2$)",
                "cm",
                "yr",
                "erg/s",
                "(K)",
                "(K)",
                r"(g/cm$^3$)",
            ]
            + [""] * 2
            + ["erg/s"]
            + [""] * 4
        )
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name
        # (third underscore-delimited field, in hundredths of a solar mass)
        mass = (
            np.array([i.split("_")[2] for i in filelist]).astype(np.float64)
            / 100.0
        )
        if mass_range == "all":
            pass
        elif mass_range == "low":
            mask_low = mass < 0.5
            mass = mass[mask_low]
            filelist = np.array(filelist)[mask_low]
        elif mass_range == "intermediate":
            mask_intermediate = (mass >= 0.5) & (mass <= 1.0)
            mass = mass[mask_intermediate]
            filelist = np.array(filelist)[mask_intermediate]
        elif mass_range == "high":
            mask_high = mass > 1.0
            mass = mass[mask_high]
            filelist = np.array(filelist)[mask_high]
        else:
            raise ValueError(
                "Unknown mass range requested. Please choose from"
                "'all', 'low', 'intermediate' or 'high' for bedard20 models."
            )
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            with open(filepath, encoding="ascii") as infile:
                # Each data record is wrapped over three physical lines:
                # the first five lines of the file are header (count <= 0),
                # then every line whose count is not a multiple of 3 is
                # joined to the next (newline stripped), and every third
                # line terminates the record.
                count = -5
                cooling_model_text = ""
                for line_i in infile:
                    count += 1
                    if count <= 0:
                        continue
                    if count % 3 != 0:
                        cooling_model_text += line_i.rstrip("\n")
                    else:
                        cooling_model_text += line_i
            cooling_model[i] = np.loadtxt(
                io.StringIO(cooling_model_text), dtype=dtype
            )
        return mass, cooling_model, column_names, column_units
    def _camisassa17_formatter(self):
        """
        A formatter to load the Camisassa et al. 2017 WD cooling model
        The progenitor lifetime is taken off based on the extrapolation from
        Table 1
        https://iopscience.iop.org/article/10.3847/0004-637X/823/2/158

        Returns
        -------
        mass: numpy.ndarray
            WD masses in solar mass, parsed from the track file names.
        cooling_model: numpy.ndarray (dtype=object)
            One structured array per mass holding the cooling track, with
            luminosity converted to erg/s and age to yr (zero-pointed at
            the start of each track).
        column_names: dict
            Column key -> formatted (LaTeX) display name.
        column_units: dict
            Column key -> display unit string.
        """
        # Y=0.4, Z=0.0005 models
        filelist = glob.glob(
            os.path.join(self.this_file, "wd_cooling/camisassa17/*.trk")
        )
        # Prepare the array column dtype
        column_key = np.array(
            (
                "lum",
                "Teff",
                "Tc",
                "Roc",
                "Hc",
                "Hec",
                "Con_s",
                "Con_c",
                "age",
                "mass",
                "mdot",
                "model_no",
                "Lpp",
                "Lcno",
                "LHe",
                "LCC",
                "logG",
                "Lnu",
                "MHtot",
                "HeBuf",
                "mass_Hfc",
                "mass_Hefc",
                "logg",
                "Rsun",
                "LH",
                "SF",
            )
        )
        column_key_formatted = np.array(
            (
                "Luminosity",
                r"log(T$_{\mathrm{eff}})$",
                r"T$_{\mathrm{c}}$",
                r"$\rho_c$",
                r"X$_c$",
                r"Y$_c$",
                "Outer Convective Zone",
                "Inner Convective Zone",
                "log(Age)",
                "Mass",
                "log(Rate of Change of Mass)",
                "Model Number",
                r"log($L_{PP}$)",
                r"log($L_{CNO}$)",
                r"log($L_{He}$)",
                r"log($L_{CC}$)",
                r"log($L_{G}$)",
                r"log($L_{\nu}$)",
                r"log(M$_{H, tot}$)",
                r"log(HeBuf)",
                r"Mass$_{H-free Core}$",
                r"Mass$_{He-free Core}$",
                "log(g)",
                r"Radius",
                "Latent Heat",
                "Phase Separation",
            )
        )
        # NOTE(review): the age entry below is labelled "($10^6$ K)" which
        # looks like a copy of the Tc unit; given the conversion further
        # down (10**age * 1e6 yr) it is presumably meant to be 10^6 yr —
        # confirm against the upstream data description.
        column_key_unit = np.array(
            [r"L$_{\odot}$", "(K)", r"($10^6$ K)", r"(g/cm$^3$)"]
            + [""] * 2
            + ["%"] * 2
            + [r"($10^6$ K)", r"M$_\odot$", r"(M$_\odot$ / yr)", ""]
            + [r"L$_{\odot}$"] * 6
            + [r"M$_{\odot}$"] * 4
            + [r"(cm/s$^2$)", r"R$_{\odot}$"]
            + ["erg/s"] * 2
        )
        # All track columns are read as float64.
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name: the first three characters of
        # the basename encode the mass in hundredths of a solar mass.
        mass = (
            np.array([i.split(os.sep)[-1][:3] for i in filelist]).astype(
                np.float64
            )
            / 100.0
        )
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, skiprows=1, dtype=dtype)
            # Convert the luminosity into erg/s (3.826e33 erg/s = 1 L_sun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age to yr (file stores log10 of age in Myr), then
            # zero-point the age at the start of each track.
            cooling_model[i]["age"] = 10.0 ** cooling_model[i]["age"] * 1e6
            cooling_model[i]["age"] -= min(cooling_model[i]["age"])
        return mass, cooling_model, column_names, column_units
    def _camisassa19_formatter(self, model):
        """
        A formatter to load the Camisassa et al. 2019 ultramassive WD cooling
        model.
        Some columns populated with 'I' are replaced with the nearest values.

        Parameters
        ----------
        model: str
            Name of the cooling model as in the `model_list`. Recognised
            names are 'lpcode_one_da_19' (DA) and 'lpcode_one_db_19' (DB).

        Returns
        -------
        mass, cooling_model, column_names, column_units
            Same structure as the other `_*_formatter` methods.
        """
        # NOTE(review): if `model` matches neither name below, `filelist`
        # is never assigned and the glob result of the previous branch is
        # not reused either — the method would raise a NameError further
        # down. Consider raising a ValueError explicitly.
        # DA model
        if model == "lpcode_one_da_19":
            filelist = glob.glob(
                os.path.join(
                    self.this_file, "wd_cooling/camisassa19/*hrich.dat"
                )
            )
        # DB model
        if model == "lpcode_one_db_19":
            filelist = glob.glob(
                os.path.join(
                    self.this_file, "wd_cooling/camisassa19/*hdef.dat"
                )
            )
        # Prepare the array column dtype
        column_key = np.array(
            (
                "lum",
                "Teff",
                "Tc",
                "Roc",
                "Hc",
                "Hec",
                "Con_s",
                "Con_c",
                "age",
                "mass",
                "mdot",
                "Lnu",
                "MHtot",
                "logg",
                "Rsun",
                "LH",
                "sf",
            )
        )
        column_key_formatted = np.array(
            (
                "Luminosity",
                r"log(T$_{\mathrm{eff}})$",
                r"T$_{\mathrm{c}}$",
                r"$\rho_c$",
                r"X$_c$",
                r"Y$_c$",
                "Outer Convective Zone",
                "Inner Convective Zone",
                "log(Age)",
                "Mass",
                "log(Rate of Change of Mass)",
                r"log($L_{\nu}$)",
                r"log(M$_{H, tot}$)",
                "log(g)",
                r"Radius",
                "Latent Heat",
                "Phase Separation",
            )
        )
        # NOTE(review): this list has 15 entries while `column_key` has 17,
        # so the zip below drops the units for 'LH' and 'sf' AND shifts
        # every unit from 'age' onwards by one (e.g. 'age' is assigned
        # M_sun). An 'age' unit entry appears to be missing — verify
        # against the camisassa17 formatter, which has an aligned list.
        column_key_unit = np.array(
            [r"L$_{\odot}$", "(K)", r"($10^6$ K)", r"(g/cm$^3$)"]
            + [""] * 2
            + ["%"] * 2
            + [
                r"M$_\odot$",
                r"(M$_\odot$ / yr)",
                r"L$_{\odot}$",
                r"M$_{\odot}$",
                r"(cm/s$^2$)",
                r"R$_{\odot}$",
            ]
            + ["erg/s"]
        )
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name: the first three characters of
        # the basename encode the mass in hundredths of a solar mass.
        mass = (
            np.array([i.split(os.sep)[-1][:3] for i in filelist]).astype(
                np.float64
            )
            / 100.0
        )
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, skiprows=2, dtype=dtype)
            # Convert the luminosity into erg/s (3.826e33 erg/s = 1 L_sun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age to yr (file stores log10 of age in Myr), then
            # zero-point the age at the start of each track.
            cooling_model[i]["age"] = 10.0 ** cooling_model[i]["age"] * 1e6
            cooling_model[i]["age"] -= min(cooling_model[i]["age"])
        return mass, cooling_model, column_names, column_units
def _lauffer18_formatter(self, model):
"""
A formatter to load the Lauffer et al. 2018 WD cooling model
Parameters
----------
model: str
Name of the cooling model as in the `model_list`.
"""
# H models
if model == "mesa_one_da_18":
filelist = glob.glob(
os.path.join(self.this_file, "wd_cooling/lauffer18/H_*.dat")
)
# He models
if model == "mesa_one_db_18":
filelist = glob.glob(
os.path.join(self.this_file, "wd_cooling/lauffer18/He_*.dat")
)
# Prepare the array column dtype
column_key = np.array(
("Teff", "lum", "logg", "Rsun", "mass", "age", "total_age")
)
column_key_formatted = np.array(
(
r"log(T$_{\mathrm{eff}})$",
"Luminosity",
"log(g)",
r"Radius",
"Mass",
"log(Cooling Age)",
"log(Total Age)",
)
)
column_key_unit = np.array(
[
"(K)",
r"L$_{\odot}$",
r"(cm/s$^2$)",
r"R$_{\odot}$",
r"M$_\odot$",
]
+ [r"(Gyr)"] * 2
)
column_type = np.array(([np.float64] * len(column_key)))
dtype = [(i, j) for i, j in zip(column_key, column_type)]
column_names = {}
column_units = {}
for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
column_names[i] = j
column_units[i] = k
# Get the mass from the file name
mass = np.array([i.split("-M")[-1][:-4] for i in filelist]).astype(
np.float64
)
# Create an empty array for holding the cooling models
cooling_model = np.array(([""] * len(mass)), dtype="object")
for i, filepath in enumerate(filelist):
cooling_model[i] = np.loadtxt(filepath, skiprows=1, dtype=dtype)
# Convert the luminosity into erg/s
cooling_model[i]["lum"] = (
10.0 ** cooling_model[i]["lum"] * 3.826e33
)
# Convert the age to yr
cooling_model[i]["age"] *= 1e9
return mass, cooling_model, column_names, column_units
    def _lpcode22_da_formatter(self):
        """
        A formatter to load the LPCODE collated DA cooling model grid.

        Returns
        -------
        mass: numpy.ndarray
            WD masses in solar mass, parsed from the track file names.
        cooling_model: numpy.ndarray (dtype=object)
            One structured array per mass holding the cooling track, with
            luminosity converted to erg/s and cooling age to yr.
        column_names: dict
            Column key -> formatted (LaTeX) display name.
        column_units: dict
            Column key -> display unit string.
        """
        filelist = glob.glob(
            os.path.join(
                self.this_file, "wd_cooling", "lpcode22", "DA", "*.trk"
            )
        )
        # Prepare the array column dtype: the first six columns are the
        # physical quantities, the remaining 49 are synthetic magnitudes
        # (JWST, Gaia, Johnson-Cousins, GALEX, SDSS and HST filters).
        column_key = np.array(
            (
                "Teff",
                "lum",
                "logg",
                "age",
                "Rsun",
                "Mbol",
                "F070W",
                "F090W",
                "F115W",
                "F150W",
                "F200W",
                "F277W",
                "F356W",
                "F444W",
                "F164N",
                "F187N",
                "F212N",
                "F323N",
                "F405N",
                "G",
                "BP",
                "RP",
                "U",
                "B",
                "V",
                "R",
                "I",
                "J",
                "H",
                "K",
                "L",
                "FUV",
                "NUV",
                "u",
                "g",
                "r",
                "i",
                "z",
                "F220W",
                "F250W",
                "F330W",
                "F344N",
                "F435W",
                "F475W",
                "F502N",
                "F550M",
                "F555W",
                "F606W",
                "F625W",
                "F658N",
                "F660N",
                "F775W",
                "F814W",
                "F850LP",
                "F892N",
            )
        )
        column_key_formatted = np.array(
            (
                r"log(T$_{\mathrm{eff}})$",
                "log(Luminosity)",
                "log(g)",
                "log(Cooling Age)",
                "Radius",
                r"M$_{\mathrm{bol}}$",
                "F070W",
                "F090W",
                "F115W",
                "F150W",
                "F200W",
                "F277W",
                "F356W",
                "F444W",
                "F164N",
                "F187N",
                "F212N",
                "F323N",
                "F405N",
                "G",
                "BP",
                "RP",
                "U",
                "B",
                "V",
                "R",
                "I",
                "J",
                "H",
                "K",
                "L",
                "FUV",
                "NUV",
                "u",
                "g",
                "r",
                "i",
                "z",
                "F220W",
                "F250W",
                "F330W",
                "F344N",
                "F435W",
                "F475W",
                "F502N",
                "F550M",
                "F555W",
                "F606W",
                "F625W",
                "F658N",
                "F660N",
                "F775W",
                "F814W",
                "F850LP",
                "F892N",
            )
        )
        column_key_unit = np.array(
            [
                "log(K)",
                r"log(L/L$_{\odot}$)",
                r"log(cm/s$^2$)",
                "log(yr)",
                r"R$_{\odot}$",
            ]
            + ["mag"] * 50
        )
        # All track columns are read as float64.
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name: the basename is expected to be
        # of the form "<mass>Msun...".
        mass = np.array(
            [i.split("Msun")[0].split(os.path.sep)[-1] for i in filelist]
        ).astype(np.float64)
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, skiprows=2, dtype=dtype)
            # Convert the luminosity into erg/s (3.826e33 erg/s = 1 L_sun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age from Gyr to yr
            cooling_model[i]["age"] *= 1.0e9
        return mass, cooling_model, column_names, column_units
    def _lpcode22_db_formatter(self):
        """
        A formatter to load the LPCODE collated DB cooling model grid.

        Returns
        -------
        mass: numpy.ndarray
            WD masses in solar mass, parsed from the track file names.
        cooling_model: numpy.ndarray (dtype=object)
            One structured array per mass holding the cooling track, with
            luminosity converted to erg/s and cooling age to yr.
        column_names: dict
            Column key -> formatted (LaTeX) display name.
        column_units: dict
            Column key -> display unit string.
        """
        filelist = glob.glob(
            os.path.join(
                self.this_file, "wd_cooling", "lpcode22", "DB", "*.trk"
            )
        )
        # Prepare the array column dtype: physical quantities plus the
        # Gaia G/BP/RP synthetic magnitudes.
        column_key = np.array(
            (
                "Teff",
                "lum",
                "logg",
                "age",
                "Rsun",
                "Mbol",
                "G",
                "BP",
                "RP",
            )
        )
        column_key_formatted = np.array(
            (
                r"log(T$_{\mathrm{eff}})$",
                "log(Luminosity)",
                "log(g)",
                "log(Cooling Age)",
                "Radius",
                r"M$_{\mathrm{bol}}$",
                "G",
                "BP",
                "RP",
            )
        )
        column_key_unit = np.array(
            [
                "log(K)",
                r"log(L/L$_{\odot}$)",
                r"log(cm/s$^2$)",
                "log(yr)",
                r"R$_{\odot}$",
            ]
            + ["mag"] * 4
        )
        # All track columns are read as float64.
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name: the basename is expected to be
        # of the form "<mass>Msun...".
        mass = np.array(
            [i.split("Msun")[0].split(os.path.sep)[-1] for i in filelist]
        ).astype(np.float64)
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, skiprows=2, dtype=dtype)
            # Convert the luminosity into erg/s (3.826e33 erg/s = 1 L_sun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age from Gyr to yr
            cooling_model[i]["age"] *= 1.0e9
        return mass, cooling_model, column_names, column_units
def _panei07_formatter(self, model):
"""
A formatter to load the Panei et al. 2007 WD cooling model
Parameters
----------
model: str
Name of the cooling model as in the `model_list`.
"""
# He core models
if model == "lpcode_he_da_07":
filelist = glob.glob(
os.path.join(self.this_file, "wd_cooling/panei07/*He.SDSS")
)
# CO core models
if model == "lpcode_co_da_07":
filelist = glob.glob(
os.path.join(self.this_file, "wd_cooling/panei07/*CO.SDSS")
)
# Prepare the array column dtype
column_key = np.array(
("Teff", "logg", "lum", "age", "u", "g", "r", "i", "z")
)
column_key_formatted = np.array(
(
r"log(T$_{\mathrm{eff}})$",
"log(g)",
"Luminosity",
"log(Age)",
"u",
"g",
"r",
"i",
"z",
)
)
column_key_unit = np.array(
["(K)", r"(cm/s$^2$)", r"L$_{\odot}$", r"(Gyr)"] + ["mag"] * 5
)
column_type = np.array(([np.float64] * len(column_key)))
dtype = [(i, j) for i, j in zip(column_key, column_type)]
column_names = {}
column_units = {}
for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
column_names[i] = j
column_units[i] = k
# Get the mass from the file name
mass = (
np.array([i.split(".")[-2][:5] for i in filelist]).astype(
np.float64
)
/ 100000.0
)
# Create an empty array for holding the cooling models
cooling_model = np.array(([""] * len(mass)), dtype="object")
for i, filepath in enumerate(filelist):
cooling_model[i] = np.loadtxt(filepath, skiprows=1, dtype=dtype)
# Convert the luminosity into erg/s
cooling_model[i]["lum"] = (
10.0 ** cooling_model[i]["lum"] * 3.826e33
)
# Convert the age to yr
cooling_model[i]["age"] *= 1e9
return mass, cooling_model, column_names, column_units
    def _renedo10_formatter(self, model):
        """
        A formatter to load the Renedo et al. 2010 WD cooling model from
        http://evolgroup.fcaglp.unlp.edu.ar/TRACKS/tracks_cocore.html
        Two metallicity for DA are available: Z=0.01 and Z=0.001

        Parameters
        ----------
        model: str
            Name of the cooling model as in the `model_list`:
            'lpcode_co_da_10_z001' or 'lpcode_co_da_10_z0001'.
        """
        # NOTE(review): if `model` matches neither name below, `filelist`
        # is never assigned and a NameError is raised further down.
        # Solar metallicity model
        if model == "lpcode_co_da_10_z001":
            filelist = glob.glob(
                os.path.join(self.this_file, "wd_cooling/renedo10/*z001.trk")
            )
        # Low metallicity model
        if model == "lpcode_co_da_10_z0001":
            filelist = glob.glob(
                os.path.join(self.this_file, "wd_cooling/renedo10/*z0001.trk")
            )
        # Prepare the array column dtype
        column_key = np.array(
            (
                "lum",
                "Teff",
                "logTc",
                "logrhoc",
                "age",
                "mass",
                "lumpp",
                "lumcno",
                "lumhe",
                "lumnu",
                "logH",
                "logg",
                "rsun",
            )
        )
        column_key_formatted = np.array(
            (
                "log(Luminosity)",
                r"log(T$_{\mathrm{eff}})$",
                r"log(T$_{\mathrm{c}})$",
                r"log($\rho_{\mathrm{c}})$",
                "log(Age)",
                "Mass",
                r"log($L_{PP}$)",
                r"log($L_{CNO}$)",
                r"log($L_{He}$)",
                r"log($L_{\nu}$)",
                r"log(M$_{H, tot}$)",
                "log(g)",
                "Radius",
            )
        )
        # NOTE(review): the age unit is labelled "(Gyr)" but the conversion
        # below multiplies by 1e6 (i.e. the file appears to store Myr), and
        # "E$_{\odot}$" for the radius looks like a typo for R_sun —
        # confirm against the upstream data description before changing.
        column_key_unit = np.array(
            ["erg/s", "(K)", "(K)", r"(g/cm$^3$)", r"(Gyr)", r"M$_{\odot}$"]
            + [r"L$_{\odot}$"] * 4
            + [r"M$_{\odot}$", r"(cm/s$^2$)", r"E$_{\odot}$"]
        )
        # All track columns are read as float64.
        column_type = np.array(([np.float64] * len(column_key)))
        dtype = [(i, j) for i, j in zip(column_key, column_type)]
        column_names = {}
        column_units = {}
        for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
            column_names[i] = j
            column_units[i] = k
        # Get the mass from the file name: four digits before the "_z..."
        # suffix encode the mass in thousandths of a solar mass.
        mass = (
            np.array([i.split("_")[1][-4:] for i in filelist]).astype(
                np.float64
            )
            / 1000.0
        )
        # Create an empty array for holding the cooling models
        cooling_model = np.array(([""] * len(mass)), dtype="object")
        for i, filepath in enumerate(filelist):
            cooling_model[i] = np.loadtxt(filepath, skiprows=1, dtype=dtype)
            # Convert the luminosity into erg/s (3.826e33 erg/s = 1 L_sun)
            cooling_model[i]["lum"] = (
                10.0 ** cooling_model[i]["lum"] * 3.826e33
            )
            # Convert the age to yr (presumably from Myr — see NOTE above)
            cooling_model[i]["age"] *= 1e6
        return mass, cooling_model, column_names, column_units
def _salaris10_formatter(self, model, mass_range="all"):
"""
A formatter to load the Salaris et al. 2010 WD cooling model from
Parameters
----------
model: str
Name of the cooling model as in the `model_list`.
mass_range: str (Default: 'all')
The mass range in which the cooling model should return.
The ranges are defined as <0.5, 0.5-1.0 and >1.0 solar masses.
"""
# DA model with phase separation
if model == "basti_co_da_10":
filelist = glob.glob(
os.path.join(
self.this_file, "wd_cooling/salaris10/*DAsep.sdss"
)
)
# DB model with phase separation
if model == "basti_co_db_10":
filelist = glob.glob(
os.path.join(
self.this_file, "wd_cooling/salaris10/*DBsep.sdss"
)
)
# DA model without phase separation
if model == "basti_co_da_10_nps":
filelist = glob.glob(
os.path.join(
self.this_file, "wd_cooling/salaris10/*DAnosep.sdss"
)
)
# DB model without phase separation
if model == "basti_co_db_10_nps":
filelist = glob.glob(
os.path.join(
self.this_file, "wd_cooling/salaris10/*DBnosep.sdss"
)
)
# Prepare the array column dtype
column_key = np.array(
("age", "mass", "Teff", "lum", "u", "g", "r", "i", "z")
)
column_key_formatted = np.array(
(
"log(Age)",
"Mass",
r"log(T$_{\mathrm{eff}})$",
"Luminosity",
"u",
"g",
"r",
"i",
"z",
)
)
column_key_unit = np.array(
["(Gyr)", r"M$_{\odot}$", "(K)", r"L$_{\odot}$"] + ["mag"] * 5
)
column_type = np.array(([np.float64] * len(column_key)))
dtype = [(i, j) for i, j in zip(column_key, column_type)]
column_names = {}
column_units = {}
for i, j, k in zip(column_key, column_key_formatted, column_key_unit):
column_names[i] = j
column_units[i] = k
# Get the mass from the file name
mass = (
np.array([i.split("COOL")[-1][:3] for i in filelist]).astype(
np.float64
)
/ 100.0
)
if mass_range == "all":
pass
elif mass_range == "intermediate":
mask_intermediate = (mass >= 0.5) & (mass <= 1.0)
mass = mass[mask_intermediate]
filelist = np.array(filelist)[mask_intermediate]
elif mass_range == "high":
mask_high = mass > 1.0
mass = mass[mask_high]
filelist = np.array(filelist)[mask_high]
else:
raise ValueError(
"Unknown mass range requested. Please choose from"
"'all', 'intermediate' or 'high' for bedard20 models."
)
# Create an empty array for holding the cooling models
cooling_model = np.array(([""] * len(mass)), dtype="object")
for i, filepath in enumerate(filelist):
cooling_model[i] = np.loadtxt(filepath, skiprows=1, dtype=dtype)
# Convert the luminosity into erg/s
cooling_model[i]["lum"] = (
10.0 ** cooling_model[i]["lum"] * 3.826e33
)
# Convert the age to yr
cooling_model[i]["age"] = 10.0 ** cooling_model[i]["age"]
return mass, cooling_model, column_names, column_units
def set_low_mass_cooling_model(self, model):
"""
Set the WD cooling model.
Parameters
----------
model: str (Default: 'montreal_co_da_20')
Choice of WD cooling model:
1. 'montreal_co_da_20' - Bedard et al. 2020 CO DA
2. 'montreal_co_db_20' - Bedard et al. 2020 CO DB
3. 'lpcode_he_da_07' - Panei et al. 2007 He DA
4. 'lpcode_co_da_07' - Panei et al. 2007 CO DA
5. 'lpcode_he_da_09' - Althaus et al. 2009 He DA
6. 'lpcode_da_20' - Althaus et al. 2013, Camisassa et al. 2016,
Camisassa et al. 2019
The naming convention follows this format:
[model]_[core composition]_[atmosphere]_[publication year]
where a few models continue to have extra property description
terms trailing after the year, currently they are either the
progenitor metallicity or the (lack of) phase separation in the
evolution model.
"""
if model in self.low_mass_cooling_model_list:
self.cooling_models["low_mass_cooling_model"] = model
else:
raise ValueError("Please provide a valid model.")
def set_intermediate_mass_cooling_model(self, model):
"""
Set the WD cooling model.
Parameters
----------
model: str (Default: 'montreal_co_da_20')
Choice of WD cooling model:
1. 'montreal_co_da_20' - Bedard et al. 2020 CO DA
2. 'montreal_co_db_20' - Bedard et al. 2020 CO DB
3. 'lpcode_co_da_10_z001' - Renedo et al. 2010 CO DA Z=0.01
4. 'lpcode_co_da_10_z0001' - Renedo et al. 2010 CO DA Z=0.001
5. 'lpcode_co_da_15_z00003' - Althaus et al. 2015 DA Z=0.00003
6. 'lpcode_co_da_15_z0001' - Althaus et al. 2015 DA Z=0.0001
7. 'lpcode_co_da_15_z0005' - Althaus et al. 2015 DA Z=0.0005
8. 'lpcode_co_da_17_y04' - Althaus et al. 2017 DB Y=0.4
9. 'lpcode_co_db_17' - Camisassa et al. 2017 DB
10. 'lpcode_da_20' - Althaus et al. 2013, Camisassa et al. 2016,
Camisassa et al. 2019
11. 'lpcode_db_20' - Camisassa et al. 2017, Camisassa et al. 2019
12. 'basti_co_da_10' - Salaris et al. 2010 CO DA
13. 'basti_co_db_10' - Salaris et al. 2010 CO DB
14. 'basti_co_da_10_nps' - Salaris et al. 2010 CO DA,
no phase separation
15. 'basti_co_db_10_nps' - Salaris et al. 2010 CO DB,
no phase separation
The naming convention follows this format:
[model]_[core composition]_[atmosphere]_[publication year]
where a few models continue to have extra property description
terms trailing after the year, currently they are either the
progenitor metallicity or the (lack of) phase separation in the
evolution model.
"""
if model in self.intermediate_mass_cooling_model_list:
self.cooling_models["intermediate_mass_cooling_model"] = model
else:
raise ValueError("Please provide a valid model.")
def set_high_mass_cooling_model(self, model):
"""
Set the WD cooling model.
Parameters
----------
model: str (Default: 'montreal_co_da_20')
Choice of WD cooling model:
1. 'montreal_co_da_20' - Bedard et al. 2020 CO DA
2. 'montreal_co_db_20' - Bedard et al. 2020 CO DB
3. 'lpcode_one_da_07' - Althaus et al. 2007 ONe DA
4. 'lpcode_one_da_19' - Camisassa et al. 2019 ONe DA
5. 'lpcode_one_db_19' - Camisassa et al. 2019 ONe DB
6. 'lpcode_da_20' - Althaus et al. 2013, Camisassa et al. 2016,
Camisassa et al. 2019
7. 'lpcode_db_20' - Camisassa et al. 2017, Camisassa et al. 2019
8. 'basti_co_da_10' - Salaris et al. 2010 CO DA
9. 'basti_co_db_10' - Salaris et al. 2010 CO DB
10. 'basti_co_da_10_nps' - Salaris et al. 2010 CO DA,
no phase separation
11. 'basti_co_db_10_nps' - Salaris et al. 2010 CO DB,
no phase separation
12. 'mesa_one_da_18' - Lauffer et al. 2018 ONe DA
13. 'mesa_one_db_18' - Lauffer et al. 2018 ONe DB
The naming convention follows this format:
[model]_[core composition]_[atmosphere]_[publication year]
where a few models continue to have extra property description
terms trailing after the year, currently they are either the
progenitor metallicity or the (lack of) phase separation in the
evolution model.
"""
if model in self.high_mass_cooling_model_list:
self.cooling_models["high_mass_cooling_model"] = model
else:
raise ValueError("Please provide a valid model.")
def _itp2d_gradient(self, _f, val1, val2, frac=1e-6):
"""
A function to find the gradient in the direction in the first dimension
of a 2D function at a given coordinate.
Parameters
----------
f: callable function
A 2D function
val1: float
The first input value accepted by f. The gradient is computed in
this direction.
val2: float
The first input value accepted by f.
frac: float (Default: 1e-6)
The small fractional increment of val1.
Return
------
Gradient in the direction of val1.
"""
if not callable(_f):
raise TypeError("f has to be a callable function.")
increment = val1 * frac / 2.0
grad = np.asarray(
(_f(val1 + increment, val2) - _f(val1 - increment, val2))
/ (increment * 2.0)
).reshape(-1)
# cooling((L+1), m) - cooling(L, m) is always negative
grad[grad > 0.0] = 0.0
grad[np.isnan(grad)] = 0.0
return grad
def compute_cooling_age_interpolator(
self,
interpolator="CT",
kwargs_for_RBF={},
kwargs_for_CT={},
):
"""
Compute the callable CloughTocher2DInterpolator taking (logL, m) and
returning the cooling time of the WDs. It needs to use float64 or it
runs into float-point error at very faint lumnosity.
Parameters
----------
interpolator: str (Default: 'CT')
Choose between 'RBF' and 'CT'.
kwargs_for_RBF: dict (Default: {"neighbors": None,
"smoothing": 0.0, "kernel": "thin_plate_spline",
"epsilon": None, "degree": None,})
Keyword argument for the interpolator. See
`scipy.interpolate.RBFInterpolator`.
kwargs_for_CT: dict (Default: {'fill_value': -np.inf,
'tol': 1e-10, 'maxiter': 100000})
Keyword argument for the interpolator. See
`scipy.interpolate.CloughTocher2DInterpolator`.
"""
# Set the low mass cooling model, i.e. M < 0.5 M_sun
mass_low, cooling_model_low, _, _ = self.get_cooling_model(
self.cooling_models["low_mass_cooling_model"], mass_range="low"
)
# Set the intermediate mass cooling model, i.e. 0.5 < M < 1.0 M_sun
(
mass_intermediate,
cooling_model_intermediate,
_,
_,
) = self.get_cooling_model(
self.cooling_models["intermediate_mass_cooling_model"],
mass_range="intermediate",
)
# Set the high mass cooling model, i.e. 1.0 < M_sun
mass_high, cooling_model_high, _, _ = self.get_cooling_model(
self.cooling_models["high_mass_cooling_model"], mass_range="high"
)
# Gather all the models in different mass ranges
if mass_low.size == 0:
luminosity_low = np.array(())
age_low = np.array(())
else:
# Reshaping the WD mass array to match the shape of the other two.
mass_low = (
np.concatenate(
np.array(
[
[mass_low[i]] * len(model["age"])
for i, model in enumerate(cooling_model_low)
],
dtype=object,
)
)
.T.ravel()
.astype(np.float64)
)
# The luminosity of the WD at the corresponding mass and age
luminosity_low = (
np.concatenate([i["lum"] for i in cooling_model_low])
.reshape(-1)
.astype(np.float64)
)
# The luminosity of the WD at the corresponding mass and luminosity
age_low = (
np.concatenate([i["age"] for i in cooling_model_low])
.reshape(-1)
.astype(np.float64)
)
if mass_intermediate.size == 0:
luminosity_intermediate = np.array(())
age_intermediate = np.array(())
else:
# Reshaping the WD mass array to match the shape of the other two.
mass_intermediate = (
np.concatenate(
np.array(
[
[mass_intermediate[i]] * len(model["age"])
for i, model in enumerate(
cooling_model_intermediate
)
],
dtype=object,
)
)
.T.ravel()
.astype(np.float64)
)
# The luminosity of the WD at the corresponding mass and age
luminosity_intermediate = (
np.concatenate([i["lum"] for i in cooling_model_intermediate])
.reshape(-1)
.astype(np.float64)
)
# The luminosity of the WD at the corresponding mass and luminosity
age_intermediate = (
np.concatenate([i["age"] for i in cooling_model_intermediate])
.reshape(-1)
.astype(np.float64)
)
if mass_high.size == 0:
luminosity_high = np.array(())
age_high = np.array(())
else:
# Reshaping the WD mass array to match the shape of the other two.
mass_high = (
np.concatenate(
np.array(
[
[mass_high[i]] * len(model["age"])
for i, model in enumerate(cooling_model_high)
],
dtype=object,
)
)
.T.ravel()
.astype(np.float64)
)
# The luminosity of the WD at the corresponding mass and age
luminosity_high = (
np.concatenate([i["lum"] for i in cooling_model_high])
.reshape(-1)
.astype(np.float64)
)
# The luminosity of the WD at the corresponding mass and luminosity
age_high = (
np.concatenate([i["age"] for i in cooling_model_high])
.reshape(-1)
.astype(np.float64)
)
self.cooling_model_grid = np.concatenate(
(cooling_model_low, cooling_model_intermediate, cooling_model_high)
)
self.mass = np.concatenate((mass_low, mass_intermediate, mass_high))
self.luminosity = np.concatenate(
(luminosity_low, luminosity_intermediate, luminosity_high)
)
self.age = np.concatenate((age_low, age_intermediate, age_high))
# Configure interpolator for the cooling models
_kwargs_for_CT = {
"fill_value": float("-inf"),
"tol": 1e-10,
"maxiter": 100000,
"rescale": True,
}
_kwargs_for_CT.update(**kwargs_for_CT)
_kwargs_for_RBF = {
"neighbors": None,
"smoothing": 0.0,
"kernel": "thin_plate_spline",
"epsilon": None,
"degree": None,
}
_kwargs_for_RBF.update(**kwargs_for_RBF)
if interpolator.lower() == "ct":
# Interpolate with the scipy CloughTocher2DInterpolator
self.cooling_interpolator = CloughTocher2DInterpolator(
(np.log10(self.luminosity), self.mass),
self.age,
**_kwargs_for_CT,
)
elif interpolator.lower() == "rbf":
# Interpolate with the scipy RBFInterpolator
_cooling_interpolator = RBFInterpolator(
np.stack((np.log10(self.luminosity), self.mass), -1),
self.age,
**_kwargs_for_RBF,
)
lum_min = np.nanmin(np.log10(self.luminosity))
lum_max = np.nanmax(np.log10(self.luminosity))
mass_min = np.nanmin(self.mass)
mass_max = np.nanmax(self.mass)
def cooling_interpolator(x_0, x_1):
_x_0 = np.array(x_0)
_x_1 = np.array(x_1)
if (_x_0.size == 1) & (_x_1.size > 1):
_x_0 = np.repeat(_x_0, _x_1.size)
if (_x_1.size == 1) & (_x_0.size > 1):
_x_0 = np.repeat(_x_1, _x_0.size)
_x_0[_x_0 < lum_min] = lum_min
_x_0[_x_0 > lum_max] = lum_max
_x_1[_x_1 < mass_min] = mass_min
_x_1[_x_1 > mass_max] = mass_max
length0 = _x_0.size
return _cooling_interpolator(
np.array([_x_0, _x_1], dtype="object").T.reshape(
length0, 2
)
)
self.cooling_interpolator = cooling_interpolator
else:
raise ValueError(
f"Interpolator should be CT or RBF, {interpolator} is given."
)
self.dLdt = self._itp2d_gradient(
self.cooling_interpolator, np.log10(self.luminosity), self.mass
)
finite_mask = np.isfinite(self.dLdt)
if interpolator.lower() == "ct":
self.cooling_rate_interpolator = CloughTocher2DInterpolator(
(
np.log10(self.luminosity)[finite_mask],
self.mass[finite_mask],
),
self.dLdt[finite_mask],
**_kwargs_for_CT,
)
elif interpolator.lower() == "rbf":
# Interpolate with the scipy RBFInterpolator
_cooling_rate_interpolator = RBFInterpolator(
np.stack(
(
np.log10(self.luminosity)[finite_mask],
self.mass[finite_mask],
),
-1,
),
self.dLdt[finite_mask],
**_kwargs_for_RBF,
)
lum_min = np.nanmin(np.log10(self.luminosity))
lum_max = np.nanmax(np.log10(self.luminosity))
mass_min = np.nanmin(self.mass)
mass_max = np.nanmax(self.mass)
def cooling_rate_interpolator(x_0, x_1):
_x_0 = np.asarray(x_0)
_x_1 = np.asarray(x_1)
if (_x_0.size == 1) & (_x_1.size > 1):
_x_0 = np.repeat(_x_0, _x_1.size)
if (_x_1.size == 1) & (_x_0.size > 1):
_x_0 = np.repeat(_x_1, _x_0.size)
_x_0[_x_0 < lum_min] = lum_min
_x_0[_x_0 > lum_max] = lum_max
_x_1[_x_1 < mass_min] = mass_min
_x_1[_x_1 > mass_max] = mass_max
length0 = _x_0.size
return _cooling_rate_interpolator(
np.asarray([_x_0, _x_1], dtype="object").T.reshape(
length0, 2
)
)
self.cooling_rate_interpolator = cooling_rate_interpolator
else:
raise ValueError(
"Interpolator should be CT or RBF, {interpolator} is given."
)
|
cylammarcoREPO_NAMEWDPhotToolsPATH_START.@WDPhotTools_extracted@WDPhotTools-main@src@WDPhotTools@cooling_model_reader.py@.PATH_END.py
|
{
"filename": "_ticklabeloverflow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/colorbar/_ticklabeloverflow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabeloverflowValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``densitymap.colorbar.ticklabeloverflow``."""

    def __init__(
        self,
        plotly_name="ticklabeloverflow",
        parent_name="densitymap.colorbar",
        **kwargs,
    ):
        # Pop the overridable settings out of kwargs first so subclasses
        # can customise them, then defer to the enumerated machinery.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop(
            "values", ["allow", "hide past div", "hide past domain"]
        )
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymap@colorbar@_ticklabeloverflow.py@.PATH_END.py
|
{
"filename": "pool.py",
"repo_name": "adrn/schwimmbad",
"repo_path": "schwimmbad_extracted/schwimmbad-main/src/schwimmbad/pool.py",
"type": "Python"
}
|
# type: ignore
import abc
from collections.abc import Iterable
from typing import Any, Callable
# This package
from .utils import batch_tasks
__all__ = ["BasePool"]
def _callback_wrapper(
callback: Callable[..., Any], generator: Iterable[Any]
) -> Iterable[Any]:
for element in generator:
callback(element)
yield element
class BasePool(metaclass=abc.ABCMeta):
    """A base class multiprocessing pool with a ``map`` method."""

    def __init__(self, **_: Any):
        # The serial base pool is always rank 0 (the "master"); MPI-style
        # subclasses are expected to overwrite this.
        self.rank = 0

    @staticmethod
    def enabled() -> bool:
        """Return True if this pool type can be used in the current
        environment. The base implementation is always disabled."""
        return False

    def is_master(self) -> bool:
        """Return True on the controlling (rank-0) process."""
        return self.rank == 0

    def is_worker(self) -> bool:
        """Return True on any non-master (rank != 0) process."""
        return self.rank != 0

    def wait(self) -> None:
        """Block a worker until tasks arrive; a no-op in the base pool."""
        return

    @abc.abstractmethod
    def map(self, *args: Any, **kwargs: Any) -> Any:
        """Apply a worker callable to every task; must be overridden."""
        return

    def batched_map(
        self,
        worker: Callable[..., Any],
        tasks: Iterable[Any],
        *args: Any,
        **kwargs: Any,
    ) -> Iterable[Any]:
        """Split *tasks* into one batch per pool member and map the
        worker over the batches.

        NOTE(review): relies on ``self.size`` which is not defined on
        this base class — presumably provided by concrete subclasses;
        confirm before calling on a bare BasePool.
        """
        batches = batch_tasks(n_batches=self.size, data=tasks)
        return self.map(worker, batches, *args, **kwargs)

    def close(self) -> None:
        """Release pool resources; a no-op in the base pool."""
        pass

    def __enter__(self) -> "BasePool":
        # Support `with Pool(...) as pool:` usage.
        return self

    def __exit__(self, *args: Any) -> None:
        self.close()

    def _call_callback(self, callback, generator):
        # When no callback is given, pass the result stream through
        # untouched; otherwise wrap it so the callback fires per item.
        if callback is None:
            return generator
        return _callback_wrapper(callback, generator)
|
adrnREPO_NAMEschwimmbadPATH_START.@schwimmbad_extracted@schwimmbad-main@src@schwimmbad@pool.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/ohlc/_line.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``ohlc.line`` property.

    Auto-generated (Python 2 tree): do not edit by hand. The ``data_docs``
    string below is runtime data consumed by plotly's help machinery.
    """

    def __init__(self, plotly_name="line", parent_name="ohlc", **kwargs):
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Line"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            dash
                Sets the dash style of lines. Set to a dash
                type string ("solid", "dot", "dash",
                "longdash", "dashdot", or "longdashdot") or a
                dash length list in px (eg "5px,10px,2px,2px").
                Note that this style setting can also be set
                per direction via `increasing.line.dash` and
                `decreasing.line.dash`.
            width
                [object Object] Note that this style setting
                can also be set per direction via
                `increasing.line.width` and
                `decreasing.line.width`.
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@ohlc@_line.py@.PATH_END.py
|
{
"filename": "env_data.py",
"repo_name": "mtalapinto/moes",
"repo_path": "platospec/optics/env_data.py",
"type": "Python"
}
|
import glob
from astropy.time import Time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from . import vis_spectrometer
from . import echelle_orders
from . import parameters
def get_CCD_T_vis(mjd):
    """
    Return the mean VIS CCD temperature (deg C) around the given epoch.

    Averages the two cryostat sensors (VIS-CR-Ts01/Ts02) over a
    +/- 0.02 day window centred on `mjd` and converts from Kelvin to
    Celsius.

    NOTE(review): the callers in this module pass a full Julian Date,
    while this function compares the value directly against the 'mjd'
    column of the sensor files — confirm which time scale the data files
    actually use.
    """
    # path_env_data = '/home/marcelo/Documents/ramses/vis/env_data/'
    path_env_data = 'env_data/'
    ccd_temp_ts01_file = 'VIS-CR-Ts01.dat'
    ccd_temp_ts02_file = 'VIS-CR-Ts02.dat'
    ccd_temp_data_vis_ts01 = pd.read_csv(path_env_data + ccd_temp_ts01_file, sep=',')
    ccd_temp_data_vis_ts02 = pd.read_csv(path_env_data + ccd_temp_ts02_file, sep=',')
    # Select readings within +/- 0.02 day of the requested epoch.
    temp_ts01 = ccd_temp_data_vis_ts01.loc[ccd_temp_data_vis_ts01['mjd'] < mjd + .02]
    temp_ts01 = temp_ts01.loc[temp_ts01['mjd'] > mjd - .02]
    temp_ts02 = ccd_temp_data_vis_ts02.loc[ccd_temp_data_vis_ts02['mjd'] < mjd + .02]
    temp_ts02 = temp_ts02.loc[temp_ts02['mjd'] > mjd - .02]
    # Average each sensor over the window, then average the two sensors.
    avg_temp_ts01 = np.average(temp_ts01[' temp'].values)
    avg_temp_ts02 = np.average(temp_ts02[' temp'].values)
    ccd_temp = (avg_temp_ts01 + avg_temp_ts02)/2
    # Kelvin -> Celsius
    ccd_temp = ccd_temp - 273.15
    return ccd_temp
    #env_data_vis = env_data_vis.loc[env_data_vis['mjd'] > t_mjd - 25.5]
    #env_data_vis = env_data_vis.loc[(env_data_vis[' temp'] > 278) & (env_data_vis[' temp'] < 285)]
    #max_temp = max(env_data_vis[' temp'].values)
    #min_temp = min(env_data_vis[' temp'].values)
    #delta_T = np.abs(max_temp - min_temp)
    #temp_out = np.average(env_data_vis[' temp'].values)
    #temp_out = temp_out - 273.15
    # print(temp_out)
    #return temp_out
def get_temps_date(date):
    """Return instrument temperatures (deg C) for a calendar date.

    Parameters
    ----------
    date : str
        Calendar date ``'YYYY-MM-DD'``; the epoch is taken at 18:00 UTC.

    Returns
    -------
    numpy.ndarray
        Element 0 is the CCD temperature; elements 1-8 are the
        VIS-IS-Ts01..Ts08 sensor temperatures, each averaged over a
        +/-0.02 day window and converted from Kelvin.
    """
    date_isot = Time(date + 'T18:00:00', format='isot')
    date_jd = date_isot.jd
    path_env_data = 'env_data/'

    # NOTE(review): get_CCD_T_vis filters on an 'mjd' column, but a full
    # Julian date is passed here -- confirm the intended time scale.
    temps = [get_CCD_T_vis(date_jd)]

    t_mjd = date_jd - 2400000.5
    # sensors Ts01 .. Ts08 (was ``for i in range(9): if i != 0`` --
    # range(1, 9) expresses the same iteration directly)
    for i in range(1, 9):
        env_data_vis_file = 'VIS-IS-Ts0' + str(i) + '.dat'
        env_data_vis = pd.read_csv(path_env_data + env_data_vis_file, sep=',')
        window = env_data_vis.loc[(env_data_vis['mjd'] < t_mjd + 0.02)
                                  & (env_data_vis['mjd'] > t_mjd - 0.02)]
        # Kelvin -> Celsius
        temps.append(np.average(window[' temp'].values) - 273.15)

    return np.array(temps)
def get_temps_bjd(bjd):
    """Return instrument temperatures (deg C) near a Julian-date epoch.

    Parameters
    ----------
    bjd : float
        Julian date of the epoch.

    Returns
    -------
    numpy.ndarray
        Element 0 is the CCD temperature; elements 1-8 are the
        VIS-IS-Ts01..Ts08 sensor temperatures, each averaged over a
        +/-0.02 day window and converted from Kelvin.
    """
    path_env_data = 'env_data/'

    # NOTE(review): get_CCD_T_vis filters on an 'mjd' column, but a full
    # Julian date is passed here -- confirm the intended time scale.
    temps = [get_CCD_T_vis(bjd)]

    t_mjd = bjd - 2400000.5
    # sensors Ts01 .. Ts08 (was ``for i in range(9): if i != 0`` --
    # range(1, 9) expresses the same iteration directly)
    for i in range(1, 9):
        env_data_vis_file = 'VIS-IS-Ts0' + str(i) + '.dat'
        env_data_vis = pd.read_csv(path_env_data + env_data_vis_file, sep=',')
        window = env_data_vis.loc[(env_data_vis['mjd'] < t_mjd + 0.02)
                                  & (env_data_vis['mjd'] > t_mjd - 0.02)]
        # Kelvin -> Celsius
        temps.append(np.average(window[' temp'].values) - 273.15)

    return np.array(temps)
def get_t_mjd(mjd):
    """Return the eight VIS-IS sensor temperatures (deg C) within
    +/-0.025 days of ``mjd``."""
    datadir = 'env_data/'
    epoch = float(mjd)

    readings = []
    for sensorno in range(1, 9):
        table = pd.read_csv(datadir + 'VIS-IS-Ts0' + str(sensorno) + '.dat', sep=',')
        near = table.loc[(table['mjd'] < epoch + 0.025) & (table['mjd'] > epoch - 0.025)]
        # Kelvin -> Celsius
        readings.append(np.average(near[' temp'].values) - 273.15)

    return np.array(readings)
def get_p_date(date):
    """Return the mean VIS tank pressure for a calendar date (18:00 UTC)."""
    path_env_data = 'env_data/'
    # convert the calendar date to MJD at 18:00 UTC
    t_mjd = Time(date + 'T18:00:00', format='isot').jd - 2400000.5

    pressures = pd.read_csv(path_env_data + 'VIS-VT-S1.dat', sep=',')
    near = pressures.loc[(pressures['mjd'] < t_mjd + 0.02)
                         & (pressures['mjd'] > t_mjd - 0.02)]
    return np.mean(near[' p'].values)
def get_p_bjd(bjd):
    """Return the mean VIS tank pressure near a Julian-date epoch."""
    path_env_data = 'env_data/'
    t_mjd = bjd - 2400000.5

    pressures = pd.read_csv(path_env_data + 'VIS-VT-S1.dat', sep=',')
    near = pressures.loc[(pressures['mjd'] < t_mjd + 0.02)
                         & (pressures['mjd'] > t_mjd - 0.02)]
    return np.mean(near[' p'].values)
def get_p_mjd(mjd):
    """Return the mean VIS tank pressure within +/-0.05 days of ``mjd``."""
    path_env_data = 'env_data/'
    pressures = pd.read_csv(path_env_data + 'VIS-VT-S1.dat', sep=',')
    near = pressures.loc[(pressures['mjd'] < mjd + 0.05)
                         & (pressures['mjd'] > mjd - 0.05)]
    return np.mean(near[' p'].values)
|
mtalapintoREPO_NAMEmoesPATH_START.@platospec@optics@env_data.py@.PATH_END.py
|
{
"filename": "test_mcmc_util.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/infer/mcmc/test_mcmc_util.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from functools import partial
import pytest
import torch
import pyro
import pyro.distributions as dist
from pyro.infer import Predictive
from pyro.infer.autoguide import (
init_to_feasible,
init_to_generated,
init_to_mean,
init_to_median,
init_to_sample,
init_to_uniform,
init_to_value,
)
from pyro.infer.mcmc import NUTS
from pyro.infer.mcmc.api import MCMC
from pyro.infer.mcmc.util import initialize_model
from pyro.util import optional
from tests.common import assert_close, str_erase_pointers
def beta_bernoulli():
    """Build a beta-Bernoulli model together with synthetic observations.

    Returns the model callable, a sampled data tensor, and the true
    Bernoulli probabilities used to generate the data.
    """
    n_obs = 1000
    true_probs = torch.tensor([0.2, 0.3, 0.4, 0.8, 0.5])
    data = dist.Bernoulli(true_probs).sample([n_obs])

    def model(data=None):
        with pyro.plate("num_components", 5):
            beta = pyro.sample("beta", dist.Beta(1.0, 1.0))
            with pyro.plate("data", n_obs):
                pyro.sample("obs", dist.Bernoulli(beta), obs=data)

    return model, data, true_probs
@pytest.mark.parametrize("num_samples", [100, 200, None])
@pytest.mark.parametrize("parallel", [False, True])
def test_predictive(num_samples, parallel):
    """Smoke-test Predictive on posterior samples from a short NUTS run.

    Verifies the output shapes and that the posterior-predictive mean of
    ``obs`` recovers the true Bernoulli probabilities within 10%.
    """
    model, data, true_probs = beta_bernoulli()
    init_params, potential_fn, transforms, _ = initialize_model(
        model, model_args=(data,)
    )
    nuts_kernel = NUTS(potential_fn=potential_fn, transforms=transforms)
    mcmc = MCMC(nuts_kernel, 100, initial_params=init_params, warmup_steps=100)
    mcmc.run(data)
    samples = mcmc.get_samples()
    # Predictive warns when num_samples disagrees with the number of
    # posterior samples (100), so the warning is expected only then.
    with optional(pytest.warns(UserWarning), num_samples not in (None, 100)):
        predictive = Predictive(
            model,
            samples,
            num_samples=num_samples,
            return_sites=["beta", "obs"],
            parallel=parallel,
        )
        predictive_samples = predictive()

    # check shapes
    assert predictive_samples["beta"].shape == (100, 1, 5)
    assert predictive_samples["obs"].shape == (100, 1000, 5)

    # check sample mean
    assert_close(
        predictive_samples["obs"].reshape([-1, 5]).mean(0), true_probs, rtol=0.1
    )
def model_with_param():
    # model containing a learnable pyro.param alongside a sample site,
    # used to check that MCMC handles params correctly
    x = pyro.param("x", torch.tensor(1.0))
    pyro.sample("y", dist.Normal(x, 1))
@pytest.mark.parametrize("jit_compile", [False, True])
@pytest.mark.parametrize("num_chains", [1, 2])
@pytest.mark.filterwarnings("ignore:num_chains")
def test_model_with_param(jit_compile, num_chains):
    """Smoke-test MCMC over a model containing a ``pyro.param``, with and
    without JIT, including the multiprocessing ("spawn") multi-chain path."""
    kernel = NUTS(model_with_param, jit_compile=jit_compile, ignore_jit_warnings=True)
    mcmc = MCMC(kernel, 10, num_chains=num_chains, mp_context="spawn")
    mcmc.run()
@pytest.mark.parametrize("subsample_size", [10, 5])
def test_model_with_subsample(subsample_size):
    """NUTS must reject genuinely subsampled plates; a full plate runs."""
    size = 10

    def model():
        with pyro.plate("J", size, subsample_size=subsample_size):
            pyro.sample("x", dist.Normal(0, 1))

    runner = MCMC(NUTS(model), 10)
    if subsample_size < size:
        # partial subsampling is unsupported under HMC/NUTS
        with pytest.raises(RuntimeError, match="subsample"):
            runner.run()
    else:
        runner.run()
def test_init_to_value():
    """init_to_value should seed NUTS at the requested constrained value."""
    def model():
        pyro.sample("x", dist.LogNormal(0, 1))

    value = torch.randn(()).exp() * 10
    kernel = NUTS(model, init_strategy=partial(init_to_value, values={"x": value}))
    kernel.setup(warmup_steps=10)
    # initial_params live in unconstrained space; exp() maps back to the
    # LogNormal support for comparison
    assert_close(value, kernel.initial_params["x"].exp())
@pytest.mark.parametrize(
    "init_strategy",
    [
        # both the bare strategy callables and their configured instances
        # must be accepted by the NUTS kernel
        init_to_feasible,
        init_to_mean,
        init_to_median,
        init_to_sample,
        init_to_uniform,
        init_to_value,
        init_to_feasible(),
        init_to_mean(),
        init_to_median(num_samples=4),
        init_to_sample(),
        init_to_uniform(radius=0.1),
        init_to_value(values={"x": torch.tensor(3.0)}),
        init_to_generated(generate=lambda: init_to_value(values={"x": torch.rand(())})),
    ],
    ids=str_erase_pointers,
)
def test_init_strategy_smoke(init_strategy):
    """Each autoguide init strategy should initialise NUTS without error."""
    def model():
        pyro.sample("x", dist.LogNormal(0, 1))

    kernel = NUTS(model, init_strategy=init_strategy)
    kernel.setup(warmup_steps=10)
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@infer@mcmc@test_mcmc_util.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "MNGuenther/allesfitter",
"repo_path": "allesfitter_extracted/allesfitter-master/allesfitter/exoworlds_rdx/lightcurves/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 14:18:20 2018
@author:
Maximilian N. Günther
MIT Kavli Institute for Astrophysics and Space Research,
Massachusetts Institute of Technology,
77 Massachusetts Avenue,
Cambridge, MA 02109,
USA
Email: maxgue@mit.edu
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: plotting settings
import seaborn as sns
# global seaborn look for all package plots: paper context, ticks style,
# enlarged fonts, matplotlib-compatible color codes
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
# inward-pointing ticks on both axes
sns.set_style({"xtick.direction": "in", "ytick.direction": "in"})
# slightly wider marker edges
sns.set_context(rc={'lines.markeredgewidth': 1})
from .index_transits import index_transits, index_eclipses, get_first_epoch
from .lightcurve_tools import phase_fold, rebin_err
from .expand_flags import expand_flags
from .gp_decor import gp_decor
|
MNGuentherREPO_NAMEallesfitterPATH_START.@allesfitter_extracted@allesfitter-master@allesfitter@exoworlds_rdx@lightcurves@__init__.py@.PATH_END.py
|
{
"filename": "_cache.py",
"repo_name": "ThomasEdwardRiley/xpsi-pre-transfer",
"repo_path": "xpsi-pre-transfer_extracted/xpsi-pre-transfer-master/xpsi/PostProcessing/_cache.py",
"type": "Python"
}
|
from __future__ import division, print_function
from .. import __version__
from ._global_imports import *
try:
import h5py
except ImportError:
print('Install h5py to enable signal caching.')
raise
class _Cache(object):
    """ Cache numerical model objects computed during likelihood evaluation.

    Data is stored in an HDF5 file with one row per sample; the counter
    ``self.i`` tracks the current row for both writing (:meth:`cache`) and
    reading (iteration).

    :param str filename:
        Filename of cache.

    :param str cache_dir:
        Directory to write cache to.

    :param bool read_only:
        Do not write to cache file?

    :param bool archive:
        If not read-only, then archive an existing cache file found at the
        same path?

    """

    def __init__(self, filename, cache_dir='./',
                 read_only=False, archive=True):
        # NOTE(review): a non-string ``filename`` silently leaves
        # ``self._filename`` unset, so the path join below would raise;
        # consider validating the type explicitly.
        if isinstance(filename, _six.string_types):
            if filename[-3:] != '.h5':
                self._filename = filename + '.h5'
            else:
                self._filename = filename

        self._cache_dir = cache_dir
        self._path = _os.path.join(self._cache_dir, self._filename)
        self._read_only = read_only
        self._archive_if_incompatible = archive

    def __enter__(self):
        return self

    def __exit__(self, exc, exc_value, traceback):
        # never suppress the exception; just announce it before propagation
        if exc:
            print('Encountered problem whilst caching:')

    def _open(self, mode='r'):
        """ Get the :mod:`h5py` context manager. """
        if self._read_only and mode != 'r':
            raise RuntimeError('The cache is in read-only mode.')

        return h5py.File(self._path, mode)

    def cache(self, data):
        """ Cache the computational data.

        :param dict data:
            Maps dataset names to an array, or to a tuple/list of
            equally-shaped arrays, stored at row ``self.i``. Datasets are
            created lazily on first use.
        """
        with self._open('r+') as f:
            g = f['data']
            # ``dict.iteritems`` is Python-2 only and crashes on Python 3;
            # ``items`` behaves identically on both.
            for key, value in data.items():
                if isinstance(value, tuple) or isinstance(value, list):
                    if key not in g.keys():
                        shape = [f.attrs['n'], len(value)]
                        shape += [s for s in value[0].shape]
                        g.create_dataset(key, shape=shape, dtype='float64')
                    for j, v in enumerate(value):
                        g[key][self.i, j, ...] = v
                else:
                    if key not in g.keys():
                        shape = [f.attrs['n']] + [s for s in value.shape]
                        g.create_dataset(key, shape=shape, dtype='float64')
                    g[key][self.i, ...] = value

        self.i += 1

    def reset_iterator(self):
        """ Reset the counter for the cache iterator. """
        self.i = 0

    def __iter__(self):
        self.reset_iterator()
        return self

    def __next__(self):
        """ Read one row of every cached dataset. """
        cached = {}
        with self._open('r') as f:
            g = f['data']
            for key in g.keys():
                cached[key] = g[key][self.i, ...]

        self.i += 1
        return cached

    def next(self):
        """ Python 2.x compatibility. """
        return self.__next__()

    @make_verbose('Checking whether an existing cache can be read:',
                  'Cache state determined')
    def do_caching(self, samples, force=False):
        """ Check whether a new cache is required or whether an existing
        cache can be read without additional computation.

        :param samples: Sample array the cache must correspond to.
        :param bool force: Unconditionally create a fresh cache.

        :return: Boolean indicating whether to read (``False``) or write.
        """
        if force:
            self._new(samples)
            return True

        try:  # try reading file and checking keys
            with self._open('r') as f:
                if 'thetas' not in f.keys():
                    self._new(samples)
                    return True
        except IOError:  # create new cache file
            self._new(samples)
            return True
        else:  # can be read, so check if samples array are matching
            if self._changed(samples):
                self._new(samples)
                return True
            else:
                return False

    @make_verbose('Creating new cache file', 'Cache file created')
    def _new(self, samples):
        """ Prepare a new cache file, archiving any incompatible one. """
        if not _os.path.isdir(self._cache_dir):
            _os.mkdir(self._cache_dir)

        if self._archive_if_incompatible:
            try:
                with self._open('r'):
                    pass
            except IOError:
                # nothing to archive
                self._initialise(samples)
            else:
                self._archive()
                self._initialise(samples)
        else:
            self._initialise(samples)

    @make_verbose('Initialising cache file', 'Cache file initialised')
    def _initialise(self, samples):
        """ Initialise the cache with the sample set and software version. """
        with self._open('w') as f:
            f.attrs['version'] = __version__
            f.attrs['n'] = samples.shape[0]
            f.create_dataset('thetas', data=samples)
            f.create_group('/data')

        self.reset_iterator()

    def _changed(self, samples):
        """ Check whether software version or sample set has changed. """
        with self._open('r') as f:
            if f.attrs['version'] != __version__:
                return True
            if not _np.array_equal(f['thetas'], samples):
                return True

        return False

    @make_verbose('Attempting to archive existing cache file in '
                  'a subdirectory')
    def _archive(self):
        """ Archive an existing cache file.

        Implemented as a generator whose yielded strings are consumed by
        the :func:`make_verbose` decorator for progress reporting.
        """
        # to archive the existing cache file
        archive_dir = _os.path.join(self._cache_dir, 'archive')

        try:
            if not _os.path.isdir(archive_dir):
                _os.mkdir(archive_dir)
        except OSError:
            yield ('Archiving failed... cache file %s will be '
                   'overwritten.' % self._filename)
            yield
        else:
            yield 'Targeting subdirectory: %s.' % archive_dir

            try:
                from datetime import datetime
            except ImportError:
                yield ('Archiving failed... cache file %s will be '
                       'overwritten.' % self._filename)
                yield
            else:
                name_archived = self._filename[:-3] + '__archive__'
                name_archived += 'xpsi_version_%s__' % __version__
                obj = datetime.now()
                name_archived += 'datetime__%i.%i.%i__%i.%i.%i' % (obj.day,
                                                                   obj.month,
                                                                   obj.year,
                                                                   obj.hour,
                                                                   obj.minute,
                                                                   obj.second)

                try:
                    # rename the cache at its full path; using only the bare
                    # filename fails whenever cache_dir is not the CWD
                    _os.rename(self._path,
                               _os.path.join(archive_dir, name_archived + '.h5'))
                except OSError:
                    yield ('Archiving failed... cache file %s will be '
                           'overwritten.' % self._filename)
                else:
                    yield ('Existing cache file archived in '
                           'subdirectory %s.' % archive_dir)

        yield None
|
ThomasEdwardRileyREPO_NAMExpsi-pre-transferPATH_START.@xpsi-pre-transfer_extracted@xpsi-pre-transfer-master@xpsi@PostProcessing@_cache.py@.PATH_END.py
|
{
"filename": "_uid.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/sankey/_uid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``uid`` property of ``sankey`` traces."""

    def __init__(self, plotly_name="uid", parent_name="sankey", **kwargs):
        # pull the default out of kwargs so explicit overrides win
        edit_type = kwargs.pop("edit_type", "plot")
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@sankey@_uid.py@.PATH_END.py
|
{
"filename": "example-line.ipynb",
"repo_name": "JohannesBuchner/UltraNest",
"repo_path": "UltraNest_extracted/UltraNest-master/docs/example-line.ipynb",
"type": "Jupyter Notebook"
}
|
# Tutorial: fitting a line
In this tutorial you will learn:
- How to fit a line to data with error bars
- How to obtain the intrinsic scatter and its uncertainties
- How to quantify the numerical error of the sampling
- How to compare empirical models
Lets say we want to fit a line to some data points.
Here is our data: measurements of three observables (the bulge mass of galaxies, the velocity dispersion, and the mass of the black hole).
```python
import numpy as np
# Black hole data from Kormendy & Ho (2014) https://arxiv.org/abs/1304.7762 https://arxiv.org/abs/1308.6483
# Bulge mass and error (log Msun)
mB = np.array([9.05, 11.84, 11.27, 10.65, 11.5, 11.74, 11.33, 10.26, 11.06, 11.61, 10.5, 10.91, 11.26, 11.01, 11.77, 11.65, 10.85, 11.62, 11.51, 10.88, 11.84, 10.85, 11.72, 9.64, 11.64, 10.97, 11.16, 12.09, 11.28, 11.05, 11.65, 11.6, 11.0, 10.57, 11.69, 11.25, 11.61, 11.65, 11.75, 11.6, 11.81, 11.78])
mBerr = np.array([0.1, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.1, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.09, 0.1, 0.09, 0.09, 0.09, 0.1, 0.09])
# Black hole mass, errors, and magnitude
mBH = np.log10([2.45e+06, 1.69e+08, 1.47e+09, 5.90e+08, 8.81e+08, 4.65e+09, 3.87e+09,
1.45e+07, 1.08e+07, 3.72e+09, 1.78e+08, 4.16e+08, 1.37e+08, 4.65e+08,
9.09e+09, 5.29e+08, 9.78e+08, 9.25e+08, 1.30e+07, 6.96e+07, 2.54e+09,
9.00e+07, 6.15e+09, 6.00e+08, 4.72e+09, 2.02e+08, 1.71e+09, 2.08e+10,
8.55e+08, 5.69e+07, 4.75e+09, 3.69e+09, 2.73e+08, 4.87e+08, 3.74e+09,
2.10e+09, 3.96e+08, 2.30e+09, 1.34e+09, 2.48e+09, 3.74e+09, 1.30e+09])
mBHlo = np.log10([1.43e+06, 1.39e+08, 1.27e+09, 5.39e+08, 4.35e+08, 4.24e+09, 3.16e+09,
0.10e+07, 1.03e+07, 3.21e+09, 8.50e+07, 3.12e+08, 9.00e+07, 3.66e+08,
6.28e+09, 4.21e+08, 6.70e+08, 8.38e+08, 0.10e+07, 5.62e+07, 2.44e+09,
4.50e+07, 5.78e+09, 4.00e+08, 3.67e+09, 1.52e+08, 1.52e+09, 4.90e+09,
4.07e+08, 4.65e+07, 2.81e+09, 2.65e+09, 1.94e+08, 3.34e+08, 2.59e+09,
2.00e+09, 2.40e+08, 2.19e+09, 9.30e+08, 2.29e+09, 3.22e+09, 1.11e+09])
mBHhi = np.log10([3.460e+06, 1.970e+08, 1.680e+09, 6.510e+08, 1.781e+09, 5.380e+09,
4.480e+09, 2.910e+07, 1.120e+07, 3.830e+09, 2.720e+08, 5.200e+08,
1.820e+08, 5.640e+08, 1.143e+10, 6.360e+08, 1.286e+09, 1.023e+09,
2.240e+08, 8.290e+07, 3.120e+09, 1.350e+08, 6.530e+09, 9.000e+08,
5.760e+09, 2.530e+08, 1.810e+09, 3.660e+10, 1.293e+09, 6.730e+07,
5.630e+09, 3.790e+09, 3.410e+08, 6.400e+08, 5.500e+09, 2.730e+09,
6.720e+08, 3.450e+09, 1.850e+09, 2.960e+09, 4.160e+09, 1.500e+09])
# Velocity dispersion and error (km/s)
sigma = np.array([77.0, 226.0, 328.0, 167.0, 315.0, 276.0, 270.0, 175.0, 166.0, 297.0, 145.0, 206.0, 229.0, 182.0, 270.0, 315.0, 242.0, 296.0, 182.0, 167.0, 300.0, 190.0, 324.0, 185.0, 380.0, 177.0, 355.0, 347.0, 222.0, 150.0, 333.0, 328.0, 183.0, 239.0, 318.0, 389.0, 266.0, 292.0, 257.0, 331.0, 288.0, 322.0])
sigmaerr = np.array([3.0, 9.0, 9.0, 3.0, 3.0, 2.0, 10.0, 8.0, 16.0, 12.0, 7.0, 10.0, 11.0, 9.0, 27.0, 15.0, 12.0, 14.0, 5.0, 8.0, 7.0, 9.0, 28.0, 9.0, 19.0, 8.0, 14.0, 5.0, 11.0, 7.0, 2.0, 11.0, 9.0, 11.0, 2.0, 3.0, 13.0, 5.0, 26.0, 5.0, 14.0, 16.0])
n_data = len(mBerr)
```
## Visualise the data
Lets plot the data first to see what is going on:
```python
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure()
xlabel = r'Bulge mass [log, $M_\odot$]'
ylabel = r'Velocity dispersion [km/s]'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.errorbar(x=mB, xerr=mBerr, y=sigma, yerr=sigmaerr,
marker='o', ls=' ', color='orange')
plt.yscale('log')
```
## Data properties
This scatter plot shows:
* error bars in both x and y
* intrinsic scatter
### Resampling the data
We could also represent each data point by a cloud of samples.
Each point represents a possible true solution of that galaxy.
```python
samples = []
plt.figure()
for i in range(n_data):
# draw normal random points
# scale according to error bars and values
samples_mBi = np.random.normal(mB[i], mBerr[i], size=400)
# same for sigma
samples_sigmai = np.random.normal(sigma[i], sigmaerr[i], size=400)
# we will work in log-sigma:
samples_logsigmai = np.log10(samples_sigmai)
samples.append([samples_mBi, samples_logsigmai])
    # for each galaxy, plot a little cloud with its own color
plt.scatter(samples_mBi, samples_logsigmai, s=2, marker='x')
samples = np.array(samples)
xlabel = r'Bulge mass [log, $M_\odot$]'
ylabel = r'Velocity dispersion [log, km/s]'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
```
```python
samples.shape
```
## Model
Lets fit a line model with intrinsic, gaussian scatter.
$$ y \sim \mathrm{Normal}(x \times \alpha + \beta, \sigma) $$
The model has three unknown parameters:
* the slope $\alpha$
* the offset $\beta$
* the scatter $\sigma$
Lets write down prior ranges for these parameters:
```python
parameters = ['slope', 'offset', 'scatter']
def prior_transform(cube):
# the argument, cube, consists of values from 0 to 1
# we have to convert them to physical scales
params = cube.copy()
# let slope go from -3 to +3
lo = -3
hi = +3
params[0] = cube[0] * (hi - lo) + lo
# let offset go from 10 to 1000 km/s -- use log
lo = np.log10(10)
hi = np.log10(1000)
params[1] = cube[1] * (hi - lo) + lo
# let scatter go from 0.001 to 10
lo = np.log10(0.001)
hi = np.log10(10)
params[2] = 10**(cube[2] * (hi - lo) + lo)
return params
```
Define the likelihood, which measures how far the data are from the model predictions.
More precisely, how often the data would arise under the given parameters.
```python
import scipy.stats
def log_likelihood(params):
# unpack the current parameters:
slope, offset, scatter = params
# compute for each x point, where it should lie in y
y_expected = (samples[:,0] - 10) * slope + offset
# compute the probability of each sample
probs_samples = scipy.stats.norm(y_expected, scatter).pdf(samples[:,1])
# average over each galaxy, because we assume one of the points is the correct one (logical OR)
probs_objects = probs_samples.mean(axis=1)
assert len(probs_objects) == n_data
# multiply over the galaxies, because we assume our model holds true for all objects (logical AND)
# for numerical stability, we work in log and avoid zeros
loglike = np.log(probs_objects + 1e-100).sum()
return loglike
```
Implicitly, this model assumes that the velocity dispersion is predicted by the bulge mass.
Alternatively, one could flip the axes. Or define the scatter orthogonally.
But lets stick with our approach for now.
## Solving the problem
```python
import ultranest
sampler = ultranest.ReactiveNestedSampler(parameters, log_likelihood, prior_transform)
```
Lets first try with relatively poor sampling:
```python
result = sampler.run(min_num_live_points=50, min_ess=100) # you can increase these numbers later
```
```python
from ultranest.plot import cornerplot
cornerplot(sampler.results)
```
```python
plt.figure()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.errorbar(x=mB, xerr=mBerr, y=sigma, yerr=sigmaerr,
marker='o', ls=' ', color='orange')
from ultranest.plot import PredictionBand
x = np.linspace(9, 12.5, 400)
band = PredictionBand(x)
band_lo = PredictionBand(x)
band_hi = PredictionBand(x)
for params in sampler.results['samples'][:40]:
slope, offset, scatter = params
y = (x - 10) * slope + offset
band.add(10**y)
# indicate intrinsic scatter
band_hi.add(10**(y + scatter))
band_lo.add(10**(y - scatter))
band.shade(color='k', alpha=0.1)
band.line(color='k')
band_lo.line(color='r', ls='--')
band_hi.line(color='r', ls='--')
plt.yscale('log');
```
## Understanding the uncertainties
Lets focus on the scatter parameter.
We want to understand how well we can constrain it.
We can make a histogram of the posterior samples:
```python
scatter_samples = result['weighted_samples']['points'][:,2]
weights = result['weighted_samples']['weights']
plt.figure(figsize=(8, 2.5))
bins=np.linspace(0.01, 0.2, 100)
plt.hist(scatter_samples, weights=weights, bins=bins, density=True, histtype='step')
plt.xlabel('sigma')
plt.ylabel("Posterior probability")
```
## Quantifying the distribution tails
But how well do we really know this probability distribution? We put uncertainties on it, because UltraNest provides bootstrapped weights, which emulates multiple (30) runs with a different number of live points.
```python
result['weighted_samples']['bootstrapped_weights'].shape
```
```python
from fastkde import fastKDE
```
```python
plt.figure()
bins=np.linspace(0.01, 0.2, 64+1)
scatter_samples = result['samples'][:,2]
pdf = fastKDE.pdf_at_points(scatter_samples, list_of_points=bins)
plt.plot(bins, pdf, color='k')
from ultranest.plot import PredictionBand
from ultranest.integrator import resample_equal
band = PredictionBand(bins)
for weights in result['weighted_samples']['bootstrapped_weights'].transpose():
scatter_samples = resample_equal(result['weighted_samples']['points'][:,2], weights)
pdf = fastKDE.pdf_at_points(scatter_samples, list_of_points=bins)
band.add(pdf)
band.line(ls='--', color='r', alpha=0.5)
band.shade(0.49, color='r', alpha=0.1)
plt.xlabel(r'$\sigma$')
plt.ylabel(r"Posterior probability")
#plt.yscale('log')
plt.ylim(1e-3, 50);
```

### Task for you
Edit this notebook. Try adjusting the number of live points (min_num_live_points) and effective sample size (min_ess) parameters above to decrease the uncertainties.
## Comparing empirical models
We are using an ad-hoc/empirical model function,
and it does not have well-defined priors.
Because of that, doing Bayesian model comparison with Bayes factors does not make sense.
Instead, we can compare models based on their information content, and their
prediction power.
Lets see how much better the line model is to a constant.
```python
parameters0 = ['offset', 'scatter']
def prior_transform0(cube):
params = cube.copy()
# let offset go from 10 to 1000 km/s -- use log
lo = np.log10(10)
hi = np.log10(1000)
params[0] = cube[0] * (hi - lo) + lo
# let scatter go from 0.001 to 10
lo = np.log10(0.001)
hi = np.log10(10)
params[1] = 10**(cube[1] * (hi - lo) + lo)
return params
def log_likelihood0(params):
# unpack the current parameters:
offset, scatter = params
# compute for each x point, where it should lie in y
y_expected = offset
# compute the probability of each sample
probs_samples = scipy.stats.norm(y_expected, scatter).pdf(samples[:,1])
# average over each galaxy, because we assume one of the points is the correct one (logical OR)
probs_objects = probs_samples.mean(axis=1)
assert len(probs_objects) == n_data
# multiply over the galaxies, because we assume our model holds true for all objects (logical AND)
# for numerical stability, we work in log and avoid zeros
loglike = np.log(probs_objects + 1e-100).sum()
return loglike
```
```python
sampler0 = ultranest.ReactiveNestedSampler(parameters0, log_likelihood0, prior_transform0)
result0 = sampler0.run()
```
### Model comparison with AIC
Here we compute the Akaike information criterion (AIC).
https://en.wikipedia.org/wiki/Akaike_information_criterion
The model with the lowest AIC should be preferred.
```python
Lmax0 = result0['weighted_samples']['logl'].max()
AIC0 = -2 * Lmax0 + len(parameters0)
Lmax1 = result['weighted_samples']['logl'].max()
AIC1 = -2 * Lmax1 + len(parameters)
print("AIC of constant model: %d" % AIC0)
print("AIC of line model : %d" % AIC1)
```
The line model is doing better according to the AIC.
### Model comparison by prediction power
We can also leave out some data points and see how
well the model, trained on the others, predicts the unseen data points.
There are many ways to leave points out (K-Fold, LOO, bootstrapping).
Here we use a 5-fold cross-validation.
```python
samples_orig = samples.copy()
```
```python
from scipy.special import logsumexp
```
```python
Kpredicts = []
for lo, hi in [(9, 9.5), (9.5, 10), (10, 10.5), (10.5, 11), (11, 11.5), (11.5, 12.2)]:
#for lo, hi in [(9, 10), (10, 11), (11, 12.2)]:
# leave out samples within that interval
excluded = np.logical_and(mB >= lo, mB < hi)
# all the others are allowed
included = ~excluded
# set samples (used inside likelihood functions)
samples = samples_orig[included]
n_data = len(samples)
# analyse with line model
sampler1 = ultranest.ReactiveNestedSampler(parameters, log_likelihood, prior_transform)
result1 = sampler1.run()
# analyse with constant model
sampler0 = ultranest.ReactiveNestedSampler(parameters0, log_likelihood0, prior_transform0)
result0 = sampler0.run()
# now set the samples to the withheld data
samples = samples_orig[excluded]
n_data = len(samples)
# get the log of the mean likelihood from each model
Zpredict0 = logsumexp([log_likelihood0(sample) for sample in result0['samples']])
Zpredict1 = logsumexp([log_likelihood(sample) for sample in result1['samples']])
Kpredicts.append(Zpredict1 - Zpredict0)
```
So lets look at the prediction quality of line model compared to constant model:
Positive values indicate preference for the line model, each entry is a K-fold instance.
```python
Kpredicts
```
averaging the results, we get:
```python
np.mean(Kpredicts)
```
Again positive, so in terms of prediction, the line model is better.
### Next
To recap, we looked at two methods to compare empirical (ad-hoc) models.
Next, you can explore the black hole mass scaling relation, or a combination of the three measurables in the data:
```python
plt.figure()
plt.xlabel(r'Black Hole mass [log, $M_\odot$]')
plt.ylabel(r'Bulge mass [log, $M_\odot$]')
plt.errorbar(y=mB, yerr=mBerr, x=mBH, xerr=[mBHhi-mBH, mBH-mBHlo],
marker='o', ls=' ', color='orange');
```
|
JohannesBuchnerREPO_NAMEUltraNestPATH_START.@UltraNest_extracted@UltraNest-master@docs@example-line.ipynb@.PATH_END.py
|
{
"filename": "test_tf.py",
"repo_name": "hongwanliu/DarkHistory",
"repo_path": "DarkHistory_extracted/DarkHistory-master/tests/test_tf.py",
"type": "Python"
}
|
import os
import sys
import pytest
import h5py
import numpy as np
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from darkhistory.config import load_data
@pytest.fixture(scope='module')
def data_loader():
    """Load the transfer functions plus the expected regression values."""
    data = {
        'dep_tf': load_data('dep_tf'),
        'tf_helper': load_data('tf_helper'),
        'ics_tf': load_data('ics_tf'),
        'expected': {},
    }
    h5_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'data', 'test_tf_2261700c.h5')
    with h5py.File(h5_path, 'r') as hf:
        for name in hf.keys():
            data['expected'][name] = hf[name][()]
    return data
def test_dep_tf(data_loader):
    """Regression-test the deposition transfer functions against stored
    checksums, probing each at three (xHII, Tm, rs) points."""
    tfs = data_loader['dep_tf']
    # fixed pseudo-random probe spectrum, computed once instead of six times
    spec = np.sin(np.arange(500))
    for k in ['highengphot', 'lowengphot', 'lowengelec']:
        tf = tfs[k]
        z = tf.get_tf(0.433, 0.302, 2244).sum_specs(spec)
        z += tf.get_tf(0.760, 0.276, 384).sum_specs(spec)
        z += tf.get_tf(0.930, 0.088, 18).sum_specs(spec)
        z = z.N
        assert np.allclose(z, data_loader['expected'][k])
    for k in ['highengdep', 'CMB_engloss']:
        tf = tfs[k]
        z = spec @ tf.get_val(0.433, 0.302, 2244)
        z += spec @ tf.get_val(0.760, 0.276, 384)
        z += spec @ tf.get_val(0.930, 0.088, 18)
        assert np.allclose(z, data_loader['expected'][k])
def test_ics_tf(data_loader):
    """Regression-test the inverse-Compton transfer functions."""
    tfs = data_loader['ics_tf']
    for name in ['thomson', 'rel', 'engloss']:
        summed = tfs[name].sum_specs(np.sin(np.arange(5000)))
        assert np.allclose(summed.N, data_loader['expected'][name])
def test_tf_helper(data_loader):
    """Regression-test the helper transfer function ``tf_E``."""
    tfs = data_loader['tf_helper']
    # fixed probe vector, computed once instead of three times
    vec = np.sin(np.arange(500))
    for k in ['tf_E']:
        tf = tfs[k]
        z = tf.get_val(0.433, 0.302, 2244) @ vec
        z += tf.get_val(0.760, 0.276, 384) @ vec
        z += tf.get_val(0.930, 0.088, 18) @ vec
        assert np.allclose(z, data_loader['expected'][k])
|
hongwanliuREPO_NAMEDarkHistoryPATH_START.@DarkHistory_extracted@DarkHistory-master@tests@test_tf.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter3d/marker/line/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``scatter3d.marker.line.color``."""

    def __init__(
        self, plotly_name="color", parent_name="scatter3d.marker.line", **kwargs
    ):
        # pull defaults out of kwargs so explicit overrides win
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        colorscale_path = kwargs.pop(
            "colorscale_path", "scatter3d.marker.line.colorscale"
        )
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            colorscale_path=colorscale_path,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter3d@marker@line@_color.py@.PATH_END.py
|
{
"filename": "_ticklabelstep.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergl/marker/colorbar/_ticklabelstep.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabelstepValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for ``scattergl.marker.colorbar.ticklabelstep``."""

    def __init__(
        self,
        plotly_name="ticklabelstep",
        parent_name="scattergl.marker.colorbar",
        **kwargs,
    ):
        # pull defaults out of kwargs so explicit overrides win
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 1)
        super(TicklabelstepValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergl@marker@colorbar@_ticklabelstep.py@.PATH_END.py
|
{
"filename": "_cmax.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattercarpet/marker/line/_cmax.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``scattercarpet.marker.line.cmax``."""

    def __init__(
        self, plotly_name="cmax", parent_name="scattercarpet.marker.line", **kwargs
    ):
        # pull defaults out of kwargs so explicit overrides win
        edit_type = kwargs.pop("edit_type", "plot")
        implied_edits = kwargs.pop("implied_edits", {"cauto": False})
        super(CmaxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattercarpet@marker@line@_cmax.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "CP3-Origins/cp3-bench",
"repo_path": "cp3-bench_extracted/cp3-bench-master/bench/methods/template/tests/__init__.py",
"type": "Python"
}
|
CP3-OriginsREPO_NAMEcp3-benchPATH_START.@cp3-bench_extracted@cp3-bench-master@bench@methods@template@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pytest/py3/pytest/__init__.py",
"type": "Python"
}
|
# PYTHON_ARGCOMPLETE_OK
"""pytest: unit and functional testing with Python."""
from _pytest import __version__
from _pytest import version_tuple
from _pytest._code import ExceptionInfo
from _pytest.assertion import register_assert_rewrite
from _pytest.cacheprovider import Cache
from _pytest.capture import CaptureFixture
from _pytest.config import cmdline
from _pytest.config import Config
from _pytest.config import console_main
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config import hookspec
from _pytest.config import main
from _pytest.config import PytestPluginManager
from _pytest.config import UsageError
from _pytest.config.argparsing import OptionGroup
from _pytest.config.argparsing import Parser
from _pytest.debugging import pytestPDB as __pytestPDB
from _pytest.doctest import DoctestItem
from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureLookupError
from _pytest.fixtures import FixtureRequest
from _pytest.fixtures import yield_fixture
from _pytest.freeze_support import freeze_includes
from _pytest.legacypath import TempdirFactory
from _pytest.legacypath import Testdir
from _pytest.logging import LogCaptureFixture
from _pytest.main import Session
from _pytest.mark import Mark
from _pytest.mark import MARK_GEN as mark
from _pytest.mark import MarkDecorator
from _pytest.mark import MarkGenerator
from _pytest.mark import param
from _pytest.monkeypatch import MonkeyPatch
from _pytest.nodes import Collector
from _pytest.nodes import File
from _pytest.nodes import Item
from _pytest.outcomes import exit
from _pytest.outcomes import fail
from _pytest.outcomes import importorskip
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.pytester import HookRecorder
from _pytest.pytester import LineMatcher
from _pytest.pytester import Pytester
from _pytest.pytester import RecordedHookCall
from _pytest.pytester import RunResult
from _pytest.python import Class
from _pytest.python import Function
from _pytest.python import Metafunc
from _pytest.python import Module
from _pytest.python import Package
from _pytest.python_api import approx
from _pytest.python_api import raises
from _pytest.recwarn import deprecated_call
from _pytest.recwarn import WarningsRecorder
from _pytest.recwarn import warns
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
from _pytest.runner import CallInfo
from _pytest.stash import Stash
from _pytest.stash import StashKey
from _pytest.terminal import TestShortLogReport
from _pytest.tmpdir import TempPathFactory
from _pytest.warning_types import PytestAssertRewriteWarning
from _pytest.warning_types import PytestCacheWarning
from _pytest.warning_types import PytestCollectionWarning
from _pytest.warning_types import PytestConfigWarning
from _pytest.warning_types import PytestDeprecationWarning
from _pytest.warning_types import PytestExperimentalApiWarning
from _pytest.warning_types import PytestRemovedIn8Warning
from _pytest.warning_types import PytestReturnNotNoneWarning
from _pytest.warning_types import PytestUnhandledCoroutineWarning
from _pytest.warning_types import PytestUnhandledThreadExceptionWarning
from _pytest.warning_types import PytestUnknownMarkWarning
from _pytest.warning_types import PytestUnraisableExceptionWarning
from _pytest.warning_types import PytestWarning
# Re-export the pdb-compatible breakpoint entry point from pytest's debugging
# plugin so users can write ``pytest.set_trace()``.
set_trace = __pytestPDB.set_trace
# Public API of the ``pytest`` namespace (roughly case-insensitive alphabetical).
__all__ = [
    "__version__",
    "approx",
    "Cache",
    "CallInfo",
    "CaptureFixture",
    "Class",
    "cmdline",
    "Collector",
    "CollectReport",
    "Config",
    "console_main",
    "deprecated_call",
    "DoctestItem",
    "exit",
    "ExceptionInfo",
    "ExitCode",
    "fail",
    "File",
    "fixture",
    "FixtureLookupError",
    "FixtureRequest",
    "freeze_includes",
    "Function",
    "hookimpl",
    "HookRecorder",
    "hookspec",
    "importorskip",
    "Item",
    "LineMatcher",
    "LogCaptureFixture",
    "main",
    "mark",
    "Mark",
    "MarkDecorator",
    "MarkGenerator",
    "Metafunc",
    "Module",
    "MonkeyPatch",
    "OptionGroup",
    "Package",
    "param",
    "Parser",
    "PytestAssertRewriteWarning",
    "PytestCacheWarning",
    "PytestCollectionWarning",
    "PytestConfigWarning",
    "PytestDeprecationWarning",
    "PytestExperimentalApiWarning",
    "PytestRemovedIn8Warning",
    "PytestReturnNotNoneWarning",
    "Pytester",
    "PytestPluginManager",
    "PytestUnhandledCoroutineWarning",
    "PytestUnhandledThreadExceptionWarning",
    "PytestUnknownMarkWarning",
    "PytestUnraisableExceptionWarning",
    "PytestWarning",
    "raises",
    "RecordedHookCall",
    "register_assert_rewrite",
    "RunResult",
    "Session",
    "set_trace",
    "skip",
    "Stash",
    "StashKey",
    "version_tuple",
    "TempdirFactory",
    "TempPathFactory",
    "Testdir",
    "TestReport",
    "TestShortLogReport",
    "UsageError",
    "WarningsRecorder",
    "warns",
    "xfail",
    "yield_fixture",
]
def __getattr__(name: str) -> object:
    """Module-level attribute hook providing lazy access to deprecated names."""
    if name != "Instance":
        raise AttributeError(f"module {__name__} has no attribute {name}")
    # Deferred import: merely importing Instance emits a deprecation warning,
    # so only pay that cost when the attribute is actually requested.
    from _pytest.python import Instance

    return Instance
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pytest@py3@pytest@__init__.py@.PATH_END.py
|
{
"filename": "smooth_cal_inspect_2458192.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/smooth_cal_inspect/smooth_cal_inspect_2458192.ipynb",
"type": "Jupyter Notebook"
}
|
# Stage 2 Calibration Smoothing Nightly Notebook
**Josh Dillon**, Last Revised 12/4/20
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from hera_cal import io, redcal, apply_cal, abscal, utils
from hera_cal.smooth_cal import build_time_blacklist
from hera_qm.metrics_io import load_metric_file
import pyuvdata
import glob
import os
from copy import deepcopy
import inspect
import h5py
import matplotlib.cm as cm
from IPython.display import display, HTML
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
display(HTML("<style>.container { width:100% !important; }</style>"))
```
<style>.container { width:100% !important; }</style>
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# lst_blacklist_string = '0-1.3 2.5-4.3 5.0-5.7 6.5-9.1 10.6-11.5 11.9-14.3 16.3-1.3'
# abscal_model_glob = '/lustre/aoc/projects/hera/zmartino/hera_calib_model/H3C/abscal_files_unique_baselines/zen.2458894.?????.uvh5'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
# os.environ["LST_BLACKLIST_STRING"] = lst_blacklist_string
# os.environ["ABSCAL_MODEL_GLOB"] = abscal_model_glob
```
```python
# Use environment variables to figure out path to data
# (set by the nightly-notebook runner; see the commented-out cell above for
# how to set them when running locally).
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
lst_blacklist_string = os.environ['LST_BLACKLIST_STRING']
abscal_model_glob = os.environ['ABSCAL_MODEL_GLOB']
# Echo the configuration so it is recorded in the executed notebook.
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
print(f'lst_blacklist_string = "{lst_blacklist_string}"')
print(f'abscal_model_glob = "{abscal_model_glob}"')
```
JD = "2458192"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458192"
lst_blacklist_string = ""
abscal_model_glob = "/lustre/aoc/projects/hera/H1C_IDR3/abscal_model/zen.245804*.HH.uvRXLS.uvh5"
```python
print('Looking for data in', data_path, 'on JD', JD)
# Prefer the ".sum.uvh5" naming convention; fall back to plain ".uvh5" files.
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.sum.uvh5')))
if len(data_list) == 0:
    data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.uvh5')))
print('...found {} data files.'.format(len(data_list)))
# Matching absolute-calibration and smoothed-calibration files for the night.
abscal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.abs.calfits')))
print('...found {} abscal files.'.format(len(abscal_list)))
smooth_cal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.sum.smooth_abs.calfits')))
print('...found {} smooth_cal files.'.format(len(smooth_cal_list)))
```
Looking for data in /lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458192 on JD 2458192
...found 73 data files.
...found 73 abscal files.
...found 73 smooth_cal files.
```python
# get all JDs and LSTs
_, _, file_lst_arrays, file_time_arrays = io.get_file_times(data_list)
# parse lst_blacklist_string
# Format: space-separated "start-end" LST ranges in hours, e.g. "0-1.3 2.5-4.3".
lst_blacklists = []
if len(lst_blacklist_string) > 0:
    lst_blacklists = [tuple([float(arg) for arg in arg_pair.split('-', maxsplit=1)])
                      for arg_pair in lst_blacklist_string.split(' ')]
# get times that are blacklisted and reshape them like file_time_arrays
time_blacklisted_flat = build_time_blacklist(np.hstack(file_time_arrays), lst_blacklists=lst_blacklists)
time_blacklisted = [fta.astype(bool) for fta in file_time_arrays]
n = 0
for i in range(len(file_time_arrays)):
    time_blacklisted[i] = np.zeros_like(time_blacklisted[i], dtype=bool)
    for j in range(len(file_time_arrays[i])):
        # Walk the flat blacklist in lockstep with the per-file time arrays.
        time_blacklisted[i][j] = time_blacklisted_flat[n]
        n += 1
# pick the central time from among the not-LST blacklisted files, if possible
good_indices = [i for i, tb in enumerate(time_blacklisted) if not np.any(tb)]
if len(good_indices) > 0:
    file_index = good_indices[len(good_indices)//2]
else:
    file_index = len(data_list)//2
# Extract the "JD.fractional" portion of the chosen filename for use in titles.
file_JD = '.'.join([s for s in data_list[file_index].split('.') if s.isdigit()])
```
```python
# Load abscal gains
hca = io.HERACal(abscal_list[file_index])
ga, gaf, _, _ = hca.read()
# Get min_bl_cut, we only want to compare baselines actually used in absolute calibration
try:
    # min_bl_cut is not stored as structured metadata, so scrape it out of the
    # calfits history string; fall back to 1 m if the parse fails.
    min_bl_cut = float(hca.history.replace('\n','').split('--min_bl_cut')[-1].split('--')[0].strip())
except:
    print('Could not find min_bl_cut, setting to 1 m.')
    min_bl_cut = 1.0
# Load the most common redundant baseline longer than min_bl_cut
hd = io.HERAData(data_list[file_index])
bls_to_plot = []
for pol in ['ee', 'nn']:
    reds = redcal.get_reds(hd.antpos, pols=[pol])
    # Sort redundant groups from most to fewest member baselines.
    reds = sorted(reds, key=len, reverse=True)
    bl_lens = np.array([np.linalg.norm(hd.antpos[red[0][1]] - hd.antpos[red[0][0]]) for red in reds])
    try:
        bl_group_to_plot = (np.array(reds)[bl_lens >= min_bl_cut])[0]
    except:
        bl_group_to_plot = reds[0]
    bls_to_plot.extend(bl_group_to_plot)
# Load smooth_cal gains and determine ex_ants
hc = io.HERACal(smooth_cal_list[file_index])
gains, gain_flags, _, _ = hc.read()
# Antennas flagged at every time and frequency are treated as excluded.
ex_ants = [ant for ant in gain_flags if np.all(gain_flags[ant])]
# Load data and calibrate
data, flags, nsamples = hd.read(bls=bls_to_plot)
sc_data, sc_flags = deepcopy(data), deepcopy(flags)
ac_data, ac_flags = deepcopy(data), deepcopy(flags)
apply_cal.calibrate_in_place(sc_data, gains, data_flags=sc_flags, cal_flags=gain_flags)
apply_cal.calibrate_in_place(ac_data, ga, data_flags=ac_flags, cal_flags=gaf)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
plt.figure(figsize=(8,8))
# Invisible scatter just to set sensible axis limits from the antenna layout.
plt.scatter(np.array(list(hd.antpos.values()))[:,0],
            np.array(list(hd.antpos.values()))[:,1], c='w', s=0)
for ant,pos in hd.antpos.items():
    # ex_ants entries are (antnum, pol) tuples; compare on antenna number only.
    bad = ant in [ant[0] for ant in ex_ants]
    plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
                                    fill=(~bad), color=['grey','r'][bad]))
    plt.text(pos[0],pos[1],str(ant), va='center', ha='center', color='w')
plt.xlabel("Antenna East-West Position (meters)")
plt.ylabel("Antenna North-South Position (meters)")
plt.title('Antenna Positions on {} (Red = Flagged)'.format(file_JD));
plt.axis('equal')
plt.tight_layout()
plt.show()
```

### Figure 1: Array and Flagged Antennas
#### OBSERVER CHECKLIST:
* Check that the array configuration looks reasonable.
* Check that all flags expected to be flagged are actually flagged but also that not everything is getting flagged.
```python
# check whether the model is redundant by looking at the history
model_is_redundant = ('--model_is_redundant' in "".join(hc.history.split()))
# Find files that overlap with this file
abscal_matched_files = list(abscal.match_times(data_list[file_index],
                                               sorted(glob.glob(abscal_model_glob)),
                                               filetype='uvh5', atol=1e-5))
hdm = io.HERAData(abscal_matched_files)
# Get model baselines to load
model_bls = hdm.bls
model_antpos = hdm.antpos
# With multiple model files, hdm.bls/antpos are per-file dicts; flatten them.
if isinstance(model_bls, dict):
    model_bls = list(model_bls.values())[0]
    model_antpos = {ant: pos for antpos in hdm.antpos.values() for ant, pos in antpos.items()}
_, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(bls_to_plot, model_bls,
                                                                   hd.antpos, model_antpos=model_antpos,
                                                                   model_is_redundant=model_is_redundant)
model, model_flags, _ = hdm.read(bls=model_bl_to_load)
# Rephase model at index of best match to mean LST in the data
model_index = np.argmin(np.abs(model.lsts - np.mean(data.lsts)))
model_blvecs = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}
utils.lst_rephase(model, model_blvecs, model.freqs, np.mean(data.lsts) - model.lsts[model_index],
                  lat=hdm.telescope_location_lat_lon_alt_degrees[0], inplace=True)
# Redundantly average the model if it is not already redundantly averaged.
if not model_is_redundant:
    model, _, _ = utils.red_average(model, flags=model_flags)
```
```python
import warnings
with warnings.catch_warnings():
    # Medians over fully-flagged slices produce harmless all-NaN warnings.
    warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
    for pol in ['ee', 'nn']:
        for func, plot, ylabel in zip([np.abs, np.angle], [plt.semilogy, plt.plot], ['Amplitude (Jy)', 'Phase (Radians)']):
            plt.figure(figsize=(16,4))
            for d, f, l, m in zip([ac_data, sc_data],
                                  [ac_flags, sc_flags],
                                  ['Abs Calibrated Data', 'Smooth Calibrated Data'],
                                  ['r-', 'b.']):
                to_avg = []
                for bl in [k for k in bls_to_plot if k[2] == pol]:
                    blvec = hd.antpos[bl[0]] - hd.antpos[bl[1]]
                    # NaN-out flagged samples so they drop out of the median.
                    to_avg.append(deepcopy(d[bl]))
                    to_avg[-1][f[bl]] = np.nan + 1.0j * np.nan
                # Median real and imaginary parts separately over baselines and times.
                to_plot = np.nanmedian(np.real(to_avg), axis=(0,1)) + 1.0j * np.nanmedian(np.imag(to_avg), axis=(0,1))
                plot(hd.freqs/1e6, func(to_plot), m, label=l)
            for bl in [k for k in model if k[2] == pol]:
                plot(hd.freqs/1e6, func(model[bl][model_index]), 'k-', label='Abscal Model')
            plt.xlabel('Frequency (MHz)')
            plt.ylabel(ylabel)
            plt.legend(loc='lower right')
            plt.title('{}-Polarized, {:f} m East, {:f} m North Visibility on {}'.format(pol, blvec[0], blvec[1], file_JD))
```




### Figure 2: Example redundant baseline average, both absolute calibrated and smoothed, compared to the Abscal Model
#### OBSERVER CHECKLIST:
* Check that the abscaled data and the smoothcaled data are reasonably consistent
* Check that both match the abscal model fairly well.
# Load a whole day
```python
# Load relative difference and flagging info from smooth_cal gains
# (smooth_cal calfits store |g_smooth - g_abs| / |g_abs| where omnical files
# would store chi^2 per antenna — see the Figure 5 caption below).
ant_flags_dict = {}
avg_rel_diff_ee_dict = {}
avg_rel_diff_nn_dict = {}
rel_diff_med_dict = {}
ants = set([])
for cal in smooth_cal_list:
    hc = io.HERACal(cal)
    _, flags, rel_diff, avg_rel_diff = hc.read()
    ants |= set(flags.keys())
    ant_flags_dict[cal] = {ant: np.all(flags[ant]) for ant in flags}
    avg_rel_diff_ee_dict[cal] = avg_rel_diff['Jee']
    avg_rel_diff_nn_dict[cal] = avg_rel_diff['Jnn']
    rel_diff_med_dict[cal] = {ant: np.nanmedian(rel_diff[ant], axis=1) for ant in rel_diff}
all_flagged_dict = {ant: np.all([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
# Stack per-file waterfalls into full-night (time, freq) arrays.
avg_rel_diff_ee = np.vstack(np.array(list(avg_rel_diff_ee_dict.values())))
avg_rel_diff_nn = np.vstack(np.array(list(avg_rel_diff_nn_dict.values())))
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
# save middle-numbered ants with a minimal number of flags
ants_to_save = {}
ant_to_nflags_dict = {ant: np.sum([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
for pol in ['Jee', 'Jnn']:
    min_flags = np.min([ant_to_nflags_dict[ant] for ant in ants if ant[1] == pol])
    ant_candidates = sorted([ant for ant in ants if ant_to_nflags_dict[ant] == min_flags and ant[1] == pol])
    Nac = len(ant_candidates)
    # Keep the two middle candidates (one example pair per polarization).
    ants_to_save[pol] = ant_candidates[(Nac // 2 - 1):(Nac // 2 + 1)]
```
```python
# Load smooth_cal gains/flags
# (only the antennas chosen in ants_to_save are kept, to limit memory use)
times_dict = {}
sc_gain_dict = {}
sc_flag_dict = {}
for cal in smooth_cal_list:
    hc = io.HERACal(cal)
    gains, flags, _, _ = hc.read()
    times_dict[cal] = hc.times
    sc_gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
    sc_flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
# Load abscal gains/flags
ac_gain_dict = {}
ac_flag_dict = {}
for cal in abscal_list:
    hc = io.HERACal(cal)
    gains, flags, _, _ = hc.read()
    ac_gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
    ac_flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
# Organize gains/flags into grids
times = np.hstack(list(times_dict.values()))
lsts = 12 / np.pi * pyuvdata.utils.get_lst_for_time(times, *hd.telescope_location_lat_lon_alt_degrees)
# Stack per-file arrays into full-night (time, freq) waterfalls per antenna.
sc_gains = {ant: np.vstack([sc_gain_dict[cal][ant] for cal in sc_gain_dict])
            for pol in ants_to_save for ant in ants_to_save[pol]}
sc_flags = {ant: np.vstack([sc_flag_dict[cal][ant] for cal in sc_flag_dict])
            for pol in ants_to_save for ant in ants_to_save[pol]}
# Pixels flagged for every saved antenna.
flag_mask = np.all([f for f in sc_flags.values()], axis=0)
ac_gains = {ant: np.vstack([ac_gain_dict[cal][ant] for cal in ac_gain_dict])
            for pol in ants_to_save for ant in ants_to_save[pol]}
ac_flags = {ant: np.vstack([ac_flag_dict[cal][ant] for cal in ac_flag_dict])
            for pol in ants_to_save for ant in ants_to_save[pol]}
```
# Inspect a whole day
```python
# for overplotting blacklisted LSTs
# NOTE(review): set_under mutates the globally registered "binary" colormap
# in place (hence the matplotlib warning below); copying the colormap first
# would avoid that.
my_cmap = cm.binary
my_cmap.set_under('k', alpha=0)
# (ntimes, nfreqs) array that is 1 in blacklisted rows and 0 elsewhere.
blacklist = np.ones_like(avg_rel_diff_ee) * np.hstack(time_blacklisted)[:, np.newaxis]
```
You are modifying the state of a globally registered colormap. In future versions, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = copy.copy(mpl.cm.get_cmap("binary"))
```python
# Pick vmax at the 99th percentile of the unflagged smoothcal gain amplitudes
vmax = np.max([np.percentile(np.abs(sc_gains[ants_to_save[pol][1]][~flag_mask]), 99) for pol in ['Jee', 'Jnn']])
# Plot smoothcal gain amplitude waterfalls for a single antenna (top row)
fig, axes = plt.subplots(4, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25, 1]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
    ant = ants_to_save[pol][1]
    extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
    # Dividing by ~flags sends flagged pixels to inf so imshow leaves them blank
    # (source of the divide-by-zero warnings below).
    im = ax.imshow(np.abs(sc_gains[ant]) / ~sc_flags[ant], aspect='auto', cmap='inferno',
                   interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
    # Overlay blacklisted times as a translucent gray band.
    ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title(f'Smoothcal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized' )
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
    # Relabel the (time-valued) y ticks with the nearest LST in hours.
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
# Now plot median gain spectra
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
    ant = ants_to_save[pol][1]
    # plot abscal
    to_med = deepcopy(np.abs(ac_gains[ant]))
    to_med[sc_flags[ant]] = np.nan
    if not np.all(np.hstack(time_blacklisted)):
        ax.plot(hd.freqs / 1e6, np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=0), 'r.', label='Abscal')
    # plot smooth_cal
    to_med = deepcopy(np.abs(sc_gains[ant]))
    to_med[sc_flags[ant]] = np.nan
    if not np.all(np.hstack(time_blacklisted)):
        ax.plot(hd.freqs / 1e6, np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=0), 'k.', ms=2, label='Smoothcal')
    ax.set_ylim([0, vmax])
    ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('|g| (unitless)')
    ax.set_title(f'Median Non-Blacklisted or Flagged Gain Amplitude Spectrum of Antenna {ant[0]}: {pol[1:]}-polarized')
    ax.legend()
# Now plot median gain time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
    ant = ants_to_save[pol][1]
    to_med = deepcopy(np.abs(ac_gains[ant]))
    # NaN-out channels that are flagged at all times.
    to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan
    # plot abscal
    if not np.all(np.hstack(time_blacklisted)):
        ax.plot(lsts[~np.hstack(time_blacklisted)],
                np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=1),
                'b.', label='Abscal: Not Blacklisted LSTs')
    if np.any(np.hstack(time_blacklisted)):
        ax.plot(lsts[np.hstack(time_blacklisted)],
                np.nanmedian(to_med[np.hstack(time_blacklisted), :], axis=1),
                'r.', label='Abscal: Blacklisted LSTs')
    # plot smooth_cal
    to_med = deepcopy(np.abs(sc_gains[ant]))
    to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan
    ax.plot(lsts, np.nanmedian(to_med, axis=1),'k.', ms=2, label='Smoothcal')
    ax.set_ylim([0, vmax])
    ax.set_xlabel('LST (hours)')
    ax.set_ylabel('|g| (unitless)')
    ax.set_title(f'Median Over Unflagged Channels Gain Amplitude Time-Series of Antenna {ant[0]}: {pol[1:]}-polarized')
    ax.legend()
# Now plot the flagged abscal waterfall (bottom row)
for ax, pol in zip(axes[3], ['Jee', 'Jnn']):
    ant = ants_to_save[pol][1]
    extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
    im = ax.imshow(np.abs(ac_gains[ant]) / ~sc_flags[ant], aspect='auto', cmap='inferno',
                   interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
    ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title(f'Flagged Abscal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized' )
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
plt.tight_layout()
```
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 3 Example Smoothing of Gain Amplitudes
Smoothcal (top row) and Abscal (bottom row) gain amplitudes for an example antenna. In the waterfalls, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot median non-blacklisted amplitudes as a function of frequency (second row) and the median amplitude as a function of time (third row) for both abscal and smoothcal.
#### OBSERVER CHECKLIST:
* Check that the smoothcal solution matches the abscal solution reasonably well in the non-blacklisted regions.
* Check to see that the overall bandpass looks reasonable
```python
# Plot smoothcal gain phase waterfalls (ratio of the two saved antennas, top row)
fig, axes = plt.subplots(4, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25, 1]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
    ant0, ant1 = ants_to_save[pol]
    extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
    # Dividing by ~flags sends flagged pixels to inf so imshow leaves them blank.
    im = ax.imshow(np.angle(sc_gains[ant0] / sc_gains[ant1]) / ~sc_flags[ant0], aspect='auto', cmap='inferno',
                   interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
    ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title(f'Smoothcal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
    # Relabel the (time-valued) y ticks with the nearest LST in hours.
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
# Now plot median gain spectra
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
    ant0, ant1 = ants_to_save[pol]
    # plot abscal
    to_med = deepcopy(ac_gains[ant0] / ac_gains[ant1])
    to_med[sc_flags[ant0]] = np.nan + 1.0j * np.nan
    if not np.all(np.hstack(time_blacklisted)):
        # Median real and imaginary parts separately, then take the phase.
        med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=0)
        med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=0)
        ax.plot(hd.freqs / 1e6, np.angle(med), 'r.', label='Abscal')
    # plot smooth_cal
    to_med = deepcopy(sc_gains[ant0] / sc_gains[ant1])
    to_med[sc_flags[ant0]] = np.nan + 1.0j * np.nan
    if not np.all(np.hstack(time_blacklisted)):
        med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=0)
        med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=0)
        ax.plot(hd.freqs / 1e6, np.angle(med), 'k.', ms=2, label='Smoothcal')
    ax.set_ylim([-np.pi, np.pi])
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel(f'Phase of g$_{ant0[0]}$ / g$_{ant1[0]}$')
    ax.set_title(f'Median Non-Blacklisted or Flagged Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
    ax.legend()
# Now plot median gain phase time series (phase of the two saved antennas' ratio)
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
    # BUG FIX: this loop previously assigned `ant = ants_to_save[pol][1]` but
    # then used ant0/ant1 left over from the spectrum loop above, so both
    # panels plotted the LAST polarization's antenna pair. Unpack the pair
    # per-polarization here, as the other three loops in this figure do.
    ant0, ant1 = ants_to_save[pol]
    # plot abscal
    to_med = deepcopy(ac_gains[ant0] / ac_gains[ant1])
    # NaN-out channels flagged at all times so they drop out of the median
    # (masking with ant0's flags, consistent with the spectrum loop above).
    to_med[:, np.all(sc_flags[ant0], axis=0)] = np.nan + 1.0j * np.nan
    if not np.all(np.hstack(time_blacklisted)):
        # Median real and imaginary parts separately, then take the phase.
        med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=1)
        med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=1)
        ax.plot(lsts[~np.hstack(time_blacklisted)], np.angle(med), 'b.', label='Abscal: Not Blacklisted LSTs')
    if np.any(np.hstack(time_blacklisted)):
        med = 1.0j * np.nanmedian(to_med[np.hstack(time_blacklisted), :].imag, axis=1)
        med += np.nanmedian(to_med[np.hstack(time_blacklisted), :].real, axis=1)
        ax.plot(lsts[np.hstack(time_blacklisted)], np.angle(med), 'r.', label='Abscal: Blacklisted LSTs')
    # plot smooth_cal
    to_med = deepcopy(sc_gains[ant0] / sc_gains[ant1])
    to_med[:, np.all(sc_flags[ant0], axis=0)] = np.nan + 1.0j * np.nan
    med = 1.0j * np.nanmedian(to_med.imag, axis=1) + np.nanmedian(to_med.real, axis=1)
    ax.plot(lsts, np.angle(med), 'k.', ms=2, label='Smoothcal')
    ax.set_ylim([-np.pi, np.pi])
    ax.set_xlabel('LST (hours)')
    ax.set_ylabel(f'Phase of g$_{ant0[0]}$ / g$_{ant1[0]}$')
    ax.set_title(f'Median Non-Blacklisted or Flagged Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
    ax.legend()
# Now plot the flagged abscal phase waterfall (bottom row)
for ax, pol in zip(axes[3], ['Jee', 'Jnn']):
    ant0, ant1 = ants_to_save[pol]
    extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
    # Dividing by ~flags sends flagged pixels to inf so imshow leaves them blank.
    # BUG FIX: mask with this polarization's flags (ant0), not a stale `ant`.
    im = ax.imshow(np.angle(ac_gains[ant0] / ac_gains[ant1]) / ~sc_flags[ant0], aspect='auto', cmap='inferno',
                   interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
    ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title(f'Flagged Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
plt.tight_layout()
```
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
divide by zero encountered in true_divide
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 4 Example Smoothing of Gain Phases
Smoothcal (top row) and Abscal (bottom row) gain phases for an example antenna. In the waterfalls, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot median non-blacklisted phases as a function of frequency (second row) and the median phases as a function of time (third row) for both abscal and smoothcal.
#### OBSERVER CHECKLIST:
* Check that the smoothcal solution matches the abscal solution reasonably well in the non-blacklisted regions.
* Check to see that the final gain solution is reasonably approximated by a single time-independent delay (linear phase ramp in row 2).
```python
fig, axes = plt.subplots(1, 2, figsize=(20,12))
for ax, rd, t in zip(axes, [avg_rel_diff_ee, avg_rel_diff_nn], ['ee-polarized', 'nn-polarized']):
    extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
    # NOTE(review): sc_flags[ant0] relies on ant0 surviving from the previous
    # cell and masks both panels with a single antenna's flags — confirm this
    # is intended rather than per-polarization flags.
    im = ax.imshow(rd / ~sc_flags[ant0], aspect='auto', vmin=0, cmap='inferno', vmax=.2, interpolation='nearest', extent=extent)
    ax.imshow(blacklist, aspect='auto',
              cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title('Relative Difference Between Smoothcal and Abscal: ' + t)
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    # Relabel the (time-valued) y ticks with the nearest LST in hours.
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, label='$|g_{smooth} - g_{abs}| / |g_{abs}|$ (unitless)')
```
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 5: Relative difference between Abscal and Smoothcal
Where omnical calfits files store $\chi^2$ per antenna, smooth_cal calfits files store the relative difference between Abscal and Smoothcal gains. This difference is done before taking the absolute value, so this metric is sensitive both to phase errors and amplitude errors.
#### OBSERVER CHECKLIST:
* Look for regions of high relative difference that are not blacklisted. This would indicate a problem with smoothing.
# Metadata
```python
# Record the exact hera_cal git version used to produce this notebook.
print(redcal.version.history_string())
```
------------
This file was produced by the function <module>() in <ipython-input-1-c6de44361328> using:
git_branch: master
git_description: v3.0-759-g3ecb3e9f
git_hash: 3ecb3e9faa161a3490bbc2f6ca2ea903d9a3bf68
git_origin: git@github.com:HERA-Team/hera_cal.git
version: 3.0
------------
```python
```
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@smooth_cal_inspect@smooth_cal_inspect_2458192.ipynb@.PATH_END.py
|
{
"filename": "starpositions_interface.py",
"repo_name": "Varnani/pywd2015-qt5",
"repo_path": "pywd2015-qt5_extracted/pywd2015-qt5-master/src/interfaces/starpositions_interface.py",
"type": "Python"
}
|
from PyQt5 import QtWidgets, QtGui, QtCore
from gui import starpositions_widget
from src.helpers.matplotlib_embedder import MatplotlibWidget
from src import constants
from src.helpers.wd_utils import wd_io
from src.helpers import methods
import time
from matplotlib import pyplot
import io
import os
class Widget(QtWidgets.QWidget, starpositions_widget.Ui_StarPositionWidget):
    """Star-position / Roche-potential tab.

    Plots instantaneous star surface positions or Roche equipotentials
    from Wilson-Devinney LC output, and can render a sequence of
    position plots into QPixmaps that are played back as an animation
    driven by a Stopwatch thread.
    """

    def __init__(self, parent):
        """Set up UI widgets, the embedded matplotlib chart and animation state.

        parent: the application main window; supplies LC parameters and
        binary paths for every render.
        """
        super(Widget, self).__init__()
        self.setupUi(self)
        self.setWindowIcon(QtGui.QIcon(constants.MAIN_ICON_PATH))
        self.main_window = parent
        self.chart = MatplotlibWidget(self.plot_widget, 1, 1, exportable=False)
        self.chart.create_axis(0, 0, labels=("X", "Y"))
        self.chart.axes[0].axis("equal")
        self.start_btn.setIcon(QtGui.QIcon(constants.PLAY_ICON_PATH))
        self.pause_btn.setIcon(QtGui.QIcon(constants.PAUSE_ICON_PATH))
        self.next_btn.setIcon(QtGui.QIcon(constants.NEXT_ICON_PATH))
        self.prev_btn.setIcon(QtGui.QIcon(constants.PREV_ICON_PATH))
        # animation variables
        self.frames_per_second = 25.0
        self.frame_time = 1.0 / self.frames_per_second  # seconds per frame (0.04 s at 25 fps)
        self.playback = False
        self.Stopwatch = Stopwatch()
        self.Stopwatch.wait = self.frame_time
        self.star_position_data = None
        self.rendered_frames = None  # list of QPixmap frames once an animation is rendered
        self.current_frame = None  # QPixmap currently shown in the viewport
        self.single_frame = None  # True when the last render produced a single frame
        self.connect_signals()

    def connect_signals(self):
        """Wire UI signals to their handlers."""
        self.roche_groupbox.toggled.connect(self.handle_toggle_roche)
        self.positions_groupbox.toggled.connect(self.handle_toggle_position)
        self.plot_btn.clicked.connect(self.plot)
        # animator controls
        self.horizontalSlider.sliderMoved.connect(self.slider_move)
        self.horizontalSlider.sliderPressed.connect(self.slider_move)
        self.start_btn.clicked.connect(self.start_playback)
        self.pause_btn.clicked.connect(self.stop_playback)
        self.next_btn.clicked.connect(self.next_frame)
        self.prev_btn.clicked.connect(self.previous_frame)
        self.Stopwatch.tick.connect(self.advance_frame)
        self.render_btn.clicked.connect(self.start_render)
        self.saveframe_btn.clicked.connect(self.save_frame)
        self.saveall_btn.clicked.connect(self.save_all_frames)

    def resizeEvent(self, resize_event):
        """Rescale the displayed frame whenever the widget is resized."""
        self.draw_current_frame()
        super(Widget, self).resizeEvent(resize_event)

    def handle_toggle_roche(self):
        """Keep the Roche/positions group boxes mutually exclusive (Roche side)."""
        self.roche_groupbox.blockSignals(True)
        self.positions_groupbox.blockSignals(True)
        if self.roche_groupbox.isChecked():
            self.positions_groupbox.setChecked(False)
        elif self.positions_groupbox.isChecked() is False:
            # never allow both boxes to end up unchecked
            self.roche_groupbox.setChecked(True)
        self.roche_groupbox.blockSignals(False)
        self.positions_groupbox.blockSignals(False)

    def handle_toggle_position(self):
        """Keep the Roche/positions group boxes mutually exclusive (positions side)."""
        self.roche_groupbox.blockSignals(True)
        self.positions_groupbox.blockSignals(True)
        if self.positions_groupbox.isChecked():
            self.roche_groupbox.setChecked(False)
        elif self.roche_groupbox.isChecked() is False:
            # never allow both boxes to end up unchecked
            self.positions_groupbox.setChecked(True)
        self.roche_groupbox.blockSignals(False)
        self.positions_groupbox.blockSignals(False)

    def plot(self):
        """Dispatch to whichever plot type is selected by the group boxes."""
        if self.positions_groupbox.isChecked():
            self.plot_positions()
        elif self.roche_groupbox.isChecked():
            self.plot_roche()

    def plot_positions(self):
        """Run LC for a single phase and plot the star surface points."""
        self.chart.set_axis_title("Star Positions")
        lc_params = self.main_window.get_lc_params()
        lc_params["jdphs"] = 2  # independent variable is phase
        lc_params["phstrt"] = self.phase_spinbox.value()
        lc_params["phstop"] = self.phase_spinbox.value()
        lc_params["phin"] = 0.1
        lc_io = wd_io.LCIO(lc_params,
                           wd_path=self.main_window.lc_path,
                           lc_binary_name=self.main_window.lc_binary)
        results = lc_io.fill_for_star_positions().save().run().read_star_positions()[0]
        self.chart.plot(results[0], results[1], linestyle="", marker="+", markersize=1, color="black")
        # mark the origin (center of mass) with a red cross
        self.chart.plot([0], [0], clear=False, linestyle="", marker="+", markersize=10, color=constants.COLOR_RED)

    def plot_roche(self):
        """Compute and plot Roche equipotentials for the current parameters."""
        self.chart.clear_all()
        w = self.main_window.perr0_ipt.value()
        e = self.main_window.e_ipt.value()
        q = self.main_window.q_ipt.value()
        phase = self.main_window.p0_ipt.value()
        pshift = self.main_window.pshift_ipt.value()
        pot1 = self.main_window.pot1_ipt.value()
        pot2 = self.main_window.pot2_ipt.value()
        inner_critical, outer_critical = methods.compute_roche_potentials(
            w, e, q, phase, pshift,
            plot_elements=[self.chart.axes[0], pot1, pot2])
        self.chart.set_labels("X", "Y")
        self.chart.set_axis_title("Roche Potentials")
        self.chart.redraw()
        self.inner_crit_otpt.setValue(inner_critical)
        self.outer_crit_otpt.setValue(outer_critical)

    def setup_slider(self):
        """Fit the timeline slider to the number of rendered frames."""
        self.horizontalSlider.setMinimum(0)
        self.horizontalSlider.setMaximum(len(self.rendered_frames) - 1)
        self.horizontalSlider.setTickInterval(1)

    def slider_move(self):
        """Pause playback and jump to the frame under the slider."""
        self.stop_playback()
        if self.rendered_frames is not None:
            self.show_frame(self.rendered_frames[self.horizontalSlider.value()])

    def start_playback(self):
        """Start the animation timer (no-op until frames are rendered)."""
        if self.rendered_frames is not None:
            self.Stopwatch.start()

    def stop_playback(self):
        """Stop the animation timer."""
        if self.rendered_frames is not None:
            self.Stopwatch.stop()

    def next_frame(self):
        """Manual single-step forward."""
        self.stop_playback()
        self.advance_frame()

    def advance_frame(self):
        """Show the next frame, wrapping back to the first after the last."""
        if self.rendered_frames is not None:
            index = self.horizontalSlider.value() + 1
            if index == len(self.rendered_frames):
                index = 0
            self.horizontalSlider.setValue(index)
            self.show_frame(self.rendered_frames[index])

    def previous_frame(self):
        """Manual single-step backward, wrapping to the last frame."""
        self.stop_playback()
        if self.rendered_frames is not None:
            index = self.horizontalSlider.value() - 1
            if index == -1:
                index = len(self.rendered_frames) - 1
            self.horizontalSlider.setValue(index)
            self.show_frame(self.rendered_frames[index])

    def clear_animator(self):
        """Drop all rendered frames and clear the viewport."""
        self.viewport_pixlabel.clear()
        self.rendered_frames = None
        self.star_position_data = None

    def start_render(self):
        """Render either a full animation or a single frame."""
        self.clear_animator()
        if self.single_chk.isChecked() is not True:
            # render_stars() returns 1 when the render was aborted
            if self.render_stars() != 1:
                self.setup_slider()
        else:
            self.render_single()

    def update_message_label(self, msg):
        """Show a status message and force an immediate repaint."""
        self.message_label.setText(msg)
        self.message_label.repaint()

    def update_progress_bar(self, value):
        """Update the progress bar; accepts float progress values."""
        # QProgressBar.setValue expects an int; callers accumulate float
        # increments, so round here instead of relying on implicit coercion.
        self.progressBar.setValue(int(round(value)))
        self.progressBar.repaint()

    def render_stars(self):
        """Run LC over the configured time/phase range and render every frame.

        Returns 1 when the render is aborted (no range selected, or the
        user cancels the large-iteration warning); returns None on success.
        """
        increment = None
        fmt = None
        iterations = None
        if self.main_window.jd_radiobtn.isChecked():
            mn = self.main_window.lc_jd_start_ipt.value()
            mx = self.main_window.lc_jd_end_ipt.value()
            increment = self.main_window.lc_jd_incr_ipt.value()
            fmt = "{:7.6f}"
            iterations = int((mx - mn) / increment)
        elif self.main_window.phase_radiobtn.isChecked():
            mn = self.main_window.lc_phs_start_ipt.value()
            mx = self.main_window.lc_phs_stop_ipt.value()
            increment = self.main_window.lc_phs_incr_ipt.value()
            fmt = "{:4.3f}"
            iterations = int((mx - mn) / increment)
        if iterations is None:
            # Neither radio button is checked: previously this fell through to
            # "None > 500" and raised a TypeError. Treat it as an abort instead.
            return 1
        if iterations > 500:
            msg = QtWidgets.QMessageBox()
            msg.setIcon(msg.Warning)
            msg.setText("Expected iteration count is larger than 500. (" + str(iterations) + ")")
            msg.setInformativeText("This might result in a very large lcout file (>100MB), "
                                   "take a long time and might crash LC altogether. "
                                   "Are you sure you want to render the animation?")
            msg.setStandardButtons(msg.Ok | msg.Cancel)
            if msg.exec_() == msg.Cancel:
                return 1
        self.update_message_label("Running LC...")
        lc_params = self.main_window.get_lc_params()
        lc_io = wd_io.LCIO(lc_params,
                           wd_path=self.main_window.lc_path,
                           lc_binary_name=self.main_window.lc_binary)
        results = lc_io.fill_for_star_positions().save().run().read_star_positions()
        self.rendered_frames = []
        self.update_message_label("Rendering plots...")
        progress_increment = 100.0 / float(len(results))
        current_progress = 0.0
        for idx, result in enumerate(results):
            qpixmap = self.render_frame(result[0], result[1], increment * idx, fmt=fmt)
            self.rendered_frames.append(qpixmap)
            current_progress = current_progress + progress_increment
            self.update_progress_bar(current_progress)
        self.show_frame(self.rendered_frames[0])
        self.update_message_label("Done.")
        self.update_progress_bar(100)
        self.single_frame = False

    def render_single(self):
        """Run LC for one phase and show the resulting single frame."""
        lc_params = self.main_window.get_lc_params()
        lc_params["jdphs"] = 2  # independent variable is phase
        lc_params["phstrt"] = self.render_phaseSpinbox.value()
        lc_params["phstop"] = self.render_phaseSpinbox.value()
        lc_params["phin"] = 0.1
        lc_io = wd_io.LCIO(lc_params,
                           wd_path=self.main_window.lc_path,
                           lc_binary_name=self.main_window.lc_binary)
        results = lc_io.fill_for_star_positions().save().run().read_star_positions()[0]
        qpixmap = self.render_frame(results[0], results[1], self.render_phaseSpinbox.value())
        self.show_frame(qpixmap)
        self.single_frame = True

    def render_frame(self, x, y, t, fmt="{:4.3f}"):
        """Plot one set of surface points and return it as a QPixmap.

        x, y: surface-point coordinates; t: time/phase value used as the
        legend label; fmt: format spec for that label.
        """
        pyplot.cla()
        pyplot.axis("equal")
        pyplot.xlabel("X")
        pyplot.ylabel("Y")
        pyplot.xlim(self.min_spinbox.value(), self.max_spinbox.value())
        pyplot.ylim(self.min_spinbox.value(), self.max_spinbox.value())
        pyplot.plot(x, y, 'ko', markersize=0.2, label=fmt.format(t))
        pyplot.legend(loc="upper right")
        # mark the origin (center of mass) with a red cross
        pyplot.plot([0], [0], linestyle="", marker="+", markersize=10, color="#ff3a3a")
        image = io.BytesIO()
        dpi_dict = {
            "64dpi": 64,
            "128dpi": 128,
            "196dpi": 196,
            "256dpi": 256
        }
        pyplot.savefig(image, dpi=dpi_dict[str(self.dpi_combobox.currentText())], format="png")
        image.seek(0)
        qbyte = QtCore.QByteArray(image.getvalue())
        qpixmap = QtGui.QPixmap()
        qpixmap.loadFromData(qbyte, "png")
        return qpixmap

    def show_frame(self, qpixmap):
        """Set the given pixmap as the current frame and display it."""
        self.current_frame = qpixmap
        self.draw_current_frame()

    def draw_current_frame(self):
        """Scale the current frame to the viewport, keeping aspect ratio."""
        if self.current_frame is not None:
            w = self.viewport_pixlabel.width()
            h = self.viewport_pixlabel.height()
            # 1 is the Qt enum value Qt::KeepAspectRatio
            self.viewport_pixlabel.setPixmap(QtGui.QPixmap(self.current_frame).scaled(w, h, 1))

    def save_frame(self):
        """Save the displayed frame (or the slider-selected one) as a PNG."""
        frame = None
        if self.single_frame:
            frame = self.current_frame
        elif self.rendered_frames is not None:
            frame = self.rendered_frames[self.horizontalSlider.value()]
        if frame is not None:
            dialog = QtWidgets.QFileDialog()
            dialog.setDefaultSuffix("png")
            dialog.setNameFilter("PNG File (*.png)")  # was "(*png)": malformed filter glob
            dialog.setAcceptMode(1)  # QFileDialog.AcceptSave
            return_code = dialog.exec_()
            # Only read the selection after the dialog is accepted: on cancel
            # selectedFiles() may be empty and indexing it would raise.
            if return_code != 0:
                selected_files = dialog.selectedFiles()
                file_path = str(selected_files[0]) if selected_files else ""
                if file_path != "":
                    frame.save(file_path, "png", 100)
                    msg = QtWidgets.QMessageBox()
                    msg.setText("Frame is saved into " + file_path)
                    msg.exec_()

    def save_all_frames(self):
        """Save every rendered frame as NNNN.png into a chosen directory."""
        if self.rendered_frames is not None:
            dialog = QtWidgets.QFileDialog()
            dialog.setFileMode(2)  # QFileDialog.Directory
            return_code = dialog.exec_()
            # Same cancel-safety as save_frame(): check acceptance first.
            if return_code != 0:
                selected_files = dialog.selectedFiles()
                file_path = str(selected_files[0]) if selected_files else ""
                if file_path != "":
                    for idx, qpixmap in enumerate(self.rendered_frames):
                        qpixmap.save(os.path.join(file_path, "{:0>4d}".format(idx) + ".png"), "png", 100)
                    msg = QtWidgets.QMessageBox()
                    msg.setText("Frames are saved into " + file_path)
                    msg.exec_()
class Stopwatch(QtCore.QThread):
    # Thread that emits a 'tick' signal every `wait` seconds while running;
    # drives frame-advance playback in Widget.
    tick = QtCore.pyqtSignal(name="tick")
    def __init__(self):
        QtCore.QThread.__init__(self)
        # NOTE(review): this attribute shadows QThread.wait(); callers assign
        # the tick period in seconds (Widget sets it to frame_time) before
        # calling start() -- confirm nothing relies on QThread.wait() here.
        self.wait = None
        self.running = True
    def run(self):
        self.running = True
        while self.running:
            # Sleep one tick period, then notify listeners. start() must only
            # be called after `wait` is assigned; time.sleep(None) would raise.
            time.sleep(self.wait)
            self.tick.emit()
    def stop(self):
        # Cooperative stop: the loop exits after the current sleep finishes,
        # so the last tick may still fire once after stop() is called.
        self.running = False
|
VarnaniREPO_NAMEpywd2015-qt5PATH_START.@pywd2015-qt5_extracted@pywd2015-qt5-master@src@interfaces@starpositions_interface.py@.PATH_END.py
|
{
"filename": "efficientnet_test.py",
"repo_name": "keras-team/keras-tuner",
"repo_path": "keras-tuner_extracted/keras-tuner-master/keras_tuner/applications/efficientnet_test.py",
"type": "Python"
}
|
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for HyperEfficientNet Model."""
import numpy as np
import pytest
from keras_tuner.applications import efficientnet
from keras_tuner.backend import config
from keras_tuner.backend import keras
from keras_tuner.engine import hypermodel as hm_module
from keras_tuner.engine import hyperparameters as hp_module
# Torch is channels-first natively; every other backend uses channels-last.
if config.backend() == "torch":
    keras.backend.set_image_data_format("channels_first")
else:
    keras.backend.set_image_data_format("channels_last")
# Shared test input shapes, ordered to match the data format chosen above.
if keras.backend.image_data_format() == "channels_last":
    INPUT_SHAPE_32 = (32, 32, 3)
    INPUT_SHAPE_224 = (224, 224, 3)
    INPUT_SHAPE_256 = (256, 256, 3)
else:
    INPUT_SHAPE_32 = (3, 32, 32)
    INPUT_SHAPE_224 = (3, 224, 224)
    INPUT_SHAPE_256 = (3, 256, 256)
@pytest.mark.parametrize("version", ["B0", "B1"])
@pytest.mark.skipif(
    config.multi_backend(),
    reason="The test is too slow.",
)
def test_model_construction(version):
    """Model builds for each EfficientNet version and trains on one batch."""
    hp = hp_module.HyperParameters()
    # Pin the "version" hyperparameter to the parametrized value.
    hp.Choice("version", [version])
    hypermodel = efficientnet.HyperEfficientNet(
        input_shape=INPUT_SHAPE_32, classes=10
    )
    model = hypermodel.build(hp)
    assert hp.values["version"] == version
    assert model.layers
    assert model.name == "EfficientNet"
    assert model.output_shape == (None, 10)
    # Smoke-test a single optimization step and a forward pass.
    model.train_on_batch(np.ones((1,) + INPUT_SHAPE_32), np.ones((1, 10)))
    out = model.predict(np.ones((1,) + INPUT_SHAPE_32))
    assert out.shape == (1, 10)
def test_hyperparameter_existence_and_defaults():
    """Building with fresh hyperparameters registers the documented defaults."""
    hparams = hp_module.HyperParameters()
    hyper_net = efficientnet.HyperEfficientNet(
        classes=10, input_shape=INPUT_SHAPE_224
    )
    hyper_net.build(hparams)
    expected_defaults = {
        "version": "B0",
        "top_dropout_rate": 0.2,
        "learning_rate": 0.01,
        "pooling": "avg",
    }
    for name, default in expected_defaults.items():
        assert hparams.get(name) == default
def test_hyperparameter_override():
    """Pre-registered hyperparameter values take precedence over the defaults."""
    hparams = hp_module.HyperParameters()
    hparams.Choice("version", ["B1"])
    hparams.Fixed("top_dropout_rate", 0.5)
    hyper_net = efficientnet.HyperEfficientNet(
        classes=10, input_shape=INPUT_SHAPE_256
    )
    hyper_net.build(hparams)
    assert hparams.get("version") == "B1"
    assert hparams.get("top_dropout_rate") == 0.5
def test_input_tensor():
    """A user-supplied input tensor becomes the model's sole input."""
    hparams = hp_module.HyperParameters()
    input_layer = keras.Input(shape=INPUT_SHAPE_256)
    built = efficientnet.HyperEfficientNet(
        input_tensor=input_layer, classes=10
    ).build(hparams)
    assert built.inputs == [input_layer]
def test_override_compiling_phase():
    """Subclasses can override _compile() and define their own hyperparameters."""
    class MyHyperEfficientNet(efficientnet.HyperEfficientNet):
        # Replace the compile step: fixed learning rate, tunable optimizer.
        def _compile(self, model, hp):
            learning_rate = 0.1
            optimizer_name = hp.Choice(
                "optimizer", ["adam", "sgd"], default="adam"
            )
            if optimizer_name == "sgd":
                optimizer = keras.optimizers.SGD(
                    momentum=0.1, learning_rate=learning_rate
                )
            elif optimizer_name == "adam":
                optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
            model.compile(
                optimizer=optimizer,
                loss="categorical_crossentropy",
                metrics=["accuracy"],
            )
    hp = hp_module.HyperParameters()
    hypermodel = MyHyperEfficientNet(input_shape=INPUT_SHAPE_32, classes=5)
    hypermodel.build(hp)
    # The override must suppress the default learning_rate hyperparameter
    # while registering its own "optimizer" choice with its default value.
    assert "learning_rate" not in hp.values
    assert hp.values["optimizer"] == "adam"
def test_augmentation_param_invalid_input():
    """A non-model augmentation argument is rejected at construction time."""
    with pytest.raises(ValueError):
        efficientnet.HyperEfficientNet(
            classes=10, input_shape=INPUT_SHAPE_32, augmentation_model=0
        )
def test_augmentation_param_fixed_model():
    """A plain Keras model passed as augmentation is inserted after the input."""
    hparams = hp_module.HyperParameters()
    augmenter = keras.Sequential(
        [keras.layers.RandomRotation(1.0)], name="aug"
    )
    built = efficientnet.HyperEfficientNet(
        classes=10, input_shape=INPUT_SHAPE_32, augmentation_model=augmenter
    ).build(hparams)
    assert built.layers[1].name == "aug"
def test_augmentation_param_hyper_model():
    """A HyperModel augmentation is built with the shared HyperParameters."""
    class HyperAug(hm_module.HyperModel):
        def build(self, hp):
            model = keras.Sequential(name="aug")
            # Register a hyperparameter so we can verify it reaches hp.values.
            scaling_factor = hp.Choice("scaling_factor", [1])
            model.add(keras.layers.Lambda(lambda x: x * scaling_factor))
            return model
    hp = hp_module.HyperParameters()
    aug_hm = HyperAug()
    hypermodel = efficientnet.HyperEfficientNet(
        input_shape=INPUT_SHAPE_32, classes=10, augmentation_model=aug_hm
    )
    model = hypermodel.build(hp)
    assert model.layers[1].name == "aug"
    assert hp.values["scaling_factor"] == 1
def test_pooling_is_max():
    """The model still builds when the pooling hyperparameter is forced to max."""
    hparams = hp_module.HyperParameters()
    hparams.values["pooling"] = "max"
    hyper_net = efficientnet.HyperEfficientNet(
        classes=10, input_shape=INPUT_SHAPE_32
    )
    hyper_net.build(hparams)
def test_no_classes_raise_error():
    """Omitting `classes` raises a ValueError that names the argument."""
    with pytest.raises(ValueError, match="classes"):
        efficientnet.HyperEfficientNet(input_shape=INPUT_SHAPE_32)
def test_no_input_shape_tensor_raise_error():
    """Omitting both input_shape and input_tensor raises a ValueError."""
    with pytest.raises(ValueError, match="input_tensor"):
        efficientnet.HyperEfficientNet(classes=10)
|
keras-teamREPO_NAMEkeras-tunerPATH_START.@keras-tuner_extracted@keras-tuner-master@keras_tuner@applications@efficientnet_test.py@.PATH_END.py
|
{
"filename": "py_startup_test.py",
"repo_name": "perwin/imfit",
"repo_path": "imfit_extracted/imfit-master/python/py_startup_test.py",
"type": "Python"
}
|
#!/usr/bin/env python3
#
# Tests to see whether numpy and pyfits are present (returns 1 if they are,
# 0 if at least one is not accessible)
# check for numpy
try:
    import numpy  # noqa: F401 -- imported only to test availability
    numpyPresent = True
except ImportError:
    numpyPresent = False
# check for FITS-reading modules: astropy preferred, legacy pyfits fallback
try:
    import astropy.io.fits  # noqa: F401
    astropyPresent = True
except ImportError:
    astropyPresent = False
pyfitsPresent = False
if not astropyPresent:
    try:
        import pyfits  # noqa: F401
        pyfitsPresent = True
    except ImportError:
        pyfitsPresent = False
def main():
    """Emit a single digit for the calling shell script.

    Prints 1 when numpy and at least one FITS reader (astropy or
    pyfits) imported successfully, 0 otherwise.
    """
    fits_reader_ok = astropyPresent or pyfitsPresent
    print(1 if (numpyPresent and fits_reader_ok) else 0)
if __name__ == '__main__':
    main()
|
perwinREPO_NAMEimfitPATH_START.@imfit_extracted@imfit-master@python@py_startup_test.py@.PATH_END.py
|
{
"filename": "memory_optimizer_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/grappler/memory_optimizer_test.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variable_v1
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training as train
class MemoryOptimizerSwapTest(test.TestCase):
  """Tests the Grappler memory optimizer's manual swap annotations."""
  @test_util.run_deprecated_v1
  def testNoSwapping(self):
    """Make sure the graph is preserved when there is nothing to swap."""
    # Tiny diamond graph with no swap annotations: c = a + b, d = b + c.
    a = variable_v1.VariableV1(10, name='a')
    b = variable_v1.VariableV1(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    graph_size = len(mg.graph_def.node)
    nodes = [node.name for node in mg.graph_def.node]
    # Disable every other rewrite so only MANUAL memory optimization runs.
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL))
    graph = tf_optimizer.OptimizeGraph(config, mg)
    # Without annotations the optimizer must leave the graph untouched.
    self.assertEqual(len(graph.node), graph_size)
    self.assertItemsEqual([node.name for node in graph.node], nodes)
  @test_util.run_v1_only('b/120545219')
  def testSimpleSwap(self):
    """Check that the swap annotations are followed."""
    with ops.device('/gpu:0'):
      a = variable_v1.VariableV1(10, name='a')
      b = variable_v1.VariableV1(20, name='b')
      c = math_ops.add_n([a, b], name='c')
      d = math_ops.add_n([b, c], name='d')
      train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
      train_op.append(d)
      # Request that input 0 of node 'd' be swapped out to host memory.
      d.op._set_attr('_swap_to_host', attr_value_pb2.AttrValue(i=0))
      mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
      graph_size = len(mg.graph_def.node)
      config = config_pb2.ConfigProto()
      config.graph_options.rewrite_options.CopyFrom(
          rewriter_config_pb2.RewriterConfig(
              disable_model_pruning=True,
              meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE,
              constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
              memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL,
              min_graph_nodes=-1))
      graph = tf_optimizer.OptimizeGraph(config, mg)
      # The rewrite should add exactly one swap_out/swap_in node pair.
      self.assertEqual(len(graph.node), graph_size + 2)
      self.assertTrue(
          set(node.name for node in graph.node) > set(
              ['a', 'b', 'c', 'd', 'swap_in_d_0', 'swap_out_d_0']))
      # Verify the rewiring: b/read -> swap_out -> swap_in -> d, with a
      # control dependency (^b/read) delaying the swap-in.
      for node in graph.node:
        if node.name == 'swap_in_d_0':
          self.assertEqual('swap_out_d_0', node.input[0])
          self.assertEqual('^b/read', node.input[1])
        elif node.name == 'swap_out_d_0':
          self.assertEqual('b/read', node.input[0])
        elif node.name == 'd':
          self.assertEqual('swap_in_d_0', node.input[0])
          self.assertEqual('c', node.input[1])
class MemoryOptimizerRecomputeTest(test.TestCase):
  """Tests the Python interface to recomputation rewrites.
  See core/grappler/optimizers/memory_optimizer_test.cc for functional tests.
  """
  def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):
    """A simple layered graph with conv, an intermediate op, and a ReLU.

    Returns (metagraph, init_op_name, train_op_name, loss_tensor_name).
    """
    graph = ops.Graph()
    with graph.as_default():
      random_seed.set_random_seed(1)
      current_activation = variable_scope.get_variable(
          name='start', shape=[batch_size, image_dim, image_dim, 5])
      conv_filter = variable_scope.get_variable(
          name='filter', shape=[5, 5, 5, 5])
      # Ten conv -> scale -> relu layers; the intermediate activations are
      # what the recomputation rewrite can choose to recompute.
      for layer_number in range(10):
        with variable_scope.variable_scope('layer_{}'.format(layer_number)):
          after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                 'SAME')
          current_activation = 2. * after_conv
          current_activation = nn.relu(current_activation)
      loss = math_ops.reduce_mean(current_activation)
      # Optionally wrap the optimizer in a name scope so tests can exercise
      # non-default gradient node names.
      with ops.name_scope(optimizer_scope_name):
        optimizer = train.AdamOptimizer(0.001)
        train_op = optimizer.minimize(loss)
      init_op = variables.global_variables_initializer()
      metagraph = train.export_meta_graph()
    return (metagraph, init_op.name, train_op.name, loss.name)
  def testRewritingDefaultGradientNames(self):
    """Tests that rewriting occurs with default gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph()
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            min_graph_nodes=-1,
            memory_optimization=(
                rewriter_config_pb2.RewriterConfig.RECOMPUTATION_HEURISTICS)))
    rewritten_graph_def = tf_optimizer.OptimizeGraph(config, original_metagraph)
    # The rewrite adds nodes; the original graph has no Recomputed/ nodes.
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20,  # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))
  def testRewritingNameScopedGradientNames(self):
    """Tests that rewriting occurs with non-standard gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph(
        optimizer_scope_name='optimizer')
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            min_graph_nodes=-1,
            memory_optimization=rewriter_config_pb2.RewriterConfig
            .RECOMPUTATION_HEURISTICS,
            # Checks that name scope "gradients/" also match sub-scope.
            memory_optimizer_target_node_name_scope='gradients/'))
    rewritten_graph_def = tf_optimizer.OptimizeGraph(config, original_metagraph)
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20,  # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))
  def testRewritingNameScopedGradientNamesScope(self):
    """Tests that rewriting does NOT occur when the target scope mismatches."""
    (original_metagraph, _, _,
     _) = self._GetMetaGraph(optimizer_scope_name='foo/bar')
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig
            .RECOMPUTATION_HEURISTICS,
            # This should not match anything.
            memory_optimizer_target_node_name_scope='r/gradients/'))
    rewritten_graph_def = tf_optimizer.OptimizeGraph(config, original_metagraph)
    # No matching gradient scope -> graph must pass through unchanged.
    self.assertEqual(
        len(rewritten_graph_def.node), len(original_metagraph.graph_def.node))
    self.assertEqual(0,
                     len([
                         node for node in original_metagraph.graph_def.node
                         if 'Recomputed/' in node.name
                     ]))
    self.assertEqual(0,
                     len([
                         node for node in rewritten_graph_def.node
                         if 'Recomputed/' in node.name
                     ]))
  def _GetMemoryOptimizerSessionConfig(self):
    """Session config enabling only the HEURISTICS memory optimizer."""
    rewrite_options = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewrite_options)
    return config_pb2.ConfigProto(graph_options=graph_options)
  def _RunMetaGraphWithConfig(
      self, config, metagraph, init_op_name, train_op_name, loss_op_name):
    """Imports the metagraph, runs init + two train steps, returns the loss."""
    graph = ops.Graph()
    with graph.as_default():
      train.import_meta_graph(metagraph)
      init_op = graph.get_operation_by_name(init_op_name)
      train_op = graph.get_operation_by_name(train_op_name)
      loss_op = graph.get_tensor_by_name(loss_op_name)
      with session.Session(config=config, graph=graph) as sess:
        self.evaluate(init_op)
        self.evaluate(train_op)
        self.evaluate(train_op)
        return self.evaluate(loss_op)
  def testRecomputationRewritingNoErrors(self):
    """Tests that graph output is not significantly different with rewriting."""
    (original_metagraph, init_op_name, train_op_name, loss_op_name
    ) = self._GetMetaGraph()
    # Run the same training twice: once unoptimized, once with the memory
    # optimizer, and compare the resulting losses.
    original_loss = self._RunMetaGraphWithConfig(
        config=config_pb2.ConfigProto(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    memory_optimized_loss = self._RunMetaGraphWithConfig(
        config=self._GetMemoryOptimizerSessionConfig(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    self.assertAllClose(original_loss, memory_optimized_loss, rtol=1e-2)
  def _annotated_graph(self):
    """Three-layer graph with per-op _recompute_hint annotations.

    Returns (graph, init_op, train_op).
    """
    graph = ops.Graph()
    with graph.as_default():
      random_seed.set_random_seed(2)
      current_activation = variable_scope.get_variable(
          name='start', shape=[1, 2, 2, 5])
      conv_filter = variable_scope.get_variable(
          name='filter', shape=[5, 5, 5, 5])
      for layer_number in range(3):
        with variable_scope.variable_scope('layer_{}'.format(layer_number)):
          after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                 'SAME')
          current_activation = 2. * after_conv
          current_activation.op._set_attr(
              '_recompute_hint',
              # The value of the attribute does not matter; just that the key
              # exists in the op's attributes.
              attr_value_pb2.AttrValue(i=1))
          current_activation += 5.
          current_activation.op._set_attr(
              '_recompute_hint', attr_value_pb2.AttrValue(i=0))
          current_activation = nn.relu(current_activation)
          current_activation.op._set_attr(
              '_recompute_hint', attr_value_pb2.AttrValue(i=1))
      loss = math_ops.reduce_mean(current_activation)
      optimizer = train.AdamOptimizer(0.001)
      train_op = optimizer.minimize(loss)
      init_op = variables.global_variables_initializer()
    return graph, init_op, train_op
  def testHintNoMetaGraph(self):
    # Closer to expected usage, but does not check that a re-write actually
    # happens; see testHintDoesRewrite.
    graph, init_op, train_op = self._annotated_graph()
    with graph.as_default():
      manual_memory_config = rewriter_config_pb2.RewriterConfig(
          memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
      graph_options = config_pb2.GraphOptions(
          rewrite_options=manual_memory_config)
      session_config = config_pb2.ConfigProto(graph_options=graph_options)
      with session.Session(config=session_config) as sess:
        self.evaluate(init_op)
        self.evaluate(train_op)
  @test_util.run_v1_only('b/120545219')
  def testHintDoesRewrite(self):
    """The MANUAL optimizer recomputes exactly the hinted ops."""
    graph = self._annotated_graph()[0]
    with graph.as_default():
      metagraph = train.export_meta_graph()
    self.assertEqual(
        0,
        len([node for node in metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            min_graph_nodes=-1,
            memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL))
    rewritten_graph_def = tf_optimizer.OptimizeGraph(config, metagraph)
    # Three layers x three hinted ops each = nine recomputed nodes.
    self.assertEqual(
        9,
        len([
            node for node in rewritten_graph_def.node
            if 'Recomputed/' in node.name
        ]))
if __name__ == '__main__':
  # Run the Grappler memory-optimizer suite under the TF test runner.
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@grappler@memory_optimizer_test.py@.PATH_END.py
|
{
"filename": "load_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/saved_model/load_test.py",
"type": "Python"
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel loading."""
import collections
import contextlib
import functools
import gc
import io
import os
import pathlib
import sys
import tempfile
import unittest
import weakref
from absl.testing import parameterized
import numpy as np
# Import for py bindings to runtime
from tensorflow.python.checkpoint import checkpoint
from tensorflow.python.checkpoint import saveable_compat
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function as framework_function
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.lib.io import tf_record
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_loop
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import load_options
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.trackable import asset
from tensorflow.python.trackable import autotrackable
from tensorflow.python.trackable import resource
from tensorflow.python.training import monitored_session
from tensorflow.python.types import core as types_core
from tensorflow.python.util import tf_inspect
def cycle(
    obj,
    cycles,
    signatures=None,
    save_option=None,
    load_option=None,
    use_cpp_bindings=False,
):
  """Saves and reloads `obj` repeatedly, returning the final loaded object.

  Each round exports the current object to a fresh temporary directory and
  loads it back, feeding the loaded signatures into the next export.
  """
  # TODO(vbardiovsky): It would be nice if exported protos reached a fixed
  # point w.r.t. saving/restoring, ideally after 2nd saving.
  current = obj
  for _ in range(cycles):
    export_dir = tempfile.mkdtemp(prefix=test.get_temp_dir())
    # Prefer the GPU when one is available. This just makes sure we aren't
    # throwing errors and have enough device("CPU") blocks to satisfy the
    # placer.
    with test_util.use_gpu():
      save.save(current, export_dir, signatures, options=save_option)
      loaded = test_load(
          export_dir, options=load_option, use_cpp_bindings=use_cpp_bindings
      )
      signatures = loaded.signatures
    current = loaded
  return loaded
def _test_load_base(path, tags=None, options=None,
                    use_cpp_bindings=False):  # pylint: disable=unused-argument
  """Loads a SavedModel with the pure-Python loader; `use_cpp_bindings` is ignored."""
  del use_cpp_bindings  # Only meaningful for the internal C++ loader.
  return load.load(path, tags=tags, options=options)
def _test_load_internal(path, tags=None, options=None, use_cpp_bindings=False):
  """Loads via the C++ runtime bindings when requested, else the Python loader."""
  if use_cpp_bindings:
    # NOTE(review): `runtime_pybind` is not imported in this (external) copy of
    # the file; this branch presumably only resolves in the internal build,
    # where `run_external` is False — confirm.
    runtime = runtime_pybind.Runtime()
    return runtime.Import(path)
  return _test_load_base(path, tags=tags, options=options,
                         use_cpp_bindings=use_cpp_bindings)
# replaced by copy.bara.sky: the internal build flips this to False, enabling
# the C++-bindings test variants below.
run_external = True
def test_load(path, **kwargs):
  """Dispatches to the internal loader when available, else the base Python one."""
  loader = _test_load_base if run_external else _test_load_internal
  return loader(path, **kwargs)
def _load_test_params():
  """Returns named parameter sets: 1–3 Python reload cycles, plus C++ internally."""
  py_names = ["ReloadOncePy", "ReloadTwicePy", "ReloadThricePy"]
  params = [
      dict(testcase_name=name, cycles=count, use_cpp_bindings=False)
      for count, name in enumerate(py_names, start=1)
  ]
  if not run_external:
    params.append(
        dict(testcase_name="ReloadOnceCpp", cycles=1, use_cpp_bindings=True)
    )
  return params
def _test_params():
  """Returns named parameter sets selecting the Python (and, internally, C++) loader."""
  params = [dict(testcase_name="LoadWithPython", use_cpp_bindings=False)]
  if run_external:
    return params
  params.append(dict(testcase_name="LoadWithCpp", use_cpp_bindings=True))
  return params
@parameterized.named_parameters(*_load_test_params())
class LoadTest(test.TestCase, parameterized.TestCase):
  def test_structure_import(self, cycles, use_cpp_bindings):
    """Object-graph structure (aliased and distinct deps) survives save/load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.dep_one = autotrackable.AutoTrackable()
    root.dep_two = autotrackable.AutoTrackable()
    root.dep_two.dep = autotrackable.AutoTrackable()
    # dep_three aliases dep_two.dep; the loader must preserve that identity.
    root.dep_three = root.dep_two.dep
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertIs(imported.dep_three, imported.dep_two.dep)
    self.assertIsNot(imported.dep_one, imported.dep_two)
  @test_util.run_in_graph_and_eager_modes
  def test_variables(self, cycles, use_cpp_bindings):
    """Variable values and `trainable` flags are restored after loading."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.v1 = variables.Variable(1.0, trainable=True)
    root.v2 = variables.Variable(2.0, trainable=False)
    self.evaluate([root.v1.initializer, root.v2.initializer])
    for _ in range(cycles):
      imported = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
      self.evaluate([imported.v1.initializer, imported.v2.initializer])
    # In graph mode the restored initializers must be real graph operations.
    if not context.executing_eagerly():
      self.assertIsInstance(imported.v1.initializer, ops.Operation)
      self.assertIsInstance(imported.v2.initializer, ops.Operation)
    self.assertEqual(self.evaluate(imported.v1), 1.0)
    self.assertTrue(imported.v1.trainable)
    self.assertEqual(self.evaluate(imported.v2), 2.0)
    self.assertFalse(imported.v2.trainable)
  def test_variables_name(self, cycles, use_cpp_bindings):
    """Duplicate variable names round-trip; loading inside a scope prefixes names."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    # Test 2 variables with same name: should work as the checkpoint
    # is based on object name and not on variable name.
    root.v1 = variables.Variable(1.0, trainable=True, name="v1")
    root.v2 = variables.Variable(2.0, trainable=False, name="v1")
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(imported.v1.numpy(), 1.0)
    self.assertEqual(imported.v2.numpy(), 2.0)
    self.assertEqual(imported.v1.name, root.v1.name)
    self.assertEqual(imported.v2.name, root.v2.name)
    with variable_scope.variable_scope("foo"):
      imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
      self.assertTrue(imported.v1.name.startswith("foo/"))
      self.assertTrue(imported.v2.name.startswith("foo/"))
  @test_util.disable_xla("This test never passed for XLA")
  def test_partially_defined_variable_shape(self, cycles, use_cpp_bindings):
    """A variable created under a partially-defined signature keeps shape [None]."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    class MakeVariable(module.Module):

      def __init__(self):
        self.v = None

      @def_function.function(
          input_signature=[tensor_spec.TensorSpec([None], dtypes.int64)]
      )
      def make_variable(self, initial_value):
        # Created lazily on first trace, so the variable inherits the
        # unknown-length shape from the input signature.
        if self.v is None:
          self.v = variables.Variable(initial_value)

    m = MakeVariable()
    m.make_variable([1, 2, 3])
    m = cycle(m, cycles, use_cpp_bindings=use_cpp_bindings)
    # Assigning a different length must succeed since the shape is [None].
    m.v.assign([1, 2, 3, 4])
    self.assertEqual([None], tensor_shape.as_shape(m.v.shape).as_list())
  @test_util.run_in_graph_and_eager_modes
  def test_capture_variables(self, cycles, use_cpp_bindings):
    """A function capturing a variable tracks assignments after loading."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.weights = variables.Variable(2.0)
    self.evaluate(root.weights.initializer)
    root.f = def_function.function(
        lambda x: root.weights * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
    )
    for _ in range(cycles):
      imported = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
      self.evaluate(imported.weights.initializer)
    self.assertEqual(4.0, self.evaluate(imported.f(constant_op.constant(2.0))))
    # Updating the restored variable must be visible through the function.
    self.evaluate(imported.weights.assign(4.0))
    self.assertEqual(8.0, self.evaluate(imported.f(constant_op.constant(2.0))))
  @test_util.run_in_graph_and_eager_modes
  def test_capture_constant(self, cycles, use_cpp_bindings):
    """A function capturing a constant tensor still computes correctly after load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    captured_constant = constant_op.constant(2.0)
    root.f = def_function.function(
        lambda x: captured_constant * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
    )
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(4.0, self.evaluate(imported.f(constant_op.constant(2.0))))
  def test_control_outputs(self, cycles, use_cpp_bindings):
    """Stateful ops remain registered as control outputs after loading."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    exported = autotrackable.AutoTrackable()
    exported.v = variables.Variable(1.0)
    exported.f = def_function.function(
        lambda: exported.v.assign(2.0, name="should_be_control_output")
    )
    exported_graph = exported.f.get_concrete_function().graph
    self.assertIn(
        exported_graph.get_operation_by_name("should_be_control_output"),
        exported_graph.control_outputs,
    )
    imported = cycle(exported, cycles, use_cpp_bindings=use_cpp_bindings)
    # Calling get_concrete_function wraps in a second call operation; we want to
    # inspect the original function body for the control output; digging into
    # graph.as_graph_def() and its FunctionDefLibrary is another option.
    (imported_concrete,) = imported.f.concrete_functions
    imported_graph = imported_concrete.graph
    self.assertIn(
        imported_graph.get_operation_by_name("should_be_control_output"),
        imported_graph.control_outputs,
    )
def _make_asset(self, contents):
fd, filename = tempfile.mkstemp(prefix=self.get_temp_dir())
with os.fdopen(fd, "w") as f:
f.write(contents)
return filename
  @test_util.run_in_graph_and_eager_modes
  def test_assets(self, cycles, use_cpp_bindings):
    """Asset files travel with the SavedModel even when originals are deleted."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    file1 = self._make_asset("contents 1")
    file2 = self._make_asset("contents 2")
    root = autotrackable.AutoTrackable()
    root.asset1 = asset.Asset(file1)
    root.asset2 = asset.Asset(file2)
    save_dir = os.path.join(self.get_temp_dir(), "save_dir")
    save.save(root, save_dir)
    # Delete the originals and move the SavedModel: only the copies bundled
    # into the SavedModel's assets directory can satisfy the reads below.
    file_io.delete_file(file1)
    file_io.delete_file(file2)
    load_dir = os.path.join(self.get_temp_dir(), "load_dir")
    file_io.rename(save_dir, load_dir)
    imported = test_load(load_dir, use_cpp_bindings=use_cpp_bindings)
    with open(self.evaluate(imported.asset1.asset_path), "r") as f:
      self.assertEqual("contents 1", f.read())
    with open(self.evaluate(imported.asset2.asset_path), "r") as f:
      self.assertEqual("contents 2", f.read())
  def test_cond_prune(self, cycles, use_cpp_bindings):
    """A pruned wrapped function containing a cond_v2 op can be saved and reloaded."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    x_in = []
    x_out = []

    def f(x, y):
      # Record the placeholder and the cond output so we can prune to them.
      x_in.append(x)
      xx = cond_v2.cond_v2(
          math_ops.less(1, 2),
          lambda: x + 1,
          lambda: x + 2,
      )
      x_out.append(xx)
      return xx, 2 * y

    f_wrapped = wrap_function.wrap_function(
        f, [tensor_spec.TensorSpec((), dtypes.float32)] * 2
    )
    # Prune away the second input/output, keeping only the cond branch.
    f_pruned = f_wrapped.prune(x_in[0], [x_out[0]])

    class Adder(module.Module):

      @def_function.function(
          input_signature=[
              tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
          ]
      )
      def add(self, x):
        return f_pruned(x)

    root = Adder()
    root.add(constant_op.constant(1.0))
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    root.add(constant_op.constant(1.0))
  def test_capture_assets(self, cycles, use_cpp_bindings):
    """An asset path captured by a function is rewritten to the SavedModel copy."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.vocab = asset.Asset(self._make_asset("contents"))
    root.f = def_function.function(
        lambda: root.vocab.asset_path, input_signature=[]
    )
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    original_output = root.f().numpy()
    imported_output = imported.f().numpy()
    # The path changes (it now points inside the SavedModel's assets dir),
    # but the file contents must be identical.
    self.assertNotEqual(original_output, imported_output)
    with open(imported_output, "r") as f:
      self.assertEqual("contents", f.read())
  def test_capture_assets_in_graph(self, cycles, use_cpp_bindings):
    """Captured assets work when loading into a graph + MonitoredSession."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.vocab = asset.Asset(self._make_asset("contents"))
    root.f = def_function.function(
        lambda: root.vocab.asset_path, input_signature=[]
    )
    original_output = root.f().numpy()
    # Run all but the last cycle eagerly, then do the final load in graph mode.
    if cycles > 1:
      root = cycle(root, cycles - 1, use_cpp_bindings=use_cpp_bindings)
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(root, path)
    with ops.Graph().as_default():
      imported = test_load(path, use_cpp_bindings=use_cpp_bindings)
      imported_tensor = imported.f()
      with monitored_session.MonitoredSession() as sess:
        imported_output = sess.run(imported_tensor)
      # The asset must be registered in the graph collection exactly once.
      self.assertLen(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS), 1)
      self.assertNotEqual(original_output, imported_output)
      with open(imported_output, "r") as f:
        self.assertEqual("contents", f.read())
  def test_dedup_assets(self, cycles, use_cpp_bindings):
    """Two Asset objects pointing at the same file share one saved copy."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    vocab = self._make_asset("contents")
    root = autotrackable.AutoTrackable()
    root.asset1 = asset.Asset(vocab)
    root.asset2 = asset.Asset(vocab)
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(
        imported.asset1.asset_path.numpy(), imported.asset2.asset_path.numpy()
    )
  def test_asset_fspath(self, cycles, use_cpp_bindings):
    """Asset accepts a `pathlib.Path` (os.PathLike) and still round-trips."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    vocab = pathlib.Path(self._make_asset("contents"))
    root = autotrackable.AutoTrackable()
    root.asset = asset.Asset(vocab)
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertTrue(hasattr(imported, "asset"))
  def test_implicit_input_signature(self, cycles, use_cpp_bindings):
    """All traced concrete functions (float and int) are restored and callable."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    @def_function.function
    def func(x):
      return 2 * x

    root = autotrackable.AutoTrackable()
    root.f = func
    # Add two traces.
    root.f(constant_op.constant(1.0))
    root.f(constant_op.constant(1))
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(4.0, imported.f(constant_op.constant(2.0)).numpy())
    self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())
  def test_explicit_input_signature(self, cycles, use_cpp_bindings):
    """A function with an explicit input_signature needs no pre-save trace."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
    )
    def func(x):
      return 2 * x

    root = autotrackable.AutoTrackable()
    root.f = func
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(4.0, imported.f(constant_op.constant(2.0)).numpy())
  def test_explicit_save_signature(self, cycles, use_cpp_bindings):
    """Passing an explicit signatures dict to save() preserves the function."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    @def_function.function
    def func(x):
      return 2 * x

    root = autotrackable.AutoTrackable()
    root.f = func
    imported = cycle(
        root,
        cycles,
        signatures={
            "f": root.f.get_concrete_function(
                tensor_spec.TensorSpec(None, dtypes.float32)
            )
        },
        use_cpp_bindings=use_cpp_bindings,
    )
    self.assertEqual(4.0, imported.f(constant_op.constant(2.0)).numpy())
  def test_nested_functions(self, cycles, use_cpp_bindings):
    """A tf.function calling another tf.function loads and runs correctly."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    f = def_function.function(
        lambda x: x * 2.0,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
    )
    g = def_function.function(
        lambda x: f(x) + 1.0,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
    )
    root = autotrackable.AutoTrackable()
    root.g = g
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    imported.g(constant_op.constant([1.0]))
  def test_function_with_default_bool_input(self, cycles, use_cpp_bindings):
    """Traces for each value of a bool default argument are restored."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    def func(x, training=False):
      if training:
        return 2 * x
      else:
        return 7

    root = autotrackable.AutoTrackable()
    root.f = def_function.function(func)
    self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
    self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
    self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
    self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
  def test_function_with_defaults_input_tensor(self, cycles, use_cpp_bindings):
    """A tensor default argument is honored both before and after loading."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    @def_function.function(input_signature=[tensor_spec.TensorSpec([])])
    def func(x=constant_op.constant(5.0)):
      return x

    root = autotrackable.AutoTrackable()
    root.f = func
    self.assertAllEqual(5.0, root.f())
    self.assertAllEqual(7.0, root.f(7.0))
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(5.0, imported.f().numpy())
    self.assertEqual(7.0, imported.f(constant_op.constant(7.0)).numpy())
    # imported.signatures with defaults are not supported.
    # TODO(b/277814477) support defaults in loaded.signatures
    # self.assertEqual(
    #     {"output_0": 5.0},
    #     self.evaluate(
    #         imported.signatures["serving_default"]()
    #     ),
    # )
  def test_function_with_defaults_input_numpy(self, cycles, use_cpp_bindings):
    """A numpy-array default argument is honored both before and after loading."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    @def_function.function(input_signature=[tensor_spec.TensorSpec([])])
    def func(x=np.array(5.0)):
      return x

    root = autotrackable.AutoTrackable()
    root.f = func
    self.assertAllEqual(5.0, root.f())
    self.assertAllEqual(7.0, root.f(np.array(7.0)))
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(5.0, imported.f().numpy())
    self.assertEqual(7.0, imported.f(np.array(7.0)).numpy())
    # imported.signatures with defaults are not supported.
    # TODO(b/277814477) support defaults in loaded.signatures
    # self.assertEqual(
    #     {"output_0": 5.0},
    #     self.evaluate(
    #         imported.signatures["serving_default"]()
    #     ),
    # )
  def test_function_with_default_none_input(self, cycles, use_cpp_bindings):
    """All four traces of a function with a None-default kwarg are restored."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    def func(x, dtype=None):
      if dtype:
        return array_ops.zeros(shape=x.shape, dtype=dtype)
      else:
        return array_ops.zeros(shape=x.shape, dtype=dtypes.float32)

    root = autotrackable.AutoTrackable()
    root.f = def_function.function(func)
    # Four distinct traces: int/float inputs, different shapes, explicit dtype.
    self.assertAllEqual(
        [0.0, 0.0, 0.0], root.f(constant_op.constant([1, 2, 3])).numpy()
    )
    self.assertAllEqual(
        [0.0, 0.0, 0.0], root.f(constant_op.constant([1.0, 2.0, 3.0])).numpy()
    )
    self.assertAllEqual(
        [0.0, 0.0, 0.0, 0.0], root.f(constant_op.constant([1, 2, 3, 4])).numpy()
    )
    self.assertAllEqual(
        [0, 0, 0],
        root.f(
            constant_op.constant([1.0, 2.0, 3.0]), dtype=dtypes.int32
        ).numpy(),
    )
    concrete_functions = root.f._list_all_concrete_functions_for_serialization()  # pylint: disable=protected-access
    self.assertLen(concrete_functions, 4)
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    restored_concrete_functions = imported.f._list_all_concrete_functions()  # pylint: disable=protected-access
    self.assertLen(restored_concrete_functions, 4)
    self.assertAllEqual(
        [0.0, 0.0, 0.0],
        imported.f(constant_op.constant([1, 2, 3]), None).numpy(),
    )
    self.assertAllEqual(
        [0.0, 0.0, 0.0],
        imported.f(constant_op.constant([1.0, 2.0, 3.0])).numpy(),
    )
    self.assertAllEqual(
        [0.0, 0.0, 0.0, 0.0],
        imported.f(constant_op.constant([1, 2, 3, 4])).numpy(),
    )
    self.assertAllEqual(
        [0, 0, 0],
        imported.f(
            constant_op.constant([1.0, 2.0, 3.0]), dtype=dtypes.int32
        ).numpy(),
    )
  def test_function_with_str_bytes_input(self, cycles, use_cpp_bindings):
    """Traces with Python str / tensor string argument mixes are all restored."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    @def_function.function
    def func(x, y):
      return string_ops.string_join([x, y])

    root = autotrackable.AutoTrackable()
    root.f = func
    self.assertAllEqual(b"ab", root.f("a", "b"))
    self.assertAllEqual(b"ab", root.f("a", constant_op.constant("b")))
    self.assertAllEqual(b"ab", root.f(constant_op.constant("a"), "b"))
    concrete_functions = root.f._list_all_concrete_functions_for_serialization()  # pylint: disable=protected-access
    self.assertLen(concrete_functions, 3)
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    restored_concrete_functions = imported.f._list_all_concrete_functions()  # pylint: disable=protected-access
    self.assertLen(restored_concrete_functions, 3)
    self.assertAllEqual(b"ab", imported.f("a", "b"))
    self.assertAllEqual(b"ab", imported.f("a", constant_op.constant("b")))
    self.assertAllEqual(b"ab", imported.f(constant_op.constant("a"), "b"))
  def test_function_no_return(self, cycles, use_cpp_bindings):
    """A side-effect-only tf.function (no return value) keeps mutating state."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    class TrackableWithOneVariable(autotrackable.AutoTrackable):

      def __init__(self, initial_value=0.0):
        super(TrackableWithOneVariable, self).__init__()
        self.variable = variables.Variable(initial_value)

      @def_function.function
      def increase(self, by=1.0):
        self.variable.assign_add(by)

    obj = TrackableWithOneVariable(5.0)
    obj.increase(constant_op.constant(10.0))
    self.assertEqual(15.0, obj.variable.numpy())
    obj.increase()
    self.assertEqual(16.0, obj.variable.numpy())
    imported = cycle(obj, cycles, use_cpp_bindings=use_cpp_bindings)
    imported.increase(constant_op.constant(10.0))
    self.assertEqual(26.0, imported.variable.numpy())
    imported.increase(constant_op.constant(1.0))
    self.assertEqual(27.0, imported.variable.numpy())
  def test_structured_inputs(self, cycles, use_cpp_bindings):
    """Only inputs matching the traced nested structure are callable after load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    def func(x, training=True):
      # x is a nested structure, we care about one particular tensor.
      _, (a, b) = x
      if training:
        return 2 * a["a"] + b
      else:
        return 7

    root = autotrackable.AutoTrackable()
    root.f = def_function.function(func)
    x = constant_op.constant(10)
    y = constant_op.constant(11)
    input1 = [6, ({"a": x}, y)]
    input2 = [7, ({"a": x}, y)]  # Not compatible with input1 signature.
    input3 = [6, ({"a": y}, x)]  # Compatible with input1 signature.
    # Note: by only calling f(input1) before serialization, only inputs with
    # matching signature will be valid on the loaded model.
    self.assertEqual(31, root.f(input1).numpy())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    with self.assertRaisesRegex(
        ValueError, "Could not find matching concrete function to call"
    ):
      imported.f(input2)
    self.assertEqual(31, imported.f(input1).numpy())
    self.assertEqual(32, imported.f(input3).numpy())
  def test_structured_inputs_bare_concrete_function(
      self, cycles, use_cpp_bindings
  ):
    """A bare concrete function enforces its traced nested-input structure."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    def func(x, training=True):
      # x is a nested structure, we care about one particular tensor.
      _, (a, b) = x
      if training:
        return 2 * a["a"] + b
      else:
        return 7

    x = constant_op.constant(10)
    y = constant_op.constant(11)
    input1 = [6, ({"a": x}, y)]
    input2 = [7, ({"a": x}, y)]  # Not compatible with input1 signature.
    input3 = [6, ({"a": y}, x)]  # Compatible with input1 signature.
    root = autotrackable.AutoTrackable()
    root.f = def_function.function(func).get_concrete_function(input1)
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    with self.assertRaises(TypeError):
      imported.f(input2)
    self.assertEqual(31, imported.f(input1, True).numpy())
    self.assertEqual(32, imported.f(input3, True).numpy())
  def test_structured_output(self, cycles, use_cpp_bindings):
    """Namedtuple/list/dict output structure (and field order) is preserved."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    # Use fields with non-alphabetical order
    named_tuple_type = collections.namedtuple("NamedTupleHello", ["b", "a"])

    def func(input1, input2):
      named_tuple = named_tuple_type(a=input1 + input2, b=input1 * input2)
      return [named_tuple, input2, {"x": 0.5}]

    root = autotrackable.AutoTrackable()
    root.f = def_function.function(func)
    result = root.f(constant_op.constant(2), constant_op.constant(3))
    self.assertEqual(5, result[0].a.numpy())
    self.assertEqual(6, result[0].b.numpy())
    self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
    self.assertEqual(3, result[1].numpy())
    self.assertEqual(0.5, result[2]["x"].numpy())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    result = imported.f(constant_op.constant(2), constant_op.constant(5))
    self.assertEqual(7, result[0].a.numpy())
    self.assertEqual(10, result[0].b.numpy())
    self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
    self.assertEqual(5, result[1].numpy())
    self.assertEqual(0.5, result[2]["x"].numpy())
  def testConcreteFunctionType(self, cycles, use_cpp_bindings):
    """A restored concrete function exposes parameters, captures, and output types."""
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    y = constant_op.constant(1)

    @def_function.function
    def foo(x):
      return {"input": x, "capture": y}

    root = autotrackable.AutoTrackable()
    root.f = foo.get_concrete_function(tensor_spec.TensorSpec([], dtypes.int32))
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    x = constant_op.constant(2)
    output = imported.f(x)
    self.assertEqual(set(output.keys()), {"input", "capture"})
    self.assertEqual(output["input"].numpy(), 2)
    self.assertEqual(output["capture"].numpy(), 1)
    # One explicit parameter ("x") plus one implicit capture (y).
    parameters = list(imported.f.function_type.parameters.values())
    self.assertLen(parameters, 1)
    self.assertEqual(parameters[0].name, "x")
    self.assertEqual(
        parameters[0].type_constraint,
        tensor_spec.TensorSpec([], dtypes.int32, name="x"),
    )
    captures = imported.f.function_type.captures
    self.assertLen(captures, 1)
    self.assertEqual(
        list(captures.values())[0], tensor_spec.TensorSpec([], dtypes.int32)
    )
    output = imported.f.function_type.output
    self.assertEqual(
        output.mapping,
        {
            "input": tensor_spec.TensorSpec(
                shape=(), dtype=dtypes.int32, name="input"
            ),
            "capture": tensor_spec.TensorSpec(
                shape=(), dtype=dtypes.int32, name="capture"
            ),
        },
    )
  def test_pretty_print_signature(self, cycles, use_cpp_bindings):
    """pretty_printed_signature() of a restored function matches the golden text."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    named_tuple_type = collections.namedtuple("NamedTupleHello", ["b", "a"])

    def func(input1, input2):
      named_tuple = named_tuple_type(a=input1 + input2, b=input1 * input2)
      return [named_tuple, input2, {"x": 0.5}]

    root = autotrackable.AutoTrackable()
    root.f = def_function.function(func).get_concrete_function(
        constant_op.constant(2), constant_op.constant(3)
    )
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(
        imported.f.pretty_printed_signature(),
        "Input Parameters:\n"
        + "  input1 (POSITIONAL_OR_KEYWORD): TensorSpec(shape=(),"
        " dtype=tf.int32, name='input1')\n"
        + "  input2 (POSITIONAL_OR_KEYWORD): TensorSpec(shape=(),"
        " dtype=tf.int32, name='input2')\n"
        + "Output Type:\n"
        + " List[NamedTupleHello[['b', TensorSpec(shape=(), dtype=tf.int32,"
        " name='tensor_0_b')], ['a', TensorSpec(shape=(), dtype=tf.int32,"
        " name='tensor_0_a')]], TensorSpec(shape=(), dtype=tf.int32,"
        " name='tensor_1'), Dict[['x', TensorSpec(shape=(), dtype=tf.float32,"
        " name='tensor_2_x')]]]\n"
        + "Captures:\n"
        + "  None",
    )
  def test_positional_arguments(self, cycles, use_cpp_bindings):
    """Positional and multiple default-valued arguments resolve after loading."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    def func(x, training=False, abc=7.1, defg=7.7):
      del abc
      if training:
        return 2 * x
      if defg == 7:
        return 6
      else:
        return 7

    root = autotrackable.AutoTrackable()
    root.f = def_function.function(func)
    self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
    self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
    self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
    self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
    self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
    self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())
  def test_additional_kwargs(self, cycles, use_cpp_bindings):
    """**kwargs traces only match the exact keyword values used before saving."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    def func(x, training=False, **options):
      del options
      if training:
        return 2 * x
      else:
        return 7

    root = autotrackable.AutoTrackable()
    root.f = def_function.function(func)
    x = constant_op.constant(10)
    self.assertEqual(7, root.f(x, learning_rate=0.5, epochs=3).numpy())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    # epochs=4 was never traced, so there is no matching concrete function.
    with self.assertRaisesRegex(
        ValueError, "Could not find matching concrete function to call.*"
    ):
      imported.f(x, learning_rate=0.5, epochs=4)
    self.assertEqual(7, imported.f(x, learning_rate=0.5, epochs=3).numpy())
  def test_member_function(self, cycles, use_cpp_bindings):
    """A method reading non-trackable instance state keeps its captured value."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    class TrackableWithMember(autotrackable.AutoTrackable):

      def __init__(self):
        super(TrackableWithMember, self).__init__()
        self._some_value = 20

      @def_function.function
      def f(self, x, training=False):
        if training:
          return 2 * x
        else:
          return 7 + self._some_value

    root = TrackableWithMember()
    self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
    self.assertEqual(27, root.f(constant_op.constant(1)).numpy())
    self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
    self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())
def test_side_effect_listing(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class M(autotrackable.AutoTrackable):
def __init__(self):
super(M, self).__init__()
self.var = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)
def f(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return x * self.var
m = M()
cycle(m, cycles)
self.assertEqual(4.0, m.f(constant_op.constant(2.0)).numpy())
  def test_basic_backprop(self, cycles, use_cpp_bindings):
    """Gradients flow through a restored function to restored variables."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    weight = variables.Variable(1.0, trainable=True)
    bias = variables.Variable(0.0, trainable=True)
    g = def_function.function(
        lambda x: x * weight + bias,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
    )
    root = autotrackable.AutoTrackable()
    root.weight = weight
    root.bias = bias
    root.g = g
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    with backprop.GradientTape() as t:
      x = constant_op.constant([3.5])
      loss = imported.g(x)
      grad = t.gradient(loss, [imported.weight, imported.bias])
      # d(loss)/d(weight) = x = 3.5, d(loss)/d(bias) = 1.
      self.assertAllClose(grad, [3.5, 1.0])
  def test_nested_backprop(self, cycles, use_cpp_bindings):
    """Gradients flow through nested (Stateful)PartitionedCall function calls."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    weight = variables.Variable(1.0, trainable=True)
    bias = variables.Variable(0.0, trainable=True)

    # Note: this function gets called from other function defs via a
    # "PartitionedCall" op node.
    @def_function.function(
        input_signature=[
            tensor_spec.TensorSpec(None, dtypes.float32),
            tensor_spec.TensorSpec(None, dtypes.float32),
        ]
    )
    def mul(x, y):
      return x * y

    # Note: this function gets called from other function defs via a
    # "StatefulPartitionedCall" op node.
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
    )
    def f(x):
      return mul(weight.read_value(), x)

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
    )
    def g(x):
      return (f(x) + bias,)

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
    )
    def h(x):
      return (g(x) + bias,)

    root = autotrackable.AutoTrackable()
    root.weight = weight
    root.bias = bias
    root.g = h
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    with backprop.GradientTape() as t:
      x = constant_op.constant([3.5])
      loss = imported.g(x)
      grad = t.gradient(loss, [imported.weight, imported.bias])
      # bias is added twice (in g and in h), so its gradient is 2.
      self.assertAllClose(grad, [3.5, 2.0])
  def test_while_loop_backprop(self, cycles, use_cpp_bindings):
    """Gradients flow through a restored while_loop that captures a variable."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    weight = variables.Variable(2.0, trainable=True)

    @def_function.function(
        input_signature=[
            tensor_spec.TensorSpec(dtype=dtypes.float32, shape=(None, None))
        ]
    )
    def g(x):
      """Adds rows of matrix x after multiplying each entry by v."""
      i_0 = constant_op.constant(0)
      s_0 = constant_op.constant([0.0, 0.0])
      cond = lambda i, _: i < array_ops.shape(x)[1]
      body = lambda i, s: (i + 1, s + weight * x[:, i])
      i_end, s_end = while_loop.while_loop(cond, body, (i_0, s_0))
      del i_end
      return s_end

    root = autotrackable.AutoTrackable()
    root.weight = weight
    root.g = g
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)

    def get_gradient(obj):
      # Same gradient computation for both original and restored objects.
      with backprop.GradientTape() as t:
        x = constant_op.constant([[1.0, 2.0, 3.0], [1.0, -2, 3.0]])
        y = obj.g(x)
        self.assertAllClose(y, obj.weight * [6.0, 2.0])
        loss = math_ops.reduce_sum(y)  # weight * 8.
        self.assertAllEqual(t.watched_variables(), [obj.weight])
        return t.gradient(loss, obj.weight)

    imported_gradient = get_gradient(imported)
    original_gradient = get_gradient(root)
    self.assertIsNotNone(original_gradient)
    self.assertAllClose(original_gradient, 8.0)
    self.assertIsNotNone(imported_gradient)
    self.assertAllClose(imported_gradient, 8.0)
  def _test_restored_func_with_captured_var_backprop(
      self, cycles, use_cpp_bindings, dtype
  ):
    """Helper: gradient w.r.t. a captured variable survives save/load.

    Args:
      cycles: Number of save/load cycles to run.
      use_cpp_bindings: Whether the C++ loader bindings are under test.
      dtype: Dtype of both the captured variable and the function input.
    """
    weight = variables.Variable(2.0, trainable=True, dtype=dtype)
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(dtype=dtype, shape=())]
    )
    def g(x):
      return x * weight
    root = autotrackable.AutoTrackable()
    root.weight = weight
    root.g = g
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    def get_gradient(obj):
      # d(x * weight)/d(weight) == x == 2.0 for both original and import.
      with backprop.GradientTape() as t:
        x = constant_op.constant(2.0, dtype=dtype)
        y = obj.g(x)
        self.assertAllClose(y, obj.weight * 2.0)
        self.assertAllEqual(t.watched_variables(), [obj.weight])
        return t.gradient(y, obj.weight)
    imported_gradient = get_gradient(imported)
    original_gradient = get_gradient(root)
    self.assertIsNotNone(original_gradient)
    self.assertAllClose(original_gradient, 2.0)
    self.assertIsNotNone(imported_gradient)
    self.assertAllClose(imported_gradient, 2.0)
  def test_nested_fn_backprop(self, cycles, use_cpp_bindings):
    """Gradients flow through a variable read via a nested traced function."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    weight = variables.Variable(2.0, trainable=True)
    @def_function.function(
        input_signature=[
            tensor_spec.TensorSpec(dtype=dtypes.float32, shape=(None, None))
        ]
    )
    def g(x):
      weight.read_value()  # Just get the tape to watch the variable
      handle = array_ops.identity(weight.handle)
      @def_function.function
      def launder_var_handle():
        # Passes the handle through an inner function so the outer read
        # below is not a direct use of the variable.
        return array_ops.identity(handle)
      return x + resource_variable_ops.read_variable_op(
          launder_var_handle(), dtypes.float32
      )
    root = autotrackable.AutoTrackable()
    root.weight = weight
    root.g = g
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    def get_gradient(obj, persistent):
      with backprop.GradientTape(persistent=persistent) as t:
        x = constant_op.constant([[1.0, 2.0, 3.0], [1.0, -2, 3.0]])
        y = obj.g(x)
        self.assertAllClose(y, obj.weight + x)
        loss = math_ops.reduce_sum(y)
        return t.gradient(loss, obj.weight)
    imported_gradient = get_gradient(imported, persistent=False)
    original_gradient = get_gradient(root, persistent=False)
    self.assertIsNotNone(original_gradient)
    self.assertAllClose(original_gradient, 6.0)
    self.assertIsNotNone(imported_gradient)
    self.assertAllClose(imported_gradient, 6.0)
  def test_restored_func_with_captured_var_backprop_float32(
      self, cycles, use_cpp_bindings
  ):
    """float32 instantiation of the captured-variable backprop check."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    self._test_restored_func_with_captured_var_backprop(
        cycles, use_cpp_bindings, dtypes.float32
    )
  def test_restored_func_with_captured_var_backprop_float64(
      self, cycles, use_cpp_bindings
  ):
    """float64 instantiation of the captured-variable backprop check."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    self._test_restored_func_with_captured_var_backprop(
        cycles, use_cpp_bindings, dtypes.float64
    )
  def test_callable(self, cycles, use_cpp_bindings):
    """Objects with a `__call__` attribute become callable after loading."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class M1(autotrackable.AutoTrackable):
      @def_function.function(
          input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
      )
      def __call__(self, x):
        return x
    root = autotrackable.AutoTrackable()
    root.m1 = M1()
    root.m2 = autotrackable.AutoTrackable()
    root.m2.__call__ = def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
    )(lambda x: x * 3.0)
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    x = constant_op.constant(1.0)
    self.assertTrue(callable(imported.m1))
    self.assertAllEqual(root.m1(x), imported.m1(x))
    # Note: `root.m2` was not callable since `__call__` attribute was set
    # into the instance and not on the class. But after a serialization cycle
    # that starts to work.
    self.assertTrue(callable(imported.m2))
    self.assertAllEqual(root.m2.__call__(x), imported.m2(x))
    # Verify that user objects without `__call__` attribute are not callable.
    self.assertFalse(callable(imported))
def test_chain_callable(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
func = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)(lambda x: x * 3.0)
root = autotrackable.AutoTrackable()
root.__call__ = autotrackable.AutoTrackable()
root.__call__.__call__ = autotrackable.AutoTrackable()
root.__call__.__call__.__call__ = func
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertTrue(callable(imported))
x = constant_op.constant(1.0)
self.assertAllEqual(imported(x).numpy(), 3.0)
  def test_load_in_graph_mode(self, cycles, use_cpp_bindings):
    """Loading inside a graph yields session-runnable variables and ops."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.v1 = variables.Variable(1.0, name="v_one", trainable=False)
    root.v2 = variables.Variable(2.0, name="v_two", trainable=True)
    root.f = def_function.function(
        lambda x: root.v2 * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
    )
    if cycles > 1:
      # Run all but the last cycle eagerly; the final load happens in graph
      # mode below.
      root = cycle(root, cycles - 1, use_cpp_bindings=use_cpp_bindings)
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(root, path)
    with ops.Graph().as_default() as g:
      imported = test_load(path, use_cpp_bindings=use_cpp_bindings)
      var_v1 = imported.v1
      self.assertFalse(var_v1.trainable)
      var_v2 = imported.v2
      self.assertTrue(var_v2.trainable)
      output = imported.f(constant_op.constant(2.0))
      with monitored_session.MonitoredSession() as sess:
        self.assertEqual(1.0, sess.run(var_v1))
        self.assertEqual(4.0, sess.run(output))
      self.assertCountEqual(
          [var_v1, var_v2], g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      )
      # load() should not add to TRAINABLE_VARIABLES. Higher levels of model
      # building control retraining or frozen use of imported SavedModels.
      self.assertCountEqual(
          [], g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      )
  def test_load_in_func_graph(self, cycles, use_cpp_bindings):
    """A SavedModel can be lazily loaded from inside a tf.function trace."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.v1 = variables.Variable(1.0)
    root.v2 = variables.Variable(2.0)
    root.f = def_function.function(
        lambda x: root.v2 * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
    )
    if cycles > 1:
      root = cycle(root, cycles - 1, use_cpp_bindings=use_cpp_bindings)
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(root, path)
    closure = autotrackable.AutoTrackable()
    @def_function.function
    def func(x):
      # Load once, during the first trace, and stash the model on `closure`.
      if not hasattr(closure, "model"):
        closure.model = load.load(path)
      return closure.model.f(x)
    inputs = constant_op.constant(2.0)
    self.assertEqual(4.0, func(inputs).numpy())
  def test_soft_matching(self, cycles, use_cpp_bindings):
    """An input_signature function keeps a single shape-relaxed trace."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]
    )
    def func(x):
      return 2 * x
    root = autotrackable.AutoTrackable()
    root.f = func
    self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
    self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
    concrete_functions = root.f._list_all_concrete_functions_for_serialization()  # pylint: disable=protected-access
    self.assertLen(concrete_functions, 1)
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    restored_concrete_functions = imported.f._list_all_concrete_functions()  # pylint: disable=protected-access
    self.assertLen(restored_concrete_functions, 1)
    with self.assertRaisesRegex(
        TypeError, "Binding inputs to tf.function failed"
    ):
      # We cannot call the function with a constant of shape ().
      imported.f(constant_op.constant(2)).numpy()
    # TODO(vbardiovsky): When classes are revived with input_signatures, we
    # should also check that the calls below are not generating any more
    # concrete functions.
    self.assertAllEqual(
        [2, 4, 6, 8], imported.f(constant_op.constant([1, 2, 3, 4])).numpy()
    )
    self.assertAllEqual(
        [2, 4, 6], imported.f(constant_op.constant([1, 2, 3])).numpy()
    )
  def test_jit_compile(self, cycles, use_cpp_bindings):
    """The jit_compile attribute round-trips through save and load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    # It'd be nice to use parameterize here, but the library does not support
    # having parameterized test methods inside already-parameterized classes.
    for jit_compile in (None, True, False):
      @def_function.function(jit_compile=jit_compile)
      def f(x):
        return x + 1.0
      root = module.Module()
      root.f = f
      save_dir = os.path.join(self.get_temp_dir(), "saved_model")
      save.save(root, save_dir)
      imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
      self.assertEqual(imported.f._jit_compile, jit_compile)
  def test_get_concrete_function(self, cycles, use_cpp_bindings):
    """get_concrete_function on a revived function matches only saved traces."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function
    def func(x, training=False):
      if training:
        return 2 * x
      else:
        return 3 * x
    # Trace twice: (int32 vector, training=True) and (float32 vector, default).
    func.get_concrete_function(
        tensor_spec.TensorSpec([None], dtypes.int32), True
    )
    func.get_concrete_function(tensor_spec.TensorSpec([None], dtypes.float32))
    root = autotrackable.AutoTrackable()
    root.f = func
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    concrete = imported.f.get_concrete_function(
        training=True, x=tensor_spec.TensorSpec([None], dtypes.int32)
    )
    self.assertAllEqual(
        [2, 4, 6, 8], concrete(x=constant_op.constant([1, 2, 3, 4])).numpy()
    )
    # Omitting `training` matches no saved trace (int32 was only traced with
    # training=True).
    with self.assertRaisesRegex(
        ValueError, "Could not find matching concrete function to call"
    ):
      imported.f.get_concrete_function(
          tensor_spec.TensorSpec([None], dtypes.int32)
      )
    imported.f.get_concrete_function(
        tensor_spec.TensorSpec([None], dtypes.int32), True
    )
  def test_concrete_function(self, cycles, use_cpp_bindings):
    """A concrete function from an input_signature round-trips and relaxes."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]
    )
    def func(x):
      return 2 * x
    root = autotrackable.AutoTrackable()
    root.f = func.get_concrete_function()
    self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
    self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
    # TODO(andresp): Fix exporting of loaded concrete functions as signatures.
    imported = cycle(
        root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
    )
    self.assertAllEqual(
        [2, 4, 6, 8], imported.f(constant_op.constant([1, 2, 3, 4])).numpy()
    )
    self.assertAllEqual(
        [2, 4, 6], imported.f(constant_op.constant([1, 2, 3])).numpy()
    )
  def test_concrete_function_captures(self, cycles, use_cpp_bindings):
    """Variable captures survive in both the function and its signature."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class Root(module.Module):
      def __init__(self):
        self.v = variables.Variable(1.0)
        self.v1 = variables.Variable(1.0)
      @def_function.function(
          input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
      )
      def use_v(self, x):
        return self.v + self.v1 + 1.0
    root = Root()
    self.assertIn(
        root.v.handle,
        root.use_v.get_concrete_function().graph.external_captures,
    )
    root = cycle(
        root,
        cycles,
        signatures=root.use_v.get_concrete_function(),
        use_cpp_bindings=use_cpp_bindings,
    )
    # Both variables must be captured by the revived function graph...
    func_captures = root.use_v.get_concrete_function().graph.external_captures
    self.assertLen(func_captures, 2)
    self.assertTrue(any(root.v.handle is t for t in func_captures))
    self.assertTrue(any(root.v1.handle is t for t in func_captures))
    # ...and by the exported serving signature as well.
    signature_captures = root.signatures[
        "serving_default"
    ].graph.external_captures
    self.assertLen(signature_captures, 2)
    self.assertTrue(any(root.v.handle is t for t in signature_captures))
    self.assertTrue(any(root.v1.handle is t for t in signature_captures))
  def test_concrete_function_arg_names(self, cycles, use_cpp_bindings):
    """Revived concrete functions accept the original argument names."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]
    )
    def func(x):
      return 2 * x
    root = autotrackable.AutoTrackable()
    root.f = func.get_concrete_function()
    self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
    # TODO(andresp): Fix exporting of loaded concrete functions as signatures.
    imported = cycle(
        root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
    )
    self.assertAllEqual(
        [2, 4, 6], imported.f(x=constant_op.constant([1, 2, 3])).numpy()
    )
def test_concrete_function_no_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(x):
return 2 * x
root = autotrackable.AutoTrackable()
root.f = func.get_concrete_function(constant_op.constant([1]))
self.assertAllEqual([4], root.f(constant_op.constant([2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(
root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
)
self.assertAllEqual([6], imported.f(constant_op.constant([3])).numpy())
  @test_util.run_in_graph_and_eager_modes
  def test_concrete_function_backprop(self, cycles, use_cpp_bindings):
    """Gradients can be taken through a revived concrete function."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.float32)]
    )
    def func(x):
      return x**2.0
    root = autotrackable.AutoTrackable()
    root.f = func.get_concrete_function()
    def _compute_gradient(function):
      # d(x**2)/dx at x=1 is 2.
      with backprop.GradientTape() as tape:
        inp = constant_op.constant(1.0)
        tape.watch(inp)
        output = function(inp)
      return tape.gradient(output, inp)
    self.assertAllEqual(2.0, _compute_gradient(root.f))
    # TODO(andresp): Fix exporting of loaded concrete functions as signatures.
    imported = cycle(
        root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
    )
    self.assertAllEqual(2.0, _compute_gradient(imported.f))
  def test_revived_concrete_function_kwargs(self, cycles, use_cpp_bindings):
    """Revived concrete functions accept out-of-order keyword arguments."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function
    def func(x, y):
      return x * (y + 1.0)
    root = autotrackable.AutoTrackable()
    root.f = func.get_concrete_function(
        tensor_spec.TensorSpec([], dtypes.float32),
        tensor_spec.TensorSpec([], dtypes.float32),
    )
    self.assertEqual(
        8.0,
        root.f(
            y=constant_op.constant(3.0), x=constant_op.constant(2.0)
        ).numpy(),
    )
    # TODO(andresp): Fix exporting of loaded concrete functions as signatures.
    imported = cycle(
        root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
    )
    self.assertEqual(
        8.0,
        imported.f(
            y=constant_op.constant(3.0), x=constant_op.constant(2.0)
        ).numpy(),
    )
  def test_revived_concrete_function_tensorspec_kwargs(
      self, cycles, use_cpp_bindings
  ):
    """Keyword calls work when the names come from TensorSpec names."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function
    def func(*args):
      # Argument names "x"/"y" exist only on the TensorSpecs below, not in
      # the Python signature.
      x, y = args
      return x * (y + 1.0)
    root = autotrackable.AutoTrackable()
    root.f = func.get_concrete_function(
        tensor_spec.TensorSpec([], dtypes.float32, name="x"),
        tensor_spec.TensorSpec([], dtypes.float32, name="y"),
    )
    self.assertEqual(
        8.0,
        root.f(
            y=constant_op.constant(3.0), x=constant_op.constant(2.0)
        ).numpy(),
    )
    imported = cycle(
        root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
    )
    self.assertEqual(
        8.0,
        imported.f(
            y=constant_op.constant(3.0), x=constant_op.constant(2.0)
        ).numpy(),
    )
  def test_concrete_function_variable_argument(self, cycles, use_cpp_bindings):
    """Functions taking a variable argument mutate the right state on load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    capture = variables.Variable(0)
    @def_function.function
    def func(v):
      v.assign_add(1)
      capture.assign_sub(1)
    vsave = variables.Variable(1)
    root = autotrackable.AutoTrackable()
    root.f = func.get_concrete_function(vsave)
    root.capture = capture
    self.assertEqual(1, vsave.numpy())
    root.f(vsave)
    self.assertEqual(2, vsave.numpy())
    self.assertEqual(-1, capture.numpy())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    vload = variables.Variable(1)
    imported.f(vload)
    self.assertEqual(2, vload.numpy())
    self.assertEqual(-2, imported.capture.numpy())
    imported.f(v=vload)
    self.assertEqual(3, vload.numpy())
    self.assertEqual(-3, imported.capture.numpy())
    # The original capture is independent of the revived one: it still holds
    # the value from before the save/load cycle.
    self.assertEqual(-1, capture.numpy())
  def test_function_and_component(self, cycles, use_cpp_bindings):
    """A function and one of its concrete traces can be saved side by side."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function
    def func(v):
      return v + 1
    root = autotrackable.AutoTrackable()
    root.func = func
    root.concrete_func = func.get_concrete_function(
        tensor_spec.TensorSpec(None, dtypes.int32)
    )
    one = constant_op.constant(1)
    self.assertEqual(2, root.func(one).numpy())
    self.assertEqual(2, root.concrete_func(one).numpy())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(2, imported.func(one).numpy())
    self.assertEqual(2, imported.concrete_func(one).numpy())
  def test_dict(self, cycles, use_cpp_bindings):
    """Dict attributes restore trackable entries; plain values are dropped."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.variables = dict(a=variables.Variable(1.0))
    root.variables["b"] = variables.Variable(2.0)
    root.variables["c"] = 1  # Non-trackable; absent from the loaded dict.
    root.funcs = dict(
        a=def_function.function(lambda: constant_op.constant(100.0))
    )
    root.funcs["conc"] = root.funcs["a"].get_concrete_function()
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(1.0, imported.variables["a"].numpy())
    self.assertEqual(2.0, imported.variables["b"].numpy())
    self.assertEqual(set(["a", "b"]), set(imported.variables.keys()))
    self.assertEqual(100.0, imported.funcs["a"]().numpy())
    self.assertEqual(100.0, imported.funcs["conc"]().numpy())
def test_list(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.variables = [variables.Variable(1.0)]
root.variables.append(1)
root.variables.append(variables.Variable(3.0))
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(1.0, imported.variables[0].numpy())
self.assertEqual(3.0, imported.variables[2].numpy())
self.assertIs(None, imported.variables[1])
self.assertLen(imported.variables, 3)
def test_tuple(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.variables = (variables.Variable(1.0), 1, variables.Variable(3.0))
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(1.0, imported.variables[0].numpy())
self.assertEqual(3.0, imported.variables[2].numpy())
self.assertIs(None, imported.variables[1])
self.assertLen(imported.variables, 3)
  def test_functions_list(self, cycles, use_cpp_bindings):
    """A list of loss functions sharing variables restores correctly."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    v1 = variables.Variable(1.0)
    root.losses = [def_function.function(lambda: math_ops.reduce_sum(v1**2))]
    root.variables = [v1]
    @def_function.function
    def _v2_loss():
      # Creates v2 lazily on the first trace and appends it to root.variables.
      if len(root.variables) == 1:
        v2 = variables.Variable(2.0)
        root.variables.append(v2)
      return math_ops.reduce_sum(root.variables[1] ** 2)
    root.losses.append(_v2_loss)
    self.assertAllClose([1.0, 4.0], [loss() for loss in root.losses])
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertAllClose([1.0, 4.0], [loss() for loss in imported.losses])
    # Reassigning the restored variables must be reflected in the losses.
    imported.variables[0].assign(3.0)
    imported.variables[1].assign(4.0)
    self.assertAllClose([9.0, 16.0], [loss() for loss in imported.losses])
  def test_captured_constant(self, cycles, use_cpp_bindings):
    """A constant captured by two functions is deduplicated on load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    const = array_ops.zeros([100])
    root = autotrackable.AutoTrackable()
    root.f = def_function.function(lambda: const + 1.0)
    root.g = def_function.function(lambda: const + 2.0)
    self.assertAllClose(array_ops.ones([100]), root.f())
    self.assertAllClose(2.0 * array_ops.ones([100]), root.g())
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertAllClose(array_ops.ones([100]), imported.f())
    self.assertAllClose(2.0 * array_ops.ones([100]), imported.g())
    # TODO(b/123408994): Use the public get_concrete_function.
    f_concrete = imported.f._list_all_concrete_functions_for_serialization()[0]
    g_concrete = imported.g._list_all_concrete_functions_for_serialization()[0]
    self.assertLen(f_concrete.captured_inputs, 1)
    self.assertLen(g_concrete.captured_inputs, 1)
    # We should be using the same captured EagerTensor in both functions, not
    # duplicating the constant.
    self.assertIs(f_concrete.captured_inputs[0], g_concrete.captured_inputs[0])
  def test_functions_accessed_once(self, cycles, use_cpp_bindings):
    """Saving reads a property-created function exactly once."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class Exported(autotrackable.AutoTrackable):
      def __init__(self):
        self._counter = 0
      @property
      def make_func(self):
        # Each property access returns a fresh traced function capturing the
        # current counter, then bumps the counter — so captured values record
        # how many times the property has been read.
        @def_function.function
        def f():
          return constant_op.constant(self._counter)
        f.get_concrete_function()  # force a trace
        self._counter += 1
        return f
    exported = Exported()
    imported = cycle(exported, cycles, use_cpp_bindings=use_cpp_bindings)
    # The saved function captured 0, proving save accessed the property once;
    # the next direct access therefore returns 1.
    self.assertEqual(0, imported.make_func().numpy())
    self.assertEqual(1, exported.make_func().numpy())
  def test_overwritten_signatures_error(self, cycles, use_cpp_bindings):
    """Re-assigning `.signatures` on a loaded model makes re-saving fail."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    exported = autotrackable.AutoTrackable()
    exported.f = def_function.function(lambda: constant_op.constant(1.0))
    imported = cycle(
        exported,
        cycles,
        signatures={"key": exported.f.get_concrete_function()},
        use_cpp_bindings=use_cpp_bindings,
    )
    self.assertEqual(1.0, imported.signatures["key"]()["output_0"].numpy())
    imported.signatures = {"key1": imported.signatures["key"]}
    with self.assertRaisesRegex(ValueError, "signatures"):
      save.save(imported, tempfile.mkdtemp(prefix=self.get_temp_dir()))
  def test_signature_loading(self, cycles, use_cpp_bindings):
    """Signatures load under serving_default and see variable updates."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class Exported(autotrackable.AutoTrackable):
      def __init__(self):
        self.v = variables.Variable(3.0)
      @def_function.function
      def do(self, x):
        return self.v * x
    exported = Exported()
    imported = cycle(
        exported,
        cycles,
        signatures=exported.do.get_concrete_function(
            tensor_spec.TensorSpec(None, dtypes.float32)
        ),
        use_cpp_bindings=use_cpp_bindings,
    )
    self.assertEqual(["serving_default"], list(imported.signatures.keys()))
    imported_function = imported.signatures["serving_default"]
    two = constant_op.constant(2.0)
    self.assertEqual(6.0, imported_function(x=two)["output_0"].numpy())
    # Updating the restored variable is reflected in the signature output.
    imported.v.assign(4.0)
    self.assertEqual(8.0, imported_function(x=two)["output_0"].numpy())
    self.assertEqual(8.0, imported_function(two)["output_0"].numpy())
    with self.assertRaises(TypeError):
      # The signatures mapping is immutable
      imported.signatures["random_key"] = 3
  def test_names_normalized(self, cycles, use_cpp_bindings):
    """Unsupported characters in input names are renamed, with a log notice."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class ObjWithFunction(module.Module):
      @def_function.function(
          input_signature=[
              tensor_spec.TensorSpec([], dtype=dtypes.int32, name="A-b"),
              tensor_spec.TensorSpec([], dtype=dtypes.int32, name="A/D"),
              tensor_spec.TensorSpec([], dtype=dtypes.int32, name="bar"),
              tensor_spec.TensorSpec([], dtype=dtypes.int32, name="e"),
          ]
      )
      def foo(self, a, b, c, d=10, **options):
        del options
        return a + b + c + d
    exported = ObjWithFunction()
    with self.assertLogs(level="INFO") as logs:
      imported = cycle(exported, cycles, use_cpp_bindings=use_cpp_bindings)
    expected_message = (
        "INFO:absl:Function `foo` contains input name(s) A-b, A/D with "
        "unsupported characters which will be renamed to a_b, a_d in the "
        "SavedModel."
    )
    self.assertIn(expected_message, logs.output)
    # The serving signature exposes the sanitized names.
    loaded_signature = imported.signatures["serving_default"].inputs
    self.assertTrue(
        {"a_b:0", "a_d:0"}.issubset({arg.name for arg in loaded_signature}),
    )
  def test_multiple_argument_signatures_no_positional(
      self, cycles, use_cpp_bindings
  ):
    """Multi-argument signatures require keyword (not positional) calls."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class Exported(autotrackable.AutoTrackable):
      @def_function.function
      def do(self, x, y):
        return x + y
    exported = Exported()
    imported = cycle(
        exported,
        cycles,
        signatures=exported.do.get_concrete_function(
            tensor_spec.TensorSpec(None, dtypes.float32),
            tensor_spec.TensorSpec(None, dtypes.float32),
        ),
        use_cpp_bindings=use_cpp_bindings,
    )
    # Mixing a positional argument into a signature call is rejected.
    with self.assertRaises(TypeError):
      imported.signatures["serving_default"](
          constant_op.constant(1.0), y=constant_op.constant(2.0)
      )
    self.assertEqual(
        {"output_0": 3.0},
        self.evaluate(
            imported.signatures["serving_default"](
                x=constant_op.constant(1.0), y=constant_op.constant(2.0)
            )
        ),
    )
  def _make_model_with_tables(self):
    """Builds a trackable holding two hash tables and lookup functions.

    Returns:
      An AutoTrackable with `table1`/`lookup1` (in-memory key/value table)
      and `table2`/`lookup2` (backed by an asset file of line-indexed ids).
    """
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table1_initializer = lookup_ops.KeyValueTensorInitializer(keys, values)
    table1 = lookup_ops.HashTable(table1_initializer, default_val)
    table2_file = self._make_asset("test\nfoo\nbrain\n")
    table2_initializer = lookup_ops.TextFileIdTableInitializer(table2_file)
    table2 = lookup_ops.HashTable(table2_initializer, default_val)
    def _make_lookup_function(table):
      # Wraps the table lookup in a traced function with a string signature.
      signature = [tensor_spec.TensorSpec(None, dtypes.string)]
      return def_function.function(input_signature=signature)(
          lambda x: table.lookup(x))  # pylint: disable=unnecessary-lambda
    root = autotrackable.AutoTrackable()
    root.table1 = table1
    root.lookup1 = _make_lookup_function(table1)
    root.table2 = table2
    root.lookup2 = _make_lookup_function(table2)
    return root
def test_table(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = self._make_model_with_tables()
imported = cycle(root, cycles, signatures={})
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
self.assertAllEqual([0, -1, -1, 2], imported.lookup1(keys).numpy())
self.assertAllEqual([2, 0, 1, -1], imported.lookup2(keys).numpy())
  def test_table_collections_untouched_eager(self, cycles, use_cpp_bindings):
    """Eager save/load of tables leaves graph collections unchanged."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    def _gather_nonempty_collections():
      # Snapshot of every non-empty collection on the default graph.
      graph = ops.get_default_graph()
      gathered = {}
      for collection in graph.collections:
        collection_contents = graph.get_collection(collection)
        if collection_contents:
          gathered[collection] = collection_contents
      return gathered
    root = self._make_model_with_tables()
    # Warm up collections to ignore those that don't expand every iteration,
    # e.g. the __varscope collection.
    cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
    original_collections = _gather_nonempty_collections()
    cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(original_collections, _gather_nonempty_collections())
  def test_table_in_graph(self, cycles, use_cpp_bindings):
    """Loaded lookup tables work when restored into a graph-mode session."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = self._make_model_with_tables()
    if cycles > 1:
      root = cycle(root, cycles - 1, use_cpp_bindings=use_cpp_bindings)
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(root, path)
    # NOTE(review): the result of this eager cycle is immediately shadowed by
    # the graph-mode load below — looks redundant; confirm it is intentional.
    imported = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
    with ops.Graph().as_default():
      imported = test_load(path, use_cpp_bindings=use_cpp_bindings)
      keys = constant_op.constant(["brain", "test", "foo", "surgery"])
      output1 = imported.lookup1(keys)
      output2 = imported.lookup2(keys)
      with monitored_session.MonitoredSession() as sess:
        self.assertAllEqual([0, -1, -1, 2], sess.run(output1))
        self.assertAllEqual([2, 0, 1, -1], sess.run(output2))
def test_preserve_argspec(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(a, b, c): # pylint: disable=unused-argument
return None
original_fullargspec = tf_inspect.getfullargspec(f)
root = autotrackable.AutoTrackable()
root.f = def_function.function(f)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
restored_fullargspec = tf_inspect.getfullargspec(imported.f)
self.assertEqual(original_fullargspec, restored_fullargspec)
  def test_canonicalize_inputs(self, cycles, use_cpp_bindings):
    """Default and keyword arguments are canonicalized across save/load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function(autograph=False)
    def func(a=1, b=2, c=3, training=True):
      if training:
        return [a, b, c, training]
      else:
        return [c, b, a, training]
    # TODO(b/123501567): Work-around to trigger generic traces of a function
    # with extra non tensor args.
    signature = 3 * [tensor_spec.TensorSpec(None, dtypes.float32)]
    @def_function.function(input_signature=signature)
    def trigger(a, b, c):
      func(a, b, c, True)
      func(a, b, c, False)
    trigger.get_concrete_function()
    root = autotrackable.AutoTrackable()
    root.f = func
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertAllEqual(root.f(), [1.0, 2.0, 3.0, True])
    self.assertAllEqual(root.f(-1.0, training=False), [3.0, 2.0, -1.0, False])
    # An input that matches no saved trace is rejected.
    with self.assertRaisesRegex(
        ValueError, "Could not find matching concrete function"
    ):
      root.f(["hello", 1.0])
  def test_prefer_specific_trace(self, cycles, use_cpp_bindings):
    """Loading prefers the more specific trace when the input matches it."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function(autograph=False)
    def func(a):
      if isinstance(a, int):
        return a
      else:
        return a + 1
    # Trace twice: once with a python int (returns a), once with a tensor
    # (returns a + 1).
    self.assertAllEqual(2, func(2).numpy())
    self.assertAllEqual(3, func(constant_op.constant(2)).numpy())
    root = autotrackable.AutoTrackable()
    root.f = func
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    # f(2) hits the int-specialized trace; f(3) falls back to the generic
    # tensor trace (hence 3 + 1 == 4).
    self.assertAllEqual(2, root.f(2).numpy())
    self.assertAllEqual(4, root.f(3).numpy())
    self.assertAllEqual(3, root.f(constant_op.constant(2)).numpy())
    self.assertAllEqual(4, root.f(constant_op.constant(3)).numpy())
  def test_partial_with_non_tensor_defaults(self, cycles, use_cpp_bindings):
    """A functools.partial keyword override of a default survives save/load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    def f(x, y=3):
      return x + y
    # y=5 (from the partial) must win over the Python default y=3.
    func = def_function.function(functools.partial(f, y=5))
    root = autotrackable.AutoTrackable()
    root.f = func
    self.assertAllEqual(root.f(1), 6)
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(x, y):
return x + y
func = def_function.function(functools.partial(f, constant_op.constant(5)))
root = autotrackable.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllEqual(root.f(1), 6)
  def test_partial_with_positional_captured_tensors(
      self, cycles, use_cpp_bindings
  ):
    """A computed tensor bound via functools.partial is captured and saved."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    def f(x, y):
      return x + y
    # The bound value is the result of an op (5 + 7), not a raw constant.
    tensor = constant_op.constant(5) + constant_op.constant(7)
    func = def_function.function(functools.partial(f, tensor))
    root = autotrackable.AutoTrackable()
    root.f = func
    self.assertAllEqual(root.f(1), 13)
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertAllEqual(root.f(1), 13)
  def test_partial_keyword_hiding_default(self, cycles, use_cpp_bindings):
    """A partial keyword shadowing a later default is honored after loading."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    def f(x=3, training=True, y=7):
      if training:
        return x + y
      else:
        return x + y + 2
    # y=6 (partial) replaces the default y=7; x and training keep defaults.
    func = def_function.function(functools.partial(f, y=6))
    root = autotrackable.AutoTrackable()
    root.f = func
    self.assertEqual(root.f().numpy(), 9)
    self.assertEqual(root.f(training=False).numpy(), 11)
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(root.f().numpy(), 9)
    self.assertEqual(root.f(training=False).numpy(), 11)
  def test_partial_with_kwargs(self, cycles, use_cpp_bindings):
    """A partial mixing positionals, *args and tensor/non-tensor **kwargs round-trips."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    def f(a, b, *args, **kwargs):
      args_sum = sum(args)
      return a + b + kwargs["some_tensor"] * kwargs["learning_rate"] + args_sum
    constant_tensor = constant_op.constant(10)
    # a=7, b=1, args=(2, x); some_tensor * learning_rate = 10 * 3 = 30.
    func = def_function.function(
        functools.partial(
            f, 7, 1, 2, learning_rate=3, some_tensor=constant_tensor
        )
    )
    root = autotrackable.AutoTrackable()
    root.f = func
    # 7 + 1 + 30 + (2 + 4) = 44
    self.assertEqual(root.f(constant_op.constant(4)).numpy(), 44)
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(root.f(constant_op.constant(5)).numpy(), 45)
def test_partial_bind_only_first_argument(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
if sys.version_info[0] < 3:
self.skipTest(
"Test is only valid in python3. Only then we get some more "
"advanced inspection of partials where this is allowed."
)
def f(x, y):
return x + y
partial_func = functools.partial(f, x=5)
tf_func = def_function.function(partial_func)
root = autotrackable.AutoTrackable()
root.f = tf_func
self.assertAllEqual(root.f(y=constant_op.constant(7)), 12)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllEqual(root.f(y=constant_op.constant(9)), 14)
  def test_partial_with_passed_fn_as_default(self, cycles, use_cpp_bindings):
    """A Python callable bound via functools.partial is usable after save/load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    def f(x, y):
      return x(3) + y
    def my_func(a):
      return 2 * a
    func = def_function.function(functools.partial(f, my_func))
    root = autotrackable.AutoTrackable()
    root.f = func
    # my_func(3) + 3 = 6 + 3 = 9
    self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
  def test_partial_with_input_signature(self, cycles, use_cpp_bindings):
    """A partial combined with an input_signature keeps bound args through save/load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    def full_function(a, b, c=3.0):
      return a, b, c
    partial = functools.partial(full_function, 1, c=4)
    self.assertAllEqual((1, 2.0, 4), partial(2.0))
    # The signature covers only the single remaining free parameter `b`.
    signature = [tensor_spec.TensorSpec([], dtypes.float32)]
    func = def_function.function(partial, input_signature=signature)
    root = autotrackable.AutoTrackable()
    root.f = func
    a, b, c = root.f(2.0)
    self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 2.0, 4))
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    a, b, c = root.f(3.0)
    self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 3.0, 4))
  def test_convert_to_input_signature(self, cycles, use_cpp_bindings):
    """A loaded function with an input_signature converts a Python list to a tensor."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]
    )
    def func(x):
      return x
    root = autotrackable.AutoTrackable()
    root.f = func
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    # The plain Python list [2] must be converted per the saved signature.
    self.assertEqual([2], root.f([2]).numpy())
  def test_named_tuple(self, cycles, use_cpp_bindings):
    """A function traced with a namedtuple-subclass input can be called after load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class NamedTupleType(collections.namedtuple("NamedTupleType", ["a", "b"])):
      pass
    @def_function.function
    def f(x):
      return x.a + x.b
    # Trace once with TensorSpecs so the structure is recorded in the SavedModel.
    f.get_concrete_function(
        NamedTupleType(
            a=tensor_spec.TensorSpec(None, dtypes.float32, name="a"),
            b=tensor_spec.TensorSpec(None, dtypes.float32, name="b"),
        )
    )
    obj = autotrackable.AutoTrackable()
    obj.__call__ = f
    if sys.version_info.major == 3 and sys.version_info.minor < 5:
      # TODO(allenl): figure out why this doesn't work in Python3.4
      self.skipTest("Not working in Python 3.4")
    imported = cycle(obj, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertAllClose(
        3.0,
        imported(
            NamedTupleType(
                a=constant_op.constant(1.0), b=constant_op.constant(2.0)
            )
        ),
    )
  def test_extra_args(self, cycles, use_cpp_bindings):
    """Calling a loaded function with extra dict keys fails concrete-function matching."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function
    def f(x):
      return math_ops.add(x["a"], 1.0)
    # Trigger a trace.
    f({"a": constant_op.constant(2.0)})
    obj = autotrackable.AutoTrackable()
    obj.__call__ = f
    imported = cycle(obj, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(4.0, imported({"a": 3.0}).numpy())
    # An input with an extra key "b" matches no saved concrete function.
    with self.assertRaisesRegex(
        ValueError, "Could not find matching concrete function to call"
    ):
      imported({"a": 2.0, "b": 3.0})
  def test_shapes_available(self, cycles, use_cpp_bindings):
    """Static shape info (including inferred output shapes) survives save/load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function(
        input_signature=[
            tensor_spec.TensorSpec([None, 3], dtypes.int32),
            tensor_spec.TensorSpec([None, 2], dtypes.int32),
        ]
    )
    def func(x, y):
      return array_ops.concat([x, y], axis=1)
    root = autotrackable.AutoTrackable()
    root.f = func
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    imported_graph = root.f.get_concrete_function().graph
    input_x, input_y = imported_graph.inputs
    self.assertEqual([None, 3], input_x.shape.as_list())
    self.assertEqual([None, 2], input_y.shape.as_list())
    (output,) = imported_graph.outputs
    # concat along axis=1 of [None, 3] and [None, 2] gives [None, 5].
    self.assertEqual([None, 5], output.shape.as_list())
    # The same shapes must also be visible through the serving signature.
    signature = root.signatures["serving_default"]
    self.assertEqual([None, 3], signature.inputs[0].shape.as_list())
    self.assertEqual([None, 2], signature.inputs[1].shape.as_list())
    self.assertEqual([None, 5], signature.outputs[0].shape.as_list())
  def test_variables_destroyed(self, cycles, use_cpp_bindings):
    """Original and loaded variables are garbage-collected once unreferenced."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    v1 = variables.Variable(1.0)
    weak_v1 = weakref.ref(v1)
    root = checkpoint.Checkpoint(v=v1)
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    # After rebinding `root` to the loaded object, dropping v1 should free it.
    del v1
    self.assertIsNone(weak_v1())
    weak_v2 = weakref.ref(root.v)
    del root
    self.assertIsNone(weak_v2())
  def test_variable_attributes_preserved(self, cycles, use_cpp_bindings):
    """trainable/synchronization/aggregation variable attributes survive save/load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    v = variables.Variable(
        1.0,
        trainable=False,
        synchronization=variables.VariableSynchronization.NONE,
        aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA,
    )
    self.assertEqual(variables.VariableSynchronization.NONE, v.synchronization)
    self.assertEqual(
        variables.VariableAggregation.ONLY_FIRST_REPLICA, v.aggregation
    )
    root = autotrackable.AutoTrackable()
    root.v = v
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(False, root.v.trainable)
    self.assertEqual(
        variables.VariableSynchronization.NONE, root.v.synchronization
    )
    self.assertEqual(
        variables.VariableAggregation.ONLY_FIRST_REPLICA, root.v.aggregation
    )
  def test_captured_dataset(self, cycles, use_cpp_bindings):
    """A tf.data.Dataset captured by a traced __call__ keeps working after load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class HasDataset(module.Module):
      def __init__(self):
        super(HasDataset, self).__init__()
        # Squares of 0..4: 0, 1, 4, 9, 16.
        self.dataset = dataset_ops.Dataset.range(5).map(lambda x: x**2)
      @def_function.function
      def __call__(self, x):
        current_sum = array_ops.zeros([], dtype=dtypes.int64)
        for element in self.dataset:
          current_sum += x * element
        return current_sum
    root = HasDataset()
    self.assertEqual(
        3 * (1 + 4 + 9 + 16),
        root(constant_op.constant(3, dtype=dtypes.int64)).numpy(),
    )
    root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(
        3 * (1 + 4 + 9 + 16),
        root(constant_op.constant(3, dtype=dtypes.int64)).numpy(),
    )
  def test_tuple_signature(self, cycles, use_cpp_bindings):
    """A function returning a tuple is exposed via numbered signature outputs."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = checkpoint.Checkpoint()
    root.f = def_function.function(
        lambda: (array_ops.ones([]), array_ops.zeros([])), input_signature=()
    )
    root = cycle(
        root, cycles, signatures=root.f, use_cpp_bindings=use_cpp_bindings
    )
    # Tuple elements become "output_0"/"output_1" keys in the signature result.
    self.assertEqual(
        ({"output_0": 1.0, "output_1": 0.0}),
        self.evaluate(root.signatures["serving_default"]()),
    )
def test_version_info(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = checkpoint.Checkpoint()
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(versions.__version__, root.tensorflow_version)
self.assertEqual(versions.__git_version__, root.tensorflow_git_version)
  def test_load_grad_save(self, cycles, use_cpp_bindings):
    """Gradients still flow through a function after repeated save/load cycles."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = checkpoint.Checkpoint()
    root.v = variables.Variable(2.0)
    root.f = def_function.function(lambda x: root.v * x)
    root.g = def_function.function(root.f)
    # Re-check value and gradient after each individual save/load cycle.
    for _ in range(cycles):
      with backprop.GradientTape() as tape:
        inp = constant_op.constant(2.0)
        tape.watch(inp)
        output = root.g(inp)
        self.assertAllClose(4.0, output)
      # d(v*x)/dx = v = 2.0
      self.assertAllClose(2.0, tape.gradient(output, inp))
      root = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
  def test_destroy_resource(self, cycles, use_cpp_bindings):
    """Deleting a loaded model destroys its TrackableResource via _destroy_resource."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    def get_handle():
      # Same shared_name/container each call, so every handle refers to the
      # same underlying resource.
      return resource_variable_ops.var_handle_op(
          shape=tensor_shape.as_shape([]),
          dtype=dtypes.float32,
          shared_name="my_var_name",
          name="my_var",
          container="my_container",
      )
    class MyResource(resource.TrackableResource):
      def _create_resource(self):
        return get_handle()
      def _initialize(self):
        resource_variable_ops.assign_variable_op(
            self.resource_handle, 1.0, name="assign"
        )
      def _destroy_resource(self):
        handle = get_handle()
        resource_variable_ops.destroy_resource_op(
            handle, ignore_lookup_error=True
        )
    class MyModel(autotrackable.AutoTrackable):
      def __init__(self):
        super(MyModel, self).__init__()
        self.resource = MyResource()
      @def_function.function(input_signature=[])
      def increase(self):
        handle = self.resource.resource_handle
        resource_variable_ops.assign_add_variable_op(
            handle, 10.0, name="assign_add"
        )
        return resource_variable_ops.read_variable_op(handle, dtypes.float32)
    root = MyModel()
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(11, imported.increase().numpy())  # Create the resource.
    handle = imported.resource.resource_handle
    # Delete the imported SaveModel. Since we explicitly set the deleter, it
    # should destroy the resource automatically.
    del imported
    # Try to destroy the resource again, should fail.
    with self.assertRaisesRegex(
        errors.NotFoundError, r"Resource .* does not exist."
    ):
      resource_variable_ops.destroy_resource_op(
          handle, ignore_lookup_error=False
      )
  def test_function_called_as_operation(self, cycles, use_cpp_bindings):
    """A tf.function wrapping a legacy Defun-defined function loads and runs."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    # Legacy function-definition mechanism (pre-tf.function).
    @framework_function.Defun(dtypes.float32)
    def inner(x):
      return x + 1.0
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.float32)]
    )
    def outer(x):
      return inner(x)
    root = module.Module()
    root.f = outer
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertAllClose(2.0, imported.f(constant_op.constant(1.0)))
  def test_ragged(self, cycles, use_cpp_bindings):
    """Traces with RaggedTensor inputs and several Python constants all round-trip."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @def_function.function
    def f(x, c=1):
      """Returns Tensor x incremented by Python constant c."""
      return math_ops.add(x, c)
    # One concrete function per Python-constant value of c.
    for c in (1, 2, 3):
      _ = f.get_concrete_function(
          ragged_tensor.RaggedTensorSpec([None, None], dtype=dtypes.int32), c
      )
    obj = autotrackable.AutoTrackable()
    obj.f = f
    # First, load with no serving signatures at all.
    imported1 = cycle(
        obj, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
    )
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    self.assertAllEqual(imported1.f(rt), [[2, 3], [4]])
    self.assertAllEqual(imported1.f(rt, 2), [[3, 4], [5]])
    self.assertAllEqual(imported1.f(rt, 3), [[4, 5], [6]])
    # Then with default signatures; c must be passed explicitly here.
    imported2 = cycle(obj, cycles, use_cpp_bindings=use_cpp_bindings)
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    self.assertAllEqual(imported2.f(rt, 1), [[2, 3], [4]])
    self.assertAllEqual(imported2.f(rt, 2), [[3, 4], [5]])
    self.assertAllEqual(imported2.f(rt, 3), [[4, 5], [6]])
def test_accepts_io_device(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
options = load_options.LoadOptions()
self.assertIsNone(options.experimental_io_device)
options = load_options.LoadOptions(experimental_io_device="/job:localhost")
self.assertEqual("/job:localhost", options.experimental_io_device)
  def _custom_saveable_object(self, cycles, use_cpp_bindings):
    """Shared helper: save/load a model holding MutableHashTables with custom saveables.

    Only `table` is reachable through the traced `lookup` function, so after
    loading, `table2`'s entry ("idk") is absent and lookups fall back to the
    default value (-1).
    """
    if context.is_tfrt_enabled():
      self.skipTest("Disable due to b/190539415.")
    root = autotrackable.AutoTrackable()
    root.table = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32, -1)
    root.table.insert("foo", 15)
    root.table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32, -1)
    root.table2.insert("idk", 21)
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.string)]
    )
    def lookup(key):
      return root.table.lookup(key)
    root.lookup = lookup
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(self.evaluate(imported.lookup("foo")), 15)
    self.assertEqual(self.evaluate(imported.lookup("idk")), -1)
    if not saveable_compat.force_checkpoint_conversion_enabled():
      self.assertEqual(
          {"table"}, imported.table._self_saveable_object_factories.keys()
      )
  def test_load_custom_saveable_object(self, cycles, use_cpp_bindings):
    """Custom saveable objects load correctly with the default checkpoint format."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    self._custom_saveable_object(cycles, use_cpp_bindings=use_cpp_bindings)
  def test_load_custom_saveable_object_ckpt_conversion(
      self, cycles, use_cpp_bindings
  ):
    """Custom saveable objects load correctly with forced checkpoint conversion."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    # Tests custom saveable object with checkpoint conversion enabled (forces
    # Trackable-based checkpoint implementation).
    saveable_compat.force_checkpoint_conversion()
    self._custom_saveable_object(cycles, use_cpp_bindings=use_cpp_bindings)
  def test_load_resource_with_dependency(self, cycles, use_cpp_bindings):
    """A StaticHashTable with an Asset-backed initializer works after the asset file is deleted."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    # Test with StaticHashTable, which has a _initializer attribute that tracks
    # the Asset vocab table.
    class MyLookupModel(autotrackable.AutoTrackable):
      def __init__(self, vocab_file):
        vocab_initializer = lookup_ops.TextFileInitializer(
            vocab_file,
            key_dtype=dtypes.string,
            key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
            value_dtype=dtypes.int64,
            value_index=lookup_ops.TextFileIndex.LINE_NUMBER,
        )
        self._vocab_table = lookup_ops.StaticHashTable(
            vocab_initializer, default_value=-1
        )
      @def_function.function(
          input_signature=[tensor_spec.TensorSpec((None,), dtypes.string)]
      )
      def __call__(self, inputs):
        return self._vocab_table.lookup(inputs)
    vocab_file = self._make_asset("\n".join(["a", "b", "c", "d"]))
    root = MyLookupModel(vocab_file)
    imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    # Deleting the original vocab file proves the SavedModel carries its own
    # copy of the asset.
    file_io.delete_file(vocab_file)
    self.assertAllEqual(imported(constant_op.constant(["d", "b"])), [3, 1])
  def test_custom_gradients(self, cycles, use_cpp_bindings):
    """First- and second-order custom gradients (incl. nested functions) survive save/load."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    @custom_gradient.custom_gradient
    def log1pexp(x):
      e = math_ops.exp(x)
      def grad(dy):
        # Deliberately "wrong" gradient so the test can tell the custom
        # gradient was used rather than the autodiff one.
        return dy * e  # incorrect to check the custom gradients is respected.
      return math_ops.log(1 + e), grad
    @def_function.function
    def g(x):
      y = log1pexp(x)
      @def_function.function
      def g_nest():
        return log1pexp(y)
      return g_nest()
    @def_function.function
    def f(x):
      return log1pexp(g(x * x))
    v = variables.Variable(1.)
    # Reference first- and second-order gradients before saving.
    with backprop.GradientTape() as tape2:
      with backprop.GradientTape() as tape:
        tape.watch(v)
        y = f(v)
      expected_grads = tape.gradient(y, v)
    expected_grad_grads = tape2.gradient(expected_grads, v)
    root = autotrackable.AutoTrackable()
    root.f = f
    loaded = cycle(
        root,
        cycles,
        save_option=save_options.SaveOptions(
            experimental_custom_gradients=True
        ),
        use_cpp_bindings=use_cpp_bindings,
    )
    with backprop.GradientTape() as tape2:
      with backprop.GradientTape() as tape:
        tape.watch(v)
        y = loaded.f(v)
      grads = tape.gradient(y, v)
    grad_grads = tape2.gradient(grads, v)
    self.assertAllClose(grads, expected_grads)
    self.assertAllClose(grad_grads, expected_grad_grads)
  def test_custom_gradients_with_none_grad(self, cycles, use_cpp_bindings):
    """A custom gradient producing a None grad (int input) round-trips; see jax#7123."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    # https://github.com/google/jax/issues/7123
    @custom_gradient.custom_gradient
    def f(params, state):
      def grad_fn(*args):
        return args
      return (params, state), grad_fn
    @def_function.function(
        input_signature=[
            tensor_spec.TensorSpec([], dtypes.float32),
            tensor_spec.TensorSpec([], dtypes.int32),
        ]
    )
    def predict(params, state):
      return f(params, state)
    params = variables.Variable(1.0)
    # None grads only appear when state is an int.
    state = constant_op.constant(3, dtype=dtypes.int32)
    with backprop.GradientTape() as tape:
      tape.watch(params)
      y = predict(params, state)
      expected_grads = tape.gradient(y, params)
    root = autotrackable.AutoTrackable()
    root.fn = predict
    loaded = cycle(
        root,
        cycles,
        save_option=save_options.SaveOptions(
            experimental_custom_gradients=True
        ),
        use_cpp_bindings=use_cpp_bindings,
    )
    with backprop.GradientTape() as tape:
      tape.watch(params)
      y = loaded.fn(params, state)
      grads = tape.gradient(y, params)
    self.assertAllClose(grads, expected_grads)
  def test_custom_gradients_with_none_grad_and_partial_shape(
      self, cycles, use_cpp_bindings
  ):
    """Same as test_custom_gradients_with_none_grad but with unknown input shapes."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    # https://github.com/google/jax/issues/7123
    @custom_gradient.custom_gradient
    def f(params, state):
      def grad_fn(*args):
        return args
      return (params, state), grad_fn
    @def_function.function(
        input_signature=[
            tensor_spec.TensorSpec(None, dtypes.float32),
            tensor_spec.TensorSpec(None, dtypes.int32),
        ]
    )
    def predict(params, state):
      return f(params, state)
    params = variables.Variable(1.0)
    # None grads only appear when state is an int.
    state = constant_op.constant(3, dtype=dtypes.int32)
    with backprop.GradientTape() as tape:
      tape.watch(params)
      y = predict(params, state)
      expected_grads = tape.gradient(y, params)
    root = autotrackable.AutoTrackable()
    root.fn = predict
    loaded = cycle(
        root,
        cycles,
        save_option=save_options.SaveOptions(
            experimental_custom_gradients=True
        ),
        use_cpp_bindings=use_cpp_bindings,
    )
    with backprop.GradientTape() as tape:
      tape.watch(params)
      y = loaded.fn(params, state)
      grads = tape.gradient(y, params)
    self.assertAllClose(grads, expected_grads)
  def test_signature_propagates_experimental_attr(
      self, cycles, use_cpp_bindings
  ):
    """experimental_attributes set on a tf.function appear on the loaded serving signature."""
    # TODO(b/264869228) Fix LoadTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    experimental_attributes = {"disable_summaries_at_runtime": ["x", True]}
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
        experimental_attributes=experimental_attributes,
    )
    def f(x):
      return x * 2.0
    root.f = f
    self.assertEqual(root.f(constant_op.constant(1.0)).numpy(), 2.0)
    loaded = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(loaded.f(constant_op.constant(1.0)).numpy(), 2.0)
    # The attribute must be present verbatim in the signature's FunctionDef.
    self.assertProtoEquals(
        r"""
          list {
            s: 'x',
            b: True
          }
        """,
        loaded.signatures["serving_default"].function_def.attr[
            "disable_summaries_at_runtime"
        ],
    )
@parameterized.named_parameters(*_test_params())
class SingleCycleTests(test.TestCase, parameterized.TestCase):
  def test_load_with_tags(self, use_cpp_bindings):
    """load() accepts serving tags as a list, a string, or a set; wrong tags raise."""
    if use_cpp_bindings:
      self.skipTest("Cpp bindings do not support Tags.")
    root = autotrackable.AutoTrackable()
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(root, path)
    # Only the SERVING tag was written; EVAL must not match.
    with self.assertRaises(ValueError):
      load.load(path, tags=[tag_constants.EVAL])
    load.load(path, tags=[tag_constants.SERVING])
    load.load(path, tags=tag_constants.SERVING)
    load.load(path, tags=set([tag_constants.SERVING]))
  def test_save_load_contains_with_fspath(self, use_cpp_bindings):
    """save/contains_saved_model/load all accept a pathlib.Path instead of a str."""
    if use_cpp_bindings:
      self.skipTest("Cpp bindings cannot work with pathlib object.")
    root = autotrackable.AutoTrackable()
    path = pathlib.Path(tempfile.mkdtemp(prefix=self.get_temp_dir()))
    save.save(root, path)
    self.assertTrue(loader_impl.contains_saved_model(path))
    test_load(path, use_cpp_bindings=use_cpp_bindings)
  def test_single_restore_op_used(self, use_cpp_bindings):
    """Loading a model with several variables issues exactly one RestoreV2 op."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = module.Module()
    root.v1 = variables.Variable(1.0)
    root.v2 = variables.Variable(2.0)
    root.v3 = variables.Variable(3.0)
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(root, path)
    restore_count = 0
    def _count_restores(op_type, *unused_args, **unused_kwargs):
      nonlocal restore_count
      if op_type == b"RestoreV2":
        restore_count += 1
    # Only the save+load between add/remove is counted by the callback.
    op_callbacks.add_op_callback(_count_restores)
    save.save(root, path)
    test_load(path, use_cpp_bindings=use_cpp_bindings)
    op_callbacks.remove_op_callback(_count_restores)
    self.assertEqual(1, restore_count)
  def test_docstring_examples(self, use_cpp_bindings):
    """Exercises the save/load examples shown in the public API docstrings."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    exported = checkpoint.Checkpoint(v=variables.Variable(3.0))
    exported.f = def_function.function(
        lambda x: exported.v * x,
        input_signature=[
            tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
        ],
    )
    save.save(exported, path)
    imported = test_load(path)
    self.assertEqual(3.0, imported.v.numpy())
    self.assertEqual(6.0, imported.f(x=constant_op.constant(2.0)).numpy())
    # Save again, this time with an explicit serving signature.
    save.save(exported, path, exported.f.get_concrete_function())
    imported = test_load(path, use_cpp_bindings=use_cpp_bindings)
    f = imported.signatures["serving_default"]
    self.assertAllEqual(
        [[-3.0]], f(x=constant_op.constant([[-1.0]]))["output_0"].numpy()
    )
  def test_object_with_extra_dependencies(self, use_cpp_bindings):
    """Children added via _trackable_children (not attributes) are saved and loaded."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class Extra(autotrackable.AutoTrackable):
      def _trackable_children(self, save_type, **kwargs):
        children = super(Extra, self)._trackable_children(save_type, **kwargs)
        # "a" exists only in the children map, never as a Python attribute.
        children["a"] = variables.Variable(5.0)
        return children
    root = Extra()
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(root, path)
    imported = test_load(path)
    self.assertEqual(5, self.evaluate(imported.a))
  def test_save_cached_variable(self, use_cpp_bindings):
    """A graph-mode variable with a caching_device saves and loads in graph and eager modes."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    with ops.Graph().as_default(), session_lib.Session() as session:
      obj = autotrackable.AutoTrackable()
      # caching_device makes reads of v go through a cached copy.
      obj.v = variables.Variable(2.0, caching_device=lambda op: op.device)
      obj.w = variables.Variable(3.0)
      session.run([obj.v.initializer, obj.w.initializer])
      @def_function.function
      def total():
        return obj.v + obj.w
      @def_function.function(input_signature=[tensor_spec.TensorSpec([])])
      def wrapped_total(x):
        return total() + x
      @def_function.function
      def increment_v(x):
        obj.v.assign_add(x)
        return x
      session.run(increment_v(constant_op.constant(3.0)))  # generate signatures
      self.assertAllClose(8, total())
      self.assertAllClose(13, wrapped_total(constant_op.constant(5.0)))
      obj.total = total
      obj.wrapped_total = wrapped_total.get_concrete_function()
      obj.increment_v = increment_v
      save_dir = os.path.join(self.get_temp_dir(), "saved_model")
      save.save(obj, save_dir, signatures=total.get_concrete_function())
      imported = test_load(save_dir)
      session.run(variables.global_variables_initializer())
      self.assertAllClose(8, imported.total())
      session.run(imported.increment_v(4))
      self.assertAllClose(12, imported.total())
      self.assertAllClose(15, imported.wrapped_total(constant_op.constant(3.0)))
      self.assertAllClose(
          {"output_0": 12}, imported.signatures["serving_default"]()
      )
    # Try loading and running the function in eager mode
    imported = test_load(save_dir)
    self.assertAllClose(8, imported.total())
    imported.increment_v(5)
    self.assertAllClose(13, imported.total())
    self.assertAllClose(13.5, imported.wrapped_total(constant_op.constant(0.5)))
    self.assertAllClose(
        {"output_0": 13}, imported.signatures["serving_default"]()
    )
  # TODO(allenl, kkb): Use the new memory checker here once it's fast enough (3
  # iterations took hundreds of seconds). It would be really nice to check
  # allocations at a lower level.
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def test_functions_cleaned(self, use_cpp_bindings):
    """A save/load cycle leaks no Python objects (checked by the decorator)."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    if sys.version_info.major < 3:
      self.skipTest("Not working in Python 2")
    root = module.Module()
    root.v = variables.Variable(1.0)
    root.f = def_function.function(
        lambda x: x + root.v,
        input_signature=[
            tensor_spec.TensorSpec(shape=[], dtype=dtypes.float32)
        ],
    )
    cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
  def test_load_partial_object(self, use_cpp_bindings):
    """load_partial restores only the requested nodes; missing dependencies raise."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = module.Module()
    root.variables_holder = module.Module()
    root.variables_holder.v = variables.Variable(1.0)
    class Adder(module.Module):
      @def_function.function(input_signature=[tensor_spec.TensorSpec(shape=[])])
      def __call__(self, y):
        # Captures the variable from the enclosing `root`, creating a
        # dependency between the two requested nodes.
        root.variables_holder.v.assign_add(y)
        return 1
    root.adder = Adder()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    imported = load.load_partial(
        save_dir, ["root.variables_holder.v", "root.adder"]
    )
    v = imported["root.variables_holder.v"]
    adder = imported["root.adder"]
    self.assertEqual(self.evaluate(v), 1)
    adder(5)
    self.assertEqual(self.evaluate(v), 6)
    # Requesting the adder without the variable it captures must fail.
    with self.assertRaisesRegex(
        ValueError, "does not include all required objects for loading"
    ):
      imported = load.load_partial(save_dir, ["root.adder"])
  def test_load_partial_checkpoint(self, use_cpp_bindings):
    """load_partial into an existing object requires allow_partial_checkpoint when values are unbound."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = module.Module()
    root.variables_holder = module.Module()
    root.variables_holder.v = variables.Variable(1.0)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    loaded = module.Module()
    # `loaded.v` has no counterpart in the SavedModel, so some checkpoint
    # values stay unbound.
    loaded.v = variables.Variable(2.0)
    load.load_partial(
        save_dir,
        {"root": loaded},
        options=load_options.LoadOptions(allow_partial_checkpoint=True),
    )
    self.assertEqual(loaded.variables_holder.v.numpy(), 1)
    with self.assertRaisesRegex(AssertionError, "were not bound"):
      load.load_partial(save_dir, {"root": loaded})
  def test_call_untraced_function_raises_error(self, use_cpp_bindings):
    """Saving warns about untraced functions; calling one after load raises ValueError."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class ObjWithFunction(module.Module):
      @def_function.function
      def foo(self, a):
        return a
    root = ObjWithFunction()
    # `foo` is never called before saving, so no concrete function exists.
    with self.assertLogs(level="INFO") as logs:
      loaded = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
    expected_save_message = (
        "INFO:absl:Found untraced functions such as foo while saving "
        "(showing 1 of 1). These functions will not be directly callable after "
        "loading."
    )
    self.assertIn(expected_save_message, logs.output)
    with self.assertRaisesRegex(
        ValueError, "Found zero restored functions for caller function."
    ):
      loaded.foo(1)
  def test_restored_function_execute_eagerly(self, use_cpp_bindings):
    """A restored function is callable while run_functions_eagerly(True) is active."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    try:
      def_function.run_functions_eagerly(True)
      class MyModel(module.Module):
        @def_function.function
        def __call__(self, inputs, training=False):
          return math_ops.multiply(0.5, inputs)
      model = MyModel()
      model.__call__.get_concrete_function(
          tensor_spec.TensorSpec([None], dtypes.float32)
      )
      loaded = cycle(model, 1, use_cpp_bindings=use_cpp_bindings)
      # Calling the function should not throw an exception.
      loaded(constant_op.constant([1.0]))
    finally:
      # Always restore the global eager-execution setting for other tests.
      def_function.run_functions_eagerly(False)
  def test_restored_model_concrete_function_is_deterministic(
      self, use_cpp_bindings
  ):
    """Repeated save/load cycles always produce the same concrete-function signature."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    previous_concrete_function = None
    # Rebuild and reload the model many times; any nondeterminism in
    # serialization would eventually show up as a differing signature.
    for _ in range(100):
      class MyModel(module.Module):
        @def_function.function
        def __call__(self, x):
          return x * constant_op.constant(3.0)
      model = MyModel()
      model(array_ops.ones((7, 3), dtype=dtypes.float32))
      model.__call__.get_concrete_function(
          tensor_spec.TensorSpec([None, 3], dtypes.float32)
      )
      loaded = cycle(model, 1, use_cpp_bindings=use_cpp_bindings)
      # Ensure the newly loaded concrete function is the same as the previous
      # after a cycle of serialization / deserialization.
      new_concrete_function = loaded.__call__.get_concrete_function(
          tensor_spec.TensorSpec([None, 3], dtypes.float32)
      )
      if previous_concrete_function is not None:
        self.assertEqual(
            previous_concrete_function.pretty_printed_signature(),
            new_concrete_function.pretty_printed_signature(),
        )
      previous_concrete_function = new_concrete_function
def test_garbage_collection_capturable_resource_doesnt_raise_exception(
self, use_cpp_bindings
):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
model = module.Module()
model.mapping = lookup_ops.StaticHashTable(
lookup_ops.KeyValueTensorInitializer(
keys=math_ops.range(1, dtype=dtypes.int32), values=["foo"]
),
"default_value",
)
loaded = cycle(model, 1, use_cpp_bindings=use_cpp_bindings)
del model
del loaded
# Exceptions raised during garbage collection are simply printed to stderr
# and ignored, and we have no way to access them. We'll capture stdout
# during the garbage collection process and inspect to see if any
# exceptions were raised.
stderr = io.StringIO()
with contextlib.redirect_stderr(stderr):
gc.collect()
if "Exception ignored in" in stderr.getvalue():
raise Exception(stderr.getvalue())
  def test_captured_dataset_with_asset(self, use_cpp_bindings):
    """A TFRecordDataset's backing file is saved as an asset and found after relocation."""
    # TODO(b/264869753) Fix SingleCycleTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    class HasDataset(module.Module):
      def __init__(self, temp_dir, file_name):
        super(HasDataset, self).__init__()
        file = os.path.join(temp_dir, file_name)
        with tf_record.TFRecordWriter(file, "GZIP") as f:
          for v in ["a", "aa", "aaa"]:
            f.write(str(v))
        self.dataset = readers.TFRecordDataset([file], compression_type="GZIP")
      @def_function.function
      def __call__(self, x):
        current_sum = array_ops.zeros([], dtype=dtypes.int32)
        for element in self.dataset:
          current_sum += x * string_ops.string_length(element)
        return current_sum
    temp_dir = self.get_temp_dir()
    file_name = "tf_record_asset.tfrecord.gz"
    root = HasDataset(temp_dir, file_name)
    self.assertEqual(
        18,  # 3 * (1 + 2 + 3)
        root(constant_op.constant(3, dtype=dtypes.int32)).numpy(),
    )
    save_dir = os.path.join(self.get_temp_dir(), "save_dir")
    save.save(root, save_dir)
    # Delete the original record file; only the asset copy should remain.
    file_io.delete_file(os.path.join(temp_dir, file_name))
    asset_path = os.path.join(save_dir, "assets/{}".format(file_name))
    self.assertTrue(file_io.file_exists(asset_path))
    # Moving the SavedModel directory must not break the asset reference.
    load_dir = os.path.join(self.get_temp_dir(), "load_dir")
    file_io.rename(save_dir, load_dir)
    loaded = test_load(load_dir, use_cpp_bindings=use_cpp_bindings)
    self.assertEqual(
        18,  # 3 * (1 + 2 + 3)
        loaded(constant_op.constant(3, dtype=dtypes.int32)).numpy(),
    )
  def test_function_aliases(self, use_cpp_bindings):
    """Function aliases saved via SaveOptions load back as callable aliases."""
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    root = autotrackable.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2 * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
    )
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    options = save_options.SaveOptions(function_aliases={
        "my_func": root.f,
    })
    save.save(root, save_dir, root.f, options=options)
    loaded = test_load(
        save_dir,
        use_cpp_bindings=use_cpp_bindings,
        options=load_options.LoadOptions(
            experimental_load_function_aliases=True
        ),
    )
    self.assertLen(loaded.function_aliases, 1)
    self.assertIn("my_func", loaded.function_aliases)
    self.assertEqual(loaded.function_aliases["my_func"](1.0).numpy(), 2.0)
def test_function_aliases_with_non_saved_function(self, use_cpp_bindings):
  """An alias may point at a function that is not itself tracked/saved."""
  if use_cpp_bindings:
    self.skipTest("Not implemented for cpp.")
  # `f` below will be aliased but not saved because is not tracked
  f = def_function.function(lambda x: 2 * x)
  root = autotrackable.AutoTrackable()
  root.g = def_function.function(lambda x: 2 * f(x))
  # Create two traces
  root.g(constant_op.constant(1))
  root.g(constant_op.constant(1.0, dtype=dtypes.float32))
  save_dir = os.path.join(self.get_temp_dir(), "saved_model")
  options = save_options.SaveOptions(
      function_aliases={
          "my_func": f,
      }
  )
  save.save(root, save_dir, options=options)
  loaded = test_load(
      save_dir,
      use_cpp_bindings=use_cpp_bindings,
      options=load_options.LoadOptions(
          experimental_load_function_aliases=True
      ),
  )
  # Both traces of `f` (int and float32) should come back as concrete
  # functions under the single alias.
  self.assertLen(loaded.function_aliases, 1)
  self.assertIn("my_func", loaded.function_aliases)
  self.assertLen(loaded.function_aliases["my_func"], 2)
  self.assertIsInstance(
      loaded.function_aliases["my_func"][0], types_core.ConcreteFunction
  )
  self.assertIsInstance(
      loaded.function_aliases["my_func"][1], types_core.ConcreteFunction
  )
@unittest.skip("skip until unexpected retracing is fixed/handled b/280121368")
def test_function_aliases_with_concrete_function(self, use_cpp_bindings):
  """Aliases a single concrete function (one specific trace of `f`)."""
  if use_cpp_bindings:
    self.skipTest("Not implemented for cpp.")
  # `f` below will be aliased but not saved because is not tracked
  f = def_function.function(lambda x: 2 * x)
  root = autotrackable.AutoTrackable()
  root.g = def_function.function(lambda x: 2 * f(x))
  # Create two traces
  root.g(constant_op.constant(1))
  root.g(constant_op.constant(1.0, dtype=dtypes.float32))
  self.assertLen(f._list_all_concrete_functions(), 2)
  save_dir = os.path.join(self.get_temp_dir(), "saved_model")
  # Alias only the float32 trace of f.
  options = save_options.SaveOptions(
      function_aliases={
          "my_func": f.get_concrete_function(
              tensor_spec.TensorSpec([], dtypes.float32)
          ),
      }
  )
  # get_concrete_function() above must reuse an existing trace rather than
  # retrace (see the skip reason).
  self.assertLen(f._list_all_concrete_functions(), 2)
  save.save(root, save_dir, options=options)
  loaded = test_load(
      save_dir,
      use_cpp_bindings=use_cpp_bindings,
      options=load_options.LoadOptions(
          experimental_load_function_aliases=True
      ),
  )
  # Only the single aliased trace is restored.
  self.assertLen(loaded.function_aliases, 1)
  self.assertIn("my_func", loaded.function_aliases)
  self.assertLen(loaded.function_aliases["my_func"], 1)
  self.assertIsInstance(
      loaded.function_aliases["my_func"][0], types_core.ConcreteFunction
  )
@unittest.skip("skip until unexpected retracing is fixed/handled b/280121368")
def test_function_aliases_with_concrete_functions(self, use_cpp_bindings):
  """Aliases a subset (two of three) of a function's concrete traces."""
  if use_cpp_bindings:
    self.skipTest("Not implemented for cpp.")
  # `f` below will be aliased but not saved because is not tracked
  f = def_function.function(lambda x: 2 * x)
  root = autotrackable.AutoTrackable()
  root.g = def_function.function(lambda x: 2 * f(x))
  # Create 3 traces for g, which will in turn create 3 traces for f.
  root.g(x=constant_op.constant(1))
  root.g(x=constant_op.constant(1.0, dtype=dtypes.float32))
  root.g(x=constant_op.constant(1.0, dtype=dtypes.float16))
  self.assertLen(f._list_all_concrete_functions(), 3)
  save_dir = os.path.join(self.get_temp_dir(), "saved_model")
  options = save_options.SaveOptions(
      function_aliases={
          # Alias 2 out of 3 traces of f
          "my_func": [
              f.get_concrete_function(
                  x=tensor_spec.TensorSpec([], dtypes.int32)
              ),
              f.get_concrete_function(
                  x=tensor_spec.TensorSpec([], dtypes.float32)
              ),
          ],
      }
  )
  # get_concrete_function() above must reuse existing traces, not add new
  # ones (see the skip reason).
  self.assertLen(f._list_all_concrete_functions(), 3)
  save.save(root, save_dir, options=options)
  loaded = test_load(
      save_dir,
      use_cpp_bindings=use_cpp_bindings,
      options=load_options.LoadOptions(
          experimental_load_function_aliases=True
      ),
  )
  # Only the two aliased traces are restored.
  self.assertLen(loaded.function_aliases, 1)
  self.assertIn("my_func", loaded.function_aliases)
  self.assertLen(loaded.function_aliases["my_func"], 2)
  self.assertIsInstance(
      loaded.function_aliases["my_func"][0], types_core.ConcreteFunction
  )
  self.assertIsInstance(
      loaded.function_aliases["my_func"][1], types_core.ConcreteFunction
  )
def test_function_aliases_name_collision(self, use_cpp_bindings):
  """Loading aliases fails when the object already has a `function_aliases` attribute."""
  if use_cpp_bindings:
    self.skipTest("Not implemented for cpp.")
  root = autotrackable.AutoTrackable()
  root.f = def_function.function(
      lambda x: 2. * x,
      input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
  # This tracked attribute collides with the attribute name the loader
  # wants to populate.
  root.function_aliases = variables.Variable(1.0)
  save_dir = os.path.join(self.get_temp_dir(), "saved_model")
  options = save_options.SaveOptions(function_aliases={
      "my_func": root.f,
  })
  save.save(root, save_dir, root.f, options=options)
  with self.assertRaisesRegex(
      ValueError, "Could not load with experimental_load_function_aliases"
  ):
    test_load(
        save_dir,
        use_cpp_bindings=use_cpp_bindings,
        options=load_options.LoadOptions(
            experimental_load_function_aliases=True
        ),
    )
# TODO(b/264882754) Support Cpp bindings DeferredInitModuleVariablesTest
class DeferredInitModuleVariablesTest(test.TestCase, parameterized.TestCase):
  """Tests saving without checkpoints and deferring variable init to load time."""

  def test_deferred_init_module_variables(self):
    """Defer initialization of variables in a module to the load stage."""

    class MyModule(module.Module):

      def __init__(self, size):
        super().__init__()
        self.size = size
        # variable initialized by a Tensor-compatible value
        self.w1 = variables.Variable(
            constant_op.constant(1., shape=[self.size]), trainable=False)
        # variable initialized by a function
        self.w2 = variables.Variable(
            lambda: constant_op.constant(2., shape=[self.size]))
        # variable instantiated lazily in call()
        self.w3 = None

      def call(self):
        # Lazily create w3 on first call.
        if self.w3 is None:
          self.w3 = variables.Variable(
              constant_op.constant(3., shape=[self.size]))
        # Every call increments each weight by one.
        for w in (self.w1, self.w2, self.w3):
          w.assign_add(constant_op.constant(1., shape=[self.size]))
        return self.w1, self.w2, self.w3

    def export_initializer(initial_value, export_dir):
      # Wrap an initial value (tensor or callable) in a tiny SavedModel so
      # it can be loaded later as the variable's initializer.

      class Initializer(module.Module):

        @def_function.function(input_signature=[])
        def call(self):
          if callable(initial_value):
            return initial_value()
          return initial_value

      save.save(Initializer(), export_dir)

    def create_and_save_module(weight_size):
      initial_values = {}  # For storing initial_value of created variables

      def variable_creator(next_creator, **kwargs):
        # Record each variable's initial_value, keyed by its name with the
        # ":<output index>" suffix stripped.
        variable = next_creator(**kwargs)
        variable_name = variable.name
        if ":" in variable_name:
          variable_name = variable_name[:variable_name.index(":")]
        initial_values[variable_name] = kwargs["initial_value"]
        return variable

      export_dir = self.create_tempdir().full_path
      with ops.Graph().as_default():
        with variable_scope.variable_creator_scope(variable_creator):
          exported = MyModule(weight_size)
          exported.call = def_function.function(input_signature=[])(
              exported.call)
          module_dir = f"{export_dir}/module"
          file_io.recursive_create_dir(module_dir)
          # Skip the checkpoint so initial values are not serialized with
          # the module itself.
          save.save_and_return_nodes(
              exported, module_dir, experimental_skip_checkpoint=True)

      # Save the initializer of the created variables.
      for variable_name, initial_value in initial_values.items():
        export_initializer(initial_value,
                           f"{export_dir}/variables/{variable_name}")

      return export_dir

    def load_and_run_module(export_dir, weight_size):

      # pylint: disable=unused-argument
      def layer_variable_creator(next_creator, **kwargs):
        # Substitute the initializer loaded from the per-variable
        # SavedModel written by export_initializer().
        variable_dir = f"{export_dir}/variables/{kwargs['name']}"
        initializer = load.load(variable_dir)
        kwargs["initial_value"] = initializer.call
        variable = resource_variable_ops.ResourceVariable(**kwargs)
        return variable

      with ops.Graph().as_default():
        with variable_scope.variable_creator_scope(layer_variable_creator):
          imported = load.load(
              f"{export_dir}/module",
              options=load_options.LoadOptions(
                  experimental_skip_checkpoint=True))
          outputs = imported.call()

        with self.cached_session() as sess:
          variables.global_variables_initializer().run()
          # Check if variables work as expected across multiple iterations.
          for i in range(3):
            np_outputs = sess.run(outputs)
            for j, np_output in enumerate(np_outputs):
              self.assertAllClose(np_output, np.full(weight_size, i + j + 2))

    # The size of the serialized content (both module and variables) stays
    # small even with a large weight_size as the initial values are not stored
    # in checkpoints.
    weight_size = 1024
    export_dir = create_and_save_module(weight_size)
    load_and_run_module(export_dir, weight_size)

  def _make_asset(self, contents):
    # Write `contents` to a fresh temp file and return its path.
    fd, filename = tempfile.mkstemp(prefix=self.get_temp_dir())
    with os.fdopen(fd, "w") as f:
      f.write(contents)
    return filename

  @parameterized.named_parameters(*_test_params())
  def test_assets(self, use_cpp_bindings):
    # TODO(b/264882754) Fix DeferredInitModuleVariablesTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")

    class MyLookupModel(autotrackable.AutoTrackable):

      def __init__(self, vocab_file):
        # Map each whole line of the vocab file to its line number;
        # unknown tokens look up as -1.
        vocab_initializer = lookup_ops.TextFileInitializer(
            vocab_file,
            key_dtype=dtypes.string,
            key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
            value_dtype=dtypes.int64,
            value_index=lookup_ops.TextFileIndex.LINE_NUMBER,
        )
        self._vocab_table = lookup_ops.StaticHashTable(
            vocab_initializer, default_value=-1
        )

      @def_function.function(
          input_signature=[tensor_spec.TensorSpec((None,), dtypes.string)]
      )
      def __call__(self, inputs):
        return self._vocab_table.lookup(inputs)

    vocab_file = self._make_asset("\n".join(["a", "b", "c", "d"]))
    root = MyLookupModel(vocab_file)
    save_dir = os.path.join(self.get_temp_dir(), "save_dir")
    save.save_and_return_nodes(
        root, save_dir, experimental_skip_checkpoint=True
    )
    # Delete the original vocab file; the loaded model must rely on the
    # copy stored in the SavedModel's assets directory.
    file_io.delete_file(vocab_file)
    load_dir = os.path.join(self.get_temp_dir(), "load_dir")
    file_io.rename(save_dir, load_dir)
    imported = test_load(
        load_dir,
        options=load_options.LoadOptions(experimental_skip_checkpoint=True),
        use_cpp_bindings=use_cpp_bindings,
    )
    self.assertAllEqual(imported(constant_op.constant(["d", "b"])), [3, 1])
class _TestModel(module.Module):
  """Model with a CPU-pinned weight table, used by the memory tests below."""

  def __init__(self, rows, cols):
    super().__init__()
    self.rows = rows
    self.cols = cols
    self.table = None

  def __call__(self, x):
    # NOTE(review): the table variable is (re)created on every call — fine
    # for the single-call tests here, but not a general pattern.
    with ops.device("/cpu:0"):
      self.table = variables.Variable(
          constant_op.constant(1.0, shape=[self.rows, self.cols])
      )
      x = math_ops.matmul(self.table, x)
      x = math_ops.reduce_sum(x, axis=0)
    return x
@parameterized.named_parameters(*_test_params())
class SavedModelLoadMemoryTests(test.TestCase, parameterized.TestCase):
  """Device-placement behavior when loading models with saved variable devices."""

  @test_util.run_gpu_only
  def test_no_oom_loading_large_tenor(self, use_cpp_bindings):
    # TODO(b/264882686) Fix DeferredInitModuleVariablesTest
    if use_cpp_bindings:
      self.skipTest("Not implemented for cpp.")
    if not config.get_soft_device_placement():
      self.skipTest("This test only works for soft device placement is on")
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    ncols = 16
    nrows = 32
    model = _TestModel(rows=nrows, cols=ncols)
    x = array_ops.zeros(shape=(ncols, 2), dtype=dtypes.float32)
    y = model(x)
    # Record each variable's device placement in the SavedModel.
    save.save(
        model,
        save_dir,
        options=save_options.SaveOptions(
            experimental_variable_policy=save_options.VariablePolicy.SAVE_VARIABLE_DEVICES
        ),
    )
    # Honoring the saved devices keeps the table on CPU.
    loaded_on_cpu = test_load(
        path=save_dir,
        options=load_options.LoadOptions(
            experimental_variable_policy=save_options.VariablePolicy.SAVE_VARIABLE_DEVICES
        ),
        use_cpp_bindings=use_cpp_bindings,
    )
    # NOTE(review): this second load passes neither options nor
    # use_cpp_bindings — presumably intentional (default load places the
    # variable on GPU); confirm.
    loaded_on_gpu = test_load(save_dir)
    self.assertIn("CPU", loaded_on_cpu.table.device)
    self.assertIn("GPU", loaded_on_gpu.table.device)
if __name__ == "__main__":
  # Run the TensorFlow test runner when executed as a script.
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@saved_model@load_test.py@.PATH_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/textfont/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``familysrc`` property of ``scattergeo.textfont``."""

    def __init__(
        self, plotly_name="familysrc", parent_name="scattergeo.textfont", **kwargs
    ):
        # Source-array properties default to the "none" edit type; a
        # caller-supplied value in kwargs takes precedence.
        edit_type = kwargs.pop("edit_type", "none")
        super(FamilysrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@textfont@_familysrc.py@.PATH_END.py
|
{
"filename": "CLIexample.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/examples/CLIexample.py",
"type": "Python"
}
|
"""
Display a plot and an image with minimal setup.
pg.plot() and pg.image() are indended to be used from an interactive prompt
to allow easy data inspection (but note that PySide unfortunately does not
call the Qt event loop while the interactive prompt is running, in this case
it is necessary to call QApplication.exec_() to make the windows appear).
"""
import numpy as np
import pyqtgraph as pg
data = np.random.normal(size=1000)
pg.plot(data, title="Simplest possible plotting example")
data = np.random.normal(size=(500,500))
pg.image(data, title="Simplest possible image example")
if __name__ == '__main__':
pg.exec()
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@examples@CLIexample.py@.PATH_END.py
|
{
"filename": "Hamiltonian_second_derivs_playground.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/in_progress-SEOBNR/SEOBNR/Hamiltonian_second_derivs_playground.py",
"type": "Python"
}
|
# NRPy+ code to generate first derivatives of the SEOBNRv3 Hamiltonian from a list of numerical expressions computing
# said Hamiltonian. Originally written by Zach Etienne; edited and commented by Tyler Knowles.
from outputC import outputC,lhrh,superfast_uniq # NRPy+: Core C code output module
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import sys # Python module for multiplatform OS-related functions
# simplify_deriv() simplifies derivative expressions by removing terms equal to zero.
def simplify_deriv(lhss_deriv, rhss_deriv):
    """Remove derivative expressions that are identically zero.

    Any right-hand side equal to zero is dropped from the output, and every
    later expression that references the corresponding left-hand-side symbol
    has that symbol replaced by 0 (which may cascade and zero out further
    expressions, exactly as the original sequential pass did).

    Parameters
    ----------
    lhss_deriv, rhss_deriv : list
        Parallel lists of SymPy left-hand-side symbols and right-hand-side
        expressions.

    Returns
    -------
    (list, list)
        The simplified parallel lists, with zero entries removed.
    """
    zero = sp.sympify(0)
    zero_names = set()  # string names of LHS symbols whose RHS reduced to zero
    lhss_deriv_simp = []
    rhss_deriv_simp = []
    for lhs, rhs in zip(lhss_deriv, rhss_deriv):
        # Substitute 0 for any symbol already known to be zero.
        if zero_names:
            for var in rhs.free_symbols:
                if str(var) in zero_names:
                    rhs = rhs.subs(var, 0)
        if rhs == zero:
            # Record the zero so later references are eliminated, and drop
            # this entry from the output (replaces the original's fragile
            # delete-with-index-offset bookkeeping).
            zero_names.add(str(lhs))
        else:
            lhss_deriv_simp.append(lhs)
            rhss_deriv_simp.append(rhs)
    # Return simplified derivative expressions.
    return lhss_deriv_simp, rhss_deriv_simp
# deriv_onevar() replaces variable derivatives with 1 or 0 depending on which partial derivative is computed. For
# example, pass 'xprm=1' to replace each instance of 'xprm' with 1 and 'qprm' with 0 for each q in (y,z,p1,p2,p3,S1x,
# S1y,S1z,S2x,S2y,S2z). This produces expressions which compute the partial derivative of the Hamiltonian with respect
# to x.
def deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=0, p3prm=0, S1xprm=0, S1yprm=0,
                 S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0):
    """Specialize derivative expressions to one partial derivative.

    Exactly one of the twelve ``*prm`` flags must be 1 (the variable being
    differentiated with respect to); all others must be 0.  Each ``qprm``
    symbol appearing in the right-hand sides is replaced by its flag value,
    and the result is simplified with simplify_deriv().

    Parameters
    ----------
    lhss_deriv, rhss_deriv : list
        Parallel lists of SymPy LHS symbols and RHS derivative expressions.
    xprm, ..., S2zprm : int
        0/1 flags selecting the differentiation variable.

    Returns
    -------
    (list, list)
        Simplified parallel lists of LHS symbols and RHS expressions.
    """
    # Map each primed-symbol name to the 0/1 value it should take on; this
    # replaces the original twelve-branch if/elif chain with one lookup.
    prm_values = {
        "xprm": xprm, "yprm": yprm, "zprm": zprm,
        "p1prm": p1prm, "p2prm": p2prm, "p3prm": p3prm,
        "S1xprm": S1xprm, "S1yprm": S1yprm, "S1zprm": S1zprm,
        "S2xprm": S2xprm, "S2yprm": S2yprm, "S2zprm": S2zprm,
    }
    if sum(prm_values.values()) != 1:
        print("deriv_onevar() cannot take more than one derivative at a time!")
        sys.exit()
    # Work on shallow copies so the caller's lists are not mutated.
    lhss_deriv_new = list(lhss_deriv)
    rhss_deriv_new = list(rhss_deriv)
    # Replace each instance of 'qprm', q in (x,y,z,p1,p2,p3,S1x,S1y,S1z,S2x,
    # S2y,S2z), with either 0 or 1.
    for i in range(len(rhss_deriv_new)):
        for var in rhss_deriv_new[i].free_symbols:
            value = prm_values.get(str(var))
            if value is not None:
                rhss_deriv_new[i] = rhss_deriv_new[i].subs(var, value)
    # Simplify the derivative expressions with simplify_deriv().
    lhss_deriv_simp, rhss_deriv_simp = simplify_deriv(lhss_deriv_new, rhss_deriv_new)
    # Return simplified derivative expression.
    return lhss_deriv_simp, rhss_deriv_simp
# replace_numpy_funcs() replaces specific SymPy function names with the corresponding NumPy function names.
def replace_numpy_funcs(expression):
    """Prefix bare math-function calls in *expression* with the ``sp.`` namespace.

    The input is stringified, then each of ``sqrt(``, ``Abs(``, ``log(`` and
    ``sign(`` is rewritten to its ``sp.``-qualified form, in that order.
    """
    text = str(expression)
    for bare in ("sqrt(", "Abs(", "log(", "sign("):
        text = text.replace(bare, "sp." + bare)
    return text
# output_H_sec_derivs() is the main wrapper function for computing the SEOBNRv3 Hamiltonian H and the twelve first
# partial derivatives of H with respect to x, y, z, p1, p2, p3, S1x, S1y, S1z, S2x, S2y, S2z.
# TylerK: for now, only outputs dHdx, dHdpy, and dHdpz for initial condition root-finding!
def output_H_sec_derivs():
    """Generate code for the x partial derivative of the SEOBNR dHdx expressions.

    Reads SEOBNR_Playground_Pycodes/dHdx.txt (SymPy-syntax expressions),
    differentiates each expression with respect to a formal variable ``xx``,
    specializes the result to the partial derivative with respect to x, and
    writes SEOBNR_Playground_Pycodes/d2Hdx2.py defining ``compute_d2Hdx2()``.
    C-syntax output is also accumulated in ``outstring`` along the way.
    """
    # Open and read the file of numerical expressions (written in SymPy syntax) computing the SEOBNRv3 Hamiltonian.
    #f = open("SEOBNR/Hamstring.txt", 'r')
    f = open("SEOBNR_Playground_Pycodes/dHdx.txt", 'r')
    Hamstring = str(f.read())
    f.close()
    # Split Hamstring by carriage returns.
    Hamterms = Hamstring.splitlines()
    # Create 'lr' array to store each left-hand side and right-hand side of Hamstring as strings.
    lr = []
    # Loop over each line in Hamstring to separate the left- and right-hand sides.
    for i in range(len(Hamterms)):
        # Ignore lines with 2 or fewer characters and those starting with #
        if len(Hamterms[i]) > 2 and Hamterms[i][0] != "#":
            # Split each line by its equals sign.
            splitHamterms = Hamterms[i].split("=")
            # Append terms to the 'lr' array, removing spaces, "sp." prefixes, and replacing Lambda->Lamb (Lambda is a
            # protected keyword)
            lr.append(lhrh(lhs=splitHamterms[0].replace(" ", "").replace("Lambda", "Lamb").replace("prm", ""),
                           rhs=splitHamterms[1].replace(" ", "").replace("sp.", "").replace("Lambda", "Lamb").replace("prm", "")))
    # Declare the symbol 'xx', which we use to denote each left-hand side as a function
    xx = sp.Symbol('xx')
    # Create arrays to store simplified left- and right-hand expressions, as well as left-hand sides designated as
    # functions.
    func = []
    lhss = []
    rhss = []
    # Affix '(xx)' to each left-hand side as a function designation; separate and simplify left- and right-hand sides
    # of the numerical expressions.
    for i in range(len(lr)):
        func.append(sp.sympify(sp.Function(lr[i].lhs)(xx)))
        lhss.append(sp.sympify(lr[i].lhs))
        rhss.append(sp.sympify(lr[i].rhs))
    # Create array for and generate a list of all the "free symbols" in the right-hand side expressions.
    full_symbol_list_with_dups = []
    for i in range(len(lr)):
        for var in rhss[i].free_symbols:
            full_symbol_list_with_dups.append(var)
    # Remove all duplicated "free symbols" from the right-hand side expressions.
    full_symbol_list = superfast_uniq(full_symbol_list_with_dups)
    # Declare input constants.
    m1, m2, eta, KK, k0, k1, dSO, dSS = sp.symbols("m1 m2 eta KK k0 k1 dSO dSS", real=True)
    tortoise, EMgamma = sp.symbols("tortoise EMgamma", real=True)
    input_constants = [m1, m2, eta, KK, k0, k1, dSO, dSS, tortoise, EMgamma]
    # Derivatives of input constants will always be zero, so remove them from the full_symbol_list.
    for inputconst in input_constants:
        for symbol in full_symbol_list:
            if str(symbol) == str(inputconst):
                full_symbol_list.remove(symbol)
    # Add symbols to the function list and replace right-hand side terms with their function equivalent.
    full_function_list = []
    for symb in full_symbol_list:
        func = sp.sympify(sp.Function(str(symb))(xx))
        full_function_list.append(func)
        for i in range(len(rhss)):
            for var in rhss[i].free_symbols:
                if str(var) == str(symb):
                    rhss[i] = rhss[i].subs(var, func)
    # Create left- and right-hand side 'deriv' arrays
    lhss_deriv = []
    rhss_deriv = []
    # Differentiate with respect to xx, remove '(xx)', and replace xx with 'prm' notation.
    for i in range(len(rhss)):
        lhss_deriv.append(sp.sympify(str(lhss[i]) + "prm"))
        newrhs = sp.sympify(
            str(sp.diff(rhss[i], xx)).replace("(xx)", "").replace(", xx", "prm").replace("Derivative", ""))
        rhss_deriv.append(newrhs)
    # Simplify derivative expressions with simplify_deriv()
    lhss_deriv_simp, rhss_deriv_simp = simplify_deriv(lhss_deriv, rhss_deriv)
    lhss_deriv = lhss_deriv_simp
    rhss_deriv = rhss_deriv_simp
    # Generate partial derivatives with respect to each of the twelve input variables
    lhss_deriv_x, rhss_deriv_x = deriv_onevar(lhss_deriv, rhss_deriv, xprm=1, yprm=0, zprm=0, p1prm=0, p2prm=0, p3prm=0,
                                              S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0)
    # The remaining eleven partial derivatives are disabled for now; only dHdx
    # is needed for initial-condition root-finding.
    #lhss_deriv_y, rhss_deriv_y = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=1, zprm=0, p1prm=0, p2prm=0, p3prm=0,
    #S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0)
    #lhss_deriv_z, rhss_deriv_z = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=1, p1prm=0, p2prm=0, p3prm=0,
    #S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0)
    #lhss_deriv_p1, rhss_deriv_p1 = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=1, p2prm=0,
    #p3prm=0, S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0)
    #lhss_deriv_p2, rhss_deriv_p2 = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=1,
    #p3prm=0, S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0)
    #lhss_deriv_p3, rhss_deriv_p3 = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=0,
    #p3prm=1, S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0)
    #lhss_deriv_S1x, rhss_deriv_S1x = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=0,
    #p3prm=0, S1xprm=1, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0)
    #lhss_deriv_S1y, rhss_deriv_S1y = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=0,
    #p3prm=0, S1xprm=0, S1yprm=1, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=0)
    #lhss_deriv_S1z, rhss_deriv_S1z = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=0,
    #p3prm=0, S1xprm=0, S1yprm=0, S1zprm=1, S2xprm=0, S2yprm=0, S2zprm=0)
    #lhss_deriv_S2x, rhss_deriv_S2x = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=0,
    #p3prm=0, S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=1, S2yprm=0, S2zprm=0)
    #lhss_deriv_S2y, rhss_deriv_S2y = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=0,
    #p3prm=0, S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=1, S2zprm=0)
    #lhss_deriv_S2z, rhss_deriv_S2z = deriv_onevar(lhss_deriv, rhss_deriv, xprm=0, yprm=0, zprm=0, p1prm=0, p2prm=0,
    #p3prm=0, S1xprm=0, S1yprm=0, S1zprm=0, S2xprm=0, S2yprm=0, S2zprm=1)
    # Prepare to output derivative expressions in C syntax
    outstring = "/* SEOBNR Hamiltonian expression: */\n"
    outstringsp = ""
    outsplhs = []
    outsprhs = []
    for i in range(len(lr)):
        outstring += outputC(sp.sympify(lr[i].rhs), lr[i].lhs, "returnstring",
                             "outCverbose=False,includebraces=False,CSE_enable=False")
        outstringsp += lr[i].lhs + " = " + lr[i].rhs + "\n"
        outsplhs.append(sp.sympify(lr[i].lhs))
        outsprhs.append(sp.sympify(lr[i].rhs))
    outstring += "\n\n\n/* SEOBNR \partial_x H expression: */\n"
    for i in range(len(lhss_deriv_x)):
        outstring += outputC(rhss_deriv_x[i], str(lhss_deriv_x[i]), "returnstring",
                             "outCverbose=False,includebraces=False,CSE_enable=False")
        outstringsp += str(lhss_deriv_x[i]) + " = " + str(rhss_deriv_x[i]) + "\n"
        outsplhs.append(lhss_deriv_x[i])
        outsprhs.append(rhss_deriv_x[i])
    # Write the Python module computing the second derivative d2H/dx2, with
    # SymPy function names mapped to their NumPy equivalents.
    with open("SEOBNR_Playground_Pycodes/d2Hdx2.py", "w") as file:
        file.write("""from __future__ import division
import numpy as np
def compute_d2Hdx2(m1, m2, eta, x, y, z, p1, p2, p3, S1x, S1y, S1z, S2x, S2y, S2z, KK, k0, k1, dSO, dSS, tortoise, EMgamma):
""")
        for i in range(len(lr) - 1):
            file.write(" " + lr[i].lhs + " = " + str(lr[i].rhs).replace("Rational(", "np.true_divide(").replace("sqrt(", "np.sqrt(").replace("log(", "np.log(").replace("sign(", "np.sign(").replace("Abs(", "np.abs(").replace("pi", "np.pi") + "\n")
        for i in range(len(lhss_deriv_x)):
            file.write(" " + str(lhss_deriv_x[i]).replace("prm", "prm_x") + " = " + replace_numpy_funcs(rhss_deriv_x[i]).replace("prm", "prm_x").replace("sp.sqrt(","np.sqrt(").replace("sp.log(","np.log(").replace("sp.sign(","np.sign(").replace("sp.Abs(", "np.abs(") + "\n")
        file.write(" return np.array([Hreal_xprm_x])")
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@in_progress-SEOBNR@SEOBNR@Hamiltonian_second_derivs_playground.py@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/splom/marker/colorbar/tickformatstop/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``name`` property of
    ``splom.marker.colorbar.tickformatstop``.
    """

    def __init__(
        self,
        plotly_name="name",
        parent_name="splom.marker.colorbar.tickformatstop",
        **kwargs,
    ):
        # "colorbars" is the default edit type; a caller-supplied value wins.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(NameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@splom@marker@colorbar@tickformatstop@_name.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "folsomcp/ZDIpy",
"repo_path": "ZDIpy_extracted/ZDIpy-main/core/__init__.py",
"type": "Python"
}
|
"""
Functions used by the main ZDI code
"""
|
folsomcpREPO_NAMEZDIpyPATH_START.@ZDIpy_extracted@ZDIpy-main@core@__init__.py@.PATH_END.py
|
{
"filename": "_legendwidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/streamtube/_legendwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``streamtube.legendwidth`` (a non-negative number)."""

    def __init__(self, plotly_name="legendwidth", parent_name="streamtube", **kwargs):
        # Pull the overridable defaults out of kwargs before delegating to
        # the base NumberValidator.
        edit_type = kwargs.pop("edit_type", "style")
        minimum = kwargs.pop("min", 0)
        super(LegendwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@streamtube@_legendwidth.py@.PATH_END.py
|
{
"filename": "psf_calculate.py",
"repo_name": "Yash-10/beta-sgp",
"repo_path": "beta-sgp_extracted/beta-sgp-master/psf/psf_calculate.py",
"type": "Python"
}
|
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
class PSF:
    """Point-spread-function model reconstructed from a DIAPL text dump."""

    def __init__(self, txt_file):
        """Initialize a `PSF` object.

        Parameters
        ----------
        txt_file: str
            A file containing PSF attributes obtained from DIAPL, one number
            per line.

        Notes
        -----
        For the structure of `txt_file`, see for example
        `psf/examples/psf_ccfbrd210048.bin.txt`
        """
        self.ldeg = 2  # local polynomial degree used by calc_psf_pix()
        self.sdeg = 1  # spatial polynomial degree used by init_psf()
        with open(txt_file) as f:
            data = [float(l.rstrip("\n")) for l in f]
        self.hw = int(data[0])          # PSF half-width in pixels
        self.ndeg_spat = int(data[1])   # spatial polynomial degree (file)
        self.ndeg_local = int(data[2])  # local polynomial degree (file)
        self.ngauss = int(data[3])      # number of Gaussian components
        self.recenter = data[4]
        self.cos = data[5]              # rotation of the local coordinates
        self.sin = data[6]
        self.ax = data[7]               # elliptical Gaussian axis terms
        self.ay = data[8]
        self.sigma_inc = data[9]        # width ratio between Gaussians
        self.sigma_mscale = data[10]
        self.fitrad = data[11]
        self.x_orig = data[12]          # spatial expansion origin
        self.y_orig = data[13]
        # The vector coefficients used to build the PSF model.
        self.vec_coeffs = data[14:]
        # Total number of coefficients.  Integer division keeps ntot an int
        # (the original true division produced a float); (n+1)(n+2) is always
        # even, so the division is exact.
        self.ntot = self.ngauss * (self.ndeg_local + 1) * (self.ndeg_local + 2) // 2
        self.ntot *= (self.ndeg_spat + 1) * (self.ndeg_spat + 2) // 2

    @property
    def coeffs(self):
        """The raw vector coefficients read from the DIAPL file."""
        return self.vec_coeffs

    def calc_psf_pix(self, coeffs, x, y):
        """Calculate the PSF value at local pixel offset (x, y).

        Parameters
        ----------
        coeffs: list
            List of PSF vector coefficients.  NOTE(review): kept for
            interface compatibility but unused — the model always reads
            ``self.vec_coeffs``, matching every call site in this file.
        x, y: int
            Local coordinates used to describe the PSF, in the range
            [-psf.hw/2, +psf.hw/2].

        Notes
        -----
        References:
        [1] Pych, W. (2013). Difference Image Analysis Package (DIAPL2).
        Specifically, the `psf_core.c` script from the `phot` program was used.
        """
        # Rotate the local coordinates and form the elliptical Gaussian
        # exponent argument.
        x1 = self.cos * x - self.sin * y
        y1 = self.sin * x + self.cos * y
        rr = self.ax * x1 * x1 + self.ay * y1 * y1
        psf_pix = 0.0
        icomp = 0
        for igauss in range(self.ngauss):
            f = np.exp(rr)
            a1 = 1.0
            # Sum the bivariate polynomial (degree ldeg) times the Gaussian.
            for m in range(self.ldeg + 1):
                a2 = 1.0
                for n in range(self.ldeg - m + 1):
                    psf_pix += float(self.vec_coeffs[icomp]) * f * a1 * a2
                    icomp += 1
                    a2 *= y
                a1 *= x
            # Each successive Gaussian component is wider by sigma_inc.
            rr *= self.sigma_inc * self.sigma_inc
        return psf_pix

    def get_psf_mat(self):
        """Return (and cache on ``self.psf_mat``) the PSF as a 31x31 matrix.

        Notes
        -----
        x and y ranges lie in [-15, 15], i.e. 31x31 pixels.  NOTE(review):
        the 31/15 constants are hard-coded and the index formula assumes
        self.hw == 15; confirm before using other half-widths.
        """
        pix_locs = []
        psf_mat = np.zeros(961)  # 31*31 flattened PSF image
        for i in range(-15, 15 + 1):
            for j in range(-15, 15 + 1):
                pix_locs.append((i, j))
                idx = j + self.hw + 31 * (i + self.hw)
                psf_mat[idx] = self.calc_psf_pix(self.vec_coeffs, j, i)
        self.psf_mat = psf_mat.reshape(31, 31)
        return self.psf_mat

    def show_psf_mat(self):
        """Shows the PSF as an image."""
        mat = self.get_psf_mat()
        plt.matshow(mat, origin='lower')
        plt.colorbar()
        plt.show()

    def check_symmetric(self, coeffs, rtol=1e-05, atol=1e-08):
        """Check if the matrix `coeffs` is symmetric. A helper function.

        Parameters
        ----------
        coeffs: 2D array.
        """
        return np.allclose(coeffs, coeffs.T, rtol=rtol, atol=atol)

    def normalize_psf_mat(self):
        """Normalize the 2D PSF so that all pixels sum up to 1.

        This could be helpful if the brightness of the convolved image must
        not be changed. A helper function.
        """
        mat = self.get_psf_mat()
        mat = mat / np.sum(mat)
        return mat

    def init_psf(self, xpsf, ypsf):
        """Calculate the spatially interpolated local PSF coefficients.

        Parameters
        ----------
        xpsf, ypsf: float
            The `xfit` and `yfit` point of a PSF object i.e. a single star.

        Notes
        -----
        Currently this cannot be used since we don't yet have a candidate
        PSF object database.
        """
        # Bug fix: use integer division so ncomp is an int; the original
        # true division made `[0.0] * ncomp` and `range(ncomp)` raise
        # TypeError under Python 3.
        ncomp = self.ngauss * (self.ldeg + 1) * (self.ldeg + 2) // 2
        local_vec = [0.0] * ncomp
        itot = 0
        a1 = 1.0
        # Evaluate the spatial polynomial (degree sdeg) at the star position,
        # accumulating the interpolated local coefficients.
        for m in range(self.sdeg + 1):
            a2 = 1.0
            for n in range(self.sdeg - m + 1):
                for icomp in range(ncomp):
                    local_vec[icomp] += self.vec_coeffs[itot] * a1 * a2
                    itot += 1
                a2 *= ypsf - self.y_orig
            a1 *= xpsf - self.x_orig
if __name__ == "__main__":
### Draw PSF subplots for visualization ###
### PSF bin txt files must be present in the current working directory ###
mats = []
titles = []
for i, file_ in enumerate(glob.glob("psf*.txt")):
if file_.endswith(".txt"):
print(file_)
psf = PSF(file_)
mat = psf.normalize_psf_mat()
fits.writeto(file_.split(".")[0]+"_img.fits", mat, overwrite=True)
mats.append(psf.get_psf_mat())
titles.append(file_.split(".")[0])
# # print(len(mats))
# fig, ax = plt.subplots(3, 3, figsize=(5, 5))
# fig.tight_layout()
# ax[0, 0].imshow(mats[0], cmap="gray")
# ax[0, 0].set_title(titles[0])
# ax[0, 1].imshow(mats[1], cmap="gray")
# ax[0, 1].set_title(titles[1])
# ax[0, 2].imshow(mats[2], cmap="gray")
# ax[0, 2].set_title(titles[2])
# ax[1, 0].imshow(mats[3], cmap="gray")
# ax[1, 0].set_title(titles[3])
# ax[1, 1].imshow(mats[2], cmap="gray")
# ax[1, 1].set_title(titles[4])
# ax[1, 2].imshow(mats[3], cmap="gray")
# ax[1, 2].set_title(titles[5])
# ax[2, 0].imshow(mats[3], cmap="gray")
# ax[2, 0].set_title(titles[6])
# ax[2, 1].imshow(mats[2], cmap="gray")
# ax[2, 1].set_title(titles[7])
# ax[2, 2].imshow(mats[3], cmap="gray")
# ax[2, 2].set_title(titles[8])
# plt.show()
# # Example: The image corresponding to this PSF had the worst FWHM estimate from fwhms.bash (~11)
# plt.imshow(PSF("psfs/psf_ccfbvb230022.bin.txt").get_psf_mat(), cmap="gray")
# plt.show()
|
Yash-10REPO_NAMEbeta-sgpPATH_START.@beta-sgp_extracted@beta-sgp-master@psf@psf_calculate.py@.PATH_END.py
|
{
"filename": "_gridcolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/ternary/baxis/_gridcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class GridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``gridcolor`` property of ``layout.ternary.baxis``."""

    def __init__(
        self, plotly_name="gridcolor", parent_name="layout.ternary.baxis", **kwargs
    ):
        # Pull validator-specific options out of kwargs first, falling
        # back to the defaults used throughout the generated validators.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "style")
        super(GridcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@ternary@baxis@_gridcolor.py@.PATH_END.py
|
{
"filename": "__main__.py",
"repo_name": "robvanholstein/IRDAP",
"repo_path": "IRDAP_extracted/IRDAP-master/irdap/__main__.py",
"type": "Python"
}
|
'''
This file contains the top-level code of IRDAP that allows the user to execute
it.
IRDAP is a Python package to accurately reduce SPHERE-IRDIS polarimetric data.
Copyright (C) 2019 R.G. van Holstein
Full documentation: https://irdap.readthedocs.io
Feedback, questions, comments: rob.vanholstein@eso.org
When publishing data reduced with IRDAP, please cite van Holstein et al.
(2020): https://ui.adsabs.harvard.edu/abs/2020A%26A...633A..64V/abstract.
For data in pupil-tracking mode please additionally cite van Holstein et al.
(2017): https://ui.adsabs.harvard.edu/abs/2017SPIE10400E..15V.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
# Import packages
import argparse
import os
import sys
import urllib
import urllib.request
import webbrowser
from argparse import RawTextHelpFormatter

from .version import __version__
from .irdap import run_demo
from .irdap import create_overview_headers_main
from .irdap import make_config
from .irdap import run_pipeline
from .irdap import mean_combine_images
###############################################################################
# main
###############################################################################
def main(args=None):
    '''
    Main function to run IRDAP

    Input:
        args: user input arguments

    File written by Rob van Holstein
    Function status: verified
    '''

    if args is None:
        # Obtain arguments that user put in
        args = sys.argv[1:]

    # Check if at least one argument is given
    if len(args) == 0:
        print('\nNo arguments were provided. Please check the help message by typing\n"irdap --help".')

    # Define the parser including the description and epilog
    parser = argparse.ArgumentParser(description='IRDAP (IRDIS Data reduction for Accurate Polarimetry) is a pipeline for\n' +
                                     'accurate reduction of SPHERE-IRDIS polarimetric data.\n\n' +
                                     'To run IRDAP, create a directory (e.g. "/home/T_Cha_2016-02-20") containing a\n' +
                                     'subdirectory called "raw" in which you place the raw FITS-files. Then in the\n' +
                                     'terminal navigate to the directory (e.g. "cd /home/T_Cha_2016-02-20") and type\n' +
                                     '"irdap --makeconfig" to create a default configuration file "config.conf" in\n' +
                                     'this directory. You can then adjust the parameters in the configuration file\n' +
                                     'with a text editor. Finally in the terminal type "irdap --run" to perform the\n' +
                                     'data reduction.\n\n' +
                                     'The reduced images of two or more reductions can be mean-combined by typing\n' +
                                     '"irdap --meancombine path1 path2 ... pathx", where the space-separated paths\n' +
                                     'are absolute paths to the main directories of the reductions,\n' +
                                     'e.g. "irdap --meancombine /home/T_Cha_2016-02-20 /home/T_Cha_2016-02-21".\n' +
                                     'The mean-combined images will be written to the current working directory\n' +
                                     'of the terminal.\n\n' +
                                     'If this is the first time you use IRDAP, it is recommended to run the demo\n' +
                                     'first by using the terminal to navigate to a directory of your choice and\n' +
                                     'typing irdap --demo. Note that an internet connection is required as a small\n' +
                                     'amount of raw data needs to be downloaded.\n\n' +
                                     'When publishing data reduced with IRDAP, please cite van Holstein et al.\n' +
                                     '(2020): https://ui.adsabs.harvard.edu/abs/2020A%26A...633A..64V/abstract.\n' +
                                     'For data in pupil-tracking mode please additionally cite van Holstein et al.\n' +
                                     '(2017): https://ui.adsabs.harvard.edu/abs/2017SPIE10400E..15V.',
                                     epilog='Full documentation: https://irdap.readthedocs.io\n' +
                                            'Feedback, questions, comments: rob.vanholstein@eso.org\n\n' +
                                            'IRDAP Copyright (C) 2019 R.G. van Holstein',
                                     formatter_class=RawTextHelpFormatter)

    # Add parser arguments
    parser.add_argument('-v', '--version', action='store_true',
                        help='show program\'s version number')
    parser.add_argument('-w', '--website', action='store_true',
                        help='open IRDAP online documentation in web browser')
    parser.add_argument('-p', '--print', action='store_true',
                        help='toggle printing of log statements in the terminal')
    parser.add_argument('-d', '--demo', action='store_true',
                        help='run pipeline in current working directory with example\ndata of the circumstellar disk of T Cha (1 HWP cycle)')
    parser.add_argument('-o', '--headers', action='store_true',
                        help='create overview of relevant headers of FITS-files in raw\nsubdirectory')
    parser.add_argument('-c', '--makeconfig', action='store_true',
                        help='create default configuration file in current working\ndirectory')
    parser.add_argument('-r', '--run', action='store_true',
                        help='run pipeline using configuration file in current working\ndirectory')
    parser.add_argument('-m', '--meancombine', nargs='+', type=str, metavar='path',
                        help='mean-combine images of two or more reductions. The\n' \
                             'absolute paths to the main directories of the reductions\n' \
                             'should be supplied as arguments and be separated by\n' \
                             'spaces.')

    # Use current working directory (of terminal) as path of main directory of reduction
    path_main_dir = os.getcwd()

    # Evaluate and act upon user arguments.
    # NOTE: --print is deliberately a separate if-chain from
    # --version/--website, so it can be combined with them.
    args = parser.parse_args()

    if args.version:
        # Print the current version
        print('\nIRDAP version %s' % __version__)
    elif args.website:
        webbrowser.open_new_tab('https://irdap.readthedocs.io')

    if args.print:
        # Toggle printing in terminal by flipping the True/False flag
        # stored next to the package code. Context managers guarantee
        # the file handles are closed even if reading/writing fails.
        path_file = os.path.join(os.path.dirname(__file__), 'print_in_terminal.txt')
        with open(path_file, 'r') as f:
            current_value = f.read()
        if current_value == 'True':
            print('\nIRDAP will not print log statements in the terminal.')
            with open(path_file, 'w') as f:
                f.write('False')
        elif current_value == 'False':
            print('\nIRDAP will print log statements in the terminal.')
            with open(path_file, 'w') as f:
                f.write('True')
        else:
            print('\nThe file ' + path_file + ' should contain either the word \'True\' or \'False\'.')
    elif args.demo:
        # Run example reduction
        run_demo(path_main_dir)
    elif args.headers:
        # Create an overview of relevant headers
        create_overview_headers_main(path_main_dir)
    elif args.makeconfig:
        # Create a default configuration file
        make_config(path_main_dir)
    elif args.run:
        # Run the pipeline
        run_pipeline(path_main_dir)
    elif args.meancombine:
        # Mean-combine the images of two or more reductions
        path_read_dirs = args.meancombine
        if len(path_read_dirs) == 1:
            print('\nPlease provide at least two absolute paths to directories containing reduced data to be combined.')
        else:
            mean_combine_images(path_main_dir, path_read_dirs)

    # Check if latest version of IRDAP is used and if not suggest updating it
    url_github_version = 'https://raw.githubusercontent.com/robvanholstein/IRDAP/master/irdap/version.py'
    try:
        version_string = str(urllib.request.urlopen(url_github_version).readlines()[0], 'utf-8')
        version_github = version_string[version_string.rfind('=') + 1:].replace(' ', '').replace('\'', '')
    except Exception:
        # Any failure (no network, changed URL, parse error) is non-fatal:
        # just skip the update suggestion. A bare "except:" would also have
        # swallowed KeyboardInterrupt/SystemExit, so catch Exception only.
        version_github = ''

    if version_github != '':
        if __version__ != version_github:
            print('\n\n\n\n\n\n\n\nA newer version of IRDAP is available (v' + __version__ + ' --> v' + version_github +
                  '). Please consider\n' +
                  'updating IRDAP by typing "pip install irdap --upgrade" in the terminal.')
###############################################################################
# Run the function main
###############################################################################
# Run function when called, i.e. in the terminal one can just write "irdap --run" i.o. "python -m irdap --run"
if __name__ == "__main__":
    # Entry point when the package is executed as "python -m irdap ...";
    # the installed "irdap" console script calls main() directly.
    main()
|
robvanholsteinREPO_NAMEIRDAPPATH_START.@IRDAP_extracted@IRDAP-master@irdap@__main__.py@.PATH_END.py
|
{
"filename": "test_perf.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/libs/data/benchmarks_ut/test_perf.py",
"type": "Python"
}
|
import yatest
def test(metrics):
    """Run the data-library benchmark binary and record its results."""
    benchmark_path = "catboost/libs/data/benchmarks/benchmarks"
    result = yatest.common.execute_benchmark(benchmark_path)
    metrics.set_benchmark(result)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@libs@data@benchmarks_ut@test_perf.py@.PATH_END.py
|
{
"filename": "_ticktextsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/marker/colorbar/_ticktextsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for ``ticktextsrc`` on ``scattergl.marker.colorbar``."""

    def __init__(
        self,
        plotly_name="ticktextsrc",
        parent_name="scattergl.marker.colorbar",
        **kwargs,
    ):
        # "none" is the standard edit type for *src (data-source)
        # properties; callers may override it through kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(TicktextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@marker@colorbar@_ticktextsrc.py@.PATH_END.py
|
{
"filename": "simple_structured_grid.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/examples/mayavi/mlab/simple_structured_grid.py",
"type": "Python"
}
|
"""
An example creating a structured grid data set from numpy arrays using TVTK
and visualizing it using mlab.
In this example, we create a structured-grid data set: we describe
data, both scalar and vector, lying on a structured-grid, ie a grid where
each vertex has 6 neighbors. For this we directly create a
StructuredGrid tvtk object, rather than using the mlab.pipeline source
functions, as it gives us more control.
To visualize the resulting dataset, we apply several modules, using the
mlab.pipeline interface (see :ref:`controlling-the-pipeline-with-mlab-scripts`)
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008, Prabhu Ramachandran.
# License: BSD Style.
from numpy import mgrid, empty, sin, pi
from tvtk.api import tvtk
from mayavi import mlab
# Generate some points on a curvilinear grid: 11 x 13 x 6 vertices.
x, y, z = mgrid[1:6:11j, 0:4:13j, 0:3:6j]
base = x[..., 0] + y[..., 0]
# Some interesting z values: each z-layer is a scaled copy of the base
# surface, so the grid is warped in the z direction.
for i in range(z.shape[2]):
    z[..., i] = base * 0.25 * i
# The actual points, packed as (..., 3) coordinate triples.
pts = empty(z.shape + (3,), dtype=float)
pts[..., 0] = x
pts[..., 1] = y
pts[..., 2] = z
# Simple scalars: squared distance from the origin.
scalars = x * x + y * y + z * z
# Some vectors (an arbitrary swirling field).
vectors = empty(z.shape + (3,), dtype=float)
vectors[..., 0] = (4 - y * 2)
vectors[..., 1] = (x * 3 - 12)
vectors[..., 2] = sin(z * pi)
# We reorder the points, scalars and vectors so this is as per VTK's
# requirement of x first, y next and z last, then flatten to an
# (n_points, 3) array. The in-place .shape assignment (rather than
# reshape) keeps the arrays contiguous after the .copy().
pts = pts.transpose(2, 1, 0, 3).copy()
pts.shape = pts.size // 3, 3
scalars = scalars.T.copy()
vectors = vectors.transpose(2, 1, 0, 3).copy()
vectors.shape = vectors.size // 3, 3
# Create the dataset; dimensions must match the original grid shape.
sg = tvtk.StructuredGrid(dimensions=x.shape, points=pts)
sg.point_data.scalars = scalars.ravel()
sg.point_data.scalars.name = 'temperature'
sg.point_data.vectors = vectors
sg.point_data.vectors.name = 'velocity'
# That's it!

# Now visualize the data: three axis-aligned grid planes, an
# iso-surface of the scalars, and masked vector glyphs.
d = mlab.pipeline.add_dataset(sg)
gx = mlab.pipeline.grid_plane(d)
gy = mlab.pipeline.grid_plane(d)
gy.grid_plane.axis = 'y'
gz = mlab.pipeline.grid_plane(d)
gz.grid_plane.axis = 'z'
iso = mlab.pipeline.iso_surface(d)
iso.contour.maximum_contour = 75.0
vec = mlab.pipeline.vectors(d)
vec.glyph.mask_input_points = True
vec.glyph.glyph.scale_factor = 1.5
mlab.show()
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@examples@mayavi@mlab@simple_structured_grid.py@.PATH_END.py
|
{
"filename": "test_header_standard.py",
"repo_name": "ledatelescope/bifrost",
"repo_path": "bifrost_extracted/bifrost-master/test/test_header_standard.py",
"type": "Python"
}
|
# Copyright (c) 2016-2022, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""@package test_header_standard
This file tests the header standard"""
import unittest
import numpy as np
from bifrost.header_standard import enforce_header_standard
class TestHeaderStandardHandlesGoodHeaders(unittest.TestCase):
    """Build headers that satisfy the standard and verify they are
    accepted. Each test only assembles ``self.header_dict``; the shared
    tearDown performs the actual acceptance assertion."""

    def setUp(self):
        """Start each test from an empty header dictionary."""
        self.header_dict = {}

    def tearDown(self):
        """Every header assembled by a test must be accepted."""
        self.assertTrue(
            enforce_header_standard(self.header_dict))

    def test_simple_header(self):
        """All required keys present, plain-Python values in range."""
        self.header_dict = dict(
            nchans=1, nifs=1, nbits=8, fch1=100.0, foff=1e-5,
            tstart=1e5, tsamp=1e-5)

    def test_numpy_types(self):
        """Same values, but several carried as numpy scalar types."""
        self.header_dict = dict(
            nchans=np.int64(1), nifs=1, nbits=8,
            fch1=np.float64(100.0), foff=np.float64(1e-5),
            tstart=1e5, tsamp=np.float64(1e-5))

    def test_extra_parameters(self):
        """Unknown extra keys must not cause rejection."""
        self.header_dict = dict(
            nchans=1, nifs=1, nbits=8, fch1=100.0, foff=1e-5,
            tstart=1e5, tsamp=1e-5, my_extra_param=50)
class TestHeaderStandardHandlesBadHeaders(unittest.TestCase):
    """Create a bunch of headers which should not pass the
    test, and check that they do not. Each test only assembles
    ``self.header_dict``; the shared tearDown asserts rejection."""

    def setUp(self):
        """Create empty header dictionary"""
        self.header_dict = {}

    def tearDown(self):
        """Make sure the header is rejected"""
        self.assertFalse(
            enforce_header_standard(self.header_dict))

    def test_empty_header(self):
        """Don't put anything in header"""
        pass

    def test_skip_one_parameter(self):
        """Make a good header, but without foff"""
        # Bug fix: this previously assigned a LOCAL variable named
        # header_dict, so tearDown only ever checked the empty dict and
        # the missing-foff case was never actually exercised.
        self.header_dict = {
            'nchans': 1, 'nifs': 1, 'nbits': 8, 'fch1': 100.0,
            'tstart': 1e5, 'tsamp': 1e-5}

    def test_bad_nchans_types(self):
        """Put noninteger number of channels in header"""
        self.header_dict = {
            'nchans': 1.05, 'nifs': 1, 'nbits': 8, 'fch1': 100, 'foff': 1e-5,
            'tstart': 1e5, 'tsamp': 1e-5}

    def test_low_value(self):
        """Put in low value for nbits"""
        self.header_dict = {
            'nchans': 1, 'nifs': 1, 'nbits': -8, 'fch1': 100.0, 'foff': 1e-5,
            'tstart': 1e5, 'tsamp': 1e-5}

    def test_non_dict(self):
        """Puts in a non dictionary header"""
        self.header_dict = "nchans nifs nbits fch1 foff tstart"
|
ledatelescopeREPO_NAMEbifrostPATH_START.@bifrost_extracted@bifrost-master@test@test_header_standard.py@.PATH_END.py
|
{
"filename": "base.py",
"repo_name": "dwkim78/ASTRiDE",
"repo_path": "ASTRiDE_extracted/ASTRiDE-master/astride/datasets/base.py",
"type": "Python"
}
|
"""
Base IO code for all datasets
"""
from os.path import dirname
from os.path import join
from astropy.io import fits
def read_fits(filename='long.fits'):
    """
    Read a sample FITS file and return only the image part.

    Parameters
    ----------
    filename : str
        FITS filename, resolved relative to the package's "samples"
        directory.

    Returns
    -------
    data : numpy.ndarray
        Image data of the primary HDU.
    """
    module_path = dirname(__file__)
    file_path = join(module_path, 'samples', filename)
    # Context manager guarantees the file handle is closed even if
    # accessing the HDU data raises (the previous open/close pair
    # leaked the handle on error).
    with fits.open(file_path) as hdulist:
        data = hdulist[0].data
    return data
|
dwkim78REPO_NAMEASTRiDEPATH_START.@ASTRiDE_extracted@ASTRiDE-master@astride@datasets@base.py@.PATH_END.py
|
{
"filename": "arrayterator.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/typing/tests/data/pass/arrayterator.py",
"type": "Python"
}
|
from __future__ import annotations
from typing import Any
import numpy as np
# Type-checking "pass" fixture: these statements exist to exercise the
# numpy type stubs for np.lib.Arrayterator; the runtime values are unused.
AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10)
ar_iter = np.lib.Arrayterator(AR_i8)

# Attribute access must type-check.
ar_iter.var
ar_iter.buf_size
ar_iter.start
ar_iter.stop
ar_iter.step
ar_iter.shape
ar_iter.flat
ar_iter.__array__()

# Iteration protocol must type-check.
for i in ar_iter:
    pass

# Indexing forms supported by Arrayterator must type-check.
ar_iter[0]
ar_iter[...]
ar_iter[:]
ar_iter[0, 0, 0]
ar_iter[..., 0, :]
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@typing@tests@data@pass@arrayterator.py@.PATH_END.py
|
{
"filename": "test_mometrics.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/tests/maf/test_mometrics.py",
"type": "Python"
}
|
import unittest
import numpy as np
import pandas as pd
import rubin_sim.maf.metrics as metrics
class TestMoMetrics1(unittest.TestCase):
    """Tests for the basic moving-object (SSO) metrics: observation and
    night counts, observational arc length, and cometary activity."""

    def setUp(self):
        # Set up some sso_obs data to test the metrics on.
        # Note that sso_obs is a numpy recarray.
        # The expected set of columns in sso_obs is:
        # cols = ['observationStartMJD', 'night', 'fieldRA', 'fieldDec',
        #         'rotSkyPos', 'filter',
        #         'visitExposureTime', 'seeingFwhmGeom', 'fiveSigmaDepth',
        #         'solarElong',
        #         'delta', 'ra', 'dec', 'magV', 'time', 'dradt', 'ddecdt',
        #         'phase', 'solarelon',
        #         'velocity', 'magFilter', 'dmagColor', 'dmagTrail',
        #         'dmagDetect']
        # And stackers will often add
        # addCols = ['appMag', 'magLimit', 'snr', 'vis']
        # Test metrics using sso_obs for a particular object.
        # 14 observations spread over 6 distinct nights (0, 1, 5, 7, 10, 13).
        times = np.array(
            [0.1, 0.2, 0.3, 1.1, 1.3, 5.1, 7.1, 7.2, 7.3, 10.1, 10.2, 10.3, 13.1, 13.5],
            dtype="float",
        )
        sso_obs = np.recarray(
            [len(times)],
            dtype=(
                [
                    ("time", "<f8"),
                    ("ra", "<f8"),
                    ("dec", "<f8"),
                    ("appMag", "<f8"),
                    ("observationStartMJD", "<f8"),
                    ("night", "<f8"),
                    ("magLimit", "<f8"),
                    ("SNR", "<f8"),
                    ("vis", "<f8"),
                ]
            ),
        )
        sso_obs["time"] = times
        sso_obs["observationStartMJD"] = times
        sso_obs["night"] = np.floor(times)
        sso_obs["ra"] = np.arange(len(times))
        sso_obs["dec"] = np.arange(len(times))
        sso_obs["appMag"] = np.zeros(len(times), dtype="float") + 24.0
        sso_obs["magLimit"] = np.zeros(len(times), dtype="float") + 25.0
        sso_obs["SNR"] = np.zeros(len(times), dtype="float") + 5.0
        sso_obs["vis"] = np.zeros(len(times), dtype="float") + 1
        # First five observations are flagged as not visible; metrics run
        # with snr_limit=None fall back to this "vis" column.
        sso_obs["vis"][0:5] = 0
        self.sso_obs = sso_obs
        self.orb = None
        self.hval = 8.0

    def testn_obs_metric(self):
        """NObsMetric: count observations above the SNR limit (or visible)."""
        n_obs_metric = metrics.NObsMetric(snr_limit=5)
        n_obs = n_obs_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(n_obs, len(self.sso_obs["time"]))
        n_obs_metric = metrics.NObsMetric(snr_limit=10)
        n_obs = n_obs_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(n_obs, 0)
        # With snr_limit=None the "vis" flags apply, excluding the first 5.
        n_obs_metric = metrics.NObsMetric(snr_limit=None)
        n_obs = n_obs_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(n_obs, len(self.sso_obs["time"]) - 5)

    def testn_obs_no_singles_metric(self):
        """NObsNoSinglesMetric: night 5 has a single observation, dropped."""
        n_obs_no_singles_metric = metrics.NObsNoSinglesMetric(snr_limit=5)
        n_obs = n_obs_no_singles_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(n_obs, len(self.sso_obs["time"]) - 1)

    def test_n_nights_metric(self):
        """NNightsMetric: distinct nights with qualifying observations."""
        n_nights_metric = metrics.NNightsMetric(snr_limit=5)
        nnights = n_nights_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(nnights, len(np.unique(self.sso_obs["night"])))
        # vis-flagged observations remove nights 0 and 1 entirely.
        n_nights_metric = metrics.NNightsMetric(snr_limit=None)
        nnights = n_nights_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(nnights, len(np.unique(self.sso_obs["night"])) - 2)

    def test_arc_metric(self):
        """ObsArcMetric: time span between first and last usable observation."""
        arc_metric = metrics.ObsArcMetric(snr_limit=5)
        arc = arc_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(
            arc,
            self.sso_obs["observationStartMJD"][-1] - self.sso_obs["observationStartMJD"][0],
        )
        # With snr_limit=None the arc starts at the first visible obs (idx 5).
        arc_metric = metrics.ObsArcMetric(snr_limit=None)
        arc = arc_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(
            arc,
            self.sso_obs["observationStartMJD"][-1] - self.sso_obs["observationStartMJD"][5],
        )

    def test_activity_over_period_metric(self):
        """ActivityOverPeriodMetric with both cometary and Keplerian orbits."""
        # cometary orbit format ok
        orb = np.recarray(
            1,
            dtype=(
                [
                    ("objId", (str, 20)),
                    ("q", float),
                    ("e", float),
                    ("inc", float),
                    ("Omega", float),
                    ("argPeri", float),
                    ("tPeri", float),
                    ("epoch", float),
                    ("H", float),
                    ("g", float),
                ]
            ),
        )
        orb["objId"] = "NESC00001HYj"
        orb["q"] = 1.00052
        orb["e"] = 0.028514
        orb["inc"] = 0.502477
        orb["Omega"] = 50.989131
        orb["argPeri"] = 55.091685
        orb["tPeri"] = 61046.627194 - 59850
        orb["epoch"] = 60973.799216 - 59850
        orb["H"] = 35.526041
        orb["g"] = 0.15
        o = pd.DataFrame(orb)
        # Bins covering a full period (or more) must report full coverage.
        activity_period_metric = metrics.ActivityOverPeriodMetric(bin_size=360, snr_limit=5)
        activity = activity_period_metric.run(self.sso_obs, o.iloc[0], self.hval)
        self.assertEqual(activity, 1.0)
        activity_period_metric = metrics.ActivityOverPeriodMetric(bin_size=720, snr_limit=5)
        activity = activity_period_metric.run(self.sso_obs, o.iloc[0], self.hval)
        self.assertEqual(activity, 1.0)
        # Fine bins: only a small fraction of the orbit is sampled.
        activity_period_metric = metrics.ActivityOverPeriodMetric(bin_size=10, snr_limit=5)
        activity = activity_period_metric.run(self.sso_obs, o.iloc[0], self.hval)
        self.assertLess(activity, 0.03)
        # different type of orbit - currently should fail quietly
        orb = np.recarray(
            1,
            dtype=(
                [
                    ("objId", (str, 20)),
                    ("a", float),
                    ("e", float),
                    ("inc", float),
                    ("Omega", float),
                    ("argPeri", float),
                    ("meanAnomaly", float),
                    ("epoch", float),
                    ("H", float),
                    ("g", float),
                ]
            ),
        )
        orb["objId"] = "NESC00001HYj"
        orb["a"] = 1.029886
        orb["e"] = 0.028514
        orb["inc"] = 0.502477
        orb["Omega"] = 50.989131
        orb["argPeri"] = 55.091685
        orb["meanAnomaly"] = 291.321814
        orb["epoch"] = 60973.799216 - 59850
        orb["H"] = 35.526041
        orb["g"] = 0.15
        o = pd.DataFrame(orb)
        activity_period_metric = metrics.ActivityOverPeriodMetric(bin_size=360, snr_limit=5)
        activity = activity_period_metric.run(self.sso_obs, o.iloc[0], self.hval)
        self.assertEqual(activity, 1.0)
        activity_period_metric = metrics.ActivityOverPeriodMetric(bin_size=180, snr_limit=5)
        activity = activity_period_metric.run(self.sso_obs, o.iloc[0], self.hval)
        self.assertEqual(activity, 0.5)

    def tearDown(self):
        del self.sso_obs
        del self.orb
        del self.hval
class TestDiscoveryMetrics(unittest.TestCase):
    """Tests for the discovery-related metrics (tracklet/window discovery,
    'magic' discovery, and high-velocity detection)."""

    def setUp(self):
        # Seeded RNG so the velocity column is reproducible across runs.
        rng = np.random.RandomState(61331)
        # Test metrics using sso_obs for a particular object.
        times = np.array(
            [0.1, 0.2, 0.9, 1.1, 1.3, 5.1, 7.1, 7.2, 7.5, 10.1, 10.2, 13.1, 13.5],
            dtype="float",
        )
        sso_obs = np.recarray(
            [len(times)],
            dtype=(
                [
                    ("time", "<f8"),
                    ("ra", "<f8"),
                    ("dec", "<f8"),
                    ("ec_lon", "<f8"),
                    ("ec_lat", "<f8"),
                    ("solar_elong", "<f8"),
                    ("appMag", "<f8"),
                    ("observationStartMJD", "<f8"),
                    ("night", "<f8"),
                    ("magLimit", "<f8"),
                    ("velocity", "<f8"),
                    ("SNR", "<f8"),
                    ("vis", "<f8"),
                    ("magFilter", "<f8"),
                    ("fiveSigmaDepth", "<f8"),
                    ("seeingFwhmGeom", "<f8"),
                    ("visitExposureTime", "<f8"),
                    ("dmagDetect", "<f8"),
                ]
            ),
        )
        sso_obs["time"] = times
        sso_obs["observationStartMJD"] = times
        sso_obs["night"] = np.floor(times)
        # Simple linear sky positions; ecliptic/elongation columns are
        # fixed offsets so the Radec/Eclonlat child metrics have known values.
        sso_obs["ra"] = np.arange(len(times))
        sso_obs["dec"] = np.arange(len(times)) + 5
        sso_obs["ec_lon"] = sso_obs["ra"] + 10
        sso_obs["ec_lat"] = sso_obs["dec"] + 20
        sso_obs["solar_elong"] = sso_obs["ra"] + 30
        sso_obs["appMag"] = np.zeros(len(times), dtype="float") + 24.0
        sso_obs["magFilter"] = np.zeros(len(times), dtype="float") + 24.0
        sso_obs["fiveSigmaDepth"] = np.zeros(len(times), dtype="float") + 25.0
        sso_obs["dmagDetect"] = np.zeros(len(times), dtype="float")
        sso_obs["magLimit"] = np.zeros(len(times), dtype="float") + 25.0
        sso_obs["SNR"] = np.zeros(len(times), dtype="float") + 5.0
        sso_obs["vis"] = np.zeros(len(times), dtype="float") + 1
        sso_obs["vis"][0:5] = 0
        # Velocities drawn in [0, 1): below the high-velocity thresholds
        # used in the tests unless a test overrides them explicitly.
        sso_obs["velocity"] = rng.rand(len(times))
        sso_obs["seeingFwhmGeom"] = np.ones(len(times), "float")
        sso_obs["visitExposureTime"] = np.ones(len(times), "float") * 24.0
        self.sso_obs = sso_obs
        self.orb = np.recarray([len(times)], dtype=([("H", "<f8")]))
        self.orb["H"] = np.zeros(len(times), dtype="float") + 8
        self.hval = 8

    def test_discovery_metric(self):
        """DiscoveryMetric and its child metrics on a known fixture."""
        disc_metric = metrics.DiscoveryMetric(
            n_obs_per_night=2,
            t_min=0.0,
            t_max=0.3,
            n_nights_per_window=3,
            t_window=9,
            snr_limit=5,
        )
        metric_value = disc_metric.run(self.sso_obs, self.orb, self.hval)
        child = metrics.DiscoveryNObsMetric(disc_metric)
        n_obs = child.run(self.sso_obs, self.orb, self.hval, metric_value)
        self.assertEqual(n_obs, 8)
        # child = metrics.DiscoveryNObsMetric(disc_metric, i=1)
        # n_obs = child.run(self.sso_obs, self.orb, self.hval, metric_value)
        # self.assertEqual(n_obs, 7)
        child = metrics.DiscoveryNChancesMetric(disc_metric)
        nchances = child.run(self.sso_obs, self.orb, self.hval, metric_value)
        self.assertEqual(nchances, 2)
        child = metrics.DiscoveryTimeMetric(disc_metric)
        time = child.run(self.sso_obs, self.orb, self.hval, metric_value)
        self.assertEqual(time, self.sso_obs["observationStartMJD"][0])
        # child = metrics.DiscoveryTimeMetric(disc_metric, i=1)
        # time = child.run(self.sso_obs, self.orb, self.hval, metric_value)
        # self.assertEqual(time, self.sso_obs["observationStartMJD"][3])
        child = metrics.DiscoveryRadecMetric(disc_metric)
        ra, dec = child.run(self.sso_obs, self.orb, self.hval, metric_value)
        self.assertEqual(ra, 0)
        self.assertEqual(dec, 5)
        child = metrics.DiscoveryEclonlatMetric(disc_metric)
        lon, lat, solar_elong = child.run(self.sso_obs, self.orb, self.hval, metric_value)
        self.assertEqual(lon, 10)
        self.assertEqual(lat, 25)
        # MagicDiscoveryMetric: n_obs observations within t_window nights.
        disc_metric3 = metrics.MagicDiscoveryMetric(n_obs=5, t_window=2, snr_limit=5)
        magic = disc_metric3.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(magic, 1)
        disc_metric3 = metrics.MagicDiscoveryMetric(n_obs=3, t_window=1, snr_limit=5)
        magic = disc_metric3.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(magic, 2)
        disc_metric3 = metrics.MagicDiscoveryMetric(n_obs=4, t_window=4, snr_limit=5)
        magic = disc_metric3.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(magic, 4)

    def test_high_velocity_metric(self):
        """HighVelocityMetric: count observations above psf_factor * seeing."""
        rng = np.random.RandomState(8123)
        vel_metric = metrics.HighVelocityMetric(psf_factor=1.0, snr_limit=5)
        metric_value = vel_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(metric_value, 0)
        self.sso_obs["velocity"][0:2] = 1.5
        metric_value = vel_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(metric_value, 2)
        vel_metric = metrics.HighVelocityMetric(psf_factor=2.0, snr_limit=5)
        metric_value = vel_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(metric_value, 0)
        # Restore sub-threshold velocities (harmless either way: setUp
        # rebuilds sso_obs before each test).
        self.sso_obs["velocity"][0:2] = rng.rand(1)

    def test_high_velocity_nights_metric(self):
        """HighVelocityNightsMetric: first night with a fast observation."""
        vel_metric = metrics.HighVelocityNightsMetric(psf_factor=1.0, n_obs_per_night=1, snr_limit=5)
        metric_value = vel_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(metric_value, 0)
        self.sso_obs["velocity"][0:2] = 1.5
        metric_value = vel_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(metric_value, self.sso_obs["observationStartMJD"][0])
        # NOTE(review): this restore uses the unseeded np.random, unlike
        # the seeded RandomState used elsewhere; it runs after the last
        # assertion so it cannot affect results, but seeding would be
        # more consistent — confirm before changing.
        self.sso_obs["velocity"][0:2] = np.random.rand(1)
class TestKnownObjectMetrics(unittest.TestCase):
    """Tests for KnownObjectsMetric: the earliest time an object would be
    'known', given per-era magnitude thresholds and efficiencies."""

    def setUp(self):
        # Three era-switch times used by the metric configurations below.
        self.t1 = 53371
        self.t2 = 57023
        self.t3 = 59580
        # Daily observations spanning two years before t1 to three after t3.
        times = np.arange(self.t1 - 365 * 2, self.t3 + 365 * 3, 1)
        cols = ["MJD(UTC)", "RA", "Dec", "magV", "Elongation", "appMagV"]
        dtype = []
        for c in cols:
            dtype.append((c, "<f8"))
        sso_obs = np.recarray([len(times)], dtype=dtype)
        sso_obs["MJD(UTC)"] = times
        sso_obs["RA"] = np.arange(len(times))
        sso_obs["Dec"] = np.arange(len(times))
        # Constant magnitude 20 and full elongation, so discoverability is
        # controlled purely by the per-era thresholds in each test.
        sso_obs["magV"] = np.zeros(len(times), dtype="float") + 20.0
        sso_obs["Elongation"] = np.zeros(len(times), dtype=float) + 180.0
        self.hval = 0.0
        sso_obs["appMagV"] = sso_obs["magV"] + self.hval
        self.orb = None
        self.sso_obs = sso_obs

    def test_known_objects_metric(self):
        """Tighten thresholds era by era and check the discovery time moves
        to the start of the first era whose threshold the object passes."""
        # All eras can see mag 20: discovered at the very first observation.
        known_object_metric = metrics.KnownObjectsMetric(
            t_switch1=self.t1,
            eff1=1.0,
            v_mag_thresh1=20.5,
            t_switch2=self.t2,
            eff2=1.0,
            v_mag_thresh2=20.5,
            t_switch3=self.t3,
            eff3=1.0,
            v_mag_thresh3=20.5,
            eff4=1.0,
            v_mag_thresh4=22,
        )
        m_val = known_object_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(m_val, self.sso_obs["MJD(UTC)"].min())
        # Era 1 too shallow (thresh 15 < mag 20): discovery moves to t1.
        known_object_metric = metrics.KnownObjectsMetric(
            t_switch1=self.t1,
            eff1=1.0,
            v_mag_thresh1=15.0,
            t_switch2=self.t2,
            eff2=1.0,
            v_mag_thresh2=20.5,
            t_switch3=self.t3,
            eff3=1.0,
            v_mag_thresh3=20.5,
            eff4=1.0,
            v_mag_thresh4=22,
        )
        m_val = known_object_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(m_val, self.t1)
        # Zero efficiency in era 1 has the same effect as a shallow threshold.
        known_object_metric = metrics.KnownObjectsMetric(
            t_switch1=self.t1,
            eff1=0.0,
            v_mag_thresh1=20.5,
            t_switch2=self.t2,
            eff2=1.0,
            v_mag_thresh2=20.5,
            t_switch3=self.t3,
            eff3=1.0,
            v_mag_thresh3=20.5,
            eff4=1.0,
            v_mag_thresh4=22,
        )
        m_val = known_object_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(m_val, self.t1)
        # Eras 1-2 too shallow: discovery moves to t2.
        known_object_metric = metrics.KnownObjectsMetric(
            t_switch1=self.t1,
            eff1=1.0,
            v_mag_thresh1=10,
            t_switch2=self.t2,
            eff2=1.0,
            v_mag_thresh2=10.5,
            t_switch3=self.t3,
            eff3=1.0,
            v_mag_thresh3=20.5,
            eff4=1.0,
            v_mag_thresh4=22,
        )
        m_val = known_object_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(m_val, self.t2)
        # Eras 1-3 too shallow: discovery moves to t3 (era 4 thresh 22).
        known_object_metric = metrics.KnownObjectsMetric(
            t_switch1=self.t1,
            eff1=1.0,
            v_mag_thresh1=10,
            t_switch2=self.t2,
            eff2=1.0,
            v_mag_thresh2=10.5,
            t_switch3=self.t3,
            eff3=1.0,
            v_mag_thresh3=10.5,
            eff4=1.0,
            v_mag_thresh4=22,
        )
        m_val = known_object_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(m_val, self.t3)
        # No era deep enough: metric returns its badval.
        known_object_metric = metrics.KnownObjectsMetric(
            t_switch1=self.t1,
            eff1=1.0,
            v_mag_thresh1=10,
            t_switch2=self.t2,
            eff2=1.0,
            v_mag_thresh2=10.5,
            t_switch3=self.t3,
            eff3=1.0,
            v_mag_thresh3=10.5,
            eff4=1.0,
            v_mag_thresh4=10.5,
        )
        m_val = known_object_metric.run(self.sso_obs, self.orb, self.hval)
        self.assertEqual(m_val, known_object_metric.badval)
if __name__ == "__main__":
    # Allow running this test module directly with "python test_mometrics.py".
    unittest.main()
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@tests@maf@test_mometrics.py@.PATH_END.py
|
{
"filename": "_separatethousands.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/treemap/marker/colorbar/_separatethousands.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for ``separatethousands`` on ``treemap.marker.colorbar``."""

    def __init__(
        self,
        plotly_name="separatethousands",
        parent_name="treemap.marker.colorbar",
        **kwargs,
    ):
        # Colorbar properties use the "colorbars" edit type by default;
        # callers may override it through kwargs.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(SeparatethousandsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@treemap@marker@colorbar@_separatethousands.py@.PATH_END.py
|
{
"filename": "bench_load_TOAs.py",
"repo_name": "nanograv/PINT",
"repo_path": "PINT_extracted/PINT-master/profiling/bench_load_TOAs.py",
"type": "Python"
}
|
#!/usr/bin/env python
import pint.toa
# Get .tim file from here:
# curl -O https://data.nanograv.org/static/data/J0740+6620.cfr+19.tim
# This will load the TOAs, compute the positions of the Earth and planets, and apply clock corrections and build the table.
# Load the TOAs, compute Earth/planet positions, and apply clock
# corrections while building the table; this is the operation being timed.
thanktoas = pint.toa.get_TOAs(
    "J0740+6620.cfr+19.tim",
    ephem="DE436",
    planets=True,
    usepickle=False,
    bipm_version="BIPM2015",
    include_bipm=True,
)
print()
# ntoas is already interpolated by the f-string; the redundant str() call
# in the original added nothing to the output.
print(f"Number of TOAs: {thanktoas.ntoas}")
print()
|
nanogravREPO_NAMEPINTPATH_START.@PINT_extracted@PINT-master@profiling@bench_load_TOAs.py@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/bar/selected/_textfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
    """Auto-generated plotly hierarchy node for ``bar.selected.textfont``.

    Holds the text-font styling applied to *selected* points of a bar
    trace.  The only valid property is ``color``.
    """

    # class properties
    # --------------------
    _parent_path_str = "bar.selected"
    _path_str = "bar.selected.textfont"
    _valid_props = {"color"}
    # color
    # -----
    @property
    def color(self):
        """
        Sets the text font color of selected points.
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        # Assignment goes through the base class item protocol, which runs
        # the property validator before storing the value.
        self["color"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the text font color of selected points.
        """
    def __init__(self, arg=None, color=None, **kwargs):
        """
        Construct a new Textfont object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.bar.selected.Textfont`
        color
            Sets the text font color of selected points.
        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")
        # Fast path used internally when this node is created as a child of
        # an existing figure hierarchy: adopt the parent and skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.bar.selected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.selected.Textfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # An explicit keyword argument takes precedence over the same key in arg.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@bar@selected@_textfont.py@.PATH_END.py
|
{
"filename": "labels.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/mayavi/modules/labels.py",
"type": "Python"
}
|
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from vtk.numpy_interface import dataset_adapter as dsa
# Enthought library imports.
from traits.api import Int, Instance, Str, TraitError
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
from apptools.persistence import state_pickler
# Local imports.
from mayavi.core.common import error
from mayavi.core.pipeline_base import PipelineBase
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.core.module import Module
from mayavi.filters.optional import Optional
from mayavi.filters.mask_points import MaskPoints
from mayavi.filters.user_defined import UserDefined
from mayavi.components.actor2d import Actor2D
from mayavi.core.common import handle_children_state
################################################################################
# `Labels` class.
################################################################################
class Labels(Module):
    """
    Allows a user to label the current dataset or the current actor of
    the active module.
    """
    # Used for persistence.
    __version__ = 0
    # The object which we are labeling.
    object = Instance(PipelineBase, record=False)
    # The label format string.
    label_format = Str('', enter_set=True, auto_set=False,
                       desc='the label format string')
    # Number of points to label.
    number_of_labels = Int(25, enter_set=True, auto_set=False,
                           desc='the number of points to label')
    # The filter used for masking of the points.
    mask = Instance(MaskPoints, record=True)
    # Filter to select visible points.
    visible_points = Instance(Optional, record=True)
    # The 2D actor for the labels.
    actor = Instance(Actor2D, record=True)
    # The text property of the labels.
    property = Instance(tvtk.TextProperty, record=True)
    # The mapper for the labels.
    mapper = Instance(tvtk.LabeledDataMapper, args=(), record=True)
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['any'])
    ########################################
    # Private traits.
    # The input used for the labeling.
    input = Instance(PipelineBase)
    # The id of the object in the modulemanager only used for
    # persistence.  -2 means "no object" (falls back to the source),
    # -1 means the module-manager's source itself, >= 0 indexes a child.
    object_id = Int(-2)
    ########################################
    # View related traits.
    view = View(Group(Item(name='number_of_labels'),
                      Item(name='label_format'),
                      Item(name='mapper',
                           style='custom',
                           show_label=False,
                           resizable=True),
                      Item(name='mask',
                           style='custom',
                           resizable=True,
                           show_label=False),
                      label='Labels'
                      ),
                Group(
                      Item(name='visible_points',
                           style='custom',
                           resizable=True,
                           show_label=False),
                      label='VisiblePoints'
                      ),
                Group(Item(name='property',
                           style='custom',
                           show_label=False,
                           resizable=True),
                      label='TextProperty'),
                resizable=True)
    ######################################################################
    # `object` interface.
    ######################################################################
    def __get_pure_state__(self):
        """Return the pickling state, dropping live pipeline handles.

        `object`, `mapper` and `input` are runtime references that are
        reconstructed on restore; `object_id` (recomputed here) is what
        actually persists the link to the labeled object.
        """
        self._compute_object_id()
        d = super(Labels, self).__get_pure_state__()
        for name in ('object', 'mapper', 'input'):
            d.pop(name, None)
        # Must pickle the components.
        d['components'] = self.components
        return d
    def __set_pure_state__(self, state):
        """Restore pickled state and rebuild the pipeline."""
        handle_children_state(self.components, state.components)
        state_pickler.set_state(self, state)
        self.update_pipeline()
    ######################################################################
    # `Module` interface.
    ######################################################################
    def setup_pipeline(self):
        """Create the mask/visible-points/actor components (no inputs yet)."""
        mask = MaskPoints()
        mask.filter.trait_set(generate_vertices=True, random_mode=True)
        self.mask = mask
        v = UserDefined(filter=tvtk.SelectVisiblePoints(),
                        name='VisiblePoints')
        # Visible-point selection is off by default; the user can enable it.
        self.visible_points = Optional(filter=v, enabled=False)
        mapper = tvtk.LabeledDataMapper()
        self.mapper = mapper
        self.actor = Actor2D(mapper=mapper)
        self.property = mapper.label_text_property
        # Re-render whenever the text property changes.
        self.property.on_trait_change(self.render)
        self.components = [self.mask, self.visible_points, self.actor]
    def update_pipeline(self):
        """Wire input -> mask -> visible_points -> actor and refresh settings."""
        mm = self.module_manager
        if mm is None:
            return
        self._find_input()  # Calculates self.input
        self.mask.inputs = [self.input]
        self.visible_points.inputs = [self.mask]
        self.actor.inputs = [self.visible_points]
        # Re-apply current trait values to the freshly wired pipeline.
        self._number_of_labels_changed(self.number_of_labels)
        self._label_format_changed(self.label_format)
    ######################################################################
    # Non-public interface.
    ######################################################################
    def _find_input(self):
        """Resolve `self.input` from `object` or the persisted `object_id`."""
        mm = self.module_manager
        if self.object is None:
            # No live object: use the persisted id (set during unpickling).
            if self.object_id == -1:
                self.input = mm.source
            elif self.object_id > -1:
                obj = mm.children[self.object_id]
                if hasattr(obj, 'actor'):
                    # trait_change_notify=False avoids re-triggering
                    # _object_changed -> update_pipeline recursion.
                    self.trait_set(object=obj, trait_change_notify=False)
                    self.input = obj.actor.inputs[0]
                else:
                    self.input = mm.source
        else:
            o = self.object
            if hasattr(o, 'module_manager'):
                # A module.
                if hasattr(o, 'actor'):
                    self.input = o.actor.inputs[0]
                else:
                    self.input = o.module_manager.source
        if self.input is None:
            if self.object_id == -2:
                self.input = mm.source
            else:
                error('No object to label!')
                return
    def _number_of_labels_changed(self, value):
        """Adjust the mask's on_ratio so roughly *value* points get labels."""
        if self.input is None:
            return
        f = self.mask.filter
        inp = self.input.get_output_dataset()
        data_obj = dsa.WrapDataObject(tvtk.to_vtk(inp))
        npts = data_obj.GetNumberOfPoints()
        # Preserve the trait's native type (int) when setting the ratio.
        typ = type(f.on_ratio)
        f.on_ratio = typ(max(npts/value, 1))
        if self.mask.running:
            f.update()
            self.mask.data_changed = True
    def _label_format_changed(self, value):
        """Push the format string to the mapper; empty string restores default."""
        if len(value) > 0:
            self.mapper.label_format = value
            self.render()
        else:
            try:
                self.mapper.label_format = None
            except TraitError:
                # Older tvtk versions reject None; '%g' is the VTK default.
                self.mapper.label_format = '%g'
            self.render()
    def _object_changed(self, value):
        self.update_pipeline()
    def _compute_object_id(self):
        """Encode `object` as an index for persistence (see object_id trait)."""
        mm = self.module_manager
        input = self.input
        self.object_id = -2
        if input is mm.source:
            self.object_id = -1
            return
        for id, child in enumerate(mm.children):
            if child is self.object:
                self.object_id = id
                return
    def _scene_changed(self, old, new):
        # The SelectVisiblePoints filter needs the renderer to test visibility.
        self.visible_points.filter.filter.renderer = new.renderer
        super(Labels, self)._scene_changed(old, new)
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@mayavi@modules@labels.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "simonsobs/socs",
"repo_path": "socs_extracted/socs-main/socs/agents/hwp_picoscope/drivers/__init__.py",
"type": "Python"
}
|
simonsobsREPO_NAMEsocsPATH_START.@socs_extracted@socs-main@socs@agents@hwp_picoscope@drivers@__init__.py@.PATH_END.py
|
|
{
"filename": "photometry.py",
"repo_name": "astrotuvi/pyplate",
"repo_path": "pyplate_extracted/pyplate-master/pyplate/process/photometry.py",
"type": "Python"
}
|
import os
import math
import numpy as np
from astropy.table import Table
from astropy.stats import sigma_clip
from scipy.interpolate import InterpolatedUnivariateSpline, SmoothBivariateSpline
from scipy.ndimage.filters import generic_filter
from scipy.optimize import curve_fit
from collections import OrderedDict
from ..conf import read_conf
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
import statsmodels.api as sm
have_statsmodels = True
except ImportError:
have_statsmodels = False
def _rmse(residuals):
return np.sqrt(np.mean(residuals**2))
def _abscurve(x, x0, a, b, c):
return a * np.abs(x - x0) + b * (x - x0)**2 + c
class PhotometryProcess:
"""
Plate photometry process class
"""
def __init__(self):
self.basefn = ''
self.write_phot_dir = ''
self.scratch_dir = None
self.log = None
self.plate_header = None
self.platemeta = None
self.sources = None
self.plate_solution = None
self.phot_cterm_list = []
self.phot_calib = None
self.phot_calib_list = []
self.phot_calibrated = False
self.calib_curve = None
self.faint_limit = None
self.bright_limit = None
def assign_conf(self, conf):
"""
Parse configuration and set class attributes.
"""
if isinstance(conf, str):
conf = read_conf(conf)
self.conf = conf
for attr in ['write_phot_dir']:
try:
setattr(self, attr, conf.get('Files', attr))
except configparser.Error:
pass
    def evaluate_color_term(self, sources, solution_num=0):
        """
        Evaluate color term for a given astrometric solution, using the
        source data and reference catalog.

        The color term is found in three iterations: a coarse grid scan
        over candidate values (fitting an abs+parabola curve to the
        weighted scatter), an outlier-cleaned repeat of the scan, and a
        fine grid around the minimum fitted with a weighted parabola.
        On success the result is stored in ``self.phot_calib``
        ('color_term', 'color_term_error' and related keys); on any
        failure the method logs the reason and returns None without
        updating ``self.phot_calib``.

        Parameters
        ----------
        sources: SourceTable object
            Source catalog with plate magnitudes and external catalog
            (Gaia DR2) magnitudes
        solution_num: int
            Astrometric solution number
        """
        cat_mag1 = sources['gaiaedr3_bpmag'].data
        cat_mag2 = sources['gaiaedr3_rpmag'].data
        plate_mag = sources['mag_auto'].data
        mag_corr = sources['natmag_correction'].data
        mag_err = sources['magerr_auto'].data
        # Replace nans with numerical values
        # (zero correction, unit error weight)
        mag_corr[np.isnan(mag_corr)] = 0.
        mag_err[np.isnan(mag_err)] = 1.
        num_calstars = len(sources)
        # Evaluate color term in 3 iterations
        self.log.write('Determining color term: {:d} stars'
                       ''.format(num_calstars),
                       double_newline=False, level=4, event=72,
                       solution_num=solution_num)
        if num_calstars < 10:
            self.log.write('Determining color term: too few stars!',
                           level=2, event=72, solution_num=solution_num)
            return None
        # Keep only sources unique in both catalog and plate magnitude,
        # so the spline interpolation below has strictly distinct knots.
        _,uind1 = np.unique(cat_mag1, return_index=True)
        plate_mag_u,uind2 = np.unique(plate_mag[uind1], return_index=True)
        cat_mag1_u = cat_mag1[uind1[uind2]]
        cat_mag2_u = cat_mag2[uind1[uind2]]
        mag_corr_u = mag_corr[uind1[uind2]]
        mag_err_u = mag_err[uind1[uind2]]
        # Discard faint sources (within 1 mag from the plate limit),
        # if the number of sources is larger than 100
        if len(plate_mag_u) > 100:
            diff_from_limit = 1.
        else:
            diff_from_limit = 0.
        # Estimate the plate magnitude limit from the faint edge of the
        # dense part of the KDE of plate magnitudes.
        kde = sm.nonparametric.KDEUnivariate(plate_mag_u
                                             .astype(np.double))
        kde.fit()
        ind_dense = np.where(kde.density > 0.2*kde.density.max())[0]
        plate_mag_lim = kde.support[ind_dense[-1]]
        ind_nofaint = np.where(plate_mag_u < plate_mag_lim - diff_from_limit)[0]
        num_nofaint = len(ind_nofaint)
        self.log.write('Determining color term: {:d} stars after discarding '
                       'faint sources ({:.1f} mag from faint limit)'
                       .format(num_nofaint, diff_from_limit),
                       double_newline=False, level=4, event=72,
                       solution_num=solution_num)
        if num_nofaint < 10:
            self.log.write('Determining color term: too few stars after '
                           'discarding faint sources!',
                           level=2, event=72, solution_num=solution_num)
            return None
        # Lowess smoothing fraction: widen for small samples.
        frac = 0.2
        if num_nofaint < 500:
            frac = 0.2 + 0.3 * (500 - num_nofaint) / 500.
        plate_mag_u = plate_mag_u[ind_nofaint]
        cat_mag1_u = cat_mag1_u[ind_nofaint]
        cat_mag2_u = cat_mag2_u[ind_nofaint]
        mag_corr_u = mag_corr_u[ind_nofaint]
        mag_err_u = mag_err_u[ind_nofaint]
        # Iteration 1
        # Coarse scan: candidate color terms from -4 to +7 in 0.25 steps.
        cterm_list = np.arange(45) * 0.25 - 4.
        stdev_list = []
        for cterm in cterm_list:
            cat_mag = cat_mag2_u + cterm * (cat_mag1_u - cat_mag2_u)
            z = sm.nonparametric.lowess(cat_mag, plate_mag_u,
                                        frac=frac, it=0, delta=0.2,
                                        return_sorted=True)
            s = InterpolatedUnivariateSpline(z[:,0], z[:,1], k=1)
            mag_diff = cat_mag - s(plate_mag_u) - mag_corr_u
            # Error-weighted scatter, rescaled back to magnitude units.
            stdev_val = (np.sqrt(np.sum((mag_diff / mag_err_u)**2)
                                 / len(mag_diff))
                         * np.sqrt(np.sum(mag_err_u**2)
                                   / len(mag_diff)))
            stdev_list.append(stdev_val)
            # Store cterm data
            self.phot_cterm_list.append(OrderedDict([
                ('solution_num', solution_num),
                ('iteration', 1),
                ('cterm', cterm),
                ('stdev', stdev_val),
                ('num_stars', len(mag_diff))
            ]))
        if max(stdev_list) < 0.01:
            self.log.write('Color term fit failed! '
                           '(iteration 1, num_stars = {:d}, '
                           'max_stdev = {:.3f})'
                           .format(len(mag_diff), max(stdev_list)),
                           level=2, event=72,
                           solution_num=solution_num)
            return None
        # Fit curve to stdev_list and get the cterm_min value
        params, pcov = curve_fit(_abscurve, cterm_list, stdev_list)
        perr = np.sqrt(np.diag(pcov))
        cterm_min = params[0]
        self.log.write('Color term fit (iteration 1, num_stars = {:d}, '
                       'min_stdev = {:.3f}, max_stdev = {:.3f}): '
                       'parameters {:.4f} {:.4f} {:.4f} {:.4f}, '
                       'errors {:.4f} {:.4f} {:.4f} {:.4f}'
                       .format(len(mag_diff), min(stdev_list), max(stdev_list),
                               *params, *perr),
                       double_newline=False,
                       level=4, event=72, solution_num=solution_num)
        if cterm_min < -3 or cterm_min > 5:
            self.log.write('Color term outside of allowed range!',
                           level=2, event=72, solution_num=solution_num)
            return None
        # Eliminate outliers (over 1.5 mag + sigma clip)
        cat_mag = cat_mag2_u + cterm_min * (cat_mag1_u - cat_mag2_u)
        z = sm.nonparametric.lowess(cat_mag, plate_mag_u,
                                    frac=frac, it=3, delta=0.2,
                                    return_sorted=True)
        s = InterpolatedUnivariateSpline(z[:,0], z[:,1], k=1)
        mag_diff = cat_mag - s(plate_mag_u) - mag_corr_u
        ind1 = np.where(np.absolute(mag_diff) <= 1.5)[0]
        flt = sigma_clip(mag_diff[ind1], maxiters=None)
        ind_good1 = ~flt.mask
        ind_good = ind1[ind_good1]
        # Iteration 2
        # Same coarse scan, now restricted to the outlier-cleaned sample.
        cterm_list = np.arange(45) * 0.25 - 4.
        stdev_list = []
        frac = 0.2
        if len(ind_good) < 500:
            frac = 0.2 + 0.3 * (500 - len(ind_good)) / 500.
        for cterm in cterm_list:
            cat_mag = cat_mag2_u + cterm * (cat_mag1_u - cat_mag2_u)
            z = sm.nonparametric.lowess(cat_mag[ind_good],
                                        plate_mag_u[ind_good],
                                        frac=frac, it=0, delta=0.2,
                                        return_sorted=True)
            s = InterpolatedUnivariateSpline(z[:,0], z[:,1], k=1)
            mag_diff = (cat_mag[ind_good] - s(plate_mag_u[ind_good])
                        - mag_corr_u[ind_good])
            stdev_val = (np.sqrt(np.sum((mag_diff / mag_err_u[ind_good])**2)
                                 / len(mag_diff))
                         * np.sqrt(np.sum(mag_err_u[ind_good]**2)
                                   / len(mag_diff)))
            stdev_list.append(stdev_val)
            # Store cterm data
            self.phot_cterm_list.append(OrderedDict([
                ('solution_num', solution_num),
                ('iteration', 2),
                ('cterm', cterm),
                ('stdev', stdev_val),
                ('num_stars', len(mag_diff))
            ]))
        stdev_list = np.array(stdev_list)
        if max(stdev_list) < 0.01:
            self.log.write('Color term fit failed! '
                           '(iteration 2, num_stars = {:d}, '
                           'max_stdev = {:.3f})'
                           .format(len(mag_diff), max(stdev_list)),
                           level=2, event=72, solution_num=solution_num)
            return None
        # Fit curve to stdev_list and get the cterm_min value
        params, pcov = curve_fit(_abscurve, cterm_list, stdev_list)
        perr = np.sqrt(np.diag(pcov))
        cterm_min = params[0]
        cterm_min_err = perr[0]
        self.log.write('Color term fit (iteration 2, num_stars = {:d}, '
                       'min_stdev = {:.3f}, max_stdev = {:.3f}): '
                       'parameters {:.4f} {:.4f} {:.4f} {:.4f}, '
                       'errors {:.4f} {:.4f} {:.4f} {:.4f}'
                       .format(len(mag_diff), min(stdev_list), max(stdev_list),
                               *params, *perr),
                       double_newline=False,
                       level=4, event=72, solution_num=solution_num)
        if cterm_min < -3 or cterm_min > 5:
            self.log.write('Color term outside of allowed range!',
                           level=2, event=72, solution_num=solution_num)
            return None
        # Snapshot iteration-2 results as a fallback in case the fine
        # iteration-3 fit turns out unreliable.
        stdev_fit_iter2 = np.nan
        stdev_min_iter2 = np.min(stdev_list)
        cterm_minval_iter2 = np.min(cterm_list)
        cterm_maxval_iter2 = np.max(cterm_list)
        num_stars_iter2 = len(mag_diff)
        if params[1] < 0 or min(stdev_list) < 0.01:
            self.log.write('Color term fit failed! '
                           '(iteration 2, num_stars = {:d}, params[1] = {:f}, '
                           'min_stdev = {:.3f})'
                           .format(len(mag_diff), params[1], min(stdev_list)),
                           level=2, event=72, solution_num=solution_num)
            return None
        # Iteration 3
        # Fine scan: 41 points with 0.02 spacing, centred on the
        # iteration-2 minimum (rounded to 0.02).
        cterm_list = (np.arange(41) * 0.02 +
                      round(cterm_min*50.)/50. - 0.4)
        stdev_list = []
        for cterm in cterm_list:
            cat_mag = cat_mag2_u + cterm * (cat_mag1_u - cat_mag2_u)
            z = sm.nonparametric.lowess(cat_mag[ind_good],
                                        plate_mag_u[ind_good],
                                        frac=frac, it=0, delta=0.2,
                                        return_sorted=True)
            s = InterpolatedUnivariateSpline(z[:,0], z[:,1], k=1)
            mag_diff = (cat_mag[ind_good] - s(plate_mag_u[ind_good])
                        - mag_corr_u[ind_good])
            stdev_val = (np.sqrt(np.sum((mag_diff / mag_err_u[ind_good])**2)
                                 / len(mag_diff))
                         * np.sqrt(np.sum(mag_err_u[ind_good]**2)
                                   / len(mag_diff)))
            stdev_list.append(stdev_val)
            # Store cterm data
            self.phot_cterm_list.append(OrderedDict([
                ('solution_num', solution_num),
                ('iteration', 3),
                ('cterm', cterm),
                ('stdev', stdev_val),
                ('num_stars', len(mag_diff))
            ]))
        stdev_list = np.array(stdev_list)
        # Weighted parabola fit; the vertex gives the final color term,
        # with the error propagated from the covariance of the coefficients.
        cf, cov = np.polyfit(cterm_list, stdev_list, 2,
                             w=1./stdev_list**2, cov=True)
        cterm = -0.5 * cf[1] / cf[0]
        cf_err = np.sqrt(np.diag(cov))
        cterm_err = np.sqrt((-0.5 * cf_err[1] / cf[0])**2 +
                            (0.5 * cf[1] * cf_err[0] / cf[0]**2)**2)
        p2 = np.poly1d(cf)
        stdev_fit = p2(cterm)
        stdev_min = np.min(stdev_list)
        cterm_minval = np.min(cterm_list)
        cterm_maxval = np.max(cterm_list)
        num_stars = len(mag_diff)
        iteration = 3
        self.log.write('Color term fit (iteration 3, num_stars = {:d}, '
                       'min_stdev = {:.3f}, max_stdev = {:.3f}, '
                       'min_cterm = {:.3f}, max_cterm = {:.3f}): '
                       'parameters {:.4f} {:.4f} {:.4f}, '
                       'errors {:.4f} {:.4f} {:.4f}'
                       .format(num_stars, min(stdev_list), max(stdev_list),
                               cterm_minval, cterm_maxval, *cf, *cf_err),
                       double_newline=False,
                       level=4, event=72, solution_num=solution_num)
        # A downward-opening parabola (cf[0] < 0) or an out-of-range vertex
        # means iteration 3 is unreliable; fall back to iteration 2 if its
        # value is in range, otherwise give up.
        if cf[0] < 0 or cterm < -3 or cterm > 5:
            if cf[0] < 0:
                self.log.write('Color term fit not reliable!',
                               level=2, event=72, solution_num=solution_num)
            else:
                self.log.write('Color term outside of allowed range '
                               '({:.3f})!'.format(cterm),
                               level=2, event=72, solution_num=solution_num)
            if cterm_min < -3 or cterm_min > 5:
                self.log.write('Color term from previous iteration '
                               'outside of allowed range ({:.3f})!'
                               ''.format(cterm_min),
                               level=2, event=72, solution_num=solution_num)
                return None
            else:
                cterm = cterm_min
                cterm_err = cterm_min_err
                stdev_fit = stdev_fit_iter2
                stdev_min = stdev_min_iter2
                cterm_minval = cterm_minval_iter2
                cterm_maxval = cterm_maxval_iter2
                num_stars = num_stars_iter2
                iteration = 2
                self.log.write('Taking color term from previous iteration',
                               level=4, event=72, solution_num=solution_num)
        # Create dictionary for calibration results, if not exists
        if self.phot_calib is None:
            self.phot_calib = OrderedDict()
            self.phot_calib['solution_num'] = solution_num
            self.phot_calib['iteration'] = 0
        # Store color term result
        self.phot_calib['color_term'] = cterm
        self.phot_calib['color_term_error'] = cterm_err
        self.phot_calib['cterm_stdev_fit'] = stdev_fit
        self.phot_calib['cterm_stdev_min'] = stdev_min
        self.phot_calib['cterm_range_min'] = cterm_minval
        self.phot_calib['cterm_range_max'] = cterm_maxval
        self.phot_calib['cterm_iterations'] = iteration
        self.phot_calib['cterm_num_stars'] = num_stars
        self.log.write('Plate color term (solution {:d}): {:.3f} ({:.3f})'
                       .format(solution_num, cterm, cterm_err),
                       level=4, event=72, solution_num=solution_num)
def calibrate_photometry_gaia(self, solution_num=None, iteration=1):
"""
Calibrate extracted magnitudes with Gaia data.
"""
num_solutions = self.plate_solution.num_solutions
assert (solution_num is None or
(solution_num > 0 and solution_num <= num_solutions))
self.log.write('Photometric calibration: solution {:d}, iteration {:d}'
.format(solution_num, iteration), level=3, event=70,
solution_num=solution_num)
# Initialise the flag value
self.phot_calibrated = False
if 'METHOD' in self.plate_header:
pmethod = self.plate_header['METHOD']
if (pmethod is not None and pmethod != ''
and 'direct photograph' not in pmethod
and 'focusing' not in pmethod
and 'test plate' not in pmethod):
self.log.write('Cannot calibrate photometry due to unsupported'
'observation method ({:s})'.format(pmethod),
level=2, event=70, solution_num=solution_num)
return
# Create dictionary for calibration results
self.phot_calib = OrderedDict()
# Create output directory, if missing
if self.write_phot_dir and not os.path.isdir(self.write_phot_dir):
self.log.write('Creating output directory {}'
.format(self.write_phot_dir), level=4, event=70,
solution_num=solution_num)
os.makedirs(self.write_phot_dir)
if self.write_phot_dir:
fn_cterm = os.path.join(self.write_phot_dir,
'{}_cterm.txt'.format(self.basefn))
fcterm = open(fn_cterm, 'wb')
fn_caldata = os.path.join(self.write_phot_dir,
'{}_caldata.txt'.format(self.basefn))
fcaldata = open(fn_caldata, 'wb')
# Select sources for photometric calibration
self.log.write('Selecting sources for photometric calibration',
level=3, event=71, solution_num=solution_num,
double_newline=False)
if solution_num is None:
solution_num = 1
self.phot_calib['solution_num'] = solution_num
self.phot_calib['iteration'] = iteration
# Store number of Gaia DR2 objects matched with the current solution
bgaia = (self.sources['solution_num'] == solution_num)
self.phot_calib['num_gaia_edr3'] = bgaia.sum()
# For single exposures, exclude blended sources.
# For multiple exposures, include them, because otherwise the bright
# end will lack calibration stars.
if num_solutions == 1:
bflags = ((self.sources['sextractor_flags'] == 0) |
(self.sources['sextractor_flags'] == 2))
else:
bflags = self.sources['sextractor_flags'] <= 3
# Create calibration-star mask
# Discard very red stars (BP-RP > 2)
cal_mask = ((self.sources['solution_num'] == solution_num) &
(self.sources['mag_auto'] > 0) &
(self.sources['mag_auto'] < 90) &
bflags &
(self.sources['flag_clean'] == 1) &
~self.sources['gaiaedr3_bpmag'].mask &
~self.sources['gaiaedr3_rpmag'].mask &
(self.sources['gaiaedr3_bp_rp'].filled(99.) <= 2) &
(self.sources['gaiaedr3_neighbors'] == 1))
num_calstars = cal_mask.sum()
self.phot_calib['num_candidate_stars'] = num_calstars
if num_calstars == 0:
self.log.write('No stars for photometric calibration',
level=2, event=71, solution_num=solution_num)
return
self.log.write('Found {:d} calibration-star candidates with '
'Gaia magnitudes on the plate'
.format(num_calstars), level=4, event=71,
solution_num=solution_num)
if num_calstars < 10:
self.log.write('Too few calibration stars on the plate!',
level=2, event=71, solution_num=solution_num)
return
# Evaluate color term
if iteration == 1:
self.log.write('Determining color term using annular bins 1-3',
level=3, event=72, solution_num=solution_num)
cterm_mask = cal_mask & (self.sources['annular_bin'] <= 3)
if cterm_mask.sum() < 50:
self.log.write('Found {:d} calibration stars in bins 1-3, '
'increasing area'.format(cterm_mask.sum()),
level=4, event=72, solution_num=solution_num)
self.log.write('Determining color term using annular bins 1-6',
level=3, event=72, solution_num=solution_num)
cterm_mask = cal_mask & (self.sources['annular_bin'] <= 6)
else:
self.log.write('Determining color term using annular bins 1-8',
level=3, event=72, solution_num=solution_num)
cterm_mask = cal_mask & (self.sources['annular_bin'] <= 8)
self.evaluate_color_term(self.sources[cterm_mask],
solution_num=solution_num)
# If color term was not determined, we need to terminate the
# calibration
if 'color_term' not in self.phot_calib:
self.log.write('Cannot continue photometric calibration without '
'color term', level=2, event=72,
solution_num=solution_num)
return
cterm = self.phot_calib['color_term']
cterm_err = self.phot_calib['color_term_error']
# Use stars in all annular bins
self.log.write('Photometric calibration using annular bins 1-9',
level=3, event=73, solution_num=solution_num)
# Select stars with unique plate mag values
plate_mag = self.sources['mag_auto'][cal_mask].data
plate_mag_u,uind = np.unique(plate_mag, return_index=True)
ind_calibstar_u = np.where(cal_mask)[0][uind]
#cal_u_mask = np.zeros_like(cal_mask)
#cal_u_mask[np.where(cal_mask)[0][uind]] = True
num_cal_u = len(plate_mag_u)
self.log.write('{:d} stars with unique magnitude'
.format(num_cal_u), double_newline=False,
level=4, event=73, solution_num=solution_num)
if num_cal_u < 10:
self.log.write('Too few stars with unique magnitude!',
double_newline=False, level=2, event=73,
solution_num=solution_num)
return
plate_mag_u = self.sources['mag_auto'][ind_calibstar_u].data
cat_bmag_u = self.sources['gaiaedr3_bpmag'][ind_calibstar_u].data
cat_vmag_u = self.sources['gaiaedr3_rpmag'][ind_calibstar_u].data
cat_natmag = cat_vmag_u + cterm * (cat_bmag_u - cat_vmag_u)
self.sources['cat_natmag'][ind_calibstar_u] = cat_natmag
# Eliminate outliers by constructing calibration curve from
# the bright end and extrapolate towards faint stars
# Find initial plate magnitude limit
kde = sm.nonparametric.KDEUnivariate(plate_mag_u
.astype(np.double))
kde.fit()
ind_maxden = np.argmax(kde.density)
plate_mag_maxden = kde.support[ind_maxden]
ind_dense = np.where(kde.density > 0.2*kde.density.max())[0]
brightmag = kde.support[ind_dense[0]]
plate_mag_lim = kde.support[ind_dense[-1]]
plate_mag_brt = plate_mag_u.min()
plate_mag_mid = (plate_mag_brt +
0.5 * (plate_mag_lim - plate_mag_brt))
if brightmag > plate_mag_mid:
brightmag = plate_mag_mid
# Check the number of stars in the bright end
nb = (plate_mag_u <= plate_mag_mid).sum()
if nb < 10:
plate_mag_mid = plate_mag_u[9]
# Construct magnitude cuts for outlier elimination
ncuts = int((plate_mag_lim - plate_mag_mid) / 0.5) + 2
mag_cuts = np.linspace(plate_mag_mid, plate_mag_lim, ncuts)
ind_cut = np.where(plate_mag_u <= plate_mag_mid)[0]
ind_good = np.arange(len(ind_cut))
mag_cut_prev = mag_cuts[0]
#mag_slope_prev = None
# Loop over magnitude bins
for mag_cut in mag_cuts[1:]:
gpmag = plate_mag_u[ind_cut[ind_good]]
gcmag = cat_natmag[ind_cut[ind_good]]
nbright = (gpmag < brightmag).sum()
if nbright < 20:
alt_brightmag = (plate_mag_u.min() +
(plate_mag_maxden - plate_mag_u.min()) * 0.5)
nbright = (gpmag < alt_brightmag).sum()
if nbright < 10:
nbright = 10
# Exclude bright outliers by fitting a line and checking
# if residuals are larger than 2 mag
ind_outliers = np.array([], dtype=int)
xdata = gpmag[:nbright]
ydata = gcmag[:nbright]
p1 = np.poly1d(np.polyfit(xdata, ydata, 1))
res = cat_natmag[ind_cut] - p1(plate_mag_u[ind_cut])
ind_brightout = np.where((np.absolute(res) > 2.) &
(plate_mag_u[ind_cut] <=
xdata.max()))[0]
if len(ind_brightout) > 0:
ind_outliers = np.append(ind_outliers,
ind_cut[ind_brightout])
ind_good = np.setdiff1d(ind_good, ind_outliers)
gpmag = plate_mag_u[ind_cut[ind_good]]
gcmag = cat_natmag[ind_cut[ind_good]]
nbright -= len(ind_brightout)
if nbright < 10:
nbright = 10
# Construct calibration curve
# Set lowess fraction depending on the number of data points
frac = 0.2
if len(ind_good) < 500:
frac = 0.2 + 0.3 * (500 - len(ind_good)) / 500.
z = sm.nonparametric.lowess(gcmag, gpmag,
frac=frac, it=3, delta=0.1,
return_sorted=True)
# In case there are less than 20 good stars, use only
# polynomial
if len(ind_good) < 20:
weights = np.zeros(len(ind_good)) + 1.
for i in np.arange(len(ind_good)):
indw = np.where(np.absolute(gpmag-gpmag[i]) < 1.0)[0]
if len(indw) > 2:
weights[i] = 1. / gcmag[indw].std()**2
p2 = np.poly1d(np.polyfit(gpmag, gcmag, 2, w=weights))
z[:,1] = p2(z[:,0])
# Improve bright-star calibration
if nbright > len(ind_good):
nbright = len(ind_good)
xbright = gpmag[:nbright]
ybright = gcmag[:nbright]
if nbright < 50:
p2 = np.poly1d(np.polyfit(xbright, ybright, 2))
vals = p2(xbright)
else:
z1 = sm.nonparametric.lowess(ybright, xbright,
frac=0.4, it=3, delta=0.1,
return_sorted=True)
vals = z1[:,1]
weight2 = np.arange(nbright, dtype=float) / nbright
weight1 = 1. - weight2
z[:nbright,1] = weight1 * vals + weight2 * z[:nbright,1]
# Improve faint-star calibration by fitting a 2nd order
# polynomial
# Currently, disable improvement
improve_faint = False
if improve_faint:
ind_faint = np.where(gpmag > mag_cut_prev-6.)[0]
nfaint = len(ind_faint)
if nfaint > 5:
xfaint = gpmag[ind_faint]
yfaint = gcmag[ind_faint]
weights = np.zeros(nfaint) + 1.
for i in np.arange(nfaint):
indw = np.where(np.absolute(xfaint-xfaint[i]) < 0.5)[0]
if len(indw) > 2:
weights[i] = 1. / yfaint[indw].std()**2
p2 = np.poly1d(np.polyfit(xfaint, yfaint, 2,
w=weights))
vals = p2(xfaint)
weight2 = (np.arange(nfaint, dtype=float) / nfaint)**1
weight1 = 1. - weight2
z[ind_faint,1] = weight2 * vals + weight1 * z[ind_faint,1]
# Interpolate smoothed calibration curve
s = InterpolatedUnivariateSpline(z[:,0], z[:,1], k=1)
ind_cut = np.where(plate_mag_u <= mag_cut)[0]
fit_mag = s(plate_mag_u[ind_cut])
residuals = cat_natmag[ind_cut] - fit_mag
mag_cut_prev = mag_cut
ind_outliers = np.array([], dtype=int)
# Mark as outliers those stars that deviate more than 1 mag
ind_out = np.where(np.absolute(residuals) > 1.0)
if len(ind_out) > 0:
ind_outliers = np.append(ind_outliers, ind_cut[ind_out])
ind_outliers = np.unique(ind_outliers)
# Additionally clip outliers in small bins
for mag_loc in np.linspace(plate_mag_brt, mag_cut, 100):
mag_low = mag_loc - 0.5
mag_high = mag_loc + 0.5
ind_loc = np.where((plate_mag_u[ind_cut] > mag_low) &
(plate_mag_u[ind_cut] < mag_high))[0]
ind_loc = np.setdiff1d(ind_loc, ind_outliers)
if len(ind_loc) >= 5:
rms_res = np.sqrt((residuals[ind_loc]**2).sum())
ind_locout = np.where(np.absolute(residuals[ind_loc]) >
3.*rms_res)[0]
if len(ind_locout) > 0:
ind_outliers = np.append(ind_outliers,
ind_cut[ind_loc[ind_locout]])
ind_outliers = np.unique(ind_outliers)
ind_good = np.setdiff1d(np.arange(len(ind_cut)),
ind_outliers)
#flt = sigma_clip(residuals, maxiters=None)
#ind_good = ~flt.mask
#ind_good = np.where(np.absolute(residuals) < 3*residuals.std())[0]
# Stop outlier elimination if there is a gap in magnitudes
if mag_cut - plate_mag_u[ind_cut[ind_good]].max() > 1.5:
ind_faintout = np.where(plate_mag_u > mag_cut)[0]
if len(ind_faintout) > 0:
ind_outliers = np.append(ind_outliers, ind_faintout)
ind_outliers = np.unique(ind_outliers)
ind_good = np.setdiff1d(np.arange(len(plate_mag_u)),
ind_outliers)
self.log.write('{:d} faint stars eliminated as outliers'
.format(len(ind_faintout)),
double_newline=False,
level=4, event=73, solution_num=solution_num)
self.log.write('Outlier elimination stopped due to a long gap '
'in magnitudes!', double_newline=False,
level=2, event=73, solution_num=solution_num)
break
if len(ind_good) < 10:
self.log.write('Outlier elimination stopped '
'due to insufficient number of stars left!',
double_newline=False, level=2, event=73,
solution_num=solution_num)
break
num_outliers = len(ind_outliers)
self.log.write('{:d} outliers eliminated'.format(num_outliers),
double_newline=False, level=4, event=73,
solution_num=solution_num)
ind_good = np.setdiff1d(np.arange(len(plate_mag_u)),
ind_outliers)
self.log.write('{:d} stars after outlier elimination'
.format(len(ind_good)), double_newline=False,
level=4, event=73, solution_num=solution_num)
if len(ind_good) < 10:
self.log.write('Too few calibration stars ({:d}) after outlier '
'elimination!'.format(len(ind_good)),
double_newline=False, level=2, event=73,
solution_num=solution_num)
return
# Continue with photometric calibration without outliers
# Study the distribution of magnitudes
kde = sm.nonparametric.KDEUnivariate(plate_mag_u[ind_good]
.astype(np.double))
kde.fit()
ind_maxden = np.argmax(kde.density)
plate_mag_maxden = kde.support[ind_maxden]
ind_dense = np.where(kde.density > 0.2*kde.density.max())[0]
plate_mag_lim = kde.support[ind_dense[-1]]
ind_valid = np.where(plate_mag_u[ind_good] <= plate_mag_lim)[0]
num_valid = len(ind_valid)
self.log.write('{:d} calibration stars brighter than limiting magnitude'
.format(num_valid), double_newline=False, level=4,
event=73, solution_num=solution_num)
#valid_cal_mask = np.zeros_like(cal_u_mask)
#valid_cal_mask[np.where(cal_u_mask)[0][ind_good[ind_valid]]] = True
ind_calibstar_valid = ind_calibstar_u[ind_good[ind_valid]]
self.sources['phot_calib_flags'][ind_calibstar_valid] = 1
if num_outliers > 0:
#outlier_mask = np.zeros_like(cal_u_mask)
#outlier_mask[np.where(cal_u_mask)[0][ind_outliers]]
ind_calibstar_outlier = ind_calibstar_u[ind_outliers]
self.sources['phot_calib_flags'][ind_calibstar_outlier] = 2
cat_natmag = cat_natmag[ind_good[ind_valid]]
plate_mag_u = plate_mag_u[ind_good[ind_valid]]
plate_mag_brightest = plate_mag_u.min()
frac = 0.2
if num_valid < 500:
frac = 0.2 + 0.3 * (500 - num_valid) / 500.
z = sm.nonparametric.lowess(cat_natmag, plate_mag_u,
frac=frac, it=3, delta=0.1,
return_sorted=True)
# Improve bright-star calibration
# Find magnitude at which the frequency of stars becomes
# larger than 500 mag^(-1)
#ind_500 = np.where((kde.density*len(ind_good) > 500))[0][0]
#brightmag = kde.support[ind_500]
# Find magnitude at which density becomes larger than 0.05 of
# the max density
#ind_dense_005 = np.where(kde.density > 0.05*kde.density.max())[0]
# Index of kde.support at which density becomes 0.05 of max
#ind0 = ind_dense_005[0]
#brightmag = kde.support[ind0]
#nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])
# Find magnitude at which density becomes larger than 0.2 of
# the max density
#brightmag = kde.support[ind_dense[0]]
#nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])
# Find the second percentile of magnitudes
nbright = round(num_valid * 0.02)
# Limit bright stars with 2000
nbright = min([nbright, 2000])
if nbright < 20:
brightmag = (plate_mag_brightest +
(plate_mag_maxden - plate_mag_brightest) * 0.5)
nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])
if nbright < 5:
nbright = 5
if nbright < 50:
p2 = np.poly1d(np.polyfit(plate_mag_u[:nbright],
cat_natmag[:nbright], 2))
vals = p2(plate_mag_u[:nbright])
else:
z1 = sm.nonparametric.lowess(cat_natmag[:nbright],
plate_mag_u[:nbright],
frac=0.4, it=3, delta=0.1,
return_sorted=True)
vals = z1[:,1]
t = Table()
t['plate_mag'] = plate_mag_u[:nbright]
t['cat_natmag'] = cat_natmag[:nbright]
t['fit_mag'] = vals
basefn_solution = '{}-{:02d}'.format(self.basefn, solution_num)
fn_tab = os.path.join(self.scratch_dir,
'{}_bright.fits'.format(basefn_solution))
t.write(fn_tab, format='fits', overwrite=True)
# Normalise density to max density of the bright range
#d_bright = kde.density[:ind0] / kde.density[:ind0].max()
# Find a smooth density curve and use values as weights
#s_bright = InterpolatedUnivariateSpline(kde.support[:ind0],
# d_bright, k=1)
#weight2 = s_bright(plate_mag_u[:nbright])
# Linearly increasing weight
weight2 = np.arange(nbright, dtype=float) / nbright
weight1 = 1. - weight2
# Merge two calibration curves with different weights
z[:nbright,1] = weight1 * vals + weight2 * z[:nbright,1]
# Interpolate the whole calibration curve
s = InterpolatedUnivariateSpline(z[:,0], z[:,1], k=1)
# Store the calibration curve
self.calib_curve = s
# Calculate residuals
residuals = cat_natmag - s(plate_mag_u)
# Smooth residuals with spline
X = self.sources['x_source'][ind_calibstar_valid].data
Y = self.sources['y_source'][ind_calibstar_valid].data
if num_valid > 100:
s_corr = SmoothBivariateSpline(X, Y, residuals, kx=5, ky=5)
elif num_valid > 50:
s_corr = SmoothBivariateSpline(X, Y, residuals, kx=3, ky=3)
else:
s_corr = None
# Calculate new residuals and correct for dependence on
# x, y, mag_auto. Do it only if the number of valid
# calibration stars is larger than 500.
s_magcorr = None
if num_valid > 500:
residuals2 = np.zeros(num_valid)
for i in np.arange(num_valid):
residuals2[i] = residuals[i] - s_corr(X[i], Y[i])
# Create magnitude bins
plate_mag_srt = np.sort(plate_mag_u)
bin_mag = [(plate_mag_srt[99] + plate_mag_srt[0]) / 2.]
bin_hw = [(plate_mag_srt[99] - plate_mag_srt[0]) / 2.]
ind_lastmag = 99
while True:
if plate_mag_srt[ind_lastmag+100] - bin_mag[-1] - bin_hw[-1] > 0.5:
bin_edge = bin_mag[-1] + bin_hw[-1]
bin_mag.append((plate_mag_srt[ind_lastmag+100] + bin_edge) / 2.)
bin_hw.append((plate_mag_srt[ind_lastmag+100] - bin_edge) / 2.)
ind_lastmag += 100
else:
bin_mag.append(bin_mag[-1] + bin_hw[-1] + 0.25)
bin_hw.append(0.25)
ind_lastmag = (plate_mag_srt < bin_mag[-1] + 0.25).sum() - 1
# If less than 100 sources remain
if ind_lastmag > num_valid - 101:
add_width = plate_mag_srt[-1] - bin_mag[-1] - bin_hw[-1]
bin_mag[-1] += add_width / 2.
bin_hw[-1] += add_width / 2.
break
# Evaluate natmag correction in magnitude bins
s_magcorr = []
for i, (m, hw) in enumerate(zip(bin_mag, bin_hw)):
binmask = (plate_mag_u > m-hw) & (plate_mag_u <= m+hw)
#print(m, m-hw, m+hw, binmask.sum())
smag = SmoothBivariateSpline(X[binmask], Y[binmask],
residuals2[binmask],
kx=3, ky=3)
s_magcorr.append(smag)
# Evaluate RMS errors from the calibration residuals
rmse_list = generic_filter(residuals, _rmse, size=10)
rmse_lowess = sm.nonparametric.lowess(rmse_list, plate_mag_u,
frac=0.5, it=3, delta=0.1)
s_rmse = InterpolatedUnivariateSpline(rmse_lowess[:,0],
rmse_lowess[:,1], k=1)
rmse = s_rmse(plate_mag_u)
if self.write_phot_dir:
np.savetxt(fcaldata, np.column_stack((plate_mag_u, cat_natmag,
s(plate_mag_u),
cat_natmag-s(plate_mag_u))))
fcaldata.write('\n\n')
# Store calibration statistics
bright_limit = s(plate_mag_brightest).item()
faint_limit = s(plate_mag_lim).item()
self.phot_calib['num_calib_stars'] = num_valid
self.phot_calib['num_bright_stars'] = nbright
self.phot_calib['num_outliers'] = num_outliers
self.phot_calib['bright_limit'] = bright_limit
self.phot_calib['faint_limit'] = faint_limit
self.phot_calib['mag_range'] = faint_limit - bright_limit
self.phot_calib['rmse_min'] = rmse.min()
self.phot_calib['rmse_median'] = np.median(rmse)
self.phot_calib['rmse_max'] = rmse.max()
self.phot_calib['plate_mag_brightest'] = plate_mag_brightest
self.phot_calib['plate_mag_density02'] = kde.support[ind_dense[0]]
self.phot_calib['plate_mag_brightcut'] = brightmag
self.phot_calib['plate_mag_maxden'] = plate_mag_maxden
self.phot_calib['plate_mag_lim'] = plate_mag_lim
# Append calibration results to the list
self.phot_calib_list.append(self.phot_calib)
# Apply photometric calibration to sources
sol_mask = ((self.sources['solution_num'] == solution_num) &
(self.sources['mag_auto'] < 90.))
num_solstars = sol_mask.sum()
mag_auto_sol = self.sources['mag_auto'][sol_mask]
self.log.write('Applying photometric calibration to sources '
'in annular bins 1-9',
level=3, event=74, solution_num=solution_num)
# Correct magnitudes for positional effects
if s_corr is not None:
natmag_corr = self.sources['natmag_correction'][sol_mask]
xsrc = self.sources['x_source'][sol_mask]
ysrc = self.sources['y_source'][sol_mask]
# Do a for-cycle, because SmoothBivariateSpline may crash with
# large input arrays
for i in np.arange(num_solstars):
# Apply first correction (dependent only on coordinates)
natmag_corr[i] = s_corr(xsrc[i], ysrc[i])
# Apply second correction (dependent on mag_auto)
if s_magcorr is not None:
corr_list = []
for smag in s_magcorr:
corr_list.append(smag(xsrc[i], ysrc[i])[0,0])
smc = InterpolatedUnivariateSpline(bin_mag, corr_list, k=1)
natmag_corr[i] += smc(mag_auto_sol[i])
# Assign magnitudes and errors
self.sources['natmag'][sol_mask] = s(mag_auto_sol)
self.sources['natmag_plate'][sol_mask] = s(mag_auto_sol)
self.sources['natmag_error'][sol_mask] = s_rmse(mag_auto_sol)
if s_corr is not None:
self.sources['natmag_correction'][sol_mask] = natmag_corr
self.sources['natmag'][sol_mask] += natmag_corr
self.sources['color_term'][sol_mask] = cterm
self.sources['natmag_residual'][ind_calibstar_u] = \
(self.sources['cat_natmag'][ind_calibstar_u] -
self.sources['natmag'][ind_calibstar_u])
# Apply flags and errors to sources outside the magnitude range
# of calibration stars
brange = (mag_auto_sol < plate_mag_brightest)
ind = np.where(sol_mask)[0][brange]
if brange.sum() > 0:
self.sources['phot_range_flags'][ind] = 1
self.sources['natmag_error'][ind] = s_rmse(plate_mag_brightest)
brange = (mag_auto_sol > plate_mag_lim)
ind = np.where(sol_mask)[0][brange]
if brange.sum() > 0:
self.sources['phot_range_flags'][ind] = 2
self.sources['natmag_error'][ind] = s_rmse(plate_mag_lim)
# Select stars with known external photometry
bgaia = (sol_mask &
~self.sources['gaiaedr3_bpmag'].mask &
~self.sources['gaiaedr3_rpmag'].mask)
if bgaia.sum() > 0:
bp_rp = self.sources['gaiaedr3_bp_rp'][bgaia]
bp_rp_err = 0.
self.sources['rpmag'][bgaia] = (self.sources['natmag'][bgaia]
- cterm * bp_rp)
self.sources['bpmag'][bgaia] = (self.sources['natmag'][bgaia]
- (cterm - 1.) * bp_rp)
rpmagerr = np.sqrt(self.sources['natmag_error'][bgaia]**2 +
(cterm_err * bp_rp)**2 +
(cterm * bp_rp_err)**2)
bpmagerr = np.sqrt(self.sources['natmag_error'][bgaia]**2 +
(cterm_err * bp_rp)**2 +
((cterm - 1.) * bp_rp_err)**2)
self.sources['rpmag_error'][bgaia] = rpmagerr
self.sources['bpmag_error'][bgaia] = bpmagerr
try:
brightlim = min([cal['bright_limit']
for cal in self.phot_calib_list
if cal['solution_num'] == solution_num
and cal['iteration'] == iteration])
faintlim = max([cal['faint_limit']
for cal in self.phot_calib_list
if cal['solution_num'] == solution_num
and cal['iteration'] == iteration])
mag_range = faintlim - brightlim
except Exception:
brightlim = None
faintlim = None
mag_range = None
if num_valid > 0:
self.phot_calibrated = True
self.bright_limit = brightlim
self.faint_limit = faintlim
self.log.write('Photometric calibration results (solution {:d}, '
'iteration {:d}): '
'bright limit {:.3f}, faint limit {:.3f}'
.format(solution_num, iteration, brightlim,
faintlim),
level=4, event=73, solution_num=solution_num)
if self.write_phot_dir:
fcaldata.close()
|
astrotuviREPO_NAMEpyplatePATH_START.@pyplate_extracted@pyplate-master@pyplate@process@photometry.py@.PATH_END.py
|
{
"filename": "data_list.py",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/threeML/data_list.py",
"type": "Python"
}
|
# Author: G.Vianello (giacomov@stanford.edu)
import collections
class DataList(object):
    """
    A container for data sets (i.e., plugin instances). Can be accessed
    as a dictionary, with the [key] operator, where the key is the
    unique name of the data set.
    """
    def __init__(self, *data_sets):
        """
        Container for data sets (i.e., plugin instances)
        :param data_sets: as many data sets as needed
        :return: (none)
        """
        self._inner_dictionary = collections.OrderedDict()
        for d in data_sets:
            # Delegate to insert() so the unique-name check lives in
            # exactly one place (it was previously duplicated here).
            self.insert(d)
    def insert(self, dataset):
        """
        Add one data set to the container.
        :param dataset: a plugin instance exposing a unique ``name``
        :raises RuntimeError: if a data set with the same name exists
        """
        # Enforce the unique name. Membership is tested on the dict
        # itself (O(1)), not on a materialized .keys() view.
        if dataset.name in self._inner_dictionary:
            raise RuntimeError(
                "You have to use unique names for data sets. %s already exists." % dataset.name
            )
        else:
            self._inner_dictionary[dataset.name] = dataset
    def __getitem__(self, key):
        # Dictionary-style access by data set name.
        return self._inner_dictionary[key]
    def keys(self):
        # Names of the contained data sets, in insertion order.
        return self._inner_dictionary.keys()
    def values(self):
        # The contained data sets themselves, in insertion order.
        return self._inner_dictionary.values()
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@threeML@data_list.py@.PATH_END.py
|
{
"filename": "_tick0.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmap/colorbar/_tick0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the `tick0` property of `heatmap.colorbar`."""

    def __init__(self, plotly_name="tick0", parent_name="heatmap.colorbar", **kwargs):
        # Resolve defaults up front so explicit caller overrides in
        # kwargs win, then forward everything to the base validator.
        edit_type = kwargs.pop("edit_type", "colorbars")
        implied_edits = kwargs.pop("implied_edits", {"tickmode": "linear"})
        role = kwargs.pop("role", "style")
        super(Tick0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmap@colorbar@_tick0.py@.PATH_END.py
|
{
"filename": "_outlinecolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/mapbox/layer/fill/_outlinecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the `outlinecolor` property of `layout.mapbox.layer.fill`."""

    def __init__(
        self,
        plotly_name="outlinecolor",
        parent_name="layout.mapbox.layer.fill",
        **kwargs,
    ):
        # Default edit type is "plot"; callers may override via kwargs.
        edit_type = kwargs.pop("edit_type", "plot")
        super(OutlinecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@mapbox@layer@fill@_outlinecolor.py@.PATH_END.py
|
{
"filename": "_tickfont.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scatter3d/line/colorbar/_tickfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
    """Sets the color bar's tick label font (scatter3d.line.colorbar.tickfont)."""

    # class properties
    # --------------------
    _parent_path_str = "scatter3d.line.colorbar"
    _path_str = "scatter3d.line.colorbar.tickfont"
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }

    # Every font attribute is a thin proxy over item access on the
    # underlying hierarchy type, so the get/set property pairs are
    # generated by a small factory instead of being spelled out by hand.
    def _proxy(_key, _doc):
        def _read(self):
            return self[_key]

        def _write(self, val):
            self[_key] = val

        return property(_read, _write, doc=_doc)

    color = _proxy(
        "color",
        """
        Tick label font color. May be specified as a hex string
        (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or hsv/hsva string,
        or any named CSS color.

        Returns
        -------
        str
        """,
    )
    family = _proxy(
        "family",
        """
        HTML font family applied by the web browser. Multiple
        comma-separated families express preference order in case a
        font is not available on the system. Must be a non-empty
        string.

        Returns
        -------
        str
        """,
    )
    lineposition = _proxy(
        "lineposition",
        """
        Kind of decoration line(s) drawn with the text: any
        '+'-joined combination of 'under', 'over' and 'through'
        (e.g. 'under+over'), or exactly 'none'.

        Returns
        -------
        Any
        """,
    )
    shadow = _proxy(
        "shadow",
        """
        Shape and color of the shadow behind text; "auto" places a
        minimal shadow and applies contrast text font color. CSS
        text-shadow syntax is accepted.

        Returns
        -------
        str
        """,
    )
    size = _proxy(
        "size",
        """
        Font size: an int or float in the interval [1, inf].

        Returns
        -------
        int|float
        """,
    )
    style = _proxy(
        "style",
        """
        Whether the font uses a normal or italic face from its
        family: one of ['normal', 'italic'].

        Returns
        -------
        Any
        """,
    )
    textcase = _proxy(
        "textcase",
        """
        Capitalization of text: one of
        ['normal', 'word caps', 'upper', 'lower'].

        Returns
        -------
        Any
        """,
    )
    variant = _proxy(
        "variant",
        """
        Variant of the font: one of ['normal', 'small-caps',
        'all-small-caps', 'all-petite-caps', 'petite-caps',
        'unicase'].

        Returns
        -------
        Any
        """,
    )
    weight = _proxy(
        "weight",
        """
        Weight (boldness) of the font: an int (or float cast to int)
        in the interval [1, 1000], or exactly one of
        ['normal', 'bold'].

        Returns
        -------
        int
        """,
    )

    # The factory is an implementation detail, not part of the class API.
    del _proxy

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Tickfont object

        Sets the color bar's tick label font

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatter3d.line
            .colorbar.Tickfont`
        color
            Tick label font color.
        family
            HTML font family (comma-separated preference list).
        lineposition
            Decoration line(s) drawn with the text.
        shadow
            Shape and color of the shadow behind text.
        size
            Font size (>= 1).
        style
            Normal or italic face.
        textcase
            Capitalization of text.
        variant
            Variant of the font.
        weight
            Weight (boldness) of the font.

        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")
        # Re-parenting fast path: adopt the given parent and stop.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter3d.line.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.line.colorbar.Tickfont`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties. An explicit keyword
        # argument wins over the same key inside `arg`; None means
        # "not provided" and leaves the property unset.
        for _name, _kwarg in (
            ("color", color),
            ("family", family),
            ("lineposition", lineposition),
            ("shadow", shadow),
            ("size", size),
            ("style", style),
            ("textcase", textcase),
            ("variant", variant),
            ("weight", weight),
        ):
            _v = arg.pop(_name, None)
            if _kwarg is not None:
                _v = _kwarg
            if _v is not None:
                self[_name] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@scatter3d@line@colorbar@_tickfont.py@.PATH_END.py
|
{
"filename": "attention_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/attention/attention_test.py",
"type": "Python"
}
|
import numpy as np
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AttentionTest(testing.TestCase):
def test_attention_basics(self):
# No scale, no concat.
self.run_layer_test(
layers.Attention,
init_kwargs={
"score_mode": "dot",
"dropout": 0.5,
},
input_shape=[(2, 3, 4), (2, 4, 4)],
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
# Scale and concat.
self.run_layer_test(
layers.Attention,
init_kwargs={
"use_scale": True,
"score_mode": "concat",
"dropout": 0.5,
},
input_shape=[(2, 3, 4), (2, 4, 4)],
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
def test_attention_correctness(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
# Dot.
layer = layers.Attention(score_mode="dot")
output, scores = layer(
[query, value, key],
return_attention_scores=True,
)
self.assertAllClose(
output, [[[2.462, 3.462], [1.538, 2.538]]], atol=1e-3
)
self.assertAllClose(
scores, [[[0.269, 0.731], [0.731, 0.269]]], atol=1e-3
)
# Concat.
layer = layers.Attention(score_mode="concat")
output, scores = layer(
[query, value, key],
return_attention_scores=True,
)
self.assertAllClose(
output, [[[1.727, 2.727], [2.272, 3.272]]], atol=1e-3
)
self.assertAllClose(
scores, [[[0.636, 0.363], [0.363, 0.636]]], atol=1e-3
)
def test_attention_with_mask(self):
layer = layers.Attention()
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
value = np.array([[[1.0, 1.0], [1.0, 1.0]]])
query_mask = np.array([[True, False]])
value_mask = np.array([[True, False]])
output, scores = layer(
[query, value],
mask=[query_mask, value_mask],
return_attention_scores=True,
)
self.assertAllClose(output, [[[1.0, 1.0], [0.0, 0.0]]])
self.assertAllClose(scores, [[[1.0, 0.0], [1.0, 0.0]]])
def test_attention_errors(self):
layer = layers.Attention()
tensor = np.array([[[1.0, 1.0], [1.0, 1.0]]])
with self.assertRaisesRegex(ValueError, "must be called on a list"):
layer(tensor)
with self.assertRaisesRegex(ValueError, "length 2 or 3"):
layer([tensor, tensor, tensor, tensor])
with self.assertRaisesRegex(ValueError, "layer mask must be a list"):
layer([tensor, tensor], mask=tensor)
with self.assertRaisesRegex(ValueError, "length 2 or 3"):
layer([tensor, tensor], mask=[tensor])
def test_attention_with_dropout(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
value = np.array([[[1.0, 1.0], [1.0, 1.0]]])
layer_with_dropout = layers.Attention(dropout=0.2)
layer_without_dropout = layers.Attention()
output1, scores1 = layer_with_dropout(
[query, value], return_attention_scores=True, training=True
)
output2, scores2 = layer_without_dropout(
[query, value], return_attention_scores=True, training=True
)
self.assertNotAllClose(output1, output2)
self.assertNotAllClose(scores1, scores2)
def test_attention_invalid_score_mode(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value for argument score_mode. "
"Expected one of {'dot', 'concat'}",
):
layers.Attention(score_mode="invalid_mode")
def test_attention_calculate_scores_with_scale(self):
query = np.random.random((2, 3, 4))
key = np.random.random((2, 4, 4))
layer = layers.Attention(use_scale=True, score_mode="dot")
layer.build(input_shape=[(2, 3, 4), (2, 4, 4)])
expected_scores = np.matmul(query, key.transpose((0, 2, 1)))
expected_scores *= layer.scale.numpy()
actual_scores = layer._calculate_scores(query, key)
self.assertAllClose(actual_scores, expected_scores)
def test_attention_calculate_score_mask_no_causal_no_vmask(self):
scores = np.random.random((2, 3, 4))
layer = layers.Attention()
mask = layer._calculate_score_mask(
scores, v_mask=None, use_causal_mask=False
)
self.assertIsNone(
mask,
"Mask should be None when no causal mask and no value mask "
"are used",
)
def test_attention_calculate_score_mask_with_causal_no_vmask(self):
scores = np.random.random((2, 3, 4))
layer = layers.Attention()
causal_mask = layer._calculate_score_mask(
scores, v_mask=None, use_causal_mask=True
)
expected_causal_mask = np.tril(
np.ones((1, scores.shape[1], scores.shape[2])), k=0
)
self.assertAllClose(causal_mask, expected_causal_mask, atol=1e-6)
def test_attention_calculate_score_mask_with_causal_and_vmask(self):
scores = np.random.random((2, 3, 4))
layer = layers.Attention()
v_mask = np.array([[True, False, True, False]])
combined_mask = layer._calculate_score_mask(
scores, v_mask=v_mask, use_causal_mask=True
)
expected_causal_mask = np.tril(
np.ones((1, scores.shape[1], scores.shape[2])), k=0
)
expected_combined_mask = np.logical_and(
expected_causal_mask, v_mask[:, np.newaxis, :]
)
self.assertAllClose(combined_mask, expected_combined_mask, atol=1e-6)
def test_attention_compute_mask_with_no_mask(self):
layer = layers.Attention()
dummy_inputs = [
np.random.random((2, 3, 4)),
np.random.random((2, 4, 4)),
]
self.assertIsNone(
layer.compute_mask(inputs=dummy_inputs, mask=None),
"compute_mask should return None when mask is None",
)
    def test_attention_compute_mask_with_first_element_none(self):
        """`compute_mask` returns None when the first (query) mask is None."""
        layer = layers.Attention()
        dummy_inputs = [
            np.random.random((2, 3, 4)),
            np.random.random((2, 4, 4)),
        ]
        mask = [None, np.array([True, False, True])]
        self.assertIsNone(
            layer.compute_mask(inputs=dummy_inputs, mask=mask),
            "compute_mask should return None when the first element is None",
        )
    def test_attention_compute_mask_does_not_return_none_with_valid_mask(self):
        """`compute_mask` returns a tensor when a query mask is provided."""
        layer = layers.Attention()
        dummy_inputs = [
            np.random.random((2, 3, 4)),
            np.random.random((2, 4, 4)),
        ]
        valid_mask = np.array([True, False, True])
        mask = [valid_mask, np.array([False, True, False])]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        self.assertIsNotNone(
            computed_mask,
            "compute_mask should not return None with a valid mask",
        )
    def test_attention_compute_mask_returns_correct_tensor_with_valid_mask(
        self,
    ):
        """`compute_mask` propagates the first (query) mask unchanged."""
        layer = layers.Attention()
        dummy_inputs = [
            np.random.random((2, 3, 4)),
            np.random.random((2, 4, 4)),
        ]
        valid_mask = np.array([True, False, True])
        mask = [valid_mask, np.array([False, True, False])]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        self.assertTrue(
            np.array_equal(computed_mask, valid_mask),
            "compute_mask did not return the correct mask tensor",
        )
    def test_attention_compute_mask_returns_correct_tensor_with_all_true_mask(
        self,
    ):
        """An all-True query mask is returned as-is by `compute_mask`."""
        layer = layers.Attention()
        dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
        valid_mask = np.array([True, True, True])
        mask = [valid_mask, np.array([True, True, True])]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        expected_mask = np.array([True, True, True])
        self.assertTrue(
            np.array_equal(computed_mask, expected_mask),
            "compute_mask did not return the correct mask tensor",
        )
    def test_attention_compute_mask_returns_correct_tensor_with_all_false_mask(
        self,
    ):
        """An all-False query mask is returned as-is by `compute_mask`."""
        layer = layers.Attention()
        dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
        valid_mask = np.array([False, False, False])
        mask = [valid_mask, np.array([False, False, False])]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        expected_mask = np.array([False, False, False])
        self.assertTrue(
            np.array_equal(computed_mask, expected_mask),
            "compute_mask did not return the correct mask tensor",
        )
    def test_attention_compute_mask_with_tolerance_1e_3(self):
        """Float mask values survive `compute_mask` within atol=1e-3."""
        layer = layers.Attention()
        dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
        valid_mask = np.array([1.0, 0.0, 1.0], dtype=float)
        mask = [valid_mask, np.array([0.0, 1.0, 0.0], dtype=float)]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        expected_mask = valid_mask
        self.assertTrue(
            np.allclose(computed_mask, expected_mask, atol=1e-3),
            "Incorrect mask tensor within tolerance 1e-3",
        )
    def test_attention_compute_mask_with_tolerance_1e_5(self):
        """Float mask values survive `compute_mask` within atol=1e-5."""
        layer = layers.Attention()
        dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
        valid_mask = np.array([1.0, 0.0, 1.0], dtype=float)
        mask = [valid_mask, np.array([0.0, 1.0, 0.0], dtype=float)]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        expected_mask = valid_mask
        self.assertTrue(
            np.allclose(computed_mask, expected_mask, atol=1e-5),
            "Incorrect mask tensor within tolerance 1e-5",
        )
    def test_attention_compute_mask_with_tolerance_1e_7(self):
        """Float mask values survive `compute_mask` within atol=1e-7."""
        layer = layers.Attention()
        dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
        valid_mask = np.array([1.0, 0.0, 1.0], dtype=float)
        mask = [valid_mask, np.array([0.0, 1.0, 0.0], dtype=float)]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        expected_mask = valid_mask
        self.assertTrue(
            np.allclose(computed_mask, expected_mask, atol=1e-7),
            "Incorrect mask tensor within tolerance 1e-7 ",
        )
    def test_attention_compute_mask_with_single_element_masks(self):
        """`compute_mask` preserves the shape of a length-1 mask."""
        layer = layers.Attention()
        dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
        valid_mask = np.array([True])
        mask = [valid_mask, np.array([False])]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        expected_shape = (1,)
        self.assertEqual(computed_mask.shape, expected_shape)
    def test_attention_compute_mask_with_non_boolean_masks(self):
        """`compute_mask` also accepts integer (0/1) masks."""
        layer = layers.Attention()
        dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
        valid_mask = np.array([1, 0, 1])
        mask = [valid_mask, np.array([0, 1, 0])]
        computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
        computed_mask = ops.convert_to_numpy(computed_mask)
        self.assertTrue(np.array_equal(computed_mask, valid_mask))
    def test_attention_compute_mask_with_edge_case_masks(self):
        """`compute_mask` echoes all-True, all-False and mixed query masks."""
        layer = layers.Attention()
        dummy_inputs = [np.ones((2, 3, 4)), np.ones((2, 4, 4))]
        edge_case_masks = [
            np.array([True, True, True]),
            np.array([False, False, False]),
            np.array([True, False, True]),
        ]
        for mask in edge_case_masks:
            computed_mask = layer.compute_mask(
                inputs=dummy_inputs, mask=[mask, mask]
            )
            computed_mask = ops.convert_to_numpy(computed_mask)
            self.assertTrue(np.array_equal(computed_mask, mask))
    def test_attention_compute_mask_with_different_input_shapes(self):
        """`compute_mask`'s result is independent of the input tensor shapes."""
        layer = layers.Attention()
        input_shapes = [(2, 3, 4), (3, 2, 5), (4, 1, 6)]
        valid_mask = np.array([True, False, True])
        for shape in input_shapes:
            dummy_inputs = [np.ones(shape), np.ones(shape)]
            mask = [valid_mask, np.array([False, True, False])]
            computed_mask = layer.compute_mask(inputs=dummy_inputs, mask=mask)
            computed_mask = ops.convert_to_numpy(computed_mask)
            self.assertTrue(np.array_equal(computed_mask, valid_mask))
def test_attention_compute_output_shape(self):
layer = layers.Attention()
query = np.random.random((2, 3, 4))
value = np.random.random((2, 3, 5))
key = np.random.random((2, 3, 4))
layer = layers.Attention()
output = layer([query, value, key])
self.assertAllEqual(output.shape, value.shape)
self.assertAllEqual(
layer.compute_output_shape(
input_shape=[query.shape, value.shape, key.shape]
),
output.shape,
)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@attention@attention_test.py@.PATH_END.py
|
{
"filename": "loss.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/losses/loss.py",
"type": "Python"
}
|
from keras.src import backend
from keras.src import dtype_policies
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils.naming import auto_name
@keras_export(["keras.Loss", "keras.losses.Loss"])
class Loss(KerasSaveable):
    """Loss base class.

    This is the class to subclass in order to create new custom losses.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    To be implemented by subclasses:

    * `call()`: Contains the logic for loss calculation using `y_true`,
        `y_pred`.

    Example subclass implementation:

    ```python
    class MeanSquaredError(Loss):
        def call(self, y_true, y_pred):
            return ops.mean(ops.square(y_pred - y_true), axis=-1)
    ```
    """
    def __init__(self, name=None, reduction="sum_over_batch_size", dtype=None):
        # Auto-generate a name (e.g. from the class name) when none is given.
        self.name = name or auto_name(self.__class__.__name__)
        # Validates the reduction string and raises ValueError on bad input.
        self.reduction = standardize_reduction(reduction)
        # Resolving through the dtype-policy machinery lets callers pass a
        # `DTypePolicy` (e.g. mixed precision); its compute dtype is used.
        self._dtype_policy = dtype_policies.get(dtype or backend.floatx())
        self._dtype = self._dtype_policy.compute_dtype
    @property
    def dtype(self):
        # Read-only dtype used for all loss computations.
        return self._dtype
    def __call__(self, y_true, y_pred, sample_weight=None):
        # Mask attached to `y_pred` by upstream computations, if any.
        in_mask = backend.get_keras_mask(y_pred)
        with ops.name_scope(self.name):
            # Cast every leaf of the (possibly nested) structures to the
            # loss dtype before computing.
            y_pred = tree.map_structure(
                lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_pred
            )
            y_true = tree.map_structure(
                lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_true
            )
            losses = self.call(y_true, y_pred)
            # `call` may attach its own mask to the per-sample losses.
            out_mask = backend.get_keras_mask(losses)
            # Combine input/output masks; `None` when neither is present.
            if in_mask is not None and out_mask is not None:
                mask = in_mask & out_mask
            elif in_mask is not None:
                mask = in_mask
            elif out_mask is not None:
                mask = out_mask
            else:
                mask = None
            return reduce_weighted_values(
                losses,
                sample_weight=sample_weight,
                mask=mask,
                reduction=self.reduction,
                dtype=self.dtype,
            )
    def call(self, y_true, y_pred):
        # Subclasses implement the actual per-sample loss computation here.
        raise NotImplementedError
    def get_config(self):
        # NOTE: dtype is not serialized here; only name and reduction are.
        return {"name": self.name, "reduction": self.reduction}
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def _obj_type(self):
        # Used by the Keras saving machinery to tag this object's kind.
        return "Loss"
def standardize_reduction(reduction):
    """Validate a loss `reduction` option and return it unchanged.

    Args:
        reduction: One of `"sum_over_batch_size"`, `"sum"`, `"mean"`,
            `"mean_with_sample_weight"`, `"none"` or `None`.

    Returns:
        The `reduction` value, verbatim.

    Raises:
        ValueError: If `reduction` is not a supported option.
    """
    valid_reductions = {
        "sum_over_batch_size",
        "sum",
        None,
        "none",
        "mean",
        "mean_with_sample_weight",
    }
    if reduction in valid_reductions:
        return reduction
    raise ValueError(
        "Invalid value for argument `reduction`. "
        f"Expected one of {valid_reductions}. Received: "
        f"reduction={reduction}"
    )
def squeeze_or_expand_to_same_rank(x1, x2, expand_rank_1=True):
    """Squeeze/expand last dim if ranks differ from expected by exactly 1."""
    rank1 = len(x1.shape)
    rank2 = len(x2.shape)
    # Only act when the ranks differ by exactly one and the longer tensor
    # ends in a trailing singleton dimension.
    if rank1 == rank2 + 1 and x1.shape[-1] == 1:
        if rank2 == 1 and expand_rank_1:
            # Prefer expanding a rank-1 tensor over squeezing the other.
            x2 = ops.expand_dims(x2, axis=-1)
        else:
            x1 = ops.squeeze(x1, axis=-1)
    elif rank2 == rank1 + 1 and x2.shape[-1] == 1:
        if rank1 == 1 and expand_rank_1:
            x1 = ops.expand_dims(x1, axis=-1)
        else:
            x2 = ops.squeeze(x2, axis=-1)
    # Equal ranks (or larger mismatches) pass through unchanged.
    return x1, x2
def reduce_values(values, sample_weight=None, reduction="sum_over_batch_size"):
    """Reduce per-sample loss `values` according to `reduction`.

    `sample_weight` is only consulted for the `"mean_with_sample_weight"`
    reduction, where its sum supplies the divisor.
    """
    # No-op reductions, scalar input, or empty input: return unchanged.
    if (
        reduction is None
        or reduction == "none"
        or tuple(values.shape) == ()
        or tuple(values.shape) == (0,)
    ):
        return values
    loss = ops.sum(values)
    if reduction in ("sum_over_batch_size", "mean", "mean_with_sample_weight"):
        if reduction == "mean_with_sample_weight" and sample_weight is not None:
            # Divide by the total sample weight rather than element count.
            divisor = ops.cast(ops.sum(sample_weight), loss.dtype)
        else:
            # Divide by the total number of elements in `values`.
            divisor = ops.cast(
                ops.prod(
                    ops.convert_to_tensor(ops.shape(values), dtype="int32")
                ),
                loss.dtype,
            )
        # divide_no_nan: a zero divisor yields 0 instead of NaN/Inf.
        loss = ops.divide_no_nan(loss, divisor)
    # Account for multi-replica distribution (tensorflow backend only).
    loss = scale_loss_for_distribution(loss)
    return loss
def reduce_weighted_values(
    values,
    sample_weight=None,
    mask=None,
    reduction="sum_over_batch_size",
    dtype=None,
):
    """Apply `mask` and `sample_weight` to `values`, then reduce.

    Args:
        values: Per-sample loss values.
        sample_weight: Optional per-sample weights.
        mask: Optional validity mask merged into `sample_weight`.
        reduction: Reduction mode (validated via `standardize_reduction`).
        dtype: Dtype the inputs are converted to before weighting.

    Returns:
        The reduced (possibly weighted and masked) loss.
    """
    reduction = standardize_reduction(reduction)
    values = ops.convert_to_tensor(values, dtype=dtype)
    if sample_weight is not None:
        sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype)
    if mask is not None:
        mask = ops.convert_to_tensor(mask, dtype=dtype)
    # Merge mask and sample weight into sample weight.
    sample_weight = apply_mask(
        sample_weight, mask, dtype=values.dtype, reduction=reduction
    )
    if sample_weight is not None:
        sample_weight = ops.cast(sample_weight, values.dtype)
        # Update dimensions of `sample_weight` to match `losses`.
        values, sample_weight = squeeze_or_expand_to_same_rank(
            values, sample_weight
        )
        values = values * sample_weight
    # Apply reduction function to the individual weighted losses.
    loss = reduce_values(values, sample_weight, reduction)
    return loss
def apply_mask(sample_weight, mask, dtype, reduction):
    """Applies any mask on predictions to sample weights.

    Args:
        sample_weight: Optional per-sample weights (or `None`).
        mask: Optional validity mask (or `None`); nonzero marks valid entries.
        dtype: Dtype used for the mask arithmetic.
        reduction: The downstream reduction; `"mean"` and
            `"sum_over_batch_size"` require renormalizing the mask.

    Returns:
        The merged sample weight, or `None` if both inputs were `None`.
    """
    if mask is not None:
        mask = ops.cast(mask, dtype=dtype)
        if reduction in ("mean", "sum_over_batch_size"):
            # Valid entries have weight `total/valid`, while invalid ones
            # have 0. When summed over batch, they will be reduced to:
            #
            # mean(loss * sample_weight * total / valid)
            #   = sum(loss * sample_weight * total / valid) / total
            #   = sum(loss * sample_weight) / total * total / valid
            #   = sum(loss * sample_weight) / valid
            total = ops.cast(
                ops.prod(ops.convert_to_tensor(ops.shape(mask), dtype="int32")),
                dtype,
            )
            valid = ops.sum(mask)  # May be 0!
            # Epsilon guards the division when no entry is valid.
            mask *= total / (valid + backend.epsilon())
        if sample_weight is not None:
            sample_weight = ops.cast(sample_weight, dtype=dtype)
            mask, sample_weight = squeeze_or_expand_to_same_rank(
                mask, sample_weight
            )
            sample_weight *= mask
        else:
            # No explicit weights: the (renormalized) mask becomes the weight.
            sample_weight = mask
    return sample_weight
def scale_loss_for_distribution(value):
    """Scales the given value by the number of replicas in the strategy.

    Currently, this function is only effective when using the tensorflow
    backend and `tf.distribute`.
    """
    if backend.backend() != "tensorflow":
        return value
    import tensorflow as tf

    replicas = tf.distribute.get_strategy().num_replicas_in_sync
    if replicas > 1:
        # Divide so that summing across replicas recovers the full loss.
        value = ops.multiply(value, ops.cast(1.0 / replicas, value.dtype))
    return value
def unscale_loss_for_distribution(value):
    """Unscales the given value by the number of replicas in the strategy.

    Currently, this function is only effective when using the tensorflow
    backend and `tf.distribute`.
    """
    if backend.backend() != "tensorflow":
        return value
    import tensorflow as tf

    replicas = tf.distribute.get_strategy().num_replicas_in_sync
    if replicas > 1:
        # Inverse of `scale_loss_for_distribution`.
        value = ops.multiply(value, ops.cast(replicas, value.dtype))
    return value
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@losses@loss.py@.PATH_END.py
|
{
"filename": "quadratic_tri.py",
"repo_name": "hpparvi/PyTransit",
"repo_path": "PyTransit_extracted/PyTransit-master/pytransit/models/limb_darkening/quadratic_tri.py",
"type": "Python"
}
|
from math import pi, sqrt
from numba import njit
from numpy import zeros
@njit(fastmath=True)
def ld_quadratic_tri(mu, pv):
    """Quadratic limb-darkening profile evaluated at `mu`.

    `pv` holds the two triangular-sampling parameters, mapped here to the
    standard quadratic coefficients (u, v) via a sqrt/linear transform
    (presumably the Kipping q1/q2 parameterisation — confirm).
    """
    root_q1 = sqrt(pv[0])
    two_q2 = 2 * pv[1]
    u = root_q1 * two_q2
    v = root_q1 * (1. - two_q2)
    return 1. - u * (1. - mu) - v * (1. - mu) ** 2
@njit(fastmath=True)
def ldi_quadratic_tri(pv):
    """Disk-integrated flux for the triangular quadratic limb-darkening law.

    Evaluates 2*pi * integral_0^1 (1 - u*(1-mu) - v*(1-mu)^2) * mu dmu,
    which simplifies to 2*pi/12 * (6 - 2*u - v).
    """
    root_q1 = sqrt(pv[0])
    two_q2 = 2 * pv[1]
    u = root_q1 * two_q2
    v = root_q1 * (1. - two_q2)
    return 2 * pi * 1 / 12 * (-2 * u - v + 6)
|
hpparviREPO_NAMEPyTransitPATH_START.@PyTransit_extracted@PyTransit-master@pytransit@models@limb_darkening@quadratic_tri.py@.PATH_END.py
|
{
"filename": "calc_depth_from_completeness.py",
"repo_name": "BEAST-Fitting/beast",
"repo_path": "beast_extracted/beast-master/beast/tools/calc_depth_from_completeness.py",
"type": "Python"
}
|
import numpy as np
from astropy.table import Table, vstack
from beast.physicsmodel.grid import SEDGrid
import beast.observationmodel.noisemodel.generic_noisemodel as noisemodel
from beast.observationmodel.vega import Vega
def calc_depth(
    physgrid_list,
    noise_model_list,
    completeness_value=0.5,
    vega_mag=True,
    vega_fname=None,
):
    """
    Calculate the observation depth of a field using the completeness. Some
    fields have low completeness at both faint and bright fluxes; this finds
    the faintest flux at which the completeness exceeds the given value(s).
    Parameters
    ----------
    physgrid_list : string or list of strings
        Name of the physics model file. If there are multiple physics model
        grids (i.e., if there are subgrids), list them all here.
    noise_model_list : string or list of strings
        Name of the noise model file. If there are multiple files for
        physgrid_list (because of subgrids), list the noise model file
        associated with each physics model file.
    completeness_value : float or list of floats
        The completeness(es) at which to evaluate the depth. Completeness is
        defined in the range 0.0 to 1.0.
    vega_mag : boolean (default=True)
        If True, return results in Vega mags. Otherwise returns flux in
        erg/s/cm^2/A.
    vega_fname : string
        filename for the vega info (useful for testing)
    Returns
    -------
    depth_dict : dictionary
        keys are the filters present in the grid, each value is the flux or Vega
        mag for each of the requested completeness values
    """
    # ------ Reading in data
    # If there are subgrids, we can't read them all into memory. Therefore,
    # we'll go through each one and just grab the relevant parts.
    compl_table_list = []
    # make a table for each physics model + noise model
    for physgrid, noise_model in zip(
        np.atleast_1d(physgrid_list), np.atleast_1d(noise_model_list)
    ):
        # get the physics model grid - includes priors
        modelsedgrid = SEDGrid(str(physgrid))
        # seds may be a lazy/disk-backed object exposing `.read()`
        if hasattr(modelsedgrid.seds, "read"):
            sed_grid = modelsedgrid.seds.read()
        else:
            sed_grid = modelsedgrid.seds
        # get list of filters
        # NOTE(review): `filter_list` is overwritten each iteration; assumes
        # all subgrids share the same filter set — confirm.
        filter_list = modelsedgrid.filters
        # read in the noise model
        noisegrid = noisemodel.get_noisemodelcat(str(noise_model))
        # get the completeness
        model_compl = noisegrid["completeness"]
        # put it all into a table
        table_dict = {filt: sed_grid[:, f] for f, filt in enumerate(filter_list)}
        table_dict.update(
            {filt + "_compl": model_compl[:, f] for f, filt in enumerate(filter_list)}
        )
        # append to the list
        compl_table_list.append(Table(table_dict))
    # stack all the tables into one
    compl_table = vstack(compl_table_list)
    # if chosen, get the vega fluxes for the filters
    if vega_mag:
        _, vega_flux, _ = Vega(source=vega_fname).getFlux(filter_list)
    # ------ Calculate depth
    # initialize dictionary to hold results
    depth_dict = {filt: [] for filt in filter_list}
    # grab numbers for each filter
    for f, filt in enumerate(filter_list):
        use_sed = compl_table[filt]
        use_comp = compl_table[filt + "_compl"]
        # get sorted versions of data (ascending flux)
        sort_ind = np.argsort(use_sed)
        sort_sed = use_sed[sort_ind]
        sort_comp = use_comp[sort_ind]
        # grab depths
        for compl in np.atleast_1d(completeness_value):
            # first check whether the noise model even covers this completeness
            # (in case there weren't sufficient ASTs)
            if (compl < np.min(sort_comp)) or (compl > np.max(sort_comp)):
                depth_dict[filt].append(np.nan)
                continue
            # find first instance of completeness > N
            # (i.e. the faintest/lowest flux whose completeness exceeds it)
            first_ind = np.where(sort_comp > compl)[0][0]
            # corresponding flux
            comp_flux = sort_sed[first_ind]
            # save it
            if vega_mag:
                # convert flux to a Vega magnitude
                depth_dict[filt].append(-2.5 * np.log10(comp_flux / vega_flux[f]))
            else:
                depth_dict[filt].append(comp_flux)
    # return the results
    return depth_dict
|
BEAST-FittingREPO_NAMEbeastPATH_START.@beast_extracted@beast-master@beast@tools@calc_depth_from_completeness.py@.PATH_END.py
|
{
"filename": "sample_fit_MdwarfPlanets_4D_MRSStM.py",
"repo_name": "shbhuk/mrexo",
"repo_path": "mrexo_extracted/mrexo-master/mrexo/datasets/MdwarfRuns/MdwarfPlanets_4D_MRSStM_20231102/sample_fit_MdwarfPlanets_4D_MRSStM.py",
"type": "Python"
}
|
# Fit a 4D radius-mass-insolation-stellar-mass relation for M-dwarf planets
# with MRExo. I/O heavy: reads the planet CSV, runs the fit, and writes the
# outputs under TestRuns/<RunName>.
import os, sys
from astropy.table import Table
import numpy as np  # fix: was imported twice in the original
import pandas as pd
import shutil
from mrexo.mle_utils_nd import InputData, MLE_fit
from mrexo.fit_nd import fit_relation
from mrexo.plotting_nd import Plot2DJointDistribution, Plot2DWeights, Plot1DInputDataHistogram
import matplotlib.pyplot as plt
# Resolve the machine-specific home directory.
Platform = sys.platform
if Platform == 'win32':
    HomeDir = 'C:\\Users\\skanodia\\Documents\\\\GitHub\\'
else:
    HomeDir = r"/storage/home/szk381/work/"
    HomeDir = r"/home/skanodia/work/"
try :
    pwd = os.path.dirname(__file__)
except NameError:
    # __file__ is undefined in interactive sessions; fall back to a default.
    pwd = os.path.join(HomeDir, 'mrexo', 'sample_scripts')
    print('Could not find pwd')
# Directory with dataset to be fit
DataDirectory = os.path.join(HomeDir, 'Mdwarf-Exploration', 'Data', 'MdwarfPlanets')
print(DataDirectory)
t = pd.read_csv(os.path.join(DataDirectory, 'Teff_4200_ExcUpperLimits_20231102.csv'))
# Mask NaNs: keep only planets with insolation errors and measured masses.
t = t[~np.isnan(t['pl_insolerr1'])]
t = t[~np.isnan(t['pl_masse'])]
# Define bounds in different dimensions (None disables the cut).
RadiusBounds = [0, 20]# None# [0, 100]
MassBounds = None# [0, 6000]
InsolationBounds = None# [0.01, 5000]
StellarMassBounds = None# [0.2, 1.2]
# Measurements with very small errors are set to NaN to avoid integration errors
t['st_masserr1'][t['st_masserr1'] < 0.005] = np.nan
t['st_masserr2'][t['st_masserr2'] < 0.005] = np.nan
if RadiusBounds is not None:
    t = t[(t.pl_rade > RadiusBounds[0]) & (t.pl_rade < RadiusBounds[1])]
if MassBounds is not None:
    t = t[(t.pl_masse > MassBounds[0]) & (t.pl_masse < MassBounds[1])]
if InsolationBounds is not None:
    t = t[(t.pl_insol > InsolationBounds[0]) & (t.pl_insol < InsolationBounds[1])]
if StellarMassBounds is not None:
    t = t[(t.st_mass > StellarMassBounds[0]) & (t.st_mass < StellarMassBounds[1])]
# Remove particular planets
RemovePlanets = ['Kepler-54 b', 'Kepler-54 c']
t = t[~np.isin(t.pl_name, RemovePlanets)]
print(len(t))
# In Earth units
Mass = np.array(t['pl_masse'])
# Asymmetrical errorbars
MassUSigma = np.array(abs(t['pl_masseerr1']))
MassLSigma = np.array(abs(t['pl_masseerr2']))
Radius = np.array(t['pl_rade'])
# Asymmetrical errorbars
RadiusUSigma = np.array(abs(t['pl_radeerr1']))
RadiusLSigma = np.array(abs(t['pl_radeerr2']))
StellarMass = np.array(t['st_mass'])
StellarMassUSigma = np.array(t['st_masserr1'])
StellarMassLSigma = np.array(t['st_masserr2'])
Insolation = np.array(t['pl_insol'])
InsolationUSigma = np.array(t['pl_insolerr1'])
InsolationLSigma = np.array(t['pl_insolerr2'])
# Let the script pick the max and min bounds, or can hard code those in. Note that the dataset must fall within the bounds if they are specified.
Max, Min = np.nan, np.nan
# Define input dictionary for each dimension
RadiusDict = {'Data': Radius, 'LSigma': RadiusLSigma, "USigma":RadiusUSigma, 'Max':Max, 'Min':Min, 'Label':'Radius ($R_{\oplus}$)', 'Char':'r'}
MassDict = {'Data': Mass, 'LSigma': MassLSigma, "USigma":MassUSigma, 'Max':Max, 'Min':Min, 'Label':'Mass ($M_{\oplus}$)', 'Char':'m'}
# PeriodDict = {'Data': Period, 'LSigma': PeriodSigma, "USigma":PeriodSigma, 'Max':Max, 'Min':Min, 'Label':'Period (d)', 'Char':'p'}
StellarMassDict = {'Data': StellarMass, 'LSigma': StellarMassLSigma, "USigma":StellarMassUSigma, 'Max':Max, 'Min':Min, 'Label':'Stellar Mass (M$_{\odot}$)', 'Char':'stm'}
InsolationDict = {'Data': Insolation, 'LSigma': InsolationLSigma, "USigma":InsolationUSigma, 'Max':Max, 'Min':Min, 'Label':'Pl Insol ($S_{\oplus}$)', 'Char':'insol'}
# 4D fit with planetary radius, mass, insolation and stellar mass
InputDictionaries = [RadiusDict, MassDict, InsolationDict, StellarMassDict]
DataDict = InputData(InputDictionaries)
ndim = len(InputDictionaries)
RunName = 'MdwarfPlanets_4D_MRSStM'
save_path = os.path.join(pwd, 'TestRuns', RunName)
# Hardcoded degrees per dimension. NOTE(review): currently unused (the fit
# below uses select_deg='cv'), and it has 3 entries for a 4D fit — confirm
# before passing it to `fit_relation`.
deg_per_dim = [25, 25, 25]
if __name__ == '__main__':
    outputs= fit_relation(DataDict, select_deg='cv', save_path=save_path, degree_max=60, cores=20,SymmetricDegreePerDimension=True, NumMonteCarlo=0, NumBootstrap=0)
    # NOTE(review): assumes this script lives at <pwd>/sample_fit.py — confirm.
    shutil.copy(os.path.join(pwd, 'sample_fit.py'), os.path.join(save_path, 'sample_fit_{}.py'.format(RunName)))
    _ = Plot1DInputDataHistogram(save_path)
    if ndim==2:
        # 2D diagnostic plots only make sense for a two-dimensional fit.
        _ = Plot2DJointDistribution(save_path)
        _ = Plot2DWeights(save_path)
|
shbhukREPO_NAMEmrexoPATH_START.@mrexo_extracted@mrexo-master@mrexo@datasets@MdwarfRuns@MdwarfPlanets_4D_MRSStM_20231102@sample_fit_MdwarfPlanets_4D_MRSStM.py@.PATH_END.py
|
{
"filename": "_edgeshape.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/pathbar/_edgeshape.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class EdgeshapeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `treemap.pathbar.edgeshape` enumerated attribute."""

    def __init__(
        self, plotly_name="edgeshape", parent_name="treemap.pathbar", **kwargs
    ):
        # Defaults may be overridden by explicit keyword arguments.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("values", [">", "<", "|", "/", "\\"])
        super(EdgeshapeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@pathbar@_edgeshape.py@.PATH_END.py
|
{
"filename": "_fill.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/volume/caps/x/_fill.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the numeric `volume.caps.x.fill` attribute (0..1)."""

    def __init__(self, plotly_name="fill", parent_name="volume.caps.x", **kwargs):
        # Defaults may be overridden by explicit keyword arguments.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("max", 1)
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(FillValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@volume@caps@x@_fill.py@.PATH_END.py
|
{
"filename": "test_starlets.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_LightModel/test_Profiles/test_starlets.py",
"type": "Python"
}
|
import numpy as np
import numpy.testing as npt
import pytest
import unittest
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.LightModel.Profiles.gaussian import Gaussian
from lenstronomy.LightModel.Profiles.starlets import SLIT_Starlets
from lenstronomy.Util import util
_force_no_pysap = (
True # if issues on Travis-CI to install pysap, force use python-only functions
)
class TestSLITStarlets(object):
    """Class to test SLIT_Starlets light profile."""
    def setup_method(self):
        # different versions of Starlet transforms
        self.starlets = SLIT_Starlets(
            fast_inverse=False, second_gen=False, force_no_pysap=_force_no_pysap
        )
        self.starlets_fast = SLIT_Starlets(
            fast_inverse=True, second_gen=False, force_no_pysap=_force_no_pysap
        )
        self.starlets_2nd = SLIT_Starlets(
            second_gen=True, force_no_pysap=_force_no_pysap
        )
        # define a test image with gaussian components
        self.num_pix = 50
        self.n_scales = 3
        self.n_pixels = self.num_pix**2
        self.x, self.y = util.make_grid(self.num_pix, 1)
        # build a non-trivial positive image from sum of gaussians
        gaussian = Gaussian()
        gaussian1 = gaussian.function(
            self.x, self.y, amp=100, sigma=1, center_x=-7, center_y=-7
        )
        gaussian2 = gaussian.function(
            self.x, self.y, amp=500, sigma=3, center_x=-3, center_y=-3
        )
        gaussian3 = gaussian.function(
            self.x, self.y, amp=2000, sigma=5, center_x=+5, center_y=+5
        )
        self.test_image = util.array2image(gaussian1 + gaussian2 + gaussian3)
        # reference coefficient cube: one gaussian per decomposition scale
        self.test_coeffs = np.zeros((self.n_scales, self.num_pix, self.num_pix))
        self.test_coeffs[0, :, :] = util.array2image(gaussian1)
        self.test_coeffs[1, :, :] = util.array2image(gaussian2)
        self.test_coeffs[2, :, :] = util.array2image(gaussian3)
    def test_reconstructions_2d(self):
        """2D reconstruction returns images of the expected shape.

        :return:
        """
        # PySAP requires call to decomposition once before anything else
        self.starlets.decomposition_2d(self.test_image, self.n_scales)
        self.starlets_fast.decomposition_2d(self.test_image, self.n_scales)
        self.starlets_2nd.decomposition_2d(self.test_image, self.n_scales)
        image = self.starlets.function_2d(
            coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels
        )
        image_fast = self.starlets_fast.function_2d(
            coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels
        )
        assert image.shape == (self.num_pix, self.num_pix)
        assert image_fast.shape == (self.num_pix, self.num_pix)
        image_2nd = self.starlets_2nd.function_2d(
            coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels
        )
        assert image_2nd.shape == (self.num_pix, self.num_pix)
        # second-generation reconstruction must stay non-negative here
        assert np.all(image_2nd >= 0)
    def test_decompositions_2d(self):
        """2D decompositions have the expected shape and agree across modes.

        :return:
        """
        # test equality between fast and std transform (which are identical)
        coeffs = self.starlets.decomposition_2d(self.test_image, self.n_scales)
        coeffs_fast = self.starlets_fast.decomposition_2d(
            self.test_image, self.n_scales
        )
        assert coeffs.shape == (self.n_scales, self.num_pix, self.num_pix)
        assert coeffs_fast.shape == (self.n_scales, self.num_pix, self.num_pix)
        npt.assert_almost_equal(coeffs, coeffs_fast, decimal=3)
        # test non-negativity of second generation starlet transform
        coeffs_2nd = self.starlets_2nd.decomposition_2d(self.test_image, self.n_scales)
        assert coeffs_2nd.shape == (self.n_scales, self.num_pix, self.num_pix)
    def test_function(self):
        """1D `function`/`decomposition` interfaces return flattened arrays.

        :return:
        """
        # PySAP requires call to decomposition once before anything else
        self.starlets.decomposition(self.test_image, self.n_scales)
        self.starlets_fast.decomposition(self.test_image, self.n_scales)
        self.starlets_2nd.decomposition(self.test_image, self.n_scales)
        # test with a 1D input
        self.starlets.decomposition(util.image2array(self.test_image), self.n_scales)
        coeffs_1d = self.test_coeffs.reshape(self.n_scales * self.num_pix**2)
        image_1d = self.starlets.function(
            self.x,
            self.y,
            amp=coeffs_1d,
            n_scales=self.n_scales,
            n_pixels=self.n_pixels,
        )
        assert image_1d.shape == (self.num_pix**2,)
        image_1d_fast = self.starlets_fast.function(
            self.x,
            self.y,
            amp=coeffs_1d,
            n_scales=self.n_scales,
            n_pixels=self.n_pixels,
        )
        assert image_1d_fast.shape == (self.num_pix**2,)
        image_1d_2nd = self.starlets_2nd.function(
            self.x,
            self.y,
            amp=coeffs_1d,
            n_scales=self.n_scales,
            n_pixels=self.n_pixels,
        )
        assert image_1d_2nd.shape == (self.num_pix**2,)
    def test_identity_operations_fast(self):
        """Test the decomposition/reconstruction.
        :return:
        """
        # decompose then reconstruct: should recover the input image
        coeffs = self.starlets_fast.decomposition_2d(self.test_image, self.n_scales)
        test_image_recon = self.starlets_fast.function_2d(
            coeffs=coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels
        )
        npt.assert_almost_equal(self.test_image, test_image_recon, decimal=5)
    def test_identity_operations_2nd(self):
        """Test the decomposition/reconstruction.
        :return:
        """
        # same round-trip check for the second-generation transform
        coeffs = self.starlets_2nd.decomposition_2d(self.test_image, self.n_scales)
        test_image_recon = self.starlets_2nd.function_2d(
            coeffs=coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels
        )
        npt.assert_almost_equal(self.test_image, test_image_recon, decimal=5)
    def test_delete_cache(self):
        """`delete_cache` must drop the cached interpolated image."""
        amp = self.test_coeffs.reshape(self.n_scales * self.num_pix**2)
        kwargs_starlets = dict(
            amp=amp,
            n_scales=self.n_scales,
            n_pixels=self.n_pixels,
            center_x=0,
            center_y=0,
            scale=1,
        )
        # calling `function` populates the interpolation cache
        output = self.starlets_fast.function(self.x, self.y, **kwargs_starlets)
        assert hasattr(self.starlets_fast.interpol, "_image_interp")
        self.starlets_fast.delete_cache()
        assert not hasattr(self.starlets_fast.interpol, "_image_interp")
    def test_coeffs2pysap(self):
        """Coefficient cube converts to a per-scale list of PySAP arrays."""
        n_scales = 3
        num_pix = 20
        coeffs = np.ones((n_scales, num_pix, num_pix))
        pysap_list = self.starlets._coeffs2pysap(coeffs)
        assert len(pysap_list) == n_scales
        for i in range(n_scales):
            assert pysap_list[i].shape == coeffs[i].shape
    def test_pysap2coeffs(self):
        """Per-scale PySAP list converts back to a coefficient cube."""
        n_scales = 3
        num_pix = 20
        pysap_list = n_scales * [np.ones((num_pix, num_pix))]
        coeffs = self.starlets._pysap2coeffs(pysap_list)
        assert coeffs.shape == (n_scales, num_pix, num_pix)
        for i in range(n_scales):
            assert pysap_list[i].shape == coeffs[i].shape
class TestRaise(unittest.TestCase):
    """Error-handling tests for pixel-based (starlet) light profiles."""

    @staticmethod
    def _gaussian_test_image(num_pix):
        """Build a non-trivial positive test image from a sum of three gaussians.

        :param num_pix: side length of the square image grid
        :return: 2D numpy array of shape (num_pix, num_pix)
        """
        x, y = util.make_grid(num_pix, 1)
        gaussian = Gaussian()
        gaussian1 = gaussian.function(x, y, amp=100, sigma=1, center_x=-7, center_y=-7)
        gaussian2 = gaussian.function(x, y, amp=500, sigma=3, center_x=-3, center_y=-3)
        gaussian3 = gaussian.function(x, y, amp=2000, sigma=5, center_x=+5, center_y=+5)
        return util.array2image(gaussian1 + gaussian2 + gaussian3)

    def test_raise(self):
        """Each invalid input below must raise a ValueError."""
        with self.assertRaises(ValueError):
            # try to set decomposition scale to higher than maximal value
            starlets = SLIT_Starlets(force_no_pysap=True)
            test_image = self._gaussian_test_image(50)
            n_scales = 100
            _ = starlets.decomposition_2d(test_image, n_scales)
        with self.assertRaises(ValueError):
            # try to set decomposition scale to negative value
            starlets = SLIT_Starlets(force_no_pysap=True)
            test_image = self._gaussian_test_image(50)
            n_scales = -1
            _ = starlets.decomposition_2d(test_image, n_scales)
        with self.assertRaises(ValueError):
            # function_split is not supported/defined for pixel-based profiles
            light_model = LightModel(["SLIT_STARLETS"])
            num_pix = 20
            x, y = util.make_grid(num_pix, 1)
            kwargs_list = [
                {
                    "amp": np.ones((3, num_pix, num_pix)),
                    "n_scales": 3,
                    "n_pixels": 20**2,
                    "center_x": 0,
                    "center_y": 0,
                    "scale": 1,
                }
            ]
            _ = light_model.functions_split(x, y, kwargs_list)
        with self.assertRaises(ValueError):
            # provided a wrong shape for starlet coefficients
            starlet_class = SLIT_Starlets()
            num_pix = 20
            x, y = util.make_grid(num_pix, 1)
            coeffs_wrong = np.ones((3, num_pix**2))
            kwargs_list = {
                "amp": coeffs_wrong,
                "n_scales": 3,
                "n_pixels": 20**2,
                "center_x": 0,
                "center_y": 0,
                "scale": 1,
            }
            _ = starlet_class.function(x, y, **kwargs_list)
            image_wrong = np.ones((1, num_pix, num_pix))
            _ = starlet_class.decomposition(image_wrong, 3)
        with self.assertRaises(ValueError):
            # provided a wrong shape for image to be decomposed
            starlet_class = SLIT_Starlets()
            num_pix = 20
            image_wrong = np.ones((2, num_pix, num_pix))
            _ = starlet_class.decomposition(image_wrong, 3)
# Run the whole test module when executed directly (pytest picks up the
# classes above).
if __name__ == "__main__":
    pytest.main()
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_LightModel@test_Profiles@test_starlets.py@.PATH_END.py
|
{
"filename": "test_meshtools.py",
"repo_name": "bccp/nbodykit",
"repo_path": "nbodykit_extracted/nbodykit-master/nbodykit/tests/test_meshtools.py",
"type": "Python"
}
|
from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from nbodykit.meshtools import SlabIterator
from pmesh.pm import ParticleMesh, RealField, ComplexField
import pytest
from numpy.testing import assert_array_equal
# debug logging
setup_logging("debug")
@MPITest([1])
def test_wrong_ndim(comm):
    """A 1D coordinate mesh must be rejected by SlabIterator."""
    numpy.random.seed(42)
    pm = ParticleMesh(BoxSize=8.0, Nmesh=[8, 8], comm=comm, dtype='f8')
    rfield = RealField(pm)
    rfield[...] = numpy.random.random(size=rfield.shape)[:]
    # SlabIterator only works for 2D or 3D coordinate meshes
    with pytest.raises(NotImplementedError):
        for _ in SlabIterator([rfield.x[0]], axis=0, symmetry_axis=None):
            pass
@MPITest([1])
def test_wrong_coords_shape(comm):
    """Squeezed coordinate arrays must be rejected by SlabIterator."""
    numpy.random.seed(42)
    pm = ParticleMesh(BoxSize=8.0, Nmesh=[8, 8], comm=comm, dtype='f8')
    rfield = RealField(pm)
    rfield[...] = numpy.random.random(size=rfield.shape)[:]
    # coords arrays should not be squeezed
    squeezed = [numpy.squeeze(xx) for xx in rfield.x]
    with pytest.raises(ValueError):
        for _ in SlabIterator(squeezed, axis=0, symmetry_axis=None):
            pass
@MPITest([1, 4])
def test_2d_slab(comm):
    """Iterate slabs of a 2D real field and check shape, weights, and data."""
    numpy.random.seed(42)
    pm = ParticleMesh(BoxSize=8.0, Nmesh=[8, 8], comm=comm, dtype='f8')
    rfield = RealField(pm)
    data = numpy.random.random(size=rfield.shape)
    rfield[...] = data[:]
    for i, slab in enumerate(SlabIterator(rfield.x, axis=0, symmetry_axis=None)):
        assert str(slab) == repr(slab)
        assert slab.shape == (8,)
        # no symmetry axis: every mode carries unit hermitian weight
        assert_array_equal(slab.hermitian_weights, numpy.ones(slab.shape))
        assert_array_equal(rfield[slab.index], data[i])
@MPITest([1, 4])
def test_hermitian_weights(comm):
    """Check hermitian symmetry weights when iterating over a symmetry axis.

    Slabs at a positive frequency along the symmetry axis should carry a
    weight greater than 1 and be flagged nonsingular; others carry unit
    weight.
    """
    numpy.random.seed(42)
    pm = ParticleMesh(BoxSize=8.0, Nmesh=[8, 8, 8], comm=comm, dtype='f8')
    cfield = ComplexField(pm)
    data = numpy.random.random(size=cfield.shape)
    data = data[:] + 1j*data[:]
    cfield[...] = data[:]
    x = cfield.x
    # iterate over symmetry axis
    for i, slab in enumerate(SlabIterator(x, axis=2, symmetry_axis=2)):
        # nonsingular weights give indices of positive frequencies
        nonsig = slab.nonsingular
        weights = slab.hermitian_weights
        # weights == 2 when iterating frequency is positive
        # NOTE: numpy.float was removed in NumPy 1.24; use the builtin float
        if float(slab.coords(2)) > 0.:
            assert weights > 1
            assert numpy.all(nonsig)
        else:
            assert weights == 1.0
            assert not numpy.any(nonsig)
|
bccpREPO_NAMEnbodykitPATH_START.@nbodykit_extracted@nbodykit-master@nbodykit@tests@test_meshtools.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattersmith/marker/colorbar/tickfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``family`` property of
    ``scattersmith.marker.colorbar.tickfont``.
    """

    def __init__(
        self,
        plotly_name="family",
        parent_name="scattersmith.marker.colorbar.tickfont",
        **kwargs,
    ):
        # Fill in this property's defaults unless the caller overrides them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattersmith@marker@colorbar@tickfont@_family.py@.PATH_END.py
|
{
"filename": "CONTRIBUTING.md",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/CONTRIBUTING.md",
"type": "Markdown"
}
|
# Contributing to PyCBC
This page outlines the recommended procedure for contributing changes to the PyCBC repository. Please familiarise yourself with [GitHub](https://github.com) and ensure your account is configured according to these instructions.
## Reporting Issues
When reporting issues, please include as much detail as possible to reproduce the error, including information about your operating system and the version of each (relevant) component of PyCBC.
If possible, please include a brief, self-contained code example that demonstrates the problem.
## Contributing code
All contributions to PyCBC code must be made using the [GitHub Flow](https://guides.github.com/introduction/flow/) model, which must then be reviewed by one of the project maintainers.
If you wish to contribute new code, or changes to existing code, please follow the following development workflow.
### Make a fork (copy) of PyCBC
**You only need to do this once**
1. Go to the [PyCBC repository home page](https://github.com/gwastro/pycbc)
2. Click on the *Fork* button (top-right-hand corner)
3. Select the namespace that you want to create the fork in, this will usually be your personal namespace
### Clone your fork
```bash
git clone https://github.com/<username>/pycbc.git
```
### Updating your fork
If you already have a fork of PyCBC, and are starting work on a new project you can link your clone to the main (`gwastro`) repository and pull in changes that have been merged since the time you created your fork, or last updated:
1. Link your fork to the main repository:
```bash
cd pycbc
git remote add gwastro https://github.com/gwastro/pycbc.git
```
2. Fetch new changes from the `gwastro` repo
```bash
git fetch gwastro
```
### Creating a new feature branch
All changes should be developed on a feature branch, in order to keep them separate from other work, simplifying review and merge once the work is done.
To create a new feature branch:
```bash
git fetch gwastro
git checkout -b my-new-feature gwastro/master
```
### Hack away
1. Develop the changes you would like to introduce, using `git commit` to finalise a specific change.
Ideally commit small units of change often, rather than creating one large commit at the end, this will simplify review and make modifying any changes easier.
Commit messages should be clear, identifying which code was changed, and why.
Common practice is to use a short summary line (<50 characters), followed by a blank line, then more information in longer lines.
2. Push your changes to the remote copy of your fork on GitHub
```bash
git push origin my-new-feature
```
**Note:** For the first `push` of any new feature branch, you will likely have to use the `-u/--set-upstream` option to `push` to create a link between your new branch and the `origin` remote:
```bash
git push --set-upstream origin my-new-feature
```
### Open a Pull Request
When you feel that your work is finished, you should create a Pull Request to propose that your changes be merged into the main (`gwastro`) repository.
After you have pushed your new feature branch to `origin`, you should find a new button on the [PyCBC repository home page](https://github.com/gwastro/pycbc/) inviting you to create a Pull Request out of your newly pushed branch.
You should click the button, and proceed to fill in the title and description boxes on the PR page.
Once the request has been opened, one of the maintainers will assign someone to review the change.
During this process you might receive some suggestions about code layout (mostly automated). Please see here for guidance on that:
https://github.com/gwastro/pycbc/wiki/How-to-satisfy-CodeClimate
## More Information
More information regarding the usage of GitHub can be found in the [GitHub Guides](https://guides.github.com/).
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@CONTRIBUTING.md@.PATH_END.py
|
{
"filename": "sophia.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/lexers/sophia.py",
"type": "Python"
}
|
"""
pygments.lexers.sophia
~~~~~~~~~~~~~~~~~~~~~~
Lexer for Sophia.
Derived from pygments/lexers/reason.py.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, default, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text
__all__ = ['SophiaLexer']
class SophiaLexer(RegexLexer):
    """
    A Sophia lexer.
    """
    name = 'Sophia'
    aliases = ['sophia']
    filenames = ['*.aes']
    mimetypes = []
    url = 'https://docs.aeternity.com/aesophia'
    version_added = '2.11'
    # Reserved words of the Sophia language
    keywords = (
        'contract', 'include', 'let', 'switch', 'type', 'record', 'datatype',
        'if', 'elif', 'else', 'function', 'stateful', 'payable', 'public',
        'entrypoint', 'private', 'indexed', 'namespace', 'interface', 'main',
        'using', 'as', 'for', 'hiding',
    )
    # Built-in names highlighted separately from keywords
    builtins = ('state', 'put', 'abort', 'require')
    # Word-form operators (highlighted as operators, not identifiers)
    word_operators = ('mod', 'band', 'bor', 'bxor', 'bnot')
    # Built-in type names
    primitive_types = ('int', 'address', 'bool', 'bits', 'bytes', 'string',
                       'list', 'option', 'char', 'unit', 'map', 'event',
                       'hash', 'signature', 'oracle', 'oracle_query')
    # NOTE: rule order within each state matters — earlier patterns win.
    tokens = {
        # Shared escape sequences, included from the 'string' state
        'escape-sequence': [
            (r'\\[\\"\'ntbr]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
        ],
        # Top-level state
        'root': [
            (r'\s+', Text.Whitespace),
            (r'(true|false)\b', Keyword.Constant),
            # Capitalized name followed by '.' starts a dotted path
            (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Class, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name.Function),
            (r'//.*?\n', Comment.Single),
            # '/*' opens a (nestable) block comment; '(?!/)' avoids '/*/'
            (r'\/\*(?!/)', Comment.Multiline, 'comment'),
            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            (r'#[\da-fA-F][\da-fA-F_]*', Name.Label),
            (r'\d[\d_]*', Number.Integer),
            (words(keywords, suffix=r'\b'), Keyword),
            (words(builtins, suffix=r'\b'), Name.Builtin),
            (words(word_operators, prefix=r'\b', suffix=r'\b'), Operator.Word),
            (words(primitive_types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            (r'[=!<>+\\*/:&|?~@^-]', Operator.Word),
            (r'[.;:{}(),\[\]]', Punctuation),
            # Chain-literal prefixes (account, oracle, query, contract ids)
            (r"(ak_|ok_|oq_|ct_)[\w']*", Name.Label),
            (r"[^\W\d][\w']*", Name),
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
             String.Char),
            (r"'.'", String.Char),
            # 'a-style type variables
            (r"'[a-z][\w]*", Name.Variable),
            (r'"', String.Double, 'string')
        ],
        # Block comments; '#push'/'#pop' make them nestable
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            (r'\/\*', Comment.Multiline, '#push'),
            (r'\*\/', Comment.Multiline, '#pop'),
            (r'\*', Comment.Multiline),
        ],
        # Inside a double-quoted string literal
        'string': [
            (r'[^\\"]+', String.Double),
            include('escape-sequence'),
            (r'\\\n', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        # After a capitalized name followed by '.', e.g. Module.Sub.name
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Function),
            (r'[A-Z][\w\']*', Name.Function, '#pop'),
            (r'[a-z_][\w\']*', Name, '#pop'),
            default('#pop'),
        ],
    }
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@lexers@sophia.py@.PATH_END.py
|
{
"filename": "_minexponent.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter/marker/colorbar/_minexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``minexponent`` property of
    ``scatter.marker.colorbar``.
    """

    def __init__(
        self, plotly_name="minexponent", parent_name="scatter.marker.colorbar", **kwargs
    ):
        # Fill in this property's defaults unless the caller overrides them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(MinexponentValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter@marker@colorbar@_minexponent.py@.PATH_END.py
|
{
"filename": "YaleCAMGalaxyCatalog.py",
"repo_name": "LSSTDESC/descqa",
"repo_path": "descqa_extracted/descqa-master/v1/GCRCatalogs/YaleCAMGalaxyCatalog.py",
"type": "Python"
}
|
# Yale CAM galaxy catalogue class
# Duncan Campbell
# Yale University
# February, 2016
# load modules
import os
import re
import numpy as np
import h5py
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
from .GalaxyCatalogInterface import GalaxyCatalog
__all__ = ['YaleCAMGalaxyCatalog']
class YaleCAMGalaxyCatalog(GalaxyCatalog):
    """
    Yale CAM galaxy catalog class.

    Notes
    -----
    The Yale CAM galaxy mocks store all physical properties internally in
    units where h=1. ``load`` converts them into physical units using the
    assumed cosmology (see arXiv:1308.4150 for the unit conventions).
    """
    def __init__(self, **kwargs):
        """
        Initialize Yale CAM galaxy catalog class.

        Parameters
        ----------
        fn : string
            filename of mock catalog
        """
        fn = kwargs.get('fn')
        # set file type and location
        self.type_ext = 'hdf5'
        # set fixed properties
        self.lightcone = False
        self.cosmology = FlatLambdaCDM(H0=70.2, Om0 = 0.275)
        self.simulation = 'Massive Black'
        # box size is stored in h=1 units; convert to physical Mpc
        self.box_size = 100.0 / self.cosmology.h
        self.volume = self.box_size**3.0
        self.SDSS_kcorrection_z = 0
        # translates between desc keywords to those used in the stored mock
        # note: all appropriate quantities are in h=1 units.
        self.quantities = { 'stellar_mass': self._stored_property_wrapper('stellar_mass'),
                            'mass':         self._stored_property_wrapper('halo_mvir'),
                            'ssfr':         self._stored_property_wrapper('SSFR'),
                            'halo_id':      self._stored_property_wrapper('halo_id'),
                            'positionX':    self._stored_property_wrapper('x'),
                            'positionY':    self._stored_property_wrapper('y'),
                            'positionZ':    self._stored_property_wrapper('z'),
                            'velocityX':    self._stored_property_wrapper('vx'),
                            'velocityY':    self._stored_property_wrapper('vy'),
                            'velocityZ':    self._stored_property_wrapper('vz'),
                            'SDSS_u:rest:': self._stored_property_wrapper('absmag_u'),
                            'SDSS_g:rest:': self._stored_property_wrapper('absmag_g'),
                            'SDSS_r:rest:': self._stored_property_wrapper('absmag_r'),
                            'SDSS_i:rest:': self._stored_property_wrapper('absmag_i'),
                            'SDSS_z:rest:': self._stored_property_wrapper('absmag_z'),
                            'SDSS_u:observed:': self._stored_property_wrapper('mag_u'),
                            'SDSS_g:observed:': self._stored_property_wrapper('mag_g'),
                            'SDSS_r:observed:': self._stored_property_wrapper('mag_r'),
                            'SDSS_i:observed:': self._stored_property_wrapper('mag_i'),
                            'SDSS_z:observed:': self._stored_property_wrapper('mag_z'),
                            'g-r':              self._stored_property_wrapper('g-r'),
                            'parent_halo_id':   self._stored_property_wrapper('halo_upid'),
                          }
        # base-class __init__ triggers load(); preserve the original
        # (unusual) return-of-__init__ for interface compatibility
        return GalaxyCatalog.__init__(self, fn)

    def load(self, fn='yale_cam_age_matching_LiWhite_2009_z0.0.hdf5'):
        """
        load mock galaxy catalog

        Parameters
        ----------
        fn : string
            filename of mock catalog
        """
        #extract mock parameters from filename (second-to-last number is the redshift)
        nums = re.findall(r"[-+]?\d*\.\d+|\d+", fn)
        self.redshift = float(nums[-2])
        f = h5py.File(fn, 'r')
        # h5py's .keys() returns a set-like view in Python 3, which is not
        # indexable; next(iter(...)) works under both Python 2 and 3
        toplevel = next(iter(f.keys()))
        self._data = f.get(toplevel)
        #convert quantities into physical units given the cosmology
        #see 'notes' section of the Yale CAM class.
        #see arXiv:1308.4150
        #cast hdf5 data as numpy array to allow modification of data without modifying file contents
        self.Xdata=np.array(self._data)
        self.Xdata['stellar_mass'] = self._data['stellar_mass']/(self.cosmology.h)**2
        self.Xdata['x'] = self._data['x']/(self.cosmology.h)
        self.Xdata['y'] = self._data['y']/(self.cosmology.h)
        self.Xdata['z'] = self._data['z']/(self.cosmology.h)
        self.Xdata['halo_mvir'] = self._data['halo_mvir']/(self.cosmology.h)
        # absolute magnitudes: M = M(h=1) + 5*log10(h)
        self.Xdata['absmag_u'] = self._data['absmag_u'] + 5.0*np.log10(self.cosmology.h)
        self.Xdata['absmag_g'] = self._data['absmag_g'] + 5.0*np.log10(self.cosmology.h)
        self.Xdata['absmag_r'] = self._data['absmag_r'] + 5.0*np.log10(self.cosmology.h)
        self.Xdata['absmag_i'] = self._data['absmag_i'] + 5.0*np.log10(self.cosmology.h)
        self.Xdata['absmag_z'] = self._data['absmag_z'] + 5.0*np.log10(self.cosmology.h)
        #I think this is the correct thing to do with apparent magnitudes
        # NOTE(review): opposite sign from the absolute magnitudes — confirm
        self.Xdata['mag_u'] = self._data['mag_u'] - 5.0*np.log10(self.cosmology.h)
        self.Xdata['mag_g'] = self._data['mag_g'] - 5.0*np.log10(self.cosmology.h)
        self.Xdata['mag_r'] = self._data['mag_r'] - 5.0*np.log10(self.cosmology.h)
        self.Xdata['mag_i'] = self._data['mag_i'] - 5.0*np.log10(self.cosmology.h)
        self.Xdata['mag_z'] = self._data['mag_z'] - 5.0*np.log10(self.cosmology.h)
        #how many galaxies are in the catalog?
        self.Ngals = len(self._data)
        return self

    def _construct_mask(self, filters):
        """
        Construct a mask array for use in filtering the catalog.

        Parameters
        ----------
        filters: dict
            dictionary of filter constraints

        Returns
        -------
        mask : numpy.array
            numpy array boolean mask
        """
        #check that filters is of the correct type
        if type(filters) is not dict:
            msg = ('filters must be given as a dictionary type.')
            raise TypeError(msg)
        #initialize filter: all galaxies pass by default
        mask = np.ones((self.Ngals), dtype=bool)
        #generate boolean mask
        for filter_name in filters.keys():
            #place code here to create filter(s)
            pass
        return mask

    def _get_stored_property(self, quantity, filters):
        """
        Return the requested property of galaxies in the mock catalog.

        Parameters
        ----------
        quantity : string
            key into mock galaxy catalogue of galaxy property
        filters : dict
            dictionary of filter constraints

        Returns
        -------
        property : numpy.array
            numpy array of requested property from the catalogue
        """
        #build filter
        filter_mask = self._construct_mask(filters)
        #return requested data as an array
        return self.Xdata[quantity][np.where(filter_mask)]

    def _stored_property_wrapper(self, name):
        """
        private function used to translate desc keywords into stored keywords in the mock

        Parameters
        ----------
        name : string
            key into stored mock catalogue

        Returns
        -------
        callable
            function of (quantity, filter) returning the stored column
        """
        return (lambda quantity, filter : self._get_stored_property(name, filter))
|
LSSTDESCREPO_NAMEdescqaPATH_START.@descqa_extracted@descqa-master@v1@GCRCatalogs@YaleCAMGalaxyCatalog.py@.PATH_END.py
|
{
"filename": "_tickformatstopdefaults.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/marker/colorbar/_tickformatstopdefaults.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``tickformatstopdefaults`` property of
    ``bar.marker.colorbar``.
    """

    def __init__(
        self,
        plotly_name="tickformatstopdefaults",
        parent_name="bar.marker.colorbar",
        **kwargs,
    ):
        super(TickformatstopdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # the compound child class this validator instantiates
            data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            """,
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@marker@colorbar@_tickformatstopdefaults.py@.PATH_END.py
|
{
"filename": "Greybody_emulator.ipynb",
"repo_name": "H-E-L-P/XID_plus",
"repo_path": "XID_plus_extracted/XID_plus-master/docs/build/doctrees/nbsphinx/notebooks/examples/SED_emulator/Greybody_emulator.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import fitIR
import fitIR.models as models
import fitIR.analyse as analyse
from astropy.cosmology import WMAP9 as cosmo
import jax
import numpy as np
import pylab as plt
import astropy.units as u
import scipy.integrate as integrate
%matplotlib inline
import jax.numpy as jnp
from jax import grad, jit, vmap, value_and_grad
from jax import random
# Generate key which is used to generate random numbers
key = random.PRNGKey(1)
```
/Users/pdh21/anaconda3/envs/xidplus/lib/python3.6/site-packages/jax/lib/xla_bridge.py:130: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
```python
from jax.experimental import stax
```
## Make training set by randomly sampling parameter space
```python
totlir = np.arange(8,15,0.6)
redshift = np.arange(0.01,5.01,0.5)
temperature = np.arange(10,70,10)
```
```python
def generate_samples(size=100):
#get parameter values from uniform distribution
LIR=np.random.uniform(low=8,high=15,size=size)
redshift=np.random.uniform(low=0.01,high=6,size=size)
temperature=np.random.uniform(low=10,high=70,size=size)
#get standard deviation and mean for uniform dist
LIR_sd=(15-8)/np.sqrt(12)
LIR_mean=0.5*(15+8)
red_sd=(6-0.01)/np.sqrt(12)
red_mean=0.5*(0.01+6)
temp_sd=(70-10)/np.sqrt(12)
temp_mean=0.5*(70+10)
return np.vstack((LIR,redshift,temperature)).T,np.vstack(((LIR-LIR_mean)/LIR_sd,(redshift-red_mean)/red_sd,(temperature-temp_mean)/temp_sd)).T
```
```python
samp,samp_stand=generate_samples(1000)
```
```python
samp.shape
```
(1000, 3)
```python
import xidplus
from xidplus import filters
filter_=filters.FilterFile(file=xidplus.__path__[0]+'/../test_files/filters.res')
```
/Users/pdh21/anaconda3/envs/xidplus/lib/python3.6/site-packages/dask/config.py:168: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
data = yaml.load(f.read()) or {}
WARNING: AstropyDeprecationWarning: block_reduce was moved to the astropy.nddata.blocks module. Please update your import statement. [astropy.nddata.utils]
WARNING: Logging before flag parsing goes to stderr.
W0104 11:48:46.144933 4605595072 logger.py:204] AstropyDeprecationWarning: block_reduce was moved to the astropy.nddata.blocks module. Please update your import statement.
```python
SPIRE_250=filter_.filters[215]
SPIRE_350=filter_.filters[216]
SPIRE_500=filter_.filters[217]
MIPS_24=filter_.filters[201]
PACS_100=filter_.filters[250]
PACS_160=filter_.filters[251]
bands=[SPIRE_250,SPIRE_350,SPIRE_500,MIPS_24,PACS_100,PACS_160]
eff_lam=[250.0,350.0,500.0,24.0, 100.0,160.0]
```
```python
from scipy.interpolate import interp1d
def get_fluxes(samples):
measured=np.empty_like(samples)
val = np.linspace(np.log10(3E8/8E-6),np.log10(3E8/1E-3),1000)
val = 10**val
for i,s in enumerate(samples):
z=s[1]
prior = {}
prior['z'] = s[1]
prior['log10LIR'] = s[0]
prior['T'] = s[2]
prior['emissivity'] = 1.5
source = models.greybody(prior)
nu,lnu = source.generate_greybody(val,z)
wave = 3E8/nu*1E6
sed=interp1d(wave,lnu)
dist = cosmo.luminosity_distance(z).to(u.cm).value
for b in range(0,3):
measured[i,b]=(1.0+z)*filters.fnu_filt(sed(bands[b].wavelength/1E4),
3E8/(bands[b].wavelength/1E10),
bands[b].transmission,
3E8/(eff_lam[b]*1E-6),
sed(eff_lam[b]))/(4*np.pi*dist**2)
return measured/10**(-26)
```
```python
measured=get_fluxes(samp)
```
## Use stax to create neural net
```python
from jax.experimental import stax
from jax.experimental.stax import (BatchNorm, Conv, Dense, Flatten,
Relu, LogSoftmax,LeakyRelu)
import time
```
```python
batch_size=100
num_inputs=3
num_bands=3
#stax provides an intialising function and a function for doing a forward pass
init_fun,sed_emu = stax.serial(Dense(num_inputs),Relu,
Dense(20),Relu,
Dense(num_bands))
```
```python
#function to get initial parameters of neural net
_, params = init_fun(key, (batch_size, num_inputs))
```
```python
import torch
from torch.utils.data import Dataset, DataLoader
```
```python
```
```python
## class for sed using the torch dataset class
class sed_data(Dataset):
def __init__(self,params,fluxes):
self.X=params
self.y=fluxes
def __len__(self):
return len(self.X)
def __getitem__(self,idx):
return self.X[idx],self.y[idx]
```
```python
## generate random SED samples
samp_train,samp_stand_train=generate_samples(2000)
## Use Steve's code and xidplus filters to get fluxes
measured_train=get_fluxes(samp_train)
## use data in SED dataclass
ds = sed_data(samp_stand_train,measured_train)
## use torch DataLoader
train_loader = DataLoader(ds, batch_size=batch_size,)
## do same but for test set
samp_test,samp_stand_test=generate_samples(500)
measured_test=get_fluxes(samp_test)
ds = sed_data(samp_stand_test,measured_test)
test_loader = DataLoader(ds, batch_size=batch_size)
```
The `update` function computes the gradient of the loss with respect to the parameters for a batch. We use predefined optimisers and choose Adam to be our optimiser
```python
from jax.experimental import optimizers
@jit
def update(params, x, y, opt_state):
""" Compute the gradient for a batch and update the parameters """
value, grads = value_and_grad(loss)(params, x, y)
opt_state = opt_update(0, grads, opt_state)
return get_params(opt_state), opt_state, value
```
```python
def accuracy(params, data_loader):
""" Compute the accuracy for the SED emulator"""
acc_total = 0
for batch_idx, (data, target) in enumerate(data_loader):
x=jnp.asarray(data)
predicted_flux=sed_emu(params, x)
acc_total += jnp.sum((predicted_flux - jnp.asarray(target))**2)
return acc_total/len(data_loader.dataset)
def loss(params, images, targets):
preds = sed_emu(params, jnp.asarray(images))
return jnp.sqrt(jnp.mean((jnp.asarray(targets) - preds)**2))
```
```python
def run_greybody_training_loop(num_epochs, opt_state):
""" Implements a learning loop over epochs. """
# Initialize placeholder for loggin
log_acc_train, log_acc_test, train_loss = [], [], []
# Get the initial set of parameters
params = get_params(opt_state)
# Get initial accuracy after random init
train_acc = accuracy(params, train_loader)
test_acc = accuracy(params, test_loader)
log_acc_train.append(train_acc)
log_acc_test.append(test_acc)
# Loop over the training epochs
for epoch in range(num_epochs):
start_time = time.time()
for batch_idx, (data, target) in enumerate(train_loader):
x=jnp.asarray(data)
y=jnp.asarray(target)
params, opt_state, loss = update(params, x, y, opt_state)
train_loss.append(loss)
epoch_time = time.time() - start_time
print(len(params))
train_acc = accuracy(params, train_loader)
test_acc = accuracy(params, test_loader)
log_acc_train.append(train_acc)
log_acc_test.append(test_acc)
print("Epoch {} | T: {:0.2f} | Train A: {:0.3f} | Test A: {:0.3f}".format(epoch+1, epoch_time,
train_acc, test_acc,))
return train_loss, log_acc_train, log_acc_test
```
```python
step_size = 1e-5
opt_init, opt_update, get_params = optimizers.adam(step_size)
opt_state = opt_init(params)
num_epochs = 20
train_loss, train_log, test_log = run_greybody_training_loop(num_epochs,
opt_state)
```
5
Epoch 1 | T: 0.08 | Train A: 1612982910976.000 | Test A: 202752896.000
5
Epoch 2 | T: 0.08 | Train A: 1612982910976.000 | Test A: 202753440.000
5
Epoch 3 | T: 0.07 | Train A: 1612982910976.000 | Test A: 202753904.000
5
Epoch 4 | T: 0.08 | Train A: 1612982910976.000 | Test A: 202754352.000
5
Epoch 5 | T: 0.08 | Train A: 1612982910976.000 | Test A: 202754864.000
5
Epoch 6 | T: 0.07 | Train A: 1612982910976.000 | Test A: 202755440.000
5
Epoch 7 | T: 0.08 | Train A: 1612982910976.000 | Test A: 202756128.000
5
Epoch 8 | T: 0.07 | Train A: 1612983042048.000 | Test A: 202756848.000
5
Epoch 9 | T: 0.06 | Train A: 1612983042048.000 | Test A: 202757568.000
5
Epoch 10 | T: 0.07 | Train A: 1612983042048.000 | Test A: 202758224.000
5
Epoch 11 | T: 0.07 | Train A: 1612983042048.000 | Test A: 202758912.000
5
Epoch 12 | T: 0.06 | Train A: 1612983042048.000 | Test A: 202759600.000
5
Epoch 13 | T: 0.07 | Train A: 1612983042048.000 | Test A: 202760272.000
5
Epoch 14 | T: 0.08 | Train A: 1612983042048.000 | Test A: 202760960.000
5
Epoch 15 | T: 0.07 | Train A: 1612983042048.000 | Test A: 202761712.000
5
Epoch 16 | T: 0.10 | Train A: 1612983304192.000 | Test A: 202762480.000
5
Epoch 17 | T: 0.06 | Train A: 1612983304192.000 | Test A: 202763232.000
5
Epoch 18 | T: 0.06 | Train A: 1612983304192.000 | Test A: 202764016.000
5
Epoch 19 | T: 0.06 | Train A: 1612983304192.000 | Test A: 202764816.000
5
Epoch 20 | T: 0.09 | Train A: 1612983304192.000 | Test A: 202765600.000
```python
plt.plot(train_log)
```
[<matplotlib.lines.Line2D at 0x7f8149a10978>]

## try simplifying
```python
samp_train,samp_stand_train=generate_samples(10000)
measured_train=get_fluxes(samp_train)
ds = sed_data(samp_stand_train,measured_train)
train_loader = DataLoader(ds, batch_size=batch_size,)
samp_test,samp_stand_test=generate_samples(500)
measured_test=get_fluxes(samp_test)
num_inputs=3
num_bands=1
init_fun,sed_emu = stax.serial(Dense(num_inputs),Relu,
Dense(num_bands))
# Initialise the network with four inputs
out_shape, net_params = init_fun(key,(-1, 3))
ytrain=jnp.asarray(measured_train[:,0])
Xtrain=jnp.asarray(samp_stand_train)
ytest=jnp.asarray(measured_test[:,0])
Xtest=jnp.asarray(samp_stand_test)
def loss(params):
predictions = sed_emu(params, Xtrain)
return jnp.mean((ytrain - predictions)**2)
def loss_test(params):
predictions = sed_emu(params, Xtest)
return jnp.mean((ytest - predictions)**2)
opt_init, opt_update, get_params = optimizers.adam(0.01)
@jit
def step(i, opt_state):
    """One Adam step.

    Returns ``(new_opt_state, train_loss, test_loss)``, with both losses
    evaluated at the pre-update parameters.
    """
    # Parameters held by the optimiser before this update.
    params = get_params(opt_state)
    grads = grad(loss)(params)
    new_state = opt_update(i, grads, opt_state)
    return new_state, loss(params), loss_test(params)
# Optimiser initialisation
opt_state = opt_init(net_params)
all_loss=[]
all_loss_test=[]
for i in range(50000):
# Train step
opt_state,loss_iter,loss_test_iter = step(i, opt_state)
all_loss.append(loss_iter)
all_loss_test.append(loss_test_iter)
# Final parameters after training
net_params = get_params(opt_state)
```
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
<ipython-input-568-393ad2f19ff4> in <module>
47 for i in range(50000):
48 # Train step
---> 49 opt_state,loss_iter,loss_test_iter = step(i, opt_state)
50 all_loss.append(loss_iter)
51 all_loss_test.append(loss_test_iter)
~/anaconda3/envs/xidplus/lib/python3.6/site-packages/jax/api.py in f_jitted(*args, **kwargs)
215 backend=backend,
216 name=flat_fun.__name__,
--> 217 donated_invars=donated_invars)
218 return tree_unflatten(out_tree(), out)
219
~/anaconda3/envs/xidplus/lib/python3.6/site-packages/jax/core.py in bind(self, fun, *args, **params)
1160
1161 def bind(self, fun, *args, **params):
-> 1162 return call_bind(self, fun, *args, **params)
1163
1164 def process(self, trace, fun, tracers, params):
~/anaconda3/envs/xidplus/lib/python3.6/site-packages/jax/core.py in call_bind(primitive, fun, *args, **params)
1151 tracers = map(top_trace.full_raise, args)
1152 with maybe_new_sublevel(top_trace):
-> 1153 outs = primitive.process(top_trace, fun, tracers, params)
1154 return map(full_lower, apply_todos(env_trace_todo(), outs))
1155
~/anaconda3/envs/xidplus/lib/python3.6/site-packages/jax/core.py in process(self, trace, fun, tracers, params)
1163
1164 def process(self, trace, fun, tracers, params):
-> 1165 return trace.process_call(self, fun, tracers, params)
1166
1167 def post_process(self, trace, out_tracers, params):
~/anaconda3/envs/xidplus/lib/python3.6/site-packages/jax/core.py in process_call(self, primitive, f, tracers, params)
573
574 def process_call(self, primitive, f, tracers, params):
--> 575 return primitive.impl(f, *tracers, **params)
576 process_map = process_call
577
~/anaconda3/envs/xidplus/lib/python3.6/site-packages/jax/interpreters/xla.py in _xla_call_impl(fun, device, backend, name, donated_invars, *args)
557 *unsafe_map(arg_spec, args))
558 try:
--> 559 return compiled_fun(*args)
560 except FloatingPointError:
561 assert FLAGS.jax_debug_nans # compiled_fun can only raise in this case
~/anaconda3/envs/xidplus/lib/python3.6/site-packages/jax/interpreters/xla.py in _execute_compiled(compiled, avals, handlers, *args)
799 device, = compiled.local_devices()
800 input_bufs = list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))
--> 801 out_bufs = compiled.execute(input_bufs)
802 if FLAGS.jax_debug_nans: check_nans(xla_call_p, out_bufs)
803 return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]
KeyboardInterrupt:
```python
sed_emu(net_params,jnp.asarray(samp_stand_test))
```
DeviceArray([[ 1.15468442e+00],
[-9.58508172e-04],
[ 2.54261899e+00],
[ 3.60584527e-01],
[-9.58508172e-04],
[ 2.07726550e+00],
[ 9.92553949e-01],
[-9.58508172e-04],
[ 2.42397380e+00],
[ 2.25675121e-01],
[-9.58508172e-04],
[ 1.56079197e+00],
[-9.58508172e-04],
[ 2.42112923e+00],
[-4.59634215e-02],
[-2.07831711e-02],
[-3.63815483e-03],
[-4.87463474e-02],
[ 1.26780784e-02],
[ 2.65504003e+00],
[ 9.33449447e-01],
[ 3.37957054e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 7.08476484e-01],
[-9.58508172e-04],
[ 6.70050085e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.29025197e+00],
[ 2.45833492e+00],
[-9.58508172e-04],
[ 2.53023952e-01],
[ 3.36646605e+00],
[ 1.12742388e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 7.37486780e-01],
[ 1.14556301e+00],
[-9.58508172e-04],
[ 2.45107889e+00],
[-8.30899831e-03],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.88620603e+00],
[-9.58508172e-04],
[-8.54014605e-02],
[ 1.26462663e-03],
[ 7.60492921e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.11973637e-02],
[-9.58508172e-04],
[ 5.43062389e-01],
[-9.58508172e-04],
[-3.95501554e-02],
[-9.58508172e-04],
[ 5.96175730e-01],
[ 1.57901466e+00],
[ 3.13268751e-02],
[-9.58508172e-04],
[-4.78294715e-02],
[-9.58508172e-04],
[ 6.70954704e-01],
[ 2.45240355e+00],
[ 1.23606078e-01],
[ 3.76608753e+00],
[-9.58508172e-04],
[ 9.04388279e-02],
[-9.58508172e-04],
[ 9.68263298e-02],
[ 1.20879924e+00],
[-9.58508172e-04],
[ 3.99760783e-01],
[-9.58508172e-04],
[ 1.06341207e+00],
[-9.58508172e-04],
[ 1.59416497e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.98000729e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 3.10756564e-01],
[ 1.43007517e+00],
[ 3.09739560e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.00219893e+00],
[ 4.44864899e-01],
[-9.58508172e-04],
[-2.23131012e-02],
[-9.58508172e-04],
[ 5.54561436e-01],
[ 3.48940992e+00],
[-9.58508172e-04],
[ 8.38076234e-01],
[ 2.51870096e-01],
[ 1.22517738e-02],
[ 1.65864229e+00],
[-9.58508172e-04],
[ 3.26522887e-01],
[-9.58508172e-04],
[ 2.00894713e+00],
[ 1.69056046e+00],
[ 2.11957169e+00],
[ 4.13133651e-02],
[ 2.30553699e+00],
[ 4.39177006e-01],
[ 9.29203212e-01],
[-7.32375905e-02],
[ 1.95634222e+00],
[ 9.30451453e-02],
[ 2.66024208e+00],
[ 1.20262635e+00],
[ 6.85943246e-01],
[-9.58508172e-04],
[ 2.04046631e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 4.22765702e-01],
[-9.58508172e-04],
[ 1.16487467e+00],
[-9.58508172e-04],
[ 1.86354935e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.06652239e-01],
[ 2.36769032e+00],
[-3.49428840e-02],
[ 1.80661845e+00],
[ 2.92194390e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 8.83281305e-02],
[ 1.00651944e+00],
[ 1.32672429e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.23403108e+00],
[-9.58508172e-04],
[ 2.55370712e+00],
[ 1.57427931e+00],
[-9.58508172e-04],
[ 2.74964261e+00],
[-9.58508172e-04],
[ 2.01618028e+00],
[ 2.12437820e+00],
[-9.58508172e-04],
[ 1.35899627e+00],
[-9.58508172e-04],
[ 8.47430825e-02],
[ 1.93455076e+00],
[ 4.79390144e-01],
[ 4.26982582e-01],
[-9.58508172e-04],
[ 1.36406803e+00],
[ 1.03376615e+00],
[ 2.17851710e+00],
[-4.90818778e-03],
[-7.52505660e-02],
[ 9.08556998e-01],
[ 2.23048449e+00],
[ 1.88323116e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-7.51304105e-02],
[ 1.63734949e+00],
[-9.58508172e-04],
[ 9.05178666e-01],
[ 1.92167208e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 6.48726404e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[ 4.73501742e-01],
[ 2.48021126e+00],
[ 5.46120346e-01],
[ 2.39680481e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 3.52483541e-01],
[ 9.23355043e-01],
[-4.91245277e-02],
[-9.58508172e-04],
[-9.58508172e-04],
[ 4.40184474e-02],
[-9.58508172e-04],
[ 1.92564821e+00],
[ 2.45729908e-02],
[ 8.60888958e-01],
[-9.58508172e-04],
[ 1.10020638e+00],
[ 2.01163435e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 6.40236139e-02],
[-9.58508172e-04],
[ 2.89744663e+00],
[ 1.31634557e+00],
[ 3.54226613e+00],
[-9.58508172e-04],
[ 5.94921231e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[-2.43302714e-02],
[ 4.88250367e-02],
[-9.58508172e-04],
[-9.58508172e-04],
[-5.64744212e-02],
[ 1.38399935e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.02204275e+00],
[-9.58508172e-04],
[ 3.76921058e-01],
[-9.58508172e-04],
[ 2.43769269e-02],
[-9.58508172e-04],
[ 2.69250751e+00],
[-9.58508172e-04],
[ 2.32621837e+00],
[-9.58508172e-04],
[ 2.58438587e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.01696908e+00],
[ 1.73390257e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.99509430e+00],
[ 2.67987669e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.55209315e+00],
[ 2.27504349e+00],
[ 4.64258194e-01],
[ 1.10433686e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 6.90037131e-01],
[ 1.00987956e-01],
[ 2.27970600e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.37808812e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.39720154e+00],
[ 4.33399320e-01],
[ 1.65839744e+00],
[ 8.97593558e-01],
[-9.58508172e-04],
[ 2.35519743e+00],
[-9.58508172e-04],
[ 5.07159948e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[ 3.15303898e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-5.45516573e-02],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 3.09337878e+00],
[ 2.78536773e+00],
[ 1.55179846e+00],
[ 1.26088941e+00],
[ 3.61017078e-01],
[ 3.45767707e-01],
[-9.58508172e-04],
[ 2.36897063e+00],
[ 1.37667274e+00],
[-9.58508172e-04],
[ 1.83456016e+00],
[ 1.44041264e+00],
[ 1.20654106e+00],
[ 2.65489578e+00],
[ 5.55496335e-01],
[ 1.02238917e+00],
[ 1.37920137e-02],
[ 7.86814094e-01],
[-9.58508172e-04],
[-6.98722973e-02],
[ 9.13126469e-01],
[ 1.06834280e+00],
[ 1.88309038e+00],
[-9.58508172e-04],
[ 1.54450321e+00],
[ 1.04756260e+00],
[ 2.53113604e+00],
[ 2.85527802e+00],
[-2.84498073e-02],
[ 3.24711490e+00],
[-5.81370434e-03],
[ 3.06740403e+00],
[ 1.86203986e-01],
[ 1.23081028e+00],
[-7.00905593e-03],
[-9.58508172e-04],
[ 6.43971324e-01],
[-9.58508172e-04],
[ 1.09394586e+00],
[ 5.28709926e-02],
[ 1.72040805e-01],
[-9.58508172e-04],
[-2.61565717e-03],
[ 1.51694965e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.31565833e+00],
[-9.58508172e-04],
[ 1.35126865e+00],
[ 1.45849001e+00],
[ 3.05494547e-01],
[ 2.87097645e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 4.68674414e-02],
[ 1.03463399e+00],
[ 1.31633985e+00],
[-9.58508172e-04],
[ 1.18405735e+00],
[ 3.13880825e+00],
[-9.58508172e-04],
[ 1.50886238e+00],
[-2.93864030e-02],
[ 2.03841142e-02],
[-9.58508172e-04],
[-9.58508172e-04],
[ 3.01921701e+00],
[ 2.29553556e+00],
[ 2.01732588e+00],
[-9.58508172e-04],
[ 9.46528018e-01],
[-9.58508172e-04],
[ 2.86717629e+00],
[ 2.71883035e+00],
[ 7.31794894e-01],
[ 5.31512618e-01],
[ 8.79150271e-01],
[-2.28600409e-02],
[ 3.83761692e+00],
[ 1.34883475e+00],
[ 2.17892602e-01],
[ 1.80289721e+00],
[-9.58508172e-04],
[ 3.15685129e+00],
[-9.58508172e-04],
[ 2.56981421e+00],
[ 1.70582068e+00],
[-9.58508172e-04],
[ 3.58788967e+00],
[ 2.52764678e+00],
[-7.90248811e-02],
[-9.58508172e-04],
[ 3.26229744e-02],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.07186556e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 3.46536845e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[-3.50891314e-02],
[ 2.16071844e+00],
[ 2.22161841e+00],
[-9.58508172e-04],
[ 1.61531448e+00],
[ 2.28193617e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 7.46373296e-01],
[-4.68290448e-02],
[ 1.97060442e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.59249604e-01],
[-2.71804426e-02],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 5.32205880e-01],
[ 1.67027545e+00],
[ 1.73068613e-01],
[-9.58508172e-04],
[ 9.47348952e-01],
[ 1.40376246e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.01150608e+00],
[-9.58508172e-04],
[ 1.25453806e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 6.99561387e-02],
[-9.66768190e-02],
[ 1.45439386e+00],
[-9.58508172e-04],
[ 9.94647443e-01],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.70569825e+00],
[ 1.54835320e+00],
[ 2.61701584e-01],
[ 1.57478595e+00],
[ 2.00593066e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.87573314e+00],
[ 2.29976583e+00],
[-5.11386767e-02],
[ 2.86163974e+00],
[-2.39126179e-02],
[-9.58508172e-04],
[ 1.57842660e+00],
[-9.58508172e-04],
[ 5.12519062e-01],
[ 5.17725408e-01],
[-5.52048571e-02],
[-9.58508172e-04],
[-3.10496055e-02],
[ 3.59770894e-01],
[ 8.05077255e-02],
[ 1.21304858e+00],
[ 1.74713719e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.16574207e-01],
[ 1.17360139e+00],
[-4.75306176e-02],
[ 4.00898308e-01],
[-1.12742849e-01],
[ 2.83119202e-01],
[-9.58508172e-04],
[ 3.15581274e+00],
[ 2.45394722e-01],
[ 5.26303768e-01],
[ 5.82113922e-01],
[-9.58508172e-04],
[ 1.65560460e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[-9.58508172e-04],
[ 1.34197697e-01],
[ 2.85542655e+00],
[ 2.10580516e+00],
[ 1.23970079e+00],
[ 1.26059330e+00],
[ 1.71865857e+00],
[-9.58508172e-04],
[ 2.72349739e+00],
[-9.58508172e-04],
[ 5.35831869e-01],
[-4.67675552e-02],
[ 6.16959214e-01],
[ 7.35738099e-01],
[ 1.25880384e+00],
[ 7.86045194e-01],
[ 1.98532951e+00],
[-9.58508172e-04],
[-9.58508172e-04],
[ 2.66943359e+00],
[-9.58508172e-04],
[-9.58508172e-04]], dtype=float32)
```python
plt.semilogy(all_loss)
plt.plot(all_loss_test)
```
[<matplotlib.lines.Line2D at 0x7fb1b4507358>]

### Try simple linear regression with Stax
To get to the bottom of Stax, I will use it to predict $y = x\cos(x) + \sin(2x)$
```python
x=np.arange(0,20,0.1)
def simple_func(x):
    """Return sin(x) corrupted by zero-mean Gaussian noise (sigma = 0.2)."""
    noise = 0.2 * np.random.normal(size=x.shape)
    return np.sin(x) + noise
```
```python
plt.plot(x,simple_func(x))
plt.xlabel('x')
plt.ylabel('y')
```
Text(0, 0.5, 'y')

```python
batch_size=40
num_inputs=1
num_bands=1
#stax provides an intialising function and a function for doing a forward pass
init_fun,simple_emu = stax.serial(Dense(num_inputs),LeakyRelu,
Dense(200),LeakyRelu,
Dense(num_bands))
```
```python
#function to get initial parameters of neural net
_, params = init_fun(key, (batch_size, num_inputs))
## class for sed using the torch dataset class
class simple_data(Dataset):
    """Minimal torch ``Dataset`` pairing input samples with their targets."""

    def __init__(self, x, y):
        # Keep references to the raw arrays; indexing is delegated to them.
        self.X, self.y = x, y

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        # Return the (input, target) pair for one sample.
        return self.X[idx], self.y[idx]
```
```python
train_x=np.random.uniform(-10,10,(4000,1))
train_y=simple_func(train_x)
test_x=np.random.uniform(-10,10,(1000,1))
test_y=simple_func(test_x)
```
```python
## use data in Simple dataclass
ds = simple_data(train_x, train_y)
## use torch DataLoader
train_loader = DataLoader(ds, batch_size=batch_size,)
## do same but for test set
# FIX: was `sed_data` (the dataset class from the earlier SED section);
# the test split must use the same `simple_data` class as the train split.
ds = simple_data(test_x, test_y)
test_loader = DataLoader(ds, batch_size=batch_size)
```
```python
def accuracy(params, data_loader):
    """Mean squared error of the emulator over every batch in ``data_loader``."""
    total_sq_err = 0
    for inputs, target in data_loader:
        preds = simple_emu(params, jnp.asarray(inputs))
        total_sq_err += jnp.sum((preds - jnp.asarray(target)) ** 2)
    # Normalise by the full dataset size, not the number of batches.
    return total_sq_err / len(data_loader.dataset)
def loss(params, images, targets):
    """Batch-mean squared error of the emulator on (images, targets)."""
    residual = jnp.asarray(targets) - simple_emu(params, jnp.asarray(images))
    return jnp.mean(residual ** 2)
```
```python
def run_simple_training_loop(num_epochs, opt_state):
    """Train for ``num_epochs`` epochs, logging per-batch loss and per-epoch accuracy.

    Relies on the module-level ``update``, ``accuracy``, ``get_params``,
    ``train_loader`` and ``test_loader``.  Returns the per-batch training
    losses, the train/test accuracy logs, and the final parameters.
    """
    # Placeholders for logging.
    log_acc_train = []
    log_acc_test = []
    train_loss = []
    params = get_params(opt_state)
    # Record the accuracy of the randomly initialised network first.
    log_acc_train.append(accuracy(params, train_loader))
    log_acc_test.append(accuracy(params, test_loader))
    for epoch in range(num_epochs):
        tic = time.time()
        for data, target in train_loader:
            batch_x = jnp.asarray(data)
            batch_y = jnp.asarray(target)
            # `batch_loss_val` renamed from `loss` to avoid shadowing the
            # module-level loss function.
            params, opt_state, batch_loss_val = update(params, batch_x, batch_y, opt_state)
            train_loss.append(batch_loss_val)
        epoch_time = time.time() - tic
        train_acc = accuracy(params, train_loader)
        test_acc = accuracy(params, test_loader)
        log_acc_train.append(train_acc)
        log_acc_test.append(test_acc)
        if epoch % 50 == 0:
            print("Epoch {} | T: {:0.2f} | Train A: {:0.3f} | Test A: {:0.3f}".format(
                epoch + 1, epoch_time, train_acc, test_acc))
    return train_loss, log_acc_train, log_acc_test, params
```
```python
#function to get initial parameters of neural net
_, params = init_fun(key, (batch_size, num_inputs))
step_size = 0.05
opt_init, opt_update, get_params = optimizers.adam(step_size)
opt_state = opt_init(params)
num_epochs = 500
my_images = []
train_loss, train_log, test_log,params = run_simple_training_loop(num_epochs,
opt_state)
```
[(Traced<ShapedArray(float32[1,1])>with<DynamicJaxprTrace(level=0/1)>, Traced<ShapedArray(float32[1])>with<DynamicJaxprTrace(level=0/1)>), (), (Traced<ShapedArray(float32[1,200])>with<DynamicJaxprTrace(level=0/1)>, Traced<ShapedArray(float32[200])>with<DynamicJaxprTrace(level=0/1)>), (), (Traced<ShapedArray(float32[200,1])>with<DynamicJaxprTrace(level=0/1)>, Traced<ShapedArray(float32[1])>with<DynamicJaxprTrace(level=0/1)>)]
Epoch 1 | T: 0.87 | Train A: 0.527 | Test A: 0.509
Epoch 51 | T: 0.21 | Train A: 0.508 | Test A: 0.498
Epoch 101 | T: 0.25 | Train A: 0.502 | Test A: 0.500
Epoch 151 | T: 0.25 | Train A: 0.503 | Test A: 0.504
Epoch 201 | T: 0.18 | Train A: 0.503 | Test A: 0.500
Epoch 251 | T: 0.27 | Train A: 0.503 | Test A: 0.500
Epoch 301 | T: 0.22 | Train A: 0.503 | Test A: 0.501
Epoch 351 | T: 0.27 | Train A: 0.503 | Test A: 0.500
Epoch 401 | T: 0.22 | Train A: 0.504 | Test A: 0.501
Epoch 451 | T: 0.23 | Train A: 0.503 | Test A: 0.500
```python
plt.plot(train_loss)
```
[<matplotlib.lines.Line2D at 0x7f811e3e2940>]

```python
predicted=simple_emu(params, test_x)
```
```python
print(test_y.shape)
```
(1000, 1)
```python
plt.plot(test_x,predicted,'o')
plt.plot(test_x,test_y,'ro',alpha=0.5)
```
[<matplotlib.lines.Line2D at 0x7f81233f6828>]

### Retry with [Eric Jang's tutorial on meta learning](https://blog.evjang.com/2019/02/maml-jax.html)
```python
import jax.numpy as np
from jax import grad
```
```python
f = lambda x : np.exp(x)
g = lambda x : np.square(x)
print(grad(f)(1.)) # = e^{1}
print(grad(grad(f))(1.))
print(grad(grad(grad(f)))(1.))
print(grad(g)(2.)) # 2x = 4
print(grad(grad(g))(2.)) # x = 2
print(grad(grad(grad(g)))(2.)) # x = 0
```
2.7182817
2.7182817
2.7182817
4.0
2.0
0.0
```python
from jax import vmap # for auto-vectorizing functions
from functools import partial # for use with vmap
from jax import jit # for compiling functions for speedup
from jax.experimental import stax # neural network library
from jax.experimental.stax import Conv, Dense, MaxPool, Relu, Flatten, LogSoftmax # neural network layers
import matplotlib.pyplot as plt # visualization
```
```python
# Use stax to set up network initialization and evaluation functions
net_init, net_apply = stax.serial(
Dense(40), Relu,
Dense(40), Relu,
Dense(1)
)
in_shape = (-1, 1,)
out_shape, net_params = net_init(key,in_shape)
```
```python
def loss(params, inputs, targets):
    """Average squared error of ``net_apply`` over the batch."""
    residual = targets - net_apply(params, inputs)
    return np.mean(residual ** 2)
```
```python
# batch the inference across K=100
xrange_inputs = np.linspace(-5,5,100).reshape((100, 1)) # (k, 1)
targets = np.sin(xrange_inputs)
predictions = vmap(partial(net_apply, net_params))(xrange_inputs)
losses = vmap(partial(loss, net_params))(xrange_inputs, targets) # per-input loss
plt.plot(xrange_inputs, predictions, label='prediction')
plt.plot(xrange_inputs, losses, label='loss')
plt.plot(xrange_inputs, targets, label='target')
plt.legend()
```
<matplotlib.legend.Legend at 0x7f811f500cf8>

```python
import numpy as onp
from jax.experimental import optimizers
from jax.tree_util import tree_multimap # Element-wise manipulation of collections of numpy arrays
```
```python
opt_init, opt_update,get_params = optimizers.adam(step_size=1e-2)
opt_state = opt_init(net_params)
# Define a compiled update step
@jit
def step(i, opt_state, x1, y1):
    """One compiled Adam update; returns (new_opt_state, pre-update loss)."""
    params = get_params(opt_state)
    current_loss = loss(params, x1, y1)
    grads = grad(loss)(params, x1, y1)
    return opt_update(i, grads, opt_state), current_loss
loss_all=[]
for i in range(100):
opt_state,loss_tmp = step(i, opt_state, xrange_inputs, targets)
loss_all.append(loss_tmp)
net_params = get_params(opt_state)
```
```python
#batch the inference across K=100
xrange_inputs = np.linspace(-5,5,100).reshape((100, 1)) # (k, 1)
targets = np.sin(xrange_inputs)
predictions = vmap(partial(net_apply, net_params))(xrange_inputs)
losses = vmap(partial(loss, net_params))(xrange_inputs, targets) # per-input loss
plt.plot(xrange_inputs, predictions, label='prediction')
plt.plot(xrange_inputs, losses, label='loss')
plt.plot(xrange_inputs, targets, label='target')
plt.legend()
```
<matplotlib.legend.Legend at 0x7f81366d8b38>

```python
plt.plot(loss_all)
```
[<matplotlib.lines.Line2D at 0x7f8133029208>]

```python
def maml_loss(p, x1, y1, x2, y2):
    """MAML objective: adapt params ``p`` on task data (x1, y1), then
    evaluate the adapted params on held-out task data (x2, y2).

    NOTE(review): depends on a module-level ``inner_update`` which is not
    defined in this notebook excerpt -- confirm it exists before running.
    """
    p2 = inner_update(p, x1, y1)
    return loss(p2, x2, y2)
```
```python
# vmapped version of maml loss.
# returns scalar for all tasks.
def batch_maml_loss(p, x1_b, y1_b, x2_b, y2_b):
    """Scalar MAML loss averaged over a batch of tasks (vmapped over the task axis)."""
    per_task = vmap(partial(maml_loss, p))(x1_b, y1_b, x2_b, y2_b)
    return np.mean(per_task)
```
```python
def batch_loss(p, x_b, y_b):
    """Mean of the per-task losses, vmapped over the leading batch axis."""
    return np.mean(vmap(partial(loss, p))(x_b, y_b))
```
```python
import numpy as onp
# batch the inference across K=100
xrange_inputs = np.linspace(-5,5,100).reshape((100, 1)) # (k, 1)
targets = np.sin(xrange_inputs)
def sample_batch(outer_batch_size,inner_batch_size):
def get_batch():
xs, ys = [], []
A=1
phase=0
for i in range(0,outer_batch_size):
x = onp.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * onp.sin(x + phase)
xs.append(x)
ys.append(y)
return np.stack(xs), np.stack(ys)
x1, y1 = get_batch()
return x1, y1
```
```python
opt_init, opt_update, get_params= optimizers.adam(step_size=1e-3)
out_shape, net_params = net_init(key,in_shape)
opt_state = opt_init(net_params)
@jit
def step(i, opt_state, x1, y1):
    """One compiled Adam update against ``batch_loss``.

    Returns ``(new_opt_state, loss)``, the loss evaluated at the
    pre-update parameters.
    """
    params = get_params(opt_state)
    current_loss = batch_loss(params, x1, y1)
    grads = grad(batch_loss)(params, x1, y1)
    return opt_update(i, grads, opt_state), current_loss
np_batched_loss_1 = []
K=20
for i in range(20000):
x1_b, y1_b = sample_batch(1, K)
opt_state, l = step(i, opt_state, x1_b, y1_b)
np_batched_loss_1.append(l)
if i % 1000 == 0:
print(i)
net_params = get_params(opt_state)
```
0
1000
2000
3000
4000
5000
6000
7000
8000
9000
10000
11000
12000
13000
14000
15000
16000
17000
18000
19000
```python
```
[<matplotlib.lines.Line2D at 0x7f8110a182e8>]

```python
#batch the inference across K=100
xrange_inputs = np.linspace(-5,5,100).reshape((100, 1)) # (k, 1)
targets = np.sin(xrange_inputs)
predictions = vmap(partial(net_apply, net_params))(xrange_inputs)
losses = vmap(partial(loss, net_params))(xrange_inputs, targets) # per-input loss
plt.plot(xrange_inputs, predictions, label='prediction')
plt.plot(xrange_inputs, losses, label='loss')
plt.plot(xrange_inputs, targets, label='target')
plt.legend()
```
<matplotlib.legend.Legend at 0x7f81125b06d8>

```python
```
|
H-E-L-PREPO_NAMEXID_plusPATH_START.@XID_plus_extracted@XID_plus-master@docs@build@doctrees@nbsphinx@notebooks@examples@SED_emulator@Greybody_emulator.ipynb@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter3d/marker/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``scatter3d.marker.color`` plotly attribute."""

    def __init__(self, plotly_name="color", parent_name="scatter3d.marker", **kwargs):
        # Fill in the scatter3d.marker defaults unless the caller supplied
        # an explicit override for that option.
        defaults = dict(
            array_ok=True,
            edit_type="calc",
            role="style",
            colorscale_path="scatter3d.marker.colorscale",
        )
        for option, value in defaults.items():
            kwargs.setdefault(option, value)
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter3d@marker@_color.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/matplotlib/backends/qt_editor/__init__.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@matplotlib@backends@qt_editor@__init__.py@.PATH_END.py
|
{
"filename": "michi2_filter_flux_2sigma_only_Optical_NIR.py",
"repo_name": "1054/Crab.Toolkit.michi2",
"repo_path": "Crab.Toolkit.michi2_extracted/Crab.Toolkit.michi2-master/bin/michi2_filter_flux_2sigma_only_Optical_NIR.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Filter a photometric flux table for SED fitting: keep only >=2-sigma
# detections at optical/NIR wavelengths, average wavelength-duplicated
# rows, cap S/N at 10, and rewrite the Ipac output with '#' comments.
import os
import sys
import numpy
import astropy
import astropy.io.ascii as asciitable
from copy import copy

####################################
#               MAIN               #
####################################

# Two positional arguments are required: input table and output table.
if not len(sys.argv) > 2:
    print('Usage: michi2_filter_flux_3sigma.py input_flux.txt output_flux.txt')
    sys.exit()

data_table = asciitable.read(sys.argv[1])

# Expect at least three columns: wavelength, flux density, flux density error.
if not len(data_table.colnames) >= 3:
    print('Error! The input flux data table does not have at least three columns: wavelength, flux density and error in flux density.')
    sys.exit()
#
# Drop rows below 2 sigma, or outside the optical/NIR wavelength range
# (w > 20 or w <= 0; units presumably micron -- TODO confirm).
w = data_table.field(data_table.colnames[0])
f = data_table.field(data_table.colnames[1])
ferr = data_table.field(data_table.colnames[2])
mask = (f<2.0*ferr) | (w>20) | (w<=0)
isel = numpy.argwhere(mask).flatten()
if len(isel) > 0:
    #print(isel)
    #print(data_table)
    data_table.remove_rows(isel)
    #print(data_table)

# deal with duplicated w
i = 0
while i < len(data_table):
    # Re-fetch the column views each pass because rows may have been removed.
    w = data_table.field(data_table.colnames[0])
    f = data_table.field(data_table.colnames[1])
    ferr = data_table.field(data_table.colnames[2])
    # identify duplicated w
    mask2 = (w==w[i])
    isel2 = numpy.argwhere(mask2).flatten()
    if len(isel2) >= 2:
        # found duplicated w
        print('Found wavelength-duplicated rows: %s'%(isel2))
        print(data_table[isel2])
        # Inverse-variance weighted mean of the duplicated fluxes.
        f_to_average = f[mask2]
        ferr_to_average = ferr[mask2]
        f_averaged = numpy.sum(f_to_average/ferr_to_average**2)/numpy.sum(1/ferr_to_average**2)
        ferr_averaged = numpy.sqrt(1/numpy.sum(ferr_to_average**(-2))) # error propagation of weighted mean, see -- http://www.physics.umd.edu/courses/Phys261/F06/ErrorPropagation.pdf
        # limit S/N not larger than 10
        #if ferr_averaged < f_averaged/10.0:
        #    ferr_averaged = f_averaged/10.0
        # store into data_table
        f[i] = f_averaged # change f will directly change data_table!
        ferr[i] = ferr_averaged # change ferr will directly change data_table!
        print('Averaged wavelength-duplicated rows: w = %s, f = %s, ferr = %s'%(w[i], f_averaged, ferr_averaged))
        # remove those duplicated rows, but keep current i row.
        isel3 = isel2[(isel2 != i)]
        for iseli in isel3:
            print('data_table.remove_rows(%d)'%(iseli))
        data_table.remove_rows(isel3)
    i = i+1

# limit S/N to be not larger than 10
w = data_table.field(data_table.colnames[0])
f = data_table.field(data_table.colnames[1])
ferr = data_table.field(data_table.colnames[2])
mask = (ferr<f/10.0)
isel = numpy.argwhere(mask).flatten()
if len(isel) > 0:
    # Inflate the errors in place (ferr is a view into data_table).
    ferr[mask] = f[mask] / 10.0
    for iseli in isel:
        print('Limited row %d S/N no larger than 10: w = %s, f = %s, ferr = %s'%(iseli, w[iseli], f[iseli], ferr[iseli]))

# output
out_file = sys.argv[2]
asciitable.write(data_table, out_file, Writer=asciitable.Ipac, delimiter=' ', overwrite=True)
#asciitable.write(data_table, sys.stdout, Writer=asciitable.Ipac, delimiter=' ')
# Post-process the Ipac output in place: turn '\\'-prefixed comment lines
# into '#' comments, keep only the first of the 4 Ipac header lines
# (prefixed with '#'), and copy data lines through unchanged.
with open(out_file, 'r+') as fp:
    out_content = fp.readlines() # read everything in the file
    out_iline = 0
    out_header = [] # Ipac format has multiple comment lines (commented by the char '\\') and 4 header lines.
    fp.seek(0)
    while out_iline < len(out_content):
        if out_content[out_iline][0] == '\\':
            # if this is a commented line, then we change the comment mark to '#'
            out_content[out_iline] = '#' + out_content[out_iline][1:]
            fp.write(out_content[out_iline])
        else:
            if len(out_header) == 0:
                # if this is the first header line, then replace the first white space by '#', or if there is no white space, preprend '#'.
                if out_content[out_iline][0] == ' ':
                    out_content[out_iline] = '#' + out_content[out_iline][1:]
                else:
                    out_content[out_iline] = '#' + out_content[out_iline]
                # append header to 'out_header' list
                out_header.append(out_content[out_iline])
                # write only one header line
                fp.write(out_content[out_iline])
                #
            elif len(out_header) < 4:
                # append header to 'out_header' list
                out_header.append(out_content[out_iline])
                # skip the 2nd to 4th header line
                pass
            else:
                # write data line
                fp.write(out_content[out_iline])
                #
        out_iline = out_iline + 1
    # Drop any stale bytes beyond what was rewritten.
    fp.truncate()
    fp.close()
#os.system('sed -i.bak -e "$(grep \"\\\" %s | wc -l)s/^ /#/" "%s"'%(out_file, out_file))
#os.system('sed -i.bak -e "2d;3d;4d" "%s"'%(out_file))
#if os.path.isfile(out_file+'.bak'):
#    os.system('rm "%s"'%(out_file+'.bak'))
print('Output to "%s"!'%(out_file))
|
1054REPO_NAMECrab.Toolkit.michi2PATH_START.@Crab.Toolkit.michi2_extracted@Crab.Toolkit.michi2-master@bin@michi2_filter_flux_2sigma_only_Optical_NIR.py@.PATH_END.py
|
{
"filename": "Gravitational_Lensing.ipynb",
"repo_name": "astro-datalab/notebooks-latest",
"repo_path": "notebooks-latest_extracted/notebooks-latest-master/06_EPO/TeenAstronomyCafe/05_Gravitational_Lensing/Gravitational_Lensing.ipynb",
"type": "Jupyter Notebook"
}
|
<br>
<br>
<font size='6'><u><b>Gravitational Lensing</b></u></font>
<br>
##### Written by A. Bolton, 2017
##### Updated 2018: Elliot Kisiel and Connie Walker
##### Revised by Andres Jaramillo
You have learned about how we can measure the mass of a galaxy based on how its gravity lenses a more distant background galaxy. This lensing effect can be seen in the various images below. In this activity, we will be walking through some of the steps that astronomers use to determine the mass of a galaxy by examining the lensing effects.
<img src='Figures/SloanLensACS_Survey.png' width='900'>
___
# Table of Contents
* [How to Use This Notebook](#How-to-Use-This-Notebook)
* [Pre-Activity Setup](#Pre-Activity-Setup)
* [Activity 1: The Lensed Galaxy](#Activity-1:-The-Lensed-Galaxy)
* [Activity 2: The Lensing Galaxy](#Activity-2:-The-Lensing-Galaxy)
* [Activity 3: The Lensed and Lensing Galaxies](#Activity-3:-The-Lensed-and-Lensing-Galaxies)
* [Part 3.1: Matching the Galaxy](#Part-3.1:-Matching-the-Galaxy)
* [Part 3.2: Real Lensed Galaxies](#Part-3.2:-Real-Lensed-Galaxies)
* [Part 3.3: Relation to the Mass of Galaxies](#Part-3.3:-Relation-to-the-Mass-of-Galaxies)
___
# How to Use This Notebook
The webpage you are in is actually an app - much like the ones on your cellphone. This app consists of cells.
An *input* cell looks like a light grey box with an `In [ ]:` on its left. Input cells each contain code - instructions to make the computer do something.
To activate or select a cell, click anywhere inside of it.
<div class='alert alert-info'>
<font size='3'><b>Select the cell below and read its contents.</b></font>
</div>
```python
# Text that follows a "#" is known as a comment.
# Comments do not affect your code in any way.
# You should always read the comments at the top of each cell you interact with.
# Comments will be used to describe what the cell's code is actually doing.
```
To execute or run a selected cell, hit `[Shift + Enter]` on your keyboard.
<div class='alert alert-info'>
<font size='3'><b>Select the cell below and read its contents. Then, run the cell.</b></font>
</div>
```python
# Text that DOESN'T follow a "#" is considered code.
# Lines of code are instructions given to your computer.
# The line of code below is a "print" statement.
# A print statement literally prints out the text between its quotes.
print("Congrats! You have successfully run your first cell!")
```
Running a cell creates an *output* directly below it. An output can be some text, a graph, an interactive slider, or even nothing at all! For that last case, you know you have run a cell when the `In [ ]:` becomes `In [#]:`, where "#" is any number.
You can learn more about how Jupyter notebooks work at https://try.jupyter.org/
___
# Pre-Activity Setup
In order for any of the activities to work properly, you must import the libraries needed for the code in this notebook.
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below.</b></font>
</div>
```python
# Import necessary modules.
import numpy as np
%matplotlib widget
import matplotlib.pyplot as plt
from matplotlib import cm
import lensdemo_funcs as ldf
from ipywidgets import *
```
### Programming Side Note
The previous cell imports the necessary functions and other tools that allow us to make pictures and do math easily. In the next cell, we will be using a dictionary to easily group things together. Just as a normal dictionary pairs a word with a definition, this dictionary acts in a similar manner — but instead of a word we call it a key, and instead of a definition we call it a value.
```python
dictionary = {key1 : value1, key2 : value2 ...}
```
```python
# Package some image display preferences in a "dictionary object", for use below:
myargs = {'interpolation': 'nearest', 'origin': 'lower', 'cmap': cm.nipy_spectral}
# The following specifies an alternate color map, which some may find preferable.
# Un-comment and execute the following line if you want to use it:
# myargs = {'interpolation': 'nearest', 'origin': 'lower', 'cmap': cm.hot}
```
___
# Activity 1: The Lensed Galaxy
In this exercise you will be playing around with the different parameters of a distant galaxy; we will name this galaxy Pat. This will be the galaxy that will eventually be lensed. What would you expect a very distant galaxy to look like? Would it be shaped like a spiral? A point? Some sort of smear? Think about this and then discuss with a person near you.
Once you have thought about that, we will start to set up some of the parameters below to get an image of what Pat is going to look like. First, run the program as it is set up. Then, use the sliders to tweak the parameters until Pat looks how you want it. Our recommendation is to tweak one parameter (e.g. `g_sig`, `g_amp`, `g_axrat`, etc.) at a time to see how that parameter changes Pat. Try this a few times. Talk to the person next to you and describe one or two ways in which Pat looks different.
```python
gpar = [g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa]
```
Where the parameters are defined as:
>`g_amp` is the peak brightness value.
>`g_sig` is how spread out Pat is.
>`g_xcen` is the x position of the center of Pat.
>`g_ycen` is the y position of the center of Pat.
>`g_axrat` is the minor-to-major axis ratio.
>`g_pa` is the major-axis position angle [degrees] c.c.w. from x axis.
Each of the parameters has the recommended ranges in values below:
>$0 \le$ `g_amp` $\le 10$
>$0 \le$ `g_sig` $\le 0.5$
>$-1 \le$ `g_xcen` $\le 1$
>$-1 \le$ `g_ycen` $\le 1$
>$0.01 \le$ `g_axrat` $\le 1$
>$0 \le$ `g_pa` $\le 180$
<div class='alert alert-info'>
<h3 class='alert-heading'>Helpful Reminder(s)</h3>
<ul>
<li>Click anywhere inside of a cell to select it.</li>
<li>Hit [Shift + Enter] to run a selected cell.</li>
</ul>
</div>
```python
# Set some parameters to make x and y coordinate images.
nx = 501
ny = 501
xhilo = [-2.5, 2.5]
yhilo = [-2.5, 2.5]
x = (xhilo[1] - xhilo[0]) * np.outer(np.ones(ny), np.arange(nx)) / float(nx-1) + xhilo[0]
y = (yhilo[1] - yhilo[0]) * np.outer(np.arange(ny), np.ones(nx)) / float(ny-1) + yhilo[0]
# Set some Pat image parameters and pack them into an array.
g_amp = 5 # Peak brightness value
g_sig = 0.25 # How spread out Pat is
g_xcen = 0 # x position of center
g_ycen = 0 # y position of center
g_axrat = 1 # Minor-to-major axis ratio
g_pa = 0 # Major-axis position angle [degrees] c.c.w. from x axis
gpar = np.asarray([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa])
g_image = ldf.gauss_2d(x, y, gpar)
# Have a look at the un-lensed Pat.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Un-Lensed Galaxy')
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(g_image, **myargs)
# Create the interactive widget.
def f(g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa):
gpar = np.asarray([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa])
g_image = ldf.gauss_2d(x, y, gpar)
plt.imshow(g_image, **myargs)
layout = {'width':'initial'}
box_layout = {'display':'flex', 'flex_flow':'column', 'border':'5px solid grey', 'width':'initial'}
g_amp = FloatSlider(value=5, min=0, max=10, step=0.01, continuous_update=False, layout=layout)
g_sig = FloatSlider(value=0.25, min=0, max=0.5, step=0.01, continuous_update=False, layout=layout)
g_xcen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
g_ycen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
g_axrat = FloatSlider(value=1, min=0.01, max=1, step=0.01, continuous_update=False, layout=layout)
g_pa = FloatSlider(value=0, min=0, max=180, step=0.01, continuous_update=False, layout=layout)
interactive(f, g_amp=g_amp, g_sig=g_sig, g_xcen=g_xcen, g_ycen=g_ycen, g_axrat=g_axrat, g_pa=g_pa)
Box([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa], layout=box_layout)
```
___
# Activity 2: The Lensing Galaxy
Now that we have Pat, it is time to add a very massive, closer galaxy; we will name this one Chris. Scientists call Chris the lensing galaxy and Pat the lensed galaxy. The most important parameter for Chris is the Einstein radius. Knowing this parameter allows scientists to predict the mass of Chris, the lensing galaxy. We will be using Pat as our distant galaxy and tweak the parameters of Chris to see how each of the parameters changes what Pat looks like to us. First, run the program as it is set up. Then, use the sliders to tweak one parameter (e.g. `l_amp`, `l_axrat`, etc.) at a time to see how that parameter changes Pat. Try this a few times. Talk to the person next to you and describe one or two ways in which Pat looks different.
```python
lpar = [l_amp, l_xcen, l_ycen, l_axrat, l_pa]
```
Where the parameters are defined as:
>`l_amp` is the Einstein radius of Chris.
>`l_xcen` is the x position of the center of Chris.
>`l_ycen` is the y position of the center of Chris.
>`l_axrat` is the minor-to-major axis ratio.
>`l_pa` is the major-axis position angle [degrees] c.c.w. from x axis.
Each of the parameters has the recommended ranges in values below:
>$0 \le$ `l_amp` $\le 2$
>$-1 \le$ `l_xcen` $\le 1$
>$-1 \le$ `l_ycen` $\le 1$
>$0.01 \le$ `l_axrat` $\le 1$
>$0 \le$ `l_pa` $\le 180$
<div class='alert alert-info'>
<h3 class='alert-heading'>Helpful Reminder(s)</h3>
<ul>
<li>Click anywhere inside of a cell to select it.</li>
<li>Hit [Shift + Enter] to run a selected cell.</li>
</ul>
</div>
```python
# Set some lens-model parameters and pack them into an array.
l_amp = 1 # Einstein radius
l_xcen = 0 # x position of center
l_ycen = 0 # y position of center
l_axrat = 1 # Minor-to-major axis ratio
l_pa = 0 # Major-axis position angle [degrees] c.c.w. from x axis
lpar = np.asarray([l_amp, l_xcen, l_ycen, l_axrat, l_pa])
(xg, yg) = ldf.sie_grad(x, y, lpar)
g_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)
# Have a look at the lensed Pat.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Lensed Galaxy')
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(g_lensimage, **myargs)
# Create the interactive widget.
def f(l_amp, l_xcen, l_ycen, l_axrat, l_pa):
lpar = np.asarray([l_amp, l_xcen, l_ycen, l_axrat, l_pa])
(xg, yg) = ldf.sie_grad(x, y, lpar)
g_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)
plt.imshow(g_lensimage, **myargs)
l_amp = FloatSlider(value=1, min=0, max=2, step=0.01, continuous_update=False, layout=layout)
l_xcen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
l_ycen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
l_axrat = FloatSlider(value=1, min=0.01, max=1, step=0.01, continuous_update=False, layout=layout)
l_pa = FloatSlider(value=0, min=0, max=180, step=0.01, continuous_update=False, layout=layout)
interactive(f, l_amp=l_amp, l_xcen=l_xcen, l_ycen=l_ycen, l_axrat=l_axrat, l_pa=l_pa)
Box([l_amp, l_xcen, l_ycen, l_axrat, l_pa], layout=box_layout)
```
___
# Activity 3: The Lensed and Lensing Galaxies
Now that we have tweaked the parameters for both Pat and Chris individually, what happens if we change the parameters of both at the same time? In the following cells, the left image is Pat before it is lensed and the right image is Pat after its light has been bent by Chris and therefore lensed. **It is important to note here that we are not displaying Chris.**
<img src="Figures/Lensed_Galaxy_Components_v2.png" width="800">
___
## Part 3.1: Matching the Galaxy
You should have been given a random number at the start of this activity. If you do not have one, raise your hand now. Using the sliders below, try to reproduce the image corresponding to the number you received.
For your convenience, the parameters are defined again below:
```python
gpar = [g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa]
```
Where the parameters are defined as:
>`g_amp` is the peak brightness value.
>`g_sig` is how spread out Pat is.
>`g_xcen` is the x position of the center of Pat.
>`g_ycen` is the y position of the center of Pat.
>`g_axrat` is the minor-to-major axis ratio.
>`g_pa` is the major-axis position angle [degrees] c.c.w. from x axis.
```python
lpar = [l_amp, l_xcen, l_ycen, l_axrat, l_pa]
```
Where the parameters are defined as:
>`l_amp` is the Einstein radius of Chris.
>`l_xcen` is the x position of the center of Chris.
>`l_ycen` is the y position of the center of Chris.
>`l_axrat` is the minor-to-major axis ratio.
>`l_pa` is the major-axis position angle [degrees] c.c.w. from x axis.
As a hint, all of these have the same `l_amp` $= 1.5$, `l_xcen` $= 0$, `l_ycen` $= 0$, `l_pa` $= 0$, but have different `l_axrat` between $0.01$ and $1$. Finally, the range of values for Pat (the unlensed galaxy) are given by:
>$0 \le$ `g_amp` $\le 10$
>$0 \le$ `g_sig` $\le 0.5$
>$-1 \le$ `g_xcen` $\le 1$
>$-1 \le$ `g_ycen` $\le 1$
>$0.01 \le$ `g_axrat` $\le 1$
>$0 \le$ `g_pa` $\le 180$
<img src='Figures/lens_image_mosaic_v2.png'>
<div class='alert alert-info'>
<h3 class='alert-heading'>Helpful Reminder(s)</h3>
<ul>
<li>Click anywhere inside of a cell to select it.</li>
<li>Hit [Shift + Enter] to run a selected cell.</li>
</ul>
</div>
```python
# The following lines will plot the un-lensed and lensed images side by side.
gpar1 = np.asarray([5, 0.25, 0, 0, 1, 0])
lpar1 = np.asarray([1.5, 0, 0, 1, 0])
g_image1 = ldf.gauss_2d(x, y, gpar1)
(xg1, yg1) = ldf.sie_grad(x, y, lpar1)
g_lensimage1 = ldf.gauss_2d(x-xg1, y-yg1, gpar1)
# Display the images.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Un-Lensed Galaxy (Left) and Lensed Galaxy (Right)')
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(np.hstack((g_image1, g_lensimage1)), **myargs)
# Create the interactive widget.
def f(g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa, l_amp, l_xcen, l_ycen, l_axrat, l_pa):
gpar1 = np.asarray([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa])
lpar1 = np.asarray([l_amp, l_xcen, l_ycen, l_axrat, l_pa])
g_image1 = ldf.gauss_2d(x, y, gpar1)
(xg1, yg1) = ldf.sie_grad(x, y, lpar1)
g_lensimage1 = ldf.gauss_2d(x-xg1, y-yg1, gpar1)
plt.imshow(np.hstack((g_image1, g_lensimage1)), **myargs)
g_amp = FloatSlider(value=5, min=0, max=10, step=0.01, continuous_update=False, layout=layout)
g_sig = FloatSlider(value=0.25, min=0, max=0.5, step=0.01, continuous_update=False, layout=layout)
g_xcen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
g_ycen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
g_axrat = FloatSlider(value=1, min=0.01, max=1, step=0.01, continuous_update=False, layout=layout)
g_pa = FloatSlider(value=0, min=0, max=180, step=0.01, continuous_update=False, layout=layout)
l_amp = FloatSlider(value=1.5, min=0, max=2, step=0.01, continuous_update=False, layout=layout)
l_xcen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
l_ycen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
l_axrat = FloatSlider(value=1, min=0.01, max=1, step=0.01, continuous_update=False, layout=layout)
l_pa = FloatSlider(value=0, min=0, max=180, step=0.01, continuous_update=False, layout=layout)
interactive(f, g_amp=g_amp, g_sig=g_sig, g_xcen=g_xcen, g_ycen=g_ycen, g_axrat=g_axrat, g_pa=g_pa,
l_amp=l_amp, l_xcen=l_xcen, l_ycen=l_ycen, l_axrat=l_axrat, l_pa=l_pa)
Box([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa, l_amp, l_xcen, l_ycen, l_axrat, l_pa], layout=box_layout)
```
___
## Part 3.2: Real Lensed Galaxies
Now that you have successfully remade one of the above images, it is time to try it with some real lensed galaxies. Below are several real galaxies from the presentation. Your goal is to choose one galaxy below and try to adjust the parameters to match the image (the lensed galaxy). Good luck! As a note, it may be a useful visual effect to change the value of the key `cmap` in the first cell below to something like `cm.magma`, `cm.inferno`, `cm.hot`, or `cm.Blues`.
For your convenience, the parameters are defined again below:
```python
gpar = [g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa]
```
Where the parameters are defined as:
>`g_amp` is the peak brightness value.
>`g_sig` is how spread out Pat is.
>`g_xcen` is the x position of the center of Pat.
>`g_ycen` is the y position of the center of Pat.
>`g_axrat` is the minor-to-major axis ratio.
>`g_pa` is the major-axis position angle [degrees] c.c.w. from x axis.
```python
lpar = [l_amp, l_xcen, l_ycen, l_axrat, l_pa]
```
Where the parameters are defined as:
>`l_amp` is the Einstein radius of Chris.
>`l_xcen` is the x position of the center of Chris.
>`l_ycen` is the y position of the center of Chris.
>`l_axrat` is the minor-to-major axis ratio.
>`l_pa` is the major-axis position angle [degrees] c.c.w. from x axis.
The values should be between the given ranges:
>$0 \le$ `l_amp` $\le 10$
>$-1 \le$ `l_xcen` $\le 1$
>$-1 \le$ `l_ycen` $\le 1$
>$0 \le$ `l_axrat` $\le 1$
>$0 \le$ `l_pa` $\le 180$
<img src = 'Figures/real_galaxies.png'>
```python
# You can change the last one to change the color of the map.
myargs = {'interpolation': 'nearest', 'origin': 'lower', 'cmap': cm.Blues}
```
```python
# The following lines will plot the un-lensed and lensed images side by side.
gpar1 = np.asarray([5, 0.25, 0, 0, 1, 0])
lpar1 = np.asarray([1, 0, 0, 1, 0])
g_image1 = ldf.gauss_2d(x, y, gpar1)
(xg1, yg1) = ldf.sie_grad(x, y, lpar1)
g_lensimage1 = ldf.gauss_2d(x-xg1, y-yg1, gpar1)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(np.hstack((g_image1, g_lensimage1)), **myargs)
# Create the interactive widget.
def f(g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa, l_amp, l_xcen, l_ycen, l_axrat, l_pa):
gpar1 = np.asarray([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa])
lpar1 = np.asarray([l_amp, l_xcen, l_ycen, l_axrat, l_pa])
g_image1 = ldf.gauss_2d(x, y, gpar1)
(xg1, yg1) = ldf.sie_grad(x, y, lpar1)
g_lensimage1 = ldf.gauss_2d(x-xg1, y-yg1, gpar1)
plt.imshow(np.hstack((g_image1, g_lensimage1)), **myargs)
g_amp = FloatSlider(value=5, min=0, max=10, step=0.01, continuous_update=False, layout=layout)
g_sig = FloatSlider(value=0.25, min=0, max=0.5, step=0.01, continuous_update=False, layout=layout)
g_xcen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
g_ycen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
g_axrat = FloatSlider(value=1, min=0.01, max=1, step=0.01, continuous_update=False, layout=layout)
g_pa = FloatSlider(value=0, min=0, max=180, step=0.01, continuous_update=False, layout=layout)
l_amp = FloatSlider(value=1, min=0, max=2, step=0.01, continuous_update=False, layout=layout)
l_xcen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
l_ycen = FloatSlider(value=0, min=-1, max=1, step=0.01, continuous_update=False, layout=layout)
l_axrat = FloatSlider(value=1, min=0.01, max=1, step=0.01, continuous_update=False, layout=layout)
l_pa = FloatSlider(value=0, min=0, max=180, step=0.01, continuous_update=False, layout=layout)
interactive(f, g_amp=g_amp, g_sig=g_sig, g_xcen=g_xcen, g_ycen=g_ycen, g_axrat=g_axrat, g_pa=g_pa,
l_amp=l_amp, l_xcen=l_xcen, l_ycen=l_ycen, l_axrat=l_axrat, l_pa=l_pa)
Box([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa, l_amp, l_xcen, l_ycen, l_axrat, l_pa], layout=box_layout)
```
___
## Part 3.3: Relation to the Mass of Galaxies
As stated above, the Einstein radius gives us the ability to measure the mass of the lensing galaxy. The parameters we have played with have to do with the geometry between the two galaxies and us and with the Einstein radius in particular. By optimizing the parameters in the last two activities, you estimated the value for the Einstein radius. Knowing that and the various distances involved, you can use the following formula to find the mass of the lensing galaxy.
$$ \theta_1 = \sqrt{\frac{4GM}{c^2}\frac{D_{LS}}{D_LD_S}} $$
In the equation above:
>$\theta_1$ is the Einstein radius.
>$c$ is the speed of light.
>$G$ is the gravitational constant.
>$M$ is the mass of the lensing galaxy (Chris) — the galaxy whose gravity bends the light.
>$D_L$ is the distance to the lensing galaxy (Chris).
>$D_S$ is the distance to the lensed galaxy (Pat).
>$D_{LS}$ is the distance from the lensed galaxy (Pat) to the lensing galaxy (Chris).
The distances to these galaxies are determined by the galaxies' respective redshifted spectra.
In the diagram below, the Einstein radius is given by $\theta_1$.
<img src = "Figures/Einstein_radius.png" width = 400 height = 400>
<img src = "Figures/grav_lens.png">
___
|
astro-datalabREPO_NAMEnotebooks-latestPATH_START.@notebooks-latest_extracted@notebooks-latest-master@06_EPO@TeenAstronomyCafe@05_Gravitational_Lensing@Gravitational_Lensing.ipynb@.PATH_END.py
|
{
"filename": "massive_star_composition.py",
"repo_name": "AMReX-Astro/Castro",
"repo_path": "Castro_extracted/Castro-main/Exec/science/massive_star/analysis/massive_star_composition.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import matplotlib
matplotlib.use('agg')
import os
import sys
import yt
import matplotlib.pyplot as plt
import numpy as np
from functools import reduce
from mpl_toolkits.axes_grid1 import ImageGrid
# assume that our data is in CGS
from yt.units import cm, amu
from yt.frontends.boxlib.api import CastroDataset
from yt.visualization.image_writer import multi_image_composite
def _iron_group(field, data):
return (
data["boxlib", "X(Cr48)"] +
data["boxlib", "X(Fe52)"] +
data["boxlib", "X(Fe54)"] +
data["boxlib", "X(Ni56)"] + 1.e-10)
def _si_group(field, data):
return (
data["boxlib", "X(Si28)"] +
data["boxlib", "X(S32)"] +
data["boxlib", "X(Ar36)"] +
data["boxlib", "X(Ca40)"] +
data["boxlib", "X(Ti44)"] + 1.e-10)
def _light_nuclei(field, data):
return (
data["boxlib", "X(H1)"] +
data["boxlib", "X(He3)"] +
data["boxlib", "X(He4)"] +
data["boxlib", "X(C12)"] +
data["boxlib", "X(N14)"] +
data["boxlib", "X(O16)"] +
data["boxlib", "X(Ne20)"] +
data["boxlib", "X(Mg24)"] +
data["boxlib", "X(n)"] +
data["boxlib", "X(p)"] + 1.e-10)
# Register the three composite composition fields with yt so they can be
# sliced and plotted like native dataset fields. All are dimensionless
# mass fractions (units=""), sampled cell-locally.
for _field_name, _field_func in (("iron_group", _iron_group),
                                 ("si_group", _si_group),
                                 ("light_nuclei", _light_nuclei)):
    yt.add_field(
        name=("gas", _field_name),
        function=_field_func,
        sampling_type="local",
        units="",
    )
# Command line: the single argument is the Castro plotfile to visualise.
plotfile = sys.argv[1]
ds = CastroDataset(plotfile)
# Domain extent along the first two axes of the (2-d axisymmetric) grid;
# the slice axis below is "theta", so these are presumably r and z — confirm.
xmin = ds.domain_left_edge[0]
xmax = ds.domain_right_edge[0]
ymin = ds.domain_left_edge[1]
ymax = ds.domain_right_edge[1]
# Multiplying by 0.0 keeps the yt unit attached to the zero value.
xctr = 0.0 * xmin
L_x = xmax - xmin
yctr = 0.5 * (ymin + ymax)
L_y = ymax - ymin
fig = plt.figure()
fig.set_size_inches(12.0, 9.0)
# Fraction of the domain covered by the zoomed-in slice window.
width_frac = 0.1
center=[xmin + 0.25*width_frac*L_x, yctr, 0.0*cm]
width=[0.5*width_frac*L_x, width_frac*L_y]
# Slice through theta showing the three composite composition fields
# registered above.
slc = yt.SlicePlot(ds, "theta",
                   fields=[("gas", "iron_group"), ("gas", "si_group"), ("gas", "light_nuclei")],
                   center=[xmin + 0.25*width_frac*L_x, yctr, 0.0*cm],
                   width=[0.5*width_frac*L_x, width_frac*L_y, 0.0*cm], fontsize="12")
res = (1024, 512)
# Fixed-resolution buffer so the three fields can be composited per pixel.
frb = slc.data_source.to_frb(width[0], res, height=width[1]) #width, res)#, center=center)
# Map log10 of each composition group onto one channel of the output image.
multi_image_composite("multi_channel1.png",
                      np.transpose(np.log10(frb["iron_group"])),
                      np.transpose(np.log10(frb["si_group"])),
                      np.transpose(np.log10(frb["light_nuclei"])))
|
AMReX-AstroREPO_NAMECastroPATH_START.@Castro_extracted@Castro-main@Exec@science@massive_star@analysis@massive_star_composition.py@.PATH_END.py
|
{
"filename": "ACSImport.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/Kit/acsutilpy/src/AcsutilPy/ACSImport.py",
"type": "Python"
}
|
#! /usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) National Research Council of Canada, 2007
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# The 'knee' module, from which this module is derived, is
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Python Software Foundation; All Rights Reserved. It is used
# under the terms of the Python Software Foundation License Version 2.
#
# No changes were made to Python during the preparation of this
# module nor are any required to use it.
#
# "@(#) $Id: ACSImport.py,v 1.4 2007/06/11 21:49:42 agrimstrup Exp $"
#
# who when what
# -------- -------- ----------------------------------------------
# agrimstr 2007-02-20 created
#
"""A customized hierarchical module import for ACS.
In ACS, Python modules from the same package can reside in several
places in the PYTHONPATH, such as INTROOT, INTLIST and ACSROOT.
The default Python import will stop searching for modules when it
finds the first instance of the package directory. As a result,
modules stored in the other parts of the tree are never found.
The replacement function preserves the hierarchical structure
of Python modules and supports both 'import foo' and
'from foo import bar' syntax.
It is based on the 'knee' module that appears in the Python demo source
tree.
"""
#------------------------------------------------------------------------------
__revision__ = "$Id: ACSImport.py,v 1.4 2007/06/11 21:49:42 agrimstrup Exp $"
#--REGULAR IMPORTS-------------------------------------------------------------
import sys
import os
import imp
import inspect
import __builtin__
#------------------------------------------------------------------------------
def searchingImport(name, globals=None, locals=None, fromlist=None):
    """Replacement for Python default import

    First delegates to the original builtin import; only if that raises
    ImportError does it fall back to the hierarchical search across all
    of sys.path (so same-named packages split over INTROOT/INTLIST/ACSROOT
    are still found).

    Arguments:
    name -- Name of the module to be imported (No default)

    Keyword arguments:
    globals -- Dictionary containing the globally defined names
               (Default: None)
    locals -- Dictionary containing the locally defined names
              (Default: None)
    fromlist -- List of classes to be imported (Default: None)

    Returns:
    m -- The module object imported

    Exceptions:
    ImportError is thrown if the module is not found.
    """
    try:
        m = _original_import(name, globals, locals, fromlist)
        # A module object without __file__ that is not a builtin is a stale
        # placeholder; reload it to pick up a real implementation. (Python 2:
        # reload is a builtin.)
        if inspect.ismodule(m) and not imp.is_builtin(m.__name__) and not hasattr(m,'__file__'):
            reload(m)
    except ImportError:
        # Fallback: resolve the package root, then walk down the dotted tail.
        parent = _determineParent(globals)
        q, tail = _findHeadPackage(parent, name)
        m = _loadTail(q, tail)
        if not fromlist:
            # 'import a.b' binds the root package, mirroring builtin import.
            return q
        if hasattr(m, "__path__"):
            _ensureFromList(m, fromlist)
    return m
def _determineParent(globals):
"""Determine the parent of this module
Arguments:
globals -- Dictionary containing the globally defined names
Returns:
parent -- The module object of the parent
"""
if not globals or not globals.has_key("__name__"):
return None
pname = globals['__name__']
if globals.has_key("__path__"):
parent = sys.modules[pname]
assert globals is parent.__dict__
return parent
if '.' in pname:
i = pname.rfind('.')
pname = pname[:i]
parent = sys.modules[pname]
assert parent.__name__ == pname
return parent
return None
def _findHeadPackage(parent, name):
    """Find and load the package root

    Arguments:
    parent -- Parent module of this package (or None)
    name -- Name of the module to be loaded (possibly dotted)

    Returns:
    q -- The module object for the package root
    tail -- The remainder of the module tree to be loaded ("" if none)

    Exceptions:
    ImportError is raised if the module was not found.
    """
    # Split "a.b.c" into head "a" and tail "b.c".
    if '.' in name:
        i = name.find('.')
        head = name[:i]
        tail = name[i+1:]
    else:
        head = name
        tail = ""
    # First try the name qualified relative to the parent package.
    if parent:
        qname = "%s.%s" % (parent.__name__, head)
    else:
        qname = head
    q = _importModule(head, qname, parent)
    if q: return q, tail
    # Relative lookup failed: retry as an absolute (top-level) import.
    if parent:
        qname = head
        parent = None
        q = _importModule(head, qname, parent)
        if q: return q, tail
    raise ImportError, "No module named " + qname
def _loadTail(q, tail):
    """Load the remainder of the module hierarchy

    Arguments:
    q -- Root module of the hierarchy
    tail -- The remainder of the module tree (dotted string, may be "")

    Returns:
    m -- The module object for the leaf module

    Exceptions:
    ImportError is raised if any module in the hierarchy
    cannot be found."""
    m = q
    # Consume one dotted component per iteration, importing it under its
    # fully qualified name relative to the module loaded so far.
    while tail:
        i = tail.find('.')
        if i < 0: i = len(tail)
        head, tail = tail[:i], tail[i+1:]
        mname = "%s.%s" % (m.__name__, head)
        m = _importModule(head, mname, m)
        if not m:
            raise ImportError, "No module named " + mname
    return m
def _ensureFromList(m, fromlist, recursive=0):
    """Make sure every name in a 'from m import ...' list is importable.

    Arguments:
    m -- Package module the names are imported from
    fromlist -- Sequence of names requested by the caller ("*" allowed)
    recursive -- Internal flag: nonzero while expanding m.__all__, so a
                 "*" inside __all__ is not expanded a second time

    Exceptions:
    ImportError is raised if a requested submodule cannot be found.
    """
    for sub in fromlist:
        if sub == "*":
            if not recursive:
                try:
                    all = m.__all__
                except AttributeError:
                    # Package defines no __all__; nothing to expand.
                    pass
                else:
                    _ensureFromList(m, all, 1)
            continue
        # Only import names not already present as attributes of m.
        if sub != "*" and not hasattr(m, sub):
            subname = "%s.%s" % (m.__name__, sub)
            submod = _importModule(sub, subname, m)
            if not submod:
                raise ImportError, "No module named " + subname
def _importModule(partname, fqname, parent):
    """Import a single module, searching every sys.path entry.

    Arguments:
    partname -- Unqualified module name (last dotted component)
    fqname -- Fully qualified dotted name used as the sys.modules key
    parent -- Parent package module, or None for a top-level module

    Returns:
    m -- The imported module object, or None if it was not found
    """
    # Already imported: reuse the cached module.
    try:
        return sys.modules[fqname]
    except KeyError:
        pass
    m = None
    try:
        fp, pathname, stuff = imp.find_module(partname,
                                              parent and parent.__path__)
    except ImportError:
        # imp.find_module stops at the first matching package directory;
        # fall back to scanning every sys.path entry for a loose .py file
        # so modules split across INTROOT/INTLIST/ACSROOT are still found.
        try:
            dirpath = parent.__name__.replace('.','/')
        except AttributeError:
            dirpath = ""
        for dir in sys.path:
            pkgpath = os.path.join(dir, dirpath)
            if os.access(pkgpath, os.F_OK):
                filepath = pkgpath + '/' + partname + '.py'
                if os.access(filepath, os.R_OK):
                    m = imp.load_source(fqname, filepath)
                    if m: break
        if m is None:
            return None
        else:
            if parent:
                # Bind the submodule as an attribute of its package.
                setattr(parent, partname, m)
            return m
    try:
        m = imp.load_module(fqname, fp, pathname, stuff)
    finally:
        # find_module may return an open file handle; always close it.
        if fp: fp.close()
    if parent:
        setattr(parent, partname, m)
    return m
# Save the original import hook so searchingImport can delegate to it first.
_original_import = __builtin__.__import__
# Install the searching import as the interpreter-wide import hook.
# NOTE: importing this module therefore has a global side effect.
__builtin__.__import__ = searchingImport
#
# ___oOo___
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@Kit@acsutilpy@src@AcsutilPy@ACSImport.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "mlipatov/paint_atmospheres",
"repo_path": "paint_atmospheres_extracted/paint_atmospheres-master/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages

# Build/install configuration for the paint_atmospheres distribution.
setup(
    name='paint_atmospheres',
    version='1.2.0',
    packages=find_packages(),
    # NOTE(review): this maps the name 'paint_atmospheres' onto the 'pa'
    # directory, yet find_packages() will discover the package as 'pa' and
    # the entry points below import from 'pa' — confirm the mapping is
    # intended (it may be a no-op or a leftover).
    package_dir={'paint_atmospheres': 'pa'},
    # Console commands, each wired to the run() function of a pa submodule.
    entry_points={'console_scripts': [
        'calc_limbdark=pa.calc_limbdark:run',
        'calc_star=pa.calc_star:run',
        'calc_spectra=pa.calc_spectra:run'
    ]}
)
|
mlipatovREPO_NAMEpaint_atmospheresPATH_START.@paint_atmospheres_extracted@paint_atmospheres-master@setup.py@.PATH_END.py
|
{
"filename": "_database.py",
"repo_name": "ucl-exoplanets/ExoTETHyS",
"repo_path": "ExoTETHyS_extracted/ExoTETHyS-master/exotethys/_database.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import pkg_resources
import importlib.resources
#from ._0imports import *
import os
import sys
if sys.version_info[0] > 2:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
input = raw_input
#import matplotlib
#if os.environ.get('DISPLAY', '') == '':
# print('no display found. Using non-interactive Agg backend')
# matplotlib.use('Agg')
#else:
# matplotlib.use('TkAgg')
import glob
import time
import shutil
import pickle
import h5py
import astropy.units as u
__all__ = ["databases"]
class Database:
def __init__(self, database_name, vital=False, date_to_update='daily', force_update=False, ask_size=None):
self.database_name = database_name
package_name = 'exotethys'
info_file_name = '_0database.pickle'
package_data = {package_name:'Passbands/*.pass'}
last_update_file_name = 'database_last_update.txt'
#info_file_path = pkg_resources.resource_filename(package_name, info_file_name)
with importlib.resources.path(package_name, info_file_name) as resource_path:
info_file_path = str(resource_path)
package_path = os.path.join(os.path.expanduser('~'), '.{0}'.format(package_name))
if not os.path.isdir(package_path):
os.mkdir(package_path)
self.package_path = package_path
#self.directory_path = os.path.join(package_path, '{0}_{1}'.format(database_name, directory_name))
self.directory_path = os.path.join(package_path, '{0}'.format(database_name))
last_update_file_path = os.path.join(package_path, '{0}_{1}'.format(database_name, last_update_file_name))
if date_to_update == 'daily':
date_to_update = int(time.strftime('%y%m%d'))
else:
date_to_update = int(date_to_update)
if os.path.isdir(self.directory_path):
if force_update and len(glob.glob(os.path.join(self.directory_path, '*'))) != 0:
shutil.rmtree(self.directory_path)
os.mkdir(self.directory_path)
update = True
elif len(glob.glob(os.path.join(self.directory_path, '*'))) == 0:
update = True
else:
if not os.path.isfile(last_update_file_path):
update = True
elif int(open(last_update_file_path).readlines()[0]) < date_to_update:
update = True
else:
update = False
else:
os.mkdir(self.directory_path)
update = True
#with open(os.path.join(package_name, info_file_name), 'rb') as file:
with open(info_file_path,'rb') as file: #Christophe correction
dbx_files_dict = pickle.load(file)
self.dbx_files = dbx_files_dict[database_name]
#dbx_files = pickle.load(open(info_file_path, 'rb'))
#dbx_files = dbx_files['{0}_{1}'.format(database_name, directory_name)]
# def self_print(self):
# print(self.database_name)
# print(self.directory_name)
# print(self.package_path)
# print(self.directory_path)
def get_file_content(self, dbx_file):
abs_path_file = os.path.join(self.package_path, self.dbx_files[dbx_file]['local_path'])
if not os.path.isfile(abs_path_file):
print('Downloading... ', dbx_file)
urlretrieve(self.dbx_files[dbx_file]['link'], os.path.join(self.package_path, self.dbx_files[dbx_file]['local_path']))
else:
print('File already here... ', dbx_file)
file_ext = os.path.splitext(abs_path_file)[-1]
if file_ext == '.pickle':
try:
with open(abs_path_file, 'rb') as file:
model_dict = pickle.load(file)
except:
print('Re-downloading... ', dbx_file)
urlretrieve(self.dbx_files[dbx_file]['link'], os.path.join(self.package_path, self.dbx_files[dbx_file]['local_path']))
with open(abs_path_file, 'rb') as file:
model_dict = pickle.load(file)
elif file_ext in ['.h5', '.hdf5']:
try:
imodel_dict = h5py.File(abs_path_file,'r')
except:
print('Re-downloading... ', dbx_file)
urlretrieve(self.dbx_files[dbx_file]['link'], os.path.join(self.package_path, self.dbx_files[dbx_file]['local_path']))
imodel_dict = h5py.File(abs_path_file,'r')
model_dict = {}
for key in list(imodel_dict.keys()):
model_dict[key] = imodel_dict[key][()] * u.Unit( imodel_dict[key].attrs['unit'] )
return model_dict
def get_filename_list(self):
    """Return the names of all files registered for this database."""
    return [name for name in self.dbx_files]
# Registry of the remote stellar-model databases known to the package.
# Each entry records the minimum required local-cache date ('YYMMDD') and
# the `vital` flag passed to the Database constructor.
databases = {
    "Phoenix_2012_13": Database('Phoenix_2012_13', date_to_update='200412', vital=True),
    "Phoenix_drift_2012": Database('Phoenix_drift_2012', date_to_update='201205', vital=True),
    "Phoenix_2018": Database('Phoenix_2018', date_to_update='200412', vital=True),
    "Atlas_2000": Database('Atlas_2000', date_to_update='200403', vital=True),
    "Stagger_2015": Database('Stagger_2015', date_to_update='201001', vital=True),
    "Stagger_2018": Database('Stagger_2018', date_to_update='220216', vital=True),
    "MPS_Atlas_set1_2023": Database('MPS_Atlas_set1_2023', date_to_update='230512', vital=True),
    "MPS_Atlas_set2_2023": Database('MPS_Atlas_set2_2023', date_to_update='230704', vital=True),
}
|
ucl-exoplanetsREPO_NAMEExoTETHySPATH_START.@ExoTETHyS_extracted@ExoTETHyS-master@exotethys@_database.py@.PATH_END.py
|
{
"filename": "vis_spectrometer.py",
"repo_name": "mtalapinto/moes",
"repo_path": "platospec/optics/vis_spectrometer.py",
"type": "Python"
}
|
from . import slit
from . import fn_system
from . import collimator
from . import echelle
from . import flat_mirror
from . import grism
from . import camera
from . import field_flattener
from . import CCD_vis
from . import refraction_index
from . import trace
from . import transform
import numpy as np
from . import cte
from . import parameters
def tracing(spectrum, params, fib, temps):
    """Trace rays for `spectrum` through the full VIS spectrograph model.

    Parameters
    ----------
    spectrum : ndarray, shape (N, 2)
        Column 0 is the echelle order, column 1 the wavelength.
    params : sequence
        Instrument parameter vector; indices 0-43 are read here (slit
        decenters, tilts, grating constants, distances; index 43 is the
        pressure in Pa).
    fib : str
        Fiber selector, 'A' or 'B' (selects which slit-decenter
        parameters are used; any other value raises NameError later).
    temps : sequence
        Temperature sensor readings; temps[1:] are spectrograph sensors
        (see the sensor-index comment in tracing_fn).

    Returns
    -------
    ndarray
        One row per ray: [order, wavelength, x, y, z, DCx, DCy, DCz,
        epx, epy], converted from mm to pixels by CCD_vis.mm2pix.
    """
    #
    # Variables initialization
    #
    temp_scaling = 1.
    H_init = np.zeros([len(spectrum), 3])
    DC_init = np.zeros([len(spectrum), 3])
    order = np.zeros(len(spectrum))
    wave = np.zeros(len(spectrum))
    order[:] = spectrum[:, 0]
    wave[:] = spectrum[:, 1]
    # x, y, z collect intermediate ray heights; they are appended to
    # below but never read or returned (debug leftovers).
    x = []
    y = []
    z = []
    H_init[:, 0] = np.zeros(len(spectrum))
    H_init[:, 1] = np.zeros(len(spectrum))
    H_init[:, 2] = np.zeros(len(spectrum))
    DC_init[:, 0] = np.zeros(len(spectrum))
    DC_init[:, 1] = np.zeros(len(spectrum))
    DC_init[:, 2] = np.zeros(len(spectrum))
    #
    # Environmental data
    #
    p = params[43]  # in Pa, 10e-5 in mbar
    temps_spec = temps[1:]
    t = np.average(temps_spec)
    wave = refraction_index.waves_air(wave, t, p)
    #
    # Slit data
    #
    if fib == 'A':
        slit_dec_x = np.full(len(spectrum), params[0])
        slit_dec_y = np.full(len(spectrum), params[1])
    elif fib == 'B':
        slit_dec_x = np.full(len(spectrum), params[2])
        slit_dec_y = np.full(len(spectrum), params[3])
    #
    # Position and initial orientation
    #
    defocus = params[4]
    H0, DC0 = slit.slit_params_init(H_init, DC_init, slit_dec_x, slit_dec_y, defocus)
    # To paraxial plane of the fn system
    d_fib_fn = 35.16
    H0 = trace.to_next_surface(H0, DC0, d_fib_fn)
    T_fib = np.asarray([0. * np.pi / 180, 0. * np.pi / 180, 0. * np.pi / 180])
    #
    # FN system
    #
    t_fn = temps[6]
    fndata = fn_system.load_data()
    fn_system_data = fn_system.set_data(fndata)
    H0, DC0 = fn_system.tracing(H0, DC0, T_fib, wave, t_fn, p, fn_system_data)
    T_slit = np.asarray([params[5]*np.pi/180, params[6]*np.pi/180, params[7]*np.pi/180])
    H1 = transform.transform(H0, -T_slit)
    DC1 = transform.transform(DC0, -T_slit)
    x.append(H1[:, 2])
    y.append(H1[:, 1])
    z.append(H1[:, 0])
    #
    # Collimator 1st pass
    #
    t_bench = temps[3]
    z_pos_col = params[8]  # 1590
    # cte.recalc applies thermal expansion ('alum5083'/'zerodur'/'lf5'
    # select the material's expansion coefficient).
    z_pos_col = temp_scaling*cte.recalc(z_pos_col, 'alum5083', t_bench)
    d_slit_col = np.abs(z_pos_col - H1[:, 2])
    t_coll_left = temps[2]
    t_coll_right = temps[1]
    t_coll = (t_coll_left + t_coll_right)/2
    coll_tilt_x = temp_scaling*cte.recalc(params[9], 'alum5083', t_coll)
    coll_tilt_y = temp_scaling*cte.recalc(params[10], 'alum5083', t_coll)
    T_coll = np.asarray([coll_tilt_x*np.pi/180, coll_tilt_y*np.pi/180, 0.*np.pi/180])
    H2 = trace.to_next_surface(H1, DC1, d_slit_col)
    curv_rad_aux = -1594.
    curv_rad_aux = temp_scaling*cte.recalc(curv_rad_aux, 'zerodur', t_coll_left)
    curvature_rad = np.full(len(H2), curv_rad_aux*2)
    H2, DC2 = collimator.DCcoll(H2, DC1, T_coll, curvature_rad)
    x.append(H2[:, 2])
    y.append(H2[:, 1])
    z.append(H2[:, 0])
    #
    # Echelle dispersion
    #
    d_col_ech_aux = -1594.305
    d_col_ech = temp_scaling*cte.recalc(d_col_ech_aux, 'alum5083', temps[5])
    z_pos_ech_aux = d_col_ech - d_col_ech_aux
    z_pos_ech = np.full(len(H_init), z_pos_ech_aux)
    H3 = trace.to_next_surface(H2, DC2, z_pos_ech)
    # Grating data
    G = params[11]*1e-3
    d = 1/G
    temp_echelle = (temps[6] + temps[8])/2
    d_new = temp_scaling*cte.recalc(d, 'zerodur', temp_echelle)
    G_new = 1/d_new
    # Orientation and diffraction
    ech_blaze = temp_scaling*cte.recalc(params[12], 'alum5083', temp_echelle)
    ech_gamma = temp_scaling*cte.recalc(params[13], 'alum5083', temp_echelle)
    ech_z_tilt = temp_scaling*cte.recalc(params[14], 'alum5083', temp_echelle)
    T_echelle = np.asarray([ech_blaze*np.pi/180, ech_gamma*np.pi/180, ech_z_tilt*np.pi/180])
    H3, DC3 = echelle.diffraction(H3, DC2, T_echelle, order, wave, G_new)
    #
    # Collimator 2nd pass
    #
    d_ech_col = np.full(len(H_init), z_pos_col)
    H4 = trace.to_next_surface(H3, DC3, d_ech_col)
    H4, DC4 = collimator.DCcoll(H4, DC3, T_coll, curvature_rad)
    #
    # Transfer mirror
    #
    d_col_tm_aux = params[15]
    d_col_tm = temp_scaling*cte.recalc(d_col_tm_aux, 'alum5083', t_bench)
    z_pos_tm_aux = d_col_ech - d_col_tm
    z_pos_tm = np.full(len(H_init), z_pos_tm_aux)
    H5 = trace.to_next_surface(H4, DC4, z_pos_tm)
    # Orientation
    tm_tilt_x = temp_scaling*cte.recalc(params[16], 'alum5083', temps[6])
    tm_tilt_y = temp_scaling*cte.recalc(params[17], 'alum5083', temps[6])
    T_flat = np.asarray([tm_tilt_x*np.pi/180, tm_tilt_y*np.pi/180, 0.0*np.pi/180])
    H5, DC5 = flat_mirror.flat_out(H5, DC4, T_flat)
    #
    # Collimator 3rd pass
    #
    d_trf_col = np.full(len(H_init), z_pos_col)
    H6 = trace.to_next_surface(H5, DC5, d_trf_col)
    # Same nominal radius as the 1st pass but expanded with the
    # right-side collimator temperature.
    curv_rad_aux = -1594.
    curv_rad_aux = temp_scaling*cte.recalc(curv_rad_aux, 'zerodur', t_coll_right)
    curvature_rad = np.full(len(H2), curv_rad_aux*2)
    H6, DC6 = collimator.DCcoll(H6, DC5, T_coll, curvature_rad)
    #
    # Grism
    #
    z_pos_grism = temp_scaling*cte.recalc(params[18], 'alum5083', temps[4])
    dcoll3_grism = np.full(len(H_init), z_pos_grism)
    H7 = trace.to_next_surface(H6, DC6, dcoll3_grism)
    # Position and orientation
    grm_dec_x = temp_scaling*cte.recalc(params[19], 'alum5083', temps[4])
    grm_dec_y = temp_scaling*cte.recalc(params[20], 'alum5083', temps[4])
    grism_dec_x = np.full(len(H_init), grm_dec_x)
    grism_dec_y = np.full(len(H_init), grm_dec_y)
    grm_tilt_x = temp_scaling*cte.recalc(params[21], 'alum5083', temps[4])
    grm_tilt_y = temp_scaling*cte.recalc(params[22], 'alum5083', temps[4])
    T_grism_in = np.asarray([grm_tilt_x * np.pi / 180, grm_tilt_y * np.pi / 180, 0. * np.pi / 180])
    # Material and grating data
    grism_material = 'LF5'
    dG = 1/(params[23] * 1e-3)
    dG_new = temp_scaling*cte.recalc(dG, 'lf5', temps[4])
    GD_new = 1/dG_new
    GD = np.full(len(H_init), GD_new)
    apex_grism = params[24]
    apex_grism = temp_scaling*cte.recalc(apex_grism, 'lf5', temps[4])
    H7, DC7 = grism.dispersion(H7, DC6, T_grism_in, wave, grism_material, apex_grism, GD, t, p, grism_dec_x, grism_dec_y)
    #
    #Camera
    #
    z_pos_cam = params[25]
    z_pos_cam = temp_scaling*cte.recalc(z_pos_cam, 'alum5083', temps[7])
    d_grism_cam = np.full(len(H_init), z_pos_cam)
    H8 = trace.to_next_surface(H7, DC7, d_grism_cam)
    # Position
    cam_dec_x = temp_scaling*cte.recalc(params[26], 'alum5083', temps[7])
    cam_dec_y = temp_scaling*cte.recalc(params[27], 'alum5083', temps[7])
    dec_x = np.full(len(H8), cam_dec_x)
    dec_y = np.full(len(H8), cam_dec_y)
    H8[:, 0] = H8[:, 0] + dec_x
    H8[:, 1] = H8[:, 1] + dec_y
    # Camera entrance-pupil coordinates; these are numpy views of H8, so
    # they track the in-place updates to H8 below until H8 is rebound.
    epx = H8[:, 0]
    epy = H8[:, 1]
    # Orientation
    cam_tilt_x = temp_scaling*cte.recalc(params[28], 'alum5083', temps[7])
    cam_tilt_y = temp_scaling*cte.recalc(params[29], 'alum5083', temps[7])
    T_cam = np.asarray([cam_tilt_x * np.pi / 180, cam_tilt_y * np.pi / 180, 0. * np.pi / 180])
    # Tracing camera lens 1 to 5
    d_cam_ff = temp_scaling*cte.recalc(params[30], 'alum5083', temps[7])
    camdata = camera.load_data()
    cam_data = camera.set_data(camdata)
    cam_data[-1][2] = d_cam_ff
    H8, DC8, H_cam_in = camera.tracing(H8, DC7, T_cam, wave, temps[7], p, cam_data)
    #
    # Field flattener
    #
    # position
    ccd_ff_dec_x = temp_scaling*cte.recalc(params[31], 'alum5083', temps[7])
    ccd_ff_dec_y = temp_scaling*cte.recalc(params[32], 'alum5083', temps[7])
    ff_dec_x = np.full(len(H_init), ccd_ff_dec_x, dtype='float64')
    ff_dec_y = np.full(len(H_init), ccd_ff_dec_y, dtype='float64')
    H8[:, 0] = H8[:, 0] + ff_dec_x
    H8[:, 1] = H8[:, 1] + ff_dec_y
    # orientation
    ccd_ff_tilt_x = temp_scaling*cte.recalc(params[33], 'alum5083', temps[7])
    ccd_ff_tilt_y = temp_scaling*cte.recalc(params[34], 'alum5083', temps[7])
    ccd_ff_tilt_z = temp_scaling*cte.recalc(params[35], 'alum5083', temps[7])
    T_ff_ccd = np.array([ccd_ff_tilt_x*np.pi/180, ccd_ff_tilt_y*np.pi/180, ccd_ff_tilt_z*np.pi/180])
    # Tracing
    ffdata = field_flattener.load_data()
    ff_data = field_flattener.set_data(ffdata)
    d_ff_ccd = temp_scaling*cte.recalc(params[36], 'alum5083', temps[7])
    ff_data[-1][2] = d_ff_ccd
    H9, DC9 = field_flattener.tracing(H8, DC8, T_ff_ccd, wave, temps[7], p, ff_data)
    Hff = H9.copy()
    # End Camera
    #
    # Detector
    #
    # Position
    # NOTE(review): temps_spec becomes a scalar here, so the second
    # np.average is a no-op; t ends up equal to np.average(temps[1:]).
    temps_spec = np.average(temps[1:])
    t = np.average(temps_spec)
    ccd_dec_x = temp_scaling*cte.recalc(params[37], 'alum5083', t)
    ccd_dec_y = temp_scaling*cte.recalc(params[38], 'alum5083', t)
    ccd_defocus = temp_scaling*cte.recalc(params[39], 'alum5083', t)
    ccd_dec_x = np.full(len(H_init), ccd_dec_x, dtype='float64')
    ccd_dec_y = np.full(len(H_init), ccd_dec_y, dtype='float64')
    ccd_defocus = np.full(len(H_init), ccd_defocus, dtype='float64')
    H9[:, 0] = H9[:, 0] - ccd_dec_x
    H9[:, 1] = H9[:, 1] - ccd_dec_y
    H9[:, 2] = H9[:, 2] - ccd_defocus
    z_ff_ccd = ff_data[2][2]
    z_ff_ccd = temp_scaling*cte.recalc(z_ff_ccd, 'alum5083', t)
    H9 = trace.to_next_surface(H9, DC9, z_ff_ccd)
    H9[:, 2] = 0.
    # Orientation
    ccd_tilt_x = temp_scaling*cte.recalc(params[40], 'alum5083', t)
    ccd_tilt_y = temp_scaling*cte.recalc(params[41], 'alum5083', t)
    ccd_tilt_z = temp_scaling*cte.recalc(params[42], 'alum5083', t)
    # NOTE(review): ccd_tilt_z uses *180/np.pi while every other angle in
    # this file uses *np.pi/180 — looks like a deg->rad inversion, but the
    # same pattern appears in tracing_fn and tracing_cam, and fitted
    # parameters may have absorbed it; confirm before changing.
    T_ccd = np.array([ccd_tilt_x*np.pi/180, ccd_tilt_y*np.pi/180, ccd_tilt_z*180/np.pi])
    H9 = transform.transform(H9, -T_ccd)
    # Rotation to match with CARMENES frame geometry
    H9x_aux = H9[:, 0].copy()
    H9[:, 0] = -H9[:, 1]
    H9[:, 1] = H9x_aux
    Hff_aux = Hff[:, 0]
    Hff[:, 0] = -Hff[:, 1]
    Hff[:, 1] = Hff_aux
    ws = []
    for i in range(len(order)):
        ws.append([order[i], wave[i], H9[i][0], H9[i][1], H9[i][2], DC9[i][0], DC9[i][1], DC9[i][2], epx[i], epy[i]])
    ws = CCD_vis.mm2pix(np.asarray(ws))
    return np.array(ws)
def tracing_fn(spectrum, params, fib, temps, fndata):
    """Trace rays like `tracing`, with the FN-system data supplied by the caller.

    Differences from `tracing` (by inspection): `fndata` is passed in
    instead of loaded here; the fiber-to-FN distance is thermally
    rescaled; no `temp_scaling` factor is applied; and the output rows
    carry an extra 11th column, `wave_temp` (wavelength divided by its
    air-refraction value at the CCD temperature `temps[0]`).

    Parameters
    ----------
    spectrum : ndarray, shape (N, 2)
        Column 0 is the echelle order, column 1 the wavelength.
    params : sequence
        Instrument parameter vector; indices 0-43 are read (index 43 is
        the pressure in Pa).
    fib : str
        Fiber selector, 'A' or 'B'.
    temps : sequence
        Sensor readings; temps[0] is the CCD temperature, temps[1:] are
        spectrograph sensors (see inline comment below).
    fndata : object
        Raw FN-system description, consumed by fn_system.set_data.

    Returns
    -------
    ndarray
        Rows [order, wavelength, x, y, z, DCx, DCy, DCz, epx, epy,
        wave_temp] converted from mm to pixels by CCD_vis.mm2pix.
    """
    # Variables initialization
    H_init = np.zeros([len(spectrum), 3])
    DC_init = np.zeros([len(spectrum), 3])
    order = np.zeros(len(spectrum))
    wave = np.zeros(len(spectrum))
    order[:] = spectrum[:, 0]
    wave[:] = spectrum[:, 1]
    # x, y, z are appended to below but never read (debug leftovers).
    x = []
    y = []
    z = []
    H_init[:, 0] = np.zeros(len(spectrum))
    H_init[:, 1] = np.zeros(len(spectrum))
    H_init[:, 2] = np.zeros(len(spectrum))
    DC_init[:, 0] = np.zeros(len(spectrum))
    DC_init[:, 1] = np.zeros(len(spectrum))
    DC_init[:, 2] = np.zeros(len(spectrum))
    # Environment data
    p = params[43]  # in Pa, 10e-5 in mbar
    # We adjust wavelengths to environment
    temps_spec = temps[1:]
    t = np.average(temps_spec)
    wave = refraction_index.waves_air(wave, t, p)
    # Slit data
    if fib == 'A':
        slit_dec_x = np.full(len(spectrum), params[0])
        slit_dec_y = np.full(len(spectrum), params[1])
    elif fib == 'B':
        slit_dec_x = np.full(len(spectrum), params[2])
        slit_dec_y = np.full(len(spectrum), params[3])
    defocus = params[4]
    # Position and initial orientation
    H0, DC0 = slit.slit_params_init(H_init, DC_init, slit_dec_x, slit_dec_y, defocus)
    # To paraxial plane of the fn system
    d_fib_fn = 35.16
    # NOTE(review): uses temps_spec[6] (== temps[7]) while t_fn below is
    # temps[6]; unclear from here whether that difference is intended.
    d_fib_fn = cte.recalc(d_fib_fn, 'alum5083', temps_spec[6])
    H0 = trace.to_next_surface(H0, DC0, d_fib_fn)
    T_fib = np.asarray([0. * np.pi / 180, 0. * np.pi / 180, 0. * np.pi / 180])
    # FN system
    t_fn = temps[6]
    fn_system_data = fn_system.set_data(fndata)
    H0, DC0 = fn_system.tracing(H0, DC0, T_fib, wave, t_fn, p, fn_system_data)
    T_slit = np.asarray([params[5] * np.pi / 180, params[6] * np.pi / 180, params[7] * np.pi / 180])
    H1 = transform.transform(H0, -T_slit)
    DC1 = transform.transform(DC0, -T_slit)
    x.append(H1[:, 2])
    y.append(H1[:, 1])
    z.append(H1[:, 0])
    # Collimator
    # temps[0]: CCD temp, temps[1]: TS01 and so on
    t_bench = temps[3]
    z_pos_col = params[8]  # 1590
    z_pos_col = cte.recalc(z_pos_col, 'alum5083', t_bench)
    d_slit_col = np.abs(z_pos_col - H1[:, 2])
    t_coll_left = temps[2]
    t_coll_right = temps[1]
    t_coll = (t_coll_left + t_coll_right) / 2
    coll_tilt_x = cte.recalc(params[9], 'alum5083', t_coll)
    coll_tilt_y = cte.recalc(params[10], 'alum5083', t_coll)
    T_coll = np.asarray([coll_tilt_x * np.pi / 180, coll_tilt_y * np.pi / 180, 0. * np.pi / 180])
    H2 = trace.to_next_surface(H1, DC1, d_slit_col)
    curv_rad_aux = -1594.
    curv_rad_aux = cte.recalc(curv_rad_aux, 'zerodur', t_coll_left)
    curvature_rad = np.full(len(H2), curv_rad_aux * 2)
    H2, DC2 = collimator.DCcoll(H2, DC1, T_coll, curvature_rad)
    x.append(H2[:, 2])
    y.append(H2[:, 1])
    z.append(H2[:, 0])
    # Echelle
    d_col_ech_aux = -1594.305
    d_col_ech = cte.recalc(d_col_ech_aux, 'alum5083', temps[5])
    z_pos_ech_aux = d_col_ech - d_col_ech_aux
    z_pos_ech = np.full(len(H_init), z_pos_ech_aux)
    H3 = trace.to_next_surface(H2, DC2, z_pos_ech)
    # Data
    G = params[11] * 1e-3
    d = 1 / G
    temp_echelle = (temps[6] + temps[8]) / 2
    d_new = cte.recalc(d, 'zerodur', temp_echelle)
    G_new = 1 / d_new
    # Orientation and diffraction
    ech_blaze = cte.recalc(params[12], 'alum5083', temp_echelle)
    ech_gamma = cte.recalc(params[13], 'alum5083', temp_echelle)
    ech_z_tilt = cte.recalc(params[14], 'alum5083', temp_echelle)
    T_echelle = np.asarray([ech_blaze * np.pi / 180, ech_gamma * np.pi / 180, ech_z_tilt * np.pi / 180])
    H3, DC3 = echelle.diffraction(H3, DC2, T_echelle, order, wave, G_new)
    # Collimator 2nd pass
    d_ech_col = np.full(len(H_init), z_pos_col)
    H4 = trace.to_next_surface(H3, DC3, d_ech_col)
    H4, DC4 = collimator.DCcoll(H4, DC3, T_coll, curvature_rad)
    # Transfer mirror
    d_col_tm_aux = params[15]
    d_col_tm = cte.recalc(d_col_tm_aux, 'alum5083', t_bench)
    z_pos_tm_aux = d_col_ech - d_col_tm
    z_pos_tm = np.full(len(H_init), z_pos_tm_aux)
    H5 = trace.to_next_surface(H4, DC4, z_pos_tm)
    # Orientation
    tm_tilt_x = cte.recalc(params[16], 'alum5083', temps[6])
    tm_tilt_y = cte.recalc(params[17], 'alum5083', temps[6])
    T_flat = np.asarray([tm_tilt_x * np.pi / 180, tm_tilt_y * np.pi / 180, 0.0 * np.pi / 180])
    H5, DC5 = flat_mirror.flat_out(H5, DC4, T_flat)
    # Collimator 3rd pass
    d_trf_col = np.full(len(H_init), z_pos_col)
    H6 = trace.to_next_surface(H5, DC5, d_trf_col)
    curv_rad_aux = -1594.
    curv_rad_aux = cte.recalc(curv_rad_aux, 'zerodur', t_coll_right)
    curvature_rad = np.full(len(H2), curv_rad_aux * 2)
    H6, DC6 = collimator.DCcoll(H6, DC5, T_coll, curvature_rad)
    # Grism
    z_pos_grism = cte.recalc(params[18], 'alum5083', temps[4])
    dcoll3_grism = np.full(len(H_init), z_pos_grism)
    H7 = trace.to_next_surface(H6, DC6, dcoll3_grism)
    # position and orientation
    grm_dec_x = cte.recalc(params[19], 'alum5083', temps[4])
    grm_dec_y = cte.recalc(params[20], 'alum5083', temps[4])
    grism_dec_x = np.full(len(H_init), grm_dec_x)
    grism_dec_y = np.full(len(H_init), grm_dec_y)
    grm_tilt_x = cte.recalc(params[21], 'alum5083', temps[4])
    grm_tilt_y = cte.recalc(params[22], 'alum5083', temps[4])
    T_grism_in = np.asarray([grm_tilt_x * np.pi / 180, grm_tilt_y * np.pi / 180, 0. * np.pi / 180])
    # Material and grating data
    grism_material = 'LF5'
    dG = 1 / (params[23] * 1e-3)
    dG_new = cte.recalc(dG, 'lf5', temps[4])
    GD_new = 1 / dG_new
    GD = np.full(len(H_init), GD_new)
    apex_grism = params[24]
    apex_grism = cte.recalc(apex_grism, 'lf5', temps[4])
    H7, DC7 = grism.dispersion(H7, DC6, T_grism_in, wave, grism_material, apex_grism, GD, t, p, grism_dec_x,
                               grism_dec_y)
    # Camera
    z_pos_cam = params[25]
    z_pos_cam = cte.recalc(z_pos_cam, 'alum5083', temps[7])
    d_grism_cam = np.full(len(H_init), z_pos_cam)
    H8 = trace.to_next_surface(H7, DC7, d_grism_cam)
    # Position
    cam_dec_x = cte.recalc(params[26], 'alum5083', temps[7])
    cam_dec_y = cte.recalc(params[27], 'alum5083', temps[7])
    dec_x = np.full(len(H8), cam_dec_x)
    dec_y = np.full(len(H8), cam_dec_y)
    H8[:, 0] = H8[:, 0] + dec_x
    H8[:, 1] = H8[:, 1] + dec_y
    # Entrance-pupil coordinates (views of H8; they track the in-place
    # updates below until H8 is rebound by camera.tracing).
    epx = H8[:, 0]
    epy = H8[:, 1]
    # Orientation
    cam_tilt_x = cte.recalc(params[28], 'alum5083', temps[7])
    cam_tilt_y = cte.recalc(params[29], 'alum5083', temps[7])
    T_cam = np.asarray([cam_tilt_x * np.pi / 180, cam_tilt_y * np.pi / 180, 0. * np.pi / 180])
    # Tracing camera lens 1 to 5
    d_cam_ff = cte.recalc(params[30], 'alum5083', temps[7])
    camdata = camera.load_data()
    cam_data = camera.set_data(camdata)
    cam_data[-1][2] = d_cam_ff
    H8, DC8, H_cam_in = camera.tracing(H8, DC7, T_cam, wave, temps[7], p, cam_data)
    # Field flattener + CCD
    # position
    ccd_ff_dec_x = cte.recalc(params[31], 'alum5083', temps[7])
    ccd_ff_dec_y = cte.recalc(params[32], 'alum5083', temps[7])
    ff_dec_x = np.full(len(H_init), ccd_ff_dec_x, dtype='float64')
    ff_dec_y = np.full(len(H_init), ccd_ff_dec_y, dtype='float64')
    H8[:, 0] = H8[:, 0] + ff_dec_x
    H8[:, 1] = H8[:, 1] + ff_dec_y
    # orientation
    ccd_ff_tilt_x = cte.recalc(params[33], 'alum5083', temps[7])
    ccd_ff_tilt_y = cte.recalc(params[34], 'alum5083', temps[7])
    ccd_ff_tilt_z = cte.recalc(params[35], 'alum5083', temps[7])
    T_ff_ccd = np.array([ccd_ff_tilt_x * np.pi / 180, ccd_ff_tilt_y * np.pi / 180, ccd_ff_tilt_z * np.pi / 180])
    # Tracing through field flattener
    ffdat = field_flattener.load_data()
    ff_data = field_flattener.set_data(ffdat)
    d_ff_ccd = cte.recalc(params[36], 'alum5083', temps[7])
    ff_data[-1][2] = d_ff_ccd
    H9, DC9 = field_flattener.tracing(H8, DC8, T_ff_ccd, wave, temps[7], p, ff_data)
    Hff = H9.copy()
    # End Camera
    # Detector
    # Position
    # NOTE(review): temps_spec becomes a scalar here, so the second
    # np.average is a no-op.
    temps_spec = np.average(temps[1:])
    t = np.average(temps_spec)
    ccd_dec_x = cte.recalc(params[37], 'alum5083', t)
    ccd_dec_y = cte.recalc(params[38], 'alum5083', t)
    ccd_defocus = cte.recalc(params[39], 'alum5083', t)
    ccd_dec_x = np.full(len(H_init), ccd_dec_x, dtype='float64')
    ccd_dec_y = np.full(len(H_init), ccd_dec_y, dtype='float64')
    ccd_defocus = np.full(len(H_init), ccd_defocus, dtype='float64')
    H9[:, 0] = H9[:, 0] - ccd_dec_x
    H9[:, 1] = H9[:, 1] - ccd_dec_y
    H9[:, 2] = H9[:, 2] - ccd_defocus
    # print ccd_dec_x
    z_ff_ccd = ff_data[2][2]
    z_ff_ccd = cte.recalc(z_ff_ccd, 'alum5083', t)
    H9 = trace.to_next_surface(H9, DC9, z_ff_ccd)
    H9[:, 2] = 0.
    # Orientation
    ccd_tilt_x = cte.recalc(params[40], 'alum5083', t)
    ccd_tilt_y = cte.recalc(params[41], 'alum5083', t)
    ccd_tilt_z = cte.recalc(params[42], 'alum5083', t)
    # NOTE(review): ccd_tilt_z uses *180/np.pi while every other angle
    # uses *np.pi/180 — likely a deg->rad inversion; the same pattern
    # appears in `tracing` and `tracing_cam`, so confirm against the
    # fitted parameters before changing.
    T_ccd = np.array([ccd_tilt_x * np.pi / 180, ccd_tilt_y * np.pi / 180, ccd_tilt_z * 180 / np.pi])
    H9 = transform.transform(H9, -T_ccd)
    # Aberrations corrections
    # We first rotate into carmenes frame
    H9x_aux = H9[:, 0].copy()
    H9[:, 0] = -H9[:, 1]
    H9[:, 1] = H9x_aux
    Hff_aux = Hff[:, 0]
    Hff[:, 0] = -Hff[:, 1]
    Hff[:, 1] = Hff_aux
    # Polynomial correction
    # ab_3_poly_x, ab_3_poly_y = camera.seidel_aberration_poly(Hff, H9)
    # dx_seidel, dy_seidel = camera.seidel_aberration_correction(ab_3_poly_x, ab_3_poly_y, H9[:, 0], H9[:, 1])
    ws = []
    t_ccd = temps[0]
    wave_temp = wave / refraction_index.waves_air(wave, t_ccd, p)
    for i in range(len(order)):
        ws.append([order[i], wave[i], H9[i][0], H9[i][1], H9[i][2], DC9[i][0], DC9[i][1], DC9[i][2], epx[i], epy[i],
                   wave_temp[i]])
    ws = CCD_vis.mm2pix(np.asarray(ws))
    # ws = polyfit.correction(ws)
    return np.array(ws)
def tracing_cam(spectrum, params, fib, temps, camdata):
    """Trace rays like `tracing_fn`, with the camera data supplied by the caller.

    Differences from `tracing_fn` (by inspection): `camdata` is passed in
    instead of loaded via camera.load_data(); the FN-system data is loaded
    here; and the fiber-to-FN distance is NOT thermally rescaled.

    Parameters
    ----------
    spectrum : ndarray, shape (N, 2)
        Column 0 is the echelle order, column 1 the wavelength.
    params : sequence
        Instrument parameter vector; indices 0-43 are read (index 43 is
        the pressure in Pa).
    fib : str
        Fiber selector, 'A' or 'B'.
    temps : sequence
        Sensor readings; temps[0] is the CCD temperature, temps[1:] are
        spectrograph sensors.
    camdata : object
        Raw camera description, consumed by camera.set_data.

    Returns
    -------
    ndarray
        Rows [order, wavelength, x, y, z, DCx, DCy, DCz, epx, epy,
        wave_temp] converted from mm to pixels by CCD_vis.mm2pix.
    """
    # Variables initialization
    # NOTE(review): temp_scaling is assigned but never used in this
    # function (unlike `tracing`, which multiplies cte.recalc results).
    temp_scaling = 1  # 0.99999 #1.0045
    H_init = np.zeros([len(spectrum), 3])
    DC_init = np.zeros([len(spectrum), 3])
    order = np.zeros(len(spectrum))
    wave = np.zeros(len(spectrum))
    order[:] = spectrum[:, 0]
    wave[:] = spectrum[:, 1]
    # x, y, z are appended to below but never read (debug leftovers).
    x = []
    y = []
    z = []
    H_init[:, 0] = np.zeros(len(spectrum))
    H_init[:, 1] = np.zeros(len(spectrum))
    H_init[:, 2] = np.zeros(len(spectrum))
    DC_init[:, 0] = np.zeros(len(spectrum))
    DC_init[:, 1] = np.zeros(len(spectrum))
    DC_init[:, 2] = np.zeros(len(spectrum))
    # Environment data
    p = params[43]  # in Pa, 10e-5 in mbar
    fndata = fn_system.load_data()
    fn_system_data = fn_system.set_data(fndata)
    # We adjust wavelengths to environment
    temps_spec = temps[1:]
    t = np.average(temps_spec)
    wave = refraction_index.waves_air(wave, t, p)
    # Slit data
    if fib == 'A':
        slit_dec_x = np.full(len(spectrum), params[0])
        slit_dec_y = np.full(len(spectrum), params[1])
    elif fib == 'B':
        slit_dec_x = np.full(len(spectrum), params[2])
        slit_dec_y = np.full(len(spectrum), params[3])
    defocus = params[4]
    # Position and initial orientation
    H0, DC0 = slit.slit_params_init(H_init, DC_init, slit_dec_x, slit_dec_y, defocus)
    # To paraxial plane of the fn system
    d_fib_fn = 35.16
    H0 = trace.to_next_surface(H0, DC0, d_fib_fn)
    T_fib = np.asarray([0. * np.pi / 180, 0. * np.pi / 180, 0. * np.pi / 180])
    # FN system
    t_fn = temps[6]
    H0, DC0 = fn_system.tracing(H0, DC0, T_fib, wave, t_fn, p, fn_system_data)
    T_slit = np.asarray([params[5] * np.pi / 180, params[6] * np.pi / 180, params[7] * np.pi / 180])
    H1 = transform.transform(H0, -T_slit)
    DC1 = transform.transform(DC0, -T_slit)
    x.append(H1[:, 2])
    y.append(H1[:, 1])
    z.append(H1[:, 0])
    # Collimator
    # temps[0]: CCD temp, temps[1]: TS01 and so on
    t_bench = temps[3]
    z_pos_col = params[8]  # 1590
    z_pos_col = cte.recalc(z_pos_col, 'alum5083', t_bench)
    d_slit_col = np.abs(z_pos_col - H1[:, 2])
    t_coll_left = temps[2]
    t_coll_right = temps[1]
    t_coll = (t_coll_left + t_coll_right) / 2
    coll_tilt_x = cte.recalc(params[9], 'alum5083', t_coll)
    coll_tilt_y = cte.recalc(params[10], 'alum5083', t_coll)
    T_coll = np.asarray([coll_tilt_x * np.pi / 180, coll_tilt_y * np.pi / 180, 0. * np.pi / 180])
    H2 = trace.to_next_surface(H1, DC1, d_slit_col)
    curv_rad_aux = -1594.
    curv_rad_aux = cte.recalc(curv_rad_aux, 'zerodur', t_coll_left)
    curvature_rad = np.full(len(H2), curv_rad_aux * 2)
    H2, DC2 = collimator.DCcoll(H2, DC1, T_coll, curvature_rad)
    x.append(H2[:, 2])
    y.append(H2[:, 1])
    z.append(H2[:, 0])
    # Echelle
    d_col_ech_aux = -1594.305
    d_col_ech = cte.recalc(d_col_ech_aux, 'alum5083', temps[5])
    z_pos_ech_aux = d_col_ech - d_col_ech_aux
    z_pos_ech = np.full(len(H_init), z_pos_ech_aux)
    H3 = trace.to_next_surface(H2, DC2, z_pos_ech)
    # Data
    G = params[11] * 1e-3
    d = 1 / G
    temp_echelle = (temps[6] + temps[8]) / 2
    d_new = cte.recalc(d, 'zerodur', temp_echelle)
    G_new = 1 / d_new
    # Orientation and diffraction
    ech_blaze = cte.recalc(params[12], 'alum5083', temp_echelle)
    ech_gamma = cte.recalc(params[13], 'alum5083', temp_echelle)
    ech_z_tilt = cte.recalc(params[14], 'alum5083', temp_echelle)
    T_echelle = np.asarray([ech_blaze * np.pi / 180, ech_gamma * np.pi / 180, ech_z_tilt * np.pi / 180])
    H3, DC3 = echelle.diffraction(H3, DC2, T_echelle, order, wave, G_new)
    # Collimator 2nd pass
    d_ech_col = np.full(len(H_init), z_pos_col)
    H4 = trace.to_next_surface(H3, DC3, d_ech_col)
    H4, DC4 = collimator.DCcoll(H4, DC3, T_coll, curvature_rad)
    # Transfer mirror
    d_col_tm_aux = params[15]
    d_col_tm = cte.recalc(d_col_tm_aux, 'alum5083', t_bench)
    z_pos_tm_aux = d_col_ech - d_col_tm
    z_pos_tm = np.full(len(H_init), z_pos_tm_aux)
    H5 = trace.to_next_surface(H4, DC4, z_pos_tm)
    # Orientation
    tm_tilt_x = cte.recalc(params[16], 'alum5083', temps[6])
    tm_tilt_y = cte.recalc(params[17], 'alum5083', temps[6])
    T_flat = np.asarray([tm_tilt_x * np.pi / 180, tm_tilt_y * np.pi / 180, 0.0 * np.pi / 180])
    H5, DC5 = flat_mirror.flat_out(H5, DC4, T_flat)
    # Collimator 3rd pass
    d_trf_col = np.full(len(H_init), z_pos_col)
    H6 = trace.to_next_surface(H5, DC5, d_trf_col)
    curv_rad_aux = -1594.
    curv_rad_aux = cte.recalc(curv_rad_aux, 'zerodur', t_coll_right)
    curvature_rad = np.full(len(H2), curv_rad_aux * 2)
    H6, DC6 = collimator.DCcoll(H6, DC5, T_coll, curvature_rad)
    # Grism
    z_pos_grism = cte.recalc(params[18], 'alum5083', temps[4])
    dcoll3_grism = np.full(len(H_init), z_pos_grism)
    H7 = trace.to_next_surface(H6, DC6, dcoll3_grism)
    # position and orientation
    grm_dec_x = cte.recalc(params[19], 'alum5083', temps[4])
    grm_dec_y = cte.recalc(params[20], 'alum5083', temps[4])
    grism_dec_x = np.full(len(H_init), grm_dec_x)
    grism_dec_y = np.full(len(H_init), grm_dec_y)
    grm_tilt_x = cte.recalc(params[21], 'alum5083', temps[4])
    grm_tilt_y = cte.recalc(params[22], 'alum5083', temps[4])
    T_grism_in = np.asarray([grm_tilt_x * np.pi / 180, grm_tilt_y * np.pi / 180, 0. * np.pi / 180])
    # Material and grating data
    grism_material = 'LF5'
    dG = 1 / (params[23] * 1e-3)
    dG_new = cte.recalc(dG, 'lf5', temps[4])
    GD_new = 1 / dG_new
    GD = np.full(len(H_init), GD_new)
    apex_grism = params[24]
    apex_grism = cte.recalc(apex_grism, 'lf5', temps[4])
    H7, DC7 = grism.dispersion(H7, DC6, T_grism_in, wave, grism_material, apex_grism, GD, t, p, grism_dec_x,
                               grism_dec_y)
    # Camera
    z_pos_cam = params[25]
    z_pos_cam = cte.recalc(z_pos_cam, 'alum5083', temps[7])
    d_grism_cam = np.full(len(H_init), z_pos_cam)
    H8 = trace.to_next_surface(H7, DC7, d_grism_cam)
    # Position
    cam_dec_x = cte.recalc(params[26], 'alum5083', temps[7])
    cam_dec_y = cte.recalc(params[27], 'alum5083', temps[7])
    dec_x = np.full(len(H8), cam_dec_x)
    dec_y = np.full(len(H8), cam_dec_y)
    H8[:, 0] = H8[:, 0] + dec_x
    H8[:, 1] = H8[:, 1] + dec_y
    # Entrance-pupil coordinates (views of H8; they track the in-place
    # updates below until H8 is rebound by camera.tracing).
    epx = H8[:, 0]
    epy = H8[:, 1]
    # Orientation
    cam_tilt_x = cte.recalc(params[28], 'alum5083', temps[7])
    cam_tilt_y = cte.recalc(params[29], 'alum5083', temps[7])
    T_cam = np.asarray([cam_tilt_x * np.pi / 180, cam_tilt_y * np.pi / 180, 0. * np.pi / 180])
    # Tracing camera lens 1 to 5
    # cam_data = camera.load_data()
    d_cam_ff = cte.recalc(params[30], 'alum5083', temps[7])
    cam_data = camera.set_data(camdata)
    cam_data[-1][2] = d_cam_ff
    H8, DC8, H_cam_in = camera.tracing(H8, DC7, T_cam, wave, temps[7], p, cam_data)
    # file_comp = 'ws_zemax_camera_l1_sfin_mock_before.txt'
    # compare_zemax.difference_z_negative(H8, DC7, file_comp)
    # print DC7[:, 0] ** 2 + DC7[:, 1] ** 2 + DC7[:, 2] ** 2
    # Field flattener + CCD
    # position
    ccd_ff_dec_x = cte.recalc(params[31], 'alum5083', temps[7])
    ccd_ff_dec_y = cte.recalc(params[32], 'alum5083', temps[7])
    ff_dec_x = np.full(len(H_init), ccd_ff_dec_x, dtype='float64')
    ff_dec_y = np.full(len(H_init), ccd_ff_dec_y, dtype='float64')
    H8[:, 0] = H8[:, 0] + ff_dec_x
    H8[:, 1] = H8[:, 1] + ff_dec_y
    # orientation
    ccd_ff_tilt_x = cte.recalc(params[33], 'alum5083', temps[7])
    ccd_ff_tilt_y = cte.recalc(params[34], 'alum5083', temps[7])
    ccd_ff_tilt_z = cte.recalc(params[35], 'alum5083', temps[7])
    T_ff_ccd = np.array([ccd_ff_tilt_x * np.pi / 180, ccd_ff_tilt_y * np.pi / 180, ccd_ff_tilt_z * np.pi / 180])
    # Tracing through field flattener
    ffdat = field_flattener.load_data()
    ff_data = field_flattener.set_data(ffdat)
    d_ff_ccd = cte.recalc(params[36], 'alum5083', temps[7])
    ff_data[-1][2] = d_ff_ccd
    H9, DC9 = field_flattener.tracing(H8, DC8, T_ff_ccd, wave, temps[7], p, ff_data)
    Hff = H9.copy()
    # End Camera
    # Detector
    # Position
    # NOTE(review): temps_spec becomes a scalar here, so the second
    # np.average is a no-op.
    temps_spec = np.average(temps[1:])
    t = np.average(temps_spec)
    ccd_dec_x = cte.recalc(params[37], 'alum5083', t)
    ccd_dec_y = cte.recalc(params[38], 'alum5083', t)
    ccd_defocus = cte.recalc(params[39], 'alum5083', t)
    ccd_dec_x = np.full(len(H_init), ccd_dec_x, dtype='float64')
    ccd_dec_y = np.full(len(H_init), ccd_dec_y, dtype='float64')
    ccd_defocus = np.full(len(H_init), ccd_defocus, dtype='float64')
    H9[:, 0] = H9[:, 0] - ccd_dec_x
    H9[:, 1] = H9[:, 1] - ccd_dec_y
    H9[:, 2] = H9[:, 2] - ccd_defocus
    # print ccd_dec_x
    z_ff_ccd = ff_data[2][2]
    z_ff_ccd = cte.recalc(z_ff_ccd, 'alum5083', t)
    H9 = trace.to_next_surface(H9, DC9, z_ff_ccd)
    H9[:, 2] = 0.
    # Orientation
    ccd_tilt_x = cte.recalc(params[40], 'alum5083', t)
    ccd_tilt_y = cte.recalc(params[41], 'alum5083', t)
    ccd_tilt_z = cte.recalc(params[42], 'alum5083', t)
    # NOTE(review): ccd_tilt_z uses *180/np.pi while every other angle
    # uses *np.pi/180 — likely a deg->rad inversion; the same pattern
    # appears in `tracing` and `tracing_fn`, so confirm against the
    # fitted parameters before changing.
    T_ccd = np.array([ccd_tilt_x * np.pi / 180, ccd_tilt_y * np.pi / 180, ccd_tilt_z * 180 / np.pi])
    H9 = transform.transform(H9, -T_ccd)
    # Aberrations corrections
    # We first rotate into carmenes frame
    H9x_aux = H9[:, 0].copy()
    H9[:, 0] = -H9[:, 1]
    H9[:, 1] = H9x_aux
    Hff_aux = Hff[:, 0]
    Hff[:, 0] = -Hff[:, 1]
    Hff[:, 1] = Hff_aux
    # Polynomial correction
    # ab_3_poly_x, ab_3_poly_y = camera.seidel_aberration_poly(Hff, H9)
    # dx_seidel, dy_seidel = camera.seidel_aberration_correction(ab_3_poly_x, ab_3_poly_y, H9[:, 0], H9[:, 1])
    ws = []
    t_ccd = temps[0]
    wave_temp = wave / refraction_index.waves_air(wave, t_ccd, p)
    for i in range(len(order)):
        ws.append([order[i], wave[i], H9[i][0], H9[i][1], H9[i][2], DC9[i][0], DC9[i][1], DC9[i][2], epx[i], epy[i],
                   wave_temp[i]])
    ws = CCD_vis.mm2pix(np.asarray(ws))
    # ws = polyfit.correction(ws)
    return np.array(ws)
def tracing_ff(spectrum, params, fib, temps, ffdata):
    """Trace rays for fibre `fib` through the spectrograph onto the CCD.

    Same optical chain as the sibling tracing routine above, except that the
    field-flattener prescription is supplied by the caller via `ffdata`
    instead of being loaded from disk.

    Parameters
    ----------
    spectrum : ndarray, shape (n, 2)
        Columns are (echelle order, wavelength).
    params : sequence
        Model parameters; indices 0-42 are decenters/tilts/defoci in the
        order used below, params[43] is the ambient pressure (Pa).
    fib : str
        'A' or 'B'; selects which slit decenter parameters to use.
    temps : sequence
        Sensor temperatures; temps[0] is the CCD temperature, temps[1:]
        the spectrograph sensors (assumption from usage below -- confirm).
    ffdata : object
        Raw field-flattener data, passed to field_flattener.set_data().

    Returns
    -------
    ndarray
        One row per ray: (order, wavelength, x, y, z, DCx, DCy, DCz,
        entrance-pupil x, entrance-pupil y, vacuum/air wavelength ratio),
        converted from mm to pixels by CCD_vis.mm2pix.

    Fixes relative to the previous revision:
      * the CCD z-tilt was converted with ``* 180 / np.pi`` instead of
        ``* np.pi / 180`` (inconsistent with every other tilt here);
      * ``Hff_aux`` was a view, not a copy, so the x/y column swap of
        ``Hff`` corrupted both columns;
      * dead locals (temp_scaling, unused x/y/z lists, redundant
        re-zeroing of freshly created zero arrays) removed.
    """
    # Variables initialization (np.zeros already yields all-zero arrays).
    H_init = np.zeros([len(spectrum), 3])
    DC_init = np.zeros([len(spectrum), 3])
    order = np.zeros(len(spectrum))
    wave = np.zeros(len(spectrum))
    order[:] = spectrum[:, 0]
    wave[:] = spectrum[:, 1]
    # Environment data
    p = params[43]  # in Pa, 10e-5 in mbar
    fndata = fn_system.load_data()
    fn_system_data = fn_system.set_data(fndata)
    # We adjust wavelengths to the environment (mean spectrograph temperature).
    t = np.average(temps[1:])
    wave = refraction_index.waves_air(wave, t, p)
    # Slit data: fibre A and B have independent decenter parameters.
    if fib == 'A':
        slit_dec_x = np.full(len(spectrum), params[0])
        slit_dec_y = np.full(len(spectrum), params[1])
    elif fib == 'B':
        slit_dec_x = np.full(len(spectrum), params[2])
        slit_dec_y = np.full(len(spectrum), params[3])
    defocus = params[4]
    # Position and initial orientation
    H0, DC0 = slit.slit_params_init(H_init, DC_init, slit_dec_x, slit_dec_y, defocus)
    # To paraxial plane of the FN system
    d_fib_fn = 35.16
    H0 = trace.to_next_surface(H0, DC0, d_fib_fn)
    T_fib = np.asarray([0. * np.pi / 180, 0. * np.pi / 180, 0. * np.pi / 180])
    # FN system
    t_fn = temps[6]
    H0, DC0 = fn_system.tracing(H0, DC0, T_fib, wave, t_fn, p, fn_system_data)
    T_slit = np.asarray([params[5] * np.pi / 180, params[6] * np.pi / 180, params[7] * np.pi / 180])
    H1 = transform.transform(H0, -T_slit)
    DC1 = transform.transform(DC0, -T_slit)
    # Collimator
    # temps[0]: CCD temp, temps[1]: TS01 and so on
    t_bench = temps[3]
    z_pos_col = params[8]  # 1590
    z_pos_col = cte.recalc(z_pos_col, 'alum5083', t_bench)
    d_slit_col = np.abs(z_pos_col - H1[:, 2])
    t_coll_left = temps[2]
    t_coll_right = temps[1]
    t_coll = (t_coll_left + t_coll_right) / 2
    coll_tilt_x = cte.recalc(params[9], 'alum5083', t_coll)
    coll_tilt_y = cte.recalc(params[10], 'alum5083', t_coll)
    T_coll = np.asarray([coll_tilt_x * np.pi / 180, coll_tilt_y * np.pi / 180, 0. * np.pi / 180])
    H2 = trace.to_next_surface(H1, DC1, d_slit_col)
    curv_rad_aux = -1594.
    curv_rad_aux = cte.recalc(curv_rad_aux, 'zerodur', t_coll_left)
    curvature_rad = np.full(len(H2), curv_rad_aux * 2)
    H2, DC2 = collimator.DCcoll(H2, DC1, T_coll, curvature_rad)
    # Echelle
    d_col_ech_aux = -1594.305
    d_col_ech = cte.recalc(d_col_ech_aux, 'alum5083', temps[5])
    z_pos_ech_aux = d_col_ech - d_col_ech_aux
    z_pos_ech = np.full(len(H_init), z_pos_ech_aux)
    H3 = trace.to_next_surface(H2, DC2, z_pos_ech)
    # Grating data: groove density G is thermally rescaled via the period d.
    G = params[11] * 1e-3
    d = 1 / G
    temp_echelle = (temps[6] + temps[8]) / 2
    d_new = cte.recalc(d, 'zerodur', temp_echelle)
    G_new = 1 / d_new
    # Orientation and diffraction
    ech_blaze = cte.recalc(params[12], 'alum5083', temp_echelle)
    ech_gamma = cte.recalc(params[13], 'alum5083', temp_echelle)
    ech_z_tilt = cte.recalc(params[14], 'alum5083', temp_echelle)
    T_echelle = np.asarray([ech_blaze * np.pi / 180, ech_gamma * np.pi / 180, ech_z_tilt * np.pi / 180])
    H3, DC3 = echelle.diffraction(H3, DC2, T_echelle, order, wave, G_new)
    # Collimator 2nd pass
    d_ech_col = np.full(len(H_init), z_pos_col)
    H4 = trace.to_next_surface(H3, DC3, d_ech_col)
    H4, DC4 = collimator.DCcoll(H4, DC3, T_coll, curvature_rad)
    # Transfer mirror
    d_col_tm_aux = params[15]
    d_col_tm = cte.recalc(d_col_tm_aux, 'alum5083', t_bench)
    z_pos_tm_aux = d_col_ech - d_col_tm
    z_pos_tm = np.full(len(H_init), z_pos_tm_aux)
    H5 = trace.to_next_surface(H4, DC4, z_pos_tm)
    # Orientation
    tm_tilt_x = cte.recalc(params[16], 'alum5083', temps[6])
    tm_tilt_y = cte.recalc(params[17], 'alum5083', temps[6])
    T_flat = np.asarray([tm_tilt_x * np.pi / 180, tm_tilt_y * np.pi / 180, 0.0 * np.pi / 180])
    H5, DC5 = flat_mirror.flat_out(H5, DC4, T_flat)
    # Collimator 3rd pass (right half: its own thermal curvature correction).
    d_trf_col = np.full(len(H_init), z_pos_col)
    H6 = trace.to_next_surface(H5, DC5, d_trf_col)
    curv_rad_aux = -1594.
    curv_rad_aux = cte.recalc(curv_rad_aux, 'zerodur', t_coll_right)
    curvature_rad = np.full(len(H2), curv_rad_aux * 2)
    H6, DC6 = collimator.DCcoll(H6, DC5, T_coll, curvature_rad)
    # Grism
    z_pos_grism = cte.recalc(params[18], 'alum5083', temps[4])
    dcoll3_grism = np.full(len(H_init), z_pos_grism)
    H7 = trace.to_next_surface(H6, DC6, dcoll3_grism)
    # position and orientation
    grm_dec_x = cte.recalc(params[19], 'alum5083', temps[4])
    grm_dec_y = cte.recalc(params[20], 'alum5083', temps[4])
    grism_dec_x = np.full(len(H_init), grm_dec_x)
    grism_dec_y = np.full(len(H_init), grm_dec_y)
    grm_tilt_x = cte.recalc(params[21], 'alum5083', temps[4])
    grm_tilt_y = cte.recalc(params[22], 'alum5083', temps[4])
    T_grism_in = np.asarray([grm_tilt_x * np.pi / 180, grm_tilt_y * np.pi / 180, 0. * np.pi / 180])
    # Material and grating data
    grism_material = 'LF5'
    dG = 1 / (params[23] * 1e-3)
    dG_new = cte.recalc(dG, 'lf5', temps[4])
    GD_new = 1 / dG_new
    GD = np.full(len(H_init), GD_new)
    apex_grism = params[24]
    apex_grism = cte.recalc(apex_grism, 'lf5', temps[4])
    H7, DC7 = grism.dispersion(H7, DC6, T_grism_in, wave, grism_material, apex_grism, GD, t, p, grism_dec_x,
                               grism_dec_y)
    # Camera
    z_pos_cam = params[25]
    z_pos_cam = cte.recalc(z_pos_cam, 'alum5083', temps[7])
    d_grism_cam = np.full(len(H_init), z_pos_cam)
    H8 = trace.to_next_surface(H7, DC7, d_grism_cam)
    # Position
    cam_dec_x = cte.recalc(params[26], 'alum5083', temps[7])
    cam_dec_y = cte.recalc(params[27], 'alum5083', temps[7])
    dec_x = np.full(len(H8), cam_dec_x)
    dec_y = np.full(len(H8), cam_dec_y)
    H8[:, 0] = H8[:, 0] + dec_x
    H8[:, 1] = H8[:, 1] + dec_y
    # Entrance-pupil coordinates: these are views into the *current* H8, taken
    # before camera.tracing rebinds H8, so later updates do not affect them.
    epx = H8[:, 0]
    epy = H8[:, 1]
    # Orientation
    cam_tilt_x = cte.recalc(params[28], 'alum5083', temps[7])
    cam_tilt_y = cte.recalc(params[29], 'alum5083', temps[7])
    T_cam = np.asarray([cam_tilt_x * np.pi / 180, cam_tilt_y * np.pi / 180, 0. * np.pi / 180])
    # Tracing camera lens 1 to 5; last element of cam_data holds the
    # camera-to-field-flattener distance, thermally rescaled.
    d_cam_ff = cte.recalc(params[30], 'alum5083', temps[7])
    camdata = camera.load_data()
    cam_data = camera.set_data(camdata)
    cam_data[-1][2] = d_cam_ff
    H8, DC8, H_cam_in = camera.tracing(H8, DC7, T_cam, wave, temps[7], p, cam_data)
    # Field flattener + CCD
    # position
    ccd_ff_dec_x = cte.recalc(params[31], 'alum5083', temps[7])
    ccd_ff_dec_y = cte.recalc(params[32], 'alum5083', temps[7])
    ff_dec_x = np.full(len(H_init), ccd_ff_dec_x, dtype='float64')
    ff_dec_y = np.full(len(H_init), ccd_ff_dec_y, dtype='float64')
    H8[:, 0] = H8[:, 0] + ff_dec_x
    H8[:, 1] = H8[:, 1] + ff_dec_y
    # orientation
    ccd_ff_tilt_x = cte.recalc(params[33], 'alum5083', temps[7])
    ccd_ff_tilt_y = cte.recalc(params[34], 'alum5083', temps[7])
    ccd_ff_tilt_z = cte.recalc(params[35], 'alum5083', temps[7])
    T_ff_ccd = np.array([ccd_ff_tilt_x * np.pi / 180, ccd_ff_tilt_y * np.pi / 180, ccd_ff_tilt_z * np.pi / 180])
    # Tracing through field flattener (prescription supplied by the caller).
    ff_data = field_flattener.set_data(ffdata)
    d_ff_ccd = cte.recalc(params[36], 'alum5083', temps[7])
    ff_data[-1][2] = d_ff_ccd
    H9, DC9 = field_flattener.tracing(H8, DC8, T_ff_ccd, wave, temps[7], p, ff_data)
    Hff = H9.copy()
    # End Camera
    # Detector
    # Position (mean spectrograph temperature again)
    t = np.average(temps[1:])
    ccd_dec_x = cte.recalc(params[37], 'alum5083', t)
    ccd_dec_y = cte.recalc(params[38], 'alum5083', t)
    ccd_defocus = cte.recalc(params[39], 'alum5083', t)
    ccd_dec_x = np.full(len(H_init), ccd_dec_x, dtype='float64')
    ccd_dec_y = np.full(len(H_init), ccd_dec_y, dtype='float64')
    ccd_defocus = np.full(len(H_init), ccd_defocus, dtype='float64')
    H9[:, 0] = H9[:, 0] - ccd_dec_x
    H9[:, 1] = H9[:, 1] - ccd_dec_y
    H9[:, 2] = H9[:, 2] - ccd_defocus
    z_ff_ccd = ff_data[2][2]
    z_ff_ccd = cte.recalc(z_ff_ccd, 'alum5083', t)
    H9 = trace.to_next_surface(H9, DC9, z_ff_ccd)
    H9[:, 2] = 0.
    # Orientation
    ccd_tilt_x = cte.recalc(params[40], 'alum5083', t)
    ccd_tilt_y = cte.recalc(params[41], 'alum5083', t)
    ccd_tilt_z = cte.recalc(params[42], 'alum5083', t)
    # Bug fix: the z tilt was previously converted with ``* 180 / np.pi``.
    T_ccd = np.array([ccd_tilt_x * np.pi / 180, ccd_tilt_y * np.pi / 180, ccd_tilt_z * np.pi / 180])
    H9 = transform.transform(H9, -T_ccd)
    # Aberrations corrections
    # We first rotate into the CARMENES frame (x -> -y, y -> x).
    H9x_aux = H9[:, 0].copy()
    H9[:, 0] = -H9[:, 1]
    H9[:, 1] = H9x_aux
    # Bug fix: must copy -- a bare slice is a view that changes when
    # Hff[:, 0] is overwritten on the next line.
    Hff_aux = Hff[:, 0].copy()
    Hff[:, 0] = -Hff[:, 1]
    Hff[:, 1] = Hff_aux
    # Polynomial correction
    # ab_3_poly_x, ab_3_poly_y = camera.seidel_aberration_poly(Hff, H9)
    # dx_seidel, dy_seidel = camera.seidel_aberration_correction(ab_3_poly_x, ab_3_poly_y, H9[:, 0], H9[:, 1])
    ws = []
    t_ccd = temps[0]
    wave_temp = wave / refraction_index.waves_air(wave, t_ccd, p)
    for i in range(len(order)):
        ws.append([order[i], wave[i], H9[i][0], H9[i][1], H9[i][2], DC9[i][0], DC9[i][1], DC9[i][2], epx[i], epy[i],
                   wave_temp[i]])
    ws = CCD_vis.mm2pix(np.asarray(ws))
    # ws = polyfit.correction(ws)
    return np.array(ws)
|
mtalapintoREPO_NAMEmoesPATH_START.@platospec@optics@vis_spectrometer.py@.PATH_END.py
|
{
"filename": "CalTempFromBias_iraf.py",
"repo_name": "spacetelescope/stsdas_stripped",
"repo_path": "stsdas_stripped_extracted/stsdas_stripped-master/stsdas/pkg/hst_calib/nicmos/CalTempFromBias_iraf.py",
"type": "Python"
}
|
from __future__ import division, print_function # confidence high
import os
# Import IRAF classes
from pyraf import iraf
from pyraf.iraf import stsdas, hst_calib
from nictools import CalTempFromBias
version = '2.01 (2008-Oct-21)'
# Point to default parameter file for task
_parfile = 'nicmos$CalTempFromBias.par'
_taskname = 'CalTempFromBias'
def _CalTempFromBias_iraf(filename, edit_type="RAW", hdr_key="TFBTEMP", err_key="TFBERR",nref_par= "/grp/hst/cdbs/nref/",
                          force=None, noclean=iraf.no, dry_run=1, verbosity=1):
    """IRAF-facing wrapper: run CalTempFromBias on `filename` and update its header.

    Prints a fatal-error message and returns quietly if the underlying task
    raises ValueError.
    """
    # IRAF passes '' for an unset string parameter; map it to Python None.
    if force == '':
        force = None
    try:
        task = CalTempFromBias.CalTempFromBias(
            filename, edit_type=edit_type, hdr_key=hdr_key, err_key=err_key,
            nref_par=nref_par, force=force, noclean=noclean,
            dry_run=dry_run, verbosity=verbosity)
        temp, sigma, winner, in_flag, dry_run = task.calctemp()
        stat = task.update_header(temp, sigma, winner)
    except ValueError as msg:
        print("FATAL ERROR: ", msg)
        return
# Set up CalTempFromBias as an IRAF task.
# NOTE(review): PkgName and PkgBinary are not defined anywhere in this module;
# they are presumably injected into the namespace by the IRAF package loader
# when this file is exec'd -- confirm before running this module standalone.
tfbias = iraf.IrafTaskFactory( taskname = _taskname, value = iraf.osfn(_parfile),
                               pkgname = PkgName, pkgbinary = PkgBinary,
                               function = _CalTempFromBias_iraf)
|
spacetelescopeREPO_NAMEstsdas_strippedPATH_START.@stsdas_stripped_extracted@stsdas_stripped-master@stsdas@pkg@hst_calib@nicmos@CalTempFromBias_iraf.py@.PATH_END.py
|
{
"filename": "amr_linwave.py",
"repo_name": "PrincetonUniversity/athena",
"repo_path": "athena_extracted/athena-master/tst/regression/scripts/tests/implicit_radiation/amr_linwave.py",
"type": "Python"
}
|
# Regression test based on linear wave convergence problem
# for full radiation hydro equations, using the
# implicit radiation hydro module and AMR
# Modules
import logging
import scripts.utils.athena as athena
import sys
sys.path.insert(0, '../../vis/python')
import athena_read # noqa
athena_read.check_nan_flag = True
logger = logging.getLogger('athena' + __name__[7:]) # set logger name based on module
# Prepare Athena++
def prepare(**kwargs):
    """Configure and compile Athena++ for the radiation linear-wave problem.

    Extra **kwargs are forwarded to athena.configure (e.g. compiler flags).
    """
    logger.debug('Running test ' + __name__)
    athena.configure('implicit_radiation',
                     prob='rad_linearwave',
                     coord='cartesian',
                     flux='hllc', **kwargs)
    athena.make()
# Run Athena++
def run(**kwargs):
    """Run the AMR linear-wave input file.

    NOTE(review): **kwargs is accepted for interface symmetry with the other
    regression tests but is not used here.
    """
    # L-going fast wave (set by default in input)
    arguments = ['time/cfl_number=0.3', # default =0.4, but tolerances measured w/ 0.3
                 'time/ncycle_out=100'
                 ]
    athena.run('radiation/athinput.rad_linearwave_amr', arguments)
# Analyze outputs
def analyze():
    """Check the recorded linear-wave error against the regression tolerance.

    Reads 'bin/linearwave-errors.dat', skipping comment lines ('#') and blank
    lines, and compares column 5 of the first data row to the tolerance.

    Returns:
        bool: True if the error is within tolerance, False otherwise.
    """
    filename = 'bin/linearwave-errors.dat'
    data = []
    with open(filename, 'r') as f:
        raw_data = f.readlines()
        for line in raw_data:
            fields = line.split()
            # Robustness fix: the original indexed line.split()[0] directly and
            # raised IndexError on blank lines; skip them along with comments.
            if not fields or fields[0][0] == '#':
                continue
            data.append([float(val) for val in fields])
    if data[0][4] > 1.05e-8:
        print("error in regime 8: ", data[0][4])
        return False
    return True
|
PrincetonUniversityREPO_NAMEathenaPATH_START.@athena_extracted@athena-master@tst@regression@scripts@tests@implicit_radiation@amr_linwave.py@.PATH_END.py
|
{
"filename": "sed.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/galsim/config/sed.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
from astropy.units import Quantity, Unit
from .util import LoggerWrapper
from .value import ParseValue, GetAllParams, GetIndex
from .input import RegisterInputConnectedType
from .bandpass import BuildBandpass
from ..errors import GalSimConfigError, GalSimConfigValueError
from ..sed import SED
from ..utilities import basestring, LRU_Cache
# This module-level dict will store all the registered SED types.
# See the RegisterSEDType function at the end of this file.
# The keys are the (string) names of the SED types, and the values will be builders that know
# how to build the SED object.
valid_sed_types = {}
def BuildSED(config, key, base, logger=None):
    """Read the SED parameters from config[key] and return a constructed SED object.

    Parameters:
        config:     A dict with the configuration information.
        key:        The key name in config indicating which object to build.
        base:       The base dict of the configuration.
        logger:     Optionally, provide a logger for logging debug statements. [default: None]

    Returns:
        (sed, safe) where sed is an SED instance, and safe is whether it is safe to reuse.
    """
    logger = LoggerWrapper(logger)
    logger.debug('obj %d: Start BuildSED key = %s',base.get('obj_num',0),key)
    param = config[key]
    # Check for direct value, else get the SED type
    if isinstance(param, SED):
        return param, True
    elif isinstance(param, basestring) and (param[0] == '$' or param[0] == '@'):
        # Shorthand eval ('$') / current ('@') string forms.
        return ParseValue(config, key, base, None)
    elif isinstance(param, dict):
        sed_type = param.get('type','FileSED')
    else:
        raise GalSimConfigError("%s must be either an SED or a dict"%key)
    # For these two, just do the usual ParseValue function.
    if sed_type in ('Eval', 'Current'):
        return ParseValue(config, key, base, None)
    # Check if we can use the current cached object: the tuple stored under
    # 'current' is (sed, safe, value_type, index, index_key).
    index, index_key = GetIndex(param, base)
    if 'current' in param:
        csed, csafe, cvalue_type, cindex, cindex_key = param['current']
        if cindex == index:
            logger.debug('obj %d: The SED object is already current', base.get('obj_num',0))
            logger.debug('obj %d: index_key = %s, index = %d',base.get('obj_num',0),
                         cindex_key, cindex)
            return csed, csafe
    if sed_type not in valid_sed_types:
        raise GalSimConfigValueError("Invalid sed.type.", sed_type, list(valid_sed_types.keys()))
    logger.debug('obj %d: Building sed type %s', base.get('obj_num',0), sed_type)
    # Dispatch to the registered builder, then cache the result for reuse.
    builder = valid_sed_types[sed_type]
    sed, safe = builder.buildSED(param, base, logger)
    logger.debug('obj %d: sed = %s', base.get('obj_num',0), sed)
    param['current'] = sed, safe, SED, index, index_key
    return sed, safe
class SEDBuilder:
    """A base class for building SED objects.

    The base class defines the call signatures of the methods that any derived class should follow.
    Register concrete subclasses with RegisterSEDType below.
    """
    def buildSED(self, config, base, logger):
        """Build the SED based on the specifications in the config dict.

        Note: Sub-classes must override this function with a real implementation.

        Parameters:
            config:     The configuration dict for the SED type.
            base:       The base configuration dict.
            logger:     If provided, a logger for logging debug statements.

        Returns:
            the constructed SED object.
        """
        raise NotImplementedError("The %s class has not overridden buildSED"%self.__class__)
def _read_sed_file(file_name, wave_type, flux_type):
    # Uncached worker: construct the SED directly from the file on disk.
    return SED(file_name, wave_type, flux_type)
# Cached wrapper: repeated loads of the same file reuse the parsed SED.
read_sed_file = LRU_Cache(_read_sed_file)
class FileSEDBuilder(SEDBuilder):
    """A class for loading an SED from a file.

    FileSED expects the following parameters:

        file_name (required)    The file to load
        wave_type (required)    The units (nm or Ang) of the wavelengths in the file
        flux_type (required)    Which kind of flux values are in the file
                                Allowed values: flambda, fnu, fphotons, 1

    Optionally, one of norm_flux_density (+ norm_wavelength), or
    norm_flux (+ norm_bandpass), and a redshift may be given.
    """
    def buildSED(self, config, base, logger):
        """Build the SED based on the specifications in the config dict.

        Parameters:
            config:     The configuration dict for the SED type.
            base:       The base configuration dict.
            logger:     If provided, a logger for logging debug statements.

        Returns:
            the constructed SED object.
        """
        logger = LoggerWrapper(logger)
        req = {
            'file_name': str,
            'wave_type': (Unit, str),
            'flux_type': (Unit, str),
        }
        opt = {
            'norm_flux_density': (float, Quantity),
            'norm_wavelength': (float, Quantity),
            'norm_flux': float,
            'redshift': float
        }
        # norm_bandpass is parsed separately below, only when norm_flux is set.
        ignore = ['norm_bandpass']
        kwargs, safe = GetAllParams(config, base, req=req, opt=opt, ignore=ignore)
        file_name = kwargs.pop('file_name')
        norm_flux_density = kwargs.pop('norm_flux_density', None)
        norm_wavelength = kwargs.pop('norm_wavelength', None)
        norm_flux = kwargs.pop('norm_flux', None)
        redshift = kwargs.pop('redshift', 0.)
        wave_type = kwargs.pop('wave_type')
        flux_type = kwargs.pop('flux_type')
        logger.info("Using SED file: %s",file_name)
        sed = read_sed_file(file_name, wave_type, flux_type)
        # Normalize either by flux density at a wavelength, or by total flux
        # through a bandpass; flux-density takes precedence.
        if norm_flux_density is not None:
            sed = sed.withFluxDensity(norm_flux_density, wavelength=norm_wavelength)
        elif norm_flux:
            bandpass, safe1 = BuildBandpass(config, 'norm_bandpass', base, logger)
            sed = sed.withFlux(norm_flux, bandpass=bandpass)
            safe = safe and safe1
        sed = sed.atRedshift(redshift)
        return sed, safe
def RegisterSEDType(sed_type, builder, input_type=None):
    """Register a SED type for use by the config apparatus.

    Parameters:
        sed_type:       The name of the type in the config dict.
        builder:        A builder object to use for building the SED object.  It should
                        be an instance of a subclass of SEDBuilder.
        input_type:     If the SED builder utilises an input object, give the key name of the
                        input type here.  (If it uses more than one, this may be a list.)
                        [default: None]
    """
    valid_sed_types[sed_type] = builder
    RegisterInputConnectedType(input_type, sed_type)
# Register the one SED type defined in this file.
RegisterSEDType('FileSED', FileSEDBuilder())
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@galsim@config@sed.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/scattermapbox/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    # NOTE: this class follows the plotly graph_objs code-generator layout;
    # changes here are normally made in the upstream codegen templates.
    # class properties
    # --------------------
    _parent_path_str = "scattermapbox"
    _path_str = "scattermapbox.line"
    _valid_props = {"color", "width"}
    # color
    # -----
    @property
    def color(self):
        """
        Sets the line color.
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # width
    # -----
    @property
    def width(self):
        """
        Sets the line width (in px).
        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
        Returns
        -------
        int|float
        """
        return self["width"]
    @width.setter
    def width(self, val):
        self["width"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the line color.
        width
            Sets the line width (in px).
        """
    def __init__(self, arg=None, color=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scattermapbox.Line`
        color
            Sets the line color.
        width
            Sets the line width (in px).

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")
        # Internal fast-path: share state with an existing parent object.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattermapbox.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.Line`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("width", None)
        _v = width if width is not None else _v
        if _v is not None:
            self["width"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@scattermapbox@_line.py@.PATH_END.py
|
{
"filename": "inspect_fit_results.py",
"repo_name": "omsharansalafia/grbpop",
"repo_path": "grbpop_extracted/grbpop-main/inspect_fit_results.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import corner
import emcee
from sklearn.decomposition import PCA
def credible(x_i, level=0.68, bins=50):
    """Greedy credible interval of a sample set, estimated from a histogram.

    Bins the samples, accumulates bin counts from the least to the most
    populated bin, and keeps the bins whose normalised cumulative count
    exceeds 1 - level.  Returns (low, high): the lowest left edge and the
    highest right edge among the kept bins.
    """
    counts, edges = np.histogram(x_i, bins=bins)
    ranking = np.argsort(counts)
    cum = np.zeros_like(counts)
    cum[ranking] = np.cumsum(counts[ranking])
    cum = cum / cum.max()
    keep = cum > (1. - level)
    return np.min(edges[:-1][keep]), np.max(edges[1:][keep])
def read_chain(chain_filename, burnin_fraction=0.1, thin=1, bounds=None):
    """Load a flattened emcee chain and its log-probability from an HDF file.

    Parameters:
    - chain_filename: emcee .h5 backend file
    - burnin_fraction: fraction of iterations discarded as burn-in
    - thin: thinning factor applied when flattening
    - bounds: optional {param_index: (low, high)} dict; samples outside the
      open interval for any listed parameter are dropped (log-prob too)
    """
    backend = emcee.backends.HDFBackend(chain_filename, read_only=True)
    n_burn = int(burnin_fraction * backend.iteration)
    samples = backend.get_chain(flat=True, discard=n_burn, thin=thin)
    logprob = backend.get_log_prob(flat=True, discard=n_burn, thin=thin)
    if bounds is not None:
        for dim in range(samples.shape[-1]):
            if dim not in bounds:
                continue
            lo, hi = bounds[dim]
            keep = (samples[:, dim] > lo) & (samples[:, dim] < hi)
            logprob = logprob[keep]
            samples = samples[keep]
    return samples, logprob
def Delta_AIC(chain_filename1, chain_filename2, burnin_fraction=0.1, thin=1):
    """Akaike Information Criterion difference between two fits.

    Returns 2*(k2 - k1 + max(lnL2) - max(lnL1)), where k is the number of
    parameters (chain dimensionality) and max(lnL) the best log-probability
    sample of each chain.

    Bug fix: the original body referenced ``burnin_fraction`` and ``thin``
    without defining them, so every call raised NameError.  They are now
    keyword parameters with the same defaults used elsewhere in this module
    (backward-compatible: previous callers passed only the two filenames).

    Parameters:
    - chain_filename1, chain_filename2: emcee .h5 backend files
    - burnin_fraction: fraction of iterations discarded as burn-in
    - thin: thinning factor applied when flattening
    """
    def _ndim_and_max_ll(fname):
        # One-line purpose: load a chain and return (n_params, max log-prob).
        reader = emcee.backends.HDFBackend(fname, read_only=True)
        discard = int(burnin_fraction * reader.iteration)
        flatchain = reader.get_chain(flat=True, discard=discard, thin=thin)
        flatll = reader.get_log_prob(flat=True, discard=discard, thin=thin)
        return flatchain.shape[-1], flatll.max()
    k1, ll1 = _ndim_and_max_ll(chain_filename1)
    k2, ll2 = _ndim_and_max_ll(chain_filename2)
    return 2 * (k2 - k1 + ll2 - ll1)
def corner_plot(chain_filename,burnin_fraction=0.1,savefig=None,show=False,verbose=True,credible_level=0.9,limit_level=0.95,labels=None,truths=None,bestfit_method='pca',bounds=None,transform=None,thin=1,return_chain=False,hide=None,return_figure=False,truth_color='r',cred_int_color='k',annotate_cred_int=True,greedy=True,**kwargs):
    """
    Make a corner plot of the posterior probability density distribution.

    Parameters:
    - chain_filename: the name of the emcee .h5 file that contains the
                      chain
    - burnin_fraction: the fraction of the posterior samples to be
                      discarded as burnin
    - savefig:        if set to a string, this is the filename to which
                      the output figure is saved
    - show:           if True, show the corner plot in an interactive window.
    - verbose:        if True, print some information, including the
                      parameter confidence bounds
    - credible_level: the level at which credible bounds are given.
                      Default: 0.9 (90%)
    - limit_level:    the credible level at which upper or lower limits
                      must be given. Default: 0.95 (2 sigma)
    - labels:         Parameter names to be used as labels in the plot. Optional.
    - truths:         The "true" parameter values.
    - bestfit_method: 'pca' approximates the maximum a posteriori by doing
                      a principal component analysis decomposition and taking
                      the mean in the decomposed variable space; 'median'
                      gives the median of the marginalised posterior;
                      'best sample' just takes the sample with the best
                      probability. Any other value falls back to the mean.
    - bounds:         dictionary in the form {i:(l,h), ...}, where i is the
                      index of the parameter and l and h are the lower and
                      upper bounds. If given, the chain is cut off to exclude
                      parameter values outside of the bounds.
    - transform:      dictionary in the form {i:f, ...}, where i is the
                      index of the parameter and f is a function that is
                      applied to all the samples to transform the parameter
                      to a different representation (e.g. to transform from
                      log to linear).
    - hide :          list containing indices of parameters that must not
                      be shown in the corner plot.
    - thin:           factor by which the chain must be thinned; the string
                      'autocorr' thins by one fifth of the mean
                      autocorrelation time. Default: 1
    - return_chain:   if True, also return flat chain and flat log probability
    - return_figure:  if True, also return the figure
    - cred_int_color: the color of the vertical lines that show the credible ranges
                      in the 1D plots. Default: 'k'
    - annotate_cred_int: whether to annotate the credible intervals on top of the
                      diagonal plots. Default: True
    - greedy:         if True use the greedy `credible` estimator, else
                      symmetric percentiles
    - **kwargs:       arguments passed to corner.corner

    Returns:
        (maxap, lower_bounds, upper_bounds), optionally preceded by the figure
        and/or followed by (flatchain, flatll) depending on return_figure and
        return_chain.
    """
    # open chain file
    reader = emcee.backends.HDFBackend(chain_filename,read_only=True)
    mean_autocorr_time = np.mean(reader.get_autocorr_time(tol=0))
    # get autocorr time & other info
    if verbose:
        print('Number of iterations recorded in chain file: ',reader.iteration)
        print('Mean autocorrelation time: ',mean_autocorr_time)
    if thin=='autocorr':
        thin = int(mean_autocorr_time)//5
    # extract chain and loglike
    discard = int(burnin_fraction*reader.iteration)
    flatchain = reader.get_chain(flat=True,discard=discard,thin=thin)
    flatll = reader.get_log_prob(flat=True,discard=discard,thin=thin)
    ndim = flatchain.shape[-1]
    # apply transforms if given
    if transform is not None:
        for i in range(flatchain.shape[-1]):
            if i in transform.keys():
                flatchain[:,i] = transform[i](flatchain[:,i])
    # impose bounds if given (flatll is filtered first, with a mask computed
    # on the not-yet-filtered flatchain, then flatchain itself)
    if bounds is not None:
        for i in range(flatchain.shape[-1]):
            if i in bounds.keys():
                flatll = flatll[(flatchain[:,i]>bounds[i][0])&(flatchain[:,i]<bounds[i][1])]
                flatchain = flatchain[(flatchain[:,i]>bounds[i][0])&(flatchain[:,i]<bounds[i][1])]
    if hide is not None:
        h = np.array([(i not in hide) for i in np.arange(ndim)],dtype=bool)
        flatchain = flatchain[:,h]
        ndim -= len(hide)
    # find maximum a posteriori
    if bestfit_method=='median':
        maxap = np.median(flatchain,axis=0)
    elif bestfit_method=='pca':
        # find best fit using PCA: mean in the decorrelated space, mapped back
        pca = PCA()
        pca.fit(flatchain)
        xt = pca.transform(flatchain)
        maxap = pca.inverse_transform(np.mean(xt,axis=0))
    elif bestfit_method=='best sample':
        maxap = flatchain[np.argmax(flatll)]
    else:
        maxap = np.mean(flatchain,axis=0)
    if labels is None:
        labels = ['' for i in range(ndim)]
    if truths is None:
        figure = corner.corner(flatchain,labels=labels,truths=maxap,truth_color=truth_color,**kwargs)
    else:
        figure = corner.corner(flatchain,labels=labels,truths=truths,truth_color=truth_color,**kwargs)
    axes = np.array(figure.axes).reshape((ndim, ndim))
    # show maximum a posteriori & credible intervals on the diagonal
    lower_bounds = np.zeros(ndim)
    upper_bounds = np.zeros(ndim)
    for i in range(ndim):
        lolim = False
        uplim = False
        lev = credible_level
        if greedy:
            c0,c1 = credible(flatchain[:,i],level=lev)
        else:
            c0,c1 = np.percentile(flatchain[:,i],[50.-100.*lev/2.,50.+100.*lev/2.])
        # If a bound coincides with the sample extremum the parameter is
        # unconstrained on that side: quote a one-sided limit at limit_level.
        if c0==flatchain[:,i].min():
            lev = limit_level
            uplim = True
            c0,c1 = credible(flatchain[:,i],level=lev)
        elif c1==flatchain[:,i].max():
            lev = limit_level
            lolim = True
            c0,c1 = credible(flatchain[:,i],level=lev)
        Dh = c1-maxap[i]
        Dl = maxap[i]-c0
        if lolim:
            print('{0} > {1:.3f} ({2:.0%})'.format(labels[i%len(labels)],c0,lev))
        elif uplim:
            print('{0} < {1:.3f} ({2:.0%})'.format(labels[i%len(labels)],c1,lev))
        else:
            print('{0} = {1:.3f} (+{2:.3f}, -{3:.3f}) ({4:.0%})'.format(labels[i%len(labels)],maxap[i],Dh,Dl,lev))
        ax = axes[i, i]
        ax.axvline(c0, color=cred_int_color,linestyle='--')
        ax.axvline(c1, color=cred_int_color,linestyle='--')
        if annotate_cred_int:
            # Build the LaTeX title value^{+Dh}_{-Dl}, appending the level
            # when it differs from the requested credible_level.
            tit = r'$' + '{0:.2f}'.format(maxap[i]) + r'^{' + '+{0:.2f}'.format(Dh) + r'}_{' + '-{0:.2f}'.format(Dl)
            if lev!=credible_level:
                tit = tit + r'}$' + ' ({0:.0f}%)'.format(lev*100.)
            else:
                tit = tit + r'}$'
            ax.set_title(tit)
        lower_bounds[i]=c0
        upper_bounds[i]=c1
    if savefig is not None:
        plt.savefig(savefig)
    if show:
        plt.show()
    if return_chain and return_figure:
        return figure,maxap,lower_bounds,upper_bounds,flatchain,flatll
    elif return_chain:
        return maxap,lower_bounds,upper_bounds,flatchain,flatll
    elif return_figure:
        return figure,maxap,lower_bounds,upper_bounds
    else:
        return maxap,lower_bounds,upper_bounds
def chain_plot(chain_filename, burnin_fraction=0.1, labels=None, truths=None):
    """Plot per-parameter walker trajectories plus the log-probability trace.

    Parameters:
    - chain_filename: the name of the emcee .h5 file that contains the chain
    - burnin_fraction: accepted for interface symmetry (not applied here)
    - labels: optional parameter names for the y axes
    - truths: optional true parameter values, drawn as dashed lines
    """
    # Read the full (unflattened) chain: shape (steps, walkers, ndim).
    backend = emcee.backends.HDFBackend(chain_filename, read_only=True)
    samples = backend.get_chain(flat=False)
    logprob = backend.get_log_prob(flat=False)
    nsteps, nwalkers, ndim = samples.shape
    if labels is None:
        labels = ['' for _ in range(ndim)]
    steps = np.arange(nsteps)
    # One stacked panel per parameter.
    for dim in range(ndim):
        plt.subplot(ndim + 1, 1, dim + 1)
        for walker in range(nwalkers):
            plt.plot(steps, samples[:, walker, dim], alpha=0.5)
        plt.ylabel(labels[dim])
        plt.tick_params(which='both', direction='in', top=True, right=True, labelbottom=False)
        plt.xlim([0, nsteps])
        if truths is not None:
            plt.axhline(y=truths[dim], ls='--', color='k', zorder=100)
    # Bottom panel: log-probability of every walker.
    plt.subplot(ndim + 1, 1, ndim + 1)
    plt.plot(steps, logprob, alpha=0.5)
    plt.ylabel('logprob')
    plt.xlabel('step')
    plt.xlim([0, nsteps])
    plt.tick_params(which='both', direction='in', top=True, right=True, labelbottom=True)
    plt.subplots_adjust(hspace=0.075)
    plt.show()
|
omsharansalafiaREPO_NAMEgrbpopPATH_START.@grbpop_extracted@grbpop-main@inspect_fit_results.py@.PATH_END.py
|
{
"filename": "test_detector_db.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioReco/detector/test_detector_db.py",
"type": "Python"
}
|
import NuRadioReco.detector.detector
import argparse
import datetime
if __name__ == "__main__":
    # CLI: positional station id, optional --time to evaluate the DB at a date.
    arg_parser = argparse.ArgumentParser(description='test DB detector description')
    arg_parser.add_argument('station_number', type=int,
                            help='the station number')
    arg_parser.add_argument("--time", dest='timestamp', type=str, default=None,
                            help='the time to evaluate the DB')
    cli = arg_parser.parse_args()
    detector = NuRadioReco.detector.detector.Detector()
    if cli.timestamp is not None:
        # Move the detector description to the requested date (YYYY-MM-DD).
        print("update detector to time {}".format(cli.timestamp))
        evaluation_time = datetime.datetime.strptime(cli.timestamp, "%Y-%m-%d")
        print(evaluation_time)
        detector.update(evaluation_time)
    # result = det.get_everything(args.station_number)
    result = detector.get_relative_positions(cli.station_number)
    print(result)
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioReco@detector@test_detector_db.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/volume/slices/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info >= (3, 7) and not TYPE_CHECKING:
    # Modern Python at runtime: defer the submodule imports until first
    # attribute access via plotly's lazy relative-import helper.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._z.ZValidator", "._y.YValidator", "._x.XValidator"]
    )
else:
    # Python < 3.7 or a static type checker: import the validators eagerly.
    from ._z import ZValidator
    from ._y import YValidator
    from ._x import XValidator
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@volume@slices@__init__.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/pie/hoverlabel/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    # Auto-generated plotly hierarchy class for ``pie.hoverlabel.font``:
    # the font used to render hover labels of pie traces.  All property
    # reads/writes go through ``self[...]``, which the base class routes
    # to the matching validator; do not edit by hand beyond regeneration.

    # class properties
    # --------------------
    _parent_path_str = "pie.hoverlabel"
    _path_str = "pie.hoverlabel.font"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for color .

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for family .

        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for size .

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Font object

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.pie.hoverlabel.Font`
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        # Internal construction path: parent supplied, skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so property pops below don't mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.pie.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.pie.hoverlabel.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@pie@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "microsoft/vscode",
"repo_path": "vscode_extracted/vscode-main/.devcontainer/README.md",
"type": "Markdown"
}
|
# Code - OSS Development Container
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/microsoft/vscode)
This repository includes configuration for a development container for working with Code - OSS in a local container or using [GitHub Codespaces](https://github.com/features/codespaces).
> **Tip:** The default VNC password is `vscode`. The VNC server runs on port `5901` and a web client is available on port `6080`.
## Quick start - local
If you already have VS Code and Docker installed, you can click the badge above or [here](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/microsoft/vscode) to get started. Clicking these links will cause VS Code to automatically install the Dev Containers extension if needed, clone the source code into a container volume, and spin up a dev container for use.
1. Install Docker Desktop or Docker for Linux on your local machine. (See [docs](https://aka.ms/vscode-remote/containers/getting-started) for additional details.)
2. **Important**: Docker needs at least **4 Cores and 8 GB of RAM** to run a full build with **9 GB of RAM** being recommended. If you are on macOS, or are using the old Hyper-V engine for Windows, update these values for Docker Desktop by right-clicking on the Docker status bar item and going to **Preferences/Settings > Resources > Advanced**.
> **Note:** The [Resource Monitor](https://marketplace.visualstudio.com/items?itemName=mutantdino.resourcemonitor) extension is included in the container so you can keep an eye on CPU/Memory in the status bar.
3. Install [Visual Studio Code Stable](https://code.visualstudio.com/) or [Insiders](https://code.visualstudio.com/insiders/) and the [Dev Containers](https://aka.ms/vscode-remote/download/containers) extension.

> **Note:** The Dev Containers extension requires the Visual Studio Code distribution of Code - OSS. See the [FAQ](https://aka.ms/vscode-remote/faq/license) for details.
4. Press <kbd>Ctrl/Cmd</kbd> + <kbd>Shift</kbd> + <kbd>P</kbd> or <kbd>F1</kbd> and select **Dev Containers: Clone Repository in Container Volume...**.
> **Tip:** While you can use your local source tree instead, operations like `npm i` can be slow on macOS or when using the Hyper-V engine on Windows. We recommend using the WSL filesystem on Windows or the "clone repository in container" approach on Windows and macOS instead since it uses "named volume" rather than the local filesystem.
5. Type `https://github.com/microsoft/vscode` (or a branch or PR URL) in the input box and press <kbd>Enter</kbd>.
6. After the container is running:
1. If you have the `DISPLAY` or `WAYLAND_DISPLAY` environment variables set locally (or in WSL on Windows), desktop apps in the container will be shown in local windows.
2. If these are not set, open a web browser and go to [http://localhost:6080](http://localhost:6080), or use a [VNC Viewer][def] to connect to `localhost:5901` and enter `vscode` as the password. Anything you start in VS Code, or the integrated terminal, will appear here.
Next: **[Try it out!](#try-it)**
## Quick start - GitHub Codespaces
1. From the [microsoft/vscode GitHub repository](https://github.com/microsoft/vscode), click on the **Code** dropdown, select **Open with Codespaces**, and then click on **New codespace**. If prompted, select the **Standard** machine size (which is also the default).
> **Note:** You will not see these options within GitHub if you are not in the Codespaces beta.
2. After the codespace is up and running in your browser, press <kbd>Ctrl/Cmd</kbd> + <kbd>Shift</kbd> + <kbd>P</kbd> or <kbd>F1</kbd> and select **Ports: Focus on Ports View**.
3. You should see **VNC web client (6080)** in the list of ports. Select the line and click on the globe icon to open it in a browser tab.
> **Tip:** If you do not see the port, <kbd>Ctrl/Cmd</kbd> + <kbd>Shift</kbd> + <kbd>P</kbd> or <kbd>F1</kbd>, select **Forward a Port** and enter port `6080`.
4. In the new tab, you should see noVNC. Click **Connect** and enter `vscode` as the password.
Anything you start in VS Code, or the integrated terminal, will appear here.
Next: **[Try it out!](#try-it)**
### Using VS Code with GitHub Codespaces
You may see improved VNC responsiveness when accessing a codespace from VS Code client since you can use a [VNC Viewer][def]. Here's how to do it.
1. Install [Visual Studio Code Stable](https://code.visualstudio.com/) or [Insiders](https://code.visualstudio.com/insiders/) and the [GitHub Codespaces extension](https://marketplace.visualstudio.com/items?itemName=GitHub.codespaces).
> **Note:** The GitHub Codespaces extension requires the Visual Studio Code distribution of Code - OSS.
2. After the VS Code is up and running, press <kbd>Ctrl/Cmd</kbd> + <kbd>Shift</kbd> + <kbd>P</kbd> or <kbd>F1</kbd>, choose **Codespaces: Create New Codespace**, and use the following settings:
- `microsoft/vscode` for the repository.
- Select any branch (e.g. **main**) - you can select a different one later.
- Choose **Standard** (4-core, 8GB) as the size.
3. After you have connected to the codespace, you can use a [VNC Viewer][def] to connect to `localhost:5901` and enter `vscode` as the password.
> **Tip:** You may also need to change your VNC client's **Picture Quality** setting to **High** to get a full color desktop.
4. Anything you start in VS Code, or the integrated terminal, will appear here.
Next: **[Try it out!](#try-it)**
## Try it
This container uses the [Fluxbox](http://fluxbox.org/) window manager to keep things lean. **Right-click on the desktop** to see menu options. It works with GNOME and GTK applications, so other tools can be installed if needed.
> **Note:** You can also set the resolution from the command line by typing `set-resolution`.
To start working with Code - OSS, follow these steps:
1. In your local VS Code client, open a terminal (<kbd>Ctrl/Cmd</kbd> + <kbd>Shift</kbd> + <kbd>\`</kbd>) and type the following commands:
```bash
npm i
bash scripts/code.sh
```
2. After the build is complete, open a web browser or a [VNC Viewer][def] to connect to the desktop environment as described in the quick start and enter `vscode` as the password.
3. You should now see Code - OSS!
Next, let's try debugging.
1. Shut down Code - OSS by clicking the box in the upper right corner of the Code - OSS window through your browser or VNC viewer.
2. Go to your local VS Code client, and use the **Run / Debug** view to launch the **VS Code** configuration. (Typically the default, so you can likely just press <kbd>F5</kbd>).
> **Note:** If launching times out, you can increase the value of `timeout` in the "VS Code", "Attach Main Process", "Attach Extension Host", and "Attach to Shared Process" configurations in [launch.json](../../.vscode/launch.json). However, running `./scripts/code.sh` first will set up Electron which will usually solve timeout issues.
3. After a bit, Code - OSS will appear with the debugger attached!
Enjoy!
### Notes
The container comes with VS Code Insiders installed. To run it from an Integrated Terminal use `VSCODE_IPC_HOOK_CLI= /usr/bin/code-insiders .`.
[def]: https://www.realvnc.com/en/connect/download/viewer/
|
microsoftREPO_NAMEvscodePATH_START.@vscode_extracted@vscode-main@.devcontainer@README.md@.PATH_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/barpolar/marker/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Colorscale validator bound to the ``barpolar.marker.colorscale`` attribute."""

    def __init__(
        self, plotly_name="colorscale", parent_name="barpolar.marker", **kwargs
    ):
        # Defaults below can be overridden via kwargs by the code generator.
        edit_type = kwargs.pop("edit_type", "calc")
        implied = kwargs.pop("implied_edits", {"autocolorscale": False})
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@barpolar@marker@_colorscale.py@.PATH_END.py
|
{
"filename": "test_customdetrend.py",
"repo_name": "ekaterinailin/AltaiPony",
"repo_path": "AltaiPony_extracted/AltaiPony-master/altaipony/tests/test_customdetrend.py",
"type": "Python"
}
|
import pytest
import numpy as np
import pandas as pd
from ..flarelc import FlareLightCurve
from ..customdetrend import (custom_detrending,
estimate_detrended_noise,
remove_exponential_fringes,
remove_sines_iteratively,
measure_flare)
from ..altai import find_iterative_median
# Parameter sets (a1, a2, period1, period2, quad, cube) consumed by
# test_custom_detrending via pytest parametrization: two sine amplitudes
# and periods plus quadratic/cubic trend coefficients for the synthetic LC.
cases = [(.05, 0.005, 1.5, 24.4, 1.5, 0.1),
         (.1, 0.005, 1.5, 14.4, 1.5, 0.5),
         (.1, 0.05, 1.5, 8, 1.5, 0.5),
         (.01, .1, .5, 1, -.5, 1.5),
         (.3, .05, .5, 30, -.5, 0.25),
         (.01, .1, .5, 2.2, -.1, .1),
         ]
@pytest.mark.parametrize("a1,a2,period1,period2,quad,cube", cases)
def test_custom_detrending(a1, a2, period1, period2, quad, cube,):
    """End-to-end check: build a synthetic light curve with injected flares,
    run the full custom detrending + flare finding chain, and compare the
    recovered flare positions, EDs and amplitudes against expected values.
    """
    # fix uncertainty
    errorval = 15.
    # seed so the noise realization (and hence the expected values) is fixed
    np.random.seed(40)
    lc = generate_lightcurve(errorval, a1, a2, period1, period2, quad, cube)
    # lc.plot()
    flcc = custom_detrending(lc)
    flccc = estimate_detrended_noise(flcc, mask_pos_outliers_sigma=2.5,
                                    std_window=100)
    flccc = find_iterative_median(flccc)
    flares = flccc.find_flares(addtail=True).flares
    print(flares.ed_rec)
    print(flares.ampl_rec)
    # check that the recovered uncertainty is close to the injected errorval
    assert np.nanmedian(flccc.detrended_flux_err) == pytest.approx(errorval, abs=2)
    # expected start/stop indices of the three injected flares
    compare = pd.DataFrame({'istart': {0: 5280, 1: 13160, 2: 23160},
                            'istop': {0: 5346, 1: 13163, 2: 23175}})
    assert (flares[["istart"]] == compare[["istart"]]).all().all()
    # expected equivalent durations and amplitudes of the three flares
    edrec = np.array([802.25, 4.7907, 40.325])
    amplrec = np.array([0.28757, 0.03004, 0.064365])
    for i in range(3):
        assert [float(flares[["istop"]].values[i])] == pytest.approx([float(compare[["istop"]].values[i])],
                                                                     rel=.09)
    for i in range(3):
        assert [flares.ed_rec.values[i]] == pytest.approx([edrec[i]], rel=0.2)
    for i in range(3):
        assert flares.ampl_rec.values[i] == pytest.approx(amplrec[i], rel=0.25)
    return
# (a, b, c, d): amplitude/frequency pairs of the two sine components
# injected into the light curve by test_remove_sines_iteratively.
cases = [(0.1, 3., 0.1, 5.),
         (0.1, 3., 0.1, 10.),
         (0.1, 8., 0.1, 10.),
         (0.05, 4., 0.1, 10.),
         (0.05, 4., 0.1, 3.),]
@pytest.mark.parametrize("a,b,c,d", cases)
def test_remove_sines_iteratively(a, b, c, d):
    """Check that iterative sine removal flattens a doubly-sinusoidal
    light curve down to its noise floor without distorting the baseline.
    NOTE(review): the RNG is not seeded here, so the assertions rely on
    generous tolerances rather than an exact noise realization.
    """
    # define light curve with two sinusoidal modulations on a flat baseline
    x = np.linspace(10, 40, 1200)
    y1 = 20. + np.random.normal(0, .01, 1200) + a * np.sin(b * x) + c * np.sin(d * x)
    flc = FlareLightCurve(time=x, flux=y1, flux_err=np.full_like(y1, .01),)
    flc.detrended_flux = y1
    flc.detrended_flux_err = np.full_like(y1, .01)
    # find median
    flc = find_iterative_median(flc)
    # flc.plot()
    # apply function
    flcd = remove_sines_iteratively(flc)
    # plt.plot(flcd.time, flcd.flux)
    # plt.plot(flcd.time, flcd.detrended_flux)
    # scatter should be back at the injected noise level, baseline preserved
    assert flcd.detrended_flux.std() == pytest.approx(0.01, rel=1e-1)
    assert flcd.detrended_flux.max() < 20.2
    assert flcd.detrended_flux.min() > 19.8
# (a, b, median, c, d): amplitudes/positions of the two exponential fringes
# and the baseline level used by test_remove_exponential_fringes.
cases = [(1., 40.,20.,1.,10.),
         (1., 40.,20.,-1.,10.),
         (-1., 40.,20.,-1.,10.),
         (-1., 40.,20.,1.,10.),
         (1., 40.,5.,1.,10.),
        ]
@pytest.mark.parametrize("a,b,median,c,d", cases)
def test_remove_exponential_fringes(a,b,median,c,d):
    """Check that exponential fringes at both ends of a light curve are
    removed while an injected flare-like excursion is preserved.
    """
    # seed numpy random to exclude outliers
    np.random.seed(42)
    # define light curve with two exponential fringes around a flat baseline
    x = np.linspace(10,40,1200)
    y1 = (a*np.exp(-1 * (b - x) * 2) +
          median +
          c*np.exp((d - x) * 2) +
          np.random.normal(0, .0005*median, 1200))
    # inject a decaying 5%-amplitude excursion that must survive detrending
    y1[800:810] = median + median * .05 * np.linspace(1,0,10)
    # define lightcurve
    flc = FlareLightCurve(time=x, flux=y1, flux_err=np.full_like(y1, .0005*median))
    flc.detrended_flux = y1
    flc.detrended_flux_err = np.full_like(y1, .0005*median)
    # get iterative median
    flc = find_iterative_median(flc)
    # run the function
    flcd = remove_exponential_fringes(flc)
    # plt.plot(flcd.time, flcd.flux)
    # plt.plot(flcd.time, flcd.detrended_flux)
    # noise level restored before the excursion; peak kept; baseline flat
    # print(flcd.detrended_flux.std(), flcd.detrended_flux.min(), flcd.detrended_flux.max())
    assert flcd.detrended_flux[:799].std() == pytest.approx(.0005*median, rel=1e-1)
    assert flcd.detrended_flux.max() == pytest.approx(median * 1.05)
    assert flcd.detrended_flux.min() > median * 0.995
def generate_lightcurve(errorval, a1, a2, period1, period2, quad, cube,
                        mean=3400.):
    """Generate wild light curves with variability on several
    timescales.

    The curve combines Gaussian noise, two sinusoids, quadratic and cubic
    trends, a data gap, and three injected flares of different sizes.

    Returns:
    ---------
    FlareLightCurve with time, flux, and flux_err attributes
    """
    time = np.arange(10, 10 + 10 * np.pi,.0008)
    # noise + baseline + two sinusoids + quadratic and cubic trends
    # (expression kept term-for-term so float results are reproducible)
    flux = (np.random.normal(0,errorval,time.shape[0]) +
            mean +
            a1*mean*np.sin(period1*time +1.) +
            a2*mean*np.sin(period2*time) +
            quad*(time-25)**2 -
            cube*(time-25)**3)
    # add a gap in the data
    flux[5600:7720] = np.nan
    # inject three flares: (start index, length, peak rise, final level)
    # big/long, tiny, and intermediate respectively
    for start, length, peak, floor in ((5280, 66, 1000, 250),
                                       (15280, 3, 100, 60),
                                       (25280, 15, 200, 60)):
        flux[start:start + length] += np.linspace(peak, floor, length)
    # typically Kepler and TESS underestimate the real noise
    err = np.full_like(time,errorval/3*2)
    # define FLC
    return FlareLightCurve(time=time, flux=flux, flux_err=err)
def test_estimate_detrended_noise():
    """Check that estimate_detrended_noise recovers the injected scatter,
    masks flares when estimating it, and tolerates NaNs in the flux.

    BUG FIX: the three median comparisons below were bare expressions
    (``np.median(...) == pytest.approx(...)``) with no ``assert``, so they
    evaluated to a boolean that was thrown away and the test could never
    fail on them. They are now asserted.
    """
    # setup light curve
    time = np.linspace(10,30,200)
    # seed numpy to get the same error array
    np.random.seed(30)
    # define flux with gaussian noise and baseline flux
    flux = np.random.normal(0,40, time.shape[0]) + 200.
    # define light curve
    flc = FlareLightCurve(time=time)
    flc.detrended_flux=flux
    # this should work
    flces = estimate_detrended_noise(flc, mask_pos_outliers_sigma=2.5,
                                     std_window=100)
    # error should be similar to input error of 40
    assert np.median(flces.detrended_flux_err.value) == pytest.approx(41.38048677022836)
    # re-seed and add a flare
    np.random.seed(30)
    flux = np.random.normal(0,40, time.shape[0]) + 200.
    flux[120:124] = [500,380,300,270]
    flc = FlareLightCurve(time=time)
    flc.detrended_flux = flux
    # should mask flare, error should not grow
    flces = estimate_detrended_noise(flc, mask_pos_outliers_sigma=2.5,
                                     std_window=100)
    assert np.median(flces.detrended_flux_err.value) == pytest.approx(41.24232394552432)
    # re-seed and add some NaNs
    np.random.seed(30)
    flux = np.random.normal(0,40, time.shape[0]) + 200.
    flux[120:124] = [500,380,300,270]
    flux[30:40] = np.nan
    flc = FlareLightCurve(time=time)
    flc.detrended_flux = flux
    # should work regardless
    flces = estimate_detrended_noise(flc, mask_pos_outliers_sigma=2.5,
                                     std_window=100)
    # error should not change too much
    assert np.median(flces.detrended_flux_err.value) == pytest.approx(41.23144256208637)
def test_measure_flare():
    """Simple test: Generate a light curve with an injected flare,
    detrend it, and manually measure the flare with measure_flare,
    then compare indices, times, ED and duration against the injection.
    NOTE(review): the RNG is not seeded before generate_lightcurve;
    the ED check relies on a 3% relative tolerance to absorb noise.
    """
    # generate LC
    flc = generate_lightcurve(15,.01,.03,4,.3,.1,.02)
    # de-trend LC
    flcc = custom_detrending(flc)
    # measure the flare injected at indices 5280..5346
    measure_flare(flcc,5280,5280+66)
    # get pandas.Series
    measured_flare = flcc.flares.iloc[0]
    # indices and times must match the injection (cadence is .0008 days)
    assert measured_flare.istart == 5280
    assert measured_flare.istop == 5346
    assert measured_flare.tstart == pytest.approx(14.224)
    assert measured_flare.tstop == pytest.approx(14.276800)
    # ED: mean excess (triangular 1000->250 ramp) over baseline, in seconds
    assert measured_flare.ed_rec == pytest.approx((250 + 750 * 0.5) / 3400 * 0.052800 * 24 * 3600, rel=.03)
    assert measured_flare.tstop -measured_flare.tstart == measured_flare.dur
|
ekaterinailinREPO_NAMEAltaiPonyPATH_START.@AltaiPony_extracted@AltaiPony-master@altaipony@tests@test_customdetrend.py@.PATH_END.py
|
{
"filename": "getStats.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/Benchmark/bulkDataNTPerf/src/getStats.py",
"type": "Python"
}
|
#! /usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# Copyright (c) European Southern Observatory, 2016
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# who when what
# -------- -------- ----------------------------------------------
# acaproni 2016-03-13 created
#
'''
Elaborates statistics fro the files passed in the command line.
The files are those generated bybdntSenderSimulator
'''
import sys
def getTransferRatesPerStream(fileName):
'''
Scan the passed file looking for the transfer rate of all the stream
@return: A dictionary with key the name of the stream and
and value a vector of data rates
'''
# Lines read from the file
linesRead=0
# Lines containing data
linesSelected=0
statsForStream={}
with open(fileName) as f:
for line in f:
linesRead = linesRead +1
if line.strip().startswith("Transfer rate for"):
linesSelected = linesSelected +1
parts=line.split()
flowName=parts[3].strip()[:-1]
dataRate=parts[4].split("M")[0].strip()
values=[]
if statsForStream.has_key(flowName):
values=statsForStream[flowName]
values.append(float(dataRate))
statsForStream[flowName]=values
print "Checked",linesRead,"in",fileName, "of which",linesSelected,"containing useful data"
print "Found",len(statsForStream),"streams"
for key in statsForStream.keys():
print "\t",len(statsForStream[flowName]),"data samples for stream",key
return statsForStream
def elaborateStatsForFile(fileName,stats):
    ''' Elaborates the stats for each stream of a file.

    @param fileName: the name of the file the stats were read from
    @param stats: dict mapping stream name -> list of data rates
    @return: a list of dicts, one per stream, each with the keys
             'Name', 'FileName', 'Min', 'Max' and 'Avg'
             (the previous docstring wrongly claimed a dictionary)
    '''
    statsToReturn = []
    # Iterate over each stream
    for key in stats.keys():
        values=stats[key]
        # min()/max()/sum() replace the previous in-place sort plus manual
        # accumulation loop; this also avoids mutating the caller's lists.
        statsForStream = {'Name': key,
                          'FileName': fileName,
                          'Min': min(values),
                          'Max': max(values),
                          'Avg': sum(values)/len(values)}
        statsToReturn.append(statsForStream)
    return statsToReturn
if __name__ == "__main__":
    # Entry point: every command-line argument is a simulator log file to parse.
    print "Elaborating stats from:", sys.argv[1:len(sys.argv)]
    # the statistics read from each file by getTransferRatesPerStream(...)
    statsForFile={}
    for fileName in sys.argv[1:len(sys.argv)]:
        statsForFile[fileName]=getTransferRatesPerStream(fileName)
    #print "statsForFile",statsForFile
    # Elaborate and prints the stats from each file
    # allStats is a vector containing one entry for each file
    allStats=[]
    for key in statsForFile.keys():
        print "Calculating stats from",key
        allStats.append(elaborateStatsForFile(key,statsForFile[key]))
    #print "allStats",allStats
    # Print the statistics of each flow of each file
    print
    print "==============================================================="
    # Accumulators for the grand min/avg/max summary across all flows
    totMin=0
    totAvg=0
    totMax=0
    totEntries=0
    for fileStat in allStats:
        for flowStat in fileStat:
            print "From file",flowStat['FileName']+":","Flow",flowStat['Name'],": min=",flowStat['Min'],"avg=",flowStat['Avg'], "max=",flowStat['Max']
            totMin = totMin + flowStat['Min']
            totAvg = totAvg + flowStat['Avg']
            totMax = totMax + flowStat['Max']
            totEntries = totEntries +1
    # NOTE(review): if no file contained any stream data, totEntries is 0 and
    # the divisions below raise ZeroDivisionError — confirm inputs always
    # contain "Transfer rate for" lines.
    print
    print "Summary: min=",totMin/totEntries,"avg=",totAvg/totEntries, "max=",totMax/totEntries
    print "==============================================================="
    print
    print
print
#
# ___oOo___
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@Benchmark@bulkDataNTPerf@src@getStats.py@.PATH_END.py
|
{
"filename": "_side.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/title/_side.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``histogram2dcontour.colorbar.title.side``."""

    def __init__(
        self,
        plotly_name="side",
        parent_name="histogram2dcontour.colorbar.title",
        **kwargs,
    ):
        # Defaults below can be overridden via kwargs by the code generator.
        edit_type = kwargs.pop("edit_type", "colorbars")
        allowed_values = kwargs.pop("values", ["right", "top", "bottom"])
        super(SideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2dcontour@colorbar@title@_side.py@.PATH_END.py
|
{
"filename": "basic_walkthrough.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/demo/guide-python/basic_walkthrough.py",
"type": "Python"
}
|
"""
Getting started with XGBoost
============================
This is a simple example of using the native XGBoost interface, there are other
interfaces in the Python package like scikit-learn interface and Dask interface.
See :doc:`/python/python_intro` and :doc:`/tutorials/index` for other references.
"""
import os
import pickle
import numpy as np
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
# Make sure the demo knows where to load the data.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
XGBOOST_ROOT_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR))
DEMO_DIR = os.path.join(XGBOOST_ROOT_DIR, "demo")
# X is a scipy csr matrix, XGBoost supports many other input types,
X, y = load_svmlight_file(os.path.join(DEMO_DIR, "data", "agaricus.txt.train"))
dtrain = xgb.DMatrix(X, y)
# validation set
X_test, y_test = load_svmlight_file(os.path.join(DEMO_DIR, "data", "agaricus.txt.test"))
dtest = xgb.DMatrix(X_test, y_test)
# specify parameters via map, definition are same as c++ version
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
# number of boosting rounds
num_round = 2
bst = xgb.train(param, dtrain, num_boost_round=num_round, evals=watchlist)
# run prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
# Classification error: fraction of predictions (thresholded at 0.5) that
# disagree with the true labels.
print(
    "error=%f"
    % (
        sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i])
        / float(len(preds))
    )
)
bst.save_model("model-0.json")
# dump model
bst.dump_model("dump.raw.txt")
# dump model with feature map
bst.dump_model("dump.nice.txt", os.path.join(DEMO_DIR, "data/featmap.txt"))
# save dmatrix into binary buffer
dtest.save_binary("dtest.dmatrix")
# save model
bst.save_model("model-1.json")
# Round-trip the model and the DMatrix through disk and verify that the
# reloaded booster reproduces the original predictions exactly.
# load model and data in
bst2 = xgb.Booster(model_file="model-1.json")
dtest2 = xgb.DMatrix("dtest.dmatrix")
preds2 = bst2.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds2 - preds)) == 0
# alternatively, you can pickle the booster
pks = pickle.dumps(bst2)
# load model and data in
bst3 = pickle.loads(pks)
preds3 = bst3.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds3 - preds)) == 0
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@demo@guide-python@basic_walkthrough.py@.PATH_END.py
|
{
"filename": "sensitivity_analysis.py",
"repo_name": "sbi-dev/sbi",
"repo_path": "sbi_extracted/sbi-main/sbi/analysis/sensitivity_analysis.py",
"type": "Python"
}
|
# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
# under the Apache License Version 2.0, see <https://www.apache.org/licenses/>
import logging
from copy import deepcopy
from typing import Any, Callable, Optional, Tuple, Union
from warnings import warn
import torch
from pyknos.nflows.nn import nets
from torch import Tensor, nn, relu
from torch.nn import MSELoss
from torch.nn.utils.clip_grad import clip_grad_norm_
from torch.optim.adam import Adam
from torch.utils import data
from torch.utils.data.sampler import SubsetRandomSampler
from sbi.utils.sbiutils import handle_invalid_x, standardizing_net
class Destandardize(nn.Module):
    """Invert z-scoring: map a standardized tensor back via ``x * std + mean``."""

    def __init__(self, mean: Union[Tensor, float], std: Union[Tensor, float]):
        super().__init__()
        mean = torch.as_tensor(mean)
        std = torch.as_tensor(std)
        # Keep plain attributes for direct access ...
        self.mean = mean
        self.std = std
        # ... and register buffers so the statistics follow .to()/state_dict().
        self.register_buffer("_mean", mean)
        self.register_buffer("_std", std)

    def forward(self, tensor):
        # De-standardize: scale back up, then shift by the mean.
        return tensor * self._std + self._mean
def destandardizing_net(batch_t: Tensor, min_std: float = 1e-7) -> nn.Module:
    """Net that de-standardizes the output so the NN can learn the standardized target.
    Args:
        batch_t: Batched tensor from which mean and std deviation (across
            first dimension) are computed.
        min_std: Minimum value of the standard deviation to use when z-scoring to
            avoid division by zero.
    Returns:
        Neural network module that inverts the z-scoring of its input.
    """
    is_valid_t, *_ = handle_invalid_x(batch_t, True)
    t_mean = torch.mean(batch_t[is_valid_t], dim=0)
    # Bug fix: the original condition was `len(batch_t > 1)`, but `batch_t > 1`
    # is an elementwise boolean tensor, so its length is always len(batch_t) and
    # the single-sample branch below was unreachable (torch.std of one sample
    # is NaN). Compare the number of samples instead.
    if len(batch_t) > 1:
        t_std = torch.std(batch_t[is_valid_t], dim=0)
        # Clamp tiny deviations so downstream standardization cannot divide by ~0.
        t_std[t_std < min_std] = min_std
    else:
        t_std = 1
        logging.warning(
            """Using a one-dimensional batch will instantiate a Standardize transform
            with (mean, std) parameters which are not representative of the data. We
            allow this behavior because you might be loading a pre-trained model. If
            this is not the case, please be sure to use a larger batch."""
        )
    return Destandardize(t_mean, t_std)
def build_input_output_layer(
    batch_theta: Tensor,
    batch_property: Tensor,
    z_score_theta: bool = True,
    z_score_property: bool = True,
    embedding_net_theta: nn.Module = nn.Identity(),
) -> Tuple[nn.Module, nn.Module]:
    r"""Build the (optionally z-scoring) input and output layers for `ActiveSubspace`.

    The regression network used in the `ActiveSubspace` receives batches of
    $\theta$s and predicts properties; the layers returned here standardize its
    inputs and de-standardize its outputs.

    Args:
        batch_theta: Batch of $\theta$s, used to infer dimensionality and
            (optional) z-scoring.
        batch_property: Batch of properties, used for (optional) z-scoring.
        z_score_theta: Whether to z-score $\theta$s passing into the network.
        z_score_property: Whether to z-score properties passing into the network.
        embedding_net_theta: Optional embedding network for $\theta$s.

    Returns:
        Tuple of input layer and output layer.
    """
    # Prepend a standardizing step to the embedding net only when requested.
    input_layer: nn.Module = (
        nn.Sequential(standardizing_net(batch_theta), embedding_net_theta)
        if z_score_theta
        else embedding_net_theta
    )
    # The output layer undoes the property z-scoring (or passes through).
    output_layer: nn.Module = (
        destandardizing_net(batch_property) if z_score_property else nn.Identity()
    )
    return input_layer, output_layer
class ActiveSubspace:
    def __init__(self, posterior: Any):
        """Identify the active subspace for sensitivity analyses.
        - Introduction to active subspaces: Constantine et al. 2015.
        - Application to analyse the sensitivity in neuroscience models:
          Deistler et al. 2021, in preparation.
        Args:
            posterior: Posterior distribution obtained with `SNPE`, `SNLE`, or `SNRE`.
                Needs to have a `.sample()` method. If we want to analyse the
                sensitivity of the posterior probability, it also must have a
                `.potential()` method.
        """
        self._posterior = posterior
        # Regression net f(theta) -> property; built lazily on first .train().
        self._regression_net = None
        # Training data, set by .add_property().
        self._theta = None
        self._emergent_property = None
        self._device = posterior._device
        # Per-epoch validation "log prob" (negative mean validation loss).
        self._validation_log_probs = []
    def add_property(
        self,
        theta: Tensor,
        emergent_property: Tensor,
        model: Union[str, Callable] = "resnet",
        hidden_features: int = 100,
        num_blocks: int = 2,
        dropout_probability: float = 0.5,
        z_score_theta: bool = True,
        z_score_property: bool = True,
        embedding_net: nn.Module = nn.Identity(),
    ) -> "ActiveSubspace":
        r"""Add a property whose sensitivity is to be analysed.
        To analyse the sensitivity of an emergent property, we train a neural network
        to predict the property from the parameter set $\theta$. The hyperparameters of
        this neural network also have to be specified here.
        Args:
            theta: Parameter sets $\theta$ sampled from the posterior.
            emergent_property: Tensor containing the values of the property given each
                parameter set $\theta$.
            model: Neural network used to distinguish valid from bad samples. If it is
                a string, use a pre-configured network of the provided type (either
                mlp or resnet). Alternatively, a function that builds a custom
                neural network can be provided. The function will be called with the
                first batch of parameters (theta,), which can thus be used for shape
                inference and potentially for z-scoring. It needs to return a PyTorch
                `nn.Module` implementing the classifier.
            hidden_features: Number of hidden units of the classifier if `model` is a
                string.
            num_blocks: Number of hidden layers of the classifier if `model` is a
                string.
            dropout_probability: Dropout probability of the classifier if `model` is
                `resnet`.
            z_score_theta: Whether to z-score the parameters $\theta$ used to train the
                classifier.
            z_score_property: Whether to z-score the property used to train the
                classifier.
            embedding_net: Neural network used to encode the parameters before they are
                passed to the classifier.
        Returns:
            `ActiveSubspace` to make the call chainable.
        """
        assert emergent_property.shape == (
            theta.shape[0],
            1,
        ), "The `emergent_property` must have shape (N, 1)."
        self._theta = theta
        self._emergent_property = emergent_property
        # Builder closures capture the hyperparameters above; they are invoked
        # lazily by .train() with the training thetas for shape inference.
        def build_resnet(theta):
            classifier = nets.ResidualNet(
                in_features=theta.shape[1],
                out_features=1,
                hidden_features=hidden_features,
                context_features=None,
                num_blocks=num_blocks,
                activation=relu,
                dropout_probability=dropout_probability,
                use_batch_norm=True,
            )
            input_layer, output_layer = build_input_output_layer(
                theta,
                emergent_property,
                z_score_theta,
                z_score_property,
                embedding_net,
            )
            classifier = nn.Sequential(input_layer, classifier, output_layer)
            return classifier
        def build_mlp(theta):
            classifier = nn.Sequential(
                nn.Linear(theta.shape[1], hidden_features),
                nn.BatchNorm1d(hidden_features),
                nn.ReLU(),
                nn.Linear(hidden_features, hidden_features),
                nn.BatchNorm1d(hidden_features),
                nn.ReLU(),
                nn.Linear(hidden_features, 1),
            )
            input_layer, output_layer = build_input_output_layer(
                theta,
                emergent_property,
                z_score_theta,
                z_score_property,
                embedding_net,
            )
            classifier = nn.Sequential(input_layer, classifier, output_layer)
            return classifier
        if isinstance(model, str):
            if model == "resnet":
                self._build_nn = build_resnet
            elif model == "mlp":
                self._build_nn = build_mlp
            else:
                raise NameError
        else:
            self._build_nn = model
        return self
    def train(
        self,
        training_batch_size: int = 200,
        learning_rate: float = 5e-4,
        validation_fraction: float = 0.1,
        stop_after_epochs: int = 20,
        max_num_epochs: int = 2**31 - 1,
        clip_max_norm: Optional[float] = 5.0,
    ) -> nn.Module:
        r"""Train a regression network to predict the specified property from $\theta$.
        Args:
            training_batch_size: Training batch size.
            learning_rate: Learning rate for Adam optimizer.
            validation_fraction: The fraction of data to use for validation.
            stop_after_epochs: The number of epochs to wait for improvement on the
                validation set before terminating training.
            max_num_epochs: Maximum number of epochs to run. If reached, we stop
                training even when the validation loss is still decreasing. Otherwise,
                we train until validation loss increases (see also `stop_after_epochs`).
            clip_max_norm: Value at which to clip the total gradient norm in order to
                prevent exploding gradients. Use `None` for no clipping.
        """
        assert (
            self._theta is not None and self._emergent_property is not None
        ), "You must call .add_property() first."
        # Get indices for permutation of the data.
        num_examples = len(self._theta)
        permuted_indices = torch.randperm(num_examples)
        num_training_examples = int((1 - validation_fraction) * num_examples)
        num_validation_examples = num_examples - num_training_examples
        train_indices, val_indices = (
            permuted_indices[:num_training_examples],
            permuted_indices[num_training_examples:],
        )
        # Dataset is shared for training and validation loaders.
        dataset = data.TensorDataset(self._theta, self._emergent_property)
        # Create neural_net and validation loaders using a subset sampler.
        train_loader = data.DataLoader(
            dataset,
            batch_size=training_batch_size,
            drop_last=True,
            sampler=SubsetRandomSampler(train_indices.tolist()),
        )
        val_loader = data.DataLoader(
            dataset,
            batch_size=min(training_batch_size, num_examples - num_training_examples),
            shuffle=False,
            drop_last=True,
            sampler=SubsetRandomSampler(val_indices.tolist()),
        )
        # Build the net only once so repeated .train() calls resume training.
        if self._regression_net is None:
            self._regression_net = self._build_nn(self._theta[train_indices]).to(
                self._device
            )
        optimizer = Adam(
            list(self._regression_net.parameters()),
            lr=learning_rate,
        )
        max_num_epochs = 2**31 - 1 if max_num_epochs is None else max_num_epochs
        # criterion / loss
        criterion = MSELoss()
        epoch, self._val_log_prob = 0, float("-Inf")
        while epoch <= max_num_epochs and not self._converged(epoch, stop_after_epochs):
            self._regression_net.train()
            for parameters, observations in train_loader:
                optimizer.zero_grad()
                outputs = self._regression_net(parameters.to(self._device))
                loss = criterion(outputs, observations.to(self._device))
                loss.backward()
                if clip_max_norm is not None:
                    clip_grad_norm_(
                        self._regression_net.parameters(),
                        max_norm=clip_max_norm,
                    )
                optimizer.step()
            epoch += 1
            # calculate validation performance
            self._regression_net.eval()
            val_loss = 0.0
            with torch.no_grad():
                for parameters, observations in val_loader:
                    outputs = self._regression_net(parameters.to(self._device))
                    loss = criterion(outputs, observations.to(self._device))
                    val_loss += loss.item()
            # Higher is better: the negated validation loss plays the role of a
            # validation log-prob for the convergence check below.
            self._val_log_prob = -val_loss / num_validation_examples
            self._validation_log_probs.append(self._val_log_prob)
            print("\r", "Training neural network. Epochs trained: ", epoch, end="")
        return deepcopy(self._regression_net)
    def _converged(self, epoch: int, stop_after_epochs: int) -> bool:
        r"""Return whether the training converged yet and save best model state so far.
        Checks for improvement in validation performance over previous epochs.
        Args:
            epoch: Current epoch in training.
            stop_after_epochs: How many fruitless epochs to let pass before stopping.
        Returns:
            Whether the training has stopped improving, i.e. has converged.
        """
        converged = False
        assert self._regression_net is not None
        posterior_nn = self._regression_net
        # (Re)-start the epoch count with the first epoch or any improvement.
        if epoch == 0 or self._val_log_prob > self._best_val_log_prob:
            self._best_val_log_prob = self._val_log_prob
            self._epochs_since_last_improvement = 0
            self._best_model_state_dict = deepcopy(posterior_nn.state_dict())
        else:
            self._epochs_since_last_improvement += 1
        # If no validation improvement over many epochs, stop training.
        if self._epochs_since_last_improvement > stop_after_epochs - 1:
            # Roll back to the best snapshot before reporting convergence.
            posterior_nn.load_state_dict(self._best_model_state_dict)
            converged = True
        return converged
    def find_directions(
        self,
        posterior_log_prob_as_property: bool = False,
        norm_gradients_to_prior: bool = True,
        num_monte_carlo_samples: int = 1000,
    ) -> Tuple[Tensor, Tensor]:
        r"""Return eigenvectors and values corresponding to directions of sensitivity.
        The directions of sensitivity are the directions along which a specific
        property changes in the fastest way. They will have the largest eigenvalues.
        This computes the matrix:
        $\mathbf{M} = \mathbb{E}_{p(\theta|x_o)}[\nabla_{\theta} f(\theta)^T
        \nabla_{\theta} f(\theta)]$
        where $f(\cdot)$ is the trained regression network. The expected value is
        approximated with a Monte-Carlo mean. Next, do an eigenvalue
        decomposition of the matrix $\mathbf{M}$:
        $\mathbf{M} = \mathbf{Q} \mathbf{\Lambda} \mathbf{Q}^{-1}$
        We then return the eigenvectors and eigenvalues found by this decomposition.
        Eigenvectors with large eigenvalues are directions along which the property is
        sensitive to changes in the parameters $\theta$ (`active` directions).
        Increases along these directions will increase the value of the property.
        Args:
            posterior_log_prob_as_property: Whether to use the posterior
                log-probability the key property whose sensitivity is analysed. If
                `False`, one must have specified an emergent property and trained a
                regression network using `.add_property().train()`. If `True`,
                any previously specified property is ignored.
            norm_gradients_to_prior: Whether to normalize each entry of the gradient
                by the standard deviation of the prior in each dimension. If set to
                `False`, the directions with the strongest eigenvalues might correspond
                to directions in which the prior is broad.
            num_monte_carlo_samples: Number of Monte Carlo samples that the average is
                based on. A larger value will make the results more accurate while
                requiring more compute time.
        Returns:
            Eigenvectors and corresponding eigenvalues. They are sorted in ascending
            order. The column `eigenvectors[:, j]` is the eigenvector corresponding to
            the `j`-th eigenvalue.
        """
        # Remember the normalization choice so .project() can apply the same one.
        self._gradients_are_normed = norm_gradients_to_prior
        if self._emergent_property is None and not posterior_log_prob_as_property:
            raise ValueError(
                "You have not yet passed an emergent property whose "
                "sensitivity you would like to analyse. Please use "
                "`.add_emergent_property().train()` to do so. If you want "
                "to use all features that had also been used to infer the "
                "posterior distribution (i.e. you want to analyse the "
                "sensitivity of the posterior probability), use: "
                "`.find_active(posterior_log_prob_as_property=True)`."
            )
        if self._emergent_property is not None and posterior_log_prob_as_property:
            warn(
                "You specified a property with `.add_property()`, but also set "
                "`posterior_log_prob_as_property=True`. The specified property will "
                "be ignored.",
                stacklevel=2,
            )
        thetas = self._posterior.sample((num_monte_carlo_samples,))
        # Gradients w.r.t. thetas are needed for the outer-product matrix below.
        thetas.requires_grad = True
        if posterior_log_prob_as_property:
            predictions = self._posterior.potential(thetas, track_gradients=True)
        else:
            assert (
                self._regression_net is not None
            ), "self._regression_net is None, you must call `.train()` first."
            predictions = self._regression_net.forward(thetas)
        # Summing (via mean) lets a single backward pass fill thetas.grad with
        # the per-sample gradients.
        loss = predictions.mean()
        loss.backward()
        gradients = torch.squeeze(thetas.grad)
        if norm_gradients_to_prior:
            # Use analytic prior moments when available; otherwise estimate them
            # from prior samples.
            if hasattr(self._posterior.prior, "stddev") and hasattr(
                self._posterior.prior, "mean"
            ):
                self._prior_mean = self._posterior.prior.mean
                self._prior_scale = self._posterior.prior.stddev
            else:
                prior_samples = self._posterior.prior.sample((10000,))
                self._prior_scale = torch.std(prior_samples, dim=0)
                self._prior_mean = torch.mean(prior_samples, dim=0)
            gradients *= self._prior_scale
        # Monte-Carlo estimate of M = E[g g^T] over the posterior samples.
        outer_products = torch.einsum("bi,bj->bij", (gradients, gradients))
        average_outer_product = outer_products.mean(dim=0)
        eigen_values, eigen_vectors = torch.linalg.eigh(average_outer_product, UPLO="U")
        # Identify the direction of the eigenvectors. Above, we have computed an outer
        # product m*mT=A. Note that the same matrix A can be constructed with the
        # negative vector (-m)(-mT)=A. Thus, when performing an eigen-decomposition of
        # A, we can not determine if the eigenvector was -m or m. We solve this issue
        # below. We use that the average gradient m should be obtained by a mean over
        # the eigenvectors (weighted by the eigenvalues).
        av_gradient = torch.mean(gradients, dim=0)
        av_gradient = av_gradient / torch.norm(av_gradient)
        av_eigenvec = torch.mean(eigen_vectors * eigen_values, dim=1)
        av_eigenvec = av_eigenvec / torch.norm(av_eigenvec)
        # Invert if the negative eigenvectors are closer to the average gradient.
        if (torch.mean((av_eigenvec - av_gradient) ** 2)) > (
            torch.mean((-av_eigenvec - av_gradient) ** 2)
        ):
            eigen_vectors = -eigen_vectors
        self._eigen_vectors = eigen_vectors
        return eigen_values, eigen_vectors
    def project(self, theta: Tensor, num_dimensions: int) -> Tensor:
        r"""Return $\theta$ that were projected into the subspace.
        To identify the dimensionality of the active subspace `num_dimensions`,
        Constantine et al. 2015 suggest to look at gaps in the eigenvalue spectrum.
        Performs a linear projection. Also takes care of normalizing the data. The mean
        and standard deviation used for normalizing are the same as used to compute the
        eigenvectors and eigenvalues (mean and std of prior).
        Args:
            theta: Parameter sets to be projected.
            num_dimensions: Dimensionality of the subspace into which to project.
        Returns:
            Projected parameters of shape `(theta.shape[0], num_dimensions)`.
        """
        theta = theta.to(self._device)
        if self._gradients_are_normed:
            theta = (theta - self._prior_mean) / self._prior_scale
        # Eigenvalues are sorted ascending, so the last columns are the most
        # sensitive (active) directions.
        projection_mat = self._eigen_vectors[:, -num_dimensions:]
        projected_theta = torch.mm(theta, projection_mat)
        return projected_theta
|
sbi-devREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@sbi@analysis@sensitivity_analysis.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "james-trayford/strauss",
"repo_path": "strauss_extracted/strauss-main/README.md",
"type": "Markdown"
}
|
# STRAUSS
***S**onification **T**ools and **R**esources for **A**nalysis **U**sing **S**ound **S**ynthesis*

## Sonification and STRAUSS
*"Sonification"* is the process of conveying data via the medium of sound. Sonification can be used to make scientific data more accessible to those with visual impairments, enhance visualisations and movies, and even convey information more efficiently than by visual means. The *STRAUSS* python package is intended to make sonification simple for both scientific and outreach applications.
## Getting Started
Access the [full documentation here](https://strauss.readthedocs.io/) *(under construction!)* and read more about the associated [Audio Universe project here](https://www.audiouniverse.org/).
*STRAUSS* is [PyPI hosted package](https://pypi.org/project/strauss/) and can be installed directly via `pip`:
`pip install strauss`
For a standard install (without text-to speech support).
If you would like access to all the resources and explore the code directly, make a copy of the *STRAUSS* repository via SSH,
`git clone git@github.com:james-trayford/strauss.git strauss`
or HTTPS if you don't have [SSH keys set up](https://docs.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh),
`git clone https://github.com/james-trayford/strauss.git strauss`
and install *STRAUSS* from your local repository using `pip`
`cd strauss`
`pip install .`
For development purposes, you can instead use:
`pip install -e .`
where the `-e` option allows a local install, such that you can modify and run the source code on the fly without needing to reinstall each time.
We recommend using a conda environment to avoid package conflicts. Type
`conda env create -f environment.yml`
before `pip install -e .`
and activate the environment with
`conda activate strauss`
### Installing with text-to-speech support
*STRAUSS* can also be installed with text-to-speech (TTS) support, allowing audio captioning of sonifications and future accessibility features, via the [TTS module](https://github.com/coqui-ai/TTS). Due to the specific requirements of this module, installation can sometimes lead to incompatibilities with other modules and be slower, so it is packaged with *STRAUSS* as an optional extra. If you'd like to use these features, it's easy to install them directly from PyPI:
`pip install strauss[TTS]`
or if you're working from a local copy of the repository, as above, use
`pip install -e ".[TTS]"`
with or without the `-e` option depending on whether you want to edit the *STRAUSS* code or not, as above.
## Acknowledgments
*STRAUSS* is developed and provided free and open source, supported by a [UKRI Early Stage Research and Development Award](https://www.ukri.org/opportunity/early-stage-research-and-development-scheme/), grant code ST/X004651/1.
The *STRAUSS* code has also benefited from funding via an [Royal Astronomical Society Education & Outreach grant award](https://ras.ac.uk/awards-and-grants/outreach/education-outreach-small-grants-scheme), providing hardware and software for sound development and spatialisation testing.
|
james-trayfordREPO_NAMEstraussPATH_START.@strauss_extracted@strauss-main@README.md@.PATH_END.py
|
{
"filename": "vesde.py",
"repo_name": "AlexandreAdam/score_models",
"repo_path": "score_models_extracted/score_models-master/score_models/sde/vesde.py",
"type": "Python"
}
|
import torch
from .sde import SDE
from torch import Tensor
import numpy as np
from torch.distributions import Normal, Independent
from score_models.utils import DEVICE
class VESDE(SDE):
    """Variance Exploding SDE: zero drift, geometrically growing noise scale."""

    def __init__(
        self,
        sigma_min: float,
        sigma_max: float,
        T: float = 1.0,
        epsilon: float = 0.0,
        **kwargs
    ):
        """
        Variance Exploding stochastic differential equation.

        Args:
            sigma_min (float): The minimum value of the standard deviation of the noise term.
            sigma_max (float): The maximum value of the standard deviation of the noise term.
            T (float, optional): The time horizon for the VESDE. Defaults to 1.0.
            epsilon (float, optional): Lower integration cutoff forwarded to the base SDE.
        """
        super().__init__(T, epsilon)
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max

    def sigma(self, t: Tensor) -> Tensor:
        # Geometric interpolation from sigma_min (t=0) to sigma_max (t=T).
        growth = self.sigma_max / self.sigma_min
        return self.sigma_min * growth ** (t / self.T)

    def prior(self, shape, mu=None, device=DEVICE):
        """
        Technically, VESDE does not change the mean of the 0 temperature distribution,
        so I give the option to provide for more accuracy. In practice,
        sigma_max is chosen large enough to make this choice irrelevant
        """
        if mu is None:
            mu = torch.zeros(shape).to(device)
        else:
            assert mu.shape == shape
        base = Normal(loc=mu, scale=self.sigma_max, validate_args=False)
        return Independent(base, len(shape))

    def marginal_prob_scalars(self, t) -> tuple[Tensor, Tensor]:
        # The mean is untouched (scale factor 1); only the std grows with t.
        return torch.ones_like(t), self.sigma(t)

    def diffusion(self, t: Tensor, x: Tensor) -> Tensor:
        _, *feature_dims = x.shape  # broadcast diffusion coefficient to x shape
        log_ratio = np.log(self.sigma_max) - np.log(self.sigma_min)
        sigma_t = self.sigma(t).view(-1, *[1] * len(feature_dims))
        return sigma_t * np.sqrt(2 * log_ratio)

    def drift(self, t: Tensor, x: Tensor) -> Tensor:
        # Variance exploding SDE has no drift term.
        return torch.zeros_like(x)
|
AlexandreAdamREPO_NAMEscore_modelsPATH_START.@score_models_extracted@score_models-master@score_models@sde@vesde.py@.PATH_END.py
|
{
"filename": "Installation.ipynb",
"repo_name": "PrincetonUniversity/charis-dep",
"repo_path": "charis-dep_extracted/charis-dep-main/documentation/source/Installation.ipynb",
"type": "Jupyter Notebook"
}
|
# Install the CHARIS Data reduction pipeline
### Requirements
Python 2.7
Cython with a C compiler and OpenMP. C compilers as gcc-5, gcc-mp-5, gcc-mp-6 are acceptable.
Your computer should have at least ~2 GB of RAM to extract data cubes, and at least 2 GB/core (and at least 4 GB total) to build the calibration files. The calibration files can take a long time to generate if you do not have multiple processors.
### Dependencies
numpy, scipy, astropy
### Installation
The easy way to install is to use the setup.py in this directory with
~~~~
python setup.py install
~~~~
I strongly recommend that you first install the anaconda Python 2.7 from here if you are not already using anaconda:
https://www.continuum.io/downloads
If you are running this on a Mac, you need gcc from Xcode, and you probably need a homebrew installation of gcc-5 to enable OpenMP linking. Follow the instructions here:
http://mathcancer.blogspot.com/2016/01/PrepOSXForCoding-Homebrew.html
You may need to specify the C compiler when running the setup script using something like
```
CC=gcc-5 python setup.py install
```
or
```
CC=gcc-mp-5 python setup.py install
```
Type gcc [tab][tab] in a terminal to see your available gcc compilers. If you use tcsh instead of bash, your export command will be different, but something like this should work:
```
set CC = gcc-5
python setup.py install
```
You can check that the installation was successful by typing:
```
buildcal
Must call buildcal with at least three arguments:
1: The path to the narrow-band flatfield image
2: The wavelength, in nm, of the narrow-band image
3: The band/filter: 'J', 'H', 'K', or 'lowres'
Example: buildcal CRSA00000000.fits 1550 lowres
Optional additional arguments: filenames of darks
taken with the same observing setup.
Example: buildcal CRSA00000000.fits 1550 lowres darks/CRSA*.fits
```
## Windows 10 install
These instructions were provided by Zach Long and use the bash functionality within Windows 10. The procedure is as follows:
1. Download Anaconda 2.7 Linux installer from their website
2. Download and extract CHARIS code
3. Navigate to the appropriate folder (the folder is in mnt/c/ somewhere, c refers to the windows C: drive or whatever the main drive is)
4. Install using "bash Anaconda2-4.3.1-Linux-x86_64.sh"
5. Install pip using "sudo apt-get install python-pip"
6. Do a "sudo pip install cython"
7. If you get an error with gcc exiting with code 1 use this command "sudo apt-get install python-dev"
8. Do another "sudo pip install cython"
9. Do a "conda install numpy" (might have to close and reopen bash for this)
10. Do a "conda install astropy"
11. Do a "pip install -U scipy" (I'm not sure why it needs the upgraded version but it does)
12. Navigate to the charis code folder again in /mnt/c/
13. Run "sudo python setup.py install"
14. Run "export KMP_AFFINITY=disabled" (this is because there is a known bug in scipy for windows bash. You'll know if it's happening if you get the following errors when you try to run buildcal
```
OMP: Error #100: Fatal system error detected.
OMP: System error #22: Invalid argument
Aborted (core dumped)
```
As far as I can tell you have to do it after you open a new bash terminal)
15. Done, it should work at this point, it worked for me at least. If something doesn't work it's usually something to do with scipy, so you may need to run a sudo "pip install --upgrade pip"
16. Occasionally I have had to do a "alias sudo='sudo env PATH=$PATH'" if Cython decides not to work because it's using the wrong python — I'm assuming the one that came preinstalled.
# Troubleshooting
Most of the installation issues on Macs that have been encountered so far come from slight differences in the versions and the installations of python and gcc. We recommend following the steps above thoroughly. Here are sharing some of the issues and potential solutions.
### Install gcc through Macports
Macports is a good alternative to download and install the correct version of gcc. First, make sure that the latest package definitions are updated with "selfupdate", and then download and install the latest version of gcc5.
~~~
sudo port selfupdate
sudo port install gcc5
~~~
This should have installed the latest version of gcc5. You can check what versions of gcc are available by typing gcc [tab][tab]. For example on a 2017 MacBook Pro with xcode installed, these are the programs that are installed:
~~~
gcc gcc-mp-6 gcc-ranlib-mp-6
gcc-ar-mp-6 gcc-nm-mp-6 gccmakedep
~~~
One would then run
~~~
CC=gcc-mp-6 python setup.py install
~~~
### Install the CHARIS DRP with AstroConda (or if you regular conda installation uses Python 3.xx)
In order to not disturb your installation of Python 3.xx (with which the pipeline is not yet compatible), we can create a separate, isolated Python environment with conda. You can find some instructions on how to do this for example at https://uoa-eresearch.github.io/eresearch-cookbook/recipe/2014/11/20/conda/. Below are the key elements:
~~~
conda create -n py27 python=2.7 anaconda
~~~
And once the installation is complete, you can activate your environment
~~~
source activate py27
~~~
To jump back out of the environment to your regular environment without having to close your terminal, you can type:
~~~
source deactivate py27
~~~
|
PrincetonUniversityREPO_NAMEcharis-depPATH_START.@charis-dep_extracted@charis-dep-main@documentation@source@Installation.ipynb@.PATH_END.py
|
{
"filename": "sigproc.py",
"repo_name": "fjankowsk/scatfit",
"repo_path": "scatfit_extracted/scatfit-master/scatfit/sigproc.py",
"type": "Python"
}
|
#
# SIGPROC filterbank functions.
# 2022 - 2024 Fabian Jankowski
#
from mtcutils import Candidate
import numpy as np
def load_frb_data(filename, dm, fscrunch, tscrunch, norfi):
    """
    Load the FRB data from a SIGPROC filterbank file.

    Parameters
    ----------
    filename: str
        The filterbank file to load.
    dm: float
        The dispersion measure to use to dedisperse the data.
    fscrunch: int
        The number of frequency channels to sum.
    tscrunch: int
        The number of time samples to sum.
    norfi: bool
        If True, skip the RFI excision step.

    Returns
    -------
    cand: ~mtcutils.Candidate
        The candidate FRB data.
    """
    cand = Candidate.from_filterbank(filename)
    cand.normalise()
    # XXX: move the rfi excision methods outside the specific data loader
    if not norfi:
        # calculates and applies both IQRM and ACC1 masks
        chan_mask = cand.apply_chanmask()
        masked = chan_mask.sum()
        print(
            "Channels masked based on stddev (via IQRM) and acc1: {} / {} ({:.2%})".format(
                masked, cand.nchans, masked / cand.nchans
            )
        )
        # z-dot filter
        print("Applying z-dot filter")
        cand.zdot()
    # dedisperse
    cand.set_dm(dm)
    # Scrunch the dynamic spectrum; the 1/sqrt(fscrunch) factor keeps the
    # per-channel noise level comparable after summing channels.
    scrunched = cand.scrunched_data(f=fscrunch, t=tscrunch, select="left") / fscrunch**0.5
    cand.dynspec = scrunched
    # Axis values matching the scrunched resolution.
    cand.tval = np.arange(cand.nsamp // tscrunch) * cand.tsamp * tscrunch
    cand.fval = cand.fch1 + np.arange(cand.nchans // fscrunch) * cand.foff * fscrunch
    return cand
|
fjankowskREPO_NAMEscatfitPATH_START.@scatfit_extracted@scatfit-master@scatfit@sigproc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/library/python/fs/__init__.py",
"type": "Python"
}
|
# coding: utf-8
import codecs
import errno
import logging
import os
import random
import shutil
import six
import stat
import sys
import library.python.func
import library.python.strings
import library.python.windows
logger = logging.getLogger(__name__)
# Python 3 has no WindowsError builtin; alias it to None so code referring to
# the name works uniformly on both Python versions.
try:
    WindowsError
except NameError:
    WindowsError = None
# Retry count for flaky Windows file operations (used with the `diehard`
# decorator below).
_diehard_win_tries = 10
# NOTE(review): presumably normalizes Windows-specific OS errors for the
# decorated fs helpers — confirm against library.python.windows.errorfix.
errorfix_win = library.python.windows.errorfix
class CustomFsError(OSError):
    """OSError variant raised with an explicitly chosen errno code."""

    def __init__(self, errno, message='', filename=None):
        # NOTE: the `errno` parameter shadows the stdlib `errno` module here.
        super(CustomFsError, self).__init__(message)
        self.filename = filename
        self.errno = errno
        # Human-readable description derived from the code.
        self.strerror = os.strerror(errno)
# Directories creation
# If dst is already exists and is a directory - does nothing
# Throws OSError
@errorfix_win
def ensure_dir(path):
    """Create directory `path` (with parents); a pre-existing directory is OK.

    Throws OSError for any other failure.
    """
    try:
        os.makedirs(path)
    except OSError as e:
        # Tolerate only "already exists as a directory".
        if e.errno == errno.EEXIST and os.path.isdir(path):
            return
        raise
# Directories creation
# If dst is already exists and is a directory - does nothing
# Returns path
# Throws OSError
@errorfix_win
def create_dirs(path):
    """Like ensure_dir, but returns `path` for call chaining. Throws OSError."""
    ensure_dir(path)
    return path
# Atomic file/directory move (rename)
# Doesn't guarantee dst replacement
# Atomic if no device boundaries are crossed
# Depends on ctypes on Windows
# Throws OSError
# On Unix, if dst exists:
# if dst is file or empty dir - replaces it
# if src is dir and dst is not dir - throws OSError (errno ENOTDIR)
# if src is dir and dst is non-empty dir - throws OSError (errno ENOTEMPTY)
# if src is file and dst is dir - throws OSError (errno EISDIR)
# On Windows, if dst exists - throws OSError (errno EEXIST)
@errorfix_win
@library.python.windows.diehard(library.python.windows.RETRIABLE_FILE_ERRORS, tries=_diehard_win_tries)
def move(src, dst):
    """Atomically rename `src` to `dst`; retried on Windows for transient errors.

    See the platform caveats in the comment block above. Throws OSError.
    """
    os.rename(src, dst)
# Atomic replacing file move (rename)
# Replaces dst if exists and not a dir
# Doesn't guarantee dst dir replacement
# Atomic if no device boundaries are crossed
# Depends on ctypes on Windows
# Throws OSError
# On Unix, if dst exists:
# if dst is file - replaces it
# if dst is dir - throws OSError (errno EISDIR)
# On Windows, if dst exists:
# if dst is file - replaces it
# if dst is dir - throws OSError (errno EACCES)
@errorfix_win
@library.python.windows.diehard(library.python.windows.RETRIABLE_FILE_ERRORS, tries=_diehard_win_tries)
def replace_file(src, dst):
    """Atomically rename `src` to `dst`, replacing an existing dst file.

    Throws OSError (see platform-specific notes in the comment block above).
    """
    if library.python.windows.on_win():
        # Plain os.rename cannot replace an existing file on Windows.
        library.python.windows.replace_file(src, dst)
    else:
        os.rename(src, dst)
# File/directory replacing move (rename)
# Removes dst if exists
# Non-atomic
# Depends on ctypes on Windows
# Throws OSError
@errorfix_win
def replace(src, dst):
    """Move `src` over `dst`, removing `dst` first when a plain rename cannot.

    Non-atomic when removal is needed. Throws OSError.
    """
    try:
        move(src, dst)
        return
    except OSError as e:
        # Only "dst is in the way" errors trigger the remove-and-retry path.
        if e.errno not in (errno.EEXIST, errno.EISDIR, errno.ENOTDIR, errno.ENOTEMPTY):
            raise
    remove_tree(dst)
    move(src, dst)
# Atomic file remove
# Throws OSError
@errorfix_win
@library.python.windows.diehard(library.python.windows.RETRIABLE_FILE_ERRORS, tries=_diehard_win_tries)
def remove_file(path):
    """Atomically remove the file at `path`; retried on Windows. Throws OSError."""
    os.remove(path)
# Atomic empty directory remove
# Throws OSError
@errorfix_win
@library.python.windows.diehard(library.python.windows.RETRIABLE_DIR_ERRORS, tries=_diehard_win_tries)
def remove_dir(path):
    """Atomically remove the empty directory at `path`. Throws OSError."""
    os.rmdir(path)
def fix_path_encoding(path):
    """Convert `path` to a native str using the filesystem encoding."""
    return library.python.strings.to_str(path, library.python.strings.fs_encoding())
# File/directory remove
# Non-atomic
# Throws OSError, AssertionError
@errorfix_win
def remove_tree(path):
    """Remove the file, symlink, or directory tree at `path`.

    Non-atomic. Throws OSError; AssertionError for unsupported file types
    (e.g. sockets or FIFOs).
    """
    @library.python.windows.diehard(library.python.windows.RETRIABLE_DIR_ERRORS, tries=_diehard_win_tries)
    def rmtree(path):
        if library.python.windows.on_win():
            # Windows needs its own tree removal implementation.
            library.python.windows.rmtree(path)
        else:
            shutil.rmtree(fix_path_encoding(path))
    # lstat: a symlink is removed itself, never followed.
    st = os.lstat(path)
    if stat.S_ISLNK(st.st_mode) or stat.S_ISREG(st.st_mode):
        remove_file(path)
    elif stat.S_ISDIR(st.st_mode):
        rmtree(path)
    else:
        assert False
# File/directory remove ignoring errors
# Non-atomic
@errorfix_win
def remove_tree_safe(path):
    """Best-effort removal of the file or directory tree at `path`.

    Non-atomic. OSErrors are swallowed; UnicodeDecodeError is logged and
    re-raised (see the XXX below).
    """
    try:
        st = os.lstat(path)
        if stat.S_ISLNK(st.st_mode) or stat.S_ISREG(st.st_mode):
            os.remove(path)
        elif stat.S_ISDIR(st.st_mode):
            shutil.rmtree(fix_path_encoding(path), ignore_errors=True)
    # XXX
    except UnicodeDecodeError as e:
        # Use the module-level logger for consistency with the rest of this
        # module (was logging.exception, which logs via the root logger).
        logger.exception(u'remove_tree_safe with argument %s raise exception: %s', path, e)
        raise
    except OSError:
        pass
# File/directory remove
# If path doesn't exist - does nothing
# Non-atomic
# Throws OSError, AssertionError
@errorfix_win
def ensure_removed(path):
    """Remove the file/directory tree at `path`; a missing path is not an error.

    Non-atomic. Throws OSError, AssertionError.
    """
    try:
        remove_tree(path)
    except OSError as e:
        # Only "does not exist" is tolerated.
        if e.errno == errno.ENOENT:
            return
        raise
# Atomic file hardlink
# Dst must not exist
# Depends on ctypes on Windows
# Throws OSError
# If dst exists - throws OSError (errno EEXIST)
@errorfix_win
def hardlink(src, lnk):
    """Atomically hardlink `src` as `lnk`; `lnk` must not exist (EEXIST otherwise)."""
    if library.python.windows.on_win():
        library.python.windows.hardlink(src, lnk)
    else:
        os.link(src, lnk)
# Atomic file/directory symlink (Unix only)
# Dst must not exist
# Throws OSError
# If dst exists - throws OSError (errno EEXIST)
@errorfix_win
def symlink(src, lnk):
    """Atomically symlink `src` as `lnk` (Unix only); `lnk` must not exist.

    On Windows this delegates to a stub that is disabled there.
    """
    if library.python.windows.on_win():
        library.python.windows.run_disabled(src, lnk)
    else:
        os.symlink(src, lnk)
# shutil.copy2 with follow_symlinks=False parameter (Unix only)
def copy2(src, lnk, follow_symlinks=True):
    """shutil.copy2 with a follow_symlinks parameter that also works on Python 2.

    On Python 2 with follow_symlinks=False a symlink is re-created rather
    than its target copied (Unix only).
    """
    if six.PY3:
        shutil.copy2(src, lnk, follow_symlinks=follow_symlinks)
    elif follow_symlinks or not os.path.islink(src):
        shutil.copy2(src, lnk)
    else:
        # Reproduce the symlink itself instead of copying what it points to.
        symlink(os.readlink(src), lnk)
def copy2_safe(src, lnk, follow_symlinks=True):
    """copy2 that silently ignores copying a file onto itself."""
    try:
        copy2(src, lnk, follow_symlinks)
    except shutil.SameFileError:
        pass
@errorfix_win
def hardlink_or_copy(src, lnk, copy_function=copy2):
    """Hardlink `src` as `lnk`, falling back to a copy when linking cannot work."""
    def should_fallback_to_copy(exc):
        # True only for errors meaning "hardlinks are impossible here",
        # e.g. cross-device links or link-count limits.
        if WindowsError is not None and isinstance(exc, WindowsError) and exc.winerror == 1142:  # too many hardlinks
            return True
        # cross-device hardlink or too many hardlinks, or some known WSL error
        if isinstance(exc, OSError) and exc.errno in (
            errno.EXDEV,
            errno.EMLINK,
            errno.EINVAL,
            errno.EACCES,
            errno.EPERM,
        ):
            return True
        return False
    try:
        hardlink(src, lnk)
    except Exception as e:
        # NOTE(review): this logs "will copy it" even when the error is
        # re-raised below without copying.
        logger.debug('Failed to hardlink %s to %s with error %s, will copy it', src, lnk, repr(e))
        if should_fallback_to_copy(e):
            # symlinks are preserved as-is by the fallback copy
            copy_function(src, lnk, follow_symlinks=False)
        else:
            raise
# Recursively hardlink directory
# Uses plain hardlink for files
# Dst must not exist
# Non-atomic
# Throws OSError
@errorfix_win
def hardlink_tree(src, dst, hardlink_function=hardlink, mkdir_function=os.mkdir):
    """Recursively hardlink `src` into `dst` (plain hardlink when src is a file).

    `dst` must not exist. Non-atomic. Throws OSError (ENOENT when src is
    missing).
    """
    if not os.path.exists(src):
        raise CustomFsError(errno.ENOENT, filename=src)
    if os.path.isfile(src):
        hardlink_function(src, dst)
        return
    for dirpath, _, filenames in walk_relative(src):
        # '.' marks the tree root: map it to src/dst themselves, not src/'.'.
        src_dirpath = os.path.join(src, dirpath) if dirpath != '.' else src
        dst_dirpath = os.path.join(dst, dirpath) if dirpath != '.' else dst
        mkdir_function(dst_dirpath)
        for filename in filenames:
            hardlink_function(os.path.join(src_dirpath, filename), os.path.join(dst_dirpath, filename))
# File copy
# throws EnvironmentError (OSError, IOError)
@errorfix_win
def copy_file(src, dst, copy_function=shutil.copy2):
    """Copy file `src` to `dst`; refuses an existing directory dst (EISDIR).

    Throws EnvironmentError (OSError, IOError).
    """
    if os.path.isdir(dst):
        raise CustomFsError(errno.EISDIR, filename=dst)
    copy_function(src, dst)
# File/directory copy
# throws EnvironmentError (OSError, IOError, shutil.Error)
@errorfix_win
def copy_tree(src, dst, copy_function=shutil.copy2):
    """Copy a file or a whole directory tree from `src` to `dst`.

    Throws EnvironmentError (OSError, IOError, shutil.Error).
    """
    if os.path.isfile(src):
        copy_file(src, dst, copy_function=copy_function)
        return
    copytree3(src, dst, copy_function=copy_function)
# File read
# Throws OSError
@errorfix_win
def read_file(path, binary=True):
    """Read and return the whole file (bytes if binary, str otherwise).

    Throws OSError.
    """
    kwargs = {}
    if not binary and six.PY3:
        # Python 3 text mode: decode with the filesystem encoding.
        kwargs['encoding'] = sys.getfilesystemencoding()
    with open(path, 'r' + ('b' if binary else ''), **kwargs) as f:
        return f.read()
# Text file read
# Throws OSError
@errorfix_win
def read_text(path):
    """Read and return the whole file as text. Throws OSError."""
    return read_file(path, binary=False)
# Decoding file read
# Throws OSError
@errorfix_win
def read_file_unicode(path, binary=True, enc='utf-8'):
    """Read the file and return its contents decoded to unicode text.

    With binary=True the bytes are decoded via codecs.open using `enc` and
    the module's error policy; with binary=False the file is opened in text
    mode. Throws OSError.
    """
    if not binary:
        if six.PY2:
            with open(path, 'r') as f:
                return library.python.strings.to_unicode(f.read(), enc)
        else:
            with open(path, 'r', encoding=enc) as f:
                return f.read()
    # codecs.open is always binary
    with codecs.open(path, 'r', encoding=enc, errors=library.python.strings.ENCODING_ERRORS_POLICY) as f:
        return f.read()
@errorfix_win
def open_file(*args, **kwargs):
    """open() that routes through the Windows-aware wrapper on Windows."""
    return (
        library.python.windows.open_file(*args, **kwargs) if library.python.windows.on_win() else open(*args, **kwargs)
    )
# Atomic file write
# Throws OSError
@errorfix_win
def write_file(path, data, binary=True):
    """Atomically write `data` to `path` via a temp file plus rename.

    Parent directories are created as needed; in binary mode a non-bytes
    `data` is UTF-8 encoded first. Throws OSError.
    """
    dir_path = os.path.dirname(path)
    if dir_path:
        ensure_dir(dir_path)
    # Random suffix keeps concurrent writers off each other's temp file.
    tmp_path = path + '.tmp.' + str(random.random())
    with open_file(tmp_path, 'w' + ('b' if binary else '')) as f:
        if not isinstance(data, bytes) and binary:
            data = data.encode('UTF-8')
        f.write(data)
    # Publish atomically by renaming over the destination.
    replace_file(tmp_path, path)
# Atomic text file write
# Throws OSError
@errorfix_win
def write_text(path, data):
    """Atomically write text `data` to `path`. Throws OSError."""
    write_file(path, data, binary=False)
# File size
# Throws OSError
@errorfix_win
def get_file_size(path):
    """Return the size of the file at `path` in bytes. Throws OSError."""
    return os.path.getsize(path)
# File/directory size
# Non-recursive mode for directory counts size for immediates
# While raise_all_errors is set to False, file size fallbacks to zero in case of getsize errors
# Throws OSError
@errorfix_win
def get_tree_size(path, recursive=False, raise_all_errors=False):
    """Total size in bytes of the file or directory tree at `path`.

    Non-recursive mode counts only the directory's immediate files. Unless
    raise_all_errors is set, unreadable files contribute zero and the error
    is logged at debug level. Throws OSError.
    """
    if os.path.isfile(path):
        return get_file_size(path)
    total = 0
    for dir_path, _, filenames in os.walk(path):
        for name in filenames:
            full_path = os.path.join(dir_path, name)
            try:
                total += get_file_size(full_path)
            except OSError as e:
                if raise_all_errors:
                    raise
                logger.debug("Cannot calculate file size: %s", e)
        if not recursive:
            # Only the top-level directory's files are counted.
            break
    return total
# Directory copy ported from Python 3
def copytree3(
    src,
    dst,
    symlinks=False,
    ignore=None,
    copy_function=shutil.copy2,
    ignore_dangling_symlinks=False,
    dirs_exist_ok=False,
):
    """Recursively copy a directory tree.

    The copytree3 is a port of shutil.copytree function from python-3.2.
    It has additional useful parameters and may be helpful while we are
    on python-2.x. It has to be removed as soon as we have moved to
    python-3.2 or higher.

    The destination directory must not already exist (unless dirs_exist_ok
    is set). If exception(s) occur, a shutil.Error is raised with a list of
    (srcname, dstname, message) reasons.

    If the optional `symlinks` flag is true, symbolic links in the source
    tree result in symbolic links in the destination tree; if it is false,
    the contents of the files pointed to by symbolic links are copied.
    A dangling symlink adds an error unless ignore_dangling_symlinks is set.

    The optional `ignore` argument is a callable called as
    ignore(src, names) -> ignored_names, once per visited directory,
    returning names (relative to that directory) that must not be copied.

    The optional `copy_function` is called with (source, destination) for
    each file; shutil.copy2 by default.
    """
    names = os.listdir(src)
    ignored_names = ignore(src, names) if ignore is not None else set()
    if not (dirs_exist_ok and os.path.isdir(dst)):
        os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.islink(srcname):
                linkto = os.readlink(srcname)
                if symlinks:
                    # We can't just leave it to `copy_function` because legacy
                    # code with a custom `copy_function` may rely on copytree3
                    # doing the right thing.
                    os.symlink(linkto, dstname)
                else:
                    # ignore dangling symlink if the flag is on
                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
                        continue
                    # otherwise let the copy occur; copy2 will raise an error
                    copy_function(srcname, dstname)
            elif os.path.isdir(srcname):
                copytree3(srcname, dstname, symlinks, ignore, copy_function, dirs_exist_ok=dirs_exist_ok)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy_function(srcname, dstname)
        # catch the Error from the recursive copytree3 so that we can
        # continue with other files
        except shutil.Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        shutil.copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            # BUGFIX: was errors.extend((src, dst, str(why))), which flattened
            # the triple into three separate list entries; append keeps the
            # (src, dst, message) tuple shape shutil.Error consumers expect.
            errors.append((src, dst, str(why)))
    if errors:
        raise shutil.Error(errors)
def walk_relative(path, topdown=True, onerror=None, followlinks=False):
    """Like os.walk, but yields dirpaths relative to `path` ('.' for the root)."""
    walker = os.walk(path, topdown=topdown, onerror=onerror, followlinks=followlinks)
    for abs_dirpath, dirnames, filenames in walker:
        rel_dirpath = os.path.relpath(abs_dirpath, path)
        yield rel_dirpath, dirnames, filenames
def supports_clone():
    """Report whether the platform supports file cloning (macOS >= 10.13)."""
    if 'darwin' not in sys.platform:
        return False
    import platform
    # Compare the macOS version component-wise against 10.13.
    version = [int(part) for part in platform.mac_ver()[0].split('.')]
    return version >= [10, 13]
def commonpath(paths):
    """Return the longest common path prefix of `paths`, component-wise.

    `paths` must be non-empty; a single path is returned unchanged.
    """
    assert paths
    if len(paths) == 1:
        return next(iter(paths))
    # The lexicographically smallest and largest component lists bracket all
    # others, so their shared prefix is the shared prefix of the whole set.
    parts = [p.split(os.sep) for p in paths]
    lo, hi = min(parts), max(parts)
    prefix = lo
    for idx, comp in enumerate(lo):
        if comp != hi[idx]:
            prefix = lo[:idx]
            break
    return os.path.sep.join(prefix)
def set_execute_bits(filename):
    """Add the execute bits (u+x, g+x, o+x) to `filename`, chmod-ing only if needed."""
    current = os.stat(filename).st_mode
    wanted = current | 0o111
    if wanted != current:
        os.chmod(filename, wanted)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@library@python@fs@__init__.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.