metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "_theory_cmb.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/theory/_theory_cmb.py",
"type": "Python"
}
|
from scipy.interpolate import interp1d
from scipy.integrate import quad
from hazma.cmb import vx_cmb, f_eff_g, f_eff_ep, p_ann_planck_temp_pol
import numpy as np
class TheoryCMB:
    """Mixin adding CMB energy-injection observables to a dark-matter theory.

    NOTE(review): relies on attributes and methods supplied by the host
    class (``mx``, ``gamma_ray_lines``, ``positron_lines``,
    ``total_spectrum``, ``total_positron_spectrum``) — confirm against the
    concrete theory class mixing this in.
    """

    def cmb_limit(self, x_kd=1.0e-4, p_ann=p_ann_planck_temp_pol):
        r"""
        Computes the CMB limit on <sigma v>.

        This is derived by requiring that

        .. math::
            f_{\mathrm{eff}} \langle \sigma v \rangle / m_{\chi} < p_{\mathrm{ann}},

        where :math:`f_{\mathrm{eff}}` is the efficiency with which dark matter
        annihilations around recombination inject energy into the plasma and
        :math:`p_{\mathrm{ann}}` is derived from CMB observations.

        Parameters
        ----------
        x_kd: float
            T_kd / m_x, where T_kd is the dark matter's kinetic decoupling
            temperature. This will be computed self-consistently in future
            versions of ``hazma``.
        p_ann : float
            Constraint on energy release per DM annihilation in cm^3 s^-1
            MeV^-1.

        Returns
        -------
        <sigma v> : float
            Upper bound on <sigma v>, in cm^3 s^-1.
        """
        # TODO: account for non-self-conjugate DM. See discussion in Gondolo
        # and Gelmini.
        # if self.self_conjugate:
        #     factor = 1.0
        # else:
        #     factor = 0.5
        # Saturate the constraint: <sigma v>_max = p_ann * m_x / f_eff.
        return p_ann * self.mx / self.f_eff(x_kd)  # type: ignore

    def _f_eff_helper(self, fs, x_kd=1e-4, mode="quad"):
        """Computes f_eff^gg or f_eff^ep for DM annihilation.

        Parameters
        ----------
        fs : string
            "g g" or "e e", depending on which f_eff the user wants to compute.
        x_kd: float
            T_kd / m_x, where T_kd is the dark matter's kinetic decoupling
            temperature.
        mode : string
            "quad" or "interp". The first mode should be used if fs is "g g"
            ("e e") and none of the gamma ray spectrum functions (positron
            spectrum functions) use RAMBO as it is more accurate.

        Returns
        -------
        f_eff_dm : float
            f_eff for photons or electrons and positrons.
        """
        # Center of mass energy
        mx = self.mx  # type: ignore
        # Leading relative-velocity correction to e_cm = 2 m_x at
        # recombination; v is obtained from the kinetic-decoupling ratio.
        e_cm = 2.0 * mx * (1.0 + 0.5 * vx_cmb(mx, x_kd) ** 2)
        # NOTE(review): f_eff_base / lines / spec_fn are only bound in these
        # two branches — any other value of ``fs`` raises NameError below.
        if fs == "g g":
            f_eff_base = f_eff_g
            lines = self.gamma_ray_lines(e_cm)  # type: ignore
            spec_fn = self.total_spectrum  # type: ignore
        elif fs == "e e":
            f_eff_base = f_eff_ep
            lines = self.positron_lines(e_cm)  # type: ignore

            def spec_fn(es, e_cm):
                # Factor 2 presumably counts electrons as well as positrons
                # — TODO confirm against hazma.cmb conventions.
                return 2.0 * self.total_positron_spectrum(es, e_cm)  # type: ignore

        # Lower bound on integrals. Upper bound is many GeV, so we don't need
        # to do error checking.
        e_min = f_eff_base.x[0]  # type: ignore
        # Continuum contributions from photons. Create an interpolator to avoid
        # recomputing spectrum.
        if mode == "interp":
            # If RAMBO is needed to compute the spectrum, it is prohibitively
            # time-consuming to try integrating the spectrum function. Instead,
            # simultaneously compute the spectrum over a grid of points.
            es = np.geomspace(e_min, e_cm / 2, 1000)
            dnde_tot = spec_fn(es, e_cm)  # type: ignore
            spec_interp = interp1d(es, dnde_tot, bounds_error=False, fill_value=0.0)

            def integrand(e):
                return e * spec_interp(e) * f_eff_base(e)  # type: ignore

            f_eff_dm = quad(integrand, e_min, e_cm / 2, epsabs=0, epsrel=1e-3)[0] / e_cm
        elif mode == "quad":
            # If RAMBO is not needed to compute the spectrum, this will give
            # much cleaner results.
            def integrand(e):
                return e * spec_fn(e, e_cm) * f_eff_base(e)  # type: ignore

            f_eff_dm = quad(integrand, e_min, e_cm / 2, epsabs=0, epsrel=1e-3)[0] / e_cm
        # Sum up line contributions
        f_eff_line_dm = 0.0
        for ch, line in lines.items():  # type: ignore
            energy = line["energy"]
            # Make sure the base f_eff is defined at this energy
            if energy > e_min:
                bf = line["bf"]
                # A line into the requested final state injects two such
                # particles per annihilation; any other channel counts once.
                multiplicity = 2.0 if ch == fs else 1.0
                f_eff_line_dm += (
                    energy
                    * bf
                    * f_eff_base(energy)  # type: ignore
                    * multiplicity
                    / e_cm
                )
        return f_eff_dm + f_eff_line_dm  # type: ignore

    def f_eff_g(self, x_kd=1e-4):
        # Photon contribution to f_eff.  (The method name shadows the
        # module-level interpolation table only inside the class namespace;
        # _f_eff_helper still sees the imported table.)
        return self._f_eff_helper("g g", x_kd, "quad")

    def f_eff_ep(self, x_kd=1e-4):
        # Electron/positron contribution to f_eff.
        return self._f_eff_helper("e e", x_kd, "quad")

    def f_eff(self, x_kd=1.0e-4):
        r"""
        Computes :math:`f_{\mathrm{eff}}` the efficiency with which dark matter
        annihilations around recombination inject energy into the thermal
        plasma.
        """
        return self.f_eff_ep(x_kd) + self.f_eff_g(x_kd)
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@theory@_theory_cmb.py@.PATH_END.py
|
{
"filename": "generic.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/filters/rank/generic.py",
"type": "Python"
}
|
"""
General Description
-------------------
These filters compute the local histogram at each pixel, using a sliding window
similar to the method described in [1]_. A histogram is built using a moving
window in order to limit redundant computation. The moving window follows a
snake-like path:
...------------------------↘
↙--------------------------↙
↘--------------------------...
The local histogram is updated at each pixel as the footprint window
moves by, i.e. only those pixels entering and leaving the footprint
update the local histogram. The histogram size is 8-bit (256 bins) for 8-bit
images and 2- to 16-bit for 16-bit images depending on the maximum value of the
image.
The filter is applied up to the image border, the neighborhood used is
adjusted accordingly. The user may provide a mask image (same size as input
image) where non zero values are the part of the image participating in the
histogram computation. By default the entire image is filtered.
This implementation outperforms :func:`skimage.morphology.dilation`
for large footprints.
Input images will be cast in unsigned 8-bit integer or unsigned 16-bit integer
if necessary. The number of histogram bins is then determined from the maximum
value present in the image. Eventually, the output image is cast in the input
dtype, or the `output_dtype` if set.
To do
-----
* add simple examples, adapt documentation on existing examples
* add/check existing doc
* adapting tests for each type of filter
References
----------
.. [1] Huang, T., Yang, G. and Tang, G., "A fast two-dimensional
median filtering algorithm", IEEE Transactions on Acoustics, Speech and
Signal Processing, Feb 1979. Volume: 27, Issue: 1, Page(s): 13 - 18.
"""
import numpy as np
from scipy import ndimage as ndi
from ..._shared.utils import check_nD, warn
from ...morphology.footprints import _footprint_is_sequence
from ...util import img_as_ubyte
from . import generic_cy
# Public names re-exported by ``skimage.filters.rank``; every filter listed
# here dispatches to a Cython kernel in ``generic_cy``.
__all__ = [
    'autolevel',
    'equalize',
    'gradient',
    'maximum',
    'mean',
    'geometric_mean',
    'subtract_mean',
    'median',
    'minimum',
    'modal',
    'enhance_contrast',
    'pop',
    'threshold',
    'noise_filter',
    'entropy',
    'otsu',
]
def _preprocess_input(
    image,
    footprint=None,
    out=None,
    mask=None,
    out_dtype=None,
    pixel_size=1,
    shift_x=None,
    shift_y=None,
):
    """Preprocess and verify input for filters.rank methods.

    Parameters
    ----------
    image : 2-D array (integer or float)
        Input image.
    footprint : 2-D array (integer or float), optional
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : 2-D array (integer or float), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    out_dtype : data-type, optional
        Desired output data-type. Default is None, which means we cast output
        in input dtype.
    pixel_size : int, optional
        Dimension of each pixel. Default value is 1.
    shift_x, shift_y : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint size (center must be inside of the given footprint).

    Returns
    -------
    image : 2-D array (np.uint8 or np.uint16)
    footprint : 2-D array (np.uint8)
        The neighborhood expressed as a binary 2-D array.
    out : 3-D array (same dtype out_dtype or as input)
        Output array. The two first dimensions are the spatial ones, the third
        one is the pixel vector (length 1 by default).
    mask : 2-D array (np.uint8)
        Mask array that defines (>0) area of the image included in the local
        neighborhood.
    n_bins : int
        Number of histogram bins.
    """
    check_nD(image, 2)
    input_dtype = image.dtype
    # Boolean images have no gray-level histogram to rank over.  The numpy
    # scalar type must be rejected alongside the builtin (the original tuple
    # duplicated ``bool``).
    if input_dtype in (bool, np.bool_) or out_dtype in (bool, np.bool_):
        raise ValueError('dtype cannot be bool.')
    if input_dtype not in (np.uint8, np.uint16):
        message = (
            f'Possible precision loss converting image of type '
            f'{input_dtype} to uint8 as required by rank filters. '
            f'Convert manually using skimage.util.img_as_ubyte to '
            f'silence this warning.'
        )
        warn(message, stacklevel=5)
        image = img_as_ubyte(image)
    if _footprint_is_sequence(footprint):
        raise ValueError(
            "footprint sequences are not currently supported by rank filters"
        )
    footprint = np.ascontiguousarray(img_as_ubyte(footprint > 0))
    if footprint.ndim != image.ndim:
        # A separating space is required between the implicitly-concatenated
        # fragments, otherwise the message reads "...dimensionsdo not match".
        raise ValueError(
            'Image dimensions and neighborhood dimensions do not match'
        )
    image = np.ascontiguousarray(image)
    if mask is not None:
        mask = img_as_ubyte(mask)
        mask = np.ascontiguousarray(mask)
    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")
    if out is None:
        if out_dtype is None:
            out_dtype = image.dtype
        out = np.empty(image.shape + (pixel_size,), dtype=out_dtype)
    else:
        if len(out.shape) == 2:
            out = out.reshape(out.shape + (pixel_size,))
    if image.dtype in (np.uint8, np.int8):
        n_bins = 256
    else:
        # Convert to a Python int to avoid the potential overflow when we add
        # 1 to the maximum of the image.
        n_bins = int(max(3, image.max())) + 1
    if n_bins > 2**10:
        warn(
            f'Bad rank filter performance is expected due to a '
            f'large number of bins ({n_bins}), equivalent to an approximate '
            f'bitdepth of {np.log2(n_bins):.1f}.',
            stacklevel=2,
        )
    for name, value in zip(("shift_x", "shift_y"), (shift_x, shift_y)):
        if np.dtype(type(value)) == bool:
            # "Parameter" was misspelled "Paramter" here; the 3-D variant of
            # this helper already used the correct spelling.
            warn(
                f"Parameter `{name}` is boolean and will be interpreted as int. "
                "This is not officially supported, use int instead.",
                category=UserWarning,
                stacklevel=4,
            )
    return image, footprint, out, mask, n_bins
def _handle_input_3D(
    image,
    footprint=None,
    out=None,
    mask=None,
    out_dtype=None,
    pixel_size=1,
    shift_x=None,
    shift_y=None,
    shift_z=None,
):
    """Preprocess and verify input for filters.rank methods.

    Parameters
    ----------
    image : 3-D array (integer or float)
        Input image.
    footprint : 3-D array (integer or float), optional
        The neighborhood expressed as a 3-D array of 1's and 0's.
    out : 3-D array (integer or float), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    out_dtype : data-type, optional
        Desired output data-type. Default is None, which means we cast output
        in input dtype.
    pixel_size : int, optional
        Dimension of each pixel. Default value is 1.
    shift_x, shift_y, shift_z : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint size (center must be inside of the given footprint).

    Returns
    -------
    image : 3-D array (np.uint8 or np.uint16)
    footprint : 3-D array (np.uint8)
        The neighborhood expressed as a binary 3-D array.
    out : 3-D array (same dtype out_dtype or as input)
        Output array. The two first dimensions are the spatial ones, the third
        one is the pixel vector (length 1 by default).
    mask : 3-D array (np.uint8)
        Mask array that defines (>0) area of the image included in the local
        neighborhood.
    n_bins : int
        Number of histogram bins.
    """
    check_nD(image, 3)
    if image.dtype not in (np.uint8, np.uint16):
        message = (
            f'Possible precision loss converting image of type '
            f'{image.dtype} to uint8 as required by rank filters. '
            f'Convert manually using skimage.util.img_as_ubyte to '
            f'silence this warning.'
        )
        warn(message, stacklevel=2)
        image = img_as_ubyte(image)
    footprint = np.ascontiguousarray(img_as_ubyte(footprint > 0))
    if footprint.ndim != image.ndim:
        # A separating space is required between the implicitly-concatenated
        # fragments, otherwise the message reads "...dimensionsdo not match".
        raise ValueError(
            'Image dimensions and neighborhood dimensions do not match'
        )
    image = np.ascontiguousarray(image)
    if mask is None:
        # Unlike the 2-D helper, the 3-D kernels require an explicit mask.
        mask = np.ones(image.shape, dtype=np.uint8)
    else:
        mask = img_as_ubyte(mask)
        mask = np.ascontiguousarray(mask)
    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")
    if out is None:
        if out_dtype is None:
            out_dtype = image.dtype
        out = np.empty(image.shape + (pixel_size,), dtype=out_dtype)
    else:
        out = out.reshape(out.shape + (pixel_size,))
    is_8bit = image.dtype in (np.uint8, np.int8)
    if is_8bit:
        n_bins = 256
    else:
        # Convert to a Python int to avoid the potential overflow when we add
        # 1 to the maximum of the image.
        n_bins = int(max(3, image.max())) + 1
    if n_bins > 2**10:
        warn(
            f'Bad rank filter performance is expected due to a '
            f'large number of bins ({n_bins}), equivalent to an approximate '
            f'bitdepth of {np.log2(n_bins):.1f}.',
            stacklevel=2,
        )
    for name, value in zip(
        ("shift_x", "shift_y", "shift_z"), (shift_x, shift_y, shift_z)
    ):
        if np.dtype(type(value)) == bool:
            warn(
                f"Parameter `{name}` is boolean and will be interpreted as int. "
                "This is not officially supported, use int instead.",
                category=UserWarning,
                stacklevel=4,
            )
    return image, footprint, out, mask, n_bins
def _apply_scalar_per_pixel(
    func, image, footprint, out, mask, shift_x, shift_y, out_dtype=None
):
    """Run a scalar-output Cython rank kernel over a 2-D image.

    Parameters
    ----------
    func : callable
        Cython function to apply.
    image : 2-D array (integer or float)
        Input image.
    footprint : 2-D array (integer or float)
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : 2-D array (integer or float) or None
        If None, a new array is allocated.
    mask : ndarray (integer or float) or None
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used.
    shift_x, shift_y : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).
    out_dtype : data-type, optional
        Desired output data-type. None keeps the input dtype.

    Returns
    -------
    2-D ndarray
        Filtered image, with the trailing length-1 pixel-vector axis removed.
    """
    # Validate/convert all inputs before handing them to the kernel.
    prepared = _preprocess_input(
        image, footprint, out, mask, out_dtype, shift_x=shift_x, shift_y=shift_y
    )
    image, footprint, out, mask, n_bins = prepared
    func(
        image,
        footprint,
        shift_x=shift_x,
        shift_y=shift_y,
        mask=mask,
        out=out,
        n_bins=n_bins,
    )
    # _preprocess_input allocates out with a trailing axis of length 1.
    return np.squeeze(out, axis=-1)
def _apply_scalar_per_pixel_3D(
    func, image, footprint, out, mask, shift_x, shift_y, shift_z, out_dtype=None
):
    """Run a scalar-output Cython rank kernel over a 3-D image.

    Mirrors :func:`_apply_scalar_per_pixel` for volumetric input: the input
    is validated/converted by :func:`_handle_input_3D`, the kernel ``func``
    fills ``out`` in place, and the trailing pixel-vector axis is dropped
    from the returned array.
    """
    image, footprint, out, mask, n_bins = _handle_input_3D(
        image,
        footprint,
        out,
        mask,
        out_dtype,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
    )
    func(
        image,
        footprint,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
        mask=mask,
        out=out,
        n_bins=n_bins,
    )
    # Drop the trailing pixel-vector axis added by _handle_input_3D.
    return out.reshape(out.shape[:3])
def _apply_vector_per_pixel(
    func, image, footprint, out, mask, shift_x, shift_y, out_dtype=None, pixel_size=1
):
    """Run a vector-output Cython rank kernel over a 2-D image.

    Parameters
    ----------
    func : callable
        Cython function to apply.
    image : 2-D array (integer or float)
        Input image.
    footprint : 2-D array (integer or float)
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : 2-D array (integer or float) or None
        If None, a new array is allocated.
    mask : ndarray (integer or float) or None
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used.
    shift_x, shift_y : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).
    out_dtype : data-type, optional
        Desired output data-type. None keeps the input dtype.
    pixel_size : int, optional
        Dimension of each pixel.

    Returns
    -------
    out : 3-D array of shape (H, W, N)
        (H, W) are the input's spatial dimensions and N is the per-pixel
        vector length (e.g. one histogram bin per entry). The vector sums
        to 1 unless no pixels in the window were covered by both footprint
        and mask, in which case all elements are 0.
    """
    prepared = _preprocess_input(
        image,
        footprint,
        out,
        mask,
        out_dtype,
        pixel_size,
        shift_x=shift_x,
        shift_y=shift_y,
    )
    image, footprint, out, mask, n_bins = prepared
    func(
        image,
        footprint,
        shift_x=shift_x,
        shift_y=shift_y,
        mask=mask,
        out=out,
        n_bins=n_bins,
    )
    # The pixel-vector axis is the payload here, so it is kept.
    return out
def autolevel(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Auto-level image using local histogram.

    Locally stretch the histogram of gray values to cover the entire range
    of values from "white" to "black".

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import autolevel
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> auto = autolevel(img, disk(5))
    >>> auto_vol = autolevel(volume, ball(5))
    """
    arr = np.asanyarray(image)
    if arr.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._autolevel, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
        )
    if arr.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._autolevel_3D, image, footprint,
            out=out, mask=mask,
            shift_x=shift_x, shift_y=shift_y, shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {arr.ndim}.')
def equalize(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Equalize image using local histogram.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import equalize
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> equ = equalize(img, disk(5))
    >>> equ_vol = equalize(volume, ball(5))
    """
    arr = np.asanyarray(image)
    if arr.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._equalize, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
        )
    if arr.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._equalize_3D, image, footprint,
            out=out, mask=mask,
            shift_x=shift_x, shift_y=shift_y, shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {arr.ndim}.')
def gradient(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Return local gradient of an image (i.e. local maximum - local minimum).

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import gradient
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> out = gradient(img, disk(5))
    >>> out_vol = gradient(volume, ball(5))
    """
    arr = np.asanyarray(image)
    if arr.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._gradient, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
        )
    if arr.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._gradient_3D, image, footprint,
            out=out, mask=mask,
            shift_x=shift_x, shift_y=shift_y, shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {arr.ndim}.')
def maximum(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Return local maximum of an image.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    See also
    --------
    skimage.morphology.dilation

    Notes
    -----
    The lower algorithm complexity makes `skimage.filters.rank.maximum`
    more efficient for larger images and footprints.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import maximum
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> out = maximum(img, disk(5))
    >>> out_vol = maximum(volume, ball(5))
    """
    arr = np.asanyarray(image)
    if arr.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._maximum, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
        )
    if arr.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._maximum_3D, image, footprint,
            out=out, mask=mask,
            shift_x=shift_x, shift_y=shift_y, shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {arr.ndim}.')
def mean(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Return local mean of an image.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import mean
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> avg = mean(img, disk(5))
    >>> avg_vol = mean(volume, ball(5))
    """
    arr = np.asanyarray(image)
    if arr.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._mean, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
        )
    if arr.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._mean_3D, image, footprint,
            out=out, mask=mask,
            shift_x=shift_x, shift_y=shift_y, shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {arr.ndim}.')
def geometric_mean(
    image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0
):
    """Return local geometric mean of an image.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import geometric_mean
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> avg = geometric_mean(img, disk(5))
    >>> avg_vol = geometric_mean(volume, ball(5))

    References
    ----------
    .. [1] Gonzalez, R. C. and Woods, R. E. "Digital Image Processing
           (3rd Edition)." Prentice-Hall Inc, 2006.
    """
    # Fixed: the docstring example previously imported `mean` instead of
    # `geometric_mean`, so the doctest did not exercise this function.
    np_image = np.asanyarray(image)
    if np_image.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._geometric_mean,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    elif np_image.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._geometric_mean_3D,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
            shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.')
def subtract_mean(
    image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0
):
    """Return image subtracted from its local mean.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Notes
    -----
    Subtracting the mean value may introduce underflow. To compensate
    this potential underflow, the obtained difference is downscaled by
    a factor of 2 and shifted by `n_bins / 2 - 1`, the median value of
    the local histogram (`n_bins = max(3, image.max()) +1` for 16-bits
    images and 256 otherwise).

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import subtract_mean
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> out = subtract_mean(img, disk(5))
    >>> out_vol = subtract_mean(volume, ball(5))
    """
    arr = np.asanyarray(image)
    if arr.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._subtract_mean, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
        )
    if arr.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._subtract_mean_3D, image, footprint,
            out=out, mask=mask,
            shift_x=shift_x, shift_y=shift_y, shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {arr.ndim}.')
def median(
    image,
    footprint=None,
    out=None,
    mask=None,
    shift_x=0,
    shift_y=0,
    shift_z=0,
):
    """Return local median of an image.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's. If None, a
        full square of size 3 is used.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    See also
    --------
    skimage.filters.median : Implementation of a median filtering which handles
        images with floating precision.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import median
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> med = median(img, disk(5))
    >>> med_vol = median(volume, ball(5))
    """
    np_image = np.asanyarray(image)
    if footprint is None:
        # Use np_image rather than the raw input: `image` may be a plain
        # sequence (e.g. a nested list) without an `ndim` attribute.
        footprint = ndi.generate_binary_structure(np_image.ndim, np_image.ndim)
    if np_image.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._median,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    elif np_image.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._median_3D,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
            shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.')
def minimum(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Return local minimum of an image.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    See also
    --------
    skimage.morphology.erosion

    Notes
    -----
    The lower algorithm complexity makes `skimage.filters.rank.minimum` more
    efficient for larger images and footprints.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import minimum
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> out = minimum(img, disk(5))
    >>> out_vol = minimum(volume, ball(5))
    """
    arr = np.asanyarray(image)
    if arr.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._minimum, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
        )
    if arr.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._minimum_3D, image, footprint,
            out=out, mask=mask,
            shift_x=shift_x, shift_y=shift_y, shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {arr.ndim}.')
def modal(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Return local mode of an image.

    The mode is the value that appears most often in the local histogram.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import modal
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> out = modal(img, disk(5))
    >>> out_vol = modal(volume, ball(5))
    """
    ndim = np.asanyarray(image).ndim
    # Only 2D and 3D rank kernels exist; reject anything else up front.
    if ndim not in (2, 3):
        raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
    if ndim == 2:
        # 2D Cython kernel.
        return _apply_scalar_per_pixel(
            generic_cy._modal,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    # ndim == 3: 3D Cython kernel.
    return _apply_scalar_per_pixel_3D(
        generic_cy._modal_3D,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
    )
def enhance_contrast(
    image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0
):
    """Enhance contrast of an image.

    Each pixel is replaced by the local maximum when its gray value is closer
    to the local maximum than to the local minimum, and by the local minimum
    otherwise.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import enhance_contrast
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> out = enhance_contrast(img, disk(5))
    >>> out_vol = enhance_contrast(volume, ball(5))
    """
    ndim = np.asanyarray(image).ndim
    # Only 2D and 3D rank kernels exist; reject anything else up front.
    if ndim not in (2, 3):
        raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
    if ndim == 2:
        # 2D Cython kernel.
        return _apply_scalar_per_pixel(
            generic_cy._enhance_contrast,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    # ndim == 3: 3D Cython kernel.
    return _apply_scalar_per_pixel_3D(
        generic_cy._enhance_contrast_3D,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
    )
def pop(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Return the local number (population) of pixels.

    The number of pixels is defined as the number of pixels which are included
    in the footprint and the mask.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage.morphology import footprint_rectangle # Need to add 3D example
    >>> import skimage.filters.rank as rank
    >>> img = 255 * np.array([[0, 0, 0, 0, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 0, 0, 0, 0]], dtype=np.uint8)
    >>> rank.pop(img, footprint_rectangle((3, 3)))
    array([[4, 6, 6, 6, 4],
           [6, 9, 9, 9, 6],
           [6, 9, 9, 9, 6],
           [6, 9, 9, 9, 6],
           [4, 6, 6, 6, 4]], dtype=uint8)
    """
    ndim = np.asanyarray(image).ndim
    # Only 2D and 3D rank kernels exist; reject anything else up front.
    if ndim not in (2, 3):
        raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
    if ndim == 2:
        # 2D Cython kernel.
        return _apply_scalar_per_pixel(
            generic_cy._pop,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    # ndim == 3: 3D Cython kernel.
    return _apply_scalar_per_pixel_3D(
        generic_cy._pop_3D,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
    )
def sum(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Return the local sum of pixels.

    Note that the sum may overflow depending on the data type of the input
    array.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage.morphology import footprint_rectangle # Need to add 3D example
    >>> import skimage.filters.rank as rank         # Cube seems to fail but
    >>> img = np.array([[0, 0, 0, 0, 0],            # Ball can pass
    ...                 [0, 1, 1, 1, 0],
    ...                 [0, 1, 1, 1, 0],
    ...                 [0, 1, 1, 1, 0],
    ...                 [0, 0, 0, 0, 0]], dtype=np.uint8)
    >>> rank.sum(img, footprint_rectangle((3, 3)))
    array([[1, 2, 3, 2, 1],
           [2, 4, 6, 4, 2],
           [3, 6, 9, 6, 3],
           [2, 4, 6, 4, 2],
           [1, 2, 3, 2, 1]], dtype=uint8)
    """
    ndim = np.asanyarray(image).ndim
    # Only 2D and 3D rank kernels exist; reject anything else up front.
    if ndim not in (2, 3):
        raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
    if ndim == 2:
        # 2D Cython kernel.
        return _apply_scalar_per_pixel(
            generic_cy._sum,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    # ndim == 3: 3D Cython kernel.
    return _apply_scalar_per_pixel_3D(
        generic_cy._sum_3D,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
    )
def threshold(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Local threshold of an image.

    The resulting binary mask is True if the gray value of the center pixel is
    greater than the local mean.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage.morphology import footprint_rectangle # Need to add 3D example
    >>> from skimage.filters.rank import threshold
    >>> img = 255 * np.array([[0, 0, 0, 0, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 0, 0, 0, 0]], dtype=np.uint8)
    >>> threshold(img, footprint_rectangle((3, 3)))
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 0, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]], dtype=uint8)
    """
    ndim = np.asanyarray(image).ndim
    # Only 2D and 3D rank kernels exist; reject anything else up front.
    if ndim not in (2, 3):
        raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
    if ndim == 2:
        # 2D Cython kernel.
        return _apply_scalar_per_pixel(
            generic_cy._threshold,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    # ndim == 3: 3D Cython kernel.
    return _apply_scalar_per_pixel_3D(
        generic_cy._threshold_3D,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
    )
def noise_filter(
    image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0
):
    """Noise feature.

    Computes a per-pixel noise measure over the neighborhood with the
    *center pixel excluded* (the footprint is copied and its center is
    zeroed before the kernel runs).

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    References
    ----------
    .. [1] N. Hashimoto et al. Referenceless image quality evaluation
           for whole slide imaging. J Pathol Inform 2012;3:9.

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk, ball
    >>> from skimage.filters.rank import noise_filter
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> out = noise_filter(img, disk(5))
    >>> out_vol = noise_filter(volume, ball(5))
    """
    np_image = np.asanyarray(image)
    # Footprint sequences (decomposed footprints) have no single center
    # pixel to zero out, so they are rejected explicitly.
    if _footprint_is_sequence(footprint):
        raise ValueError(
            "footprint sequences are not currently supported by rank filters"
        )
    if np_image.ndim == 2:
        # ensure that the central pixel in the footprint is empty
        # (the shift offsets move the effective center accordingly)
        centre_r = int(footprint.shape[0] / 2) + shift_y
        centre_c = int(footprint.shape[1] / 2) + shift_x
        # make a local copy so the caller's footprint is not mutated
        footprint_cpy = footprint.copy()
        footprint_cpy[centre_r, centre_c] = 0
        return _apply_scalar_per_pixel(
            generic_cy._noise_filter,
            image,
            footprint_cpy,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    elif np_image.ndim == 3:
        # ensure that the central pixel in the footprint is empty
        # NOTE(review): the axis-to-shift mapping here (shape[0] -> shift_y,
        # shape[1] -> shift_x, shape[2] -> shift_z) mirrors the 2D case with
        # z appended on the last axis — confirm this matches the 3D kernel's
        # (plane, row, column) convention.
        centre_r = int(footprint.shape[0] / 2) + shift_y
        centre_c = int(footprint.shape[1] / 2) + shift_x
        centre_z = int(footprint.shape[2] / 2) + shift_z
        # make a local copy so the caller's footprint is not mutated
        footprint_cpy = footprint.copy()
        footprint_cpy[centre_r, centre_c, centre_z] = 0
        return _apply_scalar_per_pixel_3D(
            generic_cy._noise_filter_3D,
            image,
            footprint_cpy,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
            shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.')
def entropy(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Local entropy.

    The entropy is computed using base 2 logarithm i.e. the filter returns the
    minimum number of bits needed to encode the local gray level
    distribution.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (float)
        Output image.

    References
    ----------
    .. [1] `https://en.wikipedia.org/wiki/Entropy_(information_theory) <https://en.wikipedia.org/wiki/Entropy_(information_theory)>`_

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.filters.rank import entropy
    >>> from skimage.morphology import disk, ball
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> ent = entropy(img, disk(5))
    >>> ent_vol = entropy(volume, ball(5))
    """
    ndim = np.asanyarray(image).ndim
    # Only 2D and 3D rank kernels exist; reject anything else up front.
    if ndim not in (2, 3):
        raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
    if ndim == 2:
        # 2D Cython kernel; entropy is real-valued, hence float64 output.
        return _apply_scalar_per_pixel(
            generic_cy._entropy,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
            out_dtype=np.float64,
        )
    # ndim == 3: 3D Cython kernel, also float64 output.
    return _apply_scalar_per_pixel_3D(
        generic_cy._entropy_3D,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
        out_dtype=np.float64,
    )
def otsu(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
    """Local Otsu's threshold value for each pixel.

    Parameters
    ----------
    image : ([P,] M, N) ndarray (uint8, uint16)
        Input image.
    footprint : ndarray
        The neighborhood expressed as an ndarray of 1's and 0's.
    out : ([P,] M, N) array (same dtype as input), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y, shift_z : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : ([P,] M, N) ndarray (same dtype as input image)
        Output image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Otsu's_method

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.filters.rank import otsu
    >>> from skimage.morphology import disk, ball
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> local_otsu = otsu(img, disk(5))
    >>> thresh_image = img >= local_otsu
    >>> local_otsu_vol = otsu(volume, ball(5))
    >>> thresh_image_vol = volume >= local_otsu_vol
    """
    ndim = np.asanyarray(image).ndim
    # Only 2D and 3D rank kernels exist; reject anything else up front.
    if ndim not in (2, 3):
        raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
    if ndim == 2:
        # 2D Cython kernel.
        return _apply_scalar_per_pixel(
            generic_cy._otsu,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    # ndim == 3: 3D Cython kernel.
    return _apply_scalar_per_pixel_3D(
        generic_cy._otsu_3D,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
    )
def windowed_histogram(
    image, footprint, out=None, mask=None, shift_x=0, shift_y=0, n_bins=None
):
    """Normalized sliding window histogram

    Parameters
    ----------
    image : 2-D array (integer or float)
        Input image.
    footprint : 2-D array (integer or float)
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : 2-D array (integer or float), optional
        If None, a new array is allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).
    n_bins : int or None
        The number of histogram bins. Will default to ``image.max() + 1``
        if None is passed.

    Returns
    -------
    out : 3-D array (float)
        Array of dimensions (H,W,N), where (H,W) are the dimensions of the
        input image and N is n_bins or ``image.max() + 1`` if no value is
        provided as a parameter. Effectively, each pixel is a N-D feature
        vector that is the histogram. The sum of the elements in the feature
        vector will be 1, unless no pixels in the window were covered by both
        footprint and mask, in which case all elements will be 0.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.filters.rank import windowed_histogram
    >>> from skimage.morphology import disk, ball
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> hist_img = windowed_histogram(img, disk(5))
    """
    if n_bins is None:
        # Coerce to ndarray first (consistent with the other rank filters)
        # so array-like inputs without a ``.max()`` method also work.
        n_bins = int(np.asanyarray(image).max()) + 1

    return _apply_vector_per_pixel(
        generic_cy._windowed_hist,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        out_dtype=np.float64,
        pixel_size=n_bins,
    )
def majority(
    image,
    footprint,
    *,
    out=None,
    mask=None,
    shift_x=0,
    shift_y=0,
    shift_z=0,
):
    """Assign to each pixel the most common value within its neighborhood.

    Parameters
    ----------
    image : ndarray
        Image array (uint8, uint16 array).
    footprint : 2-D array (integer or float)
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : ndarray (integer or float), optional
        If None, a new array will be allocated.
    mask : ndarray (integer or float), optional
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y : int, optional
        Offset added to the footprint center point. Shift is bounded to the
        footprint sizes (center must be inside the given footprint).

    Returns
    -------
    out : 2-D array (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.filters.rank import majority
    >>> from skimage.morphology import disk, ball
    >>> import numpy as np
    >>> img = data.camera()
    >>> rng = np.random.default_rng()
    >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8)
    >>> maj_img = majority(img, disk(5))
    >>> maj_img_vol = majority(volume, ball(5))
    """
    ndim = np.asanyarray(image).ndim
    # Only 2D and 3D rank kernels exist; reject anything else up front.
    if ndim not in (2, 3):
        raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
    if ndim == 2:
        # 2D Cython kernel.
        return _apply_scalar_per_pixel(
            generic_cy._majority,
            image,
            footprint,
            out=out,
            mask=mask,
            shift_x=shift_x,
            shift_y=shift_y,
        )
    # ndim == 3: 3D Cython kernel.
    return _apply_scalar_per_pixel_3D(
        generic_cy._majority_3D,
        image,
        footprint,
        out=out,
        mask=mask,
        shift_x=shift_x,
        shift_y=shift_y,
        shift_z=shift_z,
    )
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@filters@rank@generic.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/insidetextfont/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info >= (3, 7) and not TYPE_CHECKING:
    # At runtime on modern Python, defer importing each validator until it is
    # first accessed (module-level __getattr__, PEP 562) to keep import cheap.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._weight.WeightValidator",
            "._variant.VariantValidator",
            "._textcase.TextcaseValidator",
            "._style.StyleValidator",
            "._size.SizeValidator",
            "._shadow.ShadowValidator",
            "._lineposition.LinepositionValidator",
            "._family.FamilyValidator",
            "._color.ColorValidator",
        ],
    )
else:
    # Python < 3.7 (no module __getattr__) or static type checking:
    # import every validator eagerly so names resolve directly.
    from ._weight import WeightValidator
    from ._variant import VariantValidator
    from ._textcase import TextcaseValidator
    from ._style import StyleValidator
    from ._size import SizeValidator
    from ._shadow import ShadowValidator
    from ._lineposition import LinepositionValidator
    from ._family import FamilyValidator
    from ._color import ColorValidator
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@insidetextfont@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "carronj/LensIt",
"repo_path": "LensIt_extracted/LensIt-master/lensit/sims/__init__.py",
"type": "Python"
}
|
carronjREPO_NAMELensItPATH_START.@LensIt_extracted@LensIt-master@lensit@sims@__init__.py@.PATH_END.py
|
|
{
"filename": "util_test.py",
"repo_name": "aymgal/COOLEST",
"repo_path": "COOLEST_extracted/COOLEST-main/test/api/util_test.py",
"type": "Python"
}
|
__author__ = 'aymgal'
import pytest
import numpy as np
import numpy.testing as npt
from coolest.api import util
@pytest.mark.parametrize("field_of_view_x", [[0., 2.], [-1., 1.], [-2., 0.]])
@pytest.mark.parametrize("field_of_view_y", [[0., 2.], [-1., 1.], [-2., 0.]])
@pytest.mark.parametrize("num_pix_x", [10, 11])
@pytest.mark.parametrize("num_pix_y", [10, 11])
def test_get_coordinates_from_regular_grid(field_of_view_x, field_of_view_y, num_pix_x, num_pix_y):
    """Check that a grid built from a field of view round-trips its settings.

    Parametrized over off-center fields of view and both even and odd pixel
    counts per axis.
    """
    # tests that the coordinates object has the same center and FoV as the original settings
    coordinates = util.get_coordinates_from_regular_grid(field_of_view_x, field_of_view_y, num_pix_x, num_pix_y)
    # total number of grid points must equal the product of per-axis counts
    assert coordinates.num_points == num_pix_x*num_pix_y
    # plt_extent is (xmin, xmax, ymin, ymax); recover each axis' FoV from it
    plt_extent = coordinates.plt_extent
    retrieved_field_of_view_x = [plt_extent[0], plt_extent[1]]
    npt.assert_allclose(retrieved_field_of_view_x, field_of_view_x, atol=1e-8)
    retrieved_field_of_view_y = [plt_extent[2], plt_extent[3]]
    npt.assert_allclose(retrieved_field_of_view_y, field_of_view_y, atol=1e-8)
    # the grid center is the midpoint of each field of view
    npt.assert_allclose(coordinates.center, (np.mean(field_of_view_x), np.mean(field_of_view_y)), atol=1e-8)
|
aymgalREPO_NAMECOOLESTPATH_START.@COOLEST_extracted@COOLEST-main@test@api@util_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "tomasstolker/species",
"repo_path": "species_extracted/species-main/species/__init__.py",
"type": "Python"
}
|
# Re-export the package entry point at the top level.
from species.core.species_init import SpeciesInit

# Package metadata.
__author__ = "Tomas Stolker"
__license__ = "MIT"
__version__ = "0.9.0"
__maintainer__ = "Tomas Stolker"
__email__ = "stolker@strw.leidenuniv.nl"
__status__ = "Development"
|
tomasstolkerREPO_NAMEspeciesPATH_START.@species_extracted@species-main@species@__init__.py@.PATH_END.py
|
{
"filename": "_valueminus.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/error_y/_valueminus.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ValueminusValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `scatter3d.error_y.valueminus` property.

    Auto-generated plotly validator: accepts a non-negative number and
    triggers a 'calc' edit when the value changes.
    """

    def __init__(
        self, plotly_name="valueminus", parent_name="scatter3d.error_y", **kwargs
    ):
        super(ValueminusValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # callers may override these via kwargs; otherwise use defaults
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 0),  # error bar extents cannot be negative
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@error_y@_valueminus.py@.PATH_END.py
|
{
"filename": "test_antenna_uvw_decomposition.py",
"repo_name": "ratt-ru/montblanc",
"repo_path": "montblanc_extracted/montblanc-master/montblanc/tests/test_antenna_uvw_decomposition.py",
"type": "Python"
}
|
import unittest
from pprint import pformat
from six.moves import range
import numpy as np
from montblanc.util import antenna_uvw
class TestAntennaUvWDecomposition(unittest.TestCase):
    """Tests for recovering per-antenna UVW coordinates from baseline UVWs.

    Each test builds random per-antenna UVW coordinates, forms the
    per-baseline differences, and asks ``antenna_uvw`` (with
    ``check_decomposition=True``) to recover a consistent per-antenna
    decomposition — the check inside ``antenna_uvw`` raises on failure.
    """

    def test_uvw_antenna(self):
        """Full triangle of baselines, with and without auto-correlations."""
        na = 17
        ntime = 1

        # For both auto correlations and without them
        for auto_cor in (0, 1):
            # Compute default antenna pairs
            ant1, ant2 = np.triu_indices(na, auto_cor)

            # Create random per-antenna UVW coordinates.
            # zeroing the first antenna
            ant_uvw = np.random.random(size=(ntime, na, 3)).astype(np.float64)
            ant_uvw[0, 0, :] = 0

            # One chunk covering all baselines of this (single) time step.
            time_chunks = np.array([ant1.size], dtype=ant1.dtype)

            # Compute per-baseline UVW coordinates.
            bl_uvw = (ant_uvw[:, ant1, :] - ant_uvw[:, ant2, :]).reshape(-1, 3)

            # Now recover the per-antenna and per-baseline UVW coordinates.
            rant_uvw = antenna_uvw(bl_uvw, ant1, ant2, time_chunks,
                                   nr_of_antenna=na, check_decomposition=True)

    def test_uvw_disjoint(self):
        """Initially disjoint antenna sets linked by one bridging baseline."""
        # Three initially disjoint baselines here, but the last baseline [2, 9]
        # connects the first and the last

        # Set 1: 0, 1, 2, 3
        # Set 2: 4, 5, 6, 7, 8
        # Set 3: 8, 10, 11, 12
        # Connection between Set 1 and Set 3 is the last baseline [2, 9]
        ant1 = np.array([1, 2, 3, 4, 5, 5, 7, 9, 10, 11, 2])
        ant2 = np.array([2, 2, 0, 5, 5, 6, 8, 10, 11, 12, 9])

        na = np.unique(np.concatenate([ant1, ant2])).size
        ntime = 1

        # Create random per-antenna UVW coordinates.
        # zeroing the first antenna
        ant_uvw = np.random.random(size=(ntime, na, 3)).astype(np.float64)
        ant_uvw[0, 0, :] = 0

        time_chunks = np.array([ant1.size], dtype=ant1.dtype)

        # Compute per-baseline UVW coordinates.
        bl_uvw = (ant_uvw[:, ant1, :] - ant_uvw[:, ant2, :]).reshape(-1, 3)

        # Now recover the per-antenna and per-baseline UVW coordinates.
        rant_uvw = antenna_uvw(bl_uvw, ant1, ant2, time_chunks,
                               nr_of_antenna=na, check_decomposition=True)

    def test_uvw_antenna_missing_bl_impl(self):
        """Multiple time chunks, each with different antennas flagged out."""
        na = 17
        # Antennas removed (flagged) in each of the three time chunks.
        removed_ants_per_time = ([0, 1, 7], [2, 10, 15, 9], [3, 6, 9, 12])

        # For both auto correlations and without them
        for auto_cor in (0, 1):
            def _create_ant_arrays():
                # Yield, per time chunk: the surviving antennas, the removed
                # antennas, and the shuffled baseline index arrays with all
                # baselines touching a removed antenna filtered out.
                for remove_ants in removed_ants_per_time:
                    # Compute default antenna pairs
                    ant1, ant2 = np.triu_indices(na, auto_cor)

                    # Shuffle the antenna indices
                    idx = np.arange(ant1.size)
                    np.random.shuffle(idx)
                    ant1 = ant1[idx]
                    ant2 = ant2[idx]

                    # Remove any baselines containing flagged antenna
                    reduce_tuple = tuple(a != ra for a in (ant1, ant2)
                                         for ra in remove_ants)

                    keep = np.logical_and.reduce(reduce_tuple)
                    ant1 = ant1[keep]
                    ant2 = ant2[keep]

                    valid_ants = list(set(range(na)).difference(remove_ants))

                    yield valid_ants, remove_ants, ant1, ant2

            tup = zip(*list(_create_ant_arrays()))
            valid_ants, remove_ants, ant1, ant2 = tup

            bl_uvw = []

            # Create per-baseline UVW coordinates for each time chunk
            it = enumerate(zip(valid_ants, remove_ants, ant1, ant2))
            for t, (va, ra, a1, a2) in it:
                # Create random per-antenna UVW coordinates.
                # zeroing the first valid antenna
                ant_uvw = np.random.random(size=(na, 3)).astype(np.float64)
                ant_uvw[va[0], :] = 0

                # Create per-baseline UVW coordinates for this time chunk
                bl_uvw.append(ant_uvw[a1, :] - ant_uvw[a2, :])

            # Produced concatenated antenna and baseline uvw arrays
            time_chunks = np.array([a.size for a in ant1], dtype=ant1[0].dtype)
            cant1 = np.concatenate(ant1)
            cant2 = np.concatenate(ant2)
            cbl_uvw = np.concatenate(bl_uvw)

            # Now recover the per-antenna and per-baseline UVW coordinates
            # for the ntime chunks
            rant_uvw = antenna_uvw(cbl_uvw, cant1, cant2, time_chunks,
                                   nr_of_antenna=na, check_decomposition=True)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
ratt-ruREPO_NAMEmontblancPATH_START.@montblanc_extracted@montblanc-master@montblanc@tests@test_antenna_uvw_decomposition.py@.PATH_END.py
|
{
"filename": "widget_bool.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipywidgets/py3/ipywidgets/widgets/widget_bool.py",
"type": "Python"
}
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Bool class.
Represents a boolean using a widget.
"""
from .widget_description import DescriptionStyle, DescriptionWidget
from .widget_core import CoreWidget
from .valuewidget import ValueWidget
from .widget import register, widget_serialization
from .trait_types import Color, InstanceDict
from traitlets import Unicode, Bool, CaselessStrEnum
@register
class CheckboxStyle(DescriptionStyle, CoreWidget):
    """Checkbox widget style."""
    # Model name synced to the front-end counterpart.
    _model_name = Unicode('CheckboxStyleModel').tag(sync=True)
    background = Unicode(None, allow_none=True, help="Background specifications.").tag(sync=True)
@register
class ToggleButtonStyle(DescriptionStyle, CoreWidget):
    """ToggleButton widget style."""
    # Model name synced to the front-end counterpart.
    _model_name = Unicode('ToggleButtonStyleModel').tag(sync=True)
    # CSS-like font and text styling knobs; None means "use the default".
    font_family = Unicode(None, allow_none=True, help="Toggle button text font family.").tag(sync=True)
    font_size = Unicode(None, allow_none=True, help="Toggle button text font size.").tag(sync=True)
    font_style = Unicode(None, allow_none=True, help="Toggle button text font style.").tag(sync=True)
    font_variant = Unicode(None, allow_none=True, help="Toggle button text font variant.").tag(sync=True)
    font_weight = Unicode(None, allow_none=True, help="Toggle button text font weight.").tag(sync=True)
    text_color = Color(None, allow_none=True, help="Toggle button text color").tag(sync=True)
    text_decoration = Unicode(None, allow_none=True, help="Toggle button text decoration.").tag(sync=True)
class _Bool(DescriptionWidget, ValueWidget, CoreWidget):
    """A base class for creating widgets that represent booleans."""
    value = Bool(False, help="Bool value").tag(sync=True)
    disabled = Bool(False, help="Enable or disable user changes.").tag(sync=True)

    def __init__(self, value=None, **kwargs):
        """Accept the boolean value positionally; forward the rest as traits."""
        if value is not None:
            # Only override the trait default when a value was actually given.
            kwargs['value'] = value
        super().__init__(**kwargs)

    _model_name = Unicode('BoolModel').tag(sync=True)
@register
class Checkbox(_Bool):
    """Displays a boolean `value` in the form of a checkbox.

    Parameters
    ----------
    value : {True,False}
        value of the checkbox: True-checked, False-unchecked
    description : str
        description displayed next to the checkbox
    indent : {True,False}
        indent the control to align with other controls with a description. The style.description_width attribute controls this width for consistence with other controls.
    """
    _view_name = Unicode('CheckboxView').tag(sync=True)
    _model_name = Unicode('CheckboxModel').tag(sync=True)
    indent = Bool(True, help="Indent the control to align with other controls with a description.").tag(sync=True)
    style = InstanceDict(CheckboxStyle, help="Styling customizations").tag(sync=True, **widget_serialization)
@register
class ToggleButton(_Bool):
    """Displays a boolean `value` in the form of a toggle button.

    Parameters
    ----------
    value : {True,False}
        value of the toggle button: True-pressed, False-unpressed
    description : str
        description displayed on the button
    icon: str
        font-awesome icon name
    style: instance of DescriptionStyle
        styling customizations
    button_style: enum
        button predefined styling
    """
    _view_name = Unicode('ToggleButtonView').tag(sync=True)
    _model_name = Unicode('ToggleButtonModel').tag(sync=True)
    icon = Unicode('', help= "Font-awesome icon.").tag(sync=True)
    # Restricted to the Bootstrap-like set of predefined button styles.
    button_style = CaselessStrEnum(
        values=['primary', 'success', 'info', 'warning', 'danger', ''], default_value='',
        help="""Use a predefined styling for the button.""").tag(sync=True)
    style = InstanceDict(ToggleButtonStyle, help="Styling customizations").tag(sync=True, **widget_serialization)
@register
class Valid(_Bool):
    """Displays a boolean `value` in the form of a green check (True / valid)
    or a red cross (False / invalid).

    Parameters
    ----------
    value: {True,False}
        value of the Valid widget
    """
    # Text shown next to the red cross when value is False.
    readout = Unicode('Invalid', help="Message displayed when the value is False").tag(sync=True)
    _view_name = Unicode('ValidView').tag(sync=True)
    _model_name = Unicode('ValidModel').tag(sync=True)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipywidgets@py3@ipywidgets@widgets@widget_bool.py@.PATH_END.py
|
{
"filename": "12497_rval_150015.py",
"repo_name": "shreeyesh-biswal/Rvalue_3D",
"repo_path": "Rvalue_3D_extracted/Rvalue_3D-main/Codes/M-class/AR_12497/12497_rval_150015.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 2 09:09:21 2023

@author: shreeyeshbiswal

Plot the time evolution of the log R-value of AR 12497 at ten altitudes as
a stack of ten scatter panels sharing a single colour bar.  One value file
per observation time is read from the AR sub-directories.
"""
import os
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure

# Active-region identifier and input locations.
AR = "12497"
core_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/"
base_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR

# One sub-directory per observation time; sorted so index == time order.
dir_list = sorted(os.listdir(base_dir))
n = len(dir_list)
m = 10  # values per file
d = '15'
th = '150'

# rval_matrix[i, j] = log R-value at time index i, altitude index j.
rval_matrix = np.zeros(shape=(n,m))
index = np.arange(0,n)        # NOTE(review): unused below — confirm it can be dropped
height = np.arange(0,m)*0.36  # NOTE(review): unused; altitude is recomputed later

P4 = 'Log of R-value (Mx); AR ' + AR
colorbarticks = [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]  # NOTE(review): unused; ticks come from cbar_min/cbar_max
cbar_min = 15
cbar_max = 23
flare_time = 106.60  #M1  (time index of the M1 flare, marked with dashed lines)

# Load one R-value file per time step; missing directories become NaN rows.
for i in range(0,n):
    Time_tag = dir_list[i]
    Time = Time_tag[0:19]
    Hour = Time[11:13]
    print(Time)
    dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR + "/" + Time_tag
    os.chdir(dir)
    # the if-else statement takes care of missing data
    if len(os.listdir(dir)) != 0:
        rval = np.loadtxt("PF_ext_rvals_150015_" + Time + ".dat")
        rval = rval + 15.1172 # LOG FACTOR FOR 1.3141 x 10^15
        print(rval)
        print(np.shape(rval))
        rval_matrix[i,:] = rval
        print(Hour)
    else:
        rval_matrix[i,:] = np.nan
        print("Empty directory")
os.chdir(core_dir)

x = np.arange(0,n)
# NOTE(review): dpi=100000 looks like a typo (100?); this figure is also
# immediately superseded by plt.subplots below.
figure(figsize=(10,10), dpi=100000)
# NOTE(review): rebinds the imported `figure` function to the Figure object.
figure, axs = plt.subplots(10)
figure.set_figheight(15)
figure.set_figwidth(9)
cm = plt.cm.get_cmap('afmhot')
mpl.rc('xtick', labelsize=13)

# Plot
# Keep a handle to one scatter so the shared colour bar can be built from it.
sc = axs[0].scatter(x, rval_matrix[:,9], c = rval_matrix[:,9], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
# Panel 0 is the highest altitude (column 9); panels descend in altitude.
for i in range(0,m):
    axs[i].scatter(x, rval_matrix[:,9-i], c = rval_matrix[:,9-i], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
for i in range(0,m):
    axs[i].set_ylim([cbar_min, cbar_max])
plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]);
axs[9].tick_params(axis='x', labelsize=16)
axs[9].set_xticks(np.arange(0,n,24))

# Hide the ylims of individual boxes
for i in range(0,m):
    axs[i].set_yticks([])

# Show heights in the altitude
heightfont = 16
for i in range(0,m):
    max_alt = (m-1)*0.36
    altitude = max_alt-(i*0.36)
    alt_str = "{:.2f}".format(altitude)
    axs[i].set_ylabel(alt_str + ' ', fontsize = heightfont, rotation = 0)

# Show flare occurence in dotted lines
for i in range(0,m):
    axs[i].axvline(x = flare_time, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.40)# Show heights in the altitude

# Orient the text
st = dir_list[0]
start_time = st[0:4] + '/' + st[5:7] + '/' + st[8:10] + '/' + st[11:13] + ':' + st[14:16]
axs[0].text(12, (cbar_max + (0.35*(cbar_max - cbar_min))), P4, fontsize=23)
axs[5].text(-42, cbar_min + 0.5*(cbar_max - cbar_min), 'Height (Mm)', rotation = 90, fontsize=18)
axs[9].text(-18, (cbar_min - (0.65*(cbar_max - cbar_min))), 'Time after ' + start_time + ' (hrs)' + '; ($B_{th}$, $D_{sep}$) = ' + '(' + th + ',' + d + ')', rotation = 0, fontsize=18)

# Shared colour bar on the right of all ten panels.
figure.subplots_adjust(right=0.80)
cbar_ax = figure.add_axes([0.85, 0.15, 0.05, 0.7])
cbar_ax.tick_params(labelsize=16)
figure.colorbar(sc, cax=cbar_ax, ticks=range(cbar_min,cbar_max+1,1))
plt.subplots_adjust(wspace=0.5, hspace=0)
plt.show()
mpl.rcParams.update(mpl.rcParamsDefault)
|
shreeyesh-biswalREPO_NAMERvalue_3DPATH_START.@Rvalue_3D_extracted@Rvalue_3D-main@Codes@M-class@AR_12497@12497_rval_150015.py@.PATH_END.py
|
{
"filename": "lambertw.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/special/lambertw.py",
"type": "Python"
}
|
from __future__ import division, print_function, absolute_import
from ._ufuncs import _lambertw
def lambertw(z, k=0, tol=1e-8):
    r"""
    lambertw(z, k=0, tol=1e-8)

    Lambert W function.

    The Lambert W function `W(z)` is defined as the inverse function
    of ``w * exp(w)``. In other words, the value of ``W(z)`` is
    such that ``z = W(z) * exp(W(z))`` for any complex number
    ``z``.

    The Lambert W function is a multivalued function with infinitely
    many branches. Each branch gives a separate solution of the
    equation ``z = w exp(w)``. Here, the branches are indexed by the
    integer `k`.

    Parameters
    ----------
    z : array_like
        Input argument.
    k : int, optional
        Branch index.
    tol : float, optional
        Evaluation tolerance.

    Returns
    -------
    w : array
        `w` will have the same shape as `z`.

    Notes
    -----
    All branches are supported by `lambertw`:

    * ``lambertw(z)`` gives the principal solution (branch 0)
    * ``lambertw(z, k)`` gives the solution on branch `k`

    The Lambert W function has two partially real branches: the
    principal branch (`k = 0`) is real for real ``z > -1/e``, and the
    ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except
    ``k = 0`` have a logarithmic singularity at ``z = 0``.

    **Possible issues**

    The evaluation can become inaccurate very close to the branch point
    at ``-1/e``. In some corner cases, `lambertw` might currently
    fail to converge, or can end up on the wrong branch.

    **Algorithm**

    Halley's iteration is used to invert ``w * exp(w)``, using a first-order
    asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate.

    The definition, implementation and choice of branches is based on [2]_.

    See Also
    --------
    wrightomega : the Wright Omega function

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Lambert_W_function
    .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
       (1996) 329-359.
       http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf

    Examples
    --------
    The Lambert W function is the inverse of ``w exp(w)``:

    >>> from scipy.special import lambertw
    >>> w = lambertw(1)
    >>> w
    (0.56714329040978384+0j)
    >>> w * np.exp(w)
    (1.0+0j)

    Any branch gives a valid inverse:

    >>> w = lambertw(1, k=3)
    >>> w
    (-2.8535817554090377+17.113535539412148j)
    >>> w*np.exp(w)
    (1.0000000000000002+1.609823385706477e-15j)

    **Applications to equation-solving**

    The Lambert W function may be used to solve various kinds of
    equations, such as finding the value of the infinite power
    tower :math:`z^{z^{z^{\ldots}}}`:

    >>> def tower(z, n):
    ...     if n == 0:
    ...         return z
    ...     return z ** tower(z, n-1)
    ...
    >>> tower(0.5, 100)
    0.641185744504986
    >>> -lambertw(-np.log(0.5)) / np.log(0.5)
    (0.64118574450498589+0j)
    """
    # Thin Python wrapper: all the numerics (Halley iteration, branch
    # selection) live in the compiled ufunc scipy.special._ufuncs._lambertw.
    return _lambertw(z, k, tol)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@special@lambertw.py@.PATH_END.py
|
{
"filename": "PreProcess.py",
"repo_name": "sPaMFouR/RedPipe",
"repo_path": "RedPipe_extracted/RedPipe-master/preprocess/PreProcess.py",
"type": "Python"
}
|
#!/usr/bin/env python
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx #
# xxxxxxxxxxxxxxxxxxxxxxx------------------------PERFORMS PRE-PROCESSING----------------------xxxxxxxxxxxxxxxxxxxxxxx #
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx #
# ------------------------------------------------------------------------------------------------------------------- #
# Import Required Libraries
# ------------------------------------------------------------------------------------------------------------------- #
import os
import re
import glob
import numpy as np
import pandas as pd
import easygui as eg
import ccdproc as ccdp
from astropy import units as u
from astropy.stats import mad_std
from astropy.nddata import CCDData
from photutils import detect_sources
from astropy.nddata.blocks import block_replicate
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Files to Be Read
# ------------------------------------------------------------------------------------------------------------------- #
# Table of telescope details, read by main() into a DataFrame keyed on 'ShortName'.
file_telescopes = 'TelescopeList.dat'
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# GLOBAL VARIABLES
# ------------------------------------------------------------------------------------------------------------------- #s
# Detector characteristics used by the cosmic-ray correction step.
READ_NOISE = 4.87  # Units in electrons / ADU
GAIN = 1.22 * u.electron / u.adu
SATURATION = 800000  # Units in electrons / ADU
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Functions For File Handling
# ------------------------------------------------------------------------------------------------------------------- #
def group_similar_files(text_list, common_text, exceptions=''):
    """
    Groups similar files based on the string 'common_text'. Writes the grouped
    file names to the text file 'text_list' (only if that name is not empty)
    and returns them as a python list.

    Args:
        text_list   : Name of the output text file with names grouped based on the 'common_text'
        common_text : String containing partial name of the files to be grouped
        exceptions  : Comma-separated partial names of files to be excluded

    Returns:
        list_files  : Python list containing the names of the grouped files
    """
    matched_files = glob.glob(common_text)

    # Drop every match that contains any of the exception patterns.
    if exceptions:
        patterns = exceptions.split(',')
        matched_files = [file_name for file_name in matched_files
                         if not any(re.search(pattern, file_name)
                                    for pattern in patterns)]

    matched_files.sort()

    # Optionally persist the grouped names, one per line.
    if text_list:
        with open(text_list, 'w') as out_fd:
            out_fd.writelines(file_name + '\n' for file_name in matched_files)

    return matched_files
def display_text(text_to_display):
    """
    Displays text mentioned in the string 'text_to_display' inside a dashed frame.

    Args:
        text_to_display : Text to be displayed

    Returns:
        None
    """
    # Frame width follows the text length (5 dashes + space on each side).
    dash_count = 12 + len(text_to_display)
    border = "# " + "-" * dash_count + " #"
    print("\n" + border)
    print("# " + "-" * 5 + " " + str(text_to_display) + " " + "-" * 5 + " #")
    print(border + "\n")
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Functions for Performing Pre-Processing
# ------------------------------------------------------------------------------------------------------------------- #
def inv_median(a):
    """
    Used as a scaling function in flat combine task: returns the reciprocal
    of the median of `a`, so that scaled frames have a median of one.
    """
    median_value = np.median(a)
    return 1 / median_value
def bias_combine(textlist_bias, master_bias='mbias.fits'):
    """
    Combines bias files to make a master bias.

    Args:
        textlist_bias : A python list object with paths/names to the individual files.
        master_bias   : Output master bias file (default 'mbias.fits')

    Returns:
        None
    """
    new_list = [CCDData.read(filename, unit=u.adu) for filename in textlist_bias]
    # Sigma-clipped (5-sigma, median centre, MAD std) average combine to
    # reject outliers such as cosmic-ray hits in individual bias frames.
    combined_bias = ccdp.combine(img_list=new_list, method='average',
                                 sigma_clip=True, sigma_clip_low_thresh=5,
                                 sigma_clip_high_thresh=5,
                                 sigma_clip_func=np.ma.median,
                                 sigma_clip_dev_func=mad_std)
    combined_bias.meta['combined'] = True
    # Cast to float32 to halve on-disk size; mask/uncertainty HDUs are dropped.
    combined_bias.data = combined_bias.data.astype('float32')
    combined_bias.write(master_bias, hdu_mask=None, hdu_uncertainty=None)
def subtract_bias(textlist_files, master_bias='mbias.fits', prefix_str='bs_'):
    """
    Subtract bias from the given files list

    Args:
        textlist_files : A python list object with paths/names to the individual files.
        master_bias    : Master bias used for subtraction (default 'mbias.fits')
        prefix_str     : String to be prefixed for subtracted files

    Returns:
        None
    """
    master = CCDData.read(master_bias, unit=u.adu)
    for filename in textlist_files:
        ccd = CCDData.read(filename, unit=u.adu)
        bias_subtracted = ccdp.subtract_bias(ccd=ccd, master=master)
        # Record the processing step in the FITS header.
        bias_subtracted.meta['biassub'] = True
        bias_subtracted.data = bias_subtracted.data.astype('float32')
        # NOTE(review): the prefix is prepended to the whole string, so inputs
        # with directory components write to 'bs_<dir>/<file>' — confirm inputs
        # are bare file names.
        bias_subtracted.write(prefix_str+filename, hdu_mask=None, hdu_uncertainty=None)
def flat_combine(textlist_files, band, outfile='mflat'):
    """
    Combines multiple flats for a given input files list

    Args:
        textlist_files : A python list object with paths/names to the individual files.
        band           : Filter name associated with the flat files
        outfile        : Master flat name (default outfile + band + '.fits')

    Returns:
        None
    """
    new_list = [CCDData.read(filename, unit=u.adu) for filename in textlist_files]
    # Each frame is scaled by the inverse of its median (scale=inv_median)
    # before the sigma-clipped average, so differing exposure levels combine
    # consistently.
    combined_flat = ccdp.combine(img_list=new_list, method='average', scale=inv_median,
                                 sigma_clip=True, sigma_clip_low_thresh=5,
                                 sigma_clip_high_thresh=5, sigma_clip_func=np.ma.median,
                                 sigma_clip_dev_func=mad_std)
    combined_flat.meta['combined'] = True
    file_name = outfile + band + '.fits'
    combined_flat.data = combined_flat.data.astype('float32')
    combined_flat.write(file_name, hdu_mask=None, hdu_uncertainty=None)
def flat_correction(textlist_files, master_flat, prefix_str='f'):
    """
    To flat field the science images using master flat

    Args:
        textlist_files : A python list object with paths/names to the individual files.
        master_flat    : Master flat used to flat field the science image
        prefix_str     : String to be added to newly created science image

    Returns:
        None
    """
    master = CCDData.read(master_flat, unit=u.adu)
    for filename in textlist_files:
        ccd = CCDData.read(filename, unit=u.adu)
        # min_value=0.9 clips low flat values to avoid dividing by near-zero
        # pixels at the detector edges.
        flat_corrected = ccdp.flat_correct(ccd=ccd, flat=master, min_value=0.9)
        flat_corrected.meta['flatcorr'] = True
        flat_corrected.data = flat_corrected.data.astype('float32')
        flat_corrected.write(prefix_str + filename, hdu_mask=None, hdu_uncertainty=None)
def cosmic_ray_corr(textlist_files, prefix_str='c'):
    """
    Gain correction and Cosmic ray correction using LA Cosmic method.

    Args:
        textlist_files : A python list object with paths/names to the individual files.
        prefix_str     : String appended to the name of newly created file

    Returns:
        None
    """
    for filename in textlist_files:
        file_corr = CCDData.read(filename, unit=u.adu)
        # Apply the detector gain first so LA Cosmic works in electrons
        # (gain_apply=False below prevents applying the gain a second time).
        file_corr = ccdp.gain_correct(file_corr, gain=GAIN)
        new_ccd = ccdp.cosmicray_lacosmic(file_corr, readnoise=READ_NOISE, sigclip=7,
                                          satlevel=SATURATION, niter=4,
                                          gain_apply=False, verbose=True)
        new_ccd.meta['crcorr'] = True
        new_ccd.data = new_ccd.data.astype('float32')
        new_ccd.write(prefix_str + filename, hdu_mask=None, hdu_uncertainty=None)
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Main Function
# ------------------------------------------------------------------------------------------------------------------- #
def main():
    """
    Step 1: GUI Code for User Input
    Step 2: Group FITS Files whose header are to be Updated + Read Input File
    Step 3: Extract the details from 'file_telescopes' in a Pandas DataFrame
    Step 4: Updates the Header with changes in 'HeaderInfo.dat' & Appends AIRMASS etc. Details to the Header

    NOTE(review): only Steps 1-3 appear below; Step 4 is not implemented in
    the visible code and several collected inputs (telescopename, instrument,
    input_file, telescope_df, list_files) are currently unused — confirm
    whether this is a work-in-progress stub.
    """
    # GUI Code for User Input
    # NOTE(review): `default` is passed as a one-element list to enterbox;
    # easygui typically expects a plain string — verify the dialogs render
    # as intended.
    DIR_FILES = eg.enterbox('Enter the directory in which preprocessing has to be performed:',
                            title='Enter the Directory Path', default=[os.path.join(os.getcwd(), 'preprocessed')])
    common_text = eg.enterbox('Enter the common text of files for which Details are to be appended:',
                              title='Enter the Common Text', default=['*.fits'])
    telescopename = eg.enterbox('Enter the Name of the Telescope from which the data was observed:',
                                title='Enter the Name of the Telescope', default=['HCT'])
    instrument = eg.enterbox('Enter the Instrument from which the data was observed:',
                             title='Enter the Short Name of the Instrument', default=['HFOSC2'])
    input_file = eg.enterbox('Enter the name of the output file containing the header info:',
                             title='Enter the Name of the Output File', default=['HeaderInfo.dat'])
    # Group FITS Files whose header are to be Updated + Read Input File
    # Extract the details from 'file_telescopes' in a Pandas DataFrame
    telescope_df = pd.read_csv(file_telescopes, sep='\s+', comment='#').set_index('ShortName')
    list_files = group_similar_files('', common_text=os.path.join(DIR_FILES, common_text))
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Execute the Standalone Code
# ------------------------------------------------------------------------------------------------------------------- #
# Run the interactive pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()
# ------------------------------------------------------------------------------------------------------------------- #
|
sPaMFouRREPO_NAMERedPipePATH_START.@RedPipe_extracted@RedPipe-master@preprocess@PreProcess.py@.PATH_END.py
|
{
"filename": "file_handling.py",
"repo_name": "markusbonse/applefy",
"repo_path": "applefy_extracted/applefy-main/applefy/utils/file_handling.py",
"type": "Python"
}
|
"""
Simple helper functions to handle files.
"""
import os
import json
import warnings
from typing import List, Dict, Tuple, Union
from copy import deepcopy
from pathlib import Path
import h5py
from astropy.io import fits
import numpy as np
def read_apples_with_apples_root() -> str:
    """
    A simple function which reads in the APPLES_ROOT_DIR specified by the user.
    This function is needed to reproduce the results of the Apples with Apples
    paper. Raises an error if the variable is not set or the directory does
    not exist.

    Returns:
        The path to the root directory
    """
    root_dir = os.getenv('APPLES_ROOT_DIR')
    # os.getenv returns None when the variable is unset; the previous code
    # then crashed with a TypeError inside os.path.isdir instead of raising
    # the intended IOError.
    if root_dir is None or not os.path.isdir(root_dir):
        raise IOError("The path in APPLES_ROOT_DIR does not exist. Make sure "
                      "to download the data and specify its location.")
    print("Data in the APPLES_ROOT_DIR found. Location: " + str(root_dir))
    return root_dir
def create_checkpoint_folders(
        checkpoint_dir: Union[str, Path]
) -> Union[None, Tuple[Path, Path, Path]]:
    """
    This function creates the classical checkpoint folder structure as used by
    :meth:`~applefy.detections.contrast.Contrast`. It creates three
    sub-folders: configs_cgrid, residuals, scratch. Returns None if
    checkpoint_dir is None.

    Args:
        checkpoint_dir: The root directory in which the sub-folders are created.

    Returns:
        1. Path to the configs_cgrid folder.
        2. Path to the residuals folder.
        3. Path to the scratch folder.
    """
    # No checkpoint directory given -> results are not persisted.
    if checkpoint_dir is None:
        return None

    # use pathlib for easy path handling
    checkpoint_dir = Path(checkpoint_dir)

    # the root has to exist already; only the sub-folders are created here
    if not checkpoint_dir.is_dir():
        raise IOError("The directory " + str(checkpoint_dir) +
                      " does not exist. Please create it.")

    sub_dirs = tuple(checkpoint_dir / sub_name
                     for sub_name in ("configs_cgrid", "residuals", "scratch"))
    for sub_dir in sub_dirs:
        sub_dir.mkdir(parents=False, exist_ok=True)

    return sub_dirs
def search_for_config_and_residual_files(
        config_dir: Union[str, Path],
        method_dir: Path
) -> List[Tuple[str, str]]:
    """
    Searches for tuples of existing contrast grid config files and
    corresponding residuals. Raises an error if the .json files and residual
    files in config_dir and method_dir do not match (in either direction).

    Args:
        config_dir: Directory where the contrast grid config files are stored.
        method_dir: Directory where the residuals are stored.

    Returns:
        A list with paired paths (config path, residual path)
    """
    collected_result_file = []

    # find all config files, keyed by experiment ID
    config_files = dict(collect_all_contrast_grid_configs(config_dir))

    for tmp_file in method_dir.iterdir():
        if not tmp_file.name.startswith("residual_"):
            continue

        # residual files are named ..._ID_<idx>.<ext>
        tmp_idx = tmp_file.name.split("_ID_")[1].split(".")[0]

        # Previously a residual without a matching config raised an
        # uninformative bare KeyError; fail with an explicit message instead.
        if tmp_idx not in config_files:
            raise FileNotFoundError(
                "Found residual " + str(tmp_file) + " without a matching "
                "config file. Check if all residuals have a config.")

        tmp_config_path = str(config_files[tmp_idx])
        tmp_residual_path = str(tmp_file)
        del config_files[tmp_idx]

        collected_result_file.append((tmp_config_path, tmp_residual_path))

    # any config left over has no residual
    if len(config_files) != 0:
        raise FileNotFoundError(
            "Some residuals are missing. Check if all config files "
            "have a matching residual.")

    return collected_result_file
def collect_all_contrast_grid_configs(
        contrast_grid_configs: str
) -> List[Tuple[str, str]]:
    """
    Simple function which looks for all auto generated contrast grid config
    files in one directory.

    Args:
        contrast_grid_configs: The directory which contains the config files.

    Returns:
        A list of tuples (job_id, file path)
    """
    # 1.) Collect all jobs to be run
    job_list = []
    for file_name in sorted(os.listdir(contrast_grid_configs)):
        # auto-generated configs are named exp_ID_<job_id>.<ext>
        if file_name.startswith("exp_"):
            job_id = file_name.split(".")[0].split("_")[-1]
            job_list.append(
                (job_id, os.path.join(contrast_grid_configs, file_name)))

    return job_list
def save_experiment_configs(
        experimental_setups: Dict[str, Dict],
        experiment_config_dir: Path,
        overwrite: bool = False
) -> None:
    """
    Saves all contrast grid config files in experimental_setups as .json files
    into the experiment_config_dir. Overwrites existing files if requested.

    Args:
        experimental_setups: The contrast grid config files as created by
            :meth:`~applefy.detections.preparation.generate_fake_planet_experiments`
        experiment_config_dir: The directory where the .json files are saved.
        overwrite: Whether to overwrite existing config files.
    """
    # refuse to clobber pre-existing configs unless explicitly allowed
    has_existing_configs = any(
        entry.suffix == ".json" for entry in experiment_config_dir.iterdir())

    if has_existing_configs:
        if not overwrite:
            raise FileExistsError(
                "The directory \"" + str(experiment_config_dir) + "\" already "
                "contains config files. Delete them if you want to create a "
                "new experiment setup or use overwrite=True to automatically "
                " remove them.")

        print("Overwriting existing config files.")
        for entry in experiment_config_dir.iterdir():
            if entry.suffix == ".json":
                entry.unlink()

    # one pretty-printed .json file per experiment ID
    for exp_id, exp_config in experimental_setups.items():
        target_path = os.path.join(
            experiment_config_dir,
            "exp_ID_" + str(exp_id) + ".json")
        with open(target_path, 'w') as out_file:
            json.dump(exp_config, out_file, indent=4)
def read_fake_planet_results(
        result_files: List[Tuple[str, str]]
) -> List[Tuple[dict, np.ndarray]]:
    """
    Read all contrast grid config files and .fits residuals as listed in
    result_files. Pairs whose config or residual file does not exist are
    skipped with a warning.

    Args:
        result_files: A list of tuples
            (path to config file, path to residual)

    Returns:
        The loaded results as a list of
            (config, residual)
    """
    load_results = []
    for tmp_config_path, tmp_residual_path in result_files:
        # 1.) load the residual, ignore non-existing files
        # (fix: the original warning text had no space before "not found")
        if not os.path.isfile(tmp_residual_path):
            warnings.warn("File " + str(tmp_residual_path) + " not found")
            continue
        if not os.path.isfile(tmp_config_path):
            warnings.warn("File " + str(tmp_config_path) + " not found")
            continue
        # drop singleton dimensions so downstream code sees a plain 2D frame
        tmp_residual = np.squeeze(open_fits(tmp_residual_path))
        # 1.1) load the config file
        with open(tmp_config_path) as json_file:
            tmp_setup_config = json.load(json_file)
        load_results.append((tmp_setup_config, tmp_residual))
    return load_results
def cut_around_center(
        psf_template: np.array,
        science_data: np.array,
        cut_off_psf: int,
        cut_off_data: int
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Function which cuts the psf_template and science_data around the center of
    the image. Can be useful to reduce the dimensionality of the data in order
    to save time during the calculation of contrast grids and contrast curves.

    Args:
        psf_template: 2D numpy array with the psf template.
        science_data: 3D numpy array with the science data. Shape (time, x, y)
        cut_off_psf: The number of pixels to be cut off on each side of the
            psf template (0 keeps the full frame).
        cut_off_data: The number of pixels to be cut off on each side of the
            science data (0 keeps the full frame).

    Returns:
        1. The cut science data.
        2. The cut psf template.
    """
    # slice(c, -c) would be EMPTY for c == 0 (x[0:-0] == x[0:0]); map a zero
    # cut-off to an open-ended slice so the full frame is preserved.
    data_slice = slice(cut_off_data, -cut_off_data if cut_off_data else None)
    psf_slice = slice(cut_off_psf, -cut_off_psf if cut_off_psf else None)

    # cut the data (spatial axes only; the time axis is untouched)
    science_data_cut = science_data[:, data_slice, data_slice]
    psf_template_cut = psf_template[psf_slice, psf_slice]

    return science_data_cut, psf_template_cut
def load_adi_data(
        hdf5_dataset: str,
        data_tag: str,
        psf_template_tag: str,
        para_tag="PARANG"
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Loads ADI data stored as a hdf5 file. This function is needed to read
    the data and reproduce the results of the Apples with Apples paper.

    Args:
        hdf5_dataset: The path to the hdf5 file.
        data_tag: Tag of the science data in the hdf5 database.
        psf_template_tag: Tag of the PSF template in the hdf5 database.
        para_tag: Tag of the parallactic angles in the hdf5 database.

    Returns:
        1. The science data as a 3d numpy array.
        2. The parallactic angles as a 1d numpy array (in radians).
        3. The PSF template as a numpy array.
    """
    hdf5_file = h5py.File(hdf5_dataset, 'r')
    # [...] materialises the datasets into in-memory numpy arrays
    data = hdf5_file[data_tag][...]
    # angles are stored in degrees in the file; converted to radians here
    angles = np.deg2rad(hdf5_file[para_tag][...])
    psf_template_data = hdf5_file[psf_template_tag][...]
    hdf5_file.close()
    return data, angles, psf_template_data
def save_as_fits(
        data: np.ndarray,
        file_name: str
) -> None:
    """
    Saves data as .fits file.

    Args:
        data: The data to be saved.
        file_name: The filename of the fits file.

    NOTE(review): writeto is called without overwrite, so this presumably
    raises if file_name already exists — confirm that is the intended
    behaviour for callers.
    """
    hdu = fits.PrimaryHDU(data)
    hdul = fits.HDUList([hdu])
    hdul.writeto(file_name)
def open_fits(file_name):
    """
    Opens a fits file as a numpy array.

    Args:
        file_name: Path to the fits file.

    Returns:
        Loaded data as numpy array (contents of the primary HDU).
    """
    with fits.open(file_name) as hdul:
        # pylint: disable=no-member
        # Deep-copy the data and drop the HDU reference so the returned array
        # is detached from the (memory-mapped) file before it is closed.
        data = deepcopy(hdul[0].data[...])
        del hdul[0].data
        del hdul
    return data
|
markusbonseREPO_NAMEapplefyPATH_START.@applefy_extracted@applefy-main@applefy@utils@file_handling.py@.PATH_END.py
|
{
"filename": "conversion.py",
"repo_name": "ratt-ru/QuartiCal",
"repo_path": "QuartiCal_extracted/QuartiCal-main/quartical/gains/conversion.py",
"type": "Python"
}
|
import dask.array as da
import numpy as np
from quartical.gains.parameterized_gain import ParameterizedGain
class Converter(object):
    """Converts gain arrays between their native representation and a
    "converted" parameterisation, using the per-gain conversion tables.

    The gain object supplies two tables:
      * native_to_converted: sequence of (n_consumed, funcs) pairs — each
        entry consumes ``n_consumed`` native fields and produces one
        converted field by applying ``funcs`` in order.
      * converted_to_native: sequence of (n_consumed, func) pairs — each
        entry consumes ``n_consumed`` converted fields and produces one
        native field via ``func(*fields)``.
    """

    def __init__(self, gain_obj):
        # True when the gain term is a ParameterizedGain subclass.
        self.parameterized = issubclass(gain_obj.__class__, ParameterizedGain)
        # dtypes of the two representations (e.g. real params vs complex gains).
        self.converted_dtype = gain_obj.converted_dtype
        self.native_dtype = gain_obj.native_dtype
        # Conversion recipes; see the class docstring for their structure.
        self.native_to_converted = gain_obj.native_to_converted
        self.converted_to_native = gain_obj.converted_to_native

    @property
    def conversion_ratio(self):
        # (native fields consumed, converted fields produced) per full cycle
        # through the native_to_converted table.
        input_fields = sum(cf[0] for cf in self.native_to_converted)
        output_fields = len(self.native_to_converted)
        return (input_fields, output_fields)

    def convert(self, arr):
        """Map a native dask array (axes 'tfadc') onto its converted form."""
        cr = self.conversion_ratio
        dtype = self.converted_dtype
        # The last ('c') axis shrinks/grows by the conversion ratio.
        # NOTE(review): adjust_chunks uses arr.shape[-1], which assumes a
        # single chunk along 'c' — confirm upstream chunking guarantees this.
        return da.blockwise(
            self._convert, 'tfadc',
            arr, 'tfadc',
            dtype, None,
            dtype=dtype,
            adjust_chunks={'c': arr.shape[-1] // cr[0] * cr[1]}
        )

    def _convert(self, arr, dtype):
        """Per-block kernel for convert(): apply the native->converted table
        repeatedly along the last axis."""
        cr = self.conversion_ratio
        out_shape = arr.shape[:-1] + (arr.shape[-1] // cr[0] * cr[1],)
        out_arr = np.empty(out_shape, dtype=dtype)
        inp_ind = 0
        out_ind = 0
        # Walk the last axis, consuming n_consumed native fields per entry
        # and emitting one converted field each time.
        # NOTE(review): only the first consumed field (arr[..., inp_ind]) is
        # read per entry, so entries with n_consumed > 1 skip fields — confirm
        # the conversion tables rely on this.
        while inp_ind < arr.shape[-1]:
            for (n_consumed, cfs) in self.native_to_converted:
                tmp = arr[..., inp_ind]
                for cf in cfs:
                    tmp = cf(tmp)
                out_arr[..., out_ind] = tmp[...]
                inp_ind += n_consumed
                out_ind += 1
        return out_arr

    def revert(self, arr):
        """Map a converted dask array (axes 'tfadc') back to its native form."""
        cr = self.conversion_ratio
        dtype = self.native_dtype
        # Inverse chunk adjustment of convert().
        return da.blockwise(
            self._revert, 'tfadc',
            arr, 'tfadc',
            dtype, None,
            dtype=dtype,
            adjust_chunks={'c': arr.shape[-1] // cr[1] * cr[0]}
        )

    def _revert(self, arr, dtype):
        """Per-block kernel for revert(): apply the converted->native table
        repeatedly along the last axis."""
        cr = self.conversion_ratio
        out_shape = arr.shape[:-1] + (arr.shape[-1] // cr[1] * cr[0],)
        out_arr = np.empty(out_shape, dtype=dtype)
        inp_ind = 0
        out_ind = 0
        while inp_ind < arr.shape[-1]:
            for (n_consumed, rf) in self.converted_to_native:
                inputs = [arr[..., inp_ind + k] for k in range(n_consumed)]
                out_arr[..., out_ind] = rf(*inputs)
                inp_ind += n_consumed
                out_ind += 1
        return out_arr
def no_op(passthrough):
    """Identity element for the conversion tables: returns its argument unchanged."""
    return passthrough
def trig_to_angle(cos_arr, sin_arr):
    """Recover an angle (radians, in (-pi, pi]) from its cosine and sine parts."""
    angle = np.arctan2(sin_arr, cos_arr)
    return angle
def amp_to_complex(amp_arr):
    # NOTE(review): np.exp(1j) is the fixed constant e**(1j) (~0.540 + 0.841j),
    # so this multiplies the amplitude by a constant 1-radian phase rather than
    # producing a zero-phase complex value. Presumably np.exp(1j * 0) (i.e.
    # amp_arr + 0j) was intended — confirm against the gain terms whose
    # converted_to_native tables reference this function.
    return amp_arr * np.exp(1j)
def amp_trig_to_complex(amp_arr, cos_arr, sin_arr):
    """Assemble a complex gain from an amplitude and (cos, sin) phase parts."""
    phase = np.arctan2(sin_arr, cos_arr)
    return amp_arr * np.exp(1j * phase)
|
ratt-ruREPO_NAMEQuartiCalPATH_START.@QuartiCal_extracted@QuartiCal-main@quartical@gains@conversion.py@.PATH_END.py
|
{
"filename": "initialise.py",
"repo_name": "davidharvey1986/pyRRG",
"repo_path": "pyRRG_extracted/pyRRG-master/unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_vendor/colorama/initialise.py",
"type": "Python"
}
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import contextlib
import sys
from .ansitowin32 import AnsiToWin32
# Originals of the process-wide streams, saved by init() so deinit() can
# restore them.
orig_stdout = None
orig_stderr = None
# The wrapped replacement streams installed by init(); reused by reinit().
wrapped_stdout = None
wrapped_stderr = None
# Guard so the atexit reset_all() handler is registered at most once.
atexit_done = False
def reset_all():
    """Reset terminal colours/attributes via the original stdout stream."""
    if AnsiToWin32 is not None:    # Issue #74: objects might become None at exit
        AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
    """Install ANSI-aware wrappers around sys.stdout / sys.stderr.

    Saves the current streams in orig_stdout/orig_stderr, replaces them with
    wrap_stream() results (stored in wrapped_stdout/wrapped_stderr), and
    registers reset_all() to run at interpreter exit (once).
    """
    # wrap=False is only meaningful when no wrapping behaviour is requested.
    if not wrap and any([autoreset, convert, strip]):
        raise ValueError('wrap=False conflicts with any other arg=True')

    global wrapped_stdout, wrapped_stderr
    global orig_stdout, orig_stderr

    # Remember the current streams so deinit() can restore them.
    orig_stdout = sys.stdout
    orig_stderr = sys.stderr

    # sys.stdout/stderr can be None (e.g. pythonw); skip wrapping in that case.
    if sys.stdout is None:
        wrapped_stdout = None
    else:
        sys.stdout = wrapped_stdout = \
            wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
    if sys.stderr is None:
        wrapped_stderr = None
    else:
        sys.stderr = wrapped_stderr = \
            wrap_stream(orig_stderr, convert, strip, autoreset, wrap)

    # Register the exit-time colour reset exactly once per process.
    global atexit_done
    if not atexit_done:
        atexit.register(reset_all)
        atexit_done = True
def deinit():
    """Undo init(): put the saved original stdout/stderr back on sys."""
    for attr_name, saved_stream in (("stdout", orig_stdout),
                                    ("stderr", orig_stderr)):
        if saved_stream is not None:
            setattr(sys, attr_name, saved_stream)
@contextlib.contextmanager
def colorama_text(*args, **kwargs):
    """Context manager: init(*args, **kwargs) on entry, deinit() on exit.

    deinit() runs even if the body raises, so the original streams are
    always restored.
    """
    init(*args, **kwargs)
    try:
        yield
    finally:
        deinit()
def reinit():
    """Re-install the wrapped streams created by the most recent init() call."""
    for attr_name, wrapped_stream in (("stdout", wrapped_stdout),
                                      ("stderr", wrapped_stderr)):
        if wrapped_stream is not None:
            setattr(sys, attr_name, wrapped_stream)
def wrap_stream(stream, convert, strip, autoreset, wrap):
    """Return `stream`, or AnsiToWin32's proxy for it when wrapping applies.

    The proxy is only substituted when wrap is True AND the wrapper itself
    decides wrapping is needed (wrapper.should_wrap()); otherwise the
    original stream is returned unchanged.
    """
    if wrap:
        wrapper = AnsiToWin32(stream,
            convert=convert, strip=strip, autoreset=autoreset)
        if wrapper.should_wrap():
            stream = wrapper.stream
    return stream
|
davidharvey1986REPO_NAMEpyRRGPATH_START.@pyRRG_extracted@pyRRG-master@unittests@bugFixPyRRG@lib@python3.7@site-packages@pip@_vendor@colorama@initialise.py@.PATH_END.py
|
{
"filename": "jla_lite.py",
"repo_name": "ggalloni/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/sn/jla_lite.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import SN
class JLA_lite(SN):
    r"""
    Likelihood (marginalized over nuisance parameters) of the JLA type Ia supernova sample
    \cite{Betoule:2014frx}, based on observations obtained by the SDSS-II and SNLS
    collaborations.
    """
    # All behaviour is inherited from the SN base class; presumably the class
    # name selects the associated dataset/config — confirm against the base
    # class's conventions.
    pass
|
ggalloniREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@sn@jla_lite.py@.PATH_END.py
|
{
"filename": "xcom.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/ixpeobssim/irfgen/xcom.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Copyright (C) 2017--2019, the ixpeobssim team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function, division
"""Small convenience module for interfacing with the XCOM database.
https://physics.nist.gov/PhysRefData/Xcom/html/xcom1.html
"""
import numpy
import os
from ixpeobssim import IXPEOBSSIM_IRFGEN
from ixpeobssim.utils.logging_ import logger
from ixpeobssim.core.spline import xInterpolatedUnivariateSplineLinear
from ixpeobssim.utils.matplotlib_ import plt
# Directory holding the XCOM cross-section dumps shipped with the package.
IXPEOBSSIM_XCOM_DATA = os.path.join(IXPEOBSSIM_IRFGEN, 'data', 'xcom')

# Module-level cache; not referenced in the visible part of this module —
# presumably used by code further down to memoise parsed tables (confirm).
__CACHE = {}
def create_energy_grid(emin=0.001, emax=0.015, num_points=100):
    """Create a text file with a list of additional energies to query the
    XCOM database, to be passed to the XCOM web interface.

    By default this is writing out a grid of logarithmically-spaced energy
    values between 1 and 15 keV---one energy per line, in MeV.

    Args
    ----
    emin : float
        Minimum grid energy in MeV.
    emax : float
        Maximum grid energy in MeV.
    num_points : int
        Number of (logarithmically spaced) grid points.
    """
    file_path = os.path.join(IXPEOBSSIM_XCOM_DATA, 'egrid.txt')
    logger.info('Writing XCOM energy grid to %s...' % file_path)
    with open(file_path, 'w') as output_file:
        for energy in numpy.logspace(numpy.log10(emin), numpy.log10(emax),
                                     num_points):
            output_file.write('%.6f\n' % energy)
    logger.info('Done.')
class xCrossSectionTable:
    """Cross-section table for a single element or compound, parsed from a
    local XCOM text file.

    The input file has four columns (energy, coherent, incoherent and
    photoelectric cross sections); the energy column is converted from MeV
    to keV at load time.
    """

    def __init__(self, identifier):
        """Constructor.

        Note we are converting MeV to keV in place.
        """
        self.identifier = identifier
        file_name = '%s.txt' % identifier.lower()
        file_path = os.path.join(IXPEOBSSIM_XCOM_DATA, file_name)
        logger.info('Parsing XCOM data file %s...' % file_path)
        columns = numpy.loadtxt(file_path, unpack=True)
        self.energy, self.coherent, self.incoherent, self.photoelectric = columns
        # MeV -> keV, in place.
        self.energy *= 1000.
        self.total = self.coherent + self.incoherent + self.photoelectric
        self.photoelectric_spline = self.__spline(self.photoelectric)

    def __spline(self, values):
        """Return a linear spline of the given cross section as a function
        of the energy.
        """
        return xInterpolatedUnivariateSplineLinear(
            self.energy, values, xlabel='Energy [keV]',
            ylabel='Cross section [cm$^2$ g$^{-1}$]')

    def transparency(self, energy, thickness, density):
        """Return the transparency of a slab of material with a given density,
        evaluated on a grid of energy values.

        Mind this is based on the photoelectric cross section only
        (i.e., coherent and incoherent scattering are not included.)

        Args
        ----
        energy : array-like
            The array of energy values to evaluate the efficiency.
        thickness : float
            The thickness of the slab in cm.
        density : float
            The density of the material in g cm^{-3}
        """
        optical_depth = density * self.photoelectric_spline(energy) * thickness
        return numpy.exp(-optical_depth)

    def photoabsorption_efficiency(self, energy, thickness, density):
        """Return the photoabsorption efficiency for a slab of material of
        a given thickness and density, evaluated on a grid of energy values.

        This is simply one minus the transparency.

        Args
        ----
        energy : array-like
            The array of energy values to evaluate the efficiency.
        thickness : float
            The thickness of the slab in cm.
        density : float
            The density of the material in g cm^{-3}
        """
        return 1. - self.transparency(energy, thickness, density)

    def plot(self):
        """Plot all the cross sections as a function of energy.
        """
        plt.figure(self.identifier)
        fmt = dict(logx=True, logy=True)
        for label, values in (('Coherent', self.coherent),
                              ('Incoherent', self.incoherent),
                              ('Photoelectric', self.photoelectric),
                              ('Total', self.total)):
            self.__spline(values).plot(label=label, **fmt)
        plt.axis([self.energy.min(), self.energy.max(), None, None])
        plt.legend()

    def __str__(self):
        """Terminal formatting.
        """
        header = 'Energy [keV] Coherent Incoherent Photoel. Total'
        table = numpy.vstack([self.energy, self.coherent, self.incoherent,
                              self.photoelectric, self.total]).T
        return '%s\n%s' % (header, numpy.around(table, 3))
def load_xsection_data(identifier):
    """Return the cross-section table for a given element or compound.

    The underlying text file is parsed only on the first request for a given
    identifier; subsequent calls return the cached xCrossSectionTable object.
    """
    try:
        return __CACHE[identifier]
    except KeyError:
        table = xCrossSectionTable(identifier)
        __CACHE[identifier] = table
        return table
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@ixpeobssim@irfgen@xcom.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "pmelchior/scarlet",
"repo_path": "scarlet_extracted/scarlet-master/README.md",
"type": "Markdown"
}
|
[](https://travis-ci.org/pmelchior/scarlet)
[](https://github.com/pmelchior/scarlet/blob/master/LICENSE.md)
[](https://doi.org/10.1016/j.ascom.2018.07.001)
[](https://arxiv.org/abs/1802.10157)
# Scarlet
This package performs source separation (aka "deblending") on multi-band images. It's geared towards optical astronomy, where scenes are composed of stars and galaxies, but it is straightforward to apply it to other imaging data.
**For the full documentation see [the docs](https://pmelchior.github.io/scarlet/).**
Separation is achieved through a constrained matrix factorization, which models each source with a Spectral Energy Distribution (SED) and a non-parametric morphology, or multiple such components per source. In astronomy jargon, the code performs forced photometry (with PSF matching if needed) using an optimal weight function given by the signal-to-noise weighted morphology across bands. The approach works well if the sources in the scene have different colors and can be further strengthened by imposing various additional constraints/priors on each source.
The minimization itself uses the proximal gradient method (PGM). In short, we iteratively compute gradients of the likelihood (or of the posterior if priors are included), perform a downhill step, and project the outcome on a sub-manifold that satisfies one or multiple non-differentiable constraints for any of the sources.
This package provides a stand-alone implementation that contains the core components of the source separation algorithm. However, the development of this package is part of the [LSST Science Pipeline](https://pipelines.lsst.io); the [meas_deblender](https://github.com/lsst/meas_deblender) package contains a wrapper to implement the algorithms here for the LSST stack.
The API is reasonably stable, but feel free to contact the authors [fred3m](https://github.com/fred3m) and [pmelchior](https://github.com/pmelchior) for guidance. For bug reports and feature request, open an issue.
If you make use of scarlet, please acknowledge [Melchior et al. (2018)](https://doi.org/10.1016/j.ascom.2018.07.001), which describes in detail the concepts and algorithms used in this package.
## Prerequisites
The code runs on python>=3.5. In addition, you'll need
* numpy
* pybind11
* autograd
* [proxmin](https://github.com/pmelchior/proxmin)
|
pmelchiorREPO_NAMEscarletPATH_START.@scarlet_extracted@scarlet-master@README.md@.PATH_END.py
|
{
"filename": "sdssTest.py",
"repo_name": "AstroVPK/kali",
"repo_path": "kali_extracted/kali-master/examples/sdssTest.py",
"type": "Python"
}
|
# Example script: evaluate the CARMA(3,1) log-prior/likelihood/posterior for
# an SDSS Stripe 82 light curve fetched with kali.
#
# Fix: the original used Python-2-only print statements, which are a
# SyntaxError under Python 3. Single-argument print(...) calls below are
# valid in both Python 2 and Python 3; all message strings are unchanged.
import numpy as np
import argparse
import os
import sys
import pdb  # kept as an interactive debugging aid (file-level import)
try:
    import kali.carma
    import kali.s82
except ImportError:
    print('kali is not setup. Setup kali by sourcing bin/setup.sh')
    sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('-pwd', '--pwd', type=str, default=os.path.join(
    os.environ['KALI'], 'examples/data'), help=r'Path to working directory')
parser.add_argument('-n', '--name', type=str, default='rand', help=r'SDSS ID')
args = parser.parse_args()
# Fetch the g-band Stripe 82 light curve for the requested object.
nl = kali.s82.sdssLC(name=args.name, band='g', pwd=args.pwd)
# CARMA task with p=3 AR and q=1 MA orders.
nt = kali.carma.CARMATask(3, 1)
# Rho: presumably the C-ARMA root parameterization consumed by
# kali.carma.coeffs (last entry is the amplitude) -- confirm against kali docs.
Rho = np.array([-1.0/100.0, -1.0/55.0, -1.0/10.0, -1.0/25.0, 2.0e-08])
Theta = kali.carma.coeffs(3, 1, Rho)
nt.set(nl.dt, Theta)
print("logPrior: %+8.7e"%(nt.logPrior(nl)))
print("logLikelihood: %+8.7e"%(nt.logLikelihood(nl)))
print("logPosterior: %+8.7e"%(nt.logPosterior(nl)))
|
AstroVPKREPO_NAMEkaliPATH_START.@kali_extracted@kali-master@examples@sdssTest.py@.PATH_END.py
|
{
"filename": "_xcalendar.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2d/_xcalendar.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XcalendarValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``xcalendar`` attribute of the histogram2d trace.

    Accepts one of the world-calendar names supported by plotly; the
    ``edit_type`` and allowed ``values`` defaults can be overridden through
    keyword arguments.
    """

    def __init__(self, plotly_name="xcalendar", parent_name="histogram2d", **kwargs):
        # Build the default list per call so callers that mutate it cannot
        # affect later instances.
        default_calendars = [
            "chinese", "coptic", "discworld", "ethiopian",
            "gregorian", "hebrew", "islamic", "jalali",
            "julian", "mayan", "nanakshahi", "nepali",
            "persian", "taiwan", "thai", "ummalqura",
        ]
        super(XcalendarValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", default_calendars),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2d@_xcalendar.py@.PATH_END.py
|
{
"filename": "clustering.py",
"repo_name": "HETDEX/elixer",
"repo_path": "elixer_extracted/elixer-main/elixer/clustering.py",
"type": "Python"
}
|
"""
Scans elixer h5 file for neighbors with matching emission lines to cluster at a common redshift
The intent is largely to resolve redshift mis-classifications as high-z that are actually single emission lines
in the outskirts of a bright object where the aperture fails to get significant continuum
"""
from __future__ import print_function
import logging
import numpy as np
import os.path as op
try:
from elixer import global_config as G
from elixer import utilities
from elixer import spectrum
except:
import global_config as G
import utilities
import spectrum
# Module-level logger for this file, configured via elixer's global settings.
log = G.Global_Logger('clustering')
log.setlevel(G.LOG_LEVEL)
def find_cluster(detectid,elixerh5,outfile=True,delta_arcsec=G.CLUSTER_POS_SEARCH,delta_lambda=G.CLUSTER_WAVE_SEARCH,
                 gmag_thresh=G.CLUSTER_MAG_THRESH):
    """
    Search the elixer h5 file for a brighter neighbor whose emission line is consistent with the target
    detection's line, and, if found, return that neighbor's redshift info so the target can be re-classified.

    Note: caller is responsible for any pre-checks on the target detectid and for opening and closing the h5 file
    :param detectid: detectid of the target HETDEX detection
    :param elixerh5: handle to the elixer h5 file to search
    :param outfile: if TRUE writes out a file with the clustering info (if a cluster is made) named as detectid.cluster
    :param delta_arcsec: +/- width in arcsecs to search in RA and in Dec
    :param delta_lambda: +/- width to search in found emission lines
    :param gmag_thresh: faint limit gmag to allow as a match (we only want to match against bright objects)
    :return: cluster dictionary with key info, or None if no suitable bright neighbor is found
    """
    cluster_dict = None
    # {"detectid": detectid,
    #  "neighborid": None, #detectID of the neighbor
    #  "neighbor_dist": None, #distance to HETDEX ra, dec of neighbor
    #  "neighbor_ra": None,
    #  "neighbor_dec": None,
    #  "neighhor_gmag": None,
    #  "neighbor_z": None,
    #  "neighbor_qz": None,
    #  "neighbor_plya": None
    # }
    #any failure below logs and falls through to return cluster_dict (None)
    try:
        log.info(f"Clustering on {detectid} ...")
        z_col = 'z_best'
        pz_col = 'z_best_pz'
        dtb = elixerh5.root.Detections
        ltb = elixerh5.root.SpectraLines
        #first get the detectid related info
        #NOTE: q_detectid is picked up by name inside the PyTables query string below
        q_detectid = detectid
        target_rows = dtb.read_where("detectid == q_detectid")
        if len(target_rows) != 1:
            log.info(f"Invalid detectid {detectid}")
            return cluster_dict
        flags = target_rows[0]['flags'] #could explicitly check for a magnitude mismatch
        target_gmag = 0
        target_gmag_err = 0
        #plya_classification column name changed between file versions; try both
        try:
            plya_classification = target_rows[0]['plya_classification']
        except:
            try:
                plya_classification = target_rows[0]['plae_classification']
            except:
                plya_classification = None
        try:
            #if target_rows[0]['review'] == 0: #if we are NOT set to review, check the gmag
            target_gmag = target_rows[0]['mag_g_wide'] #this could fail
            target_gmag_err = target_rows[0]['mag_g_wide_err'] #this could fail
            # try: #could be bad gmag
            #     if 0 < target_gmag < 99:
            #         pass #all good
            #     else:
            #         target_gmag = target_rows[0]['mag_g_wide'] #this could fail
            #         target_gmag_err = target_rows[0]['mag_g_wide_err'] #this could fail
            # except:
            #     target_gmag = rotarget_rowsws[0]['mag_g_wide'] #this could fail
            #     target_gmag_err = target_rows[0]['mag_g_wide_err'] #this could fail
            #cap an absurdly large magnitude error at 2 mag
            if abs(target_gmag_err) > 2.0:
                old = target_gmag_err
                target_gmag_err = 2.0
                log.debug(f"Clustering: Detectid {detectid} capped gmag error to {target_gmag_err} from {old}")
            # if (flags & G.DETFLAG_DISTANT_COUNTERPART) or (flags & G.DETFLAG_COUNTERPART_MAG_MISMATCH) or
            # (flags &)
            #flags that can influence the classification such that we would want to re-classify
            #some flags are bad (like negative spectrum) but there is no need to re-classify because it is just junk
            # basically, if the object is bright BUT has one or more of these flags, go ahead and re-classify
            # if it is faint, always check
            bad_flags_list = [
                G.DETFLAG_FOLLOWUP_NEEDED,
                G.DETFLAG_BLENDED_SPECTRA,
                G.DETFLAG_COUNTERPART_MAG_MISMATCH,
                G.DETFLAG_COUNTERPART_NOT_FOUND,
                G.DETFLAG_DISTANT_COUNTERPART,
                G.DETFLAG_EXT_CAT_QUESTIONABLE_Z,
                G.DETFLAG_NO_IMAGING,
                G.DETFLAG_BAD_PIXELS,
                #G.DETFLAG_EXCESSIVE_ZERO_PIXELS,
                # G.DETFLAG_POOR_SHOT,
                # G.DETFLAG_BAD_DITHER_NORM,
                # G.DETFLAG_POOR_THROUGHPUT,
                #G.DETFLAG_NEGATIVE_SPECTRUM,
                # G.DETFLAG_DUPLICATE_FIBERS,
                # G.DETFLAG_BAD_PIXEL_FLAT,
                # G.DETFLAG_POSSIBLE_LOCAL_TRANSIENT,
                #G.DETFLAG_LARGE_NEIGHBOR, #irrelevant, it is likely this large neighbor we'd classify against
                # G.DETFLAG_LARGE_SKY_SUB,
                # G.DETFLAG_POSSIBLE_PN,
            ]
            if np.any([flags & x for x in bad_flags_list]) == 0: #if there are flags, skip this check as we are going to check this object regardless
                try:
                    if (target_gmag+target_gmag_err) < G.CLUSTER_SELF_MAG_THRESH: #too bright
                        log.info(f"Clustering: Detectid {detectid}. Too bright. gmag = {target_gmag} +/- {target_gmag_err}")
                        return cluster_dict
                except: #the sdss might not be there or may be invalid
                    target_gmag = 25.0
                    target_gmag_err = 0.0
                    log.info(f"Detectid {detectid}. Invalid gmag. Set to dummy value.")
        except:
            pass #older ones may not have a 'review' field
        target_ra = target_rows[0]['ra']
        target_dec = target_rows[0]['dec']
        #pull the best redshift columns; older files use 'best_z'/'best_pz' names
        try:
            target_z = target_rows[0][z_col] #use the primary (instead of the alternate plya thresholds)
            target_pz = target_rows[0][pz_col]
            #what if ther other z_best don't match??
            try:
                target_z_2 = target_rows[0][z_col+"_2"]
                target_z_3 = target_rows[0][z_col + "_3"]
                target_pz_2 = target_rows[0][pz_col+"_2"]
                target_pz_3 = target_rows[0][pz_col + "_3"]
            except:
                target_z_2 = target_z
                target_pz_2 = target_pz
                target_z_3 = target_z
                target_pz_3 = target_pz
        except:
            z_col = 'best_z'
            pz_col = 'best_pz'
            target_z = target_rows[0][z_col] #use the primary (instead of the alternate plya thresholds)
            target_pz = target_rows[0][pz_col]
            target_z_2 = target_z
            target_pz_2 = target_pz
            target_z_3 = target_z
            target_pz_3 = target_pz
        target_wave = target_rows[0]['wavelength_obs']
        target_wave_err = target_rows[0]['wavelength_obs_err']
        deg_err = delta_arcsec / 3600.0
        #box defined by COORDINATEs not by real delta_arcsec
        #NOTE(review): no cos(dec) correction on RA, so the box is wider on sky
        #at high declination than delta_arcsec implies -- appears intentional per
        #the commented-out factors below, but confirm
        ra1 = target_ra - deg_err #* np.cos(target_dec*np.pi/180.)
        ra2 = target_ra + deg_err # * np.cos(target_dec*np.pi/180.)
        dec1 = target_dec - deg_err
        dec2 = target_dec + deg_err
        #now search for RA, Dec neighbors
        #there is an index on ra and dec
        rows = dtb.read_where("(ra > ra1) & (ra < ra2) & (dec > dec1) & (dec < dec2) & (detectid != q_detectid)")
        if len(rows) == 0: #there are none
            log.info(f"Clustering on {detectid}. No neighbors found.")
            return cluster_dict
        #otherwise, check for other conditions
        #gmag limit
        sel = np.array(rows['mag_g_wide'] < gmag_thresh) #| np.array(rows['mag_full_spec'] < gmag_thresh)
        if np.sum(sel) == 0:
            log.info(f"Clustering on {detectid}. No neighbors meet minimum requirements.")
            return cluster_dict
        rows = rows[sel]
        #check lines
        neighbor_ids = rows['detectid']
        neighbor_z = rows[z_col]
        line_scores = np.zeros(len(neighbor_ids))
        line_w_obs = np.zeros(len(neighbor_ids))
        used_in_solution = np.full(len(neighbor_ids),False)
        #wavelength window around the target's observed line
        w1 = target_wave - target_wave_err - delta_lambda
        w2 = target_wave + target_wave_err + delta_lambda
        sel = np.full(len(neighbor_ids),True)
        sp = spectrum.Spectrum() #dummy spectrum for utilities
        #for each neighbor, require exactly one strong line in the window that is
        #consistent with that neighbor's redshift
        for i,id in enumerate(neighbor_ids):
            lrows = ltb.read_where("(detectid==id) & (sn > 4.5) & (score > 5.0) & (wavelength > w1) & (wavelength < w2)")
            if len(lrows) != 1:
                sel[i] = False
                continue
            # if rows[i]['flags'] & (G.DETFLAG_FOLLOWUP_NEEDED | G.DETFLAG_EXT_CAT_QUESTIONABLE_Z | G.DETFLAG_UNCERTAIN_CLASSIFICATION):
            #     #G.DETFLAG_IMAGING_MAG_INCONSISTENT | G.DETFLAG_DEX_GMAG_INCONSISTENT |
            #
            #     sel[i] = False
            #     log.debug(f"Clustering for {detectid}. Rejected {rows[i]['detectid']} due to flags: {rows[i]['flags']:08x}")
            #     continue
            lines = sp.match_lines( lrows[0]['wavelength'],
                                    rows[i][z_col],
                                    z_error=None,#0.001,
                                    z_frac_err=None,#0.017, #1.7% as 1.5% plus some slop
                                    aa_error=G.NOMINAL_WAVELENGTH_MATCH_MAX_OFFSET,#None
                                    allow_absorption=False,
                                    max_rank=3)
            if lines is None or len(lines) == 0:
                log.debug(f"Clustering for {detectid}. Rejected {rows[i]['detectid']} due to no matching lines.")
                sel[i] = False
                continue #this one is inconsistent (probably it is not the strongest line as the HETDEX line)
            line_scores[i] = np.max(lrows['score'])
            line_w_obs[i] = lrows[np.argmax(lrows['score'])]['wavelength']
            used_in_solution[i] = lrows[np.argmax(lrows['score'])]['used'] #NOTE: this might not be a multiline solution
                                                                           #in which case, used can be False
        if np.sum(sel) == 0:
            log.info(f"Clustering on {detectid}. No neighbors meet minimum emission line requirements.")
            return cluster_dict
        #is there a mix of z that would trigger a flag?
        #or are they all (or almost all) at the same redshift?
        std = np.std(neighbor_z[sel])
        avg = np.mean(neighbor_z[sel])
        dict_flag = 0
        use_avg = False
        if std > (0.1 * avg):
            dict_flag |= G.DETFLAG_UNCERTAIN_CLASSIFICATION
        elif np.sum(sel) > 2: #3 or more
            #sloppy but quick
            if (abs(avg - target_z) < 0.1) and (abs(avg - target_z_2) < 0.1) and (abs(avg - target_z_3) < 0.1) :
                log.info(f"Clustering on {detectid}. Neighbors at same average z = {target_z:0.5f}")
                return cluster_dict
            else: #we can use the average even if the brightest neighbor does not provide a good redshift
                use_avg = True
        #now choose the "best" one from those that remain
        rows = rows[sel]
        line_scores = line_scores[sel]
        line_w_obs = line_w_obs[sel]
        used_in_solution = used_in_solution[sel]
        neighbor_id = neighbor_ids[sel]
        #NOTE(review): neighbor_ids below remains the UNFILTERED array; later uses of
        #neighbor_ids[best_idx] (log messages) probably should use neighbor_id -- confirm
        #best could be brightest? or highest score on the matching line?
        brightest = np.argmin(rows['mag_g_wide'])
        best_line = np.argmax(line_scores)
        #best_pz = np.argmax(rows['best_pz'])
        #take brightest unless the best_line does not match and is more than 25% better?
        best_idx = brightest
        #NOTE(review): 'best_line' is an argmax INDEX, so the division below compares a
        #score against an index; this likely was meant to be line_scores[best_line] -- confirm
        if brightest != best_line:
            if line_scores[brightest] / best_line < 0.75:
                best_idx = best_line
        if use_avg and abs(rows[best_idx][z_col] - avg) > 0.1:
            #this is a problem ... this should normally match that average
            #we are going to keep going, but set the flag
            dict_flag |= G.DETFLAG_UNCERTAIN_CLASSIFICATION
            #the assumption is that the many in the average are "wrong" and this best is right
            #could be the many are around the periphery of a bright object
        #check if the z is the same, then don't bother ... basically both positive and differnce of less than 5%
        keep_going = []
        for tz, pz in zip([target_z,target_z_2,target_z_3],[target_pz, target_pz_2, target_pz_3]):
        #for tz in [target_z, target_z_2, target_z_3]:
            if np.isclose(rows[best_idx][z_col],tz, rtol=0.017) or \
                    (2 * abs((rows[best_idx][z_col] - tz)/(rows[best_idx][z_col] + tz)) < 0.05 and \
                     rows[best_idx][z_col] > 0 and tz > 0):
                keep_going.append(False)
                log.debug(f"Clustering on {detectid}. Neighbors at same z = {tz:0.5f} (for one of z_best)")
            else: #redshift is different, but is it an improvement?
                #keep_going.append(True)
                if rows[best_idx][pz_col] < pz:
                    rel_diff_pz = 2 * (pz - rows[best_idx][pz_col]) / (pz + rows[best_idx][pz_col])
                    if rel_diff_pz > 0.1:
                        #not an improvement, BUT, special case for LyA where P(LyA) is low/ambiguous
                        if pz < 0.35 and plya_classification is not None and plya_classification < 0.55 and tz > 1.87 and\
                                rows[best_idx][pz_col] >= 0.1 and rows[best_idx][z_col] < 0.5:
                            keep_going.append(True)
                            log.debug(f"Clustering on {detectid}. Override inferior P(z) for special LyA case.")
                        else:
                            keep_going.append(False)
                            log.info(f"Clustering on {detectid}. Best neighbor {neighbor_ids[best_idx]} Q(z) "
                                     f"not significantly improved. "
                                     f"Target Q(z) {pz}, neighbor Q(z) {rows[best_idx][pz_col]} (for one z_best_pz)")
                    else:
                        keep_going.append(True)
                else:
                    keep_going.append(True)
        if np.count_nonzero(keep_going) == 0:
            log.info(f"Clustering on {detectid}. Neighbors at same z or no improvement in P(z)")
            return cluster_dict
        #check that the neighbor is brighter than the target
        if not use_avg and (target_gmag > 0 and (rows[best_idx]['mag_g_wide'] - target_gmag) > -0.2):
            log.info(f"Clustering on {detectid}. Neighbor not brighter than target.")
            return cluster_dict
        #don't enforce too close ... it could be the same object, just in a slightly better position
        #or it could be the same object from a better shot
        # if utilities.angular_distance(target_ra,target_dec,rows[best_idx]['ra'],rows[best_idx]['dec']) < 0.5:
        #     log.info(f"Clustering on {detectid}. Neighbor too close.")
        #     return cluster_dict
        #check that the emission line IS USED in the solution
        #or if not used, that it is CONSISTENT with the solution
        if not used_in_solution[best_idx]:
            sp = spectrum.Spectrum()
            lines = sp.match_lines(line_w_obs[best_idx],
                                   rows[best_idx][z_col],
                                   z_error=None,#0.001,
                                   z_frac_err=None,#0.017, #1.5% + some slop for 1.7%
                                   aa_error=G.NOMINAL_WAVELENGTH_MATCH_MAX_OFFSET,#None,
                                   allow_absorption=False)
            if lines is None or len(lines) == 0:
                log.info(f"Clustering on {detectid}. Best neighbor {neighbor_ids[best_idx]} line {line_w_obs[best_idx]:0.2f} inconsistent with redshift {rows[best_idx][z_col]:0.4f}."
                         f"No common lines near rest {line_w_obs[best_idx]/(1 + rows[best_idx][z_col]):0.2f}")
                return cluster_dict
        #check that this is an improvement?
        # for pz in ([target_pz, target_pz_2, target_pz_3]):
        #     if rows[best_idx][pz_col] < pz:
        #         if 2*(pz-rows[best_idx][pz_col])/(pz+rows[best_idx][pz_col]) > 0.1:
        #             #not improved
        #             log.info(f"Clustering on {detectid}. Best neighbor {neighbor_ids[best_idx]} Q(z) not significantly improved. "
        #                      f"Target Q(z) {target_pz}, neighbor Q(z) {rows[best_idx][pz_col]}.")
        #             return cluster_dict
        #else they are close enough that this may still be the better choice
        #cannot go from a higher rank line to a lower one??
        #now populate the dictionary
        try:
            plya = rows[best_idx]['plya_classification']
        except:
            plya = rows[best_idx]['plae_classification']
        #NOTE: the 'neighhor_gmag' (sic) key spelling is also used by the .cluster
        #writer below; do not "fix" it without updating all consumers
        cluster_dict = {"detectid": detectid,
                        "neighborid": rows[best_idx]['detectid'], #detectID of the neighbor
                        "neighbor_dist": utilities.angular_distance(target_ra,target_dec,rows[best_idx]['ra'],rows[best_idx]['dec']), #distance to HETDEX ra, dec of neighbor
                        "neighbor_ra": rows[best_idx]['ra'],
                        "neighbor_dec": rows[best_idx]['dec'],
                        "neighhor_gmag": rows[best_idx]['mag_g_wide'],
                        "neighbor_z": rows[best_idx][z_col],
                        "neighbor_qz": rows[best_idx][pz_col],
                        "neighbor_plya": plya,
                        "flag": dict_flag
                        }
        log.info(f"Clustering on {detectid}. Found bright neighbor ({rows[best_idx]['detectid']}) at z = {rows[best_idx][z_col]:0.5f}")
        if outfile:
            with open(f"{detectid}.cluster","w+") as f:
                f.write("# detectid n_z n_qz n_detectid n_ra n_dec n_dist n_gmag n_p(lya)\n")
                f.write(f"{detectid} {cluster_dict['neighbor_z']:0.5f} {cluster_dict['neighbor_qz']:0.2f} {cluster_dict['neighborid']} "
                        f"{cluster_dict['neighbor_ra']:0.5f} {cluster_dict['neighbor_dec']:0.5f} {cluster_dict['neighbor_dist']:0.2f} "
                        f"{cluster_dict['neighhor_gmag']:0.2f} {cluster_dict['neighbor_plya']:0.2f}\n")
    except:
        log.error("Exception! Excpetion in clustering::find_cluster()",exc_info=True)
    return cluster_dict
def cluster_multiple_detectids(detectid_list,elixerh5,outfile=True,delta_arcsec=G.CLUSTER_POS_SEARCH,
                               delta_lambda=G.CLUSTER_WAVE_SEARCH, gmag_thresh=G.CLUSTER_MAG_THRESH):
    """
    Wrapper around find_cluster() that runs over a list of detectids.

    A failure on any single detectid is logged and skipped, so one bad
    detection does not abort the rest of the list.

    :param detectid_list: detectids of the target HETDEX detections
    :param elixerh5: handle to the elixer h5 file to search
    :param outfile: passed through to find_cluster()
    :param delta_arcsec: passed through to find_cluster()
    :param delta_lambda: passed through to find_cluster()
    :param gmag_thresh: passed through to find_cluster()
    :return: list of cluster dictionaries (one per detectid that clustered)
    """
    results = []
    try:
        for det in detectid_list:
            try:
                match = find_cluster(det,elixerh5,outfile,delta_arcsec,delta_lambda,gmag_thresh)
                if match is not None:
                    results.append(match)
            except:
                log.error("Exception! Exception iterating in clustering::cluster_multiple_detectids().",exc_info=True)
    except:
        log.error("Exception! Exception in clustering::cluster_multiple_detectids().",exc_info=True)
    return results
def cluster_all_detectids(elixerh5,outfile=True,delta_arcsec=G.CLUSTER_POS_SEARCH,delta_lambda=G.CLUSTER_WAVE_SEARCH,
                          gmag_thresh=G.CLUSTER_MAG_THRESH):
    """
    Wrapper for find_cluster that runs over EVERY detectid in the h5 file.

    A failure on any single detectid is logged and skipped, so one bad
    detection does not abort the whole run.

    :param elixerh5: handle to the elixer h5 file to search
    :param outfile: passed through to find_cluster()
    :param delta_arcsec: passed through to find_cluster()
    :param delta_lambda: passed through to find_cluster()
    :param gmag_thresh: passed through to find_cluster()
    :return: list of cluster dictionaries (one per detectid that clustered)
    """
    # Fixes: the docstring previously documented a nonexistent 'detectid_list'
    # parameter, and the error-log messages named cluster_multiple_detectids()
    # instead of this function.
    cluster_list = []
    try:
        detectid_list = elixerh5.root.Detections.read(field="detectid")
        for d in detectid_list:
            try:
                cluster_dict = find_cluster(d,elixerh5,outfile,delta_arcsec,delta_lambda,gmag_thresh)
                if cluster_dict is not None:
                    cluster_list.append(cluster_dict)
            except:
                log.error("Exception! Exception iterating in clustering::cluster_all_detectids().",exc_info=True)
    except:
        log.error("Exception! Exception in clustering::cluster_all_detectids().",exc_info=True)
    return cluster_list
|
HETDEXREPO_NAMEelixerPATH_START.@elixer_extracted@elixer-main@elixer@clustering.py@.PATH_END.py
|
{
"filename": "tfsa-2022-051.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2022-051.md",
"type": "Markdown"
}
|
## TFSA-2022-051: Integer overflow in Grappler cost estimation of crop and resize operation
### CVE Number
CVE-2022-23587
### Impact
Under certain scenarios, Grappler component of TensorFlow is vulnerable to an integer overflow during [cost estimation for crop and resize](https://github.com/tensorflow/tensorflow/blob/a1320ec1eac186da1d03f033109191f715b2b130/tensorflow/core/grappler/costs/op_level_cost_estimator.cc#L2621-L2689). Since the cropping parameters are user controlled, a malicious person can trigger undefined behavior.
### Patches
We have patched the issue in GitHub commit [0aaaae6eca5a7175a193696383f582f53adab23f](https://github.com/tensorflow/tensorflow/commit/0aaaae6eca5a7175a193696383f582f53adab23f).
The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
### For more information
Please consult [our security guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for more information regarding the security model and how to contact us with issues and questions.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2022-051.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_Sampling/__init__.py",
"type": "Python"
}
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_Sampling@__init__.py@.PATH_END.py
|
|
{
"filename": "tpu_embedding_v1_checkpoint_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/tpu/tests/tpu_embedding_v1_checkpoint_test.py",
"type": "Python"
}
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPUEmbeddingV0 mid level API on TPU."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.checkpoint import checkpoint as util
from tensorflow.python.compat import v2_compat
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.tpu import tpu_embedding_for_serving
from tensorflow.python.tpu import tpu_embedding_v1
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu.tests import tpu_embedding_base_test
from tensorflow.python.training import checkpoint_utils
class TPUEmbeddingCheckpointTest(tpu_embedding_base_test.TPUEmbeddingBaseTest):
  """Checkpoint save/restore and SavedModel-export tests for TPUEmbeddingV0."""

  def _get_strategy(self):
    """Return the TPU strategy, caching it across tests for this class."""
    # We can cache the strategy as TPUEmbeddingV0 doesn't require
    # reconfiguration to the tpu.
    if hasattr(self, 'strategy'):
      return self.strategy
    return super(TPUEmbeddingCheckpointTest, self)._get_strategy()

  def _create_mid_level(self, optimizer=None):
    """Build a TPUEmbeddingV0 from self.feature_config (SGD by default)."""
    # Create `TPUEmbedding` object.
    if optimizer is None:
      optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
    return tpu_embedding_v1.TPUEmbeddingV0(
        feature_config=self.feature_config, optimizer=optimizer)

  def test_checkpoint_save_and_restore(self):
    """Checkpoint a model initialized to ones, restore it into a second
    model initialized to twos, and verify the first model's table wins."""
    strategy = self._get_strategy()
    with strategy.scope():
      first_mid_level_contents = np.ones((4, 4))
      first_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
      initializer = init_ops_v2.Constant(first_mid_level_contents)
      table = tpu_embedding_v2_utils.TableConfig(
          vocabulary_size=4,
          dim=4,
          initializer=initializer,
          combiner='sum',
          name='table')
      feature_config = (tpu_embedding_v2_utils.FeatureConfig(
          table=table, name='feature'),)
      first_mid_level = tpu_embedding_v1.TPUEmbeddingV0(
          feature_config, first_mid_level_optimizer)
      first_mid_level.build()
    first_checkpoint = util.Checkpoint(model=first_mid_level)
    first_checkpoint.save(self._get_tmpdir('restore', 'save'))
    with strategy.scope():
      # Distinct contents (all twos) so a successful restore is observable.
      second_mid_level_contents = np.ones((4, 4)) * 2
      second_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
      initializer = init_ops_v2.Constant(second_mid_level_contents)
      table = tpu_embedding_v2_utils.TableConfig(
          vocabulary_size=4,
          dim=4,
          initializer=initializer,
          combiner='sum',
          name='table')
      feature_config = (tpu_embedding_v2_utils.FeatureConfig(
          table=table, name='feature'),)
      second_mid_level = tpu_embedding_v1.TPUEmbeddingV0(
          feature_config, second_mid_level_optimizer)
      second_mid_level.build()
    # We restore the checkpoint of our first model into our second model.
    # NOTE: Checkpoint.save numbers its outputs, so the save above is
    # presumably written with the '-1' suffix restored here -- confirm.
    second_checkpoint = util.Checkpoint(model=second_mid_level)
    second_checkpoint.restore(self._get_tmpdir('restore', 'save-1'))
    self.assertAllClose(
        first_mid_level_contents,
        second_mid_level._variables['table']['parameters'].numpy(),
        msg='Second mid level api should have restored the first model values.')

  def test_model_export_cpu(self):
    """Checkpoint a TPU model, restore onto a CPU (serving) mid level,
    export it as a SavedModel, and verify a lookup through the reloaded
    serving signature returns the original table rows."""
    strategy = self._get_strategy()
    with strategy.scope():
      first_mid_level_contents = np.ones((4, 4))
      first_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
      initializer = init_ops_v2.Constant(first_mid_level_contents)
      table = tpu_embedding_v2_utils.TableConfig(
          vocabulary_size=4,
          dim=4,
          initializer=initializer,
          combiner='sum',
          name='table')
      feature_config = (tpu_embedding_v2_utils.FeatureConfig(
          table=table, name='feature'),)
      first_mid_level = tpu_embedding_v1.TPUEmbeddingV0(
          feature_config, first_mid_level_optimizer)
      first_mid_level.build()
    cpu_mid_level_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
    cpu_mid_level = tpu_embedding_for_serving.TPUEmbeddingForServing(
        feature_config, cpu_mid_level_optimizer)
    cpu_mid_level.build()
    tpu_checkpoint = util.Checkpoint(model=first_mid_level)
    tpu_checkpoint.save(self._get_tmpdir('export_cpu', 'save'))
    # We restore the checkpoint of our tpu mid level onto our cpu mid level.
    cpu_checkpoint = util.Checkpoint(model=cpu_mid_level)
    cpu_checkpoint.restore(self._get_tmpdir('export_cpu', 'save-1'))

    @def_function.function
    def serve_tensors(features):
      # CPU-side embedding lookup against the restored tables.
      features = tpu_embedding_for_serving.cpu_embedding_lookup(
          features, None, cpu_mid_level.embedding_tables,
          cpu_mid_level._feature_config)
      return features[0]

    signatures = {
        'serving_default':
            serve_tensors.get_concrete_function((tensor_spec.TensorSpec(
                shape=(2,), dtype=dtypes.int32, name='feature'),))
    }
    save.save(
        cpu_mid_level,
        export_dir=self._get_tmpdir('export_cpu', 'exported_model'),
        signatures=signatures)
    imported = load.load(self._get_tmpdir('export_cpu', 'exported_model'))
    predict_fn = imported.signatures['serving_default']
    input_feature_value = np.array([1, 0])
    input_batch = (constant_op.constant(
        input_feature_value, dtype=dtypes.int32),)
    prediction = predict_fn(*input_batch)['output_0']
    # With a 'sum' combiner over single ids, the lookup just returns the
    # corresponding table rows.
    self.assertAllClose(prediction.numpy(),
                        first_mid_level_contents[input_feature_value])

  @parameterized.parameters(tpu_embedding_v2_utils.SGD,
                            tpu_embedding_v2_utils.Adagrad,
                            tpu_embedding_v2_utils.Adam,
                            tpu_embedding_v2_utils.FTRL)
  def test_check_checkpoint_variable_names_are_same_on_cpu_and_tpu(
      self, optimizer):
    """For each optimizer, verify TPU and CPU mid levels write checkpoints
    with identical variable names (and shapes)."""
    # Reinitialize the TPU so that we can re-initialize the embeddings with the
    # given optimizer.
    if optimizer != tpu_embedding_v2_utils.SGD:
      self.skip_if_oss()
    strategy = self._get_strategy()
    with strategy.scope():
      first_mid_level_contents = np.ones((4, 4))
      first_mid_level_optimizer = optimizer(learning_rate=0.1)
      initializer = init_ops_v2.Constant(first_mid_level_contents)
      table = tpu_embedding_v2_utils.TableConfig(
          vocabulary_size=4,
          dim=4,
          initializer=initializer,
          combiner='sum',
          name='table')
      feature_config = (tpu_embedding_v2_utils.FeatureConfig(
          table=table, name='feature'),)
      first_mid_level = tpu_embedding_v1.TPUEmbeddingV0(
          feature_config, first_mid_level_optimizer)
      first_mid_level.build()
    cpu_mid_level_optimizer = optimizer(learning_rate=0.1)
    cpu_mid_level = tpu_embedding_for_serving.TPUEmbeddingForServing(
        feature_config, cpu_mid_level_optimizer)
    cpu_mid_level.build()
    tpu_checkpoint = util.Checkpoint(model=first_mid_level)
    tpu_checkpoint.save(self._get_tmpdir('save-tpu', 'save'))
    tpu_variables = checkpoint_utils.list_variables(
        self._get_tmpdir('save-tpu'))
    cpu_checkpoint = util.Checkpoint(model=cpu_mid_level)
    cpu_checkpoint.save(self._get_tmpdir('save-cpu', 'save'))
    cpu_variables = checkpoint_utils.list_variables(
        self._get_tmpdir('save-cpu'))
    self.assertAllEqual(tpu_variables, cpu_variables)
if __name__ == '__main__':
  # Run under TF2 behavior regardless of how the test binary was built.
  v2_compat.enable_v2_behavior()
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@tpu@tests@tpu_embedding_v1_checkpoint_test.py@.PATH_END.py
|
{
"filename": "test_auto_background.py",
"repo_name": "JohannesBuchner/BXA",
"repo_path": "BXA_extracted/BXA-master/examples/sherpa/test_auto_background.py",
"type": "Python"
}
|
import json
import numpy as np
import sherpa.astro.ui as shp
from bxa.sherpa.background.pca import auto_background
def test_chandra_pcabkg():
    """Fit a line-free PCA background to the Chandra spectrum and compare
    the fitted parameters against the stored JSON reference."""
    _reset_sherpa()
    _load_chandra_data(emin=0.5, emax=8)
    filename = "chandra/179_pcabkg.json"
    bkgmodel = auto_background(1, max_lines=0)
    # _save_pca_params(bkgmodel, filename)
    _test_pca_params(filename, bkgmodel)
def test_chandra_detect_line():
    """auto_background must place its single line on the channel where a
    count excess was injected into the Chandra background."""
    _reset_sherpa()
    bkgdata = _load_chandra_data(emin=0.5, emax=8)
    channel = _inject_count_excess(bkgdata)
    bkgmodel = auto_background(1, max_lines=1)
    _check_injected_line(bkgdata, bkgmodel, channel)
def _load_chandra_data(emin, emax):
    """Load the Chandra spectrum, notice [emin, emax] keV, return the background."""
    shp.load_pha("chandra/179.pi")
    _ungroup_and_ignore_bkg(emin, emax)
    return shp.get_bkg()
def test_swift_pcabkg():
    """Fit a line-free PCA background to the Swift spectrum and compare
    the fitted parameters against the stored JSON reference."""
    _reset_sherpa()
    _load_swift_data(emin=0.5, emax=5.0)
    filename = "swift/interval0pc_pcabkg.json"
    bkgmodel = auto_background(1, max_lines=0)
    # _save_pca_params(bkgmodel, filename)
    _test_pca_params(filename, bkgmodel)
def test_swift_detect_line():
    """auto_background must place its single line on the channel where a
    count excess was injected into the Swift background."""
    _reset_sherpa()
    bkgdata = _load_swift_data(emin=0.5, emax=5.0)
    channel = _inject_count_excess(bkgdata)
    bkgmodel = auto_background(1, max_lines=1)
    _check_injected_line(bkgdata, bkgmodel, channel)
def _load_swift_data(emin, emax):
    """Load the Swift spectrum, notice [emin, emax] keV, return the background."""
    shp.load_pha("swift/interval0pc.pi")
    _ungroup_and_ignore_bkg(emin, emax)
    return shp.get_bkg()
def test_xmmpn_pcabkg():
    """Fit a line-free PCA background to the XMM pn spectrum and compare
    the fitted parameters against the stored JSON reference."""
    _reset_sherpa()
    _load_xmmpn_data(emin=0.2, emax=10.0)
    filename = "xmm/pn_pcabkg.json"
    bkgmodel = auto_background(1, max_lines=0)
    # _save_pca_params(bkgmodel, filename)
    _test_pca_params(filename, bkgmodel)
def test_xmmpn_detect_line():
    """auto_background must place its single line on the channel where a
    count excess was injected into the XMM pn background."""
    _reset_sherpa()
    bkgdata = _load_xmmpn_data(emin=0.2, emax=10.0)
    channel = _inject_count_excess(bkgdata)
    bkgmodel = auto_background(1, max_lines=1)
    _check_injected_line(bkgdata, bkgmodel, channel)
def _load_xmmpn_data(emin, emax):
    """Load XMM pn source+background spectra and responses, notice
    [emin, emax] keV, return the background."""
    shp.load_pha("xmm/pn_spec.fits")
    shp.load_bkg("xmm/pn_backspec.fits")
    # Source and background share the same RMF/ARF.
    shp.load_rmf("xmm/pn.rmf")
    shp.load_arf("xmm/pn.arf")
    shp.load_bkg_rmf("xmm/pn.rmf")
    shp.load_bkg_arf("xmm/pn.arf")
    _ungroup_and_ignore_bkg(emin, emax)
    return shp.get_bkg()
def test_xmmmos_pcabkg():
    """Fit a line-free PCA background to the XMM MOS spectrum and compare
    the fitted parameters against the stored JSON reference."""
    _reset_sherpa()
    _load_xmmmos_data(emin=0.2, emax=10.0)
    filename = "xmm/mos_pcabkg.json"
    bkgmodel = auto_background(1, max_lines=0)
    # _save_pca_params(bkgmodel, filename)
    _test_pca_params(filename, bkgmodel)
def test_xmmmos_detect_line():
    """auto_background must place its single line on the channel where a
    count excess was injected into the XMM MOS background."""
    _reset_sherpa()
    bkgdata = _load_xmmmos_data(emin=0.2, emax=10.0)
    channel = _inject_count_excess(bkgdata)
    bkgmodel = auto_background(1, max_lines=1)
    _check_injected_line(bkgdata, bkgmodel, channel)
def _load_xmmmos_data(emin, emax):
    """Load XMM MOS source+background spectra and responses, notice
    [emin, emax] keV, return the background."""
    shp.load_pha("xmm/mos_spec.fits")
    shp.load_bkg("xmm/mos_backspec.fits")
    # Source and background share the same RMF/ARF.
    shp.load_rmf("xmm/mos.rmf")
    shp.load_arf("xmm/mos.arf")
    shp.load_bkg_rmf("xmm/mos.rmf")
    shp.load_bkg_arf("xmm/mos.arf")
    _ungroup_and_ignore_bkg(emin, emax)
    return shp.get_bkg()
def test_nustar_pcabkg():
    """Fit a line-free PCA background to the NuSTAR spectrum and compare
    the fitted parameters against the stored JSON reference."""
    _reset_sherpa()
    _load_nustar_data(emin=5.0, emax=77.0)
    filename = "nustar/nu60360003002A01_pcabkg.json"
    bkgmodel = auto_background(1, max_lines=0)
    # _save_pca_params(bkgmodel, filename)
    _test_pca_params(filename, bkgmodel)
def test_nustar_detect_line():
    """auto_background must place its single line on the channel where a
    count excess was injected into the NuSTAR background."""
    _reset_sherpa()
    bkgdata = _load_nustar_data(emin=5.0, emax=77.0)
    channel = _inject_count_excess(bkgdata)
    bkgmodel = auto_background(1, max_lines=1)
    _check_injected_line(bkgdata, bkgmodel, channel)
def _load_nustar_data(emin, emax):
    """Load the NuSTAR spectrum, notice [emin, emax] keV, return the background."""
    shp.load_pha("nustar/nu60360003002A01_sr_grp.pha")
    _ungroup_and_ignore_bkg(emin, emax)
    return shp.get_bkg()
def test_erosita_pcabkg():
    """Fit a line-free PCA background to the eROSITA spectrum and compare
    the fitted parameters against the stored JSON reference."""
    _reset_sherpa()
    _load_erosita_data(emin=0.2, emax=8.0)
    filename = "erosita/em01_182117_020_pcabkg.json"
    bkgmodel = auto_background(1, max_lines=0)
    _test_pca_params(filename, bkgmodel)
    # _save_pca_params(bkgmodel, filename)
def test_erosita_detect_line():
    """auto_background must place its single line on the channel where a
    count excess was injected into the eROSITA background."""
    _reset_sherpa()
    bkgdata = _load_erosita_data(emin=0.2, emax=8.0)
    channel = _inject_count_excess(bkgdata)
    bkgmodel = auto_background(1, max_lines=1)
    _check_injected_line(bkgdata, bkgmodel, channel)
def _load_erosita_data(emin, emax):
    """Load the eROSITA spectrum, notice [emin, emax] keV, return the background."""
    shp.load_pha("erosita/em01_182117_020_SourceSpec_00005_c010.fits")
    _ungroup_and_ignore_bkg(emin, emax)
    return shp.get_bkg()
def test_suzaku_pcabkg():
    """Fit a line-free PCA background to the Suzaku spectrum and compare
    the fitted parameters against the stored JSON reference."""
    _reset_sherpa()
    _load_suzaku_data(emin=0.2, emax=8.0)
    filename = "suzaku/ae900001010xi0_0_3x3n000a_pcabkg.json"
    bkgmodel = auto_background(1, max_lines=0)
    _test_pca_params(filename, bkgmodel)
    # _save_pca_params(bkgmodel, filename)
def test_suzaku_detect_line():
    """auto_background must place its single line on the channel where a
    count excess was injected into the Suzaku background."""
    _reset_sherpa()
    bkgdata = _load_suzaku_data(emin=0.2, emax=8.0)
    channel = _inject_count_excess(bkgdata)
    bkgmodel = auto_background(1, max_lines=1)
    _check_injected_line(bkgdata, bkgmodel, channel)
def _load_suzaku_data(emin, emax):
    """Load the Suzaku spectrum, notice [emin, emax] keV, return the background."""
    shp.load_pha("suzaku/ae900001010xi0_0_3x3n000a_sr.pi")
    _ungroup_and_ignore_bkg(emin, emax)
    return shp.get_bkg()
def _reset_sherpa():
    """Clear all global sherpa session state and select the cstat statistic."""
    shp.clean()
    shp.set_stat("cstat")
def _ungroup_and_ignore_bkg(emin, emax):
    """Ungroup the background and restrict the noticed range to [emin, emax] keV."""
    shp.ungroup(bkg_id=1)
    shp.notice(bkg_id=1)
    shp.ignore(lo=None, hi=emin, bkg_id=1)
    shp.ignore(lo=emax, hi=None, bkg_id=1)
    # Set dummy source model for auto_background
    shp.set_model("powlaw1d.dummy")
def _save_pca_params(bkgmodel, filename):
pca_params = {p.name: p.val for p in bkgmodel.pars}
with open(filename, "w") as fp:
json.dump(pca_params, fp, indent=2)
def _load_pca_params(filename):
with open(filename, "r") as fp:
return json.load(fp)
def _test_pca_params(filename, bkgmodel):
    """Compare the fitted PCA parameters of *bkgmodel* against the stored
    JSON reference in *filename*."""
    test_pca_params = _load_pca_params(filename)
    bkgdata = shp.get_bkg()
    # The PCA model must cover every detector channel of the background.
    assert len(bkgdata.channel) == bkgmodel.rmf.detchans
    assert len(bkgmodel.pars) == len(test_pca_params)
    for p in bkgmodel.pars:
        assert p.name in test_pca_params
        # Loose tolerances absorb small fit-to-fit variation.
        assert np.isclose(p.val, test_pca_params[p.name], rtol=0.05, atol=0.02), (p.name, p.val, test_pca_params[p.name])
def _inject_count_excess(bkgdata):
# Insert a high excess of counts in the middle
# of the noticed energy range
cmin = bkgdata.channel[bkgdata.mask][0]
cmax = bkgdata.channel[bkgdata.mask][-1]
c = int((cmin + cmax) / 2)
c0 = int(bkgdata.channel[0])
bkgdata.counts[c - c0] = 100 * bkgdata.counts.max()
print(f"Injecting {bkgdata.counts[c - c0]:.0f} counts at {bkgdata.x[c - c0]:.2f} keV [channel {c}]")
return c
def _check_injected_line(bkgdata, bkgmodel, channel_injected):
for p in bkgmodel.pars:
if p.name == "LineE":
first_line_energy = p.val
break
first_line_channel = bkgdata.channel[0] + np.argmin(np.absolute(bkgdata.x - first_line_energy))
assert int(first_line_channel) == channel_injected
def run_tests():
    """Run the continuum and line-detection tests for each enabled instrument."""
    instruments = [
        "chandra",
        "swift",
        "xmmpn",
        "xmmmos",
        # "nustar",
        # "erosita",
        # "suzaku",
        # "rxtepca",
        # "rxtehexa",
        # "rxtehexb",
    ]
    for instrument in instruments:
        # Continuum-only fit first, then line detection, as before.
        for suffix in ("pcabkg", "detect_line"):
            globals()[f"test_{instrument}_{suffix}"]()
if __name__ == "__main__":
    # Allow running the full suite directly without pytest.
    run_tests()
|
JohannesBuchnerREPO_NAMEBXAPATH_START.@BXA_extracted@BXA-master@examples@sherpa@test_auto_background.py@.PATH_END.py
|
{
"filename": "particle_filters.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/data_objects/particle_filters.py",
"type": "Python"
}
|
import copy
from contextlib import contextmanager
from yt.fields.field_info_container import NullFunc, TranslationFunc
from yt.funcs import mylog
from yt.utilities.exceptions import YTIllDefinedFilter
# One to one mapping
filter_registry: dict[str, "ParticleFilter"] = {}
# Minimal stand-in field-info object used while applying a filter; it only
# needs to look like a particle field to dobj._field_type_state.
class DummyFieldInfo:
    particle_type = True
    sampling_type = "particle"
dfi = DummyFieldInfo()
class ParticleFilter:
    """A named filter selecting a subset of particles of ``filtered_type``
    via ``function``; ``requires`` lists the fields the function reads."""
    def __init__(self, name, function, requires, filtered_type):
        self.name = name
        self.function = function
        self.requires = requires[:]  # defensive copy
        self.filtered_type = filtered_type
    @contextmanager
    def apply(self, dobj):
        """Context manager: evaluate the filter on *dobj*, let the caller read
        fields inside the ``with`` body, then copy the filtered rows into
        fields named after this filter on exit."""
        with dobj._chunked_read(dobj._current_chunk):
            with dobj._field_type_state(self.filtered_type, dfi):
                # We won't be storing the field data from the whole read, so we
                # start by filtering now.
                filter = self.function(self, dobj)
                yield
                # Retain a reference here, and we'll filter all appropriate fields
                # later.
                fd = dobj.field_data
                for f, tr in fd.items():
                    if f[0] != self.filtered_type:
                        continue
                    # NOTE(review): raises only when BOTH the full shape and the
                    # leading axis disagree — fields matching along axis 0 pass
                    # through; confirm this permissiveness is intended.
                    if tr.shape != filter.shape and tr.shape[0] != filter.shape[0]:
                        raise YTIllDefinedFilter(self, tr.shape, filter.shape)
                    else:
                        d = tr[filter]
                    dobj.field_data[self.name, f[1]] = d
    def available(self, field_list):
        # Note that this assumes that all the fields in field_list have the
        # same form as the 'requires' attributes. This won't be true if the
        # fields are implicitly "all" or something.
        return all((self.filtered_type, field) in field_list for field in self.requires)
    def missing(self, field_list):
        """Return the required (type, field) pairs absent from *field_list*."""
        return [
            (self.filtered_type, field)
            for field in self.requires
            if (self.filtered_type, field) not in field_list
        ]
    def wrap_func(self, field_name, old_fi):
        """Clone *old_fi* as a field-info entry for this filter's particle type."""
        new_fi = copy.copy(old_fi)
        new_fi.name = (self.name, field_name[1])
        if old_fi._function == NullFunc:
            new_fi._function = TranslationFunc(old_fi.name)
        # Marking the field as inherited
        new_fi._inherited_particle_filter = True
        return new_fi
def add_particle_filter(name, function, requires=None, filtered_type="all"):
    r"""Create a new particle filter in the global namespace of filters

    A particle filter is a short name that corresponds to an algorithm for
    filtering a set of particles into a subset. This is useful for creating new
    particle types based on a cut on a particle field, such as particle mass, ID
    or type. After defining a new filter, it still needs to be added to the
    dataset by calling
    :func:`~yt.data_objects.static_output.add_particle_filter`.

    .. note::
       Alternatively, you can make use of the
       :func:`~yt.data_objects.particle_filters.particle_filter` decorator to
       define a new particle filter.

    Parameters
    ----------
    name : string
        The name of the particle filter. New particle fields with particle type
        set by this name will be added to any dataset that enables this particle
        filter.
    function : reference to a function
        The function that defines the particle filter. The function should
        accept two arguments: a reference to a particle filter object and a
        reference to an abstract yt data object. See the example below.
    requires : a list of field names
        A list of field names required by the particle filter definition.
    filtered_type : string
        The name of the particle type to be filtered.

    Examples
    --------
    >>> import yt
    >>> def _stars(pfilter, data):
    ...     return data[(pfilter.filtered_type, "particle_type")] == 2
    >>> yt.add_particle_filter(
    ...     "stars", function=_stars, filtered_type="all", requires=["particle_type"]
    ... )
    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    >>> ds.add_particle_filter("stars")
    >>> ad = ds.all_data()
    >>> print(ad["stars", "particle_mass"])
    [ 1.68243760e+38  1.65690882e+38  1.65813321e+38 ...,  2.04238266e+38
      2.04523901e+38  2.04770938e+38] g
    """
    if requires is None:
        requires = []
    # Renamed from "filter" to avoid shadowing the builtin of the same name.
    new_filter = ParticleFilter(name, function, requires, filtered_type)
    if filter_registry.get(name, None) is not None:
        mylog.warning("The %s particle filter already exists. Overriding.", name)
    filter_registry[name] = new_filter
def particle_filter(name=None, requires=None, filtered_type="all"):
    r"""A decorator that adds a new particle filter

    A particle filter is a short name that corresponds to an algorithm for
    filtering a set of particles into a subset. This is useful for creating new
    particle types based on a cut on a particle field, such as particle mass, ID
    or type.

    .. note::
       Alternatively, you can make use of the
       :func:`~yt.data_objects.particle_filters.add_particle_filter` function
       to define a new particle filter using a more declarative syntax.

    Parameters
    ----------
    name : string
        The name of the particle filter. New particle fields with particle type
        set by this name will be added to any dataset that enables this particle
        filter. If not set, the name will be inferred from the name of the
        filter function.
    requires : a list of field names
        A list of field names required by the particle filter definition.
    filtered_type : string
        The name of the particle type to be filtered.

    Examples
    --------
    >>> import yt
    >>> # define a filter named "stars"
    >>> @yt.particle_filter(requires=["particle_type"], filtered_type="all")
    ... def stars(pfilter, data):
    ...     return data[(pfilter.filtered_type, "particle_type")] == 2
    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    >>> ds.add_particle_filter("stars")
    >>> ad = ds.all_data()
    >>> print(ad["stars", "particle_mass"])
    [ 1.68243760e+38  1.65690882e+38  1.65813321e+38 ...,  2.04238266e+38
      2.04523901e+38  2.04770938e+38] g
    """
    def wrapper(function):
        used_name = function.__name__ if name is None else name
        add_particle_filter(used_name, function, requires, filtered_type)
        # Bug fix: previously the wrapper returned add_particle_filter's
        # result (None), silently rebinding the decorated name to None.
        # Return the function unchanged so the decorated name stays usable.
        return function
    return wrapper
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@data_objects@particle_filters.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/heatmap/legendgrouptitle/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``heatmap.legendgrouptitle.font.family``."""

    def __init__(
        self,
        plotly_name="family",
        parent_name="heatmap.legendgrouptitle.font",
        **kwargs,
    ):
        # Schema defaults; explicit kwargs passed by callers take precedence.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("strict", True)
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@heatmap@legendgrouptitle@font@_family.py@.PATH_END.py
|
{
"filename": "_marker.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/scatterpolar/unselected/_marker.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
    """Marker style (color, opacity, size) applied to unselected points of a
    scatterpolar trace when a selection exists."""

    # class properties
    _parent_path_str = "scatterpolar.unselected"
    _path_str = "scatterpolar.unselected.marker"
    _valid_props = {"color", "opacity", "size"}

    @property
    def color(self):
        """
        Sets the marker color of unselected points, applied only when a
        selection exists.

        The 'color' property is a color and may be specified as a hex
        string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or hsv/hsva
        string, or a named CSS color.

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def opacity(self):
        """
        Sets the marker opacity of unselected points, applied only when
        a selection exists.

        The 'opacity' property is a number and may be specified as an
        int or float in the interval [0, 1].

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    @property
    def size(self):
        """
        Sets the marker size of unselected points, applied only when a
        selection exists.

        The 'size' property is a number and may be specified as an
        int or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the marker color of unselected points, applied
            only when a selection exists.
        opacity
            Sets the marker opacity of unselected points, applied
            only when a selection exists.
        size
            Sets the marker size of unselected points, applied only
            when a selection exists.
        """

    def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatterpolar.u
            nselected.Marker`
        color
            Sets the marker color of unselected points, applied
            only when a selection exists.
        opacity
            Sets the marker opacity of unselected points, applied
            only when a selection exists.
        size
            Sets the marker size of unselected points, applied only
            when a selection exists.

        Returns
        -------
        Marker
        """
        super().__init__("marker")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a private dict we may mutate.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatterpolar.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.unselected.Marker`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate declared properties; explicit keyword arguments take
        # precedence over entries found in ``arg``.
        for prop, explicit in (("color", color), ("opacity", opacity), ("size", size)):
            candidate = arg.pop(prop, None)
            if explicit is not None:
                candidate = explicit
            if candidate is not None:
                self[prop] = candidate

        # Process unknown kwargs, then reset skip_invalid.
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@scatterpolar@unselected@_marker.py@.PATH_END.py
|
{
"filename": "_hovertext.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/_hovertext.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``hovertext`` attribute of bar traces."""

    def __init__(self, plotly_name="hovertext", parent_name="bar", **kwargs):
        # Schema defaults; explicit kwargs passed by callers take precedence.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "style")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@_hovertext.py@.PATH_END.py
|
{
"filename": "_showgrid.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/geo/lonaxis/_showgrid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowgridValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for ``showgrid`` on ``layout.geo.lonaxis``."""

    def __init__(
        self, plotly_name="showgrid", parent_name="layout.geo.lonaxis", **kwargs
    ):
        # Schema default; explicit kwargs passed by callers take precedence.
        kwargs.setdefault("edit_type", "plot")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@geo@lonaxis@_showgrid.py@.PATH_END.py
|
{
"filename": "params.py",
"repo_name": "dtamayo/reboundx",
"repo_path": "reboundx_extracted/reboundx-main/reboundx/params.py",
"type": "Python"
}
|
import rebound
import sys
if sys.version_info[:2] >= (3, 8):
from collections.abc import MutableMapping
else:
from collections import MutableMapping
from .extras import Param, Node, Force, Operator, Extras, REBX_CTYPES
from . import clibreboundx
from ctypes import byref, c_double, c_int, c_int32, c_int64, c_uint, c_uint32, c_longlong, c_char_p, POINTER, cast
from ctypes import c_void_p, memmove, sizeof, addressof
from rebound import hash as rebhash
class Params(MutableMapping):
    """Dictionary-like access to REBOUNDx parameters attached to a particle,
    force or operator; storage and lookup are delegated to the C library."""
    def __init__(self, parent):
        self.verbose = 0 # set to 1 to diagnose problems
        self.parent = parent # Particle, Force, Operator. Will work with any ctypes.Structure with appropriate ._sim and .ap fields
        offset = type(parent).ap.offset # Need this hack to get address of initially NULL ap ptr. See my stackoverflow
        self.ap = (c_void_p).from_buffer(parent, offset)
        # We want to be able to access params from objects like particles, objects etc. These must somehow point back to rebx for memory management
        # We can't add a rebx pointer in rebound.Particle, but can use its sim pointer. So we add a sim pointer to all rebx objects that need params.
        extrasvp = parent._sim.contents.extras
        if not extrasvp: # .extras = None
            raise AttributeError("Need to attach reboundx.Extras instance to simulation before setting params.")
        else:
            self.rebx = cast(extrasvp, POINTER(Extras))
    def __getitem__(self, key):
        """Look up parameter *key*, casting the C pointer to its registered type."""
        param_type = clibreboundx.rebx_get_type(self.rebx, c_char_p(key.encode('ascii')))
        ctype = REBX_CTYPES[param_type]
        if ctype == None:
            raise AttributeError("REBOUNDx Error: Parameter '{0}' not found in REBOUNDx. Need to register it first.".format(key))
        clibreboundx.rebx_get_param.restype = c_void_p
        valptr = clibreboundx.rebx_get_param(self.rebx, self.ap, c_char_p(key.encode('ascii')))
        if ctype == c_void_p: # Don't know how to cast it, so return for user to cast
            if valptr is None:
                raise AttributeError("REBOUNDx Error: Parameter '{0}' not found on object.".format(key))
            return valptr
        elif ctype == rebound.Vec3d:
            # Special case
            valptr = cast(valptr, POINTER(rebound.Vec3dBasic))
        else:
            valptr = cast(valptr, POINTER(ctype))
        try:
            val = valptr.contents.value # return python int or float rather than c_int or c_double
        except AttributeError:
            val = valptr.contents # Structure, return ctypes object
            if isinstance(val, rebound.Vec3dBasic):
                return rebound.Vec3d(val)
        except ValueError: # NULL access
            raise AttributeError("REBOUNDx Error: Parameter '{0}' not found on object.".format(key))
        return val
    def __setitem__(self, key, value):
        """Set parameter *key*, dispatching on its registered C type.

        The if-chain is exclusive in practice: ``ctype`` matches at most one
        branch, so at most one setter runs per call."""
        param_type = clibreboundx.rebx_get_type(self.rebx, c_char_p(key.encode('ascii')))
        ctype = REBX_CTYPES[param_type]
        if ctype == None:
            raise AttributeError("REBOUNDx Error: Parameter '{0}' not found in REBOUNDx. Need to register it first.".format(key))
        if ctype == c_double:
            clibreboundx.rebx_set_param_double(self.rebx, byref(self.ap), c_char_p(key.encode('ascii')), c_double(value))
        if ctype == c_int:
            clibreboundx.rebx_set_param_int(self.rebx, byref(self.ap), c_char_p(key.encode('ascii')), c_int(value))
        if ctype == c_uint32:
            clibreboundx.rebx_set_param_uint32(self.rebx, byref(self.ap), c_char_p(key.encode('ascii')), value)
        if ctype == rebound.Vec3d:
            clibreboundx.rebx_set_param_vec3d(self.rebx, byref(self.ap), c_char_p(key.encode('ascii')), rebound.Vec3d(value)._vec3d)
        if ctype == Force:
            if not isinstance(value, Force):
                raise AttributeError("REBOUNDx Error: Parameter '{0}' must be assigned a Force object.".format(key))
            clibreboundx.rebx_set_param_pointer(self.rebx, byref(self.ap), c_char_p(key.encode('ascii')), byref(value))
        if ctype == rebound.Orbit:
            if not isinstance(value, rebound.Orbit):
                raise AttributeError("REBOUNDx Error: Parameter '{0}' must be assigned an Orbit object.".format(key))
            clibreboundx.rebx_set_param_pointer(self.rebx, byref(self.ap), c_char_p(key.encode('ascii')), byref(value))
        if ctype == rebound.ODE:
            if not isinstance(value, rebound.ODE):
                raise AttributeError("REBOUNDx Error: Parameter '{0}' must be assigned a rebound.ODE object.".format(key))
            clibreboundx.rebx_set_param_pointer(self.rebx, byref(self.ap), c_char_p(key.encode('ascii')), byref(value))
        if ctype == c_void_p:
            clibreboundx.rebx_set_param_pointer(self.rebx, byref(self.ap), c_char_p(key.encode('ascii')), byref(value))
    def __delitem__(self, key):
        raise AttributeError("REBOUNDx Error: Removing particle params not implemented.")
    def __iter__(self):
        raise AttributeError("REBOUNDx Error: Iterator for params not implemented.")
    def __len__(self):
        """Return the number of parameters attached to the parent object."""
        clibreboundx.rebx_len.restype = c_int
        return clibreboundx.rebx_len(self.ap)
|
dtamayoREPO_NAMEreboundxPATH_START.@reboundx_extracted@reboundx-main@reboundx@params.py@.PATH_END.py
|
{
"filename": "test_spectools.py",
"repo_name": "danielrd6/ifscube",
"repo_path": "ifscube_extracted/ifscube-master/tests/test_spectools.py",
"type": "Python"
}
|
import numpy as np
from astropy import units
from ifscube.elprofile import gauss_vel
from ifscube import spectools
def test_find_intermediary_value():
    """find_intermediary_value should return the x position at a given y."""
    x = np.array([0, 1, 2, 3, 4, 5])
    y = np.array([10, 20, 30, 40, 50])
    xv = spectools.find_intermediary_value(x, y, 31)
    # presumably linear interpolation between y[2]=30 and y[3]=40 -> x=2.1;
    # confirm against the spectools implementation.
    assert xv == 2.1
def test_velocity_width_oversample():
    """Velocity width measured on an oversampled coarse grid should agree
    with the width measured on a natively fine grid to within 0.1%."""
    rest_wavelength = units.Quantity(6500, 'angstrom')
    p = np.array([1, -5000, 2000])
    wavelength = np.linspace(5000, 8000, 15000)
    g = gauss_vel(wavelength, rest_wavelength.value, p)
    obs = np.random.normal(g, .05)
    vw_natural = spectools.velocity_width(
        wavelength=wavelength, model=g, data=obs, rest_wavelength=rest_wavelength, oversample=1,
        fractional_pixels=True)
    # Coarse grid with heavy oversampling should reproduce the same width.
    wavelength = np.linspace(5000, 8000, 100)
    g = gauss_vel(wavelength, rest_wavelength.value, p)
    obs = np.random.normal(g, .05)
    vw_oversampled = spectools.velocity_width(
        wavelength=wavelength, model=g, data=obs, rest_wavelength=rest_wavelength, oversample=15,
        fractional_pixels=True)
    # Bug fix: without abs() the assertion also passed when the oversampled
    # width was arbitrarily *smaller* than the natural one.
    assert abs((vw_oversampled['model_velocity_width'] / vw_natural['model_velocity_width']) - 1.0) < 1e-3
|
danielrd6REPO_NAMEifscubePATH_START.@ifscube_extracted@ifscube-master@tests@test_spectools.py@.PATH_END.py
|
{
"filename": "DcxImagePlugin.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pillow/py3/PIL/DcxImagePlugin.py",
"type": "Python"
}
|
#
# The Python Imaging Library.
# $Id$
#
# DCX file handling
#
# DCX is a container file format defined by Intel, commonly used
# for fax applications. Each DCX file consists of a directory
# (a list of file offsets) followed by a set of (usually 1-bit)
# PCX files.
#
# History:
# 1995-09-09 fl Created
# 1996-03-20 fl Properly derived from PcxImageFile.
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 2002-07-30 fl Fixed file handling
#
# Copyright (c) 1997-98 by Secret Labs AB.
# Copyright (c) 1995-96 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image
from ._binary import i32le as i32
from .PcxImagePlugin import PcxImageFile
MAGIC = 0x3ADE68B1  # QUIZ: what's this value, then?
def _accept(prefix):
    # A DCX file starts with this 4-byte little-endian magic number.
    return len(prefix) >= 4 and i32(prefix) == MAGIC
##
# Image plugin for the Intel DCX format.
class DcxImageFile(PcxImageFile):
    """Multi-frame DCX container: a directory of offsets to embedded PCX images."""
    format = "DCX"
    format_description = "Intel DCX"
    # Keep the file handle open after load() so seek() can read later frames.
    _close_exclusive_fp_after_loading = False
    def _open(self):
        """Parse the DCX header and offset directory, then select frame 0."""
        # Header
        s = self.fp.read(4)
        if not _accept(s):
            msg = "not a DCX file"
            raise SyntaxError(msg)
        # Component directory: up to 1024 file offsets, terminated by 0.
        self._offset = []
        for i in range(1024):
            offset = i32(self.fp.read(4))
            if not offset:
                break
            self._offset.append(offset)
        self._fp = self.fp
        self.frame = None
        self.n_frames = len(self._offset)
        self.is_animated = self.n_frames > 1
        self.seek(0)
    def seek(self, frame):
        """Select *frame* (0-based) and re-open the embedded PCX at its offset."""
        if not self._seek_check(frame):
            return
        self.frame = frame
        self.fp = self._fp
        self.fp.seek(self._offset[frame])
        PcxImageFile._open(self)
    def tell(self):
        """Return the current frame index."""
        return self.frame
def tell(self):
return self.frame
# Register the DCX plugin with PIL's Image machinery.
Image.register_open(DcxImageFile.format, DcxImageFile, _accept)
Image.register_extension(DcxImageFile.format, ".dcx")
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pillow@py3@PIL@DcxImagePlugin.py@.PATH_END.py
|
{
"filename": "discrete.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/pymc/distributions/discrete.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import numpy as np
import pytensor.tensor as pt
from pytensor.tensor import TensorConstant
from pytensor.tensor.random.basic import (
ScipyRandomVariable,
bernoulli,
betabinom,
binomial,
categorical,
geometric,
hypergeometric,
nbinom,
poisson,
uniform,
)
from pytensor.tensor.random.utils import normalize_size_param
from scipy import stats
import pymc as pm
from pymc.distributions.dist_math import (
betaln,
binomln,
check_icdf_parameters,
check_icdf_value,
check_parameters,
factln,
log_diff_normal_cdf,
logpow,
normal_lccdf,
normal_lcdf,
)
from pymc.distributions.distribution import Discrete, SymbolicRandomVariable
from pymc.distributions.shape_utils import implicit_size_from_params, rv_size_is_none
from pymc.logprob.basic import logcdf, logp
from pymc.math import sigmoid
__all__ = [
"Bernoulli",
"BetaBinomial",
"Binomial",
"Categorical",
"DiscreteUniform",
"DiscreteWeibull",
"Geometric",
"HyperGeometric",
"NegativeBinomial",
"OrderedLogistic",
"OrderedProbit",
"Poisson",
]
from pymc.pytensorf import normalize_rng_param
class Binomial(Discrete):
    R"""
    Binomial log-likelihood.
    The discrete probability distribution of the number of successes
    in a sequence of n independent yes/no experiments, each of which
    yields success with probability p.
    The pmf of this distribution is
    .. math:: f(x \mid n, p) = \binom{n}{x} p^x (1-p)^{n-x}
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        import arviz as az
        plt.style.use('arviz-darkgrid')
        x = np.arange(0, 22)
        ns = [10, 17]
        ps = [0.5, 0.7]
        for n, p in zip(ns, ps):
            pmf = st.binom.pmf(x, n, p)
            plt.plot(x, pmf, '-o', label='n = {}, p = {}'.format(n, p))
        plt.xlabel('x', fontsize=14)
        plt.ylabel('f(x)', fontsize=14)
        plt.legend(loc=1)
        plt.show()
    ======== ==========================================
    Support  :math:`x \in \{0, 1, \ldots, n\}`
    Mean     :math:`n p`
    Variance :math:`n p (1 - p)`
    ======== ==========================================
    Parameters
    ----------
    n : tensor_like of int
        Number of Bernoulli trials (n >= 0).
    p : tensor_like of float
        Probability of success in each trial (0 < p < 1).
    logit_p : tensor_like of float
        Alternative log odds for the probability of success.
    """
    rv_op = binomial
    @classmethod
    def dist(cls, n, p=None, logit_p=None, *args, **kwargs):
        """Create a Binomial RV; exactly one of ``p`` or ``logit_p`` must be given."""
        if p is not None and logit_p is not None:
            raise ValueError("Incompatible parametrization. Can't specify both p and logit_p.")
        elif p is None and logit_p is None:
            raise ValueError("Incompatible parametrization. Must specify either p or logit_p.")
        if logit_p is not None:
            # Map log-odds to a probability.
            p = pt.sigmoid(logit_p)
        n = pt.as_tensor_variable(n, dtype=int)
        p = pt.as_tensor_variable(p)
        return super().dist([n, p], **kwargs)
    def support_point(rv, size, n, p):
        """Return the rounded mean ``n * p``, broadcast to ``size`` if given."""
        mean = pt.round(n * p)
        if not rv_size_is_none(size):
            mean = pt.full(size, mean)
        return mean
    def logp(value, n, p):
        """Log-probability of ``value``; ``-inf`` outside ``{0, ..., n}``."""
        res = pt.switch(
            pt.or_(pt.lt(value, 0), pt.gt(value, n)),
            -np.inf,
            binomln(n, value) + logpow(p, value) + logpow(1 - p, n - value),
        )
        return check_parameters(
            res,
            n >= 0,
            0 <= p,
            p <= 1,
            msg="n >= 0, 0 <= p <= 1",
        )
    def logcdf(value, n, p):
        """Log-CDF at ``value`` via the regularized incomplete beta function."""
        value = pt.floor(value)
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            pt.switch(
                pt.lt(value, n),
                pt.log(pt.betainc(n - value, value + 1, 1 - p)),
                0,
            ),
        )
        return check_parameters(
            res,
            n >= 0,
            0 <= p,
            p <= 1,
            msg="n >= 0, 0 <= p <= 1",
        )
class BetaBinomial(Discrete):
    R"""
    Beta-binomial log-likelihood.
    Equivalent to binomial random variable with success probability
    drawn from a beta distribution.
    The pmf of this distribution is
    .. math::
       f(x \mid \alpha, \beta, n) =
           \binom{n}{x}
           \frac{B(x + \alpha, n - x + \beta)}{B(\alpha, \beta)}
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        from scipy import special
        import arviz as az
        plt.style.use('arviz-darkgrid')
        def BetaBinom(a, b, n, x):
            pmf = special.binom(n, x) * (special.beta(x+a, n-x+b) / special.beta(a, b))
            return pmf
        x = np.arange(0, 11)
        alphas = [0.5, 1, 2.3]
        betas = [0.5, 1, 2]
        n = 10
        for a, b in zip(alphas, betas):
            pmf = BetaBinom(a, b, n, x)
            plt.plot(x, pmf, '-o', label=r'$\alpha$ = {}, $\beta$ = {}, n = {}'.format(a, b, n))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0)
        plt.legend(loc=9)
        plt.show()
    ======== =================================================================
    Support  :math:`x \in \{0, 1, \ldots, n\}`
    Mean     :math:`n \dfrac{\alpha}{\alpha + \beta}`
    Variance :math:`\dfrac{n \alpha \beta (\alpha+\beta+n)}{(\alpha+\beta)^2 (\alpha+\beta+1)}`
    ======== =================================================================
    Parameters
    ----------
    n : tensor_like of int
        Number of Bernoulli trials (n >= 0).
    alpha : tensor_like of float
        alpha > 0.
    beta : tensor_like of float
        beta > 0.
    """
    rv_op = betabinom
    @classmethod
    def dist(cls, alpha, beta, n, *args, **kwargs):
        """Create a BetaBinomial RV from beta-shape parameters and trial count."""
        alpha = pt.as_tensor_variable(alpha)
        beta = pt.as_tensor_variable(beta)
        n = pt.as_tensor_variable(n, dtype=int)
        return super().dist([n, alpha, beta], **kwargs)
    def support_point(rv, size, n, alpha, beta):
        """Return the rounded mean ``n * alpha / (alpha + beta)``."""
        mean = pt.round((n * alpha) / (alpha + beta))
        if not rv_size_is_none(size):
            mean = pt.full(size, mean)
        return mean
    def logp(value, n, alpha, beta):
        """Log-probability of ``value``; ``-inf`` outside ``{0, ..., n}``."""
        res = pt.switch(
            pt.or_(pt.lt(value, 0), pt.gt(value, n)),
            -np.inf,
            binomln(n, value) + betaln(value + alpha, n - value + beta) - betaln(alpha, beta),
        )
        return check_parameters(
            res,
            n >= 0,
            alpha > 0,
            beta > 0,
            msg="n >= 0, alpha > 0, beta > 0",
        )
    def logcdf(value, n, alpha, beta):
        """Log-CDF at a scalar ``value`` by log-summing the pmf from 0 to value."""
        # logcdf can only handle scalar values at the moment
        if np.ndim(value):
            raise TypeError(
                f"BetaBinomial.logcdf expects a scalar value but received a {np.ndim(value)}-dimensional object."
            )
        # Keep pt.arange's bounds valid even when value < 0 (that branch of
        # the switch is discarded, but the graph is still built).
        safe_lower = pt.switch(pt.lt(value, 0), value, 0)
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            pt.switch(
                pt.lt(value, n),
                pt.logsumexp(
                    logp(
                        BetaBinomial.dist(alpha=alpha, beta=beta, n=n),
                        pt.arange(safe_lower, value + 1),
                    ),
                    keepdims=False,
                ),
                0,
            ),
        )
        return check_parameters(
            res,
            n >= 0,
            alpha > 0,
            beta > 0,
            msg="n >= 0, alpha > 0, beta > 0",
        )
class Bernoulli(Discrete):
    R"""Bernoulli log-likelihood.
    The Bernoulli distribution describes the probability of successes
    (x=1) and failures (x=0).
    The pmf of this distribution is
    .. math:: f(x \mid p) = p^{x} (1-p)^{1-x}
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        import arviz as az
        plt.style.use('arviz-darkgrid')
        x = [0, 1]
        for p in [0, 0.5, 0.8]:
            pmf = st.bernoulli.pmf(x, p)
            plt.plot(x, pmf, '-o', label='p = {}'.format(p))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0)
        plt.legend(loc=9)
        plt.show()
    ======== ======================
    Support  :math:`x \in \{0, 1\}`
    Mean     :math:`p`
    Variance :math:`p (1 - p)`
    ======== ======================
    The bernoulli distribution can be parametrized either in terms of p or logit_p.
    The link between the parametrizations is given by
    .. math:: logit(p) = ln(\frac{p}{1-p})
    Parameters
    ----------
    p : tensor_like of float
        Probability of success (0 < p < 1).
    logit_p : tensor_like of float
        Alternative log odds for the probability of success.
    """
    rv_op = bernoulli
    @classmethod
    def dist(cls, p=None, logit_p=None, *args, **kwargs):
        """Create a Bernoulli RV; exactly one of ``p`` or ``logit_p`` must be given."""
        if p is not None and logit_p is not None:
            raise ValueError("Incompatible parametrization. Can't specify both p and logit_p.")
        elif p is None and logit_p is None:
            raise ValueError("Incompatible parametrization. Must specify either p or logit_p.")
        if logit_p is not None:
            # Map log-odds to a probability.
            p = pt.sigmoid(logit_p)
        p = pt.as_tensor_variable(p)
        return super().dist([p], **kwargs)
    def support_point(rv, size, p):
        """Return the mode: 1 when ``p >= 0.5``, otherwise 0."""
        if not rv_size_is_none(size):
            p = pt.full(size, p)
        return pt.switch(p < 0.5, 0, 1)
    def logp(value, p):
        """Log-probability of ``value``; ``-inf`` outside ``{0, 1}``."""
        res = pt.switch(
            pt.or_(pt.lt(value, 0), pt.gt(value, 1)),
            -np.inf,
            # value acts as the boolean selector: log(p) for 1, log(1 - p) for 0.
            pt.switch(value, pt.log(p), pt.log1p(-p)),
        )
        return check_parameters(
            res,
            0 <= p,
            p <= 1,
            msg="0 <= p <= 1",
        )
    def logcdf(value, p):
        """Log-CDF at ``value``: ``-inf`` below 0, ``log(1 - p)`` on [0, 1), 0 above."""
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            pt.switch(
                pt.lt(value, 1),
                pt.log1p(-p),
                0,
            ),
        )
        return check_parameters(
            res,
            0 <= p,
            p <= 1,
            msg="0 <= p <= 1",
        )
class DiscreteWeibullRV(SymbolicRandomVariable):
    """Symbolic sampler for the discrete Weibull distribution (inverse-transform)."""
    name = "discrete_weibull"
    extended_signature = "[rng],[size],(),()->[rng],()"
    _print_name = ("dWeibull", "\\operatorname{dWeibull}")
    @classmethod
    def rv_op(cls, q, beta, *, size=None, rng=None):
        """Build the ``(next_rng, draws)`` graph for discrete Weibull samples."""
        q = pt.as_tensor(q)
        beta = pt.as_tensor(beta)
        rng = normalize_rng_param(rng)
        size = normalize_size_param(size)
        if rv_size_is_none(size):
            # Infer the draw shape from broadcasting q and beta.
            size = implicit_size_from_params(q, beta, ndims_params=cls.ndims_params)
        next_rng, p = uniform(size=size, rng=rng).owner.outputs
        # Invert the CDF F(x) = 1 - q**((x + 1)**beta): the smallest integer x
        # with F(x) >= p is ceil((log(1 - p) / log(q))**(1 / beta)) - 1.
        draws = pt.ceil(pt.power(pt.log(1 - p) / pt.log(q), 1.0 / beta)) - 1
        draws = draws.astype("int64")
        return cls(inputs=[rng, size, q, beta], outputs=[next_rng, draws])(rng, size, q, beta)
class DiscreteWeibull(Discrete):
    R"""Discrete Weibull log-likelihood.
    The discrete Weibull distribution is a flexible model of count data that
    can handle both over- and under-dispersion.
    The pmf of this distribution is
    .. math:: f(x \mid q, \beta) = q^{x^{\beta}} - q^{(x + 1)^{\beta}}
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        from scipy import special
        import arviz as az
        plt.style.use('arviz-darkgrid')
        def DiscreteWeibull(q, b, x):
            return q**(x**b) - q**((x + 1)**b)
        x = np.arange(0, 10)
        qs = [0.1, 0.9, 0.9]
        betas = [0.3, 1.3, 3]
        for q, b in zip(qs, betas):
            pmf = DiscreteWeibull(q, b, x)
            plt.plot(x, pmf, '-o', label=r'q = {}, $\beta$ = {}'.format(q, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0)
        plt.legend(loc=1)
        plt.show()
    ======== ======================
    Support  :math:`x \in \mathbb{N}_0`
    Mean     :math:`\mu = \sum_{x = 1}^{\infty} q^{x^{\beta}}`
    Variance :math:`2 \sum_{x = 1}^{\infty} x q^{x^{\beta}} - \mu - \mu^2`
    ======== ======================
    Parameters
    ----------
    q : tensor_like of float
        Shape parameter (0 < q < 1).
    beta : tensor_like of float
        Shape parameter (beta > 0).
    """
    rv_type = DiscreteWeibullRV
    rv_op = DiscreteWeibullRV.rv_op
    @classmethod
    def dist(cls, q, beta, *args, **kwargs):
        """Create a DiscreteWeibull RV from shape parameters ``q`` and ``beta``."""
        return super().dist([q, beta], **kwargs)
    def support_point(rv, size, q, beta):
        """Return the distribution's median, obtained by inverting the CDF at 0.5."""
        median = pt.power(pt.log(0.5) / pt.log(q), 1 / beta) - 1
        if not rv_size_is_none(size):
            median = pt.full(size, median)
        return median
    def logp(value, q, beta):
        """Log-probability of ``value``; ``-inf`` below 0."""
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            # pmf is the difference of adjacent survival terms q**(x**beta).
            pt.log(pt.power(q, pt.power(value, beta)) - pt.power(q, pt.power(value + 1, beta))),
        )
        return check_parameters(
            res,
            0 < q,
            q < 1,
            beta > 0,
            msg="0 < q < 1, beta > 0",
        )
    def logcdf(value, q, beta):
        """Log-CDF at ``value``: ``log(1 - q**((value + 1)**beta))``."""
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            pt.log1p(-pt.power(q, pt.power(value + 1, beta))),
        )
        return check_parameters(
            res,
            0 < q,
            q < 1,
            beta > 0,
            msg="0 < q < 1, beta > 0",
        )
class Poisson(Discrete):
    R"""
    Poisson log-likelihood.
    Often used to model the number of events occurring in a fixed period
    of time when the times at which events occur are independent.
    The pmf of this distribution is
    .. math:: f(x \mid \mu) = \frac{e^{-\mu}\mu^x}{x!}
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        import arviz as az
        plt.style.use('arviz-darkgrid')
        x = np.arange(0, 15)
        for m in [0.5, 3, 8]:
            pmf = st.poisson.pmf(x, m)
            plt.plot(x, pmf, '-o', label='$\mu$ = {}'.format(m))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0)
        plt.legend(loc=1)
        plt.show()
    ======== ==========================
    Support  :math:`x \in \mathbb{N}_0`
    Mean     :math:`\mu`
    Variance :math:`\mu`
    ======== ==========================
    Parameters
    ----------
    mu : tensor_like of float
        Expected number of occurrences during the given interval
        (mu >= 0).
    Notes
    -----
    The Poisson distribution can be derived as a limiting case of the
    binomial distribution.
    """
    rv_op = poisson
    @classmethod
    def dist(cls, mu, *args, **kwargs):
        """Create a Poisson RV with rate ``mu``."""
        mu = pt.as_tensor_variable(mu)
        return super().dist([mu], *args, **kwargs)
    def support_point(rv, size, mu):
        """Return ``floor(mu)``, broadcast to ``size`` if given."""
        mu = pt.floor(mu)
        if not rv_size_is_none(size):
            mu = pt.full(size, mu)
        return mu
    def logp(value, mu):
        """Log-probability of ``value``; ``-inf`` below 0."""
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            logpow(mu, value) - factln(value) - mu,
        )
        # Return zero when mu and value are both zero
        res = pt.switch(
            pt.eq(mu, 0) * pt.eq(value, 0),
            0,
            res,
        )
        return check_parameters(
            res,
            mu >= 0,
            msg="mu >= 0",
        )
    def logcdf(value, mu):
        """Log-CDF at ``value`` via the upper regularized incomplete gamma function."""
        value = pt.floor(value)
        # Avoid C-assertion when the gammaincc function is called with invalid values (#4340)
        safe_mu = pt.switch(pt.lt(mu, 0), 0, mu)
        safe_value = pt.switch(pt.lt(value, 0), 0, value)
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            pt.log(pt.gammaincc(safe_value + 1, safe_mu)),
        )
        return check_parameters(
            res,
            mu >= 0,
            msg="mu >= 0",
        )
class NegativeBinomial(Discrete):
    R"""
    Negative binomial log-likelihood.
    The negative binomial distribution describes a Poisson random variable
    whose rate parameter is gamma distributed.
    Its pmf, parametrized by the parameters alpha and mu of the gamma distribution, is
    .. math::
       f(x \mid \mu, \alpha) =
           \binom{x + \alpha - 1}{x}
           (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        from scipy import special
        import arviz as az
        plt.style.use('arviz-darkgrid')
        def NegBinom(a, m, x):
            pmf = special.binom(x + a - 1, x) * (a / (m + a))**a * (m / (m + a))**x
            return pmf
        x = np.arange(0, 22)
        alphas = [0.9, 2, 4]
        mus = [1, 2, 8]
        for a, m in zip(alphas, mus):
            pmf = NegBinom(a, m, x)
            plt.plot(x, pmf, '-o', label=r'$\alpha$ = {}, $\mu$ = {}'.format(a, m))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()
    ======== ==================================
    Support  :math:`x \in \mathbb{N}_0`
    Mean     :math:`\mu`
    Variance :math:`\frac{\mu^2}{\alpha} + \mu`
    ======== ==================================
    The negative binomial distribution can be parametrized either in terms of mu or p,
    and either in terms of alpha or n. The link between the parametrizations is given by
    .. math::
        p &= \frac{\alpha}{\mu + \alpha} \\
        n &= \alpha
    If it is parametrized in terms of n and p, the negative binomial describes the probability to have x failures
    before the n-th success, given the probability p of success in each trial. Its pmf is
    .. math::
        f(x \mid n, p) =
           \binom{x + n - 1}{x}
           (p)^n (1 - p)^x
    Parameters
    ----------
    alpha : tensor_like of float
        Gamma distribution shape parameter (alpha > 0).
    mu : tensor_like of float
        Gamma distribution mean (mu > 0).
    p : tensor_like of float
        Alternative probability of success in each trial (0 < p < 1).
    n : tensor_like of float
        Alternative number of target success trials (n > 0)
    """
    rv_op = nbinom
    @classmethod
    def dist(cls, mu=None, alpha=None, p=None, n=None, *args, **kwargs):
        """Create a NegativeBinomial RV from either (mu, alpha) or (n, p)."""
        n, p = cls.get_n_p(mu=mu, alpha=alpha, p=p, n=n)
        n = pt.as_tensor_variable(n)
        p = pt.as_tensor_variable(p)
        return super().dist([n, p], *args, **kwargs)
    @classmethod
    def get_n_p(cls, mu=None, alpha=None, p=None, n=None):
        """Normalize the two parametrizations to the canonical ``(n, p)`` pair.

        Exactly one of ``alpha``/``n`` and exactly one of ``mu``/``p`` must be
        supplied; anything else raises ``ValueError``.
        """
        if n is None:
            if alpha is not None:
                n = alpha
            else:
                raise ValueError("Incompatible parametrization. Must specify either alpha or n.")
        elif alpha is not None:
            raise ValueError("Incompatible parametrization. Can't specify both alpha and n.")
        if p is None:
            if mu is not None:
                # p = alpha / (mu + alpha), with n playing the role of alpha.
                p = n / (mu + n)
            else:
                raise ValueError("Incompatible parametrization. Must specify either mu or p.")
        elif mu is not None:
            raise ValueError("Incompatible parametrization. Can't specify both mu and p.")
        return n, p
    def support_point(rv, size, n, p):
        """Return the floored mean ``n * (1 - p) / p``."""
        mu = pt.floor(n * (1 - p) / p)
        if not rv_size_is_none(size):
            mu = pt.full(size, mu)
        return mu
    def logp(value, n, p):
        """Log-probability of ``value`` in the (mu, alpha) parametrization."""
        # Convert back to (mu, alpha) for the pmf expression.
        alpha = n
        mu = alpha * (1 - p) / p
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            (
                binomln(value + alpha - 1, value)
                + logpow(mu / (mu + alpha), value)
                + logpow(alpha / (mu + alpha), alpha)
            ),
        )
        negbinom = check_parameters(
            res,
            mu > 0,
            alpha > 0,
            msg="mu > 0, alpha > 0",
        )
        # Return Poisson when alpha gets very large.
        return pt.switch(pt.gt(alpha, 1e10), logp(Poisson.dist(mu=mu), value), negbinom)
    def logcdf(value, n, p):
        """Log-CDF at ``value`` via the regularized incomplete beta function."""
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            pt.log(pt.betainc(n, pt.floor(value) + 1, p)),
        )
        return check_parameters(
            res,
            n > 0,
            0 <= p,
            p <= 1,
            msg="n > 0, 0 <= p <= 1",
        )
class Geometric(Discrete):
    R"""
    Geometric log-likelihood.
    The probability that the first success in a sequence of Bernoulli
    trials occurs on the x'th trial.
    The pmf of this distribution is
    .. math:: f(x \mid p) = p(1-p)^{x-1}
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        import arviz as az
        plt.style.use('arviz-darkgrid')
        x = np.arange(1, 11)
        for p in [0.1, 0.25, 0.75]:
            pmf = st.geom.pmf(x, p)
            plt.plot(x, pmf, '-o', label='p = {}'.format(p))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()
    ======== =============================
    Support  :math:`x \in \mathbb{N}_{>0}`
    Mean     :math:`\dfrac{1}{p}`
    Variance :math:`\dfrac{1 - p}{p^2}`
    ======== =============================
    Parameters
    ----------
    p : tensor_like of float
        Probability of success on an individual trial (0 < p <= 1).
    """
    rv_op = geometric
    @classmethod
    def dist(cls, p, *args, **kwargs):
        """Create a Geometric RV with per-trial success probability ``p``."""
        p = pt.as_tensor_variable(p)
        return super().dist([p], *args, **kwargs)
    def support_point(rv, size, p):
        """Return the rounded mean ``1 / p``."""
        mean = pt.round(1.0 / p)
        if not rv_size_is_none(size):
            mean = pt.full(size, mean)
        return mean
    def logp(value, p):
        """Log-probability of ``value``; support starts at 1, so ``-inf`` below it."""
        res = pt.switch(
            pt.lt(value, 1),
            -np.inf,
            pt.log(p) + logpow(1 - p, value - 1),
        )
        return check_parameters(
            res,
            0 <= p,
            p <= 1,
            msg="0 <= p <= 1",
        )
    def logcdf(value, p):
        """Log-CDF at ``value``: ``log(1 - (1 - p)**value)`` computed stably."""
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            # log1mexp(value * log(1 - p)) == log(1 - (1 - p)**value).
            pt.log1mexp(pt.log1p(-p) * value),
        )
        return check_parameters(
            res,
            0 <= p,
            p <= 1,
            msg="0 <= p <= 1",
        )
    def icdf(value, p):
        """Inverse CDF (smallest integer x with CDF(x) >= ``value``)."""
        res = pt.ceil(pt.log1p(-value) / pt.log1p(-p)).astype("int64")
        # The ceil above can overshoot by one; step back when the CDF at
        # res - 1 already covers the requested quantile.
        res_1m = pt.maximum(res - 1, 0)
        dist = pm.Geometric.dist(p=p)
        value_1m = pt.exp(logcdf(dist, res_1m))
        res = pt.switch(value_1m >= value, res_1m, res)
        res = check_icdf_value(res, value)
        return check_icdf_parameters(
            res,
            0 <= p,
            p <= 1,
            msg="0 <= p <= 1",
        )
class HyperGeometric(Discrete):
    R"""
    Discrete hypergeometric distribution.
    The probability of :math:`x` successes in a sequence of :math:`n` bernoulli
    trials taken without replacement from a population of :math:`N` objects,
    containing :math:`k` good (or successful or Type I) objects.
    The pmf of this distribution is
    .. math:: f(x \mid N, n, k) = \frac{\binom{k}{x}\binom{N-k}{n-x}}{\binom{N}{n}}
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        import arviz as az
        plt.style.use('arviz-darkgrid')
        x = np.arange(1, 15)
        N = 50
        k = 10
        for n in [20, 25]:
            pmf = st.hypergeom.pmf(x, N, k, n)
            plt.plot(x, pmf, '-o', label='N = {}, k = {}, n = {}'.format(N, k, n))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()
    ======== =============================
    Support  :math:`x \in \left[\max(0, n - N + k), \min(k, n)\right]`
    Mean     :math:`\dfrac{nk}{N}`
    Variance :math:`\dfrac{(N-n)nk(N-k)}{(N-1)N^2}`
    ======== =============================
    Parameters
    ----------
    N : tensor_like of int
        Total size of the population (N > 0)
    k : tensor_like of int
        Number of successful individuals in the population (0 <= k <= N)
    n : tensor_like of int
        Number of samples drawn from the population (0 <= n <= N)
    """
    rv_op = hypergeometric
    @classmethod
    def dist(cls, N, k, n, *args, **kwargs):
        """Create a HyperGeometric RV; stored internally as (good, bad, n)."""
        good = pt.as_tensor_variable(k, dtype=int)
        bad = pt.as_tensor_variable(N - k, dtype=int)
        n = pt.as_tensor_variable(n, dtype=int)
        return super().dist([good, bad, n], *args, **kwargs)
    def support_point(rv, size, good, bad, n):
        """Return the distribution's mode ``floor((n + 1)(k + 1) / (N + 2))``."""
        N, k = good + bad, good
        mode = pt.floor((n + 1) * (k + 1) / (N + 2))
        if not rv_size_is_none(size):
            mode = pt.full(size, mode)
        return mode
    def logp(value, good, bad, n):
        """Log-probability of ``value``; ``-inf`` outside the support interval."""
        tot = good + bad
        # pmf written entirely in terms of log-beta functions for stability.
        result = (
            betaln(good + 1, 1)
            + betaln(bad + 1, 1)
            + betaln(tot - n + 1, n + 1)
            - betaln(value + 1, good - value + 1)
            - betaln(n - value + 1, bad - n + value + 1)
            - betaln(tot + 1, 1)
        )
        # value in [max(0, n - N + k), min(k, n)]
        lower = pt.switch(pt.gt(n - tot + good, 0), n - tot + good, 0)
        upper = pt.switch(pt.lt(good, n), good, n)
        res = pt.switch(
            pt.lt(value, lower),
            -np.inf,
            pt.switch(
                pt.le(value, upper),
                result,
                -np.inf,
            ),
        )
        return check_parameters(
            res,
            lower <= upper,
            msg="lower <= upper",
        )
    def logcdf(value, good, bad, n):
        """Log-CDF at a scalar ``value`` by log-summing the pmf over the support."""
        # logcdf can only handle scalar values at the moment
        if np.ndim(value):
            raise TypeError(
                f"HyperGeometric.logcdf expects a scalar value but received a {np.ndim(value)}-dimensional object."
            )
        N = good + bad
        # TODO: Use lower upper in logcdf for smarter logsumexp?
        # Keep pt.arange's bounds valid even when value < 0 (that branch of
        # the switch is discarded, but the graph is still built).
        safe_lower = pt.switch(pt.lt(value, 0), value, 0)
        res = pt.switch(
            pt.lt(value, 0),
            -np.inf,
            pt.switch(
                pt.lt(value, n),
                pt.logsumexp(
                    HyperGeometric.logp(pt.arange(safe_lower, value + 1), good, bad, n),
                    keepdims=False,
                ),
                0,
            ),
        )
        return check_parameters(
            res,
            N > 0,
            0 <= good,
            good <= N,
            0 <= n,
            n <= N,
            msg="N > 0, 0 <= good <= N, 0 <= n <= N",
        )
class DiscreteUniformRV(ScipyRandomVariable):
    """Random variable drawing integers uniformly from an inclusive range."""
    name = "discrete_uniform"
    signature = "(),()->()"
    dtype = "int64"
    _print_name = ("DiscreteUniform", "\\operatorname{DiscreteUniform}")
    @classmethod
    def rng_fn_scipy(cls, rng, lower, upper, size=None):
        """Sample integers uniformly from ``[lower, upper]`` (both inclusive)."""
        # scipy's randint excludes the upper bound, hence upper + 1.
        return stats.randint.rvs(lower, upper + 1, size=size, random_state=rng)
# Singleton op instance used as the distribution's rv_op below.
discrete_uniform = DiscreteUniformRV()
class DiscreteUniform(Discrete):
    R"""Discrete uniform distribution.
    The pmf of this distribution is
    .. math:: f(x \mid lower, upper) = \frac{1}{upper-lower+1}
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        import arviz as az
        plt.style.use('arviz-darkgrid')
        ls = [1, -2]
        us = [6, 2]
        for l, u in zip(ls, us):
            x = np.arange(l, u+1)
            pmf = [1.0 / (u - l + 1)] * len(x)
            plt.plot(x, pmf, '-o', label='lower = {}, upper = {}'.format(l, u))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0, 0.4)
        plt.legend(loc=1)
        plt.show()
    ======== ===============================================
    Support  :math:`x \in {lower, lower + 1, \ldots, upper}`
    Mean     :math:`\dfrac{lower + upper}{2}`
    Variance :math:`\dfrac{(upper - lower)^2}{12}`
    ======== ===============================================
    Parameters
    ----------
    lower : tensor_like of int
        Lower limit.
    upper : tensor_like of int
        Upper limit (upper > lower).
    """
    rv_op = discrete_uniform
    @classmethod
    def dist(cls, lower, upper, *args, **kwargs):
        """Create a DiscreteUniform RV; bounds are truncated to integers."""
        lower = pt.floor(lower)
        upper = pt.floor(upper)
        return super().dist([lower, upper], **kwargs)
    def support_point(rv, size, lower, upper):
        """Return the floored midpoint of the range, clipped to ``lower``."""
        mode = pt.maximum(pt.floor((upper + lower) / 2.0), lower)
        if not rv_size_is_none(size):
            mode = pt.full(size, mode)
        return mode
    def logp(value, lower, upper):
        """Log-probability of ``value``: constant inside the range, ``-inf`` outside."""
        res = pt.switch(
            pt.or_(pt.lt(value, lower), pt.gt(value, upper)),
            -np.inf,
            # fill broadcasts the constant log-pmf to value's shape.
            pt.fill(value, -pt.log(upper - lower + 1)),
        )
        return check_parameters(
            res,
            lower <= upper,
            msg="lower <= upper",
        )
    def logcdf(value, lower, upper):
        """Log-CDF at ``value``: log of the covered fraction of the range."""
        res = pt.switch(
            pt.le(value, lower),
            -np.inf,
            pt.switch(
                pt.lt(value, upper),
                pt.log(pt.minimum(pt.floor(value), upper) - lower + 1) - pt.log(upper - lower + 1),
                0,
            ),
        )
        return check_parameters(
            res,
            lower <= upper,
            msg="lower <= upper",
        )
    def icdf(value, lower, upper):
        """Inverse CDF (smallest integer x with CDF(x) >= ``value``)."""
        res = pt.ceil(value * (upper - lower + 1)).astype("int64") + lower - 1
        # The ceil above can overshoot by one; step back when the CDF at
        # res - 1 already covers the requested quantile.
        res_1m = pt.maximum(res - 1, lower)
        dist = pm.DiscreteUniform.dist(lower=lower, upper=upper)
        value_1m = pt.exp(logcdf(dist, res_1m))
        res = pt.switch(value_1m >= value, res_1m, res)
        res = check_icdf_value(res, value)
        return check_icdf_parameters(
            res,
            lower <= upper,
            msg="lower <= upper",
        )
class Categorical(Discrete):
    R"""
    Categorical log-likelihood.
    The most general discrete distribution. The pmf of this distribution is
    .. math:: f(x \mid p) = p_x
    .. plot::
        :context: close-figs
        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        import arviz as az
        plt.style.use('arviz-darkgrid')
        ps = [[0.1, 0.6, 0.3], [0.3, 0.1, 0.1, 0.5]]
        for p in ps:
            x = range(len(p))
            plt.plot(x, p, '-o', label='p = {}'.format(p))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0)
        plt.legend(loc=1)
        plt.show()
    ======== ===================================
    Support  :math:`x \in \{0, 1, \ldots, |p|-1\}`
    ======== ===================================
    Parameters
    ----------
    p : array of floats
        p > 0 and the elements of p must sum to 1.
    logit_p : float
        Alternative log odds for the probability of success.
    """
    rv_op = categorical
    @classmethod
    def dist(cls, p=None, logit_p=None, **kwargs):
        """Create a Categorical RV; exactly one of ``p`` or ``logit_p`` must be given.

        Constant ``p`` values are validated eagerly: negative entries raise,
        and entries not summing to 1 are rescaled with a warning.
        """
        if p is not None and logit_p is not None:
            raise ValueError("Incompatible parametrization. Can't specify both p and logit_p.")
        elif p is None and logit_p is None:
            raise ValueError("Incompatible parametrization. Must specify either p or logit_p.")
        if logit_p is not None:
            p = pm.math.softmax(logit_p, axis=-1)
        p = pt.as_tensor_variable(p)
        if isinstance(p, TensorConstant):
            p_ = np.asarray(p.data)
            if np.any(p_ < 0):
                raise ValueError(f"Negative `p` parameters are not valid, got: {p_}")
            p_sum_ = np.sum([p_], axis=-1)
            if not np.all(np.isclose(p_sum_, 1.0)):
                warnings.warn(
                    f"`p` parameters sum to {p_sum_}, instead of 1.0. "
                    "They will be automatically rescaled. "
                    "You can rescale them directly to get rid of this warning.",
                    UserWarning,
                )
                p_ = p_ / pt.sum(p_, axis=-1, keepdims=True)
                p = pt.as_tensor_variable(p_)
        return super().dist([p], **kwargs)
    def support_point(rv, size, p):
        """Return the index of the most likely category (argmax over the last axis)."""
        mode = pt.argmax(p, axis=-1)
        if not rv_size_is_none(size):
            mode = pt.full(size, mode)
        return mode
    def logp(value, p):
        """Log-probability of ``value``; ``-inf`` for indices outside ``[0, k)``."""
        k = pt.shape(p)[-1]
        # Clip so take_along_axis is safe; out-of-range values get -inf below.
        value_clip = pt.clip(value, 0, k - 1)
        # In the standard case p has one more dimension than value
        dim_diff = p.type.ndim - value.type.ndim
        if dim_diff > 1:
            # p broadcasts implicitly beyond value
            value_clip = pt.shape_padleft(value_clip, dim_diff - 1)
        elif dim_diff < 1:
            # value broadcasts implicitly beyond p
            p = pt.shape_padleft(p, 1 - dim_diff)
        a = pt.log(pt.take_along_axis(p, value_clip[..., None], axis=-1).squeeze(-1))
        res = pt.switch(
            pt.or_(pt.lt(value, 0), pt.gt(value, k - 1)),
            -np.inf,
            a,
        )
        return check_parameters(
            res,
            0 <= p,
            p <= 1,
            pt.isclose(pt.sum(p, axis=-1), 1),
            msg="0 <= p <=1, sum(p) = 1",
        )
class OrderedLogistic:
    R"""Ordered Logistic distribution.
    Useful for regression on ordinal data values whose values range
    from 1 to K as a function of some predictor, :math:`\eta`. The
    cutpoints, :math:`c`, separate which ranges of :math:`\eta` are
    mapped to which of the K observed dependent variables. The number
    of cutpoints is K - 1. It is recommended that the cutpoints are
    constrained to be ordered.
    .. math::
       f(k \mid \eta, c) = \left\{
         \begin{array}{l}
           1 - \text{logit}^{-1}(\eta - c_1)
             \,, \text{if } k = 0 \\
           \text{logit}^{-1}(\eta - c_{k - 1}) -
           \text{logit}^{-1}(\eta - c_{k})
             \,, \text{if } 0 < k < K \\
           \text{logit}^{-1}(\eta - c_{K - 1})
             \,, \text{if } k = K \\
         \end{array}
       \right.
    Parameters
    ----------
    eta : tensor_like of float
        The predictor.
    cutpoints : tensor_like of array
        The length K - 1 array of cutpoints which break :math:`\eta` into
        ranges. Do not explicitly set the first and last elements of
        :math:`c` to negative and positive infinity.
    compute_p: boolean, default True
        Whether to compute and store in the trace the inferred probabilities of each categories,
        based on the cutpoints' values. Defaults to True.
        Might be useful to disable it if memory usage is of interest.
    Examples
    --------
    .. code-block:: python
        # Generate data for a simple 1 dimensional example problem
        n1_c = 300; n2_c = 300; n3_c = 300
        cluster1 = np.random.randn(n1_c) + -1
        cluster2 = np.random.randn(n2_c) + 0
        cluster3 = np.random.randn(n3_c) + 2
        x = np.concatenate((cluster1, cluster2, cluster3))
        y = np.concatenate((1*np.ones(n1_c),
                            2*np.ones(n2_c),
                            3*np.ones(n3_c))) - 1
        # Ordered logistic regression
        with pm.Model() as model:
            cutpoints = pm.Normal("cutpoints", mu=[-1,1], sigma=10, shape=2,
                                  transform=pm.distributions.transforms.ordered)
            y_ = pm.OrderedLogistic("y", cutpoints=cutpoints, eta=x, observed=y)
            idata = pm.sample()
        # Plot the results
        plt.hist(cluster1, 30, alpha=0.5);
        plt.hist(cluster2, 30, alpha=0.5);
        plt.hist(cluster3, 30, alpha=0.5);
        posterior = idata.posterior.stack(sample=("chain", "draw"))
        plt.hist(posterior["cutpoints"][0], 80, alpha=0.2, color='k');
        plt.hist(posterior["cutpoints"][1], 80, alpha=0.2, color='k');
    """
    def __new__(cls, name, eta, cutpoints, compute_p=True, **kwargs):
        """Register a Categorical RV whose probabilities come from the cutpoints."""
        p = cls.compute_p(eta, cutpoints)
        if compute_p:
            # Store the category probabilities in the trace as a Deterministic.
            p = pm.Deterministic(f"{name}_probs", p)
        out_rv = Categorical(name, p=p, **kwargs)
        return out_rv
    @classmethod
    def dist(cls, eta, cutpoints, **kwargs):
        """Return an unnamed Categorical RV with the induced probabilities."""
        p = cls.compute_p(eta, cutpoints)
        return Categorical.dist(p=p, **kwargs)
    @classmethod
    def compute_p(cls, eta, cutpoints):
        """Category probabilities as differences of adjacent logistic CDF values."""
        eta = pt.as_tensor_variable(eta)
        cutpoints = pt.as_tensor_variable(cutpoints)
        pa = sigmoid(cutpoints - pt.shape_padright(eta))
        # Pad the cumulative probabilities with 0 and 1 so adjacent
        # differences yield all K category probabilities.
        p_cum = pt.concatenate(
            [
                pt.zeros_like(pt.shape_padright(pa[..., 0])),
                pa,
                pt.ones_like(pt.shape_padright(pa[..., 0])),
            ],
            axis=-1,
        )
        p = p_cum[..., 1:] - p_cum[..., :-1]
        return p
class OrderedProbit:
    R"""
    Ordered Probit distributions.
    Useful for regression on ordinal data values whose values range
    from 1 to K as a function of some predictor, :math:`\eta`. The
    cutpoints, :math:`c`, separate which ranges of :math:`\eta` are
    mapped to which of the K observed dependent variables. The number
    of cutpoints is K - 1. It is recommended that the cutpoints are
    constrained to be ordered.
    In order to stabilize the computation, log-likelihood is computed
    in log space using the scaled error function `erfcx`.
    .. math::
       f(k \mid \eta, c) = \left\{
         \begin{array}{l}
           1 - \text{normal_cdf}(0, \sigma, \eta - c_1)
             \,, \text{if } k = 0 \\
           \text{normal_cdf}(0, \sigma, \eta - c_{k - 1}) -
           \text{normal_cdf}(0, \sigma, \eta - c_{k})
             \,, \text{if } 0 < k < K \\
           \text{normal_cdf}(0, \sigma, \eta - c_{K - 1})
             \,, \text{if } k = K \\
         \end{array}
       \right.
    Parameters
    ----------
    eta : tensor_like of float
        The predictor.
    cutpoints : tensor_like array of floats
        The length K - 1 array of cutpoints which break :math:`\eta` into
        ranges. Do not explicitly set the first and last elements of
        :math:`c` to negative and positive infinity.
    sigma : tensor_like of float, default 1.0
        Standard deviation of the probit function.
    compute_p : boolean, default True
        Whether to compute and store in the trace the inferred probabilities of each categories,
        based on the cutpoints' values. Defaults to True.
        Might be useful to disable it if memory usage is of interest.
    Examples
    --------
    .. code:: python
        # Generate data for a simple 1 dimensional example problem
        n1_c = 300; n2_c = 300; n3_c = 300
        cluster1 = np.random.randn(n1_c) + -1
        cluster2 = np.random.randn(n2_c) + 0
        cluster3 = np.random.randn(n3_c) + 2
        x = np.concatenate((cluster1, cluster2, cluster3))
        y = np.concatenate((1*np.ones(n1_c),
                            2*np.ones(n2_c),
                            3*np.ones(n3_c))) - 1
        # Ordered probit regression
        with pm.Model() as model:
            cutpoints = pm.Normal("cutpoints", mu=[-1,1], sigma=10, shape=2,
                                  transform=pm.distributions.transforms.ordered)
            y_ = pm.OrderedProbit("y", cutpoints=cutpoints, eta=x, observed=y)
            idata = pm.sample()
        # Plot the results
        plt.hist(cluster1, 30, alpha=0.5);
        plt.hist(cluster2, 30, alpha=0.5);
        plt.hist(cluster3, 30, alpha=0.5);
        posterior = idata.posterior.stack(sample=("chain", "draw"))
        plt.hist(posterior["cutpoints"][0], 80, alpha=0.2, color='k');
        plt.hist(posterior["cutpoints"][1], 80, alpha=0.2, color='k');
    """
    def __new__(cls, name, eta, cutpoints, sigma=1, compute_p=True, **kwargs):
        """Register a Categorical RV whose probabilities come from the cutpoints."""
        p = cls.compute_p(eta, cutpoints, sigma)
        if compute_p:
            # Store the category probabilities in the trace as a Deterministic.
            p = pm.Deterministic(f"{name}_probs", p)
        out_rv = Categorical(name, p=p, **kwargs)
        return out_rv
    @classmethod
    def dist(cls, eta, cutpoints, sigma=1, **kwargs):
        """Return an unnamed Categorical RV with the induced probabilities."""
        p = cls.compute_p(eta, cutpoints, sigma)
        return Categorical.dist(p=p, **kwargs)
    @classmethod
    def compute_p(cls, eta, cutpoints, sigma):
        """Category probabilities from normal CDF differences, computed in log space."""
        eta = pt.as_tensor_variable(eta)
        cutpoints = pt.as_tensor_variable(cutpoints)
        probits = pt.shape_padright(eta) - cutpoints
        # First category: upper normal tail; middle: stable log of CDF
        # differences; last category: lower normal tail.
        log_p = pt.concatenate(
            [
                pt.shape_padright(normal_lccdf(0, sigma, probits[..., 0])),
                log_diff_normal_cdf(
                    0, pt.shape_padright(sigma), probits[..., :-1], probits[..., 1:]
                ),
                pt.shape_padright(normal_lcdf(0, sigma, probits[..., -1])),
            ],
            axis=-1,
        )
        p = pt.exp(log_p)
        return p
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@pymc@distributions@discrete.py@.PATH_END.py
|
{
"filename": "test_gaussian.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/infer/autoguide/test_gaussian.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import OrderedDict, namedtuple
import pytest
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer import SVI, JitTrace_ELBO, Predictive, Trace_ELBO
from pyro.infer.autoguide import AutoGaussian, AutoGuideList
from pyro.infer.autoguide.gaussian import (
AutoGaussianDense,
AutoGaussianFunsor,
_break_plates,
)
from pyro.infer.reparam import LocScaleReparam
from pyro.optim import ClippedAdam
from tests.common import assert_close, assert_equal, xfail_if_not_implemented
# Guide backends exercised by the parametrized tests below. The "funsor"
# backend only runs in the dedicated "funsor" test stage.
BACKENDS = [
    "dense",
    pytest.param("funsor", marks=[pytest.mark.stage("funsor")]),
]
def test_break_plates():
    """Check _break_plates against hand-computed permute/reshape expectations.

    ``x`` is a tensor indexed by mock plates i, j, k (dims -3, -2, -1, each
    preceded by an event-like leading dim of size 5). For every subset of
    plates to keep, the kept plate dims must move to the front (in plate
    order) and all remaining dims must be flattened into one trailing axis.
    """
    shape = torch.Size([5, 4, 3, 2])
    x = torch.arange(shape.numel()).reshape(shape)
    MockPlate = namedtuple("MockPlate", "dim, size")
    h = MockPlate(-4, 6)
    i = MockPlate(-3, 5)
    j = MockPlate(-2, 4)
    k = MockPlate(-1, 3)
    # Keeping no plates flattens everything.
    actual = _break_plates(x, {i, j, k}, set())
    expected = x.reshape(-1)
    assert_equal(actual, expected)
    actual = _break_plates(x, {i, j, k}, {i})
    expected = x.reshape(5, 1, 1, -1)
    assert_equal(actual, expected)
    actual = _break_plates(x, {i, j, k}, {j})
    expected = x.permute((1, 0, 2, 3)).reshape(4, 1, -1)
    assert_equal(actual, expected)
    actual = _break_plates(x, {i, j, k}, {k})
    expected = x.permute((2, 0, 1, 3)).reshape(3, -1)
    assert_equal(actual, expected)
    actual = _break_plates(x, {i, j, k}, {i, j})
    expected = x.reshape(5, 4, 1, -1)
    assert_equal(actual, expected)
    actual = _break_plates(x, {i, j, k}, {i, k})
    expected = x.permute((0, 2, 1, 3)).reshape(5, 1, 3, -1)
    assert_equal(actual, expected)
    actual = _break_plates(x, {i, j, k}, {j, k})
    expected = x.permute((1, 2, 0, 3)).reshape(4, 3, -1)
    assert_equal(actual, expected)
    # Keeping all plates is a no-op.
    actual = _break_plates(x, {i, j, k}, {i, j, k})
    expected = x
    assert_equal(actual, expected)
    # Extra kept plates that x does not have (h) are ignored.
    actual = _break_plates(x, {i, j, k}, {h, i, j, k})
    expected = x
    assert_equal(actual, expected)
@pytest.mark.parametrize("backend", BACKENDS)
def test_backend_dispatch(backend):
    """AutoGaussian(...) should dispatch to the backend-specific subclass,
    and constructing that subclass directly should also work."""
    def model():
        pyro.sample("x", dist.Normal(0, 1))
    guide = AutoGaussian(model, backend=backend)
    if backend == "dense":
        expected_cls = AutoGaussianDense
    elif backend == "funsor":
        expected_cls = AutoGaussianFunsor
    else:
        raise ValueError(f"Unknown backend: {backend}")
    assert isinstance(guide, expected_cls)
    # Direct construction of the subclass must also yield that subclass.
    direct = expected_cls(model)
    assert isinstance(direct, expected_cls)
def check_structure(model, expected_str, expected_dependencies=None):
    """Assert the dense guide's precision matrix has the expected sparsity.

    ``expected_str`` is a list of rows where "?" marks a nonzero entry and
    "." marks a (near-)zero entry of the posterior precision matrix.
    Optionally also checks the guide's discovered ``dependencies`` dict.
    """
    guide = AutoGaussian(model, backend="dense")
    guide()  # initialize
    if expected_dependencies is not None:
        assert guide.dependencies == expected_dependencies
    # Inject random noise into all unconstrained parameters.
    for parameter in guide.parameters():
        parameter.data.normal_()
    with torch.no_grad():
        # Check flatten & unflatten.
        mvn = guide._dense_get_mvn()
        expected = mvn.sample()
        samples = guide._dense_unflatten(expected)
        actual = guide._dense_flatten(samples)
        assert_equal(actual, expected)
        # Check sparsity structure.
        precision = mvn.precision_matrix
        # Threshold tiny values to zero before comparing with the pattern.
        actual = precision.abs().gt(1e-5).long()
        str_to_number = {"?": 1, ".": 0}
        expected = torch.tensor(
            [[str_to_number[c] for c in row if c != " "] for row in expected_str]
        )
        assert (actual == expected).all()
def check_backends_agree(model):
    """Check the dense and funsor AutoGaussian backends agree on ``model``.

    After syncing parameters between the two guides, verifies that the two
    backends agree on: (1) the latent log density, (2) a Monte Carlo estimate
    of the entropy and its gradients, and (3) the ELBO and its gradients.
    """
    guide1 = AutoGaussian(model, backend="dense")
    guide2 = AutoGaussian(model, backend="funsor")
    guide1()
    with xfail_if_not_implemented():
        guide2()
    # Inject random noise into all unconstrained parameters.
    params1 = dict(guide1.named_parameters())
    params2 = dict(guide2.named_parameters())
    assert set(params1) == set(params2)
    for k, v in params1.items():
        v.data.add_(torch.zeros_like(v).normal_())
        params2[k].data.copy_(v.data)
    names = sorted(params1)
    # Check densities agree between backends.
    with torch.no_grad(), poutine.trace() as tr:
        aux = guide2._sample_aux_values(temperature=1.0)
        flat = guide1._dense_flatten(aux)
        tr.trace.compute_log_prob()
    log_prob_funsor = tr.trace.nodes["_AutoGaussianFunsor_latent"]["log_prob"]
    with torch.no_grad(), poutine.trace() as tr:
        # Condition the dense guide on the funsor guide's sample so both
        # score the same latent value.
        with poutine.condition(data={"_AutoGaussianDense_latent": flat}):
            guide1._sample_aux_values(temperature=1.0)
        tr.trace.compute_log_prob()
    log_prob_dense = tr.trace.nodes["_AutoGaussianDense_latent"]["log_prob"]
    assert_equal(log_prob_funsor, log_prob_dense)
    # Check Monte Carlo estimate of entropy.
    entropy1 = guide1._dense_get_mvn().entropy()
    with pyro.plate("particle", 100000, dim=-3), poutine.trace() as tr:
        guide2._sample_aux_values(temperature=1.0)
        tr.trace.compute_log_prob()
    entropy2 = -tr.trace.nodes["_AutoGaussianFunsor_latent"]["log_prob"].mean()
    assert_close(entropy1, entropy2, atol=1e-2)
    grads1 = torch.autograd.grad(
        entropy1, [params1[k] for k in names], allow_unused=True
    )
    grads2 = torch.autograd.grad(
        entropy2, [params2[k] for k in names], allow_unused=True
    )
    for name, grad1, grad2 in zip(names, grads1, grads2):
        # Gradients should agree to very high precision.
        if grad1 is None and grad2 is not None:
            grad1 = torch.zeros_like(grad2)
        elif grad2 is None and grad1 is not None:
            grad2 = torch.zeros_like(grad1)
        assert_close(grad1, grad2, msg=f"{name}:\n{grad1} vs {grad2}")
    # Check elbos agree between backends.
    elbo = Trace_ELBO(num_particles=1000000, vectorize_particles=True)
    loss1 = elbo.differentiable_loss(model, guide1)
    loss2 = elbo.differentiable_loss(model, guide2)
    assert_close(loss1, loss2, atol=1e-2, rtol=0.05)
    grads1 = torch.autograd.grad(loss1, [params1[k] for k in names], allow_unused=True)
    grads2 = torch.autograd.grad(loss2, [params2[k] for k in names], allow_unused=True)
    for name, grad1, grad2 in zip(names, grads1, grads2):
        assert_close(
            grad1, grad2, atol=0.05, rtol=0.05, msg=f"{name}:\n{grad1} vs {grad2}"
        )
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_0(backend):
    """Single latent site with one observation: 1x1 precision matrix."""
    def model():
        a = pyro.sample("a", dist.Normal(0, 1))
        pyro.sample("b", dist.Normal(a, 1), obs=torch.ones(()))
    # size = 1
    structure = [
        "?",
    ]
    dependencies = {
        "a": {"a": set()},
        "b": {"b": set(), "a": set()},
    }
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure, dependencies)
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_1(backend):
    """One latent with a plated observation: still a 1x1 precision matrix."""
    def model():
        a = pyro.sample("a", dist.Normal(0, 1))
        with pyro.plate("i", 3):
            pyro.sample("b", dist.Normal(a, 1), obs=torch.ones(3))
    # size = 1
    structure = [
        "?",
    ]
    dependencies = {
        "a": {"a": set()},
        "b": {"b": set(), "a": set()},
    }
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure, dependencies)
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_2(backend):
    """Markov chain a -> b -> c -> obs: tridiagonal precision structure."""
    def model():
        a = pyro.sample("a", dist.Normal(0, 1))
        b = pyro.sample("b", dist.Normal(a, 1))
        c = pyro.sample("c", dist.Normal(b, 1))
        pyro.sample("d", dist.Normal(c, 1), obs=torch.tensor(1.0))
    # size = 1 + 1 + 1 = 3
    structure = [
        "? ? .",
        "? ? ?",
        ". ? ?",
    ]
    dependencies = {
        "a": {"a": set()},
        "b": {"b": set(), "a": set()},
        "c": {"c": set(), "b": set()},
        "d": {"c": set(), "d": set()},
    }
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure, dependencies)
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_3(backend):
    """Markov chain fully inside a plate: two independent tridiagonal chains,
    interleaved because sites are flattened in (site, plate-index) order."""
    def model():
        with pyro.plate("i", 2):
            a = pyro.sample("a", dist.Normal(0, 1))
            b = pyro.sample("b", dist.Normal(a, 1))
            c = pyro.sample("c", dist.Normal(b, 1))
            pyro.sample("d", dist.Normal(c, 1), obs=torch.tensor(1.0))
    # size = 2 + 2 + 2 = 6
    structure = [
        "? . ? . . .",
        ". ? . ? . .",
        "? . ? . ? .",
        ". ? . ? . ?",
        ". . ? . ? .",
        ". . . ? . ?",
    ]
    dependencies = {
        "a": {"a": set()},
        "b": {"b": set(), "a": set()},
        "c": {"c": set(), "b": set()},
        "d": {"c": set(), "d": set()},
    }
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure, dependencies)
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_4(backend):
    """Global latent feeding a plated chain whose sum is observed; the
    ``c.sum()`` observation couples the plate slices of c."""
    def model():
        a = pyro.sample("a", dist.Normal(0, 1))
        with pyro.plate("i", 2):
            b = pyro.sample("b", dist.Normal(a, 1))
            c = pyro.sample("c", dist.Normal(b, 1))
        pyro.sample("d", dist.Normal(c.sum(), 1), obs=torch.tensor(1.0))
    # size = 1 + 2 + 2 = 5
    structure = [
        "? ? ? . .",
        "? ? . ? .",
        "? . ? . ?",
        ". ? . ? ?",
        ". . ? ? ?",
    ]
    dependencies = {
        "a": {"a": set()},
        "b": {"b": set(), "a": set()},
        "c": {"c": set(), "b": set()},
        "d": {"c": set(), "d": set()},
    }
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure, dependencies)
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_5(backend):
    """Two globals (loc and log-scale) feeding a plated latent; the globals
    couple to every plate slice of c but c's slices stay independent."""
    def model():
        a = pyro.sample("a", dist.Normal(0, 1))
        b = pyro.sample("b", dist.Normal(0, 1))
        with pyro.plate("i", 2):
            c = pyro.sample("c", dist.Normal(a, b.exp()))
            pyro.sample("d", dist.Normal(c, 1), obs=torch.tensor(1.0))
    # size = 1 + 1 + 2 = 4
    structure = [
        "? ? ? ?",
        "? ? ? ?",
        "? ? ? .",
        "? ? . ?",
    ]
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure)
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_6(backend):
    """Two crossed plates (i, j) with a doubly-plated latent y observed;
    w couples along i, x couples along j."""
    I, J = 2, 3
    def model():
        i_plate = pyro.plate("i", I, dim=-1)
        j_plate = pyro.plate("j", J, dim=-2)
        with i_plate:
            w = pyro.sample("w", dist.Normal(0, 1))
        with j_plate:
            x = pyro.sample("x", dist.Normal(0, 1))
        with i_plate, j_plate:
            y = pyro.sample("y", dist.Normal(w, x.exp()))
            pyro.sample("z", dist.Normal(1, 1), obs=y)
    # size = 2 + 3 + 2 * 3 = 2 + 3 + 6 = 11
    structure = [
        "? . ? ? ? ? . ? . ? .",
        ". ? ? ? ? . ? . ? . ?",
        "? ? ? . . ? ? . . . .",
        "? ? . ? . . . ? ? . .",
        "? ? . . ? . . . . ? ?",
        "? . ? . . ? . . . . .",
        ". ? ? . . . ? . . . .",
        "? . . ? . . . ? . . .",
        ". ? . ? . . . . ? . .",
        "? . . . ? . . . . ? .",
        ". ? . . ? . . . . . ?",
    ]
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure)
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_7(backend):
    """Hierarchy with plate-breaking ``.mean()`` reductions between levels;
    the reductions densely couple adjacent levels."""
    I, J = 2, 3
    def model():
        i_plate = pyro.plate("i", I, dim=-1)
        j_plate = pyro.plate("j", J, dim=-2)
        a = pyro.sample("a", dist.Normal(0, 1))
        with i_plate:
            b = pyro.sample("b", dist.Normal(a, 1))
        with j_plate:
            c = pyro.sample("c", dist.Normal(b.mean(), 1))
        d = pyro.sample("d", dist.Normal(c.mean(), 1))
        pyro.sample("e", dist.Normal(1, 1), obs=d)
    # size = 1 + 2 + 3 + 1 = 7
    structure = [
        "? ? ? . . . .",
        "? ? ? ? ? ? .",
        "? ? ? ? ? ? .",
        ". ? ? ? ? ? ?",
        ". ? ? ? ? ? ?",
        ". ? ? ? ? ? ?",
        ". . . ? ? ? ?",
    ]
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure)
@pytest.mark.parametrize("backend", BACKENDS)
def test_structure_8(backend):
    """Re-entering the same plate after a plate-breaking ``.mean(-1)``:
    everything ends up fully coupled (dense 3x3 precision)."""
    def model():
        i_plate = pyro.plate("i", 2, dim=-1)
        with i_plate:
            a = pyro.sample("a", dist.Normal(0, 1))
        b = pyro.sample("b", dist.Normal(a.mean(-1), 1))
        with i_plate:
            pyro.sample("c", dist.Normal(b, 1), obs=torch.ones(2))
    # size = 2 + 1 = 3
    structure = [
        "? ? ?",
        "? ? ?",
        "? ? ?",
    ]
    if backend == "funsor":
        check_backends_agree(model)
    else:
        check_structure(model, structure)
@pytest.mark.parametrize("backend", BACKENDS)
def test_broken_plates_smoke(backend):
    """Smoke test: SVI + Predictive run on a model with a broken plate
    (the ``a.mean(-1)`` reduction escapes the "i" plate)."""
    def model():
        with pyro.plate("i", 2):
            a = pyro.sample("a", dist.Normal(0, 1))
        pyro.sample("b", dist.Normal(a.mean(-1), 1), obs=torch.tensor(0.0))
    guide = AutoGaussian(model, backend=backend)
    # Tiny learning rate: we only check that steps run, not that they learn.
    svi = SVI(model, guide, ClippedAdam({"lr": 1e-8}), Trace_ELBO())
    for step in range(2):
        with xfail_if_not_implemented():
            svi.step()
    guide()
    predictive = Predictive(model, guide=guide, num_samples=2)
    predictive()
@pytest.mark.parametrize("backend", BACKENDS)
def test_intractable_smoke(backend):
    """Smoke test: SVI + Predictive run on a crossed-plate model whose exact
    posterior structure is intractable for the guide."""
    def model():
        i_plate = pyro.plate("i", 2, dim=-1)
        j_plate = pyro.plate("j", 3, dim=-2)
        with i_plate:
            a = pyro.sample("a", dist.Normal(0, 1))
        with j_plate:
            b = pyro.sample("b", dist.Normal(0, 1))
        with i_plate, j_plate:
            c = pyro.sample("c", dist.Normal(a + b, 1))
            pyro.sample("d", dist.Normal(c, 1), obs=torch.zeros(3, 2))
    guide = AutoGaussian(model, backend=backend)
    # Tiny learning rate: we only check that steps run, not that they learn.
    svi = SVI(model, guide, ClippedAdam({"lr": 1e-8}), Trace_ELBO())
    for step in range(2):
        with xfail_if_not_implemented():
            svi.step()
    guide()
    predictive = Predictive(model, guide=guide, num_samples=2)
    predictive()
# Simplified from
# https://github.com/broadinstitute/pyro-cov/blob/master/pyrocov/mutrans.py
def pyrocov_model(dataset):
    """SARS-CoV-2 strain-growth model: multinomial counts of S strains over
    T time steps and P places, with growth rates driven by F mutation
    features. ``rate_loc`` is deterministic given ``coef``."""
    # Tensor shapes are commented at the end of some lines.
    features = dataset["features"]
    local_time = dataset["local_time"][..., None]  # [T, P, 1]
    T, P, _ = local_time.shape
    S, F = features.shape
    weekly_strains = dataset["weekly_strains"]
    assert weekly_strains.shape == (T, P, S)
    # Sample global random variables.
    coef_scale = pyro.sample("coef_scale", dist.InverseGamma(5e3, 1e2))[..., None]
    rate_scale = pyro.sample("rate_scale", dist.LogNormal(-4, 2))[..., None]
    init_loc_scale = pyro.sample("init_loc_scale", dist.LogNormal(0, 2))[..., None]
    init_scale = pyro.sample("init_scale", dist.LogNormal(0, 2))[..., None]
    # Assume relative growth rate depends strongly on mutations and weakly on place.
    coef_loc = torch.zeros(F)
    coef = pyro.sample("coef", dist.Logistic(coef_loc, coef_scale).to_event(1))  # [F]
    rate_loc = pyro.deterministic(
        "rate_loc", 0.01 * coef @ features.T, event_dim=1
    )  # [S]
    # Assume initial infections depend strongly on strain and place.
    init_loc = pyro.sample(
        "init_loc", dist.Normal(torch.zeros(S), init_loc_scale).to_event(1)
    )  # [S]
    with pyro.plate("place", P, dim=-1):
        rate = pyro.sample(
            "rate", dist.Normal(rate_loc, rate_scale).to_event(1)
        )  # [P, S]
        init = pyro.sample(
            "init", dist.Normal(init_loc, init_scale).to_event(1)
        )  # [P, S]
        # Finally observe counts.
        with pyro.plate("time", T, dim=-2):
            logits = init + rate * local_time  # [T, P, S]
            pyro.sample(
                "obs",
                dist.Multinomial(logits=logits, validate_args=False),
                obs=weekly_strains,
            )
# This is modified by relaxing rate from deterministic to latent.
def pyrocov_model_relaxed(dataset):
    """Variant of :func:`pyrocov_model` where ``rate_loc`` is a latent
    Normal site (scale ``rate_loc_scale``) instead of a deterministic."""
    # Tensor shapes are commented at the end of some lines.
    features = dataset["features"]
    local_time = dataset["local_time"][..., None]  # [T, P, 1]
    T, P, _ = local_time.shape
    S, F = features.shape
    weekly_strains = dataset["weekly_strains"]
    assert weekly_strains.shape == (T, P, S)
    # Sample global random variables.
    coef_scale = pyro.sample("coef_scale", dist.InverseGamma(5e3, 1e2))[..., None]
    rate_loc_scale = pyro.sample("rate_loc_scale", dist.LogNormal(-4, 2))[..., None]
    rate_scale = pyro.sample("rate_scale", dist.LogNormal(-4, 2))[..., None]
    init_loc_scale = pyro.sample("init_loc_scale", dist.LogNormal(0, 2))[..., None]
    init_scale = pyro.sample("init_scale", dist.LogNormal(0, 2))[..., None]
    # Assume relative growth rate depends strongly on mutations and weakly on place.
    coef_loc = torch.zeros(F)
    coef = pyro.sample("coef", dist.Logistic(coef_loc, coef_scale).to_event(1))  # [F]
    rate_loc = pyro.sample(
        "rate_loc",
        dist.Normal(0.01 * coef @ features.T, rate_loc_scale).to_event(1),
    )  # [S]
    # Assume initial infections depend strongly on strain and place.
    init_loc = pyro.sample(
        "init_loc", dist.Normal(torch.zeros(S), init_loc_scale).to_event(1)
    )  # [S]
    with pyro.plate("place", P, dim=-1):
        rate = pyro.sample(
            "rate", dist.Normal(rate_loc, rate_scale).to_event(1)
        )  # [P, S]
        init = pyro.sample(
            "init", dist.Normal(init_loc, init_scale).to_event(1)
        )  # [P, S]
        # Finally observe counts.
        with pyro.plate("time", T, dim=-2):
            logits = init + rate * local_time  # [T, P, S]
            pyro.sample(
                "obs",
                dist.Multinomial(logits=logits, validate_args=False),
                obs=weekly_strains,
            )
# This is modified by more precisely tracking plates for features and strains.
def pyrocov_model_plated(dataset):
    """Variant of :func:`pyrocov_model_relaxed` that declares explicit
    feature/strain/place/time plates instead of ``.to_event`` batching."""
    # Tensor shapes are commented at the end of some lines.
    features = dataset["features"]
    local_time = dataset["local_time"][..., None]  # [T, P, 1]
    T, P, _ = local_time.shape
    S, F = features.shape
    weekly_strains = dataset["weekly_strains"]  # [T, P, S]
    assert weekly_strains.shape == (T, P, S)
    feature_plate = pyro.plate("feature", F, dim=-1)
    strain_plate = pyro.plate("strain", S, dim=-1)
    place_plate = pyro.plate("place", P, dim=-2)
    time_plate = pyro.plate("time", T, dim=-3)
    # Sample global random variables.
    coef_scale = pyro.sample("coef_scale", dist.InverseGamma(5e3, 1e2))
    rate_loc_scale = pyro.sample("rate_loc_scale", dist.LogNormal(-4, 2))
    rate_scale = pyro.sample("rate_scale", dist.LogNormal(-4, 2))
    init_loc_scale = pyro.sample("init_loc_scale", dist.LogNormal(0, 2))
    init_scale = pyro.sample("init_scale", dist.LogNormal(0, 2))
    with feature_plate:
        coef = pyro.sample("coef", dist.Logistic(0, coef_scale))  # [F]
    rate_loc_loc = 0.01 * coef @ features.T
    with strain_plate:
        rate_loc = pyro.sample(
            "rate_loc", dist.Normal(rate_loc_loc, rate_loc_scale)
        )  # [S]
        init_loc = pyro.sample("init_loc", dist.Normal(0, init_loc_scale))  # [S]
    with place_plate, strain_plate:
        rate = pyro.sample("rate", dist.Normal(rate_loc, rate_scale))  # [P, S]
        init = pyro.sample("init", dist.Normal(init_loc, init_scale))  # [P, S]
    # Finally observe counts.
    with time_plate, place_plate:
        # Multinomial is over strains, so strain becomes an event dim here.
        logits = (init + rate * local_time)[..., None, :]  # [T, P, 1, S]
        pyro.sample(
            "obs",
            dist.Multinomial(logits=logits, validate_args=False),
            obs=weekly_strains[..., None, :],
        )
# This is modified by replacing the multinomial likelihood with poisson.
def pyrocov_model_poisson(dataset):
    """Variant of :func:`pyrocov_model_plated` with a Poisson likelihood
    (total count modeled by a latent ``pois`` site per time/place)."""
    # Tensor shapes are commented at the end of some lines.
    features = dataset["features"]
    local_time = dataset["local_time"][..., None]  # [T, P, 1]
    T, P, _ = local_time.shape
    S, F = features.shape
    weekly_strains = dataset["weekly_strains"]  # [T, P, S]
    # Skip the shape assertion while jit-tracing.
    if not torch._C._get_tracing_state():
        assert weekly_strains.shape == (T, P, S)
    strain_plate = pyro.plate("strain", S, dim=-1)
    place_plate = pyro.plate("place", P, dim=-2)
    time_plate = pyro.plate("time", T, dim=-3)
    # Sample global random variables.
    coef_scale = pyro.sample("coef_scale", dist.LogNormal(-4, 2))
    rate_loc_scale = pyro.sample("rate_loc_scale", dist.LogNormal(-4, 2))
    rate_scale = pyro.sample("rate_scale", dist.LogNormal(-4, 2))
    init_loc_scale = pyro.sample("init_loc_scale", dist.LogNormal(0, 2))
    init_scale = pyro.sample("init_scale", dist.LogNormal(0, 2))
    pois_loc = pyro.sample("pois_loc", dist.Normal(0, 2))
    pois_scale = pyro.sample("pois_scale", dist.LogNormal(0, 2))
    coef = pyro.sample(
        "coef", dist.Logistic(torch.zeros(F), coef_scale).to_event(1)
    )  # [F]
    rate_loc_loc = 0.01 * coef @ features.T
    with strain_plate:
        rate_loc = pyro.sample(
            "rate_loc", dist.Normal(rate_loc_loc, rate_loc_scale)
        )  # [S]
        init_loc = pyro.sample("init_loc", dist.Normal(0, init_loc_scale))  # [S]
    with place_plate, strain_plate:
        rate = pyro.sample("rate", dist.Normal(rate_loc, rate_scale))  # [P, S]
        init = pyro.sample("init", dist.Normal(init_loc, init_scale))  # [P, S]
    # Finally observe counts.
    with time_plate, place_plate:
        pois = pyro.sample("pois", dist.LogNormal(pois_loc, pois_scale))
    with time_plate, place_plate, strain_plate:
        # Note .softmax() breaks conditional independence over strain, but only
        # weakly. We could directly call .exp(), but .softmax is more
        # numerically stable.
        logits = pois * (init + rate * local_time).softmax(-1)  # [T, P, S]
        pyro.sample("obs", dist.Poisson(logits), obs=weekly_strains)
class PoissonGuide(AutoGuideList):
    """Two-part guide for the Poisson model: one AutoGaussian over the
    ``pois`` sites and one over all remaining sites, each seeing only its
    half of the model via ``poutine.block``."""

    def __init__(self, model, backend):
        super().__init__(model)
        for hide_fn in (self.hide_fn_1, self.hide_fn_2):
            blocked = poutine.block(model, hide_fn=hide_fn)
            self.append(AutoGaussian(blocked, backend=backend))

    @staticmethod
    def hide_fn_1(msg):
        """Hide every sample site whose name contains "pois"."""
        return msg["type"] == "sample" and "pois" in msg["name"]

    @staticmethod
    def hide_fn_2(msg):
        """Hide every sample site whose name does not contain "pois"."""
        return msg["type"] == "sample" and "pois" not in msg["name"]
# (model, guide class) pairs exercised by the pyro-cov tests below.
PYRO_COV_MODELS = [
    (pyrocov_model, AutoGaussian),
    (pyrocov_model_relaxed, AutoGaussian),
    (pyrocov_model_plated, AutoGaussian),
    (pyrocov_model_poisson, PoissonGuide),
]
@pytest.mark.parametrize("model, Guide", PYRO_COV_MODELS)
@pytest.mark.parametrize("backend", BACKENDS)
def test_pyrocov_smoke(model, Guide, backend):
    """Smoke test: SVI + Predictive run on each pyro-cov model variant with
    small fake data."""
    T, P, S, F = 3, 4, 5, 6
    dataset = {
        "features": torch.randn(S, F),
        "local_time": torch.randn(T, P),
        "weekly_strains": torch.randn(T, P, S).exp().round(),
    }
    guide = Guide(model, backend=backend)
    # Tiny learning rate: we only check that steps run, not that they learn.
    svi = SVI(model, guide, ClippedAdam({"lr": 1e-8}), Trace_ELBO())
    for step in range(2):
        with xfail_if_not_implemented():
            svi.step(dataset)
    guide(dataset)
    predictive = Predictive(model, guide=guide, num_samples=2)
    predictive(dataset)
@pytest.mark.parametrize("model, Guide", PYRO_COV_MODELS)
@pytest.mark.parametrize("backend", BACKENDS)
def test_pyrocov_reparam(model, Guide, backend):
    """Smoke test: the pyro-cov models still train under LocScaleReparam."""
    T, P, S, F = 2, 3, 4, 5
    dataset = {
        "features": torch.randn(S, F),
        "local_time": torch.randn(T, P),
        "weekly_strains": torch.randn(T, P, S).exp().round(),
    }
    # Reparametrize the model.
    config = {
        "coef": LocScaleReparam(),
        # rate_loc is deterministic in pyrocov_model, so it cannot be reparametrized.
        "rate_loc": None if model is pyrocov_model else LocScaleReparam(),
        "rate": LocScaleReparam(),
        "init_loc": LocScaleReparam(),
        "init": LocScaleReparam(),
    }
    model = poutine.reparam(model, config)
    guide = Guide(model, backend=backend)
    # Tiny learning rate: we only check that steps run, not that they learn.
    svi = SVI(model, guide, ClippedAdam({"lr": 1e-8}), Trace_ELBO())
    for step in range(2):
        with xfail_if_not_implemented():
            svi.step(dataset)
    guide(dataset)
    predictive = Predictive(model, guide=guide, num_samples=2)
    predictive(dataset)
@pytest.mark.stage("funsor")
def test_pyrocov_structure():
    """Check the funsor backend's discovered structure (plates, eliminated
    variables, per-factor inputs) on the Poisson pyro-cov model."""
    from funsor import Bint, Real, Reals
    T, P, S, F = 2, 3, 4, 5
    dataset = {
        "features": torch.randn(S, F),
        "local_time": torch.randn(T, P),
        "weekly_strains": torch.randn(T, P, S).exp().round(),
    }
    guide = PoissonGuide(pyrocov_model_poisson, backend="funsor")
    guide(dataset)  # initialize
    guide = guide[0]  # pull out AutoGaussian part of PoissonGuide
    expected_plates = frozenset(["place", "strain"])
    assert guide._funsor_plates == expected_plates
    expected_eliminate = frozenset(
        [
            "coef",
            "coef_scale",
            "init",
            "init_loc",
            "init_loc_scale",
            "init_scale",
            "place",
            "rate",
            "rate_loc",
            "rate_loc_scale",
            "rate_scale",
            "strain",
        ]
    )
    assert guide._funsor_eliminate == expected_eliminate
    # One factor per model site, keyed by site name; inputs list the plates
    # and upstream variables each factor depends on.
    expected_factor_inputs = {
        "coef_scale": OrderedDict([("coef_scale", Real)]),
        "rate_loc_scale": OrderedDict([("rate_loc_scale", Real)]),
        "rate_scale": OrderedDict([("rate_scale", Real)]),
        "init_loc_scale": OrderedDict([("init_loc_scale", Real)]),
        "init_scale": OrderedDict([("init_scale", Real)]),
        "coef": OrderedDict([("coef", Reals[5]), ("coef_scale", Real)]),
        "rate_loc": OrderedDict(
            [
                ("strain", Bint[4]),
                ("rate_loc", Real),
                ("rate_loc_scale", Real),
                ("coef", Reals[5]),
            ]
        ),
        "init_loc": OrderedDict(
            [
                ("strain", Bint[4]),
                ("init_loc", Real),
                ("init_loc_scale", Real),
            ]
        ),
        "rate": OrderedDict(
            [
                ("place", Bint[3]),
                ("strain", Bint[4]),
                ("rate", Real),
                ("rate_scale", Real),
                ("rate_loc", Real),
            ]
        ),
        "init": OrderedDict(
            [
                ("place", Bint[3]),
                ("strain", Bint[4]),
                ("init", Real),
                ("init_scale", Real),
                ("init_loc", Real),
            ]
        ),
        "obs": OrderedDict(
            [
                ("place", Bint[3]),
                ("strain", Bint[4]),
                ("rate", Real),
                ("init", Real),
            ]
        ),
    }
    assert guide._funsor_factor_inputs == expected_factor_inputs
@pytest.mark.parametrize("jit", [False, True], ids=["nojit", "jit"])
@pytest.mark.parametrize("backend", BACKENDS)
def test_profile(backend, jit, n=1, num_steps=1, log_every=1):
    """
    Helper function for profiling.

    Runs SVI on the Poisson pyro-cov model with problem size scaled by ``n``.
    Doubles as a tiny smoke test under pytest (n=1, one step); the
    ``__main__`` block below calls it with larger arguments for profiling.
    """
    print("Generating fake data")
    model = pyrocov_model_poisson
    # Cap T at 50 so the time dimension stays manageable for large n.
    T, P, S, F = min(n, 50), n + 1, n + 2, n + 3
    dataset = {
        "features": torch.randn(S, F),
        "local_time": torch.randn(T, P),
        "weekly_strains": torch.randn(T, P, S).exp().round(),
    }
    print("Initializing guide")
    guide = PoissonGuide(model, backend=backend)
    guide(dataset)  # initialize
    print("Parameter shapes:")
    for name, param in guide.named_parameters():
        print(f"  {name}: {tuple(param.shape)}")
    print("Training")
    Elbo = JitTrace_ELBO if jit else Trace_ELBO
    elbo = Elbo(max_plate_nesting=3, ignore_jit_warnings=True)
    svi = SVI(model, guide, ClippedAdam({"lr": 1e-8}), elbo)
    for step in range(num_steps):
        loss = svi.step(dataset)
        if log_every and step % log_every == 0:
            print(f"step {step} loss = {loss}")
if __name__ == "__main__":
    import argparse
    import cProfile
    # Usage: time python -m tests.infer.autoguide.test_gaussian
    parser = argparse.ArgumentParser(description="Profiler for pyro-cov model")
    parser.add_argument("-b", "--backend", default="funsor")
    parser.add_argument("-s", "--size", default=10, type=int)
    parser.add_argument("-n", "--num-steps", default=1001, type=int)
    parser.add_argument("-fp64", "--double", action="store_true")
    parser.add_argument("-fp32", "--float", action="store_false", dest="double")
    parser.add_argument("--cuda", action="store_true")
    parser.add_argument("--cpu", dest="cuda", action="store_false")
    # jit defaults to on; --no-jit turns it off.
    parser.add_argument("--jit", default=True, action="store_true")
    parser.add_argument("--no-jit", dest="jit", action="store_false")
    parser.add_argument("-l", "--log-every", default=1, type=int)
    # If given, cProfile stats are dumped to this file path.
    parser.add_argument("-p", "--profile")
    args = parser.parse_args()
    torch.set_default_dtype(torch.double if args.double else torch.float)
    if args.cuda:
        torch.set_default_device("cuda")
    if args.profile:
        p = cProfile.Profile()
        p.enable()
    test_profile(
        backend=args.backend,
        jit=args.jit,
        n=args.size,
        num_steps=args.num_steps,
        log_every=args.log_every,
    )
    if args.profile:
        p.disable()
        p.dump_stats(args.profile)
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@infer@autoguide@test_gaussian.py@.PATH_END.py
|
{
"filename": "crosscorrNorm.py",
"repo_name": "eogarvin/MLCCS",
"repo_path": "MLCCS_extracted/MLCCS-main/ml_spectroscopy/crosscorrNorm.py",
"type": "Python"
}
|
import numpy as np
from PyAstronomy.pyaC import pyaErrors as PE
from PyAstronomy.pyasl import _ic
def crosscorrRVnorm(w, f, tw, tf, rvmin, rvmax, drv, mode="doppler", skipedge=0, edgeTapering=None):
    """
    Cross-correlate a spectrum with a template (normalized variant).

    The algorithm implemented here works as follows: For
    each RV shift to be considered, the wavelength axis
    of the template is shifted, either linearly or using
    a proper Doppler shift depending on the `mode`. The
    shifted template is then linearly interpolated at
    the wavelength points of the observation
    (spectrum) to calculate the cross-correlation function.
    Each value of the cross-correlation is normalized by
    sqrt(sum(f^2) * sum(fi^2)), i.e., it is a correlation
    coefficient in [-1, 1].

    Parameters
    ----------
    w : array
        The wavelength axis of the observation.
    f : array
        The flux axis of the observation.
    tw : array
        The wavelength axis of the template.
    tf : array
        The flux axis of the template.
    rvmin : float
        Minimum radial velocity for which to calculate
        the cross-correlation function [km/s].
    rvmax : float
        Maximum radial velocity for which to calculate
        the cross-correlation function [km/s].
    drv : float
        The width of the radial-velocity steps to be applied
        in the calculation of the cross-correlation
        function [km/s].
    mode : string, {lin, doppler}, optional
        The mode determines how the wavelength axis will be
        modified to represent a RV shift. If "lin" is specified,
        a mean wavelength shift will be calculated based on the
        mean wavelength of the observation. The wavelength axis
        will then be shifted by that amount. If "doppler" is
        specified (the default), the wavelength axis will
        properly be Doppler-shifted.
    skipedge : int, optional
        If larger zero, the specified number of bins will be
        skipped from the begin and end of the observation. This
        may be useful if the template does not provide sufficient
        coverage of the observation.
    edgeTapering : float or tuple of two floats
        If not None, the method will "taper off" the edges of the
        observed spectrum by multiplying with a sine function. If a float number
        is specified, this will define the width (in wavelength units)
        to be used for tapering on both sides. If different tapering
        widths shall be used, a tuple with two (positive) numbers
        must be given, specifying the width to be used on the low- and
        high wavelength end. If a nonzero 'skipedge' is given, it
        will be applied first. Edge tapering can help to avoid
        edge effects (see, e.g., Gullberg and Lindegren 2002, A&A 390).

    Returns
    -------
    CC : array
        The normalized cross-correlation function.
    dRV : array
        The RV axis of the cross-correlation function. The radial
        velocities refer to a shift of the template, i.e., positive
        values indicate that the template has been red-shifted and
        negative numbers indicate a blue-shift of the template.
        The numbers are given in km/s.
    """
    if not _ic.check["scipy"]:
        raise(PE.PyARequiredImport("This routine needs scipy (.interpolate.interp1d).", \
                                   where="crosscorrRV", \
                                   solution="Install scipy"))
    import scipy.interpolate as sci
    # Copy and cut wavelength and flux arrays
    w, f = w.copy(), f.copy()
    if skipedge > 0:
        w, f = w[skipedge:-skipedge], f[skipedge:-skipedge]
    if edgeTapering is not None:
        # Smooth the edges using a sine
        # Accept a plain int as well as a float for the tapering width.
        if isinstance(edgeTapering, (int, float)):
            edgeTapering = [edgeTapering, edgeTapering]
        if len(edgeTapering) != 2:
            raise(PE.PyAValError("'edgeTapering' must be a float or a list of two floats.", \
                                 where="crosscorrRV"))
        if edgeTapering[0] < 0.0 or edgeTapering[1] < 0.0:
            raise(PE.PyAValError("'edgeTapering' must be (a) number(s) >= 0.0.", \
                                 where="crosscorrRV"))
        # Carry out edge tapering (left edge)
        indi = np.where(w < w[0]+edgeTapering[0])[0]
        f[indi] *= np.sin((w[indi] - w[0])/edgeTapering[0]*np.pi/2.0)
        # Carry out edge tapering (right edge)
        indi = np.where(w > (w[-1]-edgeTapering[1]))[0]
        f[indi] *= np.sin((w[indi] - w[indi[0]])/edgeTapering[1]*np.pi/2.0 + np.pi/2.0)
    # Speed of light in km/s
    c = 299792.458
    # Check order of rvmin and rvmax
    if rvmax <= rvmin:
        raise(PE.PyAValError("rvmin needs to be smaller than rvmax.",
                             where="crosscorrRV", \
                             solution="Change the order of the parameters."))
    # Check whether template is large enough
    if mode == "lin":
        meanWl = np.mean(w)
        dwlmax = meanWl * (rvmax/c)
        dwlmin = meanWl * (rvmin/c)
        if (tw[0] + dwlmax) > w[0]:
            raise(PE.PyAValError("The minimum wavelength is not covered by the template for all indicated RV shifts.", \
                                 where="crosscorrRV", \
                                 solution=["Provide a larger template", "Try to use skipedge"]))
        if (tw[-1] + dwlmin) < w[-1]:
            raise(PE.PyAValError("The maximum wavelength is not covered by the template for all indicated RV shifts.", \
                                 where="crosscorrRV", \
                                 solution=["Provide a larger template", "Try to use skipedge"]))
    elif mode == "doppler":
        # Ensure that the template covers the entire observation for all shifts
        maxwl = tw[-1] * (1.0+rvmin/c)
        minwl = tw[0] * (1.0+rvmax/c)
        if minwl > w[0]:
            raise(PE.PyAValError("The minimum wavelength is not covered by the template for all indicated RV shifts.", \
                                 where="crosscorrRV", \
                                 solution=["Provide a larger template", "Try to use skipedge"]))
        if maxwl < w[-1]:
            raise(PE.PyAValError("The maximum wavelength is not covered by the template for all indicated RV shifts.", \
                                 where="crosscorrRV", \
                                 solution=["Provide a larger template", "Try to use skipedge"]))
    else:
        raise(PE.PyAValError("Unknown mode: " + str(mode), \
                             where="crosscorrRV", \
                             solution="See documentation for available modes."))
    # Calculate the cross correlation
    drvs = np.arange(rvmin, rvmax, drv)
    cc = np.zeros(len(drvs))
    # sum(f^2) does not depend on the RV shift; compute it once.
    sumff = np.sum(f*f)
    for i, rv in enumerate(drvs):
        if mode == "lin":
            # Shift the template linearly
            fi = sci.interp1d(tw+meanWl*(rv/c), tf)
        elif mode == "doppler":
            # Apply the Doppler shift
            fi = sci.interp1d(tw*(1.0 + rv/c), tf)
        # Shifted template evaluated at location of spectrum (evaluate once).
        fiw = fi(w)
        cc[i] = np.sum(f * fiw)/np.sqrt(sumff*np.sum(fiw*fiw))
    return cc, drvs
|
eogarvinREPO_NAMEMLCCSPATH_START.@MLCCS_extracted@MLCCS-main@ml_spectroscopy@crosscorrNorm.py@.PATH_END.py
|
{
"filename": "notebook.py",
"repo_name": "ashleychontos/sort-a-survey",
"repo_path": "sort-a-survey_extracted/sort-a-survey-main/sortasurvey/notebook.py",
"type": "Python"
}
|
import os
import numpy as np
import pandas as pd
import time as clock
from tqdm import tqdm
pd.set_option('mode.chained_assignment', None)
from sortasurvey.sample import Sample
from sortasurvey import observing
from sortasurvey import utils
class Survey:
"""
Loads in survey information and the vetted survey sample.
Parameters
----------
args : argparse.Namespace
command line arguments
Attributes
----------
verbose : bool
verbose output (default is `True`)
save : bool
save output (default is `True`)
outdir : str
path to save output files
progress : bool
show progress bar of selection process (this will only work with the verbose output on)
sample : pandas.DataFrame
pandas dataframe containing the sample to select targets from -> this is not updated, this is preserved
candidates : pandas.DataFrame
copy of the vetted survey sample dataframe -> this is updated during the selection process
programs : pandas.DataFrame
pandas dataframe containing survey information -> this is not updated, this is preserved
sciences : pandas.DataFrame
copy of the survey programs dataframe -> this is updated during the selection process
track : dict
logs each iteration of the target selection
iter : int
number of selection process iterations. Default is `1` (via args.iter).
n : int
iteration number
emcee : bool
`True` if iter > 1 but `False` by default.
"""
    def __init__(self, verbose=True, path_sample='info/TKS_sample.csv', path_survey='info/survey_info.csv',
                 path_priority='info/high_priority.csv', path_ignore='info/no_no.csv', iter=1, hours=10., nights=50., progress=True,
                 save=True, note='', notebook=True):
        """
        Loads the vetted sample and survey program information, then prepares
        the working copies (`candidates`, `sciences`) that the selection loop
        mutates; the originals (`sample`, `programs`) are preserved.
        """
        self.verbose = verbose
        self.save = save
        # all output products are written under ./results
        self.outdir = os.path.join(os.path.abspath(os.getcwd()), 'results')
        self.note = note
        self.iter = iter
        self.hours = hours
        self.nights = nights
        self.notebook = notebook
        self.progress = progress
        # track logs every selection, keyed first by MC iteration number
        self.track = {}
        for n in np.arange(1,self.iter+1):
            self.track[n] = {}
        self.note += '\n -----------------------------\n -- prioritization starting --\n -----------------------------\n\n - loading sample and survey science information\n'
        # order matters: get_sample() populates self.sample and the program
        # name list that get_programs() and add_columns() rely on
        self.get_sample(path_survey, path_sample, nights, hours)
        self.get_programs(path_survey, path_ignore, path_priority)
        self.get_seeds()
        if self.iter > 1:
            # multiple MC iterations -> "emcee" mode with optional progress bar
            self.emcee = True
            if self.verbose and self.progress:
                self.pbar = tqdm(total=self.iter)
        else:
            self.emcee = False
        # working copies mutated during selection; originals stay pristine
        self.candidates = self.sample.copy()
        self.sciences = self.programs.copy()

    def get_sample(self, path_survey, path_sample, nights, hours, dec=-30., ruwe=2.):
        """
        Fixes the sample based on specific survey needs. Broadly for TKS,
        this only required that the target is observable (dec > -30.) and
        possessed a reasonable Gaia RUWE metric (ruwe < 2., where higher
        values typically indicate unresolved binaries).

        Parameters
        ----------
        path_survey : str
            csv with one row per survey program (used here for program names)
        path_sample : str
            csv containing the full vetted target sample
        nights : float
            total allocated nights (passed through to get_counts for the log)
        hours : float
            hours per night (passed through to get_counts for the log)
        dec : float
            lowest possible declination for targets to observe with Keck HIRES
        ruwe : float
            astrometric Gaia RUWE (renormalized unit weight error) metric
        """
        df = pd.read_csv(path_survey, comment="#")
        # program names only; replaced by the full program table in get_programs()
        self.programs = df.programs.values.tolist()
        sample = pd.read_csv(path_sample)
        self.path_sample = path_sample
        self.sample = sample.query("dec > %f and ruwe < %f"%(dec, ruwe))
        self.remove_bad()
        self.add_columns()
        # science-case-specific metrics
        self.get_sc3_info()
        self.get_counts(nights, hours)
def remove_bad(self, disp=['FP','EB','NEB','BEB','SV','BD','NPC','SB1','SB2','FA']):
"""
Removes unfavorable targets from the survey. In this case, this includes false
alarms and/or false positives, including nearby/blended eclipsing binaries as well
as spectroscopic false positives (e.g., SB1, SB2).
Parameters
----------
disp : List[str]
a list of unfavorable dispositions to ignore for the target selection process
"""
for bad in disp:
self.sample.query("disp != '%s'"%bad, inplace=True)
self.sample.query("drop == False", inplace=True)
self.sample.query("finish == False", inplace=True)
def add_columns(self, cols=["npl","select_DG","in_other_programs","n_select","priority"]):
"""
Adds in additional columns that might be relevant for the target selection.
"""
cols += ["in_%s"%program for program in self.programs]
for col in cols:
if col == 'npl':
self.sample[col] = self.sample.groupby('tic')['tic'].transform('count')
else:
self.sample[col] = [0]*len(self.sample)
def get_counts(self, nights, hours):
"""
Compute the number of targets that passed the different vetting steps.
"""
query = self.sample.drop_duplicates(subset='tic')
new_query = query.query("photo_vetting == 'passed' and spec_vetting != 'failed' and spec_vetting != 'do not observe' and ao_vetting != 'failed'")
self.passed_tks = len(query)
self.passed_vet = len(new_query)
self.note += ' - %d targets make the standard survey cuts\n - %d have also passed various vetting steps\n - ranking algorithm initialized using %.1f nights (%.1f hr/n)'%(self.passed_tks,self.passed_vet, nights, hours)
    def get_programs(self, path_survey, path_ignore, path_priority):
        """
        Stores all relevant information for every program in the survey.
        This is initially loaded in via path_survey but also loads in
        any high priority targets and/or targets to ignore.

        Parameters
        ----------
        path_survey : str
            csv with one row per program (allocations, filters, metrics)
        path_ignore : str or None
            csv of per-program targets to exclude ('-' entries are skipped)
        path_priority : str or None
            csv of per-program high-priority targets ('-' entries are skipped)

        Attributes
        ----------
        programs : pandas.DataFrame
            **very important** dataframe containing all survey program information
        """
        programs = pd.read_csv(path_survey, comment="#")
        programs.set_index('programs', inplace=True, drop=False)
        if path_ignore is not None:
            nono = pd.read_csv(path_ignore)
        if path_priority is not None:
            priority = pd.read_csv(path_priority)
        for program in programs.index.values.tolist():
            # get initial allocation: fractional share of the total time,
            # plus any explicit carry-over hours from the csv
            programs.loc[program,'total_time'] = (programs.loc[program,'allocations']/(np.sum(programs.allocations)))*self.nights*self.hours
            if not np.isnan(programs.loc[program,'remaining_hours']):
                programs.loc[program,'total_time'] += programs.loc[program,'remaining_hours']
            programs.loc[program,'remaining_hours'] = programs.loc[program,'total_time']
            # adjust filter for priority/ignore targets: both are removed
            # from the regular selection pool (priority targets are handled
            # separately via the 'high_priority' column below)
            if path_priority is not None:
                high_priority = [float(target) for target in priority[program].values if target != '-']
            else:
                high_priority = []
            if path_ignore is not None:
                no_no = [float(target) for target in nono[program].values if target != '-']
            else:
                no_no = []
            if high_priority + no_no != []:
                for toi in (high_priority + no_no):
                    programs.loc[program,'filter'] += " and toi != %.2f"%toi
            if programs.loc[program,'n_maximum'] != -1:
                # explicit cap on selections for this program
                programs.loc[program,'n_targets_left'] = programs.loc[program,'n_maximum']
            else:
                # otherwise count the unique systems (by integer TOI) passing
                # the program's filter, plus its high-priority targets
                query = self.sample.query(programs.loc[program,'filter'])
                targets = query.toi.values.tolist() + high_priority
                targets = [int(np.floor(each)) for each in targets]
                programs.loc[program,'n_targets_left'] = len(list(set(targets)))
            # get the prioritization metric ('|'-separated column names)
            prioritize_by=[]
            for each in programs.loc[program,'prioritize_by'].strip().split('|'):
                prioritize_by.append(each)
            programs.at[program,'prioritize_by'] = prioritize_by
            # matching '|'-separated ascending/descending flags
            ascending_by=[]
            for each in programs.loc[program,'ascending_by'].strip().split('|'):
                if each == 'True' or str(each) == 'True' or str(each) == 'TRUE':
                    ascending_by.append(True)
                else:
                    ascending_by.append(False)
            programs.at[program,'ascending_by'] = ascending_by
        programs['pick_number'] = np.zeros(len(programs)).astype('int64')
        programs['n_targets_left'] = programs['n_targets_left'].astype('int64')
        programs.rename(columns={'programs':'name'}, inplace=True)
        programs.drop(columns=['allocations'], inplace=True)
        # round-trip through a dict to attach the per-program high_priority list
        programs = programs.to_dict('index')
        for program in programs:
            if path_priority is not None:
                programs[program]['high_priority'] = [float(target) for target in priority[program].values if target != '-']
            else:
                programs[program]['high_priority'] = []
        programs = pd.DataFrame.from_dict(programs, orient='index', columns=['name','method','filter','prioritize_by','ascending_by','remaining_hours','n_maximum','total_time','high_priority','n_targets_left','pick_number'])
        # save a copy of the fully-populated program table next to the input csv
        programs.to_csv('%s_copy.%s'%(path_survey.split('.')[0],path_survey.split('.')[-1]))
        self.programs = programs.copy()
# science-case-specific functions
    def get_sc3_info(self, include_qlp=True, mask=None):
        """
        Computes the science-case-3 (atmospheres) ranking: calculates the
        TSM for the sample, bins planets on a (radius, insolation, Teff)
        grid and ranks the best targets per bin by TSM per unit 5-sigma
        observing time; results are merged back as 'SC3_bin_rank'.

        Parameters
        ----------
        include_qlp : bool
            if False, restrict the TSM calculation to rows with source 'spoc'
        mask : array-like of bool, optional
            optional pre-selection passed through to calculate_TSM
        """
        if not include_qlp:
            mask = self.sample['source'] == 'spoc'
        self.calculate_TSM(mask=mask)
        sc3_df = self.sample.copy()
        sc3_mask = pd.notnull(sc3_df['TSM'])
        # 5 log-spaced radius bins (top edge clipped to 11.2), 5 log-spaced
        # insolation bins, and 3 Teff bins
        rp_bins = 10**(np.linspace(0,1,6))
        rp_bins[-1] = 11.2
        sinc_bins = 10**(np.linspace(-1,4,6))
        teff_bins = np.array([2500,3900,5200,6500])
        bins = [rp_bins, sinc_bins, teff_bins]
        # guard against divide-by-zero in the X metric below
        sc3_df['rt_5sig'] = sc3_df['rt_5sig'].replace(0., 1e-2)
        sc3_mask &= pd.notnull(sc3_df['rt_5sig'])
        sc3_df = sc3_df[sc3_mask]
        # ranking metric: TSM per unit 5-sigma observing time
        sc3_df['X'] = sc3_df['TSM']/sc3_df['rt_5sig']
        sc3_df = self.sc3_binning_function(sc3_df, bins, sort_val='X')
        # unranked targets get NaN instead of a literal rank of 0
        sc3_df['SC3_bin_rank'] = sc3_df['SC3_bin_rank'].replace(0., np.nan)
        cols_to_use = ['toi','SC3_bin_rank']
        self.sample = self.sample.merge(sc3_df[cols_to_use], how='left', on='toi')
def calculate_TSM(self, mask=None):
"""
Calculate the transmission spectroscopy metric (TSM) for all targets
in the survey.sample
Parameters
----------
mask : bool
"""
self.sample['TSM'] = [np.nan]*len(self.sample)
if mask is None:
mask = np.ones(len(self.sample), dtype=bool)
for key in ['rp','mp','a_to_R','teff','jmag','r_s']:
mask &= pd.notnull(self.sample[key])
def rp_to_scale_factor(rp):
if rp < 1.5:
scale_factor = 0.19
elif rp > 1.5 and rp < 2.75:
scale_factor = 1.26
elif rp > 2.75 and rp < 4:
scale_factor = 1.28
else:
scale_factor = 1.15
return scale_factor
scale_factors = self.sample.loc[mask,'rp'].apply(rp_to_scale_factor)
Teqs = self.sample.loc[mask,'teff']*np.sqrt(np.reciprocal(self.sample.loc[mask,'a_to_R'])*np.sqrt(0.25))
numerator = scale_factors*np.power(self.sample.loc[mask, 'rp'], 3)*Teqs*np.power(10, -1*self.sample.loc[mask,'jmag']/5)
denominator = self.sample.loc[mask,'mp']*np.square(self.sample.loc[mask,'r_s'])
self.sample.loc[mask,'TSM'] = numerator/denominator
    def sc3_binning_function(self, df, bins, sort_val='TSM', num_to_rank=5):
        """
        Bins planets on the (radius, insolation, Teff) grid and ranks up to
        `num_to_rank` planets per occupied bin by `sort_val` (descending);
        ranks (1 = best) are written to the 'SC3_bin_rank' column, all other
        rows keep 0.

        Parameters
        ----------
        df : pandas.DataFrame
            sample rows with 'rp', 'sinc', 'teff', 'toi' and `sort_val` columns
        bins : list
            three bin-edge arrays: [rp_bins, sinc_bins, teff_bins]
        sort_val : str
            column to rank by within each bin
        num_to_rank : int
            maximum number of ranked planets per bin
        """
        pre_bin = df.assign(
            rp_bin = pd.cut(df['rp'],bins=bins[0],labels = [1,2,3,4,5]),
            sinc_bin = pd.cut(df['sinc'],bins=bins[1],labels = [1,2,3,4,5]),
            teff_bin = pd.cut(df['teff'],bins=bins[2],labels = [1,2,3])
        )
        # drop planets outside the grid, then sort within each 3D bin
        binned_df = pre_bin.dropna(subset=['rp_bin','sinc_bin','teff_bin']).\
            groupby(['rp_bin','sinc_bin','teff_bin']).apply(lambda _pre_bin:\
            _pre_bin.sort_values(by=[sort_val],ascending=False))\
            .reset_index(level = 3,drop=True)
        # occupied bins in first-seen order (index is the 3-tuple bin label)
        all_idx = binned_df.index.tolist()
        unique_idx = []
        for element in all_idx:
            if element not in unique_idx:
                unique_idx.append(element)
        binned_df['SC3_bin_rank'] = np.zeros(len(binned_df))
        for idx in unique_idx:
            # number of rankable planets in this bin (capped at num_to_rank)
            bin_items = len(binned_df.loc[idx].sort_values(sort_val,ascending=False).iloc[0:num_to_rank]['toi'])
            for i in range(1, num_to_rank+1):
                if bin_items == i and bin_items <= num_to_rank:
                    # assign ranks 1..bin_items by matching TOI back into the frame
                    for j in range(i):
                        binned_df.loc[binned_df['toi'] == binned_df.loc[idx].sort_values\
                            (sort_val,ascending=False).iloc[0:num_to_rank]['toi'].iloc[j],'SC3_bin_rank'] = j+1
                elif bin_items > num_to_rank:
                    for j in range(num_to_rank):
                        binned_df.loc[binned_df['toi'] == binned_df.loc[idx].sort_values\
                            (sort_val,ascending=False).iloc[0:num_to_rank]['toi'].iloc[j],'SC3_bin_rank'] = j+1
        return binned_df
    def reset_track(self):
        """
        For MC iterations > 1, this module resets all the required information
        back to initial starting conditions, including the starting sample and
        initial program information (e.g., allocation, etc.), as well as a
        new survey.track entry to log the new selection process to.
        """
        # make copies of the original dataframes, thus resetting the information
        self.candidates = self.sample.copy()
        self.sciences = self.programs.copy()
        # step 0 of this iteration's track records the pristine allocations
        self.track[self.n][0] = {}
        for program, hours in zip(self.sciences.index.values.tolist(), self.sciences.remaining_hours.values.tolist()):
            self.track[self.n][0][program] = round(hours,3)
        self.track[self.n][0]['total_time'] = round(np.sum(self.sciences.remaining_hours.values.tolist()),3)
        # placeholder fields for the "no pick yet" row
        self.track[self.n][0]['program'] = '--'
        self.track[self.n][0]['program_pick'] = 0
        self.track[self.n][0]['overall_priority'] = 0
        self.track[self.n][0]['toi'] = 0
        self.track[self.n][0]['tic'] = 0
        # overall priority counter and track step counter
        self.priority = 1
        self.i = 1
        # fixed per-iteration seed -> reproducible selection order
        np.random.seed(self.seeds[self.n-1])
    def update(self, pick, program):
        """
        Updates appropriate information and tables with new program selection. This module
        operates in roughly the following order:
        1) adds the program and the program pick to the survey.track
        2) reduces the available number of targets left in a program by 1
        3) checks if the target has been selected by other programs and if `True`, credits the
           appropriate programs back the difference in cost
        4) after crediting/debiting all relevant programs, the remaining hours in all programs
           in the survey is logged in the survey.track, along with the overall priority of the
           selected target in the survey as well as the internal program priority
        """
        self.add_program_pick(pick, program)
        self.sciences.loc[program,'n_targets_left'] -= 1
        self.sciences.loc[program,'pick_number'] += 1
        # external observing module updates cost/exposure bookkeeping in place
        self = observing.check_observing(self, pick, program)
        if not int(pick.in_other_programs):
            # first program to select this target pays the full cost (hours)
            net = {program:-1.*(float(pick.actual_cost)/3600.)}
            self.track[self.n][self.i]['overall_priority'] = self.priority
            self.candidates.loc[self.candidates['tic'] == int(pick.tic),'priority'] = int(self.priority)
            self.priority += 1
        else:
            # target already picked elsewhere: redistribute costs and reuse
            # the previously-assigned overall priority
            net = observing.adjust_costs(self, pick, program)
            idx = self.candidates.loc[self.candidates['tic'] == int(pick.tic)].index.values.tolist()[0]
            self.track[self.n][self.i]['overall_priority'] = int(self.candidates.loc[idx,'priority'])
        # apply the per-program credits/debits computed above
        for key in net.keys():
            self.sciences.loc[key,'remaining_hours'] += net[key]
        self.update_program_hours()
        self.candidates.loc[self.candidates['tic'] == int(pick.tic),'in_%s'%program] = 1
        self.update_targets()
        self.i += 1
def add_program_pick(self, pick, program):
"""
Updates the survey.track with the new selection, including the program, the internal
program priority (or pick number), the selected target's TOI and TIC.
"""
self.track[self.n][self.i] = {}
self.track[self.n][self.i]['program'] = program
self.track[self.n][self.i]['program_pick'] = self.sciences.loc[program,'pick_number']+1
self.track[self.n][self.i]['toi'] = float(pick.toi)
self.track[self.n][self.i]['tic'] = int(pick.tic)
def update_program_hours(self):
"""
Updates the survey.track with the final remaining hours for each program
after any credits or debits were made in the single iteration (transaction).
"""
for program, hours in zip(self.sciences.index.values.tolist(), self.sciences.remaining_hours.values.tolist()):
self.track[self.n][self.i][program] = round(hours,3)
self.track[self.n][self.i]['total_time'] = round(np.sum(self.sciences.remaining_hours.values.tolist()),3)
def update_targets(self):
"""
Updates the survey sample (via survey.candidates), which counts the number of programs
a given target was selected by.
"""
start = np.array([0]*len(self.candidates))
for science in self.sciences.index.values.tolist():
start += self.candidates['in_%s'%science].values.tolist()
self.candidates['in_other_programs'] = start
    def get_seeds(self):
        """
        Ensures reproducibility due to the intrinsic randomness of the algorithm.
        """
        # One fixed RNG seed per MC iteration (consumed by reset_track via
        # np.random.seed). Hard-coded so repeated runs reproduce identical
        # selections; do not regenerate without versioning the results.
        self.seeds = [2222, 5531, 5348, 9632, 3755, 3401, 1061, 9307, 2033, 2114, 3103, 8120, 5442, 9179, 3165, 6114, 8757, 8574, 8078, 7724, 9056, 9066, 8423, 5278, 663, 4542, 6448, 7261, 6999, 7212, 3832, 3199, 6444, 1704, 8872, 2743, 9163, 1293, 8458, 5782, 7144, 9339, 3961, 9127, 4105, 3209, 7662, 5592, 4672, 2365, 8214, 3725, 2088, 1234, 6984, 2756, 3962, 7279, 9686, 112, 8936, 8807, 4149, 2535, 1541, 1422, 7991, 6445, 4384, 570, 9719, 5834, 5372, 1376, 1192, 1499, 8653, 730, 5469, 7541, 6546, 4002, 5677, 9251, 5459, 630, 908, 9074, 2675, 9517, 1015, 5272, 6846, 6820, 4516, 5632, 5671, 2126, 4440, 9670, 7768, 1405, 5330, 1854, 3156, 6949, 1119, 5257, 2999, 4251, 9674, 5362, 5009, 7526, 8293, 4518, 8641, 1365, 2492, 5061, 4804, 2710, 8823, 6637, 9382, 7928, 9219, 7840, 895, 5647, 3966, 6452, 9027, 8673, 1006, 469, 5056, 42, 8067, 7571, 3304, 6795, 9131, 6327, 5781, 5336, 4484, 5137, 3231, 4465, 91, 5135, 3303, 1890, 7593, 359, 6051, 1236, 9967, 3149, 9913, 3114, 9267, 3049, 6089, 6439, 828, 8893, 7708, 6766, 2818, 8745, 8791, 3639, 461, 3917, 8917, 2863, 1865, 9410, 1851, 617, 7563, 915, 1773, 4997, 6121, 8540, 6358, 1630, 5468, 8585, 4959, 8115, 6337, 355, 1977, 4800, 6831, 932, 1028, 8232, 1381, 3260, 2937, 7031, 6310, 5348, 2172, 3321, 4422, 1195, 2021, 481, 731, 5566, 9719, 7468, 9499, 1326, 4071, 7660, 6583, 5067, 5693, 2933, 8679, 9988, 550, 2599, 5536, 3081, 4429, 3592, 8140, 1398, 1481, 6823, 9006, 9264, 6037, 95, 9807, 2768, 4792, 7417, 6095, 8049, 79, 5070, 1457, 3099, 736, 2332, 2228, 146, 3862, 2153, 7800, 8664, 625, 2393, 88, 780, 4266, 9412, 4973, 426, 7742, 4593, 408, 7296, 1981, 867, 7636, 2455, 3519, 3093, 882, 7396, 815, 7717, 4792, 3103, 2747, 290, 8302, 2124, 2516, 3170, 8224, 3693, 5721, 3599, 9778, 5903, 8544, 69, 7648, 4860, 212, 517, 3765, 1401, 8722, 1689, 3281, 3061, 9293, 4954, 4584, 3357, 6380, 5266, 8972, 5578, 9289, 859, 486, 3746, 7928, 7240, 2861, 7615, 651, 5633, 4687, 7439, 2572, 1999, 1476, 5806, 1966, 9249, 3439, 4559, 6899, 5633, 
                      1973, 6469, 1636, 4922, 5059, 7772, 3907, 7410, 1822, 9659, 8230, 3643, 9106, 9524, 8971, 2887, 705, 4252, 6198, 1420, 9063, 5272, 9641, 195, 5217, 1819, 2286, 431, 5379, 26, 7690, 7241, 3735, 2987, 1490, 2807, 5059, 6556, 5921, 3949, 6128, 606, 7636, 1451, 4598, 2446, 9877, 635, 876, 9594, 1742, 5887, 5355, 365, 8197, 7919, 6969, 9736, 1703, 8703, 3358, 8321, 6817, 3617, 9069, 6406, 3938, 3077, 6166, 1546, 4393, 1026, 9479, 2568, 1787, 1434, 8390, 3844, 4028, 5643, 9291, 5072, 8022, 7260, 1209, 5579, 6860, 2871, 2662, 4769, 7361, 7427, 8737, 1608, 6613, 7941, 5619, 6949, 3217, 4204, 1439, 3439, 4521, 4761, 4089, 2066, 9623, 3076, 9230, 1503, 9896, 7110, 2152, 1291, 1339, 5088, 2959, 8092, 5381, 7283, 8831, 8448, 6775, 5414, 5871, 2728, 8828, 6320, 3294, 7953, 4157, 5654, 6890, 5134, 45, 6881, 4237, 9561, 913, 9990, 9667, 650, 1353, 2963, 3896, 4368, 8162, 5630, 5889, 9093, 5298, 17, 7958, 6417, 7574, 6461, 7446, 8398, 5486, 7742, 7503, 1740, 6987, 2238, 1159, 6552, 7968, 440, 1671, 7755, 9214, 1099, 7801, 4910, 878, 3278, 667, 1813, 7540, 2082, 3182, 5580, 3256, 9619, 5890, 8902, 9635, 2516, 864, 823, 9222, 6156, 5011, 7191, 4584, 4112, 9991, 110, 2361, 2709, 6469, 9592, 9668, 6788, 7505, 4174, 3119, 5693, 429, 6224, 3174, 6134, 6902, 9692, 2620, 1532, 7973, 5644, 6105, 2495, 1368, 9342, 3747, 9358, 1039, 311, 5382, 7309, 2482, 1889, 1162, 5620, 8439, 5487, 975, 4845, 4641, 7027, 747, 1016, 5728, 6175, 5252, 598, 4920, 5544, 6273, 9336, 8096, 8059, 2467, 1098, 72, 372, 737, 4500, 2736, 7458, 3742, 3156, 8420, 5311, 8532, 7186, 9113, 7041, 6658, 2370, 2733, 8258, 139, 6127, 4489, 692, 5627, 8139, 9744, 9773, 3674, 9103, 9896, 897, 2939, 8342, 3031, 4991, 3110, 3845, 2214, 5184, 7482, 3367, 5030, 9570, 7613, 1394, 1491, 2570, 5573, 9688, 2731, 8333, 6764, 4922, 4886, 8623, 2301, 8688, 9286, 832, 439, 2502, 8934, 5356, 6584, 6322, 6958, 1542, 9526, 9040, 10000, 8659, 1672, 610, 4050, 6616, 7105, 6073, 9004, 5102, 7781, 1615, 8225, 2511, 3862, 6110, 9382, 5402, 1501, 
                      1972, 6596, 2496, 2523, 2710, 3515, 4024, 7273, 6509, 1913, 8888, 5892, 6173, 1836, 7008, 1328, 6628, 6840, 126, 3190, 4511, 5644, 8944, 6386, 8863, 5022, 5361, 3799, 6701, 750, 785, 2069, 6609, 6429, 7252, 5477, 2309, 9163, 7957, 4056, 8866, 6815, 8583, 2891, 6979, 1242, 3795, 4564, 1785, 1292, 3009, 8132, 3837, 5357, 5549, 9030, 9177, 7603, 3764, 347, 3695, 3836, 7269, 1196, 5401, 4362, 4053, 9416, 2994, 6420, 8527, 7178, 1084, 1582, 967, 7636, 3565, 6510, 4259, 6769, 7106, 1102, 2072, 5721, 4149, 1459, 4861, 39, 1404, 44, 7296, 3745, 2023, 3162, 4885, 9147, 2716, 4395, 9489, 9240, 9882, 3761, 2755, 1862, 9856, 404, 7118, 8258, 5581, 1477, 4694, 463, 598, 9566, 9119, 6289, 5209, 6703, 4719, 8622, 9687, 8361, 5639, 812, 6559, 9332, 6663, 5722, 3930, 8141, 6207, 7787, 1572, 6012, 6052, 609, 6106, 606, 3013, 3915, 6504, 7301, 5596, 1644, 4915, 5623, 943, 1779, 1028, 5734, 8674, 6440, 5126, 5988, 4179, 2955, 9198, 4068, 7912, 6211, 8559, 7260, 193, 7662, 8317, 7231, 181, 1482, 9115, 9971, 4519, 4073, 4300, 2938, 5456, 2939, 3906, 6385, 5011, 9510, 1227, 8649, 4715, 6021, 5418, 3568, 3571, 622, 5414, 1137, 5375, 6263, 4930, 2352, 8565, 6277, 563, 1113, 4847, 1355, 3215, 5297, 8883, 8371, 6917, 5952, 3448, 6154, 52, 2204, 763, 919, 7355, 2095, 8809, 2362, 7590, 5591, 4950, 2817, 7882, 9214, 7549, 5118, 1046, 2298, 7458, 8277, 339, 2443, 8941, 2072, 324, 1350, 5502, 2501, 2680, 5925, 9935, 6294, 5578, 6686, 5888, 5921, 2690, 8177, 2405, 5438, 6754, 2331, 5550, 1591, 9183, 3714, 1097, 7171, 1552, 117, 1135, 1067, 4952, 6742, 3960, 1489, 4201, 2390, 7777, 979, 2114, 9652, 7569, 5795, 4386, 7838, 6684, 1262, 7700, 8091, 1979, 3566, 6058, 5834, 5443, 50, 1658, 6047, 4273, 9477, 3761, 5953, 2732, 7142, 81, 4457, 4703, 5029, 9303, 1936, 468, 5888, 7253, 6611, 1883, 8285, 7267, 2028, 2842, 9302, 4262, 812, 2696, 9879, 2992, 4865, 5031, 6292, 2439, 2261, 8345, 9926, 8960, 2960, 4199]
def rank(survey, stuck=0):
    """
    Runs the ranking algorithm on an initialized Survey to determine
    a final prioritized list of targets while balancing various sub-science
    goals using the provided selection criteria and prioritization metrics.
    The selection process will continue until either:
    1) the allocated survey time is successfully exhausted (i.e. == 0), or
    2) all programs in the survey are 'stuck' (i.e. cannot afford their next highest priority pick).

    Parameters
    ----------
    survey : Survey
        the initialized Survey instance to run the selection on
    stuck : int
        the number of programs currently 'stuck' in the Survey. This variable resets to 0 any time a new selection is made
    """
    if survey.verbose:
        print(survey.note)
    ti = clock.time()
    # Monte-Carlo simulations of sampler (iter=1 by default)
    for n in range(1,survey.iter+1):
        survey.n = n
        survey.reset_track()
        # Begin selection process
        while np.sum(survey.sciences.remaining_hours.values.tolist()) > 0.:
            # Select program (randomly, per the survey's seeded RNG)
            program = utils.pick_program(survey.sciences)
            # Create an instance of the Sample class w/ the updated vetted sample
            sample = Sample(program, survey=survey)
            # Only continue if the selected program has targets left
            if not survey.sciences.loc[program,'n_targets_left']:
                continue
            # pick highest priority target not yet selected
            pick = sample.get_highest_priority()
            if pick is None:
                continue
            # what is the cost of the selected target (seconds -> hours)
            cost = float((pick.actual_cost))/3600.
            # if the program cannot afford the target, it is "stuck"
            if cost > survey.sciences.loc[program,'remaining_hours']:
                stuck += 1
            else:
                # reset counter
                stuck = 0
                # update records with the program pick
                survey.update(pick, program)
            # stop once every program in a row failed to afford its pick
            if stuck >= len(survey.sciences):
                break
        if survey.emcee:
            # MC mode: save products for this iteration before the next reset
            survey.df = survey.candidates.copy()
            utils.make_data_products(survey)
    tf = clock.time()
    survey.ranking_time = float(tf-ti)
    survey.df = survey.candidates.copy()
    utils.make_data_products(survey)
|
ashleychontosREPO_NAMEsort-a-surveyPATH_START.@sort-a-survey_extracted@sort-a-survey-main@sortasurvey@notebook.py@.PATH_END.py
|
{
"filename": "numpy_backend.py",
"repo_name": "tslearn-team/tslearn",
"repo_path": "tslearn_extracted/tslearn-main/tslearn/backend/numpy_backend.py",
"type": "Python"
}
|
"""The Numpy backend.
Several backend functions are inspired from a python package about Machine Learning
in Riemannian manifolds named geomstats [JMLR:v21:19-027], also implementing several backends.
References
----------------
[JMLR:v21:19-027] Nina Miolane, Nicolas Guigui, Alice Le Brigant, Johan Mathe,
Benjamin Hou, Yann Thanwerdas, Stefan Heyder, Olivier Peltre, Niklas Koep, Hadi Zaatiti,
Hatem Hajri, Yann Cabanes, Thomas Gerald, Paul Chauchat, Christian Shewmake, Daniel Brooks,
Bernhard Kainz, Claire Donnat, Susan Holmes and Xavier Pennec.
Geomstats: A Python Package for Riemannian Geometry in Machine Learning,
Journal of Machine Learning Research, 2020, volume 21, number 223, pages 1-9,
http://jmlr.org/papers/v21/19-027.html, https://github.com/geomstats/geomstats/
"""
import numpy as _np
from scipy.spatial.distance import cdist, pdist
from sklearn.metrics.pairwise import euclidean_distances, pairwise_distances
class NumPyBackend(object):
    """Class for the Numpy backend.

    Exposes a uniform, attribute-based API (dtypes, array creation, math and
    distance helpers, sub-namespaces) so backend-agnostic code can swap this
    object for another backend without changing call sites.
    """
    def __init__(self):
        self.backend_string = "numpy"
        # sub-namespaces mirroring numpy.linalg / numpy.random / numpy.testing
        self.linalg = NumPyLinalg()
        self.random = NumPyRandom()
        self.testing = NumPyTesting()
        # supported dtypes
        self.int8 = _np.int8
        self.int16 = _np.int16
        self.int32 = _np.int32
        self.int64 = _np.int64
        self.float32 = _np.float32
        self.float64 = _np.float64
        self.complex64 = _np.complex64
        self.complex128 = _np.complex128
        # elementwise / reduction / construction functions, bound directly
        # to their NumPy (or scipy/sklearn, for distances) implementations
        self.abs = _np.abs
        self.all = _np.all
        self.any = _np.any
        self.arange = _np.arange
        self.argmax = _np.argmax
        self.argmin = _np.argmin
        self.array = _np.array
        self.cdist = cdist
        self.ceil = _np.ceil
        # largest representable double-precision value
        self.dbl_max = _np.finfo("double").max
        self.diag = _np.diag
        self.empty = _np.empty
        self.exp = _np.exp
        self.eye = _np.eye
        self.floor = _np.floor
        self.full = _np.full
        self.hstack = _np.hstack
        self.inf = _np.inf
        self.iscomplex = _np.iscomplex
        self.isfinite = _np.isfinite
        self.isnan = _np.isnan
        self.log = _np.log
        self.max = _np.max
        self.mean = _np.mean
        self.median = _np.median
        self.min = _np.min
        self.nan = _np.nan
        self.pairwise_distances = pairwise_distances
        self.pairwise_euclidean_distances = euclidean_distances
        self.pdist = pdist
        self.reshape = _np.reshape
        self.round = _np.round
        self.shape = _np.shape
        self.sqrt = _np.sqrt
        self.sum = _np.sum
        self.tril = _np.tril
        self.tril_indices = _np.tril_indices
        self.triu = _np.triu
        self.triu_indices = _np.triu_indices
        self.vstack = _np.vstack
        self.zeros = _np.zeros
        self.zeros_like = _np.zeros_like
    @staticmethod
    def belongs_to_backend(x):
        """Return True if `x`'s type name mentions "numpy"."""
        return "numpy" in str(type(x)).lower()
    @staticmethod
    def cast(x, dtype):
        """Cast array `x` to `dtype`."""
        return x.astype(dtype)
    @staticmethod
    def copy(x):
        """Return a copy of array `x`."""
        return x.copy()
    @staticmethod
    def from_numpy(x):
        """NumPy input passes through unchanged (identity for this backend)."""
        return x
    @staticmethod
    def is_array(x):
        # exact-type check: ndarray subclasses are rejected
        return type(x) is _np.ndarray
    @staticmethod
    def is_float(x):
        """Return True for Python or NumPy floating scalars."""
        return isinstance(x, (_np.floating, float))
    @staticmethod
    def is_float32(x):
        return isinstance(x, _np.float32)
    @staticmethod
    def is_float64(x):
        return isinstance(x, _np.float64)
    @staticmethod
    def ndim(x):
        """Return the number of dimensions of array `x`."""
        return x.ndim
    @staticmethod
    def to_numpy(x):
        """NumPy output passes through unchanged (identity for this backend)."""
        return x
class NumPyLinalg:
    """Linear-algebra namespace of the NumPy backend (mirrors numpy.linalg)."""
    def __init__(self):
        self.inv = _np.linalg.inv
        self.norm = _np.linalg.norm
class NumPyRandom:
    """Random-number namespace of the NumPy backend (mirrors numpy.random)."""
    def __init__(self):
        self.rand = _np.random.rand
        self.randint = _np.random.randint
        self.randn = _np.random.randn
class NumPyTesting:
    """Testing namespace of the NumPy backend (mirrors numpy.testing)."""
    def __init__(self):
        self.assert_allclose = _np.testing.assert_allclose
        self.assert_equal = _np.testing.assert_equal
|
tslearn-teamREPO_NAMEtslearnPATH_START.@tslearn_extracted@tslearn-main@tslearn@backend@numpy_backend.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "stevecunnington/gridimp",
"repo_path": "gridimp_extracted/gridimp-main/scripts/main.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from astropy import units as u
##### CHANGE THIS TO PIP INSTALL GRIDIMP ##############
import sys
sys.path.insert(1, '/Users/user/Documents/gridimp/gridimp')
sys.path.insert(1, '/users/scunnington/gridimp/gridimp')
sys.path.insert(1, '/Users/user/Documents/gridimp/data')
sys.path.insert(1, '/users/scunnington/gridimp/data')
import params
# --- run configuration -------------------------------------------------
dohealpy = True  # pixelise mocks onto a healpy lightcone before regridding
dobeam = True  # convolve mocks with a Gaussian telescope beam
survey = 'FineChannel'
fft2hp_ratio = 1  # FFT-grid to healpix-grid resolution ratio
# Unpack the survey setup: model power spectrum, HI bias/temperature,
# healpix resolution and mask, sky/frequency coverage, and the three grid
# dimension tuples (original, healpix-like, FFT) plus beam/binning info.
Pmod,b_HI,T_21cm,nside,hpmask,ramin,ramax,decmin,decmax,ra,dec,nu,dims_0,dims_hp,dims_fft,R_beam,kbins,nkbin,nyq = params.init(survey=survey,dobeam=dobeam,dohealpy=dohealpy,fft2hp_ratio=fft2hp_ratio)
lx,ly,lz,n0x,n0y,n0z = dims_0[:6]
# NOTE: lx,ly,lz are re-unpacked here; box lengths are shared between grids
lx,ly,lz,nfftx,nffty,nfftz = dims_fft[:6]
import cosmo
import mock
import grid
import telescope
import power
import line
def runPkloop(Nmock,window,compensate,interlace,R_beam,dohealpy,loadMap):
    """
    Monte-Carlo loop: generate (or load) Nmock mock fields, optionally push
    them through a healpy lightcone pixelisation, regrid onto the FFT grid
    with the chosen mass-assignment window, and measure each realisation's
    power spectrum; results are saved to disk at the end.

    NOTE(review): relies on module-level globals (Np, nside, ra, dec, nu,
    dims_*, hpmask, survey, fft2hp_ratio, kbins, nkbin, ...). The
    dohealpy==False branch references `nhpx`, which is not defined anywhere
    in this script — confirm before running that path.
    """
    Pk_0 = np.zeros((Nmock,nkbin))
    Pk_fft = np.zeros((Nmock,nkbin))
    for i in range(Nmock):
        print(i)
        if loadMap==False:
            # lognormal HI mock on the original high-resolution grid
            f_0 = mock.Generate(Pmod,dims=dims_0,b=b_HI,Tbar=T_21cm,doRSD=False)
            if R_beam!=0:
                f_0 = telescope.gaussbeam(f_0,R_beam,dims_0)
            #Pk_0[i],k,nmodes = power.Pk(f_0,dims_0,kbins)
            if dohealpy==True:
                # Create healpy sky map or "lightcone":
                map = grid.lightcone_healpy(f_0,dims_0,ra,dec,nu,nside,Np=Np,verbose=True)
                # cache the (compressed) input map for reuse on later runs
                map_compress = grid.compress_healpix_map(map,hpmask)
                np.save('/idia/projects/hi_im/steve/gridimp/data/inputmaps/map_n=%s_deg=%s_Rbeam=%s_survey=%s_%s'%(n0x,round((ramax-ramin)*(decmax-decmin),0),round(R_beam,2),survey,i),map_compress)
            if dohealpy==False: # Mimic healpy lightcone simulation creation/pixelisation
                # always ngp,no compensate/interlacing - as in real healpy map-making scenario
                xp,yp,zp,cellvals = grid.ParticleSampling(f_0,dims_0,dims_hp,Np=Np,sample_ingrid=False)
                map,W01_rg,counts = grid.mesh(xp,yp,zp,cellvals,dims_hp,window='ngp',compensate=False,interlace=False)
                W_hp = np.load('data/W01_ncell=%s.npy'%nhpx)
                map[W_hp==0] = 0
                xp,yp,zp,pixvals = grid.ParticleSampling(map,dims_hp,dims_fft,Np=Np,sample_ingrid=True)
        if dohealpy==True:
            # reload the cached lightcone and convert its pixels to particles
            map_compress = np.load('/idia/projects/hi_im/steve/gridimp/data/inputmaps/map_n=%s_deg=%s_Rbeam=%s_survey=%s_%s.npy'%(n0x,round((ramax-ramin)*(decmax-decmin),0),round(R_beam,2),survey,i))
            map = grid.compress_healpix_map(map_compress,hpmask,uncompress=True)
            ra_p,dec_p,nu_p,pixvals = grid.SkyPixelParticles_healpy(ra,dec,nu,nside,map,Np=Np)
            xp,yp,zp = grid.SkyCoordtoCartesian(ra_p.to(u.deg).value,dec_p.to(u.deg).value,line.nu21cm_to_z(nu_p),ramean_arr=ra.to(u.deg).value,decmean_arr=dec.to(u.deg).value,doTile=False)
        # Regrid to FFT:
        f_fft,W_fft,counts = grid.mesh(xp,yp,zp,pixvals,dims_fft,window,compensate,interlace,verbose=True)
        # save the binary footprint window for the modelling stage
        np.save('/idia/projects/hi_im/steve/gridimp/data/W01_ncell=%s'%nfftx,W_fft)
        Pk_fft[i],k,nmodes = power.Pk(f_fft,dims_fft,kbins,w1=W_fft,W1=W_fft)
    ### Save outputs:
    if dohealpy==True: np.save('/idia/projects/hi_im/steve/gridimp/data/Pks_healpy=%s_Rbeam=%s_hp2fft=%s_survey=%s_%s_interlace=%s_compensate=%s'%(dohealpy,np.round(R_beam,2),fft2hp_ratio,survey,window,interlace,compensate),[Pk_0,Pk_fft])
# --- driver: run the Pk loop for the chosen survey/MAS configuration ----
loadMap = False
Nmock = 100
Np = 5  # particle-sampling factor per cell (read as a global by runPkloop)
survey = 'FineChannel'
dobeam = True
fft2hp_ratio = 1
Pmod,b_HI,T_21cm,nside,hpmask,ramin,ramax,decmin,decmax,ra,dec,nu,dims_0,dims_hp,dims_fft,R_beam,kbins,nkbin,nyq = params.init(survey=survey,dobeam=dobeam,dohealpy=dohealpy,fft2hp_ratio=fft2hp_ratio)
compensate = True
#windows = ['cic','tsc','ngp','pcs']
#interlaces = [False,True]
windows = ['tsc']
# NOTE(review): 'True' here is a *string*, not the boolean True (compare the
# commented-out boolean list above) — confirm grid.mesh treats it correctly.
interlaces = ['True']
for window in windows:
    for interlace in interlaces:
        runPkloop(Nmock,window,compensate,interlace,R_beam,dohealpy,loadMap)
exit()
# --- everything below is unreachable after the exit() above (kept for
# --- reference); several defects are flagged but left untouched ---------
survey = 'Initial'
Pmod,b_HI,T_21cm,nside,hpmask,ramin,ramax,decmin,decmax,ra,dec,nu,dims_0,dims_hp,dims_fft,R_beam,kbins,nkbin,nyq = params.init(survey=survey,dobeam=dobeam,dohealpy=dohealpy,fft2hp_ratio=fft2hp_ratio)
# NOTE(review): 'compensates' is never defined — this loop would NameError.
for compensate in compensates:
    runPkloop(Nmock,window,compensate,interlace,R_beam,dohealpy,loadMap)
exit()
'''
## No treatment biased case:
window = 'ngp'
interlace = False
compensate = False
runPkloop(Nmock,window,compensate,interlace,R_beam,dohealpy,loadMap)
exit()
'''
#'''
compensate = True
# NOTE(review): 'tcs' is likely a typo for the 'tsc' mass-assignment window.
window = 'tcs'
interlace = False
loadMap = False
runPkloop(Nmock,window,compensate,interlace,R_beam,dohealpy,loadMap)
exit()
#'''
## Loop over all MAS possibilities::
loadMap = False
windows = ['ngp','pcs','cic','tsc']
interlaces = [False,True]
compensate = True
for window in windows:
    for interlace in interlaces:
        runPkloop(Nmock,window,compensate,interlace,R_beam,dohealpy,loadMap)
#'''
exit()
window = 'ngp'
interlace = False
compensate = False
Pk_0,Pk_fft = np.load('data/Pks_healpy=%s_Rbeam=%s_%s_interlace=%s_compensate=%s.npy'%(dohealpy,np.round(R_beam,2),window,interlace,compensate))
### Modelling:
import model
#pkmod_0,k,nmodes = model.PkMod(Pmod,dims_0,kbins,b1=b_HI,Tbar1=T_21cm,R_beam1=R_beam)
if dohealpy==False:
    # NOTE(review): nhpx/nhpy/nhpz are undefined in this script.
    s_para = lz/nhpz
    s_pix = np.mean([lx/nhpx,ly/nhpy])
    pkmod_hp,k,nmodes = model.PkMod(Pmod,dims_hp,kbins,b1=b_HI,Tbar1=T_21cm,R_beam1=R_beam,w1=W_hp,W1=W_hp,s_pix=s_pix,s_para=s_para,nsum=1,window='ngp')
if dohealpy==True:
    zs = line.nu21cm_to_z(nu)
    d_c = cosmo.d_com(zs) # Comoving distance to frequency binra[ra>180] = ra[ra>180] - 360 # Make continuous RA i.e. 359,360,1 -> -1,0,1 so mean RA is correct
    # mean channel depth in comoving distance
    s_para = np.mean( cosmo.d_com(zs[:-1]) - cosmo.d_com(zs[1:]) )
    hppixwin = model.HealpixPixelWindow(nside,np.mean(d_c))
    # Resample binary window mask into the approximated healpix dimensional space:
    W_fft = np.load('data/W01_ncell=%s.npy'%nfftx)
    xp,yp,zp,cellvals = grid.ParticleSampling(W_fft,dims_fft,dims_hp,Np=1,sample_ingrid=False)
    W_hp = grid.mesh(xp,yp,zp,cellvals,dims_hp,window='ngp',compensate=False,interlace=False)[1]
    #s_para,hppixwin = None,None
    nsum = 1
    pkmod_hp,k,nmodes = model.PkMod(Pmod,dims_hp,kbins,b1=b_HI,Tbar1=T_21cm,R_beam1=R_beam,w1=W_hp,W1=W_hp,s_para=s_para,hppixwin=hppixwin,nsum=nsum,window='ngp')
# ratio of measured to modelled Pk, with Nyquist markers
plt.axhline(1,color='black',lw=0.8,ls=':')
plt.axvline(nyq,color='red',lw=1,ls='--')
plt.axvline(nyq/2,color='red',lw=1,ls='--')
#plt.errorbar(k,np.mean(Pk_0,0)/pkmod_0,np.std(Pk_0,0)/pkmod_0)
plt.errorbar(k,np.mean(Pk_fft,0)/pkmod_hp,np.std(Pk_fft,0)/pkmod_hp)
plt.show()
|
stevecunningtonREPO_NAMEgridimpPATH_START.@gridimp_extracted@gridimp-main@scripts@main.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/erg/satellite/erg/mepi/__init__.py",
"type": "Python"
}
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@erg@satellite@erg@mepi@__init__.py@.PATH_END.py
|
|
{
"filename": "kernels.py",
"repo_name": "dfm/george",
"repo_path": "george_extracted/george-main/templates/kernels.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = [
"Kernel", "Sum", "Product",
{%- for spec in specs %}
"{{ spec.name }}",
{%- endfor %}
]
import numpy as np
from .modeling import Model, ModelSet
from .metrics import Metric, Subspace
from .kernel_interface import KernelInterface
class Kernel(ModelSet):
    """
    The abstract kernel type. Every kernel implemented in George should be
    a subclass of this object.
    """

    is_kernel = True
    kernel_type = -1

    # This function deals with weird behavior when performing arithmetic
    # operations with numpy scalars.
    def __array_wrap__(self, array, context=None):
        if context is None:
            raise TypeError("Invalid operation")
        ufunc, args, _ = context
        if ufunc.__name__ == "multiply":
            return float(args[0]) * args[1]
        elif ufunc.__name__ == "add":
            return float(args[0]) + args[1]
        raise TypeError("Invalid operation")

    # Give kernels precedence over numpy arrays in mixed binary operations
    # so that e.g. ``np.float64(2.0) * kernel`` dispatches to this class.
    __array_priority__ = np.inf

    def __getstate__(self):
        # Drop the compiled kernel handle when pickling; it is rebuilt on
        # demand through the ``kernel`` property.
        odict = self.__dict__.copy()
        odict["_kernel"] = None
        return odict

    # We must overload the ModelSet attribute getter to pass the requests
    # to the "BaseKernel"
    def __getattr__(self, name):
        if "models" in self.__dict__:
            if name in self.models:
                return self.models[name]
            if None in self.models:
                return getattr(self.models[None], name)
        raise AttributeError(name)

    @property
    def kernel(self):
        # Low-level interface wrapping this kernel for the solvers.
        return KernelInterface(self)

    def __repr__(self):
        kernel = self.models[None]
        params = ["{0}={1}".format(k, getattr(kernel, k))
                  for k in kernel.parameter_names]
        if self.stationary:
            params += ["metric={0}".format(repr(self.metric)),
                       "block={0}".format(repr(self.block))]
        else:
            params += ["ndim={0}".format(self.ndim),
                       "axes={0}".format(repr(self.axes))]
        return "{0}({1})".format(self.__class__.__name__, ", ".join(params))

    def __add__(self, b):
        # Adding a scalar wraps it in a ConstantKernel; the log-constant is
        # divided by ndim (see ConstantKernel's parameterization).
        if not hasattr(b, "is_kernel"):
            return Sum(ConstantKernel(log_constant=np.log(float(b)/self.ndim),
                                      ndim=self.ndim), self)
        return Sum(self, b)

    def __radd__(self, b):
        return self.__add__(b)

    def __mul__(self, b):
        # Same scalar-to-ConstantKernel promotion as ``__add__``.
        if not hasattr(b, "is_kernel"):
            log_constant = np.log(float(b)/self.ndim)
            return Product(ConstantKernel(log_constant=log_constant,
                                          ndim=self.ndim), self)
        return Product(self, b)

    def __rmul__(self, b):
        return self.__mul__(b)

    def get_value(self, x1, x2=None, diag=False):
        """Evaluate the kernel matrix (or its diagonal) at the inputs.

        ``x2`` defaults to ``x1`` (symmetric evaluation); ``diag=True``
        computes only the diagonal entries.
        """
        x1 = np.ascontiguousarray(x1, dtype=np.float64)
        if x2 is None:
            if diag:
                return self.kernel.value_diagonal(x1, x1)
            else:
                return self.kernel.value_symmetric(x1)
        x2 = np.ascontiguousarray(x2, dtype=np.float64)
        if diag:
            return self.kernel.value_diagonal(x1, x2)
        else:
            return self.kernel.value_general(x1, x2)

    def get_gradient(self, x1, x2=None, include_frozen=False):
        """Gradient of the kernel matrix with respect to the parameters.

        The trailing axis of the returned array runs over the (by default
        unfrozen) parameters.
        """
        mask = (
            np.ones(self.full_size, dtype=bool)
            if include_frozen else self.unfrozen_mask
        )
        which = mask.astype(np.uint32)
        x1 = np.ascontiguousarray(x1, dtype=np.float64)
        if x2 is None:
            g = self.kernel.gradient_symmetric(which, x1)
        else:
            x2 = np.ascontiguousarray(x2, dtype=np.float64)
            g = self.kernel.gradient_general(which, x1, x2)
        return g[:, :, mask]

    def get_x1_gradient(self, x1, x2=None):
        """Gradient of the kernel matrix with respect to ``x1``."""
        x1 = np.ascontiguousarray(x1, dtype=np.float64)
        if x2 is None:
            x2 = x1
        else:
            x2 = np.ascontiguousarray(x2, dtype=np.float64)
        return self.kernel.x1_gradient_general(x1, x2)

    def get_x2_gradient(self, x1, x2=None):
        """Gradient of the kernel matrix with respect to ``x2``."""
        x1 = np.ascontiguousarray(x1, dtype=np.float64)
        if x2 is None:
            x2 = x1
        else:
            x2 = np.ascontiguousarray(x2, dtype=np.float64)
        return self.kernel.x2_gradient_general(x1, x2)

    def test_gradient(self, x1, x2=None, eps=1.32e-6, **kwargs):
        # Check the analytic parameter gradient against central finite
        # differences; raises AssertionError on mismatch.
        vector = self.get_parameter_vector()
        g0 = self.get_gradient(x1, x2=x2)
        for i, v in enumerate(vector):
            vector[i] = v + eps
            self.set_parameter_vector(vector)
            kp = self.get_value(x1, x2=x2)
            vector[i] = v - eps
            self.set_parameter_vector(vector)
            km = self.get_value(x1, x2=x2)
            # Restore the original parameter before moving on.
            vector[i] = v
            self.set_parameter_vector(vector)
            grad = 0.5 * (kp - km) / eps
            assert np.allclose(g0[:, :, i], grad, **kwargs), \
                "incorrect gradient for parameter '{0}' ({1})" \
                .format(self.get_parameter_names()[i], i)

    def test_x1_gradient(self, x1, x2=None, eps=1.32e-6, **kwargs):
        # Finite-difference check of the gradient with respect to x1.
        kwargs["atol"] = kwargs.get("atol", 0.5 * eps)
        g0 = self.get_x1_gradient(x1, x2=x2)
        if x2 is None:
            x2 = np.array(x1)
        for i in range(len(x1)):
            for k in range(self.ndim):
                x1[i, k] += eps
                kp = self.get_value(x1, x2=x2)
                x1[i, k] -= 2*eps
                km = self.get_value(x1, x2=x2)
                x1[i, k] += eps
                grad = 0.5 * (kp - km) / eps
                assert np.allclose(g0[i, :, k], grad[i], **kwargs)

    def test_x2_gradient(self, x1, x2=None, eps=1.32e-6, **kwargs):
        # Finite-difference check of the gradient with respect to x2.
        kwargs["atol"] = kwargs.get("atol", 0.5 * eps)
        g0 = self.get_x2_gradient(x1, x2=x2)
        if x2 is None:
            x2 = np.array(x1)
        for i in range(len(x2)):
            for k in range(self.ndim):
                x2[i, k] += eps
                kp = self.get_value(x1, x2=x2)
                x2[i, k] -= 2*eps
                km = self.get_value(x1, x2=x2)
                x2[i, k] += eps
                grad = 0.5 * (kp - km) / eps
                assert np.allclose(g0[:, i, k], grad[:, i], **kwargs)
class _operator(Kernel):
    """Abstract base for binary kernel combinations (sums and products)."""

    is_kernel = False
    kernel_type = -1
    operator_type = -1

    def __init__(self, k1, k2):
        # Both operands must act on inputs of the same dimension.
        if k1.ndim != k2.ndim:
            raise ValueError("Dimension mismatch")
        self.ndim = k1.ndim
        self._dirty = True
        super(_operator, self).__init__([("k1", k1), ("k2", k2)])

    @property
    def k1(self):
        """The left operand."""
        return self.models["k1"]

    @property
    def k2(self):
        """The right operand."""
        return self.models["k2"]

    @property
    def dirty(self):
        # Dirty if this node or either operand has changed since compilation.
        return self._dirty or self.k1.dirty or self.k2.dirty

    @dirty.setter
    def dirty(self, value):
        self._dirty = value
        self.k1.dirty = False
        self.k2.dirty = False
class Sum(_operator):
    """The sum of two kernels."""

    is_kernel = False
    operator_type = 0

    def __repr__(self):
        left, right = self.k1, self.k2
        return "{0} + {1}".format(left, right)
class Product(_operator):
    """The product of two kernels."""

    is_kernel = False
    operator_type = 1

    def __repr__(self):
        left, right = self.k1, self.k2
        return "{0} * {1}".format(left, right)
{% for spec in specs %}
{# One Base<Name>/<Name> class pair is rendered for every kernel spec;
   the {%- / -%} markers trim surrounding whitespace in the output. #}
class Base{{ spec.name }} (Model):
    # Auto-generated parameter container for {{ spec.name }}.
    parameter_names = ({% for p in spec.params -%}"{{ p }}", {% endfor %})


class {{ spec.name }} (Kernel):
    r"""
    {{ spec.doc | indent(4) }}
    """
    kernel_type = {{ spec.index }}
    stationary = {{ spec.stationary }}

    def __init__(self,
                 {% for p in spec.params %}{{ p }}=None,
                 {% endfor -%}
                 {% for con in spec.constants %}{{ con.name }}=None,
                 {% endfor -%}
                 {% if spec.stationary -%}
                 metric=None,
                 metric_bounds=None,
                 lower=True,
                 block=None,
                 {% endif -%}
                 bounds=None,
                 ndim=1,
                 axes=None):
        {% for con in spec.constants %}
        # Constants are required and stored directly on the instance.
        if {{ con.name }} is None:
            raise ValueError("missing required parameter '{{ con.name }}'")
        self.{{ con.name }} = {{ con.name }}
        {% endfor %}
        {% if spec.stationary -%}
        # Stationary kernels are parameterized by a distance metric.
        if metric is None:
            raise ValueError("missing required parameter 'metric'")
        metric = Metric(metric, bounds=metric_bounds, ndim=ndim,
                        axes=axes, lower=lower)
        self.ndim = metric.ndim
        self.axes = metric.axes
        self.block = block
        {%- else -%}
        # Non-stationary kernels act on a subspace of the input dimensions.
        self.subspace = Subspace(ndim, axes=axes)
        self.ndim = self.subspace.ndim
        self.axes = self.subspace.axes
        {%- endif %}

        kwargs = dict({% for p in spec.params -%}{{ p }}={{ p }}, {% endfor -%})
        if bounds is not None:
            kwargs["bounds"] = bounds
        base = Base{{ spec.name }}(**kwargs)
        super({{ spec.name }}, self).__init__([
            (None, base), {% if spec.stationary -%}("metric", metric){%- endif %}
        ])

        # Common setup.
        self.dirty = True

    {% if spec.stationary %}
    @property
    def block(self):
        if not self.blocked:
            return None
        return list(zip(self.min_block, self.max_block))

    @block.setter
    def block(self, block):
        if block is None:
            # No block: use infinite bounds on every axis.
            self.blocked = False
            self.min_block = -np.inf + np.zeros(len(self.axes))
            self.max_block = np.inf + np.zeros(len(self.axes))
            return
        block = np.atleast_2d(block)
        if block.shape != (len(self.axes), 2):
            raise ValueError("dimension mismatch in block specification")
        self.blocked = True
        self.min_block, self.max_block = map(np.array, zip(*block))
    {% endif %}
{% endfor %}
|
dfmREPO_NAMEgeorgePATH_START.@george_extracted@george-main@templates@kernels.py@.PATH_END.py
|
{
"filename": "igimf_epoch_55.py",
"repo_name": "juzikong/photGalIMF",
"repo_path": "photGalIMF_extracted/photGalIMF-main/simulation_results_from_galaxy_evol/example/igimf_epoch_55.py",
"type": "Python"
}
|
# File to define a custom IMF
# The return value represents the chosen IMF value for the input mass
def custom_imf(mass, time):  # there is no time dependence for IGIMF
    """Piecewise-linear fit of the galaxy-wide IMF for this epoch.

    Parameters
    ----------
    mass : float
        Stellar mass in solar masses.
    time :
        Unused; kept so the signature matches time-dependent IMFs.

    Returns
    -------
    float
        The IMF value at ``mass``; 0 outside the fitted mass range
        (below 0.08 or above ~198.25 solar masses).
    """
    if mass < 0.08:
        return 0
    elif mass < 0.101:
        return -881912095498.0004 * mass + 126349388048.4443
    elif mass < 0.10201:
        return -852999343379.5463 * mass + 123429200084.48044
    elif mass < 0.10510100501:
        return -771825086636.5791 * mass + 115067381227.91583
    elif mass < 0.10828567056280801:
        return -698375642355.5582 * mass + 107272041085.80531
    elif mass < 0.11156683466653165:
        return -631915891670.408 * mass + 100004803063.36438
    elif mass < 0.11494742132376223:
        return -571780672073.184 * mass + 93229890421.70459
    elif mass < 0.11843044313729356:
        return -517368120134.2647 * mass + 86913950148.33691
    elif mass < 0.12201900399479669:
        return -468133647751.17334 * mass + 81025888759.68959
    elif mass < 0.12571630183484303:
        return -423584491638.08563 * mass + 75536719227.383
    elif mass < 0.12952563149674062:
        return -383274781503.5729 * mass + 70419418274.561
    elif mass < 0.13345038765672337:
        return -346801077557.1967 * mass + 65648793339.83267
    elif mass < 0.13749406785310975:
        return -313798332681.9032 * mass + 61201358553.86909
    elif mass < 0.14166027560312686:
        return -283936238859.25653 * mass + 57055219118.036606
    elif mass < 0.14595272361417722:
        return -256915921281.43967 * mass + 53189963515.95951
    elif mass < 0.15037523709241038:
        return -232466947062.03006 * mass + 49586563027.230675
    elif mass < 0.15493175715154747:
        return -210344618608.2964 * mass + 46227278048.721466
    elif mass < 0.1596263443249965:
        return -190327524564.86612 * mass + 43095570762.188644
    elif mass < 0.16446318218438824:
        return -172215323817.93555 * mass + 40176023718.31866
    elif mass < 0.1694465810677574:
        return -155826740380.95596 * mass + 37454263936.35827
    elif mass < 0.17458098192069152:
        return -140997749093.59116 * mass + 34916892145.66835
    elif mass < 0.1798709602538704:
        return -127579933975.75043 * mass + 32551416820.89501
    elif mass < 0.18532123022052294:
        return -115439002806.02321 * mass + 30346192685.970474
    elif mass < 0.190936648817435:
        return -104453443057.75208 * mass + 28290363384.209488
    elif mass < 0.19672222021325209:
        return -94513305740.80682 * mass + 26373808032.28923
    elif mass < 0.20268310020793384:
        return -85519104976.9103 * mass + 24587091394.955666
    elif mass < 0.20882460082733445:
        return -77380822295.08893 * mass + 22921417435.19663
    elif mass < 0.21515219505700353:
        return -70017005681.73714 * mass + 21368586011.210938
    elif mass < 0.2216715217194258:
        return -63353954367.92658 * mass + 19920952506.94763
    elif mass < 0.22838839049904613:
        return -57324981195.24943 * mass + 18571390197.54771
    elif mass < 0.23530878711955774:
        return -51869745177.25965 * mass + 17313255164.34588
    elif mass < 0.24243887867806746:
        return -46933647576.609474 * mass + 16140353586.739586
    elif mass < 0.24978501914089157:
        return -42467285453.54647 * mass + 15046911249.911201
    elif mass < 0.2573537550058797:
        return -38425957216.49946 * mass + 14027545118.26312
    elif mass < 0.26515183113631285:
        return -34769215226.14052 * mass + 13077236834.640993
    elif mass < 0.27318619677157424:
        return -31460460975.12397 * mass + 12191308014.872707
    elif mass < 0.2814640117199497:
        return -28466578791.900406 * mass + 11365397216.00891
    elif mass < 0.28999265273907593:
        return -25757604402.43424 * mass + 10595438464.85373
    elif mass < 0.29877972010972265:
        return -23306425032.748863 * mass + 9877641241.114536
    elif mass < 0.30783304440876735:
        return -21088508050.685318 * mass + 9208471816.603436
    elif mass < 0.31716069348739745:
        return -19081655431.023422 * mass + 8584635858.633564
    elif mass < 0.32677097966075913:
        return -17265781586.4992 * mass + 8003062211.957123
    elif mass < 0.33667246711545984:
        return -15622712341.196539 * mass + 7460887779.420978
    elif mass < 0.34687397954152543:
        return -14136003034.26778 * mass + 6955443426.885202
    elif mass < 0.3573846079956132:
        return -12790773933.533293 * mass + 6484240843.032959
    elif mass < 0.36821371900248834:
        return -11573561311.646477 * mass + 6044960289.365638
    elif mass < 0.3793709629019827:
        return -10472182694.378483 * mass + 5635439180.096561
    elif mass < 0.39086628244887567:
        return -9475614932.292295 * mass + 5253661435.698137
    elif mass < 0.40270992167335906:
        return -8573883875.54401 * mass + 4897747557.71019
    elif mass < 0.41491243500998354:
        return -7757964547.586873 * mass + 4565945375.935192
    elif mass < 0.4274846967032211:
        return -7019690818.683823 * mass + 4256621422.4751673
    elif mass < 0.4404379104980254:
        return -6351673675.18426 * mass + 3968252890.1399918
    elif mass < 0.453783619624026:
        return -5747227266.568564 * mass + 3699420135.6402307
    elif mass < 0.4675337170822536:
        return -5200301990.110294 * mass + 3448799690.66144
    elif mass < 0.48170045624356295:
        return -4705423943.412504 * mass + 3215157746.403937
    elif mass < 0.49629646176819914:
        return -4257640138.8508053 * mass + 2997344079.521927
    elif mass < 0.5113347408562374:
        return -3852468931.589354 * mass + 2794286389.553832
    elif mass < 0.5268286948389223:
        return -3485855165.0324383 * mass + 2604985019.9686236
    elif mass < 0.5427921311212365:
        return -3154129584.782682 * mass + 2428508036.838693
    elif mass < 0.5592392754863411:
        return -2853972114.905384 * mass + 2263986640.914044
    elif mass < 0.5761847847728528:
        return -2582378628.942507 * mass + 2110610890.5078216
    elif mass < 0.593643759936255:
        return -2336630883.1087937 * mass + 1967625714.139305
    elif mass < 0.6116317595060835:
        return -2114269310.7448237 * mass + 1834327193.3037505
    elif mass < 0.6301648134508773:
        return -1913068405.742814 * mass + 1710059097.0706878
    elif mass < 0.6492594374632522:
        return -1731014448.562266 * mass + 1594209651.4456558
    elif mass < 0.6689326476778262:
        return -1566285351.917694 * mass + 1486208527.597762
    elif mass < 0.689201975835112:
        return -1417232424.4142952 * mass + 1385524034.1202335
    elif mass < 0.7100854849048917:
        return -1282363869.6177766 * mass + 1291660499.5043395
    elif mass < 0.7316017851829947:
        return -1160329855.4088144 * mass + 1204155831.9407647
    elif mass < 0.7537700508758246:
        return -1049909004.1847667 * mass + 1122579244.432484
    elif mass < 0.7766100371874131:
        return -949996168.7014714 * mass + 1046529134.0237803
    elif mass < 0.8001420979242287:
        return -859591371.2048419 * mass + 975631104.701447
    elif mass < 0.8243872036334308:
        return -777789795.1523108 * mass + 909536124.2368587
    elif mass < 0.8493669602907274:
        return -703772729.355241 * mass + 847918805.8942916
    elif mass < 0.8751036285544969:
        return -636799373.9069062 * mass + 790475806.5464351
    elif mass < 0.9016201436033267:
        return -576199425.885282 * mass + 736924333.3106526
    elif mass < 0.9289401355746512:
        return -521366370.6256449 * mass + 687000751.3549962
    elif mass < 0.9570879506226987:
        return -471751411.4175549 * mass + 640459286.0192342
    elif mass < 0.9860886726145172:
        return -426857976.8721611 * mass + 597070812.8618844
    elif mass < 1.0159681454834097:
        return -378224034.20224994 * mass + 548611192.837717
    elif mass < 1.0467529962597026:
        return -342742154.28123385 * mass + 512208975.96922934
    elif mass < 1.078470658799368:
        return -310588893.6146222 * mass + 478222169.8875282
    elif mass < 1.1111493982316476:
        return -281451988.41692126 * mass + 446490503.8791636
    elif mass < 1.1448183361474649:
        return -255048468.93246916 * mass + 416864341.73713684
    elif mass < 1.1795074765510691:
        return -231121911.30959457 * mass + 389203976.12528235
    elif mass < 1.215247732598043:
        return -209439947.281311 * mass + 363378969.76383543
    elif mass < 1.2520709541434965:
        return -189792007.4675967 * mass + 339267540.3298471
    elif mass < 1.2900099561249987:
        return -171987276.38238186 * mass + 316755986.1713393
    elif mass < 1.3290985478055424:
        return -155852839.28501594 * mass + 295738150.12725586
    elif mass < 1.3693715629025982:
        return -141232002.87908453 * mass + 276114918.9249596
    elif mass < 1.4108648906301098:
        return -127982773.5493596 * mass + 257793755.7942201
    elif mass < 1.4536155076810928:
        return -115976478.35815808 * mass + 240688264.09393674
    elif mass < 1.4976615111793377:
        return -105096515.40857038 * mass + 224717779.89378324
    elif mass < 1.5430421526295828:
        return -95237221.43822642 * mass + 209806991.5892322
    elif mass < 1.589797872896412:
        return -86302845.64635111 * mass + 195885584.7567351
    elif mass < 1.6379703382430462:
        return -78206619.78770545 * mass + 182887910.57358485
    elif mass < 1.6876024774621488:
        return -70869915.5029363 * mass + 170752676.23962244
    elif mass < 1.7387385201317294:
        return -64221480.701078944 * mass + 159422655.94017956
    elif mass < 1.791424036030241:
        return -58196747.57857353 * mass + 148844420.98790362
    elif mass < 1.8457059757459933:
        return -52737205.554136746 * mass + 138968087.87036958
    elif mass < 1.9016327125170727:
        return -47789833.029834844 * mass + 129747083.01574302
    elif mass < 1.9592540853390523:
        return -43306582.45961368 * mass + 121137923.16692841
    elif mass < 2.018621443378911:
        return -39243913.72449903 * mass + 113100010.32866889
    elif mass < 2.0797876917347353:
        return -35562371.28275334 * mass + 105595440.32068571
    elif mass < 2.14280733858199:
        return -32226200.988276925 * mass + 98588824.03384253
    elif mass < 2.2077365437483625:
        return -29203002.856017157 * mass + 92047120.54667199
    elif mass < 2.2746331687604817:
        return -26463416.40203488 * mass + 85939481.3150872
    elif mass < 2.3435568284070927:
        return -23980835.502444685 * mass + 80237104.70076022
    elif mass < 2.4145689438646563:
        return -21731150.001893587 * mass + 74913100.15190408
    elif mass < 2.4877327974326993:
        return -19692511.562272504 * mass + 69942361.39625351
    elif mass < 2.5631135889277075:
        return -17845121.47753356 * mass + 65301448.04800619
    elif mass < 2.640778493785806:
        return -16171038.394003037 * mass + 60968475.07059142
    elif mass < 2.7207967229260097:
        return -14654004.068820138 * mass + 56923009.57401629
    elif mass < 2.8032395844273905:
        return -13279285.474247394 * mass + 53145974.45994402
    elif mass < 2.888180547075125:
        return -12033531.714499747 * mass + 49619558.46035055
    elif mass < 2.9756953058320486:
        return -10904644.36544424 * mass + 46327132.145368725
    elif mass < 3.0658618492940652:
        return -9881659.977970002 * mass + 43253169.50430538
    elif mass < 3.1587605291895247:
        return -8954643.603935985 * mass + 40383174.730044276
    elif mass < 3.2544741319844963:
        return -8114592.310631201 * mass + 37703613.86152058
    elif mass < 3.353087952657759:
        return -7353347.746728442 * mass + 35201850.96198223
    elif mass < 3.4546898707112415:
        return -6663516.910575382 * mass + 32866088.532013968
    elif mass < 3.559370428483663:
        return -6038400.351360663 * mass + 30685311.876376472
    elif mass < 3.667222911837147:
        return -5471927.105856929 * mass + 28649237.16229057
    elif mass < 3.7783434332887245:
        return -4958595.738863783 * mass + 26748262.92422453
    elif mass < 3.892831017660806:
        return -4493420.914756326 * mass + 24973424.786514346
    elif mass < 4.010787690326946:
        return -4071884.9812498903 * mass + 23316353.19027959
    elif mass < 4.132318568131543:
        return -3689894.095182401 * mass + 21769233.92531387
    elif mass < 4.257531953064498:
        return -3343738.464214127 * mass + 20324771.28080991
    elif mass < 4.386539428774305:
        return -3030056.3183271675 * mass + 18976153.641166396
    elif mass < 4.519455960005596:
        return -2745801.2612217274 * mass + 17717021.364621893
    elif mass < 4.656399995049725:
        return -2488212.684538307 * mass + 16541436.793256415
    elif mass < 4.797493571299727:
        return -2254788.957574303 * mass + 15443856.252927545
    elif mass < 4.94286242400368:
        return -2043263.1321236799 * mass + 14419103.91111523
    elif mass < 5.092636098313417:
        return -1851580.9264861692 * mass + 13462347.369371472
    elif mass < 5.246948064728412:
        return -1677880.7748385717 * mass + 12569074.875304293
    elif mass < 5.405935838037748:
        return -1520475.7482114013 * mass + 11735074.04662867
    elif mass < 5.569741099866129:
        return -1377837.171488895 * mass + 10956412.006936198
    elif mass < 5.738509824933173:
        return -1248579.7773293327 * mass + 10229416.839531496
    elif mass < 5.912392411138472:
        return -1131448.2528230778 * mass + 9550660.27187066
    elif mass < 6.091543813588379:
        return -1025305.0482320755 * mass + 8916941.508941254
    elif mass < 6.27612368268392:
        return -929119.3294146868 * mass + 8325272.139359356
    elif mass < 6.466296506392926:
        return -841956.966641636 * mass + 7772862.042986926
    elif mass < 6.662231756833138:
        return -762971.4625816531 * mass + 7257106.233641686
    elif mass < 6.8641040412969385:
        return -691395.7313472136 * mass + 6775572.574825793
    elif mass < 7.072093257852278:
        return -626534.6487633276 * mass + 6325990.310560303
    elif mass < 7.286384755658459:
        return -567758.3015101728 * mass + 5906239.357245501
    elif mass < 7.507169500139667:
        return -514495.8695739057 * mass + 5514340.306029102
    elif mass < 7.734644243163398:
        return -466230.0825977056 * mass + 5148445.088564688
    elif mass < 7.969011698375493:
        return -422492.19629126147 * mass + 4806828.262119563
    elif mass < 8.210480721847969:
        return -382857.44011296297 * mass + 4487878.872949734
    elif mass < 8.459266498200684:
        return -346940.8920130764 * mass + 4190092.859566341
    elif mass < 8.715590732362662:
        return -314393.74017471925 * mass + 3912065.9600711716
    elif mass < 8.979681847143983:
        return -284899.89544761827 * mass + 3652487.0901141223
    elif mass < 9.251775186794292:
        return -258172.9215758934 * mass + 3410132.1602485017
    elif mass < 9.532113226729347:
        return -233953.25340609462 * mass + 3183858.3035202585
    elif mass < 9.820945789612473:
        return -212005.67606085283 * mass + 2972598.4860824393
    elif mass < 10.118530267983521:
        return -192117.04059527343 * mass + 2775356.475408798
    elif mass < 10.42513185363369:
        return -174094.1939521161 * mass + 2591202.1423872104
    elif mass < 10.741023773930646:
        return -157762.10311133554 * mass + 2419267.0751323723
    elif mass < 11.06648753530452:
        return -142962.15521672467 * mass + 2258740.483838777
    elif mass < 11.401813174111782:
        return -129550.61717061569 * mass + 2108865.3773591933
    elif mass < 11.747299515100543:
        return -117397.23973695925 * mass + 1968934.9934820938
    elif mass < 12.103254437707607:
        return -106383.99259577072 * mass + 1838289.4660695463
    elif mass < 12.469995150424586:
        return -96403.91806463679 * mass + 1716312.7133444645
    elif mass < 12.847848473477601:
        return -87360.09235456264 * mass + 1602429.5326492898
    elif mass < 13.237151130072446:
        return -79164.6842722866 * mass + 1496102.8879772783
    elif mass < 13.638250046464773:
        return -71738.10222743743 * mass + 1396831.37748537
    elif mass < 14.051502661122703:
        return -65008.221260554754 * mass + 1304146.869047045
    elif mass < 14.477277243257383:
        return -58909.68258489232 * mass + 1217612.2926927358
    elif mass < 14.915953221005326:
        return -53383.25883957604 * mass + 1136819.579530759
    elif mass < 15.367921519555008:
        return -48375.27888946114 * mass + 1061387.7374272011
    elif mass < 15.833584909519045:
        return -43837.10658551415 * mass + 990961.054370338
    elif mass < 16.31335836586238:
        return -39724.66842374193 * mass + 925207.4210497297
    elif mass < 16.807669437706377:
        return -35998.02551516624 * mass + 863816.764735992
    elif mass < 17.316958629338316:
        return -32620.98571012829 * mass + 806499.5870788775
    elif mass < 17.841679792765895:
        return -29560.752109917725 * mass + 752985.5989275265
    elif mass < 18.382300532166493:
        return -26787.604552143403 * mass + 703022.4457347527
    elif mass < 18.93930262059167:
        return -24274.61097653632 * mass + 656374.5175350609
    elif mass < 19.51318242929822:
        return -21997.365868049095 * mass + 612821.837884678
    elif mass < 20.104451370088384:
        return -19933.75323709776 * mass + 572159.0265245157
    elif mass < 20.71363635105343:
        return -18063.73183503036 * mass + 534194.3308734964
    elif mass < 21.3412802461267:
        return -16369.140518938324 * mass + 498748.721785933
    elif mass < 21.987942378864584:
        return -14833.521875538247 * mass + 465655.0493083184
    elif mass < 22.654199020886562:
        return -13441.962391212743 * mass + 434757.25445444183
    elif mass < 23.340643905418442:
        return -12180.947616004374 * mass + 405909.6332822564
    elif mass < 24.047888756396528:
        return -11038.230914917563 * mass + 378976.1498013189
    elif mass < 24.7765638336041:
        return -10002.71453190961 * mass + 353829.79447141325
    elif mass < 25.52731849432614:
        return -9064.341811481938 * mass + 330351.9852669377
    elif mass < 26.300821772022715:
        return -8213.999531154454 * mass + 308432.0084826039
    elif mass < 27.097762972536774:
        return -7443.429396312369 * mass + 287966.49664371257
    elif mass < 27.91885228836761:
        return -6745.1478378761585 * mass + 268858.94105872384
    elif mass < 28.764821431557436:
        return -6112.373333903076 * mass + 251019.2367157852
    elif mass < 29.63642428575506:
        return -5538.96054927282 * mass + 234363.25737670768
    elif mass < 30.53443757803772:
        return -5019.34065385514 * mass + 218812.45886509324
    elif mass < 31.459661571089846:
        return -4548.467239534732 * mass + 204293.50867755222
    elif mass < 32.41292077635544:
        return -4121.767310858169 * mass + 190737.94017148804
    elif mass < 33.395064688799785:
        return -3735.096873336325 * mass + 178081.8296986817
    elif mass < 34.40696854393511:
        return -3384.7006880894783 * mass + 166265.495162195
    elif mass < 35.44953409778489:
        return -3067.175801981311 * mass + 155233.21457503116
    elif mass < 36.52369043048187:
        return -2779.4384990565954 * mass + 144932.96329337286
    elif mass < 37.63039477421591:
        return -2518.6943523251293 * mass + 135316.16868532004
    elif mass < 38.77063336626942:
        return -2282.4110850403245 * mass + 126337.48107813275
    elif mass < 39.94542232790075:
        return -2098.830304819598 * mass + 119149.66772500594
    elif mass < 41.15580856985847:
        return -1886.2412854129125 * mass + 110574.58168060276
    elif mass < 42.402870725333756:
        return -1721.2251525174197 * mass + 103716.39177930114
    elif mass < 43.68772011118209:
        return -1558.5915916816336 * mass + 96757.97174053216
    elif mass < 45.01150171827101:
        return -1400.4085968484894 * mass + 89779.67112006168
    elif mass < 46.37539523183634:
        return -1267.9757671119032 * mass + 83750.65664387631
    elif mass < 47.78061608275621:
        return -1157.3487629808835 * mass + 78569.80448206994
    elif mass < 49.22841653067981:
        return -1047.9670308564878 * mass + 73296.4201529144
    elif mass < 50.720086779975944:
        return -948.9152231708367 * mass + 68376.38739686998
    elif mass < 52.256956129496:
        return -852.2676650968601 * mass + 63426.434415172145
    elif mass < 53.84039415717585:
        return -771.6413266114362 * mass + 59164.75518994806
    elif mass < 55.47181194053243:
        return -698.6369762907558 * mass + 55188.962723190736
    elif mass < 57.1526633141425:
        return -632.5330787055476 * mass + 51479.82283481699
    elif mass < 58.88444616522433:
        return -572.6793366195942 * mass + 48019.56213315388
    elif mass < 60.668703768476796:
        return -518.4840159751182 * mass + 44791.43434745292
    elif mass < 62.50702616136541:
        return -469.4137314874244 * mass + 41779.9596309389
    elif mass < 64.40105156108095:
        return -428.70207155685165 * mass + 39207.686490530454
    elif mass < 66.35246782443328:
        return -388.1554827750738 * mass + 36573.149005620304
    elif mass < 68.36301395198143:
        return -351.44119004559644 * mass + 34115.354301338746
    elif mass < 70.43448163774043:
        return -318.1430781657978 * mass + 31816.660841848618
    elif mass < 72.5687168658456:
        return -288.0456767502926 * mass + 29677.91564178936
    elif mass < 74.76762155559759:
        return -258.3598336364044 * mass + 27500.73324765288
    elif mass < 77.03315525635374:
        return -236.07609477009555 * mass + 25816.499419184136
    elif mass < 79.36733689377651:
        return -213.69788027666138 * mass + 24075.721432927
    elif mass < 81.77224656899482:
        return -191.6048703181 * mass + 22303.55636399969
    elif mass < 84.25002741228194:
        return -175.1308349925522 * mass + 20941.720795221983
    elif mass < 86.8028874929015:
        return -158.52447133121225 * mass + 19528.907705399888
    elif mass < 89.4331017868239:
        return -143.49062072130545 * mass + 18211.122915502798
    elif mass < 92.14301420406645:
        return -129.85589276250371 * mass + 16978.53657696608
    elif mass < 94.93503967746386:
        return -117.53733946730361 * mass + 15832.331460204688
    elif mass < 97.81166631473069:
        return -106.38621211242004 * mass + 14763.31224564384
    elif mass < 100.77545761573333:
        return -96.27220727177901 * mass + 13763.303578911187
    elif mass < 103.82905475694768:
        return -87.11809610786906 * mass + 12830.763433328146
    elif mass < 106.97517894513796:
        return -78.83277492961786 * mass + 11961.139319274958
    elif mass < 110.21663384235457:
        return -71.33396883599468 * mass + 11150.205667431943
    elif mass < 113.55630806441175:
        return -65.25968997331644 * mass + 10474.128501135436
    elif mass < 116.99717775507149:
        return -58.39214107599081 * mass + 9686.645525210588
    elif mass < 120.54230923822792:
        return -52.82258508484542 * mass + 9027.137875397693
    elif mass < 124.19486175045546:
        return -48.33882451770344 * mass + 8480.626537567978
    elif mass < 127.95809025635602:
        return -43.725775306179074 * mass + 7902.089208774394
    elif mass < 131.83534834921383:
        return -39.551609523690175 * mass + 7362.731228954252
    elif mass < 135.83009123954335:
        return -35.774718468081005 * mass + 6859.918666609693
    elif mass < 139.94587883419274:
        return -32.754930560282254 * mass + 6445.70371016537
    elif mass < 144.1863789087476:
        return -29.621013927287606 * mass + 6003.374484117547
    elif mass < 148.55537037606157:
        return -27.133515506848582 * mass + 5642.235489786828
    elif mass < 153.05674665382662:
        return -24.211271151611026 * mass + 5204.37643183128
    elif mass < 157.69451913418422:
        return -21.878122000081255 * mass + 4842.8025954939685
    elif mass < 162.47282075846914:
        return -20.0457000220736 * mass + 4550.067523571768
    elif mass < 167.39590970027152:
        return -18.37401625079183 * mass + 4275.345142828897
    elif mass < 172.46817316009944:
        return -16.851098356037333 * mass + 4017.9099444185154
    elif mass < 177.69413127502364:
        return -15.701958729294592 * mass + 3817.7722681209907
    elif mass < 183.07844114678812:
        return -14.662876310591198 * mass + 3631.325700812953
    elif mass < 188.62590099197692:
        return -14.655764398061297 * mass + 3630.4831542698353
    elif mass < 194.3414544179348:
        return -16.798722448739376 * mass + 4041.2264183852003
    elif mass < 198.24771765173531:
        return 0 * mass + 0
    # NOTE: the generated source contained a second, byte-identical
    # `elif mass < 198.24771765173531` branch here; it was unreachable
    # (same condition as the branch above) and has been removed.
    else:
        return 0
|
juzikongREPO_NAMEphotGalIMFPATH_START.@photGalIMF_extracted@photGalIMF-main@simulation_results_from_galaxy_evol@example@igimf_epoch_55.py@.PATH_END.py
|
{
"filename": "lcsftest.py",
"repo_name": "lmwalkowicz/Cheetah",
"repo_path": "Cheetah_extracted/Cheetah-master/finalcode/lcsftest.py",
"type": "Python"
}
|
from numpy import *
from lcspot import *
from lcsinglefit import *
from lcgenerate import *
import sys
def main():
    """Run repeated light-curve spot-fit trials on synthetic data.

    Positional command-line arguments (in order): number of trials,
    iteration steps, number of clusters, threshold ratio, noise factor.
    For each trial a random light curve is generated, fit with
    ``vincfit``, and the parameter distances between each fitted
    parameter set and the true parameters are printed.
    """
    ntrials = int(sys.argv[1])
    isteps = int(sys.argv[2])
    nclusters = int(sys.argv[3])
    threshratio = float(sys.argv[4])
    noisefac = float(sys.argv[5])
    for _ in range(ntrials):
        # genrandlc / vincfit / paramdist come from the star imports
        # (lcgenerate, lcsinglefit, lcspot) at the top of the file.
        phase, intensity, tparams = genrandlc(noisefactor=noisefac)
        paramsets = vincfit((phase, intensity), [], isteps, nclusters,
                            threshratio, plsprint='none', plsplot=False)
        pdists = [paramdist(fps, tparams) for fps in paramsets]
        # print() call form works on both Python 2 and 3; the original
        # used the Python 2 print statement, a SyntaxError on Python 3.
        print(pdists)
# Standard entry-point guard. The original tested `'lcsftest.py' in
# sys.argv`, which silently does nothing when the script is invoked via a
# path (e.g. `python finalcode/lcsftest.py`), since sys.argv[0] is then
# the full path rather than the bare filename.
if __name__ == '__main__':
    main()
|
lmwalkowiczREPO_NAMECheetahPATH_START.@Cheetah_extracted@Cheetah-master@finalcode@lcsftest.py@.PATH_END.py
|
{
"filename": "run_sntd_init.py",
"repo_name": "jpierel14/sntd",
"repo_path": "sntd_extracted/sntd-master/sntd/batch/run_sntd_init.py",
"type": "Python"
}
|
import pickle,sys,sntd,os,traceback
from optparse import OptionParser
from copy import copy
import numpy as np
# NOTE(review): this file appears to be a batch-job *template* -- the
# `*replace` tokens below (njobsreplace, nlcsreplace, batchinitreplace,
# sntdcommandreplace) are presumably substituted with real values/code by
# sntd's batch-submission machinery before execution, so the file is not
# runnable as-is. Confirm against sntd's batch setup code.
njobs=njobsreplace  # total number of batch jobs
nlcs=nlcsreplace  # total number of light curves to process
parser = OptionParser()
(options,args)=parser.parse_args()
batchinitreplace
# Load the pickled light-curve data and per-curve constants staged next to
# this script.
all_dat=pickle.load(open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                      'sntd_data.pkl'),'rb'))
all_const=pickle.load(open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                        'sntd_constants.pkl'),'rb'))
# This job (index sys.argv[1]) handles an equal-sized contiguous slice of
# the data; the upper bound is clamped to the actual data length.
inds=[int(nlcs/njobs)*int(sys.argv[1]),int(nlcs/njobs)*int(sys.argv[1])+int(nlcs/njobs)]
inds[1]=min(inds[-1],len(all_dat))
all_res=[]
for i in range(inds[0],inds[1]):
    # Entries may be file paths rather than objects; load those lazily.
    if isinstance(all_dat[i],str):
        all_dat[i]=pickle.load(open(all_dat[i],'rb'))
    all_dat[i].constants={}
    if all_const is not None:
        for c in all_const.keys():
            # Sequence-valued constants are per-curve (indexed by i);
            # scalars are shared by every curve.
            if isinstance(all_const[c],(list,tuple,np.ndarray)):
                all_dat[i].constants[c]=all_const[c][i]
            else:
                all_dat[i].constants[c]=all_const[c]
    try:
        fitCurves=sntdcommandreplace
        all_res.append(copy(fitCurves))
    except Exception as e:
        # Record a failed fit as None so result indices stay aligned with
        # the input light curves.
        print('Failed')
        print(traceback.format_exc())
        all_res.append(None)
# Dump this job's results; a collector presumably gathers the
# sntd_fit<job>.pkl files afterwards.
filename=os.path.join(os.path.abspath(os.path.dirname(__file__)),'sntd_fit%s.pkl'%sys.argv[1])
pickle.dump(all_res,open(filename,'wb'))
|
jpierel14REPO_NAMEsntdPATH_START.@sntd_extracted@sntd-master@sntd@batch@run_sntd_init.py@.PATH_END.py
|
{
"filename": "_range.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/geo/lataxis/_range.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class RangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for ``layout.geo.lataxis.range``: a pair of numbers."""

    def __init__(self, plotly_name="range", parent_name="layout.geo.lataxis", **kwargs):
        # Both endpoints share the same item schema; use independent copies
        # so the two item dicts remain distinct objects.
        item_schema = {"editType": "plot", "valType": "number"}
        default_items = [dict(item_schema), dict(item_schema)]
        super(RangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            items=kwargs.pop("items", default_items),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@geo@lataxis@_range.py@.PATH_END.py
|
{
"filename": "Kramer_1998_raw_to_yaml.py",
"repo_name": "NickSwainston/pulsar_spectra",
"repo_path": "pulsar_spectra_extracted/pulsar_spectra-main/pulsar_spectra/catalogue_papers/Kramer_1998_raw_to_yaml.py",
"type": "Python"
}
|
import json
import psrqpy
import csv
# Query the full ATNF catalogue once, so B1950 names can be mapped to J2000
# names and catalogue membership can be checked below.
query = psrqpy.QueryATNF(params=['PSRJ', 'NAME', 'PSRB', 'P0']).pandas
all_jnames = list(query['PSRJ'])
# The raw table was converted from an image to CSV using ABBYY FineReader.
with open("Kramer_1998_raw.csv") as file:
    tsv_file = csv.reader(file, delimiter=" ")
    lines = []
    for line in tsv_file:
        lines.append(line)
pulsar_dict = {}
for row in lines:
    row = [r.strip() for r in row]
    # Normalise OCR artefacts: en-dash / unicode-minus variants and spaces.
    pulsar = row[0].strip().replace("–", "-").replace(" ", "").replace("−", "-")
    if pulsar.startswith("B"):
        # Map a B1950 name to its J2000 equivalent via the catalogue.
        pid = list(query['PSRB']).index(pulsar)
        pulsar = query['PSRJ'][pid]
    # Wrong names
    if pulsar == "J1730-2324":
        pulsar = "J1730-2304"
    # Report (but still keep) names that are not in the ATNF J-name list.
    if pulsar not in all_jnames:
        print(pulsar)
    # Columns 4/5 hold the 1400 MHz flux density and its uncertainty; the
    # flux cell carries a trailing character that is stripped before parsing.
    flux= float(row[4][:-1])
    flux_err = float(row[5])
    pulsar_dict[pulsar] = {
        "Frequency MHz":[1400.],
        "Bandwidth MHz":[40.],
        "Flux Density mJy":[flux],
        "Flux Density error mJy":[flux_err]
    }
# Written as JSON, which is a valid subset of YAML.
with open("Kramer_1998.yaml", "w") as cat_file:
    cat_file.write(json.dumps(pulsar_dict, indent=1))
|
NickSwainstonREPO_NAMEpulsar_spectraPATH_START.@pulsar_spectra_extracted@pulsar_spectra-main@pulsar_spectra@catalogue_papers@Kramer_1998_raw_to_yaml.py@.PATH_END.py
|
{
"filename": "reports.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pytest/py2/_pytest/reports.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from pprint import pprint
import py
import six
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprEntry
from _pytest._code.code import ReprEntryNative
from _pytest._code.code import ReprExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import ReprFuncArgs
from _pytest._code.code import ReprLocals
from _pytest._code.code import ReprTraceback
from _pytest._code.code import TerminalRepr
from _pytest.outcomes import skip
from _pytest.pathlib import Path
def getslaveinfoline(node):
    """Return a one-line description of an xdist slave node.

    The line is built once from ``node.slaveinfo`` and memoized on the node
    as ``_slaveinfocache`` so repeated calls are cheap.
    """
    try:
        return node._slaveinfocache
    except AttributeError:
        info = node.slaveinfo
        version = "%s.%s.%s" % info["version_info"][:3]
        line = "[%s] %s -- Python %s %s" % (
            info["id"],
            info["sysplatform"],
            version,
            info["executable"],
        )
        node._slaveinfocache = line
        return line
class BaseReport(object):
    """Shared behaviour for TestReport and CollectReport.

    A report is a plain attribute bag (everything lives in ``__dict__``)
    with convenience accessors for captured output and the JSON
    (de)serialization helpers used to ship reports between processes.
    """

    # Overridden by subclasses / instance attributes.
    when = None
    location = None

    def __init__(self, **kw):
        self.__dict__.update(kw)

    def toterminal(self, out):
        # Write this report's long representation to a terminal writer.
        if hasattr(self, "node"):
            out.line(getslaveinfoline(self.node))
        longrepr = self.longrepr
        if longrepr is None:
            return
        if hasattr(longrepr, "toterminal"):
            longrepr.toterminal(out)
        else:
            try:
                out.line(longrepr)
            except UnicodeEncodeError:
                out.line("<unprintable longrepr>")

    def get_sections(self, prefix):
        # Yield (prefix, content) for every section whose name starts with
        # *prefix* (e.g. "Captured stdout").
        for name, content in self.sections:
            if name.startswith(prefix):
                yield prefix, content

    @property
    def longreprtext(self):
        """
        Read-only property that returns the full string representation
        of ``longrepr``.
        .. versionadded:: 3.0
        """
        tw = py.io.TerminalWriter(stringio=True)
        tw.hasmarkup = False
        self.toterminal(tw)
        exc = tw.stringio.getvalue()
        return exc.strip()

    @property
    def caplog(self):
        """Return captured log lines, if log capturing is enabled
        .. versionadded:: 3.5
        """
        return "\n".join(
            content for (prefix, content) in self.get_sections("Captured log")
        )

    @property
    def capstdout(self):
        """Return captured text from stdout, if capturing is enabled
        .. versionadded:: 3.0
        """
        return "".join(
            content for (prefix, content) in self.get_sections("Captured stdout")
        )

    @property
    def capstderr(self):
        """Return captured text from stderr, if capturing is enabled
        .. versionadded:: 3.0
        """
        return "".join(
            content for (prefix, content) in self.get_sections("Captured stderr")
        )

    # Outcome shorthands.
    passed = property(lambda x: x.outcome == "passed")
    failed = property(lambda x: x.outcome == "failed")
    skipped = property(lambda x: x.outcome == "skipped")

    @property
    def fspath(self):
        # The node id starts with the file path, "::"-separated.
        return self.nodeid.split("::")[0]

    @property
    def count_towards_summary(self):
        """
        **Experimental**
        Returns True if this report should be counted towards the totals shown at the end of the
        test session: "1 passed, 1 failure, etc".
        .. note::
            This function is considered **experimental**, so beware that it is subject to changes
            even in patch releases.
        """
        return True

    @property
    def head_line(self):
        """
        **Experimental**
        Returns the head line shown with longrepr output for this report, more commonly during
        traceback representation during failures::
            ________ Test.foo ________
        In the example above, the head_line is "Test.foo".
        .. note::
            This function is considered **experimental**, so beware that it is subject to changes
            even in patch releases.
        """
        if self.location is not None:
            fspath, lineno, domain = self.location
            return domain

    def _get_verbose_word(self, config):
        # Ask the report-status hook for the verbose word (e.g. "PASSED").
        _category, _short, verbose = config.hook.pytest_report_teststatus(
            report=self, config=config
        )
        return verbose

    def _to_json(self):
        """
        This was originally the serialize_report() function from xdist (ca03269).
        Returns the contents of this report as a dict of builtin entries, suitable for
        serialization.
        Experimental method.
        """

        def disassembled_report(rep):
            # Break a rich longrepr (traceback + crash location) down into
            # plain dicts of builtins.
            reprtraceback = rep.longrepr.reprtraceback.__dict__.copy()
            reprcrash = rep.longrepr.reprcrash.__dict__.copy()
            new_entries = []
            for entry in reprtraceback["reprentries"]:
                entry_data = {
                    "type": type(entry).__name__,
                    "data": entry.__dict__.copy(),
                }
                for key, value in entry_data["data"].items():
                    if hasattr(value, "__dict__"):
                        entry_data["data"][key] = value.__dict__.copy()
                new_entries.append(entry_data)
            reprtraceback["reprentries"] = new_entries
            return {
                "reprcrash": reprcrash,
                "reprtraceback": reprtraceback,
                "sections": rep.longrepr.sections,
            }

        d = self.__dict__.copy()
        if hasattr(self.longrepr, "toterminal"):
            if hasattr(self.longrepr, "reprtraceback") and hasattr(
                self.longrepr, "reprcrash"
            ):
                d["longrepr"] = disassembled_report(self)
            else:
                # Other terminal-writable longreprs: keep their text form.
                d["longrepr"] = six.text_type(self.longrepr)
        else:
            d["longrepr"] = self.longrepr
        for name in d:
            # Paths are not JSON-serializable; stringify them.
            if isinstance(d[name], (py.path.local, Path)):
                d[name] = str(d[name])
            elif name == "result":
                d[name] = None # for now
        return d

    @classmethod
    def _from_json(cls, reportdict):
        """
        This was originally the serialize_report() function from xdist (ca03269).
        Factory method that returns either a TestReport or CollectReport, depending on the calling
        class. It's the callers responsibility to know which class to pass here.
        Experimental method.
        """
        if reportdict["longrepr"]:
            if (
                "reprcrash" in reportdict["longrepr"]
                and "reprtraceback" in reportdict["longrepr"]
            ):
                reprtraceback = reportdict["longrepr"]["reprtraceback"]
                reprcrash = reportdict["longrepr"]["reprcrash"]
                unserialized_entries = []
                reprentry = None
                for entry_data in reprtraceback["reprentries"]:
                    data = entry_data["data"]
                    entry_type = entry_data["type"]
                    if entry_type == "ReprEntry":
                        reprfuncargs = None
                        reprfileloc = None
                        reprlocals = None
                        if data["reprfuncargs"]:
                            reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
                        if data["reprfileloc"]:
                            reprfileloc = ReprFileLocation(**data["reprfileloc"])
                        if data["reprlocals"]:
                            reprlocals = ReprLocals(data["reprlocals"]["lines"])
                        reprentry = ReprEntry(
                            lines=data["lines"],
                            reprfuncargs=reprfuncargs,
                            reprlocals=reprlocals,
                            filelocrepr=reprfileloc,
                            style=data["style"],
                        )
                    elif entry_type == "ReprEntryNative":
                        reprentry = ReprEntryNative(data["lines"])
                    else:
                        # Unknown entry type: raise with a diagnostic dump.
                        _report_unserialization_failure(entry_type, cls, reportdict)
                    unserialized_entries.append(reprentry)
                reprtraceback["reprentries"] = unserialized_entries
                exception_info = ReprExceptionInfo(
                    reprtraceback=ReprTraceback(**reprtraceback),
                    reprcrash=ReprFileLocation(**reprcrash),
                )
                for section in reportdict["longrepr"]["sections"]:
                    exception_info.addsection(*section)
                reportdict["longrepr"] = exception_info
        return cls(**reportdict)
def _report_unserialization_failure(type_name, report_class, reportdict):
    """Raise RuntimeError with a diagnostic dump about an unknown
    serialized report entry type, asking the user to file a bug."""
    url = "https://github.com/pytest-dev/pytest/issues"
    stream = py.io.TextIO()
    separator = "-" * 100
    pprint(separator, stream=stream)
    pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream)
    pprint("report_name: %s" % report_class, stream=stream)
    pprint(reportdict, stream=stream)
    pprint("Please report this bug at %s" % url, stream=stream)
    pprint(separator, stream=stream)
    raise RuntimeError(stream.getvalue())
class TestReport(BaseReport):
    """ Basic test report object (also used for setup and teardown calls if
    they fail).
    """

    # Prevent pytest from collecting this class as a test.
    __test__ = False

    def __init__(
        self,
        nodeid,
        location,
        keywords,
        outcome,
        longrepr,
        when,
        sections=(),
        duration=0,
        user_properties=None,
        **extra
    ):
        #: normalized collection node id
        self.nodeid = nodeid
        #: a (filesystempath, lineno, domaininfo) tuple indicating the
        #: actual location of a test item - it might be different from the
        #: collected one e.g. if a method is inherited from a different module.
        self.location = location
        #: a name -> value dictionary containing all keywords and
        #: markers associated with a test invocation.
        self.keywords = keywords
        #: test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome
        #: None or a failure representation.
        self.longrepr = longrepr
        #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
        self.when = when
        #: user properties is a list of tuples (name, value) that holds user
        #: defined properties of the test
        self.user_properties = list(user_properties or [])
        #: list of pairs ``(str, str)`` of extra information which needs to
        #: marshallable. Used by pytest to add captured text
        #: from ``stdout`` and ``stderr``, but may be used by other plugins
        #: to add arbitrary information to reports.
        self.sections = list(sections)
        #: time it took to run just the test
        self.duration = duration
        self.__dict__.update(extra)

    def __repr__(self):
        return "<%s %r when=%r outcome=%r>" % (
            self.__class__.__name__,
            self.nodeid,
            self.when,
            self.outcome,
        )

    @classmethod
    def from_item_and_call(cls, item, call):
        """
        Factory method to create and fill a TestReport with standard item and call info.
        """
        when = call.when
        duration = call.stop - call.start
        keywords = {x: 1 for x in item.keywords}
        excinfo = call.excinfo
        sections = []
        if not call.excinfo:
            outcome = "passed"
            longrepr = None
        else:
            if not isinstance(excinfo, ExceptionInfo):
                # Raw object (not an ExceptionInfo), e.g. an internal error.
                outcome = "failed"
                longrepr = excinfo
            elif excinfo.errisinstance(skip.Exception):
                # Skips are recorded as a (path, lineno, message) triple.
                outcome = "skipped"
                r = excinfo._getreprcrash()
                longrepr = (str(r.path), r.lineno, r.message)
            else:
                outcome = "failed"
                if call.when == "call":
                    longrepr = item.repr_failure(excinfo)
                else: # exception in setup or teardown
                    longrepr = item._repr_failure_py(
                        excinfo, style=item.config.getoption("tbstyle", "auto")
                    )
        # Attach output sections captured on the item during this phase.
        for rwhen, key, content in item._report_sections:
            sections.append(("Captured %s %s" % (key, rwhen), content))
        return cls(
            item.nodeid,
            item.location,
            keywords,
            outcome,
            longrepr,
            when,
            sections,
            duration,
            user_properties=item.user_properties,
        )
class CollectReport(BaseReport):
    """Report produced for a single collection step."""

    when = "collect"

    def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
        #: normalized collection node id
        self.nodeid = nodeid
        #: collection outcome, one of "passed", "failed", "skipped".
        self.outcome = outcome
        #: None or a failure representation.
        self.longrepr = longrepr
        #: the collected items (an empty list when *result* is falsy).
        self.result = result if result else []
        #: list of ``(name, content)`` pairs of captured output.
        self.sections = list(sections)
        self.__dict__.update(extra)

    @property
    def location(self):
        # Collection has no line number; use the path for both ends.
        path = self.fspath
        return (path, None, path)

    def __repr__(self):
        return "<CollectReport %r lenresult=%s outcome=%r>" % (
            self.nodeid,
            len(self.result),
            self.outcome,
        )
class CollectErrorRepr(TerminalRepr):
    """Terminal representation of a collection error message."""

    def __init__(self, msg):
        # Plain error text to display.
        self.longrepr = msg

    def toterminal(self, out):
        # Collection errors are always rendered in red.
        out.line(self.longrepr, red=True)
def pytest_report_to_serializable(report):
    """Serialize a TestReport/CollectReport into a JSON-safe dict.

    Returns None for any other report type so another plugin may handle it.
    """
    if not isinstance(report, (TestReport, CollectReport)):
        return None
    data = report._to_json()
    # Record the concrete class so deserialization can reconstruct it.
    data["_report_type"] = type(report).__name__
    return data
def pytest_report_from_serializable(data):
    """Rebuild a report object from its serialized dict form."""
    if "_report_type" in data:
        report_type = data["_report_type"]
        if report_type == "TestReport":
            return TestReport._from_json(data)
        if report_type == "CollectReport":
            return CollectReport._from_json(data)
        assert False, "Unknown report_type unserialize data: {}".format(
            data["_report_type"]
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pytest@py2@_pytest@reports.py@.PATH_END.py
|
{
"filename": "calculate_ska_nz_standalone.py",
"repo_name": "philbull/RadioFisher",
"repo_path": "RadioFisher_extracted/RadioFisher-master/calculate_ska_nz_standalone.py",
"type": "Python"
}
|
#!/usr/bin/python
"""
Calculate dn/dz for SKA HI galaxy redshift surveys, using dn/dz curves for
given flux thresholds (from Mario Santos) and flux scalings with redshift for
specific arrays.
-- Phil Bull, 2017
"""
import sys

import numpy as np
import pylab as P
import scipy.integrate
import scipy.interpolate
import scipy.optimize
C = 2.99792458e5 # Speed of light, km/s
D2RAD = np.pi / 180. # Convert degrees to radians
HRS_MHZ = 3.6e9 # 1 hour in MHz^-1
# Parse command-line arguments: survey area [deg^2], total integration time
# [hrs], and detection threshold in sigma. (Python 2 print syntax below.)
try:
    SAREA = float(sys.argv[1])
    TTOT = float(sys.argv[2])
    NSIGMA = float(sys.argv[3])
except:
    print "Expects three arguments: Sarea[deg^2] t_tot[hrs] nsigma"
    sys.exit(1)
DEBUG_PLOT = True # Whether to plot fitting functions or not
NU_LINE = 1.420406 # HI emission line freq. in GHz
FULLSKY = (4.*np.pi * (180./np.pi)**2.) # deg^2 in the full sky
NGAL_MIN = 1e3 # Min. no. of galaxies to tolerate in a redshift bin
CBM = 1. #np.sqrt(1.57) # Correction factor due to effective beam for MID/MK (OBSOLETE)
CTH = 0.5 # Correction factor due to taking 5 sigma (not 10 sigma) cuts for SKA1
SBIG = 500. # Flux rms to extrapolate dn/dz out to (constrains behaviour at large Srms)
DZBINS = 0.05 #0.1 # Width of redshift bins
# Define fiducial cosmology and parameters
# Planck 2015 flat LambdaCDM best-fit parameters (from arXiv:1502.01589, Table 3)
# (Planck TT,TE,EE+lowP)
cosmo = {
    'omega_M_0': 0.31387,
    'omega_lambda_0': 0.68613,
    'omega_b_0': 0.04917,
    'N_eff': 3.046,
    'h': 0.6727,
    'ns': 0.9645,
    'sigma_8': 0.831,
    'gamma': 0.55,
    'w0': -1.,
    'wa': 0.,
    'fNL': 0.,
    'mnu': 0.,
    'k_piv': 0.05,
    'A': 1.,
    'sigma_nl': 7.,
}
#-------------------------------------------------------------------------------
# Survey specifications
#-------------------------------------------------------------------------------
# SKA1-MID B2 Rebaselined
name = "SKA1-MID B2 Design"
numin = 950.
numax = 1420.
Sarea = SAREA #5e3
ttot = TTOT #1e4
dnu = 0.01
Nsig = NSIGMA
# Sub-array specification tuples:
# Tinst1, Ddish1, Ndish1, effic1, numin1, numax1
# Tinst = T_recv
expt1 = (7.5, 15., 133., 0.85, 950., 1420.) # MID B2 Design Baseline
expt2 = (30., 13.5, 64., 0.85, 900., 1420.) # MeerKAT L-band
#-------------------------------------------------------------------------------
# Fitting coeffs. from Table 3 in v1 of Yahya et al. paper
Srms = np.array([0., 1., 3., 5., 6., 7.3, 10., 23., 40., 70., 100., 150., 200.])
c1 = [6.21, 6.55, 6.53, 6.55, 6.58, 6.55, 6.44, 6.02, 5.74, 5.62, 5.63, 5.48, 5.0]
c2 = [1.72, 2.02, 1.93, 1.93, 1.95, 1.92, 1.83, 1.43, 1.22, 1.11, 1.41, 1.33, 1.04]
c3 = [0.79, 3.81, 5.22, 6.22, 6.69, 7.08, 7.59, 9.03, 10.58, 13.03, 15.49, 16.62, 17.52]
c4 = [0.5874, 0.4968, 0.5302, 0.5504, 0.5466, 0.5623, 0.5928, 0.6069, 0.628,
      0.6094, 0.6052, 0.6365, 1., 1.] # Needs end padding 1
c5 = [0.3577, 0.7206, 0.7809, 0.8015, 0.8294, 0.8233, 0.8072, 0.8521, 0.8442,
      0.9293, 1.0859, 0.965, 0., 0.] # Needs end padding 0
c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3)
c4 = np.array(c4); c5 = np.array(c5)
Smax = np.max(Srms)
def fluxrms(nu, Tinst, Ddish, Ndish, Sarea, ttot, dnu=0.01, effic=0.7):
    """
    Flux rms [uJy] for a single dish array, using the expression from
    Yahya et al. (2015), near Eq. 3.

    Parameters
    ----------
    nu : array_like
        Observing frequency [MHz].
    Tinst : float
        Instrument (receiver) temperature [K].
    Ddish, Ndish : float
        Dish diameter [m] and number of dishes.
    Sarea, ttot : float
        Survey area [deg^2] and total survey time [hrs].
    dnu : float, optional
        Channel width [MHz].
    effic : float, optional
        Aperture efficiency.
    """
    # System temperature: receiver + sky (CMB + atmosphere + Galactic).
    T_sky = 2.73 + 3. + 25.2*(408./nu)**2.75 # [K]
    T_sys = Tinst + T_sky # [K]
    # Total effective collecting area of the array.
    area_eff = effic * Ndish * np.pi * (Ddish/2.)**2. # [m^2]
    # Primary-beam field of view and the resulting time per pointing.
    fov_deg2 = (np.pi/8.) * (1.3 * 3e8 / (nu*1e6 * Ddish))**2. * (180./np.pi)**2. # [deg^2]
    t_point = ttot * (fov_deg2 / Sarea)
    return 260. * (T_sys/20.) * (25e3 / area_eff) * np.sqrt( (0.01/dnu) * (1./t_point) )
def fluxrms_combined(nu, expt1, expt2, Sarea, ttot, dnu=0.01):
    """
    Calculate the flux rms [uJy] for a combination of two arrays, using the
    expression from Yahya et al. (2015), near Eq. 3.

    Parameters
    ----------
    nu : array_like
        Observing frequencies [MHz].
    expt1, expt2 : tuple
        (Tinst [K], Ddish [m], Ndish, efficiency, numin [MHz], numax [MHz])
        for each sub-array.
    Sarea : float
        Survey area [deg^2].
    ttot : float
        Total survey time [hrs].
    dnu : float, optional
        Channel width [MHz].

    Returns
    -------
    Srms : array_like
        Flux rms [uJy] at each input frequency.
    """
    Tinst1, Ddish1, Ndish1, effic1, numin1, numax1 = expt1
    Tinst2, Ddish2, Ndish2, effic2, numin2, numax2 = expt2
    # Calculate Aeff / Tsys for each sub-array
    # Tsky = T_CMB + T_atm + T_gal
    Tsky = 2.73 + 3. + 25.2*(408./nu)**2.75 # [K]
    Tsys1 = Tinst1 + Tsky
    Tsys2 = Tinst2 + Tsky
    Aeff1 = effic1 * Ndish1 * np.pi * (Ddish1/2.)**2. # [m^2]
    Aeff2 = effic2 * Ndish2 * np.pi * (Ddish2/2.)**2. # [m^2]
    # Band masks: 1 where the frequency is inside a sub-array's band, else 0.
    msk1 = np.zeros(nu.shape); msk2 = np.zeros(nu.shape)
    msk1[np.where(np.logical_and(nu >= numin1, nu <= numax1))] = 1.
    msk2[np.where(np.logical_and(nu >= numin2, nu <= numax2))] = 1.
    # Calculate combined Aeff / Tsys
    Aeff_over_Tsys = Aeff1/Tsys1*msk1 + Aeff2/Tsys2*msk2
    # Dish-number-weighted mean field of view of the two sub-arrays.
    fov1 = (np.pi/8.) * (1.3 * 3e8 / (nu*1e6 * Ddish1))**2.
    # BUGFIX: fov2 previously used Ddish1 (copy-paste error); the second
    # sub-array's beam must be computed with its own dish diameter Ddish2.
    fov2 = (np.pi/8.) * (1.3 * 3e8 / (nu*1e6 * Ddish2))**2.
    fov = (Ndish1 * fov1 + Ndish2 * fov2) / float(Ndish1 + Ndish2)
    fov *= (180./np.pi)**2. # [deg^2]
    # Calculate time per pointing and overall sensitivity
    tp = ttot * (fov / Sarea)
    Srms = 260. * (25e3/20.) / Aeff_over_Tsys * np.sqrt( (0.01/dnu) * (1./tp) )
    return Srms
def extend_with_linear_interp(xnew, x, y):
    """
    Return *y* with one extra sample appended: the value at *xnew*,
    linearly extrapolated from the last two points of (x, y).
    """
    run = x[-1] - x[-2]
    rise = y[-1] - y[-2]
    appended = y[-1] + rise * (xnew - x[-1]) / run
    return np.concatenate((y, [appended,]))
def n_bin(zmin, zmax, dndz, bias=None):
    """
    Number density of galaxies in a given z bin (assumes full sky). Also
    returns volume of bin. dndz argument expects an interpolation fn. in units
    of deg^-2.
    """
    # NOTE(review): relies on the module-level r(z) and H(z) splines that are
    # created in __main__ (background_evolution_splines), and on
    # scipy.integrate being importable — confirm an explicit
    # `import scipy.integrate` exists at the top of the file.
    _z = np.linspace(zmin, zmax, 500)
    # Comoving shell volume: 4*pi*c * integral of r(z)^2 / H(z) dz.
    vol = 4.*np.pi*C * scipy.integrate.simps(r(_z)**2. / H(_z), _z)
    N_bin = FULLSKY * scipy.integrate.simps(dndz(_z), _z)
    nz = N_bin / vol
    # Calculate mean bias (weighted by number density)
    if bias is not None:
        b = scipy.integrate.simps(bias(_z)*dndz(_z), _z) / (N_bin / FULLSKY)
        #b = bias(0.5*(zmin+zmax))
        return nz, vol, b
    return nz, vol
def redshift_bins(dz=0.1, Nbins=None):
    """
    Build redshift bin edges covering the survey band [numin, numax]
    (module-level globals, in MHz) via the redshifted HI line.

    If *Nbins* is given, the range is split evenly into that many bins;
    otherwise bins of width *dz* are laid down, and a final partial bin is
    appended when more than 0.04 in z would otherwise be left uncovered.
    """
    z_lo = NU_LINE*1e3 / numax - 1.
    z_hi = NU_LINE*1e3 / numin - 1.
    if z_lo < 0.:
        z_lo = 0.
    if Nbins is None:
        nb = np.floor((z_hi - z_lo) / dz)
        edges = np.linspace(z_lo, z_lo + dz*nb, nb+1)
        # Append a final (narrower) bin if a non-negligible slice of the
        # band would otherwise be dropped.
        if z_hi - np.max(edges) > 0.04:
            edges = np.concatenate((edges, [z_hi,]))
    else:
        edges = np.linspace(z_lo, z_hi, Nbins+1)
    return edges
def Ez(cosmo, z):
    """
    Dimensionless Hubble rate E(z) = H(z)/H0 for a curved w0-wa cosmology.
    """
    a = 1. / (1. + z)
    w0 = cosmo['w0']; wa = cosmo['wa']
    omega_m = cosmo['omega_M_0']
    omega_l = cosmo['omega_lambda_0']
    omega_k = 1. - omega_m - omega_l
    # CPL dark-energy density evolution, rho_DE(a)/rho_crit,0.
    rho_de = omega_l * np.exp(3.*wa*(a - 1.)) / a**(3.*(1. + w0 + wa))
    return np.sqrt( omega_m * a**(-3.) + omega_k * a**(-2.) + rho_de )
def fgrowth(cosmo, z, usegamma=False):
    """
    Generalised linear growth rate, f(z) = Omega_m(z)**gamma * (1 + eta).

    Parameters
    ----------
    cosmo : dict
        Standard cosmological parameter dictionary; may optionally contain
        modified-gravity parameters 'gamma0'/'gamma1' and 'eta0'/'eta1'.
    z : array_like of floats
        Redshifts.
    usegamma : bool, optional
        If True, ignore any MG growth parameters and use the standard
        'gamma' parameter.
    """
    omega_m_z = cosmo['omega_M_0'] * (1.+z)**3. / Ez(cosmo, z)**2.
    scale = 1. / (1. + z)
    # Growth index: constant 'gamma' unless time-dependent MG parameters are
    # supplied (and not explicitly overridden via usegamma).
    if usegamma or ('gamma0' not in cosmo.keys()):
        growth_index = cosmo['gamma']
    else:
        growth_index = cosmo['gamma0'] + cosmo['gamma1']*(1. - scale)
    if 'eta0' in cosmo.keys():
        eta = cosmo['eta0'] + cosmo['eta1']*(1. - scale)
    else:
        eta = 0.
    return omega_m_z**growth_index * (1. + eta)
def background_evolution_splines(cosmo, zmax=10., nsamples=500):
    """
    Get interpolation functions for background functions of redshift:
    * H(z), Hubble rate in km/s/Mpc
    * r(z), comoving distance in Mpc
    * D(z), linear growth factor
    * f(z), linear growth rate
    """
    _z = np.linspace(0., zmax, nsamples)
    a = 1. / (1. + _z)
    H0 = (100.*cosmo['h']); w0 = cosmo['w0']; wa = cosmo['wa']
    om = cosmo['omega_M_0']; ol = cosmo['omega_lambda_0']
    ok = 1. - om - ol
    # Sample Hubble rate H(z) and comoving dist. r(z) at discrete points
    omegaDE = ol * np.exp(3.*wa*(a - 1.)) / a**(3.*(1. + w0 + wa))
    E = np.sqrt( om * a**(-3.) + ok * a**(-2.) + omegaDE )
    _H = H0 * E
    r_c = np.concatenate( ([0.], scipy.integrate.cumtrapz(1./E, _z)) )
    # Convert the dimensionless distance integral to proper comoving
    # distance for open (sinh), closed (sin) or flat geometry.
    if ok > 0.:
        _r = C/(H0*np.sqrt(ok)) * np.sinh(r_c * np.sqrt(ok))
    elif ok < 0.:
        _r = C/(H0*np.sqrt(-ok)) * np.sin(r_c * np.sqrt(-ok))
    else:
        _r = (C/H0) * r_c
    # Integrate linear growth rate to find linear growth factor, D(z)
    # N.B. D(z=0) = 1.
    a = 1. / (1. + _z)
    _f = fgrowth(cosmo, _z)
    _D = np.concatenate( ([0.,], scipy.integrate.cumtrapz(_f, np.log(a))) )
    _D = np.exp(_D)
    # Construct interpolating functions and return
    r = scipy.interpolate.interp1d(_z, _r, kind='linear', bounds_error=False)
    H = scipy.interpolate.interp1d(_z, _H, kind='linear', bounds_error=False)
    D = scipy.interpolate.interp1d(_z, _D, kind='linear', bounds_error=False)
    f = scipy.interpolate.interp1d(_z, _f, kind='linear', bounds_error=False)
    return H, r, D, f
if __name__ == '__main__':
    # Extrapolate fitting functions to high flux rms
    c1 = extend_with_linear_interp(SBIG, Srms, c1)
    c2 = np.concatenate((c2, [1.,])) # Asymptote to linear fn. of redshift
    c3 = extend_with_linear_interp(SBIG, Srms, c3)
    Srms = np.concatenate((Srms, [SBIG,]))
    # Construct grid of dn/dz (deg^-2) as a function of flux rms and redshift and
    # then construct 2D interpolator
    z = np.linspace(0., 4., 400)
    nu = NU_LINE / (1. + z)
    _dndz = np.array([10.**c1[j] * z**c2[j] * np.exp(-c3[j]*z) for j in range(Srms.size)])
    _bias = np.array([c4[j] * np.exp(c5[j]*z) for j in range(Srms.size)])
    dndz = scipy.interpolate.RectBivariateSpline(Srms, z, _dndz, kx=1, ky=1)
    bias = scipy.interpolate.RectBivariateSpline(Srms, z, _bias, kx=1, ky=1)
    # Construct dndz(z) interpolation fn. for the sensitivity of actual experiment
    fsky = Sarea / FULLSKY
    nu = 1420. / (1. + z)
    # Calculate flux
    Sz = (Nsig/10.) * fluxrms_combined(nu, expt1, expt2, Sarea, ttot, dnu)
    #Sref = fluxrms(1000., Tinst, Ddish, Ndish, Sarea, ttot, dnu, effic)
    #print "Srms = %3.1f uJy [%d sigma]" % (Sref, Nsig)
    dndz_expt = scipy.interpolate.interp1d(z, dndz.ev(Sz, z))
    bias_expt = scipy.interpolate.interp1d(z, bias.ev(Sz, z))
    # Fit function to dn/dz [deg^-2]
    _z = np.linspace(1e-7, 1., 1e4)
    dndz_vals = dndz_expt(_z)
    bias_vals = bias_expt(_z)
    p0 = [100.*np.max(dndz_vals), 2., 10.]
    def lsq(params):
        # Residuals for the dn/dz fitting-function least-squares fit.
        A, c2, c3 = params
        model = A * _z**c2 * np.exp(-c3*_z)
        return model - dndz_vals
    p = scipy.optimize.leastsq(lsq, p0)[0]
    # Fit function to bias
    p0 = [np.max(bias_vals), 0.5]
    def lsq(params):
        # Residuals for the bias fit (rebinds the name `lsq` above).
        c4, c5 = params
        model = c4 * np.exp(c5*_z)
        w = np.sqrt(dndz_vals) # Weight fit by sqrt(dn/dz)
        return (model - bias_vals) * w
    pb = scipy.optimize.leastsq(lsq, p0)[0]
    # Print best-fit coefficients
    print "-"*50
    print "%s (%d deg^2) [%s-sigma]" % (name, Sarea, Nsig)
    print "-"*50
    print "Fitting coeffs."
    print "c1: %6.4f" % np.log10(p[0])
    print "c2: %6.4f" % p[1]
    print "c3: %6.4f" % p[2]
    print "c4: %6.4f" % pb[0]
    print "c5: %6.4f" % pb[1]
    print " & ".join(["%6.4f" % n for n in [np.log10(p[0]), p[1], p[2], pb[0], pb[1]]])
    # Calculate cosmo. functions
    cosmo_fns = background_evolution_splines(cosmo)
    H, r, D, f = cosmo_fns
    # Calculate binned number densities
    zbins = redshift_bins(dz=DZBINS)
    zc = np.array([0.5*(zbins[i] + zbins[i+1]) for i in range(zbins.size-1)])
    nz, vol, b = np.array( [n_bin(zbins[i], zbins[i+1], dndz_expt, bias_expt)
                            for i in range(zbins.size-1)] ).T
    vol *= fsky
    # Find z_max
    zz = np.linspace(0., 3., 1500)
    zzc = 0.5 * (zz[:-1] + zz[1:])
    _nz, _vol, _b = np.array( [n_bin(zz[i], zz[i+1], dndz_expt, bias_expt)
                               for i in range(zz.size-1)] ).T
    #print "z_min = %3.3f" % zz[np.argmin(np.abs(_nz - 5e-4))]
    print name
    # Load P(k) and get 1/(b^2 P(k_NL))
    k, pk = np.genfromtxt("cache_pk.dat").T
    knl = 0.14 * (1. + zzc)**(2./(2. + cosmo['ns']))
    kref = knl #0.1
    pk02 = scipy.interpolate.interp1d(k, pk, kind='linear')(0.5*kref)
    pkinv = 1./ ( pk02 * (D(zzc) * _b)**2. )
    print "z_max = %3.3f" % zzc[np.argmin(np.abs(_nz - pkinv))]
    # Output survey info
    print "-"*30
    print "zc zmin zmax n Mpc^-3 bias vol. Ngal Srms"
    for i in range(zc.size):
        #Szz = fluxrms[ID] * Scorr[ID]
        #Szz = NU_LINE/(1.+zc[i]) * Szz if not Sconst[ID] else Szz
        nu_c = np.atleast_1d( 1420. / (1. + zc[i]) )
        #Szz = (Nsig/10.) * fluxrms(nu_c, Tinst, Ddish, Ndish, Sarea, ttot, dnu, effic)
        Szz = (Nsig/10.) * fluxrms_combined(nu_c, expt1, expt2, Sarea, ttot, dnu) # 5 sigma
        # Trailing commas suppress newlines (Python 2) so the bin-quality
        # flags "*" (too few galaxies) and "#" (Srms beyond fit range) can
        # be appended on the same row.
        print "%2.2f %2.2f %2.2f %3.3e %6.3f %5.2f %5.3e %6.2f" % \
            (zc[i], zbins[i], zbins[i+1], nz[i], b[i], vol[i]/1e9, nz[i]*vol[i], Szz),
        if (nz[i]*vol[i]) < NGAL_MIN: print "*",
        if Szz > Smax: print "#",
        print ""
    print "-"*30
    print "Ntot: %3.3e" % np.sum(nz * vol)
    print "fsky: %3.3f" % fsky
    _zmin = (NU_LINE*1e3 / numax - 1.)
    print "zmin: %3.3f" % (_zmin if _zmin >= 0. else 0.)
    print "zmax: %3.3f" % (NU_LINE*1e3 / numin - 1.)
    #print "Srms const: %s" % Sconst[ID]
    print "-"*30
    print ""
    # Output fitting function coeffs as a fn. of survey area
    print "%10s %d %6.4f %6.4f %6.4f %6.4f %6.4f %3.3e" % (name, Sarea, np.log10(p[0]), p[1], p[2], pb[0], pb[1], np.sum(nz * vol))
    # Plot dn/dz in arcmin^-2
    P.subplot(111)
    P.plot(_z, dndz_expt(_z) / 60.**2., 'b-', lw=1.8)
    P.tick_params(axis='both', which='major', labelsize=18, size=8., width=1.5, pad=5.)
    P.ylabel(r"$N(z)$ $[{\rm amin}^{-2}]$", fontsize=18.)
    P.xlabel("$z$", fontsize=18.)
    P.xlim((0., 0.6))
    P.tight_layout()
    P.show()
    exit()
    # Comparison plot of dndz, bias, and fitting function
    # NOTE(review): unreachable — the exit() above always fires first.
    if DEBUG_PLOT:
        P.subplot(211)
        P.plot(_z, dndz_expt(_z))
        P.plot(_z, p[0] * _z**p[1] * np.exp(-p[2]*_z))
        P.subplot(212)
        P.plot(_z, bias_expt(_z))
        P.plot(_z, pb[0] * np.exp(pb[1]*_z))
        P.show()
|
philbullREPO_NAMERadioFisherPATH_START.@RadioFisher_extracted@RadioFisher-master@calculate_ska_nz_standalone.py@.PATH_END.py
|
{
"filename": "smooth_cal_inspect_2458159.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/smooth_cal_inspect/smooth_cal_inspect_2458159.ipynb",
"type": "Jupyter Notebook"
}
|
# Stage 2 Calibration Smoothing Nightly Notebook
**Josh Dillon**, Last Revised 12/4/20
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from hera_cal import io, redcal, apply_cal, abscal, utils
from hera_cal.smooth_cal import build_time_blacklist
from hera_qm.metrics_io import load_metric_file
import pyuvdata
import glob
import os
from copy import deepcopy
import inspect
import h5py
import matplotlib.cm as cm
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# lst_blacklist_string = '0-1.3 2.5-4.3 5.0-5.7 6.5-9.1 10.6-11.5 11.9-14.3 16.3-1.3'
# abscal_model_glob = '/lustre/aoc/projects/hera/zmartino/hera_calib_model/H3C/abscal_files_unique_baselines/zen.2458894.?????.uvh5'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
# os.environ["LST_BLACKLIST_STRING"] = lst_blacklist_string
# os.environ["ABSCAL_MODEL_GLOB"] = abscal_model_glob
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
lst_blacklist_string = os.environ['LST_BLACKLIST_STRING']
abscal_model_glob = os.environ['ABSCAL_MODEL_GLOB']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
print(f'lst_blacklist_string = "{lst_blacklist_string}"')
print(f'abscal_model_glob = "{abscal_model_glob}"')
```
JD = "2458159"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458159"
lst_blacklist_string = ""
abscal_model_glob = "/lustre/aoc/projects/hera/H1C_IDR3/abscal_model/zen.245804*.HH.uvRXLS.uvh5"
```python
print('Looking for data in', data_path, 'on JD', JD)
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.sum.uvh5')))
if len(data_list) == 0:
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.uvh5')))
print('...found {} data files.'.format(len(data_list)))
abscal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.abs.calfits')))
print('...found {} abscal files.'.format(len(abscal_list)))
smooth_cal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.sum.smooth_abs.calfits')))
print('...found {} smooth_cal files.'.format(len(smooth_cal_list)))
```
Looking for data in /lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458159 on JD 2458159
...found 73 data files.
...found 73 abscal files.
...found 73 smooth_cal files.
```python
# get all JDs and LSTs
_, _, file_lst_arrays, file_time_arrays = io.get_file_times(data_list)
# parse lst_blacklist_string
lst_blacklists = []
if len(lst_blacklist_string) > 0:
lst_blacklists = [tuple([float(arg) for arg in arg_pair.split('-', maxsplit=1)])
for arg_pair in lst_blacklist_string.split(' ')]
# get times that are blacklisted and reshape them like file_time_arrays
time_blacklisted_flat = build_time_blacklist(np.hstack(file_time_arrays), lst_blacklists=lst_blacklists)
time_blacklisted = [fta.astype(bool) for fta in file_time_arrays]
n = 0
for i in range(len(file_time_arrays)):
time_blacklisted[i] = np.zeros_like(time_blacklisted[i], dtype=bool)
for j in range(len(file_time_arrays[i])):
time_blacklisted[i][j] = time_blacklisted_flat[n]
n += 1
# pick the central time from among the not-LST blacklisted files, if possible
good_indices = [i for i, tb in enumerate(time_blacklisted) if not np.any(tb)]
if len(good_indices) > 0:
file_index = good_indices[len(good_indices)//2]
else:
file_index = len(data_list)//2
file_JD = '.'.join([s for s in data_list[file_index].split('.') if s.isdigit()])
```
/lustre/aoc/projects/hera/heramgr/anaconda2/envs/h1c_idr3/lib/python3.7/site-packages/numpy/core/_asarray.py:83: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return array(a, dtype, copy=False, order=order)
```python
# Load abscal gains
hca = io.HERACal(abscal_list[file_index])
ga, gaf, _, _ = hca.read()
# Get min_bl_cut, we only want to compare baselines actually used in absolute calibration
try:
min_bl_cut = float(hca.history.replace('\n','').split('--min_bl_cut')[-1].split('--')[0].strip())
except:
print('Could not find min_bl_cut, setting to 1 m.')
min_bl_cut = 1.0
# Load the most common redundant baseline longer than min_bl_cut
hd = io.HERAData(data_list[file_index])
bls_to_plot = []
for pol in ['ee', 'nn']:
reds = redcal.get_reds(hd.antpos, pols=[pol])
reds = sorted(reds, key=len, reverse=True)
bl_lens = np.array([np.linalg.norm(hd.antpos[red[0][1]] - hd.antpos[red[0][0]]) for red in reds])
try:
bl_group_to_plot = (np.array(reds)[bl_lens >= min_bl_cut])[0]
except:
bl_group_to_plot = reds[0]
bls_to_plot.extend(bl_group_to_plot)
# Load smooth_cal gains and determine ex_ants
hc = io.HERACal(smooth_cal_list[file_index])
gains, gain_flags, _, _ = hc.read()
ex_ants = [ant for ant in gain_flags if np.all(gain_flags[ant])]
# Load data and calibrate
data, flags, nsamples = hd.read(bls=bls_to_plot)
sc_data, sc_flags = deepcopy(data), deepcopy(flags)
ac_data, ac_flags = deepcopy(data), deepcopy(flags)
apply_cal.calibrate_in_place(sc_data, gains, data_flags=sc_flags, cal_flags=gain_flags)
apply_cal.calibrate_in_place(ac_data, ga, data_flags=ac_flags, cal_flags=gaf)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
plt.figure(figsize=(8,8))
plt.scatter(np.array(list(hd.antpos.values()))[:,0],
np.array(list(hd.antpos.values()))[:,1], c='w', s=0)
for ant,pos in hd.antpos.items():
bad = ant in [ant[0] for ant in ex_ants]
plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
fill=(~bad), color=['grey','r'][bad]))
plt.text(pos[0],pos[1],str(ant), va='center', ha='center', color='w')
plt.xlabel("Antenna East-West Position (meters)")
plt.ylabel("Antenna North-South Position (meters)")
plt.title('Antenna Positions on {} (Red = Flagged)'.format(file_JD));
plt.axis('equal')
plt.tight_layout()
plt.show()
```

### Figure 1: Array and Flagged Antennas
#### OBSERVER CHECKLIST:
* Check that the array configuration looks reasonable.
* Check that all flags expected to be flagged are actually flagged but also that not everything is getting flagged.
```python
# Check whether the model is redundant by inspecting the calibration history.
model_is_redundant = ('--model_is_redundant' in "".join(hc.history.split()))

# Find abscal model files that overlap in time with this data file.
abscal_matched_files = list(abscal.match_times(data_list[file_index],
                                               sorted(glob.glob(abscal_model_glob)),
                                               filetype='uvh5', atol=1e-5))
hdm = io.HERAData(abscal_matched_files)

# Work out which model baselines to load.
model_bls = hdm.bls
model_antpos = hdm.antpos
if isinstance(model_bls, dict):
    # Multiple files: take one file's baseline list and flatten antpos over files.
    model_bls = list(model_bls.values())[0]
    model_antpos = {ant: pos for antpos in hdm.antpos.values() for ant, pos in antpos.items()}
_, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(bls_to_plot, model_bls,
                                                                   hd.antpos, model_antpos=model_antpos,
                                                                   model_is_redundant=model_is_redundant)
model, model_flags, _ = hdm.read(bls=model_bl_to_load)

# Rephase the model at the time of best match to the mean LST of the data.
model_index = np.argmin(np.abs(model.lsts - np.mean(data.lsts)))
model_blvecs = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}
utils.lst_rephase(model, model_blvecs, model.freqs, np.mean(data.lsts) - model.lsts[model_index],
                  lat=hdm.telescope_location_lat_lon_alt_degrees[0], inplace=True)
if not model_is_redundant:
    model, _, _ = utils.red_average(model, flags=model_flags)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
import warnings

# Compare abscal- and smoothcal-calibrated data (redundant-group medians)
# against the abscal model, in amplitude and phase, per polarization.
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
    for pol in ['ee', 'nn']:
        for func, plot, ylabel in zip([np.abs, np.angle], [plt.semilogy, plt.plot],
                                      ['Amplitude (Jy)', 'Phase (Radians)']):
            plt.figure(figsize=(16, 4))
            for d, f, l, m in zip([ac_data, sc_data],
                                  [ac_flags, sc_flags],
                                  ['Abs Calibrated Data', 'Smooth Calibrated Data'],
                                  ['r-', 'b.']):
                to_avg = []
                for bl in [k for k in bls_to_plot if k[2] == pol]:
                    blvec = hd.antpos[bl[0]] - hd.antpos[bl[1]]
                    to_avg.append(deepcopy(d[bl]))
                    to_avg[-1][f[bl]] = np.nan + 1.0j * np.nan  # NaN-out flagged samples
                # Median real and imaginary parts separately over (baseline, time).
                to_plot = np.nanmedian(np.real(to_avg), axis=(0, 1)) + 1.0j * np.nanmedian(np.imag(to_avg), axis=(0, 1))
                plot(hd.freqs / 1e6, func(to_plot), m, label=l)
            for bl in [k for k in model if k[2] == pol]:
                plot(hd.freqs / 1e6, func(model[bl][model_index]), 'k-', label='Abscal Model')
            plt.xlabel('Frequency (MHz)')
            plt.ylabel(ylabel)
            plt.legend(loc='lower right')
            # NOTE: blvec is left over from the last baseline in the loop; all
            # baselines in a redundant group share (nearly) the same separation.
            plt.title('{}-Polarized, {:f} m East, {:f} m North Visibility on {}'.format(pol, blvec[0], blvec[1], file_JD))
```




### Figure 2: Example redundant baseline average, both absolute calibrated and smoothed, compared to the Abscal Model
#### OBSERVER CHECKLIST:
* Check that the abscaled data and the smoothcaled data are reasonably consistent
* Check that both match the abscal model fairly well.
# Load a whole day
```python
# Load relative difference and flagging info from smooth_cal gains
ant_flags_dict = {}
avg_rel_diff_ee_dict = {}
avg_rel_diff_nn_dict = {}
rel_diff_med_dict = {}
ants = set([])
for cal in smooth_cal_list:
hc = io.HERACal(cal)
_, flags, rel_diff, avg_rel_diff = hc.read()
ants |= set(flags.keys())
ant_flags_dict[cal] = {ant: np.all(flags[ant]) for ant in flags}
avg_rel_diff_ee_dict[cal] = avg_rel_diff['Jee']
avg_rel_diff_nn_dict[cal] = avg_rel_diff['Jnn']
rel_diff_med_dict[cal] = {ant: np.nanmedian(rel_diff[ant], axis=1) for ant in rel_diff}
all_flagged_dict = {ant: np.all([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
avg_rel_diff_ee = np.vstack(np.array(list(avg_rel_diff_ee_dict.values())))
avg_rel_diff_nn = np.vstack(np.array(list(avg_rel_diff_nn_dict.values())))
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
# save middle-numbered ants with a minimal number of flags
ants_to_save = {}
ant_to_nflags_dict = {ant: np.sum([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
for pol in ['Jee', 'Jnn']:
min_flags = np.min([ant_to_nflags_dict[ant] for ant in ants if ant[1] == pol])
ant_candidates = sorted([ant for ant in ants if ant_to_nflags_dict[ant] == min_flags and ant[1] == pol])
Nac = len(ant_candidates)
ants_to_save[pol] = ant_candidates[(Nac // 2 - 1):(Nac // 2 + 1)]
```
```python
# Load smooth_cal gains/flags
times_dict = {}
sc_gain_dict = {}
sc_flag_dict = {}
for cal in smooth_cal_list:
hc = io.HERACal(cal)
gains, flags, _, _ = hc.read()
times_dict[cal] = hc.times
sc_gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
sc_flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
# Load abscal gains/flags
ac_gain_dict = {}
ac_flag_dict = {}
for cal in abscal_list:
hc = io.HERACal(cal)
gains, flags, _, _ = hc.read()
ac_gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
ac_flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
# Organize gains/flags into grids
times = np.hstack(list(times_dict.values()))
lsts = 12 / np.pi * pyuvdata.utils.get_lst_for_time(times, *hd.telescope_location_lat_lon_alt_degrees)
sc_gains = {ant: np.vstack([sc_gain_dict[cal][ant] for cal in sc_gain_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
sc_flags = {ant: np.vstack([sc_flag_dict[cal][ant] for cal in sc_flag_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flag_mask = np.all([f for f in sc_flags.values()], axis=0)
ac_gains = {ant: np.vstack([ac_gain_dict[cal][ant] for cal in ac_gain_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
ac_flags = {ant: np.vstack([ac_flag_dict[cal][ant] for cal in ac_flag_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
```
# Inspect a whole day
```python
# for overplotting blacklisted LSTs
my_cmap = cm.binary
my_cmap.set_under('k', alpha=0)
blacklist = np.ones_like(avg_rel_diff_ee) * np.hstack(time_blacklisted)[:, np.newaxis]
```
You are modifying the state of a globally registered colormap. In future versions, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = copy.copy(mpl.cm.get_cmap("binary"))
```python
# Choose vmax so the 99th percentile of unflagged smoothcal gain amplitudes is not
# saturated (note: the code uses the 99th percentile, not 90% as previously stated).
vmax = np.max([np.percentile(np.abs(sc_gains[ants_to_save[pol][1]][~flag_mask]), 99) for pol in ['Jee', 'Jnn']])

# Gain-amplitude summary plots for one example antenna per polarization.
fig, axes = plt.subplots(4, 2, figsize=(16, 16), gridspec_kw={'height_ratios': [1, .25, .25, 1]})

# Top row: smoothcal amplitude waterfalls (dividing by ~flags NaNs-out flagged pixels).
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
    ant = ants_to_save[pol][1]
    extent = [hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6, times[-1], times[0]]
    im = ax.imshow(np.abs(sc_gains[ant]) / ~sc_flags[ant], aspect='auto', cmap='inferno',
                   interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
    ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title(f'Smoothcal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized')
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_xlim([hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6])
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)

# Second row: median gain-amplitude spectra over non-blacklisted, unflagged times.
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
    ant = ants_to_save[pol][1]
    # abscal
    to_med = deepcopy(np.abs(ac_gains[ant]))
    to_med[sc_flags[ant]] = np.nan
    if not np.all(np.hstack(time_blacklisted)):
        ax.plot(hd.freqs / 1e6, np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=0), 'r.', label='Abscal')
    # smooth_cal
    to_med = deepcopy(np.abs(sc_gains[ant]))
    to_med[sc_flags[ant]] = np.nan
    if not np.all(np.hstack(time_blacklisted)):
        ax.plot(hd.freqs / 1e6, np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=0), 'k.', ms=2, label='Smoothcal')
    ax.set_ylim([0, vmax])
    ax.set_xlim([hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6])
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('|g| (unitless)')
    ax.set_title(f'Median Non-Blacklisted or Flagged Gain Amplitude Spectrum of Antenna {ant[0]}: {pol[1:]}-polarized')
    ax.legend()

# Third row: median gain-amplitude time series over unflagged channels.
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
    ant = ants_to_save[pol][1]
    to_med = deepcopy(np.abs(ac_gains[ant]))
    to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan
    # abscal, split into non-blacklisted and blacklisted LSTs
    if not np.all(np.hstack(time_blacklisted)):
        ax.plot(lsts[~np.hstack(time_blacklisted)],
                np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=1),
                'b.', label='Abscal: Not Blacklisted LSTs')
    if np.any(np.hstack(time_blacklisted)):
        ax.plot(lsts[np.hstack(time_blacklisted)],
                np.nanmedian(to_med[np.hstack(time_blacklisted), :], axis=1),
                'r.', label='Abscal: Blacklisted LSTs')
    # smooth_cal
    to_med = deepcopy(np.abs(sc_gains[ant]))
    to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan
    ax.plot(lsts, np.nanmedian(to_med, axis=1), 'k.', ms=2, label='Smoothcal')
    ax.set_ylim([0, vmax])
    ax.set_xlabel('LST (hours)')
    ax.set_ylabel('|g| (unitless)')
    ax.set_title(f'Median Over Unflagged Channels Gain Amplitude Time-Series of Antenna {ant[0]}: {pol[1:]}-polarized')
    ax.legend()

# Bottom row: abscal amplitude waterfalls with smoothcal flags applied.
for ax, pol in zip(axes[3], ['Jee', 'Jnn']):
    ant = ants_to_save[pol][1]
    extent = [hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6, times[-1], times[0]]
    im = ax.imshow(np.abs(ac_gains[ant]) / ~sc_flags[ant], aspect='auto', cmap='inferno',
                   interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
    ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title(f'Flagged Abscal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized')
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_xlim([hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6])
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
plt.tight_layout()
```
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 3: Example Smoothing of Gain Amplitudes
Smoothcal (top row) and Abscal (bottom row) gain amplitudes for an example antenna. In the waterfalls, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot median non-blacklisted amplitudes as a function of frequency (second row) and the median amplitude as a function of time (third row) for both abscal and smoothcal.
#### OBSERVER CHECKLIST:
* Check that the smoothcal solution matches the abscal solution reasonably well in the non-blacklisted regions.
* Check to see that the overall bandpass looks reasonable
```python
# Gain-phase summary plots for an example antenna pair per polarization.
fig, axes = plt.subplots(4, 2, figsize=(16, 16), gridspec_kw={'height_ratios': [1, .25, .25, 1]})

# Top row: smoothcal phase waterfalls (dividing by ~flags NaNs-out flagged pixels).
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
    ant0, ant1 = ants_to_save[pol]
    extent = [hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6, times[-1], times[0]]
    im = ax.imshow(np.angle(sc_gains[ant0] / sc_gains[ant1]) / ~sc_flags[ant0], aspect='auto', cmap='inferno',
                   interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
    ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title(f'Smoothcal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_xlim([hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6])
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)

# Second row: median gain-phase spectra (flagged and blacklisted times excluded).
# Medians are taken on real and imaginary parts separately, then recombined.
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
    ant0, ant1 = ants_to_save[pol]
    # abscal
    to_med = deepcopy(ac_gains[ant0] / ac_gains[ant1])
    to_med[sc_flags[ant0]] = np.nan + 1.0j * np.nan
    if not np.all(np.hstack(time_blacklisted)):
        med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=0)
        med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=0)
        ax.plot(hd.freqs / 1e6, np.angle(med), 'r.', label='Abscal')
    # smooth_cal
    to_med = deepcopy(sc_gains[ant0] / sc_gains[ant1])
    to_med[sc_flags[ant0]] = np.nan + 1.0j * np.nan
    if not np.all(np.hstack(time_blacklisted)):
        med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=0)
        med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=0)
        ax.plot(hd.freqs / 1e6, np.angle(med), 'k.', ms=2, label='Smoothcal')
    ax.set_ylim([-np.pi, np.pi])
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel(f'Phase of g$_{ant0[0]}$ / g$_{ant1[0]}$')
    ax.set_title(f'Median Non-Blacklisted or Flagged Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
    ax.legend()

# Third row: median gain-phase time series.
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
    # BUGFIX: this loop previously reused `ant0`/`ant1` left over from the loop
    # above (always the last polarization) and computed an amplitude `to_med`
    # that was immediately overwritten; set the pair per-pol and drop the dead code.
    ant0, ant1 = ants_to_save[pol]
    # abscal
    to_med = deepcopy(ac_gains[ant0] / ac_gains[ant1])
    to_med[:, np.all(sc_flags[ant0], axis=0)] = np.nan + 1.0j * np.nan
    if not np.all(np.hstack(time_blacklisted)):
        med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=1)
        med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=1)
        ax.plot(lsts[~np.hstack(time_blacklisted)], np.angle(med), 'b.', label='Abscal: Not Blacklisted LSTs')
    if np.any(np.hstack(time_blacklisted)):
        med = 1.0j * np.nanmedian(to_med[np.hstack(time_blacklisted), :].imag, axis=1)
        med += np.nanmedian(to_med[np.hstack(time_blacklisted), :].real, axis=1)
        ax.plot(lsts[np.hstack(time_blacklisted)], np.angle(med), 'r.', label='Abscal: Blacklisted LSTs')
    # smooth_cal
    to_med = deepcopy(sc_gains[ant0] / sc_gains[ant1])
    to_med[:, np.all(sc_flags[ant0], axis=0)] = np.nan + 1.0j * np.nan
    med = 1.0j * np.nanmedian(to_med.imag, axis=1) + np.nanmedian(to_med.real, axis=1)
    ax.plot(lsts, np.angle(med), 'k.', ms=2, label='Smoothcal')
    ax.set_ylim([-np.pi, np.pi])
    ax.set_xlabel('LST (hours)')
    ax.set_ylabel(f'Phase of g$_{ant0[0]}$ / g$_{ant1[0]}$')
    ax.set_title(f'Median Non-Blacklisted or Flagged Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
    ax.legend()

# Bottom row: flagged abscal phase waterfalls.
for ax, pol in zip(axes[3], ['Jee', 'Jnn']):
    ant0, ant1 = ants_to_save[pol]
    extent = [hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6, times[-1], times[0]]
    # BUGFIX: mask with this pair's flags (sc_flags[ant0]); previously used the
    # stale single-antenna variable `ant` from an earlier cell.
    im = ax.imshow(np.angle(ac_gains[ant0] / ac_gains[ant1]) / ~sc_flags[ant0], aspect='auto', cmap='inferno',
                   interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
    ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title(f'Flagged Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_xlim([hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6])
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
plt.tight_layout()
```
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
divide by zero encountered in true_divide
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 4: Example Smoothing of Gain Phases
Smoothcal (top row) and Abscal (bottom row) gain phases for an example antenna. In the waterfalls, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot median non-blacklisted phases as a function of frequency (second row) and the median phases as a function of time (third row) for both abscal and smoothcal.
#### OBSERVER CHECKLIST:
* Check that the smoothcal solution matches the abscal solution reasonably well in the non-blacklisted regions.
* Check to see that the final gain solution is reasonably approximated by a single time-independent delay (linear phase ramp in row 2).
```python
# Waterfalls of the relative difference between smoothcal and abscal gains.
fig, axes = plt.subplots(1, 2, figsize=(20, 12))
for ax, rd, t in zip(axes, [avg_rel_diff_ee, avg_rel_diff_nn], ['ee-polarized', 'nn-polarized']):
    extent = [hd.freqs[0] / 1e6, hd.freqs[-1] / 1e6, times[-1], times[0]]
    # NOTE(review): `sc_flags[ant0]` relies on `ant0` left over from earlier cells,
    # so one antenna's flags mask an antenna-averaged quantity -- confirm intended.
    im = ax.imshow(rd / ~sc_flags[ant0], aspect='auto', vmin=0, cmap='inferno', vmax=.2, interpolation='nearest', extent=extent)
    ax.imshow(blacklist, aspect='auto',
              cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
    ax.set_title('Relative Difference Between Smoothcal and Abscal: ' + t)
    ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel('LST (Hours)')
    ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
    plt.colorbar(im, ax=ax, label='$|g_{smooth} - g_{abs}| / |g_{abs}|$ (unitless)')
```
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 5: Relative difference between Abscal and Smoothcal
Where omnical calfits files store $\chi^2$ per antenna, smooth_cal calfits files store the relative difference between Abscal and Smoothcal gains. This difference is done before taking the absolute value, so this metric is sensitive both to phase errors and amplitude errors.
#### OBSERVER CHECKLIST:
* Look for regions of high relative difference that are not blacklisted. This would indicate a problem with smoothing.
# Metadata
```python
print(redcal.version.history_string())
```
------------
This file was produced by the function <module>() in <ipython-input-1-c6de44361328> using:
git_branch: master
git_description: v3.0-733-gd2dd8ccf
git_hash: d2dd8ccf3fe43d5e5eb6a4c28ceaf4a6e3d1fcb7
git_origin: git@github.com:HERA-Team/hera_cal.git
version: 3.0
------------
```python
```
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@smooth_cal_inspect@smooth_cal_inspect_2458159.ipynb@.PATH_END.py
|
{
"filename": "ctr__desc__ctrbordertype__supported-cpu.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage/ctr__desc__ctrbordertype__supported-cpu.md",
"type": "Markdown"
}
|
Supported values for training on CPU:
- Uniform
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage@ctr__desc__ctrbordertype__supported-cpu.md@.PATH_END.py
|
{
"filename": "ISSUE_TEMPLATE.md",
"repo_name": "ytree-project/ytree",
"repo_path": "ytree_extracted/ytree-main/.github/ISSUE_TEMPLATE.md",
"type": "Markdown"
}
|
<!--To help us understand and resolve your issue, please fill out the form to
the best of your ability.-->
<!--You can feel free to delete the sections that do not apply.-->
### Bug report
**Bug summary**
<!--A short 1-2 sentences that succinctly describes the bug-->
**Code for reproduction**
<!--A minimum code snippet required to reproduce the bug, also minimizing the
number of dependencies required.-->
<!-- If you need to use a data file to trigger the issue you're having, consider
using one of the sample datasets from the
ytree collection (https://girder.hub.yt/#collection/59835a1ee2a67400016a2cda)
on the yt hub (https://girder.hub.yt/). If your issue cannot be triggered using
a public dataset, you can use the yt curldrop
(https://docs.hub.yt/services.html#curldrop) to share data files. Please include
a link to the dataset in the issue if you use the curldrop.-->
```python
# Paste your code here
#
#
```
**Actual outcome**
<!--The output produced by the above code, which may be a screenshot, console
output, etc.-->
```
# If applicable, paste the console output here
#
#
```
**Expected outcome**
<!--A description of the expected outcome from the code snippet-->
<!--If this used to work in an earlier version of ytree, please note the
version it used to work on-->
**Version Information**
<!--Please specify your platform and versions of the relevant libraries you are
using:-->
* Operating System:
* Python Version:
* ytree version:
* Other Libraries (if applicable):
|
ytree-projectREPO_NAMEytreePATH_START.@ytree_extracted@ytree-main@.github@ISSUE_TEMPLATE.md@.PATH_END.py
|
{
"filename": "_title.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/heatmap/colorbar/_title.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
    """Validator for the `title` property of `heatmap.colorbar`."""

    def __init__(self, plotly_name="title", parent_name="heatmap.colorbar", **kwargs):
        # Pop overridable defaults out of kwargs before delegating to the base class.
        data_class_str = kwargs.pop("data_class_str", "Title")
        data_docs = kwargs.pop(
            "data_docs",
            """
            font
                Sets this color bar's title font.
            side
                Determines the location of color bar's title
                with respect to the color bar. Defaults to
                "top" when `orientation` if "v" and defaults
                to "right" when `orientation` if "h".
            text
                Sets the title of the color bar.
""",
        )
        super(TitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@heatmap@colorbar@_title.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/utilities/__init__.py",
"type": "Python"
}
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@utilities@__init__.py@.PATH_END.py
|
|
{
"filename": "oldcylinder.py",
"repo_name": "TianlaiProject/tlpipe",
"repo_path": "tlpipe_extracted/tlpipe-master/tlpipe/map/drift/telescope/oldcylinder.py",
"type": "Python"
}
|
import numpy as np
from caput import config
from cora.util import coord
from drift.core import telescope, visibility
class CylinderTelescope(telescope.TransitTelescope):
    """Common functionality for all Cylinder Telescopes.

    Attributes
    ----------
    num_cylinders : integer
        The number of cylinders.
    num_feeds : integer
        Number of regularly spaced feeds along each cylinder.
    cylinder_width : scalar
        Width in metres.
    feed_spacing : scalar
        Gap between feeds in metres.
    in_cylinder : boolean
        Include in cylinder correlations?
    touching : boolean
        Are the cylinders touching (no spacing between them)?
    cylspacing : scalar
        If not `touching` this is the spacing in metres.
    """

    num_cylinders = config.Property(proptype=int, default=2)
    num_feeds = config.Property(proptype=int, default=6)

    cylinder_width = config.Property(proptype=float, default=20.0)
    feed_spacing = config.Property(proptype=float, default=0.5)

    in_cylinder = config.Property(proptype=bool, default=True)
    touching = config.Property(proptype=bool, default=True)
    cylspacing = config.Property(proptype=float, default=0.0)

    non_commensurate = config.Property(proptype=bool, default=False)

    ## u-width property override
    @property
    def u_width(self):
        return self.cylinder_width

    ## v-width property override
    @property
    def v_width(self):
        return 0.0

    def _unique_baselines(self):
        """Calculate the unique baseline pairs.

        Pairs are considered identical if they have the same baseline
        separation.

        Returns
        -------
        baselines : np.ndarray
            An array of all the unique pairs. Packed as [[i1, i2, ...], [j1, j2, ...]].
        redundancy : np.ndarray
            For each unique pair, give the number of equivalent pairs.
        """
        base_map, base_mask = super(CylinderTelescope, self)._unique_baselines()

        if not self.in_cylinder:
            # Construct array of indices over all feed pairs.
            fshape = [self.nfeed, self.nfeed]
            f_ind = np.indices(fshape)

            # Baseline separation for each feed pair.
            bl1 = (self.feedpositions[f_ind[0]] - self.feedpositions[f_ind[1]])

            # Keep only cross-cylinder baselines (nonzero east-west separation).
            # BUGFIX: `np.bool` is a long-deprecated alias of the builtin `bool`
            # and was removed in NumPy 1.24; use the builtin instead.
            ic_mask = np.where(bl1[..., 0] != 0.0, np.ones(fshape, dtype=bool), np.zeros(fshape, dtype=bool))

            base_mask = np.logical_and(base_mask, ic_mask)
            base_map = telescope._remap_keyarray(base_map, base_mask)

        return base_map, base_mask

    @property
    def _single_feedpositions(self):
        """The set of feed positions on *all* cylinders.

        Returns
        -------
        feedpositions : np.ndarray
            The positions in the telescope plane of the receivers. Packed as
            [[u1, v1], [u2, v2], ...].
        """
        fplist = [self.feed_positions_cylinder(i) for i in range(self.num_cylinders)]
        return np.vstack(fplist)

    @property
    def cylinder_spacing(self):
        # Centre-to-centre cylinder spacing in metres.
        if self.touching:
            return self.cylinder_width
        else:
            # NOTE(review): `cylspacing` defaults to 0.0, so this None check can
            # only trigger if it is explicitly set to None -- confirm intent.
            if self.cylspacing is None:
                raise Exception("Need to set cylinder spacing if not touching.")
            return self.cylspacing

    def feed_positions_cylinder(self, cylinder_index):
        """Get the feed positions on the specified cylinder.

        Parameters
        ----------
        cylinder_index : integer
            The cylinder index, an integer from 0 to self.num_cylinders.

        Returns
        -------
        feed_positions : np.ndarray
            The positions in the telescope plane of the receivers. Packed as
            [[u1, v1], [u2, v2], ...].
        """
        if cylinder_index >= self.num_cylinders or cylinder_index < 0:
            raise Exception("Cylinder index is invalid.")

        nf = self.num_feeds
        sp = self.feed_spacing

        if self.non_commensurate:
            # Drop one feed per cylinder and stretch the spacing to keep the
            # total aperture length the same across cylinders.
            nf = self.num_feeds - cylinder_index
            sp = self.feed_spacing / (nf - 1.0) * nf

        pos = np.empty([nf, 2], dtype=np.float64)
        pos[:, 0] = cylinder_index * self.cylinder_spacing
        pos[:, 1] = np.arange(nf) * sp

        return pos
class UnpolarisedCylinderTelescope(CylinderTelescope, telescope.SimpleUnpolarisedTelescope):
    """A complete class for an unpolarised cylinder telescope."""

    def beam(self, feed, freq):
        """Primary beam for a particular feed.

        Parameters
        ----------
        feed : integer
            Index for the feed.
        freq : integer
            Index for the frequency.

        Returns
        -------
        beam : np.ndarray
            A Healpix map (of size self._nside) of the beam. Potentially
            complex.
        """
        # Cylinder width in units of the observing wavelength.
        width_in_wavelengths = self.cylinder_width / self.wavelengths[freq]
        return visibility.cylinder_beam(self._angpos, self.zenith, width_in_wavelengths)
class PolarisedCylinderTelescope(CylinderTelescope, telescope.SimplePolarisedTelescope):
    """A complete class for a polarised cylinder telescope."""

    # Fractional illumination of the cylinder width for the X and Y feeds.
    illumination_x = config.Property(proptype=float, default=1.0)
    illumination_y = config.Property(proptype=float, default=1.0)

    ortho_pol = config.Property(proptype=bool, default=True)

    # @util.cache_last
    def beamx(self, feed, freq):
        """Beam pattern for the X-polarised feed at the given frequency index."""
        effective_width = self.illumination_x * self.cylinder_width / self.wavelengths[freq]
        bpat = visibility.cylinder_beam(self._angpos, self.zenith, effective_width)

        bm = np.zeros_like(self._angpos)
        if self.ortho_pol:
            # Idealised orthogonal polarisation: response purely along phi.
            bm[:, 1] = bpat
        else:
            # Project the zenith phi-direction onto the local theta/phi basis.
            thatz, phatz = coord.thetaphi_plane_cart(self.zenith)
            thatp, phatp = coord.thetaphi_plane_cart(self._angpos)
            bm[:, 0] = np.dot(thatp, phatz) * bpat
            bm[:, 1] = np.dot(phatp, phatz) * bpat

        return bm

    # @util.cache_last
    def beamy(self, feed, freq):
        """Beam pattern for the Y-polarised feed at the given frequency index."""
        effective_width = self.illumination_y * self.cylinder_width / self.wavelengths[freq]
        bpat = visibility.cylinder_beam(self._angpos, self.zenith, effective_width)

        bm = np.zeros_like(self._angpos)
        if self.ortho_pol:
            # Idealised orthogonal polarisation: response purely along theta.
            bm[:, 0] = bpat
        else:
            # Project the zenith theta-direction onto the local theta/phi basis.
            thatz, phatz = coord.thetaphi_plane_cart(self.zenith)
            thatp, phatp = coord.thetaphi_plane_cart(self._angpos)
            bm[:, 0] = np.dot(thatp, thatz) * bpat
            bm[:, 1] = np.dot(phatp, thatz) * bpat

        return bm
|
TianlaiProjectREPO_NAMEtlpipePATH_START.@tlpipe_extracted@tlpipe-master@tlpipe@map@drift@telescope@oldcylinder.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/funnelarea/title/__init__.py",
"type": "Python"
}
|
import sys

# Python < 3.7 lacks module-level __getattr__ (PEP 562), so import the
# validators eagerly there; otherwise defer them to first attribute access.
if sys.version_info < (3, 7):
    from ._text import TextValidator
    from ._position import PositionValidator
    from ._font import FontValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._text.TextValidator",
            "._position.PositionValidator",
            "._font.FontValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@funnelarea@title@__init__.py@.PATH_END.py
|
{
"filename": "_mypath.py",
"repo_name": "LSSTDESC/chroma",
"repo_path": "chroma_extracted/chroma-master/bin/simulations/_mypath.py",
"type": "Python"
}
|
import os, sys

# Make the repository root (two directories above this script) importable.
thisdir = os.path.dirname(os.path.abspath(__file__))
libdir = os.path.abspath(os.path.join(thisdir, os.pardir, os.pardir))
if libdir not in sys.path:
    sys.path.insert(0, libdir)
|
LSSTDESCREPO_NAMEchromaPATH_START.@chroma_extracted@chroma-master@bin@simulations@_mypath.py@.PATH_END.py
|
{
"filename": "write_mock_to_disk_singlemet.py",
"repo_name": "LSSTDESC/lsstdesc-diffsky",
"repo_path": "lsstdesc-diffsky_extracted/lsstdesc-diffsky-main/lsstdesc_diffsky/write_mock_to_disk_singlemet.py",
"type": "Python"
}
|
"""
write_mock_to_disk.py
=====================
Main module for production of mock galaxy catalogs for LSST DESC.
"""
import gc
import os
import re
import threading
import warnings
from time import time
import h5py
import numpy as np
import psutil
from astropy.cosmology import WMAP7, FlatLambdaCDM
from astropy.table import Table, vstack
from astropy.utils.misc import NumpyRNGContext
from diffstar.defaults import FB
from diffstar.sfh import sfh_galpop
from dsps.metallicity.mzr import DEFAULT_MET_PDICT
from galsampler import crossmatch
from galsampler.galmatch import galsample
from halotools.empirical_models import halo_mass_to_halo_radius
# Galsampler
from halotools.utils.value_added_halo_table_functions import compute_uber_hostid
from halotools.utils.vector_utilities import normalized_vectors
from jax import random as jran
from .black_hole_modeling.black_hole_accretion_rate import monte_carlo_bh_acc_rate
from .black_hole_modeling.black_hole_mass import monte_carlo_black_hole_mass
from .defaults import CosmoParams
from .diffstarpop.mc_diffstar import mc_diffstarpop
from .dspspop.boris_dust import DEFAULT_U_PARAMS as DEFAULT_BORIS_U_PARAMS
from .dspspop.burstshapepop import DEFAULT_BURSTSHAPE_U_PARAMS
from .dspspop.dust_deltapop import DEFAULT_DUST_DELTA_U_PARAMS
from .dspspop.lgavpop import DEFAULT_LGAV_U_PARAMS
from .dspspop.lgfburstpop import DEFAULT_LGFBURST_U_PARAMS
from .ellipticity_modeling.ellipticity_model import monte_carlo_ellipticity_bulge_disk
# Halo shapes
from .halo_information.get_fof_halo_shapes import get_halo_shapes, get_matched_shapes
# Synthetics
from .halo_information.get_healpix_cutout_info import get_snap_redshift_min
# SED generation
from .legacy.roman_rubin_2023.dsps.data_loaders.load_ssp_data import (
load_ssp_templates_singlemet,
)
from .param_data.param_reader import DiffskyPopParams
from .pecZ import pecZ
from .photometry.get_SFH_from_params import (
get_diff_params,
get_log_safe_ssfr,
get_logsm_sfr_obs,
)
from .photometry.load_filter_data import assemble_filter_data, get_filter_wave_trans
from .photometry.photometry_lc_interp_singlemet import get_diffsky_sed_info_singlemet
from .photometry.precompute_ssp_tables import (
precompute_ssp_obsmags_on_z_table_singlemet,
precompute_ssp_restmags_singlemet,
)
# Additional catalog properties
from .size_modeling.zhang_yang17 import (
mc_size_vs_luminosity_early_type,
mc_size_vs_luminosity_late_type,
)
from .synthetic_subhalos.extend_subhalo_mpeak_range import (
create_synthetic_lowmass_mock_with_centrals,
map_mstar_onto_lowmass_extension,
)
from .synthetic_subhalos.synthetic_cluster_satellites import (
model_synthetic_cluster_satellites,
)
from .synthetic_subhalos.synthetic_lowmass_subhalos import synthetic_logmpeak
from .triaxial_satellite_distributions.axis_ratio_model import monte_carlo_halo_shapes
from .triaxial_satellite_distributions.monte_carlo_triaxial_profile import (
generate_triaxial_satellite_distribution,
)
# metadata
from .infer_diffcode_versions import infer_software_versions
fof_halo_mass = "fof_halo_mass"
# fof halo mass in healpix cutouts
fof_mass = "fof_mass"
mass = "mass"
fof_max = 14.5
sod_mass = "sod_mass"
m_particle_1000 = 1.85e12
Nside_sky = 2048 # fine pixelization for determining sky area
# halo id offsets
# offset to generate unique id for cutouts and snapshots
cutout_id_offset_halo = int(1e3)
# offset to guarantee unique halo ids across cutout files and snapshots
halo_id_offset = int(1e8)
# galaxy id offsets for non image-sim catalogs (eg. 5000 sq. deg.)
cutout_id_offset = int(1e9)
z_offsets_not_im = {"32": [0, 1e8, 2e8, 3e8]}
# constants to determine synthetic number density
Ntotal_synthetics = 1932058570 # total number of synthetic galaxies in cosmoDC2_image
nhpx_total = float(131) # number of healpixels in image area
snapshot_min = 121
# specify edges of octant
volume_minx = 0.0
volume_miny = 0.0
volume_maxz = 0.0
__all__ = (
"write_umachine_healpix_mock_to_disk",
"build_output_snapshot_mock",
"write_output_mock_to_disk",
)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def write_umachine_healpix_mock_to_disk(
    umachine_mstar_ssfr_mock_fname_list,
    healpix_data,
    snapshots,
    output_color_mock_fname,
    shape_dir,
    redshift_list,
    commit_hash,
    SED_pars=None,
    cosmological_params=None,
    synthetic_params=None,
    shear_params=None,
    versionMajor=0,
    versionMinor=1,
    versionMinorMinor=0,
    Nside=32,
    mstar_min=7e6,
    z2ts=None,
    Lbox=3000.0,
    num_synthetic_gal_ratio=1.0,
    mass_match_noise=0.1,
):
    """
    GalSample the UM mock into the lightcone healpix cutout,
    compute the SEDs using DSPS and
    write the healpix mock to disk.
    Parameters
    ----------
    umachine_mstar_ssfr_mock_fname_list : list
        List of length num_snaps storing the absolute path to the
        value-added UniverseMachine snapshot mock
    healpix_data : <HDF5 file>
        Pointer to open hdf5 file for the lightcone healpix cutout
        source halos into which UniverseMachine will be GalSampled
    snapshots : list
        List of snapshots in lightcone healpix cutout
    output_color_mock_fname : string
        Absolute path to the output healpix mock
    shape_dir: string
        Directory storing files with halo-shape information
    redshift_list : list
        List of length num_snaps storing the value of the redshifts
        in the target halo lightcone cutout
    commit_hash : string
        Commit hash of the version of the cosmodc2 repo used when
        calling this function.
        After updating the code repo to the desired version,
        the commit_hash can be determined by navigating to the root
        directory and typing ``git log --pretty=format:'%h' -n 1``
    cosmological_params: dict
        Required; must supply values for H0, OmegaM, OmegaB, w0 and wa
    synthetic_params: dict contains values for
        skip_synthetics: boolean
            Flag to control if ultra-faint synthetics are added to mock
        synthetic_halo_minimum_mass: float
            Minimum value of log_10 of synthetic halo mass
        num_synthetic_gal_ratio: float
            Ratio to control number of synthetic galaxies generated
        randomize_redshift_synthetic: boolean
            Flag to control if noise is added to redshifts in UM snapshot
    SED_pars: dict containing values for SED choices read in from configuration file
        values supplied by calling script
    shear_params: dict containing values for shear choices
        add_dummy_shears: boolean
    mstar_min: stellar mass cut for synthetic galaxies (not used in image simulations)
    mass_match_noise: noise added to log of source halo masses to randomize the match
        to target halos
    z2ts: dict mapping redshift (as string key) to snapshot number
    Nside: int
        Healpix Nside of the cutout files (selects the galaxy-id offsets)
    Lbox: float
        Simulation box size (passed through; unused in this function)
    num_synthetic_gal_ratio: float
        Ratio to control number of synthetic galaxies generated
    versionMajor: int
        major version number
    versionMinor: int
        minor version number
    versionMinorMinor: int
        minor.minor version number
    Returns
    -------
    None
    """
    from .constants import SED_params_singlemet as SED_params

    # Normalize mutable-default arguments; the former ``={}`` defaults shared
    # one dict object across calls (classic mutable-default pitfall).
    SED_pars = {} if SED_pars is None else SED_pars
    cosmological_params = {} if cosmological_params is None else cosmological_params
    synthetic_params = {} if synthetic_params is None else synthetic_params
    shear_params = {} if shear_params is None else shear_params
    z2ts = {} if z2ts is None else z2ts
    # NOTE(review): SED_params is a module-level dict from .constants and is
    # mutated in place below, so settings persist across calls — confirm this
    # is intentional before changing.
    output_mock = {}
    gen = zip(umachine_mstar_ssfr_mock_fname_list, redshift_list, snapshots)
    start_time = time()
    process = psutil.Process(os.getpid())
    assert len(cosmological_params) > 0, "No cosmology parameters supplied"
    H0 = cosmological_params["H0"]
    OmegaM = cosmological_params["OmegaM"]
    OmegaB = cosmological_params["OmegaB"]
    w0 = cosmological_params["w0"]
    wa = cosmological_params["wa"]
    print(
        "Cosmology Parameters:\n",
        "H0: {:.2g}, OmegaM: {:.3g}, OmegaB: {:.3g}\n".format(H0, OmegaM, OmegaB),
        "w0: {:.1g} wa: {:.1g}".format(w0, wa),
    )
    cosmology = FlatLambdaCDM(H0=H0, Om0=OmegaM, Ob0=OmegaB)
    # determine number of healpix cutout to use as offset for galaxy ids
    output_mock_basename = os.path.basename(output_color_mock_fname)
    file_ids = [
        int(d) for d in re.findall(r"\d+", os.path.splitext(output_mock_basename)[0])
    ]
    cutout_number_true = file_ids[-1]
    z_range_id = file_ids[-3]  # 3rd-last digits in filename
    cutout_number = cutout_number_true  # used for output
    galaxy_id_offset = int(
        cutout_number_true * cutout_id_offset + z_offsets_not_im[str(Nside)][z_range_id]
    )
    halo_id_cutout_offset = int(cutout_number_true * cutout_id_offset_halo)
    # determine seed from output filename
    seed = get_random_seed(output_mock_basename)
    # determine maximum redshift and volume covered by catalog
    redshift_max = [float(k) for k, v in z2ts.items() if int(v) == snapshot_min][0]
    Vtotal = cosmology.comoving_volume(redshift_max).value
    # determine total number of synthetic galaxies for arbitrary healpixel for
    # full z range
    synthetic_number = int(Ntotal_synthetics / nhpx_total)
    # number for healpixels straddling the edge of the octant will be adjusted later
    # initialize previous redshift for computing synthetic galaxy distributions
    previous_redshift = get_snap_redshift_min(z2ts, snapshots)
    # initialize book-keeping variables
    fof_halo_mass_max = 0.0
    Ngals_total = 0
    print("\nStarting snapshot processing")
    print("Using initial seed = {}".format(seed))
    print("Using nside = {}".format(Nside))
    print("Maximum redshift for catalog = {}".format(redshift_max))
    print("Minimum redshift for catalog = {}".format(previous_redshift))
    print(
        "Writing catalog version number {}.{}.{}".format(
            versionMajor, versionMinor, versionMinorMinor
        )
    )
    print("\nUsing halo-id offset = {}".format(halo_id_offset))
    print(
        "Using galaxy-id offset = {} for cutout number {}".format(
            galaxy_id_offset, cutout_number_true
        )
    )
    if synthetic_params and not synthetic_params["skip_synthetics"]:
        synthetic_halo_minimum_mass = synthetic_params["synthetic_halo_minimum_mass"]
        synthetic_number = synthetic_params["synthetic_number"]
        randomize_redshift_synthetic = synthetic_params["randomize_redshift_synthetic"]
        print("Synthetic-halo minimum mass = {}".format(synthetic_halo_minimum_mass))
        print("Number of synthetic ultra-faint galaxies = {}".format(synthetic_number))
        print("Randomize synthetic redshifts = {}".format(randomize_redshift_synthetic))
    else:
        print("Not adding synthetic galaxies")
    # initialize SED parameters
    if SED_pars:
        # save any supplied parameters from function call
        for k, v in SED_pars.items():
            SED_params[k] = v
    T0 = cosmology.age(SED_params["z0"]).value
    SED_params["LGT0"] = np.log10(T0)
    print(
        "\nUsing SED parameters:\n...{}".format(
            ",\n...".join([": ".join([k, str(v)]) for k, v in SED_params.items()])
        )
    )
    dsps_data_DRN = SED_params["dsps_data_dirname"]
    dsps_data_fn = SED_params["dsps_data_filename"]
    # get ssp_wave, ssp_flux, lg_age_gyr
    ssp_data_singlemet = load_ssp_templates_singlemet(
        os.path.join(dsps_data_DRN, dsps_data_fn)
    )
    # enforce that single-metallicity SSPs were in fact loaded
    assert "ssp_lgmet" not in ssp_data_singlemet._fields
    ssp_wave = ssp_data_singlemet.ssp_wave
    ssp_flux = ssp_data_singlemet.ssp_flux
    filter_data = assemble_filter_data(dsps_data_DRN, SED_params["filters"])
    filter_waves, filter_trans, filter_keys = get_filter_wave_trans(filter_data)
    print(
        "\nUsing filters and bands: {} ({} bands)".format(
            ", ".join(filter_keys), len(filter_keys)
        )
    )
    # generate precomputed ssp tables
    min_snap = 0 if len(healpix_data[snapshots[0]]["a"]) > 0 else 1
    zmin = 1.0 / np.max(healpix_data[snapshots[min_snap]]["a"][()]) - 1.0
    zmax = 1.0 / np.min(healpix_data[snapshots[-1]]["a"][()]) - 1.0
    dz = float(SED_params["dz"])
    z_min = zmin - dz if (zmin - dz) > 0 else zmin / 2  # ensure z_min is > 0
    z_max = zmax + dz
    n_z_table = int(np.ceil((z_max - z_min) / dz))
    ssp_z_table = np.linspace(z_min, z_max, n_z_table)
    msg = "\nComputing ssp tables for {} z values: {:.2f} < z < {:.2f} (dz={:.2f})\n"
    print(msg.format(n_z_table, z_min, z_max, dz))
    ssp_restmag_table = precompute_ssp_restmags_singlemet(
        ssp_wave, ssp_flux, filter_waves, filter_trans
    )
    ssp_obsmag_table = precompute_ssp_obsmags_on_z_table_singlemet(
        ssp_wave,
        ssp_flux,
        filter_waves,
        filter_trans,
        ssp_z_table,
        OmegaM,
        cosmological_params["w0"],
        cosmological_params["wa"],
        H0 / 100.0,
    )
    # save in SED_params for passing to other modules
    SED_params["ssp_z_table"] = ssp_z_table
    SED_params["ssp_lg_age_gyr"] = ssp_data_singlemet.ssp_lg_age_gyr
    SED_params["ssp_restmag_table"] = ssp_restmag_table
    SED_params["ssp_obsmag_table"] = ssp_obsmag_table
    SED_params["filter_keys"] = filter_keys
    SED_params["filter_waves"] = filter_waves
    SED_params["filter_trans"] = filter_trans
    for k in SED_params["xkeys"]:
        dims = (
            SED_params[k].shape
            if not isinstance(SED_params[k], list)
            else len(SED_params[k])
        )
        print("...Saving {} to SED_params with shape: {}".format(k, dims))
    default_list = [k for k in SED_params.values() if type(k) is str and "default" in k]
    for k in default_list:  # placeholder for better code
        SED_params["lgfburst_pop_u_params"] = DEFAULT_LGFBURST_U_PARAMS
        SED_params["burstshapepop_u_params"] = DEFAULT_BURSTSHAPE_U_PARAMS
        SED_params["lgav_dust_u_params"] = DEFAULT_LGAV_U_PARAMS
        SED_params["dust_delta_u_params"] = DEFAULT_DUST_DELTA_U_PARAMS
        SED_params["fracuno_pop_u_params"] = DEFAULT_BORIS_U_PARAMS
        SED_params["lgmet_params"] = list(DEFAULT_MET_PDICT.values())
    roman_rubin_list = [
        k.split("roman_rubin_2023/")[1]
        for k in SED_params.values()
        if type(k) is str and "roman_rubin" in k
    ]
    for k in roman_rubin_list:
        if "lgfburst_u_params" in k:
            SED_params["lgfburst_pop_u_params"] = get_sed_model_params(
                SED_params["param_data_dirname"], SED_params["lgfburst_fname"]
            )
        if "burstshape_u_params" in k:
            SED_params["burstshapepop_u_params"] = get_sed_model_params(
                SED_params["param_data_dirname"], SED_params["burstshape_fname"]
            )
        if "lgav_dust_u_params" in k:
            SED_params["lgav_dust_u_params"] = get_sed_model_params(
                SED_params["param_data_dirname"], SED_params["lgav_dust_fname"]
            )
        if "delta_dust_u_params" in k:
            # NOTE(review): this stores under "delta_dust_u_params" while the
            # default above is stored under "dust_delta_u_params" — the two
            # keys never collide, so the default may not be overridden as
            # intended; confirm which key the SED pipeline consumes.
            SED_params["delta_dust_u_params"] = get_sed_model_params(
                SED_params["param_data_dirname"], SED_params["delta_dust_fname"]
            )
        if "funo_dust_u_params" in k:
            SED_params["fracuno_pop_u_params"] = get_sed_model_params(
                SED_params["param_data_dirname"], SED_params["fracuno_pop_fname"]
            )
        if "lgmet_params" in k:
            SED_params["lgmet_params"] = get_sed_model_params(
                SED_params["param_data_dirname"], SED_params["lgmet_fname"]
            )
    print()
    model_keys = [k for k in SED_params.keys() if "_model" in k]
    for key in model_keys:
        # parse column name to extract filter, frame and band
        _res = SED_params[key].split("_")
        if len(_res) >= 2:
            filt_req = SED_params[key].split("_")[0]
            frame_req = SED_params[key].split("_")[1]
            band_req = SED_params[key].split("_")[-1].lower()
            model_req = [
                k for k in SED_params["filter_keys"] if filt_req in k and band_req in k
            ]
            if len(model_req) == 1 and frame_req in SED_params["frames"]:
                print("...Using {} for galaxy-{}".format(SED_params[key], key))
            else:
                print("...{} not available for galaxy-{}".format(SED_params[key], key))
                SED_params[key] = None  # filter not available; overwrite key
        else:
            if "skip" not in SED_params[key]:
                print(
                    "...incorrect option {} for galaxy-{}".format(SED_params[key], key)
                )
            print("...Skipping galaxy-{}".format(key))
            SED_params[key] = None  # filter not available; overwrite key
    t_table = np.linspace(SED_params["t_table_0"], T0, SED_params["N_t_table"])
    SED_params["t_table"] = t_table
    # main loop over lightcone shells: one (UM mock file, redshift, snapshot)
    # triple per shell
    for a, b, c in gen:
        umachine_mock_fname = a
        redshift = b
        snapshot = c
        halo_unique_id = int(halo_id_cutout_offset + int(snapshot))
        print(
            "\n...Using halo_unique id = {} for snapshot {}".format(
                halo_unique_id, snapshot
            )
        )
        new_time_stamp = time()
        # seed should be changed for each new shell
        seed = seed + 2
        # check for halos in healpixel
        if len(healpix_data[snapshot]["id"]) == 0:
            output_mock[snapshot] = {}
            print("\n...skipping empty snapshot {}".format(snapshot))
            continue
        # Get galaxy properties from UM catalogs and target halo properties
        print("\n...loading z = {0:.2f} galaxy catalog into memory".format(redshift))
        mock = Table.read(umachine_mock_fname, path="data")
        print(".....Number of available UM galaxies: {}".format(len(mock)))
        # print('.....UM catalog colnames: {}'.format(', '.join(mock.colnames)))
        # Assemble table of target halos
        target_halos = get_astropy_table(
            healpix_data[snapshot], halo_unique_id=halo_unique_id
        )
        fof_halo_mass_max = max(
            np.max(target_halos[fof_halo_mass].quantity.value), fof_halo_mass_max
        )
        ################################################################################
        # generate halo shapes
        ################################################################################
        print("\n...Generating halo shapes")
        b_to_a, c_to_a, e, p = monte_carlo_halo_shapes(
            np.log10(target_halos[fof_halo_mass])
        )
        target_halos["halo_ellipticity"] = e
        target_halos["halo_prolaticity"] = p
        spherical_halo_radius = halo_mass_to_halo_radius(
            target_halos[fof_halo_mass], WMAP7, redshift, "vir"
        )
        target_halos["axis_A_length"] = (
            1.5 * spherical_halo_radius
        )  # crude fix for B and C shrinking
        target_halos["axis_B_length"] = b_to_a * target_halos["axis_A_length"]
        target_halos["axis_C_length"] = c_to_a * target_halos["axis_A_length"]
        nvectors = len(target_halos)
        rng = np.random.RandomState(seed)
        random_vectors = rng.uniform(-1, 1, nvectors * 3).reshape((nvectors, 3))
        axis_A = normalized_vectors(random_vectors) * target_halos[
            "axis_A_length"
        ].reshape((-1, 1))
        target_halos["axis_A_x"] = axis_A[:, 0]
        target_halos["axis_A_y"] = axis_A[:, 1]
        target_halos["axis_A_z"] = axis_A[:, 2]
        # now add halo shape information for those halos with matches in shape files
        print("\n...Matching halo shapes for selected halos")
        shapes = get_halo_shapes(
            snapshot,
            target_halos["fof_halo_id"],
            target_halos["lightcone_replication"],
            shape_dir,
        )
        if shapes:
            target_halos = get_matched_shapes(
                shapes, target_halos, rep_key="lightcone_replication"
            )
        ################################################################################
        # Galsampler - For every target halo,
        # find a source halo with closely matching mass
        ################################################################################
        print("\n...Finding halo--halo correspondence with GalSampler")
        # Set up target and source halo arrays for indices and masses
        target_halo_ids = target_halos["halo_id"]
        log_target_mass = np.log10(target_halos[fof_halo_mass])
        # Ensure that mock uses uber hostid
        mock.rename_column("hostid", "uber_hostid")
        upid, uber_hostid, _ = compute_uber_hostid(mock["upid"], mock["halo_id"])
        if not np.array_equal(mock["upid"], upid):
            print(".....overwriting upid with corrected upid")
            mock["upid"] = upid
        if not np.array_equal(mock["uber_hostid"], uber_hostid):
            print(".....overwriting hostid with corrected uber hostid")
            mock["uber_hostid"] = uber_hostid
        cenmask = mock["upid"] == -1
        # match to host halos not subhalos
        source_halo_ids = mock["uber_hostid"][cenmask]
        assert len(np.unique(source_halo_ids)) == len(
            source_halo_ids
        ), "Source halo ids not unique"
        source_galaxies_host_halo_id = mock["uber_hostid"]
        log_src_mass = np.log10(mock["mp"][cenmask])
        # Add noise to randomize the selections around the closest source halo match
        noisy_log_src_mass = np.random.normal(loc=log_src_mass, scale=mass_match_noise)
        # Use GalSampler to calculate the indices of the galaxies that will be selected
        print("...GalSampling z={0:.2f} galaxies to OuterRim halos".format(redshift))
        gs_results = galsample(
            source_galaxies_host_halo_id,
            source_halo_ids,
            target_halo_ids,
            [noisy_log_src_mass],
            [log_target_mass],
        )
        ########################################################################
        # Add synthetic galaxies if requested
        ########################################################################
        # guard on truthiness as well: the default synthetic_params is empty
        # and has no "skip_synthetics" key (formerly a KeyError)
        if synthetic_params and not synthetic_params["skip_synthetics"]:
            mock, synthetic_dict = add_low_mass_synthetic_galaxies(
                mock,
                seed,
                synthetic_halo_minimum_mass,
                redshift,
                synthetic_number,
                randomize_redshift_synthetic,
                previous_redshift,
                Vtotal,
                cosmology,
                mstar_min,
            )
        else:
            synthetic_dict = {}
        ########################################################################
        # Assemble the output mock by snapshot
        ########################################################################
        print("\n...Building output snapshot mock for snapshot {}".format(snapshot))
        output_mock[snapshot] = build_output_snapshot_mock(
            ssp_data_singlemet,
            float(redshift),
            mock,
            target_halos,
            gs_results,
            galaxy_id_offset,
            synthetic_dict,
            Nside_sky,
            cutout_number_true,
            float(previous_redshift),
            cosmology,
            w0,
            wa,
            volume_minx=volume_minx,
            SED_params=SED_params,
            volume_miny=volume_miny,
            volume_maxz=volume_maxz,
            seed=seed,
            shear_params=shear_params,
            halo_unique_id=halo_unique_id,
            redshift_method="galaxy",
        )
        galaxy_id_offset = galaxy_id_offset + len(
            output_mock[snapshot]["galaxy_id"]
        )  # increment offset
        # check that offset is within index bounds
        galaxy_id_bound = (
            cutout_number * cutout_id_offset
            + z_offsets_not_im[str(Nside)][z_range_id + 1]
        )
        if galaxy_id_offset > galaxy_id_bound:
            print(
                "...Warning: galaxy_id bound of {} exceeded for snapshot {}".format(
                    galaxy_id_bound, snapshot
                )
            )
        Ngals_total += len(output_mock[snapshot]["galaxy_id"])
        print(
            "...Saved {} galaxies to dict".format(
                len(output_mock[snapshot]["galaxy_id"])
            )
        )
        previous_redshift = redshift  # update for next snap
        # do garbage collection
        gc.collect()
        time_stamp = time()
        msg = "\nLightcone-shell runtime = {0:.2f} minutes"
        print(msg.format((time_stamp - new_time_stamp) / 60.0))
        mem = "Memory usage = {0:.2f} GB"
        print(mem.format(process.memory_info().rss / 1.0e9))
        mem = "Thread count = {}"
        print(mem.format(threading.active_count()))
        for env in ["OMP_NUM_THREADS", "MKL_NUM_THREADS", "NUMEXPR_NUM_THREADS"]:
            print("Checking {} = {}".format(env, os.environ.get(env)))
    ########################################################################
    # Write the output mock to disk
    ########################################################################
    if len(output_mock) > 0:
        check_time = time()
        write_output_mock_to_disk(
            output_color_mock_fname,
            output_mock,
            commit_hash,
            seed,
            synthetic_params,
            cutout_number_true,
            Nside,
            cosmology,
            versionMajor=versionMajor,
            versionMinor=versionMinor,
            versionMinorMinor=versionMinorMinor,
        )
        print(
            "...time to write mock to disk = {:.2f} minutes".format(
                (time() - check_time) / 60.0
            )
        )
    print(
        "Maximum halo mass for {} ={}\n".format(output_mock_basename, fof_halo_mass_max)
    )
    print("Number of galaxies for {} ={}\n".format(output_mock_basename, Ngals_total))
    time_stamp = time()
    msg = "\nEnd-to-end runtime = {0:.2f} minutes\n"
    print(msg.format((time_stamp - start_time) / 60.0))
# reduce max seed by 200 to allow for 60 light-cone shells
def get_random_seed(filename, seed_max=4294967095):
    """Return a deterministic odd seed in [1, seed_max] derived from *filename*.

    The MD5 digest of the filename is interpreted as an integer, reduced
    modulo ``seed_max``, and bumped to the next odd value, so each output
    file gets a distinct but reproducible random seed.
    """
    import hashlib

    digest = hashlib.md5(filename.encode("utf-8")).hexdigest()
    candidate = int(digest, 16) % seed_max
    # odd seeds only, so per-shell increments of 2 preserve oddness
    return candidate if candidate % 2 else candidate + 1
def get_volume_factor(redshift, previous_redshift, Vtotal, cosmology):
    """Return the fraction of the total volume occupied by one lightcone shell.

    The shell is bounded by *previous_redshift* and *redshift*; shell volume
    is the difference of the two ``cosmology.comoving_volume`` values, and
    the result is that volume divided by *Vtotal*.
    """
    v_outer = cosmology.comoving_volume(float(redshift)).value
    v_inner = cosmology.comoving_volume(float(previous_redshift)).value
    return (v_outer - v_inner) / Vtotal
def get_sed_model_params(param_dir, param_file):
    """Read a two-column ``name value`` text file into a dict of floats.

    Parameters
    ----------
    param_dir : str
        Directory containing the parameter file
    param_file : str
        Name of the parameter file; each line holds a key and a float value

    Returns
    -------
    dict
        Mapping of parameter name to float value, in file order
    """
    param_filename = os.path.join(param_dir, param_file)
    with open(param_filename) as fh:
        rows = [line.split() for line in fh]
    print("\nReading model parameters from file {}".format(param_filename))
    param_dict = {row[0]: float(row[1]) for row in rows}
    for name, value in param_dict.items():
        print("....{} = {:.4f}".format(name, value))
    return param_dict
def get_astropy_table(table_data, halo_unique_id=0, check=False, cosmology=None):
    """Assemble the target-halo astropy table from a healpix-cutout data group.

    Copies every column from *table_data*, renames the id/rotation/replication
    and mass columns to standard names, derives ``halo_redshift`` from the
    scale factor ``a``, assigns unique ``halo_id`` values, and invalidates SOD
    quantities for halos below the ~1000-particle mass threshold.

    Parameters
    ----------
    table_data : mapping
        Columns of one snapshot group of the healpix cutout (hdf5 group
        or dict of arrays); must contain "id", "rot", "rep", "a"
    halo_unique_id : int
        Per-snapshot offset folded into the generated ``halo_id`` values
    check : boolean
        If True (and *cosmology* given), cross-check halo positions against
        comoving distances computed from the redshifts
    cosmology : astropy cosmology instance or None
        Required only when ``check`` is True

    Returns
    -------
    t : astropy.table.Table
        Table of target halos with standardized column names
    """
    t = Table()
    for k in table_data.keys():
        t[k] = table_data[k]
    t.rename_column("id", "fof_halo_id")
    t.rename_column("rot", "lightcone_rotation")
    t.rename_column("rep", "lightcone_replication")
    t["halo_redshift"] = 1 / t["a"] - 1.0
    # row index scaled by halo_id_offset keeps ids unique across cutouts/snaps
    t["halo_id"] = (
        np.arange(len(table_data["id"])) * halo_id_offset + halo_unique_id
    ).astype(int)
    # rename column mass if found
    if mass in t.colnames:
        t.rename_column(mass, fof_halo_mass)
    elif fof_mass in t.colnames:
        t.rename_column(fof_mass, fof_halo_mass)
    else:
        print(" Warning; halo mass or fof_mass not found")
    # check sod information and clean bad values
    if sod_mass in t.colnames:
        mask_valid = t[sod_mass] > 0
        mask = mask_valid & (t[sod_mass] < m_particle_1000)
        # overwrite SOD quantities that are below the reliability threshold
        for cn in ["sod_cdelta", "sod_cdelta_error", sod_mass, "sod_radius"]:
            t[cn][mask] = -1
        if np.count_nonzero(mask) > 0:
            print(
                ".....Overwrote {}/{} SOD quantities failing {:.2g} mass cut".format(
                    np.count_nonzero(mask),
                    np.count_nonzero(mask_valid),
                    m_particle_1000,
                )
            )
    if check and cosmology is not None:
        # compute comoving distance from z and from position
        r = np.sqrt(t["x"] * t["x"] + t["y"] * t["y"] + t["z"] * t["z"])
        comoving_distance = (
            cosmology.comoving_distance(t["halo_redshift"]) * cosmology.H0.value / 100.0
        )
        # fix: the message and value were passed as two print args, so the
        # "{}" placeholder was printed literally instead of being formatted
        print(
            "r == comoving_distance(z) is {}".format(np.isclose(r, comoving_distance))
        )
    print(
        "...Number of target halos to populate with galaxies: {}".format(
            len(t["halo_id"])
        )
    )
    return t
def add_low_mass_synthetic_galaxies(
    mock,
    seed,
    synthetic_halo_minimum_mass,
    redshift,
    synthetic_number,
    randomize_redshift_synthetic,
    previous_redshift,
    Vtotal,
    cosmology,
    mstar_min,
    use_substeps_synthetic=False,
    nzdivs=6,
):
    """Extend the UM mock below its completeness limit with synthetic galaxies.

    Corrects stellar masses of low-mass subhalos in *mock* in place, generates
    synthetic ``mpeak``/``obs_sm`` values down to *synthetic_halo_minimum_mass*,
    down-samples them by the lightcone-shell volume fraction, and optionally
    applies an additional M* cut.

    Parameters
    ----------
    mock : astropy.table.Table
        UniverseMachine snapshot mock; its "obs_sm" column is overwritten
    seed : int
        Random seed for synthetic-mass generation and down-sampling
    synthetic_halo_minimum_mass : float
        Minimum log10 halo mass for synthetic galaxies
    redshift, previous_redshift : float
        Boundaries of the lightcone shell (sets the volume fraction)
    synthetic_number : int
        Target number of synthetics for the full redshift range
    randomize_redshift_synthetic : boolean
        Accepted for interface compatibility; not used in this function
    Vtotal : float
        Total comoving volume of the catalog
    cosmology : astropy cosmology instance
        Used to compute the shell volume
    mstar_min : float
        If > 0, synthetics with M* below this value are removed
    use_substeps_synthetic, nzdivs :
        Accepted for interface compatibility; not used in this function

    Returns
    -------
    mock : astropy.table.Table
        The input mock with corrected "obs_sm" values
    synthetic_dict : dict
        "mpeak" and "obs_sm" arrays for the selected synthetic galaxies
    """
    # Correct stellar mass for low-mass subhalos and create synthetic mpeak
    print(".....correcting low mass mpeak and assigning synthetic mpeak values")
    # First generate the appropriate number of synthetic galaxies for the snapshot
    mpeak_synthetic_snapshot = 10 ** synthetic_logmpeak(
        mock["mpeak"], seed=seed, desired_logm_completeness=synthetic_halo_minimum_mass
    )
    print(".....assembling {} synthetic galaxies".format(len(mpeak_synthetic_snapshot)))
    # Add call to map_mstar_onto_lowmass_extension function after
    # pre-determining low-mass slope
    print(".....re-assigning low-mass mstar values")
    new_mstar_real, mstar_synthetic_snapshot = map_mstar_onto_lowmass_extension(
        mock["mpeak"], mock["obs_sm"], mpeak_synthetic_snapshot
    )
    diff = np.equal(new_mstar_real, mock["obs_sm"])
    print(
        ".......changed {}/{} M* values; max/min new log(M*) {:.2f}/{:.2f}".format(
            np.count_nonzero(~diff),
            len(diff),
            np.max(np.log10(new_mstar_real[~diff])),
            np.min(np.log10(new_mstar_real[~diff])),
        )
    )
    mock["obs_sm"] = new_mstar_real
    mstar_mask = np.isclose(mstar_synthetic_snapshot, 0.0)
    if np.sum(mstar_mask) > 0:
        print(
            ".....Warning: Number of synthetics with zero mstar = {}".format(
                np.sum(mstar_mask)
            )
        )
    # Assign diffstar parameters to synthetic low-mass galaxies
    print("TBD: get diffstar parameters for synthetic galaxies")
    # Now downsample the synthetic galaxies to adjust for volume of lightcone shell
    # desired number = synthetic_number*comoving_vol(snapshot)/comoving_vol(healpixel)
    volume_factor = get_volume_factor(redshift, previous_redshift, Vtotal, cosmology)
    num_selected_synthetic = int(synthetic_number * volume_factor)
    num_synthetic_gals_in_snapshot = len(mpeak_synthetic_snapshot)
    synthetic_indices = np.arange(0, num_synthetic_gals_in_snapshot).astype(int)
    with NumpyRNGContext(seed):
        selected_synthetic_indices = np.random.choice(
            synthetic_indices, size=num_selected_synthetic, replace=False
        )
    msg = ".....down-sampling synthetic galaxies with volume factor {} to yield {}"
    print(
        "{} selected synthetics".format(
            msg.format(volume_factor, num_selected_synthetic)
        )
    )
    mstar_synthetic = mstar_synthetic_snapshot[selected_synthetic_indices]
    # fix: define mpeak_synthetic unconditionally; it was previously assigned
    # only inside the mstar_min > 0 branch, giving a NameError otherwise
    mpeak_synthetic = mpeak_synthetic_snapshot[selected_synthetic_indices]
    # Apply additional M* cut to reduce number of synthetics for 5000 sq. deg. catalog
    if mstar_min > 0:
        mstar_mask = mstar_synthetic > mstar_min
        msg = ".....removing synthetics with M* < {:.1e} to yield {} total synthetics"
        print(msg.format(mstar_min, np.count_nonzero(mstar_mask)))
        mstar_synthetic = mstar_synthetic[mstar_mask]
        mpeak_synthetic = mpeak_synthetic[mstar_mask]
    synthetic_dict = dict(
        mpeak=mpeak_synthetic,
        obs_sm=mstar_synthetic,
    )
    return mock, synthetic_dict
def build_output_snapshot_mock(
ssp_data_singlemet,
snapshot_redshift,
umachine,
target_halos,
gs_results,
galaxy_id_offset,
synthetic_dict,
Nside,
cutout_number_true,
previous_redshift,
cosmology,
w0,
wa,
volume_minx=0.0,
volume_miny=0.0,
volume_maxz=0.0,
SED_params={},
seed=41,
shear_params={},
mah_keys="mah_keys",
ms_keys="ms_keys",
q_keys="q_keys",
mah_pars="mah_params",
ms_pars="ms_params",
q_pars="q_params",
halo_unique_id=0,
redshift_method="galaxy",
source_galaxy_tag="um_source_galaxy_",
bulge_frac="bulge_frac",
):
"""
Collect the GalSampled snapshot mock into an astropy table
Parameters
----------
snapshot_redshift : float
Float of the snapshot redshift
umachine : astropy.table.Table
Astropy Table of shape (num_source_gals, )
storing the UniverseMachine snapshot mock
target_halos : astropy.table.Table
Astropy Table of shape (num_target_halos, )
storing the target halo catalog
gs_results: named ntuple
Named ntuple returned by galsample containing 3 arrays
of shape (num_target_gals, )
storing integers valued between [0, num_source_gals)
commit_hash : string
Commit hash of the version of the code repo used when
calling this function.
After updating the code repo to the desired version,
the commit_hash can be determined by navigating to the root
directory and typing ``git log --pretty=format:'%h' -n 1``
Returns
-------
dc2 : astropy.table.Table
Astropy Table of shape (num_target_gals, )
storing the GalSampled galaxy catalog
"""
dc2 = Table()
# unpack galsampler results
galaxy_indices = gs_results.target_gals_selection_indx
target_gals_target_halo_ids = gs_results.target_gals_target_halo_ids
target_gals_source_halo_ids = gs_results.target_gals_source_halo_ids
dc2["source_halo_uber_hostid"] = target_gals_source_halo_ids
dc2["target_halo_id"] = target_gals_target_halo_ids
# save target halo information into mock
# compute richness
# tgt_unique, tgt_inv, tgt_counts = np.unique(target_gals_target_halo_ids,
# return_inverse=True,
# return_counts=True)
# dc2['richness'] = tgt_counts[tgt_inv]
#
# Method 1: use unique arrays to get values
# hunique, hidx = np.unique(target_halos['halo_id'], return_index=True)
# reconstruct mock arrays using target_halos[name][hidx][tgt_inv]
# Method 2: cross-match to get target halo information
idxA, idxB = crossmatch(target_gals_target_halo_ids, target_halos["halo_id"])
msg = "target IDs do not match!"
assert np.all(dc2["target_halo_id"][idxA] == target_halos["halo_id"][idxB]), msg
for col in ["lightcone_rotation", "lightcone_replication"]:
dc2[col] = 0.0
dc2[col][idxA] = target_halos[col][idxB]
dc2["target_halo_fof_halo_id"] = 0.0
dc2["target_halo_redshift"] = 0.0
dc2["target_halo_x"] = 0.0
dc2["target_halo_y"] = 0.0
dc2["target_halo_z"] = 0.0
dc2["target_halo_vx"] = 0.0
dc2["target_halo_vy"] = 0.0
dc2["target_halo_vz"] = 0.0
dc2["target_halo_fof_halo_id"][idxA] = target_halos["fof_halo_id"][idxB]
dc2["target_halo_redshift"][idxA] = target_halos["halo_redshift"][idxB]
dc2["target_halo_x"][idxA] = target_halos["x"][idxB]
dc2["target_halo_y"][idxA] = target_halos["y"][idxB]
dc2["target_halo_z"][idxA] = target_halos["z"][idxB]
dc2["target_halo_vx"][idxA] = target_halos["vx"][idxB]
dc2["target_halo_vy"][idxA] = target_halos["vy"][idxB]
dc2["target_halo_vz"][idxA] = target_halos["vz"][idxB]
dc2["target_halo_mass"] = 0.0
dc2["target_halo_mass"][idxA] = target_halos["fof_halo_mass"][idxB]
dc2["target_halo_ellipticity"] = 0.0
dc2["target_halo_ellipticity"][idxA] = target_halos["halo_ellipticity"][idxB]
dc2["target_halo_prolaticity"] = 0.0
dc2["target_halo_prolaticity"][idxA] = target_halos["halo_prolaticity"][idxB]
dc2["target_halo_axis_A_length"] = 0.0
dc2["target_halo_axis_B_length"] = 0.0
dc2["target_halo_axis_C_length"] = 0.0
dc2["target_halo_axis_A_length"][idxA] = target_halos["axis_A_length"][idxB]
dc2["target_halo_axis_B_length"][idxA] = target_halos["axis_B_length"][idxB]
dc2["target_halo_axis_C_length"][idxA] = target_halos["axis_C_length"][idxB]
dc2["target_halo_axis_A_x"] = 0.0
dc2["target_halo_axis_A_y"] = 0.0
dc2["target_halo_axis_A_z"] = 0.0
dc2["target_halo_axis_A_x"][idxA] = target_halos["axis_A_x"][idxB]
dc2["target_halo_axis_A_y"][idxA] = target_halos["axis_A_y"][idxB]
dc2["target_halo_axis_A_z"][idxA] = target_halos["axis_A_z"][idxB]
# add SOD information from target_halo table
dc2["sod_halo_cdelta"] = 0.0
dc2["sod_halo_cdelta_error"] = 0.0
dc2["sod_halo_mass"] = 0.0
dc2["sod_halo_radius"] = 0.0
dc2["sod_halo_cdelta"][idxA] = target_halos["sod_cdelta"][idxB]
dc2["sod_halo_cdelta_error"][idxA] = target_halos["sod_cdelta_error"][idxB]
dc2["sod_halo_mass"][idxA] = target_halos["sod_mass"][idxB]
dc2["sod_halo_radius"][idxA] = target_halos["sod_radius"][idxB]
# Here the host_centric_xyz_vxvyvz in umachine should be overwritten
# Then we can associate x <--> A, y <--> B, z <--> C and then apply
# a random rotation
# It will be important to record the true direction of the major axis as a
# stored column
source_galaxy_prop_keys = (
"mp",
"vmp",
"rvir",
"upid",
"host_rvir",
"host_mp",
"host_rvir",
"halo_id",
"has_fit",
"is_main_branch",
"obs_sm",
"obs_sfr",
"sfr_percentile",
)
source_galaxy_pv_keys = (
"host_dx",
"host_dy",
"host_dz",
"host_dvx",
"host_dvy",
"host_dvz",
)
# check for no-fit replacement
if "nofit_replace" in umachine.colnames:
source_galaxy_prop_keys = source_galaxy_prop_keys + ("nofit_replace",)
SFH_param_keys = SED_params[mah_keys] + SED_params[ms_keys] + SED_params[q_keys]
source_galaxy_keys = (
source_galaxy_pv_keys + source_galaxy_prop_keys + tuple(SFH_param_keys)
)
for key in source_galaxy_keys:
newkey = source_galaxy_tag + key if key in source_galaxy_prop_keys else key
try:
dc2[newkey] = umachine[key][galaxy_indices]
except KeyError:
msg = (
"The build_output_snapshot_mock function was passed a umachine mock\n"
"that does not contain the ``{0}`` key"
)
raise KeyError(msg.format(key))
# remap M* for high-mass halos
max_umachine_halo_mass = np.max(umachine["mp"])
ultra_high_mvir_halo_mask = (dc2[source_galaxy_tag + "upid"] == -1) & (
dc2["target_halo_mass"] > max_umachine_halo_mass
)
num_to_remap = np.count_nonzero(ultra_high_mvir_halo_mask)
if num_to_remap > 0:
print(
".....remapping stellar mass of {0} BCGs in ultra-massive halos".format(
num_to_remap
)
)
halo_mass_array = dc2["target_halo_mass"][ultra_high_mvir_halo_mask]
mpeak_array = dc2[source_galaxy_tag + "mp"][ultra_high_mvir_halo_mask]
mhalo_ratio = halo_mass_array / mpeak_array
mstar_array = dc2[source_galaxy_tag + "obs_sm"][ultra_high_mvir_halo_mask]
redshift_array = dc2["target_halo_redshift"][ultra_high_mvir_halo_mask]
upid_array = dc2[source_galaxy_tag + "upid"][ultra_high_mvir_halo_mask]
assert np.shape(halo_mass_array) == (
num_to_remap,
), "halo_mass_array has shape = {0}".format(np.shape(halo_mass_array))
assert np.shape(mstar_array) == (
num_to_remap,
), "mstar_array has shape = {0}".format(np.shape(mstar_array))
assert np.shape(redshift_array) == (
num_to_remap,
), "redshift_array has shape = {0}".format(np.shape(redshift_array))
assert np.shape(upid_array) == (
num_to_remap,
), "upid_array has shape = {0}".format(np.shape(upid_array))
assert np.all(
mhalo_ratio >= 1
), "Bookkeeping error: all values of mhalo_ratio ={0} should be >= 1".format(
mhalo_ratio
)
obs_sm_key = source_galaxy_tag + "obs_sm"
halo_id_key = source_galaxy_tag + "halo_id"
dc2[obs_sm_key][ultra_high_mvir_halo_mask] = mstar_array * (mhalo_ratio**0.5)
idx = np.argmax(dc2[obs_sm_key])
halo_id_most_massive = dc2[halo_id_key][idx]
assert (
dc2[obs_sm_key][idx] < 10**13.5
), "halo_id = {0} has stellar mass {1:.3e}".format(
halo_id_most_massive, dc2[obs_sm_key][idx]
)
# generate triaxial satellite distributions based on halo shapes
satmask = dc2[source_galaxy_tag + "upid"] != -1
nsats = np.count_nonzero(satmask)
host_conc = 5.0
if nsats > 0:
host_Ax = dc2["target_halo_axis_A_x"][satmask]
host_Ay = dc2["target_halo_axis_A_y"][satmask]
host_Az = dc2["target_halo_axis_A_z"][satmask]
b_to_a = (
dc2["target_halo_axis_B_length"][satmask]
/ dc2["target_halo_axis_A_length"][satmask]
)
c_to_a = (
dc2["target_halo_axis_C_length"][satmask]
/ dc2["target_halo_axis_A_length"][satmask]
)
host_dx, host_dy, host_dz = generate_triaxial_satellite_distribution(
host_conc, host_Ax, host_Ay, host_Az, b_to_a, c_to_a
)
dc2["host_dx"][satmask] = host_dx
dc2["host_dy"][satmask] = host_dy
dc2["host_dz"][satmask] = host_dz
# save positions and velocities
dc2["x"] = dc2["target_halo_x"] + dc2["host_dx"]
dc2["vx"] = dc2["target_halo_vx"] + dc2["host_dvx"]
dc2["y"] = dc2["target_halo_y"] + dc2["host_dy"]
dc2["vy"] = dc2["target_halo_vy"] + dc2["host_dvy"]
dc2["z"] = dc2["target_halo_z"] + dc2["host_dz"]
dc2["vz"] = dc2["target_halo_vz"] + dc2["host_dvz"]
print(
".....number of galaxies before adding synthetic satellites = {}".format(
len(dc2[source_galaxy_tag + "halo_id"])
)
)
# add synthetic cluster galaxies
fake_cluster_sats = model_synthetic_cluster_satellites(
dc2,
Lbox=0.0,
host_conc=host_conc,
SFH_keys=list(SFH_param_keys),
source_halo_mass_key=source_galaxy_tag + "host_mp",
source_halo_id_key=source_galaxy_tag + "halo_id",
upid_key=source_galaxy_tag + "upid",
tri_axial_positions=True,
source_galaxy_tag=source_galaxy_tag,
) # turn off periodicity
if len(fake_cluster_sats) > 0:
print(".....generating and stacking synthetic cluster satellites")
check_time = time()
dc2 = vstack((dc2, fake_cluster_sats))
print(
".....time to create {} galaxies in fake_cluster_sats = {:.2f} secs".format(
len(fake_cluster_sats["target_halo_id"]), time() - check_time
)
)
else:
print(".....no synthetic cluster satellites required")
# generate redshifts, ra and dec
dc2 = get_sky_coords(
dc2, cosmology, redshift_method, source_galaxy_tag=source_galaxy_tag
)
# save number of galaxies in shell
Ngals = len(dc2["target_halo_id"])
# generate mags
dc2 = generate_SEDs(
ssp_data_singlemet,
dc2,
SED_params,
cosmology,
w0,
wa,
seed,
snapshot_redshift,
mah_keys,
ms_keys,
q_keys,
Ngals,
mah_pars=mah_pars,
ms_pars=ms_pars,
q_pars=q_pars,
source_galaxy_tag=source_galaxy_tag,
)
# Add low-mass synthetic galaxies
if synthetic_dict and len(synthetic_dict["mp"]) > 0:
check_time = time()
lowmass_mock = create_synthetic_lowmass_mock_with_centrals(
umachine,
dc2,
synthetic_dict,
previous_redshift,
snapshot_redshift,
cosmology,
Nside=Nside,
cutout_id=cutout_number_true,
H0=cosmology.H0.value,
volume_minx=volume_minx,
volume_miny=volume_miny,
volume_maxz=volume_maxz,
halo_id_offset=halo_id_offset,
halo_unique_id=halo_unique_id,
)
lowmass_mock = get_sky_coords(lowmass_mock, cosmology, redshift_method="halo")
if len(lowmass_mock) > 0:
# astropy vstack pads missing values with zeros in lowmass_mock
dc2 = vstack((dc2, lowmass_mock))
msg = ".....time to create {} galaxies in lowmass_mock = {:.2f} secs"
print(
msg.format(
len(lowmass_mock["target_halo_id"]),
time() - check_time,
)
)
# Add shears and magnification
if shear_params["add_dummy_shears"]:
print("\n.....adding dummy shears and magnification")
dc2["shear1"] = np.zeros(Ngals, dtype="f4")
dc2["shear2"] = np.zeros(Ngals, dtype="f4")
dc2["magnification"] = np.ones(Ngals, dtype="f4")
dc2["convergence"] = np.zeros(Ngals, dtype="f4")
else:
print("\n.....TBD: add real shears")
# Add auxiliary quantities for sizes and ellipticities and black-hole masses
# TBD Need dc2['bulge_to_total_ratio'] for some quantities
if SED_params["size_model_mag"]:
size_disk, size_sphere, arcsec_per_kpc = get_galaxy_sizes(
dc2[SED_params["size_model_mag"]], dc2["redshift"], cosmology
)
dc2["spheroidHalfLightRadius"] = size_sphere
dc2["spheroidHalfLightRadiusArcsec"] = size_sphere * arcsec_per_kpc
dc2["diskHalfLightRadius"] = size_disk
dc2["diskHalfLightRadiusArcsec"] = size_disk * arcsec_per_kpc
if SED_params["ellipticity_model_mag"]:
pos_angle = np.random.uniform(size=Ngals) * np.pi
spheroid_ellip_cosmos, disk_ellip_cosmos = monte_carlo_ellipticity_bulge_disk(
dc2[SED_params["ellipticity_model_mag"]]
)
# Returns distortion ellipticity = 1-q^2 / 1+q^2; q=axis ratio
spheroid_axis_ratio = np.sqrt(
(1 - spheroid_ellip_cosmos) / (1 + spheroid_ellip_cosmos)
)
disk_axis_ratio = np.sqrt((1 - disk_ellip_cosmos) / (1 + disk_ellip_cosmos))
# Calculate ellipticity from the axis ratios using shear ellipticity e =
# 1-q / 1+q
ellip_disk = (1.0 - disk_axis_ratio) / (1.0 + disk_axis_ratio)
ellip_spheroid = (1.0 - spheroid_axis_ratio) / (1.0 + spheroid_axis_ratio)
dc2["spheroidAxisRatio"] = np.array(spheroid_axis_ratio, dtype="f4")
dc2["spheroidEllipticity"] = np.array(ellip_spheroid, dtype="f4")
dc2["spheroidEllipticity1"] = np.array(
np.cos(2.0 * pos_angle) * ellip_spheroid, dtype="f4"
)
dc2["spheroidEllipticity2"] = np.array(
np.sin(2.0 * pos_angle) * ellip_spheroid, dtype="f4"
)
dc2["diskAxisRatio"] = np.array(disk_axis_ratio, dtype="f4")
dc2["diskEllipticity"] = np.array(ellip_disk, dtype="f4")
dc2["diskEllipticity1"] = np.array(
np.cos(2.0 * pos_angle) * ellip_disk, dtype="f4"
)
dc2["diskEllipticity2"] = np.array(
np.sin(2.0 * pos_angle) * ellip_disk, dtype="f4"
)
dc2["positionAngle"] = np.array(pos_angle * 180.0 / np.pi, dtype="f4")
if bulge_frac in dc2.colnames:
tot_ellip = (1.0 - dc2[bulge_frac]) * ellip_disk + dc2[
bulge_frac
] * ellip_spheroid
dc2["totalEllipticity"] = np.array(tot_ellip, dtype="f4")
dc2["totalAxisRatio"] = np.array(
(1.0 - tot_ellip) / (1.0 + tot_ellip), dtype="f4"
)
dc2["totalEllipticity1"] = np.array(
np.cos(2.0 * pos_angle) * tot_ellip, dtype="f4"
)
dc2["totalEllipticity2"] = np.array(
np.sin(2.0 * pos_angle) * tot_ellip, dtype="f4"
)
# srsc_indx_disk = 1.0*np.ones(lum_disk.size,dtype='f4')
# srsc_indx_sphere = 4.0*np.ones(lum_disk.size,dtype='f4')
# srsc_indx_tot = srsc_indx_disk*(1. - dc2['bulge_to_total_ratio'])
# + srsc_indx_sphere*dc2['bulge_to_total_ratio']
# dc2['diskSersicIndex'] = srsc_indx_disk
# dc2['spheroidSersicIndex'] = srsc_indx_sphere
# dc2['totalSersicIndex'] = srsc_indx_tot
if SED_params["black_hole_model"]:
percentile_sfr = dc2[source_galaxy_tag + "sfr_percentile"]
# percentile_sfr = np.random.uniform(size=Ngals)
dc2["bulge_stellar_mass"] = dc2[bulge_frac] * np.power(10, dc2["logsm_obs"])
dc2["blackHoleMass"] = monte_carlo_black_hole_mass(dc2["bulge_stellar_mass"])
eddington_ratio, bh_acc_rate = monte_carlo_bh_acc_rate(
snapshot_redshift, dc2["blackHoleMass"], percentile_sfr
)
dc2["blackHoleAccretionRate"] = bh_acc_rate * 1e9
dc2["blackHoleEddingtonRatio"] = eddington_ratio
# Add column for redshifts including peculiar velocities
_, z_obs, v_pec, _, _, _, _ = pecZ(
dc2["x"], dc2["y"], dc2["z"], dc2["vx"], dc2["vy"], dc2["vz"], dc2["redshift"]
)
dc2["peculiarVelocity"] = np.array(v_pec, dtype="f4")
dc2.rename_column("redshift", "redshiftHubble")
dc2["redshift"] = z_obs
# Galaxy ids
dc2["galaxy_id"] = np.arange(
galaxy_id_offset, galaxy_id_offset + len(dc2["target_halo_id"])
).astype(int)
print(
"\n.....Min and max galaxy_id = {} -> {}".format(
np.min(dc2["galaxy_id"]), np.max(dc2["galaxy_id"])
)
)
# convert table to dict
check_time = time()
output_dc2 = {}
for k in dc2.keys():
output_dc2[k] = dc2[k].quantity.value
print(".....time to new dict = {:.4f} secs".format(time() - check_time))
return output_dc2
def generate_SEDs(
    ssp_data_singlemet,
    dc2,
    SED_params,
    cosmology,
    w0,
    wa,
    seed,
    snapshot_redshift,
    mah_keys,
    ms_keys,
    q_keys,
    Ngals,
    mah_pars="mah_params",
    ms_pars="ms_params",
    q_pars="q_params",
    source_galaxy_tag="source_galaxy",
):
    """
    Compute star-formation histories, stellar masses, and SEDs for the mock.

    Pipeline: validate ``SED_params``; substitute population-model parameters
    for galaxies whose diffmah/diffstar fits failed; evaluate the SFH on
    ``SED_params["t_table"]`` to get observed stellar mass and SFR; then
    compute rest- and observer-frame magnitudes with
    ``get_diffsky_sed_info_singlemet`` and store them via ``save_sed_info``.

    Returns the modified ``dc2`` table.

    NOTE(review): ``Ngals``, ``mah_pars``, ``ms_pars`` and ``q_pars`` are
    accepted (callers pass them) but are not used in this body — confirm
    whether they can be dropped upstream.
    """
    check = validate_SED_params(
        SED_params,
        mah_keys=mah_keys,
        ms_keys=ms_keys,
        q_keys=q_keys,
    )
    assert check, "SED_params does not have required contents"
    # Replace parameters of failed SFH fits before evaluating any histories.
    dc2 = substitute_SFH_fit_failures(
        dc2,
        SED_params,
        source_galaxy_tag,
        seed,
        cosmology,
        snapshot_redshift,
        mah_keys=mah_keys,
        ms_keys=ms_keys,
        q_keys=q_keys,
    )
    """
    assemble params from UM matches and compute SFH
    """
    _res = get_diff_params(
        dc2,
        mah_keys=SED_params[mah_keys],
        ms_keys=SED_params[ms_keys],
        q_keys=SED_params[q_keys],
    )
    mah_params, ms_params, q_params = _res
    # Age of the universe at each galaxy's redshift.
    t_obs = cosmology.age(dc2["redshift"]).value
    # get SFH table and observed stellar mass
    sfh_table = sfh_galpop(
        SED_params["t_table"],
        mah_params,
        ms_params,
        q_params,
        lgt0=SED_params["LGT0"],
        fb=FB,
    )
    logsm_obs, sfr_obs = get_logsm_sfr_obs(sfh_table, t_obs, SED_params["t_table"])
    dc2["logsm_obs"] = logsm_obs
    dc2["sfr"] = sfr_obs
    log_ssfr = get_log_safe_ssfr(np.power(10, logsm_obs), sfr_obs)
    dc2["log_ssfr"] = log_ssfr
    # Cosmic baryon fraction used to build the SED-code cosmology.
    fb = cosmology.Ob0 / cosmology.Om0
    cosmo_params = CosmoParams(cosmology.Om0, w0, wa, cosmology.H0.value / 100, fb)
    # compute SEDs
    ran_key = jran.PRNGKey(seed)
    # Pack the population-level unbounded parameters in the order
    # DiffskyPopParams expects (burst, dust, metallicity).
    diffskypop_params = DiffskyPopParams(
        np.array(list(SED_params["lgfburst_pop_u_params"].values())),
        np.array(list(SED_params["burstshapepop_u_params"].values())),
        np.array(list(SED_params["lgav_dust_u_params"].values())),
        np.array(list(SED_params["delta_dust_u_params"].values())),
        np.array(list(SED_params["fracuno_pop_u_params"].values())),
        np.array(list(SED_params["lgmet_params"].values())),
    )
    # The same filter set is passed twice (rest- and observer-frame slots).
    _res = get_diffsky_sed_info_singlemet(
        ran_key,
        dc2["redshift"],
        mah_params,
        ms_params,
        q_params,
        SED_params["ssp_z_table"],
        SED_params["ssp_restmag_table"],
        SED_params["ssp_obsmag_table"],
        ssp_data_singlemet,
        SED_params["t_table"],
        SED_params["filter_waves"],
        SED_params["filter_trans"],
        SED_params["filter_waves"],
        SED_params["filter_trans"],
        diffskypop_params,
        cosmo_params,
    )
    # save quantities to DC2
    dc2 = save_sed_info(dc2, _res, SED_params)
    return dc2
def validate_SED_params(
    SED_params,
    required=(
        "use_diffmah_pop",
        "LGT0",
        "t_table",
        "ssp_z_table",
        "ssp_restmag_table",
        "ssp_obsmag_table",
        "ssp_lg_age_gyr",
        "filter_waves",
        "filter_trans",
        "lgfburst_pop_u_params",
        "burstshapepop_u_params",
        "lgav_dust_u_params",
        "delta_dust_u_params",
        "fracuno_pop_u_params",
        "lgmet_params",
    ),
    mah_keys="mah_keys",
    ms_keys="ms_keys",
    q_keys="q_keys",
):
    """
    Check that ``SED_params`` contains every key required for SED generation.

    Parameters
    ----------
    SED_params : dict
        Configuration dictionary to validate.
    required : sequence of str, optional
        Key names that must be present. Defaults to the full set needed by
        ``generate_SEDs``. (Changed from a mutable default list to a tuple;
        the duplicate ``"t_table"`` entry was also removed.)
    mah_keys, ms_keys, q_keys : str, optional
        Additional key names (for the diffmah/diffstar parameter-column
        lists) that must also be present.

    Returns
    -------
    bool
        True if every key is present; False otherwise. Each missing key is
        reported on stdout.
    """
    check = True
    # Accept any sequence for ``required`` (tuple default, caller may pass a
    # list); splat it so concatenation with the key-name args always works.
    for k in [*required, mah_keys, ms_keys, q_keys]:
        if k not in SED_params.keys():
            print(".....Validate SED_params: {} not found".format(k))
            check = False
    return check
def substitute_SFH_fit_failures(
    dc2,
    SED_params,
    source_galaxy_tag,
    seed,
    cosmology,
    snapshot_redshift,
    mah_keys="mah_keys",
    ms_keys="ms_keys",
    q_keys="q_keys",
):
    """
    Replace diffmah/diffstar parameters for galaxies whose SFH fits failed.

    Galaxies flagged with ``<tag>has_fit != 1`` receive parameters drawn from
    the diffstar population model (``mc_diffstarpop``) evaluated at the
    target halo mass (for satellites, the UniverseMachine ``mp`` mass is used
    as a subhalo-mass proxy). Returns the modified ``dc2`` table.
    """
    # check for fit failures
    has_fit = dc2[source_galaxy_tag + "has_fit"] == 1
    # check for replacement
    nfail = np.count_nonzero(~has_fit)
    nmissed = -1  # sentinel: stays -1 when no "nofit_replace" column exists
    use_diffmah_pop = SED_params["use_diffmah_pop"]
    if source_galaxy_tag + "nofit_replace" in dc2.colnames:
        # Flag (restricted to the failed subset, length == nfail) marking
        # failures already replaced upstream by resampled UM fit successes.
        nofit_replace = dc2[source_galaxy_tag + "nofit_replace"][~has_fit] == 1
        n_replace = np.count_nonzero(nofit_replace)
        if n_replace > 0:
            msg = ".....Replaced {} diffmah/diffstar fit failures with {}"
            print("{} resampled UM fit successes".format(msg.format(nfail, n_replace)))
        else:
            msg = ".....No replacements required; {} fit failures, {} replacements"
            print(msg.format(nfail, n_replace))
        nmissed = nfail - n_replace
    if nmissed > 0 or (nmissed < 0 and nfail > 0) or (use_diffmah_pop and nfail > 0):
        msg = ".....Replacing parameters for {} fit failures with diffmah{} pop"
        if nmissed > 0 and not use_diffmah_pop:
            # NOTE(review): ``nofit_replace`` was built from the ``~has_fit``
            # subset, so ``~nofit_replace`` has length nfail, not len(dc2);
            # using it below as a full-column mask looks suspect — confirm.
            failed_mask = ~nofit_replace
            print(".......{}".format(msg.format(nmissed, "/diffstar")))
        else:
            failed_mask = ~has_fit
            txt = "" if use_diffmah_pop else "/diffstar"
            print(".......{}".format(msg.format(nfail, txt)))
        logmh = np.array(np.log10(dc2["target_halo_mass"][failed_mask]))
        # identify satellites without fits and use UM mp value as proxy for subhalo mass
        sat_mask = dc2[source_galaxy_tag + "upid"][failed_mask] != -1
        logmh[sat_mask] = np.array(
            np.log10(dc2[source_galaxy_tag + 'mp'][failed_mask])[sat_mask])
        print('.......replaced {} satellite halo masses with UM mp values'.format(
            np.count_nonzero(sat_mask)))
        logmh = logmh.astype(np.float32)
        ran_key = jran.PRNGKey(seed)
        t_obs = cosmology.age(snapshot_redshift).value
        mc_galpop = mc_diffstarpop(ran_key, t_obs, logmh=logmh)
        mc_mah_params, mc_msk_is_quenched, mc_ms_params, mc_q_params = mc_galpop
        # copy requested mc_params to dc2 table
        key_labels = [mah_keys] if use_diffmah_pop else [mah_keys, ms_keys, q_keys]
        mc_parlist = (
            [mc_mah_params]
            if use_diffmah_pop
            else [mc_mah_params, mc_ms_params, mc_q_params]
        )
        for key_label, mc_params in zip(key_labels, mc_parlist):
            for i, key in enumerate(
                SED_params[key_label]
            ):  # potential bug here if some other subset of fit params selected
                dc2[key][failed_mask] = mc_params[:, i]
                print(".......saving pop model parameters {}".format(key))
    return dc2
def save_sed_info(dc2, _res, SED_params):
    """
    Copy the outputs of the diffsky SED calculation into the mock catalog.

    Parameters
    ----------
    dc2 : table or dict
        Catalog that accepts column assignment (``dc2[name] = array``).
    _res : tuple
        The 14-element result tuple from ``get_diffsky_sed_info_singlemet``.
    SED_params : dict
        Must contain ``"filter_keys"``, a list of ``"<filter>_<band>"``
        strings matching the magnitude-table columns.

    Returns
    -------
    dc2 with dust/burst/bulge parameters and per-filter magnitude columns
    (rest and observer frame, with and without dust) filled in.
    """
    # Unpack the full result tuple (exactly 14 entries expected).
    (
        gal_weights,
        gal_frac_trans_obs,
        gal_frac_trans_rest,
        gal_att_curve_params,
        gal_frac_unobs,
        gal_fburst,
        gal_burstshape_params,
        gal_frac_bulge_t_obs,
        gal_fbulge_params,
        gal_fknot,
        gal_obsmags_nodust,
        gal_restmags_nodust,
        gal_obsmags_dust,
        gal_restmags_dust,
    ) = _res
    # Scalar-per-galaxy columns.
    dc2["fburst"] = gal_fburst
    dc2["bulge_frac"] = gal_frac_bulge_t_obs
    dc2["fknot"] = gal_fknot
    # Multi-column parameter blocks: one catalog column per parameter.
    for j, colname in enumerate(("dust_eb", "dust_delta", "dust_av")):
        dc2[colname] = gal_att_curve_params[:, j]
    for j, colname in enumerate(("burstshape_lgyr_peak", "burstshape_lgyr_max")):
        dc2[colname] = gal_burstshape_params[:, j]
    for j, colname in enumerate(("fbulge_tcrit", "fbulge_early", "fbulge_late")):
        dc2[colname] = gal_fbulge_params[:, j]
    # Magnitudes: dust-attenuated tables get no suffix, dust-free tables get
    # "_nodust"; rest-frame band letters are upper-cased by convention.
    filter_keys = SED_params["filter_keys"]
    mag_tables = {
        "": {"rest": gal_restmags_dust, "obs": gal_obsmags_dust},
        "_nodust": {"rest": gal_restmags_nodust, "obs": gal_obsmags_nodust},
    }
    for suffix, frames in mag_tables.items():
        for frame, mags in frames.items():
            for key in filter_keys:
                parts = key.split("_")
                filt = parts[0]
                band = parts[1].upper() if frame == "rest" else parts[1]
                colname = "{}_{}_{}{}".format(filt, frame, band, suffix)
                dc2[colname] = mags[:, filter_keys.index(key)]
    return dc2
def get_galaxy_sizes(SDSS_R, redshift, cosmology):
    """
    Assign disk and spheroid half-light sizes from an r-band magnitude.

    Parameters
    ----------
    SDSS_R : array
        Rest-frame SDSS r-band magnitudes.
    redshift : array
        Galaxy redshifts (may be empty).
    cosmology : astropy cosmology
        Used to convert physical sizes to angular sizes.

    Returns
    -------
    size_disk, size_sphere, arcsec_per_kpc : arrays
        Late-type and early-type half-light radii plus the angular scale.
    """
    if len(redshift) > 0:
        arcsec_per_kpc = cosmology.arcsec_per_kpc_proper(redshift).value
    else:
        # BUGFIX: ``np.float`` was removed in NumPy 1.24 (deprecated since
        # 1.20) and raised AttributeError here; the builtin float is the
        # equivalent float64 dtype.
        arcsec_per_kpc = np.zeros(0, dtype=float)
    size_disk = mc_size_vs_luminosity_late_type(SDSS_R, redshift)
    size_sphere = mc_size_vs_luminosity_early_type(SDSS_R, redshift)
    return size_disk, size_sphere, arcsec_per_kpc
def get_sky_coords(
    dc2, cosmology, redshift_method="halo", Nzgrid=50, source_galaxy_tag="source_galaxy"
):
    """
    Attach lightcone redshifts and (ra, dec) sky coordinates to the mock.

    redshift_method:
      - "halo" (default): every galaxy inherits its target halo's redshift;
      - "galaxy": satellites additionally get redshifts interpolated from
        their comoving distance on an ``Nzgrid``-point log-spaced grid;
      - None: suppresses the banner message (halo redshifts still copied).

    Returns the modified ``dc2`` table.
    """
    # compute galaxy redshift, ra and dec
    if redshift_method is not None:
        print(
            "\n.....Generating lightcone redshifts using {} method".format(
                redshift_method
            )
        )
    # Comoving radial distance of each galaxy (same units as x/y/z).
    r = np.sqrt(dc2["x"] * dc2["x"] + dc2["y"] * dc2["y"] + dc2["z"] * dc2["z"])
    # Sanity check for implausibly large distances.
    mask = r > 5000.0
    if np.sum(mask) > 0:
        print("WARNING: Found {} co-moving distances > 5000".format(np.sum(mask)))
    dc2["redshift"] = dc2["target_halo_redshift"]  # copy halo redshifts to galaxies
    H0 = cosmology.H0.value
    if redshift_method == "galaxy":
        # generate distance estimates for values between min and max redshifts
        zmin = np.min(dc2["redshift"])
        zmax = np.max(dc2["redshift"])
        zgrid = np.logspace(np.log10(zmin), np.log10(zmax), Nzgrid)
        # Comoving distance rescaled by h (H0/100).
        CDgrid = cosmology.comoving_distance(zgrid) * H0 / 100.0
        # use interpolation to get redshifts for satellites only
        sat_mask = dc2[source_galaxy_tag + "upid"] != -1
        # NOTE(review): CDgrid is an astropy Quantity here — confirm that
        # np.interp treats its values in the same units as ``r``.
        dc2["redshift"][sat_mask] = np.interp(r[sat_mask], CDgrid, zgrid)
    dc2["dec"] = 90.0 - np.arccos(dc2["z"] / r) * 180.0 / np.pi  # co-latitude
    dc2["ra"] = np.arctan2(dc2["y"], dc2["x"]) * 180.0 / np.pi
    dc2["ra"][(dc2["ra"] < 0)] += 360.0  # force value 0->360
    print(
        ".......min/max z for shell: {:.3f}/{:.3f}".format(
            np.min(dc2["redshift"]), np.max(dc2["redshift"])
        )
    )
    return dc2
def get_skyarea(output_mock, Nside):
    """
    Estimate the sky area (sq. deg.) covered by the mock healpix cutout.

    For fine pixelizations (Nside > 8) the nominal single-pixel area is
    returned directly; for coarse ones the area is measured from the set of
    healpix pixels actually occupied by the galaxies' (ra, dec), then snapped
    to the nominal (or half-nominal) value when they agree closely.
    """
    import healpy as hp

    # Nominal area of one healpix pixel at this Nside, in square degrees.
    nominal_skyarea = np.rad2deg(np.rad2deg(4.0 * np.pi / hp.nside2npix(Nside)))
    if Nside > 8:
        skyarea = nominal_skyarea
    else:
        pixels = set()
        for k in output_mock.keys():
            # BUGFIX: ``dict.has_key`` was removed in Python 3; ``in`` also
            # works for astropy Table column membership.
            if "ra" in output_mock[k] and "dec" in output_mock[k]:
                for ra, dec in zip(output_mock[k]["ra"], output_mock[k]["dec"]):
                    pixels.add(hp.ang2pix(Nside, ra, dec, lonlat=True))
        frac = len(pixels) / float(hp.nside2npix(Nside))
        skyarea = frac * np.rad2deg(np.rad2deg(4.0 * np.pi))
        # agreement to about 1 sq. deg.
        if np.isclose(skyarea, nominal_skyarea, rtol=0.02):
            print(
                " Replacing calculated sky area {} with nominal_area".format(skyarea)
            )
            skyarea = nominal_skyarea
        if np.isclose(
            skyarea, nominal_skyarea / 2.0, rtol=0.01
        ):  # check for half-filled pixels
            print(
                " Replacing calculated sky area {} with (nominal_area)/2".format(
                    skyarea
                )
            )
            skyarea = nominal_skyarea / 2.0
    return skyarea
def write_output_mock_to_disk(
    output_color_mock_fname,
    output_mock,
    commit_hash,
    seed,
    synthetic_params,
    cutout_number,
    Nside,
    cosmology,
    versionMajor=1,
    versionMinor=1,
    versionMinorMinor=1,
):
    """
    Write the assembled mock to the specified output file in hdf5 format.

    A ``metaData`` group records provenance (commit hash, seed, version,
    cosmology, sky area, cutout number, software versions); each entry of
    ``output_mock`` is then written as its own group with one dataset per
    column.
    """
    print(
        "\n...Writing to file {} using commit hash {}".format(
            output_color_mock_fname, commit_hash
        )
    )
    hdfFile = h5py.File(output_color_mock_fname, "w")
    hdfFile.create_group("metaData")
    hdfFile["metaData"]["commit_hash"] = commit_hash
    hdfFile["metaData"]["seed"] = seed
    hdfFile["metaData"]["versionMajor"] = versionMajor
    hdfFile["metaData"]["versionMinor"] = versionMinor
    hdfFile["metaData"]["versionMinorMinor"] = versionMinorMinor
    hdfFile["metaData"]["H_0"] = cosmology.H0.value
    hdfFile["metaData"]["Omega_matter"] = cosmology.Om0
    hdfFile["metaData"]["Omega_b"] = cosmology.Ob0
    hdfFile["metaData"]["skyArea"] = get_skyarea(output_mock, Nside)
    hdfFile["metaData"]["healpix_cutout_number"] = cutout_number
    if synthetic_params and not synthetic_params["skip_synthetics"]:
        synthetic_halo_minimum_mass = synthetic_params["synthetic_halo_minimum_mass"]
        hdfFile["metaData"]["synthetic_halo_minimum_mass"] = synthetic_halo_minimum_mass
    # save software versions
    versions = infer_software_versions()
    for k, v in versions.items():
        hdfFile["metaData"][k] = v
    # One hdf5 group per mock sub-catalog, one dataset per column.
    for k, v in output_mock.items():
        gGroup = hdfFile.create_group(k)
        check_time = time()
        for tk in v.keys():
            gGroup[tk] = v[tk]
        print(
            ".....time to write group {} = {:.4f} secs".format(k, time() - check_time)
        )
    check_time = time()
    hdfFile.close()
    # BUGFIX: this message was printed twice; report the close time once.
    print(".....time to close file {:.4f} secs".format(time() - check_time))
|
LSSTDESCREPO_NAMElsstdesc-diffskyPATH_START.@lsstdesc-diffsky_extracted@lsstdesc-diffsky-main@lsstdesc_diffsky@write_mock_to_disk_singlemet.py@.PATH_END.py
|
{
"filename": "test_BackgroundSources.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/tests/BackgroundSources/test_BackgroundSources.py",
"type": "Python"
}
|
import unittest
import os
import EXOSIMS
import EXOSIMS.Prototypes.BackgroundSources
import EXOSIMS.BackgroundSources
from EXOSIMS.util.get_module import get_module
import pkgutil
from tests.TestSupport.Info import resource_path
import json
from tests.TestSupport.Utilities import RedirectStreams
from EXOSIMS.Prototypes.TargetList import TargetList
import numpy as np
import astropy.units as u
import sys
from io import StringIO
from astropy.coordinates import SkyCoord
class TestBackgroundSources(unittest.TestCase):
    """
    Global BackgroundSources tests.

    Applied to all implementations, for overloaded methods only.
    Any implementation-specific methods, or to test specific new
    method functionality, separate tests are needed.
    """

    def setUp(self):
        # Sink used to silence stdout chatter from module construction.
        self.dev_null = open(os.devnull, "w")
        modtype = getattr(
            EXOSIMS.Prototypes.BackgroundSources.BackgroundSources, "_modtype"
        )
        pkg = EXOSIMS.BackgroundSources
        # Collect the prototype plus every concrete implementation found by
        # walking the BackgroundSources package.
        self.allmods = [get_module(modtype)]
        for loader, module_name, is_pkg in pkgutil.walk_packages(
            pkg.__path__, pkg.__name__ + "."
        ):
            if not is_pkg:
                mod = get_module(module_name.split(".")[-1], modtype)
                self.assertTrue(
                    mod._modtype is modtype, "_modtype mismatch for %s" % mod.__name__
                )
                self.allmods.append(mod)
        # need a TargetList object for testing
        # script = resource_path("test-scripts/template_prototype_testing.json")
        script = resource_path("test-scripts/template_minimal.json")
        with open(script) as f:
            spec = json.loads(f.read())
        spec["ntargs"] = 10  # generate fake targets list with 10 stars
        with RedirectStreams(stdout=self.dev_null):
            self.TL = TargetList(**spec)
        # assign different coordinates
        # NOTE(review): ``distance`` is passed without an astropy unit here,
        # unlike ra/dec — confirm SkyCoord accepts a bare array for distance.
        self.TL.coords = SkyCoord(
            ra=np.random.uniform(low=0, high=180, size=self.TL.nStars) * u.deg,
            dec=np.random.uniform(low=-90, high=90, size=self.TL.nStars) * u.deg,
            distance=np.random.uniform(low=1, high=10, size=self.TL.nStars),
        )

    def tearDown(self):
        # Close the sink opened in setUp to avoid a file-handle leak.
        self.dev_null.close()

    def test_dNbackground(self):
        """
        Test to ensure that dN returned has correct length, units, and is >= 0.
        """
        coords = self.TL.coords
        intDepths = np.random.uniform(15.0, 25.0, len(coords))
        for mod in self.allmods:
            with RedirectStreams(stdout=self.dev_null):
                obj = mod()
            dN = obj.dNbackground(coords, intDepths)
            self.assertTrue(
                len(dN) == len(intDepths),
                "dNbackground returns different length than input for %s"
                % mod.__name__,
            )
            self.assertTrue(
                dN.unit == 1 / u.arcmin**2,
                "dNbackground does not return 1/arcmin**2 for %s" % mod.__name__,
            )
            self.assertTrue(
                np.all(dN >= 0.0),
                "dNbackground returns negative values for %s" % mod.__name__,
            )

    def test_str(self):
        """
        Test __str__ method, for full coverage and check that all modules have
        required attributes.
        """
        for mod in self.allmods:
            # Only exercise classes that define their own __str__.
            if "__str__" not in mod.__dict__:
                continue
            with RedirectStreams(stdout=self.dev_null):
                obj = mod()
            original_stdout = sys.stdout
            # sys.stdout = StringIO.StringIO()
            sys.stdout = StringIO()
            # call __str__ method
            result = obj.__str__()
            # examine what was printed
            contents = sys.stdout.getvalue()
            self.assertEqual(type(contents), type(""))
            sys.stdout.close()
            # it also returns a string, which is not necessary
            self.assertEqual(type(result), type(""))
            # put stdout back
            sys.stdout = original_stdout
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@tests@BackgroundSources@test_BackgroundSources.py@.PATH_END.py
|
{
"filename": "_bgcolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/icicle/hoverlabel/_bgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``icicle.hoverlabel.bgcolor`` plotly property."""

    def __init__(
        self, plotly_name="bgcolor", parent_name="icicle.hoverlabel", **kwargs
    ):
        super(BgcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # array_ok: per-element arrays of colors are accepted.
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@icicle@hoverlabel@_bgcolor.py@.PATH_END.py
|
{
"filename": "_y0.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/_y0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Y0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``bar.y0`` plotly property (any value accepted)."""

    def __init__(self, plotly_name="y0", parent_name="bar", **kwargs):
        super(Y0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # anim: property participates in plotly animations.
            anim=kwargs.pop("anim", True),
            edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@_y0.py@.PATH_END.py
|
{
"filename": "ChandrasekharDynamicalFrictionForce.py",
"repo_name": "jobovy/galpy",
"repo_path": "galpy_extracted/galpy-main/galpy/potential/ChandrasekharDynamicalFrictionForce.py",
"type": "Python"
}
|
###############################################################################
# ChandrasekharDynamicalFrictionForce: Class that implements the
# Chandrasekhar dynamical friction
###############################################################################
import copy
import hashlib
import numpy
from scipy import interpolate, special
from ..util import conversion
from .DissipativeForce import DissipativeForce
from .Potential import _check_c, evaluateDensities
from .Potential import flatten as flatten_pot
_INVSQRTTWO = 1.0 / numpy.sqrt(2.0)
_INVSQRTPI = 1.0 / numpy.sqrt(numpy.pi)
class ChandrasekharDynamicalFrictionForce(DissipativeForce):
"""Class that implements the Chandrasekhar dynamical friction force
.. math::
\\mathbf{F}(\\mathbf{x},\\mathbf{v}) = -2\\pi\\,[G\\,M]\\,[G\\,\\rho(\\mathbf{x})]\\,\\ln[1+\\Lambda^2] \\,\\left[\\mathrm{erf}(X)-\\frac{2X}{\\sqrt{\\pi}}\\exp\\left(-X^2\\right)\\right]\\,\\frac{\\mathbf{v}}{|\\mathbf{v}|^3}\\,
on a mass (e.g., a satellite galaxy or a black hole) :math:`M` at position :math:`\\mathbf{x}` moving at velocity :math:`\\mathbf{v}` through a background density :math:`\\rho`. The quantity :math:`X` is the usual :math:`X=|\\mathbf{v}|/[\\sqrt{2}\\sigma_r(r)`. The factor :math:`\\Lambda` that goes into the Coulomb logarithm is taken to be
.. math::
\\Lambda = \\frac{r/\\gamma}{\\mathrm{max}\\left(r_{\\mathrm{hm}},GM/|\\mathbf{v}|^2\\right)}\\,,
where :math:`\\gamma` is a constant. This :math:`\\gamma` should be the absolute value of the logarithmic slope of the density :math:`\\gamma = |\\mathrm{d} \\ln \\rho / \\mathrm{d} \\ln r|`, although for :math:`\\gamma<1` it is advisable to set :math:`\\gamma=1`. Implementation here roughly follows [2]_ and earlier work.
"""
def __init__(
self,
amp=1.0,
GMs=0.1,
gamma=1.0,
rhm=0.0,
dens=None,
sigmar=None,
const_lnLambda=False,
minr=0.0001,
maxr=25.0,
nr=501,
ro=None,
vo=None,
):
"""
Initialize a Chandrasekhar Dynamical Friction force [1]_.
Parameters
----------
amp : float
Amplitude to be applied to the potential (default: 1).
GMs : float or Quantity
Satellite mass; can be a Quantity with units of mass or Gxmass; can be adjusted after initialization by setting obj.GMs= where obj is your ChandrasekharDynamicalFrictionForce instance (note that the mass of the satellite can *not* be changed simply by multiplying the instance by a number, because he mass is not only used as an amplitude).
rhm : float or Quantity
Half-mass radius of the satellite (set to zero for a black hole); can be adjusted after initialization by setting obj.rhm= where obj is your ChandrasekharDynamicalFrictionForce instance.
gamma : float
Free-parameter in :math:`\\Lambda`.
dens : Potential instance or list thereof, optional
Potential instance or list thereof that represents the density [default: LogarithmicHaloPotential(normalize=1.,q=1.)].
sigmar : callable, optional
Function that gives the velocity dispersion as a function of r (has to be in natural units!); if None, computed from the dens potential using the spherical Jeans equation (in galpy.df.jeans) assuming zero anisotropy; if set to a lambda function, *the object cannot be pickled* (so set it to a real function).
const_lnLambda : bool, optional
If set to a number, use a constant ln(Lambda) instead with this value.
minr : float or Quantity, optional
Minimum r at which to apply dynamical friction: at r < minr, friction is set to zero.
maxr : float or Quantity, optional
Maximum r for which sigmar gets interpolated; for best performance set this to the maximum r you will consider.
nr : int, optional
Number of radii to use in the interpolation of sigmar.
ro : float or Quantity, optional
Distance scale for translation into internal units (default from configuration file).
vo : float or Quantity, optional
Velocity scale for translation into internal units (default from configuration file).
Notes
-----
- 2011-12-26 - Started - Bovy (NYU)
- 2018-03-18 - Re-started: updated to r dependent Lambda form and integrated into galpy framework - Bovy (UofT)
- 2018-07-23 - Calculate sigmar from the Jeans equation and interpolate it; allow GMs and rhm to be set on the fly - Bovy (UofT)
References
----------
.. [1] Chandrasekhar, S. (1943), Astrophysical Journal, 97, 255. ADS: http://adsabs.harvard.edu/abs/1943ApJ....97..255C.
.. [2] Petts, J. A., Gualandris, A., Read, J. I., & Bovy, J. (2016), Monthly Notices of the Royal Astronomical Society, 463, 858. ADS: http://adsabs.harvard.edu/abs/2016MNRAS.463..858P.
"""
DissipativeForce.__init__(self, amp=amp * GMs, ro=ro, vo=vo, amp_units="mass")
rhm = conversion.parse_length(rhm, ro=self._ro)
minr = conversion.parse_length(minr, ro=self._ro)
maxr = conversion.parse_length(maxr, ro=self._ro)
self._gamma = gamma
self._ms = (
self._amp / amp
) # from handling in __init__ above, should be ms in galpy units
self._rhm = rhm
self._minr = minr
self._maxr = maxr
self._dens_kwarg = dens # for pickling
self._sigmar_kwarg = sigmar # for pickling
# Parse density
if dens is None:
from .LogarithmicHaloPotential import LogarithmicHaloPotential
dens = LogarithmicHaloPotential(normalize=1.0, q=1.0)
if sigmar is None: # we know this solution!
sigmar = lambda x: _INVSQRTTWO
dens = flatten_pot(dens)
self._dens_pot = dens
self._dens = lambda R, z, phi=0.0, t=0.0: evaluateDensities(
self._dens_pot, R, z, phi=phi, t=t, use_physical=False
)
if sigmar is None:
from ..df import jeans
sigmar = lambda x: jeans.sigmar(
self._dens_pot, x, beta=0.0, use_physical=False
)
self._sigmar_rs_4interp = numpy.linspace(self._minr, self._maxr, nr)
self._sigmars_4interp = numpy.array(
[sigmar(x) for x in self._sigmar_rs_4interp]
)
if numpy.any(numpy.isnan(self._sigmars_4interp)):
# Check for case where density is zero, in that case, just
# paint in the nearest neighbor for the interpolation
# (doesn't matter in the end, because force = 0 when dens = 0)
nanrs_indx = numpy.isnan(self._sigmars_4interp)
if numpy.all(
numpy.array(
[
self._dens(r * _INVSQRTTWO, r * _INVSQRTTWO)
for r in self._sigmar_rs_4interp[nanrs_indx]
]
)
== 0.0
):
self._sigmars_4interp[nanrs_indx] = interpolate.interp1d(
self._sigmar_rs_4interp[True ^ nanrs_indx],
self._sigmars_4interp[True ^ nanrs_indx],
kind="nearest",
fill_value="extrapolate",
)(self._sigmar_rs_4interp[nanrs_indx])
self.sigmar_orig = sigmar
self.sigmar = interpolate.InterpolatedUnivariateSpline(
self._sigmar_rs_4interp, self._sigmars_4interp, k=3
)
if const_lnLambda:
self._lnLambda = const_lnLambda
else:
self._lnLambda = False
self._amp *= 4.0 * numpy.pi
self._force_hash = None
self.hasC = _check_c(self._dens_pot, dens=True)
return None
def GMs(self, gms):
gms = conversion.parse_mass(gms, ro=self._ro, vo=self._vo)
self._amp *= gms / self._ms
self._ms = gms
# Reset the hash
self._force_hash = None
return None
GMs = property(None, GMs)
def rhm(self, new_rhm):
self._rhm = conversion.parse_length(new_rhm, ro=self._ro)
# Reset the hash
self._force_hash = None
return None
rhm = property(None, rhm)
def lnLambda(self, r, v):
"""
Evaluate the Coulomb logarithm ln Lambda.
Parameters
----------
r : float
Spherical radius (natural units).
v : float
Current velocity in cylindrical coordinates (natural units).
Returns
-------
lnLambda : float
Coulomb logarithm.
Notes
-----
- 2018-03-18 - Started - Bovy (UofT)
"""
if self._lnLambda:
lnLambda = self._lnLambda
else:
GMvs = self._ms / v**2.0
if GMvs < self._rhm:
Lambda = r / self._gamma / self._rhm
else:
Lambda = r / self._gamma / GMvs
lnLambda = 0.5 * numpy.log(1.0 + Lambda**2.0)
return lnLambda
def _calc_force(self, R, phi, z, v, t):
r = numpy.sqrt(R**2.0 + z**2.0)
if r < self._minr:
self._cached_force = 0.0
else:
vs = numpy.sqrt(v[0] ** 2.0 + v[1] ** 2.0 + v[2] ** 2.0)
if r > self._maxr:
sr = self.sigmar_orig(r)
else:
sr = self.sigmar(r)
X = vs * _INVSQRTTWO / sr
Xfactor = special.erf(X) - 2.0 * X * _INVSQRTPI * numpy.exp(-(X**2.0))
lnLambda = self.lnLambda(r, vs)
self._cached_force = (
-self._dens(R, z, phi=phi, t=t) / vs**3.0 * Xfactor * lnLambda
)
def _Rforce(self, R, z, phi=0.0, t=0.0, v=None):
new_hash = hashlib.md5(
numpy.array([R, phi, z, v[0], v[1], v[2], t])
).hexdigest()
if new_hash != self._force_hash:
self._calc_force(R, phi, z, v, t)
return self._cached_force * v[0]
def _phitorque(self, R, z, phi=0.0, t=0.0, v=None):
new_hash = hashlib.md5(
numpy.array([R, phi, z, v[0], v[1], v[2], t])
).hexdigest()
if new_hash != self._force_hash:
self._calc_force(R, phi, z, v, t)
return self._cached_force * v[1] * R
def _zforce(self, R, z, phi=0.0, t=0.0, v=None):
    """Vertical (z) component of the friction acceleration."""
    new_hash = hashlib.md5(
        numpy.array([R, phi, z, v[0], v[1], v[2], t])
    ).hexdigest()
    if new_hash != self._force_hash:
        self._calc_force(R, phi, z, v, t)
        # Store the hash so the cache is actually reused (was missing)
        self._force_hash = new_hash
    return self._cached_force * v[2]
# Pickling functions
def __getstate__(self):
    """Return a picklable state dict, dropping the non-picklable lambdas."""
    pdict = copy.copy(self.__dict__)
    # Remove the density lambda; it is rebuilt in __setstate__
    del pdict["_dens"]
    if self._sigmar_kwarg is None:
        # sigmar_orig was constructed internally and can be rebuilt in
        # __setstate__, so drop it here. An object set up with a
        # user-provided sigmar function keeps it instead: if that function
        # can be pickled everything is fine, otherwise pickling raises,
        # which deliberately disallows such objects.
        del pdict["sigmar_orig"]
    return pdict
def __setstate__(self, pdict):
    """Restore pickled state and rebuild the lambdas dropped by __getstate__."""
    self.__dict__ = pdict
    # Re-setup _dens: evaluate the density of the stored potential
    self._dens = lambda R, z, phi=0.0, t=0.0: evaluateDensities(
        self._dens_pot, R, z, phi=phi, t=t, use_physical=False
    )
    # Re-setup sigmar_orig
    if self._dens_kwarg is None and self._sigmar_kwarg is None:
        # Default: constant dispersion 1/sqrt(2) in natural units
        self.sigmar_orig = lambda x: _INVSQRTTWO
    else:
        # Solve the isotropic (beta=0) Jeans equation for the stored potential
        from ..df import jeans
        self.sigmar_orig = lambda x: jeans.sigmar(
            self._dens_pot, x, beta=0.0, use_physical=False
        )
    return None
|
jobovyREPO_NAMEgalpyPATH_START.@galpy_extracted@galpy-main@galpy@potential@ChandrasekharDynamicalFrictionForce.py@.PATH_END.py
|
{
"filename": "dfitpack.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/interpolate/dfitpack.py",
"type": "Python"
}
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
# Deprecated public names of the old `scipy.interpolate.dfitpack` module;
# __getattr__ below forwards them (with a warning) to the private `_dfitpack`.
__all__ = [  # noqa: F822
    'bispeu',
    'bispev',
    'curfit',
    'dblint',
    'fpchec',
    'fpcurf0',
    'fpcurf1',
    'fpcurfm1',
    'parcur',
    'parder',
    'pardeu',
    'pardtc',
    'percur',
    'regrid_smth',
    'regrid_smth_spher',
    'spalde',
    'spherfit_lsq',
    'spherfit_smth',
    'splder',
    'splev',
    'splint',
    'sproot',
    'surfit_lsq',
    'surfit_smth',
    'types',
]
def __dir__():
    """Limit dir()/tab-completion to the deprecated public names."""
    return __all__
def __getattr__(name):
    # Lazily resolve deprecated attribute access, emitting a deprecation
    # warning that redirects users to the private `_dfitpack` module.
    return _sub_module_deprecation(sub_package="interpolate", module="dfitpack",
                                   private_modules=["_dfitpack"], all=__all__,
                                   attribute=name)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@interpolate@dfitpack.py@.PATH_END.py
|
{
"filename": "kmpfit_gausshermite.py",
"repo_name": "kapteyn-astro/kapteyn",
"repo_path": "kapteyn_extracted/kapteyn-master/doc/source/EXAMPLES/kmpfit_gausshermite.py",
"type": "Python"
}
|
#!/usr/bin/env python
#------------------------------------------------------------
# Script which demonstrates how to find the best-fit
# parameters of a Gauss-Hermite line shape model
#
# Vog, 26 Mar 2012
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from scipy.special import wofz
from scipy.optimize import fsolve
from kapteyn import kmpfit
ln2 = numpy.log(2)
PI = numpy.pi
from math import sqrt
def gausshermiteh3h4(x, A, x0, s, h3, h4):
    """Evaluate the Gauss-Hermite (h3/h4) line-shape model at *x*.

    With F = (x - x0)/s the model is
    E = A exp(-F^2/2) {1 + h3 [c1 F + c3 F^3] + h4 [c0 + c2 F^2 + c4 F^4]}.
    """
    # Hermite-polynomial expansion coefficients
    c0 = sqrt(6.0) / 4.0
    c1 = -sqrt(3.0)
    c2 = -sqrt(6.0)
    c3 = 2.0 * sqrt(3.0) / 3.0
    c4 = sqrt(6.0) / 3.0
    F = (x - x0) / s
    gauss = A * numpy.exp(-0.5 * F * F)
    hermite = 1.0 + h3 * F * (c3 * F * F + c1) + h4 * (c0 + F * F * (c2 + c4 * F * F))
    return gauss * hermite
def hermite2gauss(par, dpar):
    """Convert Gauss-Hermite parameters to Gaussian-like line parameters.

    Parameters
    ----------
    par : sequence
        (amplitude, x0, sigma, h3, h4) of the Gauss-Hermite model.
    dpar : sequence
        1-sigma errors of those parameters, same order.

    Returns
    -------
    dict
        Keys: xmax, amplitude, area, mean, dispersion, skewness, kurtosis and
        the propagated errors d_area, d_mean, d_dispersion, d_skewness,
        d_kurtosis.

    Notes
    -----
    We use the first derivative of the Gauss-Hermite function to find the
    maximum, usually around 'x0', which is the center of the (pure) Gaussian
    part of the function. If F = (x-x0)/s then the function for which we want
    the zeros is A0+A1*F+A2*F^2+A3*F^3+A4*F^4+A5*F^5 = 0 with
    c0 = 1/4*sqrt(6), c1 = -sqrt(3), c2 = -sqrt(6),
    c3 = 2/3*sqrt(3), c4 = 1/3*sqrt(6).
    """
    sqrt2pi = sqrt(2.0*PI)
    amp, x0, s, h3, h4 = par
    damp, dx0, ds, dh3, dh4 = dpar  # The errors in those parameters
    c0 = sqrt(6.0)/4.0
    c1 = -sqrt(3.0)
    c2 = -sqrt(6.0)
    c3 = 2.0*sqrt(3.0)/3.0
    c4 = sqrt(6.0)/3.0
    # Coefficients of the quintic (in F) that is the derivative of the GH profile
    A = numpy.zeros(6)
    A[0] = -c1*h3
    A[1] = h4*(c0-2.0*c2) + 1.0
    A[2] = h3*(c1-3.0*c3)
    A[3] = h4*(c2 - 4.0*c4)
    A[4] = c3*h3
    A[5] = c4*h4
    # Horner-form derivative; its root near 0 is the (scaled) position of the maximum
    fx = lambda x: A[0] + x*(A[1]+x*(A[2]+x*(A[3]+x*(A[4]+x*A[5]))))
    xr = fsolve(fx, 0, full_output=True)
    xm = s*xr[0] + x0
    ampmax = gausshermiteh3h4(xm, amp, x0, s, h3, h4)
    # Get line strength (area) with first-order error propagation
    f = 1.0 + h4 * sqrt(6.0) / 4.0
    area = amp * s * f * sqrt2pi
    d_area = sqrt2pi * sqrt(s*s*f*f*damp*damp +
                            amp*amp*f*f*ds*ds +
                            3.0*amp*amp*s*s*dh4*dh4/8.0)
    # Get mean: shifted from x0 by the h3 (skewness) term
    mean = x0 + sqrt(3.0)*h3*s
    d_mean = sqrt(dx0*dx0 + 3.0*h3*h3*ds*ds + 3.0*s*s*dh3*dh3)
    # Get dispersion: h4 broadens/narrows the Gaussian sigma
    f = 1.0 + h4*sqrt(6.0)
    dispersion = abs(s * f)
    d_dispersion = sqrt(f*f*ds*ds + 6.0*s*s*dh4*dh4)
    # Skewness coefficient, proportional to h3
    f = 4.0 * sqrt(3.0)
    skewness = f * h3
    d_skewness = f * dh3
    # Kurtosis coefficient, proportional to h4
    f = 8.0 * sqrt(6.0)
    kurtosis = f * h4
    d_kurtosis = f * dh4
    res = dict(xmax=xm, amplitude=ampmax, area=area, mean=mean, dispersion=dispersion,
               skewness=skewness, kurtosis=kurtosis, d_area=d_area, d_mean=d_mean,
               d_dispersion=d_dispersion, d_skewness=d_skewness, d_kurtosis=d_kurtosis)
    return res
def voigt(x, y):
    """Real part of the Faddeeva function w(z) at z = x + iy.

    w(z) = exp(-z^2) erfc(-iz) is the complex probability function, available
    in scipy as wofz(); its real part is the (normalized) Voigt function.
    """
    return wofz(x + 1j * y).real
def Voigt(nu, alphaD, alphaL, nu_0, A):
    """Voigt line shape in terms of its physical parameters.

    alphaD and alphaL are the Doppler and Lorentz half-widths, nu_0 the line
    center and A the integrated line strength.
    """
    scale = numpy.sqrt(ln2)
    u = (nu - nu_0) / alphaD * scale
    a = alphaL / alphaD * scale
    return A * scale / (alphaD * numpy.sqrt(numpy.pi)) * voigt(u, a)
def funcV(p, x):
    """Voigt model: p = (alphaD, alphaL, nu_0, I, z0), z0 a constant offset."""
    alphaD, alphaL, nu_0, I, z0 = p
    return Voigt(x, alphaD, alphaL, nu_0, I) + z0
def funcG(p, x):
    """Gaussian model with constant offset: p = (A, mu, sigma, zerolev)."""
    A, mu, sigma, zerolev = p
    u = (x - mu) / sigma
    return A * numpy.exp(-0.5 * u * u) + zerolev
def funcGH(p, x):
    """Gauss-Hermite model with constant offset: p = (A, xo, s, h3, h4, zerolev)."""
    A, xo, s, h3, h4, zerolev = p
    return gausshermiteh3h4(x, A, xo, s, h3, h4) + zerolev
def residualsV(p, data):
    """Weighted residuals (y - model) / err for the Voigt model."""
    x, y, err = data
    return (y - funcV(p, x)) / err
def residualsG(p, data):
    """Weighted residuals (y - model) / err for the Gaussian model."""
    x, y, err = data
    return (y - funcG(p, x)) / err
def residualsGH(p, data):
    """Weighted residuals (y - model) / err for the Gauss-Hermite model."""
    x, y, err = data
    return (y - funcGH(p, x)) / err
# Artificial data derived from a GH-series: known parameters + Gaussian noise
x = numpy.linspace(853, 859, 30)
A1 = -2.18    # amplitude (negative: absorption line)
X1 = 855.54   # line centre
S1 = 0.55     # width
h31 = 0.17    # skewness (h3) term
h41 = 0.0     # kurtosis (h4) term
z01 = 6.95    # constant background level
y = gausshermiteh3h4(x, A1, X1, S1, h31, h41) + z01
N = len(y)
y += numpy.random.normal(0.0, 0.05, N)  # Add some noise (unseeded: differs per run)
err = numpy.ones(N)
# Initial estimates for the Voigt fit: (alphaD, alphaL, nu_0, A, z0)
A = -2
alphaD = 0.5
alphaL = 0.5
z0 = 6
nu_0 = 855
p0 = [alphaD, alphaL, nu_0, A, z0]
# Do the fit; constrain the two width parameters to be non-negative
fitter = kmpfit.Fitter(residuals=residualsV, data=(x,y,err))
fitter.parinfo = [{'limits':(0,None)}, {'limits':(0,None)}, {}, {}, {}]
fitter.fit(params0=p0)
print("\n========= Fit results Voigt profile ==========")
print("Initial params:", fitter.params0)
print("Params: ", fitter.params)
print("Iterations: ", fitter.niter)
print("Function ev: ", fitter.nfev)
print("Uncertainties: ", fitter.xerror)
print("dof: ", fitter.dof)
print("chi^2, rchi2: ", fitter.chi2_min, fitter.rchi2_min)
print("stderr: ", fitter.stderr)
print("Status: ", fitter.status)
alphaD, alphaL, nu_0, I, z0V = fitter.params
# Empirical Voigt-FWHM approximation from the fitted half-widths
c1 = 1.0692
c2 = 0.86639
hwhm = 0.5*(c1*alphaL+numpy.sqrt(c2*alphaL**2+4*alphaD**2))
print("\nFWHM Voigt profile: ", 2*hwhm)
# Peak amplitude: value of the fitted Voigt profile at the line centre
f = numpy.sqrt(ln2)
Y = alphaL/alphaD * f
amp = I/alphaD*numpy.sqrt(ln2/numpy.pi)*voigt(0,Y)
print("Amplitude Voigt profile:", amp)
print("Area under profile: ", I)
# Fit the Gaussian model: initial estimates (A, mu, sigma, zerolev)
p0 = [-3, 855, 0.5, 6.3]
fitterG = kmpfit.Fitter(residuals=residualsG, data=(x,y,err))
#fitterG.parinfo = [{}, {}, {}, {}, {}] # Take zero level fixed in fit
fitterG.fit(params0=p0)
print("\n========= Fit results Gaussian profile ==========")
print("Initial params:", fitterG.params0)
print("Params: ", fitterG.params)
print("Iterations: ", fitterG.niter)
print("Function ev: ", fitterG.nfev)
print("Uncertainties: ", fitterG.xerror)
print("dof: ", fitterG.dof)
print("chi^2, rchi2: ", fitterG.chi2_min, fitterG.rchi2_min)
print("stderr: ", fitterG.stderr)
print("Status: ", fitterG.status)
fwhmG = 2*numpy.sqrt(2*numpy.log(2))*fitterG.params[2]
print("FWHM Gaussian: ", fwhmG)
# Store the FITTED background for the plot (was params0[-1], i.e. the
# initial guess, so the plotted 'Background G' line was wrong)
z0G = fitterG.params[-1]
# Fit the Gauss-Hermite model
# Initial estimates for A, xo, s, h3, h4, z0
p0 = [-3, 855, 0.5, 0, 0, 6.3]
fitterGH = kmpfit.Fitter(residuals=residualsGH, data=(x,y,err))
#fitterGH.parinfo = [{}, {}, {}, {}, {}] # Take zero level fixed in fit
fitterGH.fit(params0=p0)
# Header fixed: this section reports the Gauss-Hermite fit (was mislabeled
# "Gaussian profile", a copy-paste of the previous section's header)
print("\n========= Fit results Gauss-Hermite profile ==========")
print("Initial params:", fitterGH.params0)
print("Params: ", fitterGH.params)
print("Iterations: ", fitterGH.niter)
print("Function ev: ", fitterGH.nfev)
print("Uncertainties: ", fitterGH.xerror)
print("dof: ", fitterGH.dof)
print("chi^2, rchi2: ", fitterGH.chi2_min, fitterGH.rchi2_min)
print("stderr: ", fitterGH.stderr)
print("Status: ", fitterGH.status)
A, x0, s, h3, h4, z0GH = fitterGH.params
# Convert GH parameters to Gaussian-like quantities:
# xmax, amplitude, area, mean, dispersion, skewness, kurtosis (+ errors)
res = hermite2gauss(fitterGH.params[:-1], fitterGH.stderr[:-1])
print("Gauss-Hermite max=%g at x=%g"%(res['amplitude'], res['xmax']))
print("Area :", res['area'], '+-', res['d_area'])
print("Mean (X0) :", res['mean'], '+-', res['d_mean'])
print("Dispersion:", res['dispersion'], '+-', res['d_dispersion'])
print("Skewness :", res['skewness'], '+-', res['d_skewness'])
print("Kurtosis :", res['kurtosis'], '+-', res['d_kurtosis'])
# Plot the data together with the three fitted models
rc('legend', fontsize=6)
fig = figure()
frame1 = fig.add_subplot(1,1,1)
xd = numpy.linspace(x.min(), x.max(), 500)
frame1.plot(x, y, 'bo', label="data")
label = "Model with Voigt function"
frame1.plot(xd, funcV(fitter.params,xd), 'g', label=label)
label = "Model with Gaussian function"
frame1.plot(xd, funcG(fitterG.params,xd), 'm', ls='--', label=label)
label = "Model with Gauss-Hermite function"
frame1.plot(xd, funcGH(fitterGH.params,xd), 'c', ls='--', label=label)
frame1.plot((nu_0-hwhm,nu_0+hwhm), (z0V+amp/2,z0V+amp/2), 'r', label='fwhm')
frame1.plot(xd, [z0V]*len(xd), "y", label='Background Voigt')
frame1.plot(xd, [z0G]*len(xd), "y", ls="-.", label='Background G')
frame1.plot(xd, [z0GH]*len(xd), "y", ls="--", label='Background G-H')
frame1.set_xlabel(r"$\nu$")
frame1.set_ylabel(r"$\phi(\nu)$")
title = "Profile data with Voigt- vs. Gaussian model\n"
t = (res['area'], res['mean'], res['dispersion'], res['skewness'], res['kurtosis'])
# Raw string: avoids the invalid escape sequences (\g, \s) that Python would
# warn about, while passing the same characters through to mathtext.
# (The unused chi^2 tuple previously built here has been removed.)
title += r"GH: $\gamma_{gh}$=%.1f $x_{0_{gh}}$=%.1f $\sigma_{gh}$ = %.2f $\xi_1$=%.2f $\xi_f$=%.2f" % t
frame1.set_title(title, fontsize=9)
frame1.grid(True)
leg = frame1.legend(loc=4)
show()
|
kapteyn-astroREPO_NAMEkapteynPATH_START.@kapteyn_extracted@kapteyn-master@doc@source@EXAMPLES@kmpfit_gausshermite.py@.PATH_END.py
|
{
"filename": "smooth.py",
"repo_name": "revoltek/losoto",
"repo_path": "losoto_extracted/losoto-master/losoto/operations/smooth.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from losoto.lib_operations import *
from losoto._logging import logger as logging
logging.debug('Loading SMOOTH module.')
def _run_parser(soltab, parser, step):
    """Read this step's options from the parset and delegate to run()."""
    axesToSmooth = parser.getarraystr( step, 'axesToSmooth' ) # no default
    size = parser.getarrayint( step, 'size', [] )
    mode = parser.getstr( step, 'mode', 'runningmedian' )
    degree = parser.getint( step, 'degree', 1 )
    replace = parser.getbool( step, 'replace', False )
    log = parser.getbool( step, 'log', False )
    refAnt = parser.getstr( step, 'refAnt', '' )

    # Warn about unknown/misspelled option names in the parset
    parser.checkSpelling( step, soltab, ['axesToSmooth', 'size', 'mode', 'degree', 'replace', 'log', 'refAnt'])
    return run(soltab, axesToSmooth, size, mode, degree, replace, log, refAnt)
def _savitzky_golay(y, window_size, order):
    """Savitzky-Golay filter that tolerates NaNs.

    NaN samples are filled by linear interpolation before filtering and
    restored (as NaN) afterwards, so flagged samples stay flagged.
    """
    from scipy.signal import savgol_filter

    bad = np.isnan(y)
    if np.any(bad):
        idx = np.array(range(len(y)))
        filled = np.interp(idx, idx[~bad], y[~bad])
    else:
        filled = y
    smoothed = savgol_filter(filled, window_size, order)
    # Put the NaNs back where the input was flagged
    if np.any(bad):
        smoothed[bad] = np.nan
    return smoothed
def run( soltab, axesToSmooth, size=None, mode='runningmedian', degree=1, replace=False, log=False, refAnt=''):
    """
    A smoothing function: running-median on an arbitrary number of axes, running polyfit and Savitzky-Golay on one axis, or set all solutions to the mean/median value.

    WEIGHT: flag ready.

    Parameters
    ----------
    axesToSmooth : array of str
        Axes used to compute the smoothing function.
    size : array of int, optional
        Window size for the runningmedian, savitzky-golay, and runningpoly (array of same size of axesToSmooth), by default [].
    mode : {'runningmedian','runningpoly','savitzky-golay','mean','median'}, optional
        Runningmedian or runningpoly or Savitzky-Golay or mean or median (these last two values set all the solutions to the mean/median), by default "runningmedian".
    degree : int, optional
        Degrees of the polynomia for the runningpoly or savitzky-golay modes, by default 1.
    replace : bool, optional
        Flagged data are replaced with smoothed value and unflagged, by default False.
    log : bool, optional
        clip is done in log10 space, by default False
    refAnt : str, optional
        Reference antenna for phases. By default None.
    """
    import numpy as np
    from scipy.ndimage import generic_filter

    # Work on a private copy: a mutable default argument ([]) would be shared
    # across calls, and the window sizes are modified in place below.
    size = [] if size is None else list(size)

    if refAnt == '': refAnt = None
    elif refAnt != 'closest' and refAnt != 'auto' and not refAnt in soltab.getAxisValues('ant', ignoreSelection = True):
        logging.warning('Reference antenna '+refAnt+' not found. Using: automatic search.')
        refAnt = 'auto'

    if mode == "runningmedian" and len(axesToSmooth) != len(size):
        logging.error("Axes and Size lengths must be equal for runningmedian.")
        return 1

    if (mode == "runningpoly" or mode=="savitzky-golay") and (len(axesToSmooth) != 1 or len(size) != 1):
        logging.error("Axes and size lengths must be 1 for runningpoly or savitzky-golay.")
        return 1

    if (mode == "runningpoly" or mode=="savitzky-golay") and soltab.getType() == 'phase':
        logging.error("Runningpoly and savitzky-golay modes cannot work on phases.")
        return 1

    # Windows must be odd so that they are centred on a sample
    for i, s in enumerate(size):
        if s % 2 == 0:
            logging.warning('Size should be odd, adding 1.')
            size[i] += 1

    logging.info("Smoothing soltab: "+soltab.name)

    # Drop requested axes that do not exist in this soltab. Iterate in
    # reverse so deleting by index does not shift the remaining positions
    # (the original forward loop removed the wrong entries when more than one
    # axis was missing, and raised IndexError when size was shorter).
    for i in reversed(range(len(axesToSmooth))):
        if axesToSmooth[i] not in soltab.getAxesNames():
            logging.warning('Axis \"'+axesToSmooth[i]+'\" not found. Ignoring.')
            del axesToSmooth[i]
            if i < len(size): del size[i]

    if soltab.getType() == 'amplitude' and not log:
        logging.warning('Amplitude solution tab detected and log=False. Amplitude solution tables should be treated in log space.')

    if mode == 'median' or mode == 'mean':
        vals = soltab.getValues(retAxesVals=False, refAnt=refAnt)
        if log: vals = np.log10(vals)
        weights = soltab.getValues(retAxesVals=False, weight=True)
        # Flagged solutions do not contribute to the statistics
        np.putmask(vals, weights==0, np.nan)
        idx_axes = [soltab.getAxesNames().index(axisToSmooth) for axisToSmooth in axesToSmooth]

        # handle phases by averaging on the unit circle (complex plane)
        if soltab.getType() == 'phase':
            vals = np.exp(1j*vals)

        # axis must be a tuple (a list is not supported by the nan-reductions)
        if mode == 'median':
            vals[:] = np.nanmedian( vals, axis=tuple(idx_axes), keepdims=True)
        if mode == 'mean':
            vals[:] = np.nanmean( vals, axis=tuple(idx_axes), keepdims=True)

        # go back to phases
        if soltab.getType() == 'phase':
            vals = np.angle(vals)

        # write back
        if log: vals = 10**vals
        soltab.setValues(vals)
        if replace:
            weights[ (weights == 0) ] = 1
            weights[ np.isnan(vals) ] = 0 # all the slice was flagged, cannot extrapolate value
            soltab.setValues(weights, weight=True)

    else:
        for vals, weights, coord, selection in soltab.getValuesIter(returnAxes=axesToSmooth, weight=True, refAnt=refAnt):

            # skip completely flagged selections
            if (weights == 0).all(): continue
            if log: vals = np.log10(vals)

            if mode == 'runningmedian':
                vals_bkp = vals[ weights == 0 ]

                # handle phases by filtering real/imag parts on the unit circle
                if soltab.getType() == 'phase':
                    vals = np.exp(1j*vals)
                    valsreal = np.real(vals)
                    valsimag = np.imag(vals)
                    np.putmask(valsreal, weights == 0, np.nan)
                    np.putmask(valsimag, weights == 0, np.nan)
                    # run generic_filter twice, once for real once for imaginary
                    valsrealnew = generic_filter(valsreal, np.nanmedian, size=size, mode='constant', cval=np.nan)
                    valsimagnew = generic_filter(valsimag, np.nanmedian, size=size, mode='constant', cval=np.nan)
                    valsnew = valsrealnew + 1j*valsimagnew # go back to complex
                    valsnew = np.angle(valsnew) # go back to phases
                else: # other than phases
                    np.putmask(vals, weights == 0, np.nan)
                    valsnew = generic_filter(vals, np.nanmedian, size=size, mode='constant', cval=np.nan)

                if replace:
                    weights[ weights == 0] = 1
                    weights[ np.isnan(valsnew) ] = 0 # all the window was flagged, cannot extrapolate value
                else:
                    valsnew[ weights == 0 ] = vals_bkp

            elif mode == 'runningpoly':
                def polyfit(data):
                    # local fit evaluated at the window centre; NaNs are ignored
                    if (np.isnan(data)).all(): return np.nan # all window is flagged
                    x = np.arange(len(data))[ ~np.isnan(data)]
                    y = data[ ~np.isnan(data) ]
                    p = np.polynomial.polynomial.polyfit(x, y, deg=degree)
                    # polyval has the opposite convention for the coefficient order
                    return np.polyval( p[::-1], (size[0]-1)/2 )

                # flagged samples and edges pass NaN, restored afterwards
                vals_bkp = vals[ weights == 0 ]
                np.putmask(vals, weights==0, np.nan)
                valsnew = generic_filter(vals, polyfit, size=size[0], mode='constant', cval=np.nan)
                if replace:
                    weights[ weights == 0] = 1
                    weights[ np.isnan(valsnew) ] = 0 # all the window was flagged, cannot extrapolate value
                else:
                    valsnew[ weights == 0 ] = vals_bkp

            elif mode == 'savitzky-golay':
                vals_bkp = vals[ weights == 0 ]
                np.putmask(vals, weights==0, np.nan)
                valsnew = _savitzky_golay(vals, size[0], degree)
                if replace:
                    weights[ weights == 0] = 1
                    weights[ np.isnan(valsnew) ] = 0 # all the window was flagged, cannot extrapolate value
                else:
                    valsnew[ weights == 0 ] = vals_bkp

            else:
                logging.error('Mode must be: runningmedian, runningpoly, savitzky-golay, median or mean')
                return 1

            if log: valsnew = 10**valsnew
            soltab.setValues(valsnew, selection)
            if replace: soltab.setValues(weights, selection, weight=True)

    soltab.flush()
    soltab.addHistory('SMOOTH (over %s with mode = %s)' % (axesToSmooth, mode))
    return 0
|
revoltekREPO_NAMElosotoPATH_START.@losoto_extracted@losoto-master@losoto@operations@smooth.py@.PATH_END.py
|
{
"filename": "download_solo.py",
"repo_name": "RobertJaro/InstrumentToInstrument",
"repo_path": "InstrumentToInstrument_extracted/InstrumentToInstrument-master/itipy/download/download_solo.py",
"type": "Python"
}
|
import argparse
import logging
import os
import shutil
from datetime import timedelta, datetime
from multiprocessing import Pool
from urllib.request import urlopen
from warnings import simplefilter
from random import sample
import drms
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.io.fits import getheader, HDUList
from dateutil.relativedelta import relativedelta
from sunpy.map import Map
from sunpy.net import Fido, attrs as a
import sunpy_soar
from tqdm import tqdm
class SOLODownloader:
    """
    Class to download Solar Orbiter EUI data via Fido/SOAR.

    Args:
        base_path (str): Path to the directory where the downloaded data should be stored.
    """

    def __init__(self, base_path):
        self.base_path = base_path
        self.wavelengths_fsi = ['eui-fsi174-image', 'eui-fsi304-image']
        self.wavelengths_hri = ['eui-hrieuv174-image']
        self.dirs = ['eui-fsi174-image', 'eui-fsi304-image', 'eui-hrieuv174-image']
        # One subdirectory per data product
        for d in self.dirs:
            os.makedirs(os.path.join(base_path, d), exist_ok=True)

    def downloadDate(self, date, FSI=True):
        """
        Download the data for the given date.

        Args:
            date (datetime): The date for which the data should be downloaded.
            FSI (bool): If True, download FSI data, else download HRI data.

        Returns:
            list: Paths to the downloaded files (empty if the download failed).
        """
        files = []
        wavelengths = self.wavelengths_fsi if FSI else self.wavelengths_hri
        try:
            for wl in wavelengths:
                if FSI:
                    files += [self.downloadFSI(date, wl)]
                else:
                    files += [self.downloadHRI(date, wl)]
            logging.info('Download complete %s' % date.isoformat())
        except Exception as ex:
            # Log (previously the HRI branch swallowed the error silently)
            # and clean up any partially downloaded files
            logging.error('Unable to download %s: %s' % (date.isoformat(), str(ex)))
            [os.remove(f) for f in files if os.path.exists(f)]
            files = []
        return files

    def downloadFSI(self, query_date, wl):
        """
        Download the FSI data for the given date and wavelength.

        Args:
            query_date (datetime): The date for which the data should be downloaded.
            wl (str): The wavelength for which the data should be downloaded.

        Returns:
            str: Path to the downloaded file.

        Raises:
            Exception: If no valid file is found close to the requested date.
        """
        file_path = os.path.join(self.base_path, wl, "%s.fits" % query_date.isoformat("T", timespec='seconds'))
        if os.path.exists(file_path):
            return file_path
        search = Fido.search(a.Time(query_date - timedelta(minutes=10), query_date + timedelta(minutes=10)),
                             a.Instrument('EUI'), a.soar.Product(wl), a.Level(2))
        assert search.file_num > 0, "No data found for %s (%s)" % (query_date.isoformat(), wl)
        # Try the candidates closest in time first
        search = sorted(search['soar'], key=lambda x: abs(pd.to_datetime(x['Start time']) - query_date).total_seconds())
        for entry in search:
            files = Fido.fetch(entry, path=self.base_path, progress=False)
            if len(files) != 1:
                continue
            file = files[0]
            header = getheader(file, 1)
            # Filter on the plate scale; presumably 4.44012445 arcsec/pix is
            # the nominal full-resolution FSI scale -- TODO confirm
            if header['CDELT1'] != 4.44012445:
                os.remove(file)
                continue
            shutil.move(file, file_path)
            return file_path
        raise Exception("No valid file found for %s (%s)!" % (query_date.isoformat(), wl))

    def downloadHRI(self, query_date, wl):
        """
        Download the HRI data for the given date and wavelength.

        Args:
            query_date (datetime): The date for which the data should be downloaded.
            wl (str): The wavelength for which the data should be downloaded.

        Returns:
            str: Path to the downloaded file.

        Raises:
            Exception: If no valid file is found close to the requested date.
        """
        file_path = os.path.join(self.base_path, wl, "%s.fits" % query_date.isoformat("T", timespec='seconds'))
        if os.path.exists(file_path):
            return file_path
        # HRI observations are sparser, so use a wider (+-1 h) search window
        search = Fido.search(a.Time(query_date - timedelta(hours=1), query_date + timedelta(hours=1)),
                             a.Instrument('EUI'), a.soar.Product(wl), a.Level(2))
        assert search.file_num > 0, "No data found for %s (%s)" % (query_date.isoformat(), wl)
        search = sorted(search['soar'], key=lambda x: abs(pd.to_datetime(x['Start time']) - query_date).total_seconds())
        for entry in search:
            files = Fido.fetch(entry, path=self.base_path, progress=False)
            if len(files) != 1:
                continue
            file = files[0]
            shutil.move(file, file_path)
            return file_path
        raise Exception("No valid file found for %s (%s)!" % (query_date.isoformat(), wl))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Download Solar Orbiter data')
    parser.add_argument('--download_dir', type=str, help='path to the download directory.')
    # type fixed: was type=str, inconsistent with the integer default
    parser.add_argument('--n_workers', type=int, help='number of parallel threads.', required=False, default=4)
    parser.add_argument('--start_date', type=str, help='start date in format YYYY-MM-DD.')
    parser.add_argument('--end_date', type=str, help='end date in format YYYY-MM-DD.', required=False,
                        default=str(datetime.now()).split(' ')[0])
    args = parser.parse_args()

    base_path = args.download_dir
    n_workers = args.n_workers  # NOTE(review): currently unused; downloads run sequentially
    start_date = args.start_date
    end_date = args.end_date
    start_date_datetime = datetime.strptime(start_date, "%Y-%m-%d")
    end_date_datetime = datetime.strptime(end_date, "%Y-%m-%d")

    download_util = SOLODownloader(base_path=base_path)
    # Step through the requested interval in 1-hour increments (FSI products)
    for d in [start_date_datetime + i * timedelta(hours=1) for i in
              range((end_date_datetime - start_date_datetime) // timedelta(hours=1))]:
        download_util.downloadDate(d)
|
RobertJaroREPO_NAMEInstrumentToInstrumentPATH_START.@InstrumentToInstrument_extracted@InstrumentToInstrument-master@itipy@download@download_solo.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/timeseries/periodograms/lombscargle/implementations/utils.py",
"type": "Python"
}
|
from math import factorial
import numpy as np
def bitceil(N):
    """
    Find the bit (i.e. power of 2) immediately greater than or equal to N

    Note: this works for numbers up to 2 ** 64.
    Roughly equivalent to int(2 ** np.ceil(np.log2(N))).
    """
    # bit_length of N-1 gives the exponent of the next power of two
    exponent = int(N - 1).bit_length()
    return 2 ** exponent
def extirpolate(x, y, N=None, M=4):
    """
    Extirpolate the values (x, y) onto an integer grid range(N),
    using lagrange polynomial weights on the M nearest points.

    Parameters
    ----------
    x : array-like
        array of abscissas
    y : array-like
        array of ordinates
    N : int
        number of integer bins to use. For best performance, N should be larger
        than the maximum of x
    M : int
        number of adjoining points on which to extirpolate.

    Returns
    -------
    yN : ndarray
        N extirpolated values associated with range(N)

    Examples
    --------
    >>> rng = np.random.default_rng(0)
    >>> x = 100 * rng.random(20)
    >>> y = np.sin(x)
    >>> y_hat = extirpolate(x, y)
    >>> x_hat = np.arange(len(y_hat))
    >>> f = lambda x: np.sin(x / 10)
    >>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
    True

    Notes
    -----
    This code is based on the C implementation of spread() presented in
    Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
    """
    x, y = map(np.ravel, np.broadcast_arrays(x, y))

    if N is None:
        # Default grid size: just past max(x) with room for the M-point stencil
        N = int(np.max(x) + 0.5 * M + 1)

    # Now use legendre polynomial weights to populate the results array;
    # This is an efficient recursive implementation (See Press et al. 1989)
    result = np.zeros(N, dtype=y.dtype)

    # first take care of the easy cases where x is an integer
    integers = x % 1 == 0
    np.add.at(result, x[integers].astype(int), y[integers])
    x, y = x[~integers], y[~integers]

    # For each remaining x, find the index describing the extirpolation range.
    # i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
    # adjusted so that the limits are within the range 0...N
    ilo = np.clip((x - M // 2).astype(int), 0, N - M)
    # Common numerator of the Lagrange weights: y * prod_k (x - (ilo + k))
    numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
    denominator = factorial(M - 1)

    for j in range(M):
        if j > 0:
            # Recursive update of the per-point denominator (Press et al.)
            denominator *= j / (j - M)
        ind = ilo + (M - 1 - j)
        np.add.at(result, ind, numerator / (denominator * (x - ind)))
    return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1, oversampling=5, use_fft=True, Mfft=4):
    """Compute (approximate) trigonometric sums for a number of frequencies.

    This routine computes weighted sine and cosine sums::

        S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
        C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }

    Where f_j = freq_factor * (f0 + j * df) for the values j in 1 ... N.
    The sums can be computed either by a brute force O[N^2] method, or
    by an FFT-based O[Nlog(N)] method.

    Parameters
    ----------
    t : array-like
        array of input times
    h : array-like
        array weights for the sum
    df : float
        frequency spacing
    N : int
        number of frequency bins to return
    f0 : float, optional
        The low frequency to use
    freq_factor : float, optional
        Factor which multiplies the frequency
    use_fft : bool
        if True, use the approximate FFT algorithm to compute the result.
        This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
    oversampling : int (default = 5)
        oversampling freq_factor for the approximation; roughly the number of
        time samples across the highest-frequency sinusoid. This parameter
        contains the trade-off between accuracy and speed. Not referenced
        if use_fft is False.
    Mfft : int
        The number of adjacent points to use in the FFT approximation.
        Not referenced if use_fft is False.

    Returns
    -------
    S, C : ndarray
        summation arrays for frequencies
        f = freq_factor * (f0 + df * np.arange(N))
    """
    # Fold freq_factor into f0 and df once, up front
    df *= freq_factor
    f0 *= freq_factor

    if df <= 0:
        raise ValueError("df must be positive")
    t, h = map(np.ravel, np.broadcast_arrays(t, h))

    if use_fft:
        Mfft = int(Mfft)
        if Mfft <= 0:
            raise ValueError("Mfft must be positive")

        # required size of fft is the power of 2 above the oversampling rate
        Nfft = bitceil(N * oversampling)
        t0 = t.min()

        # Shift the f0 offset into the weights so the grid starts at frequency 0
        if f0 > 0:
            h = h * np.exp(2j * np.pi * f0 * (t - t0))

        # Map times onto the FFT grid (modulo Nfft) and extirpolate weights
        tnorm = ((t - t0) * Nfft * df) % Nfft
        grid = extirpolate(tnorm, h, Nfft, Mfft)

        fftgrid = np.fft.ifft(grid)[:N]
        # Undo the t -> t - t0 time shift with a per-frequency phase factor
        if t0 != 0:
            f = f0 + df * np.arange(N)
            fftgrid *= np.exp(2j * np.pi * t0 * f)

        # ifft carries a 1/Nfft factor; multiply it back out
        C = Nfft * fftgrid.real
        S = Nfft * fftgrid.imag
    else:
        # Brute-force O(N^2) evaluation of the sums
        f = f0 + df * np.arange(N)
        C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
        S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))

    return S, C
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@timeseries@periodograms@lombscargle@implementations@utils.py@.PATH_END.py
|
{
"filename": "RegexTokenizerOptionsStart.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_support/metadata_schema_py_generated/RegexTokenizerOptionsStart.md",
"type": "Markdown"
}
|
page_type: reference
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_support.metadata_schema_py_generated.RegexTokenizerOptionsStart" />
<meta itemprop="path" content="Stable" />
</div>
# tflite_support.metadata_schema_py_generated.RegexTokenizerOptionsStart
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L1609-L1610">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_support.metadata_schema_py_generated.RegexTokenizerOptionsStart(
builder
)
</code></pre>
<!-- Placeholder for "Used in" -->
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_support@metadata_schema_py_generated@RegexTokenizerOptionsStart.md@.PATH_END.py
|
{
"filename": "value_locate.py",
"repo_name": "ggmichael/craterstats",
"repo_path": "craterstats_extracted/craterstats-main/src/craterstats/gm/idl/value_locate.py",
"type": "Python"
}
|
# Copyright (c) 2021, Greg Michael
# Licensed under BSD 3-Clause License. See LICENSE.txt for details.
import numpy as np
def value_locate(x, v):
    '''
    Safe replacement for the IDL VALUE_LOCATE function.

    For a monotonically increasing `x`, returns for each element of `v` the
    index j such that x[j] <= v < x[j+1], with -1 when v < x[0] and
    len(x)-1 when v >= x[-1], matching IDL's convention.

    :param x: 1-D monotonically increasing sequence of bin edges
    :param v: value(s) to locate
    :return: integer index (array)
    '''
    # side='right' is needed to match IDL at exact ties:
    # searchsorted(..., side='right') returns i with x[i-1] <= v < x[i],
    # so i-1 is exactly IDL's j (the default side='left' is off by one
    # whenever v equals an element of x).
    return np.searchsorted(x, v, side='right') - 1
|
ggmichaelREPO_NAMEcraterstatsPATH_START.@craterstats_extracted@craterstats-main@src@craterstats@gm@idl@value_locate.py@.PATH_END.py
|
{
"filename": "mc_corr_jobs.py",
"repo_name": "NoahSailer/MaPar",
"repo_path": "MaPar_extracted/MaPar-main/mc_correction/mc_corr_jobs.py",
"type": "Python"
}
|
from do_mc_corr import *
import sys
sys.path.append('../')
from globe import NSIDE
job = int(sys.argv[1])
# load some masks (which may or may not be used depending on the job)
isamp = 1
bdir = '/pscratch/sd/m/mwhite/DESI/MaPar/maps/'
lrg_mask = hp.read_map(bdir+f'lrg_s0{isamp}_msk.hpx2048.fits')
north = hp.read_map('../maps/masks/north_mask.fits')
des = hp.read_map('../maps/masks/des_mask.fits')
decals = hp.read_map('../maps/masks/decals_mask.fits')
PR3mask = hp.read_map(f'../maps/masks/PR3_lens_mask.fits')
PR4mask = hp.read_map(f'../maps/masks/PR4_lens_mask.fits')
PR4maska = hp.read_map(f'../maps/masks/PR4_lens_mask_alt.fits')
DECm15 = hp.read_map('../maps/masks/DECm15_mask.fits')
DECp15 = 1 - DECm15
# baseline LRG mask ("full") correlated with act dr6
def do_dr6(lrg_mask, lrg_name, option='baseline'):
    """Compute MC-correction C_ells for an LRG sample against ACT DR6 lensing.

    Parameters
    ----------
    lrg_mask : healpix map masking the galaxy sample.
    lrg_name : string label for the sample (used to name the outputs).
    option : which DR6 lensing reconstruction variant to use.
    """
    release = 'dr6_lensing_v1'
    bdir = f'/global/cfs/projectdirs/act/www/{release}/'
    dr6_mask = hp.ud_grade(hp.read_map(f'{bdir}maps/{option}/mask_act_dr6_lensing_v1_healpix_nside_4096_{option}.fits'), NSIDE)
    # Bug fix: previously the label was hard-coded to f'lrg-full-z{isamp}',
    # silently ignoring the lrg_name argument. All existing callers pass that
    # same string, so using the parameter is behavior-identical for them.
    make_mc_cls(lrg_name, lrg_mask, dr6_mask, 'c', lensmap='DR6', option=option)
# Jobs 0-9: baseline "full" LRG mask against ACT DR6 lensing,
# one job per DR6 reconstruction variant.
if job==0: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='baseline')
if job==1: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='cibdeproj')
if job==2: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='f090')
if job==3: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='f090_tonly')
if job==4: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='f150')
if job==5: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='f150_tonly')
if job==6: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='galcut040')
if job==7: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='galcut040_polonly')
if job==8: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='polonly')
if job==9: do_dr6(lrg_mask,f'lrg-full-z{isamp}',option='tonly')
# different LRG masks correlated with PR3
if job==10: make_mc_cls(f'lrg-full-z{isamp}'  ,lrg_mask,       PR3mask,'c',lensmap='PR3')
if job==11: make_mc_cls(f'lrg-north-z{isamp}' ,lrg_mask*north, PR3mask,'c',lensmap='PR3')
if job==12: make_mc_cls(f'lrg-decals-z{isamp}',lrg_mask*decals,PR3mask,'c',lensmap='PR3')
if job==13: make_mc_cls(f'lrg-des-z{isamp}'   ,lrg_mask*des,   PR3mask,'c',lensmap='PR3')
# different LRG masks correlated with PR4 (job 18 adds the DEC > -15 cut)
if job==14: make_mc_cls(f'lrg-full-z{isamp}'  ,lrg_mask,       PR4mask,'c',lensmap='PR4')
if job==15: make_mc_cls(f'lrg-north-z{isamp}' ,lrg_mask*north, PR4mask,'c',lensmap='PR4')
if job==16: make_mc_cls(f'lrg-decals-z{isamp}',lrg_mask*decals,PR4mask,'c',lensmap='PR4')
if job==17: make_mc_cls(f'lrg-des-z{isamp}'   ,lrg_mask*des,   PR4mask,'c',lensmap='PR4')
if job==18: make_mc_cls(f'lrg-DECp15-z{isamp}',lrg_mask*DECp15,PR4mask,'c',lensmap='PR4')
|
NoahSailerREPO_NAMEMaParPATH_START.@MaPar_extracted@MaPar-main@mc_correction@mc_corr_jobs.py@.PATH_END.py
|
{
"filename": "client_test.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/L0_client_nobatch/client_test.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright 2018-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import unittest
import numpy as np
import test_util as tu
import tritonclient.grpc as tritongrpcclient
import tritonclient.http as tritonhttpclient
from tritonclient.utils import InferenceServerException
class ClientNoBatchTest(tu.TestResultCollector):
    """Checks that the server enforces the batch dimension in request shapes.

    A model configured with a max batch size must receive a leading batch
    dimension in its input shapes; a non-batching model must not. Each case
    is exercised over both the HTTP and GRPC client libraries.
    """

    def _make_client_and_io(self, protocol, tensor_shape):
        """Return (client, inputs, outputs) for the given protocol.

        Builds an InferenceServerClient plus INPUT0/INPUT1 descriptors of
        tensor_shape and OUTPUT0/OUTPUT1 requested outputs, using the HTTP
        library on port 8000 or the GRPC library on port 8001.
        """
        if protocol == "http":
            module, url = tritonhttpclient, "localhost:8000"
        else:
            module, url = tritongrpcclient, "localhost:8001"
        triton_client = module.InferenceServerClient(url=url, verbose=True)
        inputs = [
            module.InferInput("INPUT0", tensor_shape, "INT32"),
            module.InferInput("INPUT1", tensor_shape, "INT32"),
        ]
        outputs = [
            module.InferRequestedOutput("OUTPUT0"),
            module.InferRequestedOutput("OUTPUT1"),
        ]
        return triton_client, inputs, outputs

    def _infer(self, model_name, tensor_shape, expect_error, failure_msg):
        """Run one inference per protocol and assert on the outcome.

        When expect_error is True the request must raise
        InferenceServerException; otherwise it must succeed.
        """
        for protocol in ["http", "grpc"]:
            triton_client, inputs, outputs = self._make_client_and_io(
                protocol, tensor_shape
            )
            in0 = np.random.randint(low=0, high=100, size=tensor_shape, dtype=np.int32)
            in1 = np.random.randint(low=0, high=100, size=tensor_shape, dtype=np.int32)
            inputs[0].set_data_from_numpy(in0)
            inputs[1].set_data_from_numpy(in1)
            if expect_error:
                with self.assertRaises(InferenceServerException, msg=failure_msg):
                    triton_client.infer(model_name, inputs, outputs=outputs)
            else:
                triton_client.infer(model_name, inputs, outputs=outputs)

    def test_nobatch_request_for_batching_model(self):
        # graphdef_int32_int8_int8 has a batching version with max batch size
        # of 8; omitting the batch dimension from the input shape must fail.
        model_name = tu.get_model_name("graphdef", np.int32, np.int8, np.int8)
        self._infer(
            model_name,
            (16,),
            expect_error=True,
            failure_msg="expected failure with no batch request for batching model",
        )

    def test_batch_request_for_nobatching_model(self):
        # graphdef_nobatch_int32_int8_int8 is the non-batching version;
        # including a batch dimension in the input shape must fail.
        model_name = tu.get_model_name(
            "graphdef_nobatch", np.int32, np.int8, np.int8
        )
        self._infer(
            model_name,
            (1, 16),
            expect_error=True,
            failure_msg="expected failure with batched request for non-batching model",
        )

    def test_nobatch_request_for_nonbatching_model(self):
        # Matching shapes: non-batching model with no batch dimension succeeds.
        # (Comment fixed: the original block carried a copy-pasted comment
        # describing the batching model.)
        model_name = tu.get_model_name(
            "graphdef_nobatch", np.int32, np.int8, np.int8
        )
        self._infer(model_name, (16,), expect_error=False, failure_msg=None)

    def test_batch_request_for_batching_model(self):
        # Matching shapes: batching model with batch dimension 1 succeeds.
        # (Comment fixed: the original block carried a copy-pasted comment
        # describing the non-batching model.)
        model_name = tu.get_model_name("graphdef", np.int32, np.int8, np.int8)
        self._infer(model_name, (1, 16), expect_error=False, failure_msg=None)
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@L0_client_nobatch@client_test.py@.PATH_END.py
|
{
"filename": "runpipe.py",
"repo_name": "anchal-009/SAVED21cm",
"repo_path": "SAVED21cm_extracted/SAVED21cm-master/src/runpipe.py",
"type": "Python"
}
|
import os
from src.readset import Modset, Inputs
from src.basis import Basis
from src.noise import Noise
from src.infocrit import InfoCrit
from src.extractor import Extractor
from src.visuals import Visual
class Pipeline:
    """Single-antenna signal-extraction pipeline: modelling sets in,
    extracted 21cm signal statistics out."""

    def __init__(self, nu, nLST, ant, path21TS, pathFgTS,
                 obsDate, obsDateTime, intBins, numReg, fgModel,
                 dT=6, modesFg=50, modes21=80, quantity='DIC',
                 file='test.txt', indexFg=0, index21=0, visual=False, save=False):
        """Initialize to run the pipeline with the given settings.

        Args:
            nu (array): Frequency range
            nLST (int): Number of time bins to fit
            ant (list): List of antenna designs
            path21TS (string): Path to 21cm modelling set
            pathFgTS (string): Path to foregrounds modelling set
            obsDate: Observation date passed to the foreground modelling set
            obsDateTime: Date-time list for the foreground modelling set
            intBins: Integration bins for the foreground modelling set
            numReg: Number of foreground regions
            fgModel: Foreground model identifier
            dT (int, optional): Integration time in hours. Defaults to 6.
            modesFg (int, optional): Total number of FG modes. Defaults to 50.
            modes21 (int, optional): Total number of 21 modes. Defaults to 80.
            quantity (string): Quantity to minimize;
                'DIC' for Deviance Information Criterion,
                'BIC' for Bayesian Information Criterion
            file (str, optional): Filename to store the gridded IC. Defaults to 'test.txt'.
            indexFg (int, optional): Index to get input from the FG modelling set. Defaults to 0.
            index21 (int, optional): Index to get input from the 21 modelling set. Defaults to 0.
            visual (bool, optional): Option to plot the extracted signal. Defaults to False.
            save (bool, optional): Option to save the figures. Defaults to False.
        """
        self.nu = nu
        self.nLST = nLST
        self.ant = ant
        self.path21TS = path21TS
        self.pathFgTS = pathFgTS
        self.obsDate = obsDate
        self.obsDateTime = obsDateTime
        self.intBins = intBins
        self.numReg = numReg
        self.fgModel = fgModel
        self.dT = dT
        self.modesFg = modesFg
        self.modes21 = modes21
        self.quantity = quantity
        self.file = file
        self.indFg = indexFg
        self.ind21 = index21
        self.visual = visual
        self.save = save

    def runPipeline(self):
        """Run the pipeline.

        Returns:
            tuple: (icmodesFg, icmodes21, DIC, bias, normD, rms) extraction
            statistics.
        """
        print('-------------------- Running the pipeline ---------------------\n')
        # Reading in the modelling sets
        models = Modset(nu=self.nu, nLST=self.nLST, ant=self.ant)
        m21 = models.get21modset(file=self.path21TS, nuMin=50, nuMax=200)
        mFg = models.getcFgModsetGivenTimeAnt(path=self.pathFgTS, date=self.obsDate,
                                              numReg=self.numReg, fgModel=self.fgModel,
                                              dateTimeList=self.obsDateTime,
                                              intBins=self.intBins, antenna=self.ant)
        # Generating inputs from the modelling sets
        inputs = Inputs(nu=self.nu, nLST=self.nLST, ant=self.ant)
        y21, y_x21 = inputs.getExp21(modset=m21, ind=self.ind21)
        yFg = inputs.getFg(modset=mFg, ind=self.indFg)
        # Generating the noise and getting its covariance
        noise = Noise(nu=self.nu, nLST=self.nLST, ant=self.ant, power=y_x21+yFg,
                      deltaNu=self.nu[1] - self.nu[0], deltaT=self.dT)
        thermRealz = noise.noiseRealz()
        cmat = noise.covmat()
        cmatInv = noise.covmatInv()
        # Getting the noise covariance weighted modelling sets
        wgt_m21 = noise.wgtTs(modset=m21, opt='21')
        wgt_mFg = noise.wgtTs(modset=mFg, opt='FG')
        # Generating the mock observation
        y = y_x21 + yFg + thermRealz
        # Weighted SVD for getting the optimal modes
        basis = Basis(nu=self.nu, nLST=self.nLST, ant=self.ant)
        b21 = basis.wgtSVDbasis(modset=wgt_m21, covmat=cmat, opt='21')
        bFg = basis.wgtSVDbasis(modset=wgt_mFg, covmat=cmat, opt='FG')
        # Minimizing information criterion for selecting the number of modes
        ic = InfoCrit(nu=self.nu, nLST=self.nLST, ant=self.ant)
        ic.gridinfo(modesFg=self.modesFg, modes21=self.modes21, wgtBasis21=b21, wgtBasisFg=bFg,
                    quantity=self.quantity, covmatInv=cmatInv, mockObs=y, file=self.file)
        icmodesFg, icmodes21, _ = ic.searchMinima(file=self.file)
        # Finally extracting the signal!
        ext = Extractor(nu=self.nu, nLST=self.nLST, ant=self.ant)
        extInfo = ext.extract(modesFg=icmodesFg, modes21=icmodes21,
                              wgtBasisFg=bFg, wgtBasis21=b21,
                              covmatInv=cmatInv, mockObs=y, y21=y21)
        # Visuals
        if self.visual:
            vis = Visual(nu=self.nu, nLST=self.nLST, ant=self.ant, save=self.save)
            vis.plotModset(set=m21, opt='21', n_curves=1000)
            vis.plotModset(set=mFg, opt='FG', n_curves=100)
            vis.plotMockObs(y21=y21, yFg=yFg, noise=thermRealz)
            vis.plotBasis(basis=b21, opt='21')
            vis.plotBasis(basis=bFg, opt='FG')
            vis.plotInfoGrid(file=self.file, modesFg=self.modesFg, modes21=self.modes21,
                             quantity=self.quantity, minModesFg=icmodesFg, minModes21=icmodes21)
            vis.plotExtSignal(y21=y21, recons21=extInfo[1], sigma21=extInfo[3])
            # Fix: os.system('rm %s' % self.file) spawned a shell (fails on
            # non-POSIX systems and is unsafe for filenames with spaces or
            # metacharacters); os.remove deletes the scratch file directly.
            # Like 'rm' on a missing file, a missing file is not fatal.
            try:
                os.remove(self.file)
            except FileNotFoundError:
                pass
        # Statistical Measures
        qDic = extInfo[8]
        qBias = extInfo[10]
        qNormD = extInfo[11]
        qRms = extInfo[7] * qBias[0]
        return icmodesFg, icmodes21, qDic[0][0], qBias[0], qNormD, qRms
class Pipeline_P2:
    """Two-antenna variant of the pipeline: foreground sets from two antennas
    are concatenated before extraction."""

    def __init__(self, nu, lst2fit, nLST, ant, ant1, ant2, path21TS, pathFgTS,
                 obsDate, obsDateTime1, obsDateTime2, intBins, numReg, fgModel,
                 dT=6, modesFg=50, modes21=80, quantity='DIC',
                 file='test.txt', indexFg=0, index21=0, visual=False, save=False):
        """Initialize to run the pipeline with the given settings.

        Args:
            nu (array): Frequency range
            lst2fit: Number of time bins per antenna used to build inputs
            nLST (int): Total number of time bins to fit
            ant (list): Combined list of antenna designs
            ant1: First antenna design
            ant2: Second antenna design
            path21TS (string): Path to 21cm modelling set
            pathFgTS (string): Path to foregrounds modelling set
            obsDate: Observation date passed to the foreground modelling set
            obsDateTime1: Date-time list for the first antenna's foreground set
            obsDateTime2: Date-time list for the second antenna's foreground set
            intBins: Integration bins for the foreground modelling set
            numReg: Number of foreground regions
            fgModel: Foreground model identifier
            dT (int, optional): Integration time in hours. Defaults to 6.
            modesFg (int, optional): Total number of FG modes. Defaults to 50.
            modes21 (int, optional): Total number of 21 modes. Defaults to 80.
            quantity (string): Quantity to minimize;
                'DIC' for Deviance Information Criterion,
                'BIC' for Bayesian Information Criterion
            file (str, optional): Filename to store the gridded IC. Defaults to 'test.txt'.
            indexFg (int, optional): Index to get input from the FG modelling set. Defaults to 0.
            index21 (int, optional): Index to get input from the 21 modelling set. Defaults to 0.
            visual (bool, optional): Option to plot the extracted signal. Defaults to False.
            save (bool, optional): Option to save the figures. Defaults to False.
        """
        self.nu = nu
        self.lst_2_fit = lst2fit
        self.nLST = nLST
        self.ant1 = ant1
        self.ant2 = ant2
        self.ant = ant
        self.path21TS = path21TS
        self.pathFgTS = pathFgTS
        self.obsDate = obsDate
        self.obsDateTime1 = obsDateTime1
        self.obsDateTime2 = obsDateTime2
        self.intBins = intBins
        self.numReg = numReg
        self.fgModel = fgModel
        self.dT = dT
        self.modesFg = modesFg
        self.modes21 = modes21
        self.quantity = quantity
        self.file = file
        self.indFg = indexFg
        self.ind21 = index21
        self.visual = visual
        self.save = save

    def runPipeline(self):
        """Run the two-antenna pipeline.

        Returns:
            tuple: (icmodesFg, icmodes21, DIC, bias, normD, rms) extraction
            statistics.
        """
        print('-------------------- Running the pipeline ---------------------\n')
        # Reading in the modelling sets (one foreground set per antenna)
        models = Modset(nu=self.nu, nLST=self.nLST, ant=self.ant)
        m21 = models.get21modset(file=self.path21TS, nuMin=50, nuMax=200)
        mFg1 = models.getcFgModsetGivenTimeAnt(path=self.pathFgTS, date=self.obsDate,
                                               numReg=self.numReg, fgModel=self.fgModel,
                                               dateTimeList=self.obsDateTime1,
                                               intBins=self.intBins, antenna=self.ant1)
        mFg2 = models.getcFgModsetGivenTimeAnt(path=self.pathFgTS, date=self.obsDate,
                                               numReg=self.numReg, fgModel=self.fgModel,
                                               dateTimeList=self.obsDateTime2,
                                               intBins=self.intBins, antenna=self.ant2)
        mFg = models.concatenateModels(mFg1, mFg2)
        # Generating inputs from the modelling sets
        inputs21 = Inputs(nu=self.nu, nLST=self.nLST, ant=self.ant)
        y21, y_x21 = inputs21.getExp21(modset=m21, ind=self.ind21)
        inputs = Inputs(nu=self.nu, nLST=self.lst_2_fit, ant=self.ant)
        yFg1 = inputs.getFg(modset=mFg1, ind=self.indFg)
        yFg2 = inputs.getFg(modset=mFg2, ind=self.indFg)
        yFg = inputs.concatenateInputs(yFg1, yFg2)
        # Generating the noise and getting its covariance
        noise = Noise(nu=self.nu, nLST=self.nLST, ant=self.ant, power=y_x21+yFg,
                      deltaNu=self.nu[1] - self.nu[0], deltaT=self.dT)
        thermRealz = noise.noiseRealz()
        cmat = noise.covmat()
        cmatInv = noise.covmatInv()
        # Getting the noise covariance weighted modelling sets
        wgt_m21 = noise.wgtTs(modset=m21, opt='21')
        wgt_mFg = noise.wgtTs(modset=mFg, opt='FG')
        # Generating the mock observation
        y = y_x21 + yFg + thermRealz
        # Weighted SVD for getting the optimal modes
        basis = Basis(nu=self.nu, nLST=self.nLST, ant=self.ant)
        b21 = basis.wgtSVDbasis(modset=wgt_m21, covmat=cmat, opt='21')
        bFg = basis.wgtSVDbasis(modset=wgt_mFg, covmat=cmat, opt='FG')
        # Minimizing information criterion for selecting the number of modes
        ic = InfoCrit(nu=self.nu, nLST=self.nLST, ant=self.ant)
        ic.gridinfo(modesFg=self.modesFg, modes21=self.modes21, wgtBasis21=b21, wgtBasisFg=bFg,
                    quantity=self.quantity, covmatInv=cmatInv, mockObs=y, file=self.file)
        icmodesFg, icmodes21, _ = ic.searchMinima(file=self.file)
        # Finally extracting the signal!
        ext = Extractor(nu=self.nu, nLST=self.nLST, ant=self.ant)
        extInfo = ext.extract(modesFg=icmodesFg, modes21=icmodes21,
                              wgtBasisFg=bFg, wgtBasis21=b21,
                              covmatInv=cmatInv, mockObs=y, y21=y21)
        # Visuals
        if self.visual:
            vis = Visual(nu=self.nu, nLST=self.nLST, ant=self.ant, save=self.save)
            vis.plotModset(set=m21, opt='21', n_curves=1000)
            vis.plotModset(set=mFg, opt='FG', n_curves=100)
            vis.plotMockObs(y21=y21, yFg=yFg, noise=thermRealz)
            vis.plotBasis(basis=b21, opt='21')
            vis.plotBasis(basis=bFg, opt='FG')
            vis.plotInfoGrid(file=self.file, modesFg=self.modesFg, modes21=self.modes21,
                             quantity=self.quantity, minModesFg=icmodesFg, minModes21=icmodes21)
            vis.plotExtSignal(y21=y21, recons21=extInfo[1], sigma21=extInfo[3])
            # Fix: os.system('rm %s' % self.file) spawned a shell (fails on
            # non-POSIX systems and is unsafe for filenames with spaces or
            # metacharacters); os.remove deletes the scratch file directly.
            # Like 'rm' on a missing file, a missing file is not fatal.
            try:
                os.remove(self.file)
            except FileNotFoundError:
                pass
        # Statistical Measures
        qDic = extInfo[8]
        qBias = extInfo[10]
        qNormD = extInfo[11]
        qRms = extInfo[7] * qBias[0]
        return icmodesFg, icmodes21, qDic[0][0], qBias[0], qNormD, qRms
|
anchal-009REPO_NAMESAVED21cmPATH_START.@SAVED21cm_extracted@SAVED21cm-master@src@runpipe.py@.PATH_END.py
|
{
"filename": "BlankImage.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/share/scripts/BlankImage.py",
"type": "Python"
}
|
# interactively blank portions of an image
# NOTE(review): this file is an interactive transcript, not a runnable
# script. It references `err` and `disp` (an Obit error stack and display)
# that must already exist in the session, and it calls BlankImage() before
# the definition further down the file -- execute the function definition
# at the bottom first, then replay these lines.
#x=imlod('Arcade_A_wide_3secHG.fits',0,'Arcade_A','3secHG',1,1,err)
#x=imlod('ArcadeAB3comb.fits',0,'Arcade_BC','Blank',1,1,err)
import OErr, Image, OWindow, ODisplay, FArray
BlankImage = None
# Define image (the second assignment line wins; edit to choose a target)
Aname = 'Arcade_A'; Aclass='3secHG'; disk = 1; seq = 1; offs = [0,0]
Aname = 'Arcade_BC'; Aclass='Blank'; disk = 1; seq = 1; offs = [-139,-169]
plane = [1,1,1,1,1]
image = Image.newPAImage("Im", Aname, Aclass, disk, seq, True, err)
OErr.printErr(err)
# Read image
image.GetPlane(None, plane, err)
OErr.printErr(err)
# Define blanking interactively defining a OWindow
naxis = image.Desc.Dict['inaxes'][0:2]
win = OWindow.PCreate1('wind', naxis, err)
ODisplay.PImage(disp, image, err,window=win)
OErr.printErr(err)
wlist = OWindow.PGetList(win, 1, err)
OErr.printErr(err)
# Apply the blanking windows to the in-memory pixel array
BlankImage(image,OWindow.PGetList(win,1,err),off=offs)
OErr.printErr(err)
# Update image (write the blanked plane back to disk)
image.PutPlane( None, plane, err)
OErr.printErr(err)
# Blank Image function
del BlankImage
def BlankImage(image, win, off=(0, 0)):
    """
    Blank (set to FArray.fblank) the portion of image described by win.

    Supports rectangular (type 0) and round (type 1) windows as produced
    by OWindow.PGetList.

    * image  Image to blank; its FArray must be filled in
    * win    list of window entries (OWindow.PGetList output)
    * off    (x, y) pixel offset applied when writing into the image.
             Fix: the default is now an immutable tuple instead of the
             mutable list [0,0] (mutable-default antipattern); behavior
             is unchanged since off is only indexed, never mutated.
    """
    fblank = FArray.fblank
    nx = image.Desc.Dict['inaxes'][0]-1; ny = image.Desc.Dict['inaxes'][1]-1
    for w in win:
        if w[1]==0:  # Rectangular: [id, 0, x1, y1, x2, y2], 1-based corners
            ix1 = w[2]-1; ix2 = w[4]; iy1 = w[3]-1; iy2 = w[5];
            # clip the window to the image bounds
            ix1 = max(0,ix1); iy1 = max(0,iy1); ix2 = min(nx,ix2); iy2 = min(ny,iy2);
            for ix in range(ix1,ix2):
                for iy in range(iy1,iy2):
                    image.FArray.set(fblank, ix+off[0], iy+off[1])
        elif w[1]==1:  # Round: [id, 1, radius, xc, yc], 1-based center
            r = float(w[2]); r2 = r*r; xc = float(w[3]); yc = float(w[4])
            # bounding box of the circle, clipped to the image bounds
            ix1 = w[3]-w[2]-1; ix2 = w[3]+w[2]; iy1 = w[4]-w[2]-1; iy2 = w[4]+w[2];
            ix1 = max(0,ix1); iy1 = max(0,iy1); ix2 = min(nx,ix2); iy2 = min(ny,iy2);
            for ix in range(ix1,ix2):
                for iy in range(iy1,iy2):
                    d = (ix-xc)**2 + (iy-yc)**2
                    if d<=r2:  # only blank pixels inside the circle
                        image.FArray.set(fblank, ix+off[0], iy+off[1])
    # end loop over win
# end BlankImage
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@share@scripts@BlankImage.py@.PATH_END.py
|
{
"filename": "stop_connection.py",
"repo_name": "HERA-Team/hera_mc",
"repo_path": "hera_mc_extracted/hera_mc-main/scripts/stop_connection.py",
"type": "Python"
}
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Script to add a general connection to the database.
"""
from hera_mc import cm_handling, cm_partconnect, cm_utils, mc
def query_args(args):
    """
    Fill in any connection parameters the user did not supply.

    Prompts interactively, in a fixed order, for each part/revision/port
    field still set to None, and when the date is the default "now" asks
    cm_utils for the default date. Returns the (mutated) args namespace.
    """
    prompts = (
        ("uppart", "Upstream part number: "),
        ("uprev", "Upstream part revision: "),
        ("upport", "Upstream output port: "),
        ("dnpart", "Downstream part number: "),
        ("dnrev", "Downstream part revision: "),
        ("dnport", "Downstream input port: "),
    )
    for attr, prompt in prompts:
        if getattr(args, attr) is None:
            setattr(args, attr, input(prompt))
    if args.date == "now":
        args.date = cm_utils.query_default("date", args)
    return args
if __name__ == "__main__":
    # Build the argument parser: part/rev/port of both ends of the
    # connection, plus the standard date/time options.
    parser = mc.get_mc_argument_parser()
    parser.add_argument("-u", "--uppart", help="Upstream part number", default=None)
    parser.add_argument("--uprev", help="Upstream part revision", default=None)
    parser.add_argument("--upport", help="Upstream output port", default=None)
    parser.add_argument("-d", "--dnpart", help="Downstream part number", default=None)
    parser.add_argument("--dnrev", help="Downstream part revision", default=None)
    parser.add_argument("--dnport", help="Downstream input port", default=None)
    cm_utils.add_date_time_args(parser)
    args = parser.parse_args()
    args = query_args(args)
    # Pre-process some args
    # NOTE(review): at_date is only bound when args.date is not None, yet the
    # later get_specific_connection / stop_connections calls use it
    # unconditionally -- presumably add_date_time_args always yields a
    # non-None date. Confirm.
    if args.date is not None:
        at_date = cm_utils.get_astropytime(args.date, args.time, args.format)
    # Describe the connection to be stopped.
    c = cm_partconnect.Connections()
    c.connection(
        upstream_part=args.uppart,
        up_part_rev=args.uprev,
        upstream_output_port=args.upport,
        downstream_part=args.dnpart,
        down_part_rev=args.dnrev,
        downstream_input_port=args.dnport,
    )
    db = mc.connect_to_mc_db(args)
    with db.sessionmaker() as session:
        handling = cm_handling.Handling(session)
        # Only stop the connection if exactly one match exists and it is
        # not already stopped.
        chk = handling.get_specific_connection(c, at_date)
        if len(chk) == 1 and chk[0].stop_gpstime is None:
            connection_start_was = chk[0].start_gpstime
            print(
                "Stopping connection {}:{}:{} <-> {}:{}:{} : {}".format(
                    args.uppart,
                    args.uprev,
                    args.upport,
                    args.dnpart,
                    args.dnrev,
                    args.dnport,
                    connection_start_was,
                )
            )
            go_ahead = True
        else:
            print("Error: Connection to stop is not valid. Quitting.")
            print(
                "{}:{}:{} <X> {}:{}:{}".format(
                    args.uppart,
                    args.uprev,
                    args.upport,
                    args.dnpart,
                    args.dnrev,
                    args.dnport,
                )
            )
            go_ahead = False
        if go_ahead:
            # Connect parts
            npc = [
                [
                    args.uppart,
                    args.uprev,
                    args.dnpart,
                    args.dnrev,
                    args.upport,
                    args.dnport,
                    connection_start_was,
                ]
            ]
            cm_partconnect.stop_connections(session, npc, at_date)
|
HERA-TeamREPO_NAMEhera_mcPATH_START.@hera_mc_extracted@hera_mc-main@scripts@stop_connection.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "alisonkyoung1/phantom",
"repo_path": "phantom_extracted/phantom-master/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# -- Project information (shown in page titles and the LaTeX/epub output) --
project = 'Phantom'
copyright = '2024 The Authors'
author = 'Daniel Price'
# The short X.Y version
version = '2024.0'
# The full version, including alpha/beta/rc tags
release = '2024.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.  sphinxfortran provides the Fortran domain/autodoc used for the
# source files listed in fortran_src below.
extensions = [
    'sphinx.ext.autodoc',
    'sphinxfortran.fortran_domain',
    'sphinxfortran.fortran_autodoc',
    'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
#
# custom css files
#
html_css_files = [
    'css/custom.css',
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Phantomdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Phantom.tex', 'Phantom Documentation',
     'Daniel Price', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'phantom', 'Phantom Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Phantom', 'Phantom Documentation',
     author, 'Phantom', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- options for sphinx-fortran
fortran_ext = [ 'f90','F90' ]
# Fortran sources to document with sphinxfortran.
# Fix: a missing comma after '../src/setup/phantomsetup.f90' caused Python's
# implicit string concatenation to silently merge it with the following
# entry into one bogus path ('...phantomsetup.f90../src/tests/directsum.f90'),
# dropping both files from the docs.
fortran_src = [
    '../src/setup/density_profiles.f90',
    '../src/setup/relax_star.f90',
    '../src/setup/set_bfield.f90',
    '../src/setup/set_binary.f90',
    '../src/setup/set_disc.f90',
    '../src/setup/set_dust.f90',
    '../src/setup/set_dust_options.f90',
    '../src/setup/set_flyby.f90',
    '../src/setup/set_planets.f90',
    '../src/setup/set_shock.f90',
    '../src/setup/set_slab.f90',
    '../src/setup/set_softened_core.f90',
    '../src/setup/set_sphere.f90',
    '../src/setup/set_stellar_core.f90',
    '../src/setup/set_vfield.f90',
    '../src/setup/stretchmap.f90',
    '../src/setup/velfield_fromcubes.f90',
    '../src/setup/phantomsetup.f90',
    '../src/tests/directsum.f90',
    '../src/tests/phantomtest.f90',
    '../src/tests/test_cooling.f90',
    '../src/tests/test_corotate.f90',
    '../src/tests/test_derivs.f90',
    '../src/tests/test_dust.f90',
    '../src/tests/test_eos.f90',
    '../src/tests/test_externf.f90',
    #'../src/tests/test_externf_gr.f90',
    '../src/tests/test_fastmath.f90',
    '../src/tests/test_geometry.f90',
    '../src/tests/test_gnewton.f90',
    '../src/tests/test_gr.f90',
    '../src/tests/test_gravity.f90',
    '../src/tests/test_growth.f90',
    '../src/tests/test_indtstep.f90',
    '../src/tests/test_kdtree.f90',
    '../src/tests/test_kernel.f90',
    '../src/tests/test_link.f90',
    '../src/tests/test_luminosity.f90',
    '../src/tests/test_nonidealmhd.f90',
    '../src/tests/test_ptmass.f90',
    '../src/tests/test_radiation.f90',
    '../src/tests/test_rwdump.f90',
    '../src/tests/test_sedov.f90',
    '../src/tests/test_setdisc.f90',
    '../src/tests/test_smol.f90',
    '../src/tests/test_step.f90',
    '../src/tests/testsuite.f90',
    '../src/tests/utils_testsuite.f90',
    '../src/main/boundary.f90',
    '../src/main/centreofmass.f90',
    '../src/main/commons.f90',
    '../src/main/cons2prim.f90',
    '../src/main/cons2primsolver.f90',
    '../src/main/damping.f90',
    '../src/main/datafiles.f90',
    '../src/main/eos_helmholtz.f90',
    '../src/main/eos_idealplusrad.f90',
    '../src/main/eos_mesa.f90',
    #'../src/main/eos_shen.f90',
    '../src/main/extern_Bfield.f90',
    '../src/main/extern_binary.f90',
    #'../src/main/extern_binary_gw.f90',
    '../src/main/extern_corotate.f90',
    '../src/main/extern_densprofile.f90',
    '../src/main/extern_gwinspiral.f90',
    '../src/main/extern_lensethirring.f90',
    '../src/main/extern_spiral.f90',
    '../src/main/extern_staticsine.f90',
    '../src/main/fastmath.f90',
    '../src/main/fs_data.f90',
    '../src/main/geometry.f90',
    '../src/main/gitinfo.f90',
    '../src/main/h2chem.f90',
    '../src/main/h2cooling.f90',
    '../src/main/inverse4x4.f90',
    '../src/main/krome.f90',
    '../src/main/mf_write.f90',
    '../src/main/mol_data.f90',
    '../src/main/options.f90',
    '../src/main/photoevap.f90',
    '../src/main/physcon.f90',
    '../src/main/quitdump.f90',
    '../src/main/random.f90',
    '../src/main/units.f90',
    '../src/main/utils_allocate.f90',
    '../src/main/utils_binary.f90',
    '../src/main/utils_cpuinfo.f90',
    '../src/main/utils_datafiles.f90',
    '../src/main/utils_deriv.f90',
    '../src/main/utils_dumpfiles.f90',
    '../src/main/utils_filenames.f90',
    '../src/main/utils_infiles.f90',
    '../src/main/utils_inject.f90',
    '../src/main/utils_mathfunc.f90',
    '../src/main/utils_sort.f90',
    '../src/main/utils_sphng.f90',
    '../src/main/utils_spline.f90',
    '../src/main/utils_tables.f90',
    '../src/main/utils_timing.f90',
    '../src/main/utils_vectors.f90',
    '../src/main/viscosity.f90',
]
# sphinxfortran's parser recurses deeply on large sources
sys.setrecursionlimit(10000)
|
alisonkyoung1REPO_NAMEphantomPATH_START.@phantom_extracted@phantom-master@docs@conf.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "jfcrenshaw/aos_notebooks",
"repo_path": "aos_notebooks_extracted/aos_notebooks-main/README.md",
"type": "Markdown"
}
|
# aos_notebooks
Notebooks for AOS
|
jfcrenshawREPO_NAMEaos_notebooksPATH_START.@aos_notebooks_extracted@aos_notebooks-main@README.md@.PATH_END.py
|
{
"filename": "example_tess_centroids_simple.ipynb",
"repo_name": "stevepur/transit-diffImage",
"repo_path": "transit-diffImage_extracted/transit-diffImage-main/examples/example_tess_centroids_simple.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import matplotlib.pyplot as plt
import sys
import pickle
import numpy as np
from transitDiffImage import tessDiffImage, transitCentroids
from transitDiffImage import tessprfmodel as tprf
import tess_stars2px
```
```python
def quick_flux_centroid(arr, extent, constrain=True):
    """Estimate a flux-weighted (first-moment) centroid of an image.

    The image median is subtracted as a crude background estimate before
    the moments are computed on the pixel grid implied by *extent*.

    Parameters
    ----------
    arr : 2D ndarray
        Pixel image.
    extent : sequence of 4 numbers
        (xmin, xmax, ymin, ymax) pixel coordinates of the image.
    constrain : bool
        If True, a centroid that falls outside *extent* in a coordinate is
        replaced by the center of the extent in that coordinate.

    Returns
    -------
    list of float
        [xc, yc] centroid position in extent coordinates.
    """
    n_rows, n_cols = arr.shape
    col_coords = np.linspace(extent[0], extent[1] - 1, n_cols)
    row_coords = np.linspace(extent[2], extent[3] - 1, n_rows)
    grid_x, grid_y = np.meshgrid(col_coords, row_coords)
    # Median-subtract so residual background contributes less to the moments.
    flux = arr - np.median(arr)
    total_flux = flux.sum()
    xc = (grid_x * flux).sum() / total_flux
    yc = (grid_y * flux).sum() / total_flux
    if constrain:
        # Fall back to the image center when the centroid leaves the extent
        # (e.g. when residual pixel values dominate a difference image).
        if xc < extent[0] or xc > extent[1]:
            xc = np.mean(extent[0:2])
        if yc < extent[2] or yc > extent[3]:
            yc = np.mean(extent[2:])
    return [xc, yc]
```
We use the same example as the notebook example_diffimages. First we make and display the difference image, exactly like in that notebook. See example_diffimages for details of what's happening.
```python
star = {}
star['id'] = 25375553
star['raDegrees'] = 328.76768
star['decDegrees'] = -22.61258
planet0 = {}
planet0['planetID'] = "TOI_" + str(143.01)
planet0['period'] = 2.31097
planet0['epoch'] = 1325.58249
planet0['durationHours'] = 3.129
```
```python
from tess_stars2px import tess_stars2px_function_entry
outID, outEclipLong, outEclipLat, outSec, outCam, outCcd,\
outColPix, outRowPix, scinfo = tess_stars2px.tess_stars2px_function_entry(star['id'],
star['raDegrees'],
star['decDegrees'])
print(outSec, outCam, outCcd)
```
[ 1 28 68] [1 1 1] [3 3 3]
We will make difference images for all possible sectors, and need to record the ccd.
```python
star['sector'] = 1
star['cam'] = None
star['ccd'] = None
star['planetData'] = [planet0]
# If you have quality flags for each cadence in the sector in a file, name the file here; otherwise None
star['qualityFiles'] = None
# If you have quality flags for each cadence in the sector in an array, put the array here; otherwise None
star['qualityFlags'] = None
tdi = tessDiffImage.tessDiffImage(star)
tdi.make_ffi_difference_image(thisPlanet=0)
```
curl "https://mast.stsci.edu/tesscut/api/v0.1/astrocut?ra=328.76768&dec=-22.61258&y=21&x=21§or=1" --output ./tic25375553_s1.zip
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 10.9M 0 10.9M 0 0 2239k 0 --:--:-- 0:00:04 --:--:-- 3093k
Archive: ./tic25375553_s1.zip
extracting: ./tic25375553/tess-s0001-1-3_328.767680_-22.612580_21x21_astrocut.fits
['./tic25375553/tess-s0001-1-3_328.767680_-22.612580_21x21_astrocut.fits']
./tic25375553/tess-s0001-1-3_328.767680_-22.612580_21x21_astrocut.fits
making difference image for sector 1
/Users/steve/opt/anaconda3/lib/python3.9/site-packages/erfa/core.py:4613: RuntimeWarning: invalid value encountered in ld
p1 = ufunc.ld(bm, p, q, e, em, dlim)
/Users/steve/opt/anaconda3/lib/python3.9/site-packages/erfa/core.py:19005: RuntimeWarning: invalid value encountered in anp
c_retval = ufunc.anp(a)
/Users/steve/opt/anaconda3/lib/python3.9/site-packages/transitDiffImage/tessDiffImage.py:456: RuntimeWarning: Mean of empty slice
meanInTransit = np.nanmean(pixelData["flux"][inTransitIndices,::-1,:], axis=0)
/Users/steve/opt/anaconda3/lib/python3.9/site-packages/transitDiffImage/tessDiffImage.py:457: RuntimeWarning: invalid value encountered in true_divide
meanInTransitSigma = np.sqrt(np.sum(pixelData["fluxErr"][inTransitIndices,::-1,:]**2, axis=0)/len(inTransitIndices))
/Users/steve/opt/anaconda3/lib/python3.9/site-packages/transitDiffImage/tessDiffImage.py:458: RuntimeWarning: Mean of empty slice
meanOutTransit = np.nanmean(pixelData["flux"][outTransitIndices,::-1,:], axis=0)
/Users/steve/opt/anaconda3/lib/python3.9/site-packages/transitDiffImage/tessDiffImage.py:459: RuntimeWarning: invalid value encountered in true_divide
meanOutTransitSigma = np.sqrt(np.sum(pixelData["fluxErr"][outTransitIndices,::-1,:]**2, axis=0)/len(outTransitIndices))
For various reasons not all sectors will get difference images, so see what sectors actually have difference images.
```python
tdi.sectorList
```
[1]
```python
if star['sector'] is not None:
sector = star['sector']
else: # have to set desired sector by hand
sector = 1
sectorIndex = tdi.sectorList.index(sector) # make sure outSec[sectorIndex] is in tdi.sectorList
fname = 'tic25375553/imageData_TOI_143.01_sector' + str(sector) + '.pickle'
with open(fname, 'rb') as f:
imageData = pickle.load(f)
```
```python
diffImageData = imageData[0]
catalogData = imageData[1]
fig, ax = plt.subplots(2,2,figsize=(10,10))
tdi.draw_pix_catalog(diffImageData['diffImage'], catalogData, catalogData["extent"], ax=ax[0,0], fs=14, ss=60, filterStars=True, dMagThreshold=4, annotate=True)
tdi.draw_pix_catalog(diffImageData['diffImage'], catalogData, catalogData["extentClose"], ax=ax[0,1], fs=14, ss=60, filterStars=True, dMagThreshold=4, annotate=True, close=True)
tdi.draw_pix_catalog(diffImageData['meanOutTransit'], catalogData, catalogData["extent"], ax=ax[1,0], fs=14, ss=60, filterStars=True, dMagThreshold=4, annotate=True)
tdi.draw_pix_catalog(diffImageData['meanOutTransit'], catalogData, catalogData["extentClose"], ax=ax[1,1], fs=14, ss=60, filterStars=True, dMagThreshold=4, annotate=True, close=True)
ax[0,0].set_title('Difference Image')
ax[0,1].set_title('Difference Image (Close-up)')
ax[1,0].set_title('Direct Image')
ax[1,1].set_title('Direct Image (Close-up)')
```
Text(0.5, 1.0, 'Direct Image (Close-up)')

Now we perform a centroid analysis to find the position of the star that is making the difference image. We use the principle that, if all the change in the pixel flux is due to changes in one star such as a transit, then the difference image contains that star's image. The star's image is determined by the TESS Pixel Response Function (PRF).
We will use PRF centroiding, which finds the position of the TESS PRF that results in pixels that are the best match for the difference image. That pixel position should correspond to the position of the star causing the change.
We perform PRF centroiding using a nonlinear minimization of the sum of the squares of the difference between a synthetic image created using the PRF and the observed difference image. Our synthetic image is
$$ I = a*PRF(x,y) + o. $$ We find the set $(x, y, a, o)$ so that the image $I$ best matches the difference image in a least squares sense.
To seed the minimization, we compute a flux-weighted centroid. We do not expect this centroid to be very accurate because of residual pixel values in the difference image.
We do our centroiding in a 7x7 closeup of the difference image, rather than the default 5x5 closeup. This helps us get a good seed position from the flux-weighted centroid. So first we extract the central 7x7 pixels, and define the appropriate extent.
Create the TESS PRF object.
```python
prf = tprf.SimpleTessPRF(shape=diffImageData["diffImage"].shape,
sector = outSec[sectorIndex],
camera = outCam[sectorIndex],
ccd = outCcd[sectorIndex],
column=catalogData["extent"][0],
row=catalogData["extent"][2],
# prfFileLocation = "../../tessPrfFiles/"
)
```
Compute the centroid
```python
fitVector, prfFitQuality, fluxCentroid, closeDiffImage, closeExtent = transitCentroids.tess_PRF_centroid(prf,
catalogData["extent"],
diffImageData["diffImage"],
catalogData)
```
```python
print("PRF fit quality = " + str(prfFitQuality))
```
PRF fit quality = 0.541993964578577
Show the flux-weighted and PRF-fit centroids on the difference image, along with the position of the target star (the first star in the catalog data).
```python
plt.imshow(closeDiffImage, cmap='jet', origin='lower', extent=closeExtent)
plt.plot(fluxCentroid[0], fluxCentroid[1], 'w+', label = "flux-weighted centroid", zorder=200)
plt.plot(fitVector[0], fitVector[1], 'ws', label = "PRF-fit centroid", zorder=200)
plt.axvline(catalogData["targetColPix"][0], c='y', label = "target star")
plt.axhline(catalogData["targetRowPix"][0], c='y')
plt.colorbar()
plt.legend()
```
<matplotlib.legend.Legend at 0x7fa579997be0>

Make a full-size synthetic difference image to compare with the original difference image.
```python
plt.figure(figsize=(15,5))
plt.subplot(1, 2, 1)
plt.imshow(transitCentroids.render_prf(prf, fitVector, catalogData),
cmap='jet', origin='lower', extent=catalogData["extent"])
plt.colorbar()
plt.subplot(1, 2, 2)
plt.imshow(diffImageData["diffImage"],
cmap='jet', origin='lower', extent=catalogData["extent"])
plt.colorbar();
```

```python
centroidRa, centroidDec, scinfo = tess_stars2px.tess_stars2px_reverse_function_entry(outSec[sectorIndex],
outCam[sectorIndex], outCcd[sectorIndex],
fitVector[0], fitVector[1], scInfo=scinfo)
print([centroidRa, centroidDec])
print([catalogData['correctedRa'][0], catalogData['correctedDec'][0]])
dRa = centroidRa - catalogData['correctedRa'][0]
dDec = centroidDec - catalogData['correctedDec'][0]
print("distance = " + str(3600*np.sqrt((dRa*np.cos(catalogData['correctedDec'][0]*np.pi/180))**2 + dDec**2)) + " arcsec")
```
[328.77314121904953, -22.609677058856168]
[328.7677505341583, -22.61260354944462]
distance = 20.78285497131868 arcsec
```python
print([centroidRa, centroidDec])
print([star["raDegrees"], star["decDegrees"]])
dRa = centroidRa - star["raDegrees"]
dDec = centroidDec - star["decDegrees"]
print("distance = " + str(3600*np.sqrt((dRa*np.cos(star["decDegrees"]*np.pi/180))**2 + dDec**2)) + " arcsec")
```
[328.77314121904953, -22.609677058856168]
[328.76768, -22.61258]
distance = 20.942813542974307 arcsec
```python
outID, outEclipLong, outEclipLat, outSec, outCam, outCcd,\
outColPix, outRowPix, scinfo = tess_stars2px.tess_stars2px_function_entry(star['id'],
star['raDegrees'],
star['decDegrees'], aberrate=True)
pixRa, pixDec, scinfo = tess_stars2px.tess_stars2px_reverse_function_entry(outSec[sectorIndex],
outCam[sectorIndex], outCcd[sectorIndex],
outColPix[sectorIndex], outRowPix[sectorIndex], scInfo=scinfo)
print([pixRa, pixDec])
print([star["raDegrees"], star["decDegrees"]])
dRa = pixRa - star["raDegrees"]
dDec = pixDec - star["decDegrees"]
print("distance = " + str(3600*np.sqrt((dRa*np.cos(star["decDegrees"]*np.pi/180))**2 + dDec**2)) + " arcsec")
```
[328.7733179891728, -22.610562289050502]
[328.76768, -22.61258]
distance = 20.095205614658767 arcsec
```python
starID, starEclipLong, starEclipLat, starSec, starCam, starCcd,\
starColPix, starRowPix, scinfo = tess_stars2px.tess_stars2px_function_entry(catalogData['ticID'][0],
catalogData['correctedRa'][0],
catalogData['correctedDec'][0],
trySector = 1,
aberrate=True)
```
```python
[starColPix[0], starRowPix[0]]
```
[882.0039173522332, 620.8323006001381]
```python
centroidRa, centroidDec, scinfo = tess_stars2px.tess_stars2px_reverse_function_entry(tdi.sectorList[sectorIndex],
outCam[sectorIndex], outCcd[sectorIndex],
fitVector[0], fitVector[1])
outID, centroidEclipLong, centroidEclipLat, centroidSec, centroidCam, centroidCcd,\
centroidColPix, centroidRowPix, scinfo = tess_stars2px.tess_stars2px_function_entry(0,
centroidRa,
centroidDec,
aberrate=True,
trySector=tdi.sectorList[sectorIndex])
dCol = centroidColPix[0] - fitVector[0]
dRow = centroidRowPix[0] - fitVector[1]
d2 = dCol*dCol + dRow*dRow
print(np.sqrt(d2))
```
0.9927342096254843
```python
centroidRa, centroidDec, scinfo = tess_stars2px.tess_stars2px_reverse_function_entry(tdi.sectorList[sectorIndex],
outCam[sectorIndex], outCcd[sectorIndex],
fitVector[0], fitVector[1])
print(tessDiffImage.pix_distance([centroidRa, centroidDec], tdi.sectorList[sectorIndex], outCam[sectorIndex],
outCcd[sectorIndex], fitVector[0], fitVector[1]))
```
0.985521210960735
```python
raDec = tessDiffImage.pix_to_ra_dec(tdi.sectorList[sectorIndex],
outCam[sectorIndex], outCcd[sectorIndex],
fitVector[0], fitVector[1])
print(raDec)
print([centroidRa, centroidDec])
print(tessDiffImage.pix_distance(raDec, tdi.sectorList[sectorIndex], outCam[sectorIndex], outCcd[sectorIndex],
fitVector[0], fitVector[1]))
```
[328.76750326 -22.61169476]
[328.77314121904953, -22.609677058856168]
1.4435792935224958e-12
```python
dRa = raDec[0] - catalogData['correctedRa'][0]
dDec = raDec[1] - catalogData['correctedDec'][0]
print("distance = " + str(3600*np.sqrt((dRa*np.cos(catalogData['correctedDec'][0]*np.pi/180))**2 + dDec**2)) + " arcsec")
```
distance = 3.373267049948371 arcsec
```python
```
|
stevepurREPO_NAMEtransit-diffImagePATH_START.@transit-diffImage_extracted@transit-diffImage-main@examples@example_tess_centroids_simple.ipynb@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "Jashcraf/poke",
"repo_path": "poke_extracted/poke-main/docs/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the repository root importable so autodoc can find the package modules.
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'poke'
copyright = '2023, Jaren N. Ashcraft'
author = 'Jaren N. Ashcraft'
# The full version, including alpha/beta/rc tags
release = 'v1.0.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# numpydoc parses NumPy-style docstrings; nbsphinx renders example notebooks.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'numpydoc',
    'nbsphinx',
    'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
|
JashcrafREPO_NAMEpokePATH_START.@poke_extracted@poke-main@docs@conf.py@.PATH_END.py
|
{
"filename": "core.py",
"repo_name": "jakevdp/nfft",
"repo_path": "nfft_extracted/nfft-master/nfft/core.py",
"type": "Python"
}
|
from __future__ import division
import numpy as np
from .kernels import KERNELS
from .utils import nfft_matrix, fourier_sum, inv_fourier_sum
def ndft(x, f_hat):
    """Evaluate the non-equispaced direct Fourier transform.

    Computes f_j = sum over -N/2 <= k < N/2 of f_hat_k * exp(-2 pi i k x_j)
    by explicit O(M N) summation.

    Parameters
    ----------
    x : array_like, shape=(M,)
        The locations of the data points.
    f_hat : array_like, shape=(N,)
        The amplitudes at each wave number k = range(-N/2, N/2);
        N must be even.

    Returns
    -------
    f : ndarray, shape=(M,)
        The direct Fourier summation corresponding to x.

    See Also
    --------
    nfft, ndft_adjoint, nfft_adjoint
    """
    x = np.asarray(x)
    f_hat = np.asarray(f_hat)
    assert x.ndim == 1
    assert f_hat.ndim == 1

    num_freq = len(f_hat)
    assert num_freq % 2 == 0

    wave_numbers = np.arange(num_freq) - num_freq // 2
    phase = np.exp(-2j * np.pi * np.outer(wave_numbers, x))
    return f_hat @ phase
def ndft_adjoint(x, f, N):
    """Evaluate the adjoint non-equispaced direct Fourier transform.

    Computes f_hat_k = sum over 0 <= j < M of f_j * exp(2 pi i k x_j)
    for wave numbers k = range(-N/2, N/2), by explicit O(M N) summation.

    Parameters
    ----------
    x : array_like, shape=(M,)
        The locations of the data points.
    f : array_like, shape=(M,)
        The amplitudes at each location x.
    N : int
        The number of frequencies at which to evaluate the result;
        must be even.

    Returns
    -------
    f_hat : ndarray, shape=(N,)
        The amplitudes corresponding to each wave number
        k = range(-N/2, N/2).

    See Also
    --------
    nfft_adjoint, ndft, nfft
    """
    x, f = np.broadcast_arrays(x, f)
    assert x.ndim == 1

    num_freq = int(N)
    assert num_freq % 2 == 0

    wave_numbers = np.arange(num_freq) - num_freq // 2
    phase = np.exp(2j * np.pi * np.outer(x, wave_numbers))
    return f @ phase
def nfft(x, f_hat, sigma=3, tol=1E-8, m=None, kernel='gaussian',
         use_fft=True, truncated=True):
    """Approximate the non-equispaced fast Fourier transform.

    Computes f_j = sum over -N/2 <= k < N/2 of f_hat_k * exp(-2 pi i k x_j)
    via window deconvolution, an oversampled regular-grid transform, and
    interpolation onto the non-equispaced points.

    Parameters
    ----------
    x : array_like, shape=(M,)
        Sample locations; each value should lie in [-1/2, 1/2).
    f_hat : array_like, shape=(N,)
        Amplitudes at each wave number k = range(-N/2, N/2); N must be even.
    sigma : int, optional (default=3)
        Oversampling factor for the FFT gridding (must be >= 2).
    tol : float, optional (default=1E-8)
        Desired tolerance of the truncation approximation.
    m : int, optional
        Half-width of the truncated window; estimated from ``tol`` if omitted.
    kernel : string or NFFTKernel, optional (default='gaussian')
        Convolution kernel used for the gridding.
    use_fft : bool, optional (default=True)
        If True, use the FFT rather than DFT for fast computation.
    truncated : bool, optional (default=True)
        If True, use the fast truncated summation matrix; otherwise the
        slow full matrix.

    Returns
    -------
    f : ndarray, shape=(M,)
        The approximate Fourier summation evaluated at the points x.

    See Also
    --------
    ndft, nfft_adjoint, ndft_adjoint
    """
    # Validate and normalize inputs.
    x = np.asarray(x)
    f_hat = np.asarray(f_hat)
    assert x.ndim == 1
    assert f_hat.ndim == 1

    num_freq = len(f_hat)
    assert num_freq % 2 == 0

    oversampling = int(sigma)
    assert oversampling >= 2
    grid_size = num_freq * oversampling

    # Resolve a kernel name to its object; kernel instances pass through.
    win = KERNELS.get(kernel, kernel)
    half_width = win.estimate_m(tol, num_freq, oversampling) if m is None else m
    half_width = int(half_width)
    assert half_width <= grid_size // 2

    wave_numbers = np.arange(num_freq) - num_freq // 2

    # Deconvolve by the window transform, evaluate on the oversampled grid,
    # then interpolate onto the non-equispaced sample points.
    ghat = f_hat / win.phi_hat(wave_numbers, grid_size, half_width,
                               oversampling) / grid_size
    grid_values = fourier_sum(ghat, num_freq, grid_size, use_fft=use_fft)
    interp = nfft_matrix(x, grid_size, half_width, oversampling, win,
                         truncated=truncated)
    return interp.dot(grid_values)
def nfft_adjoint(x, f, N, sigma=3, tol=1E-8, m=None, kernel='gaussian',
                 use_fft=True, truncated=True):
    """Approximate the adjoint non-equispaced fast Fourier transform.

    Computes f_hat_k = sum over 0 <= j < M of f_j * exp(2 pi i k x_j) for
    k = range(-N/2, N/2), via spreading onto an oversampled regular grid,
    a grid transform, and window deconvolution.

    Parameters
    ----------
    x : array_like, shape=(M,)
        The locations of the data points.
    f : array_like, shape=(M,)
        The amplitudes at each location x.
    N : int
        The number of frequencies at which to evaluate the result;
        must be even.
    sigma : int, optional (default=3)
        Oversampling factor for the FFT gridding (must be >= 2).
    tol : float, optional (default=1E-8)
        Desired tolerance of the truncation approximation.
    m : int, optional
        Half-width of the truncated window; estimated from ``tol`` if omitted.
    kernel : string or NFFTKernel, optional (default='gaussian')
        Convolution kernel used for the gridding.
    use_fft : bool, optional (default=True)
        If True, use the FFT rather than DFT for fast computation.
    truncated : bool, optional (default=True)
        If True, use the fast truncated summation matrix; otherwise the
        slow full matrix.

    Returns
    -------
    f_hat : ndarray, shape=(N,)
        The approximate amplitudes for each wave number
        k = range(-N/2, N/2).

    See Also
    --------
    ndft_adjoint, nfft, ndft
    """
    # Validate and normalize inputs.
    x, f = np.broadcast_arrays(x, f)
    assert x.ndim == 1

    num_freq = int(N)
    assert num_freq % 2 == 0

    oversampling = int(sigma)
    assert oversampling >= 2
    grid_size = num_freq * oversampling

    # Resolve a kernel name to its object; kernel instances pass through.
    win = KERNELS.get(kernel, kernel)
    half_width = win.estimate_m(tol, num_freq, oversampling) if m is None else m
    half_width = int(half_width)
    assert half_width <= grid_size // 2

    wave_numbers = np.arange(num_freq) - num_freq // 2

    # Spread the samples onto the oversampled grid (adjoint of the
    # interpolation step), transform, then deconvolve by the window.
    spread = nfft_matrix(x, grid_size, half_width, oversampling, win,
                         truncated=truncated)
    grid_values = spread.T.dot(f)
    ghat = inv_fourier_sum(grid_values, num_freq, grid_size, use_fft=use_fft)
    return ghat / win.phi_hat(wave_numbers, grid_size, half_width,
                              oversampling) / grid_size
|
jakevdpREPO_NAMEnfftPATH_START.@nfft_extracted@nfft-master@nfft@core.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "npirzkal/GRISM_NIRCAM",
"repo_path": "GRISM_NIRCAM_extracted/GRISM_NIRCAM-master/README.md",
"type": "Markdown"
}
|
# GRISM_NIRCAM
grismconf (https://github.com/npirzkal/GRISMCONF) configuration files for JWST NIRCAM
## Note: ##
The V1, V2, and V3 grismconf configuration files are based on pre-launch data and meant to be used for simulation purposes only.
They do not reflect the field dependence of the traces, nor the sensitivities, as measured during JWST commissioning.
|
npirzkalREPO_NAMEGRISM_NIRCAMPATH_START.@GRISM_NIRCAM_extracted@GRISM_NIRCAM-master@README.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/agents/agent_toolkits/gmail/__init__.py",
"type": "Python"
}
|
"""Gmail toolkit."""
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@agents@agent_toolkits@gmail@__init__.py@.PATH_END.py
|
{
"filename": "dosnowballflags.py",
"repo_name": "chriswillott/jwst",
"repo_path": "jwst_extracted/jwst-master/dosnowballflags.py",
"type": "Python"
}
|
#!/usr/bin/env python
import numpy as np
import os
from astropy.io import fits
from jwst.datamodels import dqflags
from photutils.segmentation import detect_sources
from photutils.segmentation import SourceCatalog
from copy import deepcopy
from skimage.draw import disk
def snowballflags(jumpdirfile,filtername,npixfind,satpixradius,halofactorradius,imagingmode):
    """
    Flag pixels in snowballs - expand saturated ring and diffuse halo jump ring.
    The GROUPDQ array will be flagged with the SATURATED and JUMP_DET flags.
    Saturating snowballs early in short ramps can have unflagged central pixels that jump in the previous group.
    This is called after the regular jump step.
    The output file is overwritten.
    You need a working installation of WebbPSF.
    Requires checkifstar.py if imagingmode is True
    Parameters:
    jumpdirfile - path to the input file, which has jump step applied
    filtername - used for comparing to a WebbPSF star
    npixfind - number of connected pixels to find snowballs
    satpixradius - how many extra pixels to flag as saturated to account for slow charge migration near the saturated core - default 2
    halofactorradius - factor to increase radius of whole snowball for jump flagging - default 2.0
    imagingmode - boolean for whether imaging or spectroscopy. For imaging mode checks to see if the detected object is a star and then does not expand DQ arrays
    """
    with fits.open(jumpdirfile) as hdulist:
        sci = hdulist['SCI'].data
        pdq = hdulist['PIXELDQ'].data
        gdq = hdulist['GROUPDQ'].data
        header = hdulist[0].header
        ins = header['INSTRUME'].lower()
        nint = header['NINTS']
        ngroup = header['NGROUPS']
        # NOTE(review): grouparray appears unused in the rest of this function.
        grouparray=np.arange(ngroup)+1
        #Set up webbpsf psfs for checking if a star
        # Per-instrument cutout size and PSF-comparison parameters consumed by
        # makewebbpsfmask/checkif below.
        if ins=='niriss':
            pixscale=0.0656
            cutsize=37
            radmin = 9
            radmax = 16
            spikeratio = 1.4
        elif ins=='nircam':
            channel = header['CHANNEL']
            if channel == 'SHORT':
                pixscale = 0.033
                cutsize = 74
                radmin = 18
                radmax = 32
                spikeratio = 1.4
            else:
                # NOTE(review): 0.66 looks like it may be a typo for the NIRCam
                # long-wavelength scale 0.066 arcsec/pix — TODO confirm.
                pixscale = 0.66
                cutsize = 37
                radmin = 9
                radmax = 16
                spikeratio = 1.4
        elif ins=='nirspec':
            #Note these parameters only used for imaging mode so dummy values for NIRSpec
            pixscale = 0.10
            cutsize = 25
            radmin = 6
            radmax = 12
            spikeratio = 1.4
        #Make the WebbPSF mask (will not repeat the actual WebbPSF call if the file already exists)
        if imagingmode == True:
            from checkifstar import checkif, makewebbpsfmask
            print ('Running makewebbpsfmask',ins,filtername,pixscale,cutsize,radmin,radmax)
            webbpsfcutoutmask = makewebbpsfmask(ins,filtername,pixscale,cutsize,radmin,radmax)
        #iterate over integrations
        for h in range(nint):
            ctsnow=0
            #Skip first group because no jumps there
            for j in range(1,ngroup):
                #Find pixels in this group with jump detected and/or saturation detected
                # Binary masks over the full detector frame (assumes a
                # 2048x2048 array — TODO confirm for subarray data).
                jumps = np.zeros((2048,2048),dtype='uint8')
                sat = np.zeros((2048,2048),dtype='uint8')
                jumpsorsat = np.zeros((2048,2048),dtype='uint8')
                scithisgroup = np.squeeze(sci[h,j,:,:])
                dqthisgroup = np.squeeze(gdq[h,j,:,:])
                i_yy,i_xx, = np.where(np.bitwise_and(dqthisgroup, dqflags.group['JUMP_DET']) != 0)
                jumps[i_yy,i_xx] = 1
                jumpsorsat[i_yy,i_xx] = 1
                i_yy,i_xx, = np.where(np.bitwise_and(dqthisgroup, dqflags.group['SATURATED']) != 0)
                sat[i_yy,i_xx] = 1
                jumpsorsat[i_yy,i_xx] = 1
                #Set some low threshold for finding sources in noiseless DQ array
                threshsigma = 3.0
                bkg = 0.0
                stddev = 0.00007
                photthreshold = bkg + (threshsigma * stddev)
                #Run initial find on jumps or saturated because some short ramps do not have a jump in the regions that saturate
                segm_detect = detect_sources(jumpsorsat, photthreshold, npixels=npixfind)
                # NOTE(review): detect_sources returns None when nothing is
                # found, which would raise AttributeError here — TODO confirm.
                segimage = segm_detect.data.astype(np.uint32)
                if np.max(segimage)>0:
                    segmcat = SourceCatalog(jumps, segm_detect)
                    segmtbl = segmcat.to_table()
                    ctsnowballs = segmtbl['xcentroid'][:].size
                    #print (j,ctsnowballs,segmtbl)
                    #Iterate over each possible snowball
                    for k in range(ctsnowballs):
                        #If low eccentricity proceed, otherwise remove source from segmentation image
                        #Use both eccentricity and segmentation box axis since not always consistent, e.g. for merged jumps
                        segboxaxisratio = np.abs((segmtbl['bbox_xmax'][k]-segmtbl['bbox_xmin'][k])/(segmtbl['bbox_ymax'][k]-segmtbl['bbox_ymin'][k]))
                        # Normalize the ratio so it is always >= 1 before comparing.
                        if segboxaxisratio<1.0:
                            segboxaxisratio = np.abs((segmtbl['bbox_ymax'][k]-segmtbl['bbox_ymin'][k])/(segmtbl['bbox_xmax'][k]-segmtbl['bbox_xmin'][k]))
                        if ((segmtbl['eccentricity'][k]<0.6)&(segboxaxisratio<1.5)):
                            #print (j,k+1,segmtbl['xcentroid'][k],segmtbl['ycentroid'][k],'eccen=',segmtbl['eccentricity'][k],segmtbl['bbox_ymin'][k],segmtbl['bbox_ymax'][k],segmtbl['bbox_xmin'][k],segmtbl['bbox_xmax'][k],segboxaxisratio)
                            #Check if a star by running the checkifstar.py code on the relevant group of the jump cube sci array masking out bad pixels inc jump and saturated pixels
                            #First cutout should be same size as WebbPSF PSF
                            if imagingmode == True:
                                xlo = int(segmtbl['xcentroid'][k]-(cutsize-1)/2)
                                xhi = xlo+cutsize
                                ylo = int(segmtbl['ycentroid'][k]-(cutsize-1)/2)
                                yhi = ylo+cutsize
                                scicutout = deepcopy(scithisgroup[ylo:yhi,xlo:xhi])
                                pdqcutout = deepcopy(pdq[ylo:yhi,xlo:xhi])
                                jumpscutout = jumps[ylo:yhi,xlo:xhi]
                                satcutout = sat[ylo:yhi,xlo:xhi]
                                # Mark jump/saturated pixels as bad so checkif ignores them.
                                pdqcutout[np.where(jumpscutout>0)] = 1
                                pdqcutout[np.where(satcutout>0)] = 1
                                #Run the check to see if this is a saturated star rather than a snowball
                                isstar = checkif(scicutout,pdqcutout,webbpsfcutoutmask,radmin,radmax,spikeratio)
                            else:
                                isstar = False
                            if isstar == False:
                                jumpscutout = jumps[int(segmtbl['bbox_ymin'][k]):int(segmtbl['bbox_ymax'][k]),int(segmtbl['bbox_xmin'][k]):int(segmtbl['bbox_xmax'][k])]
                                satcutout = sat[int(segmtbl['bbox_ymin'][k]):int(segmtbl['bbox_ymax'][k]),int(segmtbl['bbox_xmin'][k]):int(segmtbl['bbox_xmax'][k])]
                                jumpsorsatcutout = jumpsorsat[int(segmtbl['bbox_ymin'][k]):int(segmtbl['bbox_ymax'][k]),int(segmtbl['bbox_xmin'][k]):int(segmtbl['bbox_xmax'][k])]
                                #Triple box size for increased area to flag further out
                                bigoffsetx = int((segmtbl['bbox_xmax'][k]-int(segmtbl['bbox_xmin'][k])))
                                bigoffsety = int((segmtbl['bbox_ymax'][k]-int(segmtbl['bbox_ymin'][k])))
                                bigsizex = jumpscutout.shape[1]+2*bigoffsetx
                                bigsizey = jumpscutout.shape[0]+2*bigoffsety
                                jumpsbigcutout = np.zeros((bigsizey,bigsizex),dtype=np.uint8)
                                jumpsbigcutout[bigoffsety:(bigoffsety+jumpscutout.shape[0]),bigoffsetx:(bigoffsetx+jumpscutout.shape[1])] = jumpscutout
                                satbigcutout = np.zeros((bigsizey,bigsizex),dtype=np.uint8)
                                satbigcutout[bigoffsety:(bigoffsety+jumpscutout.shape[0]),bigoffsetx:(bigoffsetx+jumpscutout.shape[1])] = satcutout
                                #For jumps assume round and use all jump or saturated pixels to get area
                                numjumporsat = jumpsorsatcutout[np.where(jumpsorsatcutout>0)].size
                                radiusjumporsat = (numjumporsat/3.14159)**0.5
                                radius = int(halofactorradius*radiusjumporsat)
                                rr, cc = disk((bigsizey/2-0.5,bigsizex/2-0.5), radius)
                                jumpsbigcutout[rr, cc] = 4
                                #For saturation assume round and use saturated pixels to get area
                                numsat = satcutout[np.where(satcutout>0)].size
                                radiussat = (numsat/3.14159)**0.5
                                radius = int(radiussat+satpixradius)
                                rr, cc = disk((bigsizey/2-0.5,bigsizex/2-0.5), radius)
                                satbigcutout[rr, cc] = 2
                                # Map the enlarged cutout back to detector coordinates.
                                xlo = int(segmtbl['bbox_xmin'][k])-bigoffsetx
                                xhi = xlo+bigsizex
                                ylo = int(segmtbl['bbox_ymin'][k])-bigoffsety
                                yhi = ylo+bigsizey
                                #Update pixels in GROUPDQ array for halo
                                # The >3 / <2044 guards keep flagging away from the
                                # outermost 4-pixel border (presumably the reference
                                # pixels — TODO confirm).
                                i_yy,i_xx, = np.where(jumpsbigcutout>0)
                                i_yy+=ylo
                                i_xx+=xlo
                                numpix = len(i_xx)
                                for l in range(numpix):
                                    if ((i_xx[l]>3) & (i_xx[l]<2044) & (i_yy[l]>3) & (i_yy[l]<2044)):
                                        gdq[h,j,i_yy[l],i_xx[l]] = np.bitwise_or(gdq[h,j,i_yy[l],i_xx[l]],dqflags.group['JUMP_DET'])
                                #Update pixels in GROUPDQ array for saturated core
                                i_yy,i_xx, = np.where(satbigcutout>0)
                                i_yy+=ylo
                                i_xx+=xlo
                                numpix = len(i_xx)
                                for l in range(numpix):
                                    if ((i_xx[l]>3) & (i_xx[l]<2044) & (i_yy[l]>3) & (i_yy[l]<2044)):
                                        gdq[h,j,i_yy[l],i_xx[l]] = np.bitwise_or(gdq[h,j,i_yy[l],i_xx[l]],dqflags.group['SATURATED'])
                                #if the snowball happened in the third group, flag the second group similarly in case first effects happened there and not enough data for good ramps up to there anyway.
                                if j==2:
                                    i_yy,i_xx, = np.where(jumpsbigcutout>0)
                                    i_yy+=ylo
                                    i_xx+=xlo
                                    numpix = len(i_xx)
                                    for l in range(numpix):
                                        if ((i_xx[l]>3) & (i_xx[l]<2044) & (i_yy[l]>3) & (i_yy[l]<2044)):
                                            gdq[h,j-1,i_yy[l],i_xx[l]] = np.bitwise_or(gdq[h,j-1,i_yy[l],i_xx[l]],dqflags.group['JUMP_DET'])
                                    i_yy,i_xx, = np.where(satbigcutout>0)
                                    i_yy+=ylo
                                    i_xx+=xlo
                                    numpix = len(i_xx)
                                    for l in range(numpix):
                                        if ((i_xx[l]>3) & (i_xx[l]<2044) & (i_yy[l]>3) & (i_yy[l]<2044)):
                                            gdq[h,j-1,i_yy[l],i_xx[l]] = np.bitwise_or(gdq[h,j-1,i_yy[l],i_xx[l]],dqflags.group['SATURATED'])
                                ctsnow+=1
            #Any pixel flagged as saturated in a group must be flagged as saturated in all subsequent groups
            for j in range(ngroup):
                #Find pixels in this group with saturation detected
                # NOTE(review): this 'sat' array is created but not used in this loop.
                sat = np.zeros((2048,2048),dtype='uint8')
                dqthisgroup = np.squeeze(gdq[h,j,:,:])
                i_yy,i_xx, = np.where(np.bitwise_and(dqthisgroup, dqflags.group['SATURATED']) != 0)
                numpix = len(i_xx)
                for l in range(numpix):
                    # The j: slice propagates SATURATED to all later groups.
                    gdq[h,j:,i_yy[l],i_xx[l]] = np.bitwise_or(gdq[h,j:,i_yy[l],i_xx[l]],dqflags.group['SATURATED'])
            header['HISTORY'] = 'Corrected {} snowballs in integration {}'.format(ctsnow,(h+1))
        # Write the updated GROUPDQ and header back, overwriting the input file.
        hdulist['GROUPDQ'].data = gdq
        header.set('SNOWCORR', 'COMPLETE', 'dosnowballflags.py DQ flagging applied')
        hdulist[0].header = header
        #uncomment below to output ramp in a different file
        #snowfile = jumpdirfile.replace('.fits','_snow.fits')
        snowfile = jumpdirfile
        hdulist.writeto(snowfile,overwrite=True)
#Run directly for testing
# Set direct=True and edit the parameters below to exercise snowballflags on a
# single jump-step file without a pipeline wrapper; by default this is a no-op.
direct=False
if direct:
    jumpdirfile = './jw01345001001_02201_00001_nrca1_jump.fits'
    imagingmode = True
    filtername = 'F115W'
    npixfind = 50
    satpixradius=3
    halofactorradius=2
    snowballflags(jumpdirfile,filtername,npixfind,satpixradius,halofactorradius,imagingmode)
|
chriswillottREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-master@dosnowballflags.py@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choropleth/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``choropleth.hoverlabel.bgcolorsrc`` property.

    Delegates all validation to the shared ``SrcValidator`` base class,
    with ``edit_type`` defaulting to ``"none"`` unless overridden via
    ``kwargs``.
    """

    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="choropleth.hoverlabel", **kwargs
    ):
        # Zero-argument super(): this module is Python-3-only (it already
        # uses a trailing comma after **kwargs), so the legacy two-argument
        # form is unnecessary.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choropleth@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "HR.py",
"repo_name": "orlox/mesa_input_data",
"repo_path": "mesa_input_data_extracted/mesa_input_data-master/2016_ULX/scripts/che_examples/HR/HR.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Plot Hertzsprung-Russell diagrams (log Teff vs log L) for the primary
# (history*_A.data) and secondary (history*_B.data) stars of three example
# binaries, marking key evolutionary points, and save the figures as PDFs
# under ../../images/.
from mesa_data import *
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
import matplotlib.patheffects as pe
# Matplotlib rc settings for publication-quality PDF output.
params = {'backend': 'pdf',
          'figure.figsize': [4.3, 3.0],
          'font.family':'serif',
          'font.size':10,
          'font.serif': 'Times Roman',
          'axes.titlesize': 'medium',
          'axes.labelsize': 'medium',
          'legend.fontsize': 8,
          'legend.frameon' : False,
          'text.usetex': True,
          'figure.dpi': 600,
          'lines.markersize': 2,
          'lines.linewidth': 3,
          'lines.antialiased': False,
          'path.simplify': False,
          'legend.handlelength':3,
          'figure.subplot.bottom':0.15,
          'figure.subplot.top':0.9,
          'figure.subplot.left':0.15,
          'figure.subplot.right':0.92}
# Color-blind-safe palette (Paul Tol style hex colors).
hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',\
           '#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466','#4477AA']
# `mpl` comes from the `from pylab import *` above.
mpl.rcParams.update(params)
# --- First figure: HR diagram of the primary star ---
fig, axes= plt.subplots(1)
profs_A = [Mesa_Data("history005_A.data"), Mesa_Data("history020_A.data"), Mesa_Data("history060_A.data")]
colors = [hexcols[1], hexcols[5], hexcols[3]]
labels = ["$M_1=70M_\odot, q=0.05, P_{\\rm i}=0.8{\\rm d}$",\
          "$M_1=70M_\odot, q=0.2, P_{\\rm i}=1.1{\\rm d}$",\
          "$M_1=70M_\odot, q=0.6, P_{\\rm i}=1.2{\\rm d}$"]
# Only the q=0.2 (i=1) and q=0.6 (i=2) models are drawn.
for i in [1,2]:
    # Skip the early relaxation phase: find the first index past 50 kyr.
    for k in range(len(profs_A[i].get("log_Teff"))):
        if profs_A[i].get("star_age")[k] > 50000:
            break
    axes.plot(profs_A[i].get("log_Teff")[k:],profs_A[i].get("log_L")[k:],color=colors[i], label = labels[i])
    # Dot at the end of the evolutionary track.
    axes.plot(profs_A[i].get("log_Teff")[-1],profs_A[i].get("log_L")[-1],'o',color=colors[i], ms=6)
    if i == 1:
        # Mark central H exhaustion (TAMS) for the q=0.2 model only.
        for j, centerh1 in enumerate(profs_A[i].get("center_h1")):
            if centerh1 < 1e-6:
                axes.plot(profs_A[i].get("log_Teff")[j],profs_A[i].get("log_L")[j],'o',color=colors[i], ms=6)
                break
# Annotations placed by hand in data coordinates.
axes.text(5.0,5.95,"Secondary RLOF", color = hexcols[3])
axes.text(5.29,6.45,"BH formation", color = hexcols[5])
axes.text(4.995,6.3,"TAMS", color = hexcols[5])
axes.text(4.78,5.76,"ZAMS")
# Inverted Teff axis, as conventional for HR diagrams.
axes.set_xlim([5.3,4.6])
axes.set_ylim([5.72,6.5])
axes.set_xlabel("$\\log~T_\\mathrm{eff}\\;\\rm[K]$")
axes.set_ylabel("$\\log~L\\;\\rm[L_\\odot]$")
axes.legend(loc="lower left")
# `text` comes from pylab; draws in axes-fraction coordinates.
text(0.95, 0.95,'Primary',
     horizontalalignment='right',
     verticalalignment='top',
     transform = axes.transAxes, fontsize=15)
plt.savefig("../../images/HR_primary.pdf")
# --- Second figure: HR diagram of the secondary star ---
plt.figure()
fig, axes= plt.subplots(1)
profs_B = [Mesa_Data("history005_B.data"), Mesa_Data("history020_B.data"), Mesa_Data("history060_B.data")]
labels = ["$M_1=70M_\odot, q=0.05, P_{\\rm i}=0.8{\\rm d}$",\
          "$M_1=70M_\odot, q=0.2, P_{\\rm i}=1.1{\\rm d}$",\
          "$M_1=70M_\odot, q=0.6, P_{\\rm i}=1.2{\\rm d}$"]
# Per-model age cut used to trim the early part of each track.
agelims = [500000,50000,50000]
for i in [1,2]:
    for k in range(len(profs_B[i].get("log_Teff"))):
        if profs_B[i].get("star_age")[k] > agelims[i]:
            break
    axes.plot(profs_B[i].get("log_Teff")[k:],profs_B[i].get("log_L")[k:],color=colors[i])
    axes.plot(profs_B[i].get("log_Teff")[-1],profs_B[i].get("log_L")[-1],'o',color=colors[i], ms=6)
    if i == 1:
        # NOTE(review): this loop reads profs_A (the primary) while every
        # other marker in this figure uses profs_B — possibly a copy-paste
        # slip; confirm against the intended plot.
        for k, centerhe4 in enumerate(profs_A[i].get("center_he4")):
            if centerhe4 < 1e-3:
                axes.plot(profs_B[i].get("log_Teff")[k],profs_B[i].get("log_L")[k],'o',color=colors[i], ms=6)
                break
        # Mark central H exhaustion (TAMS).
        for k, centerh1 in enumerate(profs_B[i].get("center_h1")):
            if centerh1 < 1e-6:
                axes.plot(profs_B[i].get("log_Teff")[k],profs_B[i].get("log_L")[k],'o',color=colors[i], ms=6)
                break
        # Mark central He exhaustion.
        for k, centerhe4 in enumerate(profs_B[i].get("center_he4")):
            if centerhe4 < 1e-6:
                axes.plot(profs_B[i].get("log_Teff")[k],profs_B[i].get("log_L")[k],'o',color=colors[i], ms=6)
                break
        # Highlight the mass-transfer phases (hard-coded history-row ranges
        # specific to this model: Case A, Case AB, Case ABB).
        axes.plot(profs_B[i].get("log_Teff")[3791:4273],profs_B[i].get("log_L")[3791:4273],color=hexcols[8],\
                  path_effects=[pe.Stroke(linewidth=7, foreground='k'), pe.Normal()], solid_capstyle='round',lw=6, zorder=-100)
        axes.plot(profs_B[i].get("log_Teff")[4471:4600],profs_B[i].get("log_L")[4471:4600],color=hexcols[8],\
                  path_effects=[pe.Stroke(linewidth=7, foreground='k'), pe.Normal()], solid_capstyle='round',lw=6, zorder=-100)
        axes.plot(profs_B[i].get("log_Teff")[5891:6200],profs_B[i].get("log_L")[5891:6200],color=hexcols[8],\
                  path_effects=[pe.Stroke(linewidth=7, foreground='k'), pe.Normal()], solid_capstyle='round',lw=6, zorder=-100)
axes.text(4.71,5.35,"ZAMS", color = hexcols[3],ha='right',va='center')
axes.text(4.67,5.5,"RLOF", color = hexcols[3],ha='left',va='center')
axes.text(4.65,4.2,"ZAMS", color = hexcols[5])
axes.text(4.53,4.3,"Primary forms BH", color = hexcols[5])
axes.text(4.44,4.44,"Case A", color = hexcols[8], rotation=-7)
axes.text(4.27,4.85,"Case AB", color = hexcols[8], rotation=80)
axes.text(4.28,5.0,"Case ABB", color = hexcols[8])
axes.text(4.4,4.65,"TAMS", color = hexcols[5])
axes.text(4.68,5.05,"He depletion", color = hexcols[5])
axes.text(4.37,5.1,"C depletion", color = hexcols[5])
#axes.set_ylim([4.1,5.2])
#axes.set_xlim([4.8,4.15])
axes.set_ylim([4.15,5.57])
axes.set_xlim([4.8,4.15])
axes.set_xlabel("$\\log~T_\\mathrm{eff}\;\\rm[K]$")
axes.set_ylabel("$\\log~L\\;\\rm[L_\\odot]$")
axes.legend(loc="lower left")
plt.sca(axes)
text(0.95, 0.95,'Secondary',
     horizontalalignment='right',
     verticalalignment='top',
     transform = axes.transAxes, fontsize=15)
plt.savefig("../../images/HR_secondary.pdf")
|
orloxREPO_NAMEmesa_input_dataPATH_START.@mesa_input_data_extracted@mesa_input_data-master@2016_ULX@scripts@che_examples@HR@HR.py@.PATH_END.py
|
{
"filename": "test_psfr.py",
"repo_name": "sibirrer/psfr",
"repo_path": "psfr_extracted/psfr-main/tests/test_psfr.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""Tests for `psfr` package."""
from psfr import psfr
from lenstronomy.Util import kernel_util
from lenstronomy.Util import util
from lenstronomy.LightModel.light_model import LightModel
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy.testing as npt
import numpy as np
import os
import astropy.io.fits as pyfits
np.random.seed(42)
def test_shift_psf():
    """psfr.shift_psf must reproduce an analytically shifted Gaussian PSF.

    A supersampled Gaussian PSF is shifted by (0.2, -0.3) pixels both
    analytically (re-centered Gaussian, then degraded with lenstronomy) and
    numerically with psfr.shift_psf; the two degraded kernels must agree to
    5 decimals.
    """
    oversampling = 4
    x, y = 0.2, -0.3
    shift = [x, y]
    from lenstronomy.LightModel.light_model import LightModel
    gauss = LightModel(['GAUSSIAN'])
    numpix = 21
    num_pix_super = numpix * oversampling
    # Even oversampling needs an odd supersampled grid so a center pixel exists.
    if oversampling % 2 == 0:
        num_pix_super += 1
    sigma = 2
    kwargs_true = [{'amp': 1, 'sigma': sigma, 'center_x': 0, 'center_y': 0}]
    kwargs_shifted = [{'amp': 1, 'sigma': sigma, 'center_x': x, 'center_y': y}]
    x_grid_super, y_grid_super = util.make_grid(numPix=num_pix_super, deltapix=1. / oversampling,
                                                left_lower=False)
    # Centered, normalized supersampled PSF (input to shift_psf).
    flux_true_super = gauss.surface_brightness(x_grid_super, y_grid_super, kwargs_true)
    psf_true_super = util.array2image(flux_true_super)
    psf_true_super /= np.sum(psf_true_super)
    # Analytic reference: shifted Gaussian rendered directly, then degraded.
    psf_shifted_super_true = gauss.surface_brightness(x_grid_super, y_grid_super, kwargs_shifted)
    psf_shifted_super_true = util.array2image(psf_shifted_super_true)
    psf_shifted_true = kernel_util.degrade_kernel(psf_shifted_super_true, degrading_factor=oversampling)
    psf_shifted_true = kernel_util.cut_psf(psf_shifted_true, numpix)
    # Numerical shift under test.
    psf_shifted_psfr = psfr.shift_psf(psf_true_super, oversampling, shift, degrade=True, n_pix_star=numpix, order=2)
    print(np.sum(np.abs(psf_shifted_true - psf_shifted_psfr)), 'sum of absolute residuals')
    npt.assert_almost_equal(psf_shifted_true, psf_shifted_psfr, decimal=5)
def test_linear_amplitude():
    """A uniform model scaled by `amp` must recover `amp` exactly."""
    amp = 2
    model = np.ones((5, 5))
    data = model * amp
    npt.assert_almost_equal(psfr._linear_amplitude(data, model), amp)
    # Supplying an all-ones mask must not change the fitted amplitude.
    npt.assert_almost_equal(
        psfr._linear_amplitude(data, model, mask=np.ones_like(data)), amp
    )
def test_fit_centroid():
    """centroid_fit recovers a known Gaussian offset, with mask or variance."""
    from lenstronomy.LightModel.light_model import LightModel

    numpix = 41
    xx, yy = util.make_grid(numPix=numpix, deltapix=1)
    gauss = LightModel(['GAUSSIAN'])
    true_x, true_y = -3.5, 2.2
    kwargs_true = [{'amp': 2, 'sigma': 3, 'center_x': true_x, 'center_y': true_y}]
    kwargs_model = [{'amp': 1, 'sigma': 3, 'center_x': 0, 'center_y': 0}]
    image_true = util.array2image(gauss.surface_brightness(xx, yy, kwargs_true))
    image_model = util.array2image(gauss.surface_brightness(xx, yy, kwargs_model))

    # Fit with an all-ones mask and no variance map.
    center = psfr.centroid_fit(image_true, image_model, mask=np.ones_like(image_true), variance=None)
    npt.assert_almost_equal(center[0], true_x, decimal=3)
    npt.assert_almost_equal(center[1], true_y, decimal=3)

    # Fit with a uniform variance map and no mask.
    center = psfr.centroid_fit(image_true, image_model, mask=None, variance=np.ones_like(image_true))
    npt.assert_almost_equal(center[0], true_x, decimal=3)
    npt.assert_almost_equal(center[1], true_y, decimal=3)
def test_fit_centroid_pso():
    """Same as test_fit_centroid, but using the PSO optimizer."""
    from lenstronomy.LightModel.light_model import LightModel

    numpix = 41
    xx, yy = util.make_grid(numPix=numpix, deltapix=1)
    gauss = LightModel(['GAUSSIAN'])
    true_x, true_y = -3.5, 2.2
    kwargs_true = [{'amp': 2, 'sigma': 3, 'center_x': true_x, 'center_y': true_y}]
    kwargs_model = [{'amp': 1, 'sigma': 3, 'center_x': 0, 'center_y': 0}]
    image_true = util.array2image(gauss.surface_brightness(xx, yy, kwargs_true))
    image_model = util.array2image(gauss.surface_brightness(xx, yy, kwargs_model))

    # PSO fit with an all-ones mask.
    center = psfr.centroid_fit(image_true, image_model, mask=np.ones_like(image_true),
                               variance=None, optimizer_type='PSO')
    npt.assert_almost_equal(center[0], true_x, decimal=3)
    npt.assert_almost_equal(center[1], true_y, decimal=3)

    # PSO fit with a uniform variance map.
    center = psfr.centroid_fit(image_true, image_model, mask=None,
                               variance=np.ones_like(image_true), optimizer_type='PSO')
    npt.assert_almost_equal(center[0], true_x, decimal=3)
    npt.assert_almost_equal(center[1], true_y, decimal=3)
def test_one_step_psf_estimation():
    """One iteration of the PSF update must improve on the initial guess.

    Checked on the regular grid, on an oversampled grid, and with the
    oversampled-residual de-shifting scheme.
    """
    numpix = 21
    n_c = (numpix - 1) / 2  # pixel coordinate of the image center
    x_grid, y_grid = util.make_grid(numPix=21, deltapix=1, left_lower=True)
    gauss = LightModel(['GAUSSIAN'])
    sigma = 1
    # True PSF: normalized Gaussian centered on the image.
    kwargs_true = [{'amp': 1, 'sigma': sigma, 'center_x': n_c, 'center_y': n_c}]
    flux_true = gauss.surface_brightness(x_grid, y_grid, kwargs_true)
    psf_true = util.array2image(flux_true)
    psf_true /= np.sum(psf_true)
    # Initial guess: deliberately too-broad Gaussian.
    kwargs_guess = [{'amp': 1, 'sigma': 1.2, 'center_x': n_c, 'center_y': n_c}]
    flux_guess = gauss.surface_brightness(x_grid, y_grid, kwargs_guess)
    psf_guess = util.array2image(flux_guess)
    psf_guess /= np.sum(psf_guess)
    # Build stars with random sub-pixel offsets from the true PSF.
    center_list = []
    star_list = []
    displacement_scale = 1
    for i in range(4):
        x_c, y_c = np.random.uniform(-0.5, 0.5) * displacement_scale, np.random.uniform(-0.5, 0.5) * displacement_scale
        center_list.append(np.array([x_c, y_c]))
        kwargs_model = [{'amp': 1, 'sigma': sigma, 'center_x': n_c + x_c, 'center_y': n_c + y_c}]
        flux_model = gauss.surface_brightness(x_grid, y_grid, kwargs_model)
        star = util.array2image(flux_model)
        star_list.append(star)
    psf_after, amplitude_list = psfr.one_step_psf_estimate(star_list, psf_guess, center_list, mask_list=None, error_map_list=None,
                                                           step_factor=0.2)
    # psf_after should be a better guess of psf_true than psf_guess.
    diff_after = np.sum((psf_after - psf_true) ** 2)
    diff_before = np.sum((psf_guess - psf_true) ** 2)
    assert diff_after < diff_before
    # Same check on an oversampled grid (odd supersampled size for even factors).
    oversampling = 2
    numpix_super = numpix * oversampling
    if oversampling % 2 == 0:
        numpix_super -= 1
    x_grid_super, y_grid_super = util.make_grid(numPix=numpix_super, deltapix=1. / oversampling, left_lower=True)
    flux_guess_super = gauss.surface_brightness(x_grid_super, y_grid_super, kwargs_guess)
    psf_guess_super = util.array2image(flux_guess_super)
    psf_guess_super /= np.sum(psf_guess_super)
    flux_true_super = gauss.surface_brightness(x_grid_super, y_grid_super, kwargs_true)
    psf_true_super = util.array2image(flux_true_super)
    psf_true_super /= np.sum(psf_true_super)
    psf_after_super, amplitude_list = psfr.one_step_psf_estimate(star_list, psf_guess_super, center_list, mask_list=None,
                                                                 error_map_list=None, step_factor=0.2, oversampling=oversampling)
    diff_after = np.sum((psf_after_super - psf_true_super) ** 2)
    diff_before = np.sum((psf_guess_super - psf_true_super) ** 2)
    assert diff_after < diff_before
    # De-shifting in oversampled space (slightly lower quality, but still
    # better than the initial guess).
    psf_after_super, amplitude_list = psfr.one_step_psf_estimate(star_list, psf_guess_super, center_list, mask_list=None,
                                                                 error_map_list=None, step_factor=0.2, oversampling=oversampling,
                                                                 oversampled_residual_deshifting=True)
    diff_after = np.sum((psf_after_super - psf_true_super) ** 2)
    diff_before = np.sum((psf_guess_super - psf_true_super) ** 2)
    assert diff_after < diff_before
def test_saturation_limit():
    # check if psf with saturation limit is more accurate than one without
    # NOTE: this local import shadows the module-level `util` (lenstronomy.Util.util)
    import lenstronomy.Util.kernel_util as util
    module_path = os.path.dirname(psfr.__file__)
    psf_filename = module_path + '/Data/JWST_mock/psf_f090w_supersample5_crop.fits'
    kernel = pyfits.getdata(psf_filename)
    oversampling = 5
    saturation_limit = 50
    star_list_webb = []
    x_shift, y_shift = np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5)
    # very bright star added to list of stars and all flux values above saturation limit fixed
    bright_star = psfr.shift_psf(psf_center=kernel, oversampling=5, shift=[x_shift, y_shift], degrade=True,
                                 n_pix_star=kernel.shape[0] / oversampling) * 4000
    bright_star[bright_star > saturation_limit] = saturation_limit
    # 10 less bright stars are added
    brightnesses = np.abs(np.random.normal(400, 100, 10))
    star_list_webb.append(bright_star)
    for i in range(10):
        x_shift, y_shift = np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5)
        # star generated and flux multiplied by relevant brightness factor
        star = psfr.shift_psf(psf_center=kernel, oversampling=5, shift=[x_shift, y_shift], degrade=True,
                              n_pix_star=kernel.shape[0] / oversampling) * brightnesses[i]
        star_list_webb.append(star)
    # psf reconstructed with a saturation limit
    psf_psfr_super_sat, center_list_psfr_super_sat, mask_list_sat, amplitude_list_sat = psfr.stack_psf(star_list_webb,
                                                                                                       oversampling=oversampling,
                                                                                                       saturation_limit=saturation_limit,
                                                                                                       num_iteration=10,
                                                                                                       n_recenter=20,
                                                                                                       centroid_optimizer='Nelder-Mead')
    # psf reconstructed without a saturation limit
    psf_psfr_super, center_list_psfr_super, mask_list, amplitude_list_super = psfr.stack_psf(star_list_webb, oversampling=oversampling,
                                                                                             saturation_limit=None, num_iteration=10,
                                                                                             n_recenter=20)
    # compare both reconstructions to the (degraded) true kernel
    kernel_degraded = util.degrade_kernel(kernel, oversampling)
    stacked_psf_sat_degraded = psfr.oversampled2regular(psf_psfr_super_sat, oversampling)
    stacked_psf_degraded = psfr.oversampled2regular(psf_psfr_super, oversampling)
    diff1 = np.sum((stacked_psf_sat_degraded - kernel_degraded) ** 2)
    diff2 = np.sum((stacked_psf_degraded - kernel_degraded) ** 2)
    # reconstructed psf with saturation limit should perform better than without
    npt.assert_array_less(diff2, diff1, err_msg='reconstructed psf with saturation limit is worse than without limit')
def test_noisy_psf():
    # create 2 psfs with noisy and noiseless stars. checks if noisy psf has larger residual with the true psf
    # NOTE: this local import shadows the module-level `util` (lenstronomy.Util.util)
    import lenstronomy.Util.kernel_util as util
    import lenstronomy.Util.image_util as image_util
    module_path = os.path.dirname(psfr.__file__)
    psf_filename = module_path + '/Data/JWST_mock/psf_f090w_supersample5_crop.fits'
    kernel = pyfits.getdata(psf_filename)
    oversampling = 5
    star_list_webb_noisy = []
    star_list_webb = []
    brightnesses = abs(np.random.normal(loc=600, scale=200, size=(5,)))
    for i in range(5):
        x_shift, y_shift = np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5)
        star = psfr.shift_psf(psf_center=kernel, oversampling=5, shift=[x_shift, y_shift], degrade=True,
                              n_pix_star=kernel.shape[0] / oversampling) * brightnesses[i]
        star_list_webb.append(star)
        # noisy copy: add Poisson noise and Gaussian background realizations
        star_n1 = image_util.add_poisson(star, exp_time=100.)
        star_n2 = image_util.add_background(star, sigma_bkd=0.5)
        star_noisy = star + star_n1 + star_n2
        star_list_webb_noisy.append(star_noisy)
    # reconstruction from the noisy stars
    psf_psfr_super_noisy, center_list_psfr_super_sat, mask_list_sat, amplitude_list_noisy = psfr.stack_psf(star_list_webb_noisy,
                                                                                                           oversampling=oversampling,
                                                                                                           saturation_limit=None,
                                                                                                           num_iteration=10,
                                                                                                           n_recenter=5)
    # reconstruction from the noiseless stars
    psf_psfr_super, center_list_psfr_super_sat, mask_list_sat, amplitude_list = psfr.stack_psf(star_list_webb,
                                                                                               oversampling=oversampling,
                                                                                               saturation_limit=None, num_iteration=10,
                                                                                               n_recenter=20)
    # compare both to the (degraded) true kernel
    kernel_degraded = util.degrade_kernel(kernel, oversampling)
    stacked_psf_noisy_degraded = psfr.oversampled2regular(psf_psfr_super_noisy, oversampling)
    stacked_psf_degraded = psfr.oversampled2regular(psf_psfr_super, oversampling)
    diff1 = np.sum((stacked_psf_noisy_degraded - kernel_degraded) ** 2)
    diff2 = np.sum((stacked_psf_degraded - kernel_degraded) ** 2)
    # the noiseless reconstruction must be at least as good as the noisy one
    npt.assert_array_less(diff2, diff1, err_msg='reconstructed psf with noisy stars is better than noiseless stars')
def test_combine_psf():
    """combine_psf must move noisy kernel realizations closer to the truth.

    Checks every stacking option ('median', 'median_weight', 'mean'), with and
    without masks/error maps, and verifies that a uniform error map does not
    change the 'mean' result.
    """
    from psfr.psfr import combine_psf
    module_path = os.path.dirname(psfr.__file__)
    psf_filename = module_path + '/Data/JWST_mock/psf_f090w_supersample5_crop.fits'
    kernel = pyfits.getdata(psf_filename)
    nx, ny = np.shape(kernel)
    # Ten noisy realizations of the true kernel (unit Gaussian noise).
    kernel_list_input = []
    amplitude_list = np.ones(10)
    for i in range(10):
        kernel_list_input.append(np.random.randn(nx, ny) + kernel)
    diff_input = np.sum((kernel_list_input[0] - kernel) ** 2)
    # Median stacking, combined with the old kernel.
    kernel_new = combine_psf(kernel_list_input, kernel, mask_list=None, amplitude_list=amplitude_list, factor=1.,
                             stacking_option='median', symmetry=1, combine_with_old=True)
    diff_output = np.sum((kernel_new - kernel) ** 2)
    assert diff_input > diff_output
    # Weighted median stacking, new kernel only.
    kernel_new = combine_psf(kernel_list_input, kernel, mask_list=None, amplitude_list=amplitude_list, factor=1.,
                             stacking_option='median_weight', symmetry=1, combine_with_old=False)
    diff_output = np.sum((kernel_new - kernel) ** 2)
    assert diff_input > diff_output
    # Weighted median with explicit masks, error maps and 2-fold symmetry.
    mask_list = np.ones_like(np.array(kernel_list_input), dtype='int')
    error_map_list = np.ones_like(np.array(kernel_list_input))
    kernel_new = combine_psf(kernel_list_input, kernel, mask_list=mask_list, amplitude_list=amplitude_list, factor=1.,
                             stacking_option='median_weight', symmetry=2, combine_with_old=False,
                             error_map_list=error_map_list)
    diff_output = np.sum((kernel_new - kernel) ** 2)
    assert diff_input > diff_output
    # Mean stacking without amplitudes.
    kernel_new = combine_psf(kernel_list_input, kernel, mask_list=None, amplitude_list=None, factor=1.,
                             stacking_option='mean', symmetry=1, combine_with_old=False)
    diff_output = np.sum((kernel_new - kernel) ** 2)
    assert diff_input > diff_output
    # here with an uniform error map = 1
    error_map_list = [np.ones_like(kernel)] * len(kernel_list_input)
    kernel_new_error_map = combine_psf(kernel_list_input, kernel, mask_list=None, amplitude_list=None, factor=1.,
                                       stacking_option='mean', symmetry=1, combine_with_old=False,
                                       error_map_list=error_map_list)
    npt.assert_almost_equal(kernel_new_error_map, kernel_new)
    # here with an uniform error map != 1
    error_map_list = [np.ones_like(kernel) * 10**(-10)] * len(kernel_list_input)
    kernel_new_error_map = combine_psf(kernel_list_input, kernel, mask_list=None, amplitude_list=None, factor=1.,
                                       stacking_option='mean', symmetry=1, combine_with_old=False,
                                       error_map_list=error_map_list)
    npt.assert_almost_equal(kernel_new_error_map, kernel_new)
def test_luminosity_centring():
    """After luminosity centring, the first moments of the image vanish."""
    gauss = LightModel(['GAUSSIAN'])
    x_flat, y_flat = util.make_grid(numPix=21, deltapix=1., left_lower=False)
    kwargs = [{'amp': 1, 'sigma': 1.2, 'center_x': -0.5, 'center_y': 0.5}]
    star = util.array2image(gauss.surface_brightness(x_flat, y_flat, kwargs))
    centred = psfr.luminosity_centring(star)
    # Flux-weighted first moments should be (0, 0) after centring.
    xx, yy = util.array2image(x_flat), util.array2image(y_flat)
    total = np.sum(centred)
    npt.assert_almost_equal(np.sum(centred * xx) / total, 0, decimal=5)
    npt.assert_almost_equal(np.sum(centred * yy) / total, 0, decimal=5)
def test_centroid_fit():
    """Both optimizers recover a (-0.5, 0.5) shift between data and model."""
    gauss = LightModel(['GAUSSIAN'])
    x_flat, y_flat = util.make_grid(numPix=21, deltapix=1., left_lower=False)
    kwargs_data = [{'amp': 1, 'sigma': 1.2, 'center_x': -0.5, 'center_y': 0.5}]
    kwargs_model = [{'amp': 1, 'sigma': 1.2, 'center_x': 0, 'center_y': 0}]
    data = util.array2image(gauss.surface_brightness(x_flat, y_flat, kwargs_data))
    model = util.array2image(gauss.surface_brightness(x_flat, y_flat, kwargs_model))
    mask = np.ones_like(model, dtype='int')
    variance = np.ones_like(model, dtype='float')
    for optimizer in ('Nelder-Mead', 'PSO'):
        shift = psfr.centroid_fit(data, model, mask=mask, variance=variance,
                                  oversampling=1, optimizer_type=optimizer)
        npt.assert_almost_equal(shift, [-0.5, 0.5], decimal=3)
def test_psf_error_map():
    """psf_error_map should be ~zero when star-to-star variation is pure noise."""
    from lenstronomy.LightModel.light_model import LightModel
    numpix = 11
    x_grid, y_grid = util.make_grid(numPix=numpix, deltapix=1)
    gauss = LightModel(['GAUSSIAN'])
    kwargs_model = [{'amp': 1, 'sigma': 1.5, 'center_x': 0, 'center_y': 0}]
    flux_true = gauss.surface_brightness(x_grid, y_grid, kwargs_model)
    psf_kernel = util.array2image(flux_true)
    # 100 stars: the true kernel scaled by i, plus unit Gaussian noise.
    star_list, center_list, error_map_list, mask_list = [], [], [], []
    for i in range(100):
        star = psf_kernel * i + np.random.randn(numpix, numpix)
        center_list.append([0, 0])
        star_list.append(star)
        error_map_list.append(np.ones_like(star) * 5)
        mask_list.append(np.ones_like(star))
    star_list = np.array(star_list)
    # without masks / error maps the residual error map should be near zero
    error_map = psfr.psf_error_map(star_list, psf_kernel, center_list, mask_list=None, error_map_list=None,
                                   oversampling=1)
    npt.assert_almost_equal(error_map, 0, decimal=2)
    # this tests that if the star-to-star variation is below the noise level, the psf error map should be zero
    error_map = psfr.psf_error_map(star_list, psf_kernel, center_list, mask_list=mask_list, error_map_list=error_map_list,
                                   oversampling=1)
    npt.assert_almost_equal(error_map, 0, decimal=5)
|
sibirrerREPO_NAMEpsfrPATH_START.@psfr_extracted@psfr-main@tests@test_psfr.py@.PATH_END.py
|
{
"filename": "test_logging.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/telemetry/test_logging.py",
"type": "Python"
}
|
import logging
from prefect.telemetry.bootstrap import setup_telemetry
from prefect.telemetry.logging import (
add_telemetry_log_handler,
get_log_handler,
set_log_handler,
)
def test_add_telemetry_log_handler_with_handler(enable_telemetry):
    """The telemetry log handler is appended to a logger's existing handlers."""
    logger = logging.getLogger("test")
    handlers_before = list(logger.handlers)
    setup_telemetry()
    telemetry_handler = get_log_handler()
    assert telemetry_handler is not None
    add_telemetry_log_handler(logger)
    assert list(logger.handlers) == handlers_before + [telemetry_handler]
def test_add_telemetry_log_handler_without_handler():
    """When no telemetry handler is configured, the logger is left untouched."""
    logger = logging.getLogger("test")
    handlers_before = list(logger.handlers)
    set_log_handler(None)
    add_telemetry_log_handler(logger)
    assert list(logger.handlers) == handlers_before
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@telemetry@test_logging.py@.PATH_END.py
|
{
"filename": "tilecov_plus_lyapredict.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/Sandbox/tilecov_plus_lyapredict.py",
"type": "Python"
}
|
#copied from https://github.com/desihub/desispec/blob/master/py/desispec/tile_qa_plot.py#L957
#small updates to print basic prediction for lya numbers
from desitarget.io import read_targets_in_tiles
from desispec.maskbits import fibermask
from desispec.io import read_fibermap, findfile
from desispec.tsnr import tsnr2_to_efftime
from desimodel.focalplane.geometry import get_tile_radius_deg
from desimodel.footprint import is_point_in_desi
from desiutil.log import get_logger
from desiutil.dust import ebv as dust_ebv
from astropy.table import Table, vstack
from astropy.io import fits
from astropy import units
from astropy.coordinates import SkyCoord
tile_radius_deg = get_tile_radius_deg()
def get_quantz_cmap(name, n, cmin=0, cmax=1):
    """
    Build a discrete (quantized) colormap by sampling an existing one.

    Args:
        name: matplotlib colormap name or object (e.g. "tab20") (string)
        n: number of discrete colors to sample
        cmin (optional, defaults to 0): first color of the original colormap to use (between 0 and 1) (float)
        cmax (optional, defaults to 1): last color of the original colormap to use (between 0 and 1) (float)

    Returns:
        A matplotlib ListedColormap object with n colors.

    Notes:
        https://matplotlib.org/examples/api/colorbar_only.html
    """
    base = matplotlib.cm.get_cmap(name)
    sampled = base(np.linspace(cmin, cmax, n))
    quantized = matplotlib.colors.ListedColormap(sampled)
    # Out-of-range values map to the first / last sampled color.
    quantized.set_under(sampled[0])
    quantized.set_over(sampled[-1])
    return quantized
def get_tilecov(
    tileid,
    surveys="main",
    programs=None,
    lastnight=None,
    indesi=True,
    outpng=None,
    plot_tiles=False,
    verbose=False,
):
    """
    Computes the average number of observed tiles covering a given tile,
    and prints a simple prediction of Lya QSO numbers from the coverage.

    Args:
        tileid: tileid (int)
        surveys (optional, defaults to "main"): comma-separated list of surveys to consider (reads the tiles-SURVEY.ecsv file) (str)
        programs (optional, defaults to None): comma-separated list of programs (case-sensitive) to consider in the tiles-SURVEY.ecsv file (str)
        lastnight (optional, defaults to today): only consider tiles observed up to lastnight (int)
        indesi (optional, defaults to True): restrict to IN_DESI=True tiles? (bool)
        outpng (optional, defaults to None): if provided, output file with a plot (str)
        plot_tiles (optional, defaults to False): plot overlapping tiles? (bool)
        verbose (optional, defaults to False): print log.info() (bool)

    Returns:
        ntilecov: average number of observed tiles covering the considered tile (float)
        ntile: per-random-point count of observed overlapping tiles (int array)

    Notes:
        If the tile is not covered by randoms, returns np.nan and an empty dict
        (NOTE(review): the early-return type {} differs from the int array
        returned on the normal path — confirm what callers expect).
        The "regular" use is to provide a single PROGRAM in programs (e.g., programs="DARK").
        This function relies on the following files:
            $DESI_SURVEYOPS/ops/tiles-{SURVEY}.ecsv for SURVEY in surveys (to get the tiles to consider)
            $DESI_ROOT/spectro/redux/daily/exposures-daily.fits (to get the existing observations up to lastnight)
            $DESI_TARGET/catalogs/dr9/2.4.0/randoms/resolve/randoms-1-0/
        If one wants to consider the latest observations, one should wait the 10am pacific update of exposures-daily.fits.
    """
    # AR lastnight
    if lastnight is None:
        lastnight = int(datetime.now().strftime("%Y%m%d"))
    # AR files
    allowed_surveys = ["sv1", "sv2", "sv3", "main", "catchall"]
    sel = ~np.in1d(surveys.split(","), allowed_surveys)
    if sel.sum() > 0:
        msg = "surveys={} not in allowed_surveys={}".format(
            ",".join([survey for survey in np.array(surveys.split(","))[sel]]),
            ",".join(allowed_surveys),
        )
        log.error(msg)
        raise ValueError(msg)
    tilesfns = [
        os.path.join(os.getenv("DESI_SURVEYOPS"), "ops", "tiles-{}.ecsv".format(survey))
        for survey in surveys.split(",")
    ]
    expsfn = os.path.join(os.getenv("DESI_ROOT"), "spectro", "redux", "daily", "exposures-daily.fits")
    # AR we need that specific version which is healpix-split, hence readable by read_targets_in_tile(quick=True))
    randdir = os.path.join(os.getenv("DESI_TARGET"), "catalogs", "dr9", "2.4.0", "randoms", "resolve", "randoms-1-0")
    # AR exposures with EFFTIME_SPEC>0 and NIGHT<=LASTNIGHT
    exps = Table.read(expsfn, "EXPOSURES")
    sel = (exps["EFFTIME_SPEC"] > 0) & (exps["NIGHT"] <= lastnight)
    exps = exps[sel]
    # AR read the tiles
    ds = []
    for tilesfn in tilesfns:
        if verbose:
            log.info("reading {}".format(tilesfn))
        d = Table.read(tilesfn)
        # AR drop units so tables from different surveys can be stacked
        if d["RA"].unit == "deg":
            d["RA"].unit, d["DEC"].unit = None, None
        if "sv2" in tilesfn:
            d["IN_DESI"] = d["IN_DESI"].astype(bool)
        ds.append(d)
    tiles = vstack(ds, metadata_conflicts="silent")
    # AR first, before any cut:
    # AR - get the considered tile
    # AR - read the randoms inside that tile
    sel = tiles["TILEID"] == tileid
    if sel.sum() == 0:
        msg = "no TILEID={} found in {}".format(tileid, tilesfn)
        log.error(msg)
        raise ValueError(msg)
    if programs is None:
        log.warning("programs=None, will consider *all* kind of tiles")
    else:
        if tiles["PROGRAM"][sel][0] not in programs.split(","):
            log.warning(
                "TILEID={} has PROGRAM={}, not included in the programs={} used for computation".format(
                    tileid, tiles["PROGRAM"][sel][0], programs,
                )
            )
    c = SkyCoord(
        ra=tiles["RA"][sel][0] * units.degree,
        dec=tiles["DEC"][sel][0] * units.degree,
        frame="icrs"
    )
    d = read_targets_in_tiles(randdir, tiles=tiles[sel], quick=True)
    if len(d) == 0:
        log.warning("found 0 randoms in TILEID={}; cannot proceed; returning np.nan, empty_dictionary".format(tileid))
        return np.nan, {}
    if verbose:
        log.info("found {} randoms in TILEID={}".format(len(d), tileid))
    # AR then cut on:
    # AR - PROGRAM, IN_DESI: to get the tiles to consider
    # AR - exposures: to get the observations with NIGHT <= LASTNIGHT
    sel = np.ones(len(tiles), dtype=bool)
    if verbose:
        log.info("starting from {} tiles".format(len(tiles)))
    if programs is not None:
        sel = np.in1d(tiles["PROGRAM"], programs.split(","))
        if verbose:
            log.info("considering {} tiles after cutting on PROGRAM={}".format(sel.sum(), programs))
    if indesi:
        sel &= tiles["IN_DESI"]
        if verbose:
            log.info("considering {} tiles after cutting on IN_DESI".format(sel.sum()))
    sel &= np.in1d(tiles["TILEID"], exps["TILEID"])
    if verbose:
        log.info("considering {} tiles after cutting on NIGHT <= {}".format(sel.sum(), lastnight))
    tiles = tiles[sel]
    # AR overlap: keep tiles whose centers are within two tile radii
    cs = SkyCoord(ra=tiles["RA"] * units.degree, dec=tiles["DEC"] * units.degree, frame="icrs")
    sel = cs.separation(c).value <= 2 * tile_radius_deg
    tiles = tiles[sel]
    if verbose:
        log.info("selecting {} overlapping tiles: {}".format(len(tiles), tiles["TILEID"].tolist()))
    # AR get exposures
    # NOTE(review): outdict is built here but not returned on the normal path
    # (only ntilecov and ntile are) — dead unless re-exposed.
    outdict = {
        tileid : exps["EXPID"][exps["TILEID"] == tileid].tolist()
        for tileid in tiles["TILEID"]
    }
    # AR count the number of tile coverage
    ntile = np.zeros(len(d), dtype=int)
    for i in range(len(tiles)):
        sel = is_point_in_desi(tiles[[i]], d["RA"], d["DEC"])
        if verbose:
            log.info("fraction of TILEID={} covered by TILEID={}: {:.2f}".format(tileid, tiles[i]["TILEID"], sel.mean()))
        ntile[sel] += 1
    ntilecov = ntile.mean()
    if verbose:
        log.info("mean coverage of TILEID={}: {:.2f}".format(tileid, ntilecov))
    # predict Lya numbers: weight the fraction of area at each coverage
    # (1, 2, 3 passes) by the per-pass model numbers in mod_nlya
    nlya = 0
    ntot = len(ntile)
    mod_nlya = [0,300,150,75]
    for nt in range(1,4):
        sel = ntile == nt
        nlya += len(ntile[sel])/ntot*mod_nlya[nt]
    print('predicted number of lya '+str(nlya))
    # AR plot?
    if plot_tiles:
        # AR cbar settings
        cmin = 0
        # AR for "regular" programs, setting cmax to the
        # AR designed max. npass (though considering future possibility
        # AR to have more pass, e.g. for mainBRIGHT, hence the np.max())
        refcmaxs = {
            "sv3BACKUP" : 5, "sv3BRIGHT" : 11, "sv3DARK" : 14,
            "mainBACKUP" : 1, "mainBRIGHT" : 4, "mainDARK" : 7,
        }
        if "{}{}".format(surveys, programs) in refcmaxs:
            cmax = np.max([refcmaxs["{}{}".format(surveys, programs)], ntile.max()])
        else:
            cmax = ntile.max()
        cmap = get_quantz_cmap(matplotlib.cm.jet, cmax - cmin + 1, 0, 1)
        # AR case overlap Dec.=0
        if d["RA"].max() - d["RA"].min() > 100:
            dowrap = True
        else:
            dowrap = False
        if dowrap:
            d["RA"][d["RA"] > 300] -= 360
        #
        fig, ax = plt.subplots()
        sc = ax.scatter(d["RA"], d["DEC"], c=ntile, s=1, cmap=cmap, vmin=cmin, vmax=cmax)
        # AR plot overlapping tiles?
        if plot_tiles:
            angs = np.linspace(0, 2 * np.pi, 100)
            dras = tile_radius_deg * np.cos(angs)
            ddecs = tile_radius_deg * np.sin(angs)
            for i in range(len(tiles)):
                if tiles["TILEID"][i] != tileid:
                    ras = tiles["RA"][i] + dras / np.cos(np.radians(tiles["DEC"][i] + ddecs))
                    if dowrap:
                        ras[ras > 300] -= 360
                    decs = tiles["DEC"][i] + ddecs
                    ax.plot(ras, decs, label="TILEID={}".format(tiles["TILEID"][i]))
            ax.legend(loc=2, ncol=2, fontsize=8)
        #
        ax.set_title("Mean coverage of TILEID={} on {}: {:.2f}".format(tileid, lastnight, ntilecov))
        ax.set_xlabel("R.A. [deg]")
        ax.set_ylabel("Dec. [deg]")
        dra = 1.1 * tile_radius_deg / np.cos(np.radians(d["DEC"].mean()))
        ddec = 1.1 * tile_radius_deg
        ax.set_xlim(d["RA"].mean() + dra, d["RA"].mean() - dra)
        ax.set_ylim(d["DEC"].mean() - ddec, d["DEC"].mean() + ddec)
        ax.grid()
        cbar = plt.colorbar(sc, ticks=np.arange(cmin, cmax + 1, dtype=int))
        cbar.set_label("Number of observed tiles on {}".format(lastnight))
        cbar.mappable.set_clim(cmin, cmax)
        plt.show()
        #plt.savefig(outpng, bbox_inches="tight")
        #plt.close()
    return ntilecov, ntile
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@Sandbox@tilecov_plus_lyapredict.py@.PATH_END.py
|
{
"filename": "_tickvals.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/colorbar/_tickvals.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``isosurface.colorbar.tickvals`` data-array property."""

    def __init__(
        self, plotly_name="tickvals", parent_name="isosurface.colorbar", **kwargs
    ):
        # Default the edit type to "calc" unless the caller supplied one.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@colorbar@_tickvals.py@.PATH_END.py
|
{
"filename": "_tickangle.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/marker/colorbar/_tickangle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Validator for the ``bar.marker.colorbar.tickangle`` angle property."""

    def __init__(
        self, plotly_name="tickangle", parent_name="bar.marker.colorbar", **kwargs
    ):
        # Default the edit type to "colorbars" unless the caller supplied one.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@marker@colorbar@_tickangle.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/pie/legendgrouptitle/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Import the validators eagerly on Python < 3.7 (no module __getattr__,
# PEP 562) or when a static type checker is analysing the package;
# otherwise defer them through plotly's lazy relative-import machinery
# so importing this package stays cheap.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._text import TextValidator
    from ._font import FontValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns (__all__, __getattr__, __dir__) so the
    # submodule classes are only imported on first attribute access.
    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._text.TextValidator", "._font.FontValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@pie@legendgrouptitle@__init__.py@.PATH_END.py
|
{
"filename": "paper.md",
"repo_name": "ashleychontos/pySYD",
"repo_path": "pySYD_extracted/pySYD-master/paper/paper.md",
"type": "Markdown"
}
|
---
title: "`pySYD`: Automated measurements of global asteroseismic parameters"
tags:
- Python
- astronomy
- stellar astrophysics
- asteroseismology
- stellar oscillations
- fundamental stellar properties
- solar-like oscillations
- global asteroseismology
authors:
- name: Ashley Chontos
orcid: 0000-0003-1125-2564
affiliation: "1, 2"
- name: Daniel Huber
orcid: 0000-0001-8832-4488
affiliation: 3
- name: Maryum Sayeed
orcid: 0000-0001-6180-8482
affiliation: 4
- name: Pavadol Yamsiri
affiliation: 5
affiliations:
- name: Department of Astrophysical Sciences, Princeton University, 4 Ivy Lane, Princeton, NJ 08540, USA
index: 1
- name: Henry Norris Russell Fellow
index: 2
- name: Institute for Astronomy, University of Hawai'i, 2680 Woodlawn Drive, Honolulu, HI 96822, USA
index: 3
- name: Department of Astronomy, Columbia University, Pupin Physics Laboratories, New York, NY 10027, USA
index: 4
- name: Sydney Institute for Astronomy, School of Physics, University of Sydney, NSW 2006, Australia
index: 5
date: 6 Nov 2022
bibliography: paper.bib
---
# Summary
Asteroseismology is well-established in astronomy as the gold standard for determining
precise and accurate fundamental stellar properties like masses, radii, and ages. Several
tools have been developed for asteroseismic analyses but many of them are closed-source
and therefore not accessible to the general astronomy community. Here we present `pySYD`,
a Python package for detecting solar-like oscillations and measuring global asteroseismic
parameters. `pySYD` was adapted from the IDL-based `SYD` pipeline, which was extensively
used to measure asteroseismic parameters for *Kepler* stars. `pySYD` was developed using
the same well-tested methodology and comes with several new improvements to provide
accessible and reproducible results. Well-documented, open-source asteroseismology software
that has been benchmarked against closed-source tools is critical to ensure the
reproducibility of legacy results from the *Kepler* mission. Moreover, `pySYD` will also
be a promising tool for the broader astronomy community to analyze current and forthcoming
data from the NASA TESS mission.
# Introduction
The study of stellar oscillations is a powerful tool for studying the interiors
of stars and determining their fundamental properties [@aerts2021]. For stars
with temperatures that are similar to the Sun, turbulent near-surface convection
excites sound waves that propagate within the stellar cavity [@bedding2014].
These waves probe different depths within the star and therefore, provide critical
constraints for stellar interiors that would otherwise be inaccessible by other means.
Asteroseismology of such "solar-like" oscillators provides precise fundamental
properties like masses, radii, densities, and ages for single stars, which has
broad impacts on several fields in astronomy. For example, ages of stars are
important to reconstruct the formation history of the Milky Way (so-called
galactic archaeology). For exoplanets that are discovered indirectly through
changes in stellar observables, precise and accurate stellar masses and radii
are critical for learning about the planets that orbit them.
The NASA space telescopes *Kepler* [@borucki2010] and TESS [@ricker2015]
have recently provided very large databases of high-precision light curves of
stars. By detecting brightness variations due to oscillations, these
light curves allow the application of asteroseismology to large numbers of stars,
which requires automated software tools to efficiently extract observables.
Several tools have been developed for asteroseismic analyses [e.g., `A2Z`,
@mathur2010; `COR`, @mosser2009; `OCT`, @hekker2010; `SYD`, @huber2009], but
many of them are closed-source and therefore inaccessible to the general
astronomy community. Some open-source tools exist [e.g., `DIAMONDS` and `FAMED`,
@corsaro2014; `PBjam`, @nielsen2021; `lightkurve`, @lightkurve], but they are
either optimized for smaller samples of stars or have not yet been extensively
tested against closed-source tools.
# Statement of need
There is a strong need within the astronomy community for an open-source
asteroseismology tool that is 1) accessible, 2) reproducible, and 3) scalable,
which will only grow with the continued success of the NASA TESS mission. In
this paper we present a Python tool that automatically detects solar-like
oscillations and characterizes their properties, called `pySYD`,
which prioritizes these three key aspects:
- **Accessible.** The `pySYD` library and source directory are both
publicly available, hosted on the Python Package Index
([PyPI](https://pypi.org/project/pysyd/)) and GitHub. The
[`pySYD` GitHub Page](https://github.com/ashleychontos/pySYD)
also serves as a multifaceted platform to promote community engagement
through discussion forums to communicate and share science, laying out
instructions to contribute and encourage inclusivity, and providing
a clear path for issue tracking. To facilitate future use and adaptations,
the [documentation](https://pysyd.readthedocs.io) includes a broad
spectrum of examples that showcase the versatility of the software.
Additionally, Python usage has become standard practice within the
community, which will promote integrations with complementary tools
like [`lightkurve`](https://docs.lightkurve.org) and [`echelle`](https://github.com/danhey/echelle).
- **Reproducible.** `pySYD` implements a similar framework to the closed-source IDL-based
`SYD` pipeline [@huber2009], which has been used frequently to measure global asteroseismic
parameters for many *Kepler* stars [@huber2011;@chaplin2014;@serenelli2017;@yu2018] and has
been extensively tested against other closed-source tools [@verner2011;@hekker2011].
\autoref{fig:benchmark} compares global parameter results from the `pySYD` and `SYD` pipelines
for $\sim100$ *Kepler* legacy stars, showing excellent agreement with no significant offsets.
In fact, the small amount of scatter is likely because `pySYD` is *not* a direct 1:1 translation,
incorporating many new custom features and software enhancements. In addition to the important
benchmark sample, `pySYD` ensures reproducible results for *every* locally-processed star by
saving and setting seeds for any randomly occurring analyses.
- **Scalable.** `pySYD` was developed for speed and efficiency. `pySYD` has more
than 50 optional commands that enable a customized analysis at the individual star
level and on average, takes less than a minute to complete a single star
(with sampling). The software also features parallel processing capabilities
and is therefore suitable for large samples of stars.
Well-documented, open-source asteroseismology software that has been benchmarked
against closed-source tools is critical to ensure the reproducibility of legacy
results from the *Kepler* mission. `pySYD` will also be a promising tool for the
broader community to analyze current and forthcoming data from the NASA TESS mission.
# Software package overview
`pySYD` depends on a number of powerful libraries, including [`astropy`](https://www.astropy.org)
[@astropy1;@astropy2], [`matplotlib`](https://matplotlib.org) [@matplotlib], [`numpy`](https://numpy.org) [@numpy],
[`pandas`](https://pandas.pydata.org) [@pandas] and [`scipy`](https://scipy.org) [@scipy]. The software package is structured
around the following main modules, details of which are described in the
online package documentation:
- [`target`](https://pysyd.readthedocs.io/en/latest/library/target.html) includes
the `Target` class object, which is instantiated for every processed star and
roughly operates in the following steps:
* checks for and loads in data for a given star and applies any relevant time- and/or
frequency-domain tools e.g., computing spectra, mitigating *Kepler* artefacts, etc.
* searches for localized power excess due to solar-like oscillations and then estimates
its initial properties
* uses estimates to mask out that region in the power spectrum and implements an
automated background fitting routine that characterizes amplitudes ($\sigma$) and
characteristic time scales ($\tau$) of various granulation processes
* derives global asteroseismic quantities $\rm \nu_{max}$ and $\Delta\nu$ from the
background-corrected power spectrum
* performs Monte-Carlo simulations by drawing from a chi-squared distribution (with 2 dof)
to estimate uncertainties
- [`plots`](https://pysyd.readthedocs.io/en/latest/library/plots.html) includes all plotting routines
- [`models`](https://pysyd.readthedocs.io/en/latest/library/utils.html) comprises different
frequency distributions used to fit and model properties in a given power spectrum
- [`cli`](https://pysyd.readthedocs.io/en/latest/usage/intro.html) &
[`pipeline`](https://pysyd.readthedocs.io/en/latest/library/pipeline.html) are the main
entry points for terminal and command prompt usage
- [`utils`](https://pysyd.readthedocs.io/en/latest/library/utils.html) includes a suite
of utilities such as the container class `Parameters`, which contains all default
parameters, or utility functions like binning data or finding peaks in a series of data
![Comparison of global parameters $\rm \nu_{max}$ (left) and $\Delta\nu$ (right)
measured by `pySYD` and `SYD` for $\sim100$ *Kepler* stars [@serenelli2017], with
fractional residuals shown in the bottom panels. The comparison shows excellent
agreement, with median offsets of $0.07 \pm 0.07\%$ for $\rm \nu_{max}$ and
$0.004 \pm 0.008\%$ for $\Delta\nu$. Typical random errors for such measurements
are 1-2 orders of magnitude larger.\label{fig:benchmark}](benchmark.png)
# Documentation
For installation instructions and package information, the main documentation
for the `pySYD` software is hosted at [ReadTheDocs](https://pysyd.readthedocs.io/en/latest/).
`pySYD` comes with a setup feature which will download information and data for
three example stars and then establish the recommended, local directory structure.
The documentation comprises a diverse range of applications and examples to
make the software more accessible and adaptable. Tutorials include:
- basic command-line examples for stars of varying signal-to-noise detections
- customized command-line examples to showcase some of the new, optional features
- different ways to run a large number of stars
- a notebook tutorial walkthrough of a single star from data to results
- other notebook tutorials demonstrating the use of some optional commands
and/or software hacks
The documentation also contains a [complete list](https://pysyd.readthedocs.io/en/latest/usage/glossary.html)
of all parameters, which includes everything from their object type,
default value(s), and how it is stored within the package, as well as
relevant links or similar keyword arguments.
# Acknowledgements
We thank Dennis Stello, Tim Bedding, Marc Hon, Yifan Chen, Yaguang Li, and other
`pySYD` users for discussion and suggestions which helped with the development
of this software. We acknowledge support from the National Aeronautics and Space
Administration (80NSSC19K0597, 80NSSC21K0652) as well as the Alfred P. Sloan Foundation.
# References
|
ashleychontosREPO_NAMEpySYDPATH_START.@pySYD_extracted@pySYD-master@paper@paper.md@.PATH_END.py
|
{
"filename": "B_Model_Tutorial_4_Hidden_Markov_Models.ipynb",
"repo_name": "jmschrei/pomegranate",
"repo_path": "pomegranate_extracted/pomegranate-master/docs/tutorials/B_Model_Tutorial_4_Hidden_Markov_Models.ipynb",
"type": "Jupyter Notebook"
}
|
## Hidden Markov Models
author: Jacob Schreiber <br>
contact: jmschreiber91@gmail.com
Hidden Markov models (HMMs) are a probability distribution over sequences that are made up of two components: a set of probability distributions and a transition matrix (sometimes represented as a graph) describing how sequences can proceed through the model. HMMs are the flagship implementation in pomegranate and were the first algorithm originally implemented.
HMMs are a form of structured prediction method that are popular for tagging all elements in a sequence with some "hidden" state. They can be thought of as extensions of Markov chains where, instead of the probability of the next observation being dependent on the current observation, the probability of the next hidden state is dependent on the current hidden state, and the next observation is derived from that hidden state. An example of this can be part of speech tagging, where the observations are words and the hidden states are parts of speech. Each word gets tagged with a part of speech, but dynamic programming is utilized to search through all potential word-tag combinations to identify the best set of tags across the entire sentence.
```python
%pylab inline
import seaborn; seaborn.set_style('whitegrid')
import torch
numpy.random.seed(0)
numpy.set_printoptions(suppress=True)
%load_ext watermark
%watermark -m -n -p numpy,scipy,torch,pomegranate
```
Populating the interactive namespace from numpy and matplotlib
numpy : 1.23.4
scipy : 1.9.3
torch : 1.13.0
pomegranate: 1.0.0
Compiler : GCC 11.2.0
OS : Linux
Release : 4.15.0-208-generic
Machine : x86_64
Processor : x86_64
CPU cores : 8
Architecture: 64bit
### Identification of GC-rich regions of the genome
Let's take the simplified example of CG island detection on a sequence of DNA. DNA is made up of the four canonical nucleotides, abbreviated 'A', 'C', 'G', and 'T'. We can say that regions of the genome that are enriched for nucleotides 'C' and 'G' are 'CG islands', which is a simplification of the real biological concept but sufficient for our example. The issue with identifying these regions is that they are not exclusively made up of the nucleotides 'C' and 'G', but have some 'A's and 'T's scattered amongst them. A simple model that looked for long stretches of C's and G's would not perform well, because it would miss most of the real regions.
We can start off by building the model. Because HMMs involve the transition matrix, which is often represented using a graph over the hidden states, building them requires a few more steps that a simple distribution or the mixture model. Our simple model will be composed of two distributions. One distribution will be a uniform distribution across all four characters and one will have a preference for the nucleotides C and G, while still allowing the nucleotides A and T to be present.
```python
from pomegranate.distributions import Categorical
d1 = Categorical([[0.25, 0.25, 0.25, 0.25]])
d2 = Categorical([[0.10, 0.40, 0.40, 0.10]])
```
Now we can define the HMM and pass in states. Note that in pomegranate v1.0.0, HMMs are split into two implementations: `DenseHMM`, which has a dense transition matrix and so can use standard matrix multiplies in the backend, and `SparseHMM`, which has a sparse transition matrix and uses more complicated scatter-add operations. Also note that you no longer need to wrap the distributions in `Node` objects. You pass the distributions in directly.
```python
from pomegranate.hmm import DenseHMM
model = DenseHMM()
model.add_distributions([d1, d2])
```
Then we have to define the transition matrix, which is the probability of going from one hidden state to the next hidden state. In some cases, like this one, there are high self-loop probabilities, indicating that it's likely that one will stay in the same hidden state from one observation to the next in the sequence. Other cases have a lower probability of staying in the same state, like the part of speech tagger. A part of the transition matrix is the start probabilities, which is the probability of starting in each of the hidden states. Because we create these transitions one at a time, they are very amenable to sparse transition matrices, where it is impossible to transition from one hidden state to the next. Note that we are passing in distribution objects, not node objects as in the previous version, here.
```python
model.add_edge(model.start, d1, 0.5)
model.add_edge(model.start, d2, 0.5)
model.add_edge(d1, d1, 0.9)
model.add_edge(d1, d2, 0.1)
model.add_edge(d2, d1, 0.1)
model.add_edge(d2, d2, 0.9)
```
Another big change is that we no longer need to bake the HMM once we're done!
```python
#model.bake()
```
Now we can create a sequence to run through the model. Make sure that this sequence has been converted to a numeric representation of categories. This can be done either simply, as below, or using a preprocessing tool from some other package like scikit-learn. Also, make sure that your input sequence is 3D with the three dimensions corresponding to (batch_size, sequence length, dimensionality). Here, batch_size and dimensionality are both 1. The inclusion of batch size helps significantly when processing several sequences in parallel.
```python
sequence = 'CGACTACTGACTACTCGCCGACGCGACTGCCGTCTATACTGCGCATACGGC'
X = numpy.array([[[['A', 'C', 'G', 'T'].index(char)] for char in sequence]])
X.shape
```
(1, 51, 1)
Now we can make predictions on some sequence. Let's create some sequence that has a CG enriched region in the middle and see whether we can identify it.
```python
y_hat = model.predict(X)
print("sequence: {}".format(''.join(sequence)))
print("hmm pred: {}".format(''.join([str(y.item()) for y in y_hat[0]])))
```
sequence: CGACTACTGACTACTCGCCGACGCGACTGCCGTCTATACTGCGCATACGGC
hmm pred: 000000000000000111111111111111100000000000000001111
It looks like it successfully identified a CG island in the middle (the long stretch of 1's) and another shorter one at the end. More importantly, the model wasn't tricked into thinking that every CG or even pair of CGs was an island. It required many C's and G's to be part of a longer stretch to identify that region as an island. Naturally, the balance of the transition and emission probabilities will heavily influence what regions are detected.
Let's say, though, that we want to get rid of that CG island prediction at the end because we don't believe that real islands can occur at the end of the sequence. We can take care of this by adding in an explicit end state that only the non-island hidden state can get to. We enforce that the model has to end in the end state, and if only the non-island state gets there, the sequence of hidden states must end in the non-island state. Here's how:
```python
model = DenseHMM()
model.add_distributions([d1, d2])
model.add_edge(model.start, d1, 0.5)
model.add_edge(model.start, d2, 0.5)
model.add_edge(d1, d1, 0.89 )
model.add_edge(d1, d2, 0.10 )
model.add_edge(d1, model.end, 0.01)
model.add_edge(d2, d1, 0.1 )
model.add_edge(d2, d2, 0.9)
```
Note that all we did was add a transition from d1 to model.end with some low probability. This probability doesn't have to be high if there's only a single transition there, because there's no other possible way of getting to the end state.
```python
y_hat = model.predict(X)
print("sequence: {}".format(''.join(sequence)))
print("hmm pred: {}".format(''.join([str(y.item()) for y in y_hat[0]])))
```
sequence: CGACTACTGACTACTCGCCGACGCGACTGCCGTCTATACTGCGCATACGGC
hmm pred: 000000000000000111111111111111100000000000000000000
This seems far more reasonable. There is a single CG island surrounded by background sequence, and something at the end. If we knew that CG islands cannot occur at the end of sequences, we need only modify the underlying structure of the HMM in order to say that the sequence must end from the background state.
In the same way that mixtures could provide probabilistic estimates of class assignments rather than only hard labels, hidden Markov models can do the same. These estimates are the posterior probabilities of belonging to each of the hidden states given the observation, but also given the rest of the sequence.
```python
plt.plot(model.predict_proba(X)[0], label=['background', 'CG island'])
plt.legend()
plt.show()
```

We can see here the transition from the first non-island region to the middle island region, with high probabilities in one column turning into high probabilities in the other column. The predict method is just taking the most likely element --- the maximum-a-posteriori estimate.
In addition to using the forward-backward algorithm to just calculate posterior probabilities for each observation, we can count the number of transitions that are predicted to occur across each edge.
```python
transitions = model.forward_backward(X)[0][0]
transitions
```
tensor([[28.9100, 2.4128],
[ 2.8955, 15.7806]])
### Initializing Hidden Markov Models
There are two ways to initialize an HMM using pomegranate. The first is to explicitly pass a list of distributions, a dense transition matrix, and optionally start and end probabilities. We can recreate the above model using this approach.
```python
model = DenseHMM([d1, d2], edges=[[0.89, 0.1], [0.1, 0.9]], starts=[0.5, 0.5], ends=[0.01, 0.0])
```
We can check that this initialization produces the same model by making the same plot of predicted probabilities across the sequence.
```python
plt.plot(model.predict_proba(X)[0], label=['background', 'CG island'])
plt.legend()
plt.show()
```

This also works when creating a `SparseHMM` object, though the edges must be a list of 3-ples rather than a matrix.
```python
from pomegranate.hmm import SparseHMM
edges = [
[d1, d1, 0.89],
[d1, d2, 0.1],
[d2, d1, 0.1],
[d2, d2, 0.9]
]
model = SparseHMM([d1, d2], edges=edges, starts=[0.5, 0.5], ends=[0.01, 0.0])
plt.plot(model.predict_proba(X)[0], label=['background', 'CG island'])
plt.legend()
plt.show()
```

The second way is to follow the procedure outlined above where you first create an uninitialized model with nothing passed into it and then you add the distributions and edges using the appropriate methods. Although this can be used for both `DenseHMM` and `SparseHMM` models, this is likely most useful for `SparseHMM` models that are very sparse or when you're trying to procedurally generate an HMM based on external factors. Here is the same code as before that implements the HMM using this approach.
```python
model = SparseHMM()
model.add_distributions([d1, d2])
model.add_edge(model.start, d1, 0.5)
model.add_edge(model.start, d2, 0.5)
model.add_edge(d1, d1, 0.89 )
model.add_edge(d1, d2, 0.10 )
model.add_edge(d1, model.end, 0.01)
model.add_edge(d2, d1, 0.1 )
model.add_edge(d2, d2, 0.9)
```
Similar to other methods, we can create an HMM with uninitialized distributions. These distributions will be initialized using k-means clustering when provided with data. When using a `DenseHMM` we can also choose to not pass in edges and have them be initialized to uniform probabilities. When using a `SparseHMM` we must pass in edges because, otherwise, the model will not know which edges exist and which do not. Essentially, it would have to assume uniform probabilities as well, at which point the model would have a dense transition matrix but just operate inefficiently by treating it as a sparse matrix.
```python
from pomegranate.distributions import Normal
X3 = torch.randn(100, 50, 2)
model3 = DenseHMM([Normal(), Normal(), Normal()], verbose=True)
model3._initialize(X3)
model3.distributions[0].means, model3.distributions[1].means, model3.distributions[2].means
```
(Parameter containing:
tensor([-0.6929, 0.2710]),
Parameter containing:
tensor([1.0994, 0.7262]),
Parameter containing:
tensor([ 0.4207, -1.0267]))
You can control how the k-means itself is initialized by passing in a value to `init`.
However, you do not need to manually initialize your models. If you call the `fit` method or the `summarize` method, these distributions will be initialized if they have not yet been.
```python
X3 = torch.randn(100, 50, 2)
model3 = DenseHMM([Normal(), Normal(), Normal()], max_iter=5, verbose=True)
model3.fit(X3)
```
[1] Improvement: -23.134765625, Time: 0.007689s
DenseHMM(
(start): Silent()
(end): Silent()
(distributions): ModuleList(
(0): Normal()
(1): Normal()
(2): Normal()
)
)
#### Dense and Sparse HMMs
Separately from whether the HMM is initialized by passing in the distributions and edges initially or building the model programmatically, the transition matrix can be represented using a sparse matrix or a dense matrix. Although a dense transition matrix allows fast matrix multiplications to be used for each algorithm, once there are enough zeroes a matrix multiply wastes a lot of computation handling them.
Because the backend and strategy for initializing the model (i.e., passing in a dense transition matrix or a list of edges) differ, pomegranate v1.0.0 splits the implementation in two: `DenseHMM` and `SparseHMM`. Both have the same functionality and API and, given the same transition matrix, will yield the same result.
```python
edges = [[0.89, 0.1], [0.1, 0.9]]
starts = [0.5, 0.5]
ends = [0.01, 0.0]
model1 = DenseHMM([d1, d2], edges=edges, starts=starts, ends=ends)
model1.predict_proba(X)[0][12:19]
```
tensor([[0.8016, 0.1984],
[0.6708, 0.3292],
[0.6163, 0.3837],
[0.4196, 0.5804],
[0.3092, 0.6908],
[0.2535, 0.7465],
[0.2361, 0.7639]])
```python
edges = [
[d1, d1, 0.89],
[d1, d2, 0.1],
[d2, d1, 0.1],
[d2, d2, 0.9]
]
model2 = SparseHMM([d1, d2], edges=edges, starts=starts, ends=ends)
model2.predict_proba(X)[0][12:19]
```
tensor([[0.8016, 0.1984],
[0.6708, 0.3292],
[0.6163, 0.3837],
[0.4196, 0.5804],
[0.3092, 0.6908],
[0.2535, 0.7465],
[0.2361, 0.7639]])
```python
X = numpy.random.choice(4, size=(1000, 500, 1))
%timeit -n 10 -r 5 model1.predict_proba(X)
%timeit -n 10 -r 5 model2.predict_proba(X)
```
102 ms ± 13.9 ms per loop (mean ± std. dev. of 5 runs, 10 loops each)
177 ms ± 12.9 ms per loop (mean ± std. dev. of 5 runs, 10 loops each)
Check out the benchmarks folder to see a more thorough comparison of the two, but it looks like, even for a tiny model, the dense transition matrix is around twice as fast.
### Fitting Hidden Markov Models
Hidden Markov models are usually fit to unlabeled data using the Baum-Welch algorithm. This is a structured EM algorithm that accounts for transitions between distributions as well as the distribution parameters themselves. Essentially, one uses the forward-backward algorithm to infer the expected number of transitions across each edge and the expected probability of each observation aligning to each state and uses those estimates to update the underlying parameters.
```python
d1 = Categorical([[0.25, 0.25, 0.25, 0.25]])
d2 = Categorical([[0.10, 0.40, 0.40, 0.10]])
edges = [[0.89, 0.1], [0.1, 0.9]]
starts = [0.5, 0.5]
ends = [0.01, 0.0]
model = DenseHMM([d1, d2], edges=edges, starts=starts, ends=ends, verbose=True)
model.fit(X)
```
[1] Improvement: 16405.125, Time: 0.09408s
[2] Improvement: 420.75, Time: 0.0876s
[3] Improvement: 207.25, Time: 0.09545s
[4] Improvement: 116.125, Time: 0.107s
[5] Improvement: 75.3125, Time: 0.09284s
[6] Improvement: 47.6875, Time: 0.0797s
[7] Improvement: 34.75, Time: 0.1045s
[8] Improvement: 27.3125, Time: 0.08161s
[9] Improvement: 16.0625, Time: 0.08903s
[10] Improvement: 15.6875, Time: 0.1197s
[11] Improvement: 9.1875, Time: 0.09623s
[12] Improvement: 10.625, Time: 0.08911s
[13] Improvement: 8.125, Time: 0.1064s
[14] Improvement: 6.3125, Time: 0.07936s
[15] Improvement: 2.3125, Time: 0.08887s
[16] Improvement: 5.0625, Time: 0.108s
[17] Improvement: 4.3125, Time: 0.08656s
[18] Improvement: 3.1875, Time: 0.08103s
[19] Improvement: 1.75, Time: 0.1282s
[20] Improvement: 4.4375, Time: 0.09025s
[21] Improvement: -1.6875, Time: 0.07991s
DenseHMM(
(start): Silent()
(end): Silent()
(distributions): ModuleList(
(0): Categorical()
(1): Categorical()
)
)
We can change the number of iterations by setting either the `max_iter` parameter or the `tol` parameter.
```python
d1 = Categorical([[0.25, 0.25, 0.25, 0.25]])
d2 = Categorical([[0.10, 0.40, 0.40, 0.10]])
edges = [[0.89, 0.1], [0.1, 0.9]]
starts = [0.5, 0.5]
ends = [0.01, 0.0]
model = DenseHMM([d1, d2], edges=edges, starts=starts, ends=ends, max_iter=5, verbose=True)
model.fit(X)
```
[1] Improvement: 16405.125, Time: 0.1029s
[2] Improvement: 420.75, Time: 0.1356s
[3] Improvement: 207.25, Time: 0.1192s
[4] Improvement: 116.125, Time: 0.08874s
[5] Improvement: 75.3125, Time: 0.1781s
DenseHMM(
(start): Silent()
(end): Silent()
(distributions): ModuleList(
(0): Categorical()
(1): Categorical()
)
)
```python
d1 = Categorical([[0.25, 0.25, 0.25, 0.25]])
d2 = Categorical([[0.10, 0.40, 0.40, 0.10]])
edges = [[0.89, 0.1], [0.1, 0.9]]
starts = [0.5, 0.5]
ends = [0.01, 0.0]
model = DenseHMM([d1, d2], edges=edges, starts=starts, ends=ends, tol=50, verbose=True)
model.fit(X)
```
[1] Improvement: 16405.125, Time: 0.1512s
[2] Improvement: 420.75, Time: 0.1788s
[3] Improvement: 207.25, Time: 0.1501s
[4] Improvement: 116.125, Time: 0.127s
[5] Improvement: 75.3125, Time: 0.08657s
[6] Improvement: 47.6875, Time: 0.1011s
DenseHMM(
(start): Silent()
(end): Silent()
(distributions): ModuleList(
(0): Categorical()
(1): Categorical()
)
)
The same parameters and signature applies to `SparseHMM` models.
|
jmschreiREPO_NAMEpomegranatePATH_START.@pomegranate_extracted@pomegranate-master@docs@tutorials@B_Model_Tutorial_4_Hidden_Markov_Models.ipynb@.PATH_END.py
|
{
"filename": "test_ring_slice.py",
"repo_name": "wdpozzo/raynest",
"repo_path": "raynest_extracted/raynest-main/raynest/tests/test_ring_slice.py",
"type": "Python"
}
|
import unittest
import numpy as np
import raynest.model
class RingModel(raynest.model.Model):
    """Likelihood concentrated on a circular ring in 2D parameter space.

    The analytic log-evidence for this model is logZ ~ -2.31836.
    """

    names = ['x', 'y']
    bounds = [[-2, 2], [-2, 2]]
    data = None
    analytic_log_Z = -2.31836

    @staticmethod
    def log_likelihood(x):
        """Evaluate the ring-shaped log-likelihood at the live point *x*."""
        return log_ring(x['x'], x['y'])

    def force(self, p):
        """Return a zero force vector (no gradient information is used)."""
        return np.zeros(
            1, dtype={'names': p.names, 'formats': ['f8'] * len(p.names)}
        )
def log_ring(x, y, radius=1.0, thickness=0.1):
    """Gaussian log-density of a ring with the given radius and thickness.

    The value is 0 on the circle x**2 + y**2 == radius**2 and decreases
    quadratically with the radial distance from that circle.
    """
    radial = np.sqrt(x**2 + y**2)
    deviation = radius - radial
    return -0.5 * deviation**2 / thickness**2
class RingTestCase(unittest.TestCase):
    """
    Integration test: nested sampling on the ring model.

    Runs raynest on RingModel and checks the estimated log-evidence
    against the analytic value to within a 2-sigma tolerance.
    """
    def setUp(self):
        # Build the model and run the sampler once before each test.
        # NOTE(review): nlive=1000 / maxmcmc=1000 makes this a slow test.
        self.model=RingModel()
        self.work=raynest.raynest(self.model,verbose=1,nslice=4,nlive=1000,maxmcmc=1000)
        self.work.run()
    def test_evidence(self):
        # 2 sigma tolerance
        logZ = self.work.logZ
        H = self.work.information  # KL information; fetched for inspection, not asserted on
        tolerance = 2.0*self.work.logZ_error
        print('2-sigma statistic error in logZ: {0:0.3f}'.format(tolerance))
        print('Analytic logZ {0}'.format(self.model.analytic_log_Z))
        print('Estimated logZ {0}'.format(logZ))
        # Pass if the estimate is within 2 sigma of the analytic value.
        self.assertTrue(np.abs(logZ - RingModel.analytic_log_Z)<tolerance, 'Incorrect evidence for normalised distribution: {0:.3f} +/ {2:.3f} instead of {1:.3f}'.format(logZ,RingModel.analytic_log_Z,tolerance ))
def test_all():
    """Run the full test suite with verbose (level 2) output."""
    unittest.main(verbosity=2)
# When executed directly, run the suite quietly (verbosity 0).
if __name__=='__main__':
    unittest.main(verbosity=0)
|
wdpozzoREPO_NAMEraynestPATH_START.@raynest_extracted@raynest-main@raynest@tests@test_ring_slice.py@.PATH_END.py
|
{
"filename": "reshape_op_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/kernel_tests/array_ops/reshape_op_test.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reshape_op."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
@test_util.with_eager_op_as_function
class ReshapeTest(test.TestCase):
  """Tests for array_ops.reshape.

  Covers: many dtypes, inferred (-1) dimensions, zero-sized dimensions,
  gradients, static shape inference with partially-known shapes, int64
  shape tensors, and error conditions.
  """

  def _testReshape(self, x, y, use_gpu=False):
    # Compare tf.reshape against numpy's reshape for the same target shape,
    # then repeat with the shape supplied as an int64 tensor.
    with self.cached_session(use_gpu=use_gpu):
      np_ans = x.reshape(y)
      tf_ans = array_ops.reshape(x, y)
      out = self.evaluate(tf_ans)
      self.assertEqual(tf_ans.get_shape(), out.shape)
      self.assertShapeEqual(np_ans, tf_ans)
      # Repeat with an int64 shape tensor.
      y64 = constant_op.constant(y, dtype=dtypes.int64)
      tf_ans = array_ops.reshape(x, y64)
      out = self.evaluate(tf_ans)
      self.assertEqual(tf_ans.get_shape(), out.shape)
      self.assertShapeEqual(np_ans, tf_ans)

  def _testZeroDimReshape(self, x, shape, expected, use_gpu=False):
    # Reshape an empty (zero-element) tensor and check only the resulting
    # shape, with both int32 and int64 shape tensors.
    with self.cached_session(use_gpu=use_gpu):
      y = array_ops.reshape(x, shape)
      out = self.evaluate(y)
      self.assertEqual(expected, out.shape)
      # Repeat with an int64 shape tensor.
      shape64 = constant_op.constant(shape, dtype=dtypes.int64)
      y = array_ops.reshape(x, shape64)
      out = self.evaluate(y)
      self.assertEqual(expected, out.shape)

  def _testBothReshape(self, x, y):
    # Run the same reshape check on CPU and GPU.
    self._testReshape(x, y, False)
    self._testReshape(x, y, True)

  # --- dtype coverage: same [1, 6] -> [2, 3] reshape for each dtype ---
  def testBoolBasic(self):
    x = np.arange(1., 7.).reshape([1, 6]) > 3
    self._testBothReshape(x, [2, 3])

  def testFloatBasic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.float32)
    self._testBothReshape(x, [2, 3])

  def testFloat16Basic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.float16)
    self._testBothReshape(x, [2, 3])

  def testBfloat16Basic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(dtypes.bfloat16.as_numpy_dtype)
    self._testBothReshape(x, [2, 3])

  def testDoubleBasic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.float64)
    self._testBothReshape(x, [2, 3])

  def testInt32Basic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.int32)
    self._testBothReshape(x, [2, 3])

  def testComplex64Basic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex64)
    self._testBothReshape(x, [2, 3])

  def testComplex128Basic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex128)
    self._testBothReshape(x, [2, 3])

  def testFloatReshapeThreeDimensions(self):
    x = np.arange(1., 28.).reshape([1, 27]).astype(np.float32)
    self._testBothReshape(x, [3, 3, 3])

  # --- inferred (-1) dimension in different positions ---
  def testFloatUnspecifiedDimOnly(self):
    x = np.arange(1., 7.).reshape([6]).astype(np.float32)
    self._testBothReshape(x, [-1])

  def testFloatUnspecifiedDimBegin(self):
    x = np.arange(1., 7.).reshape([6]).astype(np.float32)
    self._testBothReshape(x, [-1, 2])

  def testFloatUnspecifiedDimEnd(self):
    x = np.arange(1., 7.).reshape([6]).astype(np.float32)
    self._testBothReshape(x, [3, -1])

  # --- zero-sized dimensions ---
  def testZeroDimBasic(self):
    x = np.zeros([0, 6]).astype(np.float32)
    self._testBothReshape(x, [0, 2, 3])

  def testZeroDimReshapeR1(self):
    x = np.zeros([0, 6]).astype(np.float32)
    self._testBothReshape(x, [-1])

  def testZeroDimReshapeR3(self):
    x = np.zeros([0, 6]).astype(np.float32)
    self._testBothReshape(x, [-1, 2, 3])

  # TODO(vrv): Add tests for failure conditions once python test_util
  # reports errors.

  def testFloatReshapeGradThreeDimensions(self):
    # Numerical gradient check through a reshape.
    x = np.arange(1., 25.).reshape([2, 3, 4]).astype(np.float32)
    input_tensor = constant_op.constant(x)
    def reshape(x):
      return array_ops.reshape(x, [1, 8, 3])
    with self.cached_session():
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(reshape, [input_tensor]))
    self.assertLess(err, 1e-3)

  def testFloatEmpty(self):
    x = np.empty((0, 0, 0, 0), dtype=np.float32)
    self._testBothReshape(x, [1, 2, 3, 0])
    self._testBothReshape(x, [1, 0, 0, 4])
    self._testBothReshape(x, [0, 0, 0, 0])
    self._testBothReshape(x, [1, 2, 0])
    self._testBothReshape(x, [0, 0, 0])
    self._testBothReshape(x, [1, -1, 5])

  def testZeroDimWithUnspecifiedDim(self):
    # -1 must still be resolvable when another dimension is 0.
    for use_gpu in (True, False):
      self._testZeroDimReshape(x=np.zeros([0, 6]).astype(np.float32),
                               shape=[0, -1, 3],
                               expected=(0, 2, 3),
                               use_gpu=use_gpu)

  @test_util.run_deprecated_v1
  def testErrors(self):
    # Static shape inference should reject incompatible element counts.
    y = constant_op.constant(0.0, shape=[23, 29, 31])
    with self.assertRaisesRegex(ValueError, "must be evenly divisible by 17"):
      array_ops.reshape(y, [17, -1])
    z = constant_op.constant(0.0, shape=[32, 128])
    with self.assertRaisesRegex(ValueError,
                                "Cannot reshape a tensor with 4096 elements"):
      array_ops.reshape(z, [4095])

  def testPartialShapes(self):
    # Testing unknown shapes in graph building.
    with ops.Graph().as_default():
      x = array_ops.placeholder(dtypes.float32)
      # Unknown input shape, partial new shape.
      y = array_ops.reshape(x, [1, 1, -1, 1])
      self.assertEqual([1, 1, None, 1], y.get_shape().as_list())
      # Unknown input shape, unknown new shape.
      y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32))
      self.assertEqual(None, y.get_shape().ndims)
      # Unknown input shape, known rank for new shape.
      y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32, shape=(3,)))
      self.assertEqual([None, None, None], y.get_shape().as_list())
      # Unknown input shape, partial new shape using `tf.stack()`.
      y = array_ops.reshape(x, [array_ops.placeholder(dtypes.int32), 37])
      self.assertEqual([None, 37], y.get_shape().as_list())
      # Unknown input shape, partial new shape using `tf.concat()`.
      y = array_ops.reshape(
          x,
          array_ops.concat(
              [array_ops.placeholder(
                  dtypes.int32, shape=(2,)), [37, 42]], 0))
      self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
      # Unknown input shape, partial new shape using `tf.shape()`.
      y = array_ops.reshape(
          x,
          array_ops.shape(
              array_ops.placeholder(
                  dtypes.float32, shape=[None, 37, None])))
      self.assertEqual([None, 37, None], y.get_shape().as_list())

  def testTensorShape(self):
    # The target shape may be given as Dimension objects or a TensorShape.
    x = array_ops.zeros([1, 100])
    y = array_ops.reshape(
        x, [tensor_shape.Dimension(100),
            tensor_shape.Dimension(1)])
    self.assertEqual([100, 1], y.get_shape().as_list())
    y = array_ops.reshape(x, tensor_shape.TensorShape([100, 1]))
    self.assertEqual([100, 1], y.get_shape().as_list())

  def testInt64Shape(self):
    with ops.device("/device:CPU:0"):
      x = array_ops.zeros([50000, 50000], dtype=dtypes.bool)
      # Provide dimension larger than int32
      y = array_ops.reshape(x, [50000**2])
      self.assertEqual([50000**2], y.get_shape().as_list())
      # Even if first dimension is within int32, ensure we correctly go to int64
      y = array_ops.reshape(x, [1, 50000**2])
      self.assertEqual([1, 50000**2], y.get_shape().as_list())

  @test_util.run_v2_only
  def testTooLargeShape(self):
    # A requested shape whose element count overflows must fail cleanly.
    with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                "too many elements"):
      x = array_ops.reshape([1], np.array([21943, 45817, 30516, 61760, 38987]))
      self.evaluate(x)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@kernel_tests@array_ops@reshape_op_test.py@.PATH_END.py
|
{
"filename": "deconv.py",
"repo_name": "HERA-Team/aipy",
"repo_path": "aipy_extracted/aipy-main/aipy/deconv.py",
"type": "Python"
}
|
"""
A module implementing various techniques for deconvolving an image by a
kernel. Currently implemented are Clean, Least-Squares, Maximum Entropy,
and Annealing. Standard parameters to these functions are:
im = image to be deconvolved.
ker = kernel to deconvolve by (must be same size as im).
mdl = a priori model of what the deconvolved image should look like.
maxiter = maximum number of iterations performed before terminating.
tol = termination criterion, lower being more optimized.
verbose = print info on how things are progressing.
lower = lower bound of pixel values in deconvolved image
upper = upper bound of pixel values in deconvolved image
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import sys
from . import _deconv
# Find smallest representable # > 0 for setting clip level
lo_clip_lev = np.finfo(np.float64).tiny
def clean(im, ker, mdl=None, area=None, gain=.1, maxiter=10000, tol=1e-3,
        stop_if_div=True, verbose=False, pos_def=False):
    """Hoegbom CLEAN deconvolution of ``im`` by ``ker``.

    CLEAN assumes the image is built from point sources: each iteration
    adds ``gain`` times the peak residual to the model at the peak's
    location and removes the corresponding kernel response from the
    residual.  Iteration ends after ``maxiter`` steps, when the change
    drops below ``tol``, or (with ``stop_if_div``) when the residual
    starts growing.  Works on 1-D and 2-D, real or complex data.

    gain: fraction of each residual peak used per iteration; too low is
        slow, too high deconvolves poorly.

    Returns (mdl, info); info holds the residual ('res'), RMS residual
    ('score'), termination reason ('term') and iteration count ('iter').
    """
    if mdl is None:
        mdl = np.zeros(im.shape, dtype=im.dtype)
        res = im.copy()
    else:
        mdl = mdl.copy()
        ndim = len(mdl.shape)
        # Subtract the model's kernel response to form the initial residual.
        if ndim == 1:
            model_response = np.fft.ifft(np.fft.fft(mdl) * np.fft.fft(ker))
        elif ndim == 2:
            model_response = np.fft.ifft2(np.fft.fft2(mdl) * np.fft.fft2(ker))
        else:
            raise ValueError('Number of dimensions != 1 or 2')
        res = im - model_response.astype(im.dtype)
    if area is None:
        area = np.ones(im.shape, dtype=np.int_)
    else:
        area = area.astype(np.int_)
    # The C extension mutates res and mdl in place and returns the
    # iteration count (negative on divergence).
    n_iter = _deconv.clean(res, ker, mdl, area,
                           gain=gain, maxiter=maxiter, tol=tol,
                           stop_if_div=int(stop_if_div), verbose=int(verbose),
                           pos_def=int(pos_def))
    score = np.sqrt(np.average(np.abs(res)**2))
    info = {'success': n_iter > 0 and n_iter < maxiter, 'tol': tol}
    if n_iter < 0:
        info.update({'term': 'divergence', 'iter': -n_iter})
    elif n_iter < maxiter:
        info.update({'term': 'tol', 'iter': n_iter})
    else:
        info.update({'term': 'maxiter', 'iter': n_iter})
    info.update({'res': res, 'score': score})
    if verbose:
        print('Term Condition:', info['term'])
        print('Iterations:', info['iter'])
        print('Score:', info['score'])
    return mdl, info
def recenter(a, c):
    """Slide the (0,0) point of matrix a to a new location tuple c.

    Equivalent to cyclically shifting the 2-D array so that the element
    at a[c[0] % nrows, c[1] % ncols] lands at index (0, 0).
    """
    row_shift = c[0] % a.shape[0]
    col_shift = c[1] % a.shape[1]
    shifted_rows = np.roll(a, -row_shift, axis=0)
    return np.roll(shifted_rows, -col_shift, axis=1)
def lsq(im, ker, mdl=None, area=None, gain=.1, tol=1e-3, maxiter=200,
        lower=lo_clip_lev, upper=np.inf, verbose=False):
    """Simple least-squares deconvolution of ``im`` by ``ker``.

    Saves computation by assuming a diagonal pixel-pixel gradient of the
    fit, i.e. that the convolution kernel is approximately a
    delta-function (good for small kernels; see Cornwell & Evans 1984).
    Unlike maximum entropy, lsq makes no smoothness promise and needs no
    noise level, but can introduce structure with no evidence in the
    original image.  Terminates when the fractional score change between
    iterations falls below ``tol``, or after ``maxiter`` iterations.

    gain: fraction of the gradient-derived step taken per iteration;
        too low is slow, too high can oscillate.

    Returns (x, info) where x is the fitted model and info holds the
    residual ('res'), mean chi^2 ('score') and iteration count ('iter').
    """
    if mdl is None:
        #mdl = np.zeros_like(im)
        mdl = np.zeros(im.shape, dtype=im.dtype)
    x = mdl.copy()
    if area is None:
        area = np.ones(im.shape, dtype=np.int_)
    else:
        area = area.astype(np.int_)
    # Accept float maxiter (e.g. maxiter/4 from callers) for py3 range().
    maxiter = int(maxiter)
    # Estimate gain of the kernel
    q = np.sqrt((np.abs(ker)**2).sum())
    ker_i = np.fft.fft2(ker)
    info = {'success':True, 'term':'maxiter', 'tol':tol}
    # Function to calculate chi-square and (diagonal-approximation) gradient
    def f(x):
        x_conv_ker = np.fft.ifft2(np.fft.fft2(x) * ker_i).real
        diff = (im - x_conv_ker) * area
        g_chi2 = -2*q*(diff)
        chi2 = np.abs(diff)**2
        return chi2, g_chi2
    score = 0
    i = -1  # so info['iter'] is well-defined even if maxiter <= 0
    # Start the fit loop
    for i in range(maxiter):
        chi2, g_chi2 = f(x)
        n_score = np.average(chi2)
        term = abs(1 - score/n_score)
        if verbose:
            slope = np.sqrt(np.average(g_chi2**2))
            print('Step %d:' % i, 'score', score, 'slope', slope, 'term', term)
        if term < tol:
            info['term'] = 'tol'
            break
        score = n_score
        # For especially clean imgs, g_chi2 in some components can go to 0.
        # The np.where keeps those components at 0; errstate suppresses the
        # spurious divide-by-zero warning from evaluating 1/g_chi2 everywhere.
        with np.errstate(divide='ignore', invalid='ignore'):
            d_x = np.where(abs(g_chi2) > 0, -(1/g_chi2) * chi2, 0)
        x = np.clip(x + gain * d_x, lower, upper)
    info.update({'res':im - np.fft.ifft2(np.fft.fft2(x) * ker_i).real,
        'score': score, 'iter':i+1})
    return x, info
def maxent(im, ker, var0, mdl=None, gain=.1, tol=1e-3, maxiter=200,
        lower=lo_clip_lev, upper=np.inf, verbose=False):
    """Maximum entropy deconvolution (MEM) (see Cornwell and Evans 1984
    "A Simple Maximum Entropy Deconvolution Algorithm" and Sault 1990
    "A Modification of the Cornwell and Evans Maximum Entropy Algorithm")
    is similar to lsq, but the fit is only optimized to within the specified
    variance (var0) and then "smoothness" is maximized.  This has several
    desirable effects including uniqueness of solution, equal weighting of
    Fourier components, and absence of spurious structure.  The same
    delta-kernel approximation (see lsq) is made here.
    var0: The estimated variance (noise power) in the image.
    gain: The fraction of the step size (calculated from the gradient) taken
        in each iteration.  If this is too low, the fit takes unnecessarily
        long.  If it is too high, the fit process can oscillate.
    Returns (b_i, info): the fitted image and a dict with the residual
    ('res'), final score, Lagrange multiplier ('alpha'), and iteration count.
    """
    d_i = im.flatten()
    # Kernel power; appears in the chi^2 gradient under the delta-kernel
    # approximation.
    q = np.sqrt((ker**2).sum())
    minus_two_q = -2*q
    two_q_sq = 2*q**2
    if mdl is None:
        #mdl = np.ones_like(im) * np.average(im) / ker.sum()
        mdl = np.ones(im.shape, dtype=im.dtype) * np.average(im) / ker.sum()
    #if mdl is None: mdl = np.ones_like(im) * np.average(im) / q
    # Total noise power target: fit chi^2 down to this, no further.
    Nvar0 = d_i.size * var0
    # NOTE(review): despite the name, this is the FORWARD FFT of the kernel
    # (used for convolution), not an inverse.
    inv_ker = np.fft.fft2(ker)
    m_i = mdl.flatten()
    def next_step(b_i, alpha, verbose=False):
        # One Newton-like MEM step.  b_i is temporarily reshaped to 2-D for
        # the FFT convolution, then flattened again (in-place .shape
        # mutation, so b_i must own its data).
        b_i.shape = im.shape
        b_i_conv_ker = np.fft.ifft2(np.fft.fft2(b_i) * inv_ker).real
        b_i.shape = m_i.shape
        diff = (im - b_i_conv_ker).flatten()
        # chi^2 relative to the target noise power (0 means "fit achieved").
        chi2 = np.dot(diff,diff) - Nvar0
        g_chi2 = minus_two_q*(diff)
        # Gradient / diagonal Hessian of the objective J = entropy - alpha*chi^2.
        g_J = (-np.log(b_i/m_i) - 1) - alpha * g_chi2
        gg_J = (-1/b_i) - alpha * two_q_sq
        # Define dot product using the metric of -gg_J^-1
        def dot(x, y): return (x*y/-gg_J).sum()
        # Normalized gradient magnitude; used as the convergence score.
        score = dot(g_J, g_J) / dot(1,1)
        d_alpha = (chi2 + dot(g_chi2,g_J)) / dot(g_chi2,g_chi2)
        # For especially clean images, gg_J in some components can go to 0.
        # This check makes lsq a little slower for most images, though...
        d_b_i = np.where(abs(gg_J) > 0, -1/gg_J * (g_J - d_alpha*g_chi2), 0)
        if verbose:
            print(' score', score, 'fit', np.dot(diff,diff))
            print(' alpha', alpha, 'd_alpha', d_alpha)
        return d_b_i, d_alpha, score
    alpha = 0.
    b_i = m_i.copy()
    info = {'success':True, 'term':'maxiter', 'var0':var0, 'tol':tol}
    for i in range(maxiter):
        if verbose: print('Step %d:' % i)
        d_b_i, d_alpha, score = next_step(b_i, alpha, verbose=verbose)
        # Converged: gradient score small but still positive.
        if score < tol and score > 0:
            info['term'] = 'tol'
            break
        # Diverged: huge, NaN, or non-positive score.
        elif score > 1e10 or np.isnan(score) or score <= 0:
            info.update({'term':'divergence', 'success':False})
            break
        # Damped update of the image and the Lagrange multiplier; clipping
        # keeps pixel values in (lower, upper) so log(b_i/m_i) stays finite.
        b_i = np.clip(b_i + gain * d_b_i, lower, upper)
        alpha += gain * d_alpha
    b_i.shape = im.shape
    info.update({'res':im - np.fft.ifft2(np.fft.fft2(b_i) * inv_ker).real,
        'score': score, 'alpha': alpha, 'iter':i+1})
    return b_i, info
def maxent_findvar(im, ker, var=None, f_var0=.6, mdl=None, gain=.1, tol=1e-3,
        maxiter=200, lower=lo_clip_lev, upper=np.inf, verbose=False,
        maxiterok=False):
    """Frontend to maxent that searches for a variance where MEM converges.

    If the starting variance (var) is not specified, it is estimated as a
    fraction (f_var0) of the variance of the residual of a quick lsq
    deconvolution; an ever-widening bracket around that value is then
    scanned until a maxent run converges.  This function searches until
    it succeeds.

    Returns (cl, info) from the first converging maxent run.
    """
    # Fixes vs. the original: integer division for the lsq maxiter (a float
    # breaks range() on Python 3) and removal of a stray debug print(cnt).
    cl, info, cnt = None, None, -1
    if var is None:
        # Get a starting estimate of variance to use via residual of lsq
        junk, info = lsq(im, ker, mdl=mdl, gain=gain, tol=tol,
            maxiter=maxiter // 4, lower=lower, upper=upper, verbose=False)
        var = np.var(info['res'])
        if verbose: print('Using', f_var0, 'of LSQ estimate of var=', var)
        var *= f_var0
    else:
        if verbose: print('Using specified var=', var)
    while cl is None:
        # Each outer pass widens the search bracket around var.
        if cnt == -1: v = var
        else: v = var / (1.5**cnt)
        while cnt < 0 or v < var * (1.5**cnt):
            if verbose:
                print('Trying var=', v, end='')
                sys.stdout.flush()
            c, i = maxent(im, ker, v, mdl=mdl, gain=gain, tol=tol,
                maxiter=maxiter, lower=lower, upper=upper, verbose=False)
            if verbose:
                print('success %d,' % i['success'], 'term: %s,' % i['term'], 'score:', i['score'])
            # Check if fit converged
            if i['success'] and (maxiterok or i['term'] == 'tol'):
                cl, info = c, i
                break
            else:
                if not cl is None or cnt == -1: break
                v *= 1.2 ** (1./(2*(cnt+1)))
        cnt += 1
    if verbose: print('Done with MEM.')
    return cl, info
def anneal(im, ker, mdl=None, maxiter=1000, lower=lo_clip_lev, upper=np.inf,
        cooling=lambda i,x: 1e+1*(1-np.cos(i/50.))*(x**2), verbose=False):
    """Deconvolve ``im`` by ``ker`` via simulated annealing.

    The model is randomly perturbed each iteration, and a perturbation is
    kept only if it lowers the mean-squared residual.  Each pixel's
    perturbation scale is set by ``cooling(iteration, residual/q)``, so
    pixels with large residuals are explored more aggressively.  Slower
    than lsq for a known gradient, but less sensitive to gradient errors.

    cooling: function (iteration, residuals) -> per-pixel standard
        deviations for the perturbation noise; its scaling is critical.

    Returns (mdl, info) with the final residual ('res'), score, and
    iteration count in info.
    """
    if mdl is None:
        mdl = np.zeros_like(im)
    q = np.sqrt((ker**2).sum())
    ker_fft = np.fft.fft2(ker)

    def residual_of(model):
        # Residual after removing the model's kernel response.
        return im - np.fft.ifft2(np.fft.fft2(model) * ker_fft).real

    dif = residual_of(mdl)
    score = np.average(dif**2)
    info = {'success': True, 'term': 'maxiter'}
    for step in range(maxiter):
        perturbation = np.random.normal(scale=1., size=mdl.shape) * cooling(step, dif/q)
        trial_mdl = np.clip(mdl + perturbation, lower, upper)
        trial_dif = residual_of(trial_mdl)
        trial_score = np.average(trial_dif**2)
        if verbose: print('Step %d:' % step, trial_score, score)
        # Keep only improvements (greedy acceptance).
        if trial_score < score:
            mdl, dif, score = trial_mdl, trial_dif, trial_score
    info.update({'res': dif, 'score': score, 'iter': step+1})
    return mdl, info
|
HERA-TeamREPO_NAMEaipyPATH_START.@aipy_extracted@aipy-main@aipy@deconv.py@.PATH_END.py
|
{
"filename": "preprocess.py",
"repo_name": "adammoss/supernovae",
"repo_path": "supernovae_extracted/supernovae-master/preprocess.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
import glob
import csv
import gzip
import scipy.interpolate as si
from itertools import groupby
import random
import sys
import argparse
# Normalisation factors applied when reading light curves (1.0 = no scaling).
flux_norm = 1.0
time_norm = 1.0
position_norm = 1.0
# Default time-clustering scale used by time_collector (1 ~ group within a day).
grouping = 1
# Mapping from supernova type label to the integer code used in the data files.
key_types = {'Ia':1, 'II': 2, 'Ibc': 3, 'IIn': 21, 'IIP': 22, 'IIL': 23, 'Ib': 32, 'Ic': 33}
def index_min(values):
    '''
    Return the index of the minimum element of an array.
    * values is an array (intended to be times)
    - Used in time_collector() for grouping times
    - Used in parser_spline() for placing flux errors at the correct time in the time sequence
    '''
    # range() instead of the Python-2-only xrange(); works on both 2 and 3.
    return min(range(len(values)), key=values.__getitem__)
def spline(arr, t):
    '''
    Return an interpolating spline of flux versus time.
    * arr is [[times],[fluxes],[flux errors]]; with fewer than 4 observations
      it is padded IN PLACE so interpolation is still possible, and a linear
      (k=1) spline is used instead of the default cubic.
    * t is the array of grouped times, used only when padding sparse input.
    - Used in parser_spline() to create the spline of the fluxes
    '''
    n_obs = len(arr[0])
    if n_obs < 4:
        if n_obs == 0:
            # No data at all: fabricate three zero-flux anchor points.
            arr[0] = [t[0], int((t[-1]-t[0])/2), t[-1]]
            arr[1] = [0, 0, 0]
            arr[2] = [0, 0, 0]
        elif n_obs == 1:
            # A single observation: repeat it at the start and end times.
            arr[0] = [t[0], arr[0][0], t[-1]]
            arr[1] = [arr[1][0]] * 3
            arr[2] = [arr[2][0]] * 3
        return si.InterpolatedUnivariateSpline(arr[0], arr[1], k=1)
    return si.InterpolatedUnivariateSpline(arr[0], arr[1])
def time_collector(arr,frac=grouping):
    '''
    Returns an array of average times about clustered observation times.  Default grouping is
    for times on the order of 1 day, although the grouping scale is increased (frac += 0.1,
    i.e. finer bins) if any cluster collects more than 4 observations.  Also returns the
    indices of the closest times in each flux band and the final grouping fraction.
    * arr is an array containing all of the observation times
    * frac is the clustering scale where frac=1 is group times within a day
    * a is the array of grouped times
    - Used in parser_spline() for grouping flux errors to the nearest grouped time
    - Used in parser_augment() for grouping times from all observations
    '''
    bestclustering = True
    while bestclustering:
        # Bin times by floor(time * frac); each bin becomes one averaged time.
        # NOTE(review): groupby only merges *consecutive* equal keys, so arr
        # is assumed to be in time order.
        a = []
        for key, group in groupby(arr, key=lambda n: n//(1./frac)):
            s = sorted(group)
            a.append(np.sum(s)/len(s))
        # Second pass over the same binning: for every raw time, record the
        # index of the nearest raw observation time (via index_min).
        ind = []
        i = 0
        for key,group in groupby(arr, key=lambda n: n//(1./frac)):
            ind.append([])
            for j in group:
                ind[i].append(index_min(abs(j-np.array(arr))))
            i += 1
        # If any cluster holds more than 4 observations, shrink the bin
        # width (larger frac => narrower bins) and redo the clustering.
        if len([len(i) for i in ind if len(i)>4])!=0:
            frac += 0.1
        else:
            bestclustering = False
    return a,ind,frac
def create_colourband_array(ind,arr,err_arr,temp_arr,err_temp_arr):
    '''
    Append the (single) valid flux/error observation for one time cluster.
    * ind is the list of indices of observations belonging to this grouped time
    * arr is the array of all flux observations at all observation times
    * err_arr is the array of all flux error observations at all observation times
    * temp_arr is the array containing the fluxes at grouped times (appended to)
    * err_temp_arr is the array containing the flux errors at grouped times (appended to)
    * out is True if at most one valid observation fell in this cluster,
      False if there was more than one - the caller then reduces the
      grouping factor.  -999 marks missing data.
    - Used in parser_augment() to create the flux and flux error arrays at grouped times
    '''
    # Iterate the index list directly (the original used the Python-2-only
    # xrange()); -999 entries are treated as missing and skipped.
    temp = [arr[j] for j in ind if arr[j] != -999]
    err_temp = [err_arr[j] for j in ind if err_arr[j] != -999]
    if len(temp) == 0:
        # No valid observation in this cluster: record missing-data markers.
        temp_arr.append(-999)
        err_temp_arr.append(-999)
        out = True
    elif len(temp) > 1:
        # More than one observation per cluster: signal the caller to regroup.
        out = False
    else:
        # Exactly one observation.  NOTE(review): assumes the flux and its
        # error are missing together (err_temp non-empty whenever temp is).
        temp_arr.append(temp[0])
        err_temp_arr.append(err_temp[0])
        out = True
    return temp_arr,err_temp_arr,out
def fill_in_points(arr,err_arr):
    '''
    Returns flux and flux error arrays where missing data (-999) is filled in with a random
    value between the previous and the next filled array elements.  Missing initial or final
    data is filled in with the first or last non-missing data value respectively.  If there is
    no valid data at all, both arrays are filled with zeros.
    * arr is the array of fluxes (modified in place when any valid data exists)
    * err_arr is the array of flux errors (likewise modified in place)
    - Used in parser_augment() to fill in missing data in flux and flux error arrays.
    '''
    # range() instead of the Python-2-only xrange(); works on both 2 and 3.
    ind = np.where(np.array(arr)!=-999)[0]
    length = len(arr)
    if len(ind)==0:
        # No valid data anywhere: return all-zero arrays.
        arr = [0 for i in range(length)]
        err_arr = [0 for i in range(length)]
    else:
        # Fill interior gaps with uniform random draws between the
        # bracketing valid values.
        for i in range(len(ind)-1):
            diff = ind[i+1]-ind[i]
            arr[ind[i]+1:ind[i+1]] = np.random.uniform(arr[ind[i]],arr[ind[i+1]],diff-1)
            err_arr[ind[i]+1:ind[i+1]] = np.random.uniform(err_arr[ind[i]],err_arr[ind[i+1]],diff-1)
        # Extend the first valid value backwards to the start...
        for i in range(ind[0]):
            arr[i] = arr[ind[0]]
            err_arr[i] = err_arr[ind[0]]
        # ...and the last valid value forwards to the end.
        for i in range(ind[-1]+1, length):
            arr[i] = arr[ind[-1]]
            err_arr[i] = err_arr[ind[-1]]
    return arr,err_arr
def parser_last(filename):
    '''
    Reads and returns supernova data in a format to be read by the neural network.  Each OBS
    row carries forward the most recent flux/error seen in every band, so missing observation
    data is replaced by the previous non-missing value - steps in the data are present.
    * filename is a string containing the path to the supernova light curve data
    * survey is a string containing the survey name
    * snid is an integer containing the supernova ID
    * sn_type is an integer type code from the SNTYPE header line (None if absent)
    * ra / decl are floats containing the RA / Dec of the supernova (None if absent)
    * mwebv is a float describing the dust extinction
    * hostid is an integer containing the host galaxy ID
    * hostz is a tuple of floats: host photometric redshift and its error
    * spec is a tuple of floats: spectroscopic redshift and its error
    * sim_type is a string containing the simulated supernova type
    * sim_z is a float containing the simulated redshift of the supernova
    * obs is a sequence of arrays, each [time since first observation, fluxes in each
      colourband, flux errors in each colourband]
    - Used in __main__() to read in the data
    '''
    # Initialise everything that may be absent from the file header; the
    # original left sn_type/decl unbound (NameError) when those lines were
    # missing, and used the 'rU' open mode removed in Python 3.11.
    survey = snid = sn_type = ra = decl = mwebv = hostid = hostz = spec = sim_type = sim_z = None
    obs = []
    g = r = i = z = 0
    g_error = r_error = i_error = z_error = 0
    with open(filename, 'r') as f:
        first_obs = None
        for line in f:
            s = line.split(':')
            if len(s) > 0:
                if s[0] == 'SURVEY':
                    survey = s[1].strip()
                elif s[0] == 'SNID':
                    snid = int(s[1].strip())
                elif s[0] == 'SNTYPE':
                    sn_type = int(s[1].strip())
                elif s[0] == 'RA':
                    ra = float(s[1].split('deg')[0].strip())/position_norm
                elif s[0] == 'DECL':
                    decl = float(s[1].split('deg')[0].strip())/position_norm
                elif s[0] == 'MWEBV':
                    mwebv = float(s[1].split('MW')[0].strip())
                elif s[0] == 'HOST_GALAXY_GALID':
                    hostid = int(s[1].strip())
                elif s[0] == 'HOST_GALAXY_PHOTO-Z':
                    hostz = float(s[1].split('+-')[0].strip()), float(s[1].split('+-')[1].strip())
                elif s[0] == 'REDSHIFT_SPEC':
                    spec = float(s[1].split('+-')[0].strip()), float(s[1].split('+-')[1].strip())
                elif s[0] == 'SIM_COMMENT':
                    sim_type = s[1].split('SN Type =')[1].split(',')[0].strip()
                elif s[0] == 'SIM_REDSHIFT':
                    sim_z = float(s[1])
                elif s[0] == 'OBS':
                    # OBS row: time band field flux fluxerr ...
                    o = s[1].split()
                    if first_obs is None:
                        first_obs = float(o[0])
                    if o[1] == 'g':
                        g = float(o[3])/flux_norm
                        g_error = float(o[4])/flux_norm
                    elif o[1] == 'r':
                        r = float(o[3])/flux_norm
                        r_error = float(o[4])/flux_norm
                    elif o[1] == 'i':
                        i = float(o[3])/flux_norm
                        i_error = float(o[4])/flux_norm
                    elif o[1] == 'z':
                        z = float(o[3])/flux_norm
                        z_error = float(o[4])/flux_norm
                    # Bands not updated this row keep their previous values.
                    obs.append([(float(o[0]) - first_obs)/time_norm] + [g,r,i,z] + [g_error,r_error,i_error,z_error])
    return survey, snid, sn_type, sim_type, sim_z, ra, decl, mwebv, hostid, hostz, spec, obs
def parser_spline(filename):
    '''
    Reads and returns supernova data in a format to be read by the neural network.  Flux
    observations are interpolated with a spline at grouped times, and flux errors are
    attributed to the grouped time closest to when they were actually measured.
    * filename is a string containing the path to the supernova light curve data
    * survey is a string containing the survey name
    * snid is an integer containing the supernova ID
    * sn_type is an integer type code from the SNTYPE header line (None if absent)
    * ra / decl are floats containing the RA / Dec of the supernova (None if absent)
    * mwebv is a float describing the dust extinction
    * hostid is an integer containing the host galaxy ID
    * hostz is a tuple of floats: host photometric redshift and its error
    * spec is a tuple of floats: spectroscopic redshift and its error
    * sim_type is a string containing the simulated supernova type
    * sim_z is a float containing the simulated redshift of the supernova
    * obs is a sequence of arrays, each [grouped time, fluxes in each colourband,
      flux errors in each colourband]
    - Used in __main__() to read in the data
    '''
    # Initialise header fields that may be absent (sn_type/decl previously
    # unbound -> NameError); 'r' replaces the 'rU' mode removed in py3.11.
    survey = snid = sn_type = ra = decl = mwebv = hostid = hostz = spec = sim_type = sim_z = None
    obs = []
    t = []
    t_arr = []
    # Per band: [[times], [fluxes], [flux errors]]
    g_arr = [[],[],[]]
    r_arr = [[],[],[]]
    i_arr = [[],[],[]]
    z_arr = [[],[],[]]
    with open(filename, 'r') as f:
        first_obs = None
        for line in f:
            s = line.split(':')
            if len(s) > 0:
                if s[0] == 'SURVEY':
                    survey = s[1].strip()
                elif s[0] == 'SNID':
                    snid = int(s[1].strip())
                elif s[0] == 'SNTYPE':
                    sn_type = int(s[1].strip())
                elif s[0] == 'RA':
                    ra = float(s[1].split('deg')[0].strip())/position_norm
                elif s[0] == 'DECL':
                    decl = float(s[1].split('deg')[0].strip())/position_norm
                elif s[0] == 'MWEBV':
                    mwebv = float(s[1].split('MW')[0].strip())
                elif s[0] == 'HOST_GALAXY_GALID':
                    hostid = int(s[1].strip())
                elif s[0] == 'HOST_GALAXY_PHOTO-Z':
                    hostz = float(s[1].split('+-')[0].strip()), float(s[1].split('+-')[1].strip())
                elif s[0] == 'REDSHIFT_SPEC':
                    spec = float(s[1].split('+-')[0].strip()), float(s[1].split('+-')[1].strip())
                elif s[0] == 'SIM_COMMENT':
                    sim_type = s[1].split('SN Type =')[1].split(',')[0].strip()
                elif s[0] == 'SIM_REDSHIFT':
                    sim_z = float(s[1])
                elif s[0] == 'OBS':
                    o = s[1].split()
                    if first_obs is None:
                        first_obs = float(o[0])
                    t_arr.append((float(o[0])-first_obs)/time_norm)
                    if o[1] == 'g':
                        g_arr[0].append((float(o[0])-first_obs)/time_norm)
                        g_arr[1].append(float(o[3])/flux_norm)
                        g_arr[2].append(float(o[4])/flux_norm)
                    elif o[1] == 'r':
                        r_arr[0].append((float(o[0])-first_obs)/time_norm)
                        r_arr[1].append(float(o[3])/flux_norm)
                        r_arr[2].append(float(o[4])/flux_norm)
                    elif o[1] == 'i':
                        i_arr[0].append((float(o[0])-first_obs)/time_norm)
                        i_arr[1].append(float(o[3])/flux_norm)
                        i_arr[2].append(float(o[4])/flux_norm)
                    elif o[1] == 'z':
                        z_arr[0].append((float(o[0])-first_obs)/time_norm)
                        z_arr[1].append(float(o[3])/flux_norm)
                        z_arr[2].append(float(o[4])/flux_norm)
    # spline() pads sparse/empty bands IN PLACE, so after these calls every
    # band has at least 3 time/flux/error entries.
    g_spline = spline(g_arr,t_arr)
    r_spline = spline(r_arr,t_arr)
    i_spline = spline(i_arr,t_arr)
    z_spline = spline(z_arr,t_arr)
    t,ind,frac = time_collector(t_arr)
    # Nearest-observation lookup needs array arithmetic: the original
    # subtracted a float from a plain Python list, which raises TypeError.
    g_times = np.array(g_arr[0])
    r_times = np.array(r_arr[0])
    i_times = np.array(i_arr[0])
    z_times = np.array(z_arr[0])
    obs = [[t[k],
            g_spline(t[k]).tolist(), r_spline(t[k]).tolist(),
            i_spline(t[k]).tolist(), z_spline(t[k]).tolist(),
            g_arr[2][index_min(abs(g_times-t[k]))],
            r_arr[2][index_min(abs(r_times-t[k]))],
            i_arr[2][index_min(abs(i_times-t[k]))],
            z_arr[2][index_min(abs(z_times-t[k]))]] for k in range(len(t))]
    return survey, snid, sn_type, sim_type, sim_z, ra, decl, mwebv, hostid, hostz, spec, obs
def parser_augment(filename):
    '''
    Read one supernova light-curve file into the format consumed by the neural
    network. Flux observations and errors are grouped by time and any missing
    band measurements are later filled in (via fill_in_points) with random
    values between the neighbouring non-missing elements, so running this
    repeatedly augments the train/test set. This is the preferred parser.

    Parameters:
    * filename: path to the supernova light-curve data file.

    Returns (survey, snid, sn_type, sim_type, sim_z, ra, decl, mwebv,
             hostid, hostz, spec, obs) where:
    * survey: survey name (string)
    * snid: supernova ID (int)
    * sn_type: supernova type code from the SNTYPE header line (int)
    * ra, decl: position in degrees, divided by position_norm (floats)
    * mwebv: Milky-Way dust extinction (float)
    * hostid: host galaxy ID (int)
    * hostz: (photometric redshift, error) of the host galaxy
    * spec: (spectroscopic redshift, error)
    * sim_type: simulated supernova type (string)
    * sim_z: simulated redshift (float)
    * obs: list of [time, g, r, i, z, g_err, r_err, i_err, z_err] rows,
      normalised by time_norm / flux_norm, grouped by time_collector.
    - Used in __main__() to read in the data.
    '''
    # FIX: previously `dec` was initialised but the code assigns/returns
    # `decl`, and `sn_type` was never initialised, so a file missing those
    # header lines raised NameError at the return statement.
    survey = snid = sn_type = ra = decl = mwebv = hostid = hostz = spec = sim_type = sim_z = None
    obs = []
    with open(filename, 'rU') as f:
        first_obs = None
        for line in f:
            s = line.split(':')
            # Per-line defaults: -999 marks a band not measured on this epoch;
            # fill_in_points later replaces these sentinels.
            g = r = i = z = -999
            g_error = r_error = i_error = z_error = -999
            if len(s) > 0:
                if s[0] == 'SURVEY':
                    survey = s[1].strip()
                elif s[0] == 'SNID':
                    snid = int(s[1].strip())
                elif s[0] == 'SNTYPE':
                    sn_type = int(s[1].strip())
                elif s[0] == 'RA':
                    ra = float(s[1].split('deg')[0].strip())/position_norm
                elif s[0] == 'DECL':
                    decl = float(s[1].split('deg')[0].strip())/position_norm
                elif s[0] == 'MWEBV':
                    mwebv = float(s[1].split('MW')[0].strip())
                elif s[0] == 'HOST_GALAXY_GALID':
                    hostid = int(s[1].strip())
                elif s[0] == 'HOST_GALAXY_PHOTO-Z':
                    hostz = float(s[1].split('+-')[0].strip()), float(s[1].split('+-')[1].strip())
                elif s[0] == 'REDSHIFT_SPEC':
                    spec = float(s[1].split('+-')[0].strip()), float(s[1].split('+-')[1].strip())
                elif s[0] == 'SIM_COMMENT':
                    sim_type = s[1].split('SN Type =')[1].split(',')[0].strip()
                elif s[0] == 'SIM_REDSHIFT':
                    sim_z = float(s[1])
                elif s[0] == 'OBS':
                    # OBS line layout: MJD FLT FIELD FLUXCAL FLUXCALERR ...
                    o = s[1].split()
                    if first_obs is None:
                        first_obs = float(o[0])
                    if o[1] == 'g':
                        g = float(o[3])/flux_norm
                        g_error = float(o[4])/flux_norm
                    elif o[1] == 'r':
                        r = float(o[3])/flux_norm
                        r_error = float(o[4])/flux_norm
                    elif o[1] == 'i':
                        i = float(o[3])/flux_norm
                        i_error = float(o[4])/flux_norm
                    elif o[1] == 'z':
                        z = float(o[3])/flux_norm
                        z_error = float(o[4])/flux_norm
                    obs.append([(float(o[0]) - first_obs)/time_norm] + [g,r,i,z] + [g_error,r_error,i_error,z_error])
    # Split the raw rows into one array per band (columns 1-4 are fluxes,
    # 5-8 the matching errors).
    t_arr = [obs[i][0] for i in xrange(len(obs))]
    g_arr = [obs[i][1] for i in xrange(len(obs))]
    g_err_arr = [obs[i][5] for i in xrange(len(obs))]
    r_arr = [obs[i][2] for i in xrange(len(obs))]
    r_err_arr = [obs[i][6] for i in xrange(len(obs))]
    i_arr = [obs[i][3] for i in xrange(len(obs))]
    i_err_arr = [obs[i][7] for i in xrange(len(obs))]
    z_arr = [obs[i][4] for i in xrange(len(obs))]
    z_err_arr = [obs[i][8] for i in xrange(len(obs))]
    # Grow the grouping fraction until every time group yields a consistent
    # colour-band array (all create_colourband_array calls succeed).
    correctplacement = True
    frac = grouping
    while correctplacement:
        t,index,frac = time_collector(t_arr,frac)
        g_temp_arr = []
        g_err_temp_arr = []
        r_temp_arr = []
        r_err_temp_arr = []
        i_temp_arr = []
        i_err_temp_arr = []
        z_temp_arr = []
        z_err_temp_arr = []
        tot = []
        for i in xrange(len(index)):
            g_temp_arr,g_err_temp_arr,gfail = create_colourband_array(index[i],g_arr,g_err_arr,g_temp_arr,g_err_temp_arr)
            r_temp_arr,r_err_temp_arr,rfail = create_colourband_array(index[i],r_arr,r_err_arr,r_temp_arr,r_err_temp_arr)
            i_temp_arr,i_err_temp_arr,ifail = create_colourband_array(index[i],i_arr,i_err_arr,i_temp_arr,i_err_temp_arr)
            z_temp_arr,z_err_temp_arr,zfail = create_colourband_array(index[i],z_arr,z_err_arr,z_temp_arr,z_err_temp_arr)
            tot.append(gfail*rfail*ifail*zfail)
        if all(tot):
            correctplacement = False
        else:
            frac += 0.1
    # Replace the -999 sentinels with random draws between neighbouring
    # measured values (the data-augmentation step).
    g_temp_arr,g_err_temp_arr = fill_in_points(g_temp_arr,g_err_temp_arr)
    r_temp_arr,r_err_temp_arr = fill_in_points(r_temp_arr,r_err_temp_arr)
    i_temp_arr,i_err_temp_arr = fill_in_points(i_temp_arr,i_err_temp_arr)
    z_temp_arr,z_err_temp_arr = fill_in_points(z_temp_arr,z_err_temp_arr)
    obs = [[t[i],g_temp_arr[i],r_temp_arr[i],i_temp_arr[i],z_temp_arr[i],g_err_temp_arr[i],r_err_temp_arr[i],i_err_temp_arr[i],z_err_temp_arr[i]] for i in xrange(len(t))]
    return survey, snid, sn_type, sim_type, sim_z, ra, decl, mwebv, hostid, hostz, spec, obs
if __name__ == '__main__':
'''
Program to preprocess supernovae data. Reads in all supernova data and writes it out to one file to
be read in by the neural network training program.
- Reads in files from ./data/SIMGEN_PUBLIC_DES/ which contains all light curve data.
- Creates files in ./data/
'''
parser = argparse.ArgumentParser(description='')
parser.add_argument('-p','--p', type=str, help='Parser type')
parser.add_argument('-pr','--pr', type=str, help='File prefix')
parser.add_argument('-na','--na', type=int, help='Number of augmentations')
args = parser.parse_args()
if args.na:
nb_augment = args.na
else:
nb_augment = 5
if args.p:
if args.p == 'augment':
parser = parser_augment
elif args.p == 'spline':
parser = parser_spline
nb_augment = 1
elif args.p == 'last':
parser = parser_last
nb_augment = 1
else:
parser = parser_augment
else:
parser = parser_augment
if args.pr:
prefix = args.pr
else:
prefix = ''
for i in xrange(1,nb_augment+1):
print 'Processing augmentation: ',i
if prefix:
fhost = open('data/'+prefix+'_unblind_hostz_'+str(i)+'.csv', 'w')
fnohost = open('data/'+prefix+'_unblind_nohostz_'+str(i)+'.csv', 'w')
else:
fhost = open('data/unblind_hostz_'+str(i)+'.csv', 'w')
fnohost = open('data/unblind_nohostz_'+str(i)+'.csv', 'w')
whost = csv.writer(fhost)
wnohost = csv.writer(fnohost)
sn_types = {}
nb_sn = 0
for f in glob.glob('data/SIMGEN_PUBLIC_DES/DES_*.DAT'):
survey, snid, sn_type, sim_type, sim_z, ra, decl, mwebv, hostid, hostz, spec, obs = parser(f)
try:
unblind = [sim_z, key_types[sim_type]]
except:
print 'No information for', snid
for o in obs:
whost.writerow([snid,o[0],ra,decl,mwebv,hostz[0]] + o[1:9] + unblind)
wnohost.writerow([snid,o[0],ra,decl,mwebv] + o[1:9] + unblind)
try:
sn_types[unblind[1]] += 1
except:
sn_types[unblind[1]] = 0
nb_sn += 1
fhost.close()
fnohost.close()
print 'Num train: ', nb_sn
print 'SN types: ', sn_types
|
adammossREPO_NAMEsupernovaePATH_START.@supernovae_extracted@supernovae-master@preprocess.py@.PATH_END.py
|
{
"filename": "_circle.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/mapbox/layer/_circle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CircleValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``circle`` property of ``layout.mapbox.layer``."""

    def __init__(
        self, plotly_name="circle", parent_name="layout.mapbox.layer", **kwargs
    ):
        # Resolve overridable defaults before delegating to the base class.
        data_class_str = kwargs.pop("data_class_str", "Circle")
        data_docs = kwargs.pop(
            "data_docs",
            """
            radius
                Sets the circle radius
                (mapbox.layer.paint.circle-radius). Has an
                effect only when `type` is set to "circle".
""",
        )
        super(CircleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@mapbox@layer@_circle.py@.PATH_END.py
|
{
"filename": "tpu_embedding_v2_hd_valid_input_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/tpu/tests/tpu_embedding_v2_hd_valid_input_test.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
import numpy as np
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_base_test
class TPUEmbeddingTest(tpu_embedding_base_test.TPUEmbeddingBaseTest):
  """Input-shape handling tests for the TPUEmbedding mid-level API."""

  def test_enqueue_dense_sparse_ragged(self):
    """Enqueues one dense, one sparse and one ragged feature in a single call."""
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    dataset = self._create_high_dimensional_dense_dataset(strategy)
    # experimental_fetch_to_device=False keeps the inputs on the host so the
    # embedding enqueue op can consume them directly.
    dense_iter = iter(
        strategy.experimental_distribute_dataset(
            dataset,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))
    sparse = self._create_high_dimensional_sparse_dataset(strategy)
    sparse_iter = iter(
        strategy.experimental_distribute_dataset(
            sparse,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))
    ragged = self._create_high_dimensional_ragged_dataset(strategy)
    ragged_iter = iter(
        strategy.experimental_distribute_dataset(
            ragged,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))
    # Build with an explicit per-feature output shape; only the trailing
    # dimension differs between the three features.
    mid_level_api.build([
        TensorShape([self.batch_size, self.data_batch_size, 1]),
        TensorShape([self.batch_size, self.data_batch_size, 2]),
        TensorShape([self.batch_size, self.data_batch_size, 3])
    ])

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      # One feature of each input kind, each taken from its own iterator.
      features = (next(dense_iter)[0], next(sparse_iter)[1],
                  next(ragged_iter)[2])
      mid_level_api.enqueue(features, training=False)
      return strategy.run(step)

    test_fn()

  def test_different_input_shapes(self):
    """Verifies output shapes are inferred per feature from enqueued inputs."""
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    sparse = self._create_high_dimensional_sparse_dataset(strategy)
    sparse_iter = iter(
        strategy.experimental_distribute_dataset(
            sparse,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))
    # Create a feature with shape (1, 3, 1)
    dense_feature = constant_op.constant(
        np.zeros(3), shape=(1, 3, 1), dtype=dtypes.int32)
    dense_dataset = dataset_ops.DatasetV2.from_tensors(
        dense_feature).unbatch().repeat().batch(
            1 * strategy.num_replicas_in_sync, drop_remainder=True)
    dense_iter = iter(
        strategy.experimental_distribute_dataset(
            dense_dataset,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      features = (next(dense_iter), next(sparse_iter)[1], next(sparse_iter)[2])
      mid_level_api.enqueue(features, training=False)
      return strategy.run(step)

    test_fn()

    # The dense feature keeps its own (1, 3) shape; the sparse features get
    # the configured batch dimensions.
    self.assertEqual(mid_level_api._output_shapes, [
        TensorShape((1, 3)),
        TensorShape((self.batch_size, self.data_batch_size)),
        TensorShape((self.batch_size, self.data_batch_size))
    ])

  def test_output_shapes_priority_over_feature_config_and_build(self):
    """Pre-set _output_shapes must win over shapes passed to build()."""
    _, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    # The output shapes setting in the feature config has the first priority.
    mid_level_api._output_shapes = [TensorShape((2, 4)) for _ in range(3)]
    mid_level_api.build([TensorShape((2, None, None)) for _ in range(3)])
    self.assertEqual(mid_level_api._output_shapes,
                     [TensorShape((2, 4)) for _ in range(3)])
if __name__ == '__main__':
  # Enable TF2 semantics (eager execution, v2 APIs) before the test runner.
  v2_compat.enable_v2_behavior()
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@tpu@tests@tpu_embedding_v2_hd_valid_input_test.py@.PATH_END.py
|
{
"filename": "api.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/frontends/ramses/api.py",
"type": "Python"
}
|
from . import tests
from .data_structures import RAMSESDataset
from .definitions import field_aliases
from .fields import RAMSESFieldInfo
from .io import IOHandlerRAMSES
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@frontends@ramses@api.py@.PATH_END.py
|
{
"filename": "datamodules.py",
"repo_name": "devinamhn/RadioGalaxies-BNNs",
"repo_path": "RadioGalaxies-BNNs_extracted/RadioGalaxies-BNNs-main/radiogalaxies_bnns/inference/datamodules.py",
"type": "Python"
}
|
from pathlib import Path
import numpy as np
import torch
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision.transforms import InterpolationMode
from pytorch_lightning.demos.mnist_datamodule import MNIST
from torch.utils.data import DataLoader, random_split
from torch.utils.data.sampler import SubsetRandomSampler
import pytorch_lightning as pl
#from utils import *
from PIL import Image
from radiogalaxies_bnns.datasets import mirabest
class MNISTDataModule(pl.LightningDataModule):
    """Lightning data module for MNIST with a fixed 50k/10k train/val split.

    Parameters
    ----------
    batch_size : int
        Mini-batch size used when ``hmc`` is False.
    hmc : bool
        If True, each dataloader returns its entire split as a single batch
        (HMC samplers need full-dataset gradients).
    DATASETS_PATH : Path
        Directory where the MNIST data is stored/downloaded.
    """

    def __init__(self, batch_size, hmc, DATASETS_PATH=Path('./MNISTdataset')):
        super().__init__()
        # Standard MNIST normalisation constants (dataset mean / std).
        transform = transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        )
        dataset = MNIST(DATASETS_PATH, train=True, download=True, transform=transform)
        self.mnist_test = MNIST(DATASETS_PATH, train=False, download=True, transform=transform)
        self.mnist_train, self.mnist_val = random_split(dataset, [50000, 10000])
        self.batch_size = batch_size
        if hmc:
            # Full-batch loaders: one batch per split.
            self.batch_size_train = 50000
            self.batch_size_val = 10000
            self.batch_size_test = 10000
        else:
            self.batch_size_train = self.batch_size_val = self.batch_size_test = batch_size

    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=self.batch_size_train)

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=self.batch_size_val)

    def test_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=self.batch_size_test)

    def predict_dataloader(self):
        # Lightning's predict loop calls this; predictions run over the test split.
        return DataLoader(self.mnist_test, batch_size=self.batch_size_test)
class MiraBestDataModule(pl.LightningDataModule):
    """Lightning data module for the MiraBest radio-galaxy dataset.

    Builds train/validation loaders from the confident (optionally plus
    uncertain) FRI/FRII samples with a seeded shuffled 80/20 split, and a
    non-augmented test loader. When ``hmc`` is True each loader returns its
    whole split in a single batch.
    """

    def __init__(self, config_dict, hmc, seed_dataset=15):
        super().__init__()
        self.batch_size = config_dict['training']['batch_size']
        # NOTE(review): stored but never used — train_val_loader hard-codes a
        # 0.2 validation fraction below; confirm whether frac_val should apply.
        self.validation_split = config_dict['training']['frac_val']
        self.dataset = config_dict['data']['dataset']
        self.path = Path(config_dict['data']['datadir'])
        self.datamean = config_dict['data']['datamean']
        self.datastd = config_dict['data']['datastd']
        # NOTE(review): hard-coded string flag ('True'/'False' strings, not
        # bools); config_dict['data']['augment'] is currently ignored.
        self.augment = 'True'
        self.imsize = config_dict['training']['imsize']
        self.seed_dataset = seed_dataset
        if(hmc == True):
            # Full-batch loaders for HMC (sizes match the fixed MiraBest splits).
            self.batch_size_train = 584
            self.batch_size_val = 145
            self.batch_size_test = 104
        else:
            self.batch_size_train = self.batch_size
            self.batch_size_val = self.batch_size
            self.batch_size_test = self.batch_size

    def transforms(self, aug):
        """Return the torchvision transform pipeline; ``aug`` is the string
        'True'/'False' flag controlling augmentation."""
        if(aug == 'False'):
            transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((self.datamean ,), (self.datastd,))])
        else:
            print("AUGMENTING")
            # Augmentation: centre-crop then random rotation before
            # normalisation.
            crop = transforms.CenterCrop(self.imsize)
            transform = transforms.Compose([crop,
                                transforms.RandomRotation(360, interpolation=InterpolationMode.BILINEAR, expand=False),
                                transforms.ToTensor(),
                                transforms.Normalize((self.datamean ,), (self.datastd,)),
                                ])
        return transform

    def train_val_loader(self, set):
        """Build the train or validation DataLoader (``set`` is 'train' or 'val').

        NOTE(review): dataset names other than 'MBFRConf' /
        'MBFRConf+Uncert' leave train_data_conf undefined and raise
        NameError below — confirm whether other values should be rejected
        explicitly.
        """
        transform = self.transforms(self.augment)
        if(self.dataset =='MBFRConf'):
            train_data_confident = mirabest.MBFRConfident(self.path, train=True,
                                    transform=transform, target_transform=None,
                                    download=True)
            train_data_conf = train_data_confident
        elif(self.dataset == 'MBFRConf+Uncert'):
            train_data_confident = mirabest.MBFRConfident(self.path, train=True,
                                    transform=transform, target_transform=None,
                                    download=True)
            train_data_uncertain = mirabest.MBFRUncertain(self.path, train=True,
                                    transform=transform, target_transform=None,
                                    download=True)
            # Concatenate confident + uncertain samples into one dataset.
            train_data_conf= torch.utils.data.ConcatDataset([train_data_confident, train_data_uncertain])
        # Seeded shuffled train/validation split.
        dataset_size = len(train_data_conf)
        indices = list(range(dataset_size))
        # NOTE(review): hard-coded 20% split; self.validation_split is ignored.
        split = int(dataset_size*0.2)
        shuffle_dataset = True
        random_seed = self.seed_dataset
        if shuffle_dataset :
            np.random.seed(random_seed)
            np.random.shuffle(indices)
        train_indices, val_indices = indices[split:], indices[:split]
        # Creating data samplers and loaders:
        train_sampler = SubsetRandomSampler(train_indices)
        valid_sampler = SubsetRandomSampler(val_indices)
        train_loader = torch.utils.data.DataLoader(train_data_conf, batch_size=self.batch_size_train, sampler=train_sampler)
        validation_loader = torch.utils.data.DataLoader(train_data_conf, batch_size=self.batch_size_val, sampler=valid_sampler)
        if(set == 'train'):
            return train_loader
        elif(set == 'val'):
            return validation_loader

    def train_dataloader(self):
        return self.train_val_loader('train')

    def val_dataloader(self):
        return self.train_val_loader('val')

    def test_dataloader(self):
        """Test loader; no augmentation is applied to the test set."""
        transform = self.transforms(aug='False')
        if(self.dataset =='MBFRConf'):
            test_data_confident = mirabest.MBFRConfident(self.path, train=False,
                                    transform=transform, target_transform=None,
                                    download=False)
            test_data_conf = test_data_confident
        elif(self.dataset == 'MBFRConf+Uncert'):
            # Confident subset.
            test_data_confident = mirabest.MBFRConfident(self.path, train=False,
                                    transform=transform, target_transform=None,
                                    download=True)
            # Uncertain subset.
            test_data_uncertain = mirabest.MBFRUncertain(self.path, train=False,
                                    transform=transform, target_transform=None,
                                    download=True)
            # Concatenate confident + uncertain samples into one dataset.
            test_data_conf = torch.utils.data.ConcatDataset([test_data_confident, test_data_uncertain])
        test_loader = torch.utils.data.DataLoader(dataset=test_data_conf, batch_size=self.batch_size_test,shuffle=False)
        return test_loader
def testloader_mb_uncert(test_data_uncert, path):
    """Build the test DataLoader for one MiraBest uncertainty subset.

    Parameters
    ----------
    test_data_uncert : str
        One of 'MBFRConfident', 'MBFRUncertain' or 'MBHybrid'.
    path : str or Path
        Root directory of the MiraBest data.

    Returns
    -------
    (test_dataloader, test_data1, data_type, test_data)
        The DataLoader over the normalised subset, the same subset without
        any transform (for inspection), a short type tag
        ('Conf'/'Uncert'/'Hybrid'), and the transformed dataset itself.

    Raises
    ------
    ValueError
        If ``test_data_uncert`` is not one of the three known subsets.
    """
    # Fixed MiraBest normalisation constants.
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.0031,), (0.0350,))]
    )
    if test_data_uncert == 'MBFRConfident':
        test_data = mirabest.MBFRConfident(path, train=False,
                                           transform=transform, target_transform=None,
                                           download=False)
        # Batch size equals the subset size, so one batch covers the whole set.
        test_dataloader = torch.utils.data.DataLoader(dataset=test_data, batch_size=104, shuffle=False)
        test_data1 = mirabest.MBFRConfident(path, train=False,
                                            transform=None, target_transform=None,
                                            download=False)
        data_type = 'Conf'
    elif test_data_uncert == 'MBFRUncertain':
        test_data = mirabest.MBFRUncertain(path, train=False,
                                           transform=transform, target_transform=None,
                                           download=False)
        test_dataloader = torch.utils.data.DataLoader(dataset=test_data, batch_size=49, shuffle=False)
        test_data1 = mirabest.MBFRUncertain(path, train=False,
                                            transform=None, target_transform=None,
                                            download=False)
        data_type = 'Uncert'
    elif test_data_uncert == 'MBHybrid':
        # Hybrid sources only exist in the training split.
        test_data = mirabest.MBHybrid(path, train=True,
                                      transform=transform, target_transform=None,
                                      download=False)
        test_dataloader = torch.utils.data.DataLoader(dataset=test_data, batch_size=30, shuffle=False)
        test_data1 = mirabest.MBHybrid(path, train=True,
                                       transform=None, target_transform=None,
                                       download=False)
        data_type = 'Hybrid'
    else:
        # FIX: previously this branch only printed a warning and then crashed
        # with NameError at the return statement; fail fast with a clear error.
        raise ValueError("Test data for uncertainty quantification misspecified")
    return test_dataloader, test_data1, data_type, test_data
|
devinamhnREPO_NAMERadioGalaxies-BNNsPATH_START.@RadioGalaxies-BNNs_extracted@RadioGalaxies-BNNs-main@radiogalaxies_bnns@inference@datamodules.py@.PATH_END.py
|
{
"filename": "test_compose.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/data_objects/tests/test_compose.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_array_equal
from yt.testing import fake_amr_ds, fake_random_ds
from yt.units._numpy_wrapper_functions import uintersect1d
from yt.units.yt_array import YTArray
def setup_module():
    """Pytest module hook: flag to yt that we are running inside the test suite."""
    from yt.config import ytcfg

    ytcfg["yt", "internals", "within_testing"] = True
# Copied from test_boolean for computing a unique identifier for
# each cell from cell positions
# Copied from test_boolean: derive a unique identifier for each cell from its
# position on the finest (1/8192) grid.
def _IDFIELD(field, data):
    """Return a unique per-cell ID computed from the cell-center position."""
    span = data.ds.domain_right_edge - data.ds.domain_left_edge
    min_dx = YTArray(1.0 / 8192, units="code_length", registry=data.ds.unit_registry)
    delta = span / min_dx
    # Integer-like cell coordinates on the finest grid (offset by half a cell).
    xi = (data["index", "x"] - min_dx / 2.0) / min_dx
    yi = (data["index", "y"] - min_dx / 2.0) / min_dx
    zi = (data["index", "z"] - min_dx / 2.0) / min_dx
    # Row-major flattening of (xi, yi, zi) into a single scalar ID.
    return xi + delta[0] * (yi + delta[1] * zi)
def test_compose_no_overlap():
    r"""Composed data objects with disjoint regions must select nothing."""
    nothing = np.array([])
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(64, nprocs=nprocs)
        ds.add_field(("index", "ID"), sampling_type="cell", function=_IDFIELD)

        # Base 3D selectors, all centred in the lower octant.
        base_center = [0.25] * 3
        normal = [1, 0, 0]
        radius = height = 0.15
        sources = [
            ds.sphere(base_center, radius),
            ds.region(base_center, [0.1] * 3, [0.4] * 3),
            ds.disk(base_center, normal, radius, height),
        ]

        # A second set of positions guaranteed not to intersect the first.
        far_center = [0.75] * 3
        far_left = [0.6] * 3
        far_right = [0.9] * 3

        for src in sources:
            # Sub-select 0/1/2/3D objects that lie entirely outside `src`.
            subsels = [
                ds.sphere(far_center, radius, data_source=src),
                ds.region(far_center, far_left, far_right, data_source=src),
                ds.disk(far_center, normal, radius, height, data_source=src),
            ]
            for axis in range(3):
                subsels.append(ds.slice(axis, far_center[axis], data_source=src))
            for axis in range(3):
                subsels.append(
                    ds.ortho_ray(
                        axis, far_center[0:axis] + far_center[axis + 1 :], data_source=src
                    )
                )
            subsels.append(ds.point(far_center, data_source=src))
            for sel in subsels:
                assert_array_equal(sel["index", "ID"], nothing)
def test_compose_overlap():
    r"""Test to make sure that composed data objects that do
    overlap behave the way we expect: composing a selector with a
    data_source selects exactly the set-intersection of the two plain
    selections.
    """
    for n in [1, 2, 4, 8]:
        ds = fake_random_ds(64, nprocs=n)
        ds.add_field(("index", "ID"), sampling_type="cell", function=_IDFIELD)

        # position parameters for initial region
        center = [0.4, 0.5, 0.5]
        left_edge = [0.1] * 3
        right_edge = [0.7] * 3
        normal = [1, 0, 0]
        radius = height = 0.15

        # initial 3D regions
        sources = [
            ds.sphere(center, radius),
            ds.region(center, left_edge, right_edge),
            ds.disk(center, normal, radius, height),
        ]

        # position parameters for overlapping regions
        center = [0.6, 0.5, 0.5]
        left_edge = [0.3] * 3
        right_edge = [0.9] * 3

        for data1 in sources:
            id1 = data1["index", "ID"]

            def _check_intersection(plain, composed):
                # The composed selection's IDs must equal the sorted
                # intersection of the base selection with the plain one.
                id2 = plain["index", "ID"]
                id3 = composed["index", "ID"]
                id3.sort()
                assert_array_equal(uintersect1d(id1, id2), id3)

            # subselect 0, 1, 2, 3D overlapping regions
            _check_intersection(
                ds.sphere(center, radius),
                ds.sphere(center, radius, data_source=data1),
            )
            _check_intersection(
                ds.region(center, left_edge, right_edge),
                ds.region(center, left_edge, right_edge, data_source=data1),
            )
            _check_intersection(
                ds.disk(center, normal, radius, height),
                ds.disk(center, normal, radius, height, data_source=data1),
            )
            for d in range(3):
                _check_intersection(
                    ds.slice(d, center[d]),
                    ds.slice(d, center[d], data_source=data1),
                )
            for d in range(3):
                _check_intersection(
                    ds.ortho_ray(d, center[0:d] + center[d + 1 :]),
                    ds.ortho_ray(d, center[0:d] + center[d + 1 :], data_source=data1),
                )
            _check_intersection(
                ds.point(center),
                ds.point(center, data_source=data1),
            )
def test_compose_max_level_min_level():
    """A max_level cap on the source container must propagate to composed objects."""
    ds = fake_amr_ds()
    ad = ds.all_data()
    ad.max_level = 2
    slc = ds.slice("x", 0.5, data_source=ad)
    # The composed slice must not contain cells above the capped level.
    assert slc["index", "grid_level"].max() == 2
    frb = slc.to_frb(1.0, 128)
    # Every FRB pixel is covered (nonzero density) despite the level cap.
    assert np.all(frb["stream", "Density"] > 0)
    assert frb["index", "grid_level"].max() == 2
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@data_objects@tests@test_compose.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.