metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "measurement.py",
"repo_name": "mpound/pdrtpy",
"repo_path": "pdrtpy_extracted/pdrtpy-master/pdrtpy/measurement.py",
"type": "Python"
}
|
"""Manage spectral line or continuum observations"""
# @Todo it would be nice to be able to get Measurment[index] as a Measurement instead of
# a float. This is the behavior for CCDData, somehow lost in Measurement See NDUncertainty __getitem__
# this will have ripple effects if implemented.
import warnings
from copy import deepcopy
from os import remove
from os.path import exists
import astropy.units as u
import numpy as np
import numpy.ma as ma
from astropy import log
from astropy.io import fits, registry
from astropy.nddata import CCDData, StdDevUncertainty
from astropy.table import Table
from scipy.interpolate import RegularGridInterpolator
from . import pdrutils as utils
class Measurement(CCDData):
    r"""Measurement represents one or more observations of a given spectral
    line or continuum.  It is made up of a value array, an
    uncertainty array, units, and a string identifier.  It is based
    on :class:`astropy.nddata.CCDData`.  It can represent a single pixel
    observation or an image.   Mathematical operations using Measurements
    will correctly propagate errors.

    Typically, Measurements will be instantiated from a FITS file by using the :func:`read` or :func:`make_measurement` methods.  For a list of recognized spectral line identifiers, see :meth:`~pdrtpy.modelset.Modelset.supported_lines`.

    :param data:  The actual data contained in this :class:`Measurement` object.
        Note that the data will always be saved by *reference*, so you should
        make a copy of the ``data`` before passing it in if that's the desired
        behavior.
    :type data: :class:`numpy.ndarray`-like

    :param uncertainty: Uncertainties on the data. If the uncertainty is a :class:`numpy.ndarray`, it assumed to be, and stored as, a :class:`astropy.nddata.StdDevUncertainty`.  Required.
    :type uncertainty: :class:`astropy.nddata.StdDevUncertainty`, \
            :class:`astropy.nddata.VarianceUncertainty`, \
            :class:`astropy.nddata.InverseVariance` or :class:`numpy.ndarray`

    :param unit: The units of the data.  Required.
    :type unit: :class:`astropy.units.Unit` or str

    :param identifier: A string indicating what this is an observation of, e.g., "CO_10" for CO(1-0)
    :type identifier: str

    :param title: A formatted string (e.g., LaTeX) describing this observation that can be used for plotting. Python r-strings are accepted, e.g., r'$^{13}$CO(3-2)' would give :math:`^{13}{\rm CO(3-2)}`.
    :type title: str

    :param bmaj: [optional] beam major axis diameter. This will be converted to degrees for storage in FITS header
    :type bmaj: :class:`astropy.units.Quantity`

    :param bmin: [optional] beam minor axis diameter. This will be converted to degrees for storage in FITS header
    :type bmin: :class:`astropy.units.Quantity`

    :param bpa: [optional] beam position angle. This will be converted to degrees for storage in FITS header
    :type bpa: :class:`astropy.units.Quantity`

    :raises TypeError: if beam parameters are not Quantities

    Measurements can also be instantiated by the **read(\\*args, \\**kwargs)**,
    to create a Measurement instance based on a ``FITS`` file.
    This method uses :func:`fits_measurement_reader` with the provided
    parameters.  Example usage:

    .. code-block:: python

       from pdrtpy.measurement import Measurement

       my_obs = Measurement.read("file.fits",identifier="CII_158")
       my_other_obs = Measurement.read("file2.fits",identifier="CO2_1",
                                        unit="K km/s",
                                        bmaj=9.3*u.arcsec,
                                        bmin=14.1*u.arcsec,
                                        bpa=23.2*u.degrees)

    By default image axes with only a single dimension are removed on read.  If you do not want this behavior, use `read(squeeze=False)`. See also: :class:`astropy.nddata.CCDData`.
    """
    def __init__(self, *args, **kwargs):
        # Pop the Measurement-specific keywords first; whatever remains is
        # forwarded unchanged to CCDData.__init__.
        warnings.simplefilter("ignore", DeprecationWarning)
        debug = kwargs.pop("debug", False)
        if debug:
            print("args=", *args)
            print("kwargs=", *kwargs)
        self._identifier = kwargs.pop("identifier", "unknown")
        self._title = kwargs.pop("title", None)
        # Beam parameters are converted to degrees for FITS-header storage.
        _beam = dict()
        _beam["BMAJ"] = self._beam_convert(kwargs.pop("bmaj", None))
        _beam["BMIN"] = self._beam_convert(kwargs.pop("bmin", None))
        _beam["BPA"] = self._beam_convert(kwargs.pop("bpa", None))
        self._restfreq = kwargs.pop("restfreq", None)
        self._filename = None
        self._data = None  # shut up Codacy
        # This won't work: On arithmetic operations, this raises the exception.
        # if self._identifier is None:
        #    raise ValueError("an identifier for Measurement must be specified.")
        # On arithmetic operations, this causes an annoying
        # log.info() message from CCDData about overwriting Quantity
        # This workaround is needed because CCDData raises an exception if unit
        # not given.  Whereas having BUNIT in the image header instead would be
        # perfectly reasonable...
        # The side-effect of this is that Measurement not instantiated from
        # an image and with no unit given gets "adu" as the unit.
        self._defunit = "adu"
        # Remember whether the caller explicitly supplied a unit; if not,
        # a BUNIT found in the header will override the default below.
        unitpresent = "unit" in kwargs
        _unit = kwargs.pop("unit", self._defunit)
        # Also works: super().__init__(*args, **kwargs, unit=_unit)
        CCDData.__init__(self, *args, **kwargs, unit=_unit)
        # force single pixel data to be iterable arrays.
        # I consider this a bug in CCDData, StdDevUncertainty that they don't do this.
        # also StdDevUncertainty does not convert float to np.float!
        # print("DU",np.shape(self.data),np.shape(self.uncertainty.array))
        # print(type(self.data))
        if np.shape(self.data) == ():
            self.data = np.array([self.data])
        if self.error is not None and np.shape(self.error) == ():
            self.uncertainty.array = np.array([self.uncertainty.array])
        # If user provided restfreq, insert it into header
        # FITS standard is Hz
        if self._restfreq is not None:
            rf = u.Unit(self._restfreq).to("Hz")
            self.header["RESTFREQ"] = rf
        # Set unit to header BUNIT or put BUNIT into header if it
        # wasn't present AND if unit wasn't given in the constructor
        if not unitpresent and "BUNIT" in self.header:
            self._unit = u.Unit(self.header["BUNIT"])
            if self.uncertainty is not None:
                self.uncertainty._unit = u.Unit(self.header["BUNIT"])
        else:
            # use str in case an astropy.Unit was given
            self.header["BUNIT"] = str(_unit)
        # Ditto beam parameters: header values win over constructor keywords.
        if "BMAJ" not in self.header:
            self.header["BMAJ"] = _beam["BMAJ"]
        if "BMIN" not in self.header:
            self.header["BMIN"] = _beam["BMIN"]
        if "BPA" not in self.header:
            self.header["BPA"] = _beam["BPA"]
        # Interpolators can only be built when world coordinates exist.
        if self.wcs is not None:
            self._set_up_for_interp()
def _beam_convert(self, bpar):
if bpar is None:
return bpar
if isinstance(bpar, u.Quantity):
return bpar.to("degree").value
raise TypeError("Beam parameters must be astropy Quantities")
    @staticmethod
    def make_measurement(datafile, error, outfile, rms=None, masknan=True, overwrite=False, unit="adu"):
        """Create a FITS files with 2 HDUS, the first being the datavalue and the 2nd being
        the data uncertainty. This format allows the resulting file to be read into the underlying :class:`~astropy.nddata.CCDData` class.

        :param datafile: The FITS file containing the data as a function of spatial coordinates
        :type datafile: str
        :param error: The errors on the data.  Possible values for error are:

             - a filename with the same shape as datafile containing the error values per pixel
             - a percentage value 'XX%' must have the "%" symbol in it
             - 'rms' meaning use the rms parameter if given, otherwise look for the RMS keyword in the FITS header of the datafile

        :type error: str
        :param outfile: The output file to write the result in (FITS format)
        :type outfile: str
        :param rms:  If error == 'rms', this value may give the rms in same units as data (e.g 'erg s-1 cm-2 sr-1').
        :type rms: float or :class:`astropy.units.Unit`
        :param masknan: Whether to mask any pixel where the data or the error is NaN. Default:true
        :type masknan: bool
        :param overwrite: If `True`, overwrite the output file if it exists. Default: `False`.
        :type overwrite: bool
        :param unit: Intensity unit to use for the data, this will override BUNIT in header if present.
        :type unit: :class:`astropy.units.Unit` or str
        :raises Exception: on various FITS header issues
        :raises OSError: if `overwrite` is `False` and the output file exists.

        Example usage:

        .. code-block:: python

            # example with percentage error
            Measurement.make_measurement("my_infile.fits",error='10%',outfile="my_outfile.fits")

            # example with measurement in units of K km/s and error
            # indicated by RMS keyword in input file.
            Measurement.make_measurement("my_infile.fits",error='rms',outfile="my_outfile.fits",unit="K km/s",overwrite=True)
        """
        _data = fits.open(datafile)
        needsclose = False
        if error == "rms":
            # constant error across the map, from the rms parameter or the
            # RMS header keyword of the data file
            _error = deepcopy(_data)
            if rms is None:
                rms = _data[0].header.get("RMS", None)
                if rms is None:
                    raise Exception("rms not given as parameter and RMS keyword not present in data header")
                else:
                    print("Found RMS in header: %.2E %s" % (rms, _error[0].data.shape))
            # tmp = np.full(_error[0].data.shape,rms)
            _error[0].data[:] = rms
        elif "%" in error:
            # fractional error proportional to the data value
            percent = float(error.strip("%")) / 100.0
            _error = deepcopy(_data)
            _error[0].data = _data[0].data * percent
        else:
            # error is a filename of a per-pixel error map
            _error = fits.open(error)
            needsclose = True
        # data and error must share a unit; fall back to the given unit if
        # BUNIT is absent in either header
        fb = _data[0].header.get("bunit", str(unit))  # use str in case Unit was given
        eb = _error[0].header.get("bunit", str(unit))
        if fb != eb:
            raise Exception("BUNIT must be the same in both data (%s) and error (%s) maps" % (fb, eb))
        # Sigh, this is necessary since there is no mode available in
        # fits.open that will truncate an existing file for writing
        if overwrite and exists(outfile):
            remove(outfile)
        _out = fits.open(name=outfile, mode="ostream")
        _out.append(_data[0])
        _out[0].header["bunit"] = fb
        _out.append(_error[0])
        # mark the 2nd HDU so CCDData.read recognizes it as the uncertainty
        _out[1].header["extname"] = "UNCERT"
        _out[1].header["bunit"] = eb
        _out[1].header["utype"] = "StdDevUncertainty"
        if masknan:
            fmasked = ma.masked_invalid(_data[0].data)
            emasked = ma.masked_invalid(_error[0].data)
            final_mask = utils.mask_union([fmasked, emasked])
            # Convert boolean mask to uint since io.fits cannot handle bool.
            hduMask = fits.ImageHDU(final_mask.astype(np.uint8), name="MASK")
            _out.append(hduMask)
        _out.writeto(outfile, overwrite=overwrite)
        _data.close()
        _out.close()
        if needsclose:
            _error.close()
    @property
    def value(self):
        """Return the underlying data array

        :rtype: :class:`numpy.ndarray`
        """
        return self.data
@property
def error(self):
"""Return the underlying error array
:rtype: :class:`numpy.ndarray`
"""
if self.uncertainty is None:
return None
return self.uncertainty._array
@property
def SN(self):
"""Return the signal to noise ratio (value/error)
:rtype: :class:`numpy.ndarray`
"""
if self.uncertainty is None:
return None
return self.value / self.error
    @property
    def id(self):
        """Return the string ID of this measurement, e.g., CO_10

        :rtype: str
        """
        return self._identifier
    def identifier(self, id):
        """Set the string ID of this measurement, e.g., CO_10

        :param id: the identifier
        :type id: str
        """
        self._identifier = id
@property
def beam(self):
"""Return the beam parameters as astropy Quantities or None if beam is not set"""
if "BMAJ" in self.header and self.header["BMAJ"] is not None:
return [self.header["BMAJ"], self.header["BMIN"], self.header["BPA"]] * u.degree
else:
return None
    def is_ratio(self):
        """Indicate if this `Measurement` is a ratio.
        This method looks for the '/' past the first character of the `Measurement` *identifier*, such as "CII_158/CO_32".
        See also :func:`pdrutils.is_ratio`.

        :returns: True if the Measurement is a ratio, False otherwise
        :rtype: bool"""
        return utils.is_ratio(self.id)  # pdrutils method
    @property
    def title(self):
        """A formatted title (e.g., LaTeX) that can be used in plotting.

        :rtype: str or None
        """
        return self._title
    @property
    def filename(self):
        """The FITS file that created this measurement, or None if it didn't originate from a file

        :rtype: str or None
        """
        return self._filename
    def write(self, filename, **kwd):
        """Write this Measurement to a FITS file with value in 1st HDU and error in 2nd HDU. See :meth:`astropy.nddata.CCDData.write`.

        :param filename:  Name of file.
        :type filename: str
        :param kwd: All additional keywords are passed to :py:mod:`astropy.io.fits`
        """
        hdu = self.to_hdu()
        hdu.writeto(filename, **kwd)
    def _set_up_for_interp(self, kind="linear"):
        # @TODO this will always return nan if there are nan in the data.
        # See eg. https://stackoverflow.com/questions/35807321/scipy-interpolation-with-masked-data
        """
        We don't want to have to do a call to get a pixel value at a particular WCS every time it's needed.
        So make one call that converts the entire NAXIS1 and NAXIS2 to an array of world coordinates and stash that away
        so we can pass it to the interpolator when needed.

        :param kind: interpolation method passed to
            :class:`scipy.interpolate.RegularGridInterpolator`. Default: "linear"
        :type kind: str
        """
        # world coordinates of every axis, in log and linear form
        self._world_axis = utils.get_xy_from_wcs(self, quantity=False, linear=False)
        self._world_axis_lin = utils.get_xy_from_wcs(self, quantity=False, linear=True)
        # self._interp_log_old = interp2d(
        #    self._world_axis[0], self._world_axis[1], z=self.data, kind=kind, bounds_error=True
        # )
        # self._interp_lin_old = interp2d(
        #    self._world_axis_lin[0], self._world_axis_lin[1], z=self.data, kind=kind, bounds_error=True
        # )
        # data is transposed because RegularGridInterpolator expects the
        # first grid axis to vary along the first data axis
        self._interp_log = RegularGridInterpolator(self._world_axis, values=self.data.T, method=kind, bounds_error=True)
        self._interp_lin = RegularGridInterpolator(
            self._world_axis_lin, values=self.data.T, method=kind, bounds_error=True
        )
    def get_pixel(self, world_x, world_y):
        """Return the nearest pixel coordinates to the input world coordinates

        :param world_x: The horizontal world coordinate
        :type world_x: float
        :param world_y: The vertical world coordinate
        :type world_y: float
        :returns: (x, y) integer pixel coordinates
        :rtype: tuple
        :raises Exception: if this Measurement has no WCS
        """
        if self.wcs is None:
            raise Exception(f"No wcs in this Measurement {self.id}")
        return tuple(np.round(self.wcs.world_to_pixel_values(world_x, world_y)).astype(int))
def get(self, world_x, world_y, log=False):
"""Get the value(s) at the give world coordinates
:param world_x: the x value in world units of naxis1
:type world_x: float or array-like
:param world_y: the y value in world units of naxis2
:type world_y: float or array-lke
:param log: True if the input coords are logarithmic Default:False
:type log: bool
:returns: The value(s) of the Measurement at input coordinates
:rtype: float
"""
if log:
return float(self._interp_log((world_x, world_y)))
else:
return float(self._interp_lin((world_x, world_y)))
@property
def levels(self):
if self.value.size != 1:
raise Exception("This only works for Measurements with a single pixel")
return np.array([float(self.value - self.error), float(self.value), float(self.value + self.error)])
def _modify_id(self, other, op):
"""Handle ID string for arithmetic operations with Measurements or numbers
:param other: a Measurement or number
:type other: :class:`Measurement` or number
:param op: descriptive string of operation, e.g. "+", "*"
:type op: str
"""
if getattr(other, "id", None) is not None:
return self.id + op + other.id
else:
return self.id
    def add(self, other):
        """Add this Measurement to another, propagating errors, units, and updating identifiers.  Masks are logically or'd.

        :param other: a Measurement or number to add
        :type other: :class:`Measurement` or number
        :returns: a new Measurement with the summed data
        :rtype: :class:`Measurement`
        """
        # need to do tricky stuff to preserve unit propagation.
        # super().add() does not work because it instantiates a Measurement
        # with the default unit "adu" and then units for the operation are
        # not conformable.  I blame astropy CCDData authors for making that
        # class so hard to subclass.
        z = CCDData.add(self, other, handle_mask=np.logical_or)
        z = Measurement(z, unit=z._unit)
        z._identifier = self._modify_id(other, "+")
        # explicitly restore this Measurement's unit on the result
        z._unit = self.unit
        return z
    def subtract(self, other):
        """Subtract another Measurement from this one, propagating errors, units, and updating identifiers.  Masks are logically or'd.

        :param other: a Measurement or number to subtract
        :type other: :class:`Measurement` or number
        :returns: a new Measurement with the difference data
        :rtype: :class:`Measurement`
        """
        z = CCDData.subtract(self, other, handle_mask=np.logical_or)
        z = Measurement(z, unit=z._unit)
        z._identifier = self._modify_id(other, "-")
        return z
    def multiply(self, other):
        """Multiply this Measurement by another, propagating errors, units, and updating identifiers.  Masks are logically or'd.

        :param other: a Measurement or number to multiply
        :type other: :class:`Measurement` or number
        :returns: a new Measurement with the product data
        :rtype: :class:`Measurement`
        """
        z = CCDData.multiply(self, other, handle_mask=np.logical_or)
        z = Measurement(z, unit=z._unit)
        z._identifier = self._modify_id(other, "*")
        return z
    def divide(self, other):
        """Divide this Measurement by another, propagating errors, units, and updating identifiers.  Masks are logically or'd.

        :param other: a Measurement or number to divide by
        :type other: :class:`Measurement` or number
        :returns: a new Measurement with the quotient data
        :rtype: :class:`Measurement`
        """
        z = CCDData.divide(self, other, handle_mask=np.logical_or)
        z = Measurement(z, unit=z._unit)
        z._identifier = self._modify_id(other, "/")
        return z
def is_single_pixel(self):
"""Is this Measurement a single value?
:returns: True if a single value (pixel)
:rtype: bool
"""
return self.data.size == 1
def __add__(self, other):
"""Add this Measurement to another using + operator, propagating errors, units, and updating identifiers"""
z = self.add(other)
return z
def __sub__(self, other):
"""Subtract another Measurement from this one using - operator, propagating errors, units, and updating identifiers"""
z = self.subtract(other)
return z
def __mul__(self, other):
"""Multiply this Measurement by another using * operator, propagating errors, units, and updating identifiers"""
z = self.multiply(other)
return z
def __truediv__(self, other):
"""Divide this Measurement by another using / operator, propagating errors, units, and updating identifiers"""
z = self.divide(other)
return z
def __repr__(self):
m = "%s +/- %s %s" % (np.squeeze(self.data), np.squeeze(self.error), self.unit)
return m
def __str__(self):
# this fails for array data
# return "{:3.2e} +/- {:3.2e} {:s}".format(self.data,self.error,self.unit)
# m = "%s +/- %s %s" % (self.data,self.error,self.unit)
m = "%s +/- %s %s" % (np.squeeze(self.data), np.squeeze(self.error), self.unit)
return m
    def __format__(self, spec):
        """Support format specs on the data and error arrays, e.g.
        ``f"{m:.2f}"``.  An empty spec falls back to :meth:`__str__`.

        :param spec: a standard Python format specification applied to each
            float element of the data and error arrays
        :type spec: str
        :rtype: str
        """
        # todo look more closely how Quantity does this
        # print("using __format__")
        if spec == "":
            return str(self)
        # this can't possibly be the way you are supposed to use this, but it works
        spec = "{:" + spec + "}"
        a = np.array2string(np.squeeze(self.data), formatter={"float": lambda x: spec.format(x)})
        b = np.array2string(np.squeeze(self.error), formatter={"float": lambda x: spec.format(x)})
        # this does not always work
        # a = np.vectorize(spec.__mod__,otypes=[np.float64])(self.data)
        # b = np.vectorize(spec.__mod__,otypes=[np.float64])(self.error)
        return "%s +/- %s %s" % (a, b, self.unit)
    def __getitem__(self, index):
        """Allows us to use [] to index into the data array.
        Note: returns a plain value, not a Measurement (see @Todo at top of file)."""
        return self._data[index]
@staticmethod
def from_table(filename, format="ipac", array=False):
r"""Table file reader for Measurement class.
Create one or more Measurements from a table.
The input table header must contain the columns:
*data* - the data value
*uncertainty* - the error on the data, can be absolute error or percent. If percent, the header unit row entry for this column must be "%"
*identifier* - the identifier of this Measurement which should match a model in the ModelSet you are using, e.g., "CII_158" for [C II] 158 $\\mu$m
The following columns are optional:
*bmaj* - beam major axis size
*bmin* - beam minor axis size
*bpa* - beam position angle
The table must specify the units of each column, e.g. a unit row in the header for IPAC format. Leave column entry blank if unitless. Units of value and error should be the same or conformable. Units must be transformable to a valid astropy.unit.Unit.
:param filename: Name of table file.
:type filename: str
:param format: `Astropy Table format format. <https://docs.astropy.org/en/stable/io/unified.html#built-in-readers-writers>`_ e.g., ascii, ipac, votable. Default is `IPAC format <https://docs.astropy.org/en/stable/api/astropy.io.ascii.Ipac.html#astropy.io.ascii.Ipac>`_
:param array: Controls whether a list of Measurements or a single Measurement is returned. If `array` is True, one Measurement instance will be created for each row in the table and a Python list of Measurements will be returned. If `array` is False, one Measurement containing all the points in the `data` member will be returned. If `array` is False, the *identifier* and beam parameters of the first row will be used. If feeding the return value to a plot method such as :meth:`~pdrtpy.plot.modelplot.ModelPlot.phasespace`, choose `array=False`. Default:False.
:type array: bool
:rtype: :class:`~pdrtpy.measurement.Measurement` or list of :class:`~pdrtpy.measurement.Measurement`
"""
# @todo support input of a astropy.Table directly
t = Table.read(filename, format=format)
required = ["data", "uncertainty", "identifier"]
options = ["bmaj", "bmin", "bpa"]
errmsg = ""
for r in required:
if r not in t.colnames:
errmsg += "{0} is a required column. ".format(r)
if errmsg != "":
raise Exception("Insufficient information in table to create Measurement. {0}".format(errmsg))
# check for beam parameters in table.
# IFF all beam parameters present, they will be added to the Measurements.
if sorted(list(set(options) & set(t.colnames))) == sorted(options):
hasBeams = True
else:
hasBeams = False
if t["data"].unit is None:
t["data"].unit = ""
if t["uncertainty"].unit is None:
t["uncertainty"].unit = ""
if array:
a = list()
for x in t: # x is a astropy.table.row.Row
if t.columns["uncertainty"].unit == "%":
err = StdDevUncertainty(array=x["uncertainty"] * x["data"] / 100.0, unit=t.columns["data"].unit)
else:
err = StdDevUncertainty(array=x["uncertainty"], unit=t.columns["uncertainty"].unit)
if hasBeams:
# NB: I tried to do something tricky here with Qtable, but it actually became *more* complicated
m = Measurement(
data=x["data"].data,
identifier=x["identifier"],
unit=t.columns["data"].unit,
uncertainty=err,
bmaj=x["bmaj"] * t.columns["bmaj"].unit,
bmin=x["bmin"] * t.columns["bmaj"].unit,
bpa=x["bpa"] * t.columns["bpa"].unit,
)
else:
m = Measurement(
data=x["data"].data, identifier=x["identifier"], unit=t.columns["data"].unit, uncertainty=err
)
a.append(m)
return a
else:
if t.columns["uncertainty"].unit == "%":
err = StdDevUncertainty(t["uncertainty"] * t["data"] / 100.0, unit=t.columns["data"].unit)
else:
err = StdDevUncertainty(t["uncertainty"], unit=t.columns["uncertainty"].unit)
if hasBeams:
m = Measurement(
data=t["data"].data,
identifier=t["identifier"][0],
unit=t.columns["data"].unit,
uncertainty=err,
bmaj=t["bmaj"][0] * t["bmaj"].unit,
bmin=t["bmin"][0] * t["bmaj"].unit,
bpa=t["bpa"][0] * t["bpa"].unit,
)
else:
m = Measurement(
data=t["data"].data, identifier=t["identifier"][0], unit=t.columns["data"].unit, uncertainty=err
)
return m
def fits_measurement_reader(
    filename, hdu=0, unit=None, hdu_mask="MASK", hdu_flags=None, key_uncertainty_type="UTYPE", **kwd
):
    """FITS file reader for Measurement class, which will be called by :meth:`Measurement.read`.

    :param filename: Name of FITS file.
    :type filename: str
    :param identifier: string indicating what this is an observation of, e.g., "CO_10" for CO(1-0)
    :type identifier: str
    :param squeeze: If ``True``, remove single dimension axes from the input image. Default: ``True``
    :type squeeze: bool
    :param hdu: FITS extension from which Measurement should be initialized.
        If zero and no data in the primary extension, it will
        search for the first extension with data. The header will be
        added to the primary header.  Default is 0.
    :type hdu: int, optional
    :type unit: :class:`astropy.units.Unit`, optional
    :param unit:
        Units of the image data. If this argument is provided and there is a
        unit for the image in the FITS header (the keyword ``BUNIT`` is used
        as the unit, if present), this argument is used for the unit.
        Default is ``None``.
    :type hdu_uncertainty: str or None, optional
    :param hdu_uncertainty: FITS extension from which the uncertainty
        should be initialized. If the extension does not exist the
        uncertainty of the Measurement is ``None``.  Default is
        ``'UNCERT'``.
    :type hdu_mask: str or None, optional
    :param hdu_mask: FITS extension from which the mask should be initialized. If the extension does not exist the mask of the Measurement is ``None``. Default is ``'MASK'``.
    :type hdu_flags: str or None, optional
    :param hdu_flags: Currently not implemented. Default is ``None``.
    :type key_uncertainty_type: str, optional
    :param key_uncertainty_type: The header key name where the class name of the uncertainty is stored in the hdu of the uncertainty (if any). Default is ``UTYPE``.
    :param kwd: Any additional keyword parameters are passed through to the FITS reader in :mod:`astropy.io.fits`

    :raises TypeError: If the conversion from CCDData to Measurement fails

    NOTE(review): ``hdu``, ``hdu_mask``, ``hdu_flags``, ``key_uncertainty_type``
    and ``**kwd`` are currently NOT forwarded to ``CCDData.read`` (the
    pass-through is commented out below) — confirm whether this is intended.
    """
    _id = kwd.pop("identifier", "unknown")
    _title = kwd.pop("title", None)
    _squeeze = kwd.pop("squeeze", True)
    # suppress INFO messages about units in FITS file. e.g. useless ones like:
    # "INFO: using the unit erg / (cm2 s sr) passed to the FITS reader instead of the unit erg s-1 cm-2 sr-1 in the FITS file."
    log.setLevel("WARNING")
    z = CCDData.read(filename, unit=unit)  # ,hdu,uu,hdu_uncertainty,hdu_mask,hdu_flags,key_uncertainty_type, **kwd)
    if _squeeze:
        z = utils.squeeze(z)
    # @TODO if uncertainty plane not present, look for RMS keyword
    # @TODO header values get stuffed into WCS, others may be dropped by CCDData._generate_wcs_and_update_header
    try:
        z = Measurement(z, unit=z._unit, title=_title)
    except Exception:
        raise TypeError("could not convert fits_measurement_reader output to Measurement")
    z.identifier(_id)
    # astropy.io.registry.read creates a FileIO object before calling the registered
    # reader (this method), so the filename is FileIO.name.
    z._filename = filename.name
    log.setLevel("INFO")  # set back to default
    return z
# Register the reader with astropy's I/O registry so that
# Measurement.read("file.fits") dispatches to fits_measurement_reader.
with registry.delay_doc_updates(Measurement):
    registry.register_reader("fits", Measurement, fits_measurement_reader)
|
mpoundREPO_NAMEpdrtpyPATH_START.@pdrtpy_extracted@pdrtpy-master@pdrtpy@measurement.py@.PATH_END.py
|
{
"filename": "LoadRestartFile_nod_VIS.py",
"repo_name": "martinsparre/XSHPipelineManager",
"repo_path": "XSHPipelineManager_extracted/XSHPipelineManager-master/LoadRestartFile/LoadRestartFile_nod_VIS.py",
"type": "Python"
}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Martin Sparre, DARK, March 2011
# version 5.9.0
#
# Driver script: re-runs the X-shooter 'nod' science reduction recipe for the
# VIS arm using a previously saved pipeline restart file.

from PipelineManager import *

# Load the saved pipeline state and clear any completed recipes so the
# reduction runs from scratch.
VIS = LoadRestartFile('OutputBase/xsh_2dmap_VIS.restart')
VIS.ResetAllRecipes()

# Declare the esorex recipe and its SOF (set-of-frames) input tags.
EsorexName = 'xsh_scired_slit_nod'
SOFFileName = EsorexName
VIS.DeclareNewRecipe(EsorexName, SOFFileName)
VIS.DeclareRecipeInputTag(SOFFileName, "OBJECT_SLIT_NOD_VIS", "1..n", "any", "100k")
VIS.DeclareRecipeInputTag(SOFFileName, "SPECTRAL_FORMAT_TAB_VIS", "1", "-", "-")
VIS.DeclareRecipeInputTag(SOFFileName, "MASTER_FLAT_SLIT_VIS", "1", "match", "match")
VIS.DeclareRecipeInputTag(SOFFileName, "MASTER_BIAS_VIS", "1", "match", "match")
VIS.DeclareRecipeInputTag(SOFFileName, "ORDER_TAB_EDGES_SLIT_VIS", "1", "match", "match")
VIS.DeclareRecipeInputTag(SOFFileName, "XSH_MOD_CFG_TAB_VIS", "1", "1x1", "400k")
VIS.DeclareRecipeInputTag(SOFFileName, "XSH_MOD_CFG_OPT_2D_VIS", "1", "-", "-")
VIS.DeclareRecipeInputTag(SOFFileName, "MASTER_BP_MAP_VIS", "?", "match", "match")
VIS.DeclareRecipeInputTag(SOFFileName, "DISP_TAB_VIS", "?", "1x1", "400k")
VIS.DeclareRecipeInputTag(SOFFileName, "FLUX_STD_CATALOG_VIS", "?", "-", "-")
VIS.DeclareRecipeInputTag(SOFFileName, "ATMOS_EXT_VIS", "?", "-", "-")
VIS.EnableRecipe(SOFFileName)

# Raw nod-mode object frames for this reduction (hard-coded absolute paths).
VIS.SetFiles('OBJECT_SLIT_NOD_VIS',['/home/ms/Desktop/tmp_xsh/XSHOOTER_SLT_OBJ_VIS_098_0005.fits','/home/ms/Desktop/tmp_xsh/XSHOOTER_SLT_OBJ_VIS_098_0006.fits','/home/ms/Desktop/tmp_xsh/XSHOOTER_SLT_OBJ_VIS_098_0007.fits','/home/ms/Desktop/tmp_xsh/XSHOOTER_SLT_OBJ_VIS_098_0008.fits'])
VIS.RunPipeline()
|
martinsparreREPO_NAMEXSHPipelineManagerPATH_START.@XSHPipelineManager_extracted@XSHPipelineManager-master@LoadRestartFile@LoadRestartFile_nod_VIS.py@.PATH_END.py
|
{
"filename": "wrapper.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/base/wrapper.py",
"type": "Python"
}
|
import functools
import inspect
from textwrap import dedent
class ResultsWrapper:
    """
    Class which wraps a statsmodels estimation Results class and steps in to
    reattach metadata to results (if available)
    """
    # mapping of attribute name -> wrapping spec; subclasses override these
    _wrap_attrs = {}
    _wrap_methods = {}

    def __init__(self, results):
        self._results = results
        self.__doc__ = results.__doc__

    def __dir__(self):
        # expose the wrapped results' attributes for introspection
        return [x for x in dir(self._results)]

    def __getattribute__(self, attr):
        # Look up `attr` on the wrapper first; fall back to the wrapped
        # results object and re-wrap the value per _wrap_attrs.
        get = lambda name: object.__getattribute__(self, name)

        # NOTE(review): if '_results' is missing, `results` below is unbound
        # and getattr(results, attr) raises NameError, not AttributeError —
        # presumably '_results' is always set in __init__; confirm.
        try:
            results = get('_results')
        except AttributeError:
            pass

        try:
            return get(attr)
        except AttributeError:
            pass

        obj = getattr(results, attr)
        data = results.model.data
        how = self._wrap_attrs.get(attr)
        if how and isinstance(how, tuple):
            # tuple spec: first element is the 'how', rest are extra args
            obj = data.wrap_output(obj, how[0], *how[1:])
        elif how:
            obj = data.wrap_output(obj, how=how)

        return obj

    def __getstate__(self):
        # print 'pickling wrapper', self.__dict__
        return self.__dict__

    def __setstate__(self, dict_):
        # print 'unpickling wrapper', dict_
        self.__dict__.update(dict_)

    def save(self, fname, remove_data=False):
        """
        Save a pickle of this instance.

        Parameters
        ----------
        fname : {str, handle}
            Either a filename or a valid file handle.
        remove_data : bool
            If False (default), then the instance is pickled without changes.
            If True, then all arrays with length nobs are set to None before
            pickling. See the remove_data method.
            In some cases not all arrays will be set to None.
        """
        from statsmodels.iolib.smpickle import save_pickle

        if remove_data:
            self.remove_data()

        save_pickle(self, fname)

    @classmethod
    def load(cls, fname):
        """
        Load a pickled results instance

        .. warning::

           Loading pickled models is not secure against erroneous or
           maliciously constructed data. Never unpickle data received from
           an untrusted or unauthenticated source.

        Parameters
        ----------
        fname : {str, handle}
            A string filename or a file handle.

        Returns
        -------
        Results
            The unpickled results instance.
        """
        from statsmodels.iolib.smpickle import load_pickle

        return load_pickle(fname)
def union_dicts(*dicts):
    """Return a new dict containing the items of every dict in ``dicts``.

    Later dicts take precedence when keys collide.
    """
    combined = {}
    for mapping in dicts:
        combined.update(mapping)
    return combined
def make_wrapper(func, how):
    """Wrap a Results method so its return value is re-wrapped with metadata.

    Parameters
    ----------
    func : callable
        Unbound Results method to delegate to.
    how : tuple, other truthy, or falsy
        Wrapping spec.  A tuple means
        ``data.wrap_output(obj, how[0], *how[1:])``; any other truthy value
        is passed as ``data.wrap_output(obj, how)``; a falsy value means the
        raw result is returned unwrapped.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        results = object.__getattribute__(self, '_results')
        data = results.model.data
        obj = func(results, *args, **kwargs)
        if how and isinstance(how, tuple):
            # Unpack the tail of the spec as separate positional arguments,
            # consistent with ResultsWrapper.__getattribute__ (previously the
            # tail tuple was passed as one argument).
            obj = data.wrap_output(obj, how[0], *how[1:])
        elif how:
            obj = data.wrap_output(obj, how)
        # When `how` is falsy, return the raw result (previously `obj` was
        # unbound here and `return obj` raised NameError).
        return obj

    sig = inspect.signature(func)
    formatted = str(sig)

    doc = dedent(wrapper.__doc__) if wrapper.__doc__ else ''
    wrapper.__doc__ = f"\n{func.__name__}{formatted}\n{doc}"

    return wrapper
def populate_wrapper(klass, wrapping):
    """Install wrapped versions of ``wrapping``'s methods onto ``klass``.

    Every method named in ``klass._wrap_methods`` that exists on ``wrapping``
    is passed through :func:`make_wrapper` and set on ``klass`` under the
    same name; names absent from ``wrapping`` are skipped.
    """
    for name, how in klass._wrap_methods.items():
        if not hasattr(wrapping, name):
            continue
        original = getattr(wrapping, name)
        setattr(klass, name, make_wrapper(original, how))
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@base@wrapper.py@.PATH_END.py
|
{
"filename": "run_unravel_image_region.py",
"repo_name": "astrostat/LIRA",
"repo_path": "LIRA_extracted/LIRA-master/lira/inst/docs/examples/PyProcUtils/run_unravel_image_region.py",
"type": "Python"
}
|
import numpy as n
import pyfits

# Script: extract fixed sub-regions from each sample of LIRA multiscale-ratio
# image cubes and write each region out as a flattened 1-D FITS file.

# These are regions dividing the image into 4 quadrants:
regions = []
regions.append( [( 0,15),( 0,15)] )
regions.append( [( 0,15),(16,32)] )
regions.append( [(16,32),(16,32)] )
regions.append( [(16,32),( 0,15)] )

# These are regions containing an "E" -- or none!
# NOTE(review): each assignment below rebuilds `regions` from scratch, so only
# the LAST set (single pixels) is actually used — confirm this is intended.
regions = []
regions.append( [( 1, 7),( 2,11)] )  # Diffuse E
regions.append( [( 1, 7),(18,27)] )  # None
regions.append( [(17,23),(18,27)] )  # Point source E
regions.append( [(17,23),( 2,11)] )  # None

# These are single pixels of the "E"s -- or none!
regions = []
regions.append( [( 1, 2),( 2,3)] )   # Diffuse E
regions.append( [( 1, 2),(18,19)] )  # None
regions.append( [(17,18),(18,19)] )  # Point source E
regions.append( [(17,18),( 2,3)] )   # None

wrkdir = 'intermediatefiles/'
infiles = ( \
    'PoisDatons32x32EEMC2_NoBckgrnd_1_2.chop.MSratios_Lvl0.fits', \
    'PoisDatons32x32EEMC2_NoBckgrnd_1_2.chop.MSratios_Lvl1.fits', \
    'PoisDatons32x32EEMC2_NoBckgrnd_1_2.chop.MSratios_Lvl2.fits', \
    )

##-----------------------------
for ifil in infiles:
    this_lvl = pyfits.open(wrkdir+ifil)
    # axis 0 is the MCMC sample index; axes 1,2 are image rows/cols
    this_nsamples = this_lvl[0].data.shape[0]
    for k in range(len(regions)):
        # slice out region k over all samples
        # NOTE(review): python slices exclude the upper bound, so (0,15)
        # covers rows 0..14 — confirm the bounds are as intended.
        new_ar = this_lvl[0].data[0:this_nsamples, \
                                  regions[k][0][0]:regions[k][0][1] , \
                                  regions[k][1][0]:regions[k][1][1] ]
        new_fil_name = ifil[0:-4]+'PixOfReg'+str(k)+'.fits'
        new_HDU =pyfits.PrimaryHDU(data=n.asarray(new_ar).flatten())
        # NOTE(review): writeto fails if the output file already exists
        new_HDU.writeto(new_fil_name)
    #end-for
    # new_ar = this_lvl[0].data[0:, \
    #                           0:15, 0:15].flatten()
    # new_fil_name = ifil+'Reg'+str(0)
    # new_HDU =pyfits.PrimaryHDU(data=n.asarray(new_ar))
    # new_HDU.writeto(new_fil_name)
    # new_ar = this_lvl[0].data[0:, \
    #                           0:15, 16:32].flatten()
    # new_fil_name = ifil+'Reg'+str(1)
    # new_HDU =pyfits.PrimaryHDU(data=n.asarray(new_ar))
    # new_HDU.writeto(new_fil_name)
    # new_ar = this_lvl[0].data[0:, \
    #                           16:32, 0:15].flatten()
    # new_fil_name = ifil+'Reg'+str(2)
    # new_HDU =pyfits.PrimaryHDU(data=n.asarray(new_ar))
    # new_HDU.writeto(new_fil_name)
    # new_ar = this_lvl[0].data[0:, \
    #                           16:32,16:32].flatten()
    # new_fil_name = ifil+'Reg'+str(3)
    # new_HDU =pyfits.PrimaryHDU(data=n.asarray(new_ar))
    # new_HDU.writeto(new_fil_name)
#end-for-ifil
|
astrostatREPO_NAMELIRAPATH_START.@LIRA_extracted@LIRA-master@lira@inst@docs@examples@PyProcUtils@run_unravel_image_region.py@.PATH_END.py
|
{
"filename": "linalg.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/scipy/sparse/linalg.py",
"type": "Python"
}
|
# Copyright 2020 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: import <name> as <name> is required for names to be exported.
# See PEP 484 & https://github.com/jax-ml/jax/issues/7570
from jax._src.scipy.sparse.linalg import (
cg as cg,
gmres as gmres,
bicgstab as bicgstab,
)
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@scipy@sparse@linalg.py@.PATH_END.py
|
{
"filename": "tfsa-2022-123.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2022-123.md",
"type": "Markdown"
}
|
## TFSA-2022-123: `CHECK` fail in `tf.sparse.cross`
### CVE Number
CVE-2022-35997
### Impact
If `tf.sparse.cross` receives an input `separator` that is not a scalar, it gives a `CHECK` fail that can be used to trigger a denial of service attack.
```python
import tensorflow as tf
tf.sparse.cross(inputs=[],name='a',separator=tf.constant(['a', 'b'],dtype=tf.string))
```
### Patches
We have patched the issue in GitHub commit [83dcb4dbfa094e33db084e97c4d0531a559e0ebf](https://github.com/tensorflow/tensorflow/commit/83dcb4dbfa094e33db084e97c4d0531a559e0ebf).
The fix will be included in TensorFlow 2.10.0. We will also cherrypick this commit on TensorFlow 2.9.1, TensorFlow 2.8.1, and TensorFlow 2.7.2, as these are also affected and still in supported range.
### For more information
Please consult [our security guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for more information regarding the security model and how to contact us with issues and questions.
### Attribution
This vulnerability has been reported by Kang Hong Jin.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2022-123.md@.PATH_END.py
|
{
"filename": "residual_calculator.ipynb",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/galfit/residual_calculator.ipynb",
"type": "Jupyter Notebook"
}
|
```python
#!pip3 install --user astropy
```
```python
import numpy as np
import astropy as ap
import pandas as pd
from astropy.io import fits
import scipy.linalg as slg
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.colors import LinearSegmentedColormap
import glob
import os
from IPython.display import Image
```
```python
run_path = "/home/portmanm/run2_1000_galfit"
#run_path = "/home/portmanm/run3_1000_galfit_psf"
sparc_in = "sparcfire-in"
sparc_out = "sparcfire-out"
sparc_tmp = "sparcfire-tmp"
galfits_tmp = "galfits"
galfit_masks = "galfit_masks"
galfit_out = "all_galfit_out"
```
```python
def check_galfit_chi(gal_name, base_path):
    """Parse GALFIT's fit statistics from its text output file.

    Scans ``<base_path>/<gal_name>/galfit.01`` for the first line containing
    "Chi", which looks like::

        # Chi^2/nu = 4.661, Chi^2 = 12025.575, Ndof = 2580

    Returns
    -------
    tuple or None
        ``(chi2_nu, chi2, ndof)`` from that line, or None (implicitly) when
        no such line is present.
    """
    # NOTE: the output file name "galfit.01" may change in future runs.
    stats_path = os.path.join(base_path, gal_name, "galfit.01")
    with open(stats_path, "r") as handle:
        for raw in handle:
            if "Chi" not in raw:
                continue
            # Strip the leading '# ' marker, then split the three
            # comma-separated "name = value" fields.
            fields = raw.strip("# ").split(",")
            values = [fld.strip().split("=")[-1] for fld in fields]
            return float(values[0]), float(values[1]), int(values[2])
```
```python
# Accumulates per-galaxy fit statistics keyed by the model FITS filename:
# (chi^2/nu, chi^2, |masked residual|, |GALFIT residual|, residual array).
norms = {}
#residual_arrays = {}
total_gal = len(glob.glob(os.path.join(run_path, sparc_in, "*.fits")))
outpath = os.path.join(run_path, sparc_out)#, galfit_out)
temppath = os.path.join(run_path, sparc_tmp, galfits_tmp)
maskpath = os.path.join(run_path, sparc_tmp, galfit_masks)
#successes = [os.path.basename(i) for i in glob.glob(os.path.join(temppath, "*.fits"))]
for i in glob.glob(os.path.join(temppath, "*.fits")):
    fits_file = fits.open(i)
    filename = os.path.basename(i)
    # File naming convention: "<galaxy_name>_out.fits".
    galaxy_name = filename.split("_")[0]
    star_mask_name = f"{galaxy_name}_star-rm.fits"
    mask_fits_name = os.path.join(maskpath, star_mask_name)
    mask_fits_file = fits.open(mask_fits_name)
    # GALFIT records the fit region in the model HDU header, e.g. "a:b".
    crop_box = fits_file[2].header["FITSECT"]
    # NOTE(review): eval on a header string is unsafe if the FITS file is
    # untrusted; a str parse (split on ',') would be safer.
    crop_box = eval(crop_box.replace(":", ","))
    # To adjust for python indexing
    box_min, box_max = crop_box[0] - 1, crop_box[1]
    # To invert the matrix since galfit keeps 0 valued areas
    # NOTE(review): the same (box_min, box_max) pair is applied to both
    # axes, i.e. the fit region is assumed square -- TODO confirm.
    crop_mask = 1 - mask_fits_file[0].data[box_min:box_max, box_min:box_max]
    try:
        # HDU 1: observation, HDU 2: model; mask out star-removed pixels.
        observation = fits_file[1].data
        model = fits_file[2].data
        masked_residual = (observation - model)*crop_mask
    except:  # NOTE(review): bare except hides the real error (likely a shape mismatch)
        print(f"There is likely an observation error with galaxy {galaxy_name}, continuing...")
        # NOTE(review): `continue` skips the close() calls below, leaking
        # two open FITS handles per failed galaxy.
        continue
    try:
        norms[filename] = *check_galfit_chi(galaxy_name, outpath)[:2], round(slg.norm(masked_residual), 3), round(slg.norm(fits_file[3].data), 3), masked_residual
        # for infs, nans in residual
    except ValueError:
        pass
    except FileNotFoundError:
        print(f"Could not find galfit.# output file for (unknown). Continuing...")
        pass
    fits_file.close()
    mask_fits_file.close()
#glob.glob("./" + galaxy_path + '/' + galaxy_name + '.tsv')[0]
```
There is likely an observation error with galaxy 1237667910602522764, continuing...
There is likely an observation error with galaxy 1237671140406919323, continuing...
```python
norms_df = pd.DataFrame(norms).T
norms_df.rename(columns={0: 'chi^2_nu', 1: 'chi^2', 2 : 'norm_masked_residual', 3 : 'norm_galfit_residual', 4 : 'masked_residual_array'}, inplace=True)
```
```python
#norms = dict(sorted(norms.items(), key=lambda item: item[1][2]))
norms_df = norms_df.sort_values(by=['norm_masked_residual']) #, inplace=True)
#norm_items = list(norms.items())
```
```python
png_dir = os.path.join(run_path, sparc_out, galfit_out, "galfit_png/")
```
```python
# Grab top 1000 highest resolution galaxies from Darren's 27K and run this on those
# Change pitch angle plotting to only plot sparcfire's line between r_in and r_out and see how that compare to galfit
# Tile residuals for presentation???
```
```python
# Thanks to https://jakevdp.github.io/PythonDataScienceHandbook/04.07-customizing-colorbars.html
def grayscale_cmap(cmap):
    """Build a grayscale twin of *cmap* that preserves perceived brightness.

    Each RGBA entry is replaced by its perceived luminance (HSP model,
    cf. http://alienryderflex.com/hsp.html); the alpha channel is untouched.
    """
    source = plt.cm.get_cmap(cmap)
    rgba = source(np.arange(source.N))
    # Perceptual weights for the R, G, B channels (HSP color model).
    weights = [0.299, 0.587, 0.114]
    luminance = np.sqrt(np.dot(rgba[:, :3] ** 2, weights))
    rgba[:, :3] = luminance[:, np.newaxis]
    return LinearSegmentedColormap.from_list(source.name + "_gray", rgba, source.N)
```
```python
# https://stackoverflow.com/questions/32370281/how-to-embed-image-or-picture-in-jupyter-notebook-either-from-a-local-machine-o
# Could also be useful https://eltos.github.io/gradient/#FFFFFF-A5A5A5-000000-A5A5A5-FFFFFF
index_num = 3
galaxy_info = norms_df.iloc[index_num]
# iloc returns a series, name returns the name of the row
galaxy_out_name = galaxy_info.name
chi2_nu = galaxy_info[0]
chi2 = galaxy_info[1]
norm_masked_residual = galaxy_info[2]
norm_galfit_residual = galaxy_info[3]
masked_residual = galaxy_info[-1]
print(galaxy_out_name)
print(f"chi^2/nu = {chi2_nu:.2f}")
print(f"chi^2 = {chi2:.2f}")
print(f"Norm masked residual = {norm_masked_residual:.2f}")
print(f"Norm GALFIT residual = {norm_galfit_residual:.2f}")
#galfit_cmap = grayscale_cmap('RdBu')
residual_plot = plt.imshow(np.flipud(masked_residual[:,:]), norm=colors.LogNorm())
residual_plot.set_cmap('Greys')
#residual_plot.set_cmap(galfit_cmap)
cbar = plt.colorbar()
#plt.imshow(residual_plot)
#imgplot = plt.imshow(arr[:, :, 0])
out_str = galaxy_out_name.replace("out.fits", "combined.png").strip()
Image(filename = os.path.join(png_dir, out_str), width=600, height=600)
```
1237668623014035629_out.fits
chi^2/nu = 0.28
chi^2 = 295.57
Norm masked residual = 227.61
Norm GALFIT residual = 227.61

```python
#print(len(test_list), len(norms), 717/809*100)
print(f"Total number of galaxies attempted: {total_gal}")
print(f"Number of Galfit models generated: {len(norms)}")
#print(f"Residuals below arbitrary cutoff: {len(test_list)}")
print()
#print(f"Percent models generated: {100*len(norms)/total_gal:.2f}%")
#print(f"Percent successful below cutoff excluding failures: {100*len(test_list)/len(norms):.2f}%")
#print(f"Percent successful below cutoff including failures: {100*len(test_list)/total_gal:.2f}%")
```
Total number of galaxies attempted: 1000
Number of Galfit models generated: 934
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@galfit@residual_calculator.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/modules/wavelength_cal/src/__init__.py",
"type": "Python"
}
|
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@KPF-Pipeline-master@modules@wavelength_cal@src@__init__.py@.PATH_END.py
|
|
{
"filename": "justdoit.py",
"repo_name": "natashabatalha/PandExo",
"repo_path": "PandExo_extracted/PandExo-master/pandexo/engine/justdoit.py",
"type": "Python"
}
|
import numpy as np
from pandeia.engine.instrument_factory import InstrumentFactory
from .pandexo import wrapper
from .load_modes import SetDefaultModes
import os
import pickle as pkl
from joblib import Parallel, delayed
import multiprocessing
import json
from .exomast import get_target_data
from astroquery.simbad import Simbad
import astropy.units as u
import copy
# Default parallelism for batch runs: one job per local CPU core.
user_cores = multiprocessing.cpu_count()

# Master table of supported instrument modes.  Only the *keys* are consulted
# elsewhere in this module (print_instruments, the 'RUN ALL' branch of
# run_pandexo); the False values are placeholders.
ALL = {"WFC3 G141":False,
       "MIRI LRS":False,
       "NIRISS SOSS":False,
       "NIRSpec G140M":False,
       "NIRSpec G140H":False,
       "NIRSpec G235M":False,
       "NIRSpec G235H":False,
       "NIRSpec G395M":False,
       "NIRSpec G395H":False,
       "NIRSpec Prism":False,
       "NIRCam F322W2":False,
       "NIRCam F444W":False}
def print_instruments(verbose=True):
    """Return the names of all loadable instrument templates.

    Parameters
    ----------
    verbose : bool
        If True (default), also print a short prompt to stdout.

    Returns
    -------
    dict_keys
        View of the supported instrument-mode names.
    """
    if verbose:
        print("Choose from the following:")
    return ALL.keys()
def getStarName(planet_name):
    """
    Given a string with a (supposed) planet name, return the host star name.

    Examples
    --------
    - 'HATS-5b'     -> 'HATS-5'
    - 'Kepler-12Ab' -> 'Kepler-12A'
    - 'HAT-P-1'     -> 'HAT-P-1'  (already a star name)
    - 'HAT-P-1 '    -> 'HAT-P-1'  (surrounding whitespace trimmed)
    """
    star_name = planet_name.strip()
    # BUG FIX: guard against empty/whitespace-only input, which previously
    # raised IndexError on star_name[-1].
    if not star_name:
        return star_name
    # A trailing lowercase letter is taken to be the planet designation
    # (the 'b' in 'HATS-5b'); drop it to recover the host-star name.
    if str.isalpha(star_name[-1]) and star_name[-1] == star_name[-1].lower():
        star_name = star_name[:-1]
    # Return trimmed string:
    return star_name.strip()
# NOTE(review): pl_kwargs={} is a mutable default argument; it is only read
# (never mutated) here, but pl_kwargs=None with an internal default is safer.
def load_exo_dict(planet_name=None,pl_kwargs={}):
    """Loads in empty exoplanet dictionary for pandexo input

    Loads in empty exoplanet dictionary so that the user can manually edit different planet
    parameters. Must be done for every bash run. User must edit each keys within the dictionary.

    Parameters
    ----------
    planet_name : str
        (Optional) Planet name e.g. 'HD 189733 b' or 'HD189733b'
    pl_kwargs : dict
        (Optional) : dict
        if you get an error that there is a missing field you can enter it in dictionary form using this
        e.g. pl_kwargs = {"Jmag":7}

    Returns
    -------
    dict
        Empty dictionary to be filled out by the user before running PandExo

    Example
    -------
    >>> exo_dict = load_exo_dict()
    >>> exo_dict['planet']['transit_duration'] = 2*60*60 #2 hours
    """
    # Start from the packaged empty template.
    with open(os.path.join(os.path.dirname(__file__), "reference",
                           "exo_input.json")) as data_file:
        pandexo_input = json.load(data_file)

    # If a planet name was supplied, pre-fill the template from Exo.MAST
    # (network call) and SIMBAD photometry.
    if not isinstance(planet_name, type(None)):
        planet_data = get_target_data(planet_name)[0]
        pandexo_input['star']['type'] = 'phoenix'
        pandexo_input['star']['temp'] = planet_data['Teff']
        pandexo_input['star']['metal'] = planet_data['Fe/H']
        pandexo_input['star']['logg'] = planet_data['stellar_gravity']

        # Ask SIMBAD for J and H photometry of the host star.
        Simbad.add_votable_fields('flux(H)')
        Simbad.add_votable_fields('flux(J)')
        star_name = getStarName(planet_name)

        # J magnitude: Exo.MAST first, then SIMBAD, then user-supplied kwargs.
        if 'Jmag' in planet_data.keys():
            jmag = planet_data['Jmag']
        else:
            try:
                jmag = Simbad.query_object(star_name)['FLUX_J'][0]
                #removing for how as blind str cutoffs are bug prone
                #if np.ma.is_masked(jmag):
                #    # Remove 'A' from star_name for systems with binary stars (e.g., WASP-77A)
                #    star_name = star_name[:-1]
                #    jmag = Simbad.query_object(star_name)['FLUX_J'][0]
            except:
                jmag = pl_kwargs.get('Jmag',0)
                if jmag==0:
                    raise Exception("Uh oh. Exo.MAST/simbad do not have a Jmag. Please enter it with pl_kwargs. E.g. pl_wargs={'Jmag':8} ")

        # H magnitude: same fallback chain as J.
        if 'Hmag' in planet_data.keys():
            hmag = planet_data['Hmag']
        else:
            try:
                hmag = Simbad.query_object(star_name)['FLUX_H'][0]
                #removing for how as blind str cutoffs are bug prone
                #if np.ma.is_masked(hmag):
                #    # Remove 'A' from star_name for systems with binary stars (e.g., WASP-77A)
                #    star_name = star_name[:-1]
                #    hmag = Simbad.query_object(star_name)['FLUX_H'][0]
            except:
                hmag = pl_kwargs.get('Hmag',0)
                if hmag==0:
                    raise Exception("Uh oh. Exo.MAST/simbad do not have a Hmag. Please enter it with pl_kwargs. E.g. pl_wargs={'Hmag':8} ")

        # Reference magnitude is J band (1.25 micron).
        pandexo_input["star"]["mag"] = jmag
        pandexo_input["star"]["ref_wave"] = 1.25
        pandexo_input["star"]["jmag"] = jmag
        pandexo_input["star"]["hmag"] = hmag
        # optional star radius; unit string normalized to e.g. 'R_sun' casing
        pandexo_input["star"]["radius"] = planet_data['Rs']
        pandexo_input["star"]["r_unit"] = planet_data['Rs_unit'][0]+ planet_data['Rs_unit'][1:].lower()
        # optional planet radius/mass
        pandexo_input["planet"]["radius"] = planet_data['Rp']
        pandexo_input["planet"]["r_unit"] = planet_data['Rp_unit'][0]+ planet_data['Rp_unit'][1:].lower()
        try:
            pandexo_input["planet"]["mass"] = planet_data['Mp']
            pandexo_input["planet"]["m_unit"] = planet_data['Mp_unit'][0]+ planet_data['Mp_unit'][1:].lower()
        except:
            # NOTE(review): the message says pl_kwargs={'Mp':...,'Mp_unit':...}
            # but the lookups below use keys 'mass'/'m_unit' -- inconsistent.
            print("No mass found. Setting mass to np.nan. Mass is only required for model grids. If you want to enter one please enter it with pl_kwargs. E.g. pl_wargs={'Mp':1,'Mp_unit':'M_jupiter'}")
            pandexo_input["planet"]["mass"] = pl_kwargs.get('mass',np.nan)
            pandexo_input["planet"]["m_unit"] = pl_kwargs.get('m_unit',np.nan)

        pandexo_input["planet"]["transit_duration"] = planet_data['transit_duration']
        pandexo_input["planet"]["td_unit"] = planet_data['transit_duration_unit']

        # Transit depth = (Rp/Rs)^2, with Rs converted into Rp's unit first.
        depth = pandexo_input["planet"]["radius"]**2 / ((pandexo_input["star"]["radius"]
                *u.Unit(pandexo_input["star"]["r_unit"]) )
                .to(u.Unit(pandexo_input["planet"]["r_unit"]))).value**2
        pandexo_input["planet"]["depth"] = depth

        # Default to an edge-on orbit when inclination is unknown.
        if planet_data['inclination'] == None:
            inc = 90
        else:
            inc = planet_data['inclination']
        pandexo_input["planet"]["i"] = inc
        pandexo_input["planet"]["ars"] = planet_data['a/Rs']
        # Normalize the orbital period to days.
        period = planet_data['orbital_period']
        period_unit = planet_data['orbital_period_unit']
        pandexo_input["planet"]["period"] = (period*u.Unit(period_unit)).to(u.Unit('day')).value
        pandexo_input["planet"]["ecc"] = planet_data['eccentricity']
        # NOTE(review): duplicated assignment (same line twice) -- harmless.
        pandexo_input["planet"]["ecc"] = planet_data['eccentricity']
        # Argument of periastron; default to 90 deg if missing/unparseable.
        try:
            pandexo_input["planet"]["w"] = float(planet_data['omega'] )
        except:
            pandexo_input["planet"]["w"] = 90.
    return pandexo_input
def load_mode_dict(inst):
    """Return a filled-in instrument-dictionary template for one mode.

    Instrument counterpart to `load_exo_dict`: loads the default template
    for the requested instrument mode.  The result may be edited before
    being passed to PandExo, but editing is not required.

    Parameters
    ----------
    inst : str
        One of the allowable instrument keys; see `print_instruments()`
        for the available modes.

    Returns
    -------
    dict
        Filled-out instrument dictionary template.

    Example
    -------
    >>> inst_dict = load_mode_dict('MIRI LRS')
    >>> inst_dict['configuration']['instrument']['aperture'] = 'lrsslit'
    """
    template = SetDefaultModes(inst)
    return template.pick()
def get_thruput(inst, niriss=1, nirspec='f100lp', wmin='default', wmax='default'):
    """Returns complete instrument photon to electron conversion efficiency

    Pulls complete instrument photon to electron conversion efficiency
    (PCE) based on instrument key input

    Parameters
    ----------
    inst : str
        One of the instrument keys in `print_instruments`
    niriss : int
        (Optional) defines which niriss order you want (1 or 2)
    nirspec : str
        (Optional) for NIRSpec G140M/H there are two available filters
        (f100lp and f070lp); if you are selecting G140M or G140H, this
        allows you to pick which one
    wmin : str / float
        (Optional) minimum wavelength to compute PCE across, 'default' will
        use values from Pandeia.
    wmax : str / float
        (Optional) maximum wavelength to compute PCE across, 'default' will
        use values from Pandeia.

    Returns
    -------
    dict
        Dictionary with wave solution and PCE

    Example
    -------
    >>> thru_dict = get_thruput('NIRISS SOSS_Or1')
    """
    # pull correct default configuration for this mode
    input_dict = SetDefaultModes(inst).pick()
    conf = input_dict['configuration']
    conf['detector']['ngroup'] = 2

    # Mode-specific configuration tweaks before querying Pandeia.
    inst_name = conf['instrument']['instrument'].lower()
    if inst_name == 'niriss':
        # NIRISS SOSS orders are encoded as a disperser suffix, e.g. 'gr700xd_1'.
        conf["instrument"]["disperser"] = conf["instrument"]["disperser"] + '_' + str(niriss)
    elif inst_name == 'nirspec' and ('g140' in conf["instrument"]["disperser"]):
        # G140M/H supports two blocking filters; honor the caller's choice.
        conf["instrument"]["filter"] = nirspec

    # BUG FIX: this computation previously ran only inside the NIRISS and
    # NIRSpec-G140 branches, so every other instrument (MIRI, NIRCam, other
    # NIRSpec gratings) silently returned None despite the documented dict.
    i = InstrumentFactory(config=conf)
    wr = i.get_wave_range()
    if wmin == 'default':
        wmin = wr['wmin']
    if wmax == 'default':
        wmax = wr['wmax']
    wave = np.linspace(wmin, wmax, num=500)
    pce = i.get_total_eff(wave)
    return {'wave':wave,'pce':pce}
def run_param_space(i,exo,inst,param_space, verbose=False):
    """Changes exo dictionary and submits run

    This function is used to reset the exo dictionary based on what parameter
    is being looped over and submits run to `wrapper` so that all the jobs
    are run in parallel

    Parameters
    ----------
    i : str or float
        Can be either a str or float based on what you are looping through (str for
        filenames, float for stellar temps, float for magnitudes, etc)
    exo : dict
        Exoplanet dictionary which can be loaded in and editted through `load_exo_dict`
    inst : str
        Key which indicates with instrument
    param_space : str
        Set of keys within exo_dict to indicate which parameter to loop through.
        Should be in the format of "first level of dict"+"second level of dict".
        For example, for stellar temp `param_space` would be "star+temp"
    verbose : bool
        (Optional) prints out checkpoints. Assuming the user does not want a load of
        print statements for parallel runs

    Returns
    -------
    dict
        Dictionary with output of pandexo. Key is the value of the parameter that was
        looped through.
    """
    #break up parameter space to two separate dictionary keys
    # e.g. "star+temp" -> key1 = "star", key2 = "temp"
    # NOTE(review): assumes '+' is present; a param_space without '+' makes
    # find() return -1 and silently produces nonsense keys instead of raising.
    key1 = param_space[0:param_space.find('+')]
    key2 = param_space[param_space.find('+')+1:len(param_space)]
    # Mutates the caller's exo dict with this iteration's value.
    exo[key1][key2] = i
    #load in correct dict format
    inst_dict = load_mode_dict(inst)
    # Use only the basename so file-path parameter values give readable keys.
    name = os.path.split(str(i))[1]
    return {name: wrapper({"pandeia_input": inst_dict , "pandexo_input":exo}, verbose=verbose)}
def run_inst_space(inst, exo, verbose=False):
    """Build the instrument dictionary for one mode and submit a PandExo run.

    Parameters
    ----------
    inst : str
        Instrument-mode key (see `print_instruments()`).
    exo : dict
        Exoplanet dictionary, typically produced/edited via `load_exo_dict`.
    verbose : bool
        (Optional) print checkpoints; defaults to False so parallel runs
        stay quiet.

    Returns
    -------
    dict
        Single-entry dict mapping the instrument key to the PandExo output.
    """
    # Load the default template for this mode, then hand both dicts to the
    # PandExo engine.
    pandeia_input = load_mode_dict(inst)
    payload = {"pandeia_input": pandeia_input, "pandexo_input": exo}
    return {inst: wrapper(payload, verbose=verbose)}
# NOTE(review): output_path=os.getcwd() is evaluated once at import time,
# not per call -- callers launched from a different directory may be surprised.
def run_pandexo(exo, inst, param_space = 0, param_range = 0,save_file = True,
                output_path=os.getcwd(), output_file = '',num_cores=user_cores, verbose=True):
    """Submits multiple runs of pandexo in parallel.

    Functionality: program contains functionality for running single or
    multiple runs of PandExo

    Parameters
    ----------
    exo : dict
        exoplanet input dictionary
    inst : dict or str or list of str
        instrument input dictionary OR LIST of keys (for allowable keys see `print_instruments()`
    param_space : str or 0
        (Optional) Default is 0 = no exoplanet parameter space. To run through a parameter
        specify which one need to specify two keys from exo dict with + in between.
        i.e. observation+fraction
        star+temp
        planet+exopath
    param_range : list of str or list of float
        (Optional) Default = 0 An array or list over which to run the parameters space.
        i.e. array of temperatures if running through stellar temp or
        array of files if running through planet models. Must specify param_space
        if using this.
    save_file : bool
        (Optional) Default = True saves file, False does not
    output_path : str
        (Optional) Defaults to current working directory
    output_file : str
        (Optional) Default is "singlerun.p" for single runs, "param_space.p" for exo parameter runs
        or "instrument_run.p" for instrument parameter space runs.
    verbose : bool
        (Optional) For single runs, if false, it turns off all print statements. For parameter space
        runs it is defaulted to never print statements out.

    Returns
    -------
    dict
        For single run output will just be a single PandExo output dictionary
        https://github.com/natashabatalha/PandExo/wiki/PandExo-Output
        For multiple runs the output will be organized into a list with each
        a dictionary named by whatever you are looping through
        i.e. [{'First temp': PandExoDict}, {'Second temp': PandExoDict}, etc..]

    Example
    -------
    For single run:
    >>> a = run_pandexo(exo_dict, ['MIRI LRS'])
    For multiple instruments:
    >>> a = run_pandexo(exo_dict, ['MIRI LRS','NIRSpec G395H'])
    Loop through a exoplanet parameter (stellar magnitude):
    >>> a = run_pandexo(exo_dict, ['NIRSpec G395M'],
            param_space ='star+mag',param_range = np.linspace(6,10,5))
    """
    #single instrument mode with dictionary input OR single planet
    if type(inst) == dict:
        if verbose: print("Running Single Case w/ User Instrument Dict")
        results =wrapper({"pandeia_input": inst , "pandexo_input":exo}, verbose=verbose)
        if output_file == '':
            output_file = 'singlerun.p'
        # NOTE(review): the file handle opened inline for pkl.dump is never
        # explicitly closed (here and in every save below).
        if save_file: pkl.dump(results, open(os.path.join(output_path,output_file),'wb'))
        return results

    #make sure inst is in list format.. makes my life so much easier
    # NOTE(review): raise/except in the same block is an antipattern; a plain
    # `if type(inst) != list: print(...); return` would be equivalent.
    try:
        if type(inst) != list:
            raise ValueError
    except ValueError:
        print('Instrument input is not dict so must be list')
        print('Enter in format ["NIRSpec G140M"] or ["NIRISS SOSS","MIRI LRS"]')
        return

    #single instrument mode and single planet OR several planets
    if len(inst)==1 and inst[0] != 'RUN ALL':
        #start case of no parameter space run
        # (param_space/param_range left at their 0 defaults => single run)
        if isinstance(param_space, (float, int)) or isinstance(param_range, (float, int)):
            if verbose: print("Running Single Case for: " + inst[0])
            inst_dict = load_mode_dict(inst[0])
            results =wrapper({"pandeia_input": inst_dict , "pandexo_input":exo}, verbose=verbose)
            if output_file == '':
                output_file = 'singlerun.p'
            if save_file: pkl.dump(results, open(os.path.join(output_path,output_file),'wb'))
            return results
        #if there are parameters to cycle through this will run
        if verbose: print("Running through exo parameters in parallel: " + param_space)
        #run the above function in parallel
        results = Parallel(n_jobs=num_cores)(delayed(run_param_space)(i,exo,inst[0],param_space) for i in param_range)
        #Default dump all results [an array of dictionaries] into single file
        #and return results immediately to user
        if output_file == '':
            output_file = param_space + '.p'
        if save_file: pkl.dump(results, open(os.path.join(output_path,output_file),'wb'))
        return results

    #run several different instrument modes and single planet
    # NOTE(review): this prints even when the 'RUN ALL' branch is taken below.
    if verbose: print("Running select instruments")
    if len(inst)>1:
        results = Parallel(n_jobs=num_cores)(delayed(run_inst_space)(i, exo) for i in inst)
        #Default dump all results [an array of dictionaries] into single file
        #and return results immediately to user
        if output_file == '':
            output_file = 'instrument_run.p'
        if save_file: pkl.dump(results, open(os.path.join(output_path,output_file),'wb'))
        return results
    #cycle through all options
    elif inst[0].lower() == 'run all':
        if verbose: print("Running through all instruments")
        results = Parallel(n_jobs=num_cores)(delayed(run_inst_space)(i, exo) for i in ALL.keys())
        #Default dump all results [an array of dictionaries] into single file
        #and return results immediately to user
        if output_file == '':
            output_file = 'instrument_run.p'
        if save_file: pkl.dump(results, open(os.path.join(output_path,output_file),'wb'))
        return results
def subarrays(inst):
    """Return the available subarrays and their frame times (in seconds).

    Parameters
    ----------
    inst : str
        One of 'niriss', 'nirspec', 'miri' or 'nircam' (case-insensitive).

    Returns
    -------
    dict
        Subarray name -> frame time in seconds.

    Raises
    ------
    Exception
        If *inst* is not one of the four supported instruments.
    """
    print("Subarray field stored in inst_dict['configuration']['detector']['subarray']")
    frame_times = {
        'niriss': {'substrip96': 2.2129, 'substrip256': 5.4913},
        'nirspec': {'sub1024a': 0.451, 'sub1024b': 0.451, 'sub2048': 0.90156,
                    'sub512': 0.22572, 'sub512s': 0.14392},
        'miri': {'slitlessprism': 0.159},
        'nircam': {"subgrism64": 0.34, "subgrism128": 0.67, "subgrism256": 1.34,
                   "subgrism64 (noutputs=1)": 1.3, "subgrism128 (noutputs=1)": 2.6,
                   "subgrism256 (noutputs=1)": 5.2},
    }
    try:
        return frame_times[inst.lower()]
    except KeyError:
        raise Exception("Only instruments are niriss, nirspec, miri, nircam. Pick one.")
def dispersers(inst):
    """Return the available dispersers for an instrument.

    Parameters
    ----------
    inst : str
        One of 'niriss', 'nirspec', 'miri' or 'nircam' (case-insensitive).

    Returns
    -------
    list
        Available disperser names.

    Raises
    ------
    Exception
        If *inst* is not one of the four supported instruments.
    """
    print("Dispersers field stored in inst_dict['configuration']['instrument']['disperser']")
    options = {
        'niriss': ['gr700xd'],
        'nirspec': ['g140m','g140h','g235m','g235h','g395m','g395h','prism'],
        'miri': ['p750l'],
        'nircam': ['grismr'],
    }
    try:
        return options[inst.lower()]
    except KeyError:
        raise Exception("Only instruments are niriss, nirspec, miri, nircam. Pick one.")
def filters(inst):
    """Return the available filters for an instrument.

    Parameters
    ----------
    inst : str
        One of 'niriss', 'nirspec', 'miri' or 'nircam' (case-insensitive).

    Returns
    -------
    list
        Available filter names; ``[None]`` for MIRI LRS, which has none.

    Raises
    ------
    Exception
        If *inst* is not one of the four supported instruments.
    """
    print("Filters field stored in inst_dict['configuration']['instrument']['filter']")
    key = inst.lower()
    if key == 'niriss':
        return ["clear","f277w"]
    if key == 'nirspec':
        return ['f070lp','f100lp','f170lp','f290lp','clear']
    if key == 'miri':
        # MIRI LRS has no filter wheel choice; signal "no filter" explicitly.
        print("No filters for miri lrs, type None, or null in filter field")
        return [None]
    if key == 'nircam':
        return ['f322w2','f444w']
    raise Exception("Only instruments are niriss, nirspec, miri, nircam. Pick one.")
def grid_options(grid = 'fortney'):
    """Show available model-grid options.

    PandExo supports various grid options.  Currently, the only one that is
    available is the Fortney grid for giant planets.  Others will be
    implemented as they become available, at which point this function
    should guide users toward the grid that best fits their needs.

    Parameters
    ----------
    grid : str
        (Optional) grid name; currently only 'fortney'.
    """
    # Placeholder: no grid metadata is reported yet.
    return
|
natashabatalhaREPO_NAMEPandExoPATH_START.@PandExo_extracted@PandExo-master@pandexo@engine@justdoit.py@.PATH_END.py
|
{
"filename": "stash.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pytest/py3/_pytest/stash.py",
"type": "Python"
}
|
from typing import Any
from typing import cast
from typing import Dict
from typing import Generic
from typing import TypeVar
from typing import Union
__all__ = ["Stash", "StashKey"]
T = TypeVar("T")
D = TypeVar("D")
class StashKey(Generic[T]):
    """``StashKey`` is an object used as a key to a :class:`Stash`.

    A ``StashKey`` is associated with the type ``T`` of the value of the key.

    A ``StashKey`` is unique and cannot conflict with another key.
    """

    # No per-instance state: object identity alone distinguishes keys,
    # and __slots__ keeps instances as small as possible.
    __slots__ = ()
class Stash:
    r"""A type-safe heterogeneous mutable mapping.

    Keys are :class:`StashKey` instances, created (usually at module level)
    by whichever plugin or module owns the data, so key and value types can
    be declared away from where the ``Stash`` itself lives::

        some_str_key = StashKey[str]()   # declared by the owning plugin
        stash[some_str_key] = "value"    # store; value type checked statically
        some_str = stash[some_str_key]   # retrieve; static type is str

    Distinct keys never collide because each ``StashKey`` is compared by
    identity.
    """

    __slots__ = ("_storage",)

    def __init__(self) -> None:
        # Single plain dict underneath; all type guarantees live in the keys.
        self._storage: Dict[StashKey[Any], object] = {}

    def __setitem__(self, key: StashKey[T], value: T) -> None:
        """Set a value for key."""
        self._storage[key] = value

    def __getitem__(self, key: StashKey[T]) -> T:
        """Get the value for key, raising ``KeyError`` if it was never set."""
        return cast(T, self._storage[key])

    def get(self, key: StashKey[T], default: D) -> Union[T, D]:
        """Get the value for key, or ``default`` if the key wasn't set before."""
        if key in self._storage:
            return cast(T, self._storage[key])
        return default

    def setdefault(self, key: StashKey[T], default: T) -> T:
        """Return the value of key if already set, otherwise set it to
        ``default`` and return ``default``."""
        return cast(T, self._storage.setdefault(key, default))

    def __delitem__(self, key: StashKey[T]) -> None:
        """Delete the value for key, raising ``KeyError`` if it was never set."""
        del self._storage[key]

    def __contains__(self, key: StashKey[T]) -> bool:
        """Return whether key was set."""
        return key in self._storage

    def __len__(self) -> int:
        """Return how many items exist in the stash."""
        return len(self._storage)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pytest@py3@_pytest@stash.py@.PATH_END.py
|
{
"filename": "imaging_with_wsclean.py",
"repo_name": "lucatelli/morphen",
"repo_path": "morphen_extracted/morphen-main/selfcal/imaging_with_wsclean.py",
"type": "Python"
}
|
import pandas as pd
import numpy as np
import argparse
import os
import glob
import time
def imaging(g_name, field, uvtaper, robust, base_name='clean_image',
            continue_clean='False',nc=4):
    """Run WSClean on ``<g_name>.ms`` for one robust/taper combination.

    Builds the wsclean command line from many module-level globals set in
    ``__main__`` (imsizex, imsizey, cell, niter, weighting, deconvolver*,
    gain_args, opt_args, root_dir, root_dir_sys, running_container, ...) and
    executes it via ``os.system``, either natively or through Singularity.

    Parameters
    ----------
    g_name : str
        Basename of the measurement set (without ``.ms``).
    field : str
        Field name (currently unused inside this function).
    uvtaper : str
        Gaussian-taper value, or '' for no taper.
    robust : str
        Briggs robust value, passed as a string.
    base_name : str
        Prefix for the output image names.
    continue_clean : str
        'True'/'False' flag (a string, not a bool) to resume a previous run.
    nc : int
        Number of output channels; also feeds the MPI process count (but see
        the NOTE below — it is currently ignored there).

    Returns
    -------
    dict or None
        Minimal bookkeeping dict with the output image name, or None when
        imaging was skipped because the image already exists.
    """
    g_vis = g_name + '.ms'
    """
    # uvtaper_mode+uvtaper_args+'.'+uvtaper_addmode+uvtaper_addargs+
    """
    print(uvtaper_addmode, uvtaper_addargs, robust)
    if uvtaper != '':
        taper = 'taper_'
    else:
        taper = ''
    if continue_clean == 'True':
        print('*************************************')
        # Resume from the most recently modified MFS image in the output dir.
        image_to_continue = glob.glob(f"{root_dir_sys}/*MFS-image.fits")
        image_to_continue.sort(key=os.path.getmtime, reverse=False)
        image_to_continue = os.path.basename(image_to_continue[-1])
        image_deepclean_name = image_to_continue.replace('-MFS-image.fits','')
        print('Using prefix from previous image: ', image_deepclean_name)
    if continue_clean == 'False':
        # Fresh run: encode the imaging parameters into the output prefix.
        image_deepclean_name = (base_name + '_' + g_name + '_' +
                                imsizex + 'x' + imsizey + '_' + \
                                cell + '_' + niter + '.' + weighting + '.' + \
                                deconvolver[1:] + '.' + taper + \
                                uvtaper + '.' + str(robust))
    ext = ''
    if '-join-channels' in deconvolver_args:
        print('Using mtmfs method.')
        ext = ext + '-MFS'
    ext = ext + '-image.fits'
    print(image_deepclean_name)
    if not os.path.exists(root_dir_sys + image_deepclean_name + ext) or continue_clean == 'True':
        # NOTE(review): both branches assign 4, so the `nc` argument is
        # effectively ignored here; the else branch presumably was meant to
        # be `_nc = nc` — confirm with the author before changing.
        if nc < 4:
            _nc = 4
        else:
            _nc = 4
        if running_container == 'native':
            # 'mpirun -np 4 wsclean-mp'
            command_exec = (
                'mpirun -np '+str(_nc)+' wsclean-mp -name ' + root_dir + image_deepclean_name +
                ' -size ' + imsizex + ' ' + imsizey + ' -scale ' + cell +
                ' ' + gain_args + ' -niter ' + niter + ' -weight ' + weighting +
                ' ' + robust + ' ' + auto_mask + ' ' + auto_threshold + mask_file +
                ' ' + deconvolver + ' ' + deconvolver_options +
                ' ' + deconvolver_args + ' ' + taper_mode + uvtaper +
                ' ' + opt_args + ' ' + data_column + ' ' + root_dir + g_vis)
            print(' ++==>> Command to be executed by WSClean: ')
            print(command_exec)
            os.system(command_exec)
        if running_container == 'singularity':
            # Same command, wrapped in `singularity exec` with the output
            # directory bind-mounted at /mnt.
            command_exec = (
                'singularity exec --nv --bind ' + mount_dir + ' ' + wsclean_dir +
                # ' ' + 'wsclean -name ' + root_dir +
                ' ' + 'mpirun -np ' + str(_nc) + ' wsclean-mp -name ' + root_dir +
                # ' ' + 'mpirun --use-hwthread-cpus wsclean-mp -name ' + root_dir +
                image_deepclean_name +
                ' -size ' + imsizex + ' ' + imsizey + ' -scale ' + cell +
                ' ' + gain_args + ' -niter ' + niter + ' -weight ' + weighting +
                ' ' + robust + ' ' + auto_mask + ' ' + auto_threshold + mask_file +
                ' ' + deconvolver + ' ' + deconvolver_options +
                ' ' + deconvolver_args + ' ' + taper_mode + uvtaper +
                ' ' + opt_args + ' ' + data_column + ' ' + root_dir + g_vis)
            print(' ++==>> Command to be executed by Singularity > WSClean: ')
            print(command_exec)
            os.system(command_exec)
        image_stats = {
            "#basename": image_deepclean_name + ext}  # get_image_statistics(image_deep_selfcal + ext)
        image_stats['imagename'] = image_deepclean_name + ext
        '''
        save dictionary to file
        '''
        return (image_stats)
    else:
        print('Skipping imaging; already done.')
        return (None)
        # pass
def parse_float_list(str_values):
    """Parse a comma-separated (optionally ``[bracketed]``) string into a
    list of floats, e.g. ``"[0.5, 1.0]"`` -> ``[0.5, 1.0]``."""
    tokens = str_values.strip('[]').split(',')
    return [float(token.strip()) for token in tokens]
def parse_str_list(str_values):
    """Parse a comma-separated (optionally ``[bracketed]``) string into a
    list of whitespace-stripped substrings."""
    tokens = str_values.strip('[]').split(',')
    return [token.strip() for token in tokens]
if __name__ == "__main__":
    # Command-line interface. Most options are kept as strings and spliced
    # verbatim into the wsclean command line built below.
    parser = argparse.ArgumentParser(description="Helper for wsclean imaging.")
    parser.add_argument("--p", type=str, help="The path to the MS file.")
    # NOTE(review): --f defaults to False but is later used as a path string
    # (os.path.basename(args.f)) — confirm callers always pass a value.
    parser.add_argument("--f", nargs='?', default=False,
                        const=True, help="The name of the ms file")
    parser.add_argument("--r",
                        type=parse_float_list, nargs='?',
                        const=True, default=[0.5],
                        help="List of robust values")
    parser.add_argument("--t", type=parse_str_list, nargs='?',
                        const=True, default=[''],
                        help="List of sky-tapers values")
    parser.add_argument("--data", type=str, nargs='?', default='DATA', # 'CORRECTED_DATA'
                        help="Which data column to use")
    parser.add_argument("--wsclean_install", type=str, nargs='?', default='singularity',
                        help="How wsclean was installed (singularity or native)?")
    # To do: add option for wsclean singularity image path.
    parser.add_argument("--update_model", type=str, nargs='?', default='False',
                        help="Update model after cleaning?")
    parser.add_argument("--deconvolution_mode", type=str, nargs='?', default='good',
                        help="This is not a wsclean parameter. "
                             "If 'good' will use a proper set of wsclean arguments to perform a "
                             "good deconvolution, but slower. If 'fast' will not use complex "
                             "deconvolution, "
                             "e.g. no multi-scale, no MFS, etc. This is not intended for final "
                             "science data. It is intended for quick-look data.")
    parser.add_argument("--with_multiscale", type=str, nargs='?', default='False',
                        help="Use multiscale deconvolver?")
    parser.add_argument("--shift", type=str, nargs='?', default=None,
                        help="New phase center to shift for imaging."
                             "Eg. --shift 13:15:30.68 +62.07.45.357")
    parser.add_argument("--scales", type=str, nargs='?', default="None",
                        help="Scales to be used with the multiscale deconvolver in WSClean. "
                             "If None, scales will be determined automatically by WSClean.")
    parser.add_argument("--sx", type=str, nargs='?', default='2048',
                        help="Image Size x-axis")
    parser.add_argument("--sy", type=str, nargs='?', default='2048',
                        help="Image Size y-axis")
    parser.add_argument("--cellsize", type=str, nargs='?', default='0.05asec',
                        help="Cell size")
    parser.add_argument("--niter", type=str, nargs='?', default='50000',
                        help="Number of iterations during cleaning.")
    parser.add_argument("--maxuv_l", type=str, nargs='?', default=None,
                        help="Max uv distance in lambda.")
    parser.add_argument("--minuv_l", type=str, nargs='?', default=None,
                        help="Min uv distance in lambda.")
    parser.add_argument("--nsigma_automask", type=str, nargs='?', default='3.0',
                        help="Sigma level for automasking in wsclean.")
    parser.add_argument("--nsigma_autothreshold", type=str, nargs='?', default='1.5',
                        help="Sigma level for autothreshold in wsclean.")
    parser.add_argument("--mask", nargs='?', default=None,
                        const=True, help="A fits-file mask to be used.")
    parser.add_argument("--nc", type=int, nargs='?', default=4,
                        help="Number of channels division to be used in "
                             "the MFS deconvolution.")
    # NOTE(review): the help text below looks copy-pasted from --quiet;
    # --negative_arg actually selects the wsclean -negative/-no-negative flag.
    parser.add_argument("--negative_arg", type=str, nargs='?', default='negative',
                        help="Print wsclean output?")
    parser.add_argument("--quiet", type=str, nargs='?', default='False',
                        help="Print wsclean output?")
    parser.add_argument("--continue_clean", type=str, nargs='?', default='False',
                        help="Continue cleaning?")
    parser.add_argument("--opt_args", type=str, nargs='?', default='',
                        help="Optional/additional arguments to be passed to wsclean. "
                             "Warning: Do not repeat previously defined arguments."
                             "Example: ' -apply-facet-beam -dd-psf-grid 6 6 -facet-beam-update 60 '")
    parser.add_argument("--save_basename", type=str, nargs='?', default='image',
                        help="optional basename for saving image files.")
    args = parser.parse_args()
    # Translate the string-valued flags into wsclean option fragments and
    # derive the working paths from the MS location.
    if args.update_model == 'True':
        update_model_option = ' -update-model-required '
    else:
        update_model_option = ' -no-update-model-required '
    running_container = args.wsclean_install
    if running_container == 'native':
        # NOTE(review): `export` inside os.system only affects the child
        # shell, not this process or later os.system calls — probably
        # os.environ['OPENBLAS_NUM_THREADS'] = '1' was intended; confirm.
        os.system('export OPENBLAS_NUM_THREADS=1')
    # for i in range(len(image_list)):
    field = os.path.basename(args.f).replace('.ms', '')
    g_name = field
    root_dir_sys = os.path.dirname(args.f) + '/'
    robusts = args.r
    tapers = args.t
    if running_container == 'singularity':
        # The MS directory is bind-mounted at /mnt inside the container.
        mount_dir = root_dir_sys + ':/mnt'
        root_dir = '/mnt/'
        # wsclean_dir = '/home/sagauga/apps/wsclean_wg_eb.simg'
        # wsclean_dir = '/media/sagauga/xfs_evo/morphen_gpu_v2.simg'
        # Hard-coded container image path; alternatives kept for reference.
        wsclean_dir = '/media/sagauga/xfs_evo/morphen_stable_cpu_v2.simg'
        # wsclean_dir = '/media/sagauga/xfs_evo/wsclean3.4-idg-everybeam-eMERLIN_portable.sif'
        # wsclean_dir = '/mnt/scratch/lucatelli/apps/morphen_test.simg/morphen_test.simg'
        # wsclean_dir = '/media/sagauga/xfs_evo/morphen_stable_v1.simg'
        # wsclean_dir = '/media/sagauga/xfs_evo/morphen_gpu_v2.simg'
        # wsclean_dir = '/home/sagauga/apps/wsclean_nvidia470_gpu.simg'
        # wsclean_dir = '/raid1/scratch/lucatelli/apps/wsclean_wg_eb.simg'
        # wsclean_dir = '/raid1/scratch/lucatelli/apps/morphen_test.simg'
    if running_container == 'native':
        mount_dir = ''
        root_dir = root_dir_sys
        os.system('export OPENBLAS_NUM_THREADS=1')
## Setting image and deconvolution noptions.
### Cleaning arguments
auto_mask = ' -auto-mask ' + args.nsigma_automask
# auto_mask = ' '
auto_threshold = ' -auto-threshold ' + args.nsigma_autothreshold
if args.mask == 'None' or args.mask == None:
mask_file = ' '
else:
# if args.mask != 'None' or args.mask != None:
if running_container == 'native':
mask_file = ' -fits-mask ' + args.mask + ' '
if running_container == 'singularity':
mask_file = ' -fits-mask ' + root_dir+os.path.basename(args.mask) + ' '
# data to run deconvolution
data_column = ' -data-column ' + args.data
with_multiscale = args.with_multiscale
### Selecting the deconvolver
# deconvolution_mode = 'good'
if args.deconvolution_mode == 'good':
if with_multiscale == True or with_multiscale == 'True':
deconvolver = '-multiscale'
deconvolver_options = ' -multiscale-scale-bias 0.7 -multiscale-gain 0.05 '
if (args.scales is None) or (args.scales == 'None'):
deconvolver_options = deconvolver_options + ' -multiscale-max-scales 6 '
else:
deconvolver_options = (deconvolver_options + ' -multiscale-scales ' + args.scales + ' ')
else:
deconvolver = ' '
deconvolver_options = (' ')
# deconvolver_options = ('-multiscale-max-scales 5 -multiscale-scale-bias 0.5 ')
nc = args.nc
    negative_arg = '-'+args.negative_arg
    # Default ('good' mode) deconvolution arguments: MFS with joined
    # channels; overwritten below when --deconvolution_mode is 'fast'.
    deconvolver_args = (' '
                        # '-channel-division-frequencies 4.0e9,4.5e9,5.0e9,5.5e9,'
                        # '29e9,31e9,33e9,35e9 ' #-gap-channel-division
                        '-deconvolution-threads 16 -j 16 -parallel-reordering 16 '
                        '-parallel-deconvolution 1024 '
                        # '-weighting-rank-filter 3 -weighting-rank-filter-size 128 '
                        # '-save-weights -local-rms -local-rms-window 50 '
                        '-gridder wgridder -wstack-nwlayers-factor 3 -wgridder-accuracy 1e-7 '
                        '' #-beam-fitting-size 0.7 -no-negative
                        # '-no-negative ' #
                        '-circular-beam ' #
                        # ' -circular-beam -beam-size 0.1arcsec -no-negative -beam-fitting-size = 0.7 '
                        # '-no-mf-weighting ' #
                        # '-save-psf-pb -apply-primary-beam '
                        # '-facet-beam-update 60 -save-aterms -diagonal-solutions '
                        # '-apply-facet-solutions '
                        # '-gridder idg -idg-mode hybrid -apply-primary-beam
                        # -apply-facet-beam -facet-beam-update 120 -save-aterms
                        # -diagonal-solutions '
                        '-save-source-list '
                        '-channels-out '+str(nc)+' -join-channels ' + negative_arg + ' '
                        '-fit-spectral-pol ' +str(nc)+' -deconvolution-channels ' +str(16)+' '
                        )
    if args.deconvolution_mode == 'fast':
        # Quick-look mode: no multiscale, no MFS/joined channels.
        deconvolver = ' '
        deconvolver_options = (' ')
        deconvolver_args = (' -save-source-list '
                            '-deconvolution-threads 16 -j 16 -parallel-reordering 16 '
                            '-parallel-deconvolution 2048') #-parallel-gridding 24
    # image parameters
    weighting = 'briggs'
    imsizex = args.sx
    imsizey = args.sy
    cell = args.cellsize
    niter = args.niter
"""
# taper options (this is a to-do)
uvtaper_mode = '-taper-tukey'
uvtaper_args = '900000'
uvtaper_addmode = '-maxuv-l'
uvtaper_addargs = '800000'
taper_mode='-taper-gaussian '
uvtaper_mode = '-taper-gaussian'
uvtaper_args = '0.05asec'
"""
uvtaper_addmode = ''
uvtaper_addargs = ''
# uvtaper = uvtaper_mode + ' '+ uvtaper_args + ' ' +uvtaper_addmode + ' ' +
# uvtaper_addargs
uvselection = ''
if args.maxuv_l is not None:
uvselection = ' -maxuv-l ' + args.maxuv_l + ' '
if args.minuv_l is not None:
uvselection = uvselection + ' -minuv-l ' + args.minuv_l + ' '
# general arguments
gain_args = ' -mgain 0.4 -gain 0.05 -nmiter 500 ' # -super-weight 9.0
if args.shift == 'None' or args.shift == None:
# if args.shift != ' ':
shift_options = ' '
else:
shift_options = ' -shift ' + args.shift + ' '
# shift_options = ' ' # -shift 13:15:30.68 +62.07.45.357 '#' -shift 13:15:28.903
# +62.07.11.886 '
if args.quiet == 'True':
quiet = ' -quiet '
else:
quiet = ' '
if args.continue_clean == 'True':
"""
Not HANDLED PROPERLY YET
"""
continue_clean = ' --continue '
else:
continue_clean = ' '
opt_args = (
# ' -mem 80 -abs-mem 35 '
# '-pol RL,LR -no-negative -circular-beam -no-reorder '
# ' -save-first-residual -save-weights -save-uv '-maxuv-l 3150000
' '+uvselection+continue_clean+args.opt_args+' '
' -log-time -field all ' + quiet + update_model_option + ' ')
opt_args = opt_args + shift_options
    # wsclean_dir = '/home/sagauga/apps/wsclean_nvidia470_gpu.simg'
    # wsclean_dir = '/raid1/scratch/lucatelli/apps/wsclean_wg_eb.simg'
    # Image every (robust, taper) combination and record per-image stats.
    for robust in robusts:
        for uvtaper in tapers:
            if uvtaper == '':
                taper_mode = ''
            else:
                taper_mode = '-taper-gaussian '
            startTime = time.time()
            image_statistics = imaging(g_name=g_name,
                                       # base_name='2_selfcal_update_model',
                                       # base_name='image',
                                       base_name=base_name,
                                       field=field, robust=str(robust),
                                       uvtaper=uvtaper,
                                       continue_clean=args.continue_clean,
                                       nc=int(1*nc))
            exec_time = time.time() - startTime
            print(f" ++==>> Exec time cleaning = {exec_time:.1f} s")
            if image_statistics is not None:
                image_statistics['robust'] = robust
                # image_statistics['vwt'] = vwt
                image_statistics['uvtaper'] = uvtaper
                # Persist the bookkeeping dict next to the image as CSV.
                df = pd.DataFrame.from_dict(image_statistics, orient='index').T
                df.to_csv(root_dir_sys + image_statistics['imagename'].replace('.fits',
                                                                               '_data.csv'),
                          header=True,
                          index=False)
            else:
                # Imaging was skipped (image already exists).
                pass
|
lucatelliREPO_NAMEmorphenPATH_START.@morphen_extracted@morphen-main@selfcal@imaging_with_wsclean.py@.PATH_END.py
|
{
"filename": "type_registry.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/autograph/utils/type_registry.py",
"type": "Python"
}
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry mechanism implementing the registry pattern for general use."""
class TypeRegistry(object):
  """Type-keyed registry implementing the python registry pattern.

  Maps types to type-specific objects so behavior can be selected based on
  an instance's type — for example, registering a different handler
  function per type.
  """

  def __init__(self):
    # Insertion-ordered mapping from registered type to its value;
    # lookup() scans it in registration order.
    self._registry = {}

  def register(self, obj, value):
    """Registers a Python object within the registry.

    Args:
      obj: The object to add to the registry.
      value: The stored value for the 'obj' type.

    Raises:
      KeyError: If the same obj is used twice.
    """
    if obj in self._registry:
      raise KeyError(f"{type(obj)} has already been registered.")
    self._registry[obj] = value

  def lookup(self, obj):
    """Looks up 'obj'.

    The first registered type for which ``isinstance(obj, type)`` holds
    wins, in registration order.

    Args:
      obj: The object to lookup within the registry.

    Returns:
      Value for 'obj' in the registry if found.

    Raises:
      LookupError: if 'obj' has not been registered.
    """
    for registered_type, value in self._registry.items():
      if isinstance(obj, registered_type):
        return value
    raise LookupError(f"{type(obj)} has not been registered.")
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@autograph@utils@type_registry.py@.PATH_END.py
|
{
"filename": "test_isomap.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/manifold/tests/test_isomap.py",
"type": "Python"
}
|
import math
from itertools import product
import numpy as np
import pytest
from scipy.sparse import rand as sparse_rand
from sklearn import clone, datasets, manifold, neighbors, pipeline, preprocessing
from sklearn.datasets import make_blobs
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils._testing import (
assert_allclose,
assert_allclose_dense_sparse,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS
# Parameter grids shared by the Isomap tests below.
eigen_solvers = ["auto", "dense", "arpack"]  # Isomap eigen_solver options
path_methods = ["auto", "FW", "D"]  # shortest-path methods (Floyd-Warshall / Dijkstra)
def create_sample_data(dtype, n_pts=25, add_noise=False):
    """Build a square grid of equidistant 2D points (n_components = n_dim),
    optionally extended with a small random third dimension."""
    side = int(math.sqrt(n_pts))
    grid = list(product(range(side), repeat=2))
    X = np.array(grid).astype(dtype, copy=False)
    if add_noise:
        # add noise in a third dimension (deterministic seed)
        rng = np.random.RandomState(0)
        extra = 0.1 * rng.randn(n_pts, 1).astype(dtype, copy=False)
        X = np.concatenate((X, extra), 1)
    return X
@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)])
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
def test_isomap_simple_grid(
    global_dtype, n_neighbors, radius, eigen_solver, path_method
):
    # Isomap should preserve distances when all neighbors are used
    n_pts = 25
    X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=False)
    # distances from each point to all others
    if n_neighbors is not None:
        G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance")
    else:
        G = neighbors.radius_neighbors_graph(X, radius, mode="distance")
    clf = manifold.Isomap(
        n_neighbors=n_neighbors,
        radius=radius,
        n_components=2,
        eigen_solver=eigen_solver,
        path_method=path_method,
    )
    clf.fit(X)
    # rebuild the same neighbors graph on the embedding and compare
    if n_neighbors is not None:
        G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance")
    else:
        G_iso = neighbors.radius_neighbors_graph(
            clf.embedding_, radius, mode="distance"
        )
    # float32 needs a loose tolerance; float64 must match exactly
    atol = 1e-5 if global_dtype == np.float32 else 0
    assert_allclose_dense_sparse(G, G_iso, atol=atol)
@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)])
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
def test_isomap_reconstruction_error(
    global_dtype, n_neighbors, radius, eigen_solver, path_method
):
    # reconstruction_error() must agree with the error computed by hand
    # from the centered input/output distance kernels.
    if global_dtype is np.float32:
        pytest.skip(
            "Skipping test due to numerical instabilities on float32 data"
            "from KernelCenterer used in the reconstruction_error method"
        )
    # Same setup as in test_isomap_simple_grid, with an added dimension
    n_pts = 25
    X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=True)
    # compute input kernel
    if n_neighbors is not None:
        G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance").toarray()
    else:
        G = neighbors.radius_neighbors_graph(X, radius, mode="distance").toarray()
    centerer = preprocessing.KernelCenterer()
    K = centerer.fit_transform(-0.5 * G**2)
    clf = manifold.Isomap(
        n_neighbors=n_neighbors,
        radius=radius,
        n_components=2,
        eigen_solver=eigen_solver,
        path_method=path_method,
    )
    clf.fit(X)
    # compute output kernel
    if n_neighbors is not None:
        G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance")
    else:
        G_iso = neighbors.radius_neighbors_graph(
            clf.embedding_, radius, mode="distance"
        )
    G_iso = G_iso.toarray()
    K_iso = centerer.fit_transform(-0.5 * G_iso**2)
    # make sure error agrees
    reconstruction_error = np.linalg.norm(K - K_iso) / n_pts
    atol = 1e-5 if global_dtype == np.float32 else 0
    assert_allclose(reconstruction_error, clf.reconstruction_error(), atol=atol)
@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 0.5)])
def test_transform(global_dtype, n_neighbors, radius):
    # transform() on slightly perturbed points must land close to the
    # original embedding (rms error of the same order as the noise).
    n_samples = 200
    n_components = 10
    noise_scale = 0.01
    # Create S-curve dataset
    X, y = datasets.make_s_curve(n_samples, random_state=0)
    X = X.astype(global_dtype, copy=False)
    # Compute isomap embedding
    iso = manifold.Isomap(
        n_components=n_components, n_neighbors=n_neighbors, radius=radius
    )
    X_iso = iso.fit_transform(X)
    # Re-embed a noisy version of the points
    rng = np.random.RandomState(0)
    noise = noise_scale * rng.randn(*X.shape)
    X_iso2 = iso.transform(X + noise)
    # Make sure the rms error on re-embedding is comparable to noise_scale
    assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale
@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 10.0)])
def test_pipeline(n_neighbors, radius, global_dtype):
    # check that Isomap works fine as a transformer in a Pipeline
    # only checks that no error is raised.
    # TODO check that it actually does something useful
    X, y = datasets.make_blobs(random_state=0)
    X = X.astype(global_dtype, copy=False)
    steps = [
        ("isomap", manifold.Isomap(n_neighbors=n_neighbors, radius=radius)),
        ("clf", neighbors.KNeighborsClassifier()),
    ]
    clf = pipeline.Pipeline(steps)
    clf.fit(X, y)
    assert clf.score(X, y) > 0.9
def test_pipeline_with_nearest_neighbors_transformer(global_dtype):
    # Test chaining NearestNeighborsTransformer and Isomap with
    # neighbors_algorithm='precomputed'
    algorithm = "auto"
    n_neighbors = 10
    X, _ = datasets.make_blobs(random_state=0)
    X2, _ = datasets.make_blobs(random_state=1)
    X = X.astype(global_dtype, copy=False)
    X2 = X2.astype(global_dtype, copy=False)
    # compare the chained version and the compact version
    est_chain = pipeline.make_pipeline(
        neighbors.KNeighborsTransformer(
            n_neighbors=n_neighbors, algorithm=algorithm, mode="distance"
        ),
        manifold.Isomap(n_neighbors=n_neighbors, metric="precomputed"),
    )
    est_compact = manifold.Isomap(
        n_neighbors=n_neighbors, neighbors_algorithm=algorithm
    )
    # both fit_transform and transform must agree between the two variants
    Xt_chain = est_chain.fit_transform(X)
    Xt_compact = est_compact.fit_transform(X)
    assert_allclose(Xt_chain, Xt_compact)
    Xt_chain = est_chain.transform(X2)
    Xt_compact = est_compact.transform(X2)
    assert_allclose(Xt_chain, Xt_compact)
@pytest.mark.parametrize(
    "metric, p, is_euclidean",
    [
        ("euclidean", 2, True),
        ("manhattan", 1, False),
        ("minkowski", 1, False),
        ("minkowski", 2, True),
        (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False),
    ],
)
def test_different_metric(global_dtype, metric, p, is_euclidean):
    # Isomap must work on various metric parameters work correctly
    # and must default to euclidean.
    X, _ = datasets.make_blobs(random_state=0)
    X = X.astype(global_dtype, copy=False)
    # reference embedding computed with the default (euclidean) metric
    reference = manifold.Isomap().fit_transform(X)
    embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X)
    if is_euclidean:
        assert_allclose(embedding, reference)
    else:
        # non-euclidean metrics must give a measurably different embedding
        with pytest.raises(AssertionError, match="Not equal to tolerance"):
            assert_allclose(embedding, reference)
def test_isomap_clone_bug():
    # regression test for bug reported in #6062:
    # re-fitting after set_params must pick up the new n_neighbors.
    model = manifold.Isomap()
    for k in (10, 15, 20):
        model.set_params(n_neighbors=k)
        model.fit(np.random.rand(50, 2))
        assert model.nbrs_.n_neighbors == k
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_input(
    global_dtype, eigen_solver, path_method, global_random_seed, csr_container
):
    # Fitting on sparse input must give the same embedding as fitting on
    # its dense equivalent.
    # TODO: compare results on dense and sparse data as proposed in:
    # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186
    X = csr_container(
        sparse_rand(
            100,
            3,
            density=0.1,
            format="csr",
            dtype=global_dtype,
            random_state=global_random_seed,
        )
    )
    iso_dense = manifold.Isomap(
        n_components=2,
        eigen_solver=eigen_solver,
        path_method=path_method,
        n_neighbors=8,
    )
    iso_sparse = clone(iso_dense)
    X_trans_dense = iso_dense.fit_transform(X.toarray())
    X_trans_sparse = iso_sparse.fit_transform(X)
    assert_allclose(X_trans_sparse, X_trans_dense, rtol=1e-4, atol=1e-4)
def test_isomap_fit_precomputed_radius_graph(global_dtype):
    # Isomap.fit_transform must yield similar result when using
    # a precomputed distance matrix.
    X, y = datasets.make_s_curve(200, random_state=0)
    X = X.astype(global_dtype, copy=False)
    radius = 10
    # precomputed sparse radius-neighbors graph
    g = neighbors.radius_neighbors_graph(X, radius=radius, mode="distance")
    isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="precomputed")
    isomap.fit(g)
    precomputed_result = isomap.embedding_
    # same embedding computed directly from the raw coordinates
    isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="minkowski")
    result = isomap.fit_transform(X)
    atol = 1e-5 if global_dtype == np.float32 else 0
    assert_allclose(precomputed_result, result, atol=atol)
def test_isomap_fitted_attributes_dtype(global_dtype):
    """Check that the fitted attributes are stored with the same data
    type as X."""
    X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
    iso = manifold.Isomap(n_neighbors=2)
    iso.fit(X)
    for attr in ("dist_matrix_", "embedding_"):
        assert getattr(iso, attr).dtype == global_dtype
def test_isomap_dtype_equivalence():
    """Check the equivalence of the results with 32 and 64 bits input."""
    X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
    X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
    iso_32 = manifold.Isomap(n_neighbors=2).fit(X_32)
    iso_64 = manifold.Isomap(n_neighbors=2).fit(X_64)
    assert_allclose(iso_32.dist_matrix_, iso_64.dist_matrix_)
def test_isomap_raise_error_when_neighbor_and_radius_both_set():
    # Isomap.fit_transform must raise a ValueError if
    # radius and n_neighbors are provided.
    X, _ = datasets.load_digits(return_X_y=True)
    isomap = manifold.Isomap(n_neighbors=3, radius=5.5)
    with pytest.raises(ValueError, match="Both n_neighbors and radius are provided"):
        isomap.fit_transform(X)
def test_multiple_connected_components():
    # A warning must be raised when the neighbors graph splits into
    # multiple connected components.
    X = np.array([0, 1, 2, 5, 6, 7]).reshape(-1, 1)
    with pytest.warns(UserWarning, match="number of connected components"):
        manifold.Isomap(n_neighbors=2).fit(X)
def test_multiple_connected_components_metric_precomputed(global_dtype):
    # Test that an error is raised when the graph has multiple components
    # and when X is a precomputed neighbors graph.
    X = np.array([0, 1, 2, 5, 6, 7])[:, None].astype(global_dtype, copy=False)
    # works with a precomputed distance matrix (dense)
    X_distances = pairwise_distances(X)
    with pytest.warns(UserWarning, match="number of connected components"):
        manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_distances)
    # does not work with a precomputed neighbors graph (sparse)
    X_graph = neighbors.kneighbors_graph(X, n_neighbors=2, mode="distance")
    with pytest.raises(RuntimeError, match="number of connected components"):
        manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_graph)
def test_get_feature_names_out():
    """Check get_feature_names_out for Isomap."""
    X, y = make_blobs(random_state=0, n_features=4)
    n_components = 2
    iso = manifold.Isomap(n_components=n_components)
    iso.fit_transform(X)
    expected = [f"isomap{i}" for i in range(n_components)]
    assert_array_equal(expected, iso.get_feature_names_out())
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@manifold@tests@test_isomap.py@.PATH_END.py
|
{
"filename": "stratsi_params.py",
"repo_name": "minkailin/stratsi",
"repo_path": "stratsi_extracted/stratsi-master/mass_check/stratsi_params.py",
"type": "Python"
}
|
'''
common parameters and functions for stratified streaming instability
natural units: Hgas=Omega=1, so cs=1
'''
import sys
import numpy as np
from mpi4py import MPI
import matplotlib.pyplot as plt
from dedalus import public as de
import h5py
import argparse
import time
from scipy.integrate import quad
from scipy.optimize import broyden1
import logging
logger = logging.getLogger(__name__)
# Silence matplotlib's verbose debug logging.
matplotlib_logger = logging.getLogger('matplotlib')
matplotlib_logger.setLevel(logging.WARNING)
comm = MPI.COMM_WORLD
'''
disk parameters
'''
rhog0 = 1.0 #midplane gas density, density normalization
alpha = 1e-6 #alpha viscosity value, assumed constant
eta_hat = 0.05 #dimensionless radial pressure gradient
'''
dust parameters
'''
dg0 = 2.0 #midplane d/g ratio (may be overwritten below when fix_metal is True)
metal = 0.03 #metallicity
stokes = 1e-2 #assume a constant stokes number throughout
# delta: dust diffusion parameter derived from alpha and the Stokes number.
delta = alpha*(1.0 + stokes + 4.0*stokes*stokes)/(1.0+stokes*stokes)**2
# beta: dust settling-rate coefficient (vdz = -beta*z); requires stokes <= 0.5
# for the square root to stay real.
beta = (1.0/stokes - (1.0/stokes)*np.sqrt(1.0 - 4.0*stokes**2))/2.0
'''
grid parameters
'''
zmin = 0.0
zmax = 0.05
nz_vert = 1024
'''
mode parameters
'''
kx = 400.0
kx_min = 400
kx_max = 1e4
nkx = 1
'''
vertical resolution
'''
nz_waves = 160
'''
physics options
'''
fix_metal = True
viscosity_eqm = False
viscosity_pert= False
diffusion = True
backreaction = True
'''
numerical options
'''
all_solve_dense = True #solve for all eigenvals for all kx
first_solve_dense = True #use the dense solver for very first eigen calc
Neig = 10 #number of eigenvalues to get for sparse solver
eigen_trial = 3.902597e-1-4.110389e-3*1j #0.336815 -1j*0.020939 #trial eigenvalue
growth_filter = 2.0 #mode filter, only allow growth rates < growth_filter
tol = 1e-12
'''
analytic vertical profiles for d/g, vdz, rhog assuming constant stokes number
'''
def epsilon(z):
    """Dust-to-gas ratio profile eps(z), Gaussian in height."""
    arg = -0.5*beta*z*z/delta
    return dg0*np.exp(arg)
def rhog(z):
    """Gas density profile, normalized to rhog0 at the midplane."""
    exponent = (delta/stokes)*(epsilon(z) - dg0) - 0.5*z*z
    return rhog0*np.exp(exponent)
def integrand_rhog(z, dg):
    """Gas-density integrand at height z for midplane d/g ratio dg."""
    eps_term = dg*(delta/stokes)*(np.exp(-0.5*beta*z*z/delta) - 1.0)
    return np.exp(eps_term - 0.5*z*z)
def integrand_rhod(z, dg):
    """Dust-density integrand: local d/g ratio times the gas integrand."""
    gas = integrand_rhog(z, dg)
    local_eps = dg*np.exp(-0.5*beta*z*z/delta)
    return local_eps*gas
def sigma_g(dg):
    """Dimensionless gas surface density (half-disk integral) for
    midplane dust-to-gas ratio dg."""
    value, _abserr = quad(integrand_rhog, 0.0, np.inf, args=(dg,))
    return value
def sigma_d(dg):
    """Dimensionless dust surface density (half-disk integral) for
    midplane dust-to-gas ratio dg."""
    value, _abserr = quad(integrand_rhod, 0.0, np.inf, args=(dg,))
    return value
def vdz(z):
    """Dust settling velocity, linear in height."""
    return -beta*z
def dvdz(z):
    """Vertical derivative of the settling velocity (constant)."""
    return -beta
def ln_epsilon(z):
    """Natural log of the dust-to-gas ratio profile."""
    return np.log(epsilon(z))
def dln_epsilon(z):
    """First vertical derivative of ln(eps)."""
    return -beta*z/delta
def d2ln_epsilon(z):
    """Second vertical derivative of ln(eps) (constant)."""
    return -beta/delta
def depsilon(z):
    """First vertical derivative of eps, via the logarithmic derivative."""
    return epsilon(z)*dln_epsilon(z)
def d2epsilon(z):
    """Second vertical derivative of eps, assembled from the first and
    second logarithmic derivatives."""
    deps = depsilon(z)
    eps = epsilon(z)
    dln_eps = dln_epsilon(z)
    d2ln_eps = d2ln_epsilon(z)
    return deps*dln_eps + eps*d2ln_eps
def dln_rhog(z):
    """First vertical derivative of ln(rho_g)."""
    return (delta/stokes)*depsilon(z) - z
def d2ln_rhog(z):
    """Second vertical derivative of ln(rho_g)."""
    return (delta/stokes)*d2epsilon(z) - 1.0
def dln_rhod(z):
    """d ln(rho_d)/dz = d ln(rho_g)/dz + d ln(eps)/dz."""
    return dln_rhog(z) + dln_epsilon(z)
def metallicity_error(dg):
    """Residual Z(dg) - metal; root gives the midplane d/g ratio that
    matches the target metallicity."""
    Z = sigma_d(dg)/sigma_g(dg)
    return Z - metal
def get_dg0_from_metal():
    """Solve for the midplane dust-to-gas ratio giving metallicity `metal`."""
    # Initial guess from the dust scale-height estimate Hd.
    Hd = np.sqrt(delta/(stokes+delta))
    guess = metal/Hd
    sol = broyden1(metallicity_error, [guess], f_tol=1e-16)
    return sol[0]
def Nz2(z):
    """Squared vertical buoyancy frequency of the dust-gas mixture."""
    eps = epsilon(z)
    deps = depsilon(z)
    dlnP = dln_rhog(z) #isothermal gas: pressure gradient follows density
    return dlnP*deps/(1.0+eps)**2
# When the metallicity is fixed, replace the default midplane d/g ratio by
# the value that reproduces the target metallicity Z = metal.
if fix_metal:  # idiomatic truthiness test (was `== True`)
    dg0 = get_dg0_from_metal()
    print("adjust midplane d/g={0:4.2f} to satisfy Z={1:4.2f}".format(dg0, metal))
|
minkailinREPO_NAMEstratsiPATH_START.@stratsi_extracted@stratsi-master@mass_check@stratsi_params.py@.PATH_END.py
|
{
"filename": "session_wrapper.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/autofit/database/migration/session_wrapper.py",
"type": "Python"
}
|
import logging
from functools import wraps
from sqlalchemy import text
from typing import Optional
from ..sqlalchemy_ import sa
logger = logging.getLogger(__name__)
def needs_revision_table(func):
    """
    Applies to functions that depend on the existence
    of the revision table. If the table does not exist
    it is created and then the function is executed.
    If the table already existed but an OperationalError
    is raised then that error is propagated.

    Parameters
    ----------
    func
        Some function that depends on the revision table

    Returns
    -------
    A decorated function
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except sa.exc.OperationalError as e:
            # The table exists, so the failure has some other cause:
            # surface it unchanged.
            if self.is_table:
                raise e
            # Table missing: create it lazily, then retry exactly once.
            self._init_revision_table()
            return func(self, *args, **kwargs)
    return wrapper
class SessionWrapper:
    """Encapsulates access to the single-row ``revision`` bookkeeping table."""

    def __init__(self, session: sa.orm.Session):
        """
        Wraps a SQLAlchemy session so that certain commands can be
        encapsulated.

        Parameters
        ----------
        session
            An open SQLAlchemy session used for all revision queries.
        """
        self.session = session

    def _init_revision_table(self):
        """
        Creates the revision table with a single null entry
        """
        self.session.execute(
            text("CREATE TABLE revision (revision_id VARCHAR PRIMARY KEY)")
        )
        self.session.execute(text("INSERT INTO revision (revision_id) VALUES (null)"))

    @property
    def is_table(self) -> bool:
        """
        Does the revision table exist?
        """
        try:
            self.session.execute(text("SELECT 1 FROM revision"))
            return True
        except sa.exc.OperationalError:
            return False

    @property
    @needs_revision_table
    def revision_id(self) -> Optional[str]:
        """
        Describes the current revision of the database. None if no
        revisions have been made.
        """
        for row in self.session.execute(text("SELECT revision_id FROM revision")):
            return row[0]
        return None

    @revision_id.setter
    @needs_revision_table
    def revision_id(self, revision_id: str):
        # Fixed: this was an f-string with nothing to interpolate.
        # ``:revision_id`` is a SQLAlchemy bound parameter supplied via the
        # dict below, which also guards against SQL injection.
        self.session.execute(
            text("UPDATE revision SET revision_id = :revision_id"),
            {"revision_id": revision_id},
        )
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@autofit@database@migration@session_wrapper.py@.PATH_END.py
|
{
"filename": "input.py",
"repo_name": "florian-lienhard/MM-LSD",
"repo_path": "MM-LSD_extracted/MM-LSD-main/stars/Sun/input.py",
"type": "Python"
}
|
import os
star = "Sun"
#checklist for preprocessing:
#star name defined above
#rassine installed in rassine_loc
#s2d spectra, ccf files, s1d files in rawdir
#checklist for next steps:
#name vald file e.g. yourtarget.txt and put it into VALD_files folder
#star
#the steps after preprocessing are independent of the data format
from datetime import date
today = date.today()
#indic = str(today.year)+str(today.month)+str(today.day)
#run identifier appended to every output file name
indic = "1"
# -------------------------------------------------------------
# Settings for MM-LSD run (what should be done in this run?)
# -------------------------------------------------------------
#preprocess data (if not done already)
preprocess = True
#generate grid on which to evaluate LSD, delete old rv, rv_err pickle files
generate_grid = True
#run on this parameter combination grid
run_on_grid = True
# -------------------------------------------------------------
# Folder locations
# -------------------------------------------------------------
#home
# NOTE(review): hard-coded home directory; must be edited per machine
# (os.path.expanduser("~") would generalize -- confirm with maintainers).
rt = "/home/fl386"
#folder where the input.py file is located.
stardir = './stars/'+star+'/'
#results of MM-LSD will be saved here
resdir = stardir+f"results_{indic}/"
#location of RASSINE installation
rassine_loc = rt + "/Rassine_public-master"
#location of data
datadir = rt + "/MM-LSD/data/"
#your targets data
maindir = datadir + star
#fits files in here
rawdir = maindir +"/data/"
#this will be produced, contains output from preprocess.py
dirdir = maindir+"/processed/"
#preprocess.py will save rassine output in this file. not needed anymore after initial run.
rassine_res = maindir + "/rassine_res/"
#make sure spectrum_name is set to cwd+'/spectra_library/stars/spec_toreduce.csv' in Rassine_config.py.
# Create the results directory if needed.  makedirs with exist_ok replaces
# the previous exists()/mkdir() pair: it also creates missing parent
# directories and avoids the check-then-create race between concurrent runs.
os.makedirs(resdir, exist_ok=True)
#rvs and associated uncertainties from MM-LSD will be saved in these pickle files
rvresfile = resdir+"lsd_rv_"+star+f"_{indic}.pkl"
rverrresfile = resdir+"lsd_rv_err_"+star+f"_{indic}.pkl"
commonprofilefile = resdir+"common_profile_"+star+f"_{indic}.pkl"
# -------------------------------------------------------------
# Data
# -------------------------------------------------------------
#name of pipeline (e.g. HARPS-N new DRS (Dumusque et al, 2021)
#current version of "new drs" (April 2022)
pipname = "DRS_2.3.5"
#"old drs". older version has higher number
#pipname = "DRS_3.7"
# -------------------------------------------------------------
# Preprocessing
# -------------------------------------------------------------
#only needed if you haven't already preprocessed your data (i.e. you have run this at least once)
#extract info from fits headers
extract_info = True
run_rassine = True
save_spectra_as_pkl = True
overlap_correction = True
#in case you want to reduce only the first x spectra, set number_of_spectra_to_reduce to x. Otherwise, set it to a number >= number of spectra.
number_of_spectra_to_reduce = 100000
#name of pipeline
if pipname == "DRS_3.7":
    pipeline_keyword = "DRS"
    sid ="TNG"
if pipname == "DRS_2.3.5":
    #espresso pipeline
    pipeline_keyword = "QC"
    sid ="TNG"
if pipname == "ESPRESSO":
    sid="ESO"
    # NOTE(review): pipeline_keyword is never set on this branch; any code
    # reading it for ESPRESSO data would raise NameError -- confirm.
#velocity step for common profile approx velocity step between two adjacent pixels. Must be constant (assumption in cvmt function).
if pipname == "DRS_2.3.5" or pipname == "DRS_3.7":
    vStep = 0.82
else:
    vStep = 0.7
#------------------------------------------------------------------------------
#RV-EXTRACTION
#set to 1 to correct order overlap discrepancy. no major influence on RVs expected.
rassoption = 1
#set to 0 for flux weights
#set to 1 to use (almost) flat weights per absorption line. (that's inverse squared of the upper envelope of the error)
#set to 2 for uniform weights per order
erroption = 0
#set to 1 for removing all barycentric wavelengths ever affected by telluric lines. recommended.
telloption = 1
#grid of LSD parameter combinations; one RV time series is produced per combination
grid = {}
#run code on max max_nr_of_specs spectra
grid["max_nr_of_specs"]= [10000]
#velocity grid width parameter (FWHM * velgridwidth)
#dvel = np.round(vel_hwhm)*velgridwidth
#vel = np.arange(systemrv-dvel, systemrv+dvel, vStep)
grid["velgridwidth"] = [2.5,3.0,3.5,4.0]
#remove data affected by tellurics deeper than telluric_cut (0.1 = depth of telluric line = 90% transmission)
grid["telluric_cut"] = [0.2,0.1]
#minimal depth of included vald3 absorption lines
grid["mindepthparam"] = [0.1]
#maximal depth of included vald3 absorption lines
grid["maxdepthparam"] = [0.8,1.0]
#if absolute difference between spectrum and first convolution model greater than this: mask.
grid["modelspecdeviationcut"] = [0.5,1.0]
#exclude wide lines? 0 = no, 1 = yes. see run_lsd.
grid["exclwidelinesparam"] = [0]
grid["telloption"] = [telloption]
grid["erroption"] = [erroption]
grid["rassoption"] = [rassoption]
#mean-combine use_n_time_series many of the produced time series
#i.e. e.g. use the 16 timeseries with the lowest RMS (out of the 32 time series from the 32 parameter combinations)
use_n_time_series = 16
#what do you define as an outlier?
#remove rv if difference between median and rv is greater than this value
delta_rv_outlier = 200
#weighting scheme. letting the weights of the orders vary as defined in code is recommended.
weight_schemes = ["flux weight_can_vary"]
#alternatively:
#weight_schemes = ["flux weight_fixed_throughout_time_series"]
# -------------------------------------------------------------
# Don't change these
# -------------------------------------------------------------
#compute nr of grid combinations
nr_of_combinations = 1
for key in grid.keys():
    nr_of_combinations*=len(grid[key])
# NOTE(review): assert statements are stripped under `python -O`; raising
# ValueError would make this sanity check unconditional.
assert use_n_time_series <= nr_of_combinations, "set use_n_time_series to value smaller than nr_of_combinations"
#remove data point if flux < excllower
#note that the spectra are between -1 and 0 (i.e. normalise to 1, then subtract 1)
excllower = -1.1
exclupper = 0.05
usetapas = True
#CONSTANTS
#speed of light in km/s
c = 299792.458
#------------------------------------------------------------------------------
|
florian-lienhardREPO_NAMEMM-LSDPATH_START.@MM-LSD_extracted@MM-LSD-main@stars@Sun@input.py@.PATH_END.py
|
{
"filename": "_sizesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermap/cluster/_sizesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``sizesrc`` property of ``scattermap.cluster``."""

    def __init__(
        self, plotly_name="sizesrc", parent_name="scattermap.cluster", **kwargs
    ):
        # Default the edit type unless the caller supplied one explicitly.
        kwargs.setdefault("edit_type", "none")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermap@cluster@_sizesrc.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choropleth/legendgrouptitle/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``choropleth.legendgrouptitle.font.style``."""

    def __init__(
        self,
        plotly_name="style",
        parent_name="choropleth.legendgrouptitle.font",
        **kwargs,
    ):
        # Fill in defaults only where the caller did not override them.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("values", ["normal", "italic"])
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choropleth@legendgrouptitle@font@_style.py@.PATH_END.py
|
{
"filename": "worker.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/cli/worker.py",
"type": "Python"
}
|
import asyncio
import json
import os
from enum import Enum
from typing import List, Optional, Type
import typer
from prefect._internal.integrations import KNOWN_EXTRAS_FOR_PACKAGES
from prefect.cli._prompts import confirm
from prefect.cli._types import PrefectTyper, SettingsOption
from prefect.cli._utilities import exit_with_error
from prefect.cli.root import app, is_interactive
from prefect.client.collections import get_collections_metadata_client
from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import WorkQueueFilter, WorkQueueFilterName
from prefect.exceptions import ObjectNotFound
from prefect.plugins import load_prefect_collections
from prefect.settings import (
PREFECT_WORKER_HEARTBEAT_SECONDS,
PREFECT_WORKER_PREFETCH_SECONDS,
)
from prefect.utilities.dispatch import lookup_type
from prefect.utilities.processutils import (
get_sys_executable,
run_process,
setup_signal_handlers_worker,
)
from prefect.workers.base import BaseWorker
# CLI sub-application registered on the root CLI as `prefect worker`.
worker_app = PrefectTyper(name="worker", help="Start and interact with workers.")
app.add_typer(worker_app)
class InstallPolicy(str, Enum):
    """When to install the integration package providing a worker type
    (consumed by ``_get_worker_class``)."""

    ALWAYS = "always"  # install/upgrade the package before loading the class
    IF_NOT_PRESENT = "if-not-present"  # install only if the class can't be loaded
    NEVER = "never"  # never install automatically
    PROMPT = "prompt"  # ask the user first (interactive sessions only)
@worker_app.command()
async def start(
    worker_name: str = typer.Option(
        None,
        "-n",
        "--name",
        help=(
            "The name to give to the started worker. If not provided, a unique name"
            " will be generated."
        ),
    ),
    work_pool_name: str = typer.Option(
        ...,
        "-p",
        "--pool",
        help="The work pool the started worker should poll.",
        prompt=True,
    ),
    work_queues: List[str] = typer.Option(
        None,
        "-q",
        "--work-queue",
        help=(
            "One or more work queue names for the worker to pull from. If not provided,"
            " the worker will pull from all work queues in the work pool."
        ),
    ),
    worker_type: Optional[str] = typer.Option(
        None,
        "-t",
        "--type",
        help=(
            "The type of worker to start. If not provided, the worker type will be"
            " inferred from the work pool."
        ),
    ),
    prefetch_seconds: int = SettingsOption(
        PREFECT_WORKER_PREFETCH_SECONDS,
        help="Number of seconds to look into the future for scheduled flow runs.",
    ),
    run_once: bool = typer.Option(
        False, help="Only run worker polling once. By default, the worker runs forever."
    ),
    limit: int = typer.Option(
        None,
        "-l",
        "--limit",
        help="Maximum number of flow runs to start simultaneously.",
    ),
    with_healthcheck: bool = typer.Option(
        False, help="Start a healthcheck server for the worker."
    ),
    install_policy: InstallPolicy = typer.Option(
        InstallPolicy.PROMPT.value,
        "--install-policy",
        help="Install policy to use workers from Prefect integration packages.",
        case_sensitive=False,
    ),
    base_job_template: typer.FileText = typer.Option(
        None,
        "--base-job-template",
        help=(
            "The path to a JSON file containing the base job template to use. If"
            " unspecified, Prefect will use the default base job template for the given"
            " worker type. If the work pool already exists, this will be ignored."
        ),
    ),
):
    """
    Start a worker process to poll a work pool for flow runs.
    """
    # Warn (but do not abort) when the pool or its queues are paused:
    # the worker still starts and idles until work is unpaused.
    is_paused = await _check_work_pool_paused(work_pool_name)
    if is_paused:
        app.console.print(
            (
                f"The work pool {work_pool_name!r} is currently paused. This worker"
                " will not execute any flow runs until the work pool is unpaused."
            ),
            style="yellow",
        )
    is_queues_paused = await _check_work_queues_paused(
        work_pool_name,
        work_queues,
    )
    if is_queues_paused:
        queue_scope = (
            "All work queues" if not work_queues else "Specified work queue(s)"
        )
        app.console.print(
            (
                f"{queue_scope} in the work pool {work_pool_name!r} are currently"
                " paused. This worker will not execute any flow runs until the work"
                " queues are unpaused."
            ),
            style="yellow",
        )
    # Resolve the worker class, possibly installing the integration package
    # that provides it (governed by --install-policy).
    worker_cls = await _get_worker_class(worker_type, work_pool_name, install_policy)
    if worker_cls is None:
        exit_with_error(
            "Unable to start worker. Please ensure you have the necessary dependencies"
            " installed to run your desired worker type."
        )
    # Install signal handlers so the worker shuts down gracefully.
    worker_process_id = os.getpid()
    setup_signal_handlers_worker(
        worker_process_id, f"the {worker_type} worker", app.console.print
    )
    # Optional base job template is read from the provided JSON file handle.
    template_contents = None
    if base_job_template is not None:
        template_contents = json.load(fp=base_job_template)
    worker = worker_cls(
        name=worker_name,
        work_pool_name=work_pool_name,
        work_queues=work_queues,
        limit=limit,
        prefetch_seconds=prefetch_seconds,
        heartbeat_interval_seconds=int(PREFECT_WORKER_HEARTBEAT_SECONDS.value()),
        base_job_template=template_contents,
    )
    try:
        await worker.start(
            run_once=run_once,
            with_healthcheck=with_healthcheck,
            printer=app.console.print,
        )
    except asyncio.CancelledError:
        # Cancellation is the normal shutdown path (triggered by the signal
        # handlers), not an error.
        app.console.print(f"Worker {worker.name!r} stopped!", style="yellow")
async def _check_work_pool_paused(work_pool_name: str) -> bool:
    """Return True if the named work pool exists and is currently paused."""
    try:
        async with get_client() as client:
            pool = await client.read_work_pool(work_pool_name=work_pool_name)
    except ObjectNotFound:
        # A pool that does not exist cannot be paused.
        return False
    return pool.is_paused
async def _check_work_queues_paused(
    work_pool_name: str, work_queues: Optional[List[str]]
) -> bool:
    """
    Check whether every relevant work queue in the pool is paused.

    When ``work_queues`` is given, only those named queues are considered;
    otherwise all queues in the pool are checked.

    Parameters
    ----------
    work_pool_name
        the name of the work pool to check
    work_queues
        the names of the work queues to check, or None for all queues

    Returns
    -------
    bool
        True if every matching queue is paused, False otherwise (including
        when the pool is missing or has no queues).
    """
    queue_filter = None
    if work_queues:
        queue_filter = WorkQueueFilter(name=WorkQueueFilterName(any_=work_queues))
    try:
        async with get_client() as client:
            queues = await client.read_work_queues(
                work_pool_name=work_pool_name, work_queue_filter=queue_filter
            )
    except ObjectNotFound:
        return False
    return bool(queues) and all(queue.is_paused for queue in queues)
async def _retrieve_worker_type_from_pool(work_pool_name: Optional[str] = None) -> str:
    """Infer the worker type from an existing work pool.

    Exits the CLI with an error for push/managed pools (they do not use
    workers); falls back to the "process" type when the pool is missing.
    """
    try:
        async with get_client() as client:
            work_pool = await client.read_work_pool(work_pool_name=work_pool_name)
        worker_type = work_pool.type
        app.console.print(
            f"Discovered type {worker_type!r} for work pool {work_pool.name!r}."
        )
        if work_pool.is_push_pool or work_pool.is_managed_pool:
            exit_with_error(
                "Workers are not required for push work pools. "
                "See https://docs.prefect.io/latest/deploy/infrastructure-examples/serverless "
                "for more details."
            )
    except ObjectNotFound:
        # Missing pool: warn and default to a local process worker.
        app.console.print(
            (
                f"Work pool {work_pool_name!r} does not exist and no worker type was"
                " provided. Starting a process worker..."
            ),
            style="yellow",
        )
        worker_type = "process"
    return worker_type
def _load_worker_class(worker_type: str) -> Optional[Type[BaseWorker]]:
    """Resolve a worker type name to its class, or None if not registered."""
    try:
        # Loading collections registers integration-provided worker classes.
        load_prefect_collections()
        worker_cls = lookup_type(BaseWorker, worker_type)
    except KeyError:
        return None
    return worker_cls
async def _install_package(package: str, upgrade: bool = False) -> None:
    """
    Install (or upgrade) ``package`` into the current environment via pip.

    The name is mapped through ``KNOWN_EXTRAS_FOR_PACKAGES`` so packages
    requiring extras are installed with them.

    Parameters
    ----------
    package
        The distribution name to install.
    upgrade
        When True, pass ``--upgrade`` to pip.

    Notes
    -----
    Fixed the return annotation: this function performs the install as a
    side effect and returns nothing (the previous
    ``Optional[Type[BaseWorker]]`` annotation was incorrect).
    """
    app.console.print(f"Installing {package}...")
    install_package = KNOWN_EXTRAS_FOR_PACKAGES.get(package, package)
    # Use the same interpreter that is running the CLI so the package lands
    # in the environment the worker will import from.
    command = [get_sys_executable(), "-m", "pip", "install", install_package]
    if upgrade:
        command.append("--upgrade")
    await run_process(command, stream_output=True)
async def _find_package_for_worker_type(worker_type: str) -> Optional[str]:
    """Look up which integration package provides ``worker_type``.

    Returns the package name, or None (with a console warning) when no
    known package provides that worker type.
    """
    async with get_collections_metadata_client() as client:
        worker_metadata = await client.read_worker_metadata()
    # Invert the metadata: worker type -> providing package (agents excluded).
    types_to_packages = {
        known_type: package_name
        for package_name, worker_dict in worker_metadata.items()
        for known_type in worker_dict
        if known_type != "prefect-agent"
    }
    package = types_to_packages.get(worker_type)
    if package is None:
        app.console.print(
            f"Could not find a package for worker type {worker_type!r}.",
            style="yellow",
        )
    return package
async def _get_worker_class(
    worker_type: Optional[str] = None,
    work_pool_name: Optional[str] = None,
    install_policy: InstallPolicy = InstallPolicy.PROMPT,
) -> Optional[Type[BaseWorker]]:
    """
    Resolve the worker class to start, optionally installing the integration
    package that provides it according to ``install_policy``.

    Returns None when the class cannot be loaded and no install was performed.
    Raises ValueError when neither ``worker_type`` nor ``work_pool_name`` is given.
    """
    if worker_type is None and work_pool_name is None:
        raise ValueError("Must provide either worker_type or work_pool_name.")
    if worker_type is None:
        worker_type = await _retrieve_worker_type_from_pool(work_pool_name)
    if worker_type == "prefect-agent":
        exit_with_error(
            "'prefect-agent' typed work pools work with Prefect Agents instead of"
            " Workers. Please use the 'prefect agent start' to start a Prefect Agent."
        )
    if install_policy == InstallPolicy.ALWAYS:
        package = await _find_package_for_worker_type(worker_type)
        if package:
            await _install_package(package, upgrade=True)
    # Load once after any ALWAYS-policy install.  (Fixed: the previous code
    # also called _load_worker_class inside the branch above, a redundant
    # assignment that was immediately overwritten here.)
    worker_cls = _load_worker_class(worker_type)
    if worker_cls is None:
        package = await _find_package_for_worker_type(worker_type)
        # Check if the package exists
        if package:
            # Prompt to install if the package is not present
            if install_policy == InstallPolicy.IF_NOT_PRESENT:
                should_install = True
            # Confirm with the user for installation in an interactive session
            elif install_policy == InstallPolicy.PROMPT and is_interactive():
                message = (
                    "Could not find the Prefect integration library for the"
                    f" {worker_type} worker in the current environment."
                    " Install the library now?"
                )
                should_install = confirm(message, default=True)
            # If none of the conditions met, don't install the package
            else:
                should_install = False
            # If should_install is True, install the package and retry the load
            if should_install:
                await _install_package(package)
                worker_cls = _load_worker_class(worker_type)
    return worker_cls
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@cli@worker.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "jvines/Ceres-plusplus",
"repo_path": "Ceres-plusplus_extracted/Ceres-plusplus-main/cerespp/__init__.py",
"type": "Python"
}
|
"""
Ceres plusplus is a module designed to extend the functionality of ceres
(https://github.com/rabrahm/ceres) in order to calculate activity indicators
from spectra reduced with the tool, and to easily extract the indicators that
ceres does calculate, for example the CCF FWHM and the BIS.
"""
from pkg_resources import get_distribution
from .cerespp import get_activities
from .cpplots import ccf_gauss_plot
from .cpplots import ccf_plot
from .cpplots import line_plot
from .cpplots import line_plot_from_file
from .crosscorr import ccf
from .crosscorr import ccf_fit
from .spectra_utils import median_combine
from .spectra_utils import median_combine_1d
from .spectra_utils import merge_echelle
from .spectra_utils import velocity_correction
from .spectra_utils import correct_to_rest
__version__ = get_distribution('cerespp').version
|
jvinesREPO_NAMECeres-plusplusPATH_START.@Ceres-plusplus_extracted@Ceres-plusplus-main@cerespp@__init__.py@.PATH_END.py
|
{
"filename": "process.py",
"repo_name": "ML4GW/hermes",
"repo_path": "hermes_extracted/hermes-main/hermes/aeriel/monitor/process.py",
"type": "Python"
}
|
import multiprocessing as mp
import sys
import time
from queue import Empty
from typing import TYPE_CHECKING, Optional
from hermes.aeriel.monitor.logging import listener, logger
from hermes.aeriel.monitor.utils import ExceptionWrapper, Throttle
if TYPE_CHECKING:
from queue import Queue
from hermes.aeriel.monitor.utils import Package
class PipelineProcess(mp.Process):
    """A pipeline stage running in its own OS process.

    Reads packages from ``in_q``, processes them, and places results on
    ``out_q``.  Stages are chained with the ``>>`` operator, which connects
    one stage's output queue to the next stage's input queue and returns a
    ``Pipeline``.
    """

    def __init__(
        self, name: str, rate: Optional[float] = None, join_timeout: float = 10
    ) -> None:
        self._pause_event = mp.Event()
        self._stop_event = mp.Event()
        self.in_q = mp.Queue()
        self.out_q = mp.Queue()
        # build a throttle to use during the target process
        # to limit ourselves to the target rate if we passed
        # one, otherwise we'll just iterate infinitely. In
        # either case, use the stop event's set status to
        # indicate when the loop should be interrupted
        if rate is not None:
            self.throttle = Throttle(rate, condition=self._stop_event.is_set)
        else:
            # two-argument iter(): keeps yielding until is_set() returns True
            self.throttle = iter(self._stop_event.is_set, True)
        self.join_timeout = join_timeout
        self.logger = None
        super().__init__(name=name)

    @property
    def stopped(self):
        # True once stop() has been called (from either side of the fork)
        return self._stop_event.is_set()

    def stop(self) -> None:
        # Signal the run loop (and the throttle) to exit
        self._stop_event.set()

    def cleanup(self, exc: Exception) -> None:
        """Gracefully clean up the process if an exception is encountered"""
        if self.logger is not None:
            self.logger.error(f"Encountered {exc.__class__.__name__}: {exc}")
        # forward the exception downstream so consumers can re-raise it
        self.out_q.put(ExceptionWrapper(exc))
        self.stop()

    def _impatient_get(self, q: "Queue") -> "Package":
        """Wait forever to get an object from a queue

        Gets and item from a queue in a way that
        waits forever without blocking so that
        errors that get bubbled up can interrupt
        appropriately. Also checks to see if upstream
        processes have passed an exception and raises
        them so that the traceback is maintained.

        Args:
            q:
                The queue to get from
        """
        while True:
            try:
                item = q.get_nowait()
            except Empty:
                # tiny sleep keeps the poll loop from spinning a core
                time.sleep(1e-6)
            else:
                if isinstance(item, ExceptionWrapper):
                    item.reraise()
                elif item == StopIteration or isinstance(item, StopIteration):
                    # upstream signalled end-of-stream
                    raise StopIteration
                return item

    def get_package(self) -> "Package":
        # Next input from the upstream stage
        return self._impatient_get(self.in_q)

    def process(self, package: "Package") -> None:
        # Default behavior is a pass-through; subclasses override this
        # to do the stage's real work.
        self.out_q.put(package)

    def run(self) -> None:
        """Entry point executed in the child process."""
        exitcode = 0
        try:
            # create a multiprocessing logger that
            # write logs to the main process for handling
            self.logger = listener.add_process(self)
            # run everything in a throttle context in
            # case we want to rate control everything
            for _ in self.throttle:
                # try to get the next package, and process
                # it if there's anything to process
                inputs = self.get_package()
                if inputs is not None:
                    # try passing a starmap so that subclasses
                    # don't always have to return lists, but
                    # otherwise call the downstream process
                    # normally
                    if not isinstance(inputs, dict):
                        try:
                            self.process(*inputs)
                        except TypeError:
                            self.process(inputs)
                    else:
                        self.process(inputs)
        except Exception as e:
            # pass on any exceptions to downstream processes
            # and set the exitcode to indicate an error
            # TODO: should we do a special case for StopIterations?
            self.cleanup(e)
            exitcode = 1
        finally:
            # send one last log to the main process then
            # close the queue and wait for the thread to join
            self.logger.debug("Target completed")
            listener.queue.close()
            listener.queue.join_thread()
        # exit the process with the indicated code
        sys.exit(exitcode)

    def __enter__(self) -> "PipelineProcess":
        # context entry starts the child process
        self.start()
        return self

    def __exit__(self, type_, value, traceback) -> None:
        if not self.stopped:
            self.stop()
        # stop the listener before exiting so
        # that it doesn't try to poll connections
        # from dead threads
        if type_ is not None and listener._thread is not None:
            listener.stop()
        # try to join the process if we can
        self.join(self.join_timeout)
        if self.exitcode is None:
            # if the process is still running after the wait
            # time, terminate it and log a warning
            logger.warning(
                f"Process {self.name} couldn't join gracefully. Terminating"
            )
            self.terminate()
            time.sleep(1)
        else:
            logger.debug(f"Process {self.name} joined gracefully")
        # close the process
        self.close()
        # clear and close the input queue
        # to kill the daemon thread
        logger.debug(f"Clearing input queue for process {self.name}")
        while True:
            try:
                self.in_q.get_nowait()
            except Empty:
                break
        logger.debug(f"Input queue for process {self.name} cleared")
        self.in_q.close()

    def __iter__(self) -> "PipelineProcess":
        return self

    def __next__(self) -> "Package":
        # iterating a stage yields its outputs; raises StopIteration at end
        return self._impatient_get(self.out_q)

    def __rshift__(self, child) -> "PipelineProcess":
        # `a >> b` wires a's output queue into b's input and returns a Pipeline
        if isinstance(child, Pipeline):
            child.processes[0].in_q = self.out_q
            processes = [self] + child.processes
            return Pipeline(processes)
        elif isinstance(child, PipelineProcess):
            child.in_q = self.out_q
            return Pipeline([self, child])
        else:
            raise TypeError(
                "Unsupported operand type(s) for >> "
                "PipelineProcess and {}".format(type(child))
            )
class Pipeline:
    """A linear chain of pipeline stages connected by queues.

    Entering the pipeline starts every stage; iterating it yields the
    outputs of the final stage.  The ``>>`` operator appends further
    stages or pipelines, wiring queues together.
    """

    def __init__(self, processes):
        self.processes = processes

    def __enter__(self):
        # start every stage, upstream first
        for stage in self.processes:
            stage.__enter__()
        return self

    def __exit__(self, *exc_args):
        # propagate exit to every stage in the same order
        for stage in self.processes:
            stage.__exit__(*exc_args)

    def __iter__(self):
        # a pipeline's output stream is its last stage's output stream
        return iter(self.processes[-1])

    def __rshift__(self, child):
        if isinstance(child, PipelineProcess):
            child.in_q = self.processes[-1].out_q
            return Pipeline(self.processes + [child])
        if isinstance(child, Pipeline):
            child.processes[0].in_q = self.processes[-1].out_q
            return Pipeline(self.processes + child.processes)
        raise TypeError(
            "Unsupported operand type(s) for >> "
            "Pipeline and {}".format(type(child))
        )
|
ML4GWREPO_NAMEhermesPATH_START.@hermes_extracted@hermes-main@hermes@aeriel@monitor@process.py@.PATH_END.py
|
{
"filename": "accented_text.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/text_labels_and_annotations/accented_text.py",
"type": "Python"
}
|
r"""
=============
Accented text
=============
Matplotlib supports accented characters via TeX mathtext or Unicode.
Using mathtext, the following accents are provided: \\hat, \\breve, \\grave,
\\bar, \\acute, \\tilde, \\vec, \\dot, \\ddot. All of them have the same
syntax, e.g. \\bar{o} yields "o overbar", \\ddot{o} yields "o umlaut".
Shortcuts such as \\"o \\'e \\`e \\~n \\.x \\^y are also supported.
"""
import matplotlib.pyplot as plt
# Mathtext demo
fig, ax = plt.subplots()
ax.plot(range(10))
ax.set_title(r'$\ddot{o}\acute{e}\grave{e}\hat{O}'
r'\breve{i}\bar{A}\tilde{n}\vec{q}$', fontsize=20)
# Shorthand is also supported and curly braces are optional
ax.set_xlabel(r"""$\"o\ddot o \'e\`e\~n\.x\^y$""", fontsize=20)
ax.text(4, 0.5, r"$F=m\ddot{x}$")
fig.tight_layout()
# %%
# You can also use Unicode characters directly in strings.
fig, ax = plt.subplots()
ax.set_title("GISCARD CHAHUTΓ Γ L'ASSEMBLΓE")
ax.set_xlabel("LE COUP DE DΓ DE DE GAULLE")
ax.set_ylabel('AndrΓ© was here!')
ax.text(0.2, 0.8, 'Institut fΓΌr FestkΓΆrperphysik', rotation=45)
ax.text(0.4, 0.2, 'AVA (check kerning)')
plt.show()
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@text_labels_and_annotations@accented_text.py@.PATH_END.py
|
{
"filename": "dataverse.py",
"repo_name": "GBTAmmoniaSurvey/GAS",
"repo_path": "GAS_extracted/GAS-master/GAS/dataverse.py",
"type": "Python"
}
|
GBTAmmoniaSurveyREPO_NAMEGASPATH_START.@GAS_extracted@GAS-master@GAS@dataverse.py@.PATH_END.py
|
|
{
"filename": "lensing_table.py",
"repo_name": "cmbant/CosmoMC",
"repo_path": "CosmoMC_extracted/CosmoMC-master/batch3/outputs/lensing_table.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import print_function
import planckStyle as s
# Shared Planck plotting/analysis helper; supplies chain access and settings.
g = s.getSinglePlotter()
class comb(object):
    """One row of the lensing table: a chain-variant suffix, a prior/data
    tag, and the LaTeX title printed for that row."""

    def __init__(self, varname, prior, title, blockstart=False):
        # blockstart=True rows open a new \hline-separated block in the table
        self.varname, self.prior = varname, prior
        self.title, self.blockstart = title, blockstart
# One `comb` per table row: (chain-variant suffix, prior/data tag, LaTeX title).
items = []
items += [comb('', 'lenspriors', r'MV conservative $8\le L \le 400$')]
items += [comb('DESlens', 'lenspriors', r'DES lensing joint')]
items += [comb('DES', 'lenspriors', r'DES combined joint')]
items += [comb('theta', 'lenspriors', '$100\\thetaMC = 1.0409 \pm 0.0006$ joint')]
items += [comb('', 'plikHM_TT_lowl_lowE_lensing', r'\planckTT\ joint')]
items += [comb('', 'plikHM_TTTEEE_lowl_lowE_lensing', r'\planckall\ joint')]
items += [comb('conslmin40', 'lenspriors', r'MV conservative $40\le L \le 400$ ', blockstart=True)]
items += [comb('agrlmax425', 'lenspriors', r'MV aggressive $8\le L \le 425$')]
items += [comb('agr2', 'lenspriors', r'MV aggressive $8\le L \le 2048$')]
items += [comb('ptt', 'lenspriors', r'TT conservative $8\le L \le 400$')]
items += [comb('pttagr2', 'lenspriors', r'TT aggressive $8\le L \le 2048$')]
items += [comb('Apr6', 'lenspriors', r"CompSep mask $8\le L \le 400$")]
items += [comb('', 'DESpriors', r'DES priors', blockstart=True)]
items += [comb('', 'DESpriors_CookeDH', r"$'$$'$ + ($\Omega_{\rm b}h^2=0.0222\pm 0.0005$)")]
items += [comb('bfcl', 'lenspriors', r'Best-fit $C^{\rm CMB}_\ell$')]
items += [comb('agr2bfcl', 'lenspriors', r"$'$$'$ (MV aggressive $8\le L \le 2048$)")]
items += [comb('takahashi', 'lenspriors', r"Takahashi {\HALOFIT}")]
items += [comb('agr2takahashi', 'lenspriors', r"$'$$'$ (MV aggressive $8\le L \le 2048$)")]
items += [comb('linear', 'lenspriors', r'Linear theory')]
items += [comb('acc', 'lenspriors', r'Higher accuracy')]
items += [comb('agr2acc', 'lenspriors', r"$'$$'$ (MV aggressive $8\le L \le 2048$)")]
lines = []
heading = ''
for i, item in enumerate(items):
    line = item.title
    if item.blockstart:
        line = '\\hline\n' + line
    # Four column groups: lensing alone / +BAO, each without and with free mnu.
    roots = ['base_lensing_%s', 'base_lensing_%s_BAO',
             'base_mnu_lensing_%s', 'base_mnu_lensing_%s_BAO']
    for root, pars in zip(roots, [['s8omegamp25'], ['sigma8', 'H0', 'omegam'], ['s8omegamp25'], ['sigma8', 'mnu']]):
        if 'plikHM' in item.prior:
            root = root.replace('lensing_%s', item.prior)
        else:
            root = root % item.prior
        if item.varname: root += '_' + item.varname
        print(root)
        try:
            if 'base_mnu' in root:
                paramtag = 'mnu'
                datatag = root.replace('base_mnu_', '')
            else:
                paramtag = ''
                datatag = root.replace('base_', '')
            root = g.getRoot(paramtag, datatag)
            samples = g.sampleAnalyser.samplesForRoot(root)
            samples.paramNames.setLabelsAndDerivedFromParamNames(g.settings.param_names_for_labels)
            if 'planck' in item.title:
                latex = samples.getLatex(params=pars, limit=1, err_sig_figs=1)
            else:
                latex = samples.getLatex(params=pars, limit=1)
            if 'DESpriors' in item.prior and 'mnu' in pars:
                latex[1][1] = '\\text{--}'
        except Exception as e:
            # Missing chains yield empty table cells rather than aborting.
            print(e)
            print('Missing root:' + root)
            latex = [[''] * len(pars), [''] * len(pars)]
        if i == 0: heading += '& $' + '$ & $'.join(latex[0]) + '$'
        line += r' & $' + '$ & $'.join(latex[1]) + '$'
    lines.append(line)
print(heading + '\\\\\n\\hline\n' + '\\\\\n'.join(lines) + '\\\\\n')
|
cmbantREPO_NAMECosmoMCPATH_START.@CosmoMC_extracted@CosmoMC-master@batch3@outputs@lensing_table.py@.PATH_END.py
|
{
"filename": "_traceref.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/error_x/_traceref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class TracerefValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the ``traceref`` property of ``histogram.error_x``."""

    def __init__(
        self, plotly_name="traceref", parent_name="histogram.error_x", **kwargs
    ):
        # Supply defaults only where the caller has not overridden them.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("min", 0)
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@error_x@_traceref.py@.PATH_END.py
|
{
"filename": "this.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/this.py",
"type": "Python"
}
|
s = """Gur Mra bs Clguba, ol Gvz Crgref
Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
d = {}
for c in (65, 97):
for i in range(26):
d[chr(i+c)] = chr((i+13) % 26 + c)
print("".join([d.get(c, c) for c in s]))
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@this.py@.PATH_END.py
|
{
"filename": "_bordercolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/hoverlabel/_bordercolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``choroplethmap.hoverlabel.bordercolor`` property."""

    def __init__(
        self,
        plotly_name="bordercolor",
        parent_name="choroplethmap.hoverlabel",
        **kwargs,
    ):
        # Pull overridable defaults out of kwargs before delegating upward.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@hoverlabel@_bordercolor.py@.PATH_END.py
|
{
"filename": "projected_normal.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/pyro/distributions/projected_normal.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
from pyro.ops.tensor_utils import safe_normalize
from . import constraints
from .torch_distribution import TorchDistribution
class ProjectedNormal(TorchDistribution):
    """
    Projected isotropic normal distribution of arbitrary dimension.

    This distribution over directional data is qualitatively similar to the von
    Mises and von Mises-Fisher distributions, but permits tractable variational
    inference via reparametrized gradients.

    To use this distribution with autoguides, use ``poutine.reparam`` with a
    :class:`~pyro.infer.reparam.projected_normal.ProjectedNormalReparam`
    reparametrizer in the model, e.g.::

        @poutine.reparam(config={"direction": ProjectedNormalReparam()})
        def model():
            direction = pyro.sample("direction",
                                    ProjectedNormal(torch.zeros(3)))
            ...

    or simply wrap in :class:`~pyro.infer.reparam.strategies.MinimalReparam` or
    :class:`~pyro.infer.reparam.strategies.AutoReparam` , e.g.::

        @MinimalReparam()
        def model():
            ...

    .. note:: This implements :meth:`log_prob` only for dimensions {2,3,4}
        (the dimensions registered via ``_register_log_prob`` below).

    [1] D. Hernandez-Stumpfhauser, F.J. Breidt, M.J. van der Woerd (2017)
        "The General Projected Normal Distribution of Arbitrary Dimension:
        Modeling and Bayesian Inference"
        https://projecteuclid.org/euclid.ba/1453211962

    :param torch.Tensor concentration: A combined location-and-concentration
        vector. The direction of this vector is the location, and its
        magnitude is the concentration.
    """
    arg_constraints = {"concentration": constraints.real_vector}
    support = constraints.sphere
    has_rsample = True
    _log_prob_impls = {}  # maps dim -> function(concentration, value)
    def __init__(self, concentration, *, validate_args=None):
        # The trailing dimension of ``concentration`` is the event (direction)
        # dimension; all leading dimensions are batch dimensions.
        assert concentration.dim() >= 1
        self.concentration = concentration
        batch_shape = concentration.shape[:-1]
        event_shape = concentration.shape[-1:]
        super().__init__(batch_shape, event_shape, validate_args=validate_args)
    @staticmethod
    def infer_shapes(concentration):
        # NOTE: ``concentration`` here is a *shape tuple*, not a tensor.
        batch_shape = concentration[:-1]
        event_shape = concentration[-1:]
        return batch_shape, event_shape
    def expand(self, batch_shape, _instance=None):
        # Standard TorchDistribution expand: broadcast concentration over the
        # new batch shape while keeping the event dimension intact.
        batch_shape = torch.Size(batch_shape)
        new = self._get_checked_instance(ProjectedNormal, _instance)
        new.concentration = self.concentration.expand(batch_shape + (-1,))
        super(ProjectedNormal, new).__init__(
            batch_shape, self.event_shape, validate_args=False
        )
        new._validate_args = self.__dict__.get("_validate_args")
        return new
    @property
    def mean(self):
        """
        Note this is the mean in the sense of a centroid in the submanifold
        that minimizes expected squared geodesic distance.
        """
        return safe_normalize(self.concentration)
    @property
    def mode(self):
        return safe_normalize(self.concentration)
    def rsample(self, sample_shape=torch.Size()):
        # Reparametrized sampling: perturb the concentration with standard
        # Gaussian noise, then project back onto the unit sphere.
        shape = self._extended_shape(sample_shape)
        x = self.concentration.new_empty(shape).normal_()
        x = x + self.concentration
        x = safe_normalize(x)
        return x
    def log_prob(self, value):
        if self._validate_args:
            event_shape = value.shape[-1:]
            if event_shape != self.event_shape:
                raise ValueError(
                    f"Expected event shape {self.event_shape}, "
                    f"but got {event_shape}"
                )
            self._validate_sample(value)
        dim = int(self.concentration.size(-1))
        # Dispatch to the dimension-specific implementation registered via
        # :meth:`_register_log_prob` (dims 2, 3 and 4 are provided below).
        try:
            impl = self._log_prob_impls[dim]
        except KeyError:
            msg = f"ProjectedNormal.log_prob() is not implemented for dim = {dim}."
            if value.requires_grad:  # For latent variables but not observations.
                msg += " Consider using poutine.reparam with ProjectedNormalReparam."
            raise NotImplementedError(msg)
        return impl(self.concentration, value)
    @classmethod
    def _register_log_prob(cls, dim, fn=None):
        # Usable directly or as a decorator: ``@_register_log_prob(dim=2)``.
        if fn is None:
            return lambda fn: cls._register_log_prob(dim, fn)
        cls._log_prob_impls[dim] = fn
        return fn
def _dot(x, y):
return (x[..., None, :] @ y[..., None])[..., 0, 0]
def _safe_log(x):
return x.clamp(min=torch.finfo(x.dtype).eps).log()
@ProjectedNormal._register_log_prob(dim=2)
def _log_prob_2(concentration, value):
    """Exact ProjectedNormal log-density on the circle (dim=2)."""
    # We integrate along a ray, factorizing the integrand as a product of:
    # a truncated normal distribution over coordinate t parallel to the ray, and
    # a univariate normal distribution over coordinate r perpendicular to the ray.
    t = _dot(concentration, value)
    t2 = t.square()
    # squared perpendicular component: |c|^2 - t^2
    r2 = _dot(concentration, concentration) - t2
    perp_part = r2.mul(-0.5) - 0.5 * math.log(2 * math.pi)
    # This is the log of a definite integral, computed by mathematica:
    # Integrate[x/(E^((x-t)^2/2) Sqrt[2 Pi]), {x, 0, Infinity}]
    # = (t + Sqrt[2/Pi]/E^(t^2/2) + t Erf[t/Sqrt[2]])/2
    # = (Sqrt[2/Pi]/E^(t^2/2) + t (1 + Erf[t/Sqrt[2]]))/2
    # = (Sqrt[2/Pi]/E^(t^2/2) + t Erfc[-t/Sqrt[2]])/2
    para_part = _safe_log(
        (t2.mul(-0.5).exp().mul((2 / math.pi) ** 0.5) + t * (t * -(0.5**0.5)).erfc())
        / 2
    )
    return para_part + perp_part
@ProjectedNormal._register_log_prob(dim=3)
def _log_prob_3(concentration, value):
    """Exact ProjectedNormal log-density on the 2-sphere (dim=3)."""
    # We integrate along a ray, factorizing the integrand as a product of:
    # a truncated normal distribution over coordinate t parallel to the ray, and
    # a bivariate normal distribution over coordinate r perpendicular to the ray.
    t = _dot(concentration, value)
    t2 = t.square()
    # squared perpendicular component: |c|^2 - t^2
    r2 = _dot(concentration, concentration) - t2
    perp_part = r2.mul(-0.5) - math.log(2 * math.pi)
    # This is the log of a definite integral, computed by mathematica:
    # Integrate[x^2/(E^((x-t)^2/2) Sqrt[2 Pi]), {x, 0, Infinity}]
    # = t/(E^(t^2/2) Sqrt[2 Pi]) + ((1 + t^2) (1 + Erf[t/Sqrt[2]]))/2
    # = t/(E^(t^2/2) Sqrt[2 Pi]) + ((1 + t^2) Erfc[-t/Sqrt[2]])/2
    para_part = _safe_log(
        t * t2.mul(-0.5).exp() / (2 * math.pi) ** 0.5
        + (1 + t2) * (t * -(0.5**0.5)).erfc() / 2
    )
    return para_part + perp_part
@ProjectedNormal._register_log_prob(dim=4)
def _log_prob_4(concentration, value):
    """Exact ProjectedNormal log-density on the 3-sphere (dim=4)."""
    # We integrate along a ray, factorizing the integrand as a product of:
    # a truncated normal distribution over coordinate t parallel to the ray, and
    # a trivariate normal distribution over the 3 coordinates perpendicular to
    # the ray (hence the 1.5*log(2*pi) below; the original comment said
    # "bivariate", a copy-paste slip from the dim=3 case).
    t = _dot(concentration, value)
    t2 = t.square()
    # squared perpendicular component: |c|^2 - t^2
    r2 = _dot(concentration, concentration) - t2
    perp_part = r2.mul(-0.5) - 1.5 * math.log(2 * math.pi)
    # This is the log of a definite integral, computed by mathematica:
    # Integrate[x^3/(E^((x-t)^2/2) Sqrt[2 Pi]), {x, 0, Infinity}]
    # = (2 + t^2)/(E^(t^2/2) Sqrt[2 Pi]) + (t (3 + t^2) (1 + Erf[t/Sqrt[2]]))/2
    # = (2 + t^2)/(E^(t^2/2) Sqrt[2 Pi]) + (t (3 + t^2) Erfc[-t/Sqrt[2]])/2
    para_part = _safe_log(
        (2 + t2) * t2.mul(-0.5).exp() / (2 * math.pi) ** 0.5
        + t * (3 + t2) * (t * -(0.5**0.5)).erfc() / 2
    )
    return para_part + perp_part
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@pyro@distributions@projected_normal.py@.PATH_END.py
|
{
"filename": "arunfinedr7full.py",
"repo_name": "rohinkumar/correlcalc",
"repo_path": "correlcalc_extracted/correlcalc-master/clusterresults/arunfinedr7full.py",
"type": "Python"
}
|
# Batch script: two-point correlation function runs on the SDSS DR7 full
# sample via correlcalc's atpcf(), Landy-Szalay estimator, equal weights.
# NOTE(review): input paths are hard-coded to a cluster home directory —
# confirm they exist before running.
from correlcalc import *
# redshift-space bins of width 0.001 up to 0.05 for the sigma-pi run
bins = np.arange(0.001,0.051,0.001)
acorrdr7flcdmls=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',bins,bins,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='sigpi',estimator='ls',weights='eq')
print("-------------------------------------")
#acorrdr7flcdmlsw=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',bins,bins,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='sigpi',estimator='ls',weights=True)
#print("-------------------------------------")
#print("Calulating for sflat-mu")
#binspar = np.arange(0.001,0.051,0.001)
#binsper = np.arange(0.01,1.01,0.01)
#acorrdr7flcdmsmu=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',binspar,binsper,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='smu',estimator='ls',weights='eq')
#print("-------------------------------------")
#acorrdr7flcdmsmu=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',binspar,binsper,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='smu',estimator='ls',weights=True)
print("-------------------------------------")
# finer bins (width 0.0005 up to 0.02) for the 'ap' run
bins = np.arange(0.0005,0.0205,0.0005)
acorrdr7ap=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',bins,bins,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='ap',estimator='ls',weights='eq')
#print("-------------------------------------")
#acorrdr7apw=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',bins,bins,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='ap',estimator='ls',weights=True)
#print("-------------------------------------")
#print("Calulating for Rh=ct flat lc s-mu")
#binspar = np.arange(0.001,0.02,0.001)
#binsper = np.arange(0.05,1.05,0.05)
#acorrdr7flcsmu=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',binspar,binsper,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='smu',estimator='ls',weights='eq',cosmology='lc',geometry='flat')
#print("-------------------------------------")
#acorrdr7flcsmu=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',binspar,binsper,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='smu',estimator='ls',weights=True,cosmology='lc',geometry='flat')
#print("-------------------------------------")
#print("Calulating for Milne open lc s-mu")
#binspar = np.arange(0.001,0.02,0.001)
#binsper = np.arange(0.05,1.05,0.05)
#acorrdr7olcsmu=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',binspar,binsper,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='smu',estimator='ls',weights='eq',cosmology='lc',geometry='open')
#print("-------------------------------------")
#acorrdr7olcsmu=atpcf('/usr3/vstr/yrohin/Downloads/DR7-Full.ascii',binspar,binsper,randfile='/usr3/vstr/yrohin/Downloads/random-DR7-Full.ascii',vtype='smu',estimator='ls',weights=True,cosmology='lc',geometry='open')
|
rohinkumarREPO_NAMEcorrelcalcPATH_START.@correlcalc_extracted@correlcalc-master@clusterresults@arunfinedr7full.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/legendgrouptitle/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``choroplethmap.legendgrouptitle.font.shadow``."""

    def __init__(
        self,
        plotly_name="shadow",
        parent_name="choroplethmap.legendgrouptitle.font",
        **kwargs,
    ):
        # Pull the overridable default out of kwargs before delegating upward.
        edit_type = kwargs.pop("edit_type", "style")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@legendgrouptitle@font@_shadow.py@.PATH_END.py
|
{
"filename": "kernels.py",
"repo_name": "danieljprice/phantom",
"repo_path": "phantom_extracted/phantom-master/scripts/kernels.py",
"type": "Python"
}
|
#!/Users/dprice/anaconda3/bin/python3
#---------------------------------------------------------------
#
# Daniel Price's automatic smoothing kernel generation script
# Prints kernels and all related functions in suitable
# manner for direct incorporation into SPH codes
#
#---------------------------------------------------------------
from __future__ import division
from sympy import *
q, x, y = symbols('q x y')  # module-level SymPy symbols; q is the kernel coordinate used throughout
##############################################
# #
# various functions to actually perform the #
# symbolic calculations #
# #
##############################################
#---------------------------------------------
# function to get normalisation constants for
# kernel in 1, 2 and 3 dimensions
#---------------------------------------------
def getnorm(w,R):
    """Return the kernel normalisation constants (C1D, C2D, C3D) for support radius R."""
    one = sympify(1)
    norm1d = one/(2*integrate(w,(q,0,R)))
    norm2d = one/(integrate(2*pi*q*w,(q,0,R)))
    norm3d = one/(integrate(4*pi*q*q*w,(q,0,R)))
    return (norm1d, norm2d, norm3d)
#-----------------------------------------------
# work out the integration constant by matching
# the different parts of the piecewise function
#-----------------------------------------------
def intconst(g):
    """Fix the constants of integration of a piecewise function *g*.

    Works backwards from the outermost piece, shifting each segment so that it
    matches the value of the following segment at their shared boundary, making
    the piecewise function continuous.  Non-piecewise input is returned as-is.
    """
    if isinstance(g, Piecewise):
        garg = list(g.args)
        # iterate pieces last-to-first so each piece is matched against the
        # already-adjusted piece that follows it
        for i, (e, c) in reversed(list(enumerate(g.args))):
            if i < len(g.args) - 1:
                (ep, cp) = garg[i+1]
                # the boundary value of q is parsed from the printed condition,
                # e.g. "q < 1" -> third whitespace token "1"
                s = ("%s" %c)
                qval = sympify(s.split()[2])
                ge = simplify(e + (ep.subs(q,qval) - e.subs(q,qval)))
                garg[i] = (ge,c)
        tuple(garg)  # NOTE(review): no-op — the result is discarded
        g = Piecewise(*garg)
    return g
#----------------------------------------------
# function to get force softening kernel and
# related function from the density kernel
#----------------------------------------------
def getkernelfuncs(w,R):
    """Return derivatives, normalisations and gravity functions for kernel *w*.

    Parameters: w — (piecewise) density kernel as a function of q; R — support
    radius.  Returns a 9-tuple (dw, d2w, c1D, c2D, c3D, fsoft, pot, dpotdh,
    gsoft): first/second derivatives, 1/2/3D normalisation constants, the force
    softening function, the softened potential, its derivative with respect to
    h and the kernel used in the gradient acceleration term of the 4th-order
    Forward Symplectic Integrator.
    """
    dw = diff(w,q)
    d2w = diff(dw,q)
    c1D, c2D, c3D = getnorm(w,R)
    #
    #--force softening function
    #  BUG FIX: the integrand is the density kernel w itself; the original
    #  referenced a name ``f`` that is not defined in this function.
    #
    fsoft = piecewise_fold(4*pi*c3D*integrate(w*q*q,q)/q**2)
    farg = list(fsoft.args)
    lastarg = len(fsoft.args) - 1
    # outside the kernel support the softening is the exact 1/q^2 point mass
    farg[lastarg] = (sympify(1/(q*q)),fsoft.args[lastarg].cond)
    #
    #--work out the integration constant for the force softening
    #  by matching the different parts of the piecewise function
    #
    if isinstance(fsoft, Piecewise):
        for i, (e, c) in reversed(list(enumerate(fsoft.args))):
            if i < lastarg:
                (ep, cp) = farg[i+1]
                s = ("%s" %c)
                qval = sympify(s.split()[2])
                fe = simplify(e + qval**2*(ep.subs(q,qval) - e.subs(q,qval))/q**2)
                farg[i] = (fe,c)
        fsoft = Piecewise(*farg)
    #
    #--potential function
    #
    pot = integrate(fsoft,q)
    #
    #--work out the integration constant for the potential
    #  (outside the support the potential is the exact -1/q point mass)
    #
    parg = list(pot.args)
    lastarg = len(pot.args) - 1
    parg[lastarg] = (sympify(-1/(q)),pot.args[lastarg].cond)
    if isinstance(pot, Piecewise):
        for i, (e, c) in reversed(list(enumerate(pot.args))):
            if i < len(pot.args) - 1:
                (ep, cp) = parg[i+1]
                s = ("%s" %c)
                qval = sympify(s.split()[2])
                pote = simplify(e + (ep.subs(q,qval) - e.subs(q,qval)))
                parg[i] = (pote,c)
        pot = Piecewise(*parg)
    #
    #--derivative of potential with respect to h: dphi/dh = -phi - q dphi/dq
    #
    dpotdh = pot
    pharg = list(pot.args)
    if isinstance(pot, Piecewise):
        for i, (e, c) in enumerate(pot.args):
            ep = simplify(-e - q*diff(e,q))
            pharg[i] = (ep, c)
        dpotdh = Piecewise(*pharg)
    #
    #--kernel function needed in gradient acceleration
    #  for 4th order Forward Symplectic Integrator: q dF/dq - F
    #
    farg = list(fsoft.args)
    if isinstance(fsoft, Piecewise):
        for i, (e, c) in enumerate(fsoft.args):
            ep = simplify(q*diff(e,q) - e)
            farg[i] = (ep, c)
        gsoft = Piecewise(*farg)
    return (dw, d2w, c1D, c2D, c3D, fsoft, pot, dpotdh, gsoft)
#---------------------------------------------
# function to get the variance of the kernel
#---------------------------------------------
def getvar(w,R):
    """Return (variance, variance relative to cubic spline, relative std dev).

    Each element is a 3-tuple for 1, 2 and 3 dimensions.
    """
    c1D, c2D, c3D = getnorm(w,R)
    # volume-weighted second moments of the kernel in each dimensionality
    weights = (c1D, c2D*2*pi*q, c3D*4*pi*q*q)
    var = tuple(integrate(wt*q*q*w,(q,0,R)) for wt in weights)
    # reference values for the M4 cubic spline kernel
    varcubic = (sympify(1)/6, sympify(31)/49, sympify(9)/10)
    relvar = tuple(v/vc for v, vc in zip(var, varcubic))
    reldev = tuple(sqrt(1.0*rv) for rv in relvar)
    return (var,relvar,reldev)
#---------------------------------------------
# get proportionality factor in artificial viscosity
#---------------------------------------------
def get_avdiss(w,R):
    """Return the artificial-viscosity proportionality factors.

    Gives the Monaghan & Gingold (1983) and Monaghan (1997) factors and their
    ratio (see equation B21 in Meru & Bate 2012).
    """
    gradw = diff(w,q)
    c3D = getnorm(w,R)[2]
    prefac = -2*pi/15*c3D
    mg83 = integrate(prefac*q**3*gradw,(q,0,R))
    m97 = integrate(prefac*q**4*gradw,(q,0,R))
    return (mg83, m97, m97/mg83)
def print_avdiss(w,R):
    """Print the artificial-viscosity dissipation factors and their ratio."""
    mg83, m97, ratio = get_avdiss(w,R)
    print ("art visc factor (MG83) = ",mg83)
    print ("art visc factor (M97) = ",m97)
    print (" ratio = ",ratio,float(ratio))
    return
#-------------------------------------------------------
# function to get the standard deviation of the kernel
# scaled relative to the cubic spline
#-------------------------------------------------------
def getreldev(w,R):
    """Return the kernel standard deviation relative to the cubic spline (1/2/3D)."""
    return getvar(w,R)[2]
#--------------------------------------------------------
# Functions to return kernels that are constructed from
# other kernels in some way
#--------------------------------------------------------
def intkernel(wref,R):
    """Construct a new kernel by integrating -q*w once, with matched constants."""
    base, basename = wref(R)
    g = intconst(piecewise_fold(integrate(-q*base,q)))
    return (g, "Integrated %s" % basename)
def intkernel2(wref,R):
    """Construct a new kernel by twice integrating -q*w, with matched constants."""
    g, basename = wref(R)
    for _ in range(2):
        g = intconst(piecewise_fold(integrate(-q*g,q)))
    return (g, "Twice-integrated %s" % basename)
def intkernel3(wref,R):
    """Construct a new kernel by three integrations of -q*w, with matched constants."""
    g, basename = wref(R)
    for _ in range(3):
        g = intconst(piecewise_fold(integrate(-q*g,q)))
    return (g, "Triple-integrated %s" % basename)
def doublehump(wref,R):
    """Return the double-hump kernel q^2 * w built from the reference kernel."""
    base, basename = wref(R)
    return (piecewise_fold(base*q**2), "Double-hump %s" % basename)
def doublehump3(wref,R):
    """Return the q^3-weighted double-hump kernel built from the reference kernel."""
    base, basename = wref(R)
    return (piecewise_fold(base*q**3), "Double-hump-on-steroids %s" % basename)
def doublehump5(wref,R):
    """Return the q^5-weighted double-hump kernel built from the reference kernel."""
    base, basename = wref(R)
    return (piecewise_fold(base*q**5), "Double-hump-on-overdrive %s" % basename)
##############################################
# #
# various output functions to print kernel #
# information in different ways #
# #
##############################################
#-------------------------------------------------------
# function to print the variance and standard deviation
# of a kernel, and to print these relative to the cubic
#-------------------------------------------------------
def printvariances(w,R):
    """Print the kernel variances and their values relative to the cubic spline."""
    var, relvar, reldev = getvar(w,R)
    print ("\nVariance of kernel in 1, 2, 3D:")
    print (*var)
    print ("\nVariance and standard dev relative to cubic:")
    print (*relvar)
    print (*reldev)
    print ("\nKernel radius required to get same std. dev as cubic:")
    print (*(2/dev for dev in reldev))
    print ("\neta = 1.2 is equivalent to:")
    print (*(1.2/dev for dev in reldev))
    return
#-----------------------------------------------------------
# function to print basic kernel information to the screen
#-----------------------------------------------------------
def printkernel(w,R):
    """Print the kernel, its derivatives, normalisations and variances.

    NOTE(review): relies on the module-level global ``name`` for the label —
    confirm it is set before calling.
    """
    dw, d2w, c1D, c2D, c3D, fsoft, pot, dpotdh, gsoft = getkernelfuncs(w,R)
    print ("\n%s W:" %name)
    print (w)
    print ("\nFirst derivative:")
    print (dw)
    #print (fmt(dw))
    print ("\n2nd derivative:")
    print (d2w)
    print ("\nnormalisation:")
    print ("[ %s, %s, %s ]" %(c1D,c2D,c3D))
    print ("\n3D normalisation of artificial viscosity term:")
    avnorm = -sympify(2)*pi/15*c3D*integrate(q*q*q*dw,(q,0,R))
    print (avnorm)
    print ("\n2D normalisation of artificial viscosity term:")
    avnorm = -pi/8*c2D*integrate(q*q*dw,(q,0,R))
    print (avnorm)
    printvariances(w,R)
    print ("\n gradient acceleration term:")
    print (gsoft)
    return
#-------------------------------------------------------------
# print start of a LaTeX table containing kernel information
#-------------------------------------------------------------
def printheader_latex():
    """Print the opening of a LaTeX table of kernel properties."""
    print ("\\begin{tabular}{|l|l|l|l|l|l|l|l|}\n")
    # FIX: backslashes doubled so no invalid "\s" escape sequences remain
    # (Python 3.12+ emits SyntaxWarning for them); printed output is unchanged.
    print ("\\hline\nName & Functional form & C$_{1D}$ & C$_{2D}$ & C$_{3D}$ & $\\sigma^2_{1D}$ & $\\sigma^2_{2D}$ & $\\sigma^2_{3D}$\\\\ \n")
#-----------------------------------------------------------
# print end of a LaTeX table containing kernel information
#-----------------------------------------------------------
def printfooter_latex():
    """Print the closing of a LaTeX table of kernel properties."""
    footer = "\\hline\\end{tabular}\n"
    print (footer)
#---------------------------------------------------------------
# print contents of a LaTeX table containing kernel information
#---------------------------------------------------------------
def printkernel_latex(w,R):
    """Print one LaTeX table row with the kernel's normalisations and variances.

    NOTE(review): uses the module-level global ``name`` for the row label —
    confirm it is set before calling.
    """
    c1D, c2D, c3D = getnorm(w,R)
    var, relvar, reldev = getvar(w,R)
    print ("\\hline\n%s & $" %fmttex(name))
    print (latex(w))
    print ("$ & $%s$ & $%s$ & $%s$ & $%s$ & $%s$ & $%s$ \\\\" %(latex(c1D),latex(c2D),latex(c3D),latex(var[0]),latex(var[1]),latex(var[2])))
    return
#--------------------------------
# format names for LaTeX output
#--------------------------------
def fmttex(s):
    """Wrap ``^...`` and ``_...`` runs of *s* in math-mode ``$...$`` for LaTeX.

    e.g. ``"M^4"`` becomes ``"M$^4$"``.
    """
    import re
    # FIX: raw strings avoid invalid escape sequences ("\^", "\S", "\g") that
    # Python 3.12+ flags with SyntaxWarning; behaviour is unchanged.
    s = re.sub(r"\^\S+", r"$\g<0>$", s)
    s = re.sub(r"\_\S+", r"$\g<0>$", s)
    return s
#-------------------------------------------------------------------------------
# utility to format output of real numbers correctly for Fortran floating point
#-------------------------------------------------------------------------------
def fmt(e):
    """Format a SymPy expression as Fortran-style floating point source text.

    Adds decimal points to integer literals (e.g. ``3*q`` -> ``3.*q``) while
    leaving integer exponents like ``q**2`` alone, then returns whichever of
    the raw, expanded or simplified form is most compact without introducing
    long expanded floats.
    """
    import re
    s = ("%s" %e)
    # add decimal points to numbers, but not if powers like q**2 (or *2 with one digit)
    # note that \g<0> gives first matching argument in the regex
    # rules are: (not **n)(not 0.123)(match ab0123) or (not *n with one digit)
    s = re.sub("((?!\*\*\d+)(?!\D\D\d+\.)\D\D\d+)|((!?\*\d+)\D\d+)|(/\d+)|((?!^\.\d+)^\d+)|((?!^-\d+\.)^-\d+)","\g<0>.", s)
    # replace 15*x with 15.*x as long as it is not **15*x
    s = re.sub("(?!\*\d+)(\D\d+)\*","\g<1>.*", s)
    # replace " 2)" with " 2.)"
    # Use re.sub to replace " digit)" with " digit.)"
    s = re.sub(r" (\d)\)", r" \1.)", s)
    f = sympify(s)
    #
    # expand if it makes it shorter
    #
    h = ("%s" %(expand(f)))
    #f = h
    if (len(h) <= len(s)):
        f = h
    g = ("%s" %simplify(f))
    # replace 1.4000000 with 1.4
    g = re.sub("(\.[1-9]*)(0+)(\D|$)","\g<1>\g<3>", g)
    # replace " 2)" with " 2.)"
    # Use re.sub to replace " digit)" with " digit.)"
    g = re.sub(r" (\d)\)", r" \1.)", g)
    # only return simplify-ed strings if no fully expanded floats 0.345242545..
    if re.search("(\.\d\d\d\d\d+)",g):
        return s
    else:
        return g
#------------------------------------------------------------------------
# extended version of above, replacing q**2 with q2, q**3 with q2*q etc.
# and getting rid of excess zeros after decimal point, e.g. 7.0000->7
#------------------------------------------------------------------------
def fmte(e,useqsub,useodd):
    """Extended formatter: like :func:`fmt` but substituting power variables.

    When *useqsub* is true, powers of q are rewritten in terms of precomputed
    temporaries (q2, q4, q6, q8 and, if *useodd*, q3/q5/q7/q9); excess zeros
    after the decimal point are stripped (e.g. ``7.0000`` -> ``7.``).
    """
    import re
    s = ("%s" %fmt(e))
    #fs = ""
    #for arg in (split(s,' ')):
    #    fs = fs+arg
    f = sympify(s)
    g = ("%s" %simplify(f))
    # prefer the simplified form when it is not materially longer
    if len(g) <= len(s) + 1:
        s = g
    if (useqsub):
        s = re.sub("q\*\*12","q6*q6", s)
        s = re.sub("q\*\*11","q6*q4*q", s)
        s = re.sub("q\*\*10","q6*q4", s)
        s = re.sub("q\*\*8","q8", s)
        if (useodd):
            s = re.sub("q\*\*9","q9", s)
            s = re.sub("q\*\*7","q7", s)
            s = re.sub("q\*\*5","q5", s)
            s = re.sub("q\*\*3","q3", s)
        else:
            s = re.sub("q\*\*9","q8*q", s)
            s = re.sub("q\*\*7","q6*q", s)
            s = re.sub("q\*\*5","q4*q", s)
            s = re.sub("q\*\*3","q2*q", s)
        s = re.sub("q\*\*6","q6", s)
        s = re.sub("q\*\*4","q4", s)
        s = re.sub("q\*\*2","q2", s)
        s = re.sub("q\*\*\(-2\.\)","1./q2",s)
        s = re.sub("q\*\*-2\.0","1./q2",s)
        s = re.sub("q\*\*\(-2\.0\)","1./q2",s)
    # remove excess zeros after decimal place
    # handles case of 3.0*q4 -> 3.*q4, zeros must be followed by non-digit or end of line
    s = re.sub("(\.0+)(\D+|$)",".\g<2>", s)
    return s
#-----------------------------------------------------
# wrap output to 72 characters for Fortran 77 output
#-----------------------------------------------------
def wrapit(s,indent):
    """Wrap a long expression string at ~72 columns for Fortran output.

    Breaks at the last space before the margin, appending a free-form ``&``
    continuation and indenting subsequent lines by *indent* spaces.  Strings
    of 72 characters or fewer are returned unchanged.
    """
    if len(s) <= 72:
        return s
    split_at = s.rfind(" ", 6, 72)
    if split_at == -1:
        split_at = 72
    out = s[:split_at] + " &\n" + " "*indent
    remainder = s[split_at:].lstrip()
    while remainder:
        split_at = remainder.rfind(" ", 0, 66)
        if split_at == -1 or len(remainder) < 66:
            split_at = 66
        piece = remainder[:split_at]
        remainder = remainder[split_at:].lstrip()
        if remainder:
            out = out + piece + " &\n" + " "*indent
        else:
            out = out + piece
    return out
#------------------------------------
# wrap and indent Fortran 77 output
#------------------------------------
def wrapf77(s,indent):
    """Wrap a long expression string for fixed-form FORTRAN77 output.

    Lines longer than ``72 - indent`` are broken at the last space before the
    margin; continuation lines carry a ``&`` marker followed by
    ``indent - 6`` spaces of padding.
    """
    maxl = (72 - indent)
    if len(s) > maxl:
        pos = s.rfind(" ", 6, maxl)
        if pos == -1:
            pos = maxl  # no space found: hard-break at the margin
        hunk = s[:pos]
        rest = s[pos:].lstrip()
        s = ("%s \n" %(hunk) + " &"+ " "*(indent-6))
        while len(rest) > 0:
            pos = rest.rfind(" ", 0, (maxl-6))
            if pos == -1 or len(rest) < (maxl-6):
                pos = maxl - 6
            hunk = rest[:pos]
            rest = rest[pos:].lstrip()
            if len(rest) > 0:
                s = ("%s%s\n" %(s,hunk) + " &"+" "*(indent-6))
            else:
                s = ("%s%s" % (s,hunk))
    return s
#---------------------------------------------------------
# wrappers for above routines, specific to output formats
# these define the line length and the indent
#---------------------------------------------------------
def fmtp(e):
    """Format *e* for Phantom free-form output, wrapped at indent 17."""
    return wrapit("%s" % fmte(e, True, False), 17)
def fmtn(e):
    """Format *e* for ndspmhd output, wrapped at indent 25."""
    return wrapit("%s" % fmte(e, True, False), 25)
def fmts(e):
    """Format *e* for sphNG fixed-form F77 output (odd powers allowed), indent 18."""
    return wrapf77("%s" % fmte(e, True, True), 18)
def stripcond(e):
    """Return a piecewise condition with ``q``, comparison operators and
    whitespace removed, e.g. the condition ``q < 1`` becomes ``1``.
    """
    import re
    # BUG FIX: the original called fmt(e, True, True), but fmt() takes a single
    # argument; fmte() is the three-argument formatter intended here.
    s = "%s" % fmte(e, True, True)
    # raw string avoids the invalid "\s" escape flagged by Python 3.12+
    return re.sub(r"q|<|>|\s", "", s)
#-------------------------------
# print FORTRAN77 comment line
#-------------------------------
def printc(s):
    """Print a FORTRAN77 comment banner around *s* and return *s*."""
    banner = "c\nc--%s\nc" % (s)
    print (banner)
    return s
#---------------------------------
# print kernel code for ndspmhd
#---------------------------------
def printkernel_ndspmhd(w,R,name):
    """Print Fortran source implementing kernel *w* for the ndspmhd code.

    Parameters: w — piecewise kernel; R — support radius; name — kernel label.
    The generated code is written to stdout.
    """
    # BUG FIX: getkernelfuncs returns 9 values (including gsoft); the original
    # unpacked only 8, raising ValueError at runtime.  gsoft is unused here.
    # (Also removed the unused local ``useoddq``.)
    dw, d2w, c1D, c2D, c3D, fsoft, pot, dpotdh, gsoft = getkernelfuncs(w,R)
    print ("!")
    print ("!--%s (auto-generated by kernels.py)" %name)
    print ("!")
    print (" kernellabel = '%s' \n" %name)
    print (" radkern = %.1f" %(R))
    print (" radkern2 = radkern*radkern")
    print (" dq2table = radkern2/real(ikern)")
    print (" select case(ndim)")
    print (" case(1)")
    print (" cnormk = %s" %fmt(c1D))
    print (" case(2)")
    print (" cnormk = %s" %fmt(c2D))
    print (" case(3)")
    print (" cnormk = %s" %fmt(c3D))
    print (" end select")
    print (" do i=0,ikern")
    print (" q2 = i*dq2table")
    print (" q4 = q2*q2")
    print (" q6 = q4*q2")
    print (" q8 = q4*q4")
    print (" q = sqrt(q2)")
    if isinstance(w, Piecewise):
        # one Fortran branch per piecewise segment; final catch-all -> else
        for i, (e, c) in enumerate(w.args):
            (de, dc) = dw.args[i]
            (d2e, d2c) = d2w.args[i]
            (fe,fc) = fsoft.args[i]
            (pe,pc) = pot.args[i]
            (pdhe,pdhc) = dpotdh.args[i]
            if i == 0:
                print (" if (%s) then" %fmt(c))
            elif i == len(w.args)-1 and c == True:
                print (" else")
            else:
                print (" elseif (%s) then" %fmt(c))
            print (" wkern(i) = %s " %fmtn(e))
            print (" grwkern(i) = %s " %fmtn(de))
            print (" grgrwkern(i) = %s " %fmtn(d2e))
            print (" fsoft(i) = %s " %fmtn(fe))
            print (" potensoft(i) = %s " %fmtn(pe))
            print (" dphidh(i) = %s " %fmtn(pdhe))
        print (" endif")
    else:
        print (w)
    print (" enddo\n")
#---------------------------------
# print kernel code for sphNG
#---------------------------------
def printkernel_sphNG(w,R,name):
    """Print a FORTRAN77 ``ktable`` subroutine implementing kernel *w* for sphNG.

    Parameters: w — piecewise kernel; R — support radius; name — kernel label.
    The generated code is written to stdout.
    """
    import datetime
    # BUG FIX: getkernelfuncs returns 9 values (including gsoft); the original
    # unpacked only 8, raising ValueError at runtime.  gsoft is unused here.
    dw, d2w, c1D, c2D, c3D, fsoft, pot, dpotdh, gsoft = getkernelfuncs(w,R)
    print (" SUBROUTINE ktable")
    print ("c*********************************************************")
    print ("c This subroutine builds a table for the kernel,")
    print ("c the gradient of the kernel, the mass fraction,")
    print ("c and the potential energy.")
    print ("c The entry is v**2.")
    print ("c")
    print ("c DO NOT EDIT: AUTO-GENERATED by kernels.py")
    print ("c KERNEL NAME: %s " %name)
    print ("c AUTHOR: kernels.py, by Daniel Price")
    print ("c GENERATED:",datetime.datetime.now())
    print ("c")
    print ("c*********************************************************")
    print (" IMPLICIT NONE ! because life is worth living")
    print (" INCLUDE 'idim'\n")
    print (" REAL*8 sum, v2max, q, q2, q3, q4, q5, q6, q7, q8, q9")
    print (" INTEGER i")
    print ("\n INCLUDE 'COMMONS/physcon'")
    print (" INCLUDE 'COMMONS/kerne'")
    print (" INCLUDE 'COMMONS/table'")
    print (" INCLUDE 'COMMONS/logun'")
    print (" INCLUDE 'COMMONS/debug'")
    printc("Allow for tracing flow")
    print (" IF (itrace.EQ.'all') WRITE(iprint, 99001)")
    print ("99001 FORMAT (' entry subroutine ktable')")
    printc("Maximum interaction length and step size")
    print (" radkernel = %.1f" %(R))
    if isinstance(w, Piecewise):
        # emit the q-values of the first two piecewise boundaries
        for i, (e, c) in enumerate(w.args):
            if (c != True and i < 2):
                print (" part%ikernel = %s" %(i+1,stripcond(c)))
    print (" v2max = radkernel*radkernel")
    print (" dvtable = v2max/itable")
    print (" ddvtable = itable/v2max")
    printc("Build tables")
    print (" DO i=0,itable")
    print (" q2 = i*dvtable")
    print (" q = sqrt(q2)")
    print (" q3 = q*q2")
    print (" q4 = q*q3")
    print (" q5 = q*q4")
    print (" q6 = q*q5")
    print (" q7 = q*q6")
    print (" q8 = q*q7")
    print (" q9 = q*q8")
    if isinstance(w, Piecewise):
        # one Fortran branch per piecewise segment; final catch-all -> ELSE
        for i, (e, c) in enumerate(w.args):
            (de, dc) = dw.args[i]
            (d2e, d2c) = d2w.args[i]
            (fe,fc) = fsoft.args[i]
            (pe,pc) = pot.args[i]
            (pdhe,pdhc) = dpotdh.args[i]
            if i == 0:
                print (" IF (%s) THEN" %fmt(c))
            elif i == len(w.args)-1 and c == True:
                print (" ELSE")
            else:
                print (" ELSEIF (%s) THEN" %fmt(c))
            print (" sum = %s" %fmts(e))
            print (" wij(i) = sum")
            print (" sum = %s" %fmts(de))
            print (" grwij(i) = sum")
            print (" sum = %s" %fmts(q*q*fe))
            print (" fmass(i) = sum")
            print (" sum = %s" %fmts(pe))
            print (" fpoten(i) = sum")
            print (" sum = %s" %fmts(-pdhe))
            print (" dphidh(i) = sum")
        print (" ENDIF")
    print (" ENDDO")
    printc("Normalisation constant")
    print (" cnormk = %s" %fmt(c3D))
    print (" selfnormkernel = %s" %fmt(w.subs(q,0)))
    print (" part1potenkernel = 0.0 ! term already included in fpoten above")
    print (" part2potenkernel = 0.0 ! see above")
    #--double hump normalisation (needed for dust/gas drag)
    wdrag = piecewise_fold(w*q*q)
    c3Ddrag = sympify(1)/(integrate(4*pi*q*q*wdrag,(q,0,R)))
    printc("For dust/gas drag, need double humped kernel")
    print (" doublehumpnormk = %s" %fmt(c3Ddrag))
    print ("\n RETURN")
    print (" END")
#-------------------------------------------------
# print q4 = q2*q2 and similar definitions
# e.g. if the string q4 is found in the function
#-------------------------------------------------
def print_defs(indent,*args):
    """Print ``q4 = q2*q2``-style definitions needed by the given code strings.

    Scans each formatted expression string in *args* for q4/q6/q8 and, for
    every power found, prints its definition indented by *indent* spaces.
    """
    import re
    # FIX: renamed the loop variable — the original shadowed the builtin
    # ``str`` — and removed the unused local ``gotq4``.
    for power in (4, 6, 8):
        token = ("q%i" %power)
        # emit the definition if any function about to be printed uses it
        if any(re.search(token, arg) for arg in args):
            print (" "*indent+"q%i = q%i*q2" %(power,power-2))
#----------------------------------------
# print code declarations of q4, q6 etc.
#----------------------------------------
def print_decl(w):
    """Print the Fortran declaration for any q4/q6/q8 temporaries used by *w*.

    Checks the formatted output of every piece of the piecewise kernel *w*
    and declares only the powers that actually appear; prints a blank line
    when none are needed.
    """
    import re
    # FIX: renamed the accumulator — the original shadowed the builtin ``str``.
    decl = ""
    for qstr in ("q4","q6","q8"):
        printQ = False
        for i, (e, c) in enumerate(w.args):
            if (re.search(qstr,fmtp(e))):
                printQ = True
        if (printQ):
            if (len(decl) > 0):
                decl = decl+", "+qstr
            else:
                decl = qstr
    if (len(decl) > 0):
        print (" real :: %s\n" %(decl))
    else:
        print ("")
#---------------------------------
# print kernel code for Phantom
#---------------------------------
def printkernel_phantom(w,R,name):
    """Print the complete Fortran 'kernel' module used by Phantom to stdout.

    w    : piecewise kernel function of the global symbol q
    R    : compact support radius of the kernel
    name : human-readable kernel name embedded in the generated module
    """
    import datetime
    # derivative, normalisations, softening/potential kernels from the helpers
    dw, d2w, c1D, c2D, c3D, fsoft, pot, dpotdh, gsoft = getkernelfuncs(w,R)
    w0 = w.subs(q,0)
    dpotdh0 = dpotdh.subs(q,0)
    #
    #--double-hump kernel used in drag routines, with normalisation
    #
    wdrag = piecewise_fold(w*q*q)
    c3Ddrag = sympify(1)/(integrate(4*pi*q*q*wdrag,(q,0,R)))
    avm83, avm97, avratio = get_avdiss(w,R)
    #--module header and compile-time kernel constants
    print ("!--------------------------------------------------------------------------!")
    print ("! The Phantom Smoothed Particle Hydrodynamics code, by Daniel Price et al. !")
    print ("! Copyright (c) 2007-2024 The Authors (see AUTHORS)                        !")
    print ("! See LICENCE file for usage and distribution conditions                   !")
    print ("! http://phantomsph.github.io/                                             !")
    print ("!--------------------------------------------------------------------------!")
    print ("module kernel")
    print ("!")
    print ("! This module implements the %s kernel" %name)
    print ("!  DO NOT EDIT - auto-generated by kernels.py")
    print ("!")
    print ("! :References: None")
    print ("!")
    print ("! :Owner: Daniel Price")
    print ("!")
    print ("! :Runtime parameters: None")
    print ("!")
    print ("! :Dependencies: physcon")
    print ("!")
    print ("! :Generated:",datetime.datetime.now())
    print ("!")
    print ("!--------------------------------------------------------------------------")
    print (" use physcon, only:pi")
    print (" implicit none")
    print (" character(len=%i), public :: kernelname = '%s'" %(len(name),name))
    print (" real, parameter, public  :: radkern  = %s" %fmt(R))
    print (" real, parameter, public  :: radkern2 = %s" %fmt(R*R))
    print (" real, parameter, public  :: cnormk = %s" %fmt(c3D))
    print (" real, parameter, public  :: wab0 = %s, gradh0 = -3.*wab0" %fmt(w0))
    print (" real, parameter, public  :: dphidh0 = %s" %fmtp(dpotdh0))
    print (" real, parameter, public  :: cnormk_drag = %s" %fmt(c3Ddrag))
    var, relvar, reldev = getvar(w,R)
    print (" real, parameter, public  :: hfact_default = %.1f" %(1.2/reldev[2]))
    print (" real, parameter, public  :: av_factor = %s" %fmt(avratio))
    print ("\ncontains\n")
    #--combined kernel + gradient routine
    print ("pure subroutine get_kernel(q2,q,wkern,grkern)")
    print (" real, intent(in)  :: q2,q")
    print (" real, intent(out) :: wkern,grkern")
    print_decl(w)
    print (" !--%s" %name)
    if isinstance(w, Piecewise):
        for i, (e, c) in enumerate(w.args):
            (de, dc) = dw.args[i]
            if i == 0:
                print (" if (%s) then" %fmt(c))
            elif i == len(w.args)-1 and c == True:
                print (" else")
            else:
                print (" elseif (%s) then" %fmt(c))
            print_defs(4,fmtp(e),fmtp(de))
            print ("    wkern  = %s" %fmtp(e))
            print ("    grkern = %s" %fmtp(de))
    print (" endif")
    print ("\nend subroutine get_kernel\n")
    #--kernel only
    print ("pure elemental real function wkern(q2,q)")
    print (" real, intent(in) :: q2,q")
    print_decl(w)
    if isinstance(w, Piecewise):
        for i, (e, c) in enumerate(w.args):
            if i == 0:
                print (" if (%s) then" %fmt(c))
            elif i == len(w.args)-1 and c == True:
                print (" else")
            else:
                print (" elseif (%s) then" %fmt(c))
            print_defs(4,fmtp(e))
            print ("    wkern = %s" %fmtp(e))
        print (" endif")
    else:
        print_defs(4,w)
        print (" wkern = %s" %w)
    print ("\nend function wkern\n")
    #--gradient only
    print ("pure elemental real function grkern(q2,q)")
    print (" real, intent(in) :: q2,q")
    print_decl(dw)
    if isinstance(dw, Piecewise):
        for i, (e, c) in enumerate(dw.args):
            if i == 0:
                print (" if (%s) then" %fmt(c))
            # BUGFIX: this loop walks dw.args, so the "last piece" test must use
            # len(dw.args) (was len(w.args) — harmless only while the two match).
            elif i == len(dw.args)-1 and c == True:
                print (" else")
            else:
                print (" elseif (%s) then" %fmt(c))
            print_defs(4,fmtp(e))
            print ("    grkern = %s" %fmtp(e))
        print (" endif")
    else:
        print_defs(4,fmtp(dw))
        print (" grkern = %s " %fmtp(dw))
    print ("\nend function grkern\n")
    #--kernel + gradient + dphi/dh for self-gravity
    print ("pure subroutine get_kernel_grav1(q2,q,wkern,grkern,dphidh)")
    print (" real, intent(in)  :: q2,q")
    print (" real, intent(out) :: wkern,grkern,dphidh")
    print_decl(dpotdh)
    if isinstance(w, Piecewise):
        for i, (e, c) in enumerate(w.args):
            (de, dc) = dw.args[i]
            (dphie, dphic) = dpotdh.args[i]
            if i == 0:
                print (" if (%s) then" %fmt(c))
            elif i == len(w.args)-1 and c == True:
                print (" else")
            else:
                print (" elseif (%s) then" %fmt(c))
            print_defs(4,fmtp(e),fmtp(de),fmtp(dphie))
            print ("    wkern  = %s" %fmtp(e))
            print ("    grkern = %s" %fmtp(de))
            print ("    dphidh = %s" %fmtp(dphie))
        print (" endif")
    else:
        print_defs(4,fmtp(w),fmtp(dw),fmtp(dpotdh))
        print (" wkern  = %s" %fmtp(w))
        print (" grkern = %s" %fmtp(dw))
        print (" dphidh = %s" %fmtp(dpotdh))
    print ("\nend subroutine get_kernel_grav1\n")
    # (a commented-out get_kernel_grav2 variant used to live here; it was
    #  superseded by kernel_softening below and has been removed)
    #--softened potential and force for self-gravity
    print ("pure subroutine kernel_softening(q2,q,potensoft,fsoft)")
    print (" real, intent(in)  :: q2,q")
    print (" real, intent(out) :: potensoft,fsoft")
    print_decl(fsoft)
    if isinstance(dw, Piecewise):
        for i, (de, c) in enumerate(dw.args):
            (pote, potc) = pot.args[i]
            (fe, fc) = fsoft.args[i]
            if i == 0:
                print (" if (%s) then" %fmt(c))
            elif i == len(dw.args)-1 and c == True:
                print (" else")
            else:
                print (" elseif (%s) then" %fmt(c))
            print_defs(4,fmtp(pote),fmtp(fe))
            print ("    potensoft = %s" %fmtp(pote))
            print ("    fsoft     = %s" %fmtp(fe))
        print (" endif")
    else:
        print (" potensoft = %s" %fmtp(pot))
        print (" fsoft     = %s" %fmtp(fsoft))
    print ("\nend subroutine kernel_softening\n")
    print ("!------------------------------------------")
    print ("! gradient acceleration kernel needed for")
    print ("! use in Forward symplectic integrator")
    print ("!------------------------------------------")
    print ("pure subroutine kernel_grad_soft(q2,q,gsoft)")
    print (" real, intent(in)  :: q2,q")
    print (" real, intent(out) :: gsoft")
    print_decl(gsoft)
    if isinstance(dw, Piecewise):
        for i, (de, c) in enumerate(dw.args):
            (ge, gc) = gsoft.args[i]
            if i == 0:
                print (" if (%s) then" %fmt(c))
            elif i == len(dw.args)-1 and c == True:
                print (" else")
            else:
                print (" elseif (%s) then" %fmt(c))
            print_defs(4,fmtp(ge))
            print ("    gsoft = %s" %fmtp(ge))
        print (" endif")
    else:
        print (" gsoft = %s" %fmtp(gsoft))
    print ("\nend subroutine kernel_grad_soft\n")
    print ("!------------------------------------------")
    print ("! double-humped version of the kernel for")
    print ("! use in drag force calculations")
    print ("!------------------------------------------")
    print ("pure elemental real function wkern_drag(q2,q)")
    print (" real, intent(in) :: q2,q")
    print_decl(wdrag)
    print (" !--double hump %s kernel" %name)
    if isinstance(wdrag, Piecewise):
        for i, (e, c) in enumerate(wdrag.args):
            if i == 0:
                print (" if (%s) then" %fmt(c))
            elif i == len(wdrag.args)-1 and c == True:
                print (" else")
            else:
                print (" elseif (%s) then" %fmt(c))
            print_defs(4,fmtp(e))
            print ("    wkern_drag = %s" %fmtp(e))
        print (" endif")
    else:
        print_defs(4,fmtp(wdrag))
        print (" wkern_drag = %s" %fmtp(wdrag))
    print ("\nend function wkern_drag\n")
    print ("end module kernel")
def printalltex():
    """Emit the LaTeX summary (header, one section per kernel, footer) for the standard kernel list."""
    R = sympify(2)
    kernels = (m4, m5, m6, w2_1D, w4_1D, w6_1D, w2, w4, w6,
               intm4, intm5, intm6, int2m4, int3m4, f6)
    printheader_latex()
    for kernel in kernels:
        f, name = kernel(R)
        printkernel_latex(f, R)
    printfooter_latex()
def print_stddevs():
    """Print kernel name and 1/(relative deviation) in 1D, 2D and 3D for each standard kernel.

    BUGFIX: this used to rely on a module-level ``R`` that is only defined much
    further down the file; define it locally (matching printalltex) so the
    function works no matter when it is called.
    """
    R = sympify(2)
    for x in (m4, m5, m6, w2_1D, w4_1D, w6_1D, w2, w4, w6,
              intm4, intm5, intm6, int2m4, int3m4, f6):
        f, name = x(R)
        reldev = getreldev(f, R)
        print (x.__name__, 1.0/reldev[0], 1.0/reldev[1], 1.0/reldev[2])
#####################################
# KERNEL DEFINITIONS START BELOW #
#####################################
#-------------------------
# B-spline kernels
#-------------------------
def m4(R):
    """M_4 cubic B-spline kernel (two-piece cubic, compact support R)."""
    outer = sympify(1)/4*(R - q)**3
    inner = (R/2 - q)**3
    f = Piecewise((outer - inner, q < R/2),
                  (outer, q < R),
                  (0, True))
    return (f, 'M_4 cubic')
def m5(R):
    """M_5 quartic B-spline kernel (three-piece quartic, compact support R)."""
    p1 = sympify((R - q)**4)
    p2 = -5*(sympify(3)/5*R - q)**4
    p3 = 10*(sympify(1)/5*R - q)**4
    f = Piecewise((p1 + p2 + p3, q < sympify(1)/5*R),
                  (p1 + p2, q < sympify(3)/5*R),
                  (p1, q < R),
                  (0, True))
    return (f, 'M_5 quartic')
def m6(R):
    """M_6 quintic B-spline kernel (three-piece quintic, compact support R).

    BUGFIX: removed a dead ``f = symbols('f',cls=Function)`` line that was
    immediately overwritten by the Piecewise assignment below.
    """
    term1 = sympify((R-q)**5)
    term2 = -6*(sympify(2)/3*R - q)**5
    term3 = 15*(sympify(1)/3*R - q)**5
    f = Piecewise((term1 + term2 + term3, q < sympify(1)/3*R),
                  (term1 + term2, q < sympify(2)/3*R),
                  (term1, q < R),
                  (0, True))
    return (f, 'M_6 quintic')
#-------------------------
# Wendland kernels in 1D
#-------------------------
def w2_1D(R):
    """Wendland C^2 kernel, 1D form."""
    u = q/R
    f = Piecewise(((1 - u)**3*(1 + 3*u), q < R), (0, True))
    return (f, 'Wendland 1D C^2')
def w4_1D(R):
    """Wendland C^4 kernel, 1D form."""
    u = q/R
    f = Piecewise(((1 - u)**5*(1 + 5*u + 8*u**2), q < R), (0, True))
    return (f, 'Wendland 1D C^4')
def w6_1D(R):
    """Wendland C^6 kernel, 1D form."""
    u = q/R
    f = Piecewise(((1 - u)**7*(1 + 7*u + 19*u**2 + 21*u**3), q < R), (0, True))
    return (f, 'Wendland 1D C^6')
#--------------------------
# Wendland kernels in 2/3D
#--------------------------
def w2(R):
    """Wendland C^2 kernel, 2D/3D form."""
    u = q/R
    f = Piecewise(((1 - u)**4*(1 + 4*u), q < R), (0, True))
    return (f, 'Wendland 2/3D C^2')
def w4(R):
    """Wendland C^4 kernel, 2D/3D form."""
    u = q/R
    f = Piecewise(((1 - u)**6*(1 + 6*u + sympify(35)/3*u**2), q < R), (0, True))
    return (f, 'Wendland 2/3D C^4')
def w6(R):
    """Wendland C^6 kernel, 2D/3D form."""
    u = q/R
    f = Piecewise(((1 - u)**8*(1 + 8*u + 25*u**2 + 32*u**3), q < R), (0, True))
    return (f, 'Wendland 2/3D C^6')
def sinq(R,n):
    """Sinc-like kernel [sin(pi*q/R)/q]**n with compact support R."""
    f = Piecewise(((sin(pi*q/R)/q)**n, q < R), (0, True))
    return (f, "[sin(q)/q]**%i" % n)
def pcubic(R):
    """Peaked cubic kernel.

    NOTE(review): the breakpoints (1 and 2) ignore R — presumably always used
    with support radius 2; confirm before calling with a different R.
    """
    f = Piecewise((q**3 - 6*q + 6, q < 1),
                  ((2 - q)**3, q < 2),
                  (0, True))
    return (f, 'A peaked cubic')
def bcubic(R):
    """'Better cubic' kernel.

    NOTE(review): the breakpoints (1 and 2) ignore R — presumably always used
    with support radius 2; confirm before calling with a different R.
    """
    piece1 = (sympify(10) - sympify(13)*q**2 + sympify(6)*q**3)/16
    piece2 = (2 - q)**2*(5 - sympify(2)*q)/16
    f = Piecewise((piece1, q < 1),
                  (piece2, q < 2),
                  (0, True))
    return (f, 'Better cubic')
#-----------------------------
# integrated B-spline kernels
#-----------------------------
def intm4(R):
    """M_4 cubic kernel integrated once (see intkernel)."""
    return intkernel(m4, R)
def int2m4(R):
    """M_4 cubic kernel integrated twice (see intkernel2)."""
    return intkernel2(m4, R)
def int3m4(R):
    """M_4 cubic kernel integrated three times (see intkernel3)."""
    return intkernel3(m4, R)
def intm5(R):
    """M_5 quartic kernel integrated once (see intkernel)."""
    return intkernel(m5, R)
def intm6(R):
    """M_6 quintic kernel integrated once (see intkernel)."""
    return intkernel(m6, R)
#------------------
# Ferrers kernels
#------------------
def f2(R):
    """Ferrers n=2 kernel, (1 - (q/R)^2)^2 with compact support R.

    BUGFIX: the label used to read 'Ferrers n=4' (copy-paste error).
    """
    f = Piecewise(((1 - (q/R)**2)**2, q < R), (0, True))
    return (f, 'Ferrers n=2')
def f3(R):
    """Ferrers n=3 kernel, (1 - (q/R)^2)^3 with compact support R.

    BUGFIX: the label used to read 'Ferrers n=4' (copy-paste error).
    """
    f = Piecewise(((1 - (q/R)**2)**3, q < R), (0, True))
    return (f, 'Ferrers n=3')
def f4(R):
    """Ferrers n=4 kernel, (1 - (q/R)^2)^4 with compact support R."""
    u = q/R
    f = Piecewise(((1 - u**2)**4, q < R), (0, True))
    return (f, 'Ferrers n=4')
def f5(R):
    """Ferrers n=5 kernel, (1 - (q/R)^2)^5 with compact support R."""
    u = q/R
    f = Piecewise(((1 - u**2)**5, q < R), (0, True))
    return (f, 'Ferrers n=5')
def f6(R):
    """Ferrers n=6 kernel, (1 - (q/R)^2)^6 with compact support R."""
    u = q/R
    f = Piecewise(((1 - u**2)**6, q < R), (0, True))
    return (f, 'Ferrers n=6')
########################################################
# The actual program
# Change the lines below to print the kernel you want
########################################################
# set kernel range (compact support radius)
#R = sympify(3)
#R = sympify(5)/2
R = sympify(2)
#R = symbols('R')
# define which kernel to use
#f, name = sinq(R,3)
f, name = m4(R)
#f, name = w6(R)
#print_avdiss(f,R)
#printvariances(f,R)
#f, name = doublehump(m6,R)
# print the desired output (one target code at a time)
#printkernel(f,R)
#printkernel_ndspmhd(f,R,name)
printkernel_phantom(f,R,name)
#printkernel_sphNG(f,R,name)
#printalltex()
|
danieljpriceREPO_NAMEphantomPATH_START.@phantom_extracted@phantom-master@scripts@kernels.py@.PATH_END.py
|
{
"filename": "api.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/freezegun/py3/freezegun/api.py",
"type": "Python"
}
|
from . import config
from ._async import wrap_coroutine
import asyncio
import copyreg
import dateutil
import datetime
import functools
import sys
import time
import uuid
import calendar
import unittest
import platform
import warnings
import types
import numbers
import inspect
from typing import TYPE_CHECKING, overload
from typing import Any, Awaitable, Callable, Dict, Iterator, List, Optional, Set, Type, TypeVar, Tuple, Union
from dateutil import parser
from dateutil.tz import tzlocal
try:
from maya import MayaDT # type: ignore
except ImportError:
MayaDT = None
if TYPE_CHECKING:
from typing_extensions import ParamSpec
P = ParamSpec("P")
T = TypeVar("T")
# Feature detection: the *_ns clock APIs only exist on sufficiently new Pythons.
_TIME_NS_PRESENT = hasattr(time, 'time_ns')
_MONOTONIC_NS_PRESENT = hasattr(time, 'monotonic_ns')
_PERF_COUNTER_NS_PRESENT = hasattr(time, 'perf_counter_ns')
# Epochs used by FakeDatetime.timestamp() for naive / aware datetimes.
_EPOCH = datetime.datetime(1970, 1, 1)
_EPOCHTZ = datetime.datetime(1970, 1, 1, tzinfo=dateutil.tz.UTC)
T2 = TypeVar("T2")
# Every argument type freeze_time() accepts.
_Freezable = Union[str, datetime.datetime, datetime.date, datetime.timedelta, types.FunctionType, Callable[[], Union[str, datetime.datetime, datetime.date, datetime.timedelta]], Iterator[datetime.datetime]]
# Originals captured before patching, so they can still be called and restored.
real_time = time.time
real_localtime = time.localtime
real_gmtime = time.gmtime
real_monotonic = time.monotonic
real_perf_counter = time.perf_counter
real_strftime = time.strftime
real_date = datetime.date
real_datetime = datetime.datetime
real_date_objects = [real_time, real_localtime, real_gmtime, real_monotonic, real_perf_counter, real_strftime, real_date, real_datetime]
if _TIME_NS_PRESENT:
    real_time_ns = time.time_ns
    real_date_objects.append(real_time_ns)
if _MONOTONIC_NS_PRESENT:
    real_monotonic_ns = time.monotonic_ns
    real_date_objects.append(real_monotonic_ns)
if _PERF_COUNTER_NS_PRESENT:
    real_perf_counter_ns = time.perf_counter_ns
    real_date_objects.append(real_perf_counter_ns)
# Identity set used to recognise aliases of the real functions in other modules.
_real_time_object_ids = {id(obj) for obj in real_date_objects}
# time.clock is deprecated and was removed in Python 3.8
real_clock = getattr(time, 'clock', None)
# Per-freeze stacks: one entry per nested freeze_time() currently active.
freeze_factories: List[Union["StepTickTimeFactory", "TickingDateTimeFactory", "FrozenDateTimeFactory"]] = []
tz_offsets: List[datetime.timedelta] = []
ignore_lists: List[Tuple[str, ...]] = []
tick_flags: List[bool] = []
# uuid's private time hook has moved/renamed across Python versions; probe for it.
try:
    # noinspection PyUnresolvedReferences
    real_uuid_generate_time = uuid._uuid_generate_time  # type: ignore
    uuid_generate_time_attr = '_uuid_generate_time'
except AttributeError:
    # noinspection PyUnresolvedReferences
    if hasattr(uuid, '_load_system_functions'):
        # A no-op after Python ~3.9, being removed in 3.13.
        uuid._load_system_functions()
    # noinspection PyUnresolvedReferences
    real_uuid_generate_time = uuid._generate_time_safe  # type: ignore
    uuid_generate_time_attr = '_generate_time_safe'
except ImportError:
    real_uuid_generate_time = None
    uuid_generate_time_attr = None  # type: ignore
try:
    # noinspection PyUnresolvedReferences
    real_uuid_create = uuid._UuidCreate  # type: ignore
except (AttributeError, ImportError):
    real_uuid_create = None
# keep a cache of module attributes otherwise freezegun will need to analyze too many modules all the time
_GLOBAL_MODULES_CACHE: Dict[str, Tuple[str, List[Tuple[str, Any]]]] = {}
def _get_module_attributes(module: types.ModuleType) -> List[Tuple[str, Any]]:
    """Collect (name, value) pairs for every attribute of *module* that can be read safely."""
    attributes: List[Tuple[str, Any]] = []
    try:
        names = dir(module)
    except (ImportError, TypeError):
        return attributes
    for name in names:
        try:
            value = getattr(module, name)
        except (ImportError, AttributeError, TypeError):
            # Some libraries raise on attribute access (e.g. _winreg, celery); skip them.
            continue
        attributes.append((name, value))
    return attributes
def _setup_module_cache(module: types.ModuleType) -> None:
    """Record which attributes of *module* alias the real time functions, keyed by a module hash."""
    date_attrs = [
        (name, value)
        for name, value in _get_module_attributes(module)
        if id(value) in _real_time_object_ids
    ]
    _GLOBAL_MODULES_CACHE[module.__name__] = (_get_module_attributes_hash(module), date_attrs)
def _get_module_attributes_hash(module: types.ModuleType) -> str:
    """Cheap fingerprint of a module's attribute names, used for cache invalidation."""
    try:
        names = frozenset(dir(module))
    except (ImportError, TypeError):
        names = frozenset()
    return f'{id(module)}-{hash(names)}'
def _get_cached_module_attributes(module: types.ModuleType) -> List[Tuple[str, Any]]:
    """Return the cached time-related attributes of *module*, refreshing the cache when stale."""
    cached_hash, cached_attrs = _GLOBAL_MODULES_CACHE.get(module.__name__, ('0', []))
    if cached_hash == _get_module_attributes_hash(module):
        return cached_attrs
    # Cache miss or stale entry: rebuild and return the fresh attribute list.
    _setup_module_cache(module)
    return _GLOBAL_MODULES_CACHE[module.__name__][1]
# True only on CPython; frame-based caller inspection below relies on CPython frames.
_is_cpython = (
    hasattr(platform, 'python_implementation') and
    platform.python_implementation().lower() == "cpython"
)
# How many stack frames to scan when matching caller modules against the ignore list.
call_stack_inspection_limit = 5
def _should_use_real_time() -> bool:
    """Return True when the calling module is in the active ignore list (or no freeze is active)."""
    if not call_stack_inspection_limit:
        return False
    # Means stop() has already been called, so we can now return the real time
    if not ignore_lists:
        return True
    if not ignore_lists[-1]:
        return False
    # Two frames up: skip this helper and the fake_* function that called it.
    # NOTE: the frame depth is significant — do not refactor the call chain.
    frame = inspect.currentframe().f_back.f_back  # type: ignore
    for _ in range(call_stack_inspection_limit):
        module_name = frame.f_globals.get('__name__')  # type: ignore
        if module_name and module_name.startswith(ignore_lists[-1]):
            return True
        frame = frame.f_back  # type: ignore
        if frame is None:
            break
    return False
def get_current_time() -> datetime.datetime:
    """Return the frozen "now" from the innermost active freeze_time context."""
    factory = freeze_factories[-1]
    return factory()
def fake_time() -> float:
    """Replacement for time.time() that honours the active freeze."""
    if _should_use_real_time():
        return real_time()
    now = get_current_time()
    return calendar.timegm(now.timetuple()) + now.microsecond / 1000000.0
if _TIME_NS_PRESENT:
    def fake_time_ns() -> int:
        """Replacement for time.time_ns() that honours the active freeze."""
        return real_time_ns() if _should_use_real_time() else int(fake_time() * 1e9)
def fake_localtime(t: Optional[float]=None) -> time.struct_time:
    """Replacement for time.localtime(); explicit timestamps always use the real conversion."""
    if t is not None:
        return real_localtime(t)
    if _should_use_real_time():
        return real_localtime()
    # NOTE(review): time.timezone ignores DST — this matches the original behaviour.
    frozen_local = get_current_time() - datetime.timedelta(seconds=time.timezone)
    return frozen_local.timetuple()
def fake_gmtime(t: Optional[float]=None) -> time.struct_time:
    """Replacement for time.gmtime(); explicit timestamps always use the real conversion."""
    if t is not None:
        return real_gmtime(t)
    return real_gmtime() if _should_use_real_time() else get_current_time().timetuple()
def _get_fake_monotonic() -> float:
    """Monotonic-clock stand-in derived from the frozen wall-clock time (seconds)."""
    now = get_current_time()
    return calendar.timegm(now.timetuple()) + now.microsecond / 1e6
def _get_fake_monotonic_ns() -> int:
    """Nanosecond variant of _get_fake_monotonic() (microsecond resolution, scaled)."""
    now = get_current_time()
    whole_seconds = calendar.timegm(now.timetuple())
    return (whole_seconds * 1000000 + now.microsecond) * 1000
def fake_monotonic() -> float:
    """Replacement for time.monotonic() that honours the active freeze."""
    return real_monotonic() if _should_use_real_time() else _get_fake_monotonic()
def fake_perf_counter() -> float:
    """Replacement for time.perf_counter() that honours the active freeze."""
    return real_perf_counter() if _should_use_real_time() else _get_fake_monotonic()
if _MONOTONIC_NS_PRESENT:
    def fake_monotonic_ns() -> int:
        """Replacement for time.monotonic_ns() that honours the active freeze."""
        return real_monotonic_ns() if _should_use_real_time() else _get_fake_monotonic_ns()

if _PERF_COUNTER_NS_PRESENT:
    def fake_perf_counter_ns() -> int:
        """Replacement for time.perf_counter_ns() that honours the active freeze."""
        return real_perf_counter_ns() if _should_use_real_time() else _get_fake_monotonic_ns()
def fake_strftime(format: Any, time_to_format: Any=None) -> str:
    """Replacement for time.strftime(); fills in the frozen localtime when no time tuple is given."""
    if time_to_format is None and not _should_use_real_time():
        time_to_format = fake_localtime()
    # time_to_format stays None only when real time is in effect and none was supplied.
    if time_to_format is None:
        return real_strftime(format)
    return real_strftime(format, time_to_format)
if real_clock is not None:
    def fake_clock() -> Any:
        """Replacement for the removed time.clock(); reports seconds since the first freeze."""
        if _should_use_real_time():
            return real_clock()  # type: ignore
        if len(freeze_factories) == 1:
            # Single freeze: 0.0 when fully frozen, the real clock when ticking.
            return 0.0 if not tick_flags[-1] else real_clock()  # type: ignore
        # Nested freezes: elapsed frozen time from the outermost freeze to now.
        first_frozen_time = freeze_factories[0]()
        last_frozen_time = get_current_time()
        timedelta = (last_frozen_time - first_frozen_time)
        total_seconds = timedelta.total_seconds()
        if tick_flags[-1]:
            total_seconds += real_clock()  # type: ignore
        return total_seconds
class FakeDateMeta(type):
    """Metaclass making isinstance/issubclass checks on FakeDate accept real dates too."""
    @classmethod
    def __instancecheck__(self, obj: Any) -> bool:
        # NOTE(review): @classmethod with a 'self' first argument is unconventional
        # but kept byte-for-byte — it is how this code ships upstream.
        return isinstance(obj, real_date)
    @classmethod
    def __subclasscheck__(cls, subclass: Any) -> bool:
        return issubclass(subclass, real_date)
def datetime_to_fakedatetime(datetime: datetime.datetime) -> "FakeDatetime":
    """Copy a real datetime into a FakeDatetime, field by field."""
    # NOTE: the parameter shadows the datetime module inside this body;
    # the name is kept for interface compatibility with keyword callers.
    src = datetime
    return FakeDatetime(
        src.year, src.month, src.day,
        src.hour, src.minute, src.second,
        src.microsecond, src.tzinfo,
    )
def date_to_fakedate(date: datetime.date) -> "FakeDate":
    """Copy a real date into a FakeDate."""
    return FakeDate(date.year, date.month, date.day)
class FakeDate(real_date, metaclass=FakeDateMeta):
    """date subclass whose today() reports the frozen time."""

    def __add__(self, other: Any) -> "FakeDate":
        summed = real_date.__add__(self, other)
        if summed is NotImplemented:
            return summed
        return date_to_fakedate(summed)

    def __sub__(self, other: Any) -> "FakeDate":  # type: ignore
        diff = real_date.__sub__(self, other)
        if diff is NotImplemented:
            return diff  # type: ignore
        # date - timedelta gives a date; date - date gives a timedelta.
        if isinstance(diff, real_date):
            return date_to_fakedate(diff)
        return diff  # type: ignore

    @classmethod
    def today(cls: Type["FakeDate"]) -> "FakeDate":
        """Frozen 'today', shifted by the configured tz offset."""
        return date_to_fakedate(cls._date_to_freeze() + cls._tz_offset())

    @staticmethod
    def _date_to_freeze() -> datetime.datetime:
        return get_current_time()

    @classmethod
    def _tz_offset(cls) -> datetime.timedelta:
        return tz_offsets[-1]
# Mirror the real class bounds onto the fake class.
FakeDate.min = date_to_fakedate(real_date.min)
FakeDate.max = date_to_fakedate(real_date.max)
class FakeDatetimeMeta(FakeDateMeta):
    """Metaclass making isinstance/issubclass checks on FakeDatetime accept real datetimes too."""
    @classmethod
    def __instancecheck__(self, obj: Any) -> bool:
        # NOTE(review): @classmethod with 'self' mirrors FakeDateMeta; intentional upstream style.
        return isinstance(obj, real_datetime)
    @classmethod
    def __subclasscheck__(cls, subclass: Any) -> bool:
        return issubclass(subclass, real_datetime)
class FakeDatetime(real_datetime, FakeDate, metaclass=FakeDatetimeMeta):
    """datetime subclass whose now()/utcnow()/today() report the frozen time."""

    def __add__(self, other: Any) -> "FakeDatetime":  # type: ignore
        result = real_datetime.__add__(self, other)
        if result is NotImplemented:
            return result
        return datetime_to_fakedatetime(result)

    def __sub__(self, other: Any) -> "FakeDatetime":  # type: ignore
        result = real_datetime.__sub__(self, other)
        if result is NotImplemented:
            return result  # type: ignore
        # datetime - timedelta -> datetime (wrap); datetime - datetime -> timedelta (pass through).
        if isinstance(result, real_datetime):
            return datetime_to_fakedatetime(result)
        else:
            return result  # type: ignore

    def astimezone(self, tz: Optional[datetime.tzinfo]=None) -> "FakeDatetime":
        if tz is None:
            tz = tzlocal()
        return datetime_to_fakedatetime(real_datetime.astimezone(self, tz))

    @classmethod
    def fromtimestamp(cls, t: float, tz: Optional[datetime.tzinfo]=None) -> "FakeDatetime":
        if tz is None:
            # No tz given: convert via the configured offset, then drop tzinfo (naive result).
            tz = dateutil.tz.tzoffset("freezegun", cls._tz_offset())
            result = real_datetime.fromtimestamp(t, tz=tz).replace(tzinfo=None)
        else:
            result = real_datetime.fromtimestamp(t, tz)
        return datetime_to_fakedatetime(result)

    def timestamp(self) -> float:
        # Naive fake datetimes are interpreted in the frozen "local" zone.
        if self.tzinfo is None:
            return (self - _EPOCH - self._tz_offset()).total_seconds()  # type: ignore
        return (self - _EPOCHTZ).total_seconds()  # type: ignore

    @classmethod
    def now(cls, tz: Optional[datetime.tzinfo] = None) -> "FakeDatetime":
        # Falls back to the real clock when no freeze is active.
        now = cls._time_to_freeze() or real_datetime.now()
        if tz:
            result = tz.fromutc(now.replace(tzinfo=tz)) + cls._tz_offset()
        else:
            result = now + cls._tz_offset()
        return datetime_to_fakedatetime(result)

    def date(self) -> "FakeDate":
        return date_to_fakedate(self)

    @property
    def nanosecond(self) -> int:
        try:
            # noinspection PyUnresolvedReferences
            return real_datetime.nanosecond  # type: ignore
        except AttributeError:
            return 0

    @classmethod
    def today(cls) -> "FakeDatetime":
        return cls.now(tz=None)

    @classmethod
    def utcnow(cls) -> "FakeDatetime":
        result = cls._time_to_freeze() or real_datetime.now(datetime.timezone.utc)
        return datetime_to_fakedatetime(result)

    @staticmethod
    def _time_to_freeze() -> Optional[datetime.datetime]:
        # None when no freeze_time() is active, so callers fall back to real time.
        if freeze_factories:
            return get_current_time()
        return None

    @classmethod
    def _tz_offset(cls) -> datetime.timedelta:
        return tz_offsets[-1]
# Mirror the real class bounds onto the fake class.
FakeDatetime.min = datetime_to_fakedatetime(real_datetime.min)
FakeDatetime.max = datetime_to_fakedatetime(real_datetime.max)
def convert_to_timezone_naive(time_to_freeze: datetime.datetime) -> datetime.datetime:
    """Normalize *time_to_freeze* to a naive UTC datetime.

    Aware datetimes are shifted back by their UTC offset and stripped of
    tzinfo; naive datetimes are returned unchanged.
    """
    if not time_to_freeze.tzinfo:
        return time_to_freeze
    offset = time_to_freeze.utcoffset()
    return (time_to_freeze - offset).replace(tzinfo=None)
def pickle_fake_date(datetime_: datetime.date) -> Tuple[Type[FakeDate], Tuple[int, int, int]]:
    """copyreg reducer: rebuild pickled FakeDate instances from (year, month, day)."""
    return FakeDate, (datetime_.year, datetime_.month, datetime_.day)
def pickle_fake_datetime(datetime_: datetime.datetime) -> Tuple[Type[FakeDatetime], Tuple[int, int, int, int, int, int, int, Optional[datetime.tzinfo]]]:
    """copyreg reducer: rebuild pickled FakeDatetime instances from their fields."""
    fields = (
        datetime_.year,
        datetime_.month,
        datetime_.day,
        datetime_.hour,
        datetime_.minute,
        datetime_.second,
        datetime_.microsecond,
        datetime_.tzinfo,
    )
    return FakeDatetime, fields
def _parse_time_to_freeze(time_to_freeze_str: Optional["_Freezable"]) -> datetime.datetime:
    """Coerce any supported freeze_time argument to a naive UTC datetime.

    Accepts None (meaning "now"), datetimes, dates, timedeltas (relative to
    now), or anything dateutil can parse.
    """
    target = time_to_freeze_str
    if target is None:
        target = datetime.datetime.now(datetime.timezone.utc)
    # Order matters: datetime is a subclass of date, so test it first.
    if isinstance(target, datetime.datetime):
        result = target
    elif isinstance(target, datetime.date):
        result = datetime.datetime.combine(target, datetime.time())
    elif isinstance(target, datetime.timedelta):
        result = datetime.datetime.now(datetime.timezone.utc) + target
    else:
        result = parser.parse(target)  # type: ignore
    return convert_to_timezone_naive(result)
def _parse_tz_offset(tz_offset: Union[datetime.timedelta, float]) -> datetime.timedelta:
    """Accept either a ready-made timedelta or a number of hours."""
    if isinstance(tz_offset, datetime.timedelta):
        return tz_offset
    return datetime.timedelta(hours=tz_offset)
class TickingDateTimeFactory:
    """Frozen clock that keeps advancing in step with the real clock."""

    def __init__(self, time_to_freeze: datetime.datetime, start: datetime.datetime):
        self.time_to_freeze = time_to_freeze
        self.start = start

    def __call__(self) -> datetime.datetime:
        elapsed = real_datetime.now() - self.start
        return self.time_to_freeze + elapsed

    def tick(self, delta: Union[datetime.timedelta, float]=datetime.timedelta(seconds=1)) -> datetime.datetime:
        """Advance the frozen time by *delta* (a timedelta, or a number of seconds)."""
        if isinstance(delta, numbers.Integral):
            step = datetime.timedelta(seconds=int(delta))
        elif isinstance(delta, numbers.Real):
            step = datetime.timedelta(seconds=float(delta))
        else:
            step = delta  # type: ignore
        self.move_to(self.time_to_freeze + step)
        return self.time_to_freeze

    def move_to(self, target_datetime: "_Freezable") -> None:
        """Moves frozen date to the given ``target_datetime`` and re-anchors the real-time base."""
        self.start = real_datetime.now()
        self.time_to_freeze = _parse_time_to_freeze(target_datetime)
class FrozenDateTimeFactory:
    """Completely static clock: every call returns the same instant until ticked."""

    def __init__(self, time_to_freeze: datetime.datetime):
        self.time_to_freeze = time_to_freeze

    def __call__(self) -> datetime.datetime:
        return self.time_to_freeze

    def tick(self, delta: Union[datetime.timedelta, float]=datetime.timedelta(seconds=1)) -> datetime.datetime:
        """Advance by *delta*: numbers mean seconds, timedeltas are applied directly."""
        if isinstance(delta, numbers.Integral):
            self.move_to(self.time_to_freeze + datetime.timedelta(seconds=int(delta)))
        elif isinstance(delta, numbers.Real):
            self.move_to(self.time_to_freeze + datetime.timedelta(seconds=float(delta)))
        else:
            self.time_to_freeze += delta  # type: ignore
        return self.time_to_freeze

    def move_to(self, target_datetime: "_Freezable") -> None:
        """Moves frozen date to the given ``target_datetime``."""
        delta = _parse_time_to_freeze(target_datetime) - self.time_to_freeze
        self.tick(delta=delta)
class StepTickTimeFactory:
    """Clock that auto-advances by a fixed step every time it is read."""

    def __init__(self, time_to_freeze: datetime.datetime, step_width: float):
        self.time_to_freeze = time_to_freeze
        self.step_width = step_width

    def __call__(self) -> datetime.datetime:
        current = self.time_to_freeze
        self.tick()  # auto-advance after every read
        return current

    def tick(self, delta: Union[datetime.timedelta, float, None]=None) -> datetime.datetime:
        """Advance by *delta* (seconds or timedelta); defaults to the configured step width."""
        if not delta:
            delta = datetime.timedelta(seconds=self.step_width)
        elif isinstance(delta, numbers.Integral):
            delta = datetime.timedelta(seconds=int(delta))
        elif isinstance(delta, numbers.Real):
            delta = datetime.timedelta(seconds=float(delta))
        self.time_to_freeze += delta  # type: ignore
        return self.time_to_freeze

    def update_step_width(self, step_width: float) -> None:
        """Change the automatic step applied on every read."""
        self.step_width = step_width

    def move_to(self, target_datetime: "_Freezable") -> None:
        """Moves frozen date to the given ``target_datetime``."""
        self.tick(delta=_parse_time_to_freeze(target_datetime) - self.time_to_freeze)
class _freeze_time:
    def __init__(
        self,
        time_to_freeze_str: Optional[_Freezable],
        tz_offset: Union[int, datetime.timedelta],
        ignore: List[str],
        tick: bool,
        as_arg: bool,
        as_kwarg: str,
        auto_tick_seconds: float,
        real_asyncio: Optional[bool],
    ):
        """Normalize the freeze_time() arguments and set up per-context state."""
        self.time_to_freeze = _parse_time_to_freeze(time_to_freeze_str)
        self.tz_offset = _parse_tz_offset(tz_offset)
        self.ignore = tuple(ignore)
        self.tick = tick
        self.auto_tick_seconds = auto_tick_seconds
        # (module, attribute, original_value) triples recorded by start(), restored by stop().
        self.undo_changes: List[Tuple[types.ModuleType, str, Any]] = []
        self.modules_at_start: Set[str] = set()
        self.as_arg = as_arg
        self.as_kwarg = as_kwarg
        self.real_asyncio = real_asyncio
    @overload
    def __call__(self, func: Type[T2]) -> Type[T2]:
        ...

    @overload
    def __call__(self, func: "Callable[P, Awaitable[Any]]") -> "Callable[P, Awaitable[Any]]":
        ...

    @overload
    def __call__(self, func: "Callable[P, T]") -> "Callable[P, T]":
        ...

    def __call__(self, func: Union[Type[T2], "Callable[P, Awaitable[Any]]", "Callable[P, T]"]) -> Union[Type[T2], "Callable[P, Awaitable[Any]]", "Callable[P, T]"]:  # type: ignore
        """Dispatch on the decorated object: class, coroutine function, or plain callable."""
        if inspect.isclass(func):
            return self.decorate_class(func)
        elif inspect.iscoroutinefunction(func):
            return self.decorate_coroutine(func)
        return self.decorate_callable(func)  # type: ignore
    def decorate_class(self, klass: Type[T2]) -> Type[T2]:
        """Freeze time around every test (TestCase) or wrap every public callable (plain class)."""
        if issubclass(klass, unittest.TestCase):
            # If it's a TestCase, we freeze time around setup and teardown, as well
            # as for every test case. This requires some care to avoid freezing
            # the time pytest sees, as otherwise this would distort the reported
            # timings.
            orig_setUpClass = klass.setUpClass
            orig_tearDownClass = klass.tearDownClass

            # noinspection PyDecorator
            @classmethod  # type: ignore
            def setUpClass(cls: type) -> None:
                self.start()
                if orig_setUpClass is not None:
                    orig_setUpClass()
                self.stop()

            # noinspection PyDecorator
            @classmethod  # type: ignore
            def tearDownClass(cls: type) -> None:
                self.start()
                if orig_tearDownClass is not None:
                    orig_tearDownClass()
                self.stop()

            klass.setUpClass = setUpClass  # type: ignore
            klass.tearDownClass = tearDownClass  # type: ignore

            orig_setUp = klass.setUp
            orig_tearDown = klass.tearDown

            def setUp(*args: Any, **kwargs: Any) -> None:
                # The freeze started here stays active for the whole test; tearDown stops it.
                self.start()
                if orig_setUp is not None:
                    orig_setUp(*args, **kwargs)

            def tearDown(*args: Any, **kwargs: Any) -> None:
                if orig_tearDown is not None:
                    orig_tearDown(*args, **kwargs)
                self.stop()

            klass.setUp = setUp  # type: ignore[method-assign]
            klass.tearDown = tearDown  # type: ignore[method-assign]
        else:
            # Plain class: wrap every public callable found anywhere in the MRO (once per name).
            seen = set()
            klasses = klass.mro()
            for base_klass in klasses:
                for (attr, attr_value) in base_klass.__dict__.items():
                    if attr.startswith('_') or attr in seen:
                        continue
                    seen.add(attr)
                    if not callable(attr_value) or inspect.isclass(attr_value) or isinstance(attr_value, staticmethod):
                        continue
                    try:
                        setattr(klass, attr, self(attr_value))
                    except (AttributeError, TypeError):
                        # Sometimes we can't set this for built-in types and custom callables
                        continue
        return klass
    def __enter__(self) -> Union[StepTickTimeFactory, TickingDateTimeFactory, FrozenDateTimeFactory]:
        """Context-manager entry: start the freeze and expose its time factory."""
        return self.start()
    def __exit__(self, *args: Any) -> None:
        """Context-manager exit: undo the freeze regardless of exceptions."""
        self.stop()
    def start(self) -> Union[StepTickTimeFactory, TickingDateTimeFactory, FrozenDateTimeFactory]:
        """Activate the freeze: push a factory and, for the outermost freeze, patch time APIs everywhere."""
        # Pick the factory flavour: auto-stepping, ticking with real time, or fully frozen.
        if self.auto_tick_seconds:
            freeze_factory: Union[StepTickTimeFactory, TickingDateTimeFactory, FrozenDateTimeFactory] = StepTickTimeFactory(self.time_to_freeze, self.auto_tick_seconds)
        elif self.tick:
            freeze_factory = TickingDateTimeFactory(self.time_to_freeze, real_datetime.now())
        else:
            freeze_factory = FrozenDateTimeFactory(self.time_to_freeze)

        is_already_started = len(freeze_factories) > 0
        freeze_factories.append(freeze_factory)
        tz_offsets.append(self.tz_offset)
        ignore_lists.append(self.ignore)
        tick_flags.append(self.tick)

        # Nested freeze: the module-level patching below already happened once.
        if is_already_started:
            return freeze_factory

        # Change the modules
        datetime.datetime = FakeDatetime  # type: ignore[misc]
        datetime.date = FakeDate  # type: ignore[misc]
        time.time = fake_time
        time.monotonic = fake_monotonic
        time.perf_counter = fake_perf_counter
        time.localtime = fake_localtime  # type: ignore
        time.gmtime = fake_gmtime  # type: ignore
        time.strftime = fake_strftime  # type: ignore
        if uuid_generate_time_attr:
            setattr(uuid, uuid_generate_time_attr, None)
        uuid._UuidCreate = None  # type: ignore[attr-defined]
        uuid._last_timestamp = None  # type: ignore[attr-defined]

        copyreg.dispatch_table[real_datetime] = pickle_fake_datetime
        copyreg.dispatch_table[real_date] = pickle_fake_date

        # Change any place where the module had already been imported
        to_patch = [
            ('real_date', real_date, FakeDate),
            ('real_datetime', real_datetime, FakeDatetime),
            ('real_gmtime', real_gmtime, fake_gmtime),
            ('real_localtime', real_localtime, fake_localtime),
            ('real_monotonic', real_monotonic, fake_monotonic),
            ('real_perf_counter', real_perf_counter, fake_perf_counter),
            ('real_strftime', real_strftime, fake_strftime),
            ('real_time', real_time, fake_time),
        ]
        if _TIME_NS_PRESENT:
            time.time_ns = fake_time_ns
            to_patch.append(('real_time_ns', real_time_ns, fake_time_ns))
        if _MONOTONIC_NS_PRESENT:
            time.monotonic_ns = fake_monotonic_ns
            to_patch.append(('real_monotonic_ns', real_monotonic_ns, fake_monotonic_ns))
        if _PERF_COUNTER_NS_PRESENT:
            time.perf_counter_ns = fake_perf_counter_ns
            to_patch.append(('real_perf_counter_ns', real_perf_counter_ns, fake_perf_counter_ns))
        if real_clock is not None:
            # time.clock is deprecated and was removed in Python 3.8
            time.clock = fake_clock  # type: ignore[attr-defined]
            to_patch.append(('real_clock', real_clock, fake_clock))

        self.fake_names = tuple(fake.__name__ for real_name, real, fake in to_patch)  # type: ignore
        self.reals = {id(fake): real for real_name, real, fake in to_patch}
        fakes = {id(real): fake for real_name, real, fake in to_patch}
        add_change = self.undo_changes.append

        # Save the current loaded modules
        self.modules_at_start = set(sys.modules.keys())

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            for mod_name, module in list(sys.modules.items()):
                if mod_name is None or module is None or mod_name == __name__:
                    continue
                elif mod_name.startswith(self.ignore) or mod_name.endswith('.six.moves'):
                    continue
                elif (not hasattr(module, "__name__") or module.__name__ in ('datetime', 'time')):
                    continue
                # Swap any alias of a real time function in this module for its fake,
                # remembering the original so stop() can undo it.
                module_attrs = _get_cached_module_attributes(module)
                for attribute_name, attribute_value in module_attrs:
                    fake = fakes.get(id(attribute_value))
                    if fake:
                        setattr(module, attribute_name, fake)
                        add_change((module, attribute_name, attribute_value))

        if self.real_asyncio:
            # To avoid breaking `asyncio.sleep()`, let asyncio event loops see real
            # monotonic time even though we've just frozen `time.monotonic()` which
            # is normally used there. If we didn't do this, `await asyncio.sleep()`
            # would be hanging forever breaking many tests that use `freeze_time`.
            #
            # Note that we cannot statically tell the class of asyncio event loops
            # because it is not officially documented and can actually be changed
            # at run time using `asyncio.set_event_loop_policy`. That's why we check
            # the type by creating a loop here and destroying it immediately.
            event_loop = asyncio.new_event_loop()
            event_loop.close()
            EventLoopClass = type(event_loop)
            add_change((EventLoopClass, "time", EventLoopClass.time))  # type: ignore
            EventLoopClass.time = lambda self: real_monotonic()  # type: ignore[method-assign]

        return freeze_factory
def stop(self) -> None:
    """Undo a matching ``start()``.

    Pops this freezer's per-instance state off the module-level stacks and,
    only when the last nested freezer has stopped, restores the real
    datetime/date classes, the real ``time``/``uuid`` functions, and every
    module attribute that was patched to a fake during ``start()``.
    """
    freeze_factories.pop()
    ignore_lists.pop()
    tick_flags.pop()
    tz_offsets.pop()

    if not freeze_factories:
        # Last active freezer: put the real classes back on the datetime module
        # and drop the copyreg pickle hooks installed for them.
        datetime.datetime = real_datetime  # type: ignore[misc]
        datetime.date = real_date  # type: ignore[misc]
        copyreg.dispatch_table.pop(real_datetime)
        copyreg.dispatch_table.pop(real_date)
        # Revert per-module attribute patches recorded during start().
        for module_or_object, attribute, original_value in self.undo_changes:
            setattr(module_or_object, attribute, original_value)
        self.undo_changes = []

        # Restore modules loaded after start()
        modules_to_restore = set(sys.modules.keys()) - self.modules_at_start
        self.modules_at_start = set()
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            for mod_name in modules_to_restore:
                module = sys.modules.get(mod_name, None)
                if mod_name is None or module is None:
                    continue
                elif mod_name.startswith(self.ignore) or mod_name.endswith('.six.moves'):
                    continue
                elif not hasattr(module, "__name__") or module.__name__ in ('datetime', 'time'):
                    continue
                # Swap any fake attribute (matched by identity) back to its real.
                for module_attribute in dir(module):
                    if module_attribute in self.fake_names:
                        continue
                    try:
                        attribute_value = getattr(module, module_attribute)
                    except (ImportError, AttributeError, TypeError):
                        # For certain libraries, this can result in ImportError(_winreg) or AttributeError (celery)
                        continue
                    real = self.reals.get(id(attribute_value))
                    if real:
                        setattr(module, module_attribute, real)

        # Restore the time module's functions themselves.
        time.time = real_time
        time.monotonic = real_monotonic
        time.perf_counter = real_perf_counter
        time.gmtime = real_gmtime
        time.localtime = real_localtime
        time.strftime = real_strftime
        time.clock = real_clock  # type: ignore[attr-defined]

        if _TIME_NS_PRESENT:
            time.time_ns = real_time_ns
        if _MONOTONIC_NS_PRESENT:
            time.monotonic_ns = real_monotonic_ns
        if _PERF_COUNTER_NS_PRESENT:
            time.perf_counter_ns = real_perf_counter_ns

        if uuid_generate_time_attr:
            setattr(uuid, uuid_generate_time_attr, real_uuid_generate_time)
        uuid._UuidCreate = real_uuid_create  # type: ignore[attr-defined]
        # Reset so time-based UUIDs don't reject "backwards" timestamps.
        uuid._last_timestamp = None  # type: ignore[attr-defined]
def decorate_coroutine(self, coroutine: "Callable[P, Awaitable[T]]") -> "Callable[P, Awaitable[T]]":
    """Wrap *coroutine* so every await of it runs with this freezer active."""
    wrapped = wrap_coroutine(self, coroutine)
    return wrapped
def decorate_callable(self, func: "Callable[P, T]") -> "Callable[P, T]":
    """Return *func* wrapped so each call executes inside this freezer.

    Depending on configuration, the time factory is passed as the first
    positional argument (``as_arg``) or as the keyword named by ``as_kwarg``.
    """
    @functools.wraps(func)
    def wrapper(*args: "P.args", **kwargs: "P.kwargs") -> T:
        with self as time_factory:
            if self.as_arg and self.as_kwarg:
                assert False, "You can't specify both as_arg and as_kwarg at the same time. Pick one."
            if self.as_arg:
                # Freezer factory is prepended to the positional arguments.
                return func(time_factory, *args, **kwargs)  # type: ignore
            if self.as_kwarg:
                kwargs[self.as_kwarg] = time_factory
                return func(*args, **kwargs)
            return func(*args, **kwargs)

    return wrapper
def freeze_time(time_to_freeze: Optional[_Freezable]=None, tz_offset: Union[int, datetime.timedelta]=0, ignore: Optional[List[str]]=None, tick: bool=False, as_arg: bool=False, as_kwarg: str='',
                auto_tick_seconds: float=0, real_asyncio: bool=False) -> _freeze_time:
    """Build a ``_freeze_time`` usable as a context manager or decorator.

    *time_to_freeze* may be None, a string, a date/datetime, a timedelta, a
    MayaDT (when the ``maya`` package is installed), a function, or a
    generator; the last three are resolved to a concrete value by recursion.
    """
    acceptable_times: Any = (type(None), str, datetime.date, datetime.timedelta,
                             types.FunctionType, types.GeneratorType)
    if MayaDT is not None:
        acceptable_times += MayaDT,

    # Reject anything we cannot interpret as a point in time.
    if not isinstance(time_to_freeze, acceptable_times):
        raise TypeError(('freeze_time() expected None, a string, date instance, datetime '
                         'instance, MayaDT, timedelta instance, function or a generator, but got '
                         'type {}.').format(type(time_to_freeze)))
    if tick and not _is_cpython:
        raise SystemError('Calling freeze_time with tick=True is only compatible with CPython')

    # Indirect specifications: obtain the concrete value, then recurse.
    if isinstance(time_to_freeze, types.FunctionType):
        return freeze_time(time_to_freeze(), tz_offset, ignore, tick, as_arg, as_kwarg, auto_tick_seconds, real_asyncio=real_asyncio)
    if isinstance(time_to_freeze, types.GeneratorType):
        return freeze_time(next(time_to_freeze), tz_offset, ignore, tick, as_arg, as_kwarg, auto_tick_seconds, real_asyncio=real_asyncio)
    if MayaDT is not None and isinstance(time_to_freeze, MayaDT):
        return freeze_time(time_to_freeze.datetime(), tz_offset, ignore,
                           tick, as_arg, as_kwarg, auto_tick_seconds, real_asyncio=real_asyncio)

    # Copy the caller's ignore list (never mutate it) and extend it with the
    # globally-configured default ignore list.
    combined_ignore = [] if ignore is None else list(ignore)
    if config.settings.default_ignore_list:
        combined_ignore.extend(config.settings.default_ignore_list)

    return _freeze_time(
        time_to_freeze_str=time_to_freeze,
        tz_offset=tz_offset,
        ignore=combined_ignore,
        tick=tick,
        as_arg=as_arg,
        as_kwarg=as_kwarg,
        auto_tick_seconds=auto_tick_seconds,
        real_asyncio=real_asyncio,
    )
# Setup adapters for sqlite
try:
    # noinspection PyUnresolvedReferences
    import sqlite3
except ImportError:
    # Some systems have trouble with this
    pass
else:
    # These are copied from Python sqlite3.dbapi2
    def adapt_date(val: datetime.date) -> str:
        # ISO 8601 date string, e.g. "2020-01-31".
        return val.isoformat()

    def adapt_datetime(val: datetime.datetime) -> str:
        # ISO 8601 with a space separator, matching sqlite3's own default.
        return val.isoformat(" ")

    # Register the fake classes so frozen dates/datetimes bind as sqlite
    # parameters exactly like the real ones do.
    sqlite3.register_adapter(FakeDate, adapt_date)
    sqlite3.register_adapter(FakeDatetime, adapt_datetime)
# Setup converters for pymysql
try:
    import pymysql.converters
except ImportError:
    pass
else:
    # Reuse pymysql's escaping functions for the real date/datetime classes
    # so the fake classes serialize identically when sent to MySQL.
    pymysql.converters.encoders[FakeDate] = pymysql.converters.encoders[real_date]
    pymysql.converters.conversions[FakeDate] = pymysql.converters.encoders[real_date]
    pymysql.converters.encoders[FakeDatetime] = pymysql.converters.encoders[real_datetime]
    pymysql.converters.conversions[FakeDatetime] = pymysql.converters.encoders[real_datetime]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@freezegun@py3@freezegun@api.py@.PATH_END.py
|
{
"filename": "pixsim.py",
"repo_name": "desihub/desisim",
"repo_path": "desisim_extracted/desisim-main/py/desisim/scripts/pixsim.py",
"type": "Python"
}
|
"""
desisim.scripts.pixsim
======================
Command-line interface for simulating DESI pixel-level raw data; see :func:`parse` and :func:`main`.
"""
from __future__ import absolute_import, division, print_function
import os,sys
import os.path
import shutil
import random
from time import asctime
import numpy as np
import desimodel.io
from desiutil.log import get_logger
import desispec.io
from desispec.parallel import stdouterr_redirected
from ..pixsim import simulate_exposure
from .. import io
log = get_logger()
def expand_args(args):
    """Normalize parsed command-line options in place.

    - ``args.cameras``: comma-separated string -> list of camera names
      (left as None if not provided).
    - ``args.keywords``: "key1=val1,key2=val2,..." -> dict; each value is
      coerced to int first, then float, falling back to the original
      string.  Entries not of the exact form ``key=value`` are dropped.

    Fixes vs. previous version: bare ``except:`` replaced by targeted
    ``except ValueError``, and the unused ``typed`` flag removed.
    """
    #- expand camera list
    if args.cameras is not None:
        args.cameras = args.cameras.split(',')

    #- parse extra header keywords into a typed dict
    if args.keywords is not None:
        res = {}
        for kv in args.keywords.split(","):
            t = kv.split("=")
            if len(t) != 2:
                continue
            k, v = t
            # Try int first (so "3" stays an int), then float, else keep str.
            try:
                v = int(v)
            except ValueError:
                try:
                    v = float(v)
                except ValueError:
                    pass
            res[k] = v
        args.keywords = res
#-------------------------------------------------------------------------
#- Parse options
def parse(options=None):
    """Parse command-line options for pixel-level simulation.

    Args:
        options: optional list of option tokens to parse instead of
            ``sys.argv`` (each token is str()-converted first).

    Returns:
        argparse.Namespace, post-processed by ``expand_args`` so that
        ``cameras`` is a list and ``keywords`` is a dict.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description = 'Generates simulated DESI pixel-level raw data',
    )

    #- Inputs
    parser.add_argument("--simspec", type=str, help="input simspec file", required=True)
    parser.add_argument("--psf", type=str, help="PSF filename, optional", default=None)
    parser.add_argument("--cosmics", action="store_true", help="Add cosmics")

    #- Outputs
    parser.add_argument("--rawfile", type=str, help="output raw data file", required=True)
    parser.add_argument("--simpixfile", type=str, required=False, default=None,
        help="optional output truth image file")
    parser.add_argument("--outfibermap", type=str, required=False, default=None,
        help="optional output fibermap")
    parser.add_argument("--cameras", type=str, help="cameras, e.g. b0,r5,z9")
    parser.add_argument("--keywords", type=str, default=None, help="optional additional keywords in header of rawfile of the form 'key1=val1,key2=val2,...")
    parser.add_argument("--ccd_npix_x", type=int,
        help="for testing; number of x (columns) to include in output",
        default=None)
    parser.add_argument("--ccd_npix_y", type=int,
        help="for testing; number of y (rows) to include in output",
        default=None)
    parser.add_argument("--verbose", action="store_true",
        help="Include debug log info")
    parser.add_argument("--overwrite", action="store_true",
        help="Overwrite existing raw and simpix files")

    #- Not yet supported so don't pretend it is
    ### parser.add_argument("--seed", type=int, help="random number seed")

    parser.add_argument("--ncpu", type=int,
        help="Number of cpu cores per thread to use", default=0)
    parser.add_argument("--wavemin", type=float,
        help="Minimum wavelength to simulate")
    parser.add_argument("--wavemax", type=float,
        help="Maximum wavelength to simulate")
    parser.add_argument("--nspec", type=int,
        help="Number of spectra to simulate per camera")

    if options is None:
        args = parser.parse_args()
    else:
        options = [str(x) for x in options]
        args = parser.parse_args(options)

    # Post-process comma-separated strings into lists/dicts.
    expand_args(args)
    return args
def main(args, comm=None):
    """Run the pixel-level simulation for one exposure.

    Args:
        args: parsed options from ``parse()``.
        comm: optional MPI communicator; only rank 0 logs the start message
            and removes a pre-existing raw file when --overwrite is set.
    """
    if args.verbose:
        import logging
        log.setLevel(logging.DEBUG)

    if comm is None or comm.rank == 0:
        log.info('Starting pixsim at {}'.format(asctime()))
        if args.overwrite and os.path.exists(args.rawfile):
            log.debug('Removing {}'.format(args.rawfile))
            os.remove(args.rawfile)

    # Heavy lifting happens in desisim.pixsim.simulate_exposure.
    simulate_exposure(args.simspec, args.rawfile, cameras=args.cameras,
        simpixfile=args.simpixfile, addcosmics=args.cosmics,
        nspec=args.nspec, wavemin=args.wavemin, wavemax=args.wavemax,
        comm=comm,keywords=args.keywords, outfibermap=args.outfibermap)
|
desihubREPO_NAMEdesisimPATH_START.@desisim_extracted@desisim-main@py@desisim@scripts@pixsim.py@.PATH_END.py
|
{
"filename": "computeOccurrenceFaOnly-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaseline_gaiaRadCut/.ipynb_checkpoints/computeOccurrenceFaOnly-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import os
import requests
import pandas as pd
from astropy.io import fits
from cStringIO import StringIO
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gamma
from scipy.optimize import minimize
from scipy.interpolate import RectBivariateSpline
import emcee
import corner
import scipy.io as sio
from ipywidgets import FloatProgress
from IPython.display import display
import time
```
```python
# Input catalogs: cleaned GK stellar sample and the DR25 planet-candidate list.
stellarCatalog = "../stellarCatalogs/dr25_stellar_supp_gaia_clean_GK.txt"
pcCatalog = "koiCatalogs/dr25_GK_PCs.csv"

# Occurrence-rate box: orbital period [days] and planet radius [Earth radii],
# with the number of grid points along each axis.
period_rng = (50, 400)
n_period = 57
rp_rng = (0.75, 2.5)
n_rp = 61

# for quick tests
# nWalkers = 6
# nBurnin = 200
# nMcmc = 1000

# for production runs
nWalkers = 16
nBurnin = 1000
nMcmc = 5000

# Shape of the rate model and which radius column to use (see getRadii).
model = "dualPowerLaw"
whichRadii = "corrected"
```python
def rateModel(x, y, xRange, yRange, theta, model):
    """Occurrence-rate density at (x, y) = (period, radius).

    All models are a product of two normalized power laws; the "Gap" variants
    subtract a Gaussian valley (in radius, optionally sloped in log-period)
    from the radius factor.  Raises ValueError for an unknown *model*.
    """
    def _marginal(v, expnt, rng):
        # Normalized power-law factor: (e+1) v^e / (hi^(e+1) - lo^(e+1)).
        e1 = expnt + 1
        return e1 * (v ** expnt) / (rng[1] ** e1 - rng[0] ** e1)

    def _gap_term(depth, width, offset, slope):
        # Gaussian valley centered on the (log-period-dependent) gap radius.
        valley = 10 ** (slope * np.log10(x) + offset)
        return depth * np.exp(-((valley - y) ** 2) / (2 * width * width))

    if model == "dualPowerLaw":
        f0, alpha, beta = theta
        return f0 * _marginal(x, alpha, xRange) * _marginal(y, beta, yRange)
    if model == "dualPowerLawGap":
        f0, alpha, beta, gd, gw, gapOffset, gapSlope = theta
        return f0 * _marginal(x, alpha, xRange) * (
            _marginal(y, beta, yRange) - _gap_term(gd, gw, gapOffset, gapSlope))
    if model == "dualPowerLawGapFixedSlope":
        # Constant-radius valley (slope fixed to 0) to match radius marginals.
        f0, alpha, beta, gd, gw, gapOffset = theta
        return f0 * _marginal(x, alpha, xRange) * (
            _marginal(y, beta, yRange) - _gap_term(gd, gw, gapOffset, 0))
    if model == "dualPowerLawFixedValley":
        # Valley parameters frozen to previously-fit constants.
        f0, alpha, beta = theta
        return f0 * _marginal(x, alpha, xRange) * (
            _marginal(y, beta, yRange) - _gap_term(0.29297043, 0.14683756, 0.29125824, 0))
    raise ValueError('Bad model name')
def getModelLabels(model):
    """Return LaTeX parameter labels for *model*, in theta order.

    Note the notebook's convention: index 1 is labeled beta (period exponent)
    and index 2 alpha (radius exponent).  Raises ValueError for unknown models.
    """
    labels = {
        "dualPowerLaw": [r"$F_0$", r"$\beta$", r"$\alpha$"],
        "dualPowerLawGap": [r"$F_0$", r"$\beta$", r"$\alpha$", r"$d_g$", r"$w_g$", r"$o_g$", r"$s_g$"],
        "dualPowerLawGapFixedSlope": [r"$F_0$", r"$\beta$", r"$\alpha$", r"$d_g$", r"$w_g$", r"$o_g$"],
        "dualPowerLawFixedValley": [r"$F_0$", r"$\beta$", r"$\alpha$"],
    }
    if model not in labels:
        raise ValueError('Bad model name')
    # Return a fresh list so callers may mutate it safely.
    return list(labels[model])
def initRateModel(model):
    """Return the starting parameter vector theta for *model*.

    Raises ValueError for an unknown model name.
    """
    # [f0, alpha, beta] used by the pure power-law variants.
    base = [0.75, -0.53218, -0.5]
    # [f0, alpha, beta, gap depth, gap width, gap offset] for gap variants.
    gap = [0.75, -0.69, -0.1, 0.22, 0.1, 0.26]

    if model == "dualPowerLaw":
        return list(base)
    if model == "dualPowerLawGap":
        # Extra trailing parameter: gap slope.
        return gap + [0.0]
    if model == "dualPowerLawGapFixedSlope":
        return list(gap)
    if model == "dualPowerLawFixedValley":
        return list(base)
    raise ValueError('Bad model name')
def lnPoisprior(theta, model):
    """Box prior on theta: constant 1.0 inside the allowed region, -inf outside.

    (The constant is arbitrary for MCMC since it only shifts the posterior.)
    Raises ValueError for an unknown model name.
    """
    if model == "dualPowerLaw":
        inside = (0.0 <= theta[0] <= 5
                  and -5.0 <= theta[1] <= 5.0
                  and -5.0 <= theta[2] <= 5.0)
    elif model == "dualPowerLawGap":
        inside = (0.0 <= theta[0] <= 5
                  and -5.0 <= theta[1] <= 5.0
                  and -5.0 <= theta[2] <= 5.0
                  and 0 <= theta[3] < 5
                  and 0.1 <= theta[4] < 0.3
                  and 0.2 <= theta[5] < 0.4
                  and -0.0 <= theta[6] < 0.05)
    elif model == "dualPowerLawGapFixedSlope":
        inside = (0.0 <= theta[0] <= 5
                  and -5.0 <= theta[1] <= 5.0
                  and -5.0 <= theta[2] <= 5.0
                  and 0 <= theta[3] < 0.6
                  and 0.1 <= theta[4] < 0.3
                  and 0.2 <= theta[5] < 0.4)
    elif model == "dualPowerLawFixedValley":
        inside = (0.0 <= theta[0] <= 5
                  and -5.0 <= theta[1] <= 5.0
                  and -5.0 <= theta[2] <= 5.0)
    else:
        raise ValueError('Bad model name')
    return 1.0 if inside else -np.inf
```
```python
def medianAndErrorbars(data):
    """Median with percentile-based error bars.

    For 1-D input returns [median, p84-median, median-p16]; for 2-D input
    returns one (median, +err, -err) tuple per column.
    """
    if data.ndim > 1:
        lo, mid, hi = np.percentile(data, [16, 50, 84], axis=0)
        return [(m, h - m, m - l) for l, m, h in zip(lo, mid, hi)]
    lo, mid, hi = np.percentile(data, [16, 50, 84])
    return [mid, hi - mid, mid - lo]
def printMedianAndErrorbars(data):
    """Format a 1-D sample's median/error bars as a LaTeX-style string.

    Returns e.g. "0.583^{+0.109}_{-0.086}"; for 2-D input prints a warning
    and returns None (medianAndErrorbars handles multi-column data instead).
    """
    stats = medianAndErrorbars(data)
    if data.ndim > 1:
        print("printMedianAndErrorbars only works for 1D arrays")
        return None
    med, upper, lower = stats
    return "{:.3f}".format(med) + "^{+" + "{:.3f}".format(upper) + "}_{-" + "{:.3f}".format(lower) + "}"
```
```python
```
```python
```
```python
from scipy.integrate import romb
def integrate2DGrid(g, dx, dy):
    """Romberg-integrate a 2-D sampled function over its whole grid.

    Romberg integration requires 2**k + 1 samples per axis, hence the
    odd-size check.  NOTE(review): dx is applied to the inner (last-axis)
    pass and dy to the outer one; since each romb() result scales linearly
    with its spacing, the combined dx*dy factor is still correct regardless
    of which axis each spacing is paired with.
    """
    if g.shape[0]%2 == 0 or g.shape[1]%2 == 0:
        raise ValueError('integrate2DGrid requires a grid with odd number of points on a side');
    return romb(romb(g, dx), dy)
def integrateRateModel(periodRange, rpRange, theta, model):
    """Integrate the rate model over [periodRange] x [rpRange].

    The model normalization still uses the global period_rng/rp_rng, so this
    gives the occurrence in the requested sub-box of the full domain.
    *theta* may be a single parameter vector or a 2-D array of posterior
    samples (one integral per row; an ipywidgets progress bar is shown when
    there are more than 100 samples).
    """
    nPts = 2**5+1 # must be 2**n + 1 (Romberg requirement in integrate2DGrid)
    pGrid, rGrid = np.meshgrid(np.linspace(periodRange[0], periodRange[1], nPts),
                               np.linspace(rpRange[0], rpRange[1], nPts),
                               indexing="ij")
    dp = (pGrid[1,0]-pGrid[0,0])
    dr = (rGrid[0,1]-rGrid[0,0])
    if theta.ndim == 1:
        y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta, model)
        return integrate2DGrid(y, dp, dr)
    else: # assume first dimension is array of thetas
        ret = np.zeros(theta.shape[0])
        if len(ret) > 100:
            f = FloatProgress(min=0, max=len(ret))
            display(f)
        for i in range(len(ret)):
            y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta[i,:], model)
            ret[i] = integrate2DGrid(y, dp, dr)
            if len(ret) > 100:
                f.value += 1
        return ret
def integratePopTimesComp(periodRange, rpRange, theta, model, compGrid):
    """Integrate rate-model x completeness over the box.

    The grid resolution is taken from compGrid's shape (which must satisfy
    the odd-size Romberg requirement of integrate2DGrid); the model
    normalization uses the global period_rng/rp_rng.
    """
    nP = compGrid.shape[0]
    nR = compGrid.shape[1]
    pGrid, rGrid = np.meshgrid(np.linspace(periodRange[0], periodRange[1], nP),
                               np.linspace(rpRange[0], rpRange[1], nR),
                               indexing="ij")
    dp = (pGrid[1,0]-pGrid[0,0])
    dr = (rGrid[0,1]-rGrid[0,0])
    # Pointwise product of the rate density and the completeness map.
    y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta, model)*compGrid
    return integrate2DGrid(y, dp, dr)
```
```python
# population inference functions
def lnlike(theta):
    """Poisson-process log-likelihood of the KOI catalog given rate params.

    Relies on notebook globals: period_grid/rp_grid, period_rng/rp_rng,
    summedCompleteness, vol, koi_periods, koi_rps, model.
    """
    pop = rateModel(period_grid, rp_grid, period_rng, rp_rng, theta, model) * summedCompleteness
    # Average adjacent grid cells before summing against the cell volumes.
    pop = 0.5 * (pop[:-1, :-1] + pop[1:, 1:])
    norm = np.sum(pop * vol)
    # Sum of log-rate at each detected planet minus the expected count.
    ll = np.sum(np.log(rateModel(koi_periods, koi_rps, period_rng, rp_rng, theta, model))) - norm
    return ll if np.isfinite(ll) else -np.inf
# The ln-probability function is just proportional to the ln-likelihood
# since we're assuming uniform priors.
def lnprob(theta):
    """Log-posterior: box prior (lnPoisprior) gating the log-likelihood."""
    lp = lnPoisprior(theta, model)
    if not np.isfinite(lp):
        # Outside the prior box: reject without evaluating the likelihood.
        return -np.inf
    return lnlike(theta)
# Minimizers need the *negative* log-likelihood.
def nll(theta):
    """Negative log-likelihood; a large finite penalty replaces non-finite values."""
    value = lnlike(theta)
    if not np.isfinite(value):
        return 1e15
    return -value
```
```python
# population analysis functions
# We'll reuse these functions to plot all of our results.
def make_plot(pop_comp, x0, x, y, ax):
    """Draw median + credible bands of a marginalized rate on *ax*.

    pop_comp has shape (n_samples, len(y), len(x0)); the y axis (axis 1) is
    integrated out, leaving one curve per posterior sample as a function of
    x0.  *x* supplies only the bin width np.diff(x)[0] used for scaling.
    """
    # print("in make_plot, pop_comp:")
    # print(pop_comp.shape)
    # Average adjacent y-cells (trapezoid-style) before integrating over y.
    pop = 0.5 * (pop_comp[:, 1:] + pop_comp[:, :-1])
    # print("pop:")
    # print(pop.shape)
    pop = np.sum(pop * np.diff(y)[None, :, None], axis=1)
    # Percentiles across posterior samples (axis 0): 95% and 68% bands + median.
    a, b, c, d, e = np.percentile(pop * np.diff(x)[0], [2.5, 16, 50, 84, 97.5], axis=0)
    ax.fill_between(x0, a, e, color="k", alpha=0.1, edgecolor="none")
    ax.fill_between(x0, b, d, color="k", alpha=0.3, edgecolor="none")
    ax.plot(x0, c, "k", lw=1)
def plot_results(samples):
    """Plot observed/intrinsic radius and period distributions for samples.

    For each posterior sample, evaluates the rate model on the global
    (period_grid, rp_grid) and the rate at Earth's period/radius.  Draws a
    2x2 figure: observed vs. intrinsic radius distributions (top row) and
    observed vs. intrinsic period distributions (bottom row).

    Returns (gamma_earth array, matplotlib figure).
    """
    # Loop through the samples and compute the list of population models.
    samples = np.atleast_2d(samples)
    pop = np.empty((len(samples), period_grid.shape[0], period_grid.shape[1]))
    gamma_earth = np.empty((len(samples)))
    for i, p in enumerate(samples):
        pop[i] = rateModel(period_grid, rp_grid, period_rng, rp_rng, p, model)
        # Rate density at (365.25 d, 1 R_earth), scaled by 365.
        gamma_earth[i] = rateModel(365.25, 1.0, period_rng, rp_rng, p, model) * 365.

    fig, axes = plt.subplots(2, 2, figsize=(10, 8))
    fig.subplots_adjust(wspace=0.4, hspace=0.4)

    # Integrate over period.
    dx = 0.25
    x = np.arange(rp_rng[0], rp_rng[1] + dx, dx)
    n, _ = np.histogram(koi_rps, x)

    fsize = 18

    # Plot the observed radius distribution.
    ax = axes[0, 0]
    make_plot(pop * summedCompleteness[None, :, :], rp, x, period, ax)
    ax.errorbar(0.5*(x[:-1]+x[1:]), n, yerr=np.sqrt(n), fmt=".k",
                capsize=0)
    ax.set_xlim(rp_rng[0], rp_rng[1])
    ax.set_xlabel("$R_p\,[R_\oplus]$", fontsize = fsize)
    ax.set_ylabel("# of detected planets", fontsize = fsize)

    # Plot the true radius distribution.
    ax = axes[0, 1]
    make_plot(pop, rp, x, period, ax)
    ax.set_xlim(rp_rng[0], rp_rng[1])
    ax.set_ylim(0, 0.37)
    ax.set_xlabel("$R_p\,[R_\oplus]$", fontsize = fsize)
    ax.set_ylabel("$\mathrm{d}N / \mathrm{d}R$; $\Delta R = 0.25\,R_\oplus$", fontsize = fsize)

    # Integrate over period.
    dx = 31.25
    x = np.arange(period_rng[0], period_rng[1] + dx, dx)
    n, _ = np.histogram(koi_periods, x)

    # Plot the observed period distribution (axes swapped so period is last).
    ax = axes[1, 0]
    make_plot(np.swapaxes(pop * summedCompleteness[None, :, :], 1, 2), period, x, rp, ax)
    ax.errorbar(0.5*(x[:-1]+x[1:]), n, yerr=np.sqrt(n), fmt=".k",
                capsize=0)
    ax.set_xlim(period_rng[0], period_rng[1])
    ax.set_ylim(0, 79)
    ax.set_xlabel("$P\,[\mathrm{days}]$", fontsize = fsize)
    ax.set_ylabel("# of detected planets", fontsize = fsize)

    # Plot the true period distribution.
    ax = axes[1, 1]
    make_plot(np.swapaxes(pop, 1, 2), period, x, rp, ax)
    ax.set_xlim(period_rng[0], period_rng[1])
    ax.set_ylim(0, 0.27)
    ax.set_xlabel("$P\,[\mathrm{days}]$", fontsize = fsize)
    ax.set_ylabel("$\mathrm{d}N / \mathrm{d}P$; $\Delta P = 31.25\,\mathrm{days}$", fontsize = fsize)

    return gamma_earth, fig
```
```python
def getRadii(catalog):
    """Select a planet-radius column according to the global ``whichRadii``.

    Raises ValueError when ``whichRadii`` is not a recognized setting.
    """
    if whichRadii == "corrected":
        return catalog.corrected_prad
    if whichRadii == "corrected Minus 1Sigma":
        # Shift each radius down by its 1-sigma uncertainty.
        return catalog.corrected_prad - catalog.corrected_prad_err1
    if whichRadii == "kic":
        return catalog.koi_prad
    raise ValueError('Bad whichRadii string')
```
```python
stellarTargets = pd.read_csv(stellarCatalog)
base_kois = pd.read_csv(pcCatalog)
m = (period_rng[0] <= base_kois.koi_period) & (base_kois.koi_period <= period_rng[1])
thisRadii = getRadii(base_kois)
m &= np.isfinite(thisRadii) & (rp_rng[0] <= thisRadii) & (thisRadii <= rp_rng[1])
kois = pd.DataFrame(base_kois[m])
allKois = kois
```
```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(kois.koi_period, kois.koi_prad,
yerr = [-kois.koi_prad_err2, kois.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(kois.koi_period, kois.corrected_prad,
yerr = [-kois.corrected_prad_err2, kois.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
```
(50, 400)

```python
period = np.linspace(period_rng[0], period_rng[1], n_period)
rp = np.linspace(rp_rng[0], rp_rng[1], n_rp)
period_grid, rp_grid = np.meshgrid(period, rp, indexing="ij")
periodShape = period_grid.shape
```
```python
inputgrid = "../completenessContours/out_sc0_GK_baseline.fits.gz"
hdulist = fits.open(inputgrid)
cumulative_array = hdulist[0].data
kiclist = np.asarray(hdulist[1].data, dtype=np.int32)
probdet = np.transpose(cumulative_array[0])
probtot = np.transpose(cumulative_array[1])
prihdr = hdulist[0].header
min_comp_period = prihdr["MINPER"]
max_comp_period = prihdr["MAXPER"]
n_comp_period = prihdr["NPER"]
min_comp_rp = prihdr["MINRP"]
max_comp_rp = prihdr["MAXRP"]
n_comp_rp = prihdr["NRP"]
# print "KIC list length" + '{:6d}'.format(kiclist.size)
period_want = np.linspace(min_comp_period, max_comp_period, n_comp_period)
rp_want = np.linspace(min_comp_rp, max_comp_rp, n_comp_rp)
period_want2d, rp_want2d = np.meshgrid(period_want, rp_want)
# interpolate the numerical grids onto the period_grid, rp_grid space
#print("size probtot = " + str(np.shape(probtot)))
#print("size period_want = " + str(np.shape(period_want)))
#print("size rp_want = " + str(np.shape(rp_want)))
numCompVeInterp = RectBivariateSpline(period_want, rp_want, probtot)
numProbDetInterp = RectBivariateSpline(period_want, rp_want, probdet)
```
```python
```
```python
summedCompleteness = numCompVeInterp(period, rp)
summedProbDet = numProbDetInterp(period, rp)
```
```python
# contourLevels = np.arange(1e-2, 1, 5e-2)
contourLevels = [0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1.0]
fig, ax = plt.subplots(figsize=(15,10));
plt.pcolor(period_grid, rp_grid, summedProbDet, cmap="BuGn")
c = plt.contour(period_grid, rp_grid, summedProbDet / kiclist.size, contourLevels,
colors="k", alpha=0.8)
scf = plt.scatter(kois.koi_period, getRadii(kois), cmap="plasma",
c=kois.reliability, edgecolors='k', s=100*kois.totalReliability, alpha = 1.0)
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Instrumental FP Reliability");
#c = plt.contour(period_grid, rp_grid, numCompVe / kiclist.size,
# colors="k", alpha=0.8)
plt.ylim(0.5, 2.5)
plt.xlim(50, 400)
plt.clabel(c, fontsize=12, inline=1, fmt="%.3f")
plt.title("Summed detection*vetting efficiency, " + whichRadii + " radii", fontsize = 18)
plt.xlabel("period [days]", fontsize = 18)
plt.ylabel("$R_p \, [R_\oplus]$", fontsize = 18);
plt.plot([200, 200], [1, 2], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle='--', linewidth=1)
# plt.savefig("summedCompleteness.pdf",bbox_inches='tight')
```

```python
# contourLevels = np.arange(1e-2, 1, 5e-2)
contourLevels = np.arange(1e-3, 1e-2, 1e-3)
contourLevels = np.insert(contourLevels, 0, [1e-4, 5e-4])
fig, ax = plt.subplots(figsize=(15,10));
plt.pcolor(period_grid, rp_grid, summedCompleteness, cmap="BuGn")
c = plt.contour(period_grid, rp_grid, summedCompleteness / kiclist.size, contourLevels,
colors="k", alpha=0.8)
ax.errorbar(kois.koi_period, getRadii(kois),
yerr = [-kois.corrected_prad_err2, kois.corrected_prad_err1],
fmt="none", ecolor="k", alpha = 0.15, marker = None);
scf = plt.scatter(kois.koi_period, getRadii(kois), cmap="plasma",
c=kois.totalReliability, edgecolors='k', s=100*kois.totalReliability, alpha = 1.0)
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability", fontsize = 24);
#c = plt.contour(period_grid, rp_grid, numCompVe / kiclist.size,
# colors="k", alpha=0.8)
plt.ylim(0.75, 2.5)
plt.xlim(50, 400)
plt.clabel(c, fontsize=12, inline=1, fmt="%.4f")
# plt.title("DR25 PC Average detection*vetting efficiency", fontsize = 18)
plt.tick_params(labelsize = 18)
plt.xlabel("period [days]", fontsize = 24)
plt.ylabel("$R_p \, [R_\oplus]$", fontsize = 24);
plt.plot([200, 200], [1, 2], color='k', linestyle=':', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle=':', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle=':', linewidth=1)
plt.plot([0.8*365, 0.8*365], [0.8, 1.2], color='k', linestyle='--', linewidth=1)
plt.plot([1.2*365, 1.2*365], [0.8, 1.2], color='k', linestyle='--', linewidth=1)
plt.plot([0.8*365, 1.2*365], [0.8, 0.8], color='k', linestyle='--', linewidth=1)
plt.plot([0.8*365, 1.2*365], [1.2, 1.2], color='k', linestyle='--', linewidth=1)
# plt.savefig("summedCompleteness.pdf",bbox_inches='tight')
```

```python
1.2*365
```
438.0
```python
```
Compute a basic occurrence rate without reliability
```python
kois = allKois
if model == "dualPowerLaw":
bounds = [(0, 5), (-5, 5), (-5, 5)]
elif model == "dualPowerLawGap":
bounds = [(0, 5), (-5, 5), (-5, 5), (0, 5), (0.0, 0.3), (0.2, 0.4), (-0.2, 0.2)]
elif model == "dualPowerLawGapFixedSlope":
bounds = [(0, 5), (-5, 5), (-5, 5), (0, 5), (0.0, 0.3), (0.2, 0.4)]
elif model == "dualPowerLawFixedValley":
bounds = [(0, 5), (-5, 5), (-5, 5)]
# The ln-likelihood function given at the top of this post.
koi_periods = np.array(kois.koi_period)
koi_rps = np.array(getRadii(kois))
# koi_rps = getRadii(kois)
vol = np.diff(period_grid, axis=0)[:, :-1] * np.diff(rp_grid, axis=1)[:-1, :]
theta_0 = initRateModel(model)
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
print(r.x)
ge, fig = plot_results(r.x);
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: divide by zero encountered in log
import sys
[ 0.56796888 -0.56547296 0.34333841]

```python
rateModel(365.25, 1.0, period_rng, rp_rng, r.x, model)*365
```
0.19504586776969884
```python
##################################################################
ndim, nwalkers = len(r.x), nWalkers
pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=8)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, nBurnin)
sampler.reset()
# Production.
start_time = time.time()
pos, _, _ = sampler.run_mcmc(pos, nMcmc)
print("--- %s seconds ---" % (time.time() - start_time))
kois.to_csv("occurenceRatePosteriors/selectedPcs_noreliability.csv")
samples_noreliability = sampler.flatchain
np.save("occurenceRatePosteriors/occurenceRatePosteriors_noreliability.npy", samples_noreliability)
```
--- 23.8872659206 seconds ---
```python
##################################################################
##################################################################
corner.corner(samples_noreliability, labels=getModelLabels(model), label_kwargs = {"fontsize": 32});
# plt.savefig("occPostNoReliability.pdf",bbox_inches='tight')
##################################################################
gamma_earth_no_reliability, fig = plot_results(samples_noreliability)
# plt.savefig("occMargNoReliability.pdf",bbox_inches='tight')
print(np.mean(gamma_earth_no_reliability))
##################################################################
```
0.22208808389515844


```python
print("F = " + printMedianAndErrorbars(samples_noreliability[:,0]))
print("radius exp (alpha) = " + printMedianAndErrorbars(samples_noreliability[:,2]))
print("period exp (beta) = " + printMedianAndErrorbars(samples_noreliability[:,1]))
```
F = 0.583^{+0.109}_{-0.086}
radius exp (alpha) = 0.296^{+0.523}_{-0.473}
period exp (beta) = -0.548^{+0.177}_{-0.178}
```python
plt.hist(np.log10(gamma_earth_no_reliability), 50, histtype="step", color="k", density=True)
plt.gca().set_yticklabels([])
plt.title("the rate of Earth analogs: " + str(10**np.mean(np.log10(gamma_earth_no_reliability))))
plt.xlabel(r"$\log_{10}\Gamma_\oplus = \left. \log_{10}\mathrm{d}N / \mathrm{d}\ln P \, \mathrm{d}\ln R_p \right |_\oplus$");
print("Mean Gamma_Earth = {0}".format(10**np.mean(np.log10(gamma_earth_no_reliability))))
print("Gamma at p=365 days, r=1Re without reliability = " + printMedianAndErrorbars(gamma_earth_no_reliability))
```
Mean Gamma_Earth = 0.202443046125
Gamma at p=365 days, r=1Re without reliability = 0.205^{+0.106}_{-0.073}

```python
F1Dist_nr = integrateRateModel([50.,200.], [1., 2.], samples_noreliability, model)
print("1-2Re, 50-200 Days without reliability = " + printMedianAndErrorbars(F1Dist_nr))
F0IntDist_nr = integrateRateModel([50.,400.], [0.75, 2.5], samples_noreliability, model)
print("0.75-2.5Re, 50-400 Days integrated without reliability = " + printMedianAndErrorbars(F0IntDist_nr))
```
FloatProgress(value=0.0, max=80000.0)
1-2Re, 50-200 Days without reliability = 0.182^{+0.033}_{-0.029}
FloatProgress(value=0.0, max=80000.0)
0.75-2.5Re, 50-400 Days integrated without reliability = 0.583^{+0.109}_{-0.086}
Compute an occurrence rate with reliability
```python
nTrials = 100
f = FloatProgress(min=0, max=nTrials)
display(f)
allKois = kois
for mCount in range(nTrials):
# randomly select kois
# koiSelect = (np.random.rand(len(allKois)) < allKois.totalReliability)
koiSelect = (np.random.rand(len(allKois)) < allKois.reliability)
kois = allKois[koiSelect]
kois.to_csv("occurenceRatePosteriors/selectedPcs" + str (mCount) + ".csv")
# print(str(mCount) + " of " + str(nTrials) + ", selected " + str(len(kois))
# + " kois out of " + str(len(allKois)) + " after reliability cut")
koi_periods = np.array(kois.koi_period)
koi_rps = np.array(getRadii(kois))
theta_0 = initRateModel(model)
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
##################################################################
ndim, nwalkers = len(r.x), 2*len(r.x)
pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, 400)
sampler.reset()
# Production.
pos, _, _ = sampler.run_mcmc(pos, 2000)
samples = sampler.flatchain
np.save("occurenceRatePosteriors/occurenceRatePosteriors_" + str(mCount) + ".npy", samples)
f.value += 1
```
FloatProgress(value=0.0)
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: divide by zero encountered in log
import sys
```python
import gc # for memory management
for mCount in range(nTrials):
samples = np.load("occurenceRatePosteriors/occurenceRatePosteriors_" + str(mCount) + ".npy");
subsampleFactor = int(np.round(nTrials/10))
if mCount == 0:
allSamples = samples[0:-1:subsampleFactor,:]
else:
allSamples = np.concatenate((allSamples, samples[0:-1:subsampleFactor,:]))
gc.collect() # force garbage collection before loading another one
```
```python
corner.corner(allSamples, labels=getModelLabels(model), label_kwargs = {"fontsize": 32});
# plt.savefig("occPostReliability.pdf",bbox_inches='tight')
```

```python
modelLabels = getModelLabels(model)
for i in range(0,ndim):
print("MCMC no reliability " + modelLabels[i] + "=" + printMedianAndErrorbars(samples_noreliability[:,i]))
for i in range(0,ndim):
print("MCMC with reliability " + modelLabels[i] + "=" + printMedianAndErrorbars(allSamples[:,i]))
```
MCMC no reliability $F_0$=0.583^{+0.109}_{-0.086}
MCMC no reliability $\beta$=-0.548^{+0.177}_{-0.178}
MCMC no reliability $\alpha$=0.296^{+0.523}_{-0.473}
MCMC with reliability $F_0$=0.402^{+0.081}_{-0.066}
MCMC with reliability $\beta$=-0.839^{+0.202}_{-0.209}
MCMC with reliability $\alpha$=0.870^{+0.624}_{-0.600}
```python
gamma_earth, fig = plot_results(allSamples)
# plt.savefig("occMargReliability.pdf",bbox_inches='tight')
```

```python
fig, ax = plt.subplots(figsize=(10,5));
rateGrid = rateModel(period_grid, rp_grid, period_rng, rp_rng, np.median(allSamples, 0), model)
CS = ax.contour(period_grid, rp_grid, rateGrid);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("Period", fontsize = 18);
plt.ylabel("Radius", fontsize = 18);
plt.title("Occurrence Rate Fit", fontsize = 24);
```

```python
plt.figure(figsize=(15,10));
plt.hist(np.log10(gamma_earth), 50, histtype="step", color="k", density=True)
plt.hist(np.log10(gamma_earth_no_reliability), 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.xlabel(r"$\log_{10}\Gamma_\oplus = \left. \log_{10}\mathrm{d}N / \mathrm{d}\ln P \, \mathrm{d}\ln R_p \right |_\oplus$", fontSize = 24);
plt.tick_params(labelsize = 18)
# plt.savefig("logGammaDist.pdf",bbox_inches='tight')
plt.title("the rate of Earth analogs: " + str(round(10**np.mean(np.log10(gamma_earth)), 3))
+ "/" + str(round(10**np.mean(np.log10(gamma_earth_no_reliability)), 3)))
```
Text(0.5,1,'the rate of Earth analogs: 0.082/0.202')

```python
plt.figure(figsize=(15,10));
plt.hist(gamma_earth, 50, histtype="step", color="k", density=True)
plt.hist(gamma_earth_no_reliability, 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.xlabel(r"$\Gamma_\oplus$", fontSize = 36);
plt.tick_params(labelsize = 24)
# plt.savefig("gammaDist.pdf",bbox_inches='tight')
plt.title("the rate of Earth analogs: " + str(round(np.median(gamma_earth), 3))
+ "/" + str(round(np.median(gamma_earth_no_reliability), 3)))
```
Text(0.5,1,'the rate of Earth analogs: 0.083/0.205')

```python
print("Gamma at p=365 days, r=1Re = " + printMedianAndErrorbars(gamma_earth))
print("Gamma at p=365 days, r=1Re without reliability = " + printMedianAndErrorbars(gamma_earth_no_reliability))
```
Gamma at p=365 days, r=1Re = 0.083^{+0.059}_{-0.036}
Gamma at p=365 days, r=1Re without reliability = 0.205^{+0.106}_{-0.073}
```python
plt.figure(figsize=(15,10));
greyLevel = "0.7"
plt.hist(allSamples[:,0], 50, histtype="step", color="k", density=True);
plt.hist(samples_noreliability[:,0], 50, histtype="step", color="b", density=True);
plt.gca().set_yticklabels([])
plt.tick_params(labelsize = 24)
plt.xlabel(r"$F_0$", fontSize = 36);
# plt.savefig("f0Dist.pdf",bbox_inches='tight')
plt.title("Distribution for 50-400 days, 0.75-2.5 $R_\oplus$", fontsize=18);
```

```python
```
```python
F1Dist = integrateRateModel([50.,200.], [1., 2.], allSamples, model)
F1Dist_nr = integrateRateModel([50.,200.], [1., 2.], samples_noreliability, model)
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)
```python
plt.figure(figsize=(15,10));
greyLevel = "0.7"
plt.hist(F1Dist, 50, histtype="step", color="k", density=True);
plt.hist(F1Dist_nr, 50, histtype="step", color="b", density=True);
plt.gca().set_yticklabels([])
plt.plot([0.34, 0.34], [0, 10], color=greyLevel, linestyle='--', linewidth=1)
plt.plot([0.23, 0.23], [0, 10], color=greyLevel, linestyle='--', linewidth=1)
plt.tick_params(labelsize = 24)
plt.xlabel(r"$F_1$", fontSize = 36);
# plt.savefig("f1Dist.pdf",bbox_inches='tight')
plt.title("Distribution for 50-200 days, 1-2 $R_\oplus$", fontsize=18);
```

```python
print("median theta: 1-2Re, 50-200 Days = " + str(integrateRateModel([50.,200.],
[1., 2.], np.median(allSamples, 0), model)))
print("median theta: 1-2Re, 50-200 Days without reliability = " + str(integrateRateModel([50.,200.],
[1., 2.], np.median(samples_noreliability, 0), model)))
print("1-2Re, 50-200 Days = " + printMedianAndErrorbars(F1Dist))
print("1-2Re, 50-200 Days without reliability = " + printMedianAndErrorbars(F1Dist_nr))
```
median theta: 1-2Re, 50-200 Days = 0.13518307486529876
median theta: 1-2Re, 50-200 Days without reliability = 0.18297443902057628
1-2Re, 50-200 Days = 0.134^{+0.030}_{-0.025}
1-2Re, 50-200 Days without reliability = 0.182^{+0.033}_{-0.029}
```python
zetaDist = integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], allSamples, model)
zetaDist_nr = integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], samples_noreliability, model)
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)
```python
plt.figure(figsize=(15,10));
plt.hist(zetaDist, 50, histtype="step", color="k", density=True);
plt.hist(zetaDist_nr, 50, histtype="step", color="b", density=True);
plt.gca().set_yticklabels([])
plt.plot([0.1, 0.1], [0, 40], color=greyLevel, linestyle='--', linewidth=1)
plt.plot([0.03, 0.03], [0, 40], color=greyLevel, linestyle='--', linewidth=1)
plt.tick_params(labelsize = 24)
plt.xlabel(r"$\zeta_{\oplus}$", fontSize = 36);
# plt.savefig("zetaEarthDist.pdf",bbox_inches='tight')
plt.title("Distribution of $\zeta_\oplus$", fontsize=18);
```

```python
print("median theta: zeta-Earth = " + str(integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], np.median(allSamples, 0), model)))
print("median theta: zeta-Earth without reliability = " + str(integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], np.median(samples_noreliability, 0), model)))
print("zeta-Earth = " + printMedianAndErrorbars(zetaDist))
print("zeta-Earth without reliability = " + printMedianAndErrorbars(zetaDist_nr))
```
median theta: zeta-Earth = 0.013640526291302297
median theta: zeta-Earth without reliability = 0.03338640426157651
zeta-Earth = 0.013^{+0.009}_{-0.006}
zeta-Earth without reliability = 0.033^{+0.017}_{-0.012}
```python
np.save("gammaDistReliability.npy", gamma_earth)
np.save("f1DistReliability.npy", F1Dist)
np.save("zetaDistReliability.npy", zetaDist)
```
```python
sag13HZDist = integrateRateModel([237,860], [0.5,1.5], allSamples, model)
plt.hist(sag13HZDist, 50, histtype="step", color="k", density=True);
sag13HZDist_nr = integrateRateModel([237,860], [0.5,1.5], samples_noreliability, model)
plt.hist(sag13HZDist_nr, 50, histtype="step", color="b", density=True);
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)

```python
print("median theta: SAG13 HZ = " + str(integrateRateModel([237,860],
[0.5,1.5], np.median(allSamples, 0), model)))
print("median theta: SAG13 HZ without reliability = " + str(integrateRateModel([237,860],
[0.5,1.5], np.median(samples_noreliability, 0), model)))
print("SAG13 HZ = " + printMedianAndErrorbars(sag13HZDist))
print("SAG13 HZ without reliability = " + printMedianAndErrorbars(sag13HZDist_nr))
```
median theta: SAG13 HZ = 0.11225399195760667
median theta: SAG13 HZ without reliability = 0.2961586394298381
SAG13 HZ = 0.111^{+0.084}_{-0.048}
SAG13 HZ without reliability = 0.294^{+0.173}_{-0.110}
```python
hsuFordDist = integrateRateModel([237,500], [1.0,1.75], allSamples, model)
plt.hist(hsuFordDist, 50, histtype="step", color="k", density=True);
hsuFordDist_nr = integrateRateModel([237,500], [1.0,1.75], samples_noreliability, model)
plt.hist(hsuFordDist_nr, 50, histtype="step", color="b", density=True);
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)

```python
print("median theta: Hsu and Ford HZ = " + str(integrateRateModel([237,500], [1.0,1.75], np.median(allSamples, 0), model)))
print("median theta: Hsu and Ford HZ without reliability = " + str(integrateRateModel([237,500], [1.0,1.75], np.median(samples_noreliability, 0), model)))
print("Hsu and Ford HZ = " + printMedianAndErrorbars(hsuFordDist))
print("Hsu and Ford HZ without reliability = " + printMedianAndErrorbars(hsuFordDist_nr))
```
median theta: Hsu and Ford HZ = 0.06172684831475565
median theta: Hsu and Ford HZ without reliability = 0.12471795886210892
Hsu and Ford HZ = 0.061^{+0.028}_{-0.020}
Hsu and Ford HZ without reliability = 0.123^{+0.043}_{-0.033}
```python
habDist = integrateRateModel([0.61*365,2.216*365], [0.72,1.7], allSamples, model)
plt.hist(habDist, 50, histtype="step", color="k", density=True);
habDist_nr = integrateRateModel([0.61*365,2.216*365], [0.72,1.7], samples_noreliability, model)
plt.hist(habDist_nr, 50, histtype="step", color="b", density=True);
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)

```python
print("median theta: Zink HZ = " + str(integrateRateModel([0.61*365,2.216*365], [0.72,1.7], np.median(allSamples, 0), model)))
print("median theta: Zink HZ without reliability = " + str(integrateRateModel([0.61*365,2.216*365], [0.72,1.7], np.median(samples_noreliability, 0), model)))
print("Zink HZ = " + printMedianAndErrorbars(habDist))
print("Zink HZ without reliability = " + printMedianAndErrorbars(habDist_nr))
```
median theta: Zink HZ = 0.1289037098775841
median theta: Zink HZ without reliability = 0.2998823852829215
Zink HZ = 0.128^{+0.075}_{-0.049}
Zink HZ without reliability = 0.297^{+0.137}_{-0.095}
```python
plt.hist(allKois.reliability, 30, histtype="step", color="b", density=True);
plt.hist(allKois.totalReliability, 30, histtype="step", color="k", density=True);
```

```javascript
%%javascript
IPython.notebook.save_notebook()
```
<IPython.core.display.Javascript object>
```bash
%%bash -s "$model"
jupyter nbconvert --to html computeOccurrence.ipynb
mv computeOccurrence.html htmlArchive/computeOccurrence_$1.html
```
[NbConvertApp] Converting notebook computeOccurrence.ipynb to html
[NbConvertApp] Writing 1434999 bytes to computeOccurrence.html
```python
[0.61*365,2.216*365]
```
[222.65, 808.84]
```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaseline_gaiaRadCut@.ipynb_checkpoints@computeOccurrenceFaOnly-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2d/colorbar/tickfont/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the ``weight`` property of
    ``histogram2d.colorbar.tickfont`` (font weight, 1-1000, or the
    keywords "normal"/"bold")."""

    def __init__(
        self,
        plotly_name="weight",
        parent_name="histogram2d.colorbar.tickfont",
        **kwargs,
    ):
        # Supply the property defaults unless the caller overrode them.
        defaults = {
            "edit_type": "colorbars",
            "extras": ["normal", "bold"],
            "max": 1000,
            "min": 1,
        }
        for option, value in defaults.items():
            kwargs.setdefault(option, value)
        super(WeightValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2d@colorbar@tickfont@_weight.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/legendgrouptitle/font/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``variant`` property of
    ``scatter3d.legendgrouptitle.font`` (typographic variant)."""

    # The closed set of accepted font-variant values.
    _ALLOWED_VALUES = [
        "normal",
        "small-caps",
        "all-small-caps",
        "all-petite-caps",
        "petite-caps",
        "unicase",
    ]

    def __init__(
        self,
        plotly_name="variant",
        parent_name="scatter3d.legendgrouptitle.font",
        **kwargs,
    ):
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("values", self._ALLOWED_VALUES)
        super(VariantValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@legendgrouptitle@font@_variant.py@.PATH_END.py
|
{
"filename": "Teff_from_SpT.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/stars/Teff_from_SpT.py",
"type": "Python"
}
|
import numpy
def Teff_from_SpT(SpT, relation="HH14"):
    """Interpolate an effective temperature from a spectral type string.

    Parameters
    ----------
    SpT : str
        Spectral type, e.g. "K5" or "M3.5" (letter classes F, G, K, M).
    relation : str
        Which SpT-Teff calibration to use: "HH14" (default) or "PM13".

    Returns
    -------
    float
        Effective temperature in Kelvin, linearly interpolated in the
        numeric spectral-type scale.
    """
    # Tabulated spectral types, earliest (hottest) to latest (coolest).
    spt_table = ['F5', 'F8', 'G0', "G2", "G5", "G8", "K0", "K2", "K5", "K7",
                 "M0", "M1", "M2", "M3", "M4", "M5", "M6", "M7"]

    def to_number(spt):
        # Map the letter class to a digit and fold in the subclass so that
        # hotter types get larger numbers (F5 -> 44.0, ..., M7 -> 12.0).
        letter = spt[0].replace("F", "4").replace("G", "3").\
                replace("K", "2").replace("M", "1")
        return float(letter + str(9 - float(spt[1:])))

    spt_numbers = [to_number(entry) for entry in spt_table]

    if relation == 'HH14':
        teff_table = [6600, 6130, 5930, 5690, 5430, 5180, 4870, 4710, 4210,
                      4020, 3900, 3720, 3560, 3410, 3190, 2980, 2860, 2770]
    elif relation == 'PM13':
        # The PM13 calibration stops at M5, so drop the last two entries.
        teff_table = [6420, 6100, 6050, 5870, 5500, 5210, 5030, 4760, 4140,
                      3970, 3770, 3630, 3490, 3360, 3160, 2880]
        spt_table = spt_table[0:-2]
        spt_numbers = spt_numbers[0:-2]

    # numpy.interp requires ascending x values, so reverse both tables
    # (the numeric scale decreases from F5 to M7).
    return numpy.interp(to_number(SpT), spt_numbers[::-1], teff_table[::-1])
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@stars@Teff_from_SpT.py@.PATH_END.py
|
{
"filename": "test_e2e.py",
"repo_name": "LCOGT/banzai-nres",
"repo_path": "banzai-nres_extracted/banzai-nres-main/banzai_nres/tests/test_e2e.py",
"type": "Python"
}
|
import pytest
from banzai.tests.utils import FakeResponse, get_min_and_max_dates
from banzai_nres import settings
from banzai.logs import get_logger
import os
import mock
import numpy as np
from banzai.utils import file_utils
import time
from glob import glob
import pkg_resources
from banzai.celery import app, schedule_calibration_stacking
from banzai.dbs import get_session
from banzai import dbs
from types import ModuleType
from datetime import datetime
from dateutil.parser import parse
from astropy.io import fits
from astropy.table import Table
from banzai.utils import fits_utils
import banzai_nres.dbs
import json
logger = get_logger()

# Fix: TEST_PACKAGE was assigned twice with the same value; keep one.
TEST_PACKAGE = 'banzai_nres.tests'
DATA_ROOT = os.path.join(os.sep, 'archive', 'engineering')

# Enumerate the site/instrument/day-obs directory tree that holds the
# end-to-end test data (three-letter site dirs, one dir per instrument,
# one dir per 201x night).
SITES = [os.path.basename(site_path) for site_path in glob(os.path.join(DATA_ROOT, '???'))]
INSTRUMENTS = [os.path.join(site, os.path.basename(instrument_path)) for site in SITES
               for instrument_path in glob(os.path.join(os.path.join(DATA_ROOT, site, '*')))]
DAYS_OBS = [os.path.join(instrument, os.path.basename(dayobs_path)) for instrument in INSTRUMENTS
            for dayobs_path in glob(os.path.join(DATA_ROOT, instrument, '201*'))]

# Canned data files shipped with the test package.
CONFIGDB_FILENAME = pkg_resources.resource_filename('banzai_nres.tests', 'data/configdb_example.json')
PHOENIX_FILENAME = pkg_resources.resource_filename('banzai_nres.tests', 'data/phoenix.json')
def observation_portal_side_effect(*args, **kwargs):
    """Mock side effect for requests.get against the observation portal.

    Serves a canned JSON response selected by the requested site and start
    date. Bug fix: the second assignment previously discarded the computed
    filename (it interpolated a literal placeholder instead of ``filename``),
    so every call pointed at a non-existent resource.
    """
    site = kwargs['params']['site']
    start = datetime.strftime(parse(kwargs['params']['start_after']).replace(tzinfo=None).date(), '%Y%m%d')
    filename = 'test_obs_portal_response_{site}_{start}.json'.format(site=site, start=start)
    # Resolve the canned response that ships with the test package.
    filename = pkg_resources.resource_filename('banzai_nres.tests', f'data/{filename}')
    return FakeResponse(filename)
def get_instrument_ids(db_address, names):
    """Return the database ids of the instruments with the given names.

    Parameters
    ----------
    db_address : str
        SQLAlchemy database address.
    names : iterable of str
        Instrument names to look up.
    """
    with get_session(db_address) as db_session:
        matched = []
        for instrument_name in names:
            query = db_session.query(dbs.Instrument).filter(dbs.Instrument.name == instrument_name)
            matched.extend(query.all())
        return [instrument.id for instrument in matched]
def celery_join():
    """Block until the banzai celery worker has drained all of its queues.

    Polls the active/scheduled/reserved queues once per second. If the
    inspector loses sight of the worker, it is rebuilt and polling continues.
    """
    celery_inspector = app.control.inspect()
    worker = 'celery@banzai-celery-worker'
    attempt = 0
    while True:
        time.sleep(1)
        attempt += 1
        if attempt % 5 == 0:
            # Periodic heartbeat so long waits are visible in the log.
            logger.info('Processing: ' + '. ' * (attempt // 5))
        queues = [celery_inspector.active(), celery_inspector.scheduled(), celery_inspector.reserved()]
        if any(queue is None or worker not in queue for queue in queues):
            logger.warning('No valid celery queues were detected, retrying...', extra_tags={'queues': queues})
            # Reset the celery connection
            celery_inspector = app.control.inspect()
            continue
        if all(len(queue[worker]) == 0 for queue in queues):
            break
def run_reduce_individual_frames(raw_filenames):
    """Queue every raw frame matching ``raw_filenames`` for reduction and
    wait for the celery workers to finish."""
    logger.info('Reducing individual frames for filenames: {filenames}'.format(filenames=raw_filenames))
    for day_obs in DAYS_OBS:
        matches = glob(os.path.join(DATA_ROOT, day_obs, 'raw', raw_filenames))
        for frame_path in matches:
            # Post each raw frame onto the archive queue for the workers.
            file_utils.post_to_archive_queue(frame_path, os.getenv('FITS_BROKER'),
                                             exchange_name=os.getenv('FITS_EXCHANGE'))
    celery_join()
    logger.info('Finished reducing individual frames for filenames: {filenames}'.format(filenames=raw_filenames))
def stack_calibrations(frame_type):
    """Schedule calibration stacking of ``frame_type`` frames for every
    day-obs directory and wait for the workers to finish.

    Bug fix: the start-of-stacking message was logged twice (the line was
    accidentally duplicated); it is now logged once.
    """
    logger.info('Stacking calibrations for frame type: {frame_type}'.format(frame_type=frame_type))
    for day_obs in DAYS_OBS:
        site, camera, dayobs = day_obs.split('/')
        timezone = dbs.get_timezone(site, db_address=os.environ['DB_ADDRESS'])
        min_date, max_date = get_min_and_max_dates(timezone, dayobs=dayobs)
        # Hard-coded test options for the pipeline runtime context...
        runtime_context = dict(processed_path=DATA_ROOT, log_level='debug', post_to_archive=False,
                               post_to_opensearch=False, fpack=True, reduction_level=92,
                               db_address=os.environ['DB_ADDRESS'], opensearch_qc_index='banzai_qc',
                               opensearch_url='https://opensearch.lco.global',
                               no_bpm=False, ignore_schedulability=True, use_only_older_calibrations=False,
                               preview_mode=False, max_tries=5, broker_url=os.getenv('FITS_BROKER'),
                               no_file_cache=False)
        # ...plus every non-dunder, non-module attribute of the settings module.
        for setting in dir(settings):
            if '__' != setting[:2] and not isinstance(getattr(settings, setting), ModuleType):
                runtime_context[setting] = getattr(settings, setting)
        schedule_calibration_stacking(site, runtime_context, min_date=min_date, max_date=max_date,
                                      frame_types=[frame_type])
    celery_join()
    logger.info('Finished stacking calibrations for frame type: {frame_type}'.format(frame_type=frame_type))
def mark_frames_as_good(raw_filenames):
    """Flag every processed frame matching ``raw_filenames`` as good in the
    database so it is eligible for stacking."""
    logger.info('Marking frames as good for filenames: {filenames}'.format(filenames=raw_filenames))
    for day_obs in DAYS_OBS:
        pattern = os.path.join(DATA_ROOT, day_obs, 'processed', raw_filenames)
        for frame_path in glob(pattern):
            dbs.mark_frame(os.path.basename(frame_path), "good", db_address=os.environ['DB_ADDRESS'])
    logger.info('Finished marking frames as good for filenames: {filenames}'.format(filenames=raw_filenames))
def get_expected_number_of_calibrations(raw_filenames, calibration_type):
    """Count how many stacked calibrations the raw frames should produce.

    Lampflats and doubles (arcs) are stacked per lit-fiber combination
    (read from the OBJECTS header keyword); every other type gets one
    stack per night that has any raw frames.

    Bug fix: FITS files are now opened with a context manager so the handle
    is closed even if reading the header raises.
    """
    number_of_stacks_that_should_have_been_created = 0
    for day_obs in DAYS_OBS:
        raw_filenames_for_this_dayobs = glob(os.path.join(DATA_ROOT, day_obs, 'raw', raw_filenames))
        if calibration_type.lower() == 'lampflat' or calibration_type.lower() == 'double':
            # Group by fibers lit if we are stacking lampflats or doubles (arc frames)
            observed_fibers = set()
            for raw_filename in raw_filenames_for_this_dayobs:
                with fits.open(raw_filename) as cal_hdu:
                    observed_fibers.add(cal_hdu[1].header.get('OBJECTS'))
            number_of_stacks_that_should_have_been_created += len(observed_fibers)
        else:
            # Just one calibration per night
            if len(raw_filenames_for_this_dayobs) > 0:
                number_of_stacks_that_should_have_been_created += 1
    return number_of_stacks_that_should_have_been_created
def check_if_individual_frames_exist(filenames):
    """Assert that each raw frame has a matching reduction-level-92 product
    (raw names use '00', processed names use '92')."""
    for day_obs in DAYS_OBS:
        n_raw = len(glob(os.path.join(DATA_ROOT, day_obs, 'raw', filenames)))
        n_processed = len(glob(os.path.join(DATA_ROOT, day_obs, 'processed', filenames.replace('00', '92'))))
        assert n_raw == n_processed
def run_check_if_stacked_calibrations_were_created(raw_filenames, calibration_type):
    """Assert the number of stacked calibration files on disk matches the
    count expected from the raw frames (and that it is non-zero)."""
    expected = get_expected_number_of_calibrations(raw_filenames, calibration_type)
    created = []
    for day_obs in DAYS_OBS:
        created += glob(os.path.join(DATA_ROOT, day_obs, 'processed',
                                     '*' + calibration_type.lower() + '*.fits*'))
    assert expected > 0
    assert len(created) == expected
def run_check_if_stacked_calibrations_have_extensions(calibration_type, extensions_to_check):
    """Assert every stacked calibration of ``calibration_type`` contains all
    of the FITS extensions named in ``extensions_to_check``.

    Bug fix: the HDU list is now opened with a context manager so the file
    handle is closed even when an assertion fails (it previously leaked).
    """
    created_stacked_calibrations = []
    for day_obs in DAYS_OBS:
        created_stacked_calibrations += glob(os.path.join(DATA_ROOT, day_obs, 'processed',
                                                          '*' + calibration_type.lower() + '*.fits*'))
    for cal in created_stacked_calibrations:
        with fits.open(cal) as hdulist:
            extnames = [hdu.header.get('extname', None) for hdu in hdulist]
            for ext in extensions_to_check:
                logger.info(f'checking if {ext} is in the saved extensions of {cal}')
                assert ext in extnames
def check_extracted_spectra(raw_filename, spec_extname, columns):
    """Verify extracted 1D spectra: every requested column exists and is not
    all zeros, and the primary header carries an RV measurement."""
    produced = []
    for day_obs in DAYS_OBS:
        produced.extend(glob(os.path.join(DATA_ROOT, day_obs, 'processed', raw_filename)))
    for filename in produced:
        with fits.open(filename) as f:
            hdu = fits_utils.unpack(f)
            spectrum = Table(hdu[spec_extname].data)
            for colname in columns:
                assert colname in spectrum.colnames
                # A column of all zeros would mean the extraction failed.
                assert not np.allclose(spectrum[colname], 0)
            assert 'RV' in hdu[0].header
def run_check_if_stacked_calibrations_are_in_db(raw_filenames, calibration_type):
    """Assert the expected number of master calibrations of this type are
    recorded in the database (and that the expectation is non-zero)."""
    expected = get_expected_number_of_calibrations(raw_filenames, calibration_type)
    with dbs.get_session(os.environ['DB_ADDRESS']) as db_session:
        query = db_session.query(dbs.CalibrationImage)
        query = query.filter(dbs.CalibrationImage.type == calibration_type)
        query = query.filter(dbs.CalibrationImage.is_master)
        calibrations_in_db = query.all()
        assert expected > 0
        assert len(calibrations_in_db) == expected
def mock_phoenix_models_in_db(db_address):
    """Insert the canned Phoenix model records and the wavelength resource
    file entry into the test database."""
    with open(PHOENIX_FILENAME) as f:
        phoenix_data = json.load(f)
    with dbs.get_session(db_address) as db_session:
        db_session.bulk_insert_mappings(banzai_nres.dbs.PhoenixModel, phoenix_data)
        resource_record = {'filename': 'phoenix_wavelength.fits',
                           'location': 's3://banzai-nres-phoenix-models-lco-global',
                           'key': 'phoenix_wavelengths'}
        dbs.add_or_update_record(db_session, banzai_nres.dbs.ResourceFile,
                                 {'key': 'phoenix_wavelengths'}, resource_record)
@pytest.mark.e2e
@pytest.fixture(scope='module')
@mock.patch('banzai.dbs.requests.get', return_value=FakeResponse(CONFIGDB_FILENAME))
def init(configdb):
    """Module-scoped fixture: create and populate the e2e test database.

    Creates the banzai-nres database, loads instruments from the mocked
    configdb response, registers the two sites/instruments used by the
    test data, inserts the Phoenix model records, and registers every
    bad-pixel mask found under the instrument directories.
    """
    os.system(f'banzai_nres_create_db --db-address={os.environ["DB_ADDRESS"]}')
    dbs.populate_instrument_tables(db_address=os.environ["DB_ADDRESS"], configdb_address='http://fakeconfigdb')
    # Register the two sites (ELP and LSC) hosting the NRES units in the test data.
    os.system((f'banzai_add_site --site elp --latitude 30.67986944 --longitude -104.015175'
               f' --elevation 2027 --timezone -6 --db-address={os.environ["DB_ADDRESS"]}'))
    os.system((f'banzai_add_site --site lsc --latitude -30.1673833333 --longitude -70.8047888889'
               f' --elevation 2198 --timezone -4 --db-address={os.environ["DB_ADDRESS"]}'))
    os.system((f'banzai_add_instrument --site lsc --camera fl09 --name nres01'
               f' --instrument-type 1m0-NRES-SciCam --db-address={os.environ["DB_ADDRESS"]}'))
    os.system((f'banzai_add_instrument --site elp --camera fl17 --name nres02'
               f' --instrument-type 1m0-NRES-SciCam --db-address={os.environ["DB_ADDRESS"]}'))
    mock_phoenix_models_in_db(os.environ["DB_ADDRESS"])
    # Register every bad-pixel mask shipped alongside the raw test data.
    for instrument in INSTRUMENTS:
        for bpm_filename in glob(os.path.join(DATA_ROOT, instrument, 'bpm/*bpm*')):
            logger.info(f'adding bpm {bpm_filename} to the database')
            os.system(f'banzai_nres_add_bpm --filename {bpm_filename} --db-address={os.environ["DB_ADDRESS"]}')
@pytest.mark.e2e
@pytest.mark.master_bias
class TestMasterBiasCreation:
    """End-to-end checks for master bias creation."""

    @pytest.fixture(autouse=True)
    @mock.patch('banzai.utils.observation_utils.requests.get', side_effect=observation_portal_side_effect)
    def stack_bias_frames(self, mock_lake, init):
        # Reduce the raw biases, mark the products good, then stack them.
        # Depends on the `init` fixture so the database exists first.
        run_reduce_individual_frames('*b00.fits*')
        mark_frames_as_good('*b92.fits*')
        stack_calibrations('bias')

    def test_if_stacked_bias_frame_was_created(self):
        # Every raw bias should have a processed counterpart, and the
        # stacked masters should exist on disk and in the database.
        check_if_individual_frames_exist('*b00*')
        run_check_if_stacked_calibrations_were_created('*b00.fits*', 'bias')
        run_check_if_stacked_calibrations_are_in_db('*b00.fits*', 'BIAS')
@pytest.mark.e2e
@pytest.mark.master_dark
class TestMasterDarkCreation:
    """End-to-end checks for master dark creation."""

    @pytest.fixture(autouse=True)
    @mock.patch('banzai.utils.observation_utils.requests.get', side_effect=observation_portal_side_effect)
    def stack_dark_frames(self, mock_lake):
        # Reduce the raw darks, mark the products good, then stack them.
        run_reduce_individual_frames('*d00.fits*')
        mark_frames_as_good('*d92.fits*')
        stack_calibrations('dark')

    def test_if_stacked_dark_frame_was_created(self):
        # Every raw dark should have a processed counterpart, and the
        # stacked masters should exist on disk and in the database.
        check_if_individual_frames_exist('*d00*')
        run_check_if_stacked_calibrations_were_created('*d00.fits*', 'dark')
        run_check_if_stacked_calibrations_are_in_db('*d00.fits*', 'DARK')
@pytest.mark.e2e
@pytest.mark.master_flat
class TestMasterFlatCreation:
    """End-to-end checks for master lampflat creation."""

    @pytest.fixture(autouse=True)
    @mock.patch('banzai.utils.observation_utils.requests.get', side_effect=observation_portal_side_effect)
    def stack_flat_frames(self, mock_lake):
        # Reduce the raw lampflats, mark the products good, then stack them.
        run_reduce_individual_frames('*w00.fits*')
        mark_frames_as_good('*w92.fits*')
        stack_calibrations('lampflat')

    def test_if_stacked_flat_frame_was_created(self):
        # The stacked lampflats must exist, carry the trace/profile/blaze
        # extensions, and be recorded in the database.
        check_if_individual_frames_exist('*w00*')
        run_check_if_stacked_calibrations_were_created('*w00.fits*', 'lampflat')
        run_check_if_stacked_calibrations_have_extensions('lampflat', ['TRACES', 'PROFILE', 'BLAZE'])
        run_check_if_stacked_calibrations_are_in_db('*w00.fits*', 'LAMPFLAT')
@pytest.mark.e2e
@pytest.mark.master_arc
class TestMasterArcCreation:
    """End-to-end checks for stacked arc (double) frame creation."""

    @pytest.fixture(autouse=True)
    @mock.patch('banzai.utils.observation_utils.requests.get', side_effect=observation_portal_side_effect)
    def stack_arc_frames(self, mock_lake):
        # Reduce the raw arcs, mark the products good, then stack them.
        run_reduce_individual_frames('*a00.fits*')
        mark_frames_as_good('*a92.fits*')
        stack_calibrations('double')

    def test_if_stacked_arc_frame_was_created(self):
        # The stacked doubles must exist, carry the wavelength solution and
        # line features extensions, and be recorded in the database.
        check_if_individual_frames_exist('*a00*')
        run_check_if_stacked_calibrations_were_created('*a00.fits*', 'double')
        run_check_if_stacked_calibrations_have_extensions('double', ['WAVELENGTH', 'FEATURES'])
        run_check_if_stacked_calibrations_are_in_db('*a00.fits*', 'DOUBLE')

    def test_quality_of_wavelength_calibration(self, calibration_type='double', primaryextension=1):
        """The fitted radial-velocity precision must land in a sane range.

        Bug fix: the HDU list is now opened with a context manager so the
        file handle is closed even when an assertion fails (it previously
        leaked every open file).
        """
        created_stacked_calibrations = []
        for day_obs in DAYS_OBS:
            created_stacked_calibrations += glob(os.path.join(DATA_ROOT, day_obs, 'processed',
                                                              '*' + calibration_type.lower() + '*.fits*'))
        for cal in created_stacked_calibrations:
            with fits.open(cal) as hdulist:
                quality_metrics = hdulist[primaryextension].header
                assert quality_metrics['RVPRECSN'] < 10
                assert quality_metrics['RVPRECSN'] > 1
@pytest.mark.e2e
@pytest.mark.science_frames
class TestScienceFrameProcessing:
    """End-to-end checks for science frame reduction."""

    @pytest.fixture(autouse=True)
    # Note this requires the GAIA and SIMBAD services to be up. It's a little scary to depend on outside data source
    # for our tests. To mock this, we would have to write a control command and use broadcast() to get it to the workers
    # See https://stackoverflow.com/questions/30450468/mocking-out-a-call-within-a-celery-task
    def process_frames(self):
        run_reduce_individual_frames('*e00.fits*')

    def test_if_science_frames_were_created(self):
        # Each raw science frame must yield a 1D product, a 2D product,
        # and a PDF summary, and the extracted spectrum must be populated.
        for day_obs in DAYS_OBS:
            raw_files = glob(os.path.join(DATA_ROOT, day_obs, 'raw', '*e00*'))
            processed_1d_files = glob(os.path.join(DATA_ROOT, day_obs, 'processed', '*e92-1d*'))
            processed_2d_files = glob(os.path.join(DATA_ROOT, day_obs, 'processed', '*e92-2d*'))
            summary_files = glob(os.path.join(DATA_ROOT, day_obs, 'processed', '*.pdf'))
            assert len(raw_files) == len(processed_1d_files)
            assert len(raw_files) == len(processed_2d_files)
            assert len(raw_files) == len(summary_files)
        check_extracted_spectra('*e92-1d.fits*', 'SPECTRUM', ['wavelength', 'flux', 'uncertainty'])
|
LCOGTREPO_NAMEbanzai-nresPATH_START.@banzai-nres_extracted@banzai-nres-main@banzai_nres@tests@test_e2e.py@.PATH_END.py
|
{
"filename": "dataset.py",
"repo_name": "kboone/avocado",
"repo_path": "avocado_extracted/avocado-master/avocado/dataset.py",
"type": "Python"
}
|
import numpy as np
import os
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold
from .astronomical_object import AstronomicalObject
from .instruments import get_band_central_wavelength
from .utils import (
AvocadoException,
write_dataframe,
read_dataframes,
read_dataframe,
read_dataframes_query,
)
from .settings import settings
class Dataset:
"""A dataset of many astronomical objects.
Parameters
----------
name : str
Name of the dataset. This will be used to determine the filenames of
various outputs such as computed features and predictions.
metadata : pandas.DataFrame
DataFrame where each row is the metadata for an object in the dataset.
See :class:`AstronomicalObject` for details.
observations : pandas.DataFrame
Observations of all of the objects' light curves. See
:class:`AstronomicalObject` for details.
objects : list
A list of :class:`AstronomicalObject` instances. Either this or
observations can be specified but not both.
chunk : int (optional)
If the dataset was loaded in chunks, this indicates the chunk number.
num_chunks : int (optional)
If the dataset was loaded in chunks, this is the total number of chunks
used.
"""
def __init__(
    self,
    name,
    metadata,
    observations=None,
    objects=None,
    chunk=None,
    num_chunks=None,
    object_class=AstronomicalObject,
):
    """Create a new Dataset from a set of metadata and observations"""
    # Make copies of everything so that we don't mess anything up.
    metadata = metadata.copy()
    if observations is not None:
        observations = observations.copy()
    self.name = name
    self.metadata = metadata
    self.chunk = chunk
    self.num_chunks = num_chunks
    self.object_class = object_class
    # Populated later by the classification machinery.
    self.predictions = None
    self.classifier = None
    if observations is None:
        if objects is not None:
            # Objects passed in directly
            self.objects = np.asarray(objects)
        else:
            # Metadata only
            self.objects = None
    else:
        # Load each astronomical object in the dataset.
        # Pre-size an object array aligned with the metadata rows, so each
        # object lands at the index of its metadata entry.
        self.objects = np.zeros(len(self.metadata), dtype=object)
        self.objects[:] = None
        meta_dicts = self.metadata.to_dict("records")
        for object_id, object_observations in observations.groupby("object_id"):
            meta_index = self.metadata.index.get_loc(object_id)
            # Make sure that every object_id only appears once in the
            # metadata. Otherwise we have a corrupted dataset that we can't
            # handle (get_loc returns a non-int for duplicated labels).
            if type(meta_index) != int:
                raise AvocadoException(
                    "Error: found multiple metadata entries for "
                    "object_id=%s! Can't handle." % object_id
                )
            object_metadata = meta_dicts[meta_index]
            object_metadata["object_id"] = object_id
            new_object = self.object_class(object_metadata, object_observations)
            self.objects[meta_index] = new_object
    # Other variables that will be populated by various methods.
    self.raw_features = None
    self.features = None
    self.models = None
def __getitem__(self, key):
    """Return a new Dataset holding the subset selected by ``key``.

    ``key`` indexes ``.objects`` when objects are loaded, otherwise it
    indexes the metadata table directly.
    """
    subset_name = f"{self.name}_subset"
    if self.objects is None:
        # Metadata only
        return Dataset(subset_name, self.metadata[key])
    return Dataset.from_objects(subset_name, self.objects[key])
def __add__(self, dataset):
    """Concatenate the objects of two datasets into a new Dataset."""
    combined_objects = np.concatenate([self.objects, dataset.objects])
    combined_name = f"{self.name}_cat_{dataset.name}"
    return Dataset.from_objects(combined_name, combined_objects)
def __len__(self):
    """Number of objects (metadata rows) in the dataset."""
    return len(self.metadata)
@property
def path(self):
    """Path on disk where this dataset should live (data_directory/name.h5)."""
    return os.path.join(settings["data_directory"], self.name + ".h5")
def get_raw_features_path(self, tag=None):
    """Return the on-disk path for this dataset's raw features.

    Parameters
    ----------
    tag : str (optional)
        The version of the raw features to use. Defaults to
        settings['features_tag'].
    """
    if tag is None:
        tag = settings["features_tag"]
    filename = "%s_%s.h5" % (tag, self.name)
    return os.path.join(settings["features_directory"], filename)
def get_models_path(self, tag=None):
    """Return the on-disk path for this dataset's light-curve models.

    Parameters
    ----------
    tag : str (optional)
        The version of the features/model to use. Defaults to
        settings['features_tag'].
    """
    if tag is None:
        tag = settings["features_tag"]
    filename = "models_%s_%s.h5" % (tag, self.name)
    return os.path.join(settings["features_directory"], filename)
def get_predictions_path(self, classifier=None):
    """Return the on-disk path for this dataset's predictions from a classifier.

    Parameters
    ----------
    classifier : str or :class:`Classifier` (optional)
        The classifier whose predictions are of interest, given either as
        an instance or as a name. Defaults to the stored `self.classifier`.
    """
    if classifier is None:
        classifier = self.classifier
    classifier_name = classifier if isinstance(classifier, str) else classifier.name
    filename = "predictions_%s_%s.h5" % (self.name, classifier_name)
    return os.path.join(settings["predictions_directory"], filename)
@classmethod
def load(cls, name, metadata_only=False, chunk=None, num_chunks=None,
         object_class=AstronomicalObject, **kwargs):
    """Load a dataset that has been saved in HDF5 format in the data
    directory.

    For an example of how to create such a dataset, see
    `scripts/download_plasticc.py`.

    The dataset can optionally be loaded in chunks. To do this, pass chunk
    and num_chunks to this method. See `read_dataframes` for details.

    Parameters
    ----------
    name : str
        The name of the dataset to load.
    metadata_only : bool (optional)
        If False (default), the observations are loaded. Otherwise, only
        the metadata is loaded. This is useful for very large datasets.
    chunk : int (optional)
        If set, load the dataset in chunks. chunk specifies the chunk
        number to load. This is a zero-based index.
    num_chunks : int (optional)
        The total number of chunks to use.
    object_class : type (optional)
        Class used to represent individual objects in the dataset.
        Defaults to AstronomicalObject.
    **kwargs
        Additional arguments to `read_dataframes`.

    Returns
    -------
    dataset : :class:`Dataset`
        The loaded dataset.

    Raises
    ------
    AvocadoException
        If no HDF5 file for `name` exists in the data directory.
    """
    data_directory = settings["data_directory"]
    data_path = os.path.join(data_directory, name + ".h5")
    if not os.path.exists(data_path):
        raise AvocadoException("Couldn't find dataset %s!" % name)
    if metadata_only:
        keys = ["metadata"]
    else:
        keys = ["metadata", "observations"]
    dataframes = read_dataframes(
        data_path, keys, chunk=chunk, num_chunks=num_chunks, **kwargs
    )
    # Create a Dataset object. With metadata_only, only the metadata
    # dataframe is unpacked and observations stay unset.
    dataset = cls(name, *dataframes, chunk=chunk, num_chunks=num_chunks,
                  object_class=object_class)
    return dataset
@classmethod
def from_objects(cls, name, objects, **kwargs):
    """Build a dataset from a list of AstronomicalObject instances.

    Parameters
    ----------
    name : str
        The name of the dataset.
    objects : list
        A list of AstronomicalObject instances.
    **kwargs
        Additional arguments passed through to the Dataset constructor.

    Returns
    -------
    dataset : :class:`Dataset`
        The assembled dataset.
    """
    # Assemble the metadata table from the objects, keyed by object_id.
    metadata = pd.DataFrame([obj.metadata for obj in objects])
    metadata.set_index("object_id", inplace=True)
    return cls(name, metadata, objects=objects, **kwargs)
def label_folds(self, num_folds=None, random_state=None):
    """Assign each object to one of `num_folds` stratified cross-validation folds.

    This is only applicable to training datasets that have assigned
    classes. If the dataset is an augmented dataset (its metadata has a
    'reference_object_id' column), all augmentations of the same object
    are kept in the same fold as the original object.

    Parameters
    ----------
    num_folds : int (optional)
        The number of folds to use. Default: settings['num_folds']
    random_state : int (optional)
        The random number initializer to use for splitting the folds.
        Default: settings['fold_random_state'].

    Returns
    -------
    fold_indices : `pandas.Series`
        A pandas Series where each element is an integer representing the
        assigned fold for each object.

    Raises
    ------
    AvocadoException
        If the metadata has no 'class' column.
    """
    if num_folds is None:
        num_folds = settings["num_folds"]
    if random_state is None:
        random_state = settings["fold_random_state"]
    if "class" not in self.metadata:
        raise AvocadoException(
            "Dataset %s does not have labeled classes! Can't separate "
            "into folds." % self.name
        )
    if "reference_object_id" in self.metadata:
        # Augmented dataset: compute the folds on the original
        # (reference) objects only, so augmentations can't leak into a
        # different fold than their source object.
        is_augmented = True
        reference_mask = self.metadata["reference_object_id"].isna()
        reference_metadata = self.metadata[reference_mask]
    else:
        is_augmented = False
        reference_metadata = self.metadata
    reference_classes = reference_metadata["class"]
    # Stratified split on the class labels; the labels are passed as
    # both X and y since only their length is needed for X here.
    folds = StratifiedKFold(
        n_splits=num_folds, shuffle=True, random_state=random_state
    )
    fold_map = {}
    for fold_number, (fold_train, fold_val) in enumerate(
        folds.split(reference_classes, reference_classes)
    ):
        # Each object is assigned the fold in which it appears as
        # validation data.
        for object_id in reference_metadata.index[fold_val]:
            fold_map[object_id] = fold_number
    if is_augmented:
        # Augmented rows inherit the fold of their reference object;
        # the reference rows themselves map via their own object_id.
        fold_indices = self.metadata["reference_object_id"].map(fold_map)
        fold_indices[reference_mask] = self.metadata.index.to_series().map(fold_map)
    else:
        fold_indices = self.metadata.index.to_series().map(fold_map)
    # NOTE(review): astype(int) raises if any object failed to map to a
    # fold (NaN) — presumably every reference_object_id is present in
    # the metadata; confirm upstream.
    fold_indices = fold_indices.astype(int)
    return fold_indices
def get_object(self, index=None, object_class=None, object_id=None):
    """Retrieve a single object from the dataset.

    Exactly one of the following specifications is valid:
    - index alone: the index-th object in the dataset;
    - index + object_class: the index-th object of that class;
    - object_id alone: the object with that ID.

    Parameters
    ==========
    index : int (optional)
        The index of the object in the dataset in the range
        [0, num_objects-1]. If a specific object_class is specified, then
        the index only counts objects of that class.
    object_class : int or str (optional)
        Filter for objects of a specific class. If this is specified, then
        index must also be specified.
    object_id : str (optional)
        Retrieve an object with this specific object_id. If index or
        object_class is specified, then object_id cannot also be specified.

    Returns
    =======
    astronomical_object : AstronomicalObject
        The object that was retrieved.

    Raises
    ======
    AvocadoException
        For conflicting/incomplete keyword combinations or an unknown
        object_id.
    """
    # Check to make sure that we have a valid object specification.
    base_error = "Error finding object! "
    if object_id is not None:
        if index is not None or object_class is not None:
            raise AvocadoException(
                base_error + "If object_id is specified, can't also "
                "specify index or object_class!"
            )
    if object_class is not None and index is None:
        raise AvocadoException(
            base_error + "Must specify index if object_class is specified!"
        )
    # Figure out the index to use.
    if object_class is not None:
        # Translate the per-class index into an object_id, then fall
        # through to the object_id lookup below.
        class_index = index
        class_meta = self.metadata[self.metadata["class"] == object_class]
        object_id = class_meta.index[class_index]
    if object_id is not None:
        try:
            # NOTE(review): the lookup coerces object_id to str, which
            # assumes the metadata index holds string IDs — confirm for
            # datasets whose index is not string-typed.
            index = self.metadata.index.get_loc(str(object_id))
        except KeyError:
            raise AvocadoException(
                base_error + "No object with object_id=%s" % object_id
            )
    return self.objects[index]
def _get_object(self, index=None, object_class=None, object_id=None, **kwargs):
    """Wrapper around `get_object` that also returns the unused kwargs.

    Useful when pulling an object out of the dataset for further
    processing: the keywords consumed by `get_object` are stripped and
    everything else is handed back to the caller. See `get_object` for
    parameter details.

    Returns
    =======
    astronomical_object : AstronomicalObject
        The object that was retrieved.
    **kwargs
        Additional arguments passed to the function that weren't used.
    """
    astronomical_object = self.get_object(index, object_class, object_id)
    return astronomical_object, kwargs
def read_object(self, object_id, object_class=AstronomicalObject):
    """Read a single object's metadata and observations from disk by object_id.

    This function is designed to be used when the Dataset was initialized
    with only metadata, and can be used to read the observations for a
    single object without having to read the full dataset.

    Parameters
    ----------
    object_id : str
        The ID of the object to read.
    object_class : type (optional)
        NOTE(review): this parameter is currently unused — the object is
        always constructed with `self.object_class`.

    Returns
    -------
    An instance of `self.object_class` for the requested object.

    Raises
    ------
    AvocadoException
        If no metadata row matches object_id.
    """
    dataframes = read_dataframes_query(self.path, ["metadata", "observations"],
                                       object_id)
    metadata = dataframes[0]
    if len(metadata) != 1:
        raise AvocadoException(f"No object with object_id='{object_id}'")
    metadata = metadata.iloc[0].to_dict()
    observations = dataframes[1]
    # Pandas doesn't keep the index when you convert things to
    # dictionaries, so add it back in.
    metadata['object_id'] = object_id
    return self.object_class(metadata, observations)
def get_bands(self):
    """Return a sorted array of all bands present in the dataset.

    Bands are sorted by their central wavelength. This can take a while
    for large datasets, and doesn't work for metadata-only datasets.

    Returns
    -------
    numpy.ndarray
        Sorted array of band names.
    """
    bands = set()
    for obj in self.objects:
        # update() mutates the set in place instead of allocating a new
        # set per object as the previous union() call did.
        bands.update(obj.observations['band'])
    return np.array(sorted(bands, key=get_band_central_wavelength))
def plot_light_curve(self, *args, **kwargs):
    """Plot the light curve for one object in the dataset.

    Object selection keywords are the same as `get_object`; any other
    keyword arguments are forwarded to `AstronomicalObject.plot_light_curve()`.
    """
    obj, remaining_kwargs = self._get_object(*args, **kwargs)
    obj.plot_light_curve(**remaining_kwargs)
def plot_interactive(self):
    """Interactively browse the dataset's light curves with widget controls.

    This requires the ipywidgets package to be set up, and has only been
    tested in jupyter-lab. An integer slider selects the object index and
    a dropdown filters by class; changing the class rescales the slider's
    range to the number of objects of that class.
    """
    from ipywidgets import interact, IntSlider, Dropdown
    # Empty-string entry means "no class filter".
    object_classes = {"": None}
    for object_class in np.unique(self.metadata["class"]):
        object_classes[object_class] = object_class
    idx_widget = IntSlider(min=0, max=1)
    class_widget = Dropdown(options=object_classes, index=0)
    def update_idx_range(*args):
        # Keep the index slider's maximum in sync with the currently
        # selected class filter.
        if class_widget.value is None:
            idx_widget.max = len(self.metadata) - 1
        else:
            idx_widget.max = (
                np.sum(self.metadata["class"] == class_widget.value) - 1
            )
    class_widget.observe(update_idx_range, "value")
    update_idx_range()
    interact(
        self.plot_light_curve,
        index=idx_widget,
        object_class=class_widget,
        show_gp=True,
        uncertainties=True,
        verbose=False,
        subtract_background=True,
    )
def write(self, overwrite=False, **kwargs):
    """Write the dataset (metadata + observations) out to disk.

    The dataset is stored at `self.path` in the data directory.

    Parameters
    ----------
    overwrite : bool (optional)
        Forwarded to `utils.write_dataframe` for the metadata table.
    **kwargs
        Additional arguments to be passed to `utils.write_dataframe`.
    """
    # Collect every object's observations into one table, tagging rows
    # with their object_id. Work on a copy so we don't permanently
    # mutate each object's own observations DataFrame (the previous
    # implementation injected the object_id column in place).
    observations = []
    for obj in self.objects:
        object_observations = obj.observations.copy()
        object_observations["object_id"] = obj.metadata["object_id"]
        observations.append(object_observations)
    observations = pd.concat(observations, ignore_index=True, sort=False)
    write_dataframe(
        self.path,
        self.metadata,
        "metadata",
        chunk=self.chunk,
        num_chunks=self.num_chunks,
        overwrite=overwrite,
        **kwargs
    )
    write_dataframe(
        self.path,
        observations,
        "observations",
        index_chunk_column=False,
        chunk=self.chunk,
        num_chunks=self.num_chunks,
        append=True,
        **kwargs
    )
def extract_raw_features(self, featurizer, keep_models=False):
    """Extract raw features for every object in the dataset.

    The raw features are saved as `self.raw_features` (and the models as
    `self.models` when keep_models is True) as well as returned.

    Parameters
    ----------
    featurizer : :class:`Featurizer`
        The featurizer that will be used to calculate the features.
    keep_models : bool
        If true, the models used for the features are kept and stored as
        Dataset.models. Note that not all featurizers support this.

    Returns
    -------
    raw_features : pandas.DataFrame
        The extracted raw features, one row per object, indexed by
        object_id.
    """
    list_raw_features = []
    object_ids = []
    models = {}
    # Default for the empty-dataset case; previously `keys` was only
    # defined by the last loop iteration, so an empty dataset raised a
    # NameError instead of producing an empty DataFrame.
    keys = []
    for obj in tqdm(self.objects, desc="Object", dynamic_ncols=True):
        obj_features = featurizer.extract_raw_features(
            obj, return_model=keep_models
        )
        if keep_models:
            obj_features, model = obj_features
            models[obj.metadata["object_id"]] = model
        list_raw_features.append(obj_features.values())
        object_ids.append(obj.metadata["object_id"])
    if object_ids:
        # Pull the keys off of the last extraction. They are assumed to
        # be the same for every set of features.
        keys = obj_features.keys()
    raw_features = pd.DataFrame(list_raw_features, index=object_ids, columns=keys)
    raw_features.index.name = "object_id"
    self.raw_features = raw_features
    if keep_models:
        self.models = models
    return raw_features
def select_features(self, featurizer):
    """Select the classification features from the previously computed raw features.

    Requires `self.raw_features` to be populated first, either via
    `extract_raw_features` (computed from the data) or
    `load_raw_features` (recovered from disk). The result is stored on
    `self.features` as well as returned.

    Parameters
    ----------
    featurizer : :class:`Featurizer`
        The featurizer that will be used to select the features.

    Returns
    -------
    features : pandas.DataFrame
        The selected features.

    Raises
    ------
    AvocadoException
        If the raw features have not been computed/loaded yet.
    """
    if self.raw_features is None:
        raise AvocadoException(
            "Must calculate raw features before selecting features!"
        )
    selected = featurizer.select_features(self.raw_features)
    self.features = selected
    return selected
def write_raw_features(self, tag=None, **kwargs):
    """Write the raw features out to the features directory.

    The file name combines the dataset's name and the given features tag.

    Parameters
    ----------
    tag : str (optional)
        The tag for this version of the features. Defaults to
        settings['features_tag'].
    **kwargs
        Additional arguments to be passed to `utils.write_dataframe`.
    """
    write_dataframe(
        self.get_raw_features_path(tag=tag),
        self.raw_features,
        "raw_features",
        chunk=self.chunk,
        num_chunks=self.num_chunks,
        **kwargs
    )
def load_raw_features(self, tag=None, **kwargs):
    """Load previously saved raw features from disk into `self.raw_features`.

    Parameters
    ----------
    tag : str (optional)
        The version of the raw features to use. Defaults to
        settings['features_tag'].
    **kwargs
        Additional arguments to be passed to `utils.read_dataframe`.

    Returns
    -------
    raw_features : pandas.DataFrame
        The loaded raw features.
    """
    path = self.get_raw_features_path(tag=tag)
    raw_features = read_dataframe(
        path,
        "raw_features",
        chunk=self.chunk,
        num_chunks=self.num_chunks,
        **kwargs
    )
    self.raw_features = raw_features
    return raw_features
def predict(self, classifier):
    """Generate class predictions for the dataset using a classifier.

    The classifier and its predictions are stored on `self.classifier`
    and `self.predictions`.

    Parameters
    ----------
    classifier : :class:`Classifier`
        The classifier to use.

    Returns
    -------
    predictions : :class:`pandas.DataFrame`
        Per-class predictions for each object.
    """
    predictions = classifier.predict(self)
    self.classifier = classifier
    self.predictions = predictions
    return predictions
def write_predictions(self, classifier=None, **kwargs):
    """Write this dataset's predictions for a classifier to disk.

    The file in the predictions directory is named after both the dataset
    and the classifier.

    Parameters
    ----------
    classifier : str or :class:`Classifier` (optional)
        The classifier the predictions belong to, given as an instance or
        a name. Defaults to the stored `self.classifier`.
    **kwargs
        Additional arguments to be passed to `utils.write_dataframe`.
    """
    target_path = self.get_predictions_path(classifier)
    write_dataframe(
        target_path,
        self.predictions,
        "predictions",
        chunk=self.chunk,
        num_chunks=self.num_chunks,
        **kwargs
    )
def load_predictions(self, classifier=None, **kwargs):
    """Load previously saved predictions for a classifier from disk.

    The loaded predictions are sorted by index and stored on
    `self.predictions`.

    Parameters
    ----------
    classifier : str or :class:`Classifier` (optional)
        The classifier the predictions belong to, given as an instance or
        a name. Defaults to the stored `self.classifier`.
    **kwargs
        Additional arguments to be passed to `utils.read_dataframe`.

    Returns
    -------
    predictions : :class:`pandas.DataFrame`
        Per-class predictions for each object.
    """
    predictions = read_dataframe(
        self.get_predictions_path(classifier),
        "predictions",
        chunk=self.chunk,
        num_chunks=self.num_chunks,
        **kwargs
    )
    predictions.sort_index(inplace=True)
    self.predictions = predictions
    return predictions
def write_models(self, tag=None):
    """Write the models of the light curves to disk.

    The models will be stored in the features directory using the
    dataset's name and the given features tag. Note that for now the
    models are stored as individual tables in the HDF5 file because there
    doesn't appear to be a good way to store fixed length arrays in
    pandas.

    WARNING: This is not the best way to implement this, and there are
    definitely much better ways. This also isn't thread-safe at all.

    Parameters
    ----------
    tag : str (optional)
        The tag for this version of the features. By default, this will
        use settings['features_tag'].
    """
    models_path = self.get_models_path(tag=tag)
    # The context manager guarantees the HDF5 handle is closed even if a
    # model fails to serialize; the previous implementation leaked the
    # open store on error.
    with pd.HDFStore(models_path, "a") as store:
        for model_name, model in self.models.items():
            model.to_hdf(store, model_name, mode="a")
|
kbooneREPO_NAMEavocadoPATH_START.@avocado_extracted@avocado-master@avocado@dataset.py@.PATH_END.py
|
{
"filename": "slider_params.py",
"repo_name": "HinLeung622/pipes_vis",
"repo_path": "pipes_vis_extracted/pipes_vis-main/pipes_vis/slider_params.py",
"type": "Python"
}
|
from . import utils
# build a library of standard slider labels and extremes, side sorting and priority

# Relative ordering of the SFH components in the slider panel. Each
# component's base priority is added to its sliders' priorities by the
# loops at the bottom of this module.
sfh_priorities = {
    'burst': 0,
    'constant': 10,
    'exponential': 20,
    'delayed': 30,
    'lognormal': 40,
    'dblplaw': 50,
    'psb_wild2020': 60,
    'psb_twin': 70
}

# commonly used among all SFHs: the log stellar-mass-formed slider.
massformed_dict = {
    'label': r'$\log_{10}M_*/M_\odot$', 'lims': [8.5, 12], 'side': 'left', 'priority': 1
}

# library for all possible metallicity parameters, added to each possible SFH component
metallicity_lib = {
    # constant metallicity
    'metallicity': {'label': r'$Z_*/Z_\odot$', 'lims': [0.0, 2.0], 'side': 'left', 'priority': 2.0},
    # psb_two_step and two_step
    'metallicity_old': {'label': r'$Z_{old}/Z_\odot$', 'lims': [0.0, 2.0], 'side': 'left', 'priority': 2.1},
    # psb_two_step, and psb_linear_step
    'metallicity_burst': {'label': r'$Z_{burst}/Z_\odot$', 'lims': [0.0, 2.0], 'side': 'left', 'priority': 2.2},
    # two_step
    'metallicity_new': {'label': r'$Z_{new}/Z_\odot$', 'lims': [0.0, 2.0], 'side': 'left', 'priority': 2.3},
    # psb_linear_step
    'metallicity_slope': {'label': r'$Z_*/Z_\odot \; Gyr^{-1}$', 'lims': [-1.0, 1.0], 'side': 'left', 'priority': 2.4},
    # psb_linear_step
    'metallicity_zero': {'label': r'$Z_0/Z_\odot$', 'lims': [0.0, 5.0], 'side': 'left', 'priority': 2.5},
    # two_step
    'metallicity_tstep': {'label': r'$t_{Zstep}$ (Gyr)', 'lims': [0.0, utils.cosmo.age(0).value], 'side': 'left', 'priority': 2.6}
}
# Master library of sliders: label (matplotlib mathtext), numeric limits,
# panel side ('left' for SFH parameters, 'right' for everything else) and
# display priority (lower = higher in the panel). Time limits are capped
# at the present-day age of the universe from utils.cosmo.
slider_lib = {
    # various sfh parameters
    # =================== burst ===================
    'burst:tform': {'label': r'$t_{burst}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value], 'side': 'left', 'priority': 3},
    # =================== constant ==================
    'constant:tform': {'label': r'$t_{form}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value], 'side': 'left', 'priority': 3},
    'constant:tend': {'label': r'$t_{end}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value], 'side': 'left', 'priority': 4},
    # =================== exponential ====================
    'exponential:tform': {'label': r'$t_{form}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value - 1], 'side': 'left', 'priority': 3},
    'exponential:tau': {'label': r'$\tau$ (Gyr)', 'lims': [0.1, 10], 'side': 'left', 'priority': 4},
    # =================== delayed ====================
    'delayed:tform': {'label': r'$t_{form}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value - 1], 'side': 'left', 'priority': 3},
    'delayed:tau': {'label': r'$\tau$ (Gyr)', 'lims': [0.1, 10], 'side': 'left', 'priority': 4},
    # =================== lognormal ====================
    'lognormal:tmax': {'label': r'$t_{max}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value], 'side': 'left', 'priority': 3},
    'lognormal:fwhm': {'label': 'FWHM (Gyr)', 'lims': [0.1, 10], 'side': 'left', 'priority': 4},
    # =================== dblplaw ====================
    'dblplaw:tau': {'label': r'$\tau\;/\;t_{max}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value], 'side': 'left', 'priority': 3},
    'dblplaw:alpha': {'label': r'$\alpha$', 'lims': [0.01, 500], 'side': 'left', 'priority': 4},
    'dblplaw:beta': {'label': r'$\beta$', 'lims': [0.01, 500], 'side': 'left', 'priority': 5},
    # =================== psb_wild2020 ====================
    'psb_wild2020:told': {'label': r'$t_{old}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value - 2], 'side': 'left', 'priority': 3},
    'psb_wild2020:tau': {'label': r'$\tau$ (Gyr)', 'lims': [0.1, 10], 'side': 'left', 'priority': 4},
    'psb_wild2020:tburst': {'label': r'$t_{burst}$ (Gyr)', 'lims': [utils.cosmo.age(0).value - 5, utils.cosmo.age(0).value - 0.3], 'side': 'left', 'priority': 5},
    'psb_wild2020:alpha': {'label': r'$\alpha$', 'lims': [0.01, 500], 'side': 'left', 'priority': 6},
    'psb_wild2020:beta': {'label': r'$\beta$', 'lims': [0.01, 500], 'side': 'left', 'priority': 7},
    'psb_wild2020:fburst': {'label': r'$f_{burst}$', 'lims': [0.0, 1.0], 'side': 'left', 'priority': 8},
    # =================== psb_twin ===================
    'psb_twin:told': {'label': r'$t_{old}$ (Gyr)', 'lims': [0, utils.cosmo.age(0).value - 2], 'side': 'left', 'priority': 3},
    'psb_twin:alpha1': {'label': r'$\alpha_1$', 'lims': [0.01, 5], 'side': 'left', 'priority': 4},
    'psb_twin:beta1': {'label': r'$\beta_1$', 'lims': [0.01, 500], 'side': 'left', 'priority': 5},
    'psb_twin:tburst': {'label': r'$t_{burst}$ (Gyr)', 'lims': [utils.cosmo.age(0).value - 5, utils.cosmo.age(0).value - 0.3], 'side': 'left', 'priority': 6},
    'psb_twin:alpha2': {'label': r'$\alpha_2$', 'lims': [0.01, 500], 'side': 'left', 'priority': 7},
    'psb_twin:beta2': {'label': r'$\beta_2$', 'lims': [0.01, 500], 'side': 'left', 'priority': 8},
    'psb_twin:fburst': {'label': r'$f_{burst}$', 'lims': [0.0, 1.0], 'side': 'left', 'priority': 9},
    # =================== now right hand sides ===================
    'redshift': {'label': 'redshift', 'lims': [0, 2], 'side': 'right', 'priority': 1},
    'dust:eta': {'label': r'$\eta_{dust}$', 'lims': [1, 4], 'side': 'right', 'priority': 2},
    'dust:Av': {'label': r'$A_V$', 'lims': [0, 2], 'side': 'right', 'priority': 3},
    'dust:n': {'label': r'$n_{CF00}$', 'lims': [0.3, 2.7], 'side': 'right', 'priority': 4},
    'dust:delta': {'label': r'$\delta_{Salim}$', 'lims': [-1, 1], 'side': 'right', 'priority': 5},
    'dust:B': {'label': r'$B_{Salim}$', 'lims': [0, 5], 'side': 'right', 'priority': 6},
    'dust:qpah': {'label': r'$q_{PAH}$ (%)', 'lims': [0, 5], 'side': 'right', 'priority': 7},
    'dust:umin': {'label': r'$U_{min}$', 'lims': [0, 10], 'side': 'right', 'priority': 8},
    'dust:gamma': {'label': r'$\gamma$', 'lims': [0, 0.1], 'side': 'right', 'priority': 9},
    'nebular:logU': {'label': r'$\log_{10}U$', 'lims': [-10, -2], 'side': 'right', 'priority': 10},
    't_bc': {'label': r'$t_{bc}$ (Gyr)', 'lims': [0.001, 0.1], 'side': 'right', 'priority': 11},
    'veldisp': {'label': r'veldisp$\sigma$ (km/s)', 'lims': [0, 400], 'side': 'right', 'priority': 12}
}
# Attach the shared massformed and metallicity sliders to every SFH component.
for sfh_key in sfh_priorities:
    slider_lib[f'{sfh_key}:massformed'] = massformed_dict.copy()
    for metallicity_key, metallicity_entry in metallicity_lib.items():
        slider_lib[f'{sfh_key}:{metallicity_key}'] = metallicity_entry.copy()

# Offset each SFH-prefixed slider's priority by its component's base
# priority so sliders group by component. A direct dict lookup on the
# key's prefix replaces the original O(sliders x SFH types) nested scan.
for key, entry in slider_lib.items():
    prefix, sep, _ = key.partition(':')
    if sep and prefix in sfh_priorities:
        entry['priority'] += sfh_priorities[prefix]
|
HinLeung622REPO_NAMEpipes_visPATH_START.@pipes_vis_extracted@pipes_vis-main@pipes_vis@slider_params.py@.PATH_END.py
|
{
"filename": "irregulardatagrid.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/images_contours_and_fields/irregulardatagrid.py",
"type": "Python"
}
|
"""
=======================================
Contour plot of irregularly spaced data
=======================================
Comparison of a contour plot of irregularly spaced data interpolated
on a regular grid versus a tricontour plot for an unstructured triangular grid.
Since `~.axes.Axes.contour` and `~.axes.Axes.contourf` expect the data to live
on a regular grid, plotting a contour plot of irregularly spaced data requires
different methods. The two options are:
* Interpolate the data to a regular grid first. This can be done with on-board
means, e.g. via `~.tri.LinearTriInterpolator` or using external functionality
e.g. via `scipy.interpolate.griddata`. Then plot the interpolated data with
the usual `~.axes.Axes.contour`.
* Directly use `~.axes.Axes.tricontour` or `~.axes.Axes.tricontourf` which will
perform a triangulation internally.
This example shows both methods in action.
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.tri as tri
np.random.seed(19680801)
npts = 200
ngridx = 100
ngridy = 200
x = np.random.uniform(-2, 2, npts)
y = np.random.uniform(-2, 2, npts)
z = x * np.exp(-x**2 - y**2)
fig, (ax1, ax2) = plt.subplots(nrows=2)
# -----------------------
# Interpolation on a grid
# -----------------------
# A contour plot of irregularly spaced data coordinates
# via interpolation on a grid.
# Create grid values first.
xi = np.linspace(-2.1, 2.1, ngridx)
yi = np.linspace(-2.1, 2.1, ngridy)
# Linearly interpolate the data (x, y) on a grid defined by (xi, yi).
triang = tri.Triangulation(x, y)
interpolator = tri.LinearTriInterpolator(triang, z)
Xi, Yi = np.meshgrid(xi, yi)
zi = interpolator(Xi, Yi)
# Note that scipy.interpolate provides means to interpolate data on a grid
# as well. The following would be an alternative to the four lines above:
# from scipy.interpolate import griddata
# zi = griddata((x, y), z, (xi[None, :], yi[:, None]), method='linear')
ax1.contour(xi, yi, zi, levels=14, linewidths=0.5, colors='k')
cntr1 = ax1.contourf(xi, yi, zi, levels=14, cmap="RdBu_r")
fig.colorbar(cntr1, ax=ax1)
ax1.plot(x, y, 'ko', ms=3)
ax1.set(xlim=(-2, 2), ylim=(-2, 2))
ax1.set_title('grid and contour (%d points, %d grid points)' %
(npts, ngridx * ngridy))
# ----------
# Tricontour
# ----------
# Directly supply the unordered, irregularly spaced coordinates
# to tricontour.
ax2.tricontour(x, y, z, levels=14, linewidths=0.5, colors='k')
cntr2 = ax2.tricontourf(x, y, z, levels=14, cmap="RdBu_r")
fig.colorbar(cntr2, ax=ax2)
ax2.plot(x, y, 'ko', ms=3)
ax2.set(xlim=(-2, 2), ylim=(-2, 2))
ax2.set_title('tricontour (%d points)' % npts)
plt.subplots_adjust(hspace=0.5)
plt.show()
# %%
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.contour` / `matplotlib.pyplot.contour`
# - `matplotlib.axes.Axes.contourf` / `matplotlib.pyplot.contourf`
# - `matplotlib.axes.Axes.tricontour` / `matplotlib.pyplot.tricontour`
# - `matplotlib.axes.Axes.tricontourf` / `matplotlib.pyplot.tricontourf`
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@images_contours_and_fields@irregulardatagrid.py@.PATH_END.py
|
{
"filename": "rfi_inspect_2458158.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/rfi_inspect/rfi_inspect_2458158.ipynb",
"type": "Jupyter Notebook"
}
|
# RFI Inspection Daily RTP Notebook
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import glob
import os
from astropy import units
from copy import deepcopy
from pyuvdata import UVFlag
import matplotlib.colors as colors
from matplotlib import cm
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
JD = int(JD)
```
JD = "2458158"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458158"
```python
uvf = UVFlag(f'{data_path}/zen.{JD}.total_threshold_and_a_priori_flags.h5')
# Load in the metadata for easier plotting.
freqs = np.unique(uvf.freq_array)
times = np.unique(uvf.time_array)
lsts = np.unique(uvf.lst_array)
chans = np.arange(freqs.size)
plot_times = times - np.floor(times[0])
lsts_hr = lsts * units.rad.to("cycle") * units.day.to("hr")
freqs_MHz = freqs * units.Hz.to("MHz")
extent = (freqs_MHz[0], freqs_MHz[-1], plot_times[-1], plot_times[0])
```
```python
plt.figure(figsize=(16,12))
cax = plt.imshow(uvf.flag_array[:,:,0], aspect='auto', interpolation='nearest',
extent=[uvf.freq_array[0] / 1e6, uvf.freq_array[-1] / 1e6,
uvf.time_array[-1] - JD, uvf.time_array[0] - JD])
plt.xlabel('Frequency (MHz)')
plt.ylabel(f'JD - {JD}')
ax2 = plt.gca().twinx()
ax2.set_ylim([uvf.lst_array[0] * 12 / np.pi, uvf.lst_array[-1] * 12 / np.pi])
ax2.invert_yaxis()
ax2.set_ylabel('LST (hours)')
ax3 = plt.gca().twiny()
ax3.set_xlim([0, uvf.Nfreqs - 1])
ax3.set_xlabel('Channel');
```
Text(0.5, 0, 'Channel')

# Figure 1(a): Full day of XRFI flags
Yellow is flagged. Blue is unflagged.
```python
xrfi_dirs = sorted(glob.glob(f'{data_path}/zen.{JD}.?????.xrfi'))
print(f'Found {len(xrfi_dirs)} directories containing XRFI intermediate data products.')
files1 = [glob.glob(f'{d}/*combined_metrics1.h5')[0] for d in xrfi_dirs]
print(f'Found {len(files1)} combined round 1 XRFI metrics files.')
files2 = [glob.glob(f'{d}/*combined_metrics2.h5')[0] for d in xrfi_dirs]
print(f'Found {len(files2)} combined round 2 XRFI metrics files.')
uvf1 = UVFlag(files1)
uvf2 = UVFlag(files2)
uvf2.metric_array = np.where(np.isinf(uvf2.metric_array), uvf1.metric_array,
uvf2.metric_array)
```
Found 73 directories containing XRFI intermediate data products.
Found 73 combined round 1 XRFI metrics files.
Found 73 combined round 2 XRFI metrics files.
```python
plt.figure(figsize=(16,12))
max_abs = 100
if np.max(uvf2.metric_array) > max_abs:
extend = 'max'
if np.min(uvf2.metric_array) < -max_abs:
extend = 'both'
elif np.min(uvf2.metric_array) < -max_abs:
extend = 'min'
else:
extend = 'neither'
plt.imshow(uvf2.metric_array[:,:,0], aspect='auto', cmap='RdBu_r',
norm=colors.SymLogNorm(linthresh=1,vmin=-max_abs, vmax=max_abs),
extent=[uvf.freq_array[0] / 1e6, uvf.freq_array[-1] / 1e6,
uvf.time_array[-1] - JD, uvf.time_array[0] - JD])
plt.colorbar(pad=.07, extend=extend,
label='RFI Detection Significance ($\sigma$s)')
plt.title('Combined XRFI Metrics')
plt.xlabel('Frequency (MHz)')
plt.ylabel(f'JD - {JD}')
ax2 = plt.gca().twinx()
ax2.set_ylim([uvf.lst_array[0] * 12 / np.pi, uvf.lst_array[-1] * 12 / np.pi])
ax2.invert_yaxis()
ax2.set_ylabel('LST (hours)')
ax3 = plt.gca().twiny()
ax3.set_xlim([0, uvf.Nfreqs - 1])
ax3.set_xlabel('Channel');
```
default base will change from np.e to 10 in 3.4. To suppress this warning specify the base keyword argument.
Text(0.5, 0, 'Channel')

## Figure 2(a): Combined XRFI Detection Significance
This figure shows round 2 XRFI metrics (mean filter outliers) combined in quadrature. When flagged in round 1 of XRFI, round 1's combined median filter metrics are used instead.
```python
# Load in the flags from each round of XRFI flagging.
low_level_flag_labels = (
"abscal_chi_sq_flags1",
"abscal_chi_sq_flags2",
"ag_flags1",
"ag_flags2",
"apriori_flags",
"auto_flags1",
"auto_flags2",
"ax_flags1",
"ax_flags2",
"combined_flags1",
"combined_flags2",
"cross_flags1",
"cross_flags2",
"flags1",
"flags2",
"og_flags1",
"og_flags2",
"omnical_chi_sq_flags1",
"omnical_chi_sq_flags2",
"ox_flags1",
"ox_flags2",
"v_flags1",
"v_flags2",
)
# Keep the thresholded flags separate for easier analysis.
thresholded_flag_labels = (
"abscal_chi_sq_renormed_threshold_flags",
"ag_threshold_flags",
"auto_threshold_flags",
"ax_threshold_flags",
"combined_threshold_flags",
"cross_threshold_flags",
"og_threshold_flags",
"omnical_chi_sq_renormed_threshold_flags",
"ox_threshold_flags",
"v_threshold_flags",
"total_threshold_and_a_priori_flags",
)
low_level_flags = {}
for file_id in low_level_flag_labels:
flag_files = []
for xrfi_dir in xrfi_dirs:
matching_files = glob.glob(os.path.join(xrfi_dir, f"*.{file_id}.h5"))
if len(matching_files) > 0:
flag_files.append(matching_files[0])
if len(flag_files) > 0:
uvf = UVFlag(flag_files)
low_level_flags[file_id] = np.squeeze(uvf.flag_array)
thresholded_flags = {}
for file_id in thresholded_flag_labels:
flag_file = f"{data_path}/zen.{JD}.{file_id}.h5"
if os.path.exists(flag_file):
uvf = UVFlag(flag_file)
thresholded_flags[file_id] = np.squeeze(uvf.flag_array)
all_flags = dict(**low_level_flags, **thresholded_flags)
```
```python
label_mapping = {
f"Round {i}": {
"Priors": ("apriori_flags", "flags1")[i-1],
"Autocorrs": f"auto_flags{i}",
"Crosscorrs": f"cross_flags{i}",
"Omnical\nVisibilities": f"v_flags{i}",
"Omnical\nGains": f"og_flags{i}",
r"Omnical $\chi^2$": f"ox_flags{i}",
"Omnical\nGlobal $\chi^2$": f"omnical_chi_sq_flags{i}",
"Abscal\nGains": f"ag_flags{i}",
r"Abscal $\chi^2$": f"ax_flags{i}",
r"Abscal\nGlobal $\chi^2$": f"abscal_chi_sq_flags{i}",
"Combined\nMetrics": f"combined_flags{i}",
} for i in (1,2)
}
label_mapping["Round 3"] = {
"Priors": "flags2",
"Autocorrs": "auto_threshold_flags",
"Crosscorrs": "cross_threshold_flags",
"Omnical\nGains": "og_threshold_flags",
r"Omnical $\chi^2$": "ox_threshold_flags",
"Omnical\nGlobal $\chi^2$": f"omnical_chi_sq_renormed_threshold_flags",
"Omnical\nVisibilities": "v_threshold_flags",
"Abscal\nGains": "ag_threshold_flags",
r"Abscal $\chi^2$": "ax_threshold_flags",
r"Abscal\nGlobal $\chi^2$": f"abscal_chi_sq_renormed_threshold_flags",
"Combined\nMetrics": "combined_threshold_flags",
'Final\nFlags': "total_threshold_and_a_priori_flags",
}
# remove labels for metrics not used
label_mapping = {rnd: {label: flags for label, flags in labels.items() if flags in all_flags}
for rnd, labels in label_mapping.items()}
```
```python
# Pick easily distinguishable colors
color_palette = (
'#000000', #black
'#ffffff', #white
'#800000', #maroon
'#808000', #olive
'#008b8b', #darkcyan
'#000080', #navy
'#ff8c00', #darkorange
'#ffff00', #yellow
'#00ff00', #lime
'#0000ff', #blue
'#ff00ff', #fuchsia
'#1e90ff', #dodgerblue
'#98fb98', #palegreen
'#ff1493', #deeppink
)
# assign a unique color to a label
label_to_color_map = {"Unflagged": color_palette[0]}
color_index = 1
for mapping in label_mapping.values():
for label in tuple(mapping.keys()) + ("2+ Separate\nMetrics",):
if label not in label_to_color_map:
label_to_color_map[label] = color_palette[color_index]
color_index += 1
```
```python
# Figure out which flags are unique to each step and source
unique_flags_by_stage = {}
for round_label, mapping in label_mapping.items():
unique_flags_by_stage[round_label] = {}
# handle prior flags
prior_flags = low_level_flags[mapping["Priors"]]
unique_flags_by_stage[round_label]["Priors"] = prior_flags
# handle all other flag types
overlap_flags = np.zeros_like(np.squeeze(uvf.flag_array))
for label, file_id in mapping.items():
if label in ["Priors", "Final\nFlags", "Combined\nMetrics"]: # skip these, they are special
continue
flags = all_flags[file_id]
unique_flags = flags.copy()
for other_label, other_file_id in mapping.items():
if other_label in [label, "Priors", "Final\nFlags", "Combined\nMetrics"]:
continue
other_flags = all_flags[other_file_id]
unique_flags &= ~other_flags
overlap_region = flags & other_flags & ~prior_flags
overlap_flags[overlap_region] = True
unique_flags_by_stage[round_label][label] = unique_flags
unique_flags_by_stage[round_label]["2+ Separate\nMetrics"] = overlap_flags
# handle combined metrics separately so that it doesn't affect "2+ Separate\nMetrics"
all_flags_so_far = np.sum(list(unique_flags_by_stage[round_label].values()), axis=0).astype(bool)
combined_metrics_flags = all_flags[mapping["Combined\nMetrics"]]
unique_flags_by_stage[round_label]["Combined\nMetrics"] = combined_metrics_flags & ~all_flags_so_far
# Figure out which flags got applied at the very end when the a priori YAML was used
all_other_round_3_flags = np.sum([flags for flags in unique_flags_by_stage['Round 3'].values()], axis=0).astype(bool)
unique_flags_by_stage['Round 3']["Final\nFlags"] = all_flags[label_mapping['Round 3']["Final\nFlags"]] & (~all_other_round_3_flags)
```
```python
cmap = plt.cm.colors.ListedColormap(list(label_to_color_map.values()))
norm = plt.cm.colors.Normalize(vmin=0, vmax=1)
smap = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
colored_flags = {}
for round_label, flag_dict in unique_flags_by_stage.items():
colored_flags[round_label] = np.zeros(np.squeeze(uvf.flag_array).shape)
for label, flags in flag_dict.items():
colored_flags[round_label][flags] = list(label_to_color_map.keys()).index(label) / (len(label_to_color_map) - 1)
```
```python
def plot_flag_evolution(freq_slice):
    """Plot one waterfall per flagging round showing which stage introduced
    each flag, restricted to the channels selected by *freq_slice*.

    Relies on notebook globals: colored_flags, plot_times, lsts_hr,
    freqs_MHz, cmap, smap, label_to_color_map, JD.
    """
    fig, axes = plt.subplots(len(colored_flags), figsize=(15, 11 * len(colored_flags)), dpi=300)
    # Figure out the details for which part of the flag arrays to plot.
    tmin, tmax = plot_times[0], plot_times[-1]
    lstmin, lstmax = lsts_hr[0], lsts_hr[-1]
    # First and last frequency of the slice bound the x-extent.
    fmin, fmax = freqs_MHz[freq_slice][::freq_slice.size - 1]
    extent = (fmin, fmax, tmax, tmin)
    # Actually plot the things.
    for ax, (label, flags) in zip(axes, colored_flags.items()):
        ax.set_title(label, fontsize=16)
        ax.imshow(flags[:,freq_slice], aspect="auto", extent=extent, cmap=cmap, vmin=0, vmax=1)
        # Secondary axes: LST on the right, channel number on top.
        twinx = ax.twinx()
        twiny = ax.twiny()
        twinx.set_ylim(lstmax, lstmin)
        twiny.set_xlim(freq_slice[0], freq_slice[-1])
        ax.set_xlabel("Frequency (MHz)", fontsize=12)
        ax.set_ylabel(f"JD - {JD}", fontsize=12)
        twinx.set_ylabel("LST (hour)", fontsize=12)
        twiny.set_xlabel("Channel", fontsize=12)
    fig.tight_layout()
    # One categorical colorbar per panel, ticks centered on each color band.
    for ax in axes.ravel():
        cbar = fig.colorbar(smap, ax=ax, orientation="horizontal", pad=0.1)
        cbar.set_ticks(np.linspace(0, 1, 2 * len(cmap.colors) + 1)[1::2])
        cbar.set_ticklabels(list(label_to_color_map.keys()))
```
```python
# Plot flags in the low-band.
if np.any(freqs_MHz < 100):
freq_slice = np.argwhere(freqs_MHz < 100).flatten() # Low-band, pre-FM
plot_flag_evolution(freq_slice)
```
## Figure 3: Flag Evolution in the Low Band
This figure delineates which steps different flags are introduced in, but does not make a distinction between sources when multiple flagging routines flag the same region of the waterfall. The plot shows flags for frequencies below the FM band, for the entire night. The top plot shows the flags for the first round of flagging (median filter), where the prior flags are the apriori flags; the middle plot shows the flags for the second round of flagging (mean filter), where the prior flags are the combined flags from the first round of flagging (plus extra flags based on the metrics added in quadrature); the bottom plot shows the flags for the final round of flagging (thresholding), where the prior flags are the combined flags from round 2 (plus extra flags based on the metrics added in quadrature). After threshold flagging, the "final flags" also include any apriori flags from the YAML files. *Note: for H1C data, this plot will be skipped.*
```python
# Plot flags in the mid-band.
freq_slice = np.argwhere(np.logical_and(freqs_MHz >= 100, freqs_MHz < 200)).flatten()
plot_flag_evolution(freq_slice)
```

## Figure 4: Flag Evolution in the Mid-Band
This figure delineates which steps different flags are introduced in, but does not make a distinction between sources when multiple flagging routines flag the same region of the waterfall. The plot shows flags for frequencies between the FM band and the analog TV band, for the entire night. The top plot shows the flags for the first round of flagging (median filter), where the prior flags are the apriori flags; the middle plot shows the flags for the second round of flagging (mean filter), where the prior flags are the combined flags from the first round of flagging (plus extra flags based on the metrics added in quadrature); the bottom plot shows the flags for the final round of flagging (thresholding), where the prior flags are the combined flags from round 2 (plus extra flags based on the metrics added in quadrature). After threshold flagging, the "final flags" also include any apriori flags from the YAML files.
```python
# Calculate occupancies for different important sets of flags.
label_mapping = {
"A Priori": "apriori_flags",
"Median Filter": "flags1",
"Mean Filter": "flags2",
"Thresholding": "total_threshold_and_a_priori_flags",
}
occupancies = {}
for axis, axis_label in enumerate(("Frequency", "Time")):
occupancies[axis_label] = {}
for flag_label, flag_id in label_mapping.items():
flags = all_flags[flag_id]
occupancies[axis_label][flag_label] = flags.mean(axis=(1-axis))
```
```python
fig, axes = plt.subplots(2, figsize=(15,14), dpi=200)
for i, items in enumerate(zip(axes.ravel(), occupancies.items())):
ax, (occupancy_axis, flag_dict) = items
xvalues = (plot_times, freqs_MHz)[i]
alt_xvalues = (lsts_hr, chans)[i]
xlabel = (f"JD - {JD}", "Frequency (MHz)")[i]
ylabel = (
"Fraction of Channels Flagged",
"Fraction of Integrations Flagged"
)[i]
alt_xlabel = ("LST (hours)", "Channel")[i]
ax.set_xlabel(xlabel, fontsize=12)
ax.set_ylabel(ylabel, fontsize=12)
for flag_label, occupancy in flag_dict.items():
ax.plot(xvalues, occupancy, label=flag_label)
twin_ax = ax.twiny()
twin_ax.set_xlim(alt_xvalues[0], alt_xvalues[-1])
twin_ax.set_xlabel(alt_xlabel, fontsize=12)
ax.legend()
```

## Figure 5: Flagging Occupancies
These plots show the flagging occupancies for the Round 0 Flags (Apriori), Round 1 Flags (Median Filter), Round 2 Flags (Mean Filter), and Round 3 Flags (Thresholding). The top plot shows the fraction of channels flagged at each integration for each set of flags, and the bottom plot shows the fraction of integrations flagged as a function of frequency.
# Metadata
```python
from hera_qm import version
print(version.construct_version_info())
```
{'version': '1.0', 'git_origin': 'git@github.com:HERA-Team/hera_qm.git', 'git_hash': 'a15c511f7e0fc30602257c9eb5ff761bc83ef6a5', 'git_description': 'v1.1-313-ga15c511', 'git_branch': 'master'}
```python
```
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@rfi_inspect@rfi_inspect_2458158.ipynb@.PATH_END.py
|
{
"filename": "debug.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/share/scripts/debug.py",
"type": "Python"
}
|
Debug sample script that does a full setup of the AIPS environment
# Debug sample script: full setup of the AIPS/Obit environment.
# Runs on host "Gollum" as AIPS user 101, data on disk 3.
import OErr, OSystem, UV, AIPS, FITS
import VLACal

# AIPS data directories available on Gollum
adirs = ["/export/data_1/GOLLUM_1",
         "/export/data_1/GOLLUM_2",
         "/export/data_1/GOLLUM_3",
         "/export/data_1/GOLLUM_4",
         "/export/data_2/GOLLUM_5",
         "/export/data_2/GOLLUM_6",
         "/export/data_2/GOLLUM_7",
         "/export/data_2/GOLLUM_8"]
# FITS data directories
fdirs = ["/export/users/aips/FITS"]

# Init Obit with the AIPS and FITS disk lists
err = OErr.OErr()
user = 101
ObitSys = OSystem.OSystem("debug", 1, user, len(adirs), adirs, len(fdirs), fdirs, True, False, err)
OErr.printErrMsg(err, "Error with Obit startup")

# Register the disks with the AIPS/FITS bookkeeping (disk numbers are 1-based)
AIPS.AIPS.userno = user
for disk, ad in enumerate(adirs, 1):
    AIPS.AIPS.disks.append(AIPS.AIPSDisk(None, disk, ad))
for disk, fd in enumerate(fdirs, 1):
    # BUG FIX: the original passed the leftover AIPS directory variable
    # ('ad') here instead of the FITS directory ('fd').
    FITS.FITS.disks.append(FITS.FITSDisk(None, disk, fd))

# Set uv data: AIPS UV file "20051116.K BAND.3" on disk 1
u3 = UV.newPAUV("in", "20051116", "K BAND", 3, 1, True, err)
u3.Header(err)

# Calibrate and split out the target source
VLACal.VLAClearCal(u3, err)
calModel = "3C286_K.MODEL"
target = "M87"
ACal = "1331+305"
PCal = "1239+075"
VLACal.VLACal(u3, target, ACal, err, calModel=calModel, calDisk=1)
VLACal.VLASplit(u3, target, err, outClass="IKDarr")
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@share@scripts@debug.py@.PATH_END.py
|
{
"filename": "_idssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattermapbox/_idssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``ids`` source attribute of ``scattermapbox`` traces."""

    def __init__(self, plotly_name="idssrc", parent_name="scattermapbox", **kwargs):
        # Pull the defaults out first; explicit caller overrides win.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(IdssrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattermapbox@_idssrc.py@.PATH_END.py
|
{
"filename": "_tickvalssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/scene/xaxis/_tickvalssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``tickvals`` source attribute of ``layout.scene.xaxis``."""

    def __init__(
        self, plotly_name="tickvalssrc", parent_name="layout.scene.xaxis", **kwargs
    ):
        # Default edit_type is "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(TickvalssrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@scene@xaxis@_tickvalssrc.py@.PATH_END.py
|
{
"filename": "EXES-Telluric-Correction.ipynb",
"repo_name": "SOFIAObservatory/Recipes",
"repo_path": "Recipes_extracted/Recipes-master/EXES-Telluric-Correction.ipynb",
"type": "Jupyter Notebook"
}
|
EXES: Telluric Correction
================
* **Aim**: Retrieve an atmospheric model from the Planetary Spectrum Generator ([PSG](https://psg.gsfc.nasa.gov/)) and use it to remove telluric features. <br />
* **Data**: Level 3 grism data of the star Arcturus. <br />
* **Tools**: astropy, scipy, PSG <br />
* **Instrument**: EXES <br />
* **Documentation**: [EXES user's manual](https://irsa.ipac.caltech.edu/data/SOFIA/docs/sites/default/files/2022-12/exes_users_revE_0.pdf)
Goals
-------
* Inspect EXES data structure.
* Plot EXES spectrum.
* Retrieve atmospheric model from PSG.
* Gaussian broadening of the model.
* Telluric correction.
Introduction
----------
All EXES level 3 data come with an example atmospheric (ATRAN) model that provides an estimate of the effects of the atmosphere. To do a better job of this, we can retrieve atmospheric models from the PSG and tune different parameters to get something closer to what is found within the target spectrum.
Ingredients
-----------
Level 3 EXES data of the star Arcturus. An internet connection.
#### The data can be directly downloaded [here](https://zenodo.org/record/6574619/files/F0749_EX_SPE_7500601_EXEELONEXEECHL_MRD_0018.fits?download=1).
Imports
-----------
```python
import os
import time
from astropy.io import fits, ascii
import matplotlib.pyplot as plt
from astropy.table import Table
import astropy.units as u
import numpy as np
from scipy.interpolate import interp1d
from astropy.convolution import Gaussian1DKernel
from astropy.convolution import convolve
import warnings
warnings.simplefilter('ignore')
%matplotlib inline
```
```python
# read fits files
hdu = fits.open("example_data/EXES/F0749_EX_SPE_7500601_EXEELONEXEECHL_MRD_0018.fits")
```
```python
# print first 15 lines of header
hdu[0].header[0:10]
```
SIMPLE = T / Written by IDL: Tue Jul 13 14:54:17 2021
BITPIX = -64 /Real*8 (double precision)
NAXIS = 2 /
NAXIS1 = 27044 /
NAXIS2 = 4 /
ADDTIME = 456.741 / Effective on-source time
AIRSPEED= 484.188 / knots, aircraft airspeed das.ic1080...
ALTI_END= 40997.0 / feet, aircraft altitude end fms.altitude)
ALTI_STA= 40998.0 / feet, aircraft altitude start das.ic1080_1
AOR_ID = '75_0060_1' / Astronomical Observation Request Identifie
```python
# specify column data
wavenumber = hdu[0].data[0]
flux_unit = u.erg * u.s ** (-1) * (u.cm) ** -2 * u.sr ** (-1)
flux = hdu[0].data[1] * flux_unit
flux = flux / np.max(flux)
uncertainty = hdu[0].data[2] * flux_unit
atran = hdu[0].data[3]
```
```python
# Plot the whole spectrum
fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(1, 1, 1)
ax2 = ax1.twinx() # plotting both but on different axes
ax2.set_yticks([]) # remove ax2 ticks
ax1.plot(wavenumber, flux, lw=0.5, c="k")
ax2.plot(wavenumber, atran, lw=0.75, c="r", linestyle=":", label="Telluric (ATRAN)")
ax1.set_ylabel(r"Flux (erg s$^{-1}$ cm$^{-2}$ sr$^{-1}$ cm$^{1}$)")
ax1.set_xlabel(r"Wavenumber (cm$^{-1}$)")
plt.legend()
plt.title("ARCTURUS")
plt.show()
```

While the average atmospheric (ATRAN) model throughout the night does a good job at estimating the impact of the atmosphere on these observations, we need to do a better job to subtract out these features.
We will use the [Planetary Spectrum Generator (PSG)](https://psg.gsfc.nasa.gov/) API to retrieve a more-appropriate model.
Edit PSG config file
---------------
The PSG model is selected based on parameters specified in a config.txt file. We have provided an example of a config.txt file but can make changes to that file within this notebook.
```python
# read config file
with open('example_data/EXES/psg_config.txt') as f:
config = f.readlines()
```
```python
# print first lines of config.txt file
config[0:10]
```
['<OBJECT>Planet\n',
'<OBJECT-NAME>Earth\n',
'<OBJECT-DATE>2021/06/17 06:15\n',
'<OBJECT-DIAMETER>12742\n',
'<OBJECT-GRAVITY>9.807\n',
'<OBJECT-GRAVITY-UNIT>g\n',
'<OBJECT-STAR-DISTANCE>1.01597\n',
'<OBJECT-STAR-VELOCITY>0.13346\n',
'<OBJECT-SOLAR-LONGITUDE>86.5\n',
'<OBJECT-SOLAR-LATITUDE>23.53\n']
```python
#make changes to config.txt, this can also be done in any text editor
config[1] = '<OBJECT-NAME>Earth\n'  # example of a change
```
Download atmospheric model from PSG
-----------------------
```python
## retrieve psg model from PSG
# os.popen(
# "curl -d type=trn --data-urlencode file@../example_data/EXES/psg_config.txt https://psg.gsfc.nasa.gov/api.php > ../example_data/EXES/psg_spectrum.txt"
# )
## wait for download to finish
# time.sleep(15)
```
```python
# read psg output file
psg_inp = ascii.read("example_data/EXES/psg_spectrum.txt", header_start=-1)
# parse into astropy table
psg = Table(psg_inp)
# read first few lines
psg[0:5]
```
<div><i>Table length=5</i>
<table id="table140285177609952" class="table-striped table-bordered table-condensed">
<thead><tr><th>Wave/freq</th><th>Total</th><th>H2O</th><th>CO2</th><th>O3</th><th>N2O</th><th>CO</th><th>CH4</th><th>O2</th><th>N2</th><th>Rayleigh</th><th>CIA</th></tr></thead>
<thead><tr><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th></tr></thead>
<tr><td>1310.0</td><td>0.99774</td><td>0.999993</td><td>1.0</td><td>1.0</td><td>0.998092</td><td>1.0</td><td>0.999667</td><td>1.0</td><td>1.0</td><td>1.0</td><td>0.999987</td></tr>
<tr><td>1309.99738</td><td>0.997666</td><td>0.999993</td><td>1.0</td><td>1.0</td><td>0.998017</td><td>1.0</td><td>0.999667</td><td>1.0</td><td>1.0</td><td>1.0</td><td>0.999987</td></tr>
<tr><td>1309.99476</td><td>0.997592</td><td>0.999993</td><td>1.0</td><td>1.0</td><td>0.997943</td><td>1.0</td><td>0.999667</td><td>1.0</td><td>1.0</td><td>1.0</td><td>0.999987</td></tr>
<tr><td>1309.99214</td><td>0.997519</td><td>0.999993</td><td>1.0</td><td>1.0</td><td>0.997871</td><td>1.0</td><td>0.999667</td><td>1.0</td><td>1.0</td><td>1.0</td><td>0.999987</td></tr>
<tr><td>1309.98952</td><td>0.997435</td><td>0.999993</td><td>1.0</td><td>1.0</td><td>0.997787</td><td>1.0</td><td>0.999666</td><td>1.0</td><td>1.0</td><td>1.0</td><td>0.999988</td></tr>
</table></div>
Plot the model with the data
-----------
```python
# Plot the whole spectrum with PSG model
fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(1, 1, 1)
ax2 = ax1.twinx()
ax2.set_yticks([])
ax1.set_xlim(1263, 1306)
ax1.plot(wavenumber, flux, lw=0.5, c="k")
ax2.plot(
psg["Wave/freq"],
psg["Total"],
lw=0.75,
c="r",
linestyle=":",
label="Telluric (PSG)",
)
ax1.set_ylabel(r"Normalized Flux (erg s$^{-1}$ cm$^{-2}$ sr$^{-1}$ cm$^{1}$)")
ax1.set_xlabel(r"Wavenumber (cm$^{-1}$)")
plt.legend(bbox_to_anchor=(1.02, 0.5))
plt.title("ARCTURUS")
plt.show()
```

```python
# Plot a subset of the data
# A function to create a figure and plot the data
def plot_data():
    """Create a figure showing the Arcturus spectrum and return (fig, ax1, ax2).

    ax1 carries the data; ax2 is an unlabeled twin y-axis for overplotting
    telluric models on their own scale.  Relies on notebook globals:
    wavenumber, flux.
    """
    fig = plt.figure(figsize=(15, 5))
    ax1 = fig.add_subplot(1, 1, 1)
    ax2 = ax1.twinx()
    # hide ticks on the model axis so only the data axis is labeled
    ax2.set_yticks([])
    ax1.plot(wavenumber, flux, lw=0.5, c="k")
    ax1.set_ylabel(r"Normalized Flux (erg s$^{-1}$ cm$^{-2}$ sr$^{-1}$ cm$^{1}$)")
    ax1.set_xlabel(r"Wavenumber (cm$^{-1}$)")
    plt.title("ARCTURUS")
    return fig, ax1, ax2
# Call function
fig, ax1, ax2 = plot_data()
# set x range
ax1.set_xlim(1280, 1283)
# Plot full PSG total absorption
ax2.plot(
psg["Wave/freq"],
psg["Total"],
lw=1.25,
c="r",
linestyle=":",
label="Telluric (PSG)",
)
# Plot just the Methane
ax2.plot(
psg["Wave/freq"],
psg["CH4"],
lw=1.25,
c="blue",
linestyle=":",
label=r"CH$_{4}$ (PSG)",
)
plt.legend(bbox_to_anchor=(1.02, 0.5))
plt.show()
```

The model shows absorption features that are too sharp when compared to the target spectrum. We will need to Gaussian-broaden the PSG model to match the resolution of the data with the resolution of the model.
Gaussian broadening
------------
```python
# We will start with a Gaussian kernel assuming a sigma of 4
# create the gaussian kernel
g = Gaussian1DKernel(stddev=4)
# Convolve the kernel with the PSG total absorption
psg["Total_broadened"] = convolve(psg["Total"], g)
# Plot a subset of the spectrum
fig, ax1, ax2 = plot_data()
ax1.set_xlim(1280, 1283)
ax2.set_ylim(
0.2, 1.2
) # tinker with numbers to align model, model and data plotted on difference axes
ax2.plot(
psg["Wave/freq"],
psg["Total_broadened"],
lw=0.75,
c="r",
linestyle=":",
label="Telluric (PSG)",
)
ax2.legend()
plt.show()
```

Interpolate PSG model
-------------
In order to divide the data by the PSG model we need to have them with the same wavelength values. We will use scipy.interpolate.interp1d to interpolate the PSG model onto the x values of the data.
```python
# create the interpolating function based on the Gaussian-broadened PSG data
f = interp1d(psg["Wave/freq"], psg["Total_broadened"])
ynew = f(wavenumber) # The new flux at the data wavenumbers
```
```python
# Plot a subset of the spectrum
fig, ax1, ax2 = plot_data()
ax1.set_xlim(1280, 1283)
ax2.set_ylim(0.2, 1.2)
ax2.plot(wavenumber, ynew, lw=0.75, c="r", linestyle=":", label="Telluric (PSG)")
plt.legend()
plt.show()
```

The model looks the same as the previous figure but is now plotted using the Arcturus data wavenumbers on the x axis.
Tune the Model to Fit the Data
--------------
```python
# the model parameters, tune the impact of individual molecular
# components (they are normalized) by altering these variables.
h2o = 0
co2 = 0
o3 = 0
co = 0
o2 = 0
n2 = 0
rayleigh = 0
cia = 0
ch4 = 2.6
n2o = 2.0
gaussian = 6
```
```python
# select model
total = (
psg["H2O"] ** h2o
* psg["CO2"] ** co2
* psg["O3"] ** o3
* psg["N2O"] ** n2o
* psg["CO"] ** co
* psg["CH4"] ** ch4
* psg["O2"] ** o2
* psg["N2"] ** n2
* psg["Rayleigh"] ** rayleigh
* psg["CIA"] ** cia
)
```
```python
# Convolve the kernel with the PSG total absorption
g = Gaussian1DKernel(stddev=gaussian)
psg_tuned_and_broadened = convolve(total, g)
```
```python
# create a figure
fig, ax1, ax2 = plot_data()
ax1.set_xlim(1280, 1283)
ax1.set_ylim(-0.5, 1.5)
ax1.plot(
psg["Wave/freq"],
psg_tuned_and_broadened,
lw=0.5,
c="r",
label=r"Broadened model (PSG)",
)
ax1.set_ylabel(r"Normalized Flux (erg s$^{-1}$ cm$^{-2}$ sr$^{-1}$ cm$^{1}$)")
ax1.set_xlabel(r"Wavenumber (cm$^{-1}$)")
plt.title("ARCTURUS")
ax1.legend()
plt.show()
```

Subtract Telluric Features
----------
```python
# gaussian broaden
g = Gaussian1DKernel(stddev=gaussian)
gaussian_broadened_absorption = convolve(total, g)
# interpolate
f = interp1d(psg["Wave/freq"], gaussian_broadened_absorption)
ynew = f(wavenumber) # interpolated model absorption
flux_corrected = flux / ynew # data / interpolated model absorption
# create a figure
fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_xlim(1280, 1283)
ax1.plot(
wavenumber,
flux,
lw=0.5,
c="k",
label=r"Flux",
)
ax1.plot(
wavenumber,
flux / atran,
lw=0.5,
c="b",
label=r"Flux corrected (ATRAN)",
)
ax1.plot(
wavenumber,
flux_corrected,
lw=0.5,
c="r",
label=r"Flux corrected (PSG)",
)
ax1.set_ylabel(r"Normalized Flux (erg s$^{-1}$ cm$^{-2}$ sr$^{-1}$ cm$^{1}$)")
ax1.set_xlabel(r"Wavenumber (cm$^{-1}$)")
plt.title("ARCTURUS")
ax1.set_ylim(-0.5, 1.5)
ax1.legend()
plt.show()
```

The PSG does a better job of removing telluric features.
Tips for removing telluric features
------------
While a complete correction isn't possible, most telluric corrections are only needed for small wavelength ranges and individual features. For a better overall correction, consider instead doing telluric corrections for each of the non-merged spectra from the IRSA data products.
|
SOFIAObservatoryREPO_NAMERecipesPATH_START.@Recipes_extracted@Recipes-master@EXES-Telluric-Correction.ipynb@.PATH_END.py
|
{
"filename": "cdbAddImplLang.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/cdb/ws/src/cdbAddImplLang.py",
"type": "Python"
}
|
#!/usr/bin/env python
import sys
import shutil
import os
import getopt
from xml.parsers import expat
class Xml2Container(object):
    """Expat-based scanner that guesses the implementation language of a
    deployment container from the Code attribute of component entries that
    reference it in a CDB Components XML file."""

    def StartElement(self, name, attributes):
        """Expat start-element callback.

        When the element is deployed in the container we are looking for,
        infer the language from its Code attribute: an "alma." prefix means
        Java, any other dot means Python, otherwise C++ is assumed.
        """
        # 'in' replaces the Python-2-only dict.has_key() (removed in py3).
        if 'Container' in attributes and attributes['Container'] == self.container:
            code = attributes['Code']
            if code.find("alma.") == 0:
                self.lang = "java"
            elif code.find('.') > 0:
                self.lang = "py"
            else:
                self.lang = "cpp"

    def Parse(self, filename, contName):
        """Parse *filename* and return the guessed language ("java"/"py"/
        "cpp") for container *contName*, or None when no component deployed
        in that container appears in the file."""
        Parser = expat.ParserCreate()
        Parser.StartElementHandler = self.StartElement
        self.container = contName
        self.lang = None
        ParserStatus = Parser.Parse(open(filename).read(), 1)
        return self.lang
class Xml2Component(object):
    """Expat-based in-place editor: while parsing a Components XML file it
    splices a missing ImplLang attribute (guessed from the Code attribute)
    into every element that has a Container attribute but no ImplLang.

    NOTE(review): Python-2-only constructs (dict.has_key, print statement).
    """
    def StartElement(self, name, attributes):
        # Only touch elements deployed in a container that do not already
        # declare their implementation language.
        if attributes.has_key('Container') and not attributes.has_key('ImplLang'):
            code = attributes['Code']
            # Same heuristic as Xml2Container: "alma." prefix -> Java,
            # any other dot -> Python, otherwise C++.
            if code.find("alma.")==0:
                lang = "java"
            elif code.find('.') > 0 :
                lang = "py"
            else:
                lang = "cpp"
            self.lang = None
            self.isModified = True
            # Locate the end of the current start tag in the raw text;
            # self.offset compensates for text inserted earlier in this file.
            # NOTE(review): uses Parser.ErrorByteIndex as the current parse
            # position -- presumably tracks CurrentByteIndex here; confirm.
            lastidx1 = self.filestr.find(">",self.Parser.ErrorByteIndex+self.offset)
            lastidx2 = self.filestr.find("/>",self.Parser.ErrorByteIndex+self.offset)
            # Pick whichever tag terminator comes first ('>' or '/>').
            # NOTE(review): if neither is found, lastidx stays unbound and
            # the slice below raises NameError.
            if lastidx1 == -1 and lastidx2 == -1:
                sys.stderr.write("ERROR: '>' and /> not found?? \n")
            elif lastidx1 == -1:
                lastidx = lastidx2
            elif lastidx2 == -1:
                lastidx = lastidx1
            elif lastidx1 < lastidx2:
                lastidx = lastidx1
            else:
                lastidx = lastidx2
            # Splice the attribute in and record how much the text grew.
            self.filestr = self.filestr[:lastidx] + " ImplLang=\""+lang+"\" " +self.filestr[lastidx:]
            self.offset += len(" ImplLang=\""+lang+"\" ")
            #print "Component", attributes['Name']," ", self.filestr[self.Parser.ErrorByteIndex:self.Parser.ErrorByteIndex+20]
    def Parse(self, filename):
        """Parse *filename* and return its content with missing ImplLang
        attributes inserted; self.isModified reports whether anything changed."""
        self.Parser = expat.ParserCreate()
        self.Parser.StartElementHandler = self.StartElement
        self.offset = 0
        self.isModified = False
        print filename
        self.filestr = open(filename).read()
        ParserStatus = self.Parser.Parse(open(filename).read(),1)
        return self.filestr
# Global options, overwritten from the command line in __main__.
verbose = False  # -v: print progress information
execute = True  # cleared by -n: dry-run, do not rewrite files
backup = False  # -b: save a .bkp copy before rewriting a file
containers = False  # --containers / -a: process MACI/Containers trees
components = False  # --components / -a: process MACI/Components trees
def haveImplLang(rootContainers, filename):
    """Return True if the file rootContainers/filename already contains an
    "ImplLang" attribute (plain substring search).

    Fixes the original ``find(...) > 0`` test, which missed a match at
    offset 0, and closes the file handle that was previously leaked.
    """
    with open(os.path.join(rootContainers, filename), "r") as f:
        return "ImplLang" in f.read()
def guessImplLang(rootComp, contName):
    """Walk the Components subtree *rootComp* and return the implementation
    language ("java"/"py"/"cpp") guessed for container *contName*, or None
    when no component references that container.

    Python-2-only print statements were converted to single-argument
    print() calls, which produce identical output on Python 2 and 3.
    """
    parser = Xml2Container()
    for root, dirs, files in os.walk(rootComp):
        for filename in files:
            if filename.lower().endswith(".xml"):
                #print "[guessImplLang] looking for container="+contName+" in CDB file="+root+"/"+filename
                lang = parser.Parse(root+"/"+filename, contName)
                if lang != None:
                    if verbose:
                        print("[guessImplLang] Found! in CDB file="+root+"/"+filename)
                    return lang
    sys.stderr.write("ERROR container="+contName+" not found in the components configuration of this CDB ="+rootComp+"\n")
    if verbose:
        print("[guessImplLang] ERROR container="+contName+" not found in the components configuration of this CDB")
    return None
def addImplLang(lang, rootContainers, filename):
    """Insert an ImplLang="<lang>" attribute into the <Container> element
    of the CDB container XML file rootContainers/filename.

    Honors the module flags: *backup* saves a .bkp copy first, *execute*
    gates the actual rewrite, *verbose* echoes the modified content.
    Python-2-only print statements were converted to single-argument
    print() calls (identical output on Python 2 and 3), and the read now
    closes its file handle.
    """
    with open(rootContainers+"/"+filename, "r") as f:
        filestr = f.read()
    idx = filestr.find("<Container")
    if idx == -1:
        idx = filestr.find("< Container")
    if idx == -1:
        if verbose:
            print("[addImplLang] ERROR: '<Container' or '< Container' not found")
        sys.stderr.write("ERROR: '<Container' or '< Container' not found in "+rootContainers+"/"+filename+"\n")
        return
    lastidx = filestr.find(">",idx)
    if lastidx == -1:
        if verbose:
            print("[addImplLang] ERROR: '>' not found")
        sys.stderr.write("ERROR: '>' not found in "+rootContainers+"/"+filename+"\n")
        return
    # splice the attribute in just before the closing '>' of the element
    filestr = filestr[:lastidx] + "\n ImplLang=\""+lang+"\" " +filestr[lastidx:]
    if verbose:
        print("---------------------------------------------------")
        print("Modified file:"+rootContainers+"/"+filename)
        print("---------------------------------------------------")
        print(filestr)
        print("---------------------------------------------------")
    if backup:
        shutil.copyfile(os.path.join(rootContainers, filename),os.path.join(rootContainers, filename+".bkp"))
        if verbose:
            print("[addImplLang] Saving a backup copy of "+rootContainers+"/"+filename+".bkp")
    if execute:
        # replace the original file with the patched content
        os.remove(os.path.join(rootContainers, filename))
        newfile = open(rootContainers+"/"+filename, "w")
        newfile.write(filestr)
        newfile.close()
def usage():
    """Print the command-line help text to stdout.

    Python-2-only print statements were converted to single-argument
    print() calls, which produce identical output on Python 2 and 3;
    the help strings themselves are unchanged.
    """
    print("Add ImplLang to Container and Component xml CDB files when is missing")
    print("")
    print("Usage cdbAddImplLang.py [OPTIONS]")
    print("")
    print("Options:")
    print(" -p PATH the path to search for CDBs to change, default is the current directory")
    print(" -v prints information")
    print(" -n it doesn't execute the changes, it is used along with -v to just print")
    print(" -b it creates backup for the files")
    print(" -h it shows this message")
    print(" --containers it modified only containers")
    print(" --components it modified only components")
    print(" -a it modified component and containers")
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "vnbp:ha", ["containers","components"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
path ="."
for o, a in opts:
if o == "-v":
verbose = True
elif o == "-h":
usage()
sys.exit()
elif o == "-p":
path = a
elif o == "-n":
execute = False
elif o == "-a":
containers = True
components = True
elif o == "--containers":
containers = True
elif o == "--components":
components = True
elif o == "-b":
backup = True
else:
assert False, "unhandled option"
maciContPath = "/MACI/Containers"
maciCompPath = "/MACI/Components"
for root, dirs, files in os.walk(path):
if root.endswith(maciCompPath) and components:
isCDBModified = False
if verbose:
print "###################################"
print "[Main] a CDB/MACI/Components found!:",root
print "###################################"
#we found a potential CDB Components xmls
for rootComp, dirsComp, filesComp in os.walk(root):
for filename in filesComp:
if filename.lower().endswith(".xml"):
#print "[Main] a Components xml found!:",rootComp+"/"+filename
#I assume xml files under MACI/Components components configuration
parser = Xml2Component()
filestr = parser.Parse(rootComp+"/"+filename)
if parser.isModified:
isCDBModified = True
if verbose:
print "---------------------------------------------------"
print "Modified file:"+rootComp+"/"+filename
print "---------------------------------------------------"
print filestr
print "---------------------------------------------------"
if backup:
shutil.copyfile(os.path.join(rootComp, filename),os.path.join(rootComp, filename+".bkp"))
if verbose:
print "[addImplLang] Saving a backup copy in "+rootComp+"/"+filename+".bkp"
if execute:
os.remove(os.path.join(rootComp, filename))
newfile = open(rootComp+"/"+filename, "w")
newfile.write(filestr)
newfile.close()
if not isCDBModified and verbose:
print "[Main] Nothing to do here."
if root.endswith(maciContPath) and containers:
if verbose:
print "###################################"
print "[Main] a CDB/MACI/Containers found!:",root
print "###################################"
#we found a potential CDB Containers xmls
isCDBModified = False
for rootCont, dirsCont, filesCont in os.walk(root):
for filename in filesCont:
if filename.lower().endswith(".xml"):
#print "[Main] a Container xml found!:",rootCont+"/"+filename
#I assume xml files under MACI/Containers are Container configuration
if not haveImplLang(rootCont, filename):
isCDBModified = True
idx = rootCont.find(maciContPath)
contName = rootCont[idx+len(maciContPath)+1:]
rootComp = root[:len(root)-10]+"Components"
if verbose:
print "[Main] Container doesn't have ImplLang, searching for container="+contName+" in "+rootComp
lang = guessImplLang(rootComp, contName)
if lang != None:
addImplLang(lang, rootCont, filename)
if verbose:
print "[Main] Added ImplLang="+lang+" to "+contName
else:
if verbose:
print "[Main] ERROR: CDB container file wasn't updated="+rootCont+"/"+filename
if not isCDBModified and verbose:
print "[Main] Nothing to do here."
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@cdb@ws@src@cdbAddImplLang.py@.PATH_END.py
|
{
"filename": "split_train_test.py",
"repo_name": "SKA-INAF/caesar-mrcnn-tf2",
"repo_path": "caesar-mrcnn-tf2_extracted/caesar-mrcnn-tf2-main/scripts/split_train_test.py",
"type": "Python"
}
|
#!/usr/bin/env python
from __future__ import print_function
##################################################
### MODULE IMPORT
##################################################
## STANDARD MODULES
import os
import sys
import json
import time
import argparse
import datetime
import random
import numpy as np
import copy
from sklearn.model_selection import train_test_split
## USER MODULES
from mrcnn import logger
############################################################
# PARSE/VALIDATE ARGS
############################################################
def parse_args():
    """Parse and return command line arguments.

    Returns:
        argparse.Namespace with attributes:
            inputfile (str): path of the file list to be split (required)
            test_data_fract (float): fraction of entries reserved for the
                test set (default 0.4)
    """
    # - Parse command line arguments
    parser = argparse.ArgumentParser(description='Mask R-CNN options')
    # NOTE: fixed user-facing help text ("splitted" -> "split")
    parser.add_argument('--inputfile', dest='inputfile', required=True, type=str, help='Input file to be split in train/test sets')
    parser.add_argument('--test_data_fract', dest='test_data_fract', required=False, type=float, default=0.4, help='Fraction of input data used for test (default=0.4)')
    args = parser.parse_args()
    return args
############################################################
# CREATE TRAIN/VAL SETS
############################################################
def create_train_val_sets_from_filelist(filelist, crossval_size=0.4, train_filename='train.dat', crossval_filename='crossval.dat'):
    """ Read input filelist with format img,mask,label and create train & val filelists """
    # - Collect the non-empty (stripped) lines of the input list
    with open(filelist, 'r') as f:
        entries = [stripped for stripped in (raw.strip() for raw in f) if stripped]

    # - Delegate the actual split; returns [train_filename, crossval_filename]
    return create_train_val_sets_from_list(entries, crossval_size, train_filename, crossval_filename)
def create_train_val_sets_from_list(data, crossval_size=0.4, train_filename='train.dat', crossval_filename='crossval.dat'):
    """ Read filelist with format img,mask,label and create train & val filelists

    Args:
        data: sequence of entry strings (one per sample); left unmodified
        crossval_size: fraction of entries assigned to the cross-validation set
        train_filename: output path for the training list
        crossval_filename: output path for the cross-validation list

    Returns:
        [train_filename, crossval_filename] on success, [] if data is empty
        (empty-list return kept for backward compatibility with callers).
    """
    # - Check if list is empty
    nentries = len(data)
    if nentries <= 0:
        logger.error("Given filelist is empty!")
        return []
    if nentries < 10:
        logger.warn("Given filelist contains less than 10 entries ...")

    # - Shuffle a copy so the caller's list is not reordered as a side effect
    data = list(data)
    random.shuffle(data)
    x_train, x_crossval = train_test_split(data, test_size=float(crossval_size))

    # - Write both sets to files
    logger.info("Writing #%d entries to training dataset list ..." % len(x_train))
    with open(train_filename, 'w') as f:
        for item in x_train:
            f.write("%s\n" % item)

    logger.info("Writing #%d entries to cross-validation dataset list ..." % len(x_crossval))
    with open(crossval_filename, 'w') as f:
        for item in x_crossval:
            f.write("%s\n" % item)

    # - Return filenames
    return [train_filename, crossval_filename]
############################################################
# MAIN
############################################################
def main():
    """Main function"""
    #===========================
    #== PARSE ARGS
    #===========================
    logger.info("Parsing script args ...")
    try:
        args = parse_args()
    except Exception as ex:
        logger.error("Failed to get and parse options (err=%s)" % (str(ex)))
        return 1

    #===========================
    #== SPLIT DATA
    #===========================
    logger.info("Creating file data split ...")
    create_train_val_sets_from_filelist(args.inputfile, args.test_data_fract)

    return 0
###################
## MAIN EXEC ##
###################
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == "__main__":
    sys.exit(main())
|
SKA-INAFREPO_NAMEcaesar-mrcnn-tf2PATH_START.@caesar-mrcnn-tf2_extracted@caesar-mrcnn-tf2-main@scripts@split_train_test.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/pip/vendor/html5lib/utils.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, unicode_literals
from types import ModuleType
try:
import xml.etree.cElementTree as default_etree
except ImportError:
import xml.etree.ElementTree as default_etree
class MethodDispatcher(dict):
    """Dict with 2 special properties:

    On initiation, keys that are lists, sets or tuples are converted to
    multiple keys so accessing any one of the items in the original
    list-like object returns the matching value

    md = MethodDispatcher({("foo", "bar"):"baz"})
    md["foo"] == "baz"

    A default value which can be set through the default attribute.
    """

    def __init__(self, items=()):
        # Build the flat entry list first and hand it to dict.__init__ in
        # one call: this is about twice as fast as assigning into self
        # directly. Please do careful performance testing before changing
        # anything here.
        entries = []
        for key, value in items:
            if type(key) in (list, tuple, frozenset, set):
                entries.extend((alias, value) for alias in key)
            else:
                entries.append((key, value))
        dict.__init__(self, entries)
        self.default = None

    def __getitem__(self, key):
        # Missing keys fall back to self.default instead of raising KeyError.
        return dict.get(self, key, self.default)
# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds
def isSurrogatePair(data):
    """Return True if data is a two-character high/low surrogate pair."""
    if len(data) != 2:
        return False
    high = ord(data[0])
    low = ord(data[1])
    # High surrogate range is D800-DBFF, low surrogate range is DC00-DFFF.
    return 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF
def surrogatePairToCodepoint(data):
    """Combine a high/low surrogate pair into its astral code point."""
    # Standard UTF-16 decoding: 0x10000 + (high - D800) * 0x400 + (low - DC00)
    high_part = ord(data[0]) - 0xD800
    low_part = ord(data[1]) - 0xDC00
    return 0x10000 + high_part * 0x400 + low_part
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
    """Wrap factory so each synthesized module is created only once.

    Returns a callable with the same signature as factory; results are
    memoized per synthesized module name.
    """
    moduleCache = {}

    def moduleFactory(baseModule, *args, **kwargs):
        # Module names are text on Python 3 and bytes on Python 2; match
        # whatever this interpreter uses for __name__.
        if isinstance(ModuleType.__name__, type("")):
            name = "_%s_factory" % baseModule.__name__
        else:
            name = b"_%s_factory" % baseModule.__name__
        try:
            return moduleCache[name]
        except KeyError:
            mod = ModuleType(name)
            mod.__dict__.update(factory(baseModule, *args, **kwargs))
            moduleCache[name] = mod
            return mod

    return moduleFactory
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@pip@vendor@html5lib@utils.py@.PATH_END.py
|
{
"filename": "train_disp_reconstructor.py",
"repo_name": "cta-observatory/ctapipe",
"repo_path": "ctapipe_extracted/ctapipe-main/src/ctapipe/tools/train_disp_reconstructor.py",
"type": "Python"
}
|
"""
Tool for training the DispReconstructor
"""
import astropy.units as u
import numpy as np
from ctapipe.core import Tool
from ctapipe.core.traits import Bool, Int, IntTelescopeParameter, Path
from ctapipe.io import TableLoader
from ctapipe.reco import CrossValidator, DispReconstructor
from ctapipe.reco.preprocessing import horizontal_to_telescope
from .utils import read_training_events
__all__ = [
"TrainDispReconstructor",
]
class TrainDispReconstructor(Tool):
    """
    Tool to train a `~ctapipe.reco.DispReconstructor` on dl1b/dl2 data.

    The tool first performs a cross validation to give an initial estimate
    on the quality of the estimation and then finally trains two models
    (estimating ``norm(disp)`` and ``sign(disp)`` respectively) per
    telescope type on the full dataset.
    """

    name = "ctapipe-train-disp-reconstructor"
    description = __doc__

    examples = """
    ctapipe-train-disp-reconstructor \\
        --config train_disp_reconstructor.yaml \\
        --input gamma.dl2.h5 \\
        --output disp_models.pkl
    """

    output_path = Path(
        default_value=None,
        allow_none=False,
        directory_ok=False,
        help=(
            "Output path for the trained reconstructor."
            " At the moment, pickle is the only supported format."
        ),
    ).tag(config=True)

    n_events = IntTelescopeParameter(
        default_value=None,
        allow_none=True,
        help=(
            "Number of events for training the models."
            " If not given, all available events will be used."
        ),
    ).tag(config=True)

    chunk_size = Int(
        default_value=100000,
        allow_none=True,
        help="How many subarray events to load at once before training on n_events.",
    ).tag(config=True)

    random_seed = Int(
        default_value=0, help="Random seed for sampling training events."
    ).tag(config=True)

    n_jobs = Int(
        default_value=None,
        allow_none=True,
        help="Number of threads to use for the reconstruction. This overwrites the values in the config of each reconstructor.",
    ).tag(config=True)

    project_disp = Bool(
        default_value=False,
        help=(
            "If true, ``true_disp`` is the distance between shower cog and"
            " the true source position along the reconstructed main shower axis."
            "If false, ``true_disp`` is the distance between shower cog"
            " and the true source position."
        ),
    ).tag(config=True)

    # Command-line aliases mapping short options to trait names.
    aliases = {
        ("i", "input"): "TableLoader.input_url",
        ("o", "output"): "TrainDispReconstructor.output_path",
        "n-events": "TrainDispReconstructor.n_events",
        "n-jobs": "DispReconstructor.n_jobs",
        "cv-output": "CrossValidator.output_path",
    }

    # Components whose config options are exposed by this tool.
    classes = [TableLoader, DispReconstructor, CrossValidator]

    def setup(self):
        """
        Initialize components from config.
        """
        self.loader = self.enter_context(
            TableLoader(
                parent=self,
            )
        )
        self.n_events.attach_subarray(self.loader.subarray)
        self.models = DispReconstructor(self.loader.subarray, parent=self)
        self.cross_validate = self.enter_context(
            CrossValidator(
                parent=self, model_component=self.models, overwrite=self.overwrite
            )
        )
        # Single RNG seeded once so event sampling is reproducible across runs.
        self.rng = np.random.default_rng(self.random_seed)
        self.check_output(self.output_path)

    def start(self):
        """
        Train models per telescope type using a cross-validation.
        """
        types = self.loader.subarray.telescope_types
        self.log.info("Inputfile: %s", self.loader.input_url)

        self.log.info("Training models for %d types", len(types))
        for tel_type in types:
            self.log.info("Loading events for %s", tel_type)
            # Model features plus the truth/pointing/Hillas columns needed to
            # compute the training target below.
            feature_names = self.models.features + [
                "true_energy",
                "true_impact_distance",
                "subarray_pointing_lat",
                "subarray_pointing_lon",
                "true_alt",
                "true_az",
                "hillas_fov_lat",
                "hillas_fov_lon",
                "hillas_psi",
            ]
            table = read_training_events(
                loader=self.loader,
                chunk_size=self.chunk_size,
                telescope_type=tel_type,
                reconstructor=self.models,
                feature_names=feature_names,
                rng=self.rng,
                log=self.log,
                n_events=self.n_events.tel[tel_type],
            )

            # Training target: signed disp computed from truth information.
            table[self.models.target] = self._get_true_disp(table)

            table = table[
                self.models.features
                + [self.models.target, "true_energy", "true_impact_distance"]
            ]

            self.log.info("Train models on %s events", len(table))
            self.cross_validate(tel_type, table)

            self.log.info("Performing final fit for %s", tel_type)
            self.models.fit(tel_type, table)
            self.log.info("done")

    def _get_true_disp(self, table):
        """Compute the signed ``disp`` training target.

        The sign is taken from the projection of the cog-to-source offset
        onto the main shower axis (``hillas_psi``); the norm is either that
        projected distance (``project_disp=True``) or the full angular
        distance between cog and true source position.
        """
        fov_lon, fov_lat = horizontal_to_telescope(
            alt=table["true_alt"],
            az=table["true_az"],
            pointing_alt=table["subarray_pointing_lat"],
            pointing_az=table["subarray_pointing_lon"],
        )

        # numpy's trigonometric functions need radians
        psi = table["hillas_psi"].quantity.to_value(u.rad)
        cog_lon = table["hillas_fov_lon"].quantity
        cog_lat = table["hillas_fov_lat"].quantity

        delta_lon = fov_lon - cog_lon
        delta_lat = fov_lat - cog_lat

        true_disp = np.cos(psi) * delta_lon + np.sin(psi) * delta_lat
        true_sign = np.sign(true_disp)

        if self.project_disp:
            true_norm = np.abs(true_disp)
        else:
            true_norm = np.sqrt((fov_lon - cog_lon) ** 2 + (fov_lat - cog_lat) ** 2)

        return true_norm * true_sign

    def finish(self):
        """
        Write-out trained models and cross-validation results.
        """
        self.log.info("Writing output")
        # Reset n_jobs so the stored model does not pin a thread count.
        self.models.n_jobs = None
        self.models.write(self.output_path, overwrite=self.overwrite)
        self.loader.close()
        self.cross_validate.close()
def main():
    # Instantiate the Tool and hand control to the traitlets application
    # runner (argument parsing, setup/start/finish lifecycle).
    TrainDispReconstructor().run()


if __name__ == "__main__":
    main()
|
cta-observatoryREPO_NAMEctapipePATH_START.@ctapipe_extracted@ctapipe-main@src@ctapipe@tools@train_disp_reconstructor.py@.PATH_END.py
|
{
"filename": "TruncationExtractor.py",
"repo_name": "CU-NESS/pylinex",
"repo_path": "pylinex_extracted/pylinex-master/pylinex/nonlinear/TruncationExtractor.py",
"type": "Python"
}
|
"""
File: pylinex/fitter/TruncationExtractor.py
Author: Keith Tauscher
Date: 1 Oct 2018
Description: Class which uses the rest of the module to perform an end-to-end
extraction. The inputs of the class are data and error vectors,
training set matrices and expanders.
"""
import os
import numpy as np
from distpy import DistributionSet, DiscreteUniformDistribution,\
DistributionSet, DiscreteUniformDistribution, KroneckerDeltaDistribution,\
JumpingDistributionSet, GridHopJumpingDistribution
from ..util import Savable, create_hdf5_dataset, int_types, sequence_types,\
bool_types
from ..expander import Expander, NullExpander, ExpanderSet
from ..basis import TrainedBasis, BasisSum, effective_training_set_rank
from ..fitter import Fitter
from ..loglikelihood import LinearTruncationLoglikelihood
from .Sampler import Sampler
from .BurnRule import BurnRule
from .NLFitter import NLFitter
try:
    # this runs with no issues in python 2 but raises NameError in python 3
    basestring
except NameError:
    # narrow except so only the missing name is handled (a bare except would
    # also swallow SystemExit/KeyboardInterrupt); this keeps python 2/3
    # compatible string type checking
    basestring = str
class TruncationExtractor(Savable):
"""
Class which, given:
1) 1D data array
2) 1D error array
3) names of different components of data
4) training set matrices for each of component of the data
5) (optional) Expander objects which transform curves from the space in
which the training set is defined to the space in which the data is
defined
6) information criterion to use for balancing parameter number and
goodness-of-fit
extracts components of the data.
"""
def __init__(self, data, error, names, training_sets, nterms_maxima,\
file_name, information_criterion='deviance_information_criterion',\
expanders=None, mean_translation=False, trust_ranks=False,\
verbose=True):
"""
Initializes an Extractor object with the given data and error vectors,
names, training sets, dimensions, compiled quantity, quantity to
minimize, and expanders.
data: 1D numpy.ndarray of observed values of some quantity
error: 1D numpy.ndarray of error values on the observed data
names: names of distinct bases to separate
training_sets: training sets corresponding to given names of bases.
Must be 2D array where the first dimension represents
the number of the curve.
nterms_maxima: the maximum number of terms for each basis
file_name: string location of the file at which to place Sampler
information_criterion: string name of the information criterion to
minimize to balance parameter number and
goodness-of-fit
expanders: list of Expander objects which expand each of the basis sets
mean_translation: if True (default False), training sets are
mean-subtracted before SVD is computed. the means are
then stored in the translation properties of the
resulting TrainedBasis objects.
trust_ranks: if True, all walkers are initialized on ranks
if False, they are initialized all over discrete space
verbose: if True, messages should be printed to the screen
"""
self.mean_translation = mean_translation
self.file_name = file_name
self.data = data
self.error = error
self.names = names
self.training_sets = training_sets
self.nterms_maxima = nterms_maxima
self.expanders = expanders
self.information_criterion = information_criterion
self.verbose = verbose
self.trust_ranks = trust_ranks
@property
def mean_translation(self):
"""
Property storing whether training sets have mean subtracted before SVD
is taken.
"""
if not hasattr(self, '_mean_translation'):
raise AttributeError("mean_translation was referenced before " +\
"it was set.")
return self._mean_translation
@mean_translation.setter
def mean_translation(self, value):
"""
Setter for the property determining whether training sets should have
mean subtracted before SVD is taken.
value: True if mean should be subtracted pre-SVD or False if it should
not
"""
if type(value) in bool_types:
self._mean_translation = value
else:
raise TypeError("mean_translation was set to a non-bool.")
@property
def trust_ranks(self):
"""
Property storing a boolean which determines whether or not the ranks of
the given training sets are to be trusted. If true, walkers are
initialized at ranks. If False, walkers are initialized all over
allowed parameter space.
"""
if not hasattr(self, '_trust_ranks'):
raise AttributeError("trust_ranks was referenced before it was " +\
"set.")
return self._trust_ranks
@trust_ranks.setter
def trust_ranks(self, value):
"""
Setter for the boolean determining whether training set ranks should be
used in initializing walkers.
value: either True or False
"""
if type(value) in bool_types:
self._trust_ranks = value
else:
raise TypeError("trust_ranks was set to a non-bool.")
@property
def names(self):
"""
Property storing the names of each subbasis.
"""
if not hasattr(self, '_names'):
raise AttributeError("names was referenced before it was set.")
return self._names
@names.setter
def names(self, value):
"""
Setter for the names of the subbases.
value: sequence of strings
"""
if type(value) in sequence_types:
if all([isinstance(element, basestring) for element in value]):
self._names = [element for element in value]
else:
raise TypeError("Not all elements of names sequence were " +\
"strings.")
else:
raise TypeError("names was set to a non-sequence.")
@property
def nterms_maxima(self):
"""
Property storing the maximum numbers of terms necessary for each basis.
"""
if not hasattr(self, '_nterms_maxima'):
raise AttributeError("nterms_maxima was referenced before it " +\
"was set.")
return self._nterms_maxima
@nterms_maxima.setter
def nterms_maxima(self, value):
"""
Setter for the maximum numbers of terms for each basis
value: sequence of numbers
"""
if type(value) in sequence_types:
if all([(type(element) in int_types) for element in value]):
if all([(element > 1) for element in value]):
self._nterms_maxima =\
np.array([element for element in value])
else:
raise ValueError("Not all maximum numbers of terms " +\
"were greater than 1.")
else:
raise TypeError("Not all maximum numbers of terms were " +\
"integers.")
else:
raise TypeError("nterms_maxima was set to a non-sequence.")
@property
def verbose(self):
"""
Property storing a boolean switch determining which things are printed.
"""
if not hasattr(self, '_verbose'):
raise AttributeError("verbose was referenced before it was set.")
return self._verbose
@verbose.setter
def verbose(self, value):
"""
Setter for the verbose property which decided whether things are
printed.
value: must be a bool
"""
if type(value) in bool_types:
self._verbose = value
else:
raise TypeError("verbose was set to a non-bool.")
@property
def information_criterion(self):
"""
Property storing string name of the information criterion to minimize.
"""
if not hasattr(self, '_information_criterion'):
raise AttributeError("information_criterion was referenced " +\
"before it was set.")
return self._information_criterion
@information_criterion.setter
def information_criterion(self, value):
"""
Allows user to supply string name of the information criterion to
minimize.
"""
if isinstance(value, basestring):
self._information_criterion = value
else:
raise TypeError("information_criterion was not a string.")
@property
def data(self):
"""
Property storing the data from which pieces are to be extracted. Should
be a 1D numpy array.
"""
if not hasattr(self, '_data'):
raise AttributeError("data was referenced before it was set.")
return self._data
@data.setter
def data(self, value):
"""
Setter for the data property. It checks to ensure that value is a 1D
numpy.ndarray or can be cast to one.
"""
try:
value = np.array(value)
except:
raise TypeError("data given to Extractor couldn't be cast as a " +\
"numpy.ndarray.")
if value.ndim in [1, 2]:
self._data = value
else:
raise ValueError("data must be 1D or 2D.")
@property
def num_channels(self):
"""
Property storing the number of channels in the data. A positive integer
"""
if not hasattr(self, '_num_channels'):
self._num_channels = self.data.shape[-1]
return self._num_channels
@property
def error(self):
"""
Property storing the error level in the data. This is used to define
the dot product.
"""
if not hasattr(self, '_error'):
raise AttributeError("error was referenced before it was set.")
return self._error
@error.setter
def error(self, value):
"""
Setter for the error property.
value: must be a 1D numpy.ndarray of positive values with the same
length as the data.
"""
try:
value = np.array(value)
except:
raise TypeError("error could not be cast to a numpy.ndarray.")
if value.shape == (self.num_channels,):
self._error = value
else:
raise ValueError("error was set to a numpy.ndarray which " +\
"didn't have the expected shape (i.e. 1D with " +\
"length given by number of data channels.")
@property
def num_bases(self):
"""
Property storing the number of sets of basis functions (also the same
as the number of distinguished pieces of the data).
"""
if not hasattr(self, '_num_bases'):
self._num_bases = len(self.names)
return self._num_bases
@property
def training_sets(self):
"""
Property storing the training sets used in this extraction.
returns a list of numpy.ndarrays
"""
if not hasattr(self, '_training_sets'):
raise AttributeError("training_sets was referenced before it " +\
"was set.")
return self._training_sets
@training_sets.setter
def training_sets(self, value):
"""
Allows user to set training_sets with list of numpy arrays.
value: sequence of numpy.ndarray objects storing training sets, which
are 2D
"""
if type(value) in sequence_types:
num_training_sets = len(value)
if num_training_sets == self.num_bases:
if all([isinstance(ts, np.ndarray) for ts in value]):
if all([(ts.ndim == 2) for ts in value]):
self._training_sets = [ts for ts in value]
else:
raise ValueError("At least one of the training " +\
"sets given to Extractor was not 2D.")
else:
raise TypeError("At least one of the given training " +\
"sets given to Extractor was not a " +\
"numpy.ndarray.")
else:
raise ValueError(("The number of names given to Extractor " +\
"({0}) was not equal to the number of training sets " +\
"given ({1})").format(self.num_bases, num_training_sets))
else:
raise TypeError("training_sets of Extractor class was set to a " +\
"non-sequence.")
@property
def training_set_ranks(self):
"""
Property storing the effective ranks of the training sets given. This
is computed by fitting the training set with its own SVD modes and
checking how many terms are necessary to fit down to the
(expander-contracted) noise level.
"""
if not hasattr(self, '_training_set_ranks'):
self._training_set_ranks = {}
for (name, expander, training_set) in\
zip(self.names, self.expanders, self.training_sets):
self._training_set_ranks[name] = effective_training_set_rank(\
training_set, expander.contract_error(self.error),\
mean_translation=self.mean_translation)
return self._training_set_ranks
@property
def training_set_lengths(self):
"""
Property storing the number of channels in each of the different
training sets.
"""
if not hasattr(self, '_training_set_lengths'):
self._training_set_lengths =\
[ts.shape[-1] for ts in self.training_sets]
return self._training_set_lengths
@property
def total_number_of_combined_training_set_curves(self):
"""
The number of combined training set curves which are given by the
training sets of this Extractor.
"""
if not hasattr(self, '_total_number_of_combined_training_set_curves'):
self._total_number_of_combined_training_set_curves =\
np.prod(self.training_set_lengths)
return self._total_number_of_combined_training_set_curves
@property
def expanders(self):
"""
Property storing the Expander objects connecting the training set
spaces to the data space.
returns: list of values which are either None or 2D numpy.ndarrays
"""
if not hasattr(self, '_expanders'):
raise AttributeError("expanders was referenced before it was set.")
return self._expanders
@expanders.setter
def expanders(self, value):
"""
Allows user to set expanders.
value: list of length self.num_bases. Each element is either None (only
allowed if length of training set corresponding to element is
num_channels) or an Expander object
"""
if type(value) is type(None):
value = [NullExpander()] * self.num_bases
if type(value) in sequence_types:
num_expanders = len(value)
if num_expanders == self.num_bases:
for ibasis in range(self.num_bases):
expander = value[ibasis]
if isinstance(expander, Expander):
ts_len = self.training_set_lengths[ibasis]
if expander.is_compatible(ts_len, self.num_channels):
continue
else:
raise ValueError("At least one expander was " +\
"not compatible with the " +\
"given training set length " +\
"and number of channels.")
else:
raise TypeError("Not all expanders are Expander " +\
"objects.")
self._expanders = value
else:
raise ValueError(("The number of expanders ({0}) given was " +\
"not equal to the number of names and training sets " +\
"({1}).").format(num_expanders, self.num_bases))
else:
raise TypeError("expanders was set to a non-sequence.")
@property
def basis_sum(self):
"""
Property storing the Basis objects associated with all training sets.
"""
if not hasattr(self, '_basis_sum'):
bases = []
for ibasis in range(self.num_bases):
training_set = self.training_sets[ibasis]
num_basis_vectors = self.nterms_maxima[ibasis]
expander = self.expanders[ibasis]
basis = TrainedBasis(training_set, num_basis_vectors,\
error=self.error, expander=expander,\
mean_translation=self.mean_translation)
bases.append(basis)
self._basis_sum = BasisSum(self.names, bases)
return self._basis_sum
@property
def loglikelihood(self):
"""
Property storing the TruncationLoglikelihood which will be explored to
determine the number of terms to use of each basis.
"""
if not hasattr(self, '_loglikelihood'):
self._loglikelihood = LinearTruncationLoglikelihood(\
self.basis_sum, self.data, self.error,\
information_criterion=self.information_criterion)
return self._loglikelihood
@property
def file_name(self):
"""
Property storing the location of the file at which to save the Sampler.
"""
if not hasattr(self, '_file_name'):
raise AttributeError("file_name was referenced before it was set.")
return self._file_name
@file_name.setter
def file_name(self, value):
"""
Setter for the location of the file at which to save the Sampler.
"""
if isinstance(value, basestring):
self._file_name = value
else:
raise TypeError("file_name given was not a string.")
@property
def optimal_truncations(self):
"""
Property storing the sequence of what have been determined to be
optimal truncations.
"""
if not hasattr(self, '_optimal_truncations'):
if not os.path.exists(self.file_name):
parameter_names =\
['{!s}_nterms'.format(name) for name in self.names]
jumping_distribution_set = JumpingDistributionSet()
jumping_probability = 0.9
jumping_distribution =\
GridHopJumpingDistribution(ndim=self.num_bases,\
jumping_probability=jumping_probability,\
minima=np.ones_like(self.nterms_maxima),\
maxima=self.nterms_maxima)
jumping_distribution_set.add_distribution(\
jumping_distribution, parameter_names)
guess_distribution_set = DistributionSet()
for (name, nterms_maximum) in\
zip(self.names, self.nterms_maxima):
if self.trust_ranks:
guess_distribution = KroneckerDeltaDistribution(\
self.training_set_ranks[name], is_discrete=True)
else:
guess_distribution =\
DiscreteUniformDistribution(1, nterms_maximum)
guess_distribution_set.add_distribution(\
guess_distribution, '{!s}_nterms'.format(name))
prior_distribution_set = DistributionSet()
for (parameter_name, nterms_maximum) in\
zip(parameter_names, self.nterms_maxima):
prior_distribution =\
DiscreteUniformDistribution(1, nterms_maximum)
prior_distribution_set.add_distribution(\
prior_distribution, parameter_name)
nwalkers = 100
steps_per_checkpoint = 100
num_checkpoints = 10
sampler = Sampler(self.file_name, nwalkers,\
self.loglikelihood, verbose=self.verbose,\
jumping_distribution_set=jumping_distribution_set,\
guess_distribution_set=guess_distribution_set,\
prior_distribution_set=prior_distribution_set,\
steps_per_checkpoint=steps_per_checkpoint)
sampler.run_checkpoints(num_checkpoints, silence_error=True)
sampler.close()
burn_rule = BurnRule(min_checkpoints=1, desired_fraction=1)
analyzer = NLFitter(self.file_name, burn_rule=burn_rule)
self._optimal_truncations =\
analyzer.maximum_probability_parameters.astype(int)
return self._optimal_truncations
@property
def truncated_basis_sum(self):
"""
Property storing the basis sum with the "optimal" truncations.
"""
if not hasattr(self, '_truncated_basis_sum'):
self._truncated_basis_sum =\
self.loglikelihood.truncated_basis_sum(\
self.optimal_truncations)
return self._truncated_basis_sum
@property
def optimal_fitter(self):
"""
Property storing the Fitter object which minimizes the given
information criterion.
"""
if not hasattr(self, '_optimal_fitter'):
self._optimal_fitter =\
Fitter(self.truncated_basis_sum, self.data, error=self.error)
return self._optimal_fitter
@property
def expander_set(self):
"""
Property yielding an ExpanderSet object organizing the expanders here
so that "true" curves for (e.g.) systematics can be found by using the
data as well as a "true" curve for the signal.
"""
if not hasattr(self, '_expander_set'):
self._expander_set = ExpanderSet(self.data, self.error,\
**{self.names[iname]: self.expanders[iname]\
for iname in range(self.num_bases)})
return self._expander_set
def fill_hdf5_group(self, group, save_training_sets=False,
    save_channel_estimates=False):
    """
    Fills the given hdf5 file group with data about this extraction,
    including the optimal fitter.

    group: the hdf5 file group into which to save data
    save_training_sets: bool determining whether to save training sets
    save_channel_estimates: save_channel_estimates argument to pass on to
                            Fitter.fill_hdf5_group
    """
    # Store the raw data and error vectors, keeping links so the fitter
    # subgroup can reference them instead of duplicating them.
    data_link = create_hdf5_dataset(group, 'data', data=self.data)
    error_link = create_hdf5_dataset(group, 'error', data=self.error)
    # Record basis names, keyed by their integer index.
    names_group = group.create_group('names')
    for (name_index, name) in enumerate(self.names):
        names_group.attrs['{:d}'.format(name_index)] = name
    # Save the expanders and collect links to them for the fitter.
    expanders_group = group.create_group('expanders')
    self.expander_set.fill_hdf5_group(expanders_group)
    expander_links = [expanders_group[name] for name in self.names]
    fitter_group = group.create_group('optimal_fitter')
    self.optimal_fitter.fill_hdf5_group(fitter_group,
        data_link=data_link, error_link=error_link,
        expander_links=expander_links,
        save_channel_estimates=save_channel_estimates)
    # Training-set ranks, one attribute per basis name.
    ranks_group = group.create_group('ranks')
    for name in self.names:
        ranks_group.attrs[name] = self.training_set_ranks[name]
    if save_training_sets:
        training_group = group.create_group('training_sets')
        for basis_index in range(self.num_bases):
            create_hdf5_dataset(training_group, self.names[basis_index],
                data=self.training_sets[basis_index])
|
CU-NESSREPO_NAMEpylinexPATH_START.@pylinex_extracted@pylinex-master@pylinex@nonlinear@TruncationExtractor.py@.PATH_END.py
|
{
"filename": "euctwfreq.py",
"repo_name": "davidharvey1986/pyRRG",
"repo_path": "pyRRG_extracted/pyRRG-master/unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_vendor/chardet/euctwfreq.py",
"type": "Python"
}
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Confidence ratio typical for EUC-TW text: about 25% of the ideal
# distribution ratio computed above, still much higher than the
# random-distribution ratio (~0.105).
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Number of entries used from the char-to-frequency-order table below.
EUCTW_TABLE_SIZE = 5376
EUCTW_CHAR_TO_FREQ_ORDER = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
)
|
davidharvey1986REPO_NAMEpyRRGPATH_START.@pyRRG_extracted@pyRRG-master@unittests@bugFixPyRRG@lib@python3.7@site-packages@pip@_vendor@chardet@euctwfreq.py@.PATH_END.py
|
{
"filename": "_nbit_base.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_typing/_nbit_base.py",
"type": "Python"
}
|
"""A module with the precisions of generic `~numpy.number` types."""
from .._utils import set_module
from typing import final
@final  # Disallow the creation of arbitrary `NBitBase` subclasses
@set_module("numpy.typing")
class NBitBase:
    """
    A type representing `numpy.number` precision during static type checking.

    Used exclusively for the purpose of static type checking, `NBitBase`
    represents the base of a hierarchical set of subclasses.
    Each subsequent subclass is herein used for representing a lower level
    of precision, *e.g.* ``64Bit > 32Bit > 16Bit``.

    .. versionadded:: 1.20

    Examples
    --------
    Below is a typical usage example: `NBitBase` is herein used for annotating
    a function that takes a float and integer of arbitrary precision
    as arguments and returns a new float of whichever precision is largest
    (*e.g.* ``np.float16 + np.int64 -> np.float64``).

    .. code-block:: python

        >>> from __future__ import annotations
        >>> from typing import TypeVar, TYPE_CHECKING
        >>> import numpy as np
        >>> import numpy.typing as npt

        >>> S = TypeVar("S", bound=npt.NBitBase)
        >>> T = TypeVar("T", bound=npt.NBitBase)

        >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]:
        ...     return a + b

        >>> a = np.float16()
        >>> b = np.int64()
        >>> out = add(a, b)

        >>> if TYPE_CHECKING:
        ...     reveal_locals()
        ...     # note: Revealed local types are:
        ...     # note:     a: numpy.floating[numpy.typing._16Bit*]
        ...     # note:     b: numpy.signedinteger[numpy.typing._64Bit*]
        ...     # note:     out: numpy.floating[numpy.typing._64Bit*]
    """

    def __init_subclass__(cls) -> None:
        # Emulate a sealed class hierarchy: only the precision markers that
        # numpy itself ships may subclass `NBitBase`; any third-party subclass
        # is rejected at class-creation time.
        permitted = frozenset((
            "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit",
            "_64Bit", "_32Bit", "_16Bit", "_8Bit",
        ))
        if cls.__name__ not in permitted:
            raise TypeError('cannot inherit from final class "NBitBase"')
        super().__init_subclass__()
@final
@set_module("numpy._typing")
# Silence errors about subclassing a `@final`-decorated class
class _256Bit(NBitBase):  # type: ignore[misc]
    """Type-checking marker for 256-bit precision (top of the hierarchy)."""
    pass
@final
@set_module("numpy._typing")
class _128Bit(_256Bit):  # type: ignore[misc]
    """Type-checking marker for 128-bit precision."""
    pass
@final
@set_module("numpy._typing")
class _96Bit(_128Bit):  # type: ignore[misc]
    """Type-checking marker for 96-bit precision."""
    pass
@final
@set_module("numpy._typing")
class _80Bit(_96Bit):  # type: ignore[misc]
    """Type-checking marker for 80-bit precision."""
    pass
@final
@set_module("numpy._typing")
class _64Bit(_80Bit):  # type: ignore[misc]
    """Type-checking marker for 64-bit precision."""
    pass
@final
@set_module("numpy._typing")
class _32Bit(_64Bit):  # type: ignore[misc]
    """Type-checking marker for 32-bit precision."""
    pass
@final
@set_module("numpy._typing")
class _16Bit(_32Bit):  # type: ignore[misc]
    """Type-checking marker for 16-bit precision."""
    pass
@final
@set_module("numpy._typing")
class _8Bit(_16Bit):  # type: ignore[misc]
    """Type-checking marker for 8-bit precision (bottom of the hierarchy)."""
    pass
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_typing@_nbit_base.py@.PATH_END.py
|
{
"filename": "randomness.ipynb",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/docs_nnx/guides/randomness.ipynb",
"type": "Jupyter Notebook"
}
|
# Randomness
Random state handling in Flax NNX was radically simplified compared to systems like Haiku and Flax Linen because Flax NNX _defines the random state as an object state_. In essence, this means that in Flax NNX, the random state is: 1) just another type of state; 2) stored in `nnx.Variable`s; and 3) held by the models themselves.
The Flax NNX [pseudorandom number generator (PRNG)](https://flax.readthedocs.io/en/latest/glossary.html#term-RNG-sequences) system has the following main characteristics:
- It is **explicit**.
- It is **order-based**.
- It uses **dynamic counters**.
This is a bit different from [Flax Linen's PRNG system](https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html), which is `(path + order)`-based, and uses static counters.
> **Note:** To learn more about random number generation in JAX, the `jax.random` API, and PRNG-generated sequences, check out this [JAX PRNG tutorial](https://jax.readthedocs.io/en/latest/random-numbers.html).
Let's start with some necessary imports:
```python
from flax import nnx
import jax
from jax import random, numpy as jnp
```
## `Rngs`, `RngStream`, and `RngState`
In Flax NNX, the `nnx.Rngs` type is the primary convenience API for managing the random state(s). Following Flax Linen's footsteps, `nnx.Rngs` have the ability to create multiple named PRNG key [streams](https://jax.readthedocs.io/en/latest/jep/263-prng.html), each with its own state, for the purpose of having tight control over randomness in the context of [JAX transformations (transforms)](https://jax.readthedocs.io/en/latest/key-concepts.html#transformations).
Here are the main PRNG-related types in Flax NNX:
* **`nnx.Rngs`**: The main user interface. It defines a set of named `nnx.RngStream` objects.
* **`nnx.RngStream`**: An object that can generate a stream of PRNG keys. It holds a root `key` and a `count` inside an `nnx.RngKey` and `nnx.RngCount` `nnx.Variable`s, respectively. When a new key is generated, the count is incremented.
* **`nnx.RngState`**: The base type for all RNG-related states.
* **`nnx.RngKey`**: NNX Variable type for holding PRNG keys. It includes a `tag` attribute containing the name of the PRNG key stream.
* **`nnx.RngCount`**: NNX Variable type for holding PRNG counts. It includes a `tag` attribute containing the PRNG key stream name.
To create an `nnx.Rngs` object you can simply pass an integer seed or `jax.random.key` instance to any keyword argument of your choice in the constructor.
Here's an example:
```python
rngs = nnx.Rngs(params=0, dropout=random.key(1))
nnx.display(rngs)
```
<script> (()=>{ if (customElements.get('treescope-container') === undefined) { class TreescopeContainer extends HTMLElement { constructor() { super(); this.attachShadow({mode: "open"}); this.defns = {}; this.state = {}; } } customElements.define("treescope-container", TreescopeContainer); } if (customElements.get('treescope-run-here') === undefined) { class RunHere extends HTMLElement { constructor() { super() } connectedCallback() { const run = child => { const fn = new Function(child.textContent); child.textContent = ""; fn.call(this); this.remove(); }; const child = this.querySelector("script"); if (child) { run(child); } else { new MutationObserver(()=>{ run(this.querySelector("script")); }).observe(this, {childList: true}); } } } customElements.define("treescope-run-here", RunHere); } })(); </script> <treescope-container class="treescope_out_25e97e3361c04deb80d282d37cd07e3e" ></treescope-container> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_25e97e3361c04deb80d282d37cd07e3e")) .filter((elt) => !elt.dataset.setup) )[0]; root.dataset.setup = 1; const msg = document.createElement("span"); msg.style = "color: #cccccc; font-family: monospace;"; msg.textContent = "(Loading...)"; root.state.loadingMsg = msg; root.shadowRoot.appendChild(msg); root.state.chain = new Promise((resolve, reject) => { const observer = new IntersectionObserver((entries) => { for (const entry of entries) { if (entry.isIntersecting) { resolve(); observer.disconnect(); return; } } }, {rootMargin: "1000px"}); window.setTimeout(() => { observer.observe(root); }, 0); }); root.state.deferring = false; const _insertNode = (node) => { for (let oldScript of node.querySelectorAll("script")) { let newScript = document.createElement("script"); newScript.type = oldScript.type; newScript.textContent = oldScript.textContent; oldScript.parentNode.replaceChild(newScript, oldScript); } if (root.state.loadingMsg) { 
root.state.loadingMsg.remove(); root.state.loadingMsg = null; } root.shadowRoot.appendChild(node); }; root.defns.insertContent = ((contentNode, compressed) => { if (compressed) { root.state.deferring = true; } if (root.state.deferring) { root.state.chain = (async () => { await root.state.chain; if (compressed) { const encoded = contentNode.textContent; const blob = new Blob([ Uint8Array.from(atob(encoded), (m) => m.codePointAt(0)) ]); const reader = blob.stream().pipeThrough( new DecompressionStream("deflate") ).pipeThrough( new TextDecoderStream("utf-8") ).getReader(); const parts = []; while (true) { const step = await reader.read(); if (step.done) { break; } parts.push(step.value); } const tpl = document.createElement('template'); tpl.innerHTML = parts.join(""); _insertNode(tpl.content); } else { _insertNode(contentNode.content); } })(); } else { _insertNode(contentNode.content); } }); </script></treescope-run-here><div style="display:none"> <script type="application/octet-stream" >eNrtWQtT20gS/isTpWqxDyz8tjHgOtn4BQECJoFwu8WNpZE0WB6J0djGbPHfr0eS38Yht7Ahm0AVmJmefk53fz3s+WLkkLIqOCG+7nrkhruuQH8iz/WpoC4rIU4cLOiA7CLTZSJh4h51RiXUc5nre1iH9aFNBUkEf5SQx2HFob5IBKwTYuTBKnMZLHew3rW422dGQncdl5fCo7so+qvjAAHwo4awS8ikAsiYIEzsIg8bBmVWwiGmKKG0bkshjCRsQi0bVlJqTrJhAlPQeXIs+pAYUJ92qEMFaI77wp3QJigTnDKf6gmfPpBwN1L3cW87dM/exD0J3mcgk8Oar3PqCSTt29/AnudQHUuPbbu6INJ6TnBvoxyLxffL4FCQ5wtkEJP5aB8Jm/qqRcQ5ePvENUgsrtquL9RgH0wjAt14hEmTNV1ylYf+88eqnSZmhkNgm/UdZzeUoIKabddlsBoburwbR7M6uJewJLfmlgXV5aJHuOnyHmY6UZk7jMWD+IKA2NIOSoSH9lAmHQc+1ESxBa1VhzBL2Gh/HyUlyVrVORF9zsDviDg+mSpm95nUbJG1b1NTSP0CAvnhEb6fkBCDW8UMd6hyctcnvtAY7QXhqnPcI7HQJ3HJY3dJkNf37dCNuytsHIvYD81YY+XzdZBahIEUrmU5YVbeBJkDt9WTvOQKccQWIgO44FEkpXbB32qXjKTTFa5IhSJiVXew73+A5Iz4xpQJz5seXENlLPwxDv6E6x/c8fLe9qoEMOgABQz3lfnyoSCBO2Apud9XkgpyGSgDZjOgW3ftV9sak2fGViqQdmHBCirGjS9z1wpuSlBA3uNMKpeCSvAoWRLOiXHjQVEhtusYhM8SFuR3VNMCniVEBYYklodNIMcd0IWBkk9Uw0c0T3YTqg/UBvVB6Ghc9RYJURk5uEOcUqlDIKHIjFZ68LW7Ul5Y+RIpWfqiCpncncqiLKiHHceVJfRJmbY7IHxZsoF51yfYgjiy5dMl
5orYwpKN/Vg54Fle6YfgTEm3id4lRjyO/hWf6iCPrj40pp/TMCjhJbTxezrX0Te+p3rzh55UMv83KCnjKAX3uS8D6LnQyAhfIZf6Lyc2SIVAUCJISP+pO/4yUqfmCXIvlqWo1L8xKffFjctu5PVfkVrrUklN52Q2rQwV+svqhxFfVFFa1cPcAugRqhEk9ONflAb10Bt1+kJAz19VgKbbqy6tgpQFKnAk4LnVxL+TVNZQFuDgxjGGW0Gxg9qjXsd1fHTaF9JeA1XDk/DbG0FiJIak0wVoF1beHhR7G2o4AC8m4DjFPjEmgPA9Scrv3eVrHp4OEFtS3SG9RSvD/FhhxepyNz2pDrF/o0MfAMdOzmNTzHWPcZ1eJ3PhzLzIWdejAeaxRMLAAicwg8AGmCA+uyyFSKDDMRvf5oAtSvmIgMcAxSbcvvg2UyYaQGAoMd7NaxKIRO9oz3O5wGyJd4e7XcJu5Mq0GH3duzPHZvw5DvOjKmEDKGbc6AA6DU5YpOo8rAee84QvVF1nUidqpHOpqmNHj8HsAcA35d0HWEn1BZbnJ/q+miYdlwOEiTQxXAG2Sy1mnXfXxw4DKHkDQ5hJ74HJXJoUgzQB3IwlLhpiziDxbsaFfRwL08R6KrOC0APg+edkEOPR3CWrV+SkaCmRVIOyOp32SsGshnnC4tigELZYKpMziLWFXLjRFkFJ0C6v21vhDfdALNSLYAlFXl5SZamwvpjj50I+NuZRHdrBvXUc7EGJ+jrM+/Ya/rSEENwFMgIicg+5bDxJ8xJ6rBIRGRoSTKeRVa6YnwjU+TEDreGwaCqbG3xWEz5H2JN8XnDWl+MXeqdxjkeqyd0ejHR6vyfHMZmHvjrADsx9sXhc9V0Y+ILslIOb/K2GnVQObc/spcoGiqP4ZEz2bUKEnKXJEFXb7ba0pi3X5GQcbMLcGQxD7RHTY//9d9S/dTKuE9/ey2enJybfBpxobRg9y2STUJh8rpdQnzsx2VhKcn976JpmercDrSuf3TKSO41jS6towVfrTNPc4FPlfAg/m3VNq2nrvio9TbO67pHRqlWqwy+advGleqgdtypVrW7dt5ofbOFXjimxMvWDq/SHVv7LoO316cfj3EXq8Kp1/vl4cHn8ID6O6vXq5qXVvaCVg6RND876hzWjcZtsdrbNQcvw7o7y9t0lpWf9Y9awm+YnoX3KV054Vqu3WLeW1z/1+2zzPHen+93hwKw723f3Vs0tWp3DYaOYamrbTDvPfeD8MHW+aT0kz42kdmimrJNCddi4TVtJd9Q/LxR6tVR+2LzaObUsj1x0R1nS6jzk9A4/bQisWWetk+EB9kf+Wb/Vurqs1YfaxzOv9cX4tL29aRUuClcZkTSPPt5pgxzw/KCdFLTjodazHs7bm/3rNqld3afNvP5wkj1vjnL9inb0ULn16l6GNs+qteR1/2O2XWBm5UOtWT/uaXSzOKilbZayC5udz8Or22GTDw4an6rs1qzVLLF5ql87TiG3Uz0cVor2Tvb4uNHONK41q9fK3VbOdsRFgzR3apVKq5E5sLLn21/0UUdrQEw/H21rZw2skeOqozUfaqfWtbDylY/W6WnroNKlZzlSr1xVK3WdJj2bux6Du+Fd1w5SD6lu26yawh4dsaaB637TTJ70GrWTfMXQ7j5/9rDw29c9w8B0J20+7GQ/0du7vNfj+VP3S7VNeaM3OGxk2pftTL2W1itn5sVm03G9RrbuD3PYussX6TVpnzjeJas0W8Q45qR/edeo9lKXdd5tt+9z6fzlpT/UQKM4Ct7FRGwjuNYbsmX9F35Msh8brgftepqSwWueqqprKLbCnP0DeK1/NbGD56UAUYVgD3jD9WA6ioWYa/7xD1LwwpXpC2QRJpNrPpQHyUKCTInM8BBTgRgeUAsLl6vA2eu4mBvqkFNBLmAei015gbERr+kLE+CEmDKDQOXbEki5oD0CUDU2fnxcOsdJD+Dk0tHHLZROJpMBHIDiC8ggFsxS
q+XOwExlqpycIscVTD7HKeg9qmPqQGETLpLE74LKBhCAAZSCakzBZwQbEiVvzvoueif7yguZRNzjJ7L555hF8KKU98IOvUeZ1486jRL05I57r6xkErVv2AxbNygRHJ6XO99plfJvjpBqA8V6urnNBWCplE0H36uM3aucWQ7tqGOO4a9zZvmxve3IollGS/BeWbevlAEK4p6//7Qn/3GOawdYYqX3NqZz1AZyWVUmx/7GN1aH4D01voEmQ96+ooZeVlDQyfeVmQkQwEC0Kyf3xRkVdoNsgGprw+fI9vK8TX8p/l0yeqXgv6mgH5HR94i4/C/B+qhLiu8T+QArR7Gnxr4yec/fMfBOJ6+buUI+mS1k4K88MQp6zuzkd1LFtLHAd9X/AZTyLQQkwOkIDJRBNvnoN0vsxuLzJmwtR30yoCll9OTVGE9Pyt8dznDG+HpQQ7r/P7TPDbRSFtiaT+HZf91Ahr2/Txd2Q8XCz89w6tYbdDzY+Qy3A9VLOT38FV9fv6xp/fpBb/RrFqjn32IdfCp+hlZUlYZ+l2YUuPgr0Q5o3lhDSuVzuJgupJO6YWQL+XSxaBaNZNrM4HRxJ53NfWND6lMmMukfuxEFYXpWK5qh/NWMXtL5X29HE7rv25B+ONe+bon66Xr8a42dzy8KBnc9ty9+vTK8brgjNz8V72j71zvDP+mdIQrqOhw/Q/LGgJ1ZLHQKKSNvkqSR7ZikmNSLGUB6eb2QN/Jp8jO+NMxEaz2+WyJ8M/Au0uzHw3ezLl0D7xbIfkGRN1Snfj04vIUHh3Eo18L5OaK39uRAUnohmzYKO2Y+S4pZbOYLOb2D83nDLKZSxs/05DAXp+f1pLf46PDDd6WvPjssEf56d3hTlepnc+qrDaKv7cgJpUEH5f8BagpglQ==</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_25e97e3361c04deb80d282d37cd07e3e")) .filter((elt) => !elt.dataset['step0']) )[0]; root.dataset['step0'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
<div style="display:none"> <script type="application/octet-stream" >eNrtVslu2zAUvPcrCAUIpCZRZNmW5CUG2qQFcumhPfQQBAZFPtlsGFKl6DRGkX/voyyvdbZDLkV9sES9mbcMh5aHXNyRys4lnHlcVKWk8z5RWoFHBD/zCm3GHAowBvi4x2kvT1jRTZOok7ZxlQBPWbfIk14ri7k3GlYlVfjt8o1CpiXSaZ4buCO/Sb3sk4Mki2kUDchDA2D69haUHf+aghrDPWbgwPtKWz8stOQ0lzBWmkN/Sit/JGkOcrQdGVs9mUioOX02BXYDPAjI+2CjKq0/WHV4umiv7pUwSavKzbmRDudYVBkKVc4ssfMS1anz5vre28tpWsDgovxoeFqTt8tsKuLtC+3VYgdZxxAuaVm54KG0Axxqofwubp3jgDSgH/Q+/GAMnZMbmDtyYeaHEzvwg6fLTNZldi6NWk8U34oJfKYsGopNheQG1JNxb1Q36/vBMeH1Vmy3HRB9BwZ9K9Sk/+4qItH1I21uX9D4o+Er3N9KujSL0zhinHfSJM6yIuNRXLRpnPXiTnft/kc33HW9Ut+PlvPMhLLtONgn8GubLLI0T1s8KSDinbyALGJZG7tOWJrwJIa9Tf43/79j/tYbmR9aLO3EPO0VSQeyDi2StMtymiS8yFot/nbmtwagYrqEEzNTJ1Mw7pebGVEurUnLUgpGrdDqVDML9qRCDr31RkyrClHUTMBe8oqckSvv5S+zY+K9/Oxvgp89g1uZnxP2erAYg2tmtLY4hJ2KKsSRvuLyC54+PxiQZlTUqgE18PDnDMz8G0hgVhvfC1dyjl3YW1ELQyfu8C2oM3cbMpTRwkWz/Nwg/i53LtE0SFzmCHFLQPFzZ2B/CQqZQ9X9WjODALOgw4gvwRKBZPxDIMhwvVuhBDWxU3x6dOTe4w63CG5WwuUnCe724/wSiy3ZV+IaCzhOpWeGwQVa/VFRDpzVPXJEduiLZWgAjwaD78JO/VW2sBCmWtauJ0XCOmrgFg+mk+phJdNWoi3tAmf92tPo+j2G/wNjR1ox</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_25e97e3361c04deb80d282d37cd07e3e")) .filter((elt) => !elt.dataset['step1']) )[0]; root.dataset['step1'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
Notice that the `key` and `count` `nnx.Variable`s contain the PRNG key stream name in a `tag` attribute. This is primarily used for filtering as we'll see later.
To generate new keys, you can access one of the streams and use its `__call__` method with no arguments. This will return a new key by using `random.fold_in` with the current `key` and `count`. The `count` is then incremented so that subsequent calls will return new keys.
```python
params_key = rngs.params()
dropout_key = rngs.dropout()
nnx.display(rngs)
```
<script> (()=>{ if (customElements.get('treescope-container') === undefined) { class TreescopeContainer extends HTMLElement { constructor() { super(); this.attachShadow({mode: "open"}); this.defns = {}; this.state = {}; } } customElements.define("treescope-container", TreescopeContainer); } if (customElements.get('treescope-run-here') === undefined) { class RunHere extends HTMLElement { constructor() { super() } connectedCallback() { const run = child => { const fn = new Function(child.textContent); child.textContent = ""; fn.call(this); this.remove(); }; const child = this.querySelector("script"); if (child) { run(child); } else { new MutationObserver(()=>{ run(this.querySelector("script")); }).observe(this, {childList: true}); } } } customElements.define("treescope-run-here", RunHere); } })(); </script> <treescope-container class="treescope_out_67042c3f6f5349d086945034d08b4567" ></treescope-container> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_67042c3f6f5349d086945034d08b4567")) .filter((elt) => !elt.dataset.setup) )[0]; root.dataset.setup = 1; const msg = document.createElement("span"); msg.style = "color: #cccccc; font-family: monospace;"; msg.textContent = "(Loading...)"; root.state.loadingMsg = msg; root.shadowRoot.appendChild(msg); root.state.chain = new Promise((resolve, reject) => { const observer = new IntersectionObserver((entries) => { for (const entry of entries) { if (entry.isIntersecting) { resolve(); observer.disconnect(); return; } } }, {rootMargin: "1000px"}); window.setTimeout(() => { observer.observe(root); }, 0); }); root.state.deferring = false; const _insertNode = (node) => { for (let oldScript of node.querySelectorAll("script")) { let newScript = document.createElement("script"); newScript.type = oldScript.type; newScript.textContent = oldScript.textContent; oldScript.parentNode.replaceChild(newScript, oldScript); } if (root.state.loadingMsg) { 
root.state.loadingMsg.remove(); root.state.loadingMsg = null; } root.shadowRoot.appendChild(node); }; root.defns.insertContent = ((contentNode, compressed) => { if (compressed) { root.state.deferring = true; } if (root.state.deferring) { root.state.chain = (async () => { await root.state.chain; if (compressed) { const encoded = contentNode.textContent; const blob = new Blob([ Uint8Array.from(atob(encoded), (m) => m.codePointAt(0)) ]); const reader = blob.stream().pipeThrough( new DecompressionStream("deflate") ).pipeThrough( new TextDecoderStream("utf-8") ).getReader(); const parts = []; while (true) { const step = await reader.read(); if (step.done) { break; } parts.push(step.value); } const tpl = document.createElement('template'); tpl.innerHTML = parts.join(""); _insertNode(tpl.content); } else { _insertNode(contentNode.content); } })(); } else { _insertNode(contentNode.content); } }); </script></treescope-run-here><div style="display:none"> <script type="application/octet-stream" >eNrtWQtT20gS/isTpWqxDyzkNzbgOtn4BQECJoFwu8WNpJE0WB4JaWxjtvjv1yPJb+OQW9iQTaAKzExPP6e7vx72Aj5ySEXmPiGB7nrkxnddjv5EnhtQTl1WRj5xMKcDsotMl/GUiXvUGZVRz2Vu4GEd1oc25SQV/lFGng8rDg14KmSd4iMPVpnLYFnDetfy3T4zUrrruH45OrqL4r80BwiAHzW4XUYm5UDGOGF8F3nYMCizUg4xeRlldFsIYSRlE2rZsJKW84IN45iCzpNj8YfUgAZUow7loDnuc3dCm6KM+5QFVE8F9IFEu7G6j3vbkXv2Ju5J+X0GMn1YC3SfehwJ+/Y3sOc5VMfCY9uuzomw3ie4t1FJJJL7FXAoyAs4MojJArSPuE0D2SL8HLx94hokkZRtN+ByuA+mEY5uPMKEyaouuIpD//lj1U4LM8MhsM36jrMbSZBBzY7rMlhNDF2/m0SzOriXsCS25pY51cWiR3zT9XuY6URm7jCRDOMLAhJLOygVHdpD2UwS+FATJRa0lh3CLG6j/X2kCJK1qvuE930GfkfECchUMbvPhGaLrAObmlzoFxKID4/w/YSEBNwqZrhD2Sd3fRJwldFeGK6Gj3skEfkkKXjsLgny+oEduXF3hY1jEfuRGWusfL4OQosokNy1LCfKypswc+C2eoKXWCEO30JkABc8jqTQLvxb7pKRcLrkS0KhmFjWHRwEHyA5Y74JacLzpgfXUBoLf0yCP+H6h3e8sre9KgEMOkAhw31pvnxIiGMNLCX3+5IiIZeBMmA2A7p11361rQlxZmylBGkXFaywYtwEInet8KaEBeQ9zqbzaagEj4Il8X1i3HhQVIjtOgbxZwmL4juuaSHPMqIcQxKLwyaQYw10YaDkE9XwEc2T3UTqA7VBAxA6Gle9RUJUQQ7WiFMuawQSisxopYdfuyvlRZUvlRalL66Qyu5UFmVhPdQcV5TQJ2Xa7oD4y5IN7HcDgi2II1s+
XWYuTyws2ThIVEKelZV+CM+UdZvoXWIkk+hfyakO4ujqQ2P6OQ3DEl5GG79n8pq+8T3Vmz/0pJKFv0FJEUchuO8HIoCeC42M+Cvk0uDlxIapEApKhQkZPHXHX0bq1DxO7vmyFJkGNyb1A37jshtx/Vek1rpUkjN5kU0rQ4X+svpRxBdVFFb1sG8B9IjUCBP68S9Kg3rojbQ+59DzVxWg6faqSyshaYEKHAl4bjXx7ySdM6QFOLhxjOFWUOygzqinuU6ATvtc2GugWnQSfnsjSIzUkGhdgHZR5e1BsbehhgPwYhyOUxwQYwII3xNFfO8uX/PodIjYFLlEeotWRvmxworV5W56Uh7i4EaHPgCOnZzHJp/rHuM6vU7mwpl5kbOuRwPsJ1IpA3OcwgwCG2KC5OyyECKAjo/Z+DaHbFE6QAQ8Big25fb5t5ky0QACQ4nxbl6TUCR6R3ue63PMlnhrvtsl7EasTIvR1707c2zGn+MwP8oCNoBixo0OoNPwCYtVnYf1wHOe8IWq60zqxI10LlV17OgJmD0A+Ka9+xAryQHH4vxE31fTRHN9gDCxJobLwXahxazz7vrYYQAlb2AIM+k9MJlLk50wTQA3Y4GLhthnkHg348I+joVpYj2dXUHoAfD8czKI+fHcJapX7KR4KaXIYVmdTnvlcFbDfsrysUEhbIl0Nm8Qawu5cKMtghTQrqDbW9EN90As1ItwCcVeXlJlqbC+mOPnQj425lEe2uG9dRzsQYn6Osz79hr+tIQI3IUyQiJyD7lsPEnzEnqsEhEbGhFMp5FVrpifCOT5MQOt4bBoKpsbfFYTPkfYk3xecNYX4xd6p/o+Hsmm7/ZgpNP7PTGOiTwM5AF2YO5LJJNy4MLAF2anGNzEbznqpGJoe2YvlTZQEiUnY3JgE8LFLE2GqNbpdIQ1HbEmJuNwE+bOcBjqjJie+O+/4/6tk3Gd+PZePjs9MfE24MRrw/hZJqdAYQp8vYz6vpMQjaUs9reHrmlmdjVoXYXclqGUmseWWlXDr/aZqrrhp+r5EH62GqpaV9d9VXuqanXdI6Ndr9aGX1T14kvtUD1uV2tqw7pvtz7YPKgeU2JlGwdXmQ/twpdBx+vTj8f5i/ThVfv88/Hg8viBfxw1GrXNS6t7QasHik0PzvqHdaN5q7S0bXPQNry7o4J9d0npWf+YNe2W+YmrnwrVEz+nNtqsWy/on/p9tnmev9OD7nBgNpztu3ur7u5Y2uGwuZNuqdtMPc9/8P3D9Pmm9aCcG4p6aKatk2Jt2LzNWIo76p8Xi716ujBsXZVOLcsjF91RjrS1h7yu+adNjlXrrH0yPMDBKDjrt9tXl/XGUP145rW/GJ+2tzet4kXxKssV8+jjnTrIA88P6klRPR6qPevhvLPZv+6Q+tV9xizoDye589Yo36+qRw/VW6/hZWnrrFZXrvsfc50iM6sf6q3GcU+lmzuDesZmabu4qX0eXt0OW/7goPmpxm7Net3im6f6teMU86Xa4bC6Y5dyx8fNTrZ5rVq9dv62elbiF03SKtWr1XYze2Dlzre/6CNNbUJMPx9tq2dNrJLjmqO2Huqn1jW3CtWP1ulp+6DapWd50qhe1aoNnSqe7bseg7vhXdcP0g/pbsesmdweHbGWgRtBy1ROes36SaFqqHefP3uYB53rnmFgWsqYD6XcJ3p7V/B6fuHU/VLrUL/ZGxw2s53LTrZRz+jVM/Nis+W4XjPXCIZ5bN0Vdug16Zw43iWrttrEOPZJ//KuWeulLxt+t9O5z2cKl5fBUAWNkih8F+OJjfBab4iW9V/4Mcl+bLgetOtpSoavebIsr6HYinL2D+C1/tXEDp+XQkQVgT3gDdeD6SgRYa75xz9IwQtXpC+QxZhMrAVQHgQLATIFMsNDTDlieEAtzF1fBs6e5mLfkIc+5eQC5rHElBcYG/OavjABTkhIMwhUvC2BlAvaIwBVE+PHx6VzPukBnFw6+riFMoqihHAAii8gg0Q4
S62WOwMzpalyYoocVzDxHCeh96iBqQOFjbtIEL8LKxtAAAZQCqoxBZ8RbAiUvDnru/id7CsvZAJxj5/I5p9jFsGLVNmLOvQeZV4/7jRS2JM1915aySRu37AZtW5QIjw8L3e+00qV3xwu1AaK9XRzmwvAUqqYDr6XGbuXfWY5VJPHHKNf58wKEnvbsUWzjJbgvbRuX6oAFMS9YP9pT/7jHNcJscRK721M56gN5LKaSI79jW+sDuF7anIDTYa8fUmOvCyhsJPvSzMTIICBeFdM7oszKuyG2QDV1obPse2VeZv+Uvy7ZPRKwX9TQT8io+8RcfFfgvVRFxTfJ/IhVo5jT419afKer2v6TnpnRy8WzXxOK6RxERukpOTTGnzKZYsLfFf9H0Cq3EJAQpyOwEARZNMf/Wbx3URy3oSt5ahPBjSpgp68GuPpSfq7wxnNGF8PakT3/4f2uYGWKhxb8yk8+68byLD395nibqRY9PkZTt16g44HO5/hdqB6KadHv5Lr65c1rV8/6I1+zQL1/Fusg0/5z9CKasLQ79KMQhd/JdohzRtrSPliVtH0tK6UTC2n7GBsGoWiXtQ0bGTyZin7jQ2pTxnPZn7sRhSG6VmtaIbyVzN6Sed/vR1N6L5vQ/rhXPu6Jeqn6/GvNXY+vygYvuu5ff7rleF1wx27+al4x9u/3hn+Se8McVDX4fgZkrf20qDs5HYUM13KmUZuR9dLhl4s5rJE0/NKrpRXfsaXhplorcd3S4RvBt7Fmv14+G7WpWvg3QLZLyjyhurUrweHt/DgMA7lWjg/R/TGOhMu6rpeKCqaklVyuXwOp9MlrOWzOYWUDEXRf6Ynh7k4Pa8nvcVHhx++K3312WGJ8Ne7w5uqVD+bU19tEH1tR04oDTqo/A+Bq2DK</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_67042c3f6f5349d086945034d08b4567")) .filter((elt) => !elt.dataset['step0']) )[0]; root.dataset['step0'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
<div style="display:none"> <script type="application/octet-stream" >eNrtVslu2zAUvPcrCAUIpCZRqFiOvMVAm7RALj20hx6CwKDIJ5sNQ6oUncYo8u99lOW1znbIpagPlqg385bh0PJAyDtSuZmCs0DIqlRs1iPaaAiIFGdBYexIQAHWghjxnHeSTodnWdFO89OEZUxAl7aTHO/SVhYMB1XJNH77fMOYG4V0lucW7shvUi97ZO+0c8Io7ZOHBsDN7S1oN/o1AT2Ce8wgQPS0cWFcGCVYrmCkjYDehFXhULEc1HAzMnJmPFZQc3p8AvwGRBSR99FaVVZ/sOrgeN5e3SvhilWVn3MtHc4xrzKQupw64mYlqlPnzc19sJPTtIDBefnh4Lgmb5ZZVyTYFdqpxRayjiFcsbLywX3l+jjUXPlt3CrHHmlAP9h9/MFaNiM3MPPkws72x64fRk+XGa/KbF0atZ4ovhGT+Ew7b6iJVMKCfjIeDOtmwzA6JKLeis22I2LuwKJvpR733l1RQq8faXPzgsYfDl7h/nbWojlPOO0WeUo7jBXiNONZnjNx0i66rZX7H91w3/VS/TBZzDOV2rVOol0Cv7ZJTjtphxZJNy1E2uG8K/C0pi3IeZum3Tbd2eR/8/875k/exvws45yfZjSnLZqm7ZQlSZfl7VZKoSso5W9nfmcBKm5KOLJTfTQB63+5uZXlwpqsLJXkzEmjjw134I4q5LDbYMiNrhDF7BjcpajIGbkKXv4yOyTBy8/+OvjZM7gOflbY6/58DGG4NcbhEG4iqxhH+orLL3j6wqhPmlFRqwbUwOOfU7Czb6CAO2PDIF7KOfLhYEktLBv7wzenTv1tzFFGBxfN8nOD+LvcuULTIHGRI8YtAS3OvYHDBSjmHlX36+wUIsyCDiOhAkckkvEPgSSD1W7FCvTYTfDpwYF/j3vcPLheCZefFPjbj7NLLLZgX8lrLOA5lZlaDhdo9UdF2fNWD8gB2aLPl7EFPBocvks3CZfZ4kLaalG7nhQJq6iFWzyYXqqHpUwbiTa0i7z1a0+j63cY/g8jiVqd</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_67042c3f6f5349d086945034d08b4567")) .filter((elt) => !elt.dataset['step1']) )[0]; root.dataset['step1'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
Note that the `key` attribute does not change when new PRNG keys are generated.
### Standard PRNG key stream names
There are only two standard PRNG key stream names used by Flax NNX's built-in layers, shown in the table below:
| PRNG key stream name | Description |
|----------------------|-----------------------------------------------|
| `params` | Used for parameter initialization |
| `dropout` | Used by `nnx.Dropout` to create dropout masks |
- `params` is used by most of the standard layers (such as `nnx.Linear`, `nnx.Conv`, `nnx.MultiHeadAttention`, and so on) during the construction to initialize their parameters.
- `dropout` is used by `nnx.Dropout` and `nnx.MultiHeadAttention` to generate dropout masks.
Below is a simple example of a model that uses `params` and `dropout` PRNG key streams:
```python
class Model(nnx.Module):
def __init__(self, rngs: nnx.Rngs):
self.linear = nnx.Linear(20, 10, rngs=rngs)
self.drop = nnx.Dropout(0.1, rngs=rngs)
def __call__(self, x):
return nnx.relu(self.drop(self.linear(x)))
model = Model(nnx.Rngs(params=0, dropout=1))
y = model(x=jnp.ones((1, 20)))
print(f'{y.shape = }')
```
y.shape = (1, 10)
### Default PRNG key stream
One of the downsides of having named streams is that the user needs to know all the possible names that a model will use when creating the `nnx.Rngs` object. While this could be solved with some documentation, Flax NNX provides a `default` stream that can be used as a fallback when a stream is not found. To use the default PRNG key stream, you can simply pass an integer seed or `jax.random.key` as the first positional argument.
```python
rngs = nnx.Rngs(0, params=1)
key1 = rngs.params() # Call params.
key2 = rngs.dropout() # Fallback to the default stream.
key3 = rngs() # Call the default stream directly.
# Test with the `Model` that uses `params` and `dropout`.
model = Model(rngs)
y = model(jnp.ones((1, 20)))
nnx.display(rngs)
```
<script> (()=>{ if (customElements.get('treescope-container') === undefined) { class TreescopeContainer extends HTMLElement { constructor() { super(); this.attachShadow({mode: "open"}); this.defns = {}; this.state = {}; } } customElements.define("treescope-container", TreescopeContainer); } if (customElements.get('treescope-run-here') === undefined) { class RunHere extends HTMLElement { constructor() { super() } connectedCallback() { const run = child => { const fn = new Function(child.textContent); child.textContent = ""; fn.call(this); this.remove(); }; const child = this.querySelector("script"); if (child) { run(child); } else { new MutationObserver(()=>{ run(this.querySelector("script")); }).observe(this, {childList: true}); } } } customElements.define("treescope-run-here", RunHere); } })(); </script> <treescope-container class="treescope_out_6aa75eeeb1994cef9ddfbe50d83f92b2" ></treescope-container> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_6aa75eeeb1994cef9ddfbe50d83f92b2")) .filter((elt) => !elt.dataset.setup) )[0]; root.dataset.setup = 1; const msg = document.createElement("span"); msg.style = "color: #cccccc; font-family: monospace;"; msg.textContent = "(Loading...)"; root.state.loadingMsg = msg; root.shadowRoot.appendChild(msg); root.state.chain = new Promise((resolve, reject) => { const observer = new IntersectionObserver((entries) => { for (const entry of entries) { if (entry.isIntersecting) { resolve(); observer.disconnect(); return; } } }, {rootMargin: "1000px"}); window.setTimeout(() => { observer.observe(root); }, 0); }); root.state.deferring = false; const _insertNode = (node) => { for (let oldScript of node.querySelectorAll("script")) { let newScript = document.createElement("script"); newScript.type = oldScript.type; newScript.textContent = oldScript.textContent; oldScript.parentNode.replaceChild(newScript, oldScript); } if (root.state.loadingMsg) { 
root.state.loadingMsg.remove(); root.state.loadingMsg = null; } root.shadowRoot.appendChild(node); }; root.defns.insertContent = ((contentNode, compressed) => { if (compressed) { root.state.deferring = true; } if (root.state.deferring) { root.state.chain = (async () => { await root.state.chain; if (compressed) { const encoded = contentNode.textContent; const blob = new Blob([ Uint8Array.from(atob(encoded), (m) => m.codePointAt(0)) ]); const reader = blob.stream().pipeThrough( new DecompressionStream("deflate") ).pipeThrough( new TextDecoderStream("utf-8") ).getReader(); const parts = []; while (true) { const step = await reader.read(); if (step.done) { break; } parts.push(step.value); } const tpl = document.createElement('template'); tpl.innerHTML = parts.join(""); _insertNode(tpl.content); } else { _insertNode(contentNode.content); } })(); } else { _insertNode(contentNode.content); } }); </script></treescope-run-here><div style="display:none"> <script type="application/octet-stream" >eNrtWQtT20gS/isTpWqxDyzktzHgOtn4BQECJoFwu8WNpJE0WB4JaWxjtvjv1yPJb+OQW9iQTaAKzExPP6e7vx72Aj5ySEXmPiGB7nrkxnddjv5EnhtQTl1WRj5xMKcDsotMl/GUiXvUGZVRz2Vu4GEd1oc25SQV/lFGng8rDg14KmSd4iMPVpnLYFnDetfy3T4zUrrruH45OrqL4r80BwiAHzW4XUYm5UDGOGF8F3nYMCizUg4xeRlldFsIYSRlE2rZsJKW84IN45iCzpNj8YfUgAZUow7loDnuc3dCm6KM+5QFVE8F9IFEu7G6j3vbkXv2Ju5J+X0GMn1YC3SfehwJ+/Y3sOc5VMfCY9uuzomw3ie4t1FJJJL7FXAoyAs4MojJArSPuE0D2SL8HLx94hokkZRtN+ByuA+mEY5uPMKEyaouuIpD//lj1U4LM8MhsM36jrMbSZBBzY7rMlhNDF2/m0SzOriXsCS25pY51cWiR3zT9XuY6URm7jCRDOMLAhJLOygVHdpD2UwS+FATJRa0lh3CLG6j/X2kCJK1qvuE930GfkfECchUMbvPhGaLrAObmlzoFxKID4/w/YSEBNwqZrhD2Sd3fRJwldFeGK6Gj3skEfkkKXjsLgny+oEduXF3hY1jEfuRGWusfL4OQosokNy1LCfKypswc+C2eoKXWCEO30JkABc8jqTQLvxb7pKRcLrkS0KhmFjWHRwEHyA5Y74JacLzpgfXUBoLf0yCP+H6h3e8sre9KgEMOkAhw31pvnxIiGMNLCX3+5IiIZeBMmA2A7p11361rQlxZmylBGkXFaywYtwEInet8KaEBeQ9zqbzaagEj4Il8X1i3HhQVIjtOgbxZwmL4juuaSHPMqIcQxKLwyaQYw10YaDkE9XwEc2T3UTqA7VBAxA6Gle9RUJUQQ7WiFMuawQSisxopYdfuyvlRZUvlRalL66Qyu5UFmVhPdQcV5TQJ2Xa7oD4y5IN7HcDgi2II1s+
XWYuTyws2ThIVEKelZV+CM+UdZvoXWIkk+hfyakO4ujqQ2P6OQ3DEl5GG79n8pq+8T3Vmz/0pJKFv0FJEUchuO8HIoCeC42M+Cvk0uDlxIapEApKhQkZPHXHX0bq1DxO7vmyFJkGNyb1A37jshtx/Vek1rpUkjN5kU0rQ4X+svpRxBdVFFb1sG8B9IjUCBP68S9Kg3rojbQ+59DzVxWg6faqSyshaYEKHAl4bjXx7ySdM6QFOLhxjOFWUOygzqinuU6ATvtc2GugWnQSfnsjSIzUkGhdgHZR5e1BsbehhgPwYhyOUxwQYwII3xNFfO8uX/PodIjYFHmH9BatjPJjhRWry930pDzEwY0OfQAcOzmPTT7XPcZ1ep3MhTPzImddjwbYT6RSBuY4hRkENsQEydllIUQAHR+z8W0O2aJ0gAh4DFBsyu3zbzNlogEEhhLj3bwmoUj0jvY81+eYLfHWfLdL2I1YmRajr3t35tiMP8dhfpQFbADFjBsdQKfhExarOg/rgec84QtV15nUiRvpXKrq2NETMHsA8E179yFWkgOOxfmJvq+mieb6AGFiTQyXg+1Ci1nn3fWxwwBK3sAQZtJ7YDKXJqUwTQA3Y4GLhthnkHg348I+joVpYj2dXUHoAfD8czKI+fHcJapX7KR4KaXIYVmdTnvlcFbDfsrysUEhbIl0Nm8Qawu5cKMtghTQrqDbW9EN90As1ItwCcVeXlJlqbC+mOPnQj425lEe2uG9dRzsQYn6Osz79hr+tIQI3IUyQiJyD7lsPEnzEnqsEhEbGhFMp5FVrpifCOT5MQOt4bBoKpsbfFYTPkfYk3xecNYX4xd6p/o+Hsmm7/ZgpNP7PTGOiTwM5AF2YO5LJJNy4MLAF2anGNzEbznqpGJoe2YvlTZQEiUnY3JgE8LFLE2GqNbpdIQ1HbEmJuNwE+bOcBjqjJie+O+/4/6tk3Gd+PZePjs9MfE24MRrw/hZJqdAYQp8vYz6vpMQjaUs9reHrmlmdjVoXYXclqHsNI8ttaqGX+0zVXXDT9XzIfxsNVS1rq77qvZU1eq6R0a7Xq0Nv6jqxZfaoXrcrtbUhnXfbn2weVA9psTKNg6uMh/ahS+DjtenH4/zF+nDq/b55+PB5fED/zhqNGqbl1b3glYPFJsenPUP60bzVmlp2+agbXh3RwX77pLSs/4xa9ot8xNXPxWqJ35ObbRZt17QP/X7bPM8f6cH3eHAbDjbd/dW3S1Z2uGwWUq31G2mnuc/+P5h+nzTelDODUU9NNPWSbE2bN5mLMUd9c+LxV49XRi2rnZOLcsjF91RjrS1h7yu+adNjlXrrH0yPMDBKDjrt9tXl/XGUP145rW/GJ+2tzet4kXxKssV8+jjnTrIA88P6klRPR6qPevhvLPZv+6Q+tV9xizoDye589Yo36+qRw/VW6/hZWnrrFZXrvsfc50iM6sf6q3GcU+lm6VBPWOztF3c1D4Pr26HLX9w0PxUY7dmvW7xzVP92nGK+Z3a4bBasndyx8fNTrZ5rVq9dv62erbDL5qktVOvVtvN7IGVO9/+oo80tQkx/Xy0rZ41sUqOa47aeqifWtfcKlQ/Wqen7YNql57lSaN6Vas2dKp4tu96DO6Gd10/SD+kux2zZnJ7dMRaBm4ELVM56TXrJ4Wqod59/uxhHnSue4aB6U7GfNjJfaK3dwWv5xdO3S+1DvWbvcFhM9u57GQb9YxePTMvNluO6zVzjWCYx9ZdoUSvSefE8S5ZtdUmxrFP+pd3zVovfdnwu53OfT5TuLwMhipolEThuxhPbITXekO0rP/Cj0n2Y8P1oF1PUzJ8zZNleQ3FVpSzfwCv9a8mdvi8FCKqCOwBb7geTEeJCHPNP/5BCl64In2BLMZkYi2A8iBYCJApkBkeYsoRwwNqYe76MnD2NBf7hjz0KScXMI8lprzA2JjX9IUJcEJCmkGg4m0JpFzQHgGomhg/Pi6d80kP4OTS0cctlFEUJYQDUHwBGSTC
WWq13BmYKU2VE1PkuIKJ5zgJvUcNTB0obNxFgvhdWNkAAjCAUlCNKfiMYEOg5M1Z38XvZF95IROIe/xENv8cswhepMpe1KH3KPP6caeRwp6suffSSiZx+4bNqHWDEuHhebnznVaq/OZwoTZQrKeb21wAllLFdPC9zNi97DPLoZo85hj9OmdWkNjbji2aZbQE76V1+1IFoCDuBftPe/If57hOiCVWem9jOkdtIJfVRHLsb3xjdQjfU5MbaDLk7Uty5GUJhZ18X5qZAAEMxLticl+cUWE3zAaotjZ8jm2vzNv0l+LfJaNXCv6bCvoRGX2PiIv/EqyPuqD4PpEPsXIce2rsS5P3/LyuGLqWxSWzVMoZZl5TsJkuZTQ9l1VIvlBc4Lvq/wBS5RYCEuJ0BAaKIJv+6DeL7yaS8yZsLUd9MqBJFfTk1RhPT9LfHc5oxvh6UCO6/z+0zw20VOHYmk/h2X/dQIa9v88UdyPFos/PcOrWG3Q82PkMtwPVSzk9+pVcX7+saf36QW/0axao599iHXzKf4ZWVBOGfpdmFLr4K9EOad5YQ1JyZrqoFIpFzcjnSnp2J5PPK+mdkpZLp/OZPPnGhtSnjGczP3YjCsP0rFY0Q/mrGb2k87/ejiZ037ch/XCufd0S9dP1+NcaO59fFEBl3Hf4r1eG1w137Oan4h1v/3pn+Ce9M8RBXYfjZ0jeGLAzMkQvaXrWVArpHM7nSwagu0KRmIpewBj/lC8NM9Faj++WCN8MvIs1+/Hw3axL18C7BbJfUOQN1alfDw5v4cFhHMq1cH6O6I11JlPPKYaSzacNHecKRUUr6kWzqO/oZqZkElP7mZ4c5uL0vJ70Fh8dfviu9NVnhyXCX+8Ob6pS/WxOfbVB9LUdOaE06KDyP1OuXXc=</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_6aa75eeeb1994cef9ddfbe50d83f92b2")) .filter((elt) => !elt.dataset['step0']) )[0]; root.dataset['step0'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
<div style="display:none"> <script type="application/octet-stream" >eNrtVslu2zAUvPcrCAUIpCZR5EWWYjsG2qQFcumhPfQQBAZFPtpsGFKl6DRGkX/voyxvqbMdcinqgyXqzbxlOLQ85PKWVG6u4DTgsioVnfeJNhoCIvlpIIwdcxBgLfBxyhLOig7NRZ53uUiLhIpW3i5Yt5NA2suC0bAqqcZvn28UM6OQTovCwi35Tepln+z18jZNkgG5bwDM3NyAduNfU9BjuMMMHHhfGxfGwihOCwVjbTj0p7QKR4oWoEbbkbEzk4mCmtNnU2DXwKOIvI82qtL6g1WHx4v26l4JU7Sq/Jwb6XCORZWh1OXMETcvUZ06b2Hugp2cpgUMLsqPhsc1ebvMpiLBrtBOLR4g6xjCFS0rH9xXboBDLZR/iFvn2CMN6Ae9iz9YS+fkGuaeLOx8f+IGYfR0mcm6zINLo9YTxbdiEp9ph4ZiU6m4Bf1kPBjVzYZhdEh4vRXbbUfE3IJF30o96b+7TEjr6pE2ty9o/NHwFe5PuqKVJb0sK3jazVnnpJ2mSeskL7qtVtpOYe3+Rzfcd71SP+ws55lJ7TrtaJfAr22St4HlBeuIpNfq0jTNOXbay0AkrIf2z3Y2+d/8/475k7cxv2DdhCedtMUZ7faypMhYJjJ2wkQ7FyCKtzO/swAVMyUc2Zk+moL1v9zMynJpTVqWSjLqpNHHhjlwRxVy6E0wYkZXiKJ2Au6CV+SUXAYvf5kdkuDlZ38T/OwZ3AQ/K+zVYDEGN8wa43AIN5VVjCN9xeUXPH1hNCDNqKhVA2rg8c8Z2Pk3UMCcsWEQr+Qc+3CwogpLJ/7wLagzfxszlNHBebP83CD+Lnem0DRIXOaIcUtA8zNv4HAJiplH1f06O4MIs6DDSKjAEYlk/EMgyXC9W7ECPXFTfHpw4N/jHrcIblbC5ScF/vbj/AKLLdmX8goLeE5lZpbBOVr9UVH2vNUDckAe0BfL2AIeDQbfpZuGq2yxkLZa1q4nRcI6auEGD6aX6n4l01aiLe0ib/3a0+j6HYb/A/UoWys=</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_6aa75eeeb1994cef9ddfbe50d83f92b2")) .filter((elt) => !elt.dataset['step1']) )[0]; root.dataset['step1'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
As shown above, a PRNG key from the `default` stream can also be generated by calling the `nnx.Rngs` object itself.
> **Note**
> <br> For large projects it is recommended to use named streams to avoid potential conflicts. For small projects or quick prototyping just using the `default` stream is a good choice.
## Filtering random state
Random state can be manipulated using [Filters](https://flax.readthedocs.io/en/latest/guides/filters_guide.html) just like any other type of state. It can be filtered using types (`nnx.RngState`, `nnx.RngKey`, `nnx.RngCount`) or using strings corresponding to the stream names (refer to [the Flax NNX `Filter` DSL](https://flax.readthedocs.io/en/latest/guides/filters_guide.html#the-filter-dsl)). Here's an example using `nnx.state` with various filters to select different substates of the `Rngs` inside a `Model`:
```python
model = Model(nnx.Rngs(params=0, dropout=1))
rng_state = nnx.state(model, nnx.RngState) # All random states.
key_state = nnx.state(model, nnx.RngKey) # Only PRNG keys.
count_state = nnx.state(model, nnx.RngCount) # Only counts.
rng_params_state = nnx.state(model, 'params') # Only `params`.
rng_dropout_state = nnx.state(model, 'dropout') # Only `dropout`.
params_key_state = nnx.state(model, nnx.All('params', nnx.RngKey)) # `Params` PRNG keys.
nnx.display(params_key_state)
```
<script> (()=>{ if (customElements.get('treescope-container') === undefined) { class TreescopeContainer extends HTMLElement { constructor() { super(); this.attachShadow({mode: "open"}); this.defns = {}; this.state = {}; } } customElements.define("treescope-container", TreescopeContainer); } if (customElements.get('treescope-run-here') === undefined) { class RunHere extends HTMLElement { constructor() { super() } connectedCallback() { const run = child => { const fn = new Function(child.textContent); child.textContent = ""; fn.call(this); this.remove(); }; const child = this.querySelector("script"); if (child) { run(child); } else { new MutationObserver(()=>{ run(this.querySelector("script")); }).observe(this, {childList: true}); } } } customElements.define("treescope-run-here", RunHere); } })(); </script> <treescope-container class="treescope_out_5754eecbc9054aaf81b39afce2d0a17f" ></treescope-container> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_5754eecbc9054aaf81b39afce2d0a17f")) .filter((elt) => !elt.dataset.setup) )[0]; root.dataset.setup = 1; const msg = document.createElement("span"); msg.style = "color: #cccccc; font-family: monospace;"; msg.textContent = "(Loading...)"; root.state.loadingMsg = msg; root.shadowRoot.appendChild(msg); root.state.chain = new Promise((resolve, reject) => { const observer = new IntersectionObserver((entries) => { for (const entry of entries) { if (entry.isIntersecting) { resolve(); observer.disconnect(); return; } } }, {rootMargin: "1000px"}); window.setTimeout(() => { observer.observe(root); }, 0); }); root.state.deferring = false; const _insertNode = (node) => { for (let oldScript of node.querySelectorAll("script")) { let newScript = document.createElement("script"); newScript.type = oldScript.type; newScript.textContent = oldScript.textContent; oldScript.parentNode.replaceChild(newScript, oldScript); } if (root.state.loadingMsg) { 
root.state.loadingMsg.remove(); root.state.loadingMsg = null; } root.shadowRoot.appendChild(node); }; root.defns.insertContent = ((contentNode, compressed) => { if (compressed) { root.state.deferring = true; } if (root.state.deferring) { root.state.chain = (async () => { await root.state.chain; if (compressed) { const encoded = contentNode.textContent; const blob = new Blob([ Uint8Array.from(atob(encoded), (m) => m.codePointAt(0)) ]); const reader = blob.stream().pipeThrough( new DecompressionStream("deflate") ).pipeThrough( new TextDecoderStream("utf-8") ).getReader(); const parts = []; while (true) { const step = await reader.read(); if (step.done) { break; } parts.push(step.value); } const tpl = document.createElement('template'); tpl.innerHTML = parts.join(""); _insertNode(tpl.content); } else { _insertNode(contentNode.content); } })(); } else { _insertNode(contentNode.content); } }); </script></treescope-run-here><div style="display:none"> <script type="application/octet-stream" >eNrtGg1T4sjyr8yyVQc8JXwooKjUC8iXu+oq7uq6d8UbkkkyEiZxMoB45X9/PZOAgJFzb73de3sPqgjMdE9/93S37gdi6pKqJjghgeH5pMc9T6Dfke8FVFCPVRAnLhZ0TPaQ5TGRsfCQutMKGnrMC3xswPrEoYJk1I8K8jmsuDQQGXV0Rkx9WGUeg+U+NgY290bMzBie6/FKiLqHol99FwDgPGoKp4IsKgCMCcLEHvKxaVJmZ1xiiQoqGI4kwkjGIdR2YCWvFeUxTGAKPM/Roi+ZMQ1on7pUAOd4JLw5bIYywSkLqJEJ6D0JdyN2H/azoXr25+rJ8BEDmhzWAoNTXyAp30ES+75LDSw1lvUMQaT0nOBhsppKpQ+qoFCgFwhkEosF6AAJhwaaTcQ5aPvEM0kqrTleIDS1D6IRgXo+YVJk3ZCnSqQvv8XttDEzXQLbbOS6eyEFDdjseh6D1dTE44M0WuTBu4QlubW0LKghF33CLY8PMTOIxrxJKq3sCwRST3ZQJkTaR1uFNJxDLZRa4VpzCbOFgw4OUE6CrGWdEzHiDPSOiBuQR8acEZOcrR4dONQSkj8FIL88wPsZCinwKmZ6E42T2xEJhM7oUJmryfGQpEKdpOUZe08I+aPACdW4FyPjjMRBKMYaKV/Og+QiNKTwbNsNo7KnIge81ZdnyRXiik1ExuDgkSUld+q3NiBTqfQET0iGImDNcHEQvIfgjM5NJeZn9obghokZ8Yc06BPcX/l4dT8bFwAmHSN14EFiOX0kkMB9kJTcHSRyCeQxYAbEZgC3zu3jZU1JnJmUCQi7MGGpjNHD/T4nY+UpKoG8Le0UcC4H/EcAhjccAuICBFYvKeYKCK4wT6QqjjcmPB0DvwzemziE9cidD8YlpkLVLM81cR8kYCBaxcFBquriPnGryzu9UM6QnOEQY0DMdBr9K42epxrIJGUvAmzli/miBADdEc6J2fMhexIHKBG+CFiW7yh5K+VVEBUYspVE
XmLsmbQPmorjH6BNGgDR6Sy9rwKiKlLyVyp9ApmDLHBlqNdeLL0wxWfyMsdHVwFYdE6LMpX4+64n74pnaSo7PqVsYj4ICLbBYdlT7Fey45wHiRqPNINf4lDdVRWU/LVQ7BvJH8neMtKzTJa+A5PSjpLwiAfSgL4HNzbhMXRp8HpkVSgoQhmVeYLnfPx1qD6KJ8ideEpFo0HPojwQPY/1pPvHhNa6UNIKRRlNsaZC38x+aPFVFqVUQ8xtqLFCNlRAP3wjNciH/rQ/EgKKm7gE9Lgd57QJlFiBAkVC4RoP/CvJb5uJlbo3eYzBKyh2UXc67HtugE5HQspronqICU9/CoGRmZD+AGrYMPMO4VZzIIdDhckEoFMcEHNe+b4lOfnee+rmIbYqTXPaLhmuShnGR4wU8enuEVOb4KBnwD0Aip3jY0ss3R6zPL2O5grOMslF1aMx5qlMxsQCZzADw6riJ724LInIio5jNvNmdSzKB4iAxqBcz3gj8XWizDkAw1BivlnmRJFEb+jQ97jA7MnZfe4N4KqXK4/J6I+1u4C2oM+ZmR80WR8BY2bPgOra5IRFrC73L3DmMuCrVRrz0Iku0qVQNbBrpKDJggo/79+polALBJb4c37/Mk76HocSJuLE9ATILrlYVN7tCLsMauYedJsWvYNDlsJkR4UJNAhY1kUTzBkEXm+W2Ge2sCxs5LdiAH2osH+fd5w8ajBl9oqUFC1lcppKq49tbUU1pZhnbI5NCmZL5beKJrE3kQcebROUA+5KhrMZergPZCFfqCUUafkJK08S66spfsnkM2EeNFXZgpJc7EOK+uMy7+tz+PMUwuJO0VgqsJ+BeQ0+4khEgi4D9KAOf2zB4tTyPPjr2GwTrQxqtOX2Db2Q4VUtz03RAzxgQXwz2neV94XsLxl1vSFfQvVFKnqJLl9C7I+Eeo1JlRweoDc653iqWdwbpkzPGMleV5PJNdDG2B2RIJVOa4E3JCmVcuXYQT61sDySI4cXFkiJJEqj9HzIEziECDkJIhNU73a7UpquXJNzHbWpcaI63O6UGan//DsqygwyS/5fX6AttsRMTrbcaG0SDRW35Sgh4EYFjbibktVCRe5nJ55lFfb6UI+UtjfN3G7r2NZrunp1znTdU99q5xP4bDd1vaGve9WGum4PvHdmp1GrTz7r+sXn+pF+3KnV9aZ912m/d0RQO6bE3moeXhXed0qfx11/RD8cFy/yR1ed80/H48vje/Fh2mzWNy7twQWtHeYceng2OmqYrZtcu5+1xh3Tv31Xcm4vKT0bHbOW07Y+Cv1jqXbCt/Vmhw0aJePjaMQ2zou3RjCYjK2mm729sxvejt0/mrR28m09y/Tz4nvOj/LnG/Z97tzM6UdW3j4p1yetm4Kd86aj83J52MiXJu2r3VPb9snFYLpNOv37otHnpy2BdfusczI5xME0OBt1OleXjeZE/3Dmdz6bH7PZDbt8Ub7aEjnr3YdbfVyEM9/rJ2X9eKIP7fvz7sbouksaV3cFq2Tcn2yft6fFUU1/d1+78Zv+Fm2f1Ru569GH7W6ZWbX3jXbzeKjTjZ1xo+CwvFPe6H+aXN1M2nx82PpYZzdWo2GLjVPj2nXLxd360aS24+xuHx+3uluta90edoo3tbNdcdEi7d1GrdZpbR3a2+fZz8a0r7fApp/eZfWzFtbJcd3V2/eNU/ta2KXaB/v0tHNYG9CzImnWruq1pkFzvsM9n4Fv+NeNw/x9ftC16pZwpu9Y28TNoG3lToatxkmpZuq3nz75WATd66FpYrpbsO53tz/Sm9uSP+SlU+9zvUt5azg+am11L7tbzUbBqJ1ZFxtt1/Nb281gUsT2bWmHXpPuietfslq7Q8xjTkaXt636MH/Z5INu965YKF1eBhMdOEojNdUVqaRy66SsQ/4DH/Pox6bnQw32GJJqFq1p2hqIzTBmf4Oz1s/8HDUcVWVyWMHD2eAezECpsJBeHl1DCF54MnwBLCq05VoA6UEe
ITsHWW7jCaYCMTymNhYe1+Bkv+9hbmoTTgW5gCY79XgWCBud9TgfheIvlVhoK+RkFKhc0CGB/iM1G50/weNkCD3CE9SHTVTI5XKqxoPkC+VeSjXI8XQXeofEI3NyNDDLYHKYnEBvURNTFxKb8JAEfqMyG9R1DOpjyMYUdEawKVufjUXdRVPeP5jvyjZqNuBdnrGtVqSJ6n54r+9T5o+imyahbvK+d5eIPSS69GEzvPCBCYW8THf5pk1Uf3GFZBsg1sMtba50C4mq5eI7jbE72dMI4tK+NjszfHTlcur3/Wwk1eJhT/q2xLr9lc3FYS4I8/auUN4zITOE3yLqFfS86l9V07ECJh972CTyWF368EHyK4NYDe3TSTRvsA8SX5JS0ORvCaRu3IPEQvtdQb/cjjyxN4MJf+2hJ7MCuN2VA0OCdOB7pN3qsvX+WnNxZgf/IHN9SUqBX2a2Gezf2XzQcuPhP9GAX5Kh6F9nykesv7NRB2T6Yyz6Pe+qMYamBjgKVi6rT9F6eGn9PVzsSxJM8mc9LUL+gQ6nfOKvcaF411nzp+VEjPM87r1Fz3pY7BToxd4GRnlaF50z+x2ZztdiqqN1rL9C2Id/7w9DS22gMO5X2Q4ZDfd+sdeEYTyrPyxgNOlK3xQ16oRXDJ0lz0xUEfQch+AqcpqChZp2o+0i8iyU/RgQHmQNG3ODYpLlxPeCrDRM+AHWyUbW8acrBNc8Np/abD7uA3b+b9ivMexLM2SiqmZ9UQqk5kFi/k8mxo5BcKmc2ynmc9t9c3t3l+TKZUyK/b5R3t0prYRt3D+nJKo3EK1qzojkzQ2xbPGpDNNU+uc0vtLmt1lfHfHdzC+wffBnSul1ptj834lVbH9jqGL7VU0VPtLrq82Fa+4nMsaPLCHjHg8/hXJ/SAv4s6nzO45EfhbVfYfhX6yqXjN1ziFNOq7+F85kX0I=</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_5754eecbc9054aaf81b39afce2d0a17f")) .filter((elt) => !elt.dataset['step0']) )[0]; root.dataset['step0'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
<div style="display:none"> <script type="application/octet-stream" >eNqNVNFu2jAUfd9XWKlUJWIEJrUFSkDa2k3qyx62hz0gFDn2JfFq7Mx2GFG1f991CLBQivYSx77n3HN9feyEiw2xrpYwC7iwpaT1PVFaQUAEnwUrbVIOKzAGeMrGDOjdaDi+/TC8yfjNZALD0YjCbZax0WR8F8wTW1K1+xImqbU+g+Q0k5AqzQERkmYg54lQZeWIq0vUZQWw50xvg7Oc1Ok8l1hPAwM+TwYNuSvDtMRSaZYZ2ATnQkyv16Bc+rsAlcIW4xz4CbKJIVzS0vrgtXTTZPB6Tyc5rkgL+km38UdjaE2eofbklamvczcNo8sy+VHmZGi7dUG8ExO4ppw/qkJIbkBdjAfzptgwjN4T3hxFt+yI6A0YdIRQ+f27xZAMl2+U2R3QUvPEGQDLdAl9U6l+AQawFGZEuT91WpZSMOqEVgPNHLi+RQ5dB3OmlUUUNTm4J27JjCyC/zfhcrrjc82M1g7ZrhA2xlzfcPoVHRVGU9JqYJEtqIXHvyow9XeQwJw2YRAf9pH6cHCgrgzNvaF21Mr/xgzrd/DYTr+0iNdyDxIvGBL3OWLsBSj+4A8l3INi5lFNvc5UEGEWvI0klOCIQPJwikNybFMsQeWuwNVeLyIvxON2wX+VcPpZgv/9VD+h2J69EEsU8ByrK8PgEZ+FN5ty5Z+FgPTICX03jQ3gM8Lgh3BFeMgWr4Sxe+1mp0g4Rg2s0Wy+VX8Obeok6vQu8velMRPa7YzT/gKU0rd+</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_5754eecbc9054aaf81b39afce2d0a17f")) .filter((elt) => !elt.dataset['step1']) )[0]; root.dataset['step1'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
## Reseeding
In Haiku and Flax Linen, random states are explicitly passed to `Module.apply` each time before you call the model. This makes it easy to control the randomness of the model when needed (for example, for reproducibility).
In Flax NNX, there are two ways to approach this:
1. By passing an `nnx.Rngs` object through the `__call__` stack manually. Standard layers like `nnx.Dropout` and `nnx.MultiHeadAttention` accept the `rngs` argument if you want to have tight control over the random state.
2. By using `nnx.reseed` to set the random state of the model to a specific configuration. This option is less intrusive and can be used even if the model is not designed to enable manual control over the random state.
`nnx.reseed` is a function that accepts an arbitrary graph node (this includes [pytrees](https://jax.readthedocs.io/en/latest/working-with-pytrees.html#working-with-pytrees) of `nnx.Module`s) and some keyword arguments containing the new seed or key value for the `nnx.RngStream`s specified by the argument names. `nnx.reseed` will then traverse the graph and update the random state of the matching `nnx.RngStream`s; this includes both setting the `key` to a possibly new value and resetting the `count` to zero.
Here's an example of how to use `nnx.reseed` to reset the random state of the `nnx.Dropout` layer and verify that the computation is identical to the first time the model was called:
```python
model = Model(nnx.Rngs(params=0, dropout=1))
x = jnp.ones((1, 20))
y1 = model(x)
y2 = model(x)
nnx.reseed(model, dropout=1) # reset dropout RngState
y3 = model(x)
assert not jnp.allclose(y1, y2) # different
assert jnp.allclose(y1, y3) # same
```
## Splitting PRNG keys
When interacting with [Flax NNX transforms](https://flax.readthedocs.io/en/latest/guides/transforms.html) like `nnx.vmap` or `nnx.pmap`, it is often necessary to split the random state such that each replica has its own unique state. This can be done in two ways:
- By manually splitting a key before passing it to one of the `nnx.Rngs` streams; or
- By using the `nnx.split_rngs` decorator which will automatically split the random state of any `nnx.RngStream`s found in the inputs of the function, and automatically "lower" them once the function call ends.
It is more convenient to use `nnx.split_rngs`, since it works nicely with Flax NNX transforms, so here's one example:
```python
rngs = nnx.Rngs(params=0, dropout=1)
# Split only the 'dropout' stream into 5 per-replica keys while inside `f`;
# the state is "lowered" back to a single key once the call returns.
@nnx.split_rngs(splits=5, only='dropout')
def f(rngs: nnx.Rngs):
    print('Inside:')
    # rngs.dropout() # ValueError: fold_in accepts a single key...
    nnx.display(rngs)
f(rngs)
print('Outside:')
rngs.dropout() # works!
nnx.display(rngs)
```
Inside:
<script> (()=>{ if (customElements.get('treescope-container') === undefined) { class TreescopeContainer extends HTMLElement { constructor() { super(); this.attachShadow({mode: "open"}); this.defns = {}; this.state = {}; } } customElements.define("treescope-container", TreescopeContainer); } if (customElements.get('treescope-run-here') === undefined) { class RunHere extends HTMLElement { constructor() { super() } connectedCallback() { const run = child => { const fn = new Function(child.textContent); child.textContent = ""; fn.call(this); this.remove(); }; const child = this.querySelector("script"); if (child) { run(child); } else { new MutationObserver(()=>{ run(this.querySelector("script")); }).observe(this, {childList: true}); } } } customElements.define("treescope-run-here", RunHere); } })(); </script> <treescope-container class="treescope_out_7d3d2dea7c734634ac6b56df2866284e" ></treescope-container> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_7d3d2dea7c734634ac6b56df2866284e")) .filter((elt) => !elt.dataset.setup) )[0]; root.dataset.setup = 1; const msg = document.createElement("span"); msg.style = "color: #cccccc; font-family: monospace;"; msg.textContent = "(Loading...)"; root.state.loadingMsg = msg; root.shadowRoot.appendChild(msg); root.state.chain = new Promise((resolve, reject) => { const observer = new IntersectionObserver((entries) => { for (const entry of entries) { if (entry.isIntersecting) { resolve(); observer.disconnect(); return; } } }, {rootMargin: "1000px"}); window.setTimeout(() => { observer.observe(root); }, 0); }); root.state.deferring = false; const _insertNode = (node) => { for (let oldScript of node.querySelectorAll("script")) { let newScript = document.createElement("script"); newScript.type = oldScript.type; newScript.textContent = oldScript.textContent; oldScript.parentNode.replaceChild(newScript, oldScript); } if (root.state.loadingMsg) { 
root.state.loadingMsg.remove(); root.state.loadingMsg = null; } root.shadowRoot.appendChild(node); }; root.defns.insertContent = ((contentNode, compressed) => { if (compressed) { root.state.deferring = true; } if (root.state.deferring) { root.state.chain = (async () => { await root.state.chain; if (compressed) { const encoded = contentNode.textContent; const blob = new Blob([ Uint8Array.from(atob(encoded), (m) => m.codePointAt(0)) ]); const reader = blob.stream().pipeThrough( new DecompressionStream("deflate") ).pipeThrough( new TextDecoderStream("utf-8") ).getReader(); const parts = []; while (true) { const step = await reader.read(); if (step.done) { break; } parts.push(step.value); } const tpl = document.createElement('template'); tpl.innerHTML = parts.join(""); _insertNode(tpl.content); } else { _insertNode(contentNode.content); } })(); } else { _insertNode(contentNode.content); } }); </script></treescope-run-here><div style="display:none"> <script type="application/octet-stream" >eNrtWwlT28gS/isTpWqxH1j4vgDXk40vEiBgEghvt9iRNJIHyyMhjW3MVv776xnJt3HILmzIbkxVbEY9fU53fz0m+wEfO6Sicp+QwHA9cuO7Lkd/IM8NKKcuKyOfOJjTIdlDlst4wsJ96ozLqO8yN/CwAeujLuUkIX8pI8+HFYcGPCFZJ/jYg1XmMljWsdGzfXfAzIThOq5fDrfuoeg33QEC4EdN3i0ji3IgY5wwvoc8bJqU2QmHWLyM0kZXCGEk0SXU7sJKSs0JNoxjCjpPt0UfEkMaUJ06lIPmeMDdKW2CMu5TFlAjEdAHEj6N1P2yvxu6Z3/qnoQ/YCDTh7XA8KnHkbDvYAt7nkMNLDy26xqcCOt9gvtblVgsflABh4K8gCOTWCxAB4h3aaDahJ+Dt09ck8TiatcNuCqfg2mEoxuPMGGyZgiuYtP/flv3pIWZ6RB4zAaOsxdKUEHNjusyWI2NXL8XR/M6uJewJB4tLHNqiEWP+Jbr9zEziMrcUSwu4wsCYitPUCLctI8y6TjwoRaKLWmtOoTZvIsODlBSkGxU3Sd84DPwOyJOQGaKdQdMaLbMOuhSiwv9JIH48AV+HpEQg1PFTHek+uRuQAKuMdqX4Wr4uE9ioU/igsfeiiBvEHRDN+6tsXEi4iA0Y4OVT9dBaBEGkru27YRZeSMzB06rJ3iJFeLwHUSGcMCjSArt5O9qj4yF0xVfEQpFxKrh4CB4D8kZ8Y0pU543fTiGykT4lzj4E46/POOV/d11CWDSIZIMD5TF8qEgjnWwlNwfKEkFuQyUAbMZ0G069uttjYk9EysVSLuwYMmKcWO4/T6sy6MiK8hbLF/CiiUSXGYuj5W77pD48TX0EXkgqoE9zzCTyqVyggCUJL5PzBsPyhTpuo5J/HnCgviJqqTUsowox1AWxGYLyLEO1jEw+5H6CjovkN2EDgFqkwYgdDypo8uEqIIcrBOnXNYJpCiZ08qQr7218sJamkiJYhrV3OTe
TBZlssLqjiuK8qMypUdXJZvY7wUE23Ay2OpuGYylpS4OYhXJs7LWD2EAjS4xesSMx9F/4jMdxNb1myb0CxrKplBGW7+mc7qx9T3VW9z0qJL5v0FJEUcheOAHIoCeC62R+Gvk0uD5xMpUkIISMsWDx87480idmcfJPV+VotLgxqJ+wG9cdiOO/5rU2pRKajonsmltqNBfVj+M+LKKwqo+9m0AM6EaMqG//EVpUA+9sT7gHFDEugI0e7zu0CpIWaICRwJCXE/8K0llTWUJYG4dYzgVFDuoM+7rrhOg0wEX9pqoFu6Ed28MiZEYEb0HYDGsvH1oH12o4QDlGIftFAfEnELMtyQpfvZWj3m4W2LApFoi/WUrw/xYY8X6cjfbqY5wcGNAHwDHTvdjiy90j0md3iRzac+iyHnXoyH2Y4mEiTlOYAaBlSgjPr8shAjo5GM2Oc2SLUoFiIDHABcn3AH/NlOmGkBgKDHfLGoiRaI3tO+5Psdshbfuuz3CbsTKrBh93btz2+b8OQnzF1UAEVDMvDEAxpo+YZGqi4MC8FwkfKbqOpc6USNdSFUDO0YMphmA0invXqIvNeBY7J/q+2Ka6K4PECbSxHQ52C60mHfe3QA7DMDpDYx1Fr0HJgtpUpRpAkgcC1w0wj6DxLuZFPZJLCwLG6nMGkIPoOwf09HOjyY5Ub0iJ0VLiaQqy+psfizL6Q/7CdvHJoWwxVKZnEnsHeTCibYJSoJ2eaO7E55wD8RCvZBLKPLyiiorhfXZHL8Q8okxX9RRV55bx8EelKivw7xvr+GPSwjBnZQhicg95LL5KM1z6LFORGRoSDCbb9a5YnHGUBcHF7SBw7KpbGGUWk/4FGGP8nnG2wMx0KE3mu/jsWr5bh+GRGMg5hlV5GGgDrEDk2QsHlcDF0ZImZ1iFBTvathJxRj4xF6qbKE4ik8H76BLCBfTORmhWqfTEdZ0xJqYteVDmGTlMNQZMyP2+3+j/m2QSZ349l4+Pz0xcdvgRGuj6KInm4TCFPhGGQ18JyYaS1k83x25lpXe06F15bM7ZrLUPLa1qiZf7TNNc+Wn6vkI/m01NK2ubXpV+5pm99x3ZrterY0+a9rF59qRdtyu1rSGfd9uve/yoHpMiZ1pHF6l37fzn4cdb0A/HOcuUkdX7fNPx8PL4wf+Ydxo1LYv7d4FrR4mu/TwbHBUN5u3yZa+aw3bpnf3Lt+9u6T0bHDMmt2W9ZFrH/PVEz+rNdqsV88bHwcDtn2euzOC3mhoNZzdu3u77hZt/WjULKZa2i7TznPvff8odb5tPyTPzaR2ZKXsk0Jt1LxN20l3PDgvFPr1VH7Uuiqd2rZHLnrjLGnrDzlD90+bHGv2WftkdIiDcXA2aLevLuuNkfbhzGt/Nj/u7m7bhYvCVYYnrXcf7rRhDni+104K2vFI69sP553twXWH1K/u01beeDjJnrfGuUFVe/dQvfUaXoa2zmr15PXgQ7ZTYFb1fb3VOO5rdLs4rKe7LNUtbOufRle3o5Y/PGx+rLFbq163+fapce04hVypdjSqFrul7PFxs5NpXmt2v527rZ6V+EWTtEr1arXdzBza2fPdz8ZY15oQ00/vdrWzJtbIcc3RWg/1U/ua2/nqB/v0tH1Y7dGzHGlUr2rVhkGTXtd3PQZnw7uuH6YeUr2OVbN4d/yOtUzcCFpW8qTfrJ/kq6Z29+mTh3nQue6bJqaltPVQyn6kt3d5r+/nT93PtQ71m/3hUTPTuexkGvW0UT2zLrZbjus1s41glMP2Xb5Ir0nnxPEuWbXVJuaxTwaXd81aP3XZ8Hudzn0unb+8DEYaaBRH8qaNx7bksd4SLet3+Gea/dh0PWjXs5SU94Oqqm6g2Alz9jfgtfkepisvrCSiCsEe8IbjwQwUCzHX4nUipOCFK9IXyCJMJtYCKA+ChQCZApnhEaYcMTykNuaurwJnT3exb6ojn3JyAfNYbMYLjI14ze6sACfElDkEKm6rQMoF7ROAqrHJ
debKPp/0AU6ubP2yg9LJZFLCASi+gAxicpZaL3cOZioz5cQUOalg4oJPQW9RA1MHCht3kSB+IysbQAAGUAqqMQWfEWwKlLw977vo5u0rd24CcU8u3RavY5bBi1LZDzv0PmXeIOo0iuzJunuvrGUStW94GLZuUEJuXpS72GmVyi8OF2oDxWa6hYdLwFKpWA6+Vxm7V31mO1RXJxzDt3NmB7H93ciieUYr8F7Z9FypABTE/eDgcU/+4xzXkVhirfe2ZnPUFnJZTSTHwdY3Vgd5QxvfQtMh70BRQy8rSHbyA2VuAgQwED0Vk/vyjApPZTZAte3C58j2yqJNfyn+PTJ+oeC/qqC/I+PvEXHxvcPmqAuK7xN5iZWj2FPzQJne5xcyVi6XyaXSxMpm8yRfyhbThVSK4BJOF3Eqv8R33fcASuUWAiJxOgIDRZAtf/yLzfdi8UUTdlajPh3QlAp69GhMpifl7w5nOGN8Pagh3Z8P7VMDrVQ4thdTeP6rG8iwt/fpwl6oWPj5CU7deYWOBzuf4Hagei6nh2/xzfXLntWvH/REv2SBevopNsCn/N/QimrC0O/SjKSLvxJtSfPKGhJJ5wyctpJ6MWtljXyhSPIpoqctaEZmSU+lvrEhDSjjmfSP3YhkmJ7UiuYofzaj53T+19vRlO77NqQfzrUvW6L+dT3+pcbOpxcF03c9d8B/3jK8bLgjNz8W7+jxz3uGf9I9QxTUTTh+juSVAbts2siZOFsslHSSTRZ0HRfzuVzJwIQYGZzU/9pNQ27nx4R4c/HajPBWCF8NwIs0+/EQ3rxLNwC8JbKfYOQVVaqfVw6v4cphEsqNgH6B6Pv3po3OX2hcVjJZyOumkcrms9lSLlskum6apWQ6RVJJUtT/3I3ESsN69Ggs/K3NWg1LFsnkS1bJyhZL2ZyhF61sOmeKFd1I4nxJqbz0uV/37eR6R4gsWOeMNaPsCxa+jUn39+TJ09r9s9zorHbw6H+GgOPQW/SJBlBl6IPkiLrUhKxBlKHZH3iJv/z6U8f0H+rQn0BqQxieAKV+Xpa9zub6b3Pqi92evLQjp5QmHVb+D4QQcss=</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_7d3d2dea7c734634ac6b56df2866284e")) .filter((elt) => !elt.dataset['step0']) )[0]; root.dataset['step0'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
<div style="display:none"> <script type="application/octet-stream" >eNrtPdt24zaS7/4KxpOEUiypRVlXy3ZOdyeZZDa37Z5b1qvjpkhIZjdFakjKluPRB8x/7P7YfMlW4UICIEjJ7s7MnpN4eiKJqCoUqgqoQuHCcz+4tdLsPiQXx36QrkP3/syK4ogcW4F/cbyIk2ufLEiSEP96dLoYDE4HTo8s+v0hGU76497IcYg7cXtj1xkeX56nazeC/yK9y44Xh4DuzucJubUeLPrzzPrdcNxzu92pteMAXrxakSi7vrsh0TXZAgWf+GdRnDU6izj03XlIrqPYJ2c3btq4DN05CS/VkussXi5DQnHOvBvivSN+s2l91pRqdekf1Hr+jLFHebW80E1TbKdEDtrBajkPovUms7L7NUiH0p3H22MjDmcBCln1l+fPKLJajSyRY1ORURYaJC0D8NBdp1j4aZhNoVFM8jpcQeN3Fgd66247z5PEvbfekXtEXiT3ny6zaaNZX82yqEb74NKqqVwpC+BZlIFBeTdB6Cckqi0/vqTMNhrNluVTVahsN634liRgt0G0PDu66lrdWQWb6gcY/uX5I6yf9Aae21t05+P+ou8NR2MydMi8twDL9ydzxymsv1LhyHUu/UZXtGcTRNlpr2kS8GOZ7Pe8ge/2x6PJnPS7o/ncHQ8Hg4nnEuKdut25kcnfjB+Mf9D6f27+wOBBHeCq3x32xqejwWRg9Yb9iXN6Oj4dzI6sq9NR/7Q/Go/7Xet00OuOJqfOZIgFVg/G8clw1Otb1qA76DrdXrdPC0aDXh8gh13LGfX7k2EfHAAtAHBnMuqeTqzeeAD1Dcfj4eyX6XiTBTkdThaTRX886Q+8+XjR7w18fDL3uu5worsdF+V1G/wM2osyN4hIAm7g7ibISBvgPIIVJSs3BF9gmYA7QbSIAWUBT9oLdxWEwNsqjmKKPS15lJWbLIOoPY+zLF6dWd1Ob0BWU7XGdULqq6P95Yp1tsSNluR4BiyAXrPAc8O2GwbLCLgIfD8ESosgzAjwsARqKZSThgNWAFUFGYwrnUHz0ZWd3aARYatLpB9HL9qs5iQBgtQZL2Jvk6IbnseJT5J24vrBJj2zTtfb9yPJvlOmZS8/on9TXt2Z5ay3VhqHgV8U1dTaSQGSJKluL3XaoyxkwRpwFEOeWus4DbIgBrW5c+Bhk8Gzueu9WybxJvLbnGVakYnheQiwQMX1fezY1K68GyQL7gJESW5hlEhFZXeBn92cgfayNjIHRVM6LCzC+O7Mug3SYI6GU27Wz20cb7ZQc5cGZTWthPH/wFbG23Z64/pYdZf+D5tFG9TiD3rwgDfd3KCcr0kNW14YeO98N3Mfo7EwdlGi1yuSpu6S1MaIWUJI6sVr0k42UfuGJBg3ekmw5o7Rdtdr4MFFCTyLvYxk7RRw3JV9eYR/UG2aWYIL68KCMMa6uLQejiwL/i02kYeolk9SkgTQy38mfwJpjBvYaQDAshKSbZLIok+pJ+gsknjVcLN4DkAtq7GiBFcQTPvkRxTl86zRbTangL07qq7mKxADRB1FRYzV+X1GUuDzSfUJIgukjVQicmfxipgXo+Q7880CxnaOwhvIcPZx/U30L+GZVvMojhnLIcmsl2hMK3f96vcvvgDLnOqtWZLsJRhjEG3iTUqBG7duuCEtZoaAiWiihUhx7qbkmvaGlhUvFinJGB/BwmKo1vmF1RUYlgQPzelO+VOGWTzZWSRMiUTk8sJyKojInHVCEi2zG6tt9UqknY5KXBBjIvayNKfIqvzMaphJO82piY/v3OymA3IHmeXEmiUuino+sRzOj6TpRGvQVVHF7Ko7Q6YcYIGRa1onnLxVhWSdWA5HlLXDKlvWVeY8tTLHXNm8rrLeUyvr6ZVx+79KWtayZc1n5k57H0Ho5D1PvDSIbl4RoN7g9UH4Ssf8P3OzD4P1dy7YduLefRdE7BN/cxK/d9fC
LHPqaYaTj9cYn/i8igZgZO6msGC07I+C9KsgAsfQoEV//zszIXBVjW3TeoYI1rnlkHa/wMsbuBWWpVlzDkBppRCZIbHPKLHPchj8owBhvGyUaz3h2H9LMlAK/7WO7xpbBtCyes1mbts7yYrzvq/I0bpQxgAsZ/LUCjTxo8Uw+Vc2VodnZE2c6ZB5q5UCMYpSLQuolbttCL1zhprTipae5xA5l/+mD9btQFvYZ6yLI03zMAFs5CagygC6YWHeYDs5mGharvq8DhCVqOkZ68iqWJ5pgmPOAwQLYy3gmrpM3vWwd0wl0xFYmunkxKDHOF3VBPLhFbv6muAoi9YOfreusz7jNAUt3rMEEaM3eW9tFUYvJFn8NJmqTgjlCnP0eBVEEGMkuQ0HUUMyAVOztaGPi4CyII12rT1UREdBdEVtClOa7lSGjQrkwj9IbRK5wsWWvEAWv8ZA+nWWQMTNfL0au+VORLguxcW8SZbzxscPyc76+GGJ/5nvmm+M7gbj+8RNIaxaPq1GCQKnIxHMau4BotNzekPonwkM0Z2RM+jB9yV+7456+H1eDFIF2qXl9MaF7HljbDoBso0mLUDoJMZWBKo1FObn4Y9uBhOmCGIg0Af8u2/BNJE+yh0lKLmBZhrQgA8+zgUID7Lg2clJU4vQkvgO4DngVTATBpKTe8vIvQVyAJuTeiuT4s4nvrt6O5OfQiXZtoP8vyJe1sDo4i3wDh9By3JaUsxXWOSuZFqMUT9YBhkNnn9MgpWboKquKKz9uwX9s1vw1VmMRvM+/bpYjBZdQr/2PLfb8+hXf9gb9cb066Q/HM19+nXsDYb9ud3iBMnpaOT1aMncm/s99tUZzYm3sAGGiknn6zWBJ77K2WiB/6PYLvFGZMw5m89HnIexvxi7/OlkPBnSr95g3vUH7Gt/4k36OWeL0XzoM3b8uT8fM/YnxHfJIOfsKOfOI2H4GmZRwNJoygq0SQvMTBbBsjRn8WHE+SEiLwFfjHDU9kDJdNrSsvgUJkhhZAv8Yi7DCLbyAV0YBA/OKLRkhdxAaBcGLu34XejdNAaDTzBp0LSnRwZDgqqgI44oN+wL/mtO62mOuhrNUsfidDEIzmmzH1e5nV51W1bxb9ZSChz61CkXfBCMWbM0j1OF3sEMBbgBaLKXTzhtvct7POxXopcSKQGleJsg/d79nk0em3JHL0lcHvgeqz4DOXQKvcGAygM+mxLlJykRROxIInZmsv+tVEuuGKekynqsqrpmTS0EL6bmoMNvQA8wkblXBM00iKoCuRgSCzAfb1kl3ZVFanLTH0pdtZ75/TXHROw8Xj1PKzLWVa+59pNU1/1VqK6QqEkJVd2nvj/Wd1VnT6fTVcQdl48TX+7D+JA3VeMdAXeJWTAt7nlqP32suh+r8Cer/MlKr+1g+wodqf85j8KsLpzJUWfRe3N1nlvdR6uz++tTZ7dO7t0nq/PRZA3qlFVXhC1CwU1Vt/kc//Gxh7HCEgmlDP9UEzBYk+BUMilFdbsn24s2y9oXSFr22oXJQsazw3Yp/fZeEaXaq0TxFR1rZ3LYyUGAnw2wAa6V+L9Fnx8++tQdYUXexCSjPG3TonmblpS4ebwKDjZOOveex5V2iZPYH90kS1/cf4Gg+cScykWecKH8RrPyE1Bbr2WdmktAsIO9EEMO0cfPUzCgvZADDkkx+k/BOMXPwfthwucQ7aWYfco55iC6xXk8CHThgpbkzsqi4E8sx/pIy0cW0VOOnSUbsscKI7J0s+CW5EuI58UKp4BZuUsItze+sgShx2xSvgZXfXOcDo7HdChudtJ1GGQN29ZCPYYkFivPS4bFS0xBA4JerxEW6tTxrhTCM3mMRymvE7pD4joha+Jm6XW8wNXqTRgqftyQ+FPITq2Tk0D3ebyHpxlw07LSwCecB84lY1lKCJZkCIDf0206XDoA21SBc8nRobvcGJ2n8rBiSK4xkWlsWYeJqsJf19XLk42PqZWCatUa4wM6Fpaig8KD
cOUUXqSkLfl7ZYSgBUOiPzXro5ayFy2FSlXOTxrXr7TmXZljv2KSZywwBISziuYaFCy33zjwyGGRnOxiwL9FGh8y0jDuFTlYDIeLt2IpBcMIthT3PPK/ifzAI2lDz2QH7Dl+ScFOKKq2AUkM6Fd8SKCDsFhLAiQLRgITtrAsKLmyMY6xZzSSAa9DEje0ZWOjdXTWm/RGIFBGbWPuqUxSj9gF6/mmGd7MK47qboPUns1UxyeALywOlb4L1td0HLK1pR6J3TcfPxjAd2fqYxL58PCNeTbOKz5/bL0Mz0R1H47VNtWFO5Nwvaqe70P0wZSnL5qVFkTK7FHER7TIbqzc9B3xrXiTNe0nsXkdxvG7zbrErVi/sT791PqI4wbLKE5wgkhHyxrtVPNVbg4z1XQzTzOI0WjfzU2Q8XZN16rtmTZdFJyqqFUzxxKHm+hdFN9FCnsVUYOEJ1dW5ZcOkT12QZPoMbozd9sOopj77OX+rpPTfEoPKNvVo9TGqtRYP1Rre3VW30Mq9LWrmojI1Z1/fmlrk4o4JB2SJHHSsP/EeJHHfpv7EePOLr4LgFXwNg4iMfdQNpg+ByW/XhOvtEp7DcGfe/+nKAvCP7MN3w2fYO6Pbk9uWS4FE7KT/BfbHh5k9z/MU5Lc0r07fBssSVJC0URRowEhbhKQNN/HLNTFn191Z51AQnyFtYP9dcvOh+/E/s5N3uGu+wtLYrfztw1J7l9DOOtlcfI8DBu2vnNbljxrW0NelRAzIRJif9EqUy0IQDoJWcW3pNE0WXJJQB0/SKENEUYeuipb1sMu3ykMrUiz5xFMG5C/rxJ3RaQt4GbaMfsiK0/EMeZt3fNNEPrP+Sbzr4LlJtE079FUiWjzPjtR+bs+lLrKomyXj+RPRE6LmGWwMcuDv+StrnRL/Qs3JcN+ASQ9LMF+wfJECih9JkNSp/Ud+CWdslYg4+DxA8wY+HQPOoeXHsqwW9F3JdDimQx5b4C8N0KmITgA3wCuFcg4agqtQPG0bSCKZDbkKxow42LCN1I0q0ipCmh6VAw62Cmp/Iu9U7KKwPUt2JkBye1RADZC6CcYJK2Xt0RohOlJy3qy7IhBJVGBmd3gRANHyS/ZYL+J0s16HScZxDT0mJ7dLO89p3Z0jZGPWik79KFZWVMWWi65OMQZK9/Xjg+8TZLA2Ks+TMmazki68pREywvlG28LE+yITMK9/qipbRjDCpgT5PUXifycH2T1RPzOt+lT/pWHO72daouzOHPDl3GYau2Ow7/gqSfaTmdWFLDmgFRLczJNAKV2GzfFpQAAOAWwlAfjSUncXgslHQiFIECiX2m8JMAKjpjIijhDblvx/TMgeUJFDHXhtv1IORORt5xRy/EUg8sl9yq+0yQHlvs1CZY3WUl094eK7v4xort/D9Hd14uON674vkd0RdMl2SFis8IUuQvxMEn2GkfUH2ioipU+7Krlow2+BwhJw5AkVa78isopgmBiJh3oOdK59lyYhqXyOlzDj70NHrLueAlxM/JlSPBXw2agdn4miv7s0FOFuJG7MM0Tq4dnHcRWQgX8hko2h6f6MMNLQn1JcXHRk2wzhVdOlS+JQmnD7vmURcMgwbdP04Mm/8F3WStJ3yIBD8Ek7mREFaqhjrSkITzhq2I3u5w/rluHMm62M6JwntVYlG/ahwpL8Ctpb7+0U7+01Ztv1de3eZcWi9kGF3qc4/k8NdWYF6q7DHJEd1uDSAtLDJsVdcHkW16YKKmB706n+4dRILh/Hndi68IxL8bzgJDuZv9OkKw5q1T8mfhuYa2tQoStQigtq9txBs3poc1ReMKHJ3ga71lxyG5vZpsvfuGJH4PpBJFBhY+xM3aSaCWfutnXqG5ncIBGajTcxuZQDSOn7NeejMqR4agGBOPf6MchpRwtHYBhhqiNwMrhiZzEyYUc8csD8Wf1A/VU55BPCQp/IoUjaIFsbOpqGyTuC/j7eng+jBWu38Iz0iGB0TnBE0gP0mIIjwoY
bP60Elz5Kk0QAJ8Gzle5vGZloxNZwwslGuYDgPU5XYyxzqyPPiqKK+gZtqXL+QDNtzxiu/pRaRFOsVHFBs1fWTDl37qRR17GmyiTbe+pQRWPiIRtQXxzIik3D3DwaRHnUDAREKmwlVatBGyq+RYjgszH5YUUqOHab2lRVfupyqbcDLWZmuxU3tt7eYeuqXCn00KZAZVGhdCa9fgVq8jazznEW++q14F32nCr2c5FLg7j5gKttlJdst+wjspbMJRxYFtttk+ZRNH5T43J4hB1clHMaSrttdJat/XWiuLcqra6rbNV5cd2j51u66y00ka3RhvdVtsYCgkt1CylZh2y0TwPNhclfN3qRrmtM8qjyhqqnLX6YRjCO2EQkb/wSYkzrQFMsyR+RypW5qsov3TXCJz+beMmZC/0H2IaatkrXK21f1EXe1Tr2HhjTRtI2Kp4fu6rjSFZyzD67YPJSz+TbdCZWZ9/jmEqbjSowZDG1SqUKpdq8qNOpR91fvOjv/nRGj96+eH86NFhztOpcJ7Ob87zV+08Lz+A86T/lZNhxfUoJFPyFI2I3Inv6uYjqQAduinJ0eTr0oKFqkxbQemRWTLcP7I/MyZn7kprrnpGMJcE22fwY7AlIV5ecF91ZoHOk6XrhWjygSZghCPDKzucqeG4vIx1X4GlCgQ3LeNdNIjPv+LTS7kjFzn+4lKkGUAeqcPtpeKipOy2hGW42kbOZdZHYeIUXLoJMynn/cEyK2KxlxEp8v/QQQ9Kp1icNzUlTulNH5m+UZB2ahYFrR3L+eVRpYsRAjV/U0gfx/lLNllqt8sNr1tcymEos9b+YZ4n9WJ/E25SFZ5e5ZTj0F8ynt6+4ucngt60dHY1TnztTi4Z8Rnnu7TnvrhUDAmcGNZ8REfBct4/GNJlsVikDpcGs5bH31/eRKDnySKEn0YTuX+6idwfZCJ741XdRmSEWiMpt/BpRqIg/nqMRFyapudHW5Yxzdni7BRJy1ntAmVxvyXm86VLf4oNVZi6jPyXeKlw5TKgH9zaTfVixCCiVKV1OX1FhVX8ePqUsoLGVvzEAl9Bmt7Gma9G2tItp/bUCJqvRB4AC47lxyRekyS7b9jByl2SdkLQOoNoifeprDF4gEb4dtNMAC8K/oreE0zTAeKm4IrqssSNUtx6/kMSLFkGIYvXMIgsPgiD+ym02+I+0vbPcbxCCk5V0zREvHysTe9FTiG2oZj99TavlilUVdYbzw29xq2bNLR6Me7++EFeaN6tt+KQoEwp1+VhpBh4BS1xWy09V4BCgwDDFkJj+AdrSQZX5ESvTUbhmFi1y8vXutD5lb/fQl15qz9+KLmOHb3PsTMgKxypob2iwRX0/hivJXLbg8iV9kGE4bd427m8PYSvS9HnP2onIbh3whX8FXQMCkOTcnTxnV6crqzA0yfCgvAurNfYk1D6aykjJ0Oxi5Nf0Mu3Ea7bcaAF0hXKRiy0YXYTkjSY2VQ0eIubQxd2G93OEIfnshLx5RK8QO0STXE6ZffL7zqi7dEOWdePuyZJyD1C3OBsFFkom2N1J+TzHqn/6frimmKEnG73E7C3jx8CtD9qfmY8Pp5Ird3HiR78FjlOA3OYR33J3ISYhtACEysI+xwvZEeZSSOCZTR0buX5nLjk9NTyvIex/Uly4e4X3431LzYpZdRM4gzI47DZnnR9srSNtA3jMreoBId9YzWJ4j8OtjiTpbatp1v+oy24HJrXmrDBOFWZHGCdR2oMDqb67ZNNokDf27sk0ANMqAQ9L0b/aiBhBXUwMPaL1SUb9V0NqZhubpTtmFolGrFilZopy72/oPzUIcJAQXLGDOaBAp1J7WjhwSt8RGnvmlV7CGnq5kW8JWn1HOC9ZhlFBR36ApRvgzTrQMQCoW60iFGU/K0MeeT0mAQTB2OXHtZuNOXzKLYv+TVnSjN8bS1CYv2QZuNbSmzpUmetLk6j8ea/o48fij6yu3qjbQHC92GUWKuqlL5IQ+qVDJnn
YS2bvVvD1krFZp6ylDgA22VWXexuVXE7GkCakbW8OmIWBRMnQ9knNXtm2ZqcmN08UU4MOZcTfU2KrRVWi4kDVIlJFFeKiQPoYsofl2dYGK/2u94NXraScgdReIg6+TKiZdl5mHqFkpcw3uB7S+jbK8htpgTsJVkAQAeauiQZe1QkTzTbqgY8Ute6qzc+8xsndEpK95YySxUrCLrKYez5Eu9xwIGIwHApbKNlEorer56CXB50MQ8UAoUPOORyisX4I5506Fa84zz+WLEDcselo+x48zcMtXiuf9SyHLyLRl5bUDn/Yc3dt8J3XuchLMeUhDxgqsRzS8qvD5dP2mAYVR4ADuz+BbJxAJCKBQ/2qFzGer/tGEpox7d7hiLe5U1Y0AGeZ1kSzMFxN2yqzZasRi1tt4jxJVsfLh3IKRq9tAZSP4EXUGLk/q8Y35vV1onw4kIA5nL7hM7d6aHkRdws5yzyNzRVdqZywHWQQARhJpHvYVDCdoqn9lTnQXPaH44HHHjKPOBVWBrQ41Jz5amGRujgzFyZEo+28vc3fcCstaBZFU7mAGoqTrphffsdfaUbOAltdMNkD26zoisDqC/wOS/wbV5BtHwZBsDMK+VEsLRAFMEU6JUQ18cPghKflrRz0jTPAoJ6Y1w/EvN9Kb6v2DeSh7MiJSfhqAtCHKRD5wPaZCnn2gAv51XN0yGUtnxIG1tBY1ouQ43UHmGyBZ8cX1jfZSE69kS7d2sfu1pMr3LPkhINmpBoWrQIM+2NdrczwFjL6fTISrsf4n1aaKl2wnO1JWMphMCtBY9ZeTfyFXuGnStPJt1VkyT6HhTFzhebUDRe0K6z5GIuu8+OOdEcATf/7JWnxI16xmqRnRUv7RBQtMktURn9pbyLI4vXJix4XCDBDwWHyvWsOGubY9GCAo/+VDDFCyXLqKykwGW/JeSd4dUsdUsMkh3QNG/bUkQiD0d1iwul1BZ7FybNtuUV4DKKRB9+KrmsCvrS0KlKUB46E77ys49YvlogU+MJG4kce1LQ22ln2NExATUMX+gGo7TYBSNFbdUzAvXwYQ21/LKm0GeLOBJ5dnMKPNbmb7gGRd/lkaTspHxDj1rlbUpVAtuzcAjyoxVJ9xHJ3lNzg3dB5Md3ndRL4jB8oYy8D7RrmZqF7yujzW5Zc3Lj3gb4pkYb71Bxo8ze6XXI4TKt55soi/8ckLvGgwEdaIax9w6eRMRNIOqQCIq7XPClTPZXwRZ0YzGStnLVRb4SWFb0Kt6k9FYRVLY+cxZXoOD+NFCUvFENJ7RMAH9tWcWPn5S3LwhMw8nJPDBlSuRv7KTpnjgi0jVyWjxXBVh1I2x+iur6VrsrLd+0kLfskEqpMqrZYytQir/eE3jSzXb0IGVF5rxUR6aNY/uroBvF91eB6lDT4Obr55S6qwVcc9VGc3qAIeiilqYpMEgCxFAvw7GT+CI9nlNmpsken+QEzMhfi/mChs2fm9DpxUJFd4DRQObj0upan36qiEwGPtGAWRAmcayGipq0+IKnrVx6qcHwOFmLIQ9bMFKbJZqu2Y4hjDOyaeTgoKpPqqquaqwikV2Fpn7KNfW1CNNrVPVTrqocWtbV14a4XuON9tt6XYlVovdU1k/vo6zy8PIIXf30CF0VS2J25dbxg9xXzEIV3Xsd6GIOcjB7GaEz9n0+9JXYovxETyrwy+60vJseAZ7uHK8Lr8gqnerXKl7flq+BrOfCfCNAngpRPY/9EguIf4ZpM80a91yKqjemdE2C6e8wZ2Wpr80xH0VQT2Lsk0nFAY4Smn45uPlmgYNs9e6GkNBkq2J8dMMM6i1PcX0SZu5PuMjCcJsd9qTgCrH5HdNfkIULhqMnR4oX0pZmE028gL0oZ7S1l1/UTkFkmZQBcT+sxi6fs4DXQLhvge9wz4REzRTB7FZGVAhN9QOUZY4urf6+1rUvrL5uf0qt5xXs4i0YpWswVHaVnyfqySfTEaHKdpxb7b0NOdnXkMuqhgTRoxrS
3t8Qw+KITKK0c51PVJVqyhPTDzE1NU9OTcfgn5btLc9wH/TkER0BPIr7k5xqwKwRtFrtLJ9pg2tDU4UCraSOWH5Lquyveob5Q9ZmmlgXhmG4HnPtpmlwS87Y6yJ2ShretEirq610ASbbDv1DRL7g7wD5cNdfqrebyBcdGrbOsewMBxJPDHB5AkoGZQ+n6vUw5Su8Dr/Eq3SNVynbo13bZcgGIQuZvtek+mKu8r0s9HZ6dgk8DyLwrDsYAS6snklvbQHD4N5bO5mXH2x4wh60yiMEjueu7Sqo4vRALdiK2iTTud3tjAZkVQkrRb5BhLcHtNUZeClTad6obgbGIHsR0sOINrtOthq2/rCCQWKVu7B1SGxVPsO3e52BfVBW8aADDYY2bzKskCppvdU2fB/VnU3JD5konaDWPmSQKuOQYSq2Lx5py1h/xFG/dCkylnwZlub0B1p8VasFWTnI4o/0tvfknY8lMOPWeQv9LXYB8Sp7vov+tDMARyMaK04678jqTSX92q2fJWiRpGkP6nguZsOOUwenbJ71SCRf7FGuWrF3xwSoTrt0h7DT7YF5gF+PSbD2/tusgueM/u0mIRy/ahVVmyByXZbvBOdnDB/43eNaTHRWfsTCNMP13Gemhwi9mx7tmjQKy24CGgK8iuPs+9gnjWbnJk4zmLUuorQjwnFxIxx8nZ4/g8A4WGeX58+yhJDUAxfQTjZR+4Yk5PIct8dadM/GxfEiDn18NcB1BJSPL8+pnC7P6cqZhZHDxbF3Q7x30IZjI851Fi+XIaI+o0gqeXrbwLU7n8Ns+riu6NMwm751tx0qCGtDr49uDFpN6+qf//hfCGn++Y//6c6sn0kSnw2geUDnMv/gPMvE725IdE228MQnvlYxnhoFs/CvPVQzqKq2HApxdxcvEyK+zo1DQ9Zuzj++fCX8LVNPp9MRfBtVQ/XGBQ/GGAYeNcxnsZeRrJ0Cjrs6vszfhsOMm1oI+4UGMs0THQt6aXitAU0ZWAd4eB3Hkbgzn5PI1jyVgGOf8qKAhp2R1RqDB6RDkgQEBmEabh4vAkp25/kfXv/wfYdOixtIsMNPier0WNvtptJzgRqi5O8MsFSr75huvGfsFtflY8/d0yVEW4QalSYd1+jlbRqD/Twc456n4zPr+PVNfIfKxhczLCGW5u+9g5lFBMMM8S1251Xasb7GYPIZzRzSDRvshnSaRDxu8W2i7HpypPu8+u8iB6fXryM07T74WLvonFL6zy9fwP8plnSyGooejt0ufDgwQTgujsPBk6uHY9wJjNgAAaW0x9GfAGV1z6wBPqXbgOEpdNdjsHr4NtjN4Pu9QgufaKcIxGP1bhLKkWiRmL1gPeDf4BHOYfCHuy1+8FTmMZ/wwpP8xtgCKL85tngkXoJKGdkxuVUlUpk8BF/8DVLHAocKBXVxjPPvWrArFYQeZqeqBLEUoi7eCYPyFA+EePfVMdvPxn9HFm5LVuBY4W62k7sN7yLwFQbES30QZh8Hjv3a8Px36wtyCxZxZr388U9WlxP7dJlNzbUwBnBYptEAdFc26QN9xhE5tgIfnVRynffiRbc7Gs59z+kP+/3JoD8m87nvT7o9hzhdMp5z9g52UA39xcjsRQQXzG81TZxTlt97yGe777/x6dHfYpQanS4Gg9OB0yOLfn9IhpP+uDdyHOJO3N7YdYa0rwtg0ht4bm/RnY/7i743HI3J0CHz3gIA/cnccRTgfs8b+G5/PJrMSb87ms/d8XAwmHguId6p250rwJMFOR1OFpNFfzzpD7z5eNHvDXx8Mve67nCiAO/VyWzK3VjsJeC5jI5M+BoULAfi4Jp3Oe7ksr/G4uMcdZG4yxXzo1qU/wX/+RWHKFf3MozpzFjQUOJHAdTxEIryS28fBCqmDXq5arUNeuz9ArRQrgl+8lnIi/tvoDKBTd8Hyt5MEW8SDyLQ22qh/A57ybF1Ymno/JBHQqBXeeQvQXbTyKl1FkGSirppSwGhKC089S4X
k0JIkd0+x/x/X7F7Bg==</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_7d3d2dea7c734634ac6b56df2866284e")) .filter((elt) => !elt.dataset['step1']) )[0]; root.dataset['step1'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
Outside:
<script> (()=>{ if (customElements.get('treescope-container') === undefined) { class TreescopeContainer extends HTMLElement { constructor() { super(); this.attachShadow({mode: "open"}); this.defns = {}; this.state = {}; } } customElements.define("treescope-container", TreescopeContainer); } if (customElements.get('treescope-run-here') === undefined) { class RunHere extends HTMLElement { constructor() { super() } connectedCallback() { const run = child => { const fn = new Function(child.textContent); child.textContent = ""; fn.call(this); this.remove(); }; const child = this.querySelector("script"); if (child) { run(child); } else { new MutationObserver(()=>{ run(this.querySelector("script")); }).observe(this, {childList: true}); } } } customElements.define("treescope-run-here", RunHere); } })(); </script> <treescope-container class="treescope_out_fae417af9b7e40d5baaf356506dd7e3d" ></treescope-container> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_fae417af9b7e40d5baaf356506dd7e3d")) .filter((elt) => !elt.dataset.setup) )[0]; root.dataset.setup = 1; const msg = document.createElement("span"); msg.style = "color: #cccccc; font-family: monospace;"; msg.textContent = "(Loading...)"; root.state.loadingMsg = msg; root.shadowRoot.appendChild(msg); root.state.chain = new Promise((resolve, reject) => { const observer = new IntersectionObserver((entries) => { for (const entry of entries) { if (entry.isIntersecting) { resolve(); observer.disconnect(); return; } } }, {rootMargin: "1000px"}); window.setTimeout(() => { observer.observe(root); }, 0); }); root.state.deferring = false; const _insertNode = (node) => { for (let oldScript of node.querySelectorAll("script")) { let newScript = document.createElement("script"); newScript.type = oldScript.type; newScript.textContent = oldScript.textContent; oldScript.parentNode.replaceChild(newScript, oldScript); } if (root.state.loadingMsg) { 
root.state.loadingMsg.remove(); root.state.loadingMsg = null; } root.shadowRoot.appendChild(node); }; root.defns.insertContent = ((contentNode, compressed) => { if (compressed) { root.state.deferring = true; } if (root.state.deferring) { root.state.chain = (async () => { await root.state.chain; if (compressed) { const encoded = contentNode.textContent; const blob = new Blob([ Uint8Array.from(atob(encoded), (m) => m.codePointAt(0)) ]); const reader = blob.stream().pipeThrough( new DecompressionStream("deflate") ).pipeThrough( new TextDecoderStream("utf-8") ).getReader(); const parts = []; while (true) { const step = await reader.read(); if (step.done) { break; } parts.push(step.value); } const tpl = document.createElement('template'); tpl.innerHTML = parts.join(""); _insertNode(tpl.content); } else { _insertNode(contentNode.content); } })(); } else { _insertNode(contentNode.content); } }); </script></treescope-run-here><div style="display:none"> <script type="application/octet-stream" >eNrtWQtT20gS/isTpWqxDyz8BhtwnWz8ggABk0C43fKNpJE8WB6J0djGbPHfr0eS38Yht7Ahm0AVmJmefk53fz3s+2LkkJIqOCG+4XqkzV1XoD+R5/pUUJcVEScOFnRA9pDlMpGwcI86oyLqucz1PWzA+rBDBUkEfxSRx2HFob5IBKwTYuTBKnMZLOvY6Nrc7TMzYbiOy4vh0T0U/aU7QAD8qCk6RWRRAWRMECb2kIdNkzI74RBLFFHa6EghjCQ6hNodWEmpOcmGCUxB58mx6ENiQH2qU4cK0Bz3hTuhTVAmOGU+NRI+fSDhbqTu4/526J79iXsSvM9AJoc13+DUE0jad7CBPc+hBpYe23YNQaT1nODeRikWix+UwKEgzxfIJBbz0QESHeqrNhEX4O1T1ySxuNpxfaEG+2AaEajtESZN1gzJVR76zx+rdhqYmQ6BbdZ3nL1QggpqtlyXwWps6PJuHM3q4F7BktyaWxbUkIse4ZbLe5gZRGXuMBYP4gsCYks7KBEe2keZdBz4UAvFFrRWHcJs0UEHBygpSdaqzonocwZ+R8TxyVSxTp9JzRZZ+x1qCalfQCA/PML3ExJicKuY6Q5VTu76xBcao70gXDWOeyQW+iQueewtCfL6fid0494KG8ciDkIz1lj5fB2kFmEghWvbTpiV7SBz4LZ6kpdcIY7YQmQAFzyKpNQu+FvtkpF0usIVqVBErBoO9v0PkJwR35gy4dnuwTVUxsIf4+BPuP7BHS/tb69KAJMOUMDwQJkvHwoSWAdLyf2BklSQy0AZMJsB3bprv9rWmDwztlKBtAsLVlAx2r7MXTu4KUEBeY8zqVwKKsGjZEk4J2bbg6JCOq5jEj5LuCO/o5oW8CwiKjAksTxsATnWQRcGSj5RDR/RPFk7VB+oTeqD0NG46i0SohJysE6cYlEnkFBkRisj+NpbKS+sfImULH1RhUzuTWVRFtRD3XFlCX1SZscdEL4s2cS86xNsQxzZ8uki
c0VsYamD/Vgp4Fla6YfgTNHoEKNLzHgc/Ss+1UEeXX1oTD+nYVDCi2jj93RONza+p3rzh55UMv83KCnjKAX3uS8D6LnQyAhfIZf6Lyc2SIVAUCJISP+pO/4yUqfmCXIvlqWo1G9blPui7bK2vP4rUmtdKqnpnMymlaFCf1n9MOKLKkqrepjbAD1CNYKEfvyL0qAeeiO9LwT0/FUFaLq96tIqSFmgAkcCnltN/DtJZU1lAQ5unGC4FRQ7qDXq6a7jo7O+kPaaqBKehN/eCBIjMSR6F6BdWHl7UOw7UMMBeDEBxyn2iTkBhO9JUn7vLV/z8HSA2JJqgfQWrQzzY4UVq8vd9KQ6xH7bgD4Ajp2cx5aY6x7jOr1O5sKZeZGzrkcDzGOJhIkFTmAGgQ0wQXx2WQqRQIdjNr7NAVuU8hEBjwGKTbh98W2mTDSAwFBivpvXJBCJ3tGe53KB2RJvnbtdwtpyZVqMvu7dmWMz/hyH+VGVsAEUM9sGgE6TExapOg/rgec84QtV15nUiRrpXKoa2DFiMHsA8E159wFWUn2B5fmJvq+mie5ygDCRJqYrwHapxazz7vrYYQAl2zCEWfQemMylyW6QJoCbscRFQ8wZJF57XNjHsbAsbKQyKwg9AJ5/TgYxHs1dsnpFToqWEkk1KKvTaa8YzGqYJ2yOTQphi6UyOZPYW8iFG20TlATt8kZnK7zhHoiFehEsocjLS6osFdYXc/xcyMfGPKrDTnBvHQd7UKK+DvO+vYY/LSEEd4GMgIjcQy6bT9K8hB6rRESGhgTTaWSVK+YnAnV+zEBrOCyayuYGn9WEzxH2JJ8XnPXl+IXeaZzjkWpxtwcjndHvyXFM5qGvDrADc18sHld9Fwa+IDvl4CZ/q2EnlUPbM3upsoHiKD4Zk/0OIULO0mSIKq1WS1rTkmtyMg42Ye4MhqHWiBmx//476t8GGdeJb+/ls9MTk28DTrQ2jJ5lskkoTD43iqjPnZhsLEW5vz10LSu9p0Pryme3zGShfmJrZS34ap5rmht8Kl8M4WejpmlVbd1Xuadpdtc9NpvVcmX4RdMuv1SOtJNmuaLV7Ptm40NH+OUTSuxM7fA6/aGZ/zJoeX368SR3mTq6bl58PhlcnTyIj6NarbJ5ZXcvafkw2aGH5/2jqlm/TTb0bWvQNL2743zn7orS8/4Jq3ca1iehfcqXT3lWqzVZt5o3PvX7bPMid2f43eHAqjnbd/d21d219aNhfTfV0LaZdpH7wPlR6mLTfkhemEntyErZpzuVYf02bSfdUf9iZ6dXTeWHjevCmW175LI7ypKm/pAzdH5WF1izz5unw0Psj/zzfrN5fVWtDbWP517zi/lpe3vT3rncuc6IpHX88U4b5IDnB+10RzsZaj374aK12b9pker1fdrKGw+n2YvGKNcva8cP5Vuv5mVo47xSTd70P2ZbO8wqf6g2aic9jW7uDqrpDkt1djb1z8Pr22GDDw7rnyrs1qpWbbF5Ztw4zk6uUDkalnc7hezJSb2Vqd9odq+Zuy2fF8RlnTQK1XK5Wc8c2tmL7S/GSNfqENPPx9vaeR1r5KTiaI2H6pl9I+x8+aN9dtY8LHfpeY7UyteVcs2gSa/DXY/B3fBuqoeph1S3ZVUs0Rkds4aJa37DSp726tXTfNnU7j5/9rDwWzc908S0kLYeCtlP9PYu7/V4/sz9UmlRXu8NjuqZ1lUrU6umjfK5dbnZcFyvnq35wxy27/K79Ia0Th3vipUbTWKecNK/uqtXeqmrGu+2Wve5dP7qyh9qoFEcBe9iIrYRXOsN2bL+Cz8m2Y9N14N2PU3J4DVPVdU1FFthzv4BvNa/mnSC56UAUYVgD3jD9WAGioWYa/7xD1Lw0pXpC2QRJpNrPpQHyUKCTInM8BBTgRgeUBsLl6vA2dNdzE11yKkglzCPxaa8wNiI1/SFCXBCTJlBoPJtCaRc0h4BqBobPz4uneOkB3By6ejjFkonk8kADkDxBWQQC2ap
1XJnYKYyVU5OkeMKJp/jFPQe1TB1oLAJF0nid0FlAwjAAEpBNabgM4JNiZI3Z30XvZN95YVMIu7xE9n8c8wieFFK+2GH3qfM60edRgl6su7eKyuZRO0bNsPWDUoEh+flzndapfSbI6TaQLGebm5zAVgqJcvB9ypj9ypntkN1dcwx/HXBbD+2vx1ZNMtoCd4r6/aVEkBB3PMPnvbkP85xrQBLrPTexnSO2kAuq8jkONj4xuoQvKfGN9BkyDtQ1NDLCgo6+YEyMwECGIh25eS+OKPCbpANUG078DmyvTRv01+Kf5eMXin4byrox2T0PSIu/0uwPuqS4vtEPsDKUeypeaBM3vOtfNZMJXeS6dROOpvL5go4Y+QzmWQuaWV2Mji1wHfV/wGU0i0EJMDpCAyUQbb46Ddb7MXi8yZsLUd9MqApJfTk1RhPT8rfHc5wxvh6UEO6/z+0zw20UhLYnk/h2X/dQIa9v0/v7IWKhZ+f4dStN+h4sPMZbgeql3J6+Cu+vn7Z0/r1g97o1yxQz7/FBvhU/AytqCIN/S7NKHDxV6Id0LyxhmRmsZHNp4mR1Y1sChd2c4VM1sC7RlLX06lC5hsbUp8ykUn/2I0oCNOzWtEM5a9m9JLO/3o7mtB934b0w7n2dUvUT9fjX2vsfH5RMLnruX3x65XhdcMdufmpeEfbv94Z/knvDFFQ1+H4GZI3BuwMnC6Y+m7WzONCFhfMgmnl0zm9kLOIXsjmrZ/xpWEmWuvx3RLhm4F3kWY/Hr6bdekaeLdA9guKvKE69evB4S08OIxDuRbOzxG9sc5UwNldoqfzWYuksrpBcCGnk10jv5snVj6fKvxMTw5zcXpeT3qLjw4/fFf66rPDEuGvd4c3Val+Nqe+2iD62o6cUJp0UPofWORgmA==</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_fae417af9b7e40d5baaf356506dd7e3d")) .filter((elt) => !elt.dataset['step0']) )[0]; root.dataset['step0'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
<div style="display:none"> <script type="application/octet-stream" >eNrtVstu2zAQvPcrCAUIpCZRZElWLNsx0DYtkEsP7aGHIDAocmWzYUiVotMYRf69S1l+1nkdeinqgyVqZ/YxHFoecnFHajuXcO5xUVeSzvtEaQUeEfzcK7UZcyjBGODjMkt5JzqL4s5ZnHbTbk4TliVJ1I3K5CyhHW80rCuq8NvlG4VMS6TTojBwR36RZtknB1kvplE0IA8tgOnbW1B2/HMKagz3mIED7ytt/bDUktNCwlhpDv0prf2RpAXI0XZkbPVkIqHh9NkU2A3wICBvg42qtPlg1eHpor2mV8IkrWs350Y6nGNRZShUNbPEzitUp8lb6HtvL6dtAYOL8qPhaUPeLrOpiLcvtFeLHWQTQ7ikVe2Ch9IOcKiF8ru4dY4D0oK+0/vwnTF0Tm5g7silmR9O7MAPni4zWZfZubRqPVF8KybwmbJoKDYVkhtQT8a9UdOs7wfHhDdbsd12QPQdGPStUJP+m6uIRNePtLl9QeOPhq9wP08pS7MYWFqwtEPzXjdPUkZ7LCqKuJMna/c/uuGu65X6frScZyaUTeJgn8CvbZLROOdFL+UZzVOa85yXWdwt8m4JRZ5m5d4m/5v/3zF/5++YP6dpD4o4S0vooP+B5t0CeizrZVBmWSd/tfnjl5rfGoCa6QpOzEydTMG4X25mRLW0Jq0qKRi1QqtTzSzYkxo59NYbMa1qRFEzAXvJa3JOrryXv8yOiffys78JfvYMboKfFfZ6sBiDa2a0tjiEnYo6xJG+4PIznj4/GJB2VNSqBbXw8McMzPwrSGBWG98LV3KOXdhbUUtDJ+7wLagzdxsylNHCRbv81CL+LPdBommQuMwR4paA4h+cgf0lKGQO1fRrzQwCzIIOI74ESwSS8Q+BIMP1boUS1MRO8enRkXuPO9wiuFkJlx8luNv380sstmRfiWss4Di1nhkGF2j1R0U5cFb3yBHZoS+WoQE8Ggy+CTv1V9nCUph6WbuZFAnrqIFbPJhOqoeVTFuJtrQLnPUbT6Pr9xj+N8tPWjk=</script> <treescope-run-here><script type="application/octet-stream"> const root = ( Array.from(document.getElementsByClassName( "treescope_out_fae417af9b7e40d5baaf356506dd7e3d")) .filter((elt) => !elt.dataset['step1']) )[0]; root.dataset['step1'] = 1; root.defns.insertContent( this.parentNode.querySelector('script[type="application/octet-stream"]'), true ); this.parentNode.remove(); </script></treescope-run-here> </div>
> **Note:** `nnx.split_rngs` allows passing an NNX [`Filter`](https://flax.readthedocs.io/en/latest/guides/filters_guide.html) to the `only` keyword argument in order to select the `nnx.RngStream`s that should be split when inside the function. In such a case, you only need to split the `dropout` PRNG key stream.
## Transforms
As stated before, in Flax NNX the random state is just another type of state. This means that there is nothing special about it when it comes to Flax NNX transforms, which means that you should be able to use the Flax NNX state handling APIs of each transform to get the results you want.
In this section, you will go through two examples of using the random state in Flax NNX transforms - one with `nnx.pmap`, where you will learn how to split the PRNG state, and another one with `nnx.scan`, where you will freeze the PRNG state.
### Data parallel dropout
In the first example, you'll explore how to use `nnx.pmap` to call the `nnx.Model` in a data parallel context.
- Since the `nnx.Model` uses `nnx.Dropout`, you'll need to split the random state of the `dropout` to ensure that each replica gets different dropout masks.
- `nnx.StateAxes` is passed to `in_axes` to specify that the `model`'s `dropout` PRNG key stream will be parallelized across axis `0`, and the rest of its state will be replicated.
- `nnx.split_rngs` is used to split the keys of the `dropout` PRNG key streams into N unique keys, one for each replica.
```python
model = Model(nnx.Rngs(params=0, dropout=1))
num_devices = jax.local_device_count()
# One leading batch entry per device.
x = jnp.ones((num_devices, 16, 20))
# Shard the 'dropout' RNG state over axis 0; replicate all other model state.
state_axes = nnx.StateAxes({'dropout': 0, ...: None})
# Split the 'dropout' key into one unique key per device before pmap-ing.
@nnx.split_rngs(splits=num_devices, only='dropout')
@nnx.pmap(in_axes=(state_axes, 0), out_axes=0)
def forward(model: Model, x: jnp.ndarray):
    return model(x)
y = forward(model, x)
print(y.shape)
```
(1, 16, 10)
### Recurrent dropout
Next, let's explore how to implement an `RNNCell` that uses a recurrent dropout. To do this:
- First, you will create an `nnx.Dropout` layer that will sample PRNG keys from a custom `recurrent_dropout` stream.
- You will apply dropout (`drop`) to the hidden state `h` of the `RNNCell`.
- Then, define an `initial_state` function to create the initial state of the `RNNCell`.
- Finally, instantiate `RNNCell`.
```python
class Count(nnx.Variable): pass
class RNNCell(nnx.Module):
    def __init__(self, din, dout, rngs):
        # Linear maps the concatenated [hidden, input] vector back to dout.
        self.linear = nnx.Linear(dout + din, dout, rngs=rngs)
        # Dropout draws keys from the custom 'recurrent_dropout' stream
        # instead of the default 'dropout' stream.
        self.drop = nnx.Dropout(0.1, rngs=rngs, rng_collection='recurrent_dropout')
        self.dout = dout
        # Number of times __call__ has been invoked.
        self.count = Count(jnp.array(0, jnp.uint32))
    def __call__(self, h, x) -> tuple[jax.Array, jax.Array]:
        h = self.drop(h) # Recurrent dropout.
        y = nnx.relu(self.linear(jnp.concatenate([h, x], axis=-1)))
        self.count += 1
        # Returned twice: once as the next carry, once as this step's output.
        return y, y
    def initial_state(self, batch_size: int):
        # All-zeros hidden state of shape (batch_size, dout).
        return jnp.zeros((batch_size, self.dout))
cell = RNNCell(8, 16, nnx.Rngs(params=0, recurrent_dropout=1))
```
Next, you will use `nnx.scan` over an `unroll` function to implement the `rnn_forward` operation:
- The key ingredient of recurrent dropout is to apply the same dropout mask across all time steps. Therefore, to achieve this you will pass `nnx.StateAxes` to `nnx.scan`'s `in_axes`, specifying that the `cell`'s `recurrent_dropout` PRNG stream will be broadcast, and the rest of the `RNNCell`'s state will be carried over.
- Also, the hidden state `h` will be the `nnx.scan`'s `Carry` variable, and the sequence `x` will be `scan`ned over its axis `1`.
```python
@nnx.jit
def rnn_forward(cell: RNNCell, x: jax.Array):
h = cell.initial_state(batch_size=x.shape[0])
# Broadcast the 'recurrent_dropout' PRNG state to have the same mask on every step.
state_axes = nnx.StateAxes({'recurrent_dropout': None, ...: nnx.Carry})
@nnx.scan(in_axes=(state_axes, nnx.Carry, 1), out_axes=(nnx.Carry, 1))
def unroll(cell: RNNCell, h, x) -> tuple[jax.Array, jax.Array]:
h, y = cell(h, x)
return h, y
h, y = unroll(cell, h, x)
return y
x = jnp.ones((4, 20, 8))
y = rnn_forward(cell, x)
print(f'{y.shape = }')
print(f'{cell.count.value = }')
```
y.shape = (4, 20, 16)
cell.count.value = Array(20, dtype=uint32)
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@docs_nnx@guides@randomness.ipynb@.PATH_END.py
|
{
"filename": "_weightsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/textfont/_weightsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``weightsrc`` property of ``scattergeo.textfont``."""

    def __init__(
        self, plotly_name="weightsrc", parent_name="scattergeo.textfont", **kwargs
    ):
        # Default the edit type unless the caller overrides it, then forward
        # everything to the generic source-attribute validator.
        kwargs.setdefault("edit_type", "none")
        super(WeightsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@textfont@_weightsrc.py@.PATH_END.py
|
{
"filename": "ModelReaderTesting_GITMtiming.ipynb",
"repo_name": "nasa/Kamodo",
"repo_path": "Kamodo_extracted/Kamodo-master/Validation/Notebooks/ModelReaderTesting_GITMtiming.ipynb",
"type": "Jupyter Notebook"
}
|
# Demo notebook for Model Reader
```python
# time the file conversion
model, file_dir = 'GITM', 'D:/GITM/jasoon_shim_071418_IT_1_tenth_oneday/'
import kamodo_ccmc.flythrough.model_wrapper as MW
from time import perf_counter
reader = MW.Model_Reader('GITM')
t0 = perf_counter()
kamodo_object = reader(file_dir, variables_requested=['rho_n'])
t1 = perf_counter()
print(t1-t0)
kamodo_object
```
```python
# time the kamodo object creation
var_list = ['rho_N2', 'rho_N2plus', 'rho_NO', 'rho_NOplus', 'rho_O2', 'rho_O2plus', 'rho_O3P',
'rho_Oplus4S4P', 'rho_n', 'T_n']
from time import perf_counter
reader = MW.Model_Reader(model)
t0 = perf_counter()
kamodo_object = reader(file_dir, variables_requested=var_list)
t1 = perf_counter()
print(t1-t0)
kamodo_object
```
```python
```
|
nasaREPO_NAMEKamodoPATH_START.@Kamodo_extracted@Kamodo-master@Validation@Notebooks@ModelReaderTesting_GITMtiming.ipynb@.PATH_END.py
|
{
"filename": "powerMapsPhilcox.py",
"repo_name": "oliverphilcox/HADES",
"repo_path": "HADES_extracted/HADES-master/tile_creation/powerMapsPhilcox.py",
"type": "Python"
}
|
import sys
#sys.path.append('/data/ohep2/EngelenTools/')
from flipper import *
from flipperPol import *
import healpy
import aveTools
import pickle
import scipy.ndimage.filters
## hack to remove mpi4py dependence
#from mpi4py import MPI
#comm = MPI.COMM_WORLD
#rank = comm.Get_rank()
#size = comm.Get_size()
# run serially: pretend we are rank 0 of a single-process "pool"
rank =0
size = 1
# read the flipper parameter dictionary named on the command line
p = flipperDict.flipperDict()
p.read_from_file(sys.argv[1])
import matplotlib.pyplot as plt
# Stokes components processed per map
tqus = ['T', 'Q', 'U']
# per-tile bookkeeping written by an earlier pipeline stage
goodMap = pickle.load( open(p['workDir'] + p['basename'] + 'goodMap.pkl', 'r'))
mapRas = pickle.load( open(p['workDir'] + p['basename'] + 'mapRas.pkl', 'r'))
mapDecs = pickle.load( open(p['workDir'] + p['basename'] + 'mapDecs.pkl', 'r'))
nMaps = len(goodMap)
# NOTE(review): immediately overwritten below — len(goodMap) value is discarded
nMaps = len(mapRas)#1400
print "hack, setting nMaps to %i" % nMaps
# nGoodMaps = np.sum(goodMap)
# apodization parameters for the cosine taper window
cosineApod = p['cosineApod']
def myTaper(indata):
    """Build a smooth taper map from a (possibly hard-edged) mask.

    Blur the input with a wide Gaussian, threshold the result at 0.99 to
    recover a hard interior mask well away from the edges, then blur that
    bitmask again so the final map rolls off smoothly from 1 to 0.
    """
    blurred = scipy.ndimage.gaussian_filter(indata, sigma=60)
    hard_interior = numpy.where(blurred > .99, 1., 0.)
    return scipy.ndimage.gaussian_filter(hard_interior, sigma=60)
firstTime = True
doAll = True
# partition the tile index range [iStart, iStop) across `size` ranks
# (size is hard-coded to 1 above, so this rank processes everything)
iStop = nMaps
iStart = 0
delta = (iStop - iStart)/size
if delta == 0:
    raise ValueError, 'Too many processors for too small a loop!'
iMin = iStart+rank*delta
iMax = iStart+(rank+1)*delta
# clamp the last rank's range to iStop (integer division can under/overshoot)
if iMax>iStop:
    iMax = iStop
elif (iMax > (iStop - delta)) and iMax <iStop:
    iMax = iStop
if doAll:
    # one output slot per tile, filled only for good tiles
    powers = aveTools.onedl(nMaps)
mapnum_all=xrange(iMin,iMax)
# keep only the tiles flagged usable by the earlier pipeline stage
mapnum_good=[mapnum for mapnum in mapnum_all if goodMap[mapnum]==True]
#print mapnum_good
# Compute cosine window:
# load one good tile's T/Q/U maps just to get the map geometry for the taper
tquMaps=[None]*3
print 'Creating cosine window'
for pol, tqu in enumerate(tqus):
    filename=p['workDir']+p['basename'] + 'map%s_%05i.fits'%(tqu, mapnum_good[0])
    tquMaps[pol] = liteMap.liteMapFromFits(filename)
taper = liteMapPol.initializeCosineWindow(tquMaps[0],\
                    cosineApod['lenApod'],\
                    cosineApod['pad']) # taper weight map
def map_iterator(mapnum):
    # Per-tile worker: load the T/Q/U maps for tile `mapnum`, build and save
    # its smoothed+tapered mask, and return a (currently disabled) power
    # spectrum placeholder.  Relies on module globals: p, tqus, taper, myTaper.
    tquMaps=[None]*3
    print 'mapnum', mapnum
    for pol, tqu in enumerate(tqus):
        filename=p['workDir']+p['basename'] + 'map%s_%05i.fits'%(tqu, mapnum)
        tquMaps[pol] = liteMap.liteMapFromFits(filename)
    # optionally flip the sign of U (polarization convention difference)
    if p['flipU']:
        tquMaps[2].data *= -1
    if p['applyPerPatchMask']:
        maskFilename = p['workDir'] + p['basename'] + 'mapMask_%05i.fits'%( mapnum)
        mask = liteMap.liteMapFromFits(maskFilename)
    else:
        # no per-patch mask: start from an all-ones mask with this tile's geometry
        mask = tquMaps[0].copy()
        mask.data[:] = 1.
    # NOTE(review): smoothedEdges is computed but unused — the two lines that
    # would apply it are commented out below; only the cosine taper is applied.
    smoothedEdges = myTaper(mask.data)
    # maskCopy = mask.copy()
    # mask.data *= smoothedEdges * taper.data 1
    #mask.data = smoothedEdges * taper.data # 2
    mask.data *= taper.data
    mask.writeFits(p['workDir'] + p['basename'] + 'mapMaskSmoothed_%05i.fits'%mapnum, overWrite = True)
    # don't need to run this for this calculation
    powersCoeff = 0.#aveTools.allpowers(*(tquMaps), window = mask, binFile = p['binFile'])
    return powersCoeff
# Start multiprocessing
# fan the per-tile work out over a local process pool with a progress bar
import multiprocessing as mp
import tqdm
pq=mp.Pool()
output=list(tqdm.tqdm(pq.imap(map_iterator,mapnum_good),total=len(mapnum_good)))
# reconstruct the powers matrix
for mnum,mapnum in enumerate(mapnum_good):
    powers[mapnum]=output[mnum]
# NOTE(review): leftover MPI gather logic — `comm` is undefined since the
# mpi4py import was commented out above; harmless only because rank == 0 and
# size == 1 make both branches no-ops.
if rank > 0:
    comm.send(powers[iMin:iMax], dest = 0)
    print 'rank %i of %i: sending data, length', len(powers[iMin:iMax])
else:
    for i in range(1, size):
        inData = comm.recv(source = i) #do this in two steps, because the length of the data received here is unpredictable for i == size-1
        powers[iStart + i * delta : iStart + i * delta + len(inData)] = inData
        print 'rank %i of %i: received data ' % (rank, size)
# pickle.dump(powers, open(p['workDir'] + p['basename'] + 'PowersSandbox.pkl', "wb"))
pickle.dump(powers, open(p['workDir'] + p['basename'] + 'Powers.pkl', "wb"))
# nx = 10
# ny = 10
# rangeT = [0,10]
# rangeP = [0, 1]
# ranges = [rangeT, rangeP, rangeP]
oliverphilcoxREPO_NAMEHADESPATH_START.@HADES_extracted@HADES-master@tile_creation@powerMapsPhilcox.py@.PATH_END.py
|
{
"filename": "octree.py",
"repo_name": "ageller/firefly",
"repo_path": "firefly_extracted/firefly-main/src/firefly/data_reader/octree.py",
"type": "Python"
}
|
import os
import itertools
import copy
import multiprocessing
import itertools
import time
import numpy as np
from .json_utils import load_from_json,write_to_json
from .binary_writer import RawBinaryWriter,BinaryWriter
from abg_python.system_utils import printProgressBar
## offsets of each child octant's center relative to its parent's center,
##  in units of the parent's width (0.25*width = half of the child's width)
octant_offsets = 0.25 * np.array([
    [-1,-1,-1], ## x < 0, y < 0, z < 0 -> 000
    [ 1,-1,-1], ## x > 0, y < 0, z < 0 -> 100
    [-1, 1,-1], ## x < 0, y > 0, z < 0 -> 010
    [ 1, 1,-1], ## x > 0, y > 0, z < 0 -> 110
    [-1,-1, 1], ## x < 0, y < 0, z > 0 -> 001
    [ 1,-1, 1], ## x > 0, y < 0, z > 0 -> 101
    [-1, 1, 1], ## x < 0, y > 0, z > 0 -> 011
    [ 1, 1, 1]]) ## x > 0, y > 0, z > 0 -> 111
class OctNode(object):
def __repr__(self):
return f"OctNode({self.name}):{self.buffer_size}/{self.nparts:d} points - {self.nfields:d} fields"
    def __init__(
        self,
        center,
        width,
        field_names,
        name:str='',
        weight_index=None,
        coordss=None,
        fieldss=None,
        velss=None,
        rgba_colorss=None,
        has_velocities=False,
        has_colors=False,
        nthreads=1
        ):
        """Create an octree node centered at ``center`` with cube edge ``width``.

        :param center: length-3 center of this node's bounding cube
        :param width: edge length of the bounding cube
        :param field_names: names of the per-particle scalar fields
        :param name: octant path string identifying this node ('' = root)
        :param weight_index: index into ``field_names`` of the field used as the
            averaging weight, or None for unweighted (per-particle) averages
        :param coordss/fieldss/velss/rgba_colorss: optional initial particle data
        :param has_velocities/has_colors: whether velocity/color channels exist
        :param nthreads: worker count; used to scale prune thresholds and mute logs
        """
        ## bind input
        self.center = center
        self.width = width
        self.field_names,self.nfields = field_names,len(field_names)
        self.name = name
        self.weight_index = weight_index
        self.nthreads = nthreads
        ## initialize the buffers (and fill with any data we were passed)
        self.set_buffers_from_arrays(
            coordss,
            fieldss,
            velss,
            rgba_colorss)
        self.has_velocities = has_velocities
        self.has_colors = has_colors
        ## on-disk buffer ordering: coordinates, then optional channels, then fields
        self.prefixes = (
            ['x','y','z'] +
            ['vx','vy','vz']*has_velocities +
            ['rgba_r','rgba_g','rgba_b','rgba_a']*has_colors +
            field_names)
        self.children = []
        self.child_names = []
    def set_buffers_from_dict(self,data_dict,width=None,init_node=False):
        """ Initialize this node's buffers from a dict of flat per-particle arrays.
        data_dict requires:
            'x','y','z'
        optionally:
            'vx','vy','vz'
            'rgba_r','rgba_g','rgba_b','rgba_a'
            any number of field names and arrays
        :param data_dict: mapping of component name -> 1d array; consumed
            destructively (entries are popped as they are repacked)
        :type data_dict: dict
        :param width: edge length of this node's cube; if None it is estimated
            as twice the 99th percentile of \\|coords\\|, defaults to None
        :type width: float, optional
        :param init_node: if True, keep buffers as numpy arrays (root-node
            initialization path) instead of converting to lists
        :raises KeyError: if 'x' is missing from data_dict
        :return: root summary dict (field names, channel flags, weight index,
            empty node registry, per-field mins/maxs)
        :rtype: dict
        """
        keys = list(data_dict.keys())
        if 'x' not in keys: raise KeyError(f"Data dict missing coordinates {keys}")
        velss = None
        rgba_colorss = None
        nparts = data_dict['x'].shape[0]
        ## repack x/y/z (and vx/vy/vz if present) into (nparts,3) arrays
        coordss = np.zeros((nparts,3))
        for i,axis in enumerate(['x','y','z']):
            coordss[:,i] = data_dict.pop(axis)
            key = 'v'+axis
            if key in keys:
                if velss is None:
                    velss = np.zeros((nparts,3))
                    self.has_velocities = True
                velss[:,i] = data_dict.pop(key)
        ## repack rgba channels, if present, into an (nparts,4) array
        for i,color in enumerate(['r','g','b','a']):
            key = f'rgba_{color}'
            if key in keys:
                if rgba_colorss is None:
                    self.has_colors = True
                    rgba_colorss = np.zeros((nparts,4))
                rgba_colorss[:,i] = data_dict.pop(key)
        ## the remaining keys are the fields
        self.field_names = list(data_dict.keys())
        self.nfields = len(self.field_names)
        self.prefixes = (
            ['x','y','z'] +
            ['vx','vy','vz']*self.has_velocities +
            ['rgba_r','rgba_g','rgba_b','rgba_a']*self.has_colors +
            self.field_names)
        ## determine field names from remaining keys
        fieldss = np.zeros((nparts,self.nfields))
        for i,field_name in enumerate(self.field_names):
            fieldss[:,i] = data_dict.pop(field_name)
        #if width is None: width = np.max(coordss.max(axis=0) - coordss.min(axis=0))
        ## use the 99th percentile extent so a handful of outliers don't blow
        ##  up the root cube; outliers are ejected in set_buffers_from_arrays
        if width is None:
            width = np.max(np.percentile(
                np.abs(coordss),
                99,axis=0))*2
        self.width = width
        self.center = np.zeros(3)
        self.set_buffers_from_arrays(
            coordss,
            fieldss,
            velss,
            rgba_colorss,
            check_boundaries=True,
            init_node=init_node) ## eject the particles outside the 99th %ile
        root_dict = {}
        root_dict = {'field_names':self.field_names,
            'has_velocities':self.has_velocities,
            'has_colors':self.has_colors,
            'weight_index':self.weight_index,
            'nodes':{},
            'octree_mins':dict(zip(self.field_names,np.min(fieldss,axis=0))),
            'octree_maxs':dict(zip(self.field_names,np.max(fieldss,axis=0)))}
        return root_dict
    def set_buffers_from_disk(self,files,nparts):
        """Fill this node's buffers by reading ``nparts`` particles from .ffraw
        chunk files.

        :param files: list of (fname, byte_offset, count) triples
        :param nparts: total particle count expected across all chunks
        :raises IOError: on malformed file names or a failed prefix/file match
        """
        for fname in files:
            split = os.path.basename(fname[0]).split('.')
            if len(split) != 3: raise IOError(f"bad .ffraw file name [{fname}] must be field.<i>.ffraw")
        ## allocate destination arrays; `buffers` holds column views in the
        ##  same order as self.prefixes so reads land in the right columns
        coordss = np.empty((nparts,3))
        buffers = [coordss[:,0],coordss[:,1],coordss[:,2]]
        if self.has_velocities:
            velss = np.empty((nparts,3))
            buffers += [velss[:,0],velss[:,1],velss[:,2]]
        else: velss = None
        if self.has_colors:
            rgba_colorss = np.empty((nparts,4))
            ## NOTE(review): [:,2] appears twice — alpha is read into the blue
            ##  column's slot ([:,3] never filled). write() below has the
            ##  mirror-image duplication, so the two are self-consistent but
            ##  the alpha channel is effectively a copy of blue — confirm intent.
            buffers += [rgba_colorss[:,0],rgba_colorss[:,1],rgba_colorss[:,2],rgba_colorss[:,2]]
        else: rgba_colorss = None
        fieldss = np.empty((nparts,self.nfields))
        for i in range(self.nfields):
            buffers += [fieldss[:,i]]
        ## sort files and group them into a numpy array
        files = group_files(self.prefixes,files)
        for prefix,buffer,these_files in zip(
            self.prefixes,
            buffers,
            files):
            count_offset = 0
            for fname,byte_offset,count in these_files:
                ## convert from numpy string to ints
                byte_offset = int(eval(byte_offset))
                count = int(eval(count))
                ## sanity check that the grouped file really matches this prefix
                if os.path.basename(fname).split('.')[0][-len(prefix):] != prefix:
                    raise IOError(
                        "The file grouping didn't work. God save us. Report this to agurvich@u.northwestern.edu immediately.")
                RawBinaryWriter(fname,buffer[count_offset:count_offset+count]).read(byte_offset,count)
                count_offset+=count
        self.set_buffers_from_arrays(
            coordss,
            fieldss,
            velss,
            rgba_colorss)
    def set_buffers_from_arrays(
        self,
        coordss:np.ndarray,
        fieldss:np.ndarray,
        velss:np.ndarray=None,
        rgba_colorss:np.ndarray=None,
        check_boundaries:bool=False,
        init_node:bool=False):
        """Load particle arrays into this node's buffers and (re)compute the
        running accumulators (velocity, color, fields, counts).

        :param coordss: (N,3) positions, or None for an empty node
        :param fieldss: (N,nfields) field values; 6 extra com/com^2 columns are
            appended internally
        :param velss: optional (N,3) velocities
        :param rgba_colorss: optional (N,4) colors
        :param check_boundaries: if True, eject particles outside this node's cube
        :param init_node: if True keep buffers as numpy arrays (root init path);
            otherwise convert them to python lists for cheap appends
        """
        if coordss is None: coordss = np.zeros((0,3))
        self.buffer_coordss = coordss
        mask = np.ones(coordss.shape[0],dtype=bool)
        if check_boundaries:
            for axis in range(3):
                mask = np.logical_and(
                    mask,
                    np.abs(coordss[:,axis]) <= (self.center[axis]+self.width/2) )
            if self.nthreads == 1 and np.sum(mask) != mask.size:
                print(f'ejecting {np.sum(~mask)} particles that are outside this node ({100*np.sum(~mask)/mask.size:0.2f}%)')
        ## NOTE(review): when init_node is True the coordinate buffer is left
        ##  unmasked while fieldss below IS masked — confirm the init path
        ##  never combines init_node=True with out-of-bounds particles.
        if not init_node: self.buffer_coordss = self.buffer_coordss[mask].tolist()
        ## initialize the field buffers
        if fieldss is not None:
            self.nfields = fieldss.shape[1]
            ## +6 to hold the com and com^2 fields
            self.buffer_fieldss = np.zeros((coordss.shape[0],self.nfields+6))
            self.buffer_fieldss[:,:-6] = fieldss
            for i in range(3):
                self.buffer_fieldss[:,-6+i] = coordss[:,i]
                self.buffer_fieldss[:,-3+i] = (coordss[:,i]**2)
            self.buffer_fieldss = self.buffer_fieldss[mask]
        else: self.buffer_fieldss = np.zeros((0,self.nfields))
        if not init_node:self.buffer_fieldss = self.buffer_fieldss.tolist()
        ## determine if we're taking a weighted average
        ## NOTE(review): the column indexing below assumes buffer_fieldss is
        ##  still a numpy array, i.e. init_node=True — with init_node=False it
        ##  has just been tolist()'d and [:,i] would raise; verify call sites.
        if self.weight_index is not None:
            weights = self.buffer_fieldss[:,self.weight_index]
            ## need to weight the fieldss now that we know we're weighted
            for i in range(self.nfields):
                if i != self.weight_index:
                    self.buffer_fieldss[:,i]*=weights
            ## change shape of weights for broadcasting below
            weights = weights[:,None]
        else: weights = 1
        ## initialize the velocities buffer
        if velss is not None:
            if velss.shape[0] != coordss.shape[0]:
                raise IndexError(
                    f"Size of velss ({velss.shape[0]})"+
                    f"does not match size of buffer ({coordss.shape[0]})")
            self.buffer_velss = (velss * weights)[mask]
        else: self.buffer_velss = np.zeros((0,3))
        if not init_node:self.buffer_velss = self.buffer_velss.tolist()
        ## initialize the rgba_colors buffer
        if rgba_colorss is not None:
            if rgba_colorss.shape[0] != coordss.shape[0]:
                raise IndexError(
                    f"Size of rgba_colorss ({rgba_colorss.shape[0]})"+
                    f"does not match size of buffer ({coordss.shape[0]})")
            self.buffer_rgba_colorss = (rgba_colorss * weights)[mask]
        else: self.buffer_rgba_colorss = np.zeros((0,4))
        if not init_node:self.buffer_rgba_colorss = self.buffer_rgba_colorss.tolist()
        ## initialize com accumulators
        self.velocity = np.sum(self.buffer_velss,axis=0)
        self.rgba_color = np.sum(self.buffer_rgba_colorss,axis=0)
        self.fields = np.sum(self.buffer_fieldss,axis=0)
        self.buffer_size = np.sum(mask)
        self.nparts = np.sum(mask)
    def cascade(self,min_to_refine,nrecurse=0):
        """Flush this node's particle buffer into child octants, prune children
        too small to stand alone, and optionally recurse.

        :param min_to_refine: minimum buffered-particle count for a node to be refined
        :param nrecurse: how many additional levels to cascade immediately
        :return: list of (node_name, remaining_buffer_size) tuples for this
            node and every node cascaded below it
        """
        #if self.nthreads < 5: print('Refining:',self)
        ## flush the buffer into its children
        #if self.nthreads < 5: printProgressBar(0,self.buffer_size,prefix = 'Progress:',suffix='complete',length=50,decimals=0)
        for i in range(self.buffer_size):
            self.sort_point_into_child(
                self.buffer_coordss[i],
                self.buffer_fieldss[i],
                self.buffer_velss[i] if self.has_velocities else None,
                self.buffer_rgba_colorss[i] if self.has_colors else None)
            #if self.nthreads < 5: printProgressBar(i+1,self.buffer_size,prefix = 'Progress:',suffix='complete',length=50,decimals=0)
        #if self.nthreads < 5: printProgressBar(i+1,self.buffer_size,prefix = 'Progress:',suffix='complete',length=50,decimals=0)
        ## probably just [] tbh ??
        ## reset this node's buffers now that the points live in the children
        self.buffer_coordss = np.zeros((0,3)).tolist()
        self.buffer_fieldss = np.zeros((0,self.nfields)).tolist()
        self.buffer_velss = np.zeros((0,3)).tolist()
        self.buffer_rgba_colorss = np.zeros((0,4)).tolist()
        self.buffer_size = 0
        ## okay we made the children but... not all will
        ##  survive. the small ones will be merged back
        ##  into the parent
        self.prune(min_to_refine)
        #if self.nthreads < 5: print('New children:',self.child_names)
        return_value = [(self.name,self.buffer_size)]
        if nrecurse>0:
            for child in self.children: return_value += child.cascade(min_to_refine,nrecurse-1)
        self.processed = True
        return return_value
    def sort_point_into_child(
        self,
        coords,
        fields,
        vels,
        rgba_colors):
        """Route a single point into the appropriate child octant, creating the
        child node on first use.

        :param coords: length-3 position
        :param fields: per-field values (with trailing com/com^2 entries)
        :param vels: length-3 velocity or None
        :param rgba_colors: length-4 color or None
        """
        ## use 3 bit binary number to index
        ##  the octants-- for each element of the array
        ##  it is either to the left or right of the center.
        ##  this determines which octant it lives in
        ##  thanks Mike Grudic for this idea!
        octant_index = 0
        for axis in range(3):
            if coords[axis] > self.center[axis]: octant_index+= (1 << axis)
        ## child names are the parent's name plus this octant's digit
        child_name = self.name+'%d'%(octant_index)
        if child_name not in self.child_names:
            ## create a new node! welcome to the party, happy birthday, etc.
            child = OctNode(
                self.center + self.width*octant_offsets[octant_index],
                self.width/2,
                self.field_names,
                name=child_name,
                has_velocities=self.has_velocities,
                has_colors=self.has_colors)
                #nthreads=self.nthreads) ## <-- unnecessary b.c. if nthreads > 0 then
                # we won't be recursively pruning so the child will have this set
                # when it's re-initialized in refineNode after it becomes a work_unit
            self.children += [child]
            self.child_names += [child_name]
        else: child:OctNode = self.children[self.child_names.index(child_name)]
        child.accumulate(coords,fields,vels,rgba_colors)
    def prune(self,min_to_refine):
        """Merge the smallest children back into this node's buffer until the
        combined buffer would exceed the (per-thread) refinement threshold.

        :param min_to_refine: global refinement threshold; divided by nthreads
            so parallel workers prune proportionally less aggressively
        """
        ## visit children smallest-first so we absorb as many as possible
        sort_indices = np.argsort([child.buffer_size for child in self.children])
        sort_children = np.array(self.children)[sort_indices]
        for child in sort_children:
            if (child.buffer_size + self.buffer_size) < min_to_refine/self.nthreads:
                ## absorb the child's buffers back into the parent
                self.buffer_coordss += child.buffer_coordss
                self.buffer_velss += child.buffer_velss
                self.buffer_rgba_colorss += child.buffer_rgba_colorss
                self.buffer_fieldss += child.buffer_fieldss
                self.buffer_size += child.buffer_size
                ## evict you
                self.children.pop(self.child_names.index(child.name))
                ## remove you from my will
                self.child_names.pop(self.child_names.index(child.name))
            else: break ## no more room to consume children, the rest get to live on
        #if self.buffer_size > 0 and self.nthreads < 5: print(f"Merged {self.buffer_size} particles back into parent.")
def accumulate(
self,
coords:list,
fields:list,
vels:list=None,
rgba_colors:list=None,
):
## accumulate the point
self.nparts += 1
self.buffer_size += 1
## store coordinate data in its buffer
self.buffer_coordss.append(coords)
## store velocity data in its buffer
## and increment the com velocity accumulator
if self.has_velocities:
self.buffer_velss.append(vels)
self.velocity += vels
## store rgba_color data in its buffer
## and increment the com rgba_color accumulator
if self.has_colors:
self.buffer_rgba_colorss.append(rgba_colors)
self.rgba_color += rgba_color
## store field data in the buffer
## and increment the com field accumulator
## (which includes the com and com^2 as the last 6 entries)
self.buffer_fieldss.append(fields)
self.fields += fields
def write_tree(self,target_directory,split_index,write_protect=False):
## format accumulated values into a dictionary
if self.buffer_size == 0:
this_node_dict = self.format_node_dictionary()
## some children were merged back into the parent, write them to disk
else: this_node_dict = self.write(target_directory,split_index,write_protect=write_protect)
if hasattr(self,'processed'): this_node_dict['processed'] = self.processed
return_value = [this_node_dict]
for child in self.children:
return_value += child.write_tree(target_directory,split_index)
return return_value
    def format_node_dictionary(self):
        """Collapse this node's accumulators into a summary dictionary::

            '':{
                'name':'',
                'files':fnames,
                'width':dmax,
                'center':np.zeros(3),
                'nparts':np.sum(nparts),
                'children':[],
                'radius':radius,
                'center_of_mass':com,
                'weight_index':None,
                <field_names>
            }

        NOTE(review): this divides ``self.fields`` by the weight *in place*, so
        calling it twice on the same node would double-normalize — verify each
        node is formatted at most once per accumulation cycle.
        """
        node_dict = {}
        ## set basic keys
        for key in ['center','width','name','nparts','buffer_size']:
            node_dict[key] = getattr(self,key)
        node_dict['children'] = [child.name for child in self.children]
        ## determine weight for accumulated fields
        if self.weight_index is not None:
            weight = self.fields[self.weight_index]
        else: weight = self.nparts
        ## set other accumulated field values, use the same weight
        for i,field_key in enumerate(self.field_names):
            if self.weight_index is None or i != self.weight_index:
                self.fields[i]/=weight
            node_dict[field_key] = self.fields[i]
        ## excluded from loop above because not in field names
        com = self.fields[-6:-3]/weight ## last 3 fields will always be xcom, ycom, zcom
        com_sq = self.fields[-3:]/weight ## last 3 fields will always be xcom^2, ycom^2, zcom^2
        ## sigma_x = <x^2>_m - <x>_m^2, take average over 3 axes to get 1d
        ##  sigma to represent 1-sigma extent of particles in node
        node_dict['radius'] = np.sqrt(np.mean(com_sq-com**2))
        node_dict['center_of_mass'] = com
        if self.has_velocities: vcom = self.velocity/weight
        else: vcom = None
        node_dict['com_velocity'] = vcom
        if self.has_colors: rgba_color = self.rgba_color/weight
        else: rgba_color = None
        node_dict['rgba_color'] = rgba_color
        return node_dict
    def write(self,top_level_directory,split_index=None,bytes_per_file=4e7,write_protect=False):
        """Write this node's buffered particles to .ffraw files (splitting so no
        file exceeds ~``bytes_per_file``) and return the node's summary dict
        with a ``'files'`` entry listing (fname, offset, count) triples.

        :param top_level_directory: directory to write .ffraw files into
        :param split_index: worker-split counter; when set with write_protect,
            output names are decorated so other threads' reads aren't clobbered
        :param bytes_per_file: soft per-file size limit (4 bytes per value)
        :param write_protect: avoid overwriting files another thread may still read
        :raises IOError: if a written file's size doesn't match expectations
        """
        ## convert buffers to numpy arrays
        self.buffer_coordss = np.array(self.buffer_coordss)
        self.buffer_fieldss = np.array(self.buffer_fieldss)
        self.buffer_velss = np.array(self.buffer_velss)
        self.buffer_rgba_colorss = np.array(self.buffer_rgba_colorss)
        this_dir = os.path.join(top_level_directory)
        if not os.path.isdir(this_dir): os.makedirs(this_dir)
        namestr = f'{self.name}-' if self.name != '' else 'root-'
        splitstr = ''#f'{split_index:02d}-' if split_index is not None else ''
        ## determine how many files we'll need to split this dataset into
        nsub_files = int(4*self.buffer_size//bytes_per_file + (4*self.buffer_size != bytes_per_file))
        counts = [arr.shape[0] for arr in np.array_split(np.arange(self.buffer_size),nsub_files)]
        ## ------ gather buffer array aliases to be written to disk
        buffers = [self.buffer_coordss[:,0],self.buffer_coordss[:,1],self.buffer_coordss[:,2]]
        if self.has_velocities:
            if self.buffer_velss.shape[0] > 0 and np.sum(self.velocity) == 0:
                raise IndexError("has_velocities but buffer_velss is empty")
            ## undo the weighting to write to disk
            if self.weight_index is not None:
                self.buffer_velss /= self.buffer_fieldss[:,self.weight_index,None]
            buffers += [self.buffer_velss[:,0],self.buffer_velss[:,1],self.buffer_velss[:,2]]
        if self.has_colors:
            if self.buffer_rgba_colorss.shape[0] == 0:
                raise IndexError("self.has_colors but buffer_rgba_colorss is empty")
            ## undo the weighting to write to disk
            if self.weight_index is not None:
                self.buffer_rgba_colorss /= self.buffer_fieldss[:,self.weight_index,None]
            ## NOTE(review): [:,2] is listed twice — the alpha slot is written
            ##  with the blue channel ([:,3] never written); this mirrors the
            ##  duplication in set_buffers_from_disk — confirm intent.
            buffers += [
                self.buffer_rgba_colorss[:,0],
                self.buffer_rgba_colorss[:,1],
                self.buffer_rgba_colorss[:,2],
                self.buffer_rgba_colorss[:,2]]
        if self.buffer_fieldss.shape[0] > 0:
            ## undo the weighting to write to disk
            for i in range(len(self.field_names)):
                if self.weight_index is None or i == self.weight_index:
                    weight = 1
                else: weight = self.buffer_fieldss[:,self.weight_index]
                buffers += [self.buffer_fieldss[:,i]/weight]
        ## --------------------------------------------------
        ## write each buffer to however many subfiles we need to
        ##  enforce a maximum file-size on disk
        files = []
        for prefix,buffer in zip(self.prefixes,buffers):
            count_offset = 0
            for index,count in enumerate(counts):
                ## if a child created in this thread or
                ##  the parent node was not split between threads
                if not write_protect or split_index is None:
                    fname = os.path.join(top_level_directory,f"{namestr}{splitstr}{prefix}.{index}.ffraw")
                else:
                    ## don't overwrite a file before another thread can read particles from it
                    ##  this will only happen for a node that is split between multiple threads
                    ##  children won't be written with references to files that already exist. only
                    ##  pruned children are written on top of old particle data.
                    fname = os.path.join(top_level_directory,f"{namestr}pruned-{splitstr}{prefix}.{index}.ffraw")
                RawBinaryWriter(fname,buffer[count_offset:count_offset+count]).write()
                ## append to file list
                files += [[fname,0,int(count)]]
                count_offset+=count
                fsize = os.path.getsize(fname)
                ## NOTE(review): check compares against 4*(count+1) but the
                ##  error message prints 4*count+1 — confirm which is intended.
                if fsize != 4*(count+1):
                    raise IOError(f"file was not saved correctly, actual bytes:{int(fsize)} vs. expected:{int(4*count+1)}")
        ## format aggregate data into a dictionary
        node_dict = self.format_node_dictionary()
        ## append buffer files
        node_dict['files'] = files
        ## validate one more time...
        validate_files(node_dict)
        return node_dict
    def write_fftree(
        self,
        filename=None,
        handle=None,
        offset=0):
        """Write this node's buffered particles in .fftree binary order and
        return a summary dict annotated with byte offsets/sizes.

        :param filename: output path (recorded relative to its last 3 components)
        :param handle: open file handle to append to; None creates a new file
        :param offset: byte offset of this node's data when appending
        """
        ## we /were/ accumulating a weighted quantity for the CoM particles
        ##  but /now/ we have to divide that weight back out
        ## NOTE(review): this indexes row `weight_index` of buffer_fieldss, not
        ##  the weight *column* — compare with write() which uses
        ##  [:,self.weight_index]; looks like it should be the column. Confirm.
        if self.weight_index is not None: weights = self.buffer_fieldss[self.weight_index]
        else: weights = np.ones(self.buffer_size)
        ## initialize the writer object that will
        ##  convert the data to binary and write it in the
        ##  correct .fftree order
        binary_writer = BinaryWriter(
            filename,
            self.buffer_coordss,
            None if not self.has_velocities else
                np.array(self.buffer_velss)/weights[:,None],
            None if not self.has_colors else
                np.array(self.buffer_rgba_colorss)/weights[:,None])
        binary_writer.nparts = self.buffer_size
        binary_writer.nfields = self.nfields
        ## don't set binary_writer.field_names or binary_writer.<xxxx>_flags
        ##  because that info is stored in the octree.json file
        binary_writer.fields = np.array(self.buffer_fieldss)[:,:binary_writer.nfields]
        ## renormalize every field except Masses
        for i,field in enumerate(self.field_names[:binary_writer.nfields]):
            if i != self.weight_index: binary_writer.fields[:,i]/=weights
        ## take the transpose because binary_writer wants Nfields x Nparts
        ##  but make sure numpy doesn't do anything funny like give you a view
        ##  of the transpose. change it in memory numpy!!
        binary_writer.fields = np.array(binary_writer.fields.T,order='C')
        ## creates a new file if handle is None
        byte_size = binary_writer.write(handle)
        ## format aggregate data into a dictionary
        node_dict = self.format_node_dictionary()
        ## store the length in bytes for this node
        node_dict['byte_offset'] = 0 if handle is None else offset
        node_dict['buffer_filename'] = os.path.sep.join(filename.split(os.path.sep)[-3:])
        node_dict['byte_size'] = byte_size
        node_dict['ABG_byte_offsets'] = binary_writer.calculate_array_offsets()
        return node_dict
class Octree(object):
def __repr__(self):
my_str = f"{self.UIname} - {self.nparts_tot:,} parts ({len(self.root['nodes']):,} nodes) - {len(self.root['field_names'])} fields"
return my_str
    def get_work_units(self,nthreads=1):
        """Partition the nodes that still need refinement into ``nthreads``
        work units of roughly equal particle count, splitting a node's chunk
        files across workers when necessary.

        :param nthreads: number of workers to divide the refinement among
        :return: list (one entry per worker) of lists of node dictionaries;
            also stored on ``self.work_units``
        """
        work_units = []
        ## work on copies so splitting bookkeeping doesn't corrupt self.root
        nodes = copy.deepcopy(self.root['nodes'])
        ## first find those nodes which need to be refined
        expand_nodes = [node for node in nodes.values() if
            'files' in node.keys() and
            len(node['files']) > 0 and
            node['buffer_size'] > self.min_to_refine]
        ## determine how many particles that represents
        nparts_tot = np.sum([node['buffer_size'] for node in expand_nodes])
        ## need to split that many particles among nthreads workers
        nparts_per_worker = [len(arr) for arr in np.array_split(np.arange(nparts_tot),nthreads)]
        this_node = None
        for nparts_this_worker in nparts_per_worker:
            work_unit = []
            nremain = nparts_this_worker
            while nremain > 0:
                if this_node is None: this_node = expand_nodes.pop(0)
                ## use this to differentiate between nodes that should
                ##  be deleted and those that should not
                this_node['processed'] = False
                if 'split_index' not in this_node.keys():
                    this_node['split_index'] = None
                this_node_nparts = this_node['nparts']
                ## add the whole node as a work unit
                if this_node_nparts <= nremain:
                    work_unit += [this_node]
                    this_node = None
                    nremain -= this_node_nparts
                ## we only need part of this node to complete this worker's task queue
                else:
                    if this_node['split_index'] is None: this_node['split_index'] = 0
                    ## make a node that goes into the work unit
                    copy_node = {**this_node}
                    copy_node['nparts'] = nremain
                    this_node['nparts'] = this_node_nparts - nremain
                    copy_node['buffer_size'] = nremain
                    this_node['buffer_size'] = (this_node_nparts - nremain)
                    ## increment the split index for the copy of
                    ##  the node with the remaining particles
                    this_node['split_index'] +=1
                    ## need to find the files that contain the subset we need
                    files = group_files(self.prefixes,this_node['files'])
                    first_split_files = []
                    for subfile_i in range(files.shape[1]):
                        npart_this_sub_file = int(eval(files[0,0,-1]))
                        ## add the first subfile in
                        first_split_files += [files[:,0]]
                        ## we can add the entire sub-file
                        ##  and still have room for more,
                        ##  don't need to adjust the sizes of any files
                        if npart_this_sub_file <= nremain:
                            ## get rid of the chunk file in the files array
                            files = np.delete(files,0,axis=1)
                            ## also have to get rid of it in the actual node dictionary
                            for ftuple in first_split_files[-1]:
                                for this_index,ftuple_dict in enumerate(this_node['files']):
                                    if (ftuple[0] == ftuple_dict[0]): break
                                this_node['files'].pop(this_index)
                            nremain -= npart_this_sub_file
                        ## need to take *only a portion* of this chunk file.
                        ##  so we need to update the size inside first_split_files
                        ##  and inside the node
                        else:
                            ## get rid of the chunk file in the files array
                            ## update the most recent list entry
                            ##  with the bytesize it should read
                            first_split_files[-1][...,-1] = int(nremain)
                            ## update the remaining files in the dictionary
                            ##  to reflect that the first part of the byte
                            ##  string is missing
                            for ftuple in first_split_files[-1]:
                                for this_index,ftuple_dict in enumerate(this_node['files']):
                                    if (ftuple[0] == ftuple_dict[0]): break
                                this_node['files'][this_index] = (
                                    this_node['files'][this_index][0],
                                    ## whatever byte offset this sub file might've had
                                    int(first_split_files[-1][0,1]) + nremain*4,
                                    int(npart_this_sub_file-nremain)
                                    )
                            nremain = 0
                            break ## we've filled up the node
                    copy_node['files'] = np.array(first_split_files).reshape(-1,3).tolist()
                    this_node['files'] = np.array(this_node['files']).tolist()
                    work_unit +=[copy_node]
                    nremain = 0
            work_units += [work_unit]
        self.work_units = work_units
        return work_units
def print_work(self):
namess = [[expand_node['name'] for expand_node in expand_nodes] for
expand_nodes in self.work_units]
to_do = list(set(np.hstack(namess)))
nparts = [self.root['nodes'][name]['buffer_size'] for name in to_do]
if len(to_do) > 0: print(f"{self} ({100*(1-np.sum(nparts)/self.nparts_tot):0.1f}%) {to_do}")
def __init__(
self,
UIname,
pathh,
min_to_refine=1e6):
self.UIname = UIname
""" pathh is path to data that has already been saved to .ffraw format and has an acompanying octree.json """
## gotsta point us to an octree my friend
if not os.path.isdir(pathh): raise IOError(pathh)
self.pathh = pathh
self.min_to_refine = min_to_refine
## read octree summary file
self.root = load_from_json(os.path.join(pathh,'octree.json'))
self.nparts = np.array([node_dict['buffer_size'] for node_dict in self.root['nodes'].values()])
self.nparts_tot = np.sum(self.nparts)
self.node_names = np.array(list(self.root['nodes'].keys()))
self.prefixes = (['x','y','z'] +
['vx','vy','vz']*self.root['has_velocities'] +
['rgba_r','rgba_g','rgba_b','rgba_a']*self.root['has_colors'] +
self.root['field_names'])
self.get_work_units()
    def full_refine(self,nthreads,nrecurse=0,use_mps=True,loud=True):
        """Repeatedly call :meth:`refine` until no work units remain.

        :param nthreads: number of parallel workers per refine pass
        :param nrecurse: extra cascade depth passed through to the workers
        :param use_mps: use a multiprocessing pool (False = serial, for debugging)
        :param loud: print progress and total elapsed time
        """
        init_time = time.time()
        while len(self.work_units[0]) >0:
            ## refine raises IndexError when there is no work left to do
            try: self.refine(nthreads,nrecurse,use_mps,loud)
            except IndexError as e:
                print(e.args[0])
                break
        if loud:
            print()
            print(((time.time()-init_time)/60),'min elapsed')
    def refine(self,nthreads=1,nrecurse=0,use_mps=True,loud=True):
        """Run one parallel refinement pass: farm the current work units out to
        ``refineNode`` workers, merge the returned child summaries back into
        ``self.root``, delete orphaned .ffraw files, and rewrite octree.json.

        :param nthreads: number of workers
        :param nrecurse: extra cascade depth per worker
        :param use_mps: use a multiprocessing pool (False = serial)
        :param loud: print the pending work before starting
        :raises IndexError: when there are no work units left
        """
        argss = zip(
            self.get_work_units(nthreads),
            np.arange(nthreads,dtype=int),
            itertools.repeat(self.pathh),
            itertools.repeat(self.min_to_refine),
            itertools.repeat(self.root['field_names']),
            itertools.repeat(nthreads),
            itertools.repeat(nrecurse)
            )
            #node_dicts,
            #target_directory,
            #min_to_refine,
            #field_names
            #nrecurse=0
        ## print which nodes need to be refined to the console
        if loud: self.print_work()
        ## validate all the files
        validate_files(self.root)
        if np.size(self.work_units) == 0: raise IndexError("No work to be done! Celebrate!")
        if not use_mps or nthreads <= 1:
            new_dicts = [refineNode(*args) for args in argss]
        else:
            with multiprocessing.Pool(nthreads) as my_pool: new_dicts = my_pool.starmap(refineNode,argss)
        bad_files = set([])
        good_files = []
        popped = []
        for work_unit,children in zip(self.work_units,new_dicts):
            for old_node in work_unit:
                old_name = old_node['name']
                ## only remove it if it hasn't already been removed
                if old_name not in popped:
                    self.root['nodes'].pop(old_name)
                    popped+=[old_name]
                ## accumulate the bad files though. don't do this outside the loop
                ##  because there are nodes that don't need to be refined anymore
                ##  that aren't referenced in the new children
                this_bad_files = [fname[0] for fname in old_node['files']]
                bad_files = bad_files.union(set(this_bad_files))
            ## register the old_node (children[0]) and each of its children.
            for child in children: self.register_child(child)
            ## delete any files that are no longer being pointed to.
            good_files += [[ fname[0] for fname in node['files'] ] for node in children
                if 'files' in node.keys() and node['buffer_size']>0]
        good_files = set(np.hstack(good_files))
        ## only delete files that no surviving node references
        bad_files -= good_files
        for bad_file in bad_files:
            if os.path.isfile(bad_file): os.remove(bad_file)
        if len(bad_files) > 0: print('deleting',len(bad_files),'unreferenced files.')#,bad_files)
        ## write out the new octree.json
        write_to_json(self.root,os.path.join(self.pathh,'octree.json'))
        ## and validate once more...
        validate_files(self.root)
def register_child(self,new_child,debug=False):
    """Insert ``new_child`` into ``self.root['nodes']``, merging with any existing node of the same name.

    When a node of the same name already exists, its tracked fields are combined
    as weighted averages (weighted by the weight field when ``weight_index`` is
    set, otherwise by particle counts), the radius is combined as a weighted
    RMS, and the particle/buffer counts and file lists are summed/concatenated.

    :param new_child: node-summary dictionary as produced by refineNode.
    :param debug: if True, print the merged buffer size for this child.
    """
    weight_index = self.root['weight_index']
    child_name = new_child['name']
    ## easy, we've never seen this child before
    if child_name not in self.root['nodes']: self.root['nodes'][child_name] = new_child
    ## annoying, need to append...
    else:
        nodes = self.root['nodes']
        old_child = nodes[child_name]
        field_names = self.root['field_names']
        ## update the accumulated values
        if weight_index is not None:
            old_weight = old_child[field_names[weight_index]]
            new_weight = new_child[field_names[weight_index]]
        else:
            old_weight = old_child['nparts']
            new_weight = new_child['nparts']
        for i,field_name in enumerate(
            field_names+['center_of_mass','com_velocity','rgba_color']):
            ## if don't have velocity or rgba_color, for example
            if old_child[field_name] is None: continue
            if weight_index is None or i!= weight_index:
                old_this_weight = old_weight
                new_this_weight = new_weight
            ## NOTE(review): for the weight field itself this computes
            ## (old+new)/(old_weight+new_weight), which equals 1 when the field
            ## stores the totals -- confirm this is the intended normalization.
            else: old_this_weight = new_this_weight = 1
            old_child[field_name] = (
                (old_child[field_name]*old_this_weight +
                new_child[field_name]*new_this_weight) /
                (old_weight + new_weight))
        ## handle radius separately because have to do rms
        old_child['radius'] = np.sqrt((
            old_child['radius']**2*old_weight +
            new_child['radius']**2*new_weight)/
            (old_weight+new_weight))
        ## add the number of particles
        old_child['nparts']+=new_child['nparts']
        old_child['buffer_size']+=new_child['buffer_size']
        if 'files' in new_child.keys():
            if 'files' not in old_child.keys(): old_child['files'] = []
            old_child['files'] += new_child['files']
        ## shouldn't need to do this b.c. aliasing
        ## but you know one can never be too careful
        self.root['nodes'][child_name] = old_child
    if debug: print(child_name,'registered:',new_child['buffer_size'],'/',self.root['nodes'][child_name]['buffer_size'],'particles')
def convert_ffraw_to_fftree(self,target_directory,fname_pattern,nthreads=1):
    """Convert every node's .ffraw particle files into .fftree files.

    :param target_directory: directory the .fftree files are written into
        (created if it does not exist yet).
    :param fname_pattern: printf-style format string (must contain ``%``) used
        to build the per-node output file names.
    :param nthreads: number of worker processes; <= 1 runs serially with a
        progress bar.
    :raises ValueError: if ``fname_pattern`` is not a format string.
    """
    ## sanitize input
    if not os.path.isdir(target_directory): os.makedirs(target_directory)
    if '%' not in fname_pattern: raise ValueError(
        f"fname_pattern must be a format string with % not {fname_pattern}")
    ## setup the work array, each node is indpt so we can multi-thread if desired
    num_nodes = len(self.root['nodes'].keys())
    argss = zip(
        self.root['nodes'].values(),
        [os.path.join(target_directory,fname_pattern%i) for i in range(num_nodes)],
        itertools.repeat(self.root['field_names'])
    )
    if nthreads <=1:
        ## single threaded, print a progress bar
        new_node_dicts = []
        printProgressBar(0,num_nodes,prefix='Converting .ffraw to .fftree')
        for i,args in enumerate(argss):
            new_node_dicts += [convertNodeFFRawFFTree(*args)]
            printProgressBar(i+1,num_nodes,prefix='Converting .ffraw to .fftree')
    else:
        ## honor the requested nthreads (previously the pool always used every
        ## available core, silently ignoring this argument) while still capping
        ## at the amount of work and the hardware.
        with multiprocessing.Pool(min(num_nodes,nthreads,multiprocessing.cpu_count())) as my_pool:
            new_node_dicts = my_pool.starmap(convertNodeFFRawFFTree,argss)
    ## now update the self.root dictionary
    for i,new_node_dict in enumerate(new_node_dicts):
        ## the children are lost in translation so the new node dict has to inherit them manually
        children = self.root['nodes'][new_node_dict['name']]['children']
        ## replace the dictionary
        self.root['nodes'][new_node_dict['name']] = new_node_dict
        ## inherit the children
        self.root['nodes'][new_node_dict['name']]['children'] = children
## what gets passed to the multiprocessing.Pool
def refineNode(
    node_dicts,
    thread_id,
    target_directory,
    min_to_refine,
    field_names,
    nthreads,
    nrecurse=0):
    """Worker: refine a batch of over-full nodes and write the resulting sub-trees to disk.

    :param node_dicts: metadata dictionaries of the nodes this worker must refine.
    :param thread_id: index of this worker, used to pick its output sub-directory.
    :param target_directory: top-level directory of the on-disk octree.
    :param min_to_refine: particle count above which a node is split into children.
    :param field_names: tracked field names, forwarded to each OctNode.
    :param nthreads: total worker count (lowers the merge-back threshold, see note below).
    :param nrecurse: extra recursion depth, applied only to nodes not split across threads.
    :return: list of node-summary dictionaries, one per node file written.
    """
    output_dir = os.path.join(target_directory,f"output_{thread_id:02d}.0")
    if not os.path.isdir(output_dir): os.makedirs(output_dir)
    this_length = len(os.listdir(output_dir))
    ## find an output directory that has room for our files
    ##  (directories are capped at 10k entries; bump the ".N" suffix until one has room)
    while this_length >= 1e4:
        base,count = os.path.basename(output_dir).split('.')
        output_dir = os.path.join(target_directory,f"{base}.{int(count)+1}")
        if not os.path.isdir(output_dir): os.makedirs(output_dir)
        this_length = len(os.listdir(output_dir))
    return_value = []
    for node_dict in node_dicts:
        this_node = OctNode(
            node_dict['center'],
            node_dict['width'],
            field_names,
            node_dict['name'],
            has_velocities=node_dict['com_velocity'] is not None,
            has_colors=node_dict['rgba_color'] is not None,
            ## nthreads will reduce the minimum number of particles to be
            ## merged back into a parent b.c. multiple threads may have child
            ## particles and that could push the parent back over the maximum
            ## in an infinite loop.
            nthreads=nthreads)
        ## load the particle data for this node from disk
        this_node.set_buffers_from_disk(node_dict['files'],node_dict['buffer_size'])
        ## sort these points directly into the children
        this_node.cascade(
            min_to_refine,
            ## only cascade children if they aren't split across threads
            ## otherwise we need to synchronize after each refinement
            nrecurse if node_dict['split_index'] is None else 0)
        ## walk the sub-tree we just created and write node files to disk
        ## returns a list of dictionaries summarizing the node files that were written to disk
        return_value += this_node.write_tree(output_dir,node_dict['split_index'],write_protect=True)
    return return_value
def group_files(prefixes,files):
    """Group a flat list of ``(filename, byte_offset, count)`` tuples by field prefix.

    Files are sorted by basename, split into ``len(prefixes)`` equal groups, and
    the groups are reordered to match the order of ``prefixes``. Each group is
    sanity-checked: every file's basename stem must end with its group's prefix.

    :param prefixes: field-name prefixes, one group per prefix.
    :param files: flat list of ``(filename, byte_offset, count)`` tuples;
        each prefix must own the same number of files.
    :return: object array of shape ``(len(prefixes), n_per_prefix, 3)``.
    :raises IOError: if a file ends up in the wrong prefix group.
    """
    ## group files by prefix with arbitrary number of files for each prefix
    ## without having to check if `prefix in fname` b.c. that would mess up
    ## fields that include vx, x, etc...
    filenames_expand = np.array([
        fname[0].split(os.path.sep) + list(fname[1:])
        for fname in sorted(files)])
    ## sort by basename (third column from the end) so files group by field
    new_files = sorted(filenames_expand,key=lambda x: x[-3])
    new_files = [[os.path.sep+os.path.join(*fline[:-2]),fline[-2],fline[-1]] for fline in new_files]
    new_files = np.array(np.array_split(new_files,len(prefixes)),dtype=object)
    ## now rearrange them to match the prefix order. don't ask
    ## for whom the hack tolls for it tolls for thee
    new_files = new_files[np.argsort(np.argsort(prefixes))]
    for i,prefix in enumerate(prefixes):
        for ftuple in new_files[i]:
            stem = os.path.basename(ftuple[0]).split('.')[0]
            if stem[-len(prefix):] != prefix:
                ## report the offending stem/prefix pair and fail loudly;
                ## the leftover pdb.set_trace() that used to live here would
                ## hang any non-interactive (batch/CI) run.
                print(stem[-len(prefix):],prefix)
                raise IOError('File grouping failed. Alert Alex immediately!!')
    return new_files
def validate_files(dictionary):
    """Verify that each node's on-disk particle counts match its metadata.

    Accepts either a tree root (a dict with a ``'nodes'`` mapping, validated
    recursively) or a single node dict with a ``'files'`` list of
    ``(filename, byte_offset, count)`` tuples. For every distinct file the
    particle count implied by its size (4 bytes per value, minus one header
    word) is compared against the summed metadata counts.

    :return: True when everything checks out (or there is nothing to check).
    :raises IOError: when a file's size disagrees with the metadata.
    """
    ## tree root: recurse into every child node
    if 'nodes' in dictionary:
        for child in dictionary['nodes'].values():
            validate_files(child)
        return True
    ## node without any files on disk: nothing to do
    if 'files' not in dictionary:
        return True
    ## tally on-disk vs. metadata counts per distinct file basename
    tallies = {}
    for fname, _byte_offset, count in dictionary['files']:
        fsize = os.path.getsize(fname)
        on_disk = int(fsize/4-1)  ## -1 b.c. ignore header bytes
        filekey = os.path.basename(fname)
        if filekey in tallies:
            tallies[filekey][0] += on_disk
            tallies[filekey][1] += count
        else:
            tallies[filekey] = [on_disk, count]
    for key, (fcount, count) in tallies.items():
        if fcount != count:
            raise IOError(
                f"{key} : {fcount:.0f} particles on disk but {count} in metadata.")
    return True
def init_octree_root_node(dictionary,top_level_directory=None,thread_id=0):
    """Build the root OctNode from a data dictionary and optionally persist it.

    The returned metadata dictionary mirrors the input settings, e.g.::

        root_dict = {'field_names':...,
            'has_velocities':...,
            'has_colors':...,
            'weight_index':...,
            'nodes':{}}

    :param dictionary: particle buffers plus tree-level settings, consumed by
        ``OctNode.set_buffers_from_dict``.
    :param top_level_directory: when given, the root node file and an initial
        ``octree.json`` are written beneath this directory.
    :param thread_id: index used to name the output sub-directory.
    :return: the root metadata dictionary.
    """
    root = OctNode(None,None,[])
    root_dict = root.set_buffers_from_dict(dictionary,init_node=True)
    ## nothing to persist -> hand back the in-memory metadata as-is
    if top_level_directory is None:
        return root_dict
    target = os.path.join(top_level_directory,f'output_{thread_id:02d}.0')
    root_dict['nodes'][root.name] = root.write(target)
    write_to_json(root_dict,os.path.join(top_level_directory,'octree.json'))
    return root_dict
def convertNodeFFRawFFTree(
    node_dict,
    fname,
    field_names):
    """Translate one node's particle data from .ffraw files into a single .fftree file.

    :param node_dict: metadata dictionary describing the node (center, width,
        files, buffer_size, ...).
    :param fname: output path for the .fftree file.
    :param field_names: tracked field names, forwarded to the OctNode.
    :return: the updated node dictionary; the input is returned unchanged when
        the node has no files on disk. Note the ``'children'`` key is not
        carried over here -- the caller re-attaches it (see
        convert_ffraw_to_fftree).
    """
    if 'files' not in node_dict.keys(): return node_dict
    ## create a new octnode object to translate the data with
    node = OctNode(
        node_dict['center'],
        node_dict['width'],
        field_names,
        node_dict['name'],
        has_velocities=node_dict['com_velocity'] is not None,
        has_colors=node_dict['rgba_color'] is not None)
    ## load the node's particles from the .ffraw files
    node.set_buffers_from_disk(node_dict['files'],node_dict['buffer_size'])
    return node.write_fftree(fname)
|
agellerREPO_NAMEfireflyPATH_START.@firefly_extracted@firefly-main@src@firefly@data_reader@octree.py@.PATH_END.py
|
{
"filename": "copy_injection_recovery.py",
"repo_name": "ThibeauWouters/TurboPE-BNS",
"repo_path": "TurboPE-BNS_extracted/TurboPE-BNS-main/injections/outdir_NRTv2/injection_36/copy_injection_recovery.py",
"type": "Python"
}
|
"""
Idea: try different learning rate schemes to try and fix the injections
"""
import psutil
p = psutil.Process()
p.cpu_affinity([0])
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "3"
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.10"
import numpy as np
import argparse
# Regular imports
import argparse
import copy
import numpy as np
from astropy.time import Time
import time
import shutil
import json
import jax
jax.config.update("jax_enable_x64", True)
import jax.numpy as jnp
from jimgw.jim import Jim
from jimgw.single_event.detector import H1, L1, V1
from jimgw.single_event.likelihood import HeterodynedTransientLikelihoodFD, TransientLikelihoodFD
from jimgw.single_event.waveform import RippleTaylorF2, RippleIMRPhenomD_NRTidalv2, RippleIMRPhenomD_NRTidalv2_no_taper
from jimgw.prior import Uniform, Composite
import utils # our plotting and postprocessing utilities script
import optax
# Names of the parameters and their ranges for sampling parameters for the injection
NAMING = ['M_c', 'q', 's1_z', 's2_z', 'lambda_1', 'lambda_2', 'd_L', 't_c', 'phase_c', 'cos_iota', 'psi', 'ra', 'sin_dec']
# Prior bounds as [low, high] per parameter; keys must stay aligned with NAMING,
# since the sampling code indexes them positionally.
PRIOR = {
    "M_c": [0.8759659737275101, 2.6060030916165484],
    "q": [0.5, 1.0],
    "s1_z": [-0.05, 0.05],
    "s2_z": [-0.05, 0.05],
    "lambda_1": [0.0, 5000.0],
    "lambda_2": [0.0, 5000.0],
    "d_L": [30.0, 300.0],
    "t_c": [-0.1, 0.1],
    "phase_c": [0.0, 2 * jnp.pi],
    "cos_iota": [-1.0, 1.0],
    "psi": [0.0, jnp.pi],
    "ra": [0.0, 2 * jnp.pi],
    "sin_dec": [-1, 1]
}
################
### ARGPARSE ###
################
# TODO save these into a new file
def _str2bool(value):
    """Parse a command-line boolean.

    ``argparse`` with ``type=bool`` treats any non-empty string (including
    "False") as True; this converter accepts the usual spellings of both
    values and keeps ``--flag False`` working as intended.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("true", "t", "yes", "1"):
        return True
    if value.lower() in ("false", "f", "no", "0"):
        return False
    raise argparse.ArgumentTypeError(f"Expected a boolean value, got {value!r}")
def get_parser(**kwargs):
    """Build the argument parser for the injection-recovery script.

    :param kwargs: ``add_help`` (bool, default True) is forwarded to
        :class:`argparse.ArgumentParser`.
    :return: the configured :class:`argparse.ArgumentParser`.
    """
    add_help = kwargs.get("add_help", True)
    parser = argparse.ArgumentParser(
        description="Perform an injection recovery.",
        add_help=add_help,
    )
    # TODO os does not use them
    # parser.add_argument(
    #     "--GPU-device",
    #     type=int,
    #     default=0,
    #     help="Select GPU index to use.",
    # )
    # parser.add_argument(
    #     "--GPU-memory-fraction",
    #     type=float,
    #     default=0.5,
    #     help="Select percentage of GPU memory to use.",
    # )
    parser.add_argument(
        "--outdir",
        type=str,
        default="./outdir/",
        help="Output directory for the injection.",
    )
    # NOTE: boolean options use _str2bool instead of type=bool so that
    # e.g. "--load-existing-config False" is not silently parsed as True.
    parser.add_argument(
        "--load-existing-config",
        type=_str2bool,
        default=False,
        help="Whether to load and redo an existing injection (True) or to generate a new set of parameters (False).",
    )
    parser.add_argument(
        "--N",
        type=str,
        default="",
        help="Number (or generically, a custom identifier) of this injection, used to locate the output directory. If an empty string is passed (default), we generate a new injection.",
    )
    parser.add_argument(
        "--SNR-threshold",
        type=float,
        default=12,
        help="Skip injections with SNR below this threshold.",
    )
    parser.add_argument(
        "--waveform-approximant",
        type=str,
        default="TaylorF2",
        help="Which waveform approximant to use. Recommended to use TaylorF2 for now, NRTidalv2 might still be a bit unstable.",
    )
    parser.add_argument(
        "--relative-binning-binsize",
        type=int,
        default=100,
        help="Number of bins for the relative binning.",
    )
    parser.add_argument(
        "--relative-binning-ref-params-equal-true-params",
        type=_str2bool,
        default=True,
        help="Whether to set the reference parameters in the relative binning code to injection parameters.",
    )
    parser.add_argument(
        "--save-training-chains",
        type=_str2bool,
        default=False,
        help="Whether to save training chains or not (can be very large!)",
    )
    parser.add_argument(
        "--eps-mass-matrix",
        type=float,
        default=1e-6,
        help="Overall scale factor to rescale the step size of the local sampler.",
    )
    parser.add_argument(
        "--which-local-sampler",
        type=str,
        default="MALA",
        help="Which local sampler to use.",
    )
    parser.add_argument(
        "--smart-initial-guess",
        type=_str2bool,
        default=False,
        help="Distribute the walkers around the injected parameters. TODO change this to reference parameters found by the relative binning code.",
    )
    parser.add_argument(
        "--use-scheduler",
        type=_str2bool,
        default=True,
        help="Use a learning rate scheduler instead of a fixed learning rate.",
    )
    parser.add_argument(
        "--stopping-criterion-global-acc",
        type=float,
        default=1.0,
        help="Stop the run once we reach this global acceptance rate.",
    )
    parser.add_argument(
        "--save-likelihood",
        type=_str2bool,
        default=False,
        help="Whether to save the likelihood object",
    )
    parser.add_argument(
        "--tight-Mc-prior",
        type=_str2bool,
        default=False,
        help="Whether to use a tight prior on the Mc values or not",
    )
    # # TODO this has to be implemented
    # parser.add_argument(
    #     "--autotune_local_sampler",
    #     type=bool,
    #     default=False,
    #     help="TODO Still has to be implemented! Specify whether to use autotuning for the local sampler.",
    # )
    return parser
####################
### Script setup ###
####################
def body(args):
    """
    Run an injection and recovery. To get an explanation of the hyperparameters, go to:
        - jim hyperparameters: https://github.com/ThibeauWouters/jim/blob/8cb4ef09fefe9b353bfb89273a4bc0ee52060d72/src/jimgw/jim.py#L26
        - flowMC hyperparameters: https://github.com/ThibeauWouters/flowMC/blob/ad1a32dcb6984b2e178d7204a53d5da54b578073/src/flowMC/sampler/Sampler.py#L40

    Side effects: injects a simulated signal into the H1/L1/V1 detector
    objects, runs the flowMC sampler through jim, and writes samples, plots,
    the trained normalizing flow and run metadata into ``args.outdir``.

    :param args: parsed command-line arguments (see ``get_parser``).
    """
    start_time = time.time()
    # TODO move and get these as arguments
    # Deal with the hyperparameters
    naming = NAMING
    HYPERPARAMETERS = {
        "flowmc":
        {
            "n_loop_training": 400,
            "n_loop_production": 50,
            "n_local_steps": 5,
            "n_global_steps": 400,
            "n_epochs": 50,
            "n_chains": 1000,
            "learning_rate": 0.001, # using a scheduler below
            "max_samples": 50000,
            "momentum": 0.9,
            "batch_size": 50000,
            "use_global": True,
            "logging": True,
            "keep_quantile": 0.0,
            "local_autotune": None,
            "train_thinning": 10,
            "output_thinning": 30,
            "n_sample_max": 10000,
            "precompile": False,
            "verbose": False,
            "outdir": args.outdir,
            "stopping_criterion_global_acc": args.stopping_criterion_global_acc,
            "which_local_sampler": "MALA"
        },
        "jim":
        {
            "seed": 0,
            "n_chains": 1000,
            "num_layers": 10,
            "hidden_size": [128, 128],
            "num_bins": 8,
        }
    }
    flowmc_hyperparameters = HYPERPARAMETERS["flowmc"]
    jim_hyperparameters = HYPERPARAMETERS["jim"]
    hyperparameters = {**flowmc_hyperparameters, **jim_hyperparameters}
    # TODO can I just replace this with update dict?
    # Command-line arguments override the defaults above whenever the names match.
    for key, value in args.__dict__.items():
        if key in hyperparameters:
            hyperparameters[key] = value
    ### POLYNOMIAL SCHEDULER
    if args.use_scheduler:
        print("Using polynomial learning rate scheduler")
        # Decay from start_lr to end_lr over the training epochs, holding the
        # initial rate for the first 10% of them.
        total_epochs = hyperparameters["n_epochs"] * hyperparameters["n_loop_training"]
        start = int(total_epochs / 10)
        start_lr = 1e-3
        end_lr = 1e-5
        power = 4.0
        schedule_fn = optax.polynomial_schedule(start_lr, end_lr, power, total_epochs-start, transition_begin=start)
        hyperparameters["learning_rate"] = schedule_fn
    print(f"Saving output to {args.outdir}")
    # Fetch waveform used
    supported_waveforms = ["TaylorF2", "NRTidalv2", "IMRPhenomD_NRTidalv2"]
    if args.waveform_approximant not in supported_waveforms:
        print(f"Waveform approximant {args.waveform_approximant} not supported. Supported waveforms are {supported_waveforms}. Changing to TaylorF2.")
        args.waveform_approximant = "TaylorF2"
    if args.waveform_approximant == "TaylorF2":
        ripple_waveform_fn = RippleTaylorF2
    elif args.waveform_approximant in ["IMRPhenomD_NRTidalv2", "NRTv2", "NRTidalv2"]:
        ripple_waveform_fn = RippleIMRPhenomD_NRTidalv2
    else:
        raise ValueError(f"Waveform approximant {args.waveform_approximant} not supported.")
    # Before main code, check if outdir is correct dir format TODO improve with sys?
    if args.outdir[-1] != "/":
        args.outdir += "/"
    outdir = f"{args.outdir}injection_{args.N}/"
    # Get the prior bounds, both as 1D and 2D arrays
    prior_ranges = jnp.array([PRIOR[name] for name in naming])
    prior_low, prior_high = prior_ranges[:, 0], prior_ranges[:, 1]
    bounds = np.array(list(PRIOR.values()))
    # Now go over to creating parameters, and potentially check SNR cutoff
    # Keep drawing injection parameters until the network SNR clears the threshold.
    network_snr = 0.0
    print(f"The SNR threshold parameter is set to {args.SNR_threshold}")
    while network_snr < args.SNR_threshold:
        # Generate the parameters or load them from an existing file
        if args.load_existing_config:
            config_path = f"{outdir}config.json"
            print(f"Loading existing config, path: {config_path}")
            config = json.load(open(config_path))
        else:
            print(f"Generating new config")
            config = utils.generate_config(prior_low, prior_high, naming, args.N, args.outdir)
        key = jax.random.PRNGKey(config["seed"])
        # Save the given script hyperparams
        with open(f"{outdir}script_args.json", 'w') as json_file:
            json.dump(args.__dict__, json_file)
        # Start injections
        print("Injecting signals . . .")
        waveform = ripple_waveform_fn(f_ref=config["fref"])
        # Create frequency grid
        freqs = jnp.arange(
            config["fmin"],
            config["f_sampling"] / 2, # maximum frequency being halved of sampling frequency
            1. / config["duration"]
        )
        # convert injected mass ratio to eta, and apply arccos and arcsin
        q = config["q"]
        eta = q / (1 + q) ** 2
        iota = float(jnp.arccos(config["cos_iota"]))
        dec = float(jnp.arcsin(config["sin_dec"]))
        # Setup the timing setting for the injection
        epoch = config["duration"] - config["post_trigger_duration"]
        gmst = Time(config["trigger_time"], format='gps').sidereal_time('apparent', 'greenwich').rad
        # Array of injection parameters
        true_param = {
            'M_c': config["M_c"],       # chirp mass
            'eta': eta,                 # symmetric mass ratio 0 < eta <= 0.25
            's1_z': config["s1_z"],     # aligned spin of primary component s1_z.
            's2_z': config["s2_z"],     # aligned spin of secondary component s2_z.
            'lambda_1': config["lambda_1"], # tidal deformability of primary component lambda_1.
            'lambda_2': config["lambda_2"], # tidal deformability of secondary component lambda_2.
            'd_L': config["d_L"],       # luminosity distance
            't_c': config["t_c"],       # timeshift w.r.t. trigger time
            'phase_c': config["phase_c"], # merging phase
            'iota': iota,               # inclination angle
            'psi': config["psi"],       # polarization angle
            'ra': config["ra"],         # right ascension
            'dec': dec                  # declination
        }
        # Get the true parameter values for the plots
        truths = copy.deepcopy(true_param)
        # NOTE(review): the 'eta' slot is overwritten with the mass ratio q so
        # the corner plots are drawn in q-space -- confirm this is intended.
        truths["eta"] = q
        truths = np.fromiter(truths.values(), dtype=float)
        detector_param = {
            'ra': config["ra"],
            'dec': dec,
            'gmst': gmst,
            'psi': config["psi"],
            'epoch': epoch,
            't_c': config["t_c"],
        }
        print(f"The injected parameters are {true_param}")
        # Generating the geocenter waveform
        h_sky = waveform(freqs, true_param)
        # Setup interferometers
        ifos = [H1, L1, V1]
        psd_files = ["./psds/psd.txt", "./psds/psd.txt", "./psds/psd_virgo.txt"]
        # inject signal into ifos
        for idx, ifo in enumerate(ifos):
            key, subkey = jax.random.split(key)
            ifo.inject_signal(
                subkey,
                freqs,
                h_sky,
                detector_param,
                psd_file=psd_files[idx] # note: the function load_psd actually loads the asd
            )
        print("Signal injected")
        # Compute the SNR
        h1_snr = utils.compute_snr(H1, h_sky, detector_param)
        l1_snr = utils.compute_snr(L1, h_sky, detector_param)
        v1_snr = utils.compute_snr(V1, h_sky, detector_param)
        network_snr = np.sqrt(h1_snr**2 + l1_snr**2 + v1_snr**2)
        # If the SNR is too low, we need to generate new parameters
        if network_snr < args.SNR_threshold:
            print(f"Network SNR is less than {args.SNR_threshold}, generating new parameters")
            if args.load_existing_config:
                raise ValueError("SNR is less than threshold, but loading existing config. This should not happen!")
        print("H1 SNR:", h1_snr)
        print("L1 SNR:", l1_snr)
        print("V1 SNR:", v1_snr)
        print("Network SNR:", network_snr)
        print(f"Saving network SNR")
        with open(outdir + 'network_snr.txt', 'w') as file:
            file.write(str(network_snr))
    print("Start prior setup")
    # Priors without transformation
    if args.tight_Mc_prior:
        print("INFO: Using a tight chirp mass prior")
        true_mc = true_param["M_c"]
        Mc_prior = Uniform(true_mc - 0.1, true_mc + 0.1, naming=['M_c'])
    else:
        Mc_prior = Uniform(prior_low[0], prior_high[0], naming=['M_c'])
    # The sampler works in q but the waveform wants eta, hence the transform.
    q_prior = Uniform(prior_low[1], prior_high[1], naming=['q'],
                      transforms={
                          'q': (
                              'eta',
                              lambda params: params['q'] / (1 + params['q']) ** 2
                          )
                      }
                      )
    s1z_prior = Uniform(prior_low[2], prior_high[2], naming=['s1_z'])
    s2z_prior = Uniform(prior_low[3], prior_high[3], naming=['s2_z'])
    lambda_1_prior = Uniform(prior_low[4], prior_high[4], naming=['lambda_1'])
    lambda_2_prior = Uniform(prior_low[5], prior_high[5], naming=['lambda_2'])
    dL_prior = Uniform(prior_low[6], prior_high[6], naming=['d_L'])
    tc_prior = Uniform(prior_low[7], prior_high[7], naming=['t_c'])
    phic_prior = Uniform(prior_low[8], prior_high[8], naming=['phase_c'])
    cos_iota_prior = Uniform(prior_low[9], prior_high[9], naming=["cos_iota"],
                             transforms={
                                 "cos_iota": (
                                     "iota",
                                     lambda params: jnp.arccos(
                                         jnp.arcsin(jnp.sin(params["cos_iota"] / 2 * jnp.pi)) * 2 / jnp.pi
                                     ),
                                 )
                             },
                             )
    psi_prior = Uniform(prior_low[10], prior_high[10], naming=["psi"])
    ra_prior = Uniform(prior_low[11], prior_high[11], naming=["ra"])
    sin_dec_prior = Uniform(prior_low[12], prior_high[12], naming=["sin_dec"],
                            transforms={
                                "sin_dec": (
                                    "dec",
                                    lambda params: jnp.arcsin(
                                        jnp.arcsin(jnp.sin(params["sin_dec"] / 2 * jnp.pi)) * 2 / jnp.pi
                                    ),
                                )
                            },
                            )
    # Save the prior bounds
    print("Saving prior bounds")
    utils.save_prior_bounds(prior_low, prior_high, outdir)
    # Compose the prior
    prior_list = [
        Mc_prior,
        q_prior,
        s1z_prior,
        s2z_prior,
        lambda_1_prior,
        lambda_2_prior,
        dL_prior,
        tc_prior,
        phic_prior,
        cos_iota_prior,
        psi_prior,
        ra_prior,
        sin_dec_prior,
    ]
    complete_prior = Composite(prior_list)
    bounds = jnp.array([[p.xmin, p.xmax] for p in complete_prior.priors])
    print("Finished prior setup")
    print("Initializing likelihood")
    if args.relative_binning_ref_params_equal_true_params:
        ref_params = true_param
        print("Using the true parameters as reference parameters for the relative binning")
    else:
        ref_params = None
        print("Will search for reference waveform for relative binning")
    # ### TODO remove
    # # Explicitly fix relative binning for NRTidalv2
    # if args.waveform_approximant in ["IMRPhenomD_NRTidalv2", "NRTidalv2"]:
    #     # ## TODO this might be broken?
    #     # # # Explicitly set the f_min and f_max used there
    #     # # # relbin_kwargs = {"f_min": config["fmin"], "f_max": config["f_sampling"] / 2}
    #     # relbin_kwargs = {}
    #     # # Set the reference parameters at the ideal location for not breaking relative binning
    #     # print("Setting the reference parameters to not break the relative binning for NRTidalv2")
    #     # ref_params = true_param
    #     # ref_params["lambda_1"] = 1.0
    #     # ref_params["lambda_2"] = 1.0
    #     print("Now, the reference parameters are: ")
    #     print(ref_params)
    # else:
    #     relbin_kwargs = {}
    relbin_kwargs = {}
    if args.waveform_approximant == "IMRPhenomD_NRTidalv2":
        print("Using IMRPhenomD_NRTidalv2 no taper as the reference waveform for the likelihood")
        reference_waveform = RippleIMRPhenomD_NRTidalv2_no_taper(f_ref=config["fref"])
    else:
        reference_waveform = waveform
    likelihood = HeterodynedTransientLikelihoodFD(
        ifos,
        prior=complete_prior,
        bounds=bounds,
        n_bins = args.relative_binning_binsize,
        waveform=waveform,
        reference_waveform=reference_waveform,
        trigger_time=config["trigger_time"],
        duration=config["duration"],
        post_trigger_duration=config["post_trigger_duration"],
        ref_params=ref_params,
        **relbin_kwargs
    )
    if args.save_likelihood:
        print(f"INFO: Saving the likelihood to {outdir}")
        import pickle
        with open(f'{outdir}likelihood.pickle', 'wb') as handle:
            pickle.dump(likelihood, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # Save the ref params
    utils.save_relative_binning_ref_params(likelihood, outdir)
    # Generate arguments for the local sampler
    mass_matrix = jnp.eye(len(prior_list))
    for idx, prior in enumerate(prior_list):
        mass_matrix = mass_matrix.at[idx, idx].set(prior.xmax - prior.xmin) # fetch the prior range
    local_sampler_arg = {'step_size': mass_matrix * args.eps_mass_matrix} # set the overall step size
    hyperparameters["local_sampler_arg"] = local_sampler_arg
    # Create jim object
    jim = Jim(
        likelihood,
        complete_prior,
        **hyperparameters
    )
    if args.smart_initial_guess:
        n_chains = hyperparameters["n_chains"]
        n_dim = len(prior_list)
        initial_guess = utils.generate_smart_initial_guess(gmst, [H1, L1, V1], true_param, n_chains, n_dim, prior_low, prior_high)
        # Plot it
        utils.plot_chains(initial_guess, "initial_guess", outdir, truths = truths)
    else:
        initial_guess = jnp.array([])
    ### Finally, do the sampling
    jim.sample(jax.random.PRNGKey(24), initial_guess = initial_guess)
    # === Show results, save output ===
    # Print a summary to screen:
    jim.print_summary()
    # Save and plot the results of the run
    # - training phase
    name = outdir + f'results_training.npz'
    print(f"Saving samples to {name}")
    state = jim.Sampler.get_sampler_state(training = True)
    chains, log_prob, local_accs, global_accs, loss_vals = state["chains"], state["log_prob"], state["local_accs"], state["global_accs"], state["loss_vals"]
    local_accs = jnp.mean(local_accs, axis=0)
    global_accs = jnp.mean(global_accs, axis=0)
    if args.save_training_chains:
        np.savez(name, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs, loss_vals=loss_vals, chains=chains)
    else:
        np.savez(name, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs, loss_vals=loss_vals)
    utils.plot_accs(local_accs, "Local accs (training)", "local_accs_training", outdir)
    utils.plot_accs(global_accs, "Global accs (training)", "global_accs_training", outdir)
    utils.plot_loss_vals(loss_vals, "Loss", "loss_vals", outdir)
    utils.plot_log_prob(log_prob, "Log probability (training)", "log_prob_training", outdir)
    # - production phase
    name = outdir + f'results_production.npz'
    state = jim.Sampler.get_sampler_state(training = False)
    chains, log_prob, local_accs, global_accs = state["chains"], state["log_prob"], state["local_accs"], state["global_accs"]
    local_accs = jnp.mean(local_accs, axis=0)
    global_accs = jnp.mean(global_accs, axis=0)
    np.savez(name, chains=chains, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs)
    utils.plot_accs(local_accs, "Local accs (production)", "local_accs_production", outdir)
    utils.plot_accs(global_accs, "Global accs (production)", "global_accs_production", outdir)
    utils.plot_log_prob(log_prob, "Log probability (production)", "log_prob_production", outdir)
    # Plot the chains as corner plots
    utils.plot_chains(chains, "chains_production", outdir, truths = truths)
    # Save the NF and show a plot of samples from the flow
    print("Saving the NF")
    jim.Sampler.save_flow(outdir + "nf_model")
    name = outdir + 'results_NF.npz'
    chains = jim.Sampler.sample_flow(10_000)
    np.savez(name, chains = chains)
    # Finally, copy over this script to the outdir for reproducibility
    shutil.copy2(__file__, outdir + "copy_injection_recovery.py")
    print("Saving the jim hyperparameters")
    jim.save_hyperparameters(outdir = outdir)
    end_time = time.time()
    runtime = end_time - start_time
    print(f"Time taken: {runtime} seconds ({(runtime)/60} minutes)")
    print(f"Saving runtime")
    with open(outdir + 'runtime.txt', 'w') as file:
        file.write(str(runtime))
    print("Finished injection recovery successfully!")
############
### MAIN ###
############
def main(given_args = None):
    """Parse CLI arguments, apply programmatic overrides, and run the injection recovery.

    :param given_args: optional dict of overrides applied on top of the parsed
        command-line arguments.
    """
    args = get_parser().parse_args()
    print(given_args)
    # Overrides supplied programmatically take precedence over the CLI.
    if given_args is not None:
        args.__dict__.update(given_args)
    if args.load_existing_config and args.N == "":
        raise ValueError("If load_existing_config is True, you need to specify the N argument to locate the existing injection. ")
    separator = "------------------------------------"
    print(separator)
    print("Arguments script:")
    for name, value in vars(args).items():
        print(f"{name}: {value}")
    print(separator)
    print("Starting main code")
    # Derive the injection identifier from the outdir structure when not given.
    if len(args.N) == 0:
        args.N = utils.get_N(args.outdir)
    # TODO fix that os uses these
    # import os
    # os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = str(args.GPU_memory_fraction)
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(args.GPU_device)
    # print(f"Running on GPU {args.GPU_device}")
    # Execute the script
    body(args)
# Allow the script to be invoked directly from the command line.
if __name__ == "__main__":
    main()
|
ThibeauWoutersREPO_NAMETurboPE-BNSPATH_START.@TurboPE-BNS_extracted@TurboPE-BNS-main@injections@outdir_NRTv2@injection_36@copy_injection_recovery.py@.PATH_END.py
|
{
"filename": "test_database.py",
"repo_name": "gmbrandt/xwavecal",
"repo_path": "xwavecal_extracted/xwavecal-main/xwavecal/tests/test_database.py",
"type": "Python"
}
|
import tempfile
import os
from datetime import datetime, timedelta
import sqlite3
import mock
import xwavecal.database as db
from xwavecal.tests.utils import FakeImage
def test_format_db_info():
    """format_db_info should translate a FakeImage's header into a caldata row dict."""
    image = FakeImage()
    date_fmt = '%Y-%m-%dT%H:%M:%S.%f'
    info = db.format_db_info(image, date_fmt)
    expected_obs_date = datetime.strptime(image.get_header_val('observation_date'), date_fmt)
    checks = [
        info['type'] == image.get_header_val('type'),
        info['observation_date'] == expected_obs_date,
        info['date_created'] - datetime.now() <= timedelta(seconds=20),
        info['instrument'] == image.get_header_val('instrument'),
        info['instrument2'] == image.get_header_val('instrument2'),
        info['site_name'] == image.get_header_val('site_name'),
        info['fiber0'] == image.fiber0_lit,
        info['fiber1'] == image.fiber1_lit,
        info['fiber2'] == image.fiber2_lit,
        info['is_bad'] == 0,
        info['filepath'] == image.filepath,
    ]
    assert all(checks)
@mock.patch('xwavecal.database.query_db_for_match', return_value=None)
def test_add_to_db(mock_match):
    """With duplicate-matching disabled, every add should create a fresh row."""
    db_info = db.format_db_info(FakeImage(), '%Y-%m-%dT%H:%M:%S.%f')
    with tempfile.TemporaryDirectory() as tmp_dir:
        db_path = os.path.join(tmp_dir, 'test.db')
        for _ in range(3):
            db.add_data_to_db(db_path, db_info)
        # expect 3 identical entries in the database.
        connection = sqlite3.connect(db_path)
        cursor = connection.cursor()
        cursor.execute('SELECT * FROM caldata')
        rows = cursor.fetchall()
        connection.close()
        assert len(rows) == 3
def test_update_db():
    """Repeated adds of the same record should be merged into a single row."""
    db_info = db.format_db_info(FakeImage(), '%Y-%m-%dT%H:%M:%S.%f')
    with tempfile.TemporaryDirectory() as tmp_dir:
        db_path = os.path.join(tmp_dir, 'test.db')
        for _ in range(3):
            db.add_data_to_db(db_path, db_info)
        # expect the 3 identical entries in the database to be culled into one.
        connection = sqlite3.connect(db_path)
        cursor = connection.cursor()
        cursor.execute('SELECT * FROM caldata')
        rows = cursor.fetchall()
        connection.close()
        assert len(rows) == 1
def test_query_database():
    """query_db_for_nearest should return the closest-in-time entry of the requested type."""
    fmt = '%Y-%m-%dT%H:%M:%S.%f'
    early, nearest, wrong_type = FakeImage(), FakeImage(), FakeImage()
    early.set_header_val('observation_date', '2019-04-11T12:56:44.466')
    nearest.set_header_val('observation_date', '2019-04-13T12:56:44.466')
    nearest.filepath = 'some/path'
    wrong_type.set_header_val('observation_date', '2019-04-15T12:56:44.466')
    wrong_type.set_header_val('type', 'wrong')
    reference = FakeImage()
    reference.set_header_val('observation_date', '2019-04-16T12:56:44.466')
    with tempfile.TemporaryDirectory() as tmp_dir:
        db_path = os.path.join(tmp_dir, 'test.db')
        for image in (early, nearest, wrong_type):
            db.add_data_to_db(db_path, db.format_db_info(image, fmt))
        # 'wrong_type' is nearest in time but has the wrong type, so 'nearest' wins.
        assert db.query_db_for_nearest(db_path, reference, 'lampflat', fmt) == nearest.filepath
        assert db.query_db_for_nearest(db_path, reference, 'no_type', fmt) is None
def test_query_database_returns_none():
    """Querying a nonexistent database file should yield None instead of raising."""
    # The original repeated this identical assertion twice; once is sufficient.
    assert db.query_db_for_nearest('non/existent/path', None, 'lampflat', '%Y-%m-%dT%H:%M:%S.%f') is None
|
gmbrandtREPO_NAMExwavecalPATH_START.@xwavecal_extracted@xwavecal-main@xwavecal@tests@test_database.py@.PATH_END.py
|
{
"filename": "CmdInput.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/console/CmdInput.py",
"type": "Python"
}
|
from ..Qt import QtCore, QtWidgets
class CmdInput(QtWidgets.QLineEdit):
    """Single-line input widget for an interactive console.

    Keeps a shell-style command history navigated with the Up/Down arrow keys
    and emits :data:`sigExecuteCmd` with the current line when Return/Enter is
    pressed. ``history[0]`` is the scratch slot holding the line currently
    being edited; older commands follow at increasing indices.
    """
    # Emitted with the command string when the user presses Return/Enter.
    sigExecuteCmd = QtCore.Signal(object)
    def __init__(self, parent):
        QtWidgets.QLineEdit.__init__(self, parent)
        self.ps1 = ">>> "  # prompt shown when a new statement is expected
        self.ps2 = "... "  # prompt shown when a continuation line is expected
        # history[0] is the in-progress line; self.ptr indexes the entry shown.
        self.history = [""]
        self.ptr = 0
        self.setMultiline(False)
    def setMultiline(self, ml):
        """Switch the placeholder prompt between continuation (ps2) and primary (ps1)."""
        if ml:
            self.setPlaceholderText(self.ps2)
        else:
            self.setPlaceholderText(self.ps1)
    def keyPressEvent(self, ev):
        """Handle history navigation (Up/Down) and command submission (Return/Enter)."""
        if ev.key() == QtCore.Qt.Key.Key_Up:
            # Walk backwards in time; stop at the oldest entry.
            if self.ptr < len(self.history) - 1:
                self.setHistory(self.ptr+1)
            ev.accept()
            return
        elif ev.key() == QtCore.Qt.Key.Key_Down:
            # Walk forwards in time; stop at the in-progress line (slot 0).
            if self.ptr > 0:
                self.setHistory(self.ptr-1)
            ev.accept()
            return
        elif ev.key() in (QtCore.Qt.Key.Key_Return, QtCore.Qt.Key.Key_Enter):
            self.execCmd()
        else:
            super().keyPressEvent(ev)
            # Keep the scratch slot in sync with whatever is being typed.
            self.history[0] = self.text()
    def execCmd(self):
        """Emit the current line as a command and push it onto the history."""
        cmd = self.text()
        # Skip storing consecutive duplicates of the same command.
        if len(self.history) == 1 or cmd != self.history[1]:
            self.history.insert(1, cmd)
        self.history[0] = ""
        self.setHistory(0)
        self.sigExecuteCmd.emit(cmd)
    def setHistory(self, num):
        """Display history entry ``num`` in the line edit and remember the position."""
        self.ptr = num
        self.setText(self.history[self.ptr])
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@console@CmdInput.py@.PATH_END.py
|
{
"filename": "_hovertemplate.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermapbox/_hovertemplate.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``hovertemplate`` attribute of ``scattermapbox`` traces."""

    def __init__(
        self, plotly_name="hovertemplate", parent_name="scattermapbox", **kwargs
    ):
        # Callers may override these via kwargs; otherwise use the trace defaults.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        super(HovertemplateValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermapbox@_hovertemplate.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "deepmind/optax",
"repo_path": "optax_extracted/optax-main/README.md",
"type": "Markdown"
}
|
# Optax

[](http://optax.readthedocs.io)

## Introduction
Optax is a gradient processing and optimization library for JAX.
Optax is designed to facilitate research by providing building blocks
that can be easily recombined in custom ways.
Our goals are to
* Provide simple, well-tested, efficient implementations of core components.
* Improve research productivity by making it easy to combine low-level
ingredients into custom optimizers (or other gradient processing components).
* Accelerate adoption of new ideas by making it easy for anyone to contribute.
We favor focusing on small composable building blocks that can be effectively
combined into custom solutions. Others may build upon these basic components
in more complicated abstractions. Whenever reasonable, implementations prioritize
readability and structuring code to match standard equations, over code reuse.
An initial prototype of this library was made available in JAX's experimental
folder as `jax.experimental.optix`. Given the wide adoption across DeepMind
of `optix`, and after a few iterations on the API, `optix` was eventually moved
out of `experimental` as a standalone open-source library, and renamed `optax`.
Documentation on Optax can be found at [optax.readthedocs.io](https://optax.readthedocs.io/).
## Installation
You can install the latest released version of Optax from PyPI via:
```sh
pip install optax
```
or you can install the latest development version from GitHub:
```sh
pip install git+https://github.com/google-deepmind/optax.git
```
## Quickstart
Optax contains implementations of [many popular optimizers](https://optax.readthedocs.io/en/latest/api/optimizers.html) and
[loss functions](https://optax.readthedocs.io/en/latest/api/losses.html).
For example, the following code snippet uses the Adam optimizer from `optax.adam`
and the mean squared error from `optax.l2_loss`. We initialize the optimizer
state using the `init` function and `params` of the model.
```python
optimizer = optax.adam(learning_rate)
# Obtain the `opt_state` that contains statistics for the optimizer.
params = {'w': jnp.ones((num_weights,))}
opt_state = optimizer.init(params)
```
To write the update loop we need a loss function that can be differentiated by
Jax (with `jax.grad` in this
example) to obtain the gradients.
```python
compute_loss = lambda params, x, y: optax.l2_loss(params['w'].dot(x), y)
grads = jax.grad(compute_loss)(params, xs, ys)
```
The gradients are then converted via `optimizer.update` to obtain the updates
that should be applied to the current parameters to obtain the new ones.
`optax.apply_updates` is a convenience utility to do this.
```python
updates, opt_state = optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
```
You can continue the quick start in [the Optax Getting started notebook.](https://github.com/google-deepmind/optax/blob/main/docs/getting_started.ipynb)
## Development
We welcome new contributors.
### Source code
You can check the latest sources with the following command.
```sh
git clone https://github.com/google-deepmind/optax.git
```
### Testing
To run the tests, please execute the following script.
```sh
sh test.sh
```
### Documentation
To build the documentation, first ensure that all the dependencies are installed.
```sh
pip install -e ".[docs]"
```
Then, execute the following.
```sh
cd docs
make html
```
## Benchmarks
If you feel lost in the crowd of available optimizers for deep learning, there
exist some extensive benchmarks:
[Benchmarking Neural Network Training Algorithms, Dahl G. et al, 2023](https://arxiv.org/pdf/2306.07179),
[Descending through a Crowded Valley β Benchmarking Deep Learning Optimizers, Schmidt R. et al, 2021](https://proceedings.mlr.press/v139/schmidt21a).
If you are interested in developing your own benchmark for some tasks,
consider the following framework
[Benchopt: Reproducible, efficient and collaborative optimization benchmarks, Moreau T. et al, 2022](https://arxiv.org/abs/2206.13424).
Finally, if you are searching for some recommendations on tuning optimizers,
consider taking a look at
[Deep Learning Tuning Playbook, Godbole V. et al, 2023](https://github.com/google-research/tuning_playbook).
## Citing Optax
This repository is part of the DeepMind JAX Ecosystem, to cite Optax
please use the citation:
```bibtex
@software{deepmind2020jax,
title = {The {D}eep{M}ind {JAX} {E}cosystem},
author = {DeepMind and Babuschkin, Igor and Baumli, Kate and Bell, Alison and Bhupatiraju, Surya and Bruce, Jake and Buchlovsky, Peter and Budden, David and Cai, Trevor and Clark, Aidan and Danihelka, Ivo and Dedieu, Antoine and Fantacci, Claudio and Godwin, Jonathan and Jones, Chris and Hemsley, Ross and Hennigan, Tom and Hessel, Matteo and Hou, Shaobo and Kapturowski, Steven and Keck, Thomas and Kemaev, Iurii and King, Michael and Kunesch, Markus and Martens, Lena and Merzic, Hamza and Mikulik, Vladimir and Norman, Tamara and Papamakarios, George and Quan, John and Ring, Roman and Ruiz, Francisco and Sanchez, Alvaro and Sartran, Laurent and Schneider, Rosalia and Sezener, Eren and Spencer, Stephen and Srinivasan, Srivatsan and Stanojevi\'{c}, Milo\v{s} and Stokowiec, Wojciech and Wang, Luyu and Zhou, Guangyao and Viola, Fabio},
url = {http://github.com/google-deepmind},
year = {2020},
}
```
|
deepmindREPO_NAMEoptaxPATH_START.@optax_extracted@optax-main@README.md@.PATH_END.py
|
{
"filename": "resampleNodeList.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/SimulationControl/resampleNodeList.py",
"type": "Python"
}
|
import mpi
import VoronoiDistributeNodes
import SolidSpheral
#...........................................................................
# A local helper method for copying data from one NodeList to another.
#...........................................................................
def copyNodeListFields(nodes0, nodes1, mask, solid):
    """Copy field values for the masked nodes of ``nodes0`` into ``nodes1``.

    Every internal node i of ``nodes0`` with ``mask[i] == 1`` has its field
    values copied, packed contiguously from index 0 of ``nodes1``.  When
    ``solid`` is true the solid-mechanics fields (deviatoric stress, plastic
    strain and strain rate, damage) are copied as well.
    """
    # Pair up the (source, destination) fields we need to transfer.
    pairs = [(nodes0.mass(),                  nodes1.mass()),
             (nodes0.positions(),             nodes1.positions()),
             (nodes0.velocity(),              nodes1.velocity()),
             (nodes0.Hfield(),                nodes1.Hfield()),
             (nodes0.massDensity(),           nodes1.massDensity()),
             (nodes0.specificThermalEnergy(), nodes1.specificThermalEnergy())]
    if solid:
        pairs += [(nodes0.deviatoricStress(),  nodes1.deviatoricStress()),
                  (nodes0.plasticStrain(),     nodes1.plasticStrain()),
                  (nodes0.plasticStrainRate(), nodes1.plasticStrainRate()),
                  (nodes0.damage(),            nodes1.damage())]
    j = 0
    for i in range(nodes0.numInternalNodes):
        if mask[i] == 1:
            # nodes1 must have been sized to hold all the masked nodes.
            assert j < nodes1.numInternalNodes
            for src, dst in pairs:
                dst[j] = src[i]
            j += 1
    return
#-------------------------------------------------------------------------------
# Resample to a new set of nodes represented by a generator.
#-------------------------------------------------------------------------------
def resampleNodeList(nodes,
                     generator,
                     W,
                     mask = None,
                     etaExclude = None,
                     removeUnusedNodes = True):
    """Resample ``nodes`` in place onto the point distribution given by ``generator``.

    Conserved quantities (mass, volume, momentum, thermal energy, and -- for
    solid NodeLists -- mass-weighted stress/plastic strain/damage) are
    "splatted" from the old points to the new ones with a MASH operation, so
    global sums are preserved by the remap.

    nodes             : the Fluid or Solid NodeList to be resampled (modified in place)
    generator         : node generator describing the target point distribution
    W                 : interpolation kernel used for the splat
    mask              : optional integer field; nodes with mask[i] == 1 are
                        preserved unchanged and excluded from resampling
    etaExclude        : normalized (eta) distance inside which new nodes that
                        overlap masked nodes are culled; defaults to
                        1/nodes.nodesPerSmoothingScale
    removeUnusedNodes : if True, new nodes that received no mass are deleted
    """
    # Check our dimensionality
    if isinstance(nodes, SolidSpheral.NodeList1d):
        ndim = 1
    elif isinstance(nodes, SolidSpheral.NodeList2d):
        ndim = 2
    elif isinstance(nodes, SolidSpheral.NodeList3d):
        ndim = 3
    else:
        raise ValueError("Unknown thing %s handed in: expected a NodeList" % nodes)
    ndim0 = ndim
    # NOTE(review): in Python 3, exec() inside a function cannot reliably
    # inject names into the local scope -- confirm these dimension-aliased
    # names (SolidNodeList, Vector, DataBase, ...) actually resolve here.
    exec("from SolidSpheral%id import *" % ndim)           # Load the aliases for our dimensionality
    ndim = ndim0
    exec("from VoronoiDistributeNodes import distributeNodes%id as distributor" % ndim)

    # Clear out any initial ghost nodes.
    nodes.numGhostNodes = 0

    # Check if we're doing a Solid or FluidNodeList.
    if isinstance(nodes, SolidNodeList):
        solid = True
        NLF = makeSolidNodeList
    elif isinstance(nodes, FluidNodeList):
        solid = False
        NLF = makeFluidNodeList
    else:
        raise RuntimeError("Unknown NodeList type.")

    # Check how to set the new neighbor info.
    if isinstance(nodes._neighbor, NestedGridNeighbor):
        topGridSize = nodes._neighbor.topGridSize
        xmin = Vector.zero
        xmax = Vector.one * topGridSize
        NeighborType = NestedGridNeighbor
        if mpi.procs > 1:
            dbc = NestedGridDistributedBoundary.instance()
    elif isinstance(nodes._neighbor, TreeNeighbor):
        xmin = nodes._neighbor.xmin
        xmax = nodes._neighbor.xmax
        topGridSize = (xmax - xmin).maxAbsElement()
        NeighborType = TreeNeighbor
        if mpi.procs > 1:
            dbc = BoundingVolumeDistributedBoundary.instance()
            #raise RuntimeError, "Need a parallel policy for TreeNeighbor."
    else:
        raise RuntimeError("Unknown Neighbor type.")

    # Build a temporary NodeList we'll use to sample to.
    newnodes = NLF(name = "zza_newnodes",
                   eos = nodes.eos,
                   hmin = 1e-10,
                   hmax = 1e10,
                   NeighborType = NeighborType,
                   topGridCellSize = topGridSize,
                   xmin = xmin,
                   xmax = xmax)
    if mask:
        masknodes = NLF(name = "zzz_masknodes",
                        eos = nodes.eos,
                        hmin = 1e-10,
                        hmax = 1e10,
                        NeighborType = NeighborType,
                        topGridCellSize = topGridSize,
                        xmin = xmin,
                        xmax = xmax)
    distributor((newnodes, generator))

    # If we're parallel we need distributed ghost nodes.
    bcs = vector_of_Boundary()
    if mpi.procs > 1:
        db = DataBase()
        db.appendNodeList(nodes)
        db.appendNodeList(newnodes)
        nodes.neighbor().updateNodes()
        newnodes.neighbor().updateNodes()
        dbc.setAllGhostNodes(db)
        dbc.finalizeGhostBoundary()
        bcs.append(dbc)

    # If we're masking some points, things get complicated. The mask nodes are going to persist to the new
    # nodes, and so we need to not overlay them. We also want to remove any new nodes that overlap with the
    # mask nodes, since the masked ones are going to be copied to the new nodes in the end.
    nmask = 0
    if mask:

        # Copy the field values from the original masked nodes to the temporary mask set.
        nmask = mask.localSumElements()
        print("Copying %i masked nodes from the original NodeList." % mpi.allreduce(nmask, mpi.SUM))
        masknodes.numInternalNodes = nmask
        copyNodeListFields(nodes, masknodes, mask, solid)

        # Remove the mask nodes from the starting NodeList.
        nodes2kill = vector_of_int()
        for i in range(nodes.numInternalNodes):
            if mask[i] == 1:
                nodes2kill.append(i)
        assert nodes2kill.size() == nmask
        nodes.deleteNodes(nodes2kill)

        # Now we need to remove any nodes from the target set that overlap with the mask nodes.
        db = DataBase()
        db.appendNodeList(newnodes)
        db.appendNodeList(masknodes)
        newnodes.neighbor().updateNodes()
        masknodes.neighbor().updateNodes()
        if mpi.procs > 1:
            dbc.setAllGhostNodes(db)
            dbc.finalizeGhostBoundary()
            newnodes.neighbor().updateNodes()
            masknodes.neighbor().updateNodes()
        db.updateConnectivityMap(False)
        cm = db.connectivityMap()
        if etaExclude is None:
            etaExclude = 1.0/nodes.nodesPerSmoothingScale
        assert etaExclude > 0.0
        posmask = masknodes.positions()
        Hmask = masknodes.Hfield()
        posnew = newnodes.positions()
        Hnew = newnodes.Hfield()
        nodes2kill = vector_of_int()
        for i in range(newnodes.numInternalNodes):
            fullconnectivity = cm.connectivityForNode(0, i)
            # fullconnectivity[1] are the mask-NodeList neighbors of new node i.
            for j in fullconnectivity[1]:
                # Normalized distance, measured in both nodes' smoothing scales.
                eta = min(( Hnew[i]*(posmask[j] - posnew[i])).magnitude(),
                          (Hmask[j]*(posmask[j] - posnew[i])).magnitude())
                if eta < etaExclude:
                    # NOTE(review): i may be appended more than once if several
                    # masked neighbors are within range -- presumably
                    # deleteNodes tolerates duplicates; confirm.
                    nodes2kill.append(i)
        print("Removing %i nodes from new list due to overlap with masked nodes." % mpi.allreduce(len(nodes2kill), mpi.SUM))
        newnodes.deleteNodes(nodes2kill)

    # Build the connectivity so we can do the overlay.
    db = DataBase()
    db.appendNodeList(nodes)
    db.appendNodeList(newnodes)
    nodes.neighbor().updateNodes()
    newnodes.neighbor().updateNodes()
    if mpi.procs > 1:
        dbc.setAllGhostNodes(db)
        dbc.finalizeGhostBoundary()
        nodes.neighbor().updateNodes()
        newnodes.neighbor().updateNodes()

    # Convert fields we're going to map to conserved values. This is necessary 'cause the splat operation we're going
    # to use guarantees summing over the input and output field values gives the same value.
    mass = nodes.mass()
    rho = nodes.massDensity()
    vol = ScalarField(nodes.mass())
    vel = nodes.velocity()
    eps = nodes.specificThermalEnergy()
    momentum = VectorField(vel)
    thermalenergy = ScalarField(eps)
    for i in range(nodes.numNodes):
        # vol = m/rho (with a tiny floor to avoid division by zero);
        # momentum and thermal energy are mass-weighted to be conserved sums.
        vol[i] /= rho[i] + 1.0e-30
        momentum[i] *= mass[i]
        thermalenergy[i] *= mass[i]
    if solid:
        S = nodes.deviatoricStress()
        ps = nodes.plasticStrain()
        D = nodes.damage()
        # Mass-weighted copies of the solid fields, for the same reason.
        mS = SymTensorField(S)
        mps = ScalarField(ps)
        mD = SymTensorField(D)
        for i in range(nodes.numNodes):
            mS[i] *= mass[i]
            mps[i] *= mass[i]
            mD[i] *= mass[i]

    # Map stuff from the old to new nodes.
    fls = FieldListSet()
    mass_fl = ScalarFieldList()
    vol_fl = ScalarFieldList()
    momentum_fl = VectorFieldList()
    thermalenergy_fl = ScalarFieldList()
    mass_fl.appendField(mass)
    vol_fl.appendField(vol)
    momentum_fl.appendField(momentum)
    thermalenergy_fl.appendField(thermalenergy)
    mass_fl.copyFields()
    vol_fl.copyFields()
    momentum_fl.copyFields()
    thermalenergy_fl.copyFields()
    # Ordering matters: these indices are how the splatted results are
    # retrieved below (ScalarFieldLists[0]=mass, [1]=vol, [2]=thermal energy).
    fls.ScalarFieldLists.append(mass_fl)
    fls.ScalarFieldLists.append(vol_fl)
    fls.VectorFieldLists.append(momentum_fl)
    fls.ScalarFieldLists.append(thermalenergy_fl)
    if solid:
        S_fl = SymTensorFieldList()
        ps_fl = ScalarFieldList()
        D_fl = SymTensorFieldList()
        S_fl.appendField(mS)
        ps_fl.appendField(mps)
        D_fl.appendField(mD)
        S_fl.copyFields()
        ps_fl.copyFields()
        D_fl.copyFields()
        fls.SymTensorFieldLists.append(S_fl)
        fls.ScalarFieldLists.append(ps_fl)
        fls.SymTensorFieldLists.append(D_fl)
    # Source (0) and target (1) geometry FieldLists for the splat.
    pos0_fl = VectorFieldList()
    mass0_fl = ScalarFieldList()
    H0_fl = SymTensorFieldList()
    pos0_fl.appendField(nodes.positions())
    mass0_fl.appendField(nodes.mass())
    H0_fl.appendField(nodes.Hfield())
    pos1_fl = VectorFieldList()
    mass1_fl = ScalarFieldList()
    H1_fl = SymTensorFieldList()
    pos1_fl.appendField(newnodes.positions())
    mass1_fl.appendField(newnodes.mass())
    H1_fl.appendField(newnodes.Hfield())
    pos0_fl.copyFields()
    mass0_fl.copyFields()
    H0_fl.copyFields()
    pos1_fl.copyFields()
    mass1_fl.copyFields()
    H1_fl.copyFields()

    # Apply boundaries to the Fields we're sampling from.
    for bc in bcs:
        bc.applyFieldListGhostBoundary(mass0_fl)
        bc.applyFieldListGhostBoundary(mass1_fl)
        for fl in fls.ScalarFieldLists:
            bc.applyFieldListGhostBoundary(fl)
        for fl in fls.VectorFieldLists:
            bc.applyFieldListGhostBoundary(fl)
        for fl in fls.TensorFieldLists:
            bc.applyFieldListGhostBoundary(fl)
        for fl in fls.SymTensorFieldLists:
            bc.applyFieldListGhostBoundary(fl)
        bc.finalizeGhostBoundary()

    print("Splatting fields...")
    newfls = splatMultipleFieldsMash(fls,
                                     pos0_fl, mass0_fl, H0_fl, W,
                                     pos1_fl, mass1_fl, H1_fl,
                                     bcs)
    print("Done splatting.")

    # Grab the FieldLists
    pos0 = nodes.positions()
    H0 = nodes.Hfield()
    pos1 = newnodes.positions()
    H1 = newnodes.Hfield()
    mass1 = newfls.ScalarFieldLists[0][0]
    vol1 = newfls.ScalarFieldLists[1][0]
    momentum1 = newfls.VectorFieldLists[0][0]
    thermalenergy1 = newfls.ScalarFieldLists[2][0]

    # Denormalize the mapped values and fill them in as new values for the nodes.
    # Masked nodes (if any) occupy slots [0, nmask); new nodes follow.
    nodes.numInternalNodes = nmask + newnodes.numInternalNodes
    for i in range(newnodes.numInternalNodes):
        j = nmask + i
        pos0[j] = pos1[i]
        H0[j] = H1[i]
        if mass1[i] > 0.0:
            assert vol1[i] > 0.0
            mass[j] = mass1[i]
            rho[j] = mass1[i]/vol1[i]
            vel[j] = momentum1[i]/mass1[i]
            eps[j] = thermalenergy1[i]/mass1[i]
        else:
            # This new node received nothing from the splat: fall back on the
            # generator-provided values.
            mass[j] = newnodes.mass()[i]
            rho[j] = newnodes.massDensity()[i]
            vel[j] = newnodes.velocity()[i]
            eps[j] = newnodes.specificThermalEnergy()[i]
    if solid:
        mS1 = newfls.SymTensorFieldLists[0][0]
        mps1 = newfls.ScalarFieldLists[3][0]
        mD1 = newfls.SymTensorFieldLists[1][0]
        for i in range(newnodes.numInternalNodes):
            j = nmask + i
            if mass1[i] > 0.0:
                # Undo the mass weighting applied before the splat.
                S[j] = mS1[i]/mass1[i]
                ps[j] = mps1[i]/mass1[i]
                D[j] = mD1[i]/mass1[i]

    # Look for any nodes that didn't get any information in the new set and delete them.
    if removeUnusedNodes:
        nodes2kill = vector_of_int()
        for i in range(newnodes.numInternalNodes):
            if mass1[i] == 0.0:
                nodes2kill.append(i)
        if nodes2kill.size() > 0:
            newnodes.deleteNodes(nodes2kill)

    # Insert any masked nodes, and we're done.
    if mask:
        # Slots [0, nmask) were reserved for the preserved masked nodes.
        newmask = [1]*nmask + [0]*nodes.numInternalNodes
        copyNodeListFields(masknodes, nodes, newmask, solid)

    # Whew!
    print("Finished resampling nodes: final node count %i." % mpi.allreduce(nodes.numInternalNodes, mpi.SUM))
    return
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@SimulationControl@resampleNodeList.py@.PATH_END.py
|
{
"filename": "yaml_ford.md",
"repo_name": "Nicholaswogan/PhotochemPy",
"repo_path": "PhotochemPy_extracted/PhotochemPy-main/src/fortran-yaml/yaml_ford.md",
"type": "Markdown"
}
|
src_dir: ./
output_dir: ~/BB/hugo/static/portfolio/yaml/doc
project_github: https://github.com/BoldingBruggeman/fortran-yaml
project_website: https://github.com/BoldingBruggeman/fortran-yaml
summary: A lightweight YAML parser written in object-oriented Fortran
author: Jorn Bruggeman
author_description:
github: https://github.com/jornbr
email: jorn@bolding-bruggeman.com
fpp_extensions: fpp
predocmark: >
media_dir: ./media
docmark_alt: #
predocmark_alt: <
display: public
protected
private
source: false
graph: true
search: true
macro: TEST
LOGIC=.true.
extra_mods: json_module: http://jacobwilliams.github.io/json-fortran/
futility: http://cmacmackin.github.io
license: by-nc
extra_filetypes: sh #
Hi, my name is ${USER}.
This is a project which I wrote. This file will provide the documents. I'm
writing the body of the text here. It contains an overall description of the
project. It might explain how to go about installing/compiling it. It might
provide a change-log for the code. [[linalg]] Maybe it will talk about the
history and/or motivation for this software.
@Note
You can include any notes (or bugs, warnings, or todos) like so.
@Bug
You can have multi-paragraph versions of these too! That means you can
include
- ordered lists
- unordered lists
- images
- etc.
Isn't that cool?
@endbug
@Bug Hey I'm doing it again...
This one ends mid...@endbug ...paragraph.
You can have as many paragraphs as you like here and can use headlines, links,
images, etc. Basically, you can use anything in Markdown and Markdown-Extra.
Furthermore, you can insert LaTeX into your documentation. So, for example,
you can provide inline math using like \( y = x^2 \) or math on its own line
like \[ x = \sqrt{y} \] or $$ e = mc^2. $$ You can even use LaTeX environments!
So you can get numbered equations like this:
\begin{equation}
PV = nRT
\end{equation}
So let your imagination run wild. As you can tell, I'm more or less just
filling in space now. This will be the last sentence.
|
NicholaswoganREPO_NAMEPhotochemPyPATH_START.@PhotochemPy_extracted@PhotochemPy-main@src@fortran-yaml@yaml_ford.md@.PATH_END.py
|
{
"filename": "plot_subbox_volrender.py",
"repo_name": "dullemond/radmc3d-2.0",
"repo_path": "radmc3d-2.0_extracted/radmc3d-2.0-master/examples/run_warpeddisk/plot_subbox_volrender.py",
"type": "Python"
}
|
import plotly.graph_objects as go
import numpy as np
from radmc3dPy.subbox import *
from natconst import *
#
# Example how to use RADMC-3D subbox method to sample an irregular grid
# in a regular way, making it easier to plot an analyze stuff.
#
# Sampling resolution: n cells along each axis of the cubic subbox grid.
n = 100
nxyz = [n,n,n]
#sz = 1200*au # See the full disk
#floor = 1e-20
sz = 150*au # Zoom in
# Additive floor so the log10 below never sees zero density.
# (au comes from the natconst star-import -- presumably the astronomical
# unit in cgs; confirm against natconst.)
floor = 1e-15
# Subbox extent as [xmin, xmax, ymin, ymax, zmin, zmax], centered on origin.
box = np.array([-sz,sz,-sz,sz,-sz,sz])
s=subBox()
# Have RADMC-3D resample the dust density onto the regular subbox grid
# (no rotation: phi1 = theta = phi2 = 0), then read the result back in.
s.makeSubbox('rhodust',box,nxyz,phi1=0.,theta=0.,phi2=0.)
s.readSubbox()
# Render log-density to compress the dynamic range for visualization.
values = np.log10(s.data+floor)
X,Y,Z = np.meshgrid(s.x,s.y,s.z,indexing='ij')
#
# Use plotly library to make a volume rendering of the disk
# https://plotly.com/python/3d-volume-plots/
#
fig = go.Figure(data=go.Volume(
    x=X.flatten(),
    y=Y.flatten(),
    z=Z.flatten(),
    value=values.flatten(),
    isomin=values.min(),
    isomax=values.max(),
    opacity=0.1, # needs to be small to see through all surfaces
    surface_count=17, # needs to be a large number for good volume rendering
    ))
fig.show()
|
dullemondREPO_NAMEradmc3d-2.0PATH_START.@radmc3d-2.0_extracted@radmc3d-2.0-master@examples@run_warpeddisk@plot_subbox_volrender.py@.PATH_END.py
|
{
"filename": "align_labels_demo.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/subplots_axes_and_figures/align_labels_demo.py",
"type": "Python"
}
|
"""
=======================
Align labels and titles
=======================
Aligning xlabel, ylabel, and title using `.Figure.align_xlabels`,
`.Figure.align_ylabels`, and `.Figure.align_titles`.
`.Figure.align_labels` wraps the x and y label functions.
Note that the xlabel "XLabel1 1" would normally be much closer to the
x-axis, "YLabel0 0" would be much closer to the y-axis, and title
"Title0 0" would be much closer to the top of their respective axes.
"""
import matplotlib.pyplot as plt
import numpy as np
fig, axs = plt.subplots(2, 2, layout='constrained')

# Top-left: large y values whose tick labels push the ylabel outward.
top_left = axs[0][0]
top_left.plot(np.arange(0, 1e6, 1000))
top_left.set_title('Title0 0')
top_left.set_ylabel('YLabel0 0')

# Top-right: ticks moved to the top and rotated, displacing the title.
ramp = np.arange(1., 0., -0.1)
top_right = axs[0][1]
top_right.plot(ramp * 2000., ramp)
top_right.set_title('Title0 1')
top_right.xaxis.tick_top()
top_right.tick_params(axis='x', rotation=55)

# Bottom row: identical data; only the first column rotates its tick labels.
for col in range(2):
    bottom = axs[1][col]
    bottom.plot(ramp * 2000., ramp)
    bottom.set_ylabel('YLabel1 %d' % col)
    bottom.set_xlabel('XLabel1 %d' % col)
    if col == 0:
        bottom.tick_params(axis='x', rotation=55)

fig.align_labels()  # same as fig.align_xlabels(); fig.align_ylabels()
fig.align_titles()

plt.show()
# %%
# .. tags::
#
# component: label
# component: title
# styling: position
# level: beginner
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@subplots_axes_and_figures@align_labels_demo.py@.PATH_END.py
|
{
"filename": "whats_new_v22.ipynb",
"repo_name": "sdss/marvin",
"repo_path": "marvin_extracted/marvin-main/docs/sphinx/jupyter/whats_new_v22.ipynb",
"type": "Jupyter Notebook"
}
|
# What's New in Marvin 2.2!
Lots of things are new in Marvin 2.2.0. See the list with links to individual sections here http://sdss-marvin.readthedocs.io/en/latest/whats-new.html
## Marvin now includes MPL-6 data
```python
%matplotlib inline
from marvin import config
config.switchSasUrl('local')
config.forceDbOff()
```
```python
from marvin.tools.cube import Cube
plateifu='8485-1901'
cube = Cube(plateifu=plateifu)
print(cube)
maps = cube.getMaps(bintype='HYB10')
print(maps)
```
WARNING: The binary mode of fromstring is deprecated, as it behaves surprisingly on
unicode inputs. Use frombuffer instead
<Marvin Cube (plateifu='8485-1901', mode='local', data_origin='file')>
<Marvin Maps (plateifu='8485-1901', mode='local', data_origin='file', bintype='HYB10', template='GAU-MILESHC')>
## Smarter handling of inputs
You can still specify **plateifu**, **mangaid**, or **filename** but now Marvin will try to guess your input type if you do not specify an input keyword argument.
```python
from marvin.tools.maps import Maps
# No input keyword needed: Marvin infers this is a plate-IFU string ...
maps = Maps(plateifu)
# or a filename
maps = Maps('/Users/Brian/Work/Manga/analysis/v2_3_1/2.1.3/SPX-GAU-MILESHC/8485/1901/manga-8485-1901-MAPS-SPX-GAU-MILESHC.fits.gz')
print(maps)
```
<Marvin Maps (plateifu='8485-1901', mode='local', data_origin='file', bintype='SPX', template='GAU-MILESHC')>
## Fuzzy indexing and extraction
Marvin now includes fuzzy lists and dictionaries in the Maps and Datamodels. This means Marvin will try to guess what you mean by what you type. For example, all of these methods grab the H-alpha flux map.
```python
# grab an H-alpha flux map
ha = maps['emline_gflux_ha_6564']
# fuzzy name indexing
ha = maps['gflux_ha']
# all map properties are available as class attributes. If using iPython, you can tab complete to see them all.
ha = maps.emline_gflux_ha_6564
```
WARNING: The binary mode of fromstring is deprecated, as it behaves surprisingly on
unicode inputs. Use frombuffer instead
## New DRP, DAP and Query Datamodels
There are new datamodels representing the MaNGA data for DRP, DAP and Query parameters. The datamodel is attached to every object you instantiate, or it can be accessed independently. For example, the **Maps** datamodel will list all the available map properties. See http://sdss-marvin.readthedocs.io/en/latest/datamodel/datamodels.html for details.
```python
# see the datamodel on maps
maps.datamodel
```
[<Property 'spx_skycoo', channel='on_sky_x', release='2.1.3', unit='arcsec'>,
<Property 'spx_skycoo', channel='on_sky_y', release='2.1.3', unit='arcsec'>,
<Property 'spx_ellcoo', channel='elliptical_radius', release='2.1.3', unit='arcsec'>,
<Property 'spx_ellcoo', channel='elliptical_azimuth', release='2.1.3', unit='deg'>,
<Property 'spx_mflux', channel='None', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'spx_snr', channel='None', release='2.1.3', unit=''>,
<Property 'binid', channel='binned_spectra', release='2.1.3', unit=''>,
<Property 'binid', channel='stellar_continua', release='2.1.3', unit=''>,
<Property 'binid', channel='em_line_moments', release='2.1.3', unit=''>,
<Property 'binid', channel='em_line_models', release='2.1.3', unit=''>,
<Property 'binid', channel='spectral_indices', release='2.1.3', unit=''>,
<Property 'bin_lwskycoo', channel='lum_weighted_on_sky_x', release='2.1.3', unit='arcsec'>,
<Property 'bin_lwskycoo', channel='lum_weighted_on_sky_y', release='2.1.3', unit='arcsec'>,
<Property 'bin_lwellcoo', channel='lum_weighted_elliptical_radius', release='2.1.3', unit='arcsec'>,
<Property 'bin_lwellcoo', channel='lum_weighted_elliptical_azimuth', release='2.1.3', unit='deg'>,
<Property 'bin_area', channel='None', release='2.1.3', unit='arcsec2'>,
<Property 'bin_farea', channel='None', release='2.1.3', unit=''>,
<Property 'bin_mflux', channel='None', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'bin_snr', channel='None', release='2.1.3', unit=''>,
<Property 'stellar_vel', channel='None', release='2.1.3', unit='km / s'>,
<Property 'stellar_sigma', channel='None', release='2.1.3', unit='km / s'>,
<Property 'stellar_cont_fresid', channel='68th_percentile', release='2.1.3', unit=''>,
<Property 'stellar_cont_fresid', channel='99th_percentile', release='2.1.3', unit=''>,
<Property 'stellar_cont_rchi2', channel='None', release='2.1.3', unit=''>,
<Property 'emline_sflux', channel='oiid_3728', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='oii_3729', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='hthe_3798', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='heta_3836', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='neiii_3869', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='hzet_3890', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='neiii_3968', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='heps_3971', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='hdel_4102', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='hgam_4341', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='heii_4687', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='hb_4862', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='oiii_4960', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='oiii_5008', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='hei_5877', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='oi_6302', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='oi_6365', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='nii_6549', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='ha_6564', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='nii_6585', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='sii_6718', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sflux', channel='sii_6732', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_sew', channel='oiid_3728', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='oii_3729', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='hthe_3798', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='heta_3836', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='neiii_3869', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='hzet_3890', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='neiii_3968', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='heps_3971', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='hdel_4102', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='hgam_4341', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='heii_4687', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='hb_4862', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='oiii_4960', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='oiii_5008', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='hei_5877', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='oi_6302', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='oi_6365', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='nii_6549', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='ha_6564', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='nii_6585', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='sii_6718', release='2.1.3', unit='Angstrom'>,
<Property 'emline_sew', channel='sii_6732', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gflux', channel='oii_3727', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='oii_3729', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='hthe_3798', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='heta_3836', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='neiii_3869', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='hzet_3890', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='neiii_3968', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='heps_3971', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='hdel_4102', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='hgam_4341', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='heii_4687', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='hb_4862', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='oiii_4960', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='oiii_5008', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='hei_5877', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='oi_6302', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='oi_6365', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='nii_6549', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='ha_6564', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='nii_6585', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='sii_6718', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gflux', channel='sii_6732', release='2.1.3', unit='1e-17 erg / (cm2 s spaxel)'>,
<Property 'emline_gvel', channel='oii_3727', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='oii_3729', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='hthe_3798', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='heta_3836', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='neiii_3869', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='hzet_3890', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='neiii_3968', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='heps_3971', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='hdel_4102', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='hgam_4341', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='heii_4687', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='hb_4862', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='oiii_4960', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='oiii_5008', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='hei_5877', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='oi_6302', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='oi_6365', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='nii_6549', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='ha_6564', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='nii_6585', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='sii_6718', release='2.1.3', unit='km / s'>,
<Property 'emline_gvel', channel='sii_6732', release='2.1.3', unit='km / s'>,
<Property 'emline_gew', channel='oiid_3728', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='oii_3729', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='hthe_3798', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='heta_3836', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='neiii_3869', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='hzet_3890', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='neiii_3968', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='heps_3971', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='hdel_4102', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='hgam_4341', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='heii_4687', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='hb_4862', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='oiii_4960', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='oiii_5008', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='hei_5877', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='oi_6302', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='oi_6365', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='nii_6549', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='ha_6564', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='nii_6585', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='sii_6718', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gew', channel='sii_6732', release='2.1.3', unit='Angstrom'>,
<Property 'emline_gsigma', channel='oii_3727', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='oii_3729', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='hthe_3798', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='heta_3836', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='neiii_3869', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='hzet_3890', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='neiii_3968', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='heps_3971', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='hdel_4102', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='hgam_4341', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='heii_4687', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='hb_4862', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='oiii_4960', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='oiii_5008', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='hei_5877', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='oi_6302', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='oi_6365', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='nii_6549', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='ha_6564', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='nii_6585', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='sii_6718', release='2.1.3', unit='km / s'>,
<Property 'emline_gsigma', channel='sii_6732', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='oii_3727', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='oii_3729', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='hthe_3798', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='heta_3836', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='neiii_3869', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='hzet_3890', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='neiii_3968', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='heps_3971', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='hdel_4102', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='hgam_4341', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='heii_4687', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='hb_4862', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='oiii_4960', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='oiii_5008', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='hei_5877', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='oi_6302', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='oi_6365', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='nii_6549', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='ha_6564', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='nii_6585', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='sii_6718', release='2.1.3', unit='km / s'>,
<Property 'emline_instsigma', channel='sii_6732', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='oii_3727', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='oii_3729', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='hthe_3798', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='heta_3836', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='neiii_3869', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='hzet_3890', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='neiii_3968', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='heps_3971', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='hdel_4102', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='hgam_4341', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='heii_4687', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='hb_4862', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='oiii_4960', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='oiii_5008', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='hei_5877', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='oi_6302', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='oi_6365', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='nii_6549', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='ha_6564', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='nii_6585', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='sii_6718', release='2.1.3', unit='km / s'>,
<Property 'emline_tplsigma', channel='sii_6732', release='2.1.3', unit='km / s'>,
<Property 'specindex', channel='cn1', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='cn2', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='ca4227', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='g4300', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fe4383', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='ca4455', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fe4531', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='c24668', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='hb', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fe5015', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='mg1', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='mg2', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='mgb', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fe5270', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fe5335', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fe5406', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fe5709', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fe5782', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='nad', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='tio1', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='tio2', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='hdeltaa', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='hgammaa', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='hdeltaf', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='hgammaf', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='cahk', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='caii1', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='caii2', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='caii3', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='pa17', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='pa14', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='pa12', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='mgicvd', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='naicvd', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='mgiir', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='fehcvd', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='nai', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='btio', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='atio', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='cah1', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='cah2', release='2.1.3', unit='mag'>,
<Property 'specindex', channel='naisdss', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='tio2sdss', release='2.1.3', unit='Angstrom'>,
<Property 'specindex', channel='d4000', release='2.1.3', unit=''>,
<Property 'specindex', channel='dn4000', release='2.1.3', unit=''>,
<Property 'specindex', channel='tiocvd', release='2.1.3', unit=''>,
<Property 'specindex_corr', channel='cn1', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='cn2', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='ca4227', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='g4300', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fe4383', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='ca4455', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fe4531', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='c24668', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='hb', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fe5015', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='mg1', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='mg2', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='mgb', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fe5270', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fe5335', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fe5406', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fe5709', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fe5782', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='nad', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='tio1', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='tio2', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='hdeltaa', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='hgammaa', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='hdeltaf', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='hgammaf', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='cahk', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='caii1', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='caii2', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='caii3', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='pa17', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='pa14', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='pa12', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='mgicvd', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='naicvd', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='mgiir', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='fehcvd', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='nai', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='btio', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='atio', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='cah1', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='cah2', release='2.1.3', unit='mag'>,
<Property 'specindex_corr', channel='naisdss', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='tio2sdss', release='2.1.3', unit='Angstrom'>,
<Property 'specindex_corr', channel='d4000', release='2.1.3', unit=''>,
<Property 'specindex_corr', channel='dn4000', release='2.1.3', unit=''>,
<Property 'specindex_corr', channel='tiocvd', release='2.1.3', unit=''>]
Each **Property** contains a name, a channel, the unit of the property, and a description
```python
haew_prop = maps.datamodel['emline_gew_ha']
haew_prop
```
<Property 'emline_gew', channel='ha_6564', release='2.1.3', unit='Angstrom'>
```python
print(haew_prop.name, haew_prop.unit, haew_prop.description)
```
emline_gew Angstrom Gaussian-fitted equivalent widths measurements (based on EMLINE_GFLUX)
The full datamodel is available as a **parent** attribute, or you can import it directly
```python
dapdm = maps.datamodel.parent
print(dapdm)
# get a list of all available DAP datamodels
from marvin.utils.datamodel.dap import datamodel
print(datamodel)
# let's get the MPL-6 datamodel
dapdm = datamodel['MPL-6']
print(dapdm)
```
<DAPDataModel release='2.1.3', n_bintypes=5, n_templates=1, n_properties=292>
[<DAPDataModel release='1.1.1', n_bintypes=3, n_templates=3, n_properties=92>, <DAPDataModel release='2.0.2', n_bintypes=4, n_templates=1, n_properties=151>, <DAPDataModel release='2.1.3', n_bintypes=5, n_templates=1, n_properties=292>]
<DAPDataModel release='2.1.3', n_bintypes=5, n_templates=1, n_properties=292>
## Cubes, Maps, ModelCubes now utilize Quantity-based Objects
Most Marvin Tools now use new objects to represent their data. **DataCubes** represent 3-d data, while a **Spectrum** represents a 1-d array of data. These sub-class from Astropy Quantities. This means now most properties have associated units. We also now track and propagate inverse variances and masks.
```python
# the cube datamodel shows the available datacubes
cube.datamodel.datacubes
```
[<DataCube 'flux', release='MPL-6', unit='1e-17 erg / (Angstrom cm2 s spaxel)'>,
<DataCube 'dispersion', release='MPL-6', unit='Angstrom'>,
<DataCube 'dispersion_prepixel', release='MPL-6', unit='Angstrom'>]
```python
# and spectra
cube.datamodel.spectra
```
[<Spectrum 'spectral_resolution', release='MPL-6', unit='Angstrom'>,
<Spectrum 'spectral_resolution_prepixel', release='MPL-6', unit='Angstrom'>]
The cube flux is now a **DataCube**, has proper units, has an ivar, mask, and wavelength attached to it
```python
print(type(cube.flux))
print('flux', cube.flux)
print('mask', cube.flux.mask)
print('wavelength', cube.flux.wavelength)
```
<class 'marvin.tools.quantities.datacube.DataCube'>
flux [[[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]]
[[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]]
[[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]]
...
[[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]]
[[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]]
[[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]]] 1e-17 erg / (Angstrom cm2 s spaxel)
mask [[[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
...
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]]
[[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
...
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]]
[[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
...
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]]
...
[[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
...
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]]
[[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
...
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]]
[[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
...
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]
[1027 1027 1027 ... 1027 1027 1027]]]
wavelength [ 3621.59598486 3622.42998417 3623.26417553 ... 10349.03843826
10351.42166679 10353.80544415] Angstrom
Slicing a **DataCube** in 2-d will return a new **DataCube**, while slicing in 3-d will return a **Spectrum**
```python
# Slice the DataCube in all three dimensions at spaxel (17, 17);
# a 3-d slice returns a Spectrum object with units and a plot() helper.
spec = cube.flux[:,17,17]
print(type(spec))
print(spec)
print(spec.unit)
spec.plot()
```
<class 'marvin.tools.quantities.spectrum.Spectrum'>
[0.54676276 0.46566465 0.4622981 ... 0. 0. 0. ] 1e-17 erg / (Angstrom cm2 s spaxel)
1e-17 erg / (Angstrom cm2 s spaxel)
<matplotlib.axes._subplots.AxesSubplot at 0x1252d12e8>

## Maskbits
There is a new Maskbit class for improved maskbit handling. All objects now include new **Maskbit** versions of the DRP/DAP quality flag (**quality_flag**), targeting bits (**target_flags**), and pixel masks (**pixmask**). Now you can easily look up the labels for bits and create custom masks. See http://sdss-marvin.readthedocs.io/en/latest/utils/maskbit.html for details
```python
# H-alpha DAP quality flag
ha.quality_flag
```
<Maskbit 'MANGA_DAPQUAL' []>
```python
ha.target_flags
```
[<Maskbit 'MANGA_TARGET1' ['SECONDARY_v1_1_0', 'SECONDARY_COM2', 'SECONDARY_v1_2_0']>,
<Maskbit 'MANGA_TARGET2' []>,
<Maskbit 'MANGA_TARGET3' []>]
```python
ha.pixmask
```
<Maskbit 'MANGA_DAPPIXMASK' shape=(34, 34)>
```python
# bits for mask value 1027
print('bits', ha.pixmask.values_to_bits(1027))
print('labels', ha.pixmask.values_to_labels(1027))
```
bits [0, 1, 10]
labels ['NOCOV', 'LOWCOV', 'MULTICOMP']
```python
# convert the H-alpha mask into an list of labels
ha.pixmask.labels
```
[[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
[],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
[],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['NOVALUE', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']],
[['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'DONOTUSE']]]
## Improved Query and Results Handling
The handling of Queries and Results has been improved to provide better means of retrieving all the results of a query, extracting columns of parameters, and quickly plotting results.
* See http://sdss-marvin.readthedocs.io/en/latest/query.html for Query handling
* See http://sdss-marvin.readthedocs.io/en/latest/results.html for Results handling
* See http://sdss-marvin.readthedocs.io/en/latest/datamodel/query_dm.html for how to use the Query Datamodel
* See http://sdss-marvin.readthedocs.io/en/latest/utils/plot-scatter.html for quick scatter plotting
* See http://sdss-marvin.readthedocs.io/en/latest/utils/plot-hist.html for quick histogram plotting
```python
from marvin.tools.query import Query
config.setRelease('MPL-4')
q = Query(search_filter='nsa.z < 0.1', return_params=['cube.ra', 'cube.dec', 'absmag_g_r', 'nsa.elpetro_ba'])
r = q.run()
```
WARNING:Brain:MarvinUserWarning: No local database found. Cannot perform queries.
WARNING:Brain:MarvinUserWarning: local mode failed. Trying remote now.
WARNING: No local database found. Cannot perform queries.
WARNING: local mode failed. Trying remote now.
Your parsed filter is:
nsa.z<0.1
WARNING:Brain:PendingDeprecationWarning: generator 'extractWithoutOrder' raised StopIteration
Results contain of a total of 1282. Only returning the first 100 results.
WARNING: generator 'extractWithoutOrder' raised StopIteration
```python
# your results are now in Sets
r.results
```
<ResultSet(set=1.0/13, index=0:100, count_in_set=100, total=1282)>
[ResultRow(mangaid='1-109394', plate=8082, plateifu='8082-9102', ifu_name='9102', ra=50.179936141, dec=-1.0022917898, elpetro_absmag_g_r=1.26038932800293, elpetro_ba=0.42712, z=0.0361073),
ResultRow(mangaid='1-113208', plate=8618, plateifu='8618-3701', ifu_name='3701', ra=317.504479435, dec=9.86822191739, elpetro_absmag_g_r=1.48788070678711, elpetro_ba=0.752286, z=0.0699044),
ResultRow(mangaid='1-113219', plate=7815, plateifu='7815-9102', ifu_name='9102', ra=317.374745914, dec=10.0519434342, elpetro_absmag_g_r=0.543312072753906, elpetro_ba=0.517058, z=0.0408897),
ResultRow(mangaid='1-113375', plate=7815, plateifu='7815-9101', ifu_name='9101', ra=316.639658795, dec=10.7512221884, elpetro_absmag_g_r=0.757579803466797, elpetro_ba=0.570455, z=0.028215),
ResultRow(mangaid='1-113379', plate=7815, plateifu='7815-6101', ifu_name='6101', ra=316.541566803, dec=10.3454195236, elpetro_absmag_g_r=1.09770011901855, elpetro_ba=0.373641, z=0.0171611),
ResultRow(mangaid='1-113403', plate=7815, plateifu='7815-12703', ifu_name='12703', ra=316.964281103, dec=11.2623177305, elpetro_absmag_g_r=0.745466232299805, elpetro_ba=0.823788, z=0.0715126),
ResultRow(mangaid='1-113418', plate=7815, plateifu='7815-12704', ifu_name='12704', ra=319.353761201, dec=10.2316206875, elpetro_absmag_g_r=1.44098854064941, elpetro_ba=0.456991, z=0.0430806),
ResultRow(mangaid='1-113469', plate=7815, plateifu='7815-12702', ifu_name='12702', ra=317.943526819, dec=9.27749462963, elpetro_absmag_g_r=0.847789764404297, elpetro_ba=0.522312, z=0.0394617),
ResultRow(mangaid='1-113520', plate=7815, plateifu='7815-1901', ifu_name='1901', ra=317.502202242, dec=11.5106477077, elpetro_absmag_g_r=1.7510347366333, elpetro_ba=0.751988, z=0.0167652),
ResultRow(mangaid='1-113525', plate=8618, plateifu='8618-6103', ifu_name='6103', ra=317.430068351, dec=11.3552406345, elpetro_absmag_g_r=1.57906627655029, elpetro_ba=0.78557, z=0.0169457),
ResultRow(mangaid='1-113525', plate=7815, plateifu='7815-1902', ifu_name='1902', ra=317.430068351, dec=11.3552406345, elpetro_absmag_g_r=1.57906627655029, elpetro_ba=0.78557, z=0.0169457),
ResultRow(mangaid='1-113539', plate=8618, plateifu='8618-12701', ifu_name='12701', ra=317.979595193, dec=11.3794496273, elpetro_absmag_g_r=1.26716613769531, elpetro_ba=0.31432, z=0.0177002),
ResultRow(mangaid='1-113540', plate=7815, plateifu='7815-3702', ifu_name='3702', ra=317.903201533, dec=11.4969433994, elpetro_absmag_g_r=0.952407836914062, elpetro_ba=0.889156, z=0.0293823),
ResultRow(mangaid='1-113567', plate=8618, plateifu='8618-1902', ifu_name='1902', ra=318.026426419, dec=11.3451572409, elpetro_absmag_g_r=1.41732978820801, elpetro_ba=0.515994, z=0.0167432),
ResultRow(mangaid='1-113567', plate=7815, plateifu='7815-12701', ifu_name='12701', ra=318.026426419, dec=11.3451572409, elpetro_absmag_g_r=1.41732978820801, elpetro_ba=0.515994, z=0.0167432),
ResultRow(mangaid='1-113585', plate=7815, plateifu='7815-3703', ifu_name='3703', ra=319.11342841, dec=10.7676202056, elpetro_absmag_g_r=1.68158912658691, elpetro_ba=0.773512, z=0.070276),
ResultRow(mangaid='1-113587', plate=8618, plateifu='8618-12704', ifu_name='12704', ra=319.273361936, dec=11.1201347053, elpetro_absmag_g_r=1.02355575561523, elpetro_ba=0.858524, z=0.0704926),
ResultRow(mangaid='1-113647', plate=8618, plateifu='8618-6104', ifu_name='6104', ra=319.814830226, dec=10.070628454, elpetro_absmag_g_r=1.78754997253418, elpetro_ba=0.850177, z=0.0738563),
ResultRow(mangaid='1-113651', plate=7815, plateifu='7815-3704', ifu_name='3704', ra=319.233949063, dec=9.63757525774, elpetro_absmag_g_r=1.4986743927002, elpetro_ba=0.941069, z=0.0708847),
ResultRow(mangaid='1-113654', plate=8618, plateifu='8618-9102', ifu_name='9102', ra=319.271463809, dec=9.9723035679, elpetro_absmag_g_r=1.10831832885742, elpetro_ba=0.451358, z=0.0430694),
ResultRow(mangaid='1-113663', plate=8618, plateifu='8618-3703', ifu_name='3703', ra=318.804558778, dec=9.91312455151, elpetro_absmag_g_r=2.80322933197021, elpetro_ba=0.502782, z=0.0316328),
ResultRow(mangaid='1-113672', plate=8618, plateifu='8618-3704', ifu_name='3704', ra=318.862286217, dec=9.75781705378, elpetro_absmag_g_r=1.25676536560059, elpetro_ba=0.984299, z=0.0702278),
ResultRow(mangaid='1-113698', plate=8618, plateifu='8618-1901', ifu_name='1901', ra=319.194045241, dec=11.5400106533, elpetro_absmag_g_r=0.995195388793945, elpetro_ba=0.567433, z=0.0167445),
ResultRow(mangaid='1-113700', plate=8618, plateifu='8618-12703', ifu_name='12703', ra=319.451824118, dec=11.6605961542, elpetro_absmag_g_r=0.61408805847168, elpetro_ba=0.751346, z=0.0378372),
ResultRow(mangaid='1-113712', plate=7815, plateifu='7815-6104', ifu_name='6104', ra=319.193098655, dec=11.0437407875, elpetro_absmag_g_r=0.69244384765625, elpetro_ba=0.942534, z=0.0806967),
ResultRow(mangaid='1-114073', plate=7975, plateifu='7975-12705', ifu_name='12705', ra=324.895915071, dec=11.2049630634, elpetro_absmag_g_r=0.751516342163086, elpetro_ba=0.775431, z=0.0402895),
ResultRow(mangaid='1-114082', plate=7975, plateifu='7975-3701', ifu_name='3701', ra=324.152525127, dec=10.5067325085, elpetro_absmag_g_r=1.44381332397461, elpetro_ba=0.425806, z=0.0402683),
ResultRow(mangaid='1-114121', plate=7975, plateifu='7975-12701', ifu_name='12701', ra=323.466394588, dec=10.0718531123, elpetro_absmag_g_r=1.43171119689941, elpetro_ba=0.520187, z=0.0879313),
ResultRow(mangaid='1-114128', plate=7975, plateifu='7975-6101', ifu_name='6101', ra=323.470604621, dec=10.4397349551, elpetro_absmag_g_r=1.86342239379883, elpetro_ba=0.864153, z=0.077875),
ResultRow(mangaid='1-114129', plate=7975, plateifu='7975-12702', ifu_name='12702', ra=323.521211519, dec=10.4218555682, elpetro_absmag_g_r=2.19032287597656, elpetro_ba=0.521832, z=0.0774097),
ResultRow(mangaid='1-114145', plate=7975, plateifu='7975-6102', ifu_name='6102', ra=323.577092837, dec=11.2143239831, elpetro_absmag_g_r=1.41496467590332, elpetro_ba=0.655866, z=0.0341885),
ResultRow(mangaid='1-114171', plate=7975, plateifu='7975-3702', ifu_name='3702', ra=323.296326308, dec=10.6442039273, elpetro_absmag_g_r=1.70641708374023, elpetro_ba=0.849777, z=0.0881405),
ResultRow(mangaid='1-114303', plate=7975, plateifu='7975-1901', ifu_name='1901', ra=323.65768, dec=11.42181, elpetro_absmag_g_r=0.658689498901367, elpetro_ba=0.505907, z=0.0220107),
ResultRow(mangaid='1-114306', plate=7975, plateifu='7975-9101', ifu_name='9101', ra=323.742750886, dec=11.296528361, elpetro_absmag_g_r=0.99525260925293, elpetro_ba=0.811891, z=0.0636505),
ResultRow(mangaid='1-114325', plate=7975, plateifu='7975-12703', ifu_name='12703', ra=324.094963475, dec=12.2363038289, elpetro_absmag_g_r=1.34337997436523, elpetro_ba=0.244175, z=0.0288791),
ResultRow(mangaid='1-114334', plate=7975, plateifu='7975-1902', ifu_name='1902', ra=324.259707865, dec=11.9062032693, elpetro_absmag_g_r=1.43183898925781, elpetro_ba=0.56156, z=0.0222473),
ResultRow(mangaid='1-114454', plate=7975, plateifu='7975-12704', ifu_name='12704', ra=324.586417578, dec=11.3486728499, elpetro_absmag_g_r=1.29723358154297, elpetro_ba=0.591206, z=0.0888606),
ResultRow(mangaid='1-114465', plate=7975, plateifu='7975-6104', ifu_name='6104', ra=324.89155826, dec=10.4834807378, elpetro_absmag_g_r=1.21394157409668, elpetro_ba=0.867381, z=0.0788547),
ResultRow(mangaid='1-114500', plate=7975, plateifu='7975-9102', ifu_name='9102', ra=324.548678082, dec=12.1942577854, elpetro_absmag_g_r=1.14164924621582, elpetro_ba=0.355321, z=0.0220849),
ResultRow(mangaid='1-114502', plate=7975, plateifu='7975-6103', ifu_name='6103', ra=324.799320383, dec=11.9393222318, elpetro_absmag_g_r=1.4673023223877, elpetro_ba=0.960909, z=0.0798058),
ResultRow(mangaid='1-114532', plate=7975, plateifu='7975-3703', ifu_name='3703', ra=325.161350811, dec=11.7227434323, elpetro_absmag_g_r=1.73165702819824, elpetro_ba=0.920698, z=0.0902261),
ResultRow(mangaid='1-114928', plate=7977, plateifu='7977-3702', ifu_name='3702', ra=331.080925269, dec=12.9683778244, elpetro_absmag_g_r=1.65719413757324, elpetro_ba=0.680598, z=0.0273478),
ResultRow(mangaid='1-114955', plate=7977, plateifu='7977-12701', ifu_name='12701', ra=332.602089837, dec=11.7130772993, elpetro_absmag_g_r=1.01249313354492, elpetro_ba=0.742333, z=0.0922799),
ResultRow(mangaid='1-114956', plate=7977, plateifu='7977-3704', ifu_name='3704', ra=332.798726703, dec=11.8007324019, elpetro_absmag_g_r=1.3456974029541, elpetro_ba=0.756417, z=0.0270248),
ResultRow(mangaid='1-114980', plate=7977, plateifu='7977-9102', ifu_name='9102', ra=332.83066426, dec=12.1847175842, elpetro_absmag_g_r=1.14808464050293, elpetro_ba=0.656607, z=0.0630915),
ResultRow(mangaid='1-114998', plate=7977, plateifu='7977-6102', ifu_name='6102', ra=332.756351306, dec=12.3743026872, elpetro_absmag_g_r=2.77035713195801, elpetro_ba=0.6304, z=0.0614042),
ResultRow(mangaid='1-115062', plate=7977, plateifu='7977-1901', ifu_name='1901', ra=330.855372733, dec=12.6758983985, elpetro_absmag_g_r=1.65952682495117, elpetro_ba=0.865932, z=0.0260569),
ResultRow(mangaid='1-115085', plate=7977, plateifu='7977-6103', ifu_name='6103', ra=331.802634213, dec=13.2660525434, elpetro_absmag_g_r=0.912630081176758, elpetro_ba=0.472784, z=0.0349304),
ResultRow(mangaid='1-115097', plate=7977, plateifu='7977-3701', ifu_name='3701', ra=332.203447059, dec=13.3647373417, elpetro_absmag_g_r=1.49947357177734, elpetro_ba=0.528689, z=0.0274473),
ResultRow(mangaid='1-115128', plate=7977, plateifu='7977-1902', ifu_name='1902', ra=332.481316937, dec=12.8180504327, elpetro_absmag_g_r=1.1044979095459, elpetro_ba=0.49669, z=0.0358116),
ResultRow(mangaid='1-115162', plate=7977, plateifu='7977-12703', ifu_name='12703', ra=333.201842347, dec=13.334120927, elpetro_absmag_g_r=1.13131713867188, elpetro_ba=0.479943, z=0.0738627),
ResultRow(mangaid='1-115320', plate=7977, plateifu='7977-3703', ifu_name='3703', ra=333.052045245, dec=12.205190661, elpetro_absmag_g_r=0.99519157409668, elpetro_ba=0.842721, z=0.0275274),
ResultRow(mangaid='1-124604', plate=8439, plateifu='8439-6103', ifu_name='6103', ra=141.34417921, dec=50.5536812778, elpetro_absmag_g_r=1.38611221313477, elpetro_ba=0.345553, z=0.0253001),
ResultRow(mangaid='1-133922', plate=8486, plateifu='8486-6104', ifu_name='6104', ra=239.195689664, dec=47.9955208307, elpetro_absmag_g_r=1.51949119567871, elpetro_ba=0.390132, z=0.0174718),
ResultRow(mangaid='1-133941', plate=8486, plateifu='8486-9102', ifu_name='9102', ra=239.030589848, dec=48.0308761201, elpetro_absmag_g_r=1.04214859008789, elpetro_ba=0.740501, z=0.0189045),
ResultRow(mangaid='1-133945', plate=8486, plateifu='8486-3703', ifu_name='3703', ra=238.881357667, dec=47.677310104, elpetro_absmag_g_r=1.70501899719238, elpetro_ba=0.75216, z=0.0183248),
ResultRow(mangaid='1-133948', plate=8486, plateifu='8486-6103', ifu_name='6103', ra=238.891298957, dec=48.0223923799, elpetro_absmag_g_r=1.62374401092529, elpetro_ba=0.662078, z=0.0195194),
ResultRow(mangaid='1-133976', plate=8486, plateifu='8486-9101', ifu_name='9101', ra=238.718472619, dec=47.8808922742, elpetro_absmag_g_r=1.26091766357422, elpetro_ba=0.627185, z=0.0182938),
ResultRow(mangaid='1-133987', plate=8486, plateifu='8486-1902', ifu_name='1902', ra=239.334163047, dec=48.2072621316, elpetro_absmag_g_r=1.73217391967773, elpetro_ba=0.902851, z=0.0195435),
ResultRow(mangaid='1-134004', plate=8486, plateifu='8486-1901', ifu_name='1901', ra=238.448582292, dec=47.4049584412, elpetro_absmag_g_r=1.27153015136719, elpetro_ba=0.667273, z=0.0185601),
ResultRow(mangaid='1-134020', plate=8486, plateifu='8486-6102', ifu_name='6102', ra=238.046893627, dec=48.0439162921, elpetro_absmag_g_r=1.4318904876709, elpetro_ba=0.452976, z=0.0193267),
ResultRow(mangaid='1-134209', plate=8549, plateifu='8549-9101', ifu_name='9101', ra=242.276471895, dec=46.6712048189, elpetro_absmag_g_r=1.46211814880371, elpetro_ba=0.938842, z=0.0545042),
ResultRow(mangaid='1-134239', plate=8549, plateifu='8549-3703', ifu_name='3703', ra=241.416442386, dec=46.8465606897, elpetro_absmag_g_r=1.20720481872559, elpetro_ba=0.840219, z=0.0571086),
ResultRow(mangaid='1-134248', plate=8549, plateifu='8549-3702', ifu_name='3702', ra=241.005278975, dec=46.8029102028, elpetro_absmag_g_r=1.04830741882324, elpetro_ba=0.603141, z=0.0212204),
ResultRow(mangaid='1-134293', plate=8549, plateifu='8549-6103', ifu_name='6103', ra=240.418740846, dec=46.085291751, elpetro_absmag_g_r=0.724908828735352, elpetro_ba=0.685683, z=0.0416784),
ResultRow(mangaid='1-134503', plate=8555, plateifu='8555-1901', ifu_name='1901', ra=243.873718478, dec=44.2912632693, elpetro_absmag_g_r=1.38505744934082, elpetro_ba=0.580866, z=0.0371472),
ResultRow(mangaid='1-134562', plate=8549, plateifu='8549-1902', ifu_name='1902', ra=242.727439731, dec=44.985695801, elpetro_absmag_g_r=0.999540328979492, elpetro_ba=0.709542, z=0.0355137),
ResultRow(mangaid='1-134597', plate=8549, plateifu='8549-12705', ifu_name='12705', ra=241.907223711, dec=45.0653702307, elpetro_absmag_g_r=1.32281875610352, elpetro_ba=0.493211, z=0.0441938),
ResultRow(mangaid='1-134599', plate=8549, plateifu='8549-12704', ifu_name='12704', ra=242.978644743, dec=46.1277269855, elpetro_absmag_g_r=1.2156925201416, elpetro_ba=0.347987, z=0.019658),
ResultRow(mangaid='1-134614', plate=8549, plateifu='8549-6102', ifu_name='6102', ra=243.009178672, dec=45.7750314981, elpetro_absmag_g_r=1.25503730773926, elpetro_ba=0.409631, z=0.0528277),
ResultRow(mangaid='1-134634', plate=8549, plateifu='8549-3704', ifu_name='3704', ra=243.18537291, dec=45.3520102657, elpetro_absmag_g_r=1.71317291259766, elpetro_ba=0.601301, z=0.0523251),
ResultRow(mangaid='1-134848', plate=8555, plateifu='8555-12703', ifu_name='12703', ra=244.331994382, dec=43.4796723691, elpetro_absmag_g_r=1.4580078125, elpetro_ba=0.276868, z=0.0584495),
ResultRow(mangaid='1-134924', plate=8555, plateifu='8555-9101', ifu_name='9101', ra=245.662015493, dec=43.4646577078, elpetro_absmag_g_r=1.76020240783691, elpetro_ba=0.819258, z=0.0319997),
ResultRow(mangaid='1-134954', plate=8555, plateifu='8555-12705', ifu_name='12705', ra=246.578190983, dec=43.4074643202, elpetro_absmag_g_r=1.38137054443359, elpetro_ba=0.692219, z=0.0315232),
ResultRow(mangaid='1-134964', plate=8555, plateifu='8555-3701', ifu_name='3701', ra=246.760690284, dec=43.4760996734, elpetro_absmag_g_r=1.5971508026123, elpetro_ba=0.853938, z=0.0462348),
ResultRow(mangaid='1-135030', plate=8603, plateifu='8603-12704', ifu_name='12704', ra=247.893876589, dec=40.5655973228, elpetro_absmag_g_r=1.31695175170898, elpetro_ba=0.700621, z=0.0273289),
ResultRow(mangaid='1-135054', plate=8550, plateifu='8550-12703', ifu_name='12703', ra=247.674430234, dec=40.5293893805, elpetro_absmag_g_r=1.34156799316406, elpetro_ba=0.853565, z=0.0298122),
ResultRow(mangaid='1-135055', plate=8601, plateifu='8601-6104', ifu_name='6104', ra=247.641287575, dec=40.5394009252, elpetro_absmag_g_r=1.68307113647461, elpetro_ba=0.808577, z=0.0300581),
ResultRow(mangaid='1-135057', plate=8601, plateifu='8601-12703', ifu_name='12703', ra=247.57407, dec=40.59861, elpetro_absmag_g_r=0.928314208984375, elpetro_ba=0.834526, z=0.0288518),
ResultRow(mangaid='1-135058', plate=8603, plateifu='8603-6103', ifu_name='6103', ra=247.800367796, dec=40.4218744432, elpetro_absmag_g_r=1.1861629486084, elpetro_ba=0.392703, z=0.0270087),
ResultRow(mangaid='1-135077', plate=8312, plateifu='8312-6104', ifu_name='6104', ra=247.638466864, dec=41.4385861863, elpetro_absmag_g_r=1.33458137512207, elpetro_ba=0.458094, z=0.0290664),
ResultRow(mangaid='1-135095', plate=8312, plateifu='8312-3702', ifu_name='3702', ra=247.245291144, dec=41.255253243, elpetro_absmag_g_r=1.44723129272461, elpetro_ba=0.658268, z=0.0332324),
ResultRow(mangaid='1-135129', plate=8603, plateifu='8603-12705', ifu_name='12705', ra=247.280269588, dec=40.5910287121, elpetro_absmag_g_r=1.81981086730957, elpetro_ba=0.503666, z=0.0327969),
ResultRow(mangaid='1-135133', plate=8603, plateifu='8603-12703', ifu_name='12703', ra=247.282646413, dec=40.6650474998, elpetro_absmag_g_r=1.36585807800293, elpetro_ba=0.627429, z=0.0299683),
ResultRow(mangaid='1-135134', plate=8603, plateifu='8603-9101', ifu_name='9101', ra=247.225624269, dec=40.8666111706, elpetro_absmag_g_r=1.85215187072754, elpetro_ba=0.958519, z=0.030343),
ResultRow(mangaid='1-135152', plate=8312, plateifu='8312-6103', ifu_name='6103', ra=246.887611078, dec=41.1385055016, elpetro_absmag_g_r=0.762582778930664, elpetro_ba=0.839506, z=0.0301811),
ResultRow(mangaid='1-135157', plate=8603, plateifu='8603-3702', ifu_name='3702', ra=247.04131843, dec=40.6956030265, elpetro_absmag_g_r=1.68464851379395, elpetro_ba=0.518096, z=0.0323713),
ResultRow(mangaid='1-135207', plate=8555, plateifu='8555-1902', ifu_name='1902', ra=246.323470587, dec=42.6942265737, elpetro_absmag_g_r=1.51096343994141, elpetro_ba=0.755948, z=0.031485),
ResultRow(mangaid='1-135371', plate=8588, plateifu='8588-9101', ifu_name='9101', ra=250.156240419, dec=39.2216349362, elpetro_absmag_g_r=1.37564086914062, elpetro_ba=0.430169, z=0.0352359),
ResultRow(mangaid='1-135372', plate=8588, plateifu='8588-6102', ifu_name='6102', ra=250.116709759, dec=39.3201174959, elpetro_absmag_g_r=1.68138885498047, elpetro_ba=0.789335, z=0.0300793),
ResultRow(mangaid='1-135383', plate=8588, plateifu='8588-12705', ifu_name='12705', ra=250.312873125, dec=39.7523514003, elpetro_absmag_g_r=1.2461109161377, elpetro_ba=0.355884, z=0.0301398),
ResultRow(mangaid='1-135468', plate=8550, plateifu='8550-12705', ifu_name='12705', ra=249.135695215, dec=39.0278800132, elpetro_absmag_g_r=1.37894058227539, elpetro_ba=0.670573, z=0.029986),
ResultRow(mangaid='1-135502', plate=8604, plateifu='8604-12703', ifu_name='12703', ra=247.76417484, dec=39.838503868, elpetro_absmag_g_r=1.57090950012207, elpetro_ba=0.804992, z=0.0305383),
ResultRow(mangaid='1-135503', plate=8604, plateifu='8604-3703', ifu_name='3703', ra=247.882111795, dec=39.8976507098, elpetro_absmag_g_r=1.6621150970459, elpetro_ba=0.914384, z=0.0296457),
ResultRow(mangaid='1-135506', plate=8601, plateifu='8601-3704', ifu_name='3704', ra=247.948553785, dec=39.8142396526, elpetro_absmag_g_r=1.70755767822266, elpetro_ba=0.740217, z=0.0295479),
ResultRow(mangaid='1-135512', plate=8601, plateifu='8601-6102', ifu_name='6102', ra=247.711831631, dec=40.0247994472, elpetro_absmag_g_r=0.778741836547852, elpetro_ba=0.783227, z=0.0279629),
ResultRow(mangaid='1-135516', plate=8550, plateifu='8550-6104', ifu_name='6104', ra=248.41315, dec=39.25763, elpetro_absmag_g_r=1.33112716674805, elpetro_ba=0.41841, z=0.0314747),
ResultRow(mangaid='1-135517', plate=8588, plateifu='8588-6101', ifu_name='6101', ra=248.456755755, dec=39.2632054313, elpetro_absmag_g_r=1.17428970336914, elpetro_ba=0.961436, z=0.0317611),
ResultRow(mangaid='1-135530', plate=8550, plateifu='8550-9101', ifu_name='9101', ra=247.409672103, dec=40.2353879985, elpetro_absmag_g_r=1.7724609375, elpetro_ba=0.286038, z=0.0283296),
ResultRow(mangaid='1-135545', plate=8601, plateifu='8601-6103', ifu_name='6103', ra=247.530374396, dec=40.8801572026, elpetro_absmag_g_r=1.43307685852051, elpetro_ba=0.402053, z=0.0301334)]
```python
# see the available columns
r.columns
```
<ParameterGroup name=Columns, n_parameters=9>
[<QueryParameter full=cube.mangaid, name=mangaid, short=mangaid, remote=mangaid, display=Manga-ID>,
<QueryParameter full=cube.plate, name=plate, short=plate, remote=plate, display=Plate>,
<QueryParameter full=cube.plateifu, name=plateifu, short=plateifu, remote=plateifu, display=Plate-IFU>,
<QueryParameter full=ifu.name, name=ifu_name, short=ifu_name, remote=ifu_name, display=Name>,
<QueryParameter full=cube.ra, name=ra, short=ra, remote=ra, display=RA>,
<QueryParameter full=cube.dec, name=dec, short=dec, remote=dec, display=Dec>,
<QueryParameter full=nsa.elpetro_absmag_g_r, name=elpetro_absmag_g_r, short=absmag_g_r, remote=elpetro_absmag_g_r, display=Absmag g-r>,
<QueryParameter full=nsa.elpetro_ba, name=elpetro_ba, short=axisratio, remote=elpetro_ba, display=Elpetro axis ratio>,
<QueryParameter full=nsa.z, name=z, short=z, remote=z, display=Redshift>]
```python
# quickly plot the redshift vs g-r color
output = r.plot('nsa.z', 'absmag_g_r')
```
WARNING:Brain:UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedArray.
WARNING:Brain:RuntimeWarning: Invalid value encountered in median
WARNING:Brain:RuntimeWarning: Invalid value encountered in percentile
WARNING: Warning: 'partition' will ignore the 'mask' of the MaskedArray.
WARNING: Invalid value encountered in median
WARNING: Invalid value encountered in percentile

```python
# or a histogram of the elpetro b/a axis ratio
output=r.hist('elpetro_ba')
```
WARNING:Brain:UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedArray.
WARNING:Brain:RuntimeWarning: Invalid value encountered in median
WARNING:Brain:RuntimeWarning: Invalid value encountered in percentile
WARNING: Warning: 'partition' will ignore the 'mask' of the MaskedArray.
WARNING: Invalid value encountered in median
WARNING: Invalid value encountered in percentile

```python
# get all of the g-r colors as a list
gr = r.getListOf('absmag_g_r', return_all=True)
gr
```
[1.26038932800293,
1.48788070678711,
0.543312072753906,
0.757579803466797,
1.09770011901855,
0.745466232299805,
1.44098854064941,
0.847789764404297,
1.7510347366333,
1.57906627655029,
1.57906627655029,
1.26716613769531,
0.952407836914062,
1.41732978820801,
1.41732978820801,
1.68158912658691,
1.02355575561523,
1.78754997253418,
1.4986743927002,
1.10831832885742,
2.80322933197021,
1.25676536560059,
0.995195388793945,
0.61408805847168,
0.69244384765625,
0.751516342163086,
1.44381332397461,
1.43171119689941,
1.86342239379883,
2.19032287597656,
1.41496467590332,
1.70641708374023,
0.658689498901367,
0.99525260925293,
1.34337997436523,
1.43183898925781,
1.29723358154297,
1.21394157409668,
1.14164924621582,
1.4673023223877,
1.73165702819824,
1.65719413757324,
1.01249313354492,
1.3456974029541,
1.14808464050293,
2.77035713195801,
1.65952682495117,
0.912630081176758,
1.49947357177734,
1.1044979095459,
1.13131713867188,
0.99519157409668,
1.38611221313477,
1.51949119567871,
1.04214859008789,
1.70501899719238,
1.62374401092529,
1.26091766357422,
1.73217391967773,
1.27153015136719,
1.4318904876709,
1.46211814880371,
1.20720481872559,
1.04830741882324,
0.724908828735352,
1.38505744934082,
0.999540328979492,
1.32281875610352,
1.2156925201416,
1.25503730773926,
1.71317291259766,
1.4580078125,
1.76020240783691,
1.38137054443359,
1.5971508026123,
1.31695175170898,
1.34156799316406,
1.68307113647461,
0.928314208984375,
1.1861629486084,
1.33458137512207,
1.44723129272461,
1.81981086730957,
1.36585807800293,
1.85215187072754,
0.762582778930664,
1.68464851379395,
1.51096343994141,
1.37564086914062,
1.68138885498047,
1.2461109161377,
1.37894058227539,
1.57090950012207,
1.6621150970459,
1.70755767822266,
0.778741836547852,
1.33112716674805,
1.17428970336914,
1.7724609375,
1.43307685852051,
1.05030250549316,
0.790615081787109,
1.44169998168945,
1.22106170654297,
1.4596061706543,
1.6043529510498,
1.43718338012695,
1.36807250976562,
1.42204856872559,
1.20418357849121,
0.959020614624023,
1.11434555053711,
1.20375823974609,
1.50382232666016,
1.47062683105469,
1.52022552490234,
0.883840560913086,
0.863546371459961,
0.999143600463867,
1.47824478149414,
1.58131790161133,
1.58131790161133,
1.75845336914062,
1.76508140563965,
1.76508140563965,
1.66989135742188,
1.66989135742188,
0.911367416381836,
0.834562301635742,
1.44910621643066,
1.19685363769531,
0.960855484008789,
1.1392650604248,
0.227899551391602,
1.49036026000977,
1.23330879211426,
1.2242603302002,
1.54246711730957,
0.339872360229492,
1.02695465087891,
1.2266845703125,
1.10024833679199,
1.4588623046875,
0.744720458984375,
1.3321475982666,
0.938104629516602,
0.833671569824219,
1.23658561706543,
0.782651901245117,
1.36958885192871,
1.22234153747559,
1.49322319030762,
1.25778961181641,
1.23126220703125,
1.75717353820801,
1.00096130371094,
1.55574607849121,
1.19962120056152,
1.38706016540527,
1.26955413818359,
0.790740966796875,
1.28773880004883,
1.83721733093262,
1.0489559173584,
1.50351715087891,
1.64328575134277,
1.21530723571777,
1.68683815002441,
1.2888126373291,
1.11646842956543,
1.5238151550293,
1.39241409301758,
0.783824920654297,
1.76472663879395,
0.751018524169922,
1.58340835571289,
1.47800445556641,
0.928119659423828,
1.18128204345703,
0.718753814697266,
1.00078010559082,
0.993005752563477,
1.65610313415527,
0.900096893310547,
0.859102249145508,
1.31706047058105,
1.5174388885498,
1.91670417785645,
1.58527755737305,
0.653806686401367,
0.906030654907227,
1.66348838806152,
1.06909561157227,
0.854578018188477,
1.34856986999512,
1.1823673248291,
1.2220287322998,
1.58125877380371,
1.26830101013184,
1.14142608642578,
1.32029342651367,
1.14487075805664,
0.791526794433594,
1.38916778564453,
1.49543571472168,
1.34278106689453,
1.77796936035156,
1.16924476623535,
1.08199501037598,
1.22658157348633,
1.2657356262207,
0.807031631469727,
0.736539840698242,
1.72653961181641,
0.426862716674805,
0.878713607788086,
1.71902084350586,
1.17444229125977,
1.61144065856934,
1.13142395019531,
0.91609001159668,
1.75609970092773,
1.47218894958496,
1.72292137145996,
1.58425521850586,
1.25091361999512,
1.01857757568359,
1.00876617431641,
1.71321678161621,
1.56660842895508,
0.934855461120605,
1.3363151550293,
1.17270469665527,
1.91660308837891,
0.971338272094727,
1.44587516784668,
0.574041366577148,
0.837287902832031,
0.728397369384766,
0.84881591796875,
1.09189605712891,
1.34137344360352,
1.16559028625488,
1.66678619384766,
0.844734191894531,
1.5816535949707,
1.02146530151367,
1.26579475402832,
0.62200927734375,
1.58538627624512,
1.64804458618164,
0.984698295593262,
1.41409683227539,
1.19541931152344,
0.922063827514648,
0.871667861938477,
1.52823066711426,
1.70123100280762,
1.82316970825195,
1.86878204345703,
1.48141860961914,
1.77270889282227,
1.14588737487793,
1.65116500854492,
1.57740592956543,
1.58359718322754,
1.67409324645996,
1.47936248779297,
1.49068450927734,
1.53426742553711,
1.68362236022949,
1.02366256713867,
1.22614288330078,
1.73893737792969,
-0.792776107788086,
1.28153991699219,
1.54734802246094,
1.21334266662598,
1.62071990966797,
1.57655143737793,
0.0247316360473633,
0.797264099121094,
1.4067440032959,
1.39529609680176,
1.26568222045898,
1.64430618286133,
1.72085380554199,
1.17687034606934,
1.17687034606934,
1.7665901184082,
1.38961029052734,
1.47238731384277,
1.71597290039062,
1.30305099487305,
1.71797370910645,
1.47995376586914,
1.73958778381348,
1.45911979675293,
1.656494140625,
1.23796081542969,
1.62772369384766,
2.10557651519775,
1.36248779296875,
1.60188102722168,
1.17086791992188,
2.2204704284668,
1.77141571044922,
0.827211380004883,
1.22723960876465,
1.28543472290039,
1.32573127746582,
0.826717376708984,
1.32103538513184,
1.22235679626465,
1.49703788757324,
1.43289375305176,
1.40898513793945,
1.55881881713867,
1.5500373840332,
1.20891189575195,
1.09588432312012,
0.955888748168945,
1.09977149963379,
0.719453811645508,
0.734806060791016,
1.89365005493164,
1.74777030944824,
1.41163444519043,
0.987491607666016,
1.7424373626709,
1.60759544372559,
1.10265159606934,
1.99302864074707,
0.918880462646484,
0.836238861083984,
1.4536247253418,
1.1130256652832,
1.2296199798584,
1.64775276184082,
1.1259880065918,
1.55693435668945,
1.80934143066406,
0.751926422119141,
1.58045959472656,
1.38661003112793,
1.69091606140137,
0.90764045715332,
1.21425437927246,
1.63036155700684,
1.09826850891113,
1.22561264038086,
1.49561500549316,
1.55380439758301,
1.54398345947266,
1.63561820983887,
0.725547790527344,
0.811029434204102,
0.938411712646484,
1.5194206237793,
0.772989273071289,
1.53439712524414,
1.69396209716797,
1.06061172485352,
1.77609062194824,
1.32638168334961,
1.41711044311523,
0.791227340698242,
1.49320793151855,
1.49320793151855,
1.5701847076416,
1.5701847076416,
1.0389232635498,
0.985183715820312,
1.39904022216797,
1.56939125061035,
1.22552490234375,
1.65569877624512,
1.32129859924316,
1.53822708129883,
1.4886646270752,
1.82439804077148,
1.42483520507812,
1.35124588012695,
1.29219245910645,
1.58535385131836,
1.19128227233887,
1.57101631164551,
0.624504089355469,
1.57153701782227,
1.42238616943359,
1.70148658752441,
1.67478179931641,
1.67694091796875,
0.60981559753418,
1.02688407897949,
1.65608787536621,
1.51834964752197,
1.38359069824219,
0.953317642211914,
1.40422439575195,
1.11628913879395,
1.01333045959473,
0.803609848022461,
0.952854156494141,
1.97970199584961,
1.3377742767334,
1.05277252197266,
0.965957641601562,
1.76102447509766,
1.33317375183105,
1.55662536621094,
1.38805389404297,
1.68991088867188,
1.14168167114258,
1.04787254333496,
0.961238861083984,
1.07113075256348,
1.1551513671875,
1.48903465270996,
-3.17536354064941,
1.69540786743164,
1.11064147949219,
0.709733963012695,
1.34700584411621,
0.895797729492188,
1.48164558410645,
1.25549125671387,
1.05262756347656,
1.68144035339355,
1.03829002380371,
1.70709419250488,
0.816909790039062,
1.69452667236328,
1.38815879821777,
1.31978988647461,
0.974521636962891,
1.47384262084961,
1.37209701538086,
1.54490089416504,
1.44744682312012,
1.01861190795898,
1.96556854248047,
1.31393051147461,
1.03205108642578,
1.49975776672363,
1.46814918518066,
1.34462738037109,
1.2363338470459,
1.23056221008301,
1.74398040771484,
0.836553573608398,
1.4525146484375,
0.956119537353516,
1.65094566345215,
1.51747226715088,
0.73320198059082,
1.71385383605957,
1.35731887817383,
1.00057220458984,
1.52221298217773,
1.14715003967285,
0.917896270751953,
1.38006210327148,
1.14894676208496,
0.983631134033203,
0.983631134033203,
0.983631134033203,
0.914587020874023,
1.20341682434082,
0.924001693725586,
1.17800903320312,
1.65768432617188,
0.658950805664062,
nan,
nan,
1.3783130645752,
1.70119857788086,
1.71731758117676,
1.71731758117676,
1.71731758117676,
1.56680679321289,
1.69010543823242,
1.69010543823242,
1.69010543823242,
1.53993797302246,
1.57926177978516,
0.957000732421875,
1.60057830810547,
1.16513633728027,
1.54172134399414,
1.69695854187012,
1.45039176940918,
1.05523681640625,
1.33770751953125,
1.33757209777832,
1.33757209777832,
1.33757209777832,
1.4401912689209,
1.4401912689209,
1.4401912689209,
1.55233573913574,
0.784034729003906,
1.70734596252441,
0.911626815795898,
1.10363006591797,
1.71620178222656,
1.69637107849121,
1.00818252563477,
1.4294376373291,
1.02963256835938,
1.05858421325684,
1.37761497497559,
1.29471015930176,
1.39539909362793,
0.684883117675781,
1.09605979919434,
1.68900871276855,
0.951129913330078,
0.976816177368164,
0.699855804443359,
0.887578964233398,
1.69713020324707,
1.27194404602051,
1.72472190856934,
1.8007755279541,
1.11307907104492,
1.11945533752441,
1.58012580871582,
1.45475387573242,
1.64972114562988,
1.78246116638184,
1.63819122314453,
1.00637817382812,
1.10149192810059,
1.59071922302246,
0.940752029418945,
1.37218379974365,
1.56686019897461,
1.1949291229248,
1.71342277526855,
1.18173599243164,
1.08020210266113,
1.43885231018066,
1.60983276367188,
1.20413017272949,
1.18441963195801,
1.42948341369629,
0.993492126464844,
0.833972930908203,
1.28214073181152,
0.955068588256836,
1.71206283569336,
0.93040657043457,
1.06473731994629,
1.35318946838379,
0.821443557739258,
0.953466415405273,
1.0816707611084,
1.0999641418457,
1.63955497741699,
1.55946922302246,
0.92365837097168,
1.36188888549805,
1.88768768310547,
1.58308029174805,
1.38254928588867,
1.03481101989746,
1.07779884338379,
1.68861389160156,
1.02464294433594,
1.08687019348145,
1.78932571411133,
1.1748104095459,
1.59099960327148,
0.86699104309082,
1.66316413879395,
1.70698547363281,
0.925453186035156,
1.28239250183105,
1.55482482910156,
1.73201560974121,
1.5053768157959,
1.78401374816895,
1.14368438720703,
1.04927635192871,
1.67613410949707,
0.933671951293945,
1.37960815429688,
1.33930778503418,
1.7547607421875,
1.3258113861084,
0.93994140625,
1.12911987304688,
1.08281135559082,
1.39982891082764,
2.1340160369873,
1.52947616577148,
0.940595626831055,
1.44391822814941,
1.55187034606934,
1.7088623046875,
1.62078475952148,
0.979959487915039,
1.20776557922363,
1.63409805297852,
1.3450813293457,
1.5622386932373,
1.61195755004883,
1.21268463134766,
1.46304893493652,
1.14181137084961,
1.53633117675781,
1.14987182617188,
0.980047225952148,
1.49104881286621,
1.49104881286621,
1.49104881286621,
1.56260681152344,
1.56260681152344,
1.65445709228516,
1.65445709228516,
1.67844200134277,
1.67844200134277,
0.873035430908203,
0.873035430908203,
1.0898551940918,
0.92662239074707,
1.0157470703125,
1.19947242736816,
1.10720252990723,
0.959125518798828,
0.977848052978516,
1.03470230102539,
1.29042434692383,
1.42698383331299,
1.75157737731934,
0.917922973632812,
1.47256088256836,
0.982328414916992,
1.26026344299316,
1.26026344299316,
1.71033477783203,
1.71033477783203,
1.71033477783203,
1.49874496459961,
1.02738571166992,
1.13454437255859,
0.988702774047852,
0.979625701904297,
1.58403587341309,
1.06747817993164,
1.50131416320801,
1.46430397033691,
1.24053001403809,
1.89778327941895,
1.89778327941895,
1.89778327941895,
1.22134399414062,
0.928123474121094,
0.945835113525391,
0.72022819519043,
1.62304210662842,
1.33382415771484,
0.805131912231445,
1.70050430297852,
1.35200119018555,
0.742740631103516,
0.757841110229492,
1.12235260009766,
1.64518165588379,
0.903297424316406,
1.12158966064453,
1.14593887329102,
1.09593772888184,
1.00565338134766,
1.47928047180176,
1.50151824951172,
0.825166702270508,
1.54441452026367,
-1.88460540771484,
0.853603363037109,
1.36214828491211,
0.273757934570312,
1.47780418395996,
1.13959312438965,
1.07637214660645,
1.11844253540039,
1.66919708251953,
1.39535522460938,
1.39177703857422,
1.49020767211914,
0.927394866943359,
1.15757369995117,
1.57975578308105,
0.870588302612305,
1.73061370849609,
1.16182899475098,
1.20291137695312,
1.2392692565918,
1.49401664733887,
1.44595718383789,
1.39508628845215,
1.73542976379395,
1.68562126159668,
0.956823348999023,
1.48354911804199,
1.50432586669922,
1.23655891418457,
1.74173355102539,
0.893733978271484,
1.52008819580078,
1.14311981201172,
1.04419708251953,
1.48998260498047,
1.1009693145752,
1.09053230285645,
0.741434097290039,
1.59234046936035,
1.80172920227051,
1.73479461669922,
0.86054801940918,
0.928585052490234,
1.7055549621582,
1.54520797729492,
1.6952953338623,
0.906753540039062,
1.91145896911621,
1.62610054016113,
1.39299964904785,
0.948474884033203,
0.810003280639648,
0.954912185668945,
1.56138801574707,
1.11913299560547,
1.15854072570801,
1.30131530761719,
1.05821990966797,
0.813287734985352,
0.839853286743164,
1.52050399780273,
1.57372665405273,
0.785802841186523,
1.47542953491211,
1.21893501281738,
1.49204635620117,
1.79995536804199,
1.48590278625488,
0.963138580322266,
1.24654960632324,
1.25187301635742,
0.920822143554688,
1.65678024291992,
1.65226554870605,
1.26643180847168,
1.05314445495605,
1.65041542053223,
2.47416496276855,
0.0703029632568359,
1.51253700256348,
1.46866798400879,
1.67432403564453,
1.48945617675781,
1.73706436157227,
1.63918685913086,
1.44451713562012,
0.863668441772461,
1.13508033752441,
1.64232635498047,
1.16241264343262,
1.59716987609863,
0.966501235961914,
1.15245819091797,
1.0467472076416,
1.19833946228027,
0.939540863037109,
1.34385681152344,
1.68332481384277,
-1.63743019104004,
1.34518623352051,
1.19291305541992,
1.50571441650391,
1.54122924804688,
1.37088012695312,
1.46299743652344,
1.92683982849121,
1.85723686218262,
0.626987457275391,
1.53471374511719,
1.19682121276855,
1.8375244140625,
1.20996475219727,
1.24330711364746,
1.58600997924805,
1.71118545532227,
1.0413818359375,
0.960577011108398,
1.17041397094727,
1.37961769104004,
1.55269432067871,
1.1957836151123,
1.02608680725098,
0.226890563964844,
1.77253913879395,
1.29454231262207,
1.08570098876953,
1.47203636169434,
1.44456100463867,
1.40625762939453,
0.978277206420898,
0.55487060546875,
1.63270378112793,
1.64004516601562,
0.563379287719727,
0.830810546875,
1.69113540649414,
1.03084373474121,
1.85529708862305,
1.47027015686035,
1.10908126831055,
1.53725624084473,
1.4988956451416,
1.8381233215332,
0.901719093322754,
1.20190048217773,
0.802553176879883,
1.46460342407227,
1.5158748626709,
1.09825897216797,
1.50794219970703,
1.72008895874023,
1.17287254333496,
1.36014747619629,
1.4256706237793,
1.46027755737305,
1.4875602722168,
0.785396575927734,
1.71440124511719,
1.50999641418457,
1.09213638305664,
1.19823455810547,
1.23016548156738,
1.12722206115723,
1.77749443054199,
1.46454048156738,
1.62651443481445,
1.3984432220459,
1.35092544555664,
1.04977035522461,
1.53735160827637,
1.27643775939941,
1.36318206787109,
0.94245719909668,
1.29872894287109,
1.63339233398438,
1.00844955444336,
1.70010757446289,
1.61838531494141,
1.63355445861816,
0.798288345336914,
1.07410430908203,
1.12163543701172,
0.967428207397461,
1.90436935424805,
0.964748382568359,
1.59471130371094,
1.62790489196777,
1.46386528015137,
0.873353958129883,
1.34402084350586,
1.18402481079102,
0.988561630249023,
1.31947135925293,
1.64464378356934,
1.34547424316406,
1.56835174560547,
1.31447792053223,
1.32720947265625,
1.26867485046387,
1.68083190917969,
0.984879493713379,
0.909263610839844,
1.59123802185059,
1.1878719329834,
1.09230995178223,
0.781301498413086,
1.05727195739746,
1.54859161376953,
1.39760971069336,
1.30661773681641,
0.886505126953125,
1.87333679199219,
1.11655426025391,
1.41487121582031,
1.48755645751953,
1.24517250061035,
1.38109016418457,
0.859464645385742,
1.88220024108887,
1.65872764587402,
1.77185821533203,
1.21111106872559,
1.20716857910156,
1.59582901000977,
1.44345855712891,
1.18367576599121,
1.24299049377441,
0.932371139526367,
1.46877670288086,
1.28357696533203,
0.91743278503418,
0.798980712890625,
1.44865417480469,
1.14214706420898,
1.53586959838867,
1.61997604370117,
0.661672592163086,
1.67490768432617,
1.61782264709473,
1.4886474609375,
1.66888427734375,
1.00383567810059,
1.53368949890137,
1.30915260314941,
0.645650863647461,
1.31369781494141,
1.76816940307617,
1.72246170043945,
1.39943885803223,
1.07694435119629,
1.55520439147949,
1.47665977478027,
1.34712791442871,
1.66336822509766,
1.05989074707031,
1.1972541809082,
1.1972541809082,
3.18958759307861,
1.18093299865723,
0.812383651733398,
1.52397537231445,
1.59876823425293,
1.18255805969238,
1.15860557556152,
1.46097564697266,
0.87640380859375,
1.74136734008789,
1.76072120666504,
1.60023498535156,
1.61800384521484,
1.25259017944336,
1.56465339660645,
1.72346496582031,
1.48027038574219,
1.16570854187012,
1.6808967590332,
1.64530372619629,
1.39993286132812,
0.972042083740234,
1.45343589782715,
1.2872142791748,
1.38276672363281,
0.940004348754883,
1.07626819610596,
1.07242393493652,
1.03236198425293,
0.907632827758789,
1.47846221923828,
1.10496139526367,
0.877973556518555,
0.851888656616211,
0.753438949584961,
1.25467872619629,
1.28206253051758,
1.49494361877441,
1.04388618469238,
1.72276878356934,
1.41907978057861,
0.883262634277344,
1.84392738342285,
1.47762489318848,
1.60384750366211,
1.6960334777832,
1.11810493469238,
1.43835830688477,
0.995927810668945,
0.810644149780273,
0.819530487060547,
1.36838531494141,
1.34992599487305,
1.42660522460938,
1.19057083129883,
1.77753639221191,
1.6470947265625,
1.55911064147949,
1.57394027709961,
1.27225875854492,
1.48729133605957,
1.69239234924316,
1.23407936096191,
1.72245597839355,
...]
```python
# the result set currently holds only the first 100 rows out of the total
print(r.count, r.totalcount)
# let's extend our result set by the next chunk of 100
r.extendSet()
```
INFO:Brain:Retrieving next 100, from 100 to 200
100 1282
INFO: Retrieving next 100, from 100 to 200
WARNING:Brain:PendingDeprecationWarning: generator 'extractWithoutOrder' raised StopIteration
WARNING: generator 'extractWithoutOrder' raised StopIteration
```python
print(r.count, r.totalcount)
print(r.results)
```
200 1282
<ResultSet(set=1.0/7, index=0:200, count_in_set=200, total=1282)>
[ResultRow(mangaid='1-109394', plate=8082, plateifu='8082-9102', ifu_name='9102', ra=50.179936141, dec=-1.0022917898, elpetro_absmag_g_r=1.26038932800293, elpetro_ba=0.42712, z=0.0361073),
ResultRow(mangaid='1-113208', plate=8618, plateifu='8618-3701', ifu_name='3701', ra=317.504479435, dec=9.86822191739, elpetro_absmag_g_r=1.48788070678711, elpetro_ba=0.752286, z=0.0699044),
ResultRow(mangaid='1-113219', plate=7815, plateifu='7815-9102', ifu_name='9102', ra=317.374745914, dec=10.0519434342, elpetro_absmag_g_r=0.543312072753906, elpetro_ba=0.517058, z=0.0408897),
ResultRow(mangaid='1-113375', plate=7815, plateifu='7815-9101', ifu_name='9101', ra=316.639658795, dec=10.7512221884, elpetro_absmag_g_r=0.757579803466797, elpetro_ba=0.570455, z=0.028215),
ResultRow(mangaid='1-113379', plate=7815, plateifu='7815-6101', ifu_name='6101', ra=316.541566803, dec=10.3454195236, elpetro_absmag_g_r=1.09770011901855, elpetro_ba=0.373641, z=0.0171611),
ResultRow(mangaid='1-113403', plate=7815, plateifu='7815-12703', ifu_name='12703', ra=316.964281103, dec=11.2623177305, elpetro_absmag_g_r=0.745466232299805, elpetro_ba=0.823788, z=0.0715126),
ResultRow(mangaid='1-113418', plate=7815, plateifu='7815-12704', ifu_name='12704', ra=319.353761201, dec=10.2316206875, elpetro_absmag_g_r=1.44098854064941, elpetro_ba=0.456991, z=0.0430806),
ResultRow(mangaid='1-113469', plate=7815, plateifu='7815-12702', ifu_name='12702', ra=317.943526819, dec=9.27749462963, elpetro_absmag_g_r=0.847789764404297, elpetro_ba=0.522312, z=0.0394617),
ResultRow(mangaid='1-113520', plate=7815, plateifu='7815-1901', ifu_name='1901', ra=317.502202242, dec=11.5106477077, elpetro_absmag_g_r=1.7510347366333, elpetro_ba=0.751988, z=0.0167652),
ResultRow(mangaid='1-113525', plate=8618, plateifu='8618-6103', ifu_name='6103', ra=317.430068351, dec=11.3552406345, elpetro_absmag_g_r=1.57906627655029, elpetro_ba=0.78557, z=0.0169457),
ResultRow(mangaid='1-113525', plate=7815, plateifu='7815-1902', ifu_name='1902', ra=317.430068351, dec=11.3552406345, elpetro_absmag_g_r=1.57906627655029, elpetro_ba=0.78557, z=0.0169457),
ResultRow(mangaid='1-113539', plate=8618, plateifu='8618-12701', ifu_name='12701', ra=317.979595193, dec=11.3794496273, elpetro_absmag_g_r=1.26716613769531, elpetro_ba=0.31432, z=0.0177002),
ResultRow(mangaid='1-113540', plate=7815, plateifu='7815-3702', ifu_name='3702', ra=317.903201533, dec=11.4969433994, elpetro_absmag_g_r=0.952407836914062, elpetro_ba=0.889156, z=0.0293823),
ResultRow(mangaid='1-113567', plate=8618, plateifu='8618-1902', ifu_name='1902', ra=318.026426419, dec=11.3451572409, elpetro_absmag_g_r=1.41732978820801, elpetro_ba=0.515994, z=0.0167432),
ResultRow(mangaid='1-113567', plate=7815, plateifu='7815-12701', ifu_name='12701', ra=318.026426419, dec=11.3451572409, elpetro_absmag_g_r=1.41732978820801, elpetro_ba=0.515994, z=0.0167432),
ResultRow(mangaid='1-113585', plate=7815, plateifu='7815-3703', ifu_name='3703', ra=319.11342841, dec=10.7676202056, elpetro_absmag_g_r=1.68158912658691, elpetro_ba=0.773512, z=0.070276),
ResultRow(mangaid='1-113587', plate=8618, plateifu='8618-12704', ifu_name='12704', ra=319.273361936, dec=11.1201347053, elpetro_absmag_g_r=1.02355575561523, elpetro_ba=0.858524, z=0.0704926),
ResultRow(mangaid='1-113647', plate=8618, plateifu='8618-6104', ifu_name='6104', ra=319.814830226, dec=10.070628454, elpetro_absmag_g_r=1.78754997253418, elpetro_ba=0.850177, z=0.0738563),
ResultRow(mangaid='1-113651', plate=7815, plateifu='7815-3704', ifu_name='3704', ra=319.233949063, dec=9.63757525774, elpetro_absmag_g_r=1.4986743927002, elpetro_ba=0.941069, z=0.0708847),
ResultRow(mangaid='1-113654', plate=8618, plateifu='8618-9102', ifu_name='9102', ra=319.271463809, dec=9.9723035679, elpetro_absmag_g_r=1.10831832885742, elpetro_ba=0.451358, z=0.0430694),
ResultRow(mangaid='1-113663', plate=8618, plateifu='8618-3703', ifu_name='3703', ra=318.804558778, dec=9.91312455151, elpetro_absmag_g_r=2.80322933197021, elpetro_ba=0.502782, z=0.0316328),
ResultRow(mangaid='1-113672', plate=8618, plateifu='8618-3704', ifu_name='3704', ra=318.862286217, dec=9.75781705378, elpetro_absmag_g_r=1.25676536560059, elpetro_ba=0.984299, z=0.0702278),
ResultRow(mangaid='1-113698', plate=8618, plateifu='8618-1901', ifu_name='1901', ra=319.194045241, dec=11.5400106533, elpetro_absmag_g_r=0.995195388793945, elpetro_ba=0.567433, z=0.0167445),
ResultRow(mangaid='1-113700', plate=8618, plateifu='8618-12703', ifu_name='12703', ra=319.451824118, dec=11.6605961542, elpetro_absmag_g_r=0.61408805847168, elpetro_ba=0.751346, z=0.0378372),
ResultRow(mangaid='1-113712', plate=7815, plateifu='7815-6104', ifu_name='6104', ra=319.193098655, dec=11.0437407875, elpetro_absmag_g_r=0.69244384765625, elpetro_ba=0.942534, z=0.0806967),
ResultRow(mangaid='1-114073', plate=7975, plateifu='7975-12705', ifu_name='12705', ra=324.895915071, dec=11.2049630634, elpetro_absmag_g_r=0.751516342163086, elpetro_ba=0.775431, z=0.0402895),
ResultRow(mangaid='1-114082', plate=7975, plateifu='7975-3701', ifu_name='3701', ra=324.152525127, dec=10.5067325085, elpetro_absmag_g_r=1.44381332397461, elpetro_ba=0.425806, z=0.0402683),
ResultRow(mangaid='1-114121', plate=7975, plateifu='7975-12701', ifu_name='12701', ra=323.466394588, dec=10.0718531123, elpetro_absmag_g_r=1.43171119689941, elpetro_ba=0.520187, z=0.0879313),
ResultRow(mangaid='1-114128', plate=7975, plateifu='7975-6101', ifu_name='6101', ra=323.470604621, dec=10.4397349551, elpetro_absmag_g_r=1.86342239379883, elpetro_ba=0.864153, z=0.077875),
ResultRow(mangaid='1-114129', plate=7975, plateifu='7975-12702', ifu_name='12702', ra=323.521211519, dec=10.4218555682, elpetro_absmag_g_r=2.19032287597656, elpetro_ba=0.521832, z=0.0774097),
ResultRow(mangaid='1-114145', plate=7975, plateifu='7975-6102', ifu_name='6102', ra=323.577092837, dec=11.2143239831, elpetro_absmag_g_r=1.41496467590332, elpetro_ba=0.655866, z=0.0341885),
ResultRow(mangaid='1-114171', plate=7975, plateifu='7975-3702', ifu_name='3702', ra=323.296326308, dec=10.6442039273, elpetro_absmag_g_r=1.70641708374023, elpetro_ba=0.849777, z=0.0881405),
ResultRow(mangaid='1-114303', plate=7975, plateifu='7975-1901', ifu_name='1901', ra=323.65768, dec=11.42181, elpetro_absmag_g_r=0.658689498901367, elpetro_ba=0.505907, z=0.0220107),
ResultRow(mangaid='1-114306', plate=7975, plateifu='7975-9101', ifu_name='9101', ra=323.742750886, dec=11.296528361, elpetro_absmag_g_r=0.99525260925293, elpetro_ba=0.811891, z=0.0636505),
ResultRow(mangaid='1-114325', plate=7975, plateifu='7975-12703', ifu_name='12703', ra=324.094963475, dec=12.2363038289, elpetro_absmag_g_r=1.34337997436523, elpetro_ba=0.244175, z=0.0288791),
ResultRow(mangaid='1-114334', plate=7975, plateifu='7975-1902', ifu_name='1902', ra=324.259707865, dec=11.9062032693, elpetro_absmag_g_r=1.43183898925781, elpetro_ba=0.56156, z=0.0222473),
ResultRow(mangaid='1-114454', plate=7975, plateifu='7975-12704', ifu_name='12704', ra=324.586417578, dec=11.3486728499, elpetro_absmag_g_r=1.29723358154297, elpetro_ba=0.591206, z=0.0888606),
ResultRow(mangaid='1-114465', plate=7975, plateifu='7975-6104', ifu_name='6104', ra=324.89155826, dec=10.4834807378, elpetro_absmag_g_r=1.21394157409668, elpetro_ba=0.867381, z=0.0788547),
ResultRow(mangaid='1-114500', plate=7975, plateifu='7975-9102', ifu_name='9102', ra=324.548678082, dec=12.1942577854, elpetro_absmag_g_r=1.14164924621582, elpetro_ba=0.355321, z=0.0220849),
ResultRow(mangaid='1-114502', plate=7975, plateifu='7975-6103', ifu_name='6103', ra=324.799320383, dec=11.9393222318, elpetro_absmag_g_r=1.4673023223877, elpetro_ba=0.960909, z=0.0798058),
ResultRow(mangaid='1-114532', plate=7975, plateifu='7975-3703', ifu_name='3703', ra=325.161350811, dec=11.7227434323, elpetro_absmag_g_r=1.73165702819824, elpetro_ba=0.920698, z=0.0902261),
ResultRow(mangaid='1-114928', plate=7977, plateifu='7977-3702', ifu_name='3702', ra=331.080925269, dec=12.9683778244, elpetro_absmag_g_r=1.65719413757324, elpetro_ba=0.680598, z=0.0273478),
ResultRow(mangaid='1-114955', plate=7977, plateifu='7977-12701', ifu_name='12701', ra=332.602089837, dec=11.7130772993, elpetro_absmag_g_r=1.01249313354492, elpetro_ba=0.742333, z=0.0922799),
ResultRow(mangaid='1-114956', plate=7977, plateifu='7977-3704', ifu_name='3704', ra=332.798726703, dec=11.8007324019, elpetro_absmag_g_r=1.3456974029541, elpetro_ba=0.756417, z=0.0270248),
ResultRow(mangaid='1-114980', plate=7977, plateifu='7977-9102', ifu_name='9102', ra=332.83066426, dec=12.1847175842, elpetro_absmag_g_r=1.14808464050293, elpetro_ba=0.656607, z=0.0630915),
ResultRow(mangaid='1-114998', plate=7977, plateifu='7977-6102', ifu_name='6102', ra=332.756351306, dec=12.3743026872, elpetro_absmag_g_r=2.77035713195801, elpetro_ba=0.6304, z=0.0614042),
ResultRow(mangaid='1-115062', plate=7977, plateifu='7977-1901', ifu_name='1901', ra=330.855372733, dec=12.6758983985, elpetro_absmag_g_r=1.65952682495117, elpetro_ba=0.865932, z=0.0260569),
ResultRow(mangaid='1-115085', plate=7977, plateifu='7977-6103', ifu_name='6103', ra=331.802634213, dec=13.2660525434, elpetro_absmag_g_r=0.912630081176758, elpetro_ba=0.472784, z=0.0349304),
ResultRow(mangaid='1-115097', plate=7977, plateifu='7977-3701', ifu_name='3701', ra=332.203447059, dec=13.3647373417, elpetro_absmag_g_r=1.49947357177734, elpetro_ba=0.528689, z=0.0274473),
ResultRow(mangaid='1-115128', plate=7977, plateifu='7977-1902', ifu_name='1902', ra=332.481316937, dec=12.8180504327, elpetro_absmag_g_r=1.1044979095459, elpetro_ba=0.49669, z=0.0358116),
ResultRow(mangaid='1-115162', plate=7977, plateifu='7977-12703', ifu_name='12703', ra=333.201842347, dec=13.334120927, elpetro_absmag_g_r=1.13131713867188, elpetro_ba=0.479943, z=0.0738627),
ResultRow(mangaid='1-115320', plate=7977, plateifu='7977-3703', ifu_name='3703', ra=333.052045245, dec=12.205190661, elpetro_absmag_g_r=0.99519157409668, elpetro_ba=0.842721, z=0.0275274),
ResultRow(mangaid='1-124604', plate=8439, plateifu='8439-6103', ifu_name='6103', ra=141.34417921, dec=50.5536812778, elpetro_absmag_g_r=1.38611221313477, elpetro_ba=0.345553, z=0.0253001),
ResultRow(mangaid='1-133922', plate=8486, plateifu='8486-6104', ifu_name='6104', ra=239.195689664, dec=47.9955208307, elpetro_absmag_g_r=1.51949119567871, elpetro_ba=0.390132, z=0.0174718),
ResultRow(mangaid='1-133941', plate=8486, plateifu='8486-9102', ifu_name='9102', ra=239.030589848, dec=48.0308761201, elpetro_absmag_g_r=1.04214859008789, elpetro_ba=0.740501, z=0.0189045),
ResultRow(mangaid='1-133945', plate=8486, plateifu='8486-3703', ifu_name='3703', ra=238.881357667, dec=47.677310104, elpetro_absmag_g_r=1.70501899719238, elpetro_ba=0.75216, z=0.0183248),
ResultRow(mangaid='1-133948', plate=8486, plateifu='8486-6103', ifu_name='6103', ra=238.891298957, dec=48.0223923799, elpetro_absmag_g_r=1.62374401092529, elpetro_ba=0.662078, z=0.0195194),
ResultRow(mangaid='1-133976', plate=8486, plateifu='8486-9101', ifu_name='9101', ra=238.718472619, dec=47.8808922742, elpetro_absmag_g_r=1.26091766357422, elpetro_ba=0.627185, z=0.0182938),
ResultRow(mangaid='1-133987', plate=8486, plateifu='8486-1902', ifu_name='1902', ra=239.334163047, dec=48.2072621316, elpetro_absmag_g_r=1.73217391967773, elpetro_ba=0.902851, z=0.0195435),
ResultRow(mangaid='1-134004', plate=8486, plateifu='8486-1901', ifu_name='1901', ra=238.448582292, dec=47.4049584412, elpetro_absmag_g_r=1.27153015136719, elpetro_ba=0.667273, z=0.0185601),
ResultRow(mangaid='1-134020', plate=8486, plateifu='8486-6102', ifu_name='6102', ra=238.046893627, dec=48.0439162921, elpetro_absmag_g_r=1.4318904876709, elpetro_ba=0.452976, z=0.0193267),
ResultRow(mangaid='1-134209', plate=8549, plateifu='8549-9101', ifu_name='9101', ra=242.276471895, dec=46.6712048189, elpetro_absmag_g_r=1.46211814880371, elpetro_ba=0.938842, z=0.0545042),
ResultRow(mangaid='1-134239', plate=8549, plateifu='8549-3703', ifu_name='3703', ra=241.416442386, dec=46.8465606897, elpetro_absmag_g_r=1.20720481872559, elpetro_ba=0.840219, z=0.0571086),
ResultRow(mangaid='1-134248', plate=8549, plateifu='8549-3702', ifu_name='3702', ra=241.005278975, dec=46.8029102028, elpetro_absmag_g_r=1.04830741882324, elpetro_ba=0.603141, z=0.0212204),
ResultRow(mangaid='1-134293', plate=8549, plateifu='8549-6103', ifu_name='6103', ra=240.418740846, dec=46.085291751, elpetro_absmag_g_r=0.724908828735352, elpetro_ba=0.685683, z=0.0416784),
ResultRow(mangaid='1-134503', plate=8555, plateifu='8555-1901', ifu_name='1901', ra=243.873718478, dec=44.2912632693, elpetro_absmag_g_r=1.38505744934082, elpetro_ba=0.580866, z=0.0371472),
ResultRow(mangaid='1-134562', plate=8549, plateifu='8549-1902', ifu_name='1902', ra=242.727439731, dec=44.985695801, elpetro_absmag_g_r=0.999540328979492, elpetro_ba=0.709542, z=0.0355137),
ResultRow(mangaid='1-134597', plate=8549, plateifu='8549-12705', ifu_name='12705', ra=241.907223711, dec=45.0653702307, elpetro_absmag_g_r=1.32281875610352, elpetro_ba=0.493211, z=0.0441938),
ResultRow(mangaid='1-134599', plate=8549, plateifu='8549-12704', ifu_name='12704', ra=242.978644743, dec=46.1277269855, elpetro_absmag_g_r=1.2156925201416, elpetro_ba=0.347987, z=0.019658),
ResultRow(mangaid='1-134614', plate=8549, plateifu='8549-6102', ifu_name='6102', ra=243.009178672, dec=45.7750314981, elpetro_absmag_g_r=1.25503730773926, elpetro_ba=0.409631, z=0.0528277),
ResultRow(mangaid='1-134634', plate=8549, plateifu='8549-3704', ifu_name='3704', ra=243.18537291, dec=45.3520102657, elpetro_absmag_g_r=1.71317291259766, elpetro_ba=0.601301, z=0.0523251),
ResultRow(mangaid='1-134848', plate=8555, plateifu='8555-12703', ifu_name='12703', ra=244.331994382, dec=43.4796723691, elpetro_absmag_g_r=1.4580078125, elpetro_ba=0.276868, z=0.0584495),
ResultRow(mangaid='1-134924', plate=8555, plateifu='8555-9101', ifu_name='9101', ra=245.662015493, dec=43.4646577078, elpetro_absmag_g_r=1.76020240783691, elpetro_ba=0.819258, z=0.0319997),
ResultRow(mangaid='1-134954', plate=8555, plateifu='8555-12705', ifu_name='12705', ra=246.578190983, dec=43.4074643202, elpetro_absmag_g_r=1.38137054443359, elpetro_ba=0.692219, z=0.0315232),
ResultRow(mangaid='1-134964', plate=8555, plateifu='8555-3701', ifu_name='3701', ra=246.760690284, dec=43.4760996734, elpetro_absmag_g_r=1.5971508026123, elpetro_ba=0.853938, z=0.0462348),
ResultRow(mangaid='1-135030', plate=8603, plateifu='8603-12704', ifu_name='12704', ra=247.893876589, dec=40.5655973228, elpetro_absmag_g_r=1.31695175170898, elpetro_ba=0.700621, z=0.0273289),
ResultRow(mangaid='1-135054', plate=8550, plateifu='8550-12703', ifu_name='12703', ra=247.674430234, dec=40.5293893805, elpetro_absmag_g_r=1.34156799316406, elpetro_ba=0.853565, z=0.0298122),
ResultRow(mangaid='1-135055', plate=8601, plateifu='8601-6104', ifu_name='6104', ra=247.641287575, dec=40.5394009252, elpetro_absmag_g_r=1.68307113647461, elpetro_ba=0.808577, z=0.0300581),
ResultRow(mangaid='1-135057', plate=8601, plateifu='8601-12703', ifu_name='12703', ra=247.57407, dec=40.59861, elpetro_absmag_g_r=0.928314208984375, elpetro_ba=0.834526, z=0.0288518),
ResultRow(mangaid='1-135058', plate=8603, plateifu='8603-6103', ifu_name='6103', ra=247.800367796, dec=40.4218744432, elpetro_absmag_g_r=1.1861629486084, elpetro_ba=0.392703, z=0.0270087),
ResultRow(mangaid='1-135077', plate=8312, plateifu='8312-6104', ifu_name='6104', ra=247.638466864, dec=41.4385861863, elpetro_absmag_g_r=1.33458137512207, elpetro_ba=0.458094, z=0.0290664),
ResultRow(mangaid='1-135095', plate=8312, plateifu='8312-3702', ifu_name='3702', ra=247.245291144, dec=41.255253243, elpetro_absmag_g_r=1.44723129272461, elpetro_ba=0.658268, z=0.0332324),
ResultRow(mangaid='1-135129', plate=8603, plateifu='8603-12705', ifu_name='12705', ra=247.280269588, dec=40.5910287121, elpetro_absmag_g_r=1.81981086730957, elpetro_ba=0.503666, z=0.0327969),
ResultRow(mangaid='1-135133', plate=8603, plateifu='8603-12703', ifu_name='12703', ra=247.282646413, dec=40.6650474998, elpetro_absmag_g_r=1.36585807800293, elpetro_ba=0.627429, z=0.0299683),
ResultRow(mangaid='1-135134', plate=8603, plateifu='8603-9101', ifu_name='9101', ra=247.225624269, dec=40.8666111706, elpetro_absmag_g_r=1.85215187072754, elpetro_ba=0.958519, z=0.030343),
ResultRow(mangaid='1-135152', plate=8312, plateifu='8312-6103', ifu_name='6103', ra=246.887611078, dec=41.1385055016, elpetro_absmag_g_r=0.762582778930664, elpetro_ba=0.839506, z=0.0301811),
ResultRow(mangaid='1-135157', plate=8603, plateifu='8603-3702', ifu_name='3702', ra=247.04131843, dec=40.6956030265, elpetro_absmag_g_r=1.68464851379395, elpetro_ba=0.518096, z=0.0323713),
ResultRow(mangaid='1-135207', plate=8555, plateifu='8555-1902', ifu_name='1902', ra=246.323470587, dec=42.6942265737, elpetro_absmag_g_r=1.51096343994141, elpetro_ba=0.755948, z=0.031485),
ResultRow(mangaid='1-135371', plate=8588, plateifu='8588-9101', ifu_name='9101', ra=250.156240419, dec=39.2216349362, elpetro_absmag_g_r=1.37564086914062, elpetro_ba=0.430169, z=0.0352359),
ResultRow(mangaid='1-135372', plate=8588, plateifu='8588-6102', ifu_name='6102', ra=250.116709759, dec=39.3201174959, elpetro_absmag_g_r=1.68138885498047, elpetro_ba=0.789335, z=0.0300793),
ResultRow(mangaid='1-135383', plate=8588, plateifu='8588-12705', ifu_name='12705', ra=250.312873125, dec=39.7523514003, elpetro_absmag_g_r=1.2461109161377, elpetro_ba=0.355884, z=0.0301398),
ResultRow(mangaid='1-135468', plate=8550, plateifu='8550-12705', ifu_name='12705', ra=249.135695215, dec=39.0278800132, elpetro_absmag_g_r=1.37894058227539, elpetro_ba=0.670573, z=0.029986),
ResultRow(mangaid='1-135502', plate=8604, plateifu='8604-12703', ifu_name='12703', ra=247.76417484, dec=39.838503868, elpetro_absmag_g_r=1.57090950012207, elpetro_ba=0.804992, z=0.0305383),
ResultRow(mangaid='1-135503', plate=8604, plateifu='8604-3703', ifu_name='3703', ra=247.882111795, dec=39.8976507098, elpetro_absmag_g_r=1.6621150970459, elpetro_ba=0.914384, z=0.0296457),
ResultRow(mangaid='1-135506', plate=8601, plateifu='8601-3704', ifu_name='3704', ra=247.948553785, dec=39.8142396526, elpetro_absmag_g_r=1.70755767822266, elpetro_ba=0.740217, z=0.0295479),
ResultRow(mangaid='1-135512', plate=8601, plateifu='8601-6102', ifu_name='6102', ra=247.711831631, dec=40.0247994472, elpetro_absmag_g_r=0.778741836547852, elpetro_ba=0.783227, z=0.0279629),
ResultRow(mangaid='1-135516', plate=8550, plateifu='8550-6104', ifu_name='6104', ra=248.41315, dec=39.25763, elpetro_absmag_g_r=1.33112716674805, elpetro_ba=0.41841, z=0.0314747),
ResultRow(mangaid='1-135517', plate=8588, plateifu='8588-6101', ifu_name='6101', ra=248.456755755, dec=39.2632054313, elpetro_absmag_g_r=1.17428970336914, elpetro_ba=0.961436, z=0.0317611),
ResultRow(mangaid='1-135530', plate=8550, plateifu='8550-9101', ifu_name='9101', ra=247.409672103, dec=40.2353879985, elpetro_absmag_g_r=1.7724609375, elpetro_ba=0.286038, z=0.0283296),
ResultRow(mangaid='1-135545', plate=8601, plateifu='8601-6103', ifu_name='6103', ra=247.530374396, dec=40.8801572026, elpetro_absmag_g_r=1.43307685852051, elpetro_ba=0.402053, z=0.0301334),
ResultRow(mangaid='1-135548', plate=8601, plateifu='8601-12702', ifu_name='12702', ra=247.591672626, dec=40.9242421985, elpetro_absmag_g_r=1.05030250549316, elpetro_ba=0.948442, z=0.030559),
ResultRow(mangaid='1-135568', plate=8601, plateifu='8601-12701', ifu_name='12701', ra=247.718035556, dec=41.2861515449, elpetro_absmag_g_r=0.790615081787109, elpetro_ba=0.6425, z=0.0938565),
ResultRow(mangaid='1-135641', plate=8588, plateifu='8588-12704', ifu_name='12704', ra=249.557305714, dec=40.1468209363, elpetro_absmag_g_r=1.44169998168945, elpetro_ba=0.377239, z=0.030363),
ResultRow(mangaid='1-135657', plate=8588, plateifu='8588-1901', ifu_name='1901', ra=249.717085826, dec=40.1993481631, elpetro_absmag_g_r=1.22106170654297, elpetro_ba=0.772008, z=0.0364618),
ResultRow(mangaid='1-135679', plate=8588, plateifu='8588-6103', ifu_name='6103', ra=250.349059361, dec=40.2187885261, elpetro_absmag_g_r=1.4596061706543, elpetro_ba=0.57416, z=0.0331057),
ResultRow(mangaid='1-135794', plate=8588, plateifu='8588-1902', ifu_name='1902', ra=249.770169345, dec=39.2907848202, elpetro_absmag_g_r=1.6043529510498, elpetro_ba=0.617959, z=0.0304343),
ResultRow(mangaid='1-135810', plate=8601, plateifu='8601-12705', ifu_name='12705', ra=250.12314401, dec=39.2351144868, elpetro_absmag_g_r=1.43718338012695, elpetro_ba=0.451484, z=0.0297241),
ResultRow(mangaid='1-136120', plate=8606, plateifu='8606-3701', ifu_name='3701', ra=254.997419646, dec=36.0290774727, elpetro_absmag_g_r=1.36807250976562, elpetro_ba=0.780117, z=0.0573351),
ResultRow(mangaid='1-136248', plate=8606, plateifu='8606-3702', ifu_name='3702', ra=253.793913226, dec=36.9063091542, elpetro_absmag_g_r=1.42204856872559, elpetro_ba=0.50548, z=0.0235624),
ResultRow(mangaid='1-136268', plate=8606, plateifu='8606-6101', ifu_name='6101', ra=254.44755809, dec=37.6877060265, elpetro_absmag_g_r=1.20418357849121, elpetro_ba=0.498686, z=0.0416946),
ResultRow(mangaid='1-136286', plate=8606, plateifu='8606-9102', ifu_name='9102', ra=255.709053426, dec=36.7067487022, elpetro_absmag_g_r=0.959020614624023, elpetro_ba=0.425402, z=0.0327918),
ResultRow(mangaid='1-136304', plate=8606, plateifu='8606-1902', ifu_name='1902', ra=256.01730405, dec=36.4373676031, elpetro_absmag_g_r=1.11434555053711, elpetro_ba=0.488437, z=0.0236332),
ResultRow(mangaid='1-136305', plate=8606, plateifu='8606-3704', ifu_name='3704', ra=255.915542507, dec=36.3849337159, elpetro_absmag_g_r=1.20375823974609, elpetro_ba=0.379571, z=0.0246675),
ResultRow(mangaid='1-136306', plate=8606, plateifu='8606-12702', ifu_name='12702', ra=255.869931612, dec=36.4366645326, elpetro_absmag_g_r=1.50382232666016, elpetro_ba=0.873923, z=0.0231691),
ResultRow(mangaid='1-137528', plate=8440, plateifu='8440-6103', ifu_name='6103', ra=134.40495469, dec=41.0439158135, elpetro_absmag_g_r=1.47062683105469, elpetro_ba=0.814622, z=0.0874946),
ResultRow(mangaid='1-137714', plate=8247, plateifu='8247-3704', ifu_name='3704', ra=136.039205522, dec=42.3034211072, elpetro_absmag_g_r=1.52022552490234, elpetro_ba=0.529873, z=0.0265976),
ResultRow(mangaid='1-137730', plate=8247, plateifu='8247-9101', ifu_name='9101', ra=136.778259104, dec=42.5951034895, elpetro_absmag_g_r=0.883840560913086, elpetro_ba=0.883013, z=0.0415657),
ResultRow(mangaid='1-137795', plate=8247, plateifu='8247-12702', ifu_name='12702', ra=135.722564417, dec=43.2477264356, elpetro_absmag_g_r=0.863546371459961, elpetro_ba=0.696187, z=0.0436196),
ResultRow(mangaid='1-137797', plate=8247, plateifu='8247-12703', ifu_name='12703', ra=136.363181204, dec=44.1438800822, elpetro_absmag_g_r=0.999143600463867, elpetro_ba=0.640129, z=0.0533346),
ResultRow(mangaid='1-137799', plate=8247, plateifu='8247-3703', ifu_name='3703', ra=136.842484254, dec=43.275431327, elpetro_absmag_g_r=1.47824478149414, elpetro_ba=0.873061, z=0.0415027),
ResultRow(mangaid='1-137801', plate=8249, plateifu='8249-3701', ifu_name='3701', ra=136.68645847, dec=44.2609809065, elpetro_absmag_g_r=1.58131790161133, elpetro_ba=0.89272, z=0.0490247),
ResultRow(mangaid='1-137801', plate=8247, plateifu='8247-3702', ifu_name='3702', ra=136.68645847, dec=44.2609809065, elpetro_absmag_g_r=1.58131790161133, elpetro_ba=0.89272, z=0.0490247),
ResultRow(mangaid='1-137844', plate=8250, plateifu='8250-9102', ifu_name='9102', ra=139.427012288, dec=44.1006868066, elpetro_absmag_g_r=1.75845336914062, elpetro_ba=0.749366, z=0.0323374),
ResultRow(mangaid='1-137845', plate=8250, plateifu='8250-9101', ifu_name='9101', ra=139.308858804, dec=44.4891619278, elpetro_absmag_g_r=1.76508140563965, elpetro_ba=0.744394, z=0.0320271),
ResultRow(mangaid='1-137845', plate=8249, plateifu='8249-6104', ifu_name='6104', ra=139.308858804, dec=44.4891619278, elpetro_absmag_g_r=1.76508140563965, elpetro_ba=0.744394, z=0.0320271),
ResultRow(mangaid='1-137853', plate=8250, plateifu='8250-3702', ifu_name='3702', ra=138.935541667, dec=44.2360887374, elpetro_absmag_g_r=1.66989135742188, elpetro_ba=0.86344, z=0.0321364),
ResultRow(mangaid='1-137853', plate=8249, plateifu='8249-12705', ifu_name='12705', ra=138.935541667, dec=44.2360887374, elpetro_absmag_g_r=1.66989135742188, elpetro_ba=0.86344, z=0.0321364),
ResultRow(mangaid='1-137870', plate=8247, plateifu='8247-12704', ifu_name='12704', ra=136.730098431, dec=44.121516356, elpetro_absmag_g_r=0.911367416381836, elpetro_ba=0.883403, z=0.0494434),
ResultRow(mangaid='1-137875', plate=8249, plateifu='8249-6102', ifu_name='6102', ra=137.335924379, dec=45.0655135856, elpetro_absmag_g_r=0.834562301635742, elpetro_ba=0.943022, z=0.0510126),
ResultRow(mangaid='1-137883', plate=8249, plateifu='8249-3704', ifu_name='3704', ra=137.874763008, dec=45.4683204593, elpetro_absmag_g_r=1.44910621643066, elpetro_ba=0.802596, z=0.0268253),
ResultRow(mangaid='1-137890', plate=8249, plateifu='8249-1901', ifu_name='1901', ra=137.219338724, dec=44.9322670576, elpetro_absmag_g_r=1.19685363769531, elpetro_ba=0.549448, z=0.0265684),
ResultRow(mangaid='1-137898', plate=8249, plateifu='8249-12702', ifu_name='12702', ra=137.562412054, dec=44.6841342226, elpetro_absmag_g_r=0.960855484008789, elpetro_ba=0.379162, z=0.0346482),
ResultRow(mangaid='1-137908', plate=8249, plateifu='8249-12703', ifu_name='12703', ra=139.55919103, dec=45.6516888989, elpetro_absmag_g_r=1.1392650604248, elpetro_ba=0.700622, z=0.0269041),
ResultRow(mangaid='1-137912', plate=8250, plateifu='8250-12703', ifu_name='12703', ra=139.647743513, dec=44.5967370112, elpetro_absmag_g_r=0.227899551391602, elpetro_ba=0.779483, z=0.014213),
ResultRow(mangaid='1-137915', plate=8249, plateifu='8249-1902', ifu_name='1902', ra=139.797122285, dec=45.3665231283, elpetro_absmag_g_r=1.49036026000977, elpetro_ba=0.961667, z=0.031543),
ResultRow(mangaid='1-137961', plate=8249, plateifu='8249-3703', ifu_name='3703', ra=139.720468628, dec=45.7277823533, elpetro_absmag_g_r=1.23330879211426, elpetro_ba=0.895169, z=0.026438),
ResultRow(mangaid='1-138021', plate=8252, plateifu='8252-12705', ifu_name='12705', ra=145.443221426, dec=46.9738383647, elpetro_absmag_g_r=1.2242603302002, elpetro_ba=0.853881, z=0.0255975),
ResultRow(mangaid='1-138034', plate=8252, plateifu='8252-3701', ifu_name='3701', ra=144.846118089, dec=47.1268642387, elpetro_absmag_g_r=1.54246711730957, elpetro_ba=0.52102, z=0.027267),
ResultRow(mangaid='1-138087', plate=8252, plateifu='8252-12701', ifu_name='12701', ra=144.23925577, dec=48.2941162265, elpetro_absmag_g_r=0.339872360229492, elpetro_ba=0.643171, z=0.0249804),
ResultRow(mangaid='1-138102', plate=8252, plateifu='8252-6102', ifu_name='6102', ra=144.557956402, dec=48.3883017672, elpetro_absmag_g_r=1.02695465087891, elpetro_ba=0.739386, z=0.0257882),
ResultRow(mangaid='1-138105', plate=8252, plateifu='8252-6101', ifu_name='6101', ra=144.617048762, dec=48.5255082955, elpetro_absmag_g_r=1.2266845703125, elpetro_ba=0.738417, z=0.0248735),
ResultRow(mangaid='1-138106', plate=8252, plateifu='8252-3703', ifu_name='3703', ra=144.352308981, dec=48.5154530802, elpetro_absmag_g_r=1.10024833679199, elpetro_ba=0.806873, z=0.0243491),
ResultRow(mangaid='1-138140', plate=8252, plateifu='8252-3704', ifu_name='3704', ra=145.308121958, dec=47.6885981864, elpetro_absmag_g_r=1.4588623046875, elpetro_ba=0.869377, z=0.0467992),
ResultRow(mangaid='1-138157', plate=8252, plateifu='8252-9102', ifu_name='9102', ra=145.541530882, dec=48.0128634742, elpetro_absmag_g_r=0.744720458984375, elpetro_ba=0.630656, z=0.0561577),
ResultRow(mangaid='1-138164', plate=8252, plateifu='8252-1902', ifu_name='1902', ra=146.091838441, dec=47.459850984, elpetro_absmag_g_r=1.3321475982666, elpetro_ba=0.917753, z=0.0258991),
ResultRow(mangaid='1-147394', plate=8250, plateifu='8250-12705', ifu_name='12705', ra=140.39879069, dec=43.2572462761, elpetro_absmag_g_r=0.938104629516602, elpetro_ba=0.255031, z=0.0160493),
ResultRow(mangaid='1-147475', plate=8453, plateifu='8453-12704', ifu_name='12704', ra=153.13479279, dec=46.6953613957, elpetro_absmag_g_r=0.833671569824219, elpetro_ba=0.885371, z=0.0381522),
ResultRow(mangaid='1-147488', plate=8453, plateifu='8453-1902', ifu_name='1902', ra=153.21425096, dec=46.9128221111, elpetro_absmag_g_r=1.23658561706543, elpetro_ba=0.452716, z=0.0241526),
ResultRow(mangaid='1-147496', plate=8453, plateifu='8453-6102', ifu_name='6102', ra=153.213639346, dec=47.2949237539, elpetro_absmag_g_r=0.782651901245117, elpetro_ba=0.763455, z=0.0395361),
ResultRow(mangaid='1-147507', plate=8453, plateifu='8453-6101', ifu_name='6101', ra=152.773273523, dec=46.8995324281, elpetro_absmag_g_r=1.36958885192871, elpetro_ba=0.591953, z=0.0250793),
ResultRow(mangaid='1-147514', plate=8453, plateifu='8453-12701', ifu_name='12701', ra=151.309949901, dec=46.6508890341, elpetro_absmag_g_r=1.22234153747559, elpetro_ba=0.814928, z=0.0251003),
ResultRow(mangaid='1-147521', plate=8453, plateifu='8453-3702', ifu_name='3702', ra=152.545357653, dec=46.9522671141, elpetro_absmag_g_r=1.49322319030762, elpetro_ba=0.459684, z=0.0253024),
ResultRow(mangaid='1-147522', plate=8453, plateifu='8453-9102', ifu_name='9102', ra=152.514716814, dec=47.1209306545, elpetro_absmag_g_r=1.25778961181641, elpetro_ba=0.923625, z=0.0653628),
ResultRow(mangaid='1-147537', plate=8453, plateifu='8453-12702', ifu_name='12702', ra=151.547771122, dec=47.2950386608, elpetro_absmag_g_r=1.23126220703125, elpetro_ba=0.554456, z=0.0381068),
ResultRow(mangaid='1-147602', plate=8453, plateifu='8453-6103', ifu_name='6103', ra=151.729558675, dec=47.9841111295, elpetro_absmag_g_r=1.75717353820801, elpetro_ba=0.932527, z=0.067855),
ResultRow(mangaid='1-147649', plate=8453, plateifu='8453-9101', ifu_name='9101', ra=152.046182936, dec=47.5174726058, elpetro_absmag_g_r=1.00096130371094, elpetro_ba=0.351228, z=0.0384484),
ResultRow(mangaid='1-147685', plate=8452, plateifu='8452-12702', ifu_name='12702', ra=156.044276918, dec=47.5239549356, elpetro_absmag_g_r=1.55574607849121, elpetro_ba=0.233187, z=0.0425735),
ResultRow(mangaid='1-147787', plate=8453, plateifu='8453-6104', ifu_name='6104', ra=154.119427243, dec=47.3648162968, elpetro_absmag_g_r=1.19962120056152, elpetro_ba=0.395606, z=0.0403757),
ResultRow(mangaid='1-147815', plate=8453, plateifu='8453-1901', ifu_name='1901', ra=153.365546207, dec=47.516235898, elpetro_absmag_g_r=1.38706016540527, elpetro_ba=0.598532, z=0.0253396),
ResultRow(mangaid='1-147863', plate=8453, plateifu='8453-12703', ifu_name='12703', ra=153.685061429, dec=48.689638952, elpetro_absmag_g_r=1.26955413818359, elpetro_ba=0.527282, z=0.0632026),
ResultRow(mangaid='1-148046', plate=8452, plateifu='8452-1902', ifu_name='1902', ra=157.77930272, dec=48.0148303874, elpetro_absmag_g_r=0.790740966796875, elpetro_ba=0.925427, z=0.058703),
ResultRow(mangaid='1-148068', plate=8452, plateifu='8452-12703', ifu_name='12703', ra=156.805684986, dec=48.2447914261, elpetro_absmag_g_r=1.28773880004883, elpetro_ba=0.805928, z=0.0609631),
ResultRow(mangaid='1-148127', plate=8452, plateifu='8452-3702', ifu_name='3702', ra=156.298016415, dec=47.7390794143, elpetro_absmag_g_r=1.83721733093262, elpetro_ba=0.850507, z=0.0621072),
ResultRow(mangaid='1-155337', plate=8249, plateifu='8249-12701', ifu_name='12701', ra=136.156282887, dec=44.874731539, elpetro_absmag_g_r=1.0489559173584, elpetro_ba=0.426153, z=0.0345388),
ResultRow(mangaid='1-155440', plate=8249, plateifu='8249-9101', ifu_name='9101', ra=136.476492743, dec=46.259107066, elpetro_absmag_g_r=1.50351715087891, elpetro_ba=0.678623, z=0.0518655),
ResultRow(mangaid='1-155456', plate=8249, plateifu='8249-6103', ifu_name='6103', ra=136.793850517, dec=46.2111457117, elpetro_absmag_g_r=1.64328575134277, elpetro_ba=0.962315, z=0.040334),
ResultRow(mangaid='1-155463', plate=8249, plateifu='8249-6101', ifu_name='6101', ra=137.562456488, dec=46.2932696556, elpetro_absmag_g_r=1.21530723571777, elpetro_ba=0.558139, z=0.026734),
ResultRow(mangaid='1-155541', plate=8249, plateifu='8249-9102', ifu_name='9102', ra=138.37190266, dec=46.6142215927, elpetro_absmag_g_r=1.68683815002441, elpetro_ba=0.544517, z=0.0802487),
ResultRow(mangaid='1-155558', plate=8249, plateifu='8249-3702', ifu_name='3702', ra=137.03265263, dec=45.9209619515, elpetro_absmag_g_r=1.2888126373291, elpetro_ba=0.589713, z=0.0267975),
ResultRow(mangaid='1-155903', plate=8439, plateifu='8439-1901', ifu_name='1901', ra=141.190236455, dec=49.4448016737, elpetro_absmag_g_r=1.11646842956543, elpetro_ba=0.969302, z=0.0163661),
ResultRow(mangaid='1-155926', plate=8439, plateifu='8439-12702', ifu_name='12702', ra=141.539307103, dec=49.3102016203, elpetro_absmag_g_r=1.5238151550293, elpetro_ba=0.796842, z=0.0269288),
ResultRow(mangaid='1-155975', plate=8439, plateifu='8439-6102', ifu_name='6102', ra=142.778167545, dec=49.0797456578, elpetro_absmag_g_r=1.39241409301758, elpetro_ba=0.725726, z=0.0339319),
ResultRow(mangaid='1-155978', plate=8439, plateifu='8439-12701', ifu_name='12701', ra=143.010196099, dec=48.551093077, elpetro_absmag_g_r=0.783824920654297, elpetro_ba=0.526699, z=0.0162666),
ResultRow(mangaid='1-156011', plate=8252, plateifu='8252-3702', ifu_name='3702', ra=144.059863049, dec=48.7456976861, elpetro_absmag_g_r=1.76472663879395, elpetro_ba=0.842447, z=0.0905527),
ResultRow(mangaid='1-156037', plate=8439, plateifu='8439-9102', ifu_name='9102', ra=143.754018642, dec=48.9767418599, elpetro_absmag_g_r=0.751018524169922, elpetro_ba=0.550243, z=0.0249582),
ResultRow(mangaid='1-156061', plate=8439, plateifu='8439-1902', ifu_name='1902', ra=143.697034579, dec=48.7475756651, elpetro_absmag_g_r=1.58340835571289, elpetro_ba=0.859392, z=0.0259393),
ResultRow(mangaid='1-156062', plate=8439, plateifu='8439-12705', ifu_name='12705', ra=143.288053477, dec=49.0503236816, elpetro_absmag_g_r=1.47800445556641, elpetro_ba=0.844666, z=0.0511487),
ResultRow(mangaid='1-156074', plate=8439, plateifu='8439-6101', ifu_name='6101', ra=143.184618775, dec=48.7963482386, elpetro_absmag_g_r=0.928119659423828, elpetro_ba=0.571587, z=0.0263866),
ResultRow(mangaid='1-156137', plate=8439, plateifu='8439-12704', ifu_name='12704', ra=144.031088241, dec=50.4392201284, elpetro_absmag_g_r=1.18128204345703, elpetro_ba=0.444969, z=0.0640375),
ResultRow(mangaid='1-156154', plate=8439, plateifu='8439-9101', ifu_name='9101', ra=142.713904348, dec=50.3188614584, elpetro_absmag_g_r=0.718753814697266, elpetro_ba=0.439643, z=0.0379614),
ResultRow(mangaid='1-166736', plate=8459, plateifu='8459-12702', ifu_name='12702', ra=147.585854164, dec=43.1455699673, elpetro_absmag_g_r=1.00078010559082, elpetro_ba=0.826408, z=0.0170809),
ResultRow(mangaid='1-166738', plate=8459, plateifu='8459-12705', ifu_name='12705', ra=148.117076795, dec=42.8191413496, elpetro_absmag_g_r=0.993005752563477, elpetro_ba=0.917477, z=0.016087),
ResultRow(mangaid='1-166739', plate=8459, plateifu='8459-12701', ifu_name='12701', ra=147.37898128, dec=42.1302903462, elpetro_absmag_g_r=1.65610313415527, elpetro_ba=0.772124, z=0.0718279),
ResultRow(mangaid='1-166754', plate=8459, plateifu='8459-3704', ifu_name='3704', ra=147.32578151, dec=43.3517193284, elpetro_absmag_g_r=0.900096893310547, elpetro_ba=0.417579, z=0.0164167),
ResultRow(mangaid='1-166889', plate=8459, plateifu='8459-9101', ifu_name='9101', ra=147.277688884, dec=44.0486811007, elpetro_absmag_g_r=0.859102249145508, elpetro_ba=0.525696, z=0.0156854),
ResultRow(mangaid='1-166919', plate=8459, plateifu='8459-3702', ifu_name='3702', ra=146.709100143, dec=43.4238429596, elpetro_absmag_g_r=1.31706047058105, elpetro_ba=0.866956, z=0.0722105),
ResultRow(mangaid='1-166930', plate=8459, plateifu='8459-6103', ifu_name='6103', ra=146.789027825, dec=43.4185743942, elpetro_absmag_g_r=1.5174388885498, elpetro_ba=0.550614, z=0.0720255),
ResultRow(mangaid='1-166932', plate=8459, plateifu='8459-3701', ifu_name='3701', ra=146.785813609, dec=43.5104758987, elpetro_absmag_g_r=1.91670417785645, elpetro_ba=0.951709, z=0.0724488),
ResultRow(mangaid='1-166947', plate=8459, plateifu='8459-3703', ifu_name='3703', ra=147.335, dec=43.44299, elpetro_absmag_g_r=1.58527755737305, elpetro_ba=0.921915, z=0.0719792),
ResultRow(mangaid='1-166969', plate=8459, plateifu='8459-6102', ifu_name='6102', ra=147.990674372, dec=43.4140430617, elpetro_absmag_g_r=0.653806686401367, elpetro_ba=0.921856, z=0.0158773),
ResultRow(mangaid='1-167013', plate=8459, plateifu='8459-9102', ifu_name='9102', ra=149.888880629, dec=43.6605000576, elpetro_absmag_g_r=0.906030654907227, elpetro_ba=0.779596, z=0.0170491),
ResultRow(mangaid='1-167044', plate=8459, plateifu='8459-6104', ifu_name='6104', ra=149.346878642, dec=44.1547632349, elpetro_absmag_g_r=1.66348838806152, elpetro_ba=0.938967, z=0.0741969),
ResultRow(mangaid='1-167067', plate=8459, plateifu='8459-1902', ifu_name='1902', ra=148.502535855, dec=43.0448001127, elpetro_absmag_g_r=1.06909561157227, elpetro_ba=0.766896, z=0.0169612),
ResultRow(mangaid='1-167075', plate=8459, plateifu='8459-12704', ifu_name='12704', ra=147.604836276, dec=44.0406378719, elpetro_absmag_g_r=0.854578018188477, elpetro_ba=0.62063, z=0.0158584),
ResultRow(mangaid='1-167079', plate=8459, plateifu='8459-1901', ifu_name='1901', ra=147.801793989, dec=44.0093089046, elpetro_absmag_g_r=1.34856986999512, elpetro_ba=0.777813, z=0.015711),
ResultRow(mangaid='1-167080', plate=8459, plateifu='8459-6101', ifu_name='6101', ra=147.712302507, dec=44.0304545816, elpetro_absmag_g_r=1.1823673248291, elpetro_ba=0.809313, z=0.0463805),
ResultRow(mangaid='1-167113', plate=8459, plateifu='8459-12703', ifu_name='12703', ra=148.84161359, dec=44.4405591163, elpetro_absmag_g_r=1.2220287322998, elpetro_ba=0.415025, z=0.0264594),
ResultRow(mangaid='1-167380', plate=8453, plateifu='8453-3701', ifu_name='3701', ra=153.231920862, dec=46.4177099017, elpetro_absmag_g_r=1.58125877380371, elpetro_ba=0.478194, z=0.0382131),
ResultRow(mangaid='1-167555', plate=8453, plateifu='8453-3703', ifu_name='3703', ra=153.752608461, dec=46.7567528969, elpetro_absmag_g_r=1.26830101013184, elpetro_ba=0.759023, z=0.0246439),
ResultRow(mangaid='1-167564', plate=8453, plateifu='8453-12705', ifu_name='12705', ra=153.034483163, dec=46.2936923797, elpetro_absmag_g_r=1.14142608642578, elpetro_ba=0.399407, z=0.024247)]
The Query Datamodel shows you every parameter that is available to search on. It groups parameters together into common types.
```python
qdm = q.datamodel
qdm
```
<QueryDataModel release='MPL-4', n_groups=7, n_parameters=41, n_total=0>
```python
qdm.groups
```
[<ParameterGroup name=Metadata, n_parameters=7>,
<ParameterGroup name=Spaxel Metadata, n_parameters=3>,
<ParameterGroup name=Emission Lines, n_parameters=13>,
<ParameterGroup name=Kinematics, n_parameters=6>,
<ParameterGroup name=Spectral Indices, n_parameters=1>,
<ParameterGroup name=NSA Catalog, n_parameters=11>,
<ParameterGroup name=Other, n_parameters=0>]
```python
# look at all the available NSA parameters
qdm.groups['nsa'].parameters
```
[<QueryParameter full=nsa.iauname, name=iauname, short=iauname, remote=iauname, display=IAU Name>,
<QueryParameter full=nsa.ra, name=ra, short=ra, remote=ra, display=RA>,
<QueryParameter full=nsa.dec, name=dec, short=dec, remote=dec, display=Dec>,
<QueryParameter full=nsa.z, name=z, short=z, remote=z, display=Redshift>,
<QueryParameter full=nsa.elpetro_ba, name=elpetro_ba, short=axisratio, remote=elpetro_ba, display=Elpetro axis ratio>,
<QueryParameter full=nsa.elpetro_mag_g_r, name=elpetro_mag_g_r, short=g_r, remote=elpetro_mag_g_r, display=g-r>,
<QueryParameter full=nsa.elpetro_absmag_g_r, name=elpetro_absmag_g_r, short=absmag_g_r, remote=elpetro_absmag_g_r, display=Absmag g-r>,
<QueryParameter full=nsa.elpetro_logmass, name=elpetro_logmass, short=logmass, remote=elpetro_logmass, display=Elpetro Stellar Mass>,
<QueryParameter full=nsa.elpetro_th50_r, name=elpetro_th50_r, short=th50_r, remote=elpetro_th50_r, display=r-band half-light radius>,
<QueryParameter full=nsa.sersic_logmass, name=sersic_logmass, short=sersic_logmass, remote=sersic_logmass, display=Sersic Stellar Mass>,
<QueryParameter full=nsa.sersic_ba, name=sersic_ba, short=sersic_ba, remote=sersic_ba, display=Sersic axis ratio>]
```python
```
|
sdssREPO_NAMEmarvinPATH_START.@marvin_extracted@marvin-main@docs@sphinx@jupyter@whats_new_v22.ipynb@.PATH_END.py
|
{
"filename": "chebyshev.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py2/numpy/polynomial/chebyshev.py",
"type": "Python"
}
|
"""
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmulx` -- multiply a Chebyshev series in ``P_i(x)`` by ``x``.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to a positive integer power.
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebvander2d` -- Vandermonde-like matrix for 2D power series.
- `chebvander3d` -- Vandermonde-like matrix for 3D power series.
- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
- `chebweight` -- Chebyshev weight function.
- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
- `chebinterpolate` -- interpolate a function at the Chebyshev points.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
'chebgauss', 'chebweight', 'chebinterpolate']
# Public alias: trim trailing "small" coefficients from a Chebyshev series
# (delegates to the shared polyutils implementation).
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c):
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = c.size
zs = np.zeros(2*n-1, dtype=c.dtype)
zs[n-1:] = c/2
return zs + zs[::-1]
def _zseries_to_cseries(zs):
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
c = zs[n-1:].copy()
c[1:n] *= 2
return c
def _zseries_mul(z1, z2):
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D but this is not checked.
Returns
-------
product : 1-D ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2):
    """Divide the first z-series by the second.
    Divide `z1` by `z2` and return the quotient and remainder as z-series.
    Warning: this implementation only applies when both z1 and z2 have the
    same symmetry, which is sufficient for present purposes.
    Parameters
    ----------
    z1, z2 : 1-D ndarray
        The arrays must be 1-D and have the same symmetry, but this is not
        checked.
    Returns
    -------
    (quotient, remainder) : 1-D ndarrays
        Quotient and remainder as z-series.
    Notes
    -----
    This is not the same as polynomial division on account of the desired form
    of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
    then the following rules apply:
    S/S -> S,S
    A/A -> S,A
    The restriction to types of the same symmetry could be fixed but seems like
    unneeded generality. There is no natural form for the remainder in the case
    where there is no symmetry.
    """
    # Work on copies: both inputs are modified in place below.
    z1 = z1.copy()
    z2 = z2.copy()
    len1 = len(z1)
    len2 = len(z2)
    if len2 == 1:
        # Division by a constant: scale the numerator; remainder is zero.
        z1 /= z2
        return z1, z1[:1]*0
    elif len1 < len2:
        # Numerator shorter than divisor: quotient zero, remainder is z1.
        return z1[:1]*0, z1
    else:
        dlen = len1 - len2
        # Normalize the divisor so its leading coefficient is 1; the
        # quotient is rescaled by `scl` at the end to compensate.
        scl = z2[0]
        z2 /= scl
        quo = np.empty(dlen + 1, dtype=z1.dtype)
        i = 0
        j = dlen
        # Eliminate numerator coefficients from both ends at once.  The
        # shared symmetry of z1 and z2 makes the quotient symmetric, so
        # quo[i] and quo[dlen - i] are equal and one pass from each end
        # subtracts both contributions from z1.
        while i < j:
            r = z1[i]
            quo[i] = z1[i]
            quo[dlen - i] = r
            tmp = r*z2
            z1[i:i+len2] -= tmp
            z1[j:j+len2] -= tmp
            i += 1
            j -= 1
        # Final (middle) elimination step after the two indices meet.
        r = z1[i]
        quo[i] = r
        tmp = r*z2
        z1[i:i+len2] -= tmp
        # Undo the divisor normalization.
        quo /= scl
        # The surviving middle section of z1 is the symmetric remainder.
        rem = z1[i+1:i-1+len2].copy()
        return quo, rem
def _zseries_der(zs):
    """Differentiate a z-series.
    The derivative is with respect to x, not z. This is achieved using the
    chain rule and the value of dx/dz given in the module notes.
    Parameters
    ----------
    zs : z-series
        The z-series to differentiate.
    Returns
    -------
    derivative : z-series
        The derivative
    Notes
    -----
    The zseries for x (ns) has been multiplied by two in order to avoid
    using floats that are incompatible with Decimal and likely other
    specialized scalar types. This scaling has been compensated by
    multiplying the value of zs by two also so that the two cancels in the
    division.
    """
    n = len(zs)//2
    # z-series for 2*x, i.e. z^-1 + z (see Notes for the factor of two).
    ns = np.array([-1, 0, 1], dtype=zs.dtype)
    # d/dz: multiply each coefficient by its exponent (-n..n), times two
    # to cancel the scaling of ns.  NOTE: this mutates `zs` in place.
    zs *= np.arange(-n, n+1)*2
    # Chain rule: divide by 2*dx/dz; the remainder is discarded.
    d, r = _zseries_div(zs, ns)
    return d
def _zseries_int(zs):
    """Integrate a z-series.
    The integral is with respect to x, not z. This is achieved by a change
    of variable using dx/dz given in the module notes.
    Parameters
    ----------
    zs : z-series
        The z-series to integrate
    Returns
    -------
    integral : z-series
        The indefinite integral
    Notes
    -----
    The zseries for x (ns) has been multiplied by two in order to avoid
    using floats that are incompatible with Decimal and likely other
    specialized scalar types. This scaling has been compensated by
    dividing the resulting zs by two.
    """
    n = 1 + len(zs)//2
    # z-series for 2*x, i.e. z^-1 + z (see Notes for the factor of two).
    ns = np.array([-1, 0, 1], dtype=zs.dtype)
    # Change of variable: multiply by 2*dx/dz before integrating in z.
    zs = _zseries_mul(zs, ns)
    # Integrate term by term in z: divide each coefficient by its new
    # exponent; the factor of two compensates the scaling of ns.
    div = np.arange(-n, n+1)*2
    zs[:n] /= div[:n]
    zs[n+1:] /= div[n+1:]
    # The z^0 slot is the arbitrary integration constant; set it to zero.
    zs[n] = 0
    return zs
#
# Chebyshev series functions
#
def poly2cheb(pol):
    """
    Convert a polynomial to a Chebyshev series.
    Convert an array of polynomial coefficients (relative to the
    "standard" basis), ordered from lowest degree to highest, to the
    coefficients of the equivalent Chebyshev series, also ordered from
    lowest to highest degree.
    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients
    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Chebyshev
        series.
    See Also
    --------
    cheb2poly
    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    Examples
    --------
    >>> from numpy import polynomial as P
    >>> P.chebyshev.poly2cheb(range(4))
    array([ 1.  ,  3.25,  1.  ,  0.75])
    """
    [pol] = pu.as_series([pol])
    # Horner-style synthesis in the Chebyshev basis: fold the power-basis
    # coefficients in from the highest degree down, multiplying the
    # running result by x at each step.
    res = 0
    for coef in pol[::-1]:
        res = chebadd(chebmulx(res), coef)
    return res
def cheb2poly(c):
    """
    Convert a Chebyshev series to a polynomial.
    Convert an array of Chebyshev series coefficients, ordered from lowest
    degree to highest, to the coefficients of the equivalent polynomial
    (relative to the "standard" basis), also ordered from lowest to
    highest degree.
    Parameters
    ----------
    c : array_like
        1-D array containing the Chebyshev series coefficients, ordered
        from lowest order term to highest.
    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.
    See Also
    --------
    poly2cheb
    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    Examples
    --------
    >>> from numpy import polynomial as P
    >>> P.chebyshev.cheb2poly(range(4))
    array([ -2.,  -8.,   4.,  12.])
    """
    from .polynomial import polyadd, polysub, polymulx
    [c] = pu.as_series([c])
    n = len(c)
    # Degree 0 or 1: the two bases coincide.
    if n < 3:
        return c
    # Clenshaw-style downward recurrence using the Chebyshev relation
    # T_{k+1}(x) = 2*x*T_k(x) - T_{k-1}(x); `hi` holds the current-degree
    # partial result, `lo` the one below it.
    lo = c[-2]
    hi = c[-1]
    for deg in range(n - 1, 1, -1):
        lo, hi = polysub(c[deg - 2], hi), polyadd(lo, polymulx(hi)*2)
    return polyadd(lo, polymulx(hi))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain, [-1, 1].
chebdomain = np.array([-1, 1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0, 1])
def chebline(off, scl):
    """
    Chebyshev series whose graph is a straight line.
    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.
    Returns
    -------
    y : ndarray
        This module's representation of the Chebyshev series for
        ``off + scl*x``.
    See Also
    --------
    polyline
    Examples
    --------
    >>> import numpy.polynomial.chebyshev as C
    >>> C.chebline(3,2)
    array([3, 2])
    >>> C.chebval(-3, C.chebline(3,2)) # should be -3
    -3.0
    """
    # A degenerate line (zero slope) is just the constant series [off].
    if scl == 0:
        return np.array([off])
    return np.array([off, scl])
def chebfromroots(roots):
    """
    Generate a Chebyshev series with given roots.
    The function returns the coefficients of the polynomial
    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
    in Chebyshev form, where the `r_n` are the roots specified in `roots`.
    A root of multiplicity n must appear in `roots` n times, in any
    order.  If the returned coefficients are `c`, then
    .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
    The coefficient of the last term is not generally 1 for monic
    polynomials in Chebyshev form.
    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.
    Returns
    -------
    out : ndarray
        1-D array of coefficients. If all roots are real then `out` is a
        real array; if some of the roots are complex, then `out` is
        complex even if all the coefficients in the result are real.
    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, hermfromroots,
    hermefromroots.
    Examples
    --------
    >>> import numpy.polynomial.chebyshev as C
    >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
    array([ 0.  , -0.25,  0.  ,  0.25])
    >>> j = complex(0,1)
    >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
    array([ 1.5+0.j,  0.0+0.j,  0.5+0.j])
    """
    # No roots: the empty product is the constant 1.
    if len(roots) == 0:
        return np.ones(1)
    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # One linear factor (x - r) per root, then multiply pairwise in a
    # balanced fashion to keep intermediate degrees small.
    factors = [chebline(-r, 1) for r in roots]
    count = len(factors)
    while count > 1:
        half, odd = divmod(count, 2)
        merged = [chebmul(factors[k], factors[k + half]) for k in range(half)]
        if odd:
            # Fold the leftover factor into the first product.
            merged[0] = chebmul(merged[0], factors[-1])
        factors = merged
        count = half
    return factors[0]
def chebadd(c1, c2):
    """
    Add one Chebyshev series to another.
    Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.
    Returns
    -------
    out : ndarray
        Array representing the Chebyshev series of their sum.
    See Also
    --------
    chebsub, chebmulx, chebmul, chebdiv, chebpow
    Notes
    -----
    The sum of two Chebyshev series is itself a Chebyshev series, so
    addition is simply component-wise, just as for "standard"
    polynomials.
    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> C.chebadd(c1,c2)
    array([ 4.,  4.,  4.])
    """
    # as_series returns trimmed copies we are free to modify.
    [c1, c2] = pu.as_series([c1, c2])
    # Accumulate the shorter series into the longer one.
    if len(c2) > len(c1):
        c1, c2 = c2, c1
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def chebsub(c1, c2):
    """
    Subtract one Chebyshev series from another.

    Returns the difference of two Chebyshev series `c1` - `c2`. The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Chebyshev series coefficients representing their difference.

    See Also
    --------
    chebadd, chebmulx, chebmul, chebdiv, chebpow

    Notes
    -----
    The difference of two Chebyshev series is itself a Chebyshev series
    (no "reprojection" onto the basis is required), so subtraction is
    simply component-wise, just as for power series.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> C.chebsub(c1,c2)
    array([-2.,  0.,  2.])
    >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
    array([ 2.,  0., -2.])
    """
    # as_series hands back trimmed, writable copies with a common dtype.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c2) >= len(c1):
        # c2 is at least as long: negate it, then fold c1 in.
        out = -c2
        out[:c1.size] += c1
    else:
        # c1 is longer: subtract c2 from its leading coefficients.
        out = c1
        out[:c2.size] -= c2
    return pu.trimseq(out)
def chebmulx(c):
    """Multiply a Chebyshev series by x.

    Multiply the polynomial `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    .. versionadded:: 1.5.0

    Uses the recurrence ``x*T_0 = T_1`` and
    ``x*T_n = (T_{n-1} + T_{n+1})/2`` for ``n >= 1``.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebmulx([1,2,3])
    array([ 1. ,  2.5,  1. ,  1.5])
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # The zero series needs special treatment
    if len(c) == 1 and c[0] == 0:
        return c

    prd = np.empty(len(c) + 1, dtype=c.dtype)
    # x*T_0 = T_1: c[0] moves up one degree, leaving a zero constant term.
    prd[0] = c[0]*0
    prd[1] = c[0]
    if len(c) > 1:
        # x*T_n = (T_{n-1} + T_{n+1})/2 for n >= 1: each higher
        # coefficient splits half up one degree and half down one degree.
        tmp = c[1:]/2
        prd[2:] = tmp
        prd[0:-2] += tmp
    return prd
def chebmul(c1, c2):
    """
    Multiply one Chebyshev series by another.

    Returns the product of two Chebyshev series `c1` * `c2`. The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Chebyshev series coefficients representing their product.

    See Also
    --------
    chebadd, chebsub, chebmulx, chebdiv, chebpow

    Notes
    -----
    In general, the (polynomial) product of two C-series results in terms
    that are not in the Chebyshev polynomial basis set. Thus, to express
    the product as a C-series, it is typically necessary to "reproject"
    the product onto said basis set, which typically produces
    "unintuitive" (but correct) results; see Examples section below.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> C.chebmul(c1,c2) # multiplication requires "reprojection"
    array([  6.5,  12. ,  12. ,   4. ,   1.5])
    """
    # Trim the inputs, then work in the z-series representation, where
    # multiplying Chebyshev series reduces to ordinary convolution.
    [c1, c2] = pu.as_series([c1, c2])
    zprod = _zseries_mul(_cseries_to_zseries(c1), _cseries_to_zseries(c2))
    return pu.trimseq(_zseries_to_cseries(zprod))
def chebdiv(c1, c2):
    """
    Divide one Chebyshev series by another.

    Returns the quotient-with-remainder of two Chebyshev series
    `c1` / `c2`. The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Chebyshev series coefficients representing the quotient and
        remainder.

    See Also
    --------
    chebadd, chebsub, chebmulx, chebmul, chebpow

    Notes
    -----
    In general, the (polynomial) division of one C-series by another
    results in quotient and remainder terms that are not in the Chebyshev
    polynomial basis set. Thus, to express these results as C-series, it
    is typically necessary to "reproject" the results onto said basis
    set, which typically produces "unintuitive" (but correct) results;
    see Examples section below.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
    (array([ 3.]), array([-8., -4.]))
    >>> c2 = (0,1,2,3)
    >>> C.chebdiv(c2,c1) # neither "intuitive"
    (array([ 0.,  2.]), array([-2., -4.]))
    """
    # Trimmed copies; after trimming, a zero leading coefficient means
    # the divisor is identically zero.
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    n1 = len(c1)
    n2 = len(c2)
    if n1 < n2:
        # Degree of dividend below divisor: quotient 0, remainder c1.
        return c1[:1]*0, c1
    if n2 == 1:
        # Constant divisor: plain scaling, remainder 0.
        return c1/c2[-1], c1[:1]*0
    # General case: divide in the z-series representation, then map the
    # quotient and remainder back to Chebyshev coefficients.
    quo, rem = _zseries_div(_cseries_to_zseries(c1), _cseries_to_zseries(c2))
    quo = pu.trimseq(_zseries_to_cseries(quo))
    rem = pu.trimseq(_zseries_to_cseries(rem))
    return quo, rem
def chebpow(c, pow, maxpower=16):
    """Raise a Chebyshev series to a power.

    Returns the Chebyshev series `c` raised to the power `pow`. The
    argument `c` is a sequence of coefficients ordered from low to high.
    i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Chebyshev series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16

    Returns
    -------
    coef : ndarray
        Chebyshev series of power.

    See Also
    --------
    chebadd, chebsub, chebmulx, chebmul, chebdiv

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebpow([1, 2, 3, 4], 2)
    array([15.5, 22. , 16. , 14. , 12.5, 12. ,  8. ])
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # Repeatedly convolve in the z-series representation; this could use
    # binary exponentiation, but powers are capped by `maxpower` anyway.
    zs = _cseries_to_zseries(c)
    acc = zs
    for _ in range(power - 1):
        acc = np.convolve(acc, zs)
    return _zseries_to_cseries(acc)
def chebder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Chebyshev series.
    Returns the Chebyshev series coefficients `c` differentiated `m` times
    along `axis`. At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable). The argument
    `c` is an array of coefficients from low to high degree along each
    axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
    while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
    2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
    ``y``.
    Parameters
    ----------
    c : array_like
        Array of Chebyshev series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change of
        variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).
        .. versionadded:: 1.7.0
    Returns
    -------
    der : ndarray
        Chebyshev series of the derivative.
    See Also
    --------
    chebint
    Notes
    -----
    In general, the result of differentiating a C-series needs to be
    "reprojected" onto the C-series basis set. Thus, typically, the
    result of this function is "unintuitive," albeit correct; see Examples
    section below.
    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c = (1,2,3,4)
    >>> C.chebder(c)
    array([ 14.,  12.,  24.])
    >>> C.chebder(c,3)
    array([ 96.])
    >>> C.chebder(c,scl=-1)
    array([-14., -12., -24.])
    >>> C.chebder(c,2,-1)
    array([ 12.,  96.])
    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote boolean/integer coefficient arrays to double so the
        # divisions in the recurrence below are done in floating point.
        c = c.astype(np.double)
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)
    if cnt == 0:
        # Zeroth derivative: return the (possibly promoted) copy as-is.
        return c
    # Work with the differentiation axis in front so plain indexing
    # addresses coefficients of successive degrees.
    c = np.moveaxis(c, iaxis, 0)
    n = len(c)
    if cnt >= n:
        # Differentiating at least as many times as there are terms
        # annihilates the series; keep a single zero coefficient.
        c = c[:1]*0
    else:
        for i in range(cnt):
            # Each pass lowers the degree by one and applies one factor
            # of the change-of-variable scaling.
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # Walk from high degree down. Each c[j] contributes 2*j*c[j]
            # to the derivative coefficient of T_{j-1} and folds a
            # carry of j*c[j]/(j-2) into c[j-2] (from the standard
            # T_j'-recurrence); the j==2 and j==1 terms are handled
            # explicitly below since their carries differ.
            for j in range(n, 2, -1):
                der[j - 1] = (2*j)*c[j]
                c[j - 2] += (j*c[j])/(j - 2)
            if n > 1:
                der[1] = 4*c[2]
            der[0] = c[1]
            c = der
    # Restore the original axis order.
    c = np.moveaxis(c, 0, iaxis)
    return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Chebyshev series.

    Returns the Chebyshev series coefficients `c` integrated `m` times from
    `lbnd` along `axis`. At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable. ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.) The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
    represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
    2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Chebyshev series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s). The value of the first integral at zero
        is the first value in the list, the value of the second integral
        at zero is the second value, etc. If ``k == []`` (the default),
        all constants are set to zero. If ``m == 1``, a single scalar can
        be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).
        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        C-series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
        ``np.ndim(scl) != 0``.

    See Also
    --------
    chebder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note?  Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a`- perhaps not what one would have first thought.

    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set. Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c = (1,2,3)
    >>> C.chebint(c)
    array([ 0.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,3)
    array([ 0.03125   , -0.1875    ,  0.04166667, -0.05208333,  0.01041667,
            0.00625   ])
    >>> C.chebint(c, k=3)
    array([ 3.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,lbnd=-2)
    array([ 8.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,scl=-2)
    array([-1.,  1., -1., -1.])
    """
    # NOTE: the mutable default ``k=[]`` is safe here because k is only
    # read, never mutated in place (it is rebound via ``list(k) + ...``).
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote boolean/integer coefficients to double so the divisions
        # in the integration recurrence are done in floating point.
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if np.ndim(lbnd) != 0:
        raise ValueError("lbnd must be a scalar.")
    if np.ndim(scl) != 0:
        raise ValueError("scl must be a scalar.")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)

    if cnt == 0:
        return c

    # Work with the integration axis in front, and pad the constants so
    # there is exactly one per integration pass.
    c = np.moveaxis(c, iaxis, 0)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # Integrating the zero series just adds the constant.
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            # Integration recurrence: int T_0 = T_1 and, for j >= 2,
            # int T_j = T_{j+1}/(2*(j+1)) - T_{j-1}/(2*(j-1));
            # int T_1 = T_2/4 (plus a constant absorbed below).
            tmp[0] = c[0]*0
            tmp[1] = c[0]
            if n > 1:
                tmp[2] = c[1]/4
            for j in range(2, n):
                # (A leftover unused ``t = c[j]/(2*j + 1)`` statement,
                # flagged FIXME upstream, has been removed here.)
                tmp[j + 1] = c[j]/(2*(j + 1))
                tmp[j - 1] -= c[j]/(2*(j - 1))
            # Choose the constant so the integral equals k[i] at lbnd.
            tmp[0] += k[i] - chebval(lbnd, tmp)
            c = tmp
    c = np.moveaxis(c, 0, iaxis)
    return c
def chebval(x, c, tensor=True):
    """
    Evaluate a Chebyshev series at points x.

    If `c` is of length `n + 1`, this function returns the value:

    .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)

    The parameter `x` is converted to an array only if it is a tuple or a
    list, otherwise it is treated as a scalar. In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.

    If `c` is a 1-D array, then `p(x)` will have the same shape as `x`.  If
    `c` is multidimensional, then the shape of the result depends on the
    value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
    x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape (,).

    Trailing zeros in the coefficients will be used in the evaluation, so
    they should be avoided if efficiency is a concern.

    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray, otherwise
        it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree n are contained in c[n]. If `c` is multidimensional the
        remaining indices enumerate multiple polynomials. In the two
        dimensional case the coefficients may be thought of as stored in
        the columns of `c`.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`. Scalars have dimension 0
        for this action. The result is that every column of coefficients in
        `c` is evaluated for every element of `x`. If False, `x` is broadcast
        over the columns of `c` for the evaluation. This keyword is useful
        when `c` is multidimensional. The default value is True.
        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.

    See Also
    --------
    chebval2d, chebgrid2d, chebval3d, chebgrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.
    """
    c = np.array(c, ndmin=1, copy=True)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote boolean/integer coefficients so divisions elsewhere in
        # the module and mixed arithmetic behave as floating point.
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append one broadcast axis per dimension of x so every column of
        # coefficients is evaluated at every point.
        c = c.reshape(c.shape + (1,)*x.ndim)

    nterms = len(c)
    if nterms == 1:
        b0, b1 = c[0], 0
    elif nterms == 2:
        b0, b1 = c[0], c[1]
    else:
        # Clenshaw recursion run from the highest coefficient downward,
        # using the Chebyshev three-term recurrence T_{n+1} = 2x T_n - T_{n-1}.
        x2 = 2*x
        b0, b1 = c[-2], c[-1]
        for j in range(3, nterms + 1):
            b0, b1 = c[-j] - b1, b0 + b1*x2
    return b0 + b1*x
def chebval2d(x, y, c):
    """
    Evaluate a 2-D Chebyshev series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars and they
    must have the same shape after conversion. In either case, either `x`
    and `y` or their elements must support multiplication and addition both
    with themselves and with the elements of `c`.

    If `c` is a 1-D array a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be c.shape[2:] + x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a list
        or tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and if it isn't an ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than 2 the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Chebyshev series at points formed
        from pairs of corresponding values from `x` and `y`.

    Raises
    ------
    ValueError
        If `x` and `y` do not have the same shape.

    See Also
    --------
    chebval, chebgrid2d, chebval3d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # The original ``np.array((x, y), copy=0)`` raises under NumPy 2.x,
    # where copy=False is strict; convert each argument separately and
    # check shape compatibility explicitly instead.
    x = np.asanyarray(x)
    y = np.asanyarray(y)
    if x.shape != y.shape:
        raise ValueError('x, y are incompatible')
    if x.ndim == 0:
        # Preserve the historical scalar-in/scalar-out behavior.
        x, y = x[()], y[()]
    # Contract the x-degree axis first, then the y-degree axis.
    c = chebval(x, c)
    c = chebval(y, c, tensor=False)
    return c
def chebgrid2d(x, y, c):
    """
    Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),

    where the points `(a, b)` consist of all pairs formed by taking
    `a` from `x` and `b` from `y`. The resulting points form a grid with
    `x` in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape + y.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`. If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Chebyshev series at points in the
        Cartesian product of `x` and `y`.

    See Also
    --------
    chebval, chebval2d, chebval3d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Two tensor-mode evaluations: the first contracts the x-degree axis
    # and appends the x-point axes; the second does the same for y,
    # producing the full outer-product grid.
    return chebval(y, chebval(x, c))
def chebval3d(x, y, z, c):
    """
    Evaluate a 3-D Chebyshev series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or a lists, otherwise they are treated as a scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape.  If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than 3 the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    Raises
    ------
    ValueError
        If `x`, `y`, and `z` do not all have the same shape.

    See Also
    --------
    chebval, chebval2d, chebgrid2d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # The original ``np.array((x, y, z), copy=0)`` raises under NumPy 2.x,
    # where copy=False is strict; convert each argument separately and
    # check shape compatibility explicitly instead.
    x = np.asanyarray(x)
    y = np.asanyarray(y)
    z = np.asanyarray(z)
    if not (x.shape == y.shape == z.shape):
        raise ValueError('x, y, z are incompatible')
    if x.ndim == 0:
        # Preserve the historical scalar-in/scalar-out behavior.
        x, y, z = x[()], y[()], z[()]
    # Contract the degree axes one variable at a time.
    c = chebval(x, c)
    c = chebval(y, c, tensor=False)
    c = chebval(z, c, tensor=False)
    return c
def chebgrid3d(x, y, z, c):
    """
    Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)

    where the points `(a, b, c)` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or a lists, otherwise they are treated as a scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape + y.shape + z.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.

    See Also
    --------
    chebval, chebval2d, chebgrid2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Three tensor-mode evaluations: each contracts one degree axis and
    # appends that variable's point axes, producing the full grid.
    return chebval(z, chebval(y, chebval(x, c)))
def chebvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
    `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = T_i(x),

    where `0 <= i <= deg`. The leading indices of `V` index the elements of
    `x` and the last index is the degree of the Chebyshev polynomial.

    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
    matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
    ``chebval(x, c)`` are the same up to roundoff. This equivalence is
    useful both for least squares fitting and for the evaluation of a large
    number of Chebyshev series of the same degree and sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where The last index is the degree of the
        corresponding Chebyshev polynomial. The dtype will be the same as
        the converted `x`.
    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # The original ``np.array(x, copy=0, ndmin=1)`` raises under NumPy 2.x,
    # where copy=False is strict; asanyarray + atleast_1d keeps the
    # avoid-a-copy intent portably. The ``+ 0.0`` promotes to at least
    # float64 (complex128 for complex input).
    x = np.atleast_1d(np.asanyarray(x)) + 0.0
    dims = (ideg + 1,) + x.shape
    v = np.empty(dims, dtype=x.dtype)
    # Forward three-term recurrence: T_0 = 1, T_1 = x,
    # T_i = 2*x*T_{i-1} - T_{i-2}.
    v[0] = x*0 + 1
    if ideg > 0:
        x2 = 2*x
        v[1] = x
        for i in range(2, ideg + 1):
            v[i] = v[i-1]*x2 - v[i-2]
    # Move the degree axis last so V[..., i] = T_i(x).
    return np.moveaxis(v, 0, -1)
def chebvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y),

    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
    `V` index the points `(x, y)` and the last index encodes the degrees of
    the Chebyshev polynomials.

    If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D Chebyshev
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg([1]+1)`.  The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    chebvander, chebvander3d, chebval2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    ideg = [int(d) for d in deg]
    # Validate with a boolean predicate rather than comparing a list of
    # bools against [1, 1]; this also rejects a wrong number of degrees.
    if len(ideg) != 2 or any(di != d or di < 0 for di, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg

    # The original ``np.array((x, y), copy=0)`` raises under NumPy 2.x,
    # where copy=False is strict; convert each argument separately.
    # ``+ 0.0`` promotes to at least float64.
    x = np.asanyarray(x) + 0.0
    y = np.asanyarray(y) + 0.0
    vx = chebvander(x, degx)
    vy = chebvander(y, degy)
    # Outer product over the two degree axes, then flatten them into one
    # trailing axis in C order: column (degy+1)*i + j holds T_i(x)*T_j(y).
    v = vx[..., None]*vy[..., None, :]
    return v.reshape(v.shape[:-2] + (-1,))
def chebvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),

    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`.  The leading
    indices of `V` index the points `(x, y, z)` and the last index encodes
    the degrees of the Chebyshev polynomials.

    If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and  ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D Chebyshev
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`.  The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    chebvander, chebvander2d, chebval2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    ideg = [int(d) for d in deg]
    # Validate with a boolean predicate rather than comparing a list of
    # bools against [1, 1, 1]; this also rejects a wrong number of degrees.
    if len(ideg) != 3 or any(di != d or di < 0 for di, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg

    # The original ``np.array((x, y, z), copy=0)`` raises under NumPy 2.x,
    # where copy=False is strict; convert each argument separately.
    # ``+ 0.0`` promotes to at least float64.
    x = np.asanyarray(x) + 0.0
    y = np.asanyarray(y) + 0.0
    z = np.asanyarray(z) + 0.0
    vx = chebvander(x, degx)
    vy = chebvander(y, degy)
    vz = chebvander(z, degz)
    # Outer product over the three degree axes, then flatten them into a
    # single trailing axis in C order.
    v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
    return v.reshape(v.shape[:-3] + (-1,))
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
Return the coefficients of a Chebyshev series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer,
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (M,) or (M, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
polyfit, legfit, lagfit, hermfit, hermefit
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
chebweight : Chebyshev weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Chebyshev series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = chebvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = chebvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax + 1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def chebcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[0] = np.sqrt(.5)
top[1:] = 1/2
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
return mat
def chebroots(c):
    """
    Compute the roots of a Chebyshev series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * T_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, lagroots, hermroots, hermeroots

    Notes
    -----
    The roots are computed as eigenvalues of the scaled companion matrix;
    roots far from the origin or with multiplicity greater than 1 may carry
    larger numerical errors. Isolated roots near the origin can be polished
    with a few Newton iterations.

    Examples
    --------
    >>> import numpy.polynomial.chebyshev as cheb
    >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
    array([ -5.00000000e-01,   2.60860684e-17,   1.00000000e+00])
    """
    # Normalize/trim the coefficient array.
    [c] = pu.as_series([c])
    deg = len(c) - 1
    if deg < 1:
        # Constant (or empty) series: no roots.
        return np.array([], dtype=c.dtype)
    if deg == 1:
        return np.array([-c[0]/c[1]])
    # General case: eigenvalues of the scaled companion matrix.
    roots = la.eigvals(chebcompanion(c))
    roots.sort()
    return roots
def chebinterpolate(func, deg, args=()):
"""Interpolate a function at the Chebyshev points of the first kind.
Returns the Chebyshev series that interpolates `func` at the Chebyshev
points of the first kind in the interval [-1, 1]. The interpolating
series tends to a minmax approximation to `func` with increasing `deg`
if the function is continuous in the interval.
.. versionadded:: 1.14.0
Parameters
----------
func : function
The function to be approximated. It must be a function of a single
variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
extra arguments passed in the `args` parameter.
deg : int
Degree of the interpolating polynomial
args : tuple, optional
Extra arguments to be used in the function call. Default is no extra
arguments.
Returns
-------
coef : ndarray, shape (deg + 1,)
Chebyshev coefficients of the interpolating series ordered from low to
high.
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromfunction(lambda x: np.tanh(x) + 0.5, 8)
array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17,
-5.42457905e-02, -2.71387850e-16, 4.51658839e-03,
2.46716228e-17, -3.79694221e-04, -3.26899002e-16])
Notes
-----
The Chebyshev polynomials used in the interpolation are orthogonal when
sampled at the Chebyshev points of the first kind. If it is desired to
constrain some of the coefficients they can simply be set to the desired
value after the interpolation, no new interpolation or fit is needed. This
is especially useful if it is known apriori that some of coefficients are
zero. For instance, if the function is even then the coefficients of the
terms of odd degree in the result can be set to zero.
"""
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int")
if deg < 0:
raise ValueError("expected deg >= 0")
order = deg + 1
xcheb = chebpts1(order)
yfunc = func(xcheb, *args)
m = chebvander(xcheb, deg)
c = np.dot(m.T, yfunc)
c[0] /= order
c[1:] /= 0.5*order
return c
def chebgauss(deg):
    """
    Gauss-Chebyshev quadrature.

    Computes sample points and weights that integrate polynomials of degree
    :math:`2*deg - 1` or less exactly over :math:`[-1, 1]` with the weight
    function :math:`f(x) = 1/\\sqrt{1 - x^2}`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----
    .. versionadded:: 1.7.0

    Closed-form rule: with n = `deg`,

    .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))
    .. math:: w_i = \\pi / n
    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a non-negative integer")
    # Closed-form nodes (cosines of odd multiples of pi/2n) and flat weights.
    odd = np.arange(1, 2*ideg, 2)
    x = np.cos(np.pi * odd / (2.0*ideg))
    w = np.full(ideg, np.pi/ideg)
    return x, w
def chebweight(x):
    """
    The weight function of the Chebyshev polynomials.

    The weight function is :math:`1/\\sqrt{1 - x^2}` on the interval
    :math:`[-1, 1]`; the Chebyshev polynomials are orthogonal (but not
    normalized) with respect to it.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Factored form sqrt(1+x)*sqrt(1-x) keeps precision near the endpoints.
    return 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
def chebpts1(npts):
"""
Chebyshev points of the first kind.
The Chebyshev points of the first kind are the points ``cos(x)``,
where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the first kind.
See Also
--------
chebpts2
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 1:
raise ValueError("npts must be >= 1")
x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)
return np.cos(x)
def chebpts2(npts):
"""
Chebyshev points of the second kind.
The Chebyshev points of the second kind are the points ``cos(x)``,
where ``x = [pi*k/(npts - 1) for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the second kind.
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 2:
raise ValueError("npts must be >= 2")
x = np.linspace(-np.pi, 0, _npts)
return np.cos(x)
#
# Chebyshev series class
#
class Chebyshev(ABCPolyBase):
    """A Chebyshev series class.

    Provides the standard Python numerical methods '+', '-', '*', '//',
    '%', 'divmod', '**', and '()' as well as the methods listed below.

    Parameters
    ----------
    coef : array_like
        Chebyshev coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0
    """
    # Hooks wiring the generic ABCPolyBase machinery to the module-level
    # Chebyshev routines.
    _add = staticmethod(chebadd)
    _sub = staticmethod(chebsub)
    _mul = staticmethod(chebmul)
    _div = staticmethod(chebdiv)
    _pow = staticmethod(chebpow)
    _val = staticmethod(chebval)
    _int = staticmethod(chebint)
    _der = staticmethod(chebder)
    _fit = staticmethod(chebfit)
    _line = staticmethod(chebline)
    _roots = staticmethod(chebroots)
    _fromroots = staticmethod(chebfromroots)

    @classmethod
    def interpolate(cls, func, deg, domain=None, args=()):
        """Interpolate a function at the Chebyshev points of the first kind.

        Returns the series interpolating `func` at the first-kind Chebyshev
        points scaled and shifted to `domain`. For a continuous function the
        result tends toward a minmax approximation as `deg` grows.

        .. versionadded:: 1.14.0

        Parameters
        ----------
        func : function
            Function of a single variable of the form ``f(x, a, b, c...)``,
            where ``a, b, c...`` are extra arguments passed via `args`.
        deg : int
            Degree of the interpolating polynomial.
        domain : {None, [beg, end]}, optional
            Domain over which `func` is interpolated. The default is None,
            in which case the domain is [-1, 1].
        args : tuple, optional
            Extra arguments forwarded to `func`. Default is none.

        Returns
        -------
        polynomial : Chebyshev instance
            Interpolating Chebyshev instance.

        Notes
        -----
        See `numpy.polynomial.chebfromfunction` for more details.
        """
        if domain is None:
            domain = cls.domain

        def shifted(x):
            # Evaluate func on the window mapped back to the target domain.
            return func(pu.mapdomain(x, cls.window, domain), *args)

        return cls(chebinterpolate(shifted, deg), domain=domain)

    # Class-level configuration consumed by ABCPolyBase.
    nickname = 'cheb'
    domain = np.array(chebdomain)
    window = np.array(chebdomain)
    basis_name = 'T'
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py2@numpy@polynomial@chebyshev.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "KSaid-1/pvhub",
"repo_path": "pvhub_extracted/pvhub-main/README.md",
"type": "Markdown"
}
|
# pvhub
Function to predict peculiar velocities given RA (right ascension), Dec (declination), and z (redshift). All maps are in redshift-space.
All maps are limited to z < 0.067, with a flag in the function to enable extrapolation beyond that limit. The conversion from real space to redshift space, as well as the extrapolation option, is explained by [Carr et al. (2021)](https://ui.adsabs.harvard.edu/abs/2021arXiv211201471C).
## Maps:
The number preceding each map below is the value of the corresponding flag in `pvhub.py`
### default
0. 2MPP-SDSS ([Said et al. 2020](https://ui.adsabs.harvard.edu/abs/2020MNRAS.497.1275S); [Peterson et al. 2021](https://ui.adsabs.harvard.edu/abs/2021arXiv211003487P); [Carr et al. 2021](https://ui.adsabs.harvard.edu/abs/2021arXiv211201471C))
### Other available maps
1. 2MPP-SDSS_6dF ([Said et al. 2020](https://ui.adsabs.harvard.edu/abs/2020MNRAS.497.1275S))
2. 2MRS ([Lilow & Nusser 2021](https://ui.adsabs.harvard.edu/abs/2021MNRAS.507.1557L))
3. 2MPP ([Carrick et al. 2015](https://ui.adsabs.harvard.edu/abs/2015MNRAS.450..317C))
## Cloning
The PV maps are large files, so to properly clone this repository you must download them individually from this webpage or use Git Large File Storage. If you have not used Git LFS before, see https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage or follow the instructions below.
### Mac
brew install git-lfs
git lfs install
git lfs clone https://github.com/KSaid-1/pvhub.git
### Linux
sudo apt install git-lfs
git lfs install
git lfs clone https://github.com/KSaid-1/pvhub.git
### Windows
If you have Git Bash, install Git LFS using the [installer](https://git-lfs.github.com), then continue as normal in Git Bash:
git lfs install
git lfs clone https://github.com/KSaid-1/pvhub.git
|
KSaid-1REPO_NAMEpvhubPATH_START.@pvhub_extracted@pvhub-main@README.md@.PATH_END.py
|
{
"filename": "test_deprecate.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/util/test_deprecate.py",
"type": "Python"
}
|
from textwrap import dedent
import pytest
from pandas.util._decorators import deprecate
import pandas._testing as tm
def new_func():
    """
    This is the summary. The deprecate directive goes next.
    This is the extended summary. The deprecate directive goes before this.
    """
    # NOTE: the docstring above is fixture data -- deprecate() splices a
    # ".. deprecated::" directive between summary and extended summary, and
    # test_deprecate_ok compares the result byte-for-byte. Do not edit it.
    return "new_func called"
def new_func_no_docstring() -> str:
    # Fixture deliberately lacking a docstring: deprecate() must still wrap
    # it when an explicit ``msg`` is supplied (test_deprecate_no_docstring).
    return "new_func_no_docstring called"
def new_func_wrong_docstring() -> str:
    """Summary should be in the next line."""
    # Malformed fixture: the summary sits on the opening quote line, which
    # deprecate() rejects with an AssertionError (test_deprecate_wrong_docstring).
    return "new_func_wrong_docstring called"
def new_func_with_deprecation() -> None:
    """
    This is the summary. The deprecate directive goes next.
    .. deprecated:: 1.0
        Use new_func instead.
    This is the extended summary. The deprecate directive goes before this.
    """
    # Never called: this fixture exists only for its __doc__, which is the
    # expected docstring deprecate() should produce from new_func.
def test_deprecate_ok():
    # A well-documented target: the wrapper must warn, delegate to the new
    # implementation, and splice the deprecation directive into __doc__.
    wrapped = deprecate("depr_func", new_func, "1.0", msg="Use new_func instead.")

    with tm.assert_produces_warning(FutureWarning):
        outcome = wrapped()

    assert outcome == "new_func called"
    assert wrapped.__doc__ == dedent(new_func_with_deprecation.__doc__)
def test_deprecate_no_docstring():
    # A docstring-less target can still be deprecated as long as an explicit
    # message is provided.
    wrapped = deprecate(
        "depr_func", new_func_no_docstring, "1.0", msg="Use new_func instead."
    )

    with tm.assert_produces_warning(FutureWarning):
        outcome = wrapped()

    assert outcome == "new_func_no_docstring called"
def test_deprecate_wrong_docstring():
    # A one-line docstring (summary on the opening line) must be rejected.
    expected = "deprecate needs a correctly formatted docstring"

    with pytest.raises(AssertionError, match=expected):
        deprecate(
            "depr_func", new_func_wrong_docstring, "1.0", msg="Use new_func instead."
        )
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@util@test_deprecate.py@.PATH_END.py
|
{
"filename": "GetCov.py",
"repo_name": "theonefromnowhere/FitCov",
"repo_path": "FitCov_extracted/FitCov-main/FitCov/GetCov.py",
"type": "Python"
}
|
import numpy as np
from pycorr import project_to_multipoles
from iminuit import Minuit
def GetFittedCovariance(estimators,cuts=None,path='',ret_alpha=True,ret_sep=True):
    """Fit the jackknife correction parameter ``alpha`` and return the mean
    fitted covariance of the correlation-function multipoles.

    Parameters
    ----------
    estimators : sequence
        Pair-count estimators accepted by pycorr's ``project_to_multipoles``.
    cuts : tuple or None
        ``(c_min, c_max)`` separation-bin range whose covariance diagonals
        are fitted; defaults to ``(1, 20)``.
    path : str
        NOTE(review): appears unused in this function -- confirm with callers
        before removing.
    ret_alpha : bool
        If True, also return the best-fit ``alpha``.
    ret_sep : bool
        If True, also return the separation bins used in the fit.

    Returns
    -------
    cov_m or [cov_m, alpha, s]
        Mean fitted covariance matrix, optionally followed by the best-fit
        alpha and the separation array, depending on the flags. A single
        value is returned bare rather than in a list.
    """
    #Cuts specify which part of the diagonals of the covariance will be fitted
    if(not (cuts is None)):
        c_max=cuts[1]
        c_min=cuts[0]
    else:
        # Default fitting window over separation bins.
        c_max = 20
        c_min = 1
    res_s=estimators
    pls=[]
    #Creating the rough estimate of a covariance matrix
    # Stack the ell=0,2,4 multipoles (restricted to the cut window) for each
    # estimator realization; their sample covariance is the reference ``etal``.
    for res in res_s:
        ells=[0,2,4]
        s, xiell, cov = project_to_multipoles(res, ells=ells)
        s_len=len(s)
        pls.append(np.concatenate([xiell[0][c_min:c_max],xiell[1][c_min:c_max],xiell[2][c_min:c_max]]))
    etal = np.cov(pls,rowvar=False,ddof=1)
    s = s[c_min:c_max]
    # Flat indices of the cut window within the concatenated (ell0,ell2,ell4) vector.
    inds=np.concatenate([np.arange(c_min,c_max),s_len+np.arange(c_min,c_max),2*s_len+np.arange(c_min,c_max)])
    cov_jk=[]
    #Creating likelihood to use for the fitting
    # Leave-one-out (jackknife) diagonals used to estimate the scatter of the
    # covariance estimate itself.
    for i in range(len(res_s)):
        mask = np.ones(len(res_s),dtype='bool')
        mask[i] = False
        cov_jk.append(np.diag(np.cov(np.array(pls)[mask],rowvar=False,ddof=1)))
    ss = np.concatenate([s,s,s])
    #Covariance of covariances for the likelihood
    cov_cov = np.linalg.pinv(np.cov(ss**2*cov_jk,rowvar=False))/len(res_s)
    def likelihood(alpha):
        # Chi-square between the diagonal of the alpha-corrected mean
        # covariance and the reference diagonal, weighted by cov_cov.
        cov_s=[]
        for res in res_s:
            s, xiell, cov = project_to_multipoles(res, ells=ells,correction = alpha)
            cov_s.append(cov)
        cov_m = np.mean(cov_s,axis=0)
        #The likelihood of the sample is quite weird by construction, and small values of chi2 appear. So, in order for the iminuit to resolve it properly, without complications, a scaling factor is added.
        chi2 = 10000000*(ss**2*np.diag(cov_m)[inds]-ss**2*np.diag(etal))@np.linalg.pinv(cov_cov)@(ss**2*np.diag(cov_m)[inds]-ss**2*np.diag(etal)).T
        return chi2
    print('Fitting alpha (Will take some time)')
    m = Minuit(likelihood, alpha=0.5)
    #Fitting with the help of iminuit
    m.limits=[-3,10]
    m.migrad()
    cov_s=[]
    #the final mean with the best-fit alpha is created
    for res in res_s:
        s, xiell, cov = project_to_multipoles(res, ells=ells,correction = m.values[0])
        cov_s.append(cov)
    cov_m = np.mean(cov_s,axis=0)
    print('Done')
    # Assemble the return value according to the output flags.
    out = []
    out.append(cov_m)
    if(ret_alpha):
        out.append(m.values['alpha'])
    if(ret_sep):
        out.append(s)
    if(len(out)==1):
        return out[0]
    return out
|
theonefromnowhereREPO_NAMEFitCovPATH_START.@FitCov_extracted@FitCov-main@FitCov@GetCov.py@.PATH_END.py
|
{
"filename": "azel.py",
"repo_name": "CMB-S4/spt3g_software",
"repo_path": "spt3g_software_extracted/spt3g_software-master/maps/python/azel.py",
"type": "Python"
}
|
import numpy as np
import os
from .. import core
__all__ = [
"convert_azel_to_radec",
"convert_radec_to_azel",
"convert_radec_to_gal",
"convert_gal_to_radec",
"LocalToAstronomicalPointing",
"EquatorialToGalacticPointing",
]
def get_location(location="spt"):
    """
    Return the astropy EarthLocation for the given location name.

    Arguments
    ---------
    location : str or EarthLocation instance
        If a string, must be a recognized location name. Currently
        only "spt" is supported. An EarthLocation instance is passed
        through unchanged.
    """
    from astropy.coordinates import EarthLocation
    import astropy.units

    # Already-resolved locations pass straight through.
    if isinstance(location, EarthLocation):
        return location

    name = str(location).lower()
    if name == "spt":
        # South Pole Telescope site.
        return EarthLocation(
            lat=-89.991066 * astropy.units.deg,
            lon=-44.65 * astropy.units.deg,
            height=2835.0 * astropy.units.meter,
        )
    raise NotImplementedError
# Module-level flag: set True once an IERS access strategy has been verified,
# so subsequent check_iers() calls skip the (potentially slow) probing below.
iers_checked = False
def check_iers(mjd):
    """
    Check whether IERS calculations will work, and load an IERS database file
    from backup if all else fails.
    Arguments
    ---------
    mjd : Array-like
        MJD timestamps for which an IERS calculation must be computed.
    Returns
    -------
    t : astropy.time.Time instance
        Time instance for the input MJD(s).
    """
    from astropy.utils import iers
    import astropy.time
    global iers_checked
    # Fast path: a previous call already validated IERS access.
    if iers_checked:
        return astropy.time.Time(mjd, format="mjd")
    # Optionally override the IERS download URL / timeout from environment
    # variables; best-effort, so failures here are deliberately swallowed.
    try:
        if os.getenv("SPT3G_IERS_AUTO_URL"):
            iers.conf.iers_auto_url = os.getenv("SPT3G_IERS_AUTO_URL")
        if os.getenv("SPT3G_IERS_REMOTE_TIMEOUT"):
            iers.conf.remote_timeout = float(os.getenv("SPT3G_IERS_REMOTE_TIMEOUT"))
    except:
        pass
    # Probe with the latest timestamp only; accessing .ut1 forces the IERS
    # table lookup that would otherwise fail later.
    mjd1 = np.atleast_1d(mjd)[-1]
    t1 = astropy.time.Time(mjd1, format="mjd")
    # check if accessing the IERS table outright works.
    try:
        t1.ut1
        iers_checked = True
        return astropy.time.Time(mjd, format="mjd")
    except:
        pass
    # if that fails, allow extrapolation
    iers.conf.auto_max_age = None
    t1 = astropy.time.Time(mjd1, format="mjd")
    try:
        t1.ut1
        core.log_warn("IERS auto update failed, allowing extrapolation", unit="IERS")
        iers_checked = True
        return astropy.time.Time(mjd, format="mjd")
    except:
        pass
    # and if that fails, use a locally cached file that is hopefully setup correctly.
    # No try/except here: if the bundled table also fails there is no fallback
    # left, so the exception propagates.
    fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), "finals2000A.all")
    iers.conf.auto_download = False
    iers.IERS.iers_table = iers.IERS_A.open(fname)
    t1 = astropy.time.Time(mjd1, format="mjd")
    t1.ut1
    core.log_warn("Using IERS table from local cache {}".format(fname), unit="IERS")
    iers_checked = True
    return astropy.time.Time(mjd, format="mjd")
def convert_deg(d, system="g3"):
    """
    Convert the input array to the appropriate system of units.

    Arguments
    ---------
    d : array_like
        Data vector
    system : str
        System to convert to, either "g3" or "astropy". Assumes conversion
        from the other system.
    """
    import astropy.units

    target = str(system).lower()
    if target == "astropy":
        # Strip G3 degree units, attach astropy degree units.
        return np.asarray(d) / core.G3Units.deg * astropy.units.deg
    if target == "g3":
        # Strip astropy degree units, attach G3 degree units.
        return np.asarray(d / astropy.units.deg) * core.G3Units.deg
    raise NotImplementedError
@core.usefulfunc
def convert_azel_to_radec(az, el, location="spt", mjd=None):
    """
    Convert timestreams of local azimuth and elevation to right ascension and
    declination.

    Arguments
    ---------
    az, el : np.ndarray or G3Timestream
        Array of local coordinates. If inputs are G3Timestream objects,
        G3Timestreams are also returned. The inputs are not modified.
    location : str or astropy.coordinates.EarthLocation instance
        The telescope location on Earth.
    mjd : np.ndarray
        An array of times for each az/el sample. If input az and el
        are not G3Timestreams, this argument is required.

    Returns
    -------
    ra, dec : np.ndarray or G3Timestream
        Samples with elevation outside [-90, 90] deg have dec set to NaN.
    """
    import astropy.coordinates

    singleton = False
    if isinstance(az, core.G3Timestream):
        assert az.start == el.start
        assert az.stop == el.stop
        assert az.n_samples == el.n_samples
        mjd = np.asarray([i.mjd for i in az.times])
    else:
        try:
            len(az)
        except TypeError:
            singleton = True
        if singleton:
            az = np.atleast_1d(az)
            el = np.atleast_1d(el)
            mjd = np.atleast_1d(mjd)
    assert len(az) == len(el)

    t = check_iers(mjd)

    # BUG FIX: work on a copy of the elevation values so that zeroing the
    # out-of-range samples below no longer mutates the caller's array or
    # timestream in place.
    el = np.array(el, dtype=float, copy=True)

    # record locations of bad elevation values to mark them later
    badel_inds = np.where(
        (el < -90.0 * core.G3Units.deg) | (el > 90.0 * core.G3Units.deg)
    )
    # temporarily zero the bad samples so the coordinate transform succeeds;
    # the corresponding outputs are NaN'd below
    el[badel_inds] = 0.0 * core.G3Units.deg

    k = astropy.coordinates.AltAz(
        az=convert_deg(az, "astropy"),
        alt=convert_deg(el, "astropy"),
        obstime=t,
        location=get_location(location),
        pressure=0,
    )
    kt = k.transform_to(astropy.coordinates.FK5())
    ra = convert_deg(kt.ra, "g3")
    dec = convert_deg(kt.dec, "g3")
    # mark samples with invalid input elevation
    dec[badel_inds] = np.nan

    if isinstance(az, core.G3Timestream):
        ra = core.G3Timestream(ra)
        dec = core.G3Timestream(dec)
        ra.start = dec.start = az.start
        ra.stop = dec.stop = az.stop
    elif singleton:
        ra = ra[0]
        dec = dec[0]
    return (ra, dec)
@core.usefulfunc
def convert_radec_to_azel(ra, dec, location="spt", mjd=None):
    """
    Convert timestreams of right ascension and declination to local
    azimuth and elevation.

    Arguments
    ---------
    ra, dec : np.ndarray or G3Timestream
        Array of Equatorial sky coordinates. If inputs are G3Timestream
        objects, G3Timestreams are also returned.
    location : str or astropy.coordinates.EarthLocation instance
        The telescope location on Earth.
    mjd : np.ndarray
        An array of times for each ra/dec sample. If input ra and dec
        are not G3Timestreams, this argument is required.

    Returns
    -------
    az, el : np.ndarray or G3Timestream
    """
    import astropy.coordinates

    scalar_input = False
    if isinstance(ra, core.G3Timestream):
        # Timestreams carry their own sampling; require matching spans.
        assert ra.start == dec.start
        assert ra.stop == dec.stop
        assert ra.n_samples == dec.n_samples
        mjd = np.asarray([tick.mjd for tick in ra.times])
    else:
        try:
            len(ra)
        except TypeError:
            scalar_input = True
        if scalar_input:
            ra = np.atleast_1d(ra)
            dec = np.atleast_1d(dec)
            mjd = np.atleast_1d(mjd)
    assert len(ra) == len(dec)

    t = check_iers(mjd)

    equatorial = astropy.coordinates.FK5(
        ra=convert_deg(ra, "astropy"), dec=convert_deg(dec, "astropy"),
    )
    horizontal = equatorial.transform_to(
        astropy.coordinates.AltAz(
            obstime=t,
            location=get_location(location),
            pressure=0,
        )
    )
    az = convert_deg(horizontal.az, "g3")
    el = convert_deg(horizontal.alt, "g3")

    if isinstance(ra, core.G3Timestream):
        az = core.G3Timestream(az)
        el = core.G3Timestream(el)
        az.start = el.start = ra.start
        az.stop = el.stop = ra.stop
    elif scalar_input:
        az = az[0]
        el = el[0]
    return (az, el)
@core.usefulfunc
def convert_radec_to_gal(ra, dec):
    """
    Convert timestreams of right ascension and declination to Galactic
    longitude and latitude.

    Arguments
    ---------
    ra, dec : np.ndarray or G3Timestream
        Array of Equatorial sky coordinates. If inputs are G3Timestream
        objects, G3Timestreams are also returned.

    Returns
    -------
    glon, glat : np.ndarray or G3Timestream
    """
    import astropy.coordinates

    scalar_input = False
    if isinstance(ra, core.G3Timestream):
        # Timestreams carry their own sampling; require matching spans.
        assert ra.start == dec.start
        assert ra.stop == dec.stop
        assert ra.n_samples == dec.n_samples
    else:
        try:
            len(ra)
        except TypeError:
            scalar_input = True
        if scalar_input:
            ra = np.atleast_1d(ra)
            dec = np.atleast_1d(dec)
    assert len(ra) == len(dec)

    equatorial = astropy.coordinates.FK5(
        ra=convert_deg(ra, "astropy"), dec=convert_deg(dec, "astropy"),
    )
    galactic = equatorial.transform_to(astropy.coordinates.Galactic())
    glon = convert_deg(galactic.l, "g3")
    glat = convert_deg(galactic.b, "g3")

    if isinstance(ra, core.G3Timestream):
        glon = core.G3Timestream(glon)
        glat = core.G3Timestream(glat)
        glon.start = glat.start = ra.start
        glon.stop = glat.stop = ra.stop
    elif scalar_input:
        glon = glon[0]
        glat = glat[0]
    return (glon, glat)
@core.usefulfunc
def convert_gal_to_radec(glon, glat):
    """
    Convert timestreams of Galactic longitude and latitude to right ascension
    and declination.

    Arguments
    ---------
    glon, glat : np.ndarray or G3Timestream
        Array of Galactic sky coordinates. If inputs are G3Timestream
        objects, G3Timestreams are also returned.

    Returns
    -------
    ra, dec : np.ndarray or G3Timestream
    """
    import astropy.coordinates

    singleton = False
    if isinstance(glon, core.G3Timestream):
        assert glon.start == glat.start
        assert glon.stop == glat.stop
        assert glon.n_samples == glat.n_samples
    else:
        try:
            len(glon)
        except TypeError:
            singleton = True
        if singleton:
            glon = np.atleast_1d(glon)
            glat = np.atleast_1d(glat)
    assert len(glon) == len(glat)

    k = astropy.coordinates.Galactic(
        l=convert_deg(glon, "astropy"), b=convert_deg(glat, "astropy"),
    )
    kt = k.transform_to(astropy.coordinates.FK5())
    ra = convert_deg(kt.ra, "g3")
    dec = convert_deg(kt.dec, "g3")

    # BUG FIX: the original checked ``isinstance(ra, core.G3Timestream)``,
    # but ``ra`` is always a freshly computed ndarray at this point, so
    # G3Timestream inputs never produced G3Timestream outputs. Check the
    # *input* instead, matching convert_radec_to_gal and the other
    # converters in this module.
    if isinstance(glon, core.G3Timestream):
        ra = core.G3Timestream(ra)
        dec = core.G3Timestream(dec)
        ra.start = dec.start = glon.start
        ra.stop = dec.stop = glon.stop
    elif singleton:
        ra = ra[0]
        dec = dec[0]
    return (ra, dec)
@core.indexmod
def LocalToAstronomicalPointing(
    frame,
    az_timestream="BoresightAz",
    el_timestream="BoresightEl",
    ra_timestream="BoresightRa",
    dec_timestream="BoresightDec",
    Telescope="spt",
):
    """
    Converts a set of timestreams in Scan frames representing Az and El
    pointing of the telescope into RA and Declination timestreams, stored in
    the frame under their respective names.
    """
    # Only Scan frames carry pointing timestreams; ignore all others.
    if frame.type != core.G3FrameType.Scan:
        return

    az = frame[az_timestream]
    el = frame[el_timestream]
    ra, dec = convert_azel_to_radec(az, el, location=Telescope)

    frame[ra_timestream] = ra
    frame[dec_timestream] = dec
@core.indexmod
def EquatorialToGalacticPointing(
    frame,
    ra_timestream="BoresightRa",
    dec_timestream="BoresightDec",
    glon_timestream="BoresightGalLon",
    glat_timestream="BoresightGalLat",
):
    """
    Converts a set of timestreams in Scan frames representing RA and
    Declination pointing of the telescope into Galactic longitude and
    latitude timestreams, stored in the frame under their respective names.
    """
    # Only Scan frames carry pointing timestreams; ignore all others.
    if frame.type != core.G3FrameType.Scan:
        return

    glon, glat = convert_radec_to_gal(frame[ra_timestream], frame[dec_timestream])

    frame[glon_timestream] = glon
    frame[glat_timestream] = glat
|
CMB-S4REPO_NAMEspt3g_softwarePATH_START.@spt3g_software_extracted@spt3g_software-master@maps@python@azel.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/ext/__init__.py",
"type": "Python"
}
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@ext@__init__.py@.PATH_END.py
|
|
{
"filename": "_cmax.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/marker/line/_cmax.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``scatter.marker.line.cmax`` property."""

    def __init__(self, plotly_name="cmax", parent_name="scatter.marker.line", **kwargs):
        # Pull overridable defaults out of kwargs before forwarding.
        edit_type = kwargs.pop("edit_type", "plot")
        implied_edits = kwargs.pop("implied_edits", {"cauto": False})
        super(CmaxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@marker@line@_cmax.py@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/scattergeo/_hoverlabel.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergeo"
_path_str = "scattergeo.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
    # Item assignment on the parent plotly object validates `val`
    # against the 'bgcolor' color property before storing it.
    self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `bgcolor`.

    The 'bgcolorsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["bgcolorsrc"]

@bgcolorsrc.setter
def bgcolorsrc(self, val):
    self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
    """
    Sets the border color of the hover labels for this trace.

    The 'bordercolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'steelblue', 'rebeccapurple', ...)
      - A list or array of any of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["bordercolor"]

@bordercolor.setter
def bordercolor(self, val):
    self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `bordercolor`.

    The 'bordercolorsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["bordercolorsrc"]

@bordercolorsrc.setter
def bordercolorsrc(self, val):
    self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
    """
    Sets the font used in hover labels.

    The 'font' property is an instance of Font that may be specified as:
      - An instance of :class:`plotly.graph_objs.scattergeo.hoverlabel.Font`
      - A dict of string/value properties that will be passed to the
        Font constructor (e.g. ``color``, ``family``, ``size``,
        ``style``, ``variant``, ``weight``, ``shadow``, ``textcase``,
        ``lineposition``, and the corresponding ``*src`` Chart Studio
        Cloud source references for each).

    Returns
    -------
    plotly.graph_objs.scattergeo.hoverlabel.Font
    """
    return self["font"]

@font.setter
def font(self, val):
    self["font"] = val
# namelength
# ----------
@property
def namelength(self):
    """
    Sets the default length (in number of characters) of the trace
    name in the hover labels for all traces. -1 shows the whole name
    regardless of length; 0-3 show the first 0-3 characters; an
    integer >3 shows the whole name if it is shorter than that many
    characters, otherwise truncates to `namelength - 3` characters
    and adds an ellipsis.

    The 'namelength' property is a integer and may be specified as:
      - An int (or float that will be cast to an int)
        in the interval [-1, 9223372036854775807]
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    int|numpy.ndarray
    """
    return self["namelength"]

@namelength.setter
def namelength(self, val):
    self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `namelength`.

    The 'namelengthsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["namelengthsrc"]

@namelengthsrc.setter
def namelengthsrc(self, val):
    self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Human-readable property summary consumed verbatim by the generated
    # constructor docstring machinery; the text is a runtime value, so it
    # must stay byte-identical to the upstream codegen output.
    return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.
        """
def __init__(
    self,
    arg=None,
    align=None,
    alignsrc=None,
    bgcolor=None,
    bgcolorsrc=None,
    bordercolor=None,
    bordercolorsrc=None,
    font=None,
    namelength=None,
    namelengthsrc=None,
    **kwargs,
):
    """
    Construct a new Hoverlabel object

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`plotly.graph_objs.scattergeo.Hoverlabel`
    align
        Sets the horizontal alignment of the text content
        within hover label box. Has an effect only if the hover
        label text spans two or more lines
    alignsrc
        Sets the source reference on Chart Studio Cloud for
        `align`.
    bgcolor
        Sets the background color of the hover labels for this
        trace
    bgcolorsrc
        Sets the source reference on Chart Studio Cloud for
        `bgcolor`.
    bordercolor
        Sets the border color of the hover labels for this
        trace.
    bordercolorsrc
        Sets the source reference on Chart Studio Cloud for
        `bordercolor`.
    font
        Sets the font used in hover labels.
    namelength
        Sets the default length (in number of characters) of
        the trace name in the hover labels for all traces. -1
        shows the whole name regardless of length. 0-3 shows
        the first 0-3 characters, and an integer >3 will show
        the whole name if it is less than that many characters,
        but if it is longer, will truncate to `namelength - 3`
        characters and add an ellipsis.
    namelengthsrc
        Sets the source reference on Chart Studio Cloud for
        `namelength`.

    Returns
    -------
    Hoverlabel
    """
    super(Hoverlabel, self).__init__("hoverlabel")
    if "_parent" in kwargs:
        # Internal construction path: attach to an existing parent and
        # skip validation/population entirely.
        self._parent = kwargs["_parent"]
        return

    # Validate arg
    # ------------
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        # Shallow-copy so the pops below don't mutate the caller's dict.
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.scattergeo.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.Hoverlabel`"""
        )

    # Handle skip_invalid
    # -------------------
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate data dict with properties
    # ----------------------------------
    # An explicitly passed keyword argument overrides the same key in
    # `arg`; None values are skipped.  This loop replaces nine
    # copy-pasted pop/override/assign stanzas with identical semantics.
    for _prop, _kwarg in (
        ("align", align),
        ("alignsrc", alignsrc),
        ("bgcolor", bgcolor),
        ("bgcolorsrc", bgcolorsrc),
        ("bordercolor", bordercolor),
        ("bordercolorsrc", bordercolorsrc),
        ("font", font),
        ("namelength", namelength),
        ("namelengthsrc", namelengthsrc),
    ):
        _v = arg.pop(_prop, None)
        _v = _kwarg if _kwarg is not None else _v
        if _v is not None:
            self[_prop] = _v

    # Process unknown kwargs
    # ----------------------
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    # ------------------
    self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@scattergeo@_hoverlabel.py@.PATH_END.py
|
{
"filename": "_ticklabelstep.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterternary/marker/colorbar/_ticklabelstep.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabelstepValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for `scatterternary.marker.colorbar.ticklabelstep`."""

    def __init__(
        self,
        plotly_name="ticklabelstep",
        parent_name="scatterternary.marker.colorbar",
        **kwargs,
    ):
        # Same defaults as the generated code: colorbar-level edits,
        # minimum allowed value of 1.  setdefault + **kwargs delivers the
        # identical keyword set to the base class as the pop-and-pass form.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 1)
        super(TicklabelstepValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterternary@marker@colorbar@_ticklabelstep.py@.PATH_END.py
|
{
"filename": "test_graph_laplacian.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/sparse/csgraph/tests/test_graph_laplacian.py",
"type": "Python"
}
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
from scipy import sparse
from scipy.sparse import csgraph
def check_int_type(mat):
    """Return True when ``mat`` holds a signed-integer or C-uint dtype."""
    dt = mat.dtype
    return bool(np.issubdtype(dt, np.signedinteger) or np.issubdtype(dt, np.uint))
def test_laplacian_value_error():
    """csgraph.laplacian must reject non-square / non-2-D inputs of any dtype."""
    bad_shapes = (
        [1, 1],                    # 1-D
        [[[1]]],                   # 3-D
        [[1, 2, 3], [4, 5, 6]],    # rectangular
        [[1, 2], [3, 4], [5, 5]],  # rectangular
    )
    for dtype in (int, float, complex):
        for shape in bad_shapes:
            with assert_raises(ValueError):
                csgraph.laplacian(np.array(shape, dtype=dtype))
def _explicit_laplacian(x, normed=False):
if sparse.issparse(x):
x = x.toarray()
x = np.asarray(x)
y = -1.0 * x
for j in range(y.shape[0]):
y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
if normed:
d = np.diag(y).copy()
d[d == 0] = 1.0
y /= d[:,None]**.5
y /= d[None,:]**.5
return y
def _check_symmetric_graph_laplacian(mat, normed, copy=True):
    """Compare csgraph.laplacian with the brute-force reference on a symmetric graph."""
    if not hasattr(mat, 'shape'):
        # Parametrized as a source-code string: build the actual matrix.
        mat = eval(mat, dict(np=np, sparse=sparse))

    if sparse.issparse(mat):
        sp_mat = mat
        mat = sp_mat.toarray()
    else:
        sp_mat = sparse.csr_matrix(mat)

    mat_copy = np.copy(mat)
    sp_mat_copy = sparse.csr_matrix(sp_mat, copy=True)

    n_nodes = mat.shape[0]
    explicit_laplacian = _explicit_laplacian(mat, normed=normed)
    laplacian = csgraph.laplacian(mat, normed=normed, copy=copy)
    sp_laplacian = csgraph.laplacian(sp_mat, normed=normed,
                                     copy=copy)

    if copy:
        # copy=True must leave the inputs untouched.
        assert_allclose(mat, mat_copy)
        _assert_allclose_sparse(sp_mat, sp_mat_copy)
    else:
        # copy=False may write the result into the input in place
        # (except when normalization forces a dtype promotion).
        if not (normed and check_int_type(mat)):
            assert_allclose(laplacian, mat)
            if sp_mat.format == 'coo':
                _assert_allclose_sparse(sp_laplacian, sp_mat)

    assert_allclose(laplacian, sp_laplacian.toarray())

    for tested in (laplacian, sp_laplacian.toarray()):
        if not normed:
            # Rows/columns of an unnormalized Laplacian sum to zero.
            assert_allclose(tested.sum(axis=0), np.zeros(n_nodes))
        assert_allclose(tested.T, tested)
        assert_allclose(tested, explicit_laplacian)
def test_symmetric_graph_laplacian():
    """Run the symmetric-graph checks over assorted dense/sparse inputs."""
    symmetric_mats = (
        'np.arange(10) * np.arange(10)[:, np.newaxis]',
        'np.ones((7, 7))',
        'np.eye(19)',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4))',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()',
        'np.vander(np.arange(4)) + np.vander(np.arange(4)).T',
    )
    for mat_expr in symmetric_mats:
        for normed in (True, False):
            for copy in (True, False):
                _check_symmetric_graph_laplacian(mat_expr, normed, copy)
def _assert_allclose_sparse(a, b, **kwargs):
# helper function that can deal with sparse matrices
if sparse.issparse(a):
a = a.toarray()
if sparse.issparse(b):
b = b.toarray()
assert_allclose(a, b, **kwargs)
def _check_laplacian_dtype_none(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """With dtype=None the output dtype follows promotion rules:
    normalizing an integer graph promotes to float64, otherwise the
    input dtype is preserved."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=None,
    )
    if normed and check_int_type(mat):
        # Division by degrees forces float64.
        assert L.dtype == np.float64
        assert d.dtype == np.float64
    else:
        assert L.dtype == dtype
        assert d.dtype == dtype
        desired_L = np.asarray(desired_L).astype(dtype)
        desired_d = np.asarray(desired_d).astype(dtype)
    _assert_allclose_sparse(L, desired_L, atol=1e-12)
    _assert_allclose_sparse(d, desired_d, atol=1e-12)

    if not copy and not (normed and check_int_type(mat)):
        # copy=False should have written the Laplacian into the input.
        if type(mat) is np.ndarray:
            assert_allclose(L, mat)
        elif mat.format == "coo":
            _assert_allclose_sparse(L, mat)
def _check_laplacian_dtype(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """With an explicit dtype, the output dtype must match it exactly."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
    )
    assert L.dtype == dtype
    assert d.dtype == dtype
    _assert_allclose_sparse(L, np.asarray(desired_L).astype(dtype), atol=1e-12)
    _assert_allclose_sparse(d, np.asarray(desired_d).astype(dtype), atol=1e-12)

    if not copy and not (normed and check_int_type(mat)):
        # copy=False should have written the Laplacian into the input.
        if type(mat) is np.ndarray:
            assert_allclose(L, mat)
        elif mat.format == 'coo':
            _assert_allclose_sparse(L, mat)
INT_DTYPES = {np.intc, np.int_, np.longlong}
REAL_DTYPES = {np.single, np.double, np.longdouble}
COMPLEX_DTYPES = {np.csingle, np.cdouble, np.clongdouble}
# use sorted tuple to ensure fixed order of tests
# (the three sets are disjoint, so symmetric difference ^ acts as a union here)
DTYPES = tuple(sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str))
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("arr_type", [np.array,
                                      sparse.csr_matrix,
                                      sparse.coo_matrix,
                                      sparse.csr_array,
                                      sparse.coo_array])
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
def test_asymmetric_laplacian(use_out_degree, normed,
                              copy, dtype, arr_type):
    """Check the Laplacian of a fixed directed (asymmetric) graph against
    hand-computed expectations, for every combination of in/out-degree,
    normalization, copy mode, dtype, and array container."""
    # adjacency matrix
    A = [[0, 1, 0],
         [4, 2, 0],
         [0, 0, 0]]
    A = arr_type(np.array(A), dtype=dtype)
    A_copy = A.copy()
    # Exactly one of the four branches below matches the parametrization;
    # each hard-codes the expected Laplacian L and degree vector d.
    if not normed and use_out_degree:
        # Laplacian matrix using out-degree
        L = [[1, -1, 0],
             [-4, 4, 0],
             [0, 0, 0]]
        d = [1, 4, 0]
    if normed and use_out_degree:
        # normalized Laplacian matrix using out-degree
        L = [[1, -0.5, 0],
             [-2, 1, 0],
             [0, 0, 0]]
        d = [1, 2, 1]
    if not normed and not use_out_degree:
        # Laplacian matrix using in-degree
        L = [[4, -1, 0],
             [-4, 1, 0],
             [0, 0, 0]]
        d = [4, 1, 0]
    if normed and not use_out_degree:
        # normalized Laplacian matrix using in-degree
        L = [[1, -0.5, 0],
             [-2, 1, 0],
             [0, 0, 0]]
        d = [2, 1, 1]
    # dtype=None: the output dtype follows NumPy promotion rules.
    _check_laplacian_dtype_none(
        A,
        L,
        d,
        normed=normed,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
        arr_type=arr_type,
    )
    # Explicit dtype: A_copy is used because the copy=False path above
    # may have overwritten A in place.
    _check_laplacian_dtype(
        A_copy,
        L,
        d,
        normed=normed,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
        arr_type=arr_type,
    )
@pytest.mark.parametrize("fmt", ['csr', 'csc', 'coo', 'lil',
                                 'dok', 'dia', 'bsr'])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("copy", [True, False])
def test_sparse_formats(fmt, normed, copy):
    """Every sparse storage format must yield the same Laplacian."""
    graph = sparse.diags([1, 1], [-1, 1], shape=(4, 4), format=fmt)
    _check_symmetric_graph_laplacian(graph, normed, copy)
@pytest.mark.parametrize(
    "arr_type", [np.asarray,
                 sparse.csr_matrix,
                 sparse.coo_matrix,
                 sparse.csr_array,
                 sparse.coo_array]
)
@pytest.mark.parametrize("form", ["array", "function", "lo"])
def test_laplacian_symmetrized(arr_type, form):
    """symmetrized=True must equal the sum of the in-degree Laplacian and
    the transposed out-degree Laplacian, and must match the Laplacian of
    the explicitly symmetrized adjacency matrix, for every output form."""
    # adjacency matrix
    n = 3
    mat = arr_type(np.arange(n * n).reshape(n, n))
    L_in, d_in = csgraph.laplacian(
        mat,
        return_diag=True,
        form=form,
    )
    L_out, d_out = csgraph.laplacian(
        mat,
        return_diag=True,
        use_out_degree=True,
        form=form,
    )
    Ls, ds = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        form=form,
    )
    Ls_normed, ds_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        normed=True,
        form=form,
    )
    # Explicitly symmetrize the adjacency matrix and recompute.
    mat += mat.T
    Lss, dss = csgraph.laplacian(mat, return_diag=True, form=form)
    Lss_normed, dss_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=True,
        form=form,
    )

    assert_allclose(ds, d_in + d_out)
    assert_allclose(ds, dss)
    assert_allclose(ds_normed, dss_normed)

    def as_array(L):
        # "function"/"lo" forms are operators: apply them to the identity
        # to materialize a dense matrix.  (Replaces the original
        # eval()-on-variable-names indirection with direct references.)
        return L if form == "array" else L(np.eye(n, dtype=mat.dtype))

    _assert_allclose_sparse(as_array(Ls), as_array(L_in) + as_array(L_out).T)
    _assert_allclose_sparse(as_array(Ls), as_array(Lss))
    _assert_allclose_sparse(as_array(Ls_normed), as_array(Lss_normed))
@pytest.mark.parametrize(
    "arr_type", [np.asarray,
                 sparse.csr_matrix,
                 sparse.coo_matrix,
                 sparse.csr_array,
                 sparse.coo_array]
)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("symmetrized", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
@pytest.mark.parametrize("form", ["function", "lo"])
def test_format(dtype, arr_type, normed, symmetrized, use_out_degree, form):
    """The callable ("function") and LinearOperator ("lo") output forms
    must agree with the default and explicit "array" forms."""
    n = 3
    mat = [[0, 1, 0], [4, 2, 0], [0, 0, 0]]
    mat = arr_type(np.array(mat), dtype=dtype)
    # Default (dense/sparse) form.
    Lo, do = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
    )
    # Explicit "array" form must match the default.
    La, da = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
        form="array",
    )
    assert_allclose(do, da)
    _assert_allclose_sparse(Lo, La)
    # Operator form under test.
    L, d = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
        form=form,
    )
    assert_allclose(d, do)
    assert d.dtype == dtype
    # Materialize the operator by applying it to the identity matrix.
    Lm = L(np.eye(n, dtype=mat.dtype)).astype(dtype)
    _assert_allclose_sparse(Lm, Lo, rtol=2e-7, atol=2e-7)
    # The operator must also act correctly on a non-square block.
    x = np.arange(6).reshape(3, 2)
    if not (normed and dtype in INT_DTYPES):
        assert_allclose(L(x), Lo @ x)
    else:
        # Normalized Lo is casted to integer, but L() is not
        pass
def test_format_error_message():
    """An unknown `form` keyword must raise a descriptive ValueError."""
    with pytest.raises(ValueError, match="Invalid form: 'toto'"):
        csgraph.laplacian(np.eye(1), form='toto')
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@sparse@csgraph@tests@test_graph_laplacian.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/_dev/__init__.py",
"type": "Python"
}
|
"""
This package contains utilities that are only used when developing astropy
in a copy of the source repository.
These files are not installed, and should not be assumed to exist at runtime.
"""
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@_dev@__init__.py@.PATH_END.py
|
{
"filename": "NIRSpec_pipeline_workbook_nrs1.ipynb",
"repo_name": "Exo-TiC/ExoTiC-JEDI",
"repo_path": "ExoTiC-JEDI_extracted/ExoTiC-JEDI-master/notebooks/ERS/stellar_spectra/NIRSpec_pipeline_workbook_nrs1.ipynb",
"type": "Jupyter Notebook"
}
|
# NIRSpec G395H Pipeline Workbook
## ERS WASP 39b NRS1
```python
# %matplotlib nbagg
```
```python
workbook = 'insert tag for plots'
```
```python
import exotic_jedi as jedi
import numpy as np
import matplotlib.pyplot as plt
import glob
from astropy.io import fits
import pickle
from scipy.optimize import curve_fit
```
```python
# Setting some rc params
plt.rcParams['figure.figsize'] = [10.0, 3.0] # Dimensions
plt.rcParams['figure.dpi'] = 300 # Resolution
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['image.aspect'] = 7 # Aspect ratio
plt.rcParams['lines.linewidth'] = 1
# Copy the colormap before mutating it: modifying a globally registered
# colormap in place is deprecated (MatplotlibDeprecationWarning; an error
# from matplotlib 3.6 on).
cmap = plt.cm.magma.copy()
cmap.set_bad('k',1.)
from matplotlib import cycler
plt.rcParams['image.cmap'] = 'magma' # Colormap.
# Use the string 'none' rather than the None object: this rcParam expects
# a str, and assigning None raises a MatplotlibDeprecationWarning.
plt.rcParams['image.interpolation'] = 'none'
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['axes.prop_cycle'] = cycler(color=['dodgerblue','lawngreen','tomato','darkorchid','gold','lightgray'])
```
/tmp/ipykernel_416540/4030760818.py:10: MatplotlibDeprecationWarning: You are modifying the state of a globally registered colormap. This has been deprecated since 3.3 and in 3.6, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = mpl.cm.get_cmap("magma").copy()
cmap.set_bad('k',1.)
/tmp/ipykernel_416540/4030760818.py:14: MatplotlibDeprecationWarning: Support for setting an rcParam that expects a str value to a non-str value is deprecated since 3.5 and support will be removed two minor releases later.
plt.rcParams['image.interpolation'] = None
## Load in observations
```python
# Load in data
data_files_path = '/YOUR_DATA_PATH_HERE/' #point to where the data is
rateints_files_nrs1 = glob.glob(data_files_path+'*nrs1_stage_1.fits')
rateints_files_nrs2 = glob.glob(data_files_path+'*nrs2_stage_1.fits')
# Grab relevant ancillary files, etc
ancillary_files = '/YOUR_DATA_PATH_HERE/'
times = np.loadtxt(ancillary_files + 'jw01366003001_04101_00001-seg001-003_nrs1_times.txt')
wvl_fits = fits.open(ancillary_files + 'jw02512010001_04102_00001_nrs1_stage_2_wavelengthmap.fits')[0].data
```
```python
# Get the gain from the crds files for error calculation
gain = np.median(fits.open('/YOUR_CRDS_PATH_HERE/'+'jwst_nirspec_gain_nrs1.fits')[1].data)
```
```python
midtime_bjd = times[:,5]
# Get the integration time from the time file for error calculation
integration_time = np.median(abs((times[:,4]-times[:,6])*24*60*60))
print(len(midtime_bjd), integration_time)
```
465 63.158400007523596
Since the observations come down in segments, we want to stitch them back together to run this full dataset.
We'll use Jeff Valenti's unsegment function, which unpacks everything in a nice 3D array for us. We also need to make sure we do this for both the science and data quality flag extensions so we can clean everything up
```python
trimming = 500
wvl_fits = fits.open(ancillary_files + 'jw01366003001_04101_00001-seg001_nrs1_stage_2_wavelengthmap.fits')[0].data
wvl_fits = wvl_fits[:,trimming:]
sci_cube, _ = jedi.unsegment(sorted(rateints_files_nrs1), 1)
sci_cube = sci_cube[:,:,trimming:-5]
dq_cube, size = jedi.unsegment(sorted(rateints_files_nrs1), 3)
dq_cube = dq_cube[:,:,trimming:-5]
print(dq_cube.shape)
```
(465, 32, 1543)
## Quick Look
First let's take a look at the break down of data quality flags. The dq_flag_metric() function tells us the number of pixels in each category, and how many pixels in our entire observation have a DQ flag
```python
jedi.dq_flag_metrics(sci_cube, dq_cube, plot_bit=None)
```
===== DQ flags info =====
Found 20927 pixels with DQ bit=0 name=DO_NOT_USE.
Found 1291 pixels with DQ bit=1 name=SATURATED.
Found 96172 pixels with DQ bit=2 name=JUMP_DET.
Found 0 pixels with DQ bit=3 name=DROPOUT.
Found 0 pixels with DQ bit=4 name=OUTLIER.
Found 0 pixels with DQ bit=5 name=PERSISTENCE.
Found 0 pixels with DQ bit=6 name=AD_FLOOR.
Found 0 pixels with DQ bit=7 name=RESERVED.
Found 0 pixels with DQ bit=8 name=UNRELIABLE_ERROR.
Found 0 pixels with DQ bit=9 name=NON_SCIENCE.
Found 39525 pixels with DQ bit=10 name=DEAD.
Found 0 pixels with DQ bit=11 name=HOT.
Found 0 pixels with DQ bit=12 name=WARM.
Found 8835 pixels with DQ bit=13 name=LOW_QE.
Found 0 pixels with DQ bit=14 name=RC.
Found 0 pixels with DQ bit=15 name=TELEGRAPH.
Found 0 pixels with DQ bit=16 name=NONLINEAR.
Found 0 pixels with DQ bit=17 name=BAD_REF_PIXEL.
Found 0 pixels with DQ bit=18 name=NO_FLAT_FIELD.
Found 8835 pixels with DQ bit=19 name=NO_GAIN_VALUE.
Found 0 pixels with DQ bit=20 name=NO_LIN_CORR.
Found 0 pixels with DQ bit=21 name=NO_SAT_CHECK.
Found 0 pixels with DQ bit=22 name=UNRELIABLE_BIAS.
Found 0 pixels with DQ bit=23 name=UNRELIABLE_DARK.
Found 0 pixels with DQ bit=24 name=UNRELIABLE_SLOPE.
Found 0 pixels with DQ bit=25 name=UNRELIABLE_FLAT.
Found 930 pixels with DQ bit=26 name=OPEN.
Found 6975 pixels with DQ bit=27 name=ADJ_OPEN.
Found 0 pixels with DQ bit=28 name=UNRELIABLE_RESET.
Found 0 pixels with DQ bit=29 name=MSA_FAILED_OPEN.
Found 0 pixels with DQ bit=30 name=OTHER_BAD_PIXEL.
Found 0 pixels with DQ bit=31 name=REFERENCE_PIXEL.
DQ fraction of total pixels=0.799 %
Next we should check out a random image, just to make sure we're seeing what we expect
```python
initial_look = (sci_cube[271])
plt.figure()
plt.imshow((initial_look))#, vmin=0, vmax=4.5)
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="counts", orientation='horizontal')
plt.show()
plt.figure()
plt.imshow(np.log10(initial_look), vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
```

/tmp/ipykernel_416540/2435248013.py:11: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(initial_look), vmin=-1, vmax=2.5)
/tmp/ipykernel_416540/2435248013.py:11: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(initial_look), vmin=-1, vmax=2.5)

yeay! a nirspec!
## Data Quality Flags
Using the jwst pipeline data quality flags, we'll first replace any flagged pixels with the median of its neighbours - so pixels in the same row within a window region
We can specify which flags we want to replace using the bits_to_mask argument, where each number corresponds to the bit for an individual flag
Some key examples are
- 0 DO_NOT_USE, bad pixels (dq flag = 1)
- 1 SATURATED, pixel saturated during exposure (dq flag = 2)
- 10 DEAD, dead pixel (dq flag = 1024)
- 11 HOT, hot pixel
Sometimes the flags seem a little overzealous on the hot pixel one, so maybe check this a couple times before committing!
There's more detail on each of the flags in Table 3 here: https://jwst-pipeline.readthedocs.io/_/downloads/en/latest/pdf/
(this step takes a while, there are a lot of pixels!)
```python
rawest_data = jedi.dq_flat_replace(sci_cube, dq_cube, bits_to_mask=[0,1,10,11,13,19], window_size=4)
```
100%|ββββββββββββββββββββββββββββββββ| 146110/146110 [00:06<00:00, 22561.78it/s]
Replaced 56626 pixels
## Outliers Through Space and Time
(would be a cool movie!)
Now we want to check for any significant outliers throughout the dataset, either pixels that are constantly Not Good (space), or pixels that are not good during a particular integration (time).
We'll replace the outliers with the median of values near them in which ever axis they are outliers
```python
raw_data = jedi.outliers_through_time(rawest_data, window_size=10, n_sig=20, plot=False)
# WINDOW SIZE IS IMPORTANT
```
No more outliers found
In total 353 outliers were found
```python
cleaned_data, counter = jedi.outliers_through_space(raw_data, replace_window=4, search_window=21, poly_order=0, n_sig=6, plot=True)
```
100%|βββββββββββββββββββββββββββββββββββββββββ| 465/465 [01:58<00:00, 3.93it/s]

We can plot up that same image from before at each stage of the cleaning to watch how each step takes care of noisy regions
```python
plt.figure()
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
plt.figure()
plt.imshow(np.log10(rawest_data[113]))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
plt.figure()
plt.imshow(np.log10(raw_data[113]))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
plt.figure()
plt.imshow(np.log10(cleaned_data[113]))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
```
/tmp/ipykernel_416540/3584592247.py:2: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_416540/3584592247.py:2: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)

/tmp/ipykernel_416540/3584592247.py:10: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(rawest_data[113]))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_416540/3584592247.py:10: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(rawest_data[113]))#, vmin=-1, vmax=2.5)

/tmp/ipykernel_416540/3584592247.py:18: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(raw_data[113]))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_416540/3584592247.py:18: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(raw_data[113]))#, vmin=-1, vmax=2.5)

/tmp/ipykernel_416540/3584592247.py:26: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(cleaned_data[113]))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_416540/3584592247.py:26: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(cleaned_data[113]))#, vmin=-1, vmax=2.5)

## Find the aperture
Here we want to fit a gaussian to each column of the test image to get the center and width, and then fit a polynomial to each of those across the x axis
```python
test_image = cleaned_data[100].copy() #/ flat # do our flat field correction on our test image
# Might need to vary these initially to get the thing going
ap_start = 0 # x pixel where we want to start hunting for the trace
ap_end = 1543 # x pixel where the trace ends (or just the edge of the detector probs for g395h)
init_guess = 0.7 # how wide do we think the trace is
# Will want to test different values of these to make sure getting the best lcs
polynomial_order = [4, 4] # what order polynomials to use when fitting the trace position and its width
median_filter_window = 5 # MUST BE ODD NUMBER window size applied to the median filter that smooths the trace widths
aperture_width = 5 # number of fwhms to extend the aperture to
extrapolate_method = 'flatten' # 'flatten', 'continue' or None
continue_value = [0,0]
trace_falls_off = True # set this to True if the trace falls off the top/bottom of the detector
# this will enable the aperture to default to the top and bottom edge of the detector
# rather than returning an error message
trace_position, trace_width, upper_ap, lower_ap, up_trim, low_trim = jedi.get_aperture(test_image, init_guess, \
ap_start, ap_end, \
poly_orders=polynomial_order, \
width=aperture_width, \
medflt=median_filter_window, \
extrapolate_method=extrapolate_method,\
continue_value=continue_value, \
set_to_edge=trace_falls_off)
plt.figure()
if extrapolate_method=='continue':
ap_start = ap_start-continue_value[0]
ap_end = ap_end+continue_value[1]
plt.plot(np.arange(ap_start,ap_end),trace_position,color='k',ls='--')
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.fill_between(np.arange(0,np.shape(test_image)[1]), upper_ap, lower_ap, facecolor = 'None',edgecolor='w')
plt.fill_between(np.arange(ap_start,ap_end), up_trim, low_trim, facecolor="None", edgecolor='g')
plt.imshow(np.log10(test_image))#,vmin=0,vmax=4.5)
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.title('{}'.format(workbook))
plt.show()
```
/tmp/ipykernel_416540/619788192.py:46: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(test_image))#,vmin=0,vmax=4.5)
/tmp/ipykernel_416540/619788192.py:46: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(test_image))#,vmin=0,vmax=4.5)

```python
pixel_column = 1000
jedi.column_fit_visualiser(test_image, pixel_column, init_guess, aperture_width)
```
## 1/f noise
We're gonna want to do a column by column median, masking the spectral trace area.
Let's define a buffer region above and below the aperture to make sure we're definitely not including any of that spectrum
```python
fnoise_mask, _, _ = jedi.f_noise_zone(test_image, upper_ap, lower_ap, ap_buffers=[5,5], plot=True, set_to_edge=True, vmin=-4, vmax=3.2)
```
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:563: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(im),vmin=vmin,vmax=vmax)
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:563: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(im),vmin=vmin,vmax=vmax)

/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:583: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(np.ma.masked_array(im,mask=mask)), vmin=vmin,vmax=vmax)
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:583: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(np.ma.masked_array(im,mask=mask)), vmin=vmin,vmax=vmax)

Now let's actually remove the 1/f noise from the test image to make sure the region wasn't accidentally clipping anything
```python
clean_test_im = jedi.remove_fnoise(test_image, fnoise_mask, plot=True, vmin=-4, vmax=3.2)
```
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:615: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(im),vmin=vmin,vmax=vmax)
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:615: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(im),vmin=vmin,vmax=vmax)

/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:623: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(clean_im),vmin=vmin,vmax=vmax)
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:623: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(clean_im),vmin=vmin,vmax=vmax)


## Extract!
We want to do an intrapixel extraction so we don't erase all the hard work making a curvy trace
Here we'll quickly compare the intrapixel extraction with a more standard hard edge box so we can see how it impacts the **shape** of the spectrum, not just the total flux!
```python
test_spectrum = jedi.intrapixel_extraction(test_image, upper_ap, lower_ap)
test_spectrum_basic = jedi.basic_extraction(test_image, upper_ap, lower_ap)
figure, ax = plt.subplots(2,1, figsize=(10,7))
ax[0].plot(test_spectrum, alpha=0.8, label="Intrapixel")
ax[0].plot(test_spectrum_basic, alpha=0.8, label="Basic")
ax[0].set_xlabel("Column Number")
ax[0].set_ylabel("Counts")
ax[0].legend()
ax[1].plot(test_spectrum-test_spectrum_basic, marker='.', ls='None')
ax[1].axhline(0,ls=':', color='k', alpha=0.7, zorder=0)
ax[1].set_xlabel("Column Number")
ax[1].set_ylabel("Residuals")
plt.show()
```

The extraction method is important!! The difference isn't just that one gives overall higher flux, they produce spectra with different shapes
## Get! Those! Spectra!
With everything we've set up, it's time to get the spectra and correlate them for x and y pixel shifts
Playing around with the trimming and the high res factors here might be important. In the tuples, each number refers to either the x or y shifts,
i.e., trim_spec=[value_for_x,value_for_y]
```python
all_spectra, all_y_collapse, x_shifts, y_shifts = jedi.get_stellar_spectra(cleaned_data, upper_ap, lower_ap, \
flat=None, f_mask=fnoise_mask, \
extract_method="intrapixel", \
shift=True, interpolate_mode="cubic", \
trim_spec=[3,1], high_res_factor=[0.01,0.01], \
trim_fit=[10,10], \
plot=True, set_to_edge = True)
```
Running intrapixel extraction on 465 spectra
No flat fielding is being performed at this time
1/f noise is being removed
100%|βββββββββββββββββββββββββββββββββββββββββ| 465/465 [00:26<00:00, 17.71it/s]
Now calculating shifts
100%|βββββββββββββββββββββββββββββββββββββββββ| 465/465 [00:18<00:00, 25.43it/s]

Let's do the same again on the original, uncorrected spectra, without the 1/f noise correction so we can see what difference our cleaning processes have made
```python
unclean_spectra, _, _, _ = jedi.get_stellar_spectra(np.nan_to_num(sci_cube), upper_ap, lower_ap, \
flat=None, f_mask=None, \
extract_method="intrapixel", \
shift=True, interpolate_mode="cubic", \
trim_spec=[3,1], high_res_factor=[0.01,0.01], \
trim_fit=[10,10], \
plot=False, set_to_edge = False)
```
Running intrapixel extraction on 465 spectra
No flat fielding is being performed at this time
No 1/f noise correction is being performed
100%|βββββββββββββββββββββββββββββββββββββββββ| 465/465 [00:08<00:00, 57.64it/s]
Now calculating shifts
100%|βββββββββββββββββββββββββββββββββββββββββ| 465/465 [00:17<00:00, 26.69it/s]
## Wavelength Solution
Using the fits file provided in Stage 1 we need to extract out a wavelength solution. The wavelength solution is a 2D array, and we need to pick a value for each pixel - we can use the trace positions for this
```python
plt.figure()
plt.imshow(wvl_fits)
plt.plot(np.arange(ap_start,ap_end),trace_position,color='w',ls='--')
plt.show()
wvls = []
for count, pixel_column in enumerate(np.arange(ap_start,ap_end)):
pixel_row = int(trace_position[count])
wvls.append(wvl_fits[pixel_row, pixel_column])
```

## Diagnostic Plots and Visualisations
```python
# Make some fake array for plotting, can be useful for locating troublesome pixels,
# or if your wavelength solution / time arrays aren't nailed down yet for whatever reason
fake_time = np.arange(np.shape(all_spectra)[0])
fake_wvls = np.arange(np.shape(all_spectra)[1])
```
Check the x and y shifts and fwhm through time
```python
fwhm_wvls = [3.0, 3.5] # at what wavelengths do we want to check the fwhms? you can give as many as you'd like
fwhm_array = jedi.fwhm_through_time_grabber(cleaned_data, wvls, fwhm_wvls)
```
```python
lc = np.sum(all_spectra, axis=1)/np.sum(all_spectra[-1])
plt.figure()
plt.plot(midtime_bjd, lc, color='darkorchid', ls='none',marker='.',alpha=0.2)
plt.ylabel("Normalised Flux")
plt.xlabel("Time (BJD)")
plt.show()
plt.figure()
plt.plot(midtime_bjd, x_shifts - np.median(x_shifts), color = 'lawngreen', label = '$x$', alpha=0.5)
plt.plot(midtime_bjd, y_shifts - np.median(y_shifts), color = 'dodgerblue', label = '$y$', alpha=0.5)
plt.xlabel("Time (BJD)")
plt.ylabel("Pixel Shifts")
plt.legend()
plt.show()
plt.figure()
for column, wvl in enumerate(fwhm_wvls):
plt.plot(midtime_bjd,fwhm_array[:,column]/np.median(fwhm_array[:,column]), label = wvl,alpha=0.5)
plt.legend()
plt.xlabel("Time (BJD)")
plt.ylabel("Normalised FWHM")
plt.show()
```
```python
resolution = 600
bin_time, bin_flux = jedi.binning(np.arange(0,len(fake_time), len(fake_time)/resolution), fake_time, lc)
bin_time, bin_x_shift = jedi.binning(np.arange(0,len(fake_time), len(fake_time)/resolution), fake_time, (x_shifts - np.median(x_shifts)))
bin_time, bin_y_shift = jedi.binning(np.arange(0,len(fake_time), len(fake_time)/resolution), fake_time, (y_shifts - np.median(y_shifts)))
plt.figure()
plt.plot(fake_time, lc, color='darkorchid', ls='none',marker='.',alpha=0.2)
plt.plot(bin_time, bin_flux, color='k', ls='none',marker='.')
plt.ylabel("Normalised Flux")
plt.xlabel("Time (data points)")
plt.show()
plt.figure()
plt.plot(bin_time, bin_x_shift, color = 'lawngreen', label = '$x$', alpha=0.8)
plt.plot(bin_time, bin_y_shift, color = 'dodgerblue', label = '$y$', alpha=0.8)
plt.xlabel("Time (data points)")
plt.ylabel("Pixel Shifts")
plt.legend()
plt.show()
plt.figure()
for column, wvl in enumerate(fwhm_wvls):
bin_time, bin_fwhm = jedi.binning(np.arange(0,len(fake_time), len(fake_time)/resolution), fake_time, (fwhm_array[:,column]/np.median(fwhm_array[:,column])))
plt.plot(bin_time,bin_fwhm, label = wvl,alpha=0.8)
plt.legend()
plt.xlabel("Time (data points)")
plt.ylabel("Normalised FWHM")
plt.show()
```
And finally the white light curve!
```python
pre_transit = 1500 # make sure you set these to be relevant to your observation
pst_transit = 3700
lc = np.sum(all_spectra, axis=1)/np.sum(all_spectra[-1])
plt.figure()
plt.plot(midtime_bjd[10:], lc[10:], ls='none',marker='.')
plt.ylim(0.9985,1.0017)
plt.plot(midtime_bjd[10:pre_transit], lc[10:pre_transit], ls='none',marker='.')
plt.plot(midtime_bjd[pst_transit:], lc[pst_transit:], ls='none',marker='.')
plt.xlabel("Time")
plt.ylabel("Flux")
plt.show()
sdev_lcs = np.std(lc[10:pre_transit])
sdev_lce = np.std(lc[pst_transit:])
print('Sdev before tilt (pre transit) = ', sdev_lcs*1e6, ' ppm')
print('Sdev after tilt (post transit) = ', sdev_lce*1e6, ' ppm')
```

Sdev before tilt (pre transit) = 187.27239050394272 ppm
Sdev after tilt (post transit) = 138.59202297477134 ppm
What happens if we take out a linear slope?
```python
mjds_oot = np.hstack((midtime_bjd[10:pre_transit],midtime_bjd[pst_transit:]))
flux_oot = np.hstack((lc[10:pre_transit],lc[pst_transit:]))
fpopt, fpcov = curve_fit(jedi.linear,mjds_oot,flux_oot)
fyvals = jedi.linear(np.array(midtime_bjd), fpopt[0], fpopt[1])
plt.figure()
plt.plot(midtime_bjd[:], lc[:], ls='none', marker='.', alpha=0.5)
plt.plot(midtime_bjd[:], fyvals[:], linewidth=3)
plt.plot(midtime_bjd[:], (0.999+np.array(lc[:])-fyvals[:]), marker='.', ls='none', alpha=0.5)
plt.show()
```
We can plot the stellar spectra up in 2D time-wavelength space to check what we've done so far. The compare_2d_spectra() function allows us to do this for different stages of the process, so we'll see dead pixels etc disappear between the images.
```python
jedi.compare_2d_spectra(all_spectra[:], unclean_spectra[:], fake_wvls[:], midtime_bjd, \
time_units="BJD", residual_limits=[-0.005,0.005], spectra_limits=[0.975,1.005])
# Picking the residual and spectra limits here can make a big difference in the appearance of the plots,
# especially for very clean or shallow transits
```

We can check the 1/f noise over the whole stack and make a periodogram if we'd like. There isn't much of a difference here because this data was cleaned for 1/f noise at the group level, but for other observations you'll see the before curve is significantly above the after curve on the left hand side of the plot
(this step takes a while)
```python
jedi.check_1f(cleaned_data.copy(), fnoise_mask, stack=True)
```
100%|βββββββββββββββββββββββββββββββββββββββββ| 465/465 [06:43<00:00, 1.15it/s]
0.67878999999968

```python
# Swap some file names around for ease
time_flux = midtime_bjd
wavelength = wvls
flux = all_spectra * integration_time * gain # use the gain so we can calculate simple square root errors
flux_error = np.sqrt(flux)
quality_flag = np.ones(np.shape(all_spectra), dtype=bool) # for Chromatic
x_shift = x_shifts - np.median(x_shifts)
y_shift = y_shifts - np.median(y_shifts)
```
```python
# Save out those xarrays! Here's everything you need to import:
import astropy.units as u
from astropy.utils.misc import JsonCustomEncoder
from astropy.time import Time
import xarray as xr
# put data into a dataset
ds = xr.Dataset(
#now data is a function of two dimensions
data_vars=dict(flux=(["time_flux","wavelength"], flux,{'units': 'electron'}),
flux_error=(["time_flux","wavelength"], flux_error,{'units': 'electron'}),
quality_flag=(["time_flux","wavelength"], quality_flag,{'units': ''}),#unitless!
x_shift=(["time_flux"], x_shift,{'units': ''}),
y_shift=(["time_flux"], y_shift,{'units': ''})
),
coords=dict(
wavelength=(["wavelength"],
wavelength,{'units': 'micron'}),#required*
time_flux=(["time_flux"],
time_flux,{'units': 'bjd'}),#required*
),
##################################################
# You'll want to change this info in particular!!
##################################################
attrs=dict(author="YOUR NAME HERE", #required
contact="youremail@gmail.com", #required,
code="https://github.com/Exo-TiC/ExoTiC-JEDI", #could also insert github link
notes="Using reduction_v7_groupLevelDestriping from Box",
normalised="No",
doi="none",#optional if there is a citation to reference
)
)
# Then save it out! But change the {} to suit yourself
ds.to_netcdf("stellar-spec-{planet}-{mode}-{detector}-exoticjedi-{yourname}.nc")
```
/tmp/ipykernel_1799195/379332143.py:11: RuntimeWarning: invalid value encountered in sqrt
flux_error = np.sqrt(flux)
If xarrays seem a bit daunting right now, pickles are your friend!
```python
output_dict = {
"bjd_midtimes" : time_flux,
"wavlength_um" : wavelength,
"flux" : flux,
'flux_error' : flux_error,
'x_shift' : x_shift,
'y_shift' : y_shift,
'author' : "YOUR NAME HERE",
'contact' : "youremail@gmail.com",
'code' : "exotic-jedi",
'notes' : "Using reduction_v7_groupLevelDestriping from Box",
}
pickle.dump(output_dict, open('insertname.pickle','wb'))
```
|
Exo-TiCREPO_NAMEExoTiC-JEDIPATH_START.@ExoTiC-JEDI_extracted@ExoTiC-JEDI-master@notebooks@ERS@stellar_spectra@NIRSpec_pipeline_workbook_nrs1.ipynb@.PATH_END.py
|
{
"filename": "_alignsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermapbox/hoverlabel/_alignsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``alignsrc`` property of ``scattermapbox.hoverlabel``.

    Accepts a source reference on Chart Studio Cloud; all validation logic
    lives in the ``SrcValidator`` base class.
    """

    def __init__(
        self, plotly_name="alignsrc", parent_name="scattermapbox.hoverlabel", **kwargs
    ):
        # Default the edit type to "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(AlignsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermapbox@hoverlabel@_alignsrc.py@.PATH_END.py
|
{
"filename": "plotspec.py",
"repo_name": "desihub/redrock",
"repo_path": "redrock_extracted/redrock-main/py/redrock/plotspec.py",
"type": "Python"
}
|
"""
redrock.plotspec
================
Visualization tools for plotting spectra.
"""
import numpy as np
from . import zwarning
from .templates import make_fulltype
class PlotSpec(object):
    """Interactive matplotlib viewer for redrock redshift fits.

    Displays two panels: the chi2(z) scan per spectral type (top) and the
    observed spectrum with the best-fit template overplotted (bottom).
    Keyboard shortcuts navigate between targets and between the ranked
    redshift fits of the current target.
    """

    def __init__(self, targets, templates, zscan, zfit, truth=None, archetypes=False):
        """TODO: document
        """
        #- Isolate imports of optional dependencies
        import matplotlib.pyplot as plt

        #- Only keep targets that are in the zfit table
        keeptargets = list()
        keepids = set(zfit['targetid'])
        for t in targets:
            if t.id in keepids:
                keeptargets.append(t)

        self.targets = keeptargets
        self.templates = templates
        self.archetypes = archetypes
        self.zscan = zscan
        self.zfit = zfit
        self.itarget = 0   # index of the currently displayed target
        self.znum = 0      # index of the currently displayed redshift fit
        self.smooth = 1    # median-filter width used when drawing spectra
        self.truth = truth

        #- Map target ids to their index in self.targets
        self.targetid_to_itarget = {}
        for i, t in enumerate(self.targets):
            self.targetid_to_itarget[t.id] = i

        self._fig = plt.figure()
        self._ax1 = self._fig.add_subplot(211)
        self._ax2 = self._fig.add_subplot(212)

        self._cid = self._fig.canvas.mpl_connect('key_press_event',
            self._onkeypress)

        #- Instructions
        print("---------------------------------------------------------------"
            "----------")
        print("Select window then use keyboard shortcuts to navigate:")
        print(" up/down arrow: previous/next target")
        print(" left/right arrow: previous/next redshift fit for this"
            " target")
        print(" (d)etails")
        print("---------------------------------------------------------------"
            "----------")

        #- Disable some default matplotlib key bindings so that we can use keys
        #- TODO: cache and reset when done
        plt.rcParams['keymap.forward'] = ''
        plt.rcParams['keymap.back'] = ''

        plt.ion()
        self.plot()
        plt.show(block=True)

    def _onkeypress(self, event):
        """Handle keyboard navigation (left/right/up/down arrows and 'd')."""
        ### print('key', event.key)
        if event.key == 'right':
            #- Next ranked redshift fit for this target (wraps around)
            self.znum = (self.znum + 1) % self.nznum
            self.plot(keepzoom=True)
        elif event.key == 'left':
            self.znum = (self.znum - 1) % self.nznum
            self.plot(keepzoom=True)
        elif event.key == 'down':
            if self.itarget == min(len(self.targets), len(self.zscan)) - 1:
                print('At last target')
            else:
                self.znum = 0
                self.itarget += 1
                self.plot()
        elif event.key == 'up':
            if self.itarget == 0:
                print('Already at first target')
            else:
                self.znum = 0
                self.itarget -= 1
                self.plot()
        elif event.key == 'd':
            #- Print the fit-details table for the current target
            target = self.targets[self.itarget]
            zfit = self.zfit[self.zfit['targetid'] == target.id]
            print('target {}'.format(target.id))
            print(zfit['znum', 'spectype', 'z', 'zerr', 'zwarn', 'chi2'])

    def plot(self, keepzoom=False):
        """Redraw both panels for the current target and redshift fit.

        Parameters
        ----------
        keepzoom : bool
            If True, preserve the current axis limits of both panels.
        """
        #- Isolate imports of optional dependencies
        from scipy.signal import medfilt

        target = self.targets[self.itarget]
        zfit = self.zfit[self.zfit['targetid'] == target.id]
        self.nznum = len(zfit)
        zz = zfit[zfit['znum'] == self.znum][0]
        coeff = zz['coeff']
        fulltype = make_fulltype(zz['spectype'], zz['subtype'])

        if self.archetypes:
            dwave = {s.wavehash: s.wave for s in target.spectra}
            tp = self.archetypes.archetypes[zz['spectype']]
        else:
            tp = self.templates[fulltype]
            if tp.template_type != zz['spectype']:
                raise ValueError('spectype {} not in'
                    ' templates'.format(zz['spectype']))

        #----- zscan plot
        if keepzoom:
            force_xlim = self._ax1.get_xlim()
            force_ylim = self._ax1.get_ylim()

        self._ax1.clear()
        for spectype, fmt in [('STAR', 'k-'), ('GALAXY', 'b-'), ('QSO', 'g-')]:
            if spectype in self.zscan[target.id]:
                zx = self.zscan[target.id][spectype]
                #- Raw chi2 drawn faintly; penalized chi2 as the labeled curve
                self._ax1.plot(zx['redshifts'], zx['zchi2'], fmt, alpha=0.2,
                    label='_none_')
                self._ax1.plot(zx['redshifts'], zx['zchi2']+zx['penalty'], fmt,
                    label=spectype)

        self._ax1.plot(zfit['z'], zfit['chi2'], 'r.', label='_none_')
        for row in zfit:
            self._ax1.text(row['z'], row['chi2'], str(row['znum']),
                verticalalignment='top')

        if self.truth is not None:
            i = np.where(self.truth['targetid'] == target.id)[0]
            if len(i) > 0:
                ztrue = self.truth['ztrue'][i[0]]
                self._ax1.axvline(ztrue, color='g', alpha=0.5)
            else:
                print('WARNING: target id {} not in truth'
                    ' table'.format(target.id))

        self._ax1.axvline(zz['z'], color='k', alpha=0.1)
        self._ax1.axhline(zz['chi2'], color='k', alpha=0.1)
        self._ax1.legend()
        self._ax1.set_title('target {} zbest={:.3f} {}'.format(target.id,
            zz['z'], zz['spectype']))
        self._ax1.set_ylabel(r'$\chi^2$')
        self._ax1.set_xlabel('redshift')

        if keepzoom:
            self._ax1.set_xlim(*force_xlim)
            self._ax1.set_ylim(*force_ylim)

        #----- spectrum plot
        if keepzoom:
            force_xlim = self._ax2.get_xlim()
            force_ylim = self._ax2.get_ylim()

        self._ax2.clear()
        ymin = ymax = 0.0
        specs_to_read = target.spectra
        for spec in specs_to_read:
            if self.archetypes:
                mx = tp.eval(zz['subtype'], dwave, coeff, spec.wave, zz['z']) * (1+zz['z'])
            else:
                mx = tp.eval(coeff[0:tp.nbasis], spec.wave, zz['z']) * (1+zz['z'])

            model = spec.R.dot(mx)
            flux = spec.flux.copy()
            isbad = (spec.ivar == 0)
            ## model[isbad] = mx[isbad]
            #- Blank out bad pixels; np.nan replaces the np.NaN alias, which
            #- was removed in NumPy 2.0
            flux[isbad] = np.nan
            self._ax2.plot(spec.wave, medfilt(flux, self.smooth), alpha=0.5)
            self._ax2.plot(spec.wave, medfilt(mx, self.smooth), 'k:', alpha=0.8)
            model[isbad] = np.nan
            self._ax2.plot(spec.wave, medfilt(model, self.smooth), 'k-',
                alpha=0.8)

            if flux[~isbad].size != 0:
                ymin = min(ymin, np.percentile(flux[~isbad], 1))
                ymax = max(ymax, np.percentile(flux[~isbad], 99),
                    np.max(model)*1.05)

        if (ymin == 0.) & (ymax == 0.):
            ymax = 1.

        #- Label object type and redshift
        label = 'znum {} {} z={:.3f}'.format(self.znum, fulltype, zz['z'])
        print('target {} id {} {}'.format(self.itarget, target.id, label))
        ytext = ymin + 0.9*(ymax-ymin)
        self._ax2.text(3800, ytext, label)

        #- ZWARN labels
        if zz['zwarn'] != 0:
            label = list()
            for name, mask in zwarning.ZWarningMask.flags():
                if (zz['zwarn'] & mask) != 0:
                    label.append(name)
            label = '\n'.join(label)
            color = 'r'
        else:
            label = 'ZWARN=0'
            color = 'g'

        self._ax2.text(10000, ytext, label, horizontalalignment='right',
            color=color)

        self._ax2.axhline(0, color='k', alpha=0.2)

        if keepzoom:
            self._ax2.set_xlim(*force_xlim)
            self._ax2.set_ylim(*force_ylim)
        else:
            self._ax2.set_ylim(ymin, ymax)
            self._ax2.set_xlim(3500, 10100)

        self._ax2.set_ylabel('flux')
        self._ax2.set_xlabel('wavelength [A]')

        # self._fig.tight_layout()
        self._fig.canvas.draw()
|
desihubREPO_NAMEredrockPATH_START.@redrock_extracted@redrock-main@py@redrock@plotspec.py@.PATH_END.py
|
{
"filename": "T01generate_events.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioMC/test/emitter/T01generate_events.py",
"type": "Python"
}
|
import numpy as np
from NuRadioReco.utilities import units
from NuRadioMC.EvtGen.generator import write_events_to_hdf5
import logging
logger = logging.getLogger("EventGen")
logging.basicConfig()
VERSION_MAJOR = 1
VERSION_MINOR = 1
def generate_my_events(filename, n_events):
    """
    Event generator skeleton

    Builds the meta attributes and per-event data sets for a set of emitter
    (pulser) events and writes them to an hdf5 event list for NuRadioMC.

    Parameters
    ----------
    filename: string
        the output filename of the hdf5 file
    n_events: int
        number of events to generate
    """
    n_events = int(n_events)

    # Meta attributes: this file describes emitter (pulser) events rather
    # than neutrino interactions.
    attributes = {
        'simulation_mode': "emitter",
        'n_events': n_events,   # the number of events contained in this file
        'start_event_id': 0,
    }

    # Further optional attributes (not needed for this dummy example) would
    # describe the fiducial simulation volume (fiducial_rmin/rmax/zmin/zmax,
    # or the cartesian fiducial_xmin/xmax/ymin/ymax equivalents), the full
    # volume (rmin/rmax/zmin/zmax plus 'volume', or 'area' for surface
    # generation), the energy range ('Emin'/'Emax') and the zenith/azimuth
    # intervals ('thetamin'/'thetamax', 'phimin'/'phimax'). The fiducial
    # volume concept is described in the NuRadioMC paper: only interactions
    # inside this smaller volume are saved, which is useful when simulating
    # secondary interactions.

    # Per-event data sets, filled with dummy values.
    data_sets = {
        # position of the emitting antenna
        "xx": np.ones(n_events) * -1 * units.km,
        "yy": np.zeros(n_events),
        "zz": np.ones(n_events) * -2 * units.km,
        # pulse amplitude of the emitter
        "emitter_amplitudes": np.ones(n_events) * 1000 * units.V,
        # frequency and half width of the emitted pulse
        "emitter_frequency": 0.3 * np.ones(n_events) * units.GHz,
        "emitter_half_width": 1.0 * np.ones(n_events) * units.ns,
        # orientation of the emitting antenna, defined via two vectors that
        # are given as two angles each (see
        # https://nu-radio.github.io/NuRadioReco/pages/detector_database_fields.html);
        # the values below specify a traditional "upright" dipole
        "emitter_orientation_phi": np.zeros(n_events),
        "emitter_orientation_theta": np.zeros(n_events),
        "emitter_rotation_phi": np.zeros(n_events),
        "emitter_rotation_theta": np.ones(n_events) * 90 * units.deg,
        "emitter_antenna_type": ["RNOG_vpol_v1_n1.73"] * n_events,
        "emitter_model": ["delta_pulse"] * n_events,
        # unique id per event group; several entries may share one
        # event_group_id to model multiple showers per event
        "event_group_ids": np.arange(n_events),
    }

    # The remaining parameters are required by NuRadioMC but do not influence
    # the simulated radio signals of emitter events; they are bookkeeping /
    # weight-calculation inputs, so dummy values suffice here.
    data_sets.update({
        # shower type (needed by the Askaryan emission model)
        "shower_type": ['had'] * n_events,
        "shower_energies": np.ones(n_events),
        "shower_ids": np.arange(n_events),
        # shower direction
        "azimuths": np.ones(n_events),
        "zeniths": np.ones(n_events),
        # which interaction this is (only relevant when multiple showers of
        # the same initial neutrino are simulated); always 1 here
        "n_interaction": np.ones(n_events, dtype=int),
        # particle code following the PDG Monte Carlo numbering scheme
        # (https://pdg.lbl.gov/2019/reviews/rpp2019-rev-monte-carlo-numbering.pdf);
        # 12 = electron neutrino. Only used for the "weight" calculation,
        # i.e. the probability of the neutrino reaching the detector.
        "flavors": 12 * np.ones(n_events, dtype=int),
        # neutrino energy, also only used for the weight calculation
        "energies": np.ones(n_events) * 1 * units.EeV,
        # interaction type (CC or NC for neutrinos); informational only
        "interaction_type": np.full(n_events, "nc", dtype='U2'),
        # fraction of the neutrino energy transferred to the hadronic
        # shower; informational only
        "inelasticity": np.ones(n_events),
    })
    # optionally the event weight could be set directly via
    # data_sets["weights"] (useful for non-neutrino particles or
    # calibration setups)

    # write events to file
    write_events_to_hdf5(filename, data_sets, attributes)
# Smoke test: generate a small emitter event file when run as a script.
if __name__ == "__main__":
    generate_my_events(filename="emitter_event_list.hdf5", n_events=20)
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioMC@test@emitter@T01generate_events.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "LR-inaf/pasta-marker",
"repo_path": "pasta-marker_extracted/pasta-marker-main/pastamarkers/__init__.py",
"type": "Python"
}
|
import os

import pastamarkers

# Absolute path to the installed ``pastamarkers`` package directory.
__ROOT__ = os.path.dirname(pastamarkers.__file__)

# Directory holding the bundled marker data files. The trailing separator is
# kept for backward compatibility with callers that concatenate file names
# directly onto this string.
dir_data = os.path.join(__ROOT__, "data", "")
|
LR-inafREPO_NAMEpasta-markerPATH_START.@pasta-marker_extracted@pasta-marker-main@pastamarkers@__init__.py@.PATH_END.py
|
{
"filename": "dm_helpers.py",
"repo_name": "fjankowsk/meertrig",
"repo_path": "meertrig_extracted/meertrig-master/meertrig/dm_helpers.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# 2020 Fabian Jankowski
# Milky Way DM related helper functions.
#
import pygedm
def get_mw_dm(gl, gb, model="ymw16"):
    """
    Determine the Galactic Milky Way contribution to the dispersion measure
    for a given sightline, using the requested free electron density model.

    Parameters
    ----------
    gl: float
        Galactic longitude in degrees.
    gb: float
        Galactic latitude in degrees.
    model: str (default: ymw16)
        The Galactic free electron model to use ("ne2001" or "ymw16").

    Returns
    -------
    dm: float
        The Milky Way DM.

    Raises
    ------
    NotImplementedError
        If the Galactic free electron model `model` is not implemented.
    """
    # validate the model choice up front, before doing any work
    if model not in ("ne2001", "ymw16"):
        raise NotImplementedError(
            "Galactic free electron model not implemented: {0}".format(model)
        )

    # integrate out to 30 kpc, i.e. effectively through the full Galactic
    # electron column along this sightline
    dist = 30 * 1000

    dm, _ = pygedm.dist_to_dm(gl, gb, dist, mode="gal", method=model)

    return dm.value
|
fjankowskREPO_NAMEmeertrigPATH_START.@meertrig_extracted@meertrig-master@meertrig@dm_helpers.py@.PATH_END.py
|
{
"filename": "plot_PT.py",
"repo_name": "Jingxuan97/nemesispy",
"repo_path": "nemesispy_extracted/nemesispy-main/nemesispy/retrieval/plot_PT.py",
"type": "Python"
}
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.interpolate as interpolate
def plot_averaged_TP_ilon(ilon,tmap,longitude_grid,latitude_grid,pressure_grid,
    figname='averaged_TP',dpi=400):
    """
    Plot latitudinally-averaged TP profiles from a 3D temperature model,
    using cos(latitude) as weight.

    Parameters
    ----------
    ilon : int
        Index of the longitude slice to average over latitude.
    tmap : ndarray
        Temperature model indexed as tmap[ilon, ilat, ipress]
        (assumed shape (nlon, nlat, npress) -- confirm against the caller).
    longitude_grid : ndarray
        Longitude grid in degrees (unused; kept for interface compatibility).
    latitude_grid : ndarray
        Latitude grid in degrees.
    pressure_grid : ndarray
        Pressure grid plotted on the (inverted, logarithmic) y axis.
    figname : str
        File name passed to plt.savefig.
    dpi : int
        Resolution of the saved figure.
    """
    # cos(latitude) weights; vectorized replacement of the original
    # per-latitude accumulation loop (same weighted mean over axis 0)
    weights = np.cos(np.asarray(latitude_grid) / 180 * np.pi)
    averaged_TP = weights @ tmap[ilon] / np.sum(weights)

    # NOTE(review): leftover debug output, kept for behavioral compatibility
    print(averaged_TP)

    plt.plot(averaged_TP, pressure_grid)
    plt.gca().invert_yaxis()
    plt.yscale('log')
    plt.savefig(figname, dpi=dpi)
    plt.close()
|
Jingxuan97REPO_NAMEnemesispyPATH_START.@nemesispy_extracted@nemesispy-main@nemesispy@retrieval@plot_PT.py@.PATH_END.py
|
{
"filename": "_marker.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnelarea/_marker.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``marker`` attribute of funnelarea traces."""

    def __init__(self, plotly_name="marker", parent_name="funnelarea", **kwargs):
        # Pull the schema defaults out of kwargs so explicit callers can
        # still override them; remaining kwargs are forwarded untouched.
        data_class_str = kwargs.pop("data_class_str", "Marker")
        data_docs = kwargs.pop(
            "data_docs",
            """
            colors
                Sets the color of each sector. If not
                specified, the default trace color set is used
                to pick the sector colors.
            colorssrc
                Sets the source reference on Chart Studio Cloud
                for `colors`.
            line
                :class:`plotly.graph_objects.funnelarea.marker.
                Line` instance or dict with compatible
                properties
            pattern
                Sets the pattern within the marker.
            """,
        )
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnelarea@_marker.py@.PATH_END.py
|
{
"filename": "py34compat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/setuptools/py2/setuptools/py34compat.py",
"type": "Python"
}
|
import importlib
# Compatibility shim: guarantee a ``module_from_spec`` callable on all
# supported Python versions (the real one was added in Python 3.5).
try:
    # ``importlib.util`` is a submodule; on very old interpreters this import
    # can fail, in which case the AttributeError branch below takes over.
    import importlib.util
except ImportError:
    pass
try:
    # Preferred path: the genuine implementation (Python >= 3.5).
    module_from_spec = importlib.util.module_from_spec
except AttributeError:
    # Fallback for Python 3.4: emulate via the legacy loader API.
    def module_from_spec(spec):
        return spec.loader.load_module(spec.name)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@setuptools@py2@setuptools@py34compat.py@.PATH_END.py
|
{
"filename": "test_GaiaCat1.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/tests/StarCatalog/test_GaiaCat1.py",
"type": "Python"
}
|
import unittest
from tests.TestSupport.Utilities import RedirectStreams
import EXOSIMS.StarCatalog
from EXOSIMS.StarCatalog.GaiaCat1 import GaiaCat1
from EXOSIMS.util.get_module import get_module
import os, sys
import pkgutil
from io import StringIO
import astropy.units as u
from EXOSIMS.util.get_dirs import get_downloads_dir
import shutil
import csv
import numpy as np
from astropy.coordinates import SkyCoord
class TestGaiaCat1(unittest.TestCase):
    """
    Sonny Rappaport, July 2021, Cornell
    This class tests GaiaCat1's initialization.
    """
    def setUp(self):
        """
        test data source file is from the Gaia 2nd data release,
        with the following ADQL query used to gather the data:
        SELECT TOP 1000 gaia_source.source_id,gaia_source.ra,gaia_source.ra_error,
        gaia_source.dec,gaia_source.dec_error,gaia_source.parallax,
        gaia_source.parallax_error,
        gaia_source.astrometric_matched_observations,
        gaia_source.visibility_periods_used,gaia_source.phot_g_mean_mag,
        gaia_source.phot_bp_mean_mag,gaia_source.phot_rp_mean_mag,
        gaia_source.teff_val
        FROM gaiadr2.gaia_source
        WHERE gaia_source.source_id IS NOT NULL
        AND gaia_source.ra IS NOT NULL
        AND gaia_source.ra_error IS NOT NULL
        AND gaia_source.dec IS NOT NULL
        AND gaia_source.dec_error IS NOT NULL
        AND gaia_source.parallax IS NOT NULL
        AND gaia_source.parallax_error IS NOT NULL
        AND gaia_source.astrometric_matched_observations IS NOT NULL
        AND gaia_source.visibility_periods_used IS NOT NULL
        AND gaia_source.phot_g_mean_mag IS NOT NULL
        AND gaia_source.phot_bp_mean_mag IS NOT NULL
        AND gaia_source.phot_rp_mean_mag IS NOT NULL
        AND gaia_source.teff_val IS NOT NULL
        ORDER by gaia_source.source_id;
        copy the gaia sample datafile from test-scripts to the downloads folder,
        (if the gaia sample datafile isn't there already)
        """
        # Stage the gzipped Gaia test catalog in the downloads directory,
        # where GaiaCat1 resolves bare catalog filenames.
        downloads_path = get_downloads_dir()
        if not os.path.exists(downloads_path + "/GaiaCatGVTest.gz"):
            shutil.copy(
                "tests/TestSupport/test-scripts/GaiaCatGVTest.gz", downloads_path
            )
        self.fixture = GaiaCat1(catalogfile="GaiaCatGVTest.gz")
    def test_init(self):
        """
        Test of initialization and __init__.
        Test method: Use the same dataset, but as a CSV file instead, and check
        that the GaiaCat1 object has stored the data correctly.
        """
        # nickname for the overall object
        gaia = self.fixture
        # same raw data from before, just in CSV format.
        expected = np.genfromtxt(
            "tests/TestSupport/test-scripts/GaiaCatCSVTest.csv",
            delimiter=",",
            names=True,
        )
        # test all prototype attributes
        np.testing.assert_allclose(expected["source_id"], gaia.Name)
        np.testing.assert_allclose(expected["teff_val"], gaia.Teff)
        np.testing.assert_allclose(expected["phot_g_mean_mag"], gaia.Gmag)
        np.testing.assert_allclose(expected["phot_bp_mean_mag"], gaia.BPmag)
        np.testing.assert_allclose(expected["phot_rp_mean_mag"], gaia.RPmag)
        np.testing.assert_allclose(expected["ra_error"], gaia.RAerr)
        np.testing.assert_allclose(expected["dec_error"], gaia.DECerr)
        np.testing.assert_allclose(expected["parallax_error"], gaia.parxerr)
        np.testing.assert_allclose(
            expected["astrometric_matched_observations"],
            gaia.astrometric_matched_observations,
        )
        np.testing.assert_allclose(
            expected["visibility_periods_used"], gaia.visibility_periods_used
        )
        # parallax carries milliarcsecond units; distance follows from the
        # standard parallax <-> parsec equivalency.
        exp_para_units = expected["parallax"] * u.mas
        np.testing.assert_allclose(exp_para_units, gaia.parx)
        exp_dist = exp_para_units.to("pc", equivalencies=u.parallax())
        np.testing.assert_allclose(exp_dist, gaia.dist)
        exp_coords = SkyCoord(
            ra=expected["ra"] * u.deg, dec=expected["dec"] * u.deg, distance=exp_dist
        )
        # prepare skycoord arrays for testing.
        # (SkyCoord objects are compared component-wise as plain floats.)
        exp_coords_array = []
        gaia_coords_array = []
        for i in range(len(exp_coords)):
            exp_coords_array.append(
                [
                    exp_coords[i].ra.degree,
                    exp_coords[i].dec.degree,
                    exp_coords[i].distance.pc,
                ]
            )
            gaia_coords_array.append(
                [
                    gaia.coords[i].ra.degree,
                    gaia.coords[i].dec.degree,
                    gaia.coords[i].distance.pc,
                ]
            )
        np.testing.assert_allclose(exp_coords_array, gaia_coords_array)
        # expected versions of these three parameters
        eGmag = expected["phot_g_mean_mag"]
        eBPmag = expected["phot_bp_mean_mag"]
        eRPmag = expected["phot_rp_mean_mag"]
        # Color-polynomial transformations from Gaia G/BP/RP to V, R, I --
        # presumably the Gaia DR2 photometric relations; verify coefficients
        # against the GaiaCat1 implementation if they are ever updated.
        expected_vmag = eGmag - (
            -0.01760 - 0.006860 * (eBPmag - eRPmag) - 0.1732 * (eBPmag - eRPmag) ** 2
        )
        expected_rmag = eGmag - (
            -0.003226 + 0.3833 * (eBPmag - eRPmag) - 0.1345 * (eBPmag - eRPmag) ** 2
        )
        expected_Imag = eGmag - (
            0.02085 + 0.7419 * (eBPmag - eRPmag) - 0.09631 * (eBPmag - eRPmag) ** 2
        )
        # test these three parameters. seems to be small rounding imprecision,
        # so upped rtolerance slightly
        np.testing.assert_allclose(expected_vmag, gaia.Vmag, rtol=2e-7)
        np.testing.assert_allclose(expected_rmag, gaia.Rmag, rtol=2e-7)
        np.testing.assert_allclose(expected_Imag, gaia.Imag, rtol=2e-7)
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@tests@StarCatalog@test_GaiaCat1.py@.PATH_END.py
|
{
"filename": "prompt.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/chains/conversation/prompt.py",
"type": "Python"
}
|
# flake8: noqa
# Prompts re-exported here only for backwards compatibility; they now live in
# langchain.memory.prompt.
from langchain.memory.prompt import (
    ENTITY_EXTRACTION_PROMPT,
    ENTITY_MEMORY_CONVERSATION_TEMPLATE,
    ENTITY_SUMMARIZATION_PROMPT,
    KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
    SUMMARY_PROMPT,
)
from langchain_core.prompts.prompt import PromptTemplate
# Default template for ConversationChain: expects the running transcript in
# `history` and the latest user message in `input`.
DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI:"""
# Ready-to-use PromptTemplate built from the default template above.
PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)
# Only for backwards compatibility
__all__ = [
    "SUMMARY_PROMPT",
    "ENTITY_MEMORY_CONVERSATION_TEMPLATE",
    "ENTITY_SUMMARIZATION_PROMPT",
    "ENTITY_EXTRACTION_PROMPT",
    "KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT",
    "PROMPT",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@chains@conversation@prompt.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "ratt-ru/QuartiCal",
"repo_path": "QuartiCal_extracted/QuartiCal-main/testing/__init__.py",
"type": "Python"
}
|
ratt-ruREPO_NAMEQuartiCalPATH_START.@QuartiCal_extracted@QuartiCal-main@testing@__init__.py@.PATH_END.py
|
|
{
"filename": "_opacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterternary/_opacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``opacity`` attribute of scatterternary traces."""

    def __init__(self, plotly_name="opacity", parent_name="scatterternary", **kwargs):
        # Resolve schema defaults first; explicit keyword arguments win.
        edit_type = kwargs.pop("edit_type", "style")
        upper = kwargs.pop("max", 1)
        lower = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper,
            min=lower,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterternary@_opacity.py@.PATH_END.py
|
{
"filename": "_modebar.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/_modebar.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ModebarValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``modebar`` attribute of the figure layout."""

    def __init__(self, plotly_name="modebar", parent_name="layout", **kwargs):
        # Defaults are popped from kwargs so callers may override them;
        # the long data_docs string is the generated schema documentation.
        super(ModebarValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Modebar"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            activecolor
                Sets the color of the active or hovered on
                icons in the modebar.
            add
                Determines which predefined modebar buttons to
                add. Please note that these buttons will only
                be shown if they are compatible with all trace
                types used in a graph. Similar to
                `config.modeBarButtonsToAdd` option. This may
                include "v1hovermode", "hoverclosest",
                "hovercompare", "togglehover",
                "togglespikelines", "drawline", "drawopenpath",
                "drawclosedpath", "drawcircle", "drawrect",
                "eraseshape".
            addsrc
                Sets the source reference on Chart Studio Cloud
                for `add`.
            bgcolor
                Sets the background color of the modebar.
            color
                Sets the color of the icons in the modebar.
            orientation
                Sets the orientation of the modebar.
            remove
                Determines which predefined modebar buttons to
                remove. Similar to
                `config.modeBarButtonsToRemove` option. This
                may include "autoScale2d", "autoscale",
                "editInChartStudio", "editinchartstudio",
                "hoverCompareCartesian", "hovercompare",
                "lasso", "lasso2d", "orbitRotation",
                "orbitrotation", "pan", "pan2d", "pan3d",
                "reset", "resetCameraDefault3d",
                "resetCameraLastSave3d", "resetGeo",
                "resetSankeyGroup", "resetScale2d",
                "resetViewMap", "resetViewMapbox",
                "resetViews", "resetcameradefault",
                "resetcameralastsave", "resetsankeygroup",
                "resetscale", "resetview", "resetviews",
                "select", "select2d", "sendDataToCloud",
                "senddatatocloud", "tableRotation",
                "tablerotation", "toImage", "toggleHover",
                "toggleSpikelines", "togglehover",
                "togglespikelines", "toimage", "zoom",
                "zoom2d", "zoom3d", "zoomIn2d", "zoomInGeo",
                "zoomInMap", "zoomInMapbox", "zoomOut2d",
                "zoomOutGeo", "zoomOutMap", "zoomOutMapbox",
                "zoomin", "zoomout".
            removesrc
                Sets the source reference on Chart Studio Cloud
                for `remove`.
            uirevision
                Controls persistence of user-driven changes
                related to the modebar, including `hovermode`,
                `dragmode`, and `showspikes` at both the root
                level and inside subplots. Defaults to
                `layout.uirevision`.
            """,
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@_modebar.py@.PATH_END.py
|
{
"filename": "design.md",
"repo_name": "DifferentiableUniverseInitiative/jax_cosmo",
"repo_path": "jax_cosmo_extracted/jax_cosmo-master/design.md",
"type": "Markdown"
}
|
# Design document for jax-cosmo
This document details the API, implementation choices, and internal mechanisms.
## Objective
Provide a fully end-to-end automatically differentiable cosmology library,
providing observables (e.g. 2pt angular Cls or correlation functions) for a
variety of tracers (e.g. lensing, clustering, CMB lensing, etc.).
This tool will make it easy to perform efficient inference (e.g. HMC, VI), as well as a wide variety of survey optimization tasks (e.g. photoz binning).
## Related Work
There isn't any equivalent of this project so far, to the best of our
knowledge.
But there are a wide collection of non differentiable cosmology libraries.
- CCL
- Cosmosis
- CosmicFish
- ...
## Design Overview
### JAX
This section covers some of the design aspects related to the JAX backend. It is
probably good to have a look at the JAX [intro](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html).
#### JAX best practices
- Loops are evil! avoid them at all cost! The problem is that normal Python
loops will get unrolled into a very long computational graph. Instead, as
much as possible, use batching with `jax.vmap` or the low level loop utilities
in `jax.lax` for XLA compatible control loops.
- Functions should be preferred to methods. Because we want to be able to do
things like:
```python
jax.grad(Omega_m)(cosmo, a)
```
which will compute the derivative with respect to the cosmology. If cosmology
wasn't an argument, it would be a lot more wordy:
```python
def fn(cosmo):
return cosmo.Omega_m(a)
jax.grad(fn)
```
- Careful with caching! Avoid it if possible, the only acceptable form of
caching is by computing an interpolation table and returning the result of an
interpolation. Only useful when needing consecutive calls to that table in the
same function.
#### The container class
Here is a situation, we want to define a parametric redshift distribution, say
a Gaussian with mean `z0` and standard deviation `sigma`. This redshift distribution
needs to be used through many operations all the way to the likelihood, so
we want a structure that can store these 2 parameters, and compatible with JAX
tracing.
So we define a `container` class, which is a generic structure holding some
parameters that need to be traced, and some static configuration arguments. The
`container` class knows how to pack and unpack its arguments, in a manner compatible
with the JAX custom types [(see here)](https://jax.readthedocs.io/en/latest/notebooks/JAX_pytrees.html)
The `container` class will store all the positional arguments it receives during
init in a list stored in `self.params`. These parameters are meant to be the
traceable arguments, so anything that might need to be differentiable should go
there. In addition, non traceable, configuration arguments, like a numerical precision
flag, or a computation flag, can be stored by providing keyword arguments to the
init. These arguments will be stored in `self.config`
Concretely, we can define our redshift distribution this way:
```python
class gaussian_nz(container):
def __init__(self, z0, sigma, zmax=10, **kwargs):
super(gaussian_nz, self).__init__(z0, sigma, # Traceable parameters
zmax=zmax, **kwargs) # non-traceable configuration
def __call__(self, z):
z0, sigma = self.params
return np.clip(exp(-0.5*( z - z0)**2/sigma**2),
0., self.config['zmax'])
```
Note that in this example, the `__init__` isn't doing anything extra; we keep it
for readability. JAX will know how to properly flatten and inflate this object
through the tracing process. You can for instance now do the following:
```python
# Define a likelihood, function of the redshift distribution
def likelihood(nz):
... # some computation that uses this nz
return likelihood_value
>>> nz = gaussian_nz(1., 0.1)
>>> jax.grad(likelihood)(nz)
(0.5346, 0.1123 )
```
where what is returned is the gradient with respect to the redshift object.
In general, this container mechanism can be used to aggregate a bunch of
parameters in one place, in a way that JAX knows how to handle.
### Cosmology
In this section we cover aspects related to the cosmology API and implementation.
#### Code structure
Here are the main modules:
- The `Cosmology` class: stores cosmological parameters, it is essentially an
instance of the `container`.
- The `background` module: hosts functions of the cosmology to compute various
background-related quantities.
- The `transfer` module: Library of transfer functions, e.g. EisensteinHu
- The `probes` module: Hosts the definition of various probes, as defined in the next section
- The `angular_cl` module: hosts the Limber integration code, and covariance tools
To these existing modules, we should add a `non_linear` for things like halofit.
#### Handling of 2pt functions
For now, and in the foreseeable future, all 2pt functions are computed using the
Limber approximation.
We follow the structure adopted by [CCL](https://github.com/LSSTDESC/CCL) to define two point functions of generalized tracers, as proposed by David Alonso in this issue [#627](https://github.com/LSSTDESC/CCL/issues/627). To summarize, each
tracer (e.g. lensing, number count, etc.) is characterized by the following:
- A radial kernel function
- An ell dependent prefactor
- A transfer function
In `jax-cosmo`, we define `probes` that are container
objects (i.e. which can be differentiated), gathering in particular a list of
redshift distributions, and any other necessary parameters.
|
DifferentiableUniverseInitiativeREPO_NAMEjax_cosmoPATH_START.@jax_cosmo_extracted@jax_cosmo-master@design.md@.PATH_END.py
|
{
"filename": "wave.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/examples/tvtk/visual/wave.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Author: Raashid Baig <raashid@aero.iitb.ac.in>
# License: BSD Style.
from math import sin, pi
from numpy import zeros, arange
from tvtk.tools.visual import show, Curve, iterate, MVector
def main():
    """Simulate four independent elastic bands of 100 beads each.

    Each band is a horizontal chain of beads coupled by springs and seeded
    with a different initial disturbance (half-wave, full-wave, compression,
    and standing-wave patterns). ``anim`` advances every band by one explicit
    Euler step; ``iterate`` drives the animation.

    Returns the animator object created by ``iterate``.
    """
    dt = 0.1  # integration time step
    x = arange(-50, 50)  # 100 bead positions along each band

    def make_band(y_offset, color):
        # Build one horizontal band of 100 beads at height ``y_offset``.
        pts = zeros((100, 3), float)
        for i in range(100):
            pts[i] = [x[i], y_offset, 0]
        return Curve(points=pts, k=6.0, color=color, mass=2.0,
                     radius=0.5, momentum=zeros((100, 3), float))

    # Previously the four bands (and the physics below) were written out
    # four times; build them from one helper instead.
    band1 = make_band(-30, (1, 0, 0))
    band2 = make_band(-15, (1, 1, 0))
    band3 = make_band(0, (0, 1, 0))
    band4 = make_band(15, (0, 0, 1))
    bands = [band1, band2, band3, band4]

    # Seed each band with its own initial disturbance.
    for i in range(0, 25, 1):
        band1.momentum[i, 1] = sin(x[i] * pi / 25.0) * 3       # half-wave pulse
        band2.momentum[i, 1] = sin(x[i] * 2 * pi / 25.0) * 5   # full-wave pulse
        band3.momentum[i, 0] = sin(x[i] * pi / 25.0) * 5       # compression pulse
    for i in range(0, 100, 1):
        band4.momentum[i, 1] = sin(x[i] * 4 * pi / 100.0) * 2  # standing wave

    def anim():
        # One explicit Euler step per band; bands are fully independent, so
        # updating them one after another matches the original interleaving.
        for band in bands:
            # Anchor the end beads so the band stays pinned.
            band.momentum[0] = band.momentum[-1] = MVector(0, 0, 0)
            band.points = band.points + (band.momentum / band.mass * dt)
            # Hooke force between neighbouring beads.
            force = band.k * (band.points[1:] - band.points[:-1])
            band.momentum[:-1] = band.momentum[:-1] + force * dt
            band.momentum[1:] = band.momentum[1:] - force * dt

    a = iterate(20, anim)
    show()
    return a


if __name__ == '__main__':
    main()
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@examples@tvtk@visual@wave.py@.PATH_END.py
|
{
"filename": "_scalegroup.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/violin/_scalegroup.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ScalegroupValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``scalegroup`` attribute of ``violin`` traces."""

    def __init__(self, plotly_name="scalegroup", parent_name="violin", **kwargs):
        # Schema defaults; explicit keyword arguments take precedence.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        super(ScalegroupValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@violin@_scalegroup.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "AgentM-GEG/residual_feature_extraction",
"repo_path": "residual_feature_extraction_extracted/residual_feature_extraction-master/README.md",
"type": "Markdown"
}
|
# RESIDUAL FEATURE EXTRACTION PIPELINE
A pipeline that carries out feature extraction of residual substructure within the residual images produced by popular galaxy structural-fitting routines such as GALFIT, GIM2D, etc. This pipeline extracts faint low surface brightness features by isolating flux-wise and area-wise significant contiguous pixel regions through a rigorous masking routine. This routine accepts the image cubes (original image, model image, residual image) and generates several data products:
1. An Image with Extracted features.
2. Source extraction based segmentation map.
3. The background sky mask and the residual extraction mask.
4. A montecarlo approach based area threshold above which the extracted features are identified.
5. A catalog entry indicating the surface brightness and its error.
**Author:** Kameswara Bharadwaj Mantha
**email:** km4n6@mail.umkc.edu
**Publication:**
Studying the Physical Properties of Tidal Features I. Extracting Morphological Substructure in CANDELS Observations and VELA Simulations.
**Corresponding Author:**
Kameswara Bharadwaj Mantha
**Co-authors:**
Daniel H. McIntosh, Cody P. Ciaschi, Rubyet Evan, Henry C. Ferguson, Logan B. Fries, Yicheng Guo, Luther D. Landry, Elizabeth J. McGrath, Raymond C. Simons, Gregory F. Snyder, Scott E. Thompson, Eric F. Bell, Daniel Ceverino, Nimish P. Hathi, Anton M. Koekemoer, Camilla Pacifici, Joel R. Primack, Marc Rafelski, Vicente Rodriguez-Gomez.
# INSTALLATION
Simply clone the github repository. The pipeline is present in the `Tidal_Feature_CAN_*` folder.
Requirements:
1. Please install `sep` from [sep documentation](https://sep.readthedocs.io/en/v1.0.x/)
2. I recommend running the pipeline in an `astroconda` environment. Please see [astroconda documentation](https://astroconda.readthedocs.io/en/latest/) for more details on how to install that environment. This should install all latest packages used by this pipeline.
3. In case you don't want to do step 2, here are the modules that you need: `optparse matplotlib astropy skimage warnings os`
# PIPELINE USAGE
You will use the python script: `Tidal_feature_finder.py`. This python file uses two other python scripts which have all the necessary functions that carry out the task.
**Usage:** Usage in your terminal is `python Tidal_feature_finder.py -p PARAMFILE.param`. Note that you have to provide the full path to PARAMFILE.param. Also, this information needs to be enclosed in single quotations.
**Example:** `python Tidal_feature_finder.py -p '~/path_to_parameter_file/test.param'`
#### IMPORTANT DETAILS about the PARAMETER FILE:
1. For ease of running the residual feature finding process, I have provided important levers used during the feature detection in a parameter file. NOTE this is NOT THE GALFIT PARAMETER FILE.
2. One has to write this parameter file for the galaxy one wishes to find the features.
3. The parameter file has several keywords, each keyword representing a lever that you can change. Please DO NOT alter the keywords.
4. Each keyword and its value is arranged as "key = value". Please try to stick to this format. "=" sign is a must. In principle, spaces should be taken care of internally, feel free to test this and let me know if the code breaks.
5. The order of the keywords doesn't matter. The GALFIT output file and the redshift of the galaxy are required; the rest are optional. By optional, I mean that the script uses default values (see below for additional description).
*Descriptions and cautions for the key words:*
(a) If any of the following keywords (except for the required fields) are provided as "none", then they would revert to their default values.
(b) When mentioning the paths, please DO NOT provide "/" after the last folder. You don't need to create folders; just provide the paths and the code should create folders as needed. See the example parameter file provided.
#### KEYWORDS IN PARAM FILE
**I. gfit_outfile:** Please enter the path+filename to the galfit output file over which you want to find the residual features. This should be a fits file.
*Default value:* No defaults for this. This is a required field
**II. exp_stamp:** The cutout of the exposure map corresponding to the galaxy in question, where each pixel value holds the exposure time in seconds.
*Default value:* 5000 seconds
**III. sigma:** What significance above the sky background do you want to detect the features. If "sigma = 1", then the pixels whose values are above 1*sky are chosen towards computing the features.
*Default value: 2*
**IV. redshift:** Enter the redshift of the galaxy from the catalog. This is used to figure out the corresponding physical distance around the galaxy of interest.
*Default value:* None, this is a required field.
**V. boxcar_smooth_size:** The size of the boxcar2d filter used to smooth the residual image.
*Default value:* 3
**VI. forced_segvalues_galfitted:** In case you are wanting to extract residual features for multiple galaxies in the image. First, make sure that these galaxies are GALFITTED. Then go to the source extract folder and spot the feature extraction image. Enter the segmap values corresponding to the sources you want to force the extraction of features in a consistent fashion to primary galaxy of interest. For example forced_segvalues_galfitted = 9, will perform the feature extraction on the source number 9 by repeating the exact same process performed on the primary galaxy (at the center of the image).
*Default values:* None
**VII. forced_segvalues_not_galfitted:** In some cases, extended features are identified as separate sources in the image. They might get masked if not taken care of and will be omitted during feature extraction. Therefore, if one wishes to forcibly unlock regions that are not GALFITTED, then please provide their corresponding segmentation values. For example forced_segvalues_not_galfitted = 5 will unlock the segmentation region corresponding to object 5.
*Default values:* None.
**VIII. inner_mask_semi_maj:** The semi major axis multiplier as the width of the ellipse (diameter) that masks the central region.
*Default value:* 1.5 * object's semi-major axis
**IX. inner_mask_semi_min:** The semi minor axis multiplier as the height of the ellipse (diameter) that masks the central region.
*Default value:* 1.5 * object's semi-minor axis
**X. outer_phy_mask_size:** the size of the outer circular aperture [in kpc] after which the feature extraction is not performed.
*Default value:* 30 (means 30 kpc).
**XI. run_MC:** In order to choose features that are significant above a random noise expectation, the machine will perform a Monte Carlo simulation of extracting the features that show up if you just have a sky background. If you are running the tidal feature finder for the first time on a galaxy, have this toggled to True. Once it runs, it generates necessary files and stores them for future purpose. It computes an Area threshold and stores it in "MC_path" folder under "A_thresh" folder. You will notice that a text file with the galaxy id (you provided) is created. If you open it, there will be one number which is the area threshold above which a region is statistically unlikely to be caused by sky background. Also, in the same folder, it creates some plots with the random noise image and how it looks like if we applied our tidal feature finder on just noise. This is a diagnostic plot to make sure we are not doing any thing crazy wrong.
*Default value:* 'True'
**XII. plot_destin:** This path will store the key figure, where the residual feature is overlaid on the host galaxy. Please provide the path here.
*Default value:* current working directory, where a new folder is created
**XIII. SB_info_path:** At the end of the residual feature finding, the script computes the surface brightness of the features. Please provide a path in which csv files with the surface brightness information can be stored. The csv file will be structured as follows: ID, Surface brightness, Surface brightness error
*Default value:* current working directory, where a new folder is created
**XIV. sep_sigma:** This is the significance above which you want the sources in your image to be detected. Note that this is used exclusively for source detection.
*Default value:* 0.75
**XV. sep_min_area:** The minimum area of significant pixels to be called as a source.
*Default value:* 7
**XVI. sep_dblend_nthresh:** Number of threshold levels used for de-blending sources.
*Default value:* 32
**XVII. sep_dblend_cont:** The deblend minimum contrast level used for deblending of sources. Please see SExtractor definitions or go to SEP python webpage.
*Default value:* 0.001
**XVIII. sep_filter_kwarg:** The key word indicating what filter do you want to use during the source extraction process. The available key words are "tophat", "gauss", "boxcar".
*Default value:* tophat
**XIX. sep_filter_size:** What is the appropriate filter size you want to use during the source extraction process.
For tophat --> it is the radius
For gauss --> it is the fwhm (in pixels)
For boxcar --> It is the box size.
*Default value:* for all filters, it is 5.
**XX. sep_make_plots:** If you want to see the source extraction output, please mention True here.
*Default value:* True
**XXI. sep_plot_destin:** The destination folder to store the plots, if you decide to the source extraction output.
*Default value:* current working directory, where a new folder is created
**XXII. fits_save_loc:** save location where the fits image files generated during the feature extraction process are stored.
*Default value:* current working directory, where a new folder is created
#### Work in progress
Note that you will notice a couple more keywords in the example parameter file.
These correspond to a Voronoi tessellation of the residual features that will be incorporated in upcoming versions of the feature extraction.
Feel free to delete these key words from the parameter files you create. The default values for making Voronoi Tesselation
are set to False, so it shouldn't cause any issue.
|
AgentM-GEGREPO_NAMEresidual_feature_extractionPATH_START.@residual_feature_extraction_extracted@residual_feature_extraction-master@README.md@.PATH_END.py
|
{
"filename": "bug_report.md",
"repo_name": "TeamLEGWORK/LEGWORK",
"repo_path": "LEGWORK_extracted/LEGWORK-main/.github/ISSUE_TEMPLATE/bug_report.md",
"type": "Markdown"
}
|
---
name: Bug report
about: Help us squash those bugs in LEGWORK!
title: ''
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**Example for reproducing**
Please include a minimum working example that reproduces the error, e.g.
```
import legwork as lw
# do stuff with legwork
# something breaks
```
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. macOS]
- Version: [e.g. v0.1.6]
- Run from: [e.g. terminal or Jupyter notebook]
|
TeamLEGWORKREPO_NAMELEGWORKPATH_START.@LEGWORK_extracted@LEGWORK-main@.github@ISSUE_TEMPLATE@bug_report.md@.PATH_END.py
|
{
"filename": "ndslicing.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/nddata/mixins/ndslicing.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Slicing mixin to the NDData class.
from astropy import log
from astropy.wcs.wcsapi import (
BaseHighLevelWCS, # noqa: F401
BaseLowLevelWCS, # noqa: F401
HighLevelWCSWrapper,
SlicedLowLevelWCS,
)
__all__ = ["NDSlicingMixin"]
class NDSlicingMixin:
    """Mixin that adds slicing support to classes exposing the `NDData`
    interface.

    Slicing an instance cuts ``data``, ``mask``, ``uncertainty`` and ``wcs``
    (whenever they are set and support slicing), while ``unit`` and ``meta``
    are carried over untouched. Where possible the sliced attributes are
    references to the originals, not copies.

    Examples
    --------
    Using this Mixin with `~astropy.nddata.NDData`:

        >>> from astropy.nddata import NDData, NDSlicingMixin
        >>> class NDDataSliceable(NDSlicingMixin, NDData):
        ...     pass

    Slicing an instance containing data::

        >>> nd = NDDataSliceable([1,2,3,4,5])
        >>> nd[1:3]
        NDDataSliceable([2, 3])

    Also the other attributes are sliced for example the ``mask``::

        >>> import numpy as np
        >>> mask = np.array([True, False, True, True, False])
        >>> nd2 = NDDataSliceable(nd, mask=mask)
        >>> nd2slc = nd2[1:3]
        >>> nd2slc[nd2slc.mask]
        NDDataSliceable([3])

    Be aware that changing values of the sliced instance will change the values
    of the original::

        >>> nd3 = nd2[1:3]
        >>> nd3.data[0] = 100
        >>> nd2
        NDDataSliceable([  1, 100,   3,   4,   5])

    See Also
    --------
    NDDataRef
    NDDataArray
    """

    def __getitem__(self, item):
        # Scalars have an empty shape and cannot be indexed.
        if self.data.shape == ():
            raise TypeError("scalars cannot be sliced.")
        # Delegate the per-attribute slicing and rebuild an instance of the
        # same class from the result.
        return self.__class__(**self._slice(item))

    def _slice(self, item):
        """Slice every sliceable attribute and return the results as a `dict`.

        ``uncertainty``, ``mask`` and ``wcs`` are handled by their dedicated
        ``_slice_*`` helpers, while ``unit`` and ``meta`` are passed through
        unchanged and ``data`` is assumed sliceable and indexed directly.
        Whenever possible the returned values are references, not copies.

        Parameters
        ----------
        item : slice
            The slice passed to ``__getitem__``.

        Returns
        -------
        dict :
            Keyword arguments ready for ``self.__class__.__init__(**kwargs)``
            as used by ``__getitem__``.
        """
        return {
            "data": self.data[item],
            "uncertainty": self._slice_uncertainty(item),
            "mask": self._slice_mask(item),
            "wcs": self._slice_wcs(item),
            # These two are handed over as-is, never sliced.
            "unit": self.unit,
            "meta": self.meta,
        }

    def _slice_uncertainty(self, item):
        # Slice the uncertainty if possible; fall back to the unsliced one.
        uncertainty = self.uncertainty
        if uncertainty is None:
            return None
        try:
            return uncertainty[item]
        except (TypeError, KeyError):
            # TypeError: the object has no __getitem__ method.
            # KeyError: raised instead on Python 3.12.
            # IndexError is deliberately allowed to propagate.
            log.info("uncertainty cannot be sliced.")
        return uncertainty

    def _slice_mask(self, item):
        # Slice the mask if possible; fall back to the unsliced one.
        mask = self.mask
        if mask is None:
            return None
        try:
            return mask[item]
        except (TypeError, KeyError):
            log.info("mask cannot be sliced.")
        return mask

    def _slice_wcs(self, item):
        # Slice the WCS through the low-level API and wrap the result back
        # into a high-level object.
        if self.wcs is None:
            return None
        try:
            return HighLevelWCSWrapper(
                SlicedLowLevelWCS(self.wcs.low_level_wcs, item)
            )
        except Exception as err:
            self._handle_wcs_slicing_error(err, item)

    # Kept as a separate method so that subclasses can customise the error.
    def _handle_wcs_slicing_error(self, err, item):
        raise ValueError(
            f"Slicing the WCS object with the slice '{item}' "
            "failed, if you want to slice the NDData object without the WCS, you "
            "can remove by setting `NDData.wcs = None` and then retry."
        ) from err
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@nddata@mixins@ndslicing.py@.PATH_END.py
|
{
"filename": "Likelihood21cmFast.py",
"repo_name": "BradGreig/21CMMC",
"repo_path": "21CMMC_extracted/21CMMC-master/21CMMC_SourceCode/Programs/CosmoHammer_21CMMC/likelihood/module/Likelihood21cmFast.py",
"type": "Python"
}
|
#!/usr/bin/env python
import os
import numpy as np
np.seterr(invalid='ignore', divide='ignore')
from decimal import *
from scipy import interpolate
from scipy.interpolate import InterpolatedUnivariateSpline
import string
import subprocess
import time
import multiprocessing
# Pre-built Decimal quantisation templates, used throughout this module to
# truncate parameter values to a fixed number of decimal places when writing
# the walker parameter files.
TWOPLACES = Decimal(10) ** -2 # same as Decimal('0.01')
FOURPLACES = Decimal(10) ** -4 # same as Decimal('0.0001')
SIXPLACES = Decimal(10) ** -6 # same as Decimal('0.000001')
# Redshift at which the McGreer et al. prior on the IGM neutral fraction is
# evaluated (see the McGreerPrior handling in the likelihood).
McGreer_Redshift = 5.9
# The redshift of the QSO
QSO_Redshift = 7.0842
class Likelihood21cmFast_multiz(object):
def __init__(self, Redshifts_For_LF,Muv_values, phi_values, phi_Error, k_values, PS_values, Error_k_values, PS_Error, Redshift, Redshifts_For_Prior, param_legend, Fiducial_Params, FlagOptions, param_string_names, NSplinePoints,
TsCalc_z, Foreground_cut, Shot_Noise_cut, IncludeLightCone, IncludeLF, ModUncert, PriorLegend, NFValsQSO, PDFValsQSO):
self.Redshifts_For_LF = Redshifts_For_LF # New in v1.4
self.Muv_values = Muv_values # New in v1.4
self.phi_values = phi_values # New in v1.4
self.phi_Error = phi_Error # New in v1.4
self.k_values = k_values
self.PS_values = PS_values
self.Error_k_values = Error_k_values
self.PS_Error = PS_Error
self.Redshift = Redshift
self.Redshifts_For_Prior = Redshifts_For_Prior
self.param_legend = param_legend
self.Fiducial_Params = Fiducial_Params
self.FlagOptions = FlagOptions
self.param_string_names = param_string_names
self.NSplinePoints = NSplinePoints
self.TsCalc_z = TsCalc_z
self.Foreground_cut = Foreground_cut
self.Shot_Noise_cut = Shot_Noise_cut
self.IncludeLightCone = IncludeLightCone
self.IncludeLF = IncludeLF
self.ModUncert = ModUncert
self.PriorLegend = PriorLegend
self.NFValsQSO = NFValsQSO
self.PDFValsQSO = PDFValsQSO
def Likelihood(self,ctx):
params = ctx.getParams()
# If the light-cone option is set, we do not return the neutral fraction as it can be a large amount of data (also less useful).
# Only really helpful (if at all) for co-eval cubes
if self.IncludeLightCone is True:
nf_vals = np.zeros(3)
else:
# If we are applying the optical depth prior, then we might as well keep the value of the electron scattering optical depth
if self.PriorLegend['PlanckPrior'] is True or self.FlagOptions['KEEP_ALL_DATA'] is True:
nf_vals = np.zeros(len(self.Redshift) + len(self.Redshifts_For_Prior)+3)
else:
nf_vals = np.zeros(len(self.Redshift) + len(self.Redshifts_For_Prior)+2)
# Generate a unique ID for each thread by sampling a randomly seeded distribution.
# Given than file I/O needs to be unique to each thread, it is beneficial to provide a unique ID in the off chance that two different threads
# end up with the same walker position (same parameter set)
np.random.seed()
random_number = np.random.normal(size=1)
# Create a second unique ID, that being the first variable of the specific walker (fail-safe against ID overlap; shouldn't happen, but guarding against anyway)
Individual_ID = Decimal(repr(random_number[0])).quantize(SIXPLACES)
Individual_ID_2 = Decimal(repr(params[0])).quantize(SIXPLACES)
# Add all the redshifts (those for the likelihood and those for prior only). This parameter is only used where this is relevant
number_redshifts = len(self.Redshift) + len(self.Redshifts_For_Prior)
# Add and sort all redshifts (those for the likelihood and those for prior only)
AllRedshifts = []
if self.IncludeLightCone is False:
for i in range(len(self.Redshift)):
AllRedshifts.append(self.Redshift[i])
for i in range(len(self.Redshifts_For_Prior)):
AllRedshifts.append(self.Redshifts_For_Prior[i])
AllRedshifts.sort(key=float)
StoredStatisticalData = []
StoredStatisticalData_Error = []
StoredFileLayout = []
StoredFileLayout_Error = []
separator_column = "\t"
if self.IncludeLightCone is True:
LightConeFlag = 1
else:
LightConeFlag = 0
separator = " "
separator_other = "_"
seq = []
# Add the random thread ID
seq.append("%s"%(Individual_ID))
# Add the second ID
seq.append("%s"%(Individual_ID_2))
StringArgument_other = string.join(seq,separator_other)
# Add number of redshifts
# If using the light-cone version of the code, don't need to set a redshift
if self.IncludeLightCone is True:
seq.append("0")
else:
seq.append("%s"%(number_redshifts))
# Add light cone flag
seq.append("%s"%(LightConeFlag))
# If mass-dependence on ionising efficiency is allowed. Add the flag here
if self.FlagOptions['USE_MASS_DEPENDENT_ZETA'] is True:
seq.append("1")
else:
seq.append("0")
# Add redshift for Ts.c calculation
seq.append("%s"%(self.TsCalc_z))
#StringArgument = string.join(seq,separator)
#print 'StringArgument:',StringArgument
#if self.IncludeLF is True:
if self.IncludeLF is 1:
seq.append("1")
elif self.IncludeLF is 2:
seq.append("2")
else:
seq.append("0")
StringArgument = string.join(seq,separator)
##### Now we need to create the individual walker file to be read by drive_21cmMC_streamlined #####
if self.FlagOptions['GENERATE_NEW_ICS'] is True:
GenerateNewICs = 1
else:
GenerateNewICs = 0
if self.FlagOptions['INCLUDE_RSDS'] is True:
Subcell_RSDs = 1
else:
Subcell_RSDs = 0
if self.FlagOptions['USE_IONISATION_FCOLL_TABLE'] is True:
IONISATION_FCOLL_TABLE = 1
else:
IONISATION_FCOLL_TABLE = 0
if self.FlagOptions['USE_FCOLL_TABLE'] is True:
UseFcollTable = 1
else:
UseFcollTable = 0
if self.FlagOptions['CALC_TS_FLUC'] is True:
PerformTsCalc = 1
else:
PerformTsCalc = 0
if self.FlagOptions['USE_INHOMO_RECO'] is True:
INHOMO_RECO = 1
else:
INHOMO_RECO = 0
if self.FlagOptions['KEEP_GLOBAL_DATA'] is True:
OutputGlobalAve = 1
else:
if self.PriorLegend['PlanckPrior'] is True or self.PriorLegend['McGreerPrior'] is True or self.PriorLegend['GreigPrior'] is True or self.FlagOptions['KEEP_ALL_DATA'] is True:
OutputGlobalAve = 1
elif self.IncludeLightCone is True:
OutputGlobalAve = 1
else:
OutputGlobalAve = 0
parameter_number = 0
create_file = open("Walker_%s.txt"%(StringArgument_other),"w")
create_file.write("FLAGS %s %s %s %s %s %s %s\n"%(GenerateNewICs,Subcell_RSDs,IONISATION_FCOLL_TABLE,UseFcollTable,PerformTsCalc,INHOMO_RECO,OutputGlobalAve))
# New in v1.4
if self.param_legend['F_STAR10'] is True:
create_file.write("F_STAR10 %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("F_STAR10 %s\n"%(self.Fiducial_Params['F_STAR10']))
if self.param_legend['ALPHA_STAR'] is True:
create_file.write("ALPHA_STAR %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("ALPHA_STAR %s\n"%(self.Fiducial_Params['ALPHA_STAR']))
if self.param_legend['F_ESC10'] is True:
create_file.write("F_ESC10 %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("F_ESC10 %s\n"%(self.Fiducial_Params['F_ESC10']))
if self.param_legend['ALPHA_ESC'] is True:
create_file.write("ALPHA_ESC %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("ALPHA_ESC %s\n"%(self.Fiducial_Params['ALPHA_ESC']))
if self.param_legend['M_TURN'] is True:
create_file.write("M_TURN %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("M_TURN %s\n"%(self.Fiducial_Params['M_TURN']))
if self.param_legend['t_STAR'] is True:
create_file.write("t_STAR %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("t_STAR %s\n"%(self.Fiducial_Params['t_STAR']))
if self.param_legend['ZETA'] is True:
create_file.write("ZETA %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("ZETA %s\n"%(self.Fiducial_Params['ZETA']))
if self.param_legend['MFP'] is True:
create_file.write("MFP %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("MFP %s\n"%(self.Fiducial_Params['MFP']))
if self.param_legend['TVIR_MIN'] is True:
create_file.write("TVIR_MIN %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
X_RAY_TVIR_MIN = params[parameter_number]
parameter_number += 1
else:
create_file.write("TVIR_MIN %s\n"%(self.Fiducial_Params['TVIR_MIN']))
if self.param_legend['L_X'] is True:
create_file.write("L_X %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("L_X %s\n"%(self.Fiducial_Params['L_X']))
if self.param_legend['NU_X_THRESH'] is True:
create_file.write("NU_X_THRESH %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("NU_X_THRESH %s\n"%(self.Fiducial_Params['NU_X_THRESH']))
create_file.write("NU_X_BAND_MAX %s\n"%(self.Fiducial_Params['NU_X_BAND_MAX']))
create_file.write("NU_X_MAX %s\n"%(self.Fiducial_Params['NU_X_MAX']))
if self.param_legend['X_RAY_SPEC_INDEX'] is True:
create_file.write("X_RAY_SPEC_INDEX %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("X_RAY_SPEC_INDEX %s\n"%(self.Fiducial_Params['X_RAY_SPEC_INDEX']))
if self.param_legend['TVIR_MIN'] is True:
create_file.write("X_RAY_TVIR_MIN %s\n"%(Decimal(repr(X_RAY_TVIR_MIN)).quantize(SIXPLACES)))
else:
create_file.write("X_RAY_TVIR_MIN %s\n"%(self.Fiducial_Params['X_RAY_TVIR_MIN']))
create_file.write("X_RAY_TVIR_LB %s\n"%(self.Fiducial_Params['X_RAY_TVIR_LB']))
create_file.write("X_RAY_TVIR_UB %s\n"%(self.Fiducial_Params['X_RAY_TVIR_UB']))
#create_file.write("F_STAR %s\n"%(self.Fiducial_Params['F_STAR']))
create_file.write("N_RSD_STEPS %s\n"%(self.Fiducial_Params['N_RSD_SUBCELLS']))
create_file.write("LOS_direction %s\n"%(self.Fiducial_Params['LOS_direction']))
if self.IncludeLightCone is False:
for i in range(number_redshifts):
create_file.write("CO-EVAL-Z %s\n"%(AllRedshifts[i]))
create_file.close()
if self.FlagOptions['GENERATE_NEW_ICS'] is True:
# A random number between 1 and 10^12 should be sufficient to randomise the ICs
RandomSeed = np.random.uniform(low=1,high=1e12,size=1)
# Now create the cosmology file associated with this walker.
create_file = open("WalkerCosmology_%s.txt"%(StringArgument_other),"w")
if self.FlagOptions['GENERATE_NEW_ICS'] is True:
create_file.write("RANDOM_SEED %s\n"%(RandomSeed[0]))
else:
create_file.write("RANDOM_SEED %s\n"%(Decimal(repr(1.0)).quantize(SIXPLACES)))
if self.param_legend['SIGMA_8'] is True:
create_file.write("SIGMA_8 %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("SIGMA_8 %s\n"%(self.Fiducial_Params['SIGMA_8']))
if self.param_legend['littleh'] is True:
create_file.write("hubble %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("hubble %s\n"%(self.Fiducial_Params['littleh']))
if self.param_legend['OMEGA_M'] is True:
create_file.write("Omega_M %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("Omega_M %s\n"%(self.Fiducial_Params['OMEGA_M']))
if self.param_legend['OMEGA_M'] is True:
create_file.write("Omega_L %s\n"%(Decimal(repr(1. - params[parameter_number-1])).quantize(SIXPLACES)))
else:
create_file.write("Omega_L %s\n"%(Decimal(repr(1. - float(self.Fiducial_Params['OMEGA_M']))).quantize(SIXPLACES)))
if self.param_legend['OMEGA_b'] is True:
create_file.write("Omega_b %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("Omega_b %s\n"%(self.Fiducial_Params['OMEGA_b']))
if self.param_legend['NS'] is True:
create_file.write("ns %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("ns %s\n"%(self.Fiducial_Params['NS']))
create_file.close()
if self.FlagOptions['LOG_LINEAR_K_SAMPLING'] is True:
kSplineMin = np.log10(self.Foreground_cut)
kSplineMax = np.log10(self.Shot_Noise_cut)
else:
kSplineMin = self.Foreground_cut
kSplineMax = self.Shot_Noise_cut
kSpline = np.zeros(self.NSplinePoints)
for j in range(self.NSplinePoints):
kSpline[j] = kSplineMin + (kSplineMax - kSplineMin)*float(j)/(self.NSplinePoints - 1)
if self.FlagOptions['LOG_LINEAR_K_SAMPLING'] is True:
kSpline = 10**( kSpline )
counter = 0
command = "./drive_21cmMC_streamlined %s"%(StringArgument)
os.system(command)
total_sum = 0
if self.FlagOptions['KEEP_GLOBAL_DATA'] is True:
k_values_estimate = np.loadtxt('AveData_%s.txt'%(StringArgument_other), usecols=(0,))
PS_values_estimate = np.loadtxt('AveData_%s.txt'%(StringArgument_other), usecols=(2,))
if self.IncludeLightCone is False:
k_values_estimate = k_values_estimate[::-1]
PS_values_estimate = PS_values_estimate[::-1]
# Converting the redshifts to frequencies for the interpolation (must be in increasing order, it is by default redshift which is decreasing)
FrequencyValues_mock = np.zeros(len(self.k_values[0]))
FrequencyValues_model = np.zeros(len(k_values_estimate))
# Shouldn't need two, as they should be the same sampling. However, just done it for now
for j in range(len(self.k_values[0])):
FrequencyValues_mock[j] = ((2.99792e8)/(.2112*(1. + self.k_values[0][j])))/(1e6)
for j in range(len(k_values_estimate)):
FrequencyValues_model[j] = ((2.99792e8)/(.2112*(1. + k_values_estimate[j])))/(1e6)
splined_mock = interpolate.splrep(FrequencyValues_mock,self.PS_values[0],s=0)
splined_model = interpolate.splrep(FrequencyValues_model,PS_values_estimate,s=0)
FrequencyMin = self.Fiducial_Params['MIN_FREQ']
FrequencyMax = self.Fiducial_Params['MAX_FREQ']
if self.FlagOptions['USE_GS_FIXED_ERROR'] is True:
ErrorOnGlobal = self.Fiducial_Params['CONST_ERROR']
Bandwidth = self.Fiducial_Params['BANDWIDTH']
FrequencyBins = int(np.floor((FrequencyMax-FrequencyMin)/Bandwidth)) + 1
for j in range(FrequencyBins):
FrequencyVal = FrequencyMin + Bandwidth*j
MockPS_val = interpolate.splev(FrequencyVal,splined_mock,der=0)
ModelPS_val = interpolate.splev(FrequencyVal,splined_model,der=0)
total_sum += np.square( (MockPS_val - ModelPS_val)/ErrorOnGlobal )
else:
for j in range(len(self.Error_k_values[0])):
FrequencyVal = ((2.99792e8)/(.2112*(1. + self.Error_k_values[0][j])))/(1e6)
if FrequencyVal >= FrequencyMin and FrequencyVal <= FrequencyMax:
MockPS_val = interpolate.splev(FrequencyVal,splined_mock,der=0)
ModelPS_val = interpolate.splev(FrequencyVal,splined_model,der=0)
total_sum += np.square( (MockPS_val - ModelPS_val)/self.PS_Error[0][j] )
# New in v1.4
#if self.IncludeLF is True:
if self.IncludeLF:
# At the moment I just put the redshift list by hand, but this part should be modified.
#NUM_OF_REDSHIFTS_FOR_LF = 4
for iz in range(len(self.Redshifts_For_LF)):
# Exclude bright-end (Muv < -20) from Lumnosity function
Muv_i = []
phi_i = []
error_i = []
j = 0
while j < len(self.Muv_values[iz]):
if self.Muv_values[iz][j] > -20. and self.Muv_values[iz][j]!=0.:
Muv_i.append(self.Muv_values[iz][j])
phi_i.append(self.phi_values[iz][j])
error_i.append(self.phi_Error[iz][j])
j = j + 1
Muv_values_estimate0 = np.loadtxt('LF_estimate_%s_%s.txt'%(StringArgument_other,self.Redshifts_For_LF[iz]), usecols=(0,))
log10phi_values_estimate0 = np.loadtxt('LF_estimate_%s_%s.txt'%(StringArgument_other,self.Redshifts_For_LF[iz]), usecols=(1,))
Muv_values_estimate = Muv_values_estimate0[::-1]
log10phi_values_estimate = log10phi_values_estimate0[::-1]
LF_criterion = 1 #LF_criteion == 0: skip this chain.
# check whether Muv does not increase monotonically with halo mass. if not interpolation is not possible.
i_check = 0
while i_check < len(Muv_values_estimate)-1:
if (Muv_values_estimate[i_check] > Muv_values_estimate[i_check+1]):
LF_criterion = 0
#print ("Found Muv list reversed\n")
break
i_check = i_check + 1
if (max(Muv_values_estimate) <= min(self.Muv_values[iz])) or (min(Muv_values_estimate) >= max(self.Muv_values[iz])):
LF_criterion = 0
if (LF_criterion == 0):
total_sum = total_sum + 10000000000.
else:
LFestimate_Spline = interpolate.splrep(Muv_values_estimate, log10phi_values_estimate,s=0)
for ii in range(len(Muv_i)):
Muv_i_val = Muv_i[ii]
log10phi_i_val = interpolate.splev(Muv_i_val,LFestimate_Spline,der=0)
#total_sum = total_sum + np.square(phi_i[ii] - 10**(log10phi_i_val)) / (np.square(error_i[ii]))
chi2_i = np.square(phi_i[ii] - 10**(log10phi_i_val)) / (np.square(error_i[ii]))
if (np.isinf(chi2_i)):
chi2_i = 100000.
total_sum = total_sum + chi2_i
else:
if self.IncludeLightCone is True:
# For the light-cone version, the c-code creates a single textfile containing the filenames of each of the light-cone 21cm PS generated. This
# should be of equal or greater length than the number of mock observations added.
LightconePSFilename = 'delTps_lightcone_filenames_%s.txt'%(StringArgument_other)
filename = open('%s'%(LightconePSFilename), 'r')
LightconePS = [line.rstrip('\n') for line in filename]
#nf_vals[0] = 'Walker_%s.txt'%(StringArgument_other)
nf_vals[0] = 0.#'Walker_%s.txt'%(StringArgument_other)
for i in range(len(self.Redshift)):
k_values_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(0,))
PS_values_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(1,))
Poisson_error_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(2,)) # Read possion errors
if self.FlagOptions['KEEP_ALL_DATA'] is True:
if not self.IncludeLF is 2:
if i == 0:
StoredStatisticalData.append(k_values_estimate)
StoredFileLayout.append("{%i}"%(i))
StoredStatisticalData_Error.append(k_values_estimate)
StoredFileLayout_Error.append("{%i}"%(i))
StoredStatisticalData.append(PS_values_estimate)
StoredFileLayout.append("{%i}"%(i+1))
StoredStatisticalData_Error.append(Poisson_error_estimate)
StoredFileLayout_Error.append("{%i}"%(i+1))
else:
for i in range(len(AllRedshifts)):
# Read in the neutral fraction and 21cm PS for this parameter set and redshift
nf_value = np.loadtxt('NeutralFraction_%s_%s.txt'%(StringArgument_other,AllRedshifts[i]), usecols=(0,))
nf_vals[i] = nf_value
# This only reading the data in from file, and then saving it to output
# Yes, I end up reading twice, but whatever...
# (I split it in the case that Redshifts_for_Prior was non-zero)
if not self.IncludeLF is 2:
k_values_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,AllRedshifts[i]), usecols=(0,))
PS_values_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,AllRedshifts[i]), usecols=(1,))
Poisson_error_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,AllRedshifts[i]), usecols=(2,))
if self.FlagOptions['KEEP_ALL_DATA'] is True:
if i == 0:
StoredStatisticalData.append(k_values_estimate)
StoredFileLayout.append("{%i}"%(i))
StoredStatisticalData_Error.append(k_values_estimate)
StoredFileLayout_Error.append("{%i}"%(i))
StoredStatisticalData.append(PS_values_estimate)
StoredFileLayout.append("{%i}"%(i+1))
StoredStatisticalData_Error.append(Poisson_error_estimate)
StoredFileLayout_Error.append("{%i}"%(i+1))
# nf_vals[len(AllRedshifts)] = 'Walker_%s.txt'%(StringArgument_other)
nf_vals[len(AllRedshifts)] = '%s'%(Individual_ID)
nf_vals[len(AllRedshifts)+1] = '%s'%(Individual_ID_2)
# Note here that the usage of len(Redshift) uses the number of mock lightcone 21cm PS if IncludeLightCone was set to True.
for i in range(len(self.Redshift)):
if self.IncludeLightCone is True:
k_values_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(0,))
PS_values_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(1,))
Poisson_error_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(2,)) # Read possion errors
elif not self.IncludeLF is 2:
# Read in the neutral fraction and 21cm PS for this parameter set and redshift
k_values_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,self.Redshift[i]), usecols=(0,))
PS_values_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,self.Redshift[i]), usecols=(1,))
Poisson_error_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,self.Redshift[i]), usecols=(2,))
if not self.IncludeLF is 2:
splined_mock = interpolate.splrep(self.k_values[i],np.log10(self.PS_values[i]),s=0)
splined_error = interpolate.splrep(self.Error_k_values[i],np.log10(self.PS_Error[i]),s=0)
splined_model = interpolate.splrep(k_values_estimate,np.log10(PS_values_estimate),s=0)
splined_model_poisson_err = interpolate.splrep(k_values_estimate,np.log10(Poisson_error_estimate),s=0)
# Interpolating the mock and error PS in log space
for j in range(self.NSplinePoints):
MockPS_val = 10**(interpolate.splev(kSpline[j],splined_mock,der=0))
ErrorPS_val = 10**(interpolate.splev(kSpline[j],splined_error,der=0))
ModelPS_val = 10**(interpolate.splev(kSpline[j],splined_model,der=0))
ModelPE_val = 10**(interpolate.splev(kSpline[j],splined_model_poisson_err,der=0))
# Check if there are any nan values for the 21cm PS
# A nan value implies a IGM neutral fraction of zero, that is, reionisation has completed and thus no 21cm signal
# Set the value of the 21cm PS to zero. Which results in the largest available difference (i.e. if you expect a signal
# (i.e. non zero mock 21cm PS) but have no signal from the sampled model, then want a large difference for the
# chi-squared likelihood).
if np.isnan(ModelPS_val) == True:
ModelPS_val = 0.0
if np.isnan(ModelPE_val) == True:
ModelPE_val = 0.0
if np.isnan(MockPS_val) == True:
MockPS_val = 0.0
#total_sum += np.square((MockPS_val - ModelPS_val)/(np.sqrt(ErrorPS_val**2. + (self.ModUncert*ModelPS_val)**2.)))
total_sum += np.square((MockPS_val - ModelPS_val)/(np.sqrt(ErrorPS_val**2. + (self.ModUncert*ModelPS_val)**2. + ModelPE_val**2)))
# New in v1.4
#if self.IncludeLF is True:
if self.IncludeLF:
# At the moment I just put the redshift list by hand, but this part should be modified.
#NUM_OF_REDSHIFTS_FOR_LF = 4
for iz in range(len(self.Redshifts_For_LF)):
# Exclude bright-end (Muv < -20) from Lumnosity function
Muv_i = []
phi_i = []
error_i = []
j = 0
while j < len(self.Muv_values[iz]):
if self.Muv_values[iz][j] > -20. and self.Muv_values[iz][j]!=0.:
Muv_i.append(self.Muv_values[iz][j])
phi_i.append(self.phi_values[iz][j])
error_i.append(self.phi_Error[iz][j])
j = j + 1
Muv_values_estimate0 = np.loadtxt('LF_estimate_%s_%s.txt'%(StringArgument_other,self.Redshifts_For_LF[iz]), usecols=(0,))
log10phi_values_estimate0 = np.loadtxt('LF_estimate_%s_%s.txt'%(StringArgument_other,self.Redshifts_For_LF[iz]), usecols=(1,))
Muv_values_estimate = Muv_values_estimate0[::-1]
log10phi_values_estimate = log10phi_values_estimate0[::-1]
LF_criterion = 1 #LF_criteion == 0: skip this chain.
# check whether Muv does not increase monotonically with halo mass. if not interpolation is not possible.
i_check = 0
while i_check < len(Muv_values_estimate)-1:
if (Muv_values_estimate[i_check] > Muv_values_estimate[i_check+1]):
LF_criterion = 0
#print ("Found Muv list reversed\n")
break
i_check = i_check + 1
if (max(Muv_values_estimate) <= min(self.Muv_values[iz])) or (min(Muv_values_estimate) >= max(self.Muv_values[iz])):
LF_criterion = 0
if (LF_criterion == 0):
total_sum = total_sum + 10000000000.
else:
LFestimate_Spline = interpolate.splrep(Muv_values_estimate, log10phi_values_estimate,s=0)
for ii in range(len(Muv_i)):
Muv_i_val = Muv_i[ii]
log10phi_i_val = interpolate.splev(Muv_i_val,LFestimate_Spline,der=0)
#total_sum = total_sum + np.square(phi_i[ii] - 10**(log10phi_i_val)) / (np.square(error_i[ii]))
chi2_i = np.square(phi_i[ii] - 10**(log10phi_i_val)) / (np.square(error_i[ii]))
if (np.isinf(chi2_i)):
chi2_i = 100000.
total_sum = total_sum + chi2_i
if self.FlagOptions['KEEP_ALL_DATA'] is True:
StoredFileLayout = string.join(StoredFileLayout,separator_column)
StoredFileLayout_Error = string.join(StoredFileLayout_Error,separator_column)
with open('%s/StatisticalData/TotalPSData_%s.txt'%(self.FlagOptions['KEEP_ALL_DATA_FILENAME'],StringArgument_other),'w') as f:
for x in zip(*StoredStatisticalData):
f.write("%s\n"%(StoredFileLayout).format(*x))
with open('%s/StatisticalData_Error/TotalPS_ErrorData_%s.txt'%(self.FlagOptions['KEEP_ALL_DATA_FILENAME'],StringArgument_other),'w') as f:
for x in zip(*StoredStatisticalData_Error):
f.write("%s\n"%(StoredFileLayout_Error).format(*x))
f.close()
if (self.PriorLegend['PlanckPrior'] is True and number_redshifts > 2) or self.PriorLegend['McGreerPrior'] is True or self.PriorLegend['GreigPrior'] is True or self.FlagOptions['KEEP_ALL_DATA'] is True:
z_Hist = np.loadtxt('AveData_%s.txt'%(StringArgument_other), usecols=(0,))
xH_Hist = np.loadtxt('AveData_%s.txt'%(StringArgument_other), usecols=(1,))
# When the light-cone version is set, the values are writted in decreasing order, not increasing order
# Therefore, reverse to be in increasing order (the interpolation/extrapolation is required to be in increasing order)
if self.IncludeLightCone is True:
if z_Hist[0] > z_Hist[-1]:
z_Hist = z_Hist[::-1]
xH_Hist = xH_Hist[::-1]
if (self.FlagOptions['KEEP_ALL_DATA'] is True or self.PriorLegend['PlanckPrior'] is True) and number_redshifts > 2:
# Mean and one sigma errors for the Planck constraints
# The Planck prior is modelled as a Gaussian: tau = 0.058 \pm 0.012 (https://arxiv.org/abs/1605.03507)
PlanckTau_Mean = 0.058
PlanckTau_OneSigma = 0.012
# Simple linear extrapolation of the redshift range provided by the user, to be able to estimate the optical depth
nZinterp = 15
# The minimum of the extrapolation is chosen to 5.9, to correspond to the McGreer et al. prior on the IGM neutral fraction.
# The maximum is chosed to be z = 18., which is arbitrary.
ZExtrap_min = 5.9
ZExtrap_max = 20.0
ZExtrapVals = np.zeros(nZinterp)
XHI_ExtrapVals = np.zeros(nZinterp)
# Perform only a linear interpolation/extrapolation
order = 1
# The linear interpolation/extrapolation function, taking as input the redshifts supplied by the user and the corresponding neutral fractions
# recovered for the specific EoR parameter set
LinearInterpolationFunction = InterpolatedUnivariateSpline(z_Hist, xH_Hist, k=order)
for i in range(nZinterp):
ZExtrapVals[i] = ZExtrap_min + (ZExtrap_max - ZExtrap_min)*float(i)/(nZinterp - 1)
XHI_ExtrapVals[i] = LinearInterpolationFunction(ZExtrapVals[i])
# Ensure that the neutral fraction does not exceed unity, or go negative
if XHI_ExtrapVals[i] > 1.0:
XHI_ExtrapVals[i] = 1.0
if XHI_ExtrapVals[i] < 0.0:
XHI_ExtrapVals[i] = 0.0
# Set up the arguments for calculating the estimate of the optical depth. Once again, performed using command line code.
separator_Planck = " "
seq_Planck = []
for i in range(nZinterp):
seq_Planck.append("%s"%(ZExtrapVals[i]))
seq_Planck.append("%s"%(XHI_ExtrapVals[i]))
StringArgument_Planck = string.join(seq_Planck,separator_Planck)
# Perform the computation of tau
command = './ComputingTau_e %s %s %s'%(Individual_ID,Decimal(repr(params[0])).quantize(SIXPLACES),StringArgument_Planck)
os.system(command)
# Read tau from file
tau_value = np.loadtxt('Tau_e_%s_%s.txt'%(Individual_ID,Decimal(repr(params[0])).quantize(SIXPLACES)), usecols=(0,))
# remove the temporary files
if self.FlagOptions['KEEP_ALL_DATA'] is True:
command = "mv Tau_e_%s_%s.txt %s/TauData/"%(Individual_ID,Decimal(repr(params[0])).quantize(SIXPLACES),self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
else:
command = "rm Tau_e_%s_%s.txt"%(Individual_ID,Decimal(repr(params[0])).quantize(SIXPLACES))
os.system(command)
# As the likelihood is computed in log space, the addition of the prior is added linearly to the existing chi^2 likelihood
if self.PriorLegend['PlanckPrior'] is True:
total_sum = total_sum + np.square( ( PlanckTau_Mean - tau_value )/(PlanckTau_OneSigma) )
#if self.IncludeLightCone is True:
# nf_vals[1] = tau_value
#else:
# # it is len(AllRedshifts) as the indexing begins at zero
# nf_vals[len(AllRedshifts)+2] = tau_value
nf_vals[len(AllRedshifts)+2] = tau_value
if self.PriorLegend['McGreerPrior'] is True:
# Mean and one sigma errors for the McGreer et al. constraints
# Modelled as a flat, unity prior at x_HI <= 0.06, and a one sided Gaussian at x_HI > 0.06 ( Gaussian of mean 0.06 and one sigma of 0.05 )
McGreer_Mean = 0.06
McGreer_OneSigma = 0.05
if McGreer_Redshift in z_Hist:
for i in range(len(z_Hist)):
if z_Hist[i] == McGreer_Redshift:
McGreer_NF = xH_Hist[i]
if McGreer_NF > 1.:
McGreer_NF = 1.
if McGreer_NF < 0.:
McGreer_NF = 0.
# As the likelihood is computed in log space, the addition of the prior is added linearly to the existing chi^2 likelihood
if McGreer_NF <= 0.06:
total_sum = total_sum + 0.0 # Add zero, as we assume flat (unity) probability at x_HI <= 0.06 (as it is a lower limit)
else:
total_sum = total_sum + np.square( ( McGreer_Mean - McGreer_NF )/(McGreer_OneSigma) )
elif number_redshifts > 2:
# Perform only a linear interpolation/extrapolation
order = 1
# The linear interpolation/extrapolation function, taking as input the redshifts supplied by the user and the corresponding neutral fractions
# recovered for the specific EoR parameter set
LinearInterpolationFunction = InterpolatedUnivariateSpline(z_Hist, xH_Hist, k=order)
McGreer_NF = LinearInterpolationFunction(McGreer_Redshift)
if McGreer_NF > 1.:
McGreer_NF = 1.
if McGreer_NF < 0.:
McGreer_NF = 0.
# As the likelihood is computed in log space, the addition of the prior is added linearly to the existing chi^2 likelihood
if McGreer_NF <= 0.06:
total_sum = total_sum + 0.0 # Add zero, as we assume flat (unity) probability at x_HI <= 0.06 (as it is a lower limit)
else:
total_sum = total_sum + np.square( ( McGreer_Mean - McGreer_NF )/(McGreer_OneSigma) )
if self.PriorLegend['GreigPrior'] is True:
# Interpolate the QSO damping wing PDF
spline_QSODampingPDF = interpolate.splrep(self.NFValsQSO,self.PDFValsQSO,s=0)
if QSO_Redshift in z_Hist:
for i in range(len(z_Hist)):
if z_Hist[i] == QSO_Redshift:
NF_QSO = xH_Hist[i]
# Ensure that the neutral fraction does not exceed unity, or go negative
if NF_QSO > 1.0:
NF_QSO = 1.0
if NF_QSO < 0.0:
NF_QSO = 0.0
QSO_Prob = interpolate.splev(NF_QSO,spline_QSODampingPDF,der=0)
# Interpolating the PDF from the QSO damping wing might cause small negative values at the edges (i.e. x_HI ~ 0 or ~1)
# In case it is zero, or negative, set it to a very small non zero number (we take the log of this value, it cannot be zero)
if QSO_Prob <= 0.0:
QSO_Prob = 0.000006
# We work with the log-likelihood, therefore convert the IGM Damping wing PDF to log space
QSO_Prob = -2.*np.log(QSO_Prob)
total_sum = total_sum + QSO_Prob
elif number_redshifts > 2:
order = 1
# Check the redshift range input by the user to determine whether to interpolate or extrapolate the IGM neutral fraction to the QSO redshift
if QSO_Redshift < np.amin(self.Redshift):
# The QSO redshift is outside the range set by the user. Need to extrapolate the reionisation history to obtain the neutral fraction at the QSO redshift
# The linear interpolation/extrapolation function, taking as input the redshifts supplied by the user and the corresponding neutral fractions
# recovered for the specific EoR parameter set
LinearInterpolationFunction = InterpolatedUnivariateSpline(self.Redshift, nf_vals, k=order)
NF_QSO = LinearInterpolationFunction(QSO_Redshift)
else:
# The QSO redshift is within the range set by the user. Can interpolate the reionisation history to obtain the neutral fraction at the QSO redshift
spline_reionisationhistory = interpolate.splrep(self.Redshift,nf_vals,s=0)
NF_QSO = interpolate.splev(QSO_Redshift,spline_reionisationhistory,der=0)
# Ensure that the neutral fraction does not exceed unity, or go negative
if NF_QSO > 1.0:
NF_QSO = 1.0
if NF_QSO < 0.0:
NF_QSO = 0.0
QSO_Prob = interpolate.splev(NF_QSO,spline_QSODampingPDF,der=0)
# Interpolating the PDF from the QSO damping wing might cause small negative values at the edges (i.e. x_HI ~ 0 or ~1)
# In case it is zero, or negative, set it to a very small non zero number (we take the log of this value, it cannot be zero)
if QSO_Prob <= 0.0:
QSO_Prob = 0.000006
# We work with the log-likelihood, therefore convert the IGM Damping wing PDF to log space
QSO_Prob = -2.*np.log(QSO_Prob)
total_sum = total_sum + QSO_Prob
if self.IncludeLightCone is True:
if self.FlagOptions['KEEP_GLOBAL_DATA'] is True:
LightconePSFilename = 'delTps_lightcone_filenames_%s.txt'%(StringArgument_other)
filename = open('%s'%(LightconePSFilename), 'r')
LightconePS = [line.rstrip('\n') for line in filename]
if self.FlagOptions['KEEP_ALL_DATA'] is True:
command = "mv %s %s/StatisticalData/"%(LightconePSFilename,self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
else:
command = "rm %s"%(LightconePSFilename)
os.system(command)
# Removal of the individual light cone files is done here as in principle these can exceed the number of mock observations provided
for i in range(len(LightconePS)):
command = "rm %s"%(LightconePS[i])
os.system(command)
if self.FlagOptions['KEEP_ALL_DATA'] is True:
for j in range(len(self.Redshifts_For_LF)):
command = "mv LF_estimate_%s_%s.txt %s/LFData/"%(StringArgument_other,self.Redshifts_For_LF[j],self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
os.system(command)
else:
for j in range(len(self.Redshifts_For_LF)):
command = "rm LF_estimate_%s_%s.txt"%(StringArgument_other,self.Redshifts_For_LF[j])
os.system(command)
else:
if not self.IncludeLF is 2:
command = "rm delTps_estimate_%s_*"%(StringArgument_other)
os.system(command)
command = "rm NeutralFraction_%s_*"%(StringArgument_other)
os.system(command)
if self.FlagOptions['KEEP_ALL_DATA'] is True:
for j in range(len(self.Redshifts_For_LF)):
command = "mv LF_estimate_%s_%s.txt %s/LFData/"%(StringArgument_other,self.Redshifts_For_LF[j],self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
os.system(command)
else:
for j in range(len(self.Redshifts_For_LF)):
command = "rm LF_estimate_%s_%s.txt"%(StringArgument_other,self.Redshifts_For_LF[j])
os.system(command)
if OutputGlobalAve == 1:
if self.FlagOptions['KEEP_ALL_DATA'] is True:
command = "mv AveData_%s.txt %s/AveData/"%(StringArgument_other,self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
else:
command = "rm AveData_%s.txt"%(StringArgument_other)
os.system(command)
if self.FlagOptions['KEEP_ALL_DATA'] is True:
command = "mv Walker_%s.txt %s/WalkerData"%(StringArgument_other,self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
os.system(command)
command = "mv WalkerCosmology_%s.txt %s/WalkerData"%(StringArgument_other,self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
os.system(command)
else:
command = "rm Walker_%s.txt"%(StringArgument_other)
os.system(command)
command = "rm WalkerCosmology_%s.txt"%(StringArgument_other)
os.system(command)
if(np.isinf(total_sum)):
total_sum = 10000000000.
return -0.5*total_sum,nf_vals
def computeLikelihood(self, ctx):
return self.Likelihood(ctx)
    def setup(self):
        """One-time initialisation hook called by the CosmoHammer framework
        before sampling starts; this module only announces itself (all real
        state is prepared elsewhere, presumably in ``__init__`` — confirm).
        """
        print "Likelihood Fitting for 21cm Fast"
|
BradGreigREPO_NAME21CMMCPATH_START.@21CMMC_extracted@21CMMC-master@21CMMC_SourceCode@Programs@CosmoHammer_21CMMC@likelihood@module@Likelihood21cmFast.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.