metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "linalg.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/backend/torch/linalg.py",
"type": "Python"
}
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
    """Return the lower-triangular Cholesky factor of ``x``."""
    factor = torch.linalg.cholesky(x)
    return factor
def det(x):
    """Compute the determinant of ``x``."""
    result = torch.det(x)
    return result
def eig(x):
    """Eigen-decomposition of a general square matrix.

    Returns the ``(eigenvalues, eigenvectors)`` pair produced by
    ``torch.linalg.eig`` (complex-valued).
    """
    decomposition = torch.linalg.eig(x)
    return decomposition
def eigh(x):
    """Eigen-decomposition of a symmetric/Hermitian matrix."""
    decomposition = torch.linalg.eigh(x)
    return decomposition
def inv(x):
    """Return the matrix inverse of ``x``."""
    inverse = torch.linalg.inv(x)
    return inverse
def lu_factor(x):
    """LU factorization of ``x``.

    Returns the packed LU matrix and pivots. ``torch.linalg.lu_factor``
    reports 1-based (LAPACK-style) pivots; they are shifted to 0-based
    indexing here to match the keras convention.
    """
    lu, pivots = torch.linalg.lu_factor(x)
    return lu, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix/vector norm, upcasting integer inputs to a float dtype first."""
    x = convert_to_tensor(x)
    # int64 falls back to the configured default float; everything else is
    # promoted through the backend's float promotion rules.
    if standardize_dtype(x.dtype) == "int64":
        target_dtype = config.floatx()
    else:
        target_dtype = dtypes.result_type(x.dtype, float)
    return torch.linalg.norm(
        cast(x, target_dtype), ord=ord, dim=axis, keepdim=keepdims
    )
def qr(x, mode="reduced"):
    """QR decomposition; only 'reduced' and 'complete' modes are accepted."""
    if mode not in {"reduced", "complete"}:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    q_and_r = torch.linalg.qr(x, mode=mode)
    return q_and_r
def solve(a, b):
    """Solve the linear system ``a @ x = b`` for ``x``."""
    solution = torch.linalg.solve(a, b)
    return solution
def solve_triangular(a, b, lower=False):
    """Solve a triangular system ``a @ x = b``.

    ``b`` may be a vector (one dimension fewer than ``a``); in that case it is
    temporarily promoted to a column matrix and the extra axis is dropped from
    the result.
    """
    vector_rhs = b.ndim == a.ndim - 1
    if vector_rhs:
        b = torch.unsqueeze(b, axis=-1)
    out = torch.linalg.solve_triangular(a, b, upper=not lower)
    return out.squeeze(axis=-1) if vector_rhs else out
def svd(x, full_matrices=True, compute_uv=True):
    """SVD of ``x``; returns only the singular values when ``compute_uv`` is False."""
    if compute_uv:
        return torch.linalg.svd(x, full_matrices=full_matrices)
    return torch.linalg.svdvals(x)
def lstsq(a, b, rcond=None):
    """Least-squares solution of ``a @ x = b`` (solution tensor only)."""
    a = convert_to_tensor(a)
    b = convert_to_tensor(b)
    solution, *_ = torch.linalg.lstsq(a, b, rcond=rcond)
    return solution
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@backend@torch@linalg.py@.PATH_END.py
|
{
"filename": "fitsheaders.py",
"repo_name": "chandra-marx/marxs",
"repo_path": "marxs_extracted/marxs-main/marxs/missions/chandra/fitsheaders.py",
"type": "Python"
}
|
# Licensed under GPL version 3 - see LICENSE.rst
'''
See `ASC FITS File Designers' Guide ASC-FITS-2.1.0 <cxc.harvard.edu/contrib/arots/fits/ascfits.ps>`.
'''
from astropy import time
import astropy.units as u
from ... import __version__
from .data import TLMINMAX, PIXSIZE, NOMINAL_FOCALLENGTH, ODET
def update_header(header, h):
    """Copy ``(keyword, value)`` pairs into ``header`` without overwriting.

    Keywords already present in ``header`` keep their existing value.
    """
    for entry in h:
        keyword = entry[0]
        if keyword not in header:
            header[keyword] = entry[1]
def complete_CC(header, content, hduclass):
    '''Configuration Control Component

    Fills the ASC configuration-control keywords; HDUCLASn keywords are
    taken pairwise from ``hduclass`` (up to four entries).
    '''
    keywords = [
        ('ORIGIN', 'ASC'),
        ("HDUDOC", "ASC-FITS-2.0: Rots, McDowell: ASC FITS File Designers Guide"),
        ("CONTENT", content),
        ("HDUNAME", header['EXTNAME']),
    ]
    keywords.extend(
        zip(['HDUCLASS', 'HDUCLAS1', 'HDUCLAS2', 'HDUCLAS3'], hduclass)
    )
    update_header(header, keywords)
def complete_T(header):
    '''Timing component

    Fill the ASC timing keywords without overwriting values already present,
    then (re)compute TSTART/TSTOP/OBS-MJD from the DATE-OBS actually found in
    the header.

    Parameters
    ----------
    header : dict-like
        Header to complete. Entries are indexed with ``[0]`` (e.g.
        ``header['EXPOSURE'][0]``), so they are presumably
        ``(value, comment)`` pairs — confirm against callers.
    '''
    # Default observation start: "now", formatted as a FITS time string.
    now = time.Time.now()
    now.format = 'fits'
    # Default observation end: start plus the exposure time in seconds.
    nowexp = now + header['EXPOSURE'][0] * u.s
    h = [
        ('DATE', (now.value[:23], 'Date and time of file creation {0}'.format(now.value[23:]))),
        ('DATE-OBS', (now.value[:23], 'TT with clock correction if CLOCKAPP')),
        ('DATE-END', (nowexp.value[:23], 'TT with clock correction if CLOCKAPP')),
        ("TIMESYS", ("TT", "AXAF time will be Terrestrial Time")),
        ("MJDREF", (50814, "MJD of clock start")),
        ("TIMEZERO", (0, "Clock Correction")),
        ("TIMEUNIT", 's'),
        ("BTIMNULL", (0., "Basic Time offset (s)")),
        ("BTIMRATE", (2.5625000912249E-01, "Basic Time clock rate (s / VCDUcount)")),
        ("BTIMDRFT", (2.1806598193841E-17, "Basic Time clock drift (s / VCDUcount^2)")),
        ("BTIMCORR", (0.0000000000000E+00, "Correction applied to Basic Time rate (s)")),
        ("TIMEREF", ("LOCAL", "Time is local for data")),
        ("TASSIGN", ("SATELLITE", "Source of time assignment")),
        ("CLOCKAPP", (True, "Clock correction applied")),
        ("TIERRELA", (1e-9, "Short term clock stability")),
        ("TIERABSO", (1e-4, "Absolute precision of clock correction")),
        ("TIMVERSN", ("ASC-FITS-2.1", "AXAF Fits design document")),
        ("TIMEPIXR", (0., "Time stamp refers to start of bin")),
        ("TIMEDEL", (3.241, "Time resolution of data in seconds")),
    ]
    # update_header only fills in keywords that are not already set.
    update_header(header, h)
    # The following keywords depend on the DATE-OBS found in the header, which may differ from
    # the number calculated above, if it was set to a specific date previously.
    tstart = time.Time(header['DATE-OBS'][0], format='fits')
    tstart.format = 'cxcsec'
    # TSTART/TSTOP are always overwritten so they stay consistent with DATE-OBS.
    header["TSTART"] = (tstart.value, "As in the TIME column: raw space craft clock;")
    header['TSTOP'] = ((tstart + header['EXPOSURE'][0] * u.s).value, " add TIMEZERO and MJDREF for absolute TT")
    header['OBS-MJD'] = tstart.mjd
def complete_O(header):
    '''Observation info component

    Fills observation-level keywords (mission, grating, exposure-derived
    times, focal length) without overwriting values already present.
    '''
    exposure = header['EXPOSURE'][0]
    keywords = [
        ("MISSION", ("AXAF", "Mission is AXAF")),
        ("TELESCOP", ("CHANDRA", "TELESCOPE is Chandra")),
        ("GRATING", ("NONE", "Grating")),
        ('DATACLASS', ('SIMULATED', 'see http://marxs.rtfd.org')),
        ('ONTIME', (exposure, 'Sum of GTIs')),
        ('DTCOR', (1., 'Dead Time Correction')),
        ('LIVETIME', (exposure, 'Ontime multiplied by DTCOR')),
        ('OBSERVER', ('MARXS', 'This is a simulation.')),
        ('FOC_LEN', (NOMINAL_FOCALLENGTH, 'Assumed focal length')),
    ]
    update_header(header, keywords)
# CIAO data model (DM) keyword/value pairs written verbatim into event-file
# headers by add_evt_column_header; the MTYPEn/MFORMn pairs tell CIAO which
# columns belong together as one vector quantity (e.g. "chipx,chipy").
DMKEYWORDS = [('MTYPE1', 'chip'), ('MFORM1', 'chipx,chipy'),
              ('MTYPE2', 'tdet'), ('MFORM2', 'tdetx,tdety'),
              ('MTYPE3', 'det'), ('MFORM3', 'detx,dety'),
              ('MTYPE4', 'sky'), ('MFORM4', 'x,y'),
              ('MFORM5', 'RA,DEC'), ('MTYPE5', 'EQPOS')]
'''CIAO data model (DM) keywords that group columns together'''
def add_evt_column_header(header, data):
    '''Add CIAO keywords to header of an eventfile.

    Parameters
    ----------
    header : dict-like
        Header (e.g. ``photons.meta``) that is modified in place.
    data : `astropy.table.Table`
        Event table; the column order determines the numbered keywords
        (TLMINn, TCTYPn, ...).
    '''
    # Clean out column related keywords that may not be valid any longer.
    # Iterate over a snapshot of the keys: ``dict.iterkeys`` no longer exists
    # on Python 3, and deleting entries while iterating is an error anyway.
    for k in list(header.keys()):
        if k[:5] in ['TCTYP', 'TCRVL', 'TCDLT', 'TCRPX', 'TLMIN', 'TLMAX']:
            del header[k]
    instr = header['INSTRUME'][0]
    if instr not in TLMINMAX:
        raise KeyError('TLMIN and TLMAX not specified for detector {0}'.format(instr))
    colnamesup = [c.upper() for c in data.colnames]
    if len(set(data.colnames)) != len(set(colnamesup)):
        raise KeyError('Fits files are case insensitive. Column names in data must be unique if converted to upper case.')
    tl = TLMINMAX[instr]
    odet = ODET[instr]
    # TLMINn/TLMAXn are 1-based and follow the column order of the table.
    for i, k in enumerate(data.colnames):
        if k.upper() in tl:
            header['TLMIN{0}'.format(i + 1)] = tl[k.upper()][0]
            header['TLMAX{0}'.format(i + 1)] = tl[k.upper()][1]
    for k in DMKEYWORDS:
        header[k[0]] = k[1]
    # Turn X,Y into a WCS that e.g. ds9 can interpret
    indx = colnamesup.index('X') + 1
    header['TCTYP{0}'.format(indx)] = 'RA---TAN'
    header['TCRVL{0}'.format(indx)] = header['RA_PNT']
    header['TCDLT{0}'.format(indx)] = -PIXSIZE[instr]  # - because RA increases to left
    header['TCRPX{0}'.format(indx)] = odet[0]
    indy = colnamesup.index('Y') + 1
    # FITS/WCS ctype codes are 8 characters: 'DEC--TAN' (two dashes).
    # The previous value 'DEC---TAN' was 9 characters and not a valid
    # projection code, so tools could not interpret the declination axis.
    header['TCTYP{0}'.format(indy)] = 'DEC--TAN'
    header['TCRVL{0}'.format(indy)] = header['DEC_PNT']
    header['TCDLT{0}'.format(indy)] = PIXSIZE[instr]
    header['TCRPX{0}'.format(indy)] = odet[1]
    header['RADECSYS'] = ('ICRS', 'WCS system')
def complete_header(header, data=None, content='UNKNOWN', hduclass=('UNKNOWN',)):
    '''Complete fits header for Chandra fits files.

    This method adds the common keywords that are required for all CXC fits
    files. With few exceptions, the methods will not overwrite existing
    keywords, but will only fill in the value of required keywords if they
    have not been set before. There are certain keywords that cannot be
    generated without more information (e.g. the name of the detector used).
    These keywords must be set outside of these routines.

    The exception where keywords are overwritten is timing information that is
    contained in redundant keywords. This method reads "EXPOSURE" and
    "DATE-OBS" from the header and calculates "TSTART", "TSTOP" and "OBS-MJD"
    from those values.

    Parameters
    ----------
    header : a dictionary-like object
        In most cases, this will be ``photons.meta``.
    data : `astropy.table.Table` or ``None``
        For event tables the header includes keywords that depend on the order
        of the columns in the data (e.g. column 5 and 6 define the WCS). Pass
        in the full table as data for those cases.
    content : string
        Content keyword as specified by ASC.
    hduclass : sequence of strings
        The sequence can contain 1-4 elements, depending on the data product.
        See appendix A1 in the
        `ASC FITS File Designers' Guide ASC-FITS-2.1.0 <cxc.harvard.edu/contrib/arots/fits/ascfits.ps>`.

    References
    ----------
    `ASC FITS File Designers' Guide ASC-FITS-2.1.0 <cxc.harvard.edu/contrib/arots/fits/ascfits.ps>`.
    '''
    # Bug fix: the previous defaults were swapped. ``content`` defaulted to a
    # list, which crashed on ``content.upper()`` below, while ``hduclass``
    # defaulted to a plain string, which complete_CC would zip into single
    # characters ('U', 'N', ...). A string/tuple pair matches the documented
    # types (and the tuple avoids a mutable default argument).
    if content.upper().startswith('EVT'):
        add_evt_column_header(header, data)
    complete_CC(header, content=content, hduclass=hduclass)
    complete_T(header)
    complete_O(header)
def sort_columns(photons):
    '''Clean up the order of column names

    Parameters
    ----------
    photons : `astropy.table.Table`
        Event list.

    Returns
    -------
    photons : `astropy.table.Table`
        Event list with columns sorted such that pairs of columns that describe
        coordinates appear together (e.g. "detx" and "dety"). All other columns
        are sorted alphabetically.
    '''
    colnames = photons.colnames
    # find those pairs that end on x and y like "tdetx, tdety"
    end_x = set([c[:-1] for c in colnames if c[-1] == 'x'])
    end_y = set([c[:-1] for c in colnames if c[-1] == 'y'])
    endxy = end_x.intersection(end_y)
    # columns that are not x/y pairs. Careful: Could be e.g. "energy" without "energx"
    othercol = set(colnames) - set([c + 'x' for c in endxy]) - set([c + 'y' for c in endxy])
    # Since we are using sets, the order is now random, but CIAO expects certain columns in order
    # When comparing the names, we have to take care of upper case / lower case.
    # That does not matter when we write it to a fits file, but as long as its an astropy Table
    # column names are case sensitive.
    endxy = list(endxy)
    endxy_l = [c.lower() for c in endxy]
    expected_order = ['chip', 'tdet', 'det', '']
    endxy_ordered = []
    for c in expected_order:
        if c in endxy_l:
            index = endxy_l.index(c)
            endxy_ordered.append(endxy.pop(index))
            endxy_l.pop(index)  # Keep endxy_l to have the same order as endxy
    endxy_ordered.extend(endxy)  # Add additional cols not part of the required order.
    ordered_cols = []
    for c in endxy_ordered:
        ordered_cols.extend([c + 'x', c + 'y'])
    # Other pairs we want ordered if present
    pairs = [('ra', 'dec'), ('RA', 'DEC'), ('ra', 'de'), ('RA', 'DE')]
    for p in pairs:
        if p[0] in othercol and p[1] in othercol:
            othercol -= set(p)
            ordered_cols.extend(p)
    # Now put the rest in repeatable order
    othercol = list(othercol)
    othercol.sort()
    ordered_cols.extend(othercol)
    # Bug fix: a Table is not callable — ``photons(ordered_cols)`` raised
    # TypeError. Indexing with a list of column names returns the sub-table
    # with the columns in the requested order.
    return photons[ordered_cols]
|
chandra-marxREPO_NAMEmarxsPATH_START.@marxs_extracted@marxs-main@marxs@missions@chandra@fitsheaders.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/colors/__init__.py",
"type": "Python"
}
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@colors@__init__.py@.PATH_END.py
|
|
{
"filename": "groupby.py",
"repo_name": "mwaskom/seaborn",
"repo_path": "seaborn_extracted/seaborn-master/seaborn/_core/groupby.py",
"type": "Python"
}
|
"""Simplified split-apply-combine paradigm on dataframes for internal use."""
from __future__ import annotations
from typing import cast, Iterable
import pandas as pd
from seaborn._core.rules import categorical_order
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Callable
from pandas import DataFrame, MultiIndex, Index
class GroupBy:
    """
    Interface for Pandas GroupBy operations allowing specified group order.

    Writing our own class to do this has a few advantages:

    - It constrains the interface between Plot and Stat/Move objects
    - It allows control over the row order of the GroupBy result, which is
      important when using in the context of some Move operations (dodge, stack, ...)
    - It simplifies some complexities regarding the return type and Index contents
      one encounters with Pandas, especially for DataFrame -> DataFrame applies
    - It increases future flexibility regarding alternate DataFrame libraries

    """
    def __init__(self, order: list[str] | dict[str, list | None]):
        """
        Initialize the GroupBy from grouping variables and optional level orders.

        Parameters
        ----------
        order
            List of variable names or dict mapping names to desired level orders.
            Level order values can be None to use default ordering rules. The
            variables can include names that are not expected to appear in the
            data; these will be dropped before the groups are defined.

        """
        if not order:
            raise ValueError("GroupBy requires at least one grouping variable")

        if isinstance(order, list):
            # A bare list means "use default ordering rules for every variable".
            order = dict.fromkeys(order)
        self.order = order

    def _get_groups(
        self, data: DataFrame
    ) -> tuple[str | list[str], Index | MultiIndex]:
        """Return index with Cartesian product of ordered grouping variable levels."""
        levels = {}
        for name, requested in self.order.items():
            if name not in data:
                # Grouping variables absent from the data are silently dropped.
                continue
            levels[name] = (
                categorical_order(data[name]) if requested is None else requested
            )

        if len(levels) == 0:
            return [], pd.Index([])
        if len(levels) == 1:
            (name,) = levels
            return name, pd.Index(levels[name], name=name)
        names = list(levels)
        return names, pd.MultiIndex.from_product(levels.values(), names=names)

    def _reorder_columns(self, res, data):
        """Reorder result columns to match original order with new columns appended."""
        ordered = [c for c in data if c in res]
        ordered += [c for c in res if c not in data]
        return res.reindex(columns=pd.Index(ordered))

    def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:
        """
        Reduce each group to a single row in the output.

        The output will have a row for each unique combination of the grouping
        variable levels with null values for the aggregated variable(s) where
        those combinations do not appear in the dataset.

        """
        grouper, groups = self._get_groups(data)

        if not grouper:
            # We will need to see whether there are valid usecases that end up here
            raise ValueError("No grouping variables are present in dataframe")

        grouped = data.groupby(grouper, sort=False, observed=False)
        # Reindexing against the full Cartesian product inserts missing
        # combinations as null rows and enforces the requested level order.
        aggregated = grouped.agg(*args, **kwargs).reindex(groups).reset_index()
        return self._reorder_columns(aggregated, data)

    def apply(
        self, data: DataFrame, func: Callable[..., DataFrame],
        *args, **kwargs,
    ) -> DataFrame:
        """Apply a DataFrame -> DataFrame mapping to each group."""
        grouper, groups = self._get_groups(data)

        if not grouper:
            return self._reorder_columns(func(data, *args, **kwargs), data)

        pieces = {
            key: func(group_df, *args, **kwargs)
            for key, group_df in data.groupby(grouper, sort=False, observed=False)
        }

        chunks = []
        for key in groups:
            if key not in pieces:
                continue
            if isinstance(grouper, list):
                # Implies that we had a MultiIndex so key is iterable
                ids = dict(zip(grouper, cast(Iterable, key)))
            else:
                ids = {grouper: key}
            # Re-attach the group labels so they survive the concat below.
            chunks.append(pieces[key].assign(**ids))

        combined = pd.concat(chunks, ignore_index=True)
        return self._reorder_columns(combined, data)
|
mwaskomREPO_NAMEseabornPATH_START.@seaborn_extracted@seaborn-master@seaborn@_core@groupby.py@.PATH_END.py
|
{
"filename": "_ticklabeloverflow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/barpolar/marker/colorbar/_ticklabeloverflow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabeloverflowValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``barpolar.marker.colorbar.ticklabeloverflow``."""

    def __init__(
        self,
        plotly_name="ticklabeloverflow",
        parent_name="barpolar.marker.colorbar",
        **kwargs,
    ):
        # Fill in the schema defaults unless the caller overrides them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("values", ["allow", "hide past div", "hide past domain"])
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@barpolar@marker@colorbar@_ticklabeloverflow.py@.PATH_END.py
|
{
"filename": "extract_region.py",
"repo_name": "CIRADA-Tools/RM-Tools",
"repo_path": "RM-Tools_extracted/RM-Tools-master/RMtools_3D/extract_region.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 10:44:28 2019
Extract subregion of a FITS file, with option to extract a plane.
There are many cutout tools like it, but this one is mine.
@author: cvaneck
May 2019
"""
import argparse
import os
import astropy.io.fits as pf
from astropy.wcs import WCS
def main():
    """Extract a region ('cutout') from a FITS file and save it to a new file.

    Command line options allow the user to select the region in either pixel
    or sky coordinates (sky coordinates are not yet implemented).
    """
    descStr = """
    Cut out a region in a fits file, writing it to a new file.
    Selecting -1 for any coordinate parameter will cause it to be set as the
    maximum/minimum value allowed.
    Default is for box to be defined in pixel coordinates, in the form
    xmin xmax ymin ymax.
    Pixel selection is inclusive: all corner pixels will be present in output.
    Pixel counting starts at 1 (FITS convention).
    Sky coordinates not guaranteed to give correct size box if projection is
    highly nonlinear.
    If a third non-degenerate axis is present (as either axis 3 or 4), the
    -z flag will allow selection of subranges along this axis."""

    parser = argparse.ArgumentParser(
        description=descStr, formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument(
        "infile", metavar="infile.fits", help="FITS file containing data."
    )
    parser.add_argument("outfile", metavar="outfile.fits", help="Output fits file")
    parser.add_argument(
        "box",
        metavar="xmin xmax ymin ymax",
        nargs=4,
        type=float,
        help="Box dimensions (in pixels unless -s set)",
    )
    parser.add_argument(
        "-s",
        dest="sky",
        action="store_true",
        help="Box defined in sky coordinates (in decimal degrees if set, otherwise pixels).",
    )
    parser.add_argument(
        "-c",
        dest="center",
        action="store_true",
        help="If true, define box as x_center x_halfwidth y_center y_halfwidth",
    )
    parser.add_argument(
        "-z",
        dest="zlim",
        metavar="axis3",
        nargs=2,
        default=None,
        type=int,
        help="3rd axis limits (only pixel coords supported)",
    )
    parser.add_argument(
        "-o",
        dest="overwrite",
        action="store_true",
        help="Overwrite existing file if present?",
    )
    args = parser.parse_args()

    if not os.path.exists(args.infile):
        raise Exception("Input file not found!")
    if os.path.exists(args.outfile) and not args.overwrite:
        raise Exception("Outfile file already exists! Add -o flag to overwrite.")

    # Convert center/halfwidth form into [xmin, xmax, ymin, ymax] if requested.
    if not args.center:
        box = args.box
    else:
        box = [
            args.box[0] - args.box[1],
            args.box[0] + args.box[1],
            args.box[2] - args.box[3],
            args.box[2] + args.box[3],
        ]
    if box[0] > box[1]:
        raise Exception("Box dimensions incorrect! x_max < x_min!")
    if box[2] > box[3]:
        raise Exception("Box dimensions incorrect! y_max < y_min!")

    # The context manager guarantees the (memmapped) file is closed even on error.
    with pf.open(args.infile, memmap=True) as hdu:
        header = hdu[0].header
        if args.sky:
            raise Exception("Not yet implemented. Soon!")
            # NOTE: the code below is unreachable until sky-coordinate
            # support is implemented; kept as the intended implementation.
            csys = WCS(header, naxis=2)
            pixbox = [-1, -1, -1, -1]
            pix = csys.all_world2pix(box[0], (box[2] + box[3]) / 2, 1)
            pixbox[0] = float(pix[0])
            pix = csys.all_world2pix(box[1], (box[2] + box[3]) / 2, 1)
            pixbox[1] = float(pix[0])
            if pixbox[1] < pixbox[0]:
                pixbox[0], pixbox[1] = pixbox[1], pixbox[0]
            pix = csys.all_world2pix((box[0] + box[1]) / 2, box[2], 1)
            pixbox[2] = float(pix[1])
            pix = csys.all_world2pix((box[0] + box[1]) / 2, box[3], 1)
            pixbox[3] = float(pix[1])
            box = [round(x) for x in pixbox]
        else:
            box = [int(x) for x in box]

        # Work out which axis (if any) carries the cube/spectral dimension.
        # Raising here (instead of later) avoids a NameError on an unbound
        # cube_axis for unsupported dimensionalities.
        naxis = header["NAXIS"]
        if naxis == 2:
            cube_axis = 2
        elif naxis == 3:
            cube_axis = 3
        elif naxis == 4:
            # A degenerate 3rd axis means the cube runs along axis 4.
            cube_axis = 3 if header["NAXIS3"] != 1 else 4
        else:
            raise Exception("Number of dimensions is some nonsupported value!")

        if args.zlim is not None:
            zlim = args.zlim
        elif cube_axis > 2:
            zlim = [1, header["NAXIS" + str(cube_axis)]]

        # Clamp the box to the image; -1 means "use the full extent".
        if box[0] < 1:
            box[0] = 1
        if (box[1] == -1) or (box[1] > header["NAXIS1"]):
            box[1] = header["NAXIS1"]
        if box[2] < 1:
            box[2] = 1
        if (box[3] == -1) or (box[3] > header["NAXIS2"]):
            box[3] = header["NAXIS2"]

        # Extract sub-region. FITS pixels are 1-based and the selection is
        # inclusive, so the upper slice bound is box[n] itself.
        if naxis == 4:
            if cube_axis == 3:
                data = hdu[0].data[
                    :, zlim[0] - 1 : zlim[1], box[2] - 1 : box[3], box[0] - 1 : box[1]
                ]
            else:
                data = hdu[0].data[
                    zlim[0] - 1 : zlim[1], :, box[2] - 1 : box[3], box[0] - 1 : box[1]
                ]
        elif naxis == 3:
            data = hdu[0].data[
                zlim[0] - 1 : zlim[1], box[2] - 1 : box[3], box[0] - 1 : box[1]
            ]
        else:
            # Fixed off-by-one: the upper bounds were previously box[3]-1 and
            # box[1]-1, which silently dropped the last row and column in 2D,
            # contradicting the inclusive convention used by the 3D/4D paths.
            data = hdu[0].data[box[2] - 1 : box[3], box[0] - 1 : box[1]]

        # Update header geometry to match the cutout.
        new_header = header.copy()
        new_header["NAXIS1"] = box[1] - box[0] + 1
        new_header["NAXIS2"] = box[3] - box[2] + 1
        if args.zlim is not None and cube_axis > 2:
            # Guarded with cube_axis > 2 so that a stray -z flag on a 2D image
            # cannot clobber NAXIS2/CRPIX2.
            new_header["NAXIS" + str(cube_axis)] = zlim[1] - zlim[0] + 1
            new_header["CRPIX" + str(cube_axis)] = (
                header["CRPIX" + str(cube_axis)] - zlim[0] + 1
            )
        new_header["CRPIX1"] = header["CRPIX1"] - box[0] + 1
        new_header["CRPIX2"] = header["CRPIX2"] - box[2] + 1

        pf.writeto(args.outfile, data, new_header, overwrite=args.overwrite)
if __name__ == "__main__":
main()
|
CIRADA-ToolsREPO_NAMERM-ToolsPATH_START.@RM-Tools_extracted@RM-Tools-master@RMtools_3D@extract_region.py@.PATH_END.py
|
{
"filename": "xml.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/io/formats/xml.py",
"type": "Python"
}
|
"""
:mod:`pandas.io.formats.xml` is a module for formatting data in XML.
"""
from __future__ import annotations
import codecs
import io
from typing import (
TYPE_CHECKING,
Any,
final,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.missing import isna
from pandas.core.shared_docs import _shared_docs
from pandas.io.common import get_handle
from pandas.io.xml import get_data_from_filepath
if TYPE_CHECKING:
from pandas._typing import (
CompressionOptions,
FilePath,
ReadBuffer,
StorageOptions,
WriteBuffer,
)
from pandas import DataFrame
@doc(
    storage_options=_shared_docs["storage_options"],
    compression_options=_shared_docs["compression_options"] % "path_or_buffer",
)
class _BaseXMLFormatter:
    """
    Subclass for formatting data in XML.

    Parameters
    ----------
    frame : DataFrame
        The DataFrame to render as XML.
    path_or_buffer : str or file-like
        This can be either a string of raw XML, a valid URL,
        file or file-like object.
    index : bool
        Whether to include index in xml document.
    root_name : str
        Name for root of xml document. Default is 'data'.
    row_name : str
        Name for row elements of xml document. Default is 'row'.
    na_rep : str
        Missing data representation.
    attr_cols : list
        List of columns to write as attributes in row element.
    elem_cols : list
        List of columns to write as children in row element.
    namespaces : dict
        The namespaces to define in XML document as dicts with key
        being namespace and value the URI.
    prefix : str
        The prefix for each element in XML document including root.
    encoding : str
        Encoding of xml object or document.
    xml_declaration : bool
        Whether to include xml declaration at top line item in xml.
    pretty_print : bool
        Whether to write xml document with line breaks and indentation.
    stylesheet : str or file-like
        A URL, file, file-like object, or a raw string containing XSLT.
    {compression_options}

        .. versionchanged:: 1.4.0 Zstandard support.

    {storage_options}

    See also
    --------
    pandas.io.formats.xml.EtreeXMLFormatter
    pandas.io.formats.xml.LxmlXMLFormatter
    """

    def __init__(
        self,
        frame: DataFrame,
        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        index: bool = True,
        root_name: str | None = "data",
        row_name: str | None = "row",
        na_rep: str | None = None,
        attr_cols: list[str] | None = None,
        elem_cols: list[str] | None = None,
        namespaces: dict[str | None, str] | None = None,
        prefix: str | None = None,
        encoding: str = "utf-8",
        xml_declaration: bool | None = True,
        pretty_print: bool | None = True,
        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions | None = None,
    ) -> None:
        self.frame = frame
        self.path_or_buffer = path_or_buffer
        self.index = index
        self.root_name = root_name
        self.row_name = row_name
        self.na_rep = na_rep
        self.attr_cols = attr_cols
        self.elem_cols = elem_cols
        self.namespaces = namespaces
        self.prefix = prefix
        self.encoding = encoding
        self.xml_declaration = xml_declaration
        self.pretty_print = pretty_print
        self.stylesheet = stylesheet
        self.compression: CompressionOptions = compression
        self.storage_options = storage_options

        # Snapshot the original column labels before the index is merged in;
        # _handle_indexes uses this to tell index levels apart from data columns.
        self.orig_cols = self.frame.columns.tolist()
        self.frame_dicts = self._process_dataframe()

        # Validation and prefix resolution happen at construction time so
        # errors surface before any output is written.
        self._validate_columns()
        self._validate_encoding()
        self.prefix_uri = self._get_prefix_uri()
        self._handle_indexes()

    def _build_tree(self) -> bytes:
        """
        Build tree from data.

        This method initializes the root and builds attributes and elements
        with optional namespaces.
        """
        raise AbstractMethodError(self)

    @final
    def _validate_columns(self) -> None:
        """
        Validate elem_cols and attr_cols.

        This method will check if columns is list-like.

        Raises
        ------
        TypeError
            * If attr_cols or elem_cols is not list-like.
        """
        if self.attr_cols and not is_list_like(self.attr_cols):
            raise TypeError(
                f"{type(self.attr_cols).__name__} is not a valid type for attr_cols"
            )

        if self.elem_cols and not is_list_like(self.elem_cols):
            raise TypeError(
                f"{type(self.elem_cols).__name__} is not a valid type for elem_cols"
            )

    @final
    def _validate_encoding(self) -> None:
        """
        Validate encoding.

        This method will check if encoding is among listed under codecs.

        Raises
        ------
        LookupError
            * If encoding is not available in codecs.
        """
        # codecs.lookup raises LookupError for unknown encodings; the return
        # value is intentionally discarded.
        codecs.lookup(self.encoding)

    @final
    def _process_dataframe(self) -> dict[int | str, dict[str, Any]]:
        """
        Adjust Data Frame to fit xml output.

        This method will adjust underlying data frame for xml output,
        including optionally replacing missing values and including indexes.
        """
        df = self.frame

        # reset_index folds the index level(s) into regular columns so they
        # can be rendered like any other field.
        if self.index:
            df = df.reset_index()

        if self.na_rep is not None:
            df = df.fillna(self.na_rep)

        # orient="index" yields one {column: value} dict per row.
        return df.to_dict(orient="index")

    @final
    def _handle_indexes(self) -> None:
        """
        Handle indexes.

        This method will add indexes into attr_cols or elem_cols.
        """
        if not self.index:
            return

        # Index levels are the keys present in a row dict but absent from the
        # original columns (orig_cols was captured before reset_index).
        first_key = next(iter(self.frame_dicts))
        indexes: list[str] = [
            x for x in self.frame_dicts[first_key].keys() if x not in self.orig_cols
        ]

        if self.attr_cols:
            self.attr_cols = indexes + self.attr_cols

        if self.elem_cols:
            self.elem_cols = indexes + self.elem_cols

    def _get_prefix_uri(self) -> str:
        """
        Get uri of namespace prefix.

        This method retrieves corresponding URI to prefix in namespaces.

        Raises
        ------
        KeyError
            * If prefix is not included in namespace dict.
        """
        raise AbstractMethodError(self)

    @final
    def _other_namespaces(self) -> dict:
        """
        Define other namespaces.

        This method will build dictionary of namespaces attributes
        for root element, conditionally with optional namespaces and
        prefix.
        """
        nmsp_dict: dict[str, str] = {}
        if self.namespaces:
            # Build "xmlns"/"xmlns:<prefix>" attributes, skipping the URI that
            # is already carried by self.prefix_uri ("{uri}" minus the braces).
            nmsp_dict = {
                f"xmlns{p if p=='' else f':{p}'}": n
                for p, n in self.namespaces.items()
                if n != self.prefix_uri[1:-1]
            }

        return nmsp_dict

    @final
    def _build_attribs(self, d: dict[str, Any], elem_row: Any) -> Any:
        """
        Create attributes of row.

        This method adds attributes using attr_cols to row element and
        works with tuples for multindex or hierarchical columns.
        """
        if not self.attr_cols:
            return elem_row

        for col in self.attr_cols:
            attr_name = self._get_flat_col_name(col)
            try:
                # Missing values are skipped entirely rather than rendered.
                if not isna(d[col]):
                    elem_row.attrib[attr_name] = str(d[col])
            except KeyError as err:
                raise KeyError(f"no valid column, {col}") from err
        return elem_row

    @final
    def _get_flat_col_name(self, col: str | tuple) -> str:
        """Flatten a (possibly tuple) column label into a prefixed XML name."""
        flat_col = col
        if isinstance(col, tuple):
            # "" in the tuple indicates a partially-unnamed MultiIndex level;
            # concatenate without separators in that case.
            flat_col = (
                "".join([str(c) for c in col]).strip()
                if "" in col
                else "_".join([str(c) for c in col]).strip()
            )
        return f"{self.prefix_uri}{flat_col}"

    @cache_readonly
    def _sub_element_cls(self):
        # Subclasses supply the element factory (etree or lxml SubElement).
        raise AbstractMethodError(self)

    @final
    def _build_elems(self, d: dict[str, Any], elem_row: Any) -> None:
        """
        Create child elements of row.

        This method adds child elements using elem_cols to row element and
        works with tuples for multindex or hierarchical columns.
        """
        sub_element_cls = self._sub_element_cls

        if not self.elem_cols:
            return

        for col in self.elem_cols:
            elem_name = self._get_flat_col_name(col)
            try:
                # Missing or empty values become empty elements (text=None).
                val = None if isna(d[col]) or d[col] == "" else str(d[col])
                sub_element_cls(elem_row, elem_name).text = val
            except KeyError as err:
                raise KeyError(f"no valid column, {col}") from err

    @final
    def write_output(self) -> str | None:
        """Serialize the tree; write to path_or_buffer or return it as str."""
        xml_doc = self._build_tree()

        if self.path_or_buffer is not None:
            with get_handle(
                self.path_or_buffer,
                "wb",
                compression=self.compression,
                storage_options=self.storage_options,
                is_text=False,
            ) as handles:
                handles.handle.write(xml_doc)
            return None
        else:
            return xml_doc.decode(self.encoding).rstrip()
class EtreeXMLFormatter(_BaseXMLFormatter):
    """
    XML formatter backed by the standard library: ``xml.etree.ElementTree``
    builds the tree and ``xml.dom.minidom`` handles pretty-printing.
    """

    def _build_tree(self) -> bytes:
        """Build the document tree and serialize it to bytes."""
        from xml.etree.ElementTree import (
            Element,
            SubElement,
            tostring,
        )

        self.root = Element(
            f"{self.prefix_uri}{self.root_name}", attrib=self._other_namespaces()
        )

        for row in self.frame_dicts.values():
            row_elem = SubElement(self.root, f"{self.prefix_uri}{self.row_name}")

            if self.attr_cols or self.elem_cols:
                row_elem = self._build_attribs(row, row_elem)
                self._build_elems(row, row_elem)
            else:
                # No explicit columns requested: emit every field as a child
                # element. The assignment happens once, on the first row.
                self.elem_cols = list(row.keys())
                self._build_elems(row, row_elem)

        self.out_xml = tostring(
            self.root,
            method="xml",
            encoding=self.encoding,
            xml_declaration=self.xml_declaration,
        )

        if self.pretty_print:
            self.out_xml = self._prettify_tree()

        if self.stylesheet is not None:
            raise ValueError(
                "To use stylesheet, you need lxml installed and selected as parser."
            )

        return self.out_xml

    def _get_prefix_uri(self) -> str:
        """Resolve ``self.prefix`` to a Clark-notation ``{uri}`` string."""
        from xml.etree.ElementTree import register_namespace

        if not self.namespaces:
            return ""

        # Register every (prefix, uri) pair so serialization uses the
        # caller's prefixes instead of auto-generated ones.
        for pfx, ns_uri in self.namespaces.items():
            if isinstance(pfx, str) and isinstance(ns_uri, str):
                register_namespace(pfx, ns_uri)

        if self.prefix:
            try:
                return f"{{{self.namespaces[self.prefix]}}}"
            except KeyError as err:
                raise KeyError(
                    f"{self.prefix} is not included in namespaces"
                ) from err
        if "" in self.namespaces:
            return f'{{{self.namespaces[""]}}}'
        return ""

    @cache_readonly
    def _sub_element_cls(self):
        # Element factory used by _build_elems; imported lazily.
        from xml.etree.ElementTree import SubElement

        return SubElement

    def _prettify_tree(self) -> bytes:
        """Re-serialize ``self.out_xml`` with line breaks and indentation."""
        from xml.dom.minidom import parseString

        return parseString(self.out_xml).toprettyxml(
            indent=" ", encoding=self.encoding
        )
class LxmlXMLFormatter(_BaseXMLFormatter):
    """
    Class for formatting data in xml using the third-party ``lxml.etree``
    module, with support for XSLT stylesheet transformations.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # lxml forbids "" as a namespace prefix; remap it to None (default ns).
        self._convert_empty_str_key()

    def _build_tree(self) -> bytes:
        """
        Build tree from data.

        This method initializes the root and builds attributes and elements
        with optional namespaces.
        """
        from lxml.etree import (
            Element,
            SubElement,
            tostring,
        )

        self.root = Element(f"{self.prefix_uri}{self.root_name}", nsmap=self.namespaces)

        for d in self.frame_dicts.values():
            elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}")

            if not self.attr_cols and not self.elem_cols:
                # No explicit columns requested: emit every field as a child
                # element (set once, on the first row).
                self.elem_cols = list(d.keys())
                self._build_elems(d, elem_row)
            else:
                elem_row = self._build_attribs(d, elem_row)
                self._build_elems(d, elem_row)

        self.out_xml = tostring(
            self.root,
            pretty_print=self.pretty_print,
            method="xml",
            encoding=self.encoding,
            xml_declaration=self.xml_declaration,
        )

        if self.stylesheet is not None:
            self.out_xml = self._transform_doc()

        return self.out_xml

    def _convert_empty_str_key(self) -> None:
        """
        Replace zero-length string in `namespaces`.

        This method will replace '' with None to align to `lxml`
        requirement that empty string prefixes are not allowed.
        """
        if self.namespaces and "" in self.namespaces.keys():
            self.namespaces[None] = self.namespaces.pop("", "default")

    def _get_prefix_uri(self) -> str:
        """
        Get uri of namespace prefix.

        Returns the Clark-notation ``{uri}`` string for ``self.prefix``,
        or '' when no prefix/namespaces apply.

        Raises
        ------
        KeyError
            * If prefix is not included in namespace dict.
        """
        uri = ""
        if self.namespaces:
            if self.prefix:
                try:
                    uri = f"{{{self.namespaces[self.prefix]}}}"
                except KeyError as err:
                    raise KeyError(
                        f"{self.prefix} is not included in namespaces"
                    ) from err
            elif "" in self.namespaces:
                uri = f'{{{self.namespaces[""]}}}'
            else:
                uri = ""

        return uri

    @cache_readonly
    def _sub_element_cls(self):
        # Element factory used by _build_elems; imported lazily so lxml is
        # only required when this formatter is actually used.
        from lxml.etree import SubElement

        return SubElement

    def _transform_doc(self) -> bytes:
        """
        Parse stylesheet from file or buffer and run it.

        This method will parse stylesheet object into tree for parsing
        conditionally by its specific object type, then transforms
        original tree with XSLT script.
        """
        from lxml.etree import (
            XSLT,
            XMLParser,
            fromstring,
            parse,
        )

        style_doc = self.stylesheet
        assert style_doc is not None  # is ensured by caller

        handle_data = get_data_from_filepath(
            filepath_or_buffer=style_doc,
            encoding=self.encoding,
            compression=self.compression,
            storage_options=self.storage_options,
        )

        with handle_data as xml_data:
            curr_parser = XMLParser(encoding=self.encoding)

            if isinstance(xml_data, io.StringIO):
                # lxml will not parse a str that carries an encoding
                # declaration, so feed it encoded bytes instead.
                xsl_doc = fromstring(
                    xml_data.getvalue().encode(self.encoding), parser=curr_parser
                )
            else:
                xsl_doc = parse(xml_data, parser=curr_parser)

        transformer = XSLT(xsl_doc)
        new_doc = transformer(self.root)

        return bytes(new_doc)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@io@formats@xml.py@.PATH_END.py
|
{
"filename": "test_fill_region.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/utilities/lib/tests/test_fill_region.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_equal
from yt.utilities.lib.misc_utilities import fill_region
NDIM = 32  # edge length (cells per axis) of the coarse test grid
def test_fill_region():
    """Check that fill_region maps coarse cell values onto refined grids.

    For each refinement level, every ``rf``-strided slice of the refined
    output must reproduce the original coarse-grid coordinate fields.
    """
    for level in range(2):
        rf = 2**level
        fine = NDIM * rf

        # Three output buffers on the refined grid, one per coordinate axis.
        output_fields = [np.zeros((fine, fine, fine), "float64") for _ in range(3)]

        # Coordinate fields on the coarse grid, flattened to 1D inputs.
        v = np.mgrid[
            0.0 : 1.0 : NDIM * 1j, 0.0 : 1.0 : NDIM * 1j, 0.0 : 1.0 : NDIM * 1j
        ]
        input_fields = [np.empty(NDIM**3, "float64") for _ in range(3)]
        for field, coord in zip(input_fields, v):
            field[:] = coord.ravel()

        # Integer cell positions of every coarse cell.
        ind = np.indices((NDIM, NDIM, NDIM))
        ipos = np.empty((NDIM**3, 3), dtype="int64")
        for axis in range(3):
            ipos[:, axis] = ind[axis].ravel()

        left_index = np.zeros(3, "int64")
        ires = np.zeros(NDIM * NDIM * NDIM, "int64")
        ddims = np.array([NDIM, NDIM, NDIM], dtype="int64") * rf

        fill_region(
            input_fields,
            output_fields,
            level,
            left_index,
            ipos,
            ires,
            ddims,
            np.array([2, 2, 2], dtype="i8"),
        )

        for offset in range(level + 1):
            for refined, coarse in zip(output_fields, v):
                assert_equal(refined[offset::rf, offset::rf, offset::rf], coarse)
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@utilities@lib@tests@test_fill_region.py@.PATH_END.py
|
{
"filename": "kernel_approximation.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/kernel_approximation.py",
"type": "Python"
}
|
"""Approximate kernel feature maps based on Fourier transforms and count sketches."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from scipy.fft import fft, ifft
from scipy.linalg import svd
from .base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from .metrics.pairwise import KERNEL_PARAMS, PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
from .utils import check_random_state
from .utils._param_validation import Interval, StrOptions
from .utils.extmath import safe_sparse_dot
from .utils.validation import (
_check_feature_names_in,
check_is_fitted,
validate_data,
)
class PolynomialCountSketch(
    ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
    """Polynomial kernel approximation via Tensor Sketch.

    Implements Tensor Sketch, which approximates the feature map
    of the polynomial kernel::

        K(X, Y) = (gamma * <X, Y> + coef0)^degree

    by efficiently computing a Count Sketch of the outer product of a
    vector with itself using Fast Fourier Transforms (FFT). Read more in the
    :ref:`User Guide <polynomial_kernel_approx>`.

    .. versionadded:: 0.24

    Parameters
    ----------
    gamma : float, default=1.0
        Parameter of the polynomial kernel whose feature map
        will be approximated.

    degree : int, default=2
        Degree of the polynomial kernel whose feature map
        will be approximated.

    coef0 : int, default=0
        Constant term of the polynomial kernel whose feature map
        will be approximated.

    n_components : int, default=100
        Dimensionality of the output feature space. Usually, `n_components`
        should be greater than the number of features in input samples in
        order to achieve good performance. The optimal score / run time
        balance is typically achieved around `n_components` = 10 * `n_features`,
        but this depends on the specific dataset being used.

    random_state : int, RandomState instance, default=None
        Determines random number generation for indexHash and bitHash
        initialization. Pass an int for reproducible results across multiple
        function calls. See :term:`Glossary <random_state>`.

    Attributes
    ----------
    indexHash_ : ndarray of shape (degree, n_features), dtype=int64
        Array of indexes in range [0, n_components) used to represent
        the 2-wise independent hash functions for Count Sketch computation.

    bitHash_ : ndarray of shape (degree, n_features), dtype=float32
        Array with random entries in {+1, -1}, used to represent
        the 2-wise independent hash functions for Count Sketch computation.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
    Nystroem : Approximate a kernel map using a subset of the training data.
    RBFSampler : Approximate a RBF kernel feature map using random Fourier
        features.
    SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
    sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.

    Examples
    --------
    >>> from sklearn.kernel_approximation import PolynomialCountSketch
    >>> from sklearn.linear_model import SGDClassifier
    >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
    >>> y = [0, 0, 1, 1]
    >>> ps = PolynomialCountSketch(degree=3, random_state=1)
    >>> X_features = ps.fit_transform(X)
    >>> clf = SGDClassifier(max_iter=10, tol=1e-3)
    >>> clf.fit(X_features, y)
    SGDClassifier(max_iter=10)
    >>> clf.score(X_features, y)
    1.0

    For a more detailed example of usage, see
    :ref:`sphx_glr_auto_examples_kernel_approximation_plot_scalable_poly_kernels.py`
    """

    _parameter_constraints: dict = {
        "gamma": [Interval(Real, 0, None, closed="left")],
        "degree": [Interval(Integral, 1, None, closed="left")],
        "coef0": [Interval(Real, None, None, closed="neither")],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    }

    def __init__(
        self, *, gamma=1.0, degree=2, coef0=0, n_components=100, random_state=None
    ):
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.n_components = n_components
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the model with X.

        Initializes the internal variables. The method needs no information
        about the distribution of data, so we only care about n_features in X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
                default=None
            Target values (None for unsupervised transformations).

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X, accept_sparse="csc")
        random_state = check_random_state(self.random_state)

        n_features = X.shape[1]
        if self.coef0 != 0:
            # A non-zero constant term is handled by appending one extra
            # (virtual) feature of value sqrt(coef0) in `transform`.
            n_features += 1

        self.indexHash_ = random_state.randint(
            0, high=self.n_components, size=(self.degree, n_features)
        )

        self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))
        self._n_features_out = self.n_components
        return self

    def transform(self, X):
        """Generate the feature map approximation for X.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            New data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            Returns the instance itself.
        """
        check_is_fitted(self)

        X = validate_data(self, X, accept_sparse="csc", reset=False)

        X_gamma = np.sqrt(self.gamma) * X

        # Append the sqrt(coef0) column representing the kernel's constant
        # term (mirrors the n_features += 1 in `fit`); keep sparse sparse.
        if sp.issparse(X_gamma) and self.coef0 != 0:
            X_gamma = sp.hstack(
                [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))],
                format="csc",
            )

        elif not sp.issparse(X_gamma) and self.coef0 != 0:
            X_gamma = np.hstack(
                [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))]
            )

        if X_gamma.shape[1] != self.indexHash_.shape[1]:
            raise ValueError(
                "Number of features of test samples does not"
                " match that of training samples."
            )

        count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components))

        # Accumulate one Count Sketch per degree: each feature j is hashed
        # to bucket indexHash_[d, j] with sign bitHash_[d, j].
        if sp.issparse(X_gamma):
            for j in range(X_gamma.shape[1]):
                for d in range(self.degree):
                    iHashIndex = self.indexHash_[d, j]
                    iHashBit = self.bitHash_[d, j]
                    count_sketches[:, d, iHashIndex] += (
                        (iHashBit * X_gamma[:, [j]]).toarray().ravel()
                    )

        else:
            for j in range(X_gamma.shape[1]):
                for d in range(self.degree):
                    iHashIndex = self.indexHash_[d, j]
                    iHashBit = self.bitHash_[d, j]
                    count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j]

        # For each sample, compute a count sketch of phi(x) using the
        # polynomial multiplication (via FFT) of p count sketches of x.
        count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True)
        count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1)
        data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True))

        return data_sketch
class RBFSampler(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """Approximate a RBF kernel feature map using random Fourier features.

    It implements a variant of Random Kitchen Sinks.[1]

    Read more in the :ref:`User Guide <rbf_kernel_approx>`.

    Parameters
    ----------
    gamma : 'scale' or float, default=1.0
        Parameter of RBF kernel: exp(-gamma * x^2).
        If ``gamma='scale'`` is passed then it uses
        1 / (n_features * X.var()) as value of gamma.

        .. versionadded:: 1.2
           The option `"scale"` was added in 1.2.

    n_components : int, default=100
        Number of Monte Carlo samples per original feature.
        Equals the dimensionality of the computed feature space.

    random_state : int, RandomState instance or None, default=None
        Pseudo-random number generator to control the generation of the random
        weights and random offset when fitting the training data.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    random_offset_ : ndarray of shape (n_components,), dtype={np.float64, np.float32}
        Random offset used to compute the projection in the `n_components`
        dimensions of the feature space.

    random_weights_ : ndarray of shape (n_features, n_components),\
        dtype={np.float64, np.float32}
        Random projection directions drawn from the Fourier transform
        of the RBF kernel.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
    Nystroem : Approximate a kernel map using a subset of the training data.
    PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
    SkewedChi2Sampler : Approximate feature map for
        "skewed chi-squared" kernel.
    sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.

    Notes
    -----
    See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
    Benjamin Recht.

    [1] "Weighted Sums of Random Kitchen Sinks: Replacing
    minimization with randomization in learning" by A. Rahimi and
    Benjamin Recht.
    (https://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)

    Examples
    --------
    >>> from sklearn.kernel_approximation import RBFSampler
    >>> from sklearn.linear_model import SGDClassifier
    >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
    >>> y = [0, 0, 1, 1]
    >>> rbf_feature = RBFSampler(gamma=1, random_state=1)
    >>> X_features = rbf_feature.fit_transform(X)
    >>> clf = SGDClassifier(max_iter=5, tol=1e-3)
    >>> clf.fit(X_features, y)
    SGDClassifier(max_iter=5)
    >>> clf.score(X_features, y)
    1.0
    """

    _parameter_constraints: dict = {
        "gamma": [
            StrOptions({"scale"}),
            Interval(Real, 0.0, None, closed="left"),
        ],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    }

    def __init__(self, *, gamma=1.0, n_components=100, random_state=None):
        self.gamma = gamma
        self.n_components = n_components
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the model with X.

        Samples random projection according to n_features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
                default=None
            Target values (None for unsupervised transformations).

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X, accept_sparse="csr")
        random_state = check_random_state(self.random_state)
        n_features = X.shape[1]
        sparse = sp.issparse(X)
        if self.gamma == "scale":
            # var = E[X^2] - E[X]^2 if sparse
            X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
            # Guard against constant data (zero variance).
            self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0
        else:
            self._gamma = self.gamma
        # Weights ~ N(0, 2*gamma) (Fourier transform of the RBF kernel);
        # offsets uniform on [0, 2*pi).
        self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal(
            size=(n_features, self.n_components)
        )

        self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)

        if X.dtype == np.float32:
            # Setting the data type of the fitted attribute will ensure the
            # output data type during `transform`.
            self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
            self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)

        self._n_features_out = self.n_components
        return self

    def transform(self, X):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            Returns the instance itself.
        """
        check_is_fitted(self)
        X = validate_data(self, X, accept_sparse="csr", reset=False)
        projection = safe_sparse_dot(X, self.random_weights_)
        projection += self.random_offset_
        # In-place cosine, then scale so the feature map approximates the
        # kernel in expectation.
        np.cos(projection, projection)
        projection *= (2.0 / self.n_components) ** 0.5
        return projection

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.transformer_tags.preserves_dtype = ["float64", "float32"]
        return tags
class SkewedChi2Sampler(
    ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
    """Approximate feature map for "skewed chi-squared" kernel.

    Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.

    Parameters
    ----------
    skewedness : float, default=1.0
        "skewedness" parameter of the kernel. Needs to be cross-validated.

    n_components : int, default=100
        Number of Monte Carlo samples per original feature.
        Equals the dimensionality of the computed feature space.

    random_state : int, RandomState instance or None, default=None
        Pseudo-random number generator to control the generation of the random
        weights and random offset when fitting the training data.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    random_weights_ : ndarray of shape (n_features, n_components)
        Weight array, sampled from a secant hyperbolic distribution, which will
        be used to linearly transform the log of the data.

    random_offset_ : ndarray of shape (n_features, n_components)
        Bias term, which will be added to the data. It is uniformly distributed
        between 0 and 2*pi.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
    Nystroem : Approximate a kernel map using a subset of the training data.
    RBFSampler : Approximate a RBF kernel feature map using random Fourier
        features.
    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
    sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.

    References
    ----------
    See "Random Fourier Approximations for Skewed Multiplicative Histogram
    Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.

    Examples
    --------
    >>> from sklearn.kernel_approximation import SkewedChi2Sampler
    >>> from sklearn.linear_model import SGDClassifier
    >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
    >>> y = [0, 0, 1, 1]
    >>> chi2_feature = SkewedChi2Sampler(skewedness=.01,
    ...                                  n_components=10,
    ...                                  random_state=0)
    >>> X_features = chi2_feature.fit_transform(X, y)
    >>> clf = SGDClassifier(max_iter=10, tol=1e-3)
    >>> clf.fit(X_features, y)
    SGDClassifier(max_iter=10)
    >>> clf.score(X_features, y)
    1.0
    """

    _parameter_constraints: dict = {
        "skewedness": [Interval(Real, None, None, closed="neither")],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    }

    def __init__(self, *, skewedness=1.0, n_components=100, random_state=None):
        self.skewedness = skewedness
        self.n_components = n_components
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the model with X.

        Samples random projection according to n_features.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
                default=None
            Target values (None for unsupervised transformations).

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X)
        random_state = check_random_state(self.random_state)
        n_features = X.shape[1]
        uniform = random_state.uniform(size=(n_features, self.n_components))
        # Transform by inverse CDF of sech to draw weights from the secant
        # hyperbolic distribution.
        self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform))
        self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)

        if X.dtype == np.float32:
            # Setting the data type of the fitted attribute will ensure the
            # output data type during `transform`.
            self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
            self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)

        self._n_features_out = self.n_components
        return self

    def transform(self, X):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where `n_samples` is the number of samples
            and `n_features` is the number of features. All values of X must be
            strictly greater than "-skewedness".

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            Returns the instance itself.
        """
        check_is_fitted(self)
        X = validate_data(
            self, X, copy=True, dtype=[np.float64, np.float32], reset=False
        )
        if (X <= -self.skewedness).any():
            raise ValueError("X may not contain entries smaller than -skewedness.")

        # Shift into the kernel's domain and take the log in place; the
        # random projection operates on log(X + skewedness).
        X += self.skewedness
        np.log(X, X)
        projection = safe_sparse_dot(X, self.random_weights_)
        projection += self.random_offset_
        np.cos(projection, projection)
        projection *= np.sqrt(2.0) / np.sqrt(self.n_components)
        return projection

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.transformer_tags.preserves_dtype = ["float64", "float32"]
        return tags
class AdditiveChi2Sampler(TransformerMixin, BaseEstimator):
    """Approximate feature map for additive chi2 kernel.

    Uses sampling the fourier transform of the kernel characteristic
    at regular intervals.

    Since the kernel that is to be approximated is additive, the components of
    the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
    a parameter of the method. Typical values of sample_steps include 1, 2 and
    3.

    Optimal choices for the sampling interval for certain data ranges can be
    computed (see the reference). The default values should be reasonable.

    Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.

    Parameters
    ----------
    sample_steps : int, default=2
        Gives the number of (complex) sampling points.

    sample_interval : float, default=None
        Sampling interval. Must be specified when sample_steps not in {1,2,3}.

    Attributes
    ----------
    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
        the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.

    sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
        squared kernel.

    Notes
    -----
    This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.

    This estimator is stateless and does not need to be fitted. However, we
    recommend to call :meth:`fit_transform` instead of :meth:`transform`, as
    parameter validation is only performed in :meth:`fit`.

    References
    ----------
    See `"Efficient additive kernels via explicit feature maps"
    <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
    A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
    2011

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.linear_model import SGDClassifier
    >>> from sklearn.kernel_approximation import AdditiveChi2Sampler
    >>> X, y = load_digits(return_X_y=True)
    >>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
    >>> X_transformed = chi2sampler.fit_transform(X, y)
    >>> clf = SGDClassifier(max_iter=5, random_state=0, tol=1e-3)
    >>> clf.fit(X_transformed, y)
    SGDClassifier(max_iter=5, random_state=0)
    >>> clf.score(X_transformed, y)
    0.9499...
    """

    _parameter_constraints: dict = {
        "sample_steps": [Interval(Integral, 1, None, closed="left")],
        "sample_interval": [Interval(Real, 0, None, closed="left"), None],
    }

    def __init__(self, *, sample_steps=2, sample_interval=None):
        self.sample_steps = sample_steps
        self.sample_interval = sample_interval

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Only validates estimator's parameters.

        This method allows to: (i) validate the estimator's parameters and
        (ii) be consistent with the scikit-learn transformer API.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
                default=None
            Target values (None for unsupervised transformations).

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = validate_data(self, X, accept_sparse="csr", ensure_non_negative=True)

        if self.sample_interval is None and self.sample_steps not in (1, 2, 3):
            raise ValueError(
                "If sample_steps is not in [1, 2, 3],"
                " you need to provide sample_interval"
            )

        return self

    def transform(self, X):
        """Apply approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        Returns
        -------
        X_new : {ndarray, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
            the type of the input X.
        """
        X = validate_data(
            self, X, accept_sparse="csr", reset=False, ensure_non_negative=True
        )
        sparse = sp.issparse(X)

        if self.sample_interval is None:
            # Defaults taken from figure 2 c) of
            # "Efficient additive kernels via explicit feature maps"
            # <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>
            # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine
            # Intelligence, 2011
            if self.sample_steps == 1:
                sample_interval = 0.8
            elif self.sample_steps == 2:
                sample_interval = 0.5
            elif self.sample_steps == 3:
                sample_interval = 0.4
            else:
                # Re-checked here because transform can be called without fit
                # on this stateless estimator.
                raise ValueError(
                    "If sample_steps is not in [1, 2, 3],"
                    " you need to provide sample_interval"
                )
        else:
            sample_interval = self.sample_interval

        # zeroth component
        # 1/cosh = sech
        # cosh(0) = 1.0
        transf = self._transform_sparse if sparse else self._transform_dense
        return transf(X, self.sample_steps, sample_interval)

    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Only used to validate feature names with the names seen in :meth:`fit`.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        # Note that passing attributes="n_features_in_" forces check_is_fitted
        # to check if the attribute is present. Otherwise it will pass on this
        # stateless estimator (requires_fit=False)
        check_is_fitted(self, attributes="n_features_in_")
        input_features = _check_feature_names_in(
            self, input_features, generate_names=True
        )
        est_name = self.__class__.__name__.lower()

        # One "sqrt" feature per input column, then a cos/sin pair for each
        # additional sampling step.
        names_list = [f"{est_name}_{name}_sqrt" for name in input_features]

        for j in range(1, self.sample_steps):
            cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features]
            sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features]
            names_list.extend(cos_names + sin_names)

        return np.asarray(names_list, dtype=object)

    @staticmethod
    def _transform_dense(X, sample_steps, sample_interval):
        # Only the non-zero entries are transformed; zeros map to zeros in
        # every output component.
        non_zero = X != 0.0
        X_nz = X[non_zero]

        X_step = np.zeros_like(X)
        X_step[non_zero] = np.sqrt(X_nz * sample_interval)

        X_new = [X_step]

        log_step_nz = sample_interval * np.log(X_nz)
        step_nz = 2 * X_nz * sample_interval

        for j in range(1, sample_steps):
            factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval))

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
            X_new.append(X_step)

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
            X_new.append(X_step)

        return np.hstack(X_new)

    @staticmethod
    def _transform_sparse(X, sample_steps, sample_interval):
        # The CSR sparsity structure (indices/indptr) is identical for every
        # output block; only the data arrays differ.
        indices = X.indices.copy()
        indptr = X.indptr.copy()

        data_step = np.sqrt(X.data * sample_interval)
        X_step = sp.csr_matrix(
            (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
        )
        X_new = [X_step]

        log_step_nz = sample_interval * np.log(X.data)
        step_nz = 2 * X.data * sample_interval

        for j in range(1, sample_steps):
            factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval))

            data_step = factor_nz * np.cos(j * log_step_nz)
            X_step = sp.csr_matrix(
                (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
            )
            X_new.append(X_step)

            data_step = factor_nz * np.sin(j * log_step_nz)
            X_step = sp.csr_matrix(
                (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
            )
            X_new.append(X_step)

        return sp.hstack(X_new)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.requires_fit = False
        tags.input_tags.positive_only = True
        return tags
class Nystroem(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
.. versionadded:: 0.13
Parameters
----------
kernel : str or callable, default='rbf'
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as `kernel_params`, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
coef0 : float, default=None
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
degree : float, default=None
Degree of the polynomial kernel. Ignored by other kernels.
kernel_params : dict, default=None
Additional parameters (keyword arguments) for kernel function passed
as callable object.
n_components : int, default=100
Number of features to construct.
How many data points will be used to construct the mapping.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the uniform sampling without
replacement of `n_components` of the training data to construct the
basis kernel.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the kernel matrix into `n_jobs` even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.24
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : ndarray of shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : ndarray of shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
Examples
--------
>>> from sklearn import datasets, svm
>>> from sklearn.kernel_approximation import Nystroem
>>> X, y = datasets.load_digits(n_class=9, return_X_y=True)
>>> data = X / 16.
>>> clf = svm.LinearSVC()
>>> feature_map_nystroem = Nystroem(gamma=.2,
... random_state=1,
... n_components=300)
>>> data_transformed = feature_map_nystroem.fit_transform(data)
>>> clf.fit(data_transformed, y)
LinearSVC()
>>> clf.score(data_transformed, y)
0.9987...
"""
_parameter_constraints: dict = {
"kernel": [
StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}),
callable,
],
"gamma": [Interval(Real, 0, None, closed="left"), None],
"coef0": [Interval(Real, None, None, closed="neither"), None],
"degree": [Interval(Real, 1, None, closed="left"), None],
"kernel_params": [dict, None],
"n_components": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
"n_jobs": [Integral, None],
}
    def __init__(
        self,
        kernel="rbf",
        *,
        gamma=None,
        coef0=None,
        degree=None,
        kernel_params=None,
        n_components=100,
        random_state=None,
        n_jobs=None,
    ):
        # Per scikit-learn convention, __init__ only stores the public
        # parameters unchanged; all validation/processing happens in `fit`.
        self.kernel = kernel
        self.gamma = gamma
        self.coef0 = coef0
        self.degree = degree
        self.kernel_params = kernel_params
        self.n_components = n_components
        self.random_state = random_state
        self.n_jobs = n_jobs
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit estimator to data.

        Samples a subset of training points, computes kernel
        on these and computes normalization matrix.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
                default=None
            Target values (None for unsupervised transformations).

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X, accept_sparse="csr")
        rnd = check_random_state(self.random_state)
        n_samples = X.shape[0]

        # get basis vectors
        if self.n_components > n_samples:
            # XXX should we just bail?
            n_components = n_samples
            warnings.warn(
                "n_components > n_samples. This is not possible.\n"
                "n_components was set to n_samples, which results"
                " in inefficient evaluation of the full kernel."
            )
        else:
            n_components = self.n_components

        n_components = min(n_samples, n_components)
        # Sample n_components training rows uniformly at random (without
        # replacement) to serve as the Nystroem basis.
        inds = rnd.permutation(n_samples)
        basis_inds = inds[:n_components]
        basis = X[basis_inds]

        basis_kernel = pairwise_kernels(
            basis,
            metric=self.kernel,
            filter_params=True,
            n_jobs=self.n_jobs,
            **self._get_kernel_params(),
        )

        # sqrt of kernel matrix on basis vectors, computed via SVD.
        # Singular values are clipped at 1e-12 so the division below stays
        # finite for rank-deficient basis kernels.
        U, S, V = svd(basis_kernel)
        S = np.maximum(S, 1e-12)
        self.normalization_ = np.dot(U / np.sqrt(S), V)
        self.components_ = basis
        self.component_indices_ = basis_inds
        self._n_features_out = n_components
        return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(
X,
self.components_,
metric=self.kernel,
filter_params=True,
n_jobs=self.n_jobs,
**kernel_params,
)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel) and self.kernel != "precomputed":
for param in KERNEL_PARAMS[self.kernel]:
if getattr(self, param) is not None:
params[param] = getattr(self, param)
else:
if (
self.gamma is not None
or self.coef0 is not None
or self.degree is not None
):
raise ValueError(
"Don't pass gamma, coef0 or degree to "
"Nystroem if using a callable "
"or precomputed kernel"
)
return params
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
return tags
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@kernel_approximation.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "utsav-akhaury/understanding-unets",
"repo_path": "understanding-unets_extracted/understanding-unets-master/README.md",
"type": "Markdown"
}
|
# Learnlets
[](https://travis-ci.com/zaccharieramzi/understanding-unets)
Learnlets are a way to learn a wavelet-like filter bank from data, rather than designing it by hand as is done with curvelets.
This filter bank will be learned in a denoising setting with backpropagation and gradient descent.
## Requirements
The requirements are listed in `learning_wavelets/requirements.txt`.
## Use
The learnlets are defined in `learning_wavelets/learnlet_model.py`, via the class `Learnlet`.
You can use different types of thresholding listed in `learning_wavelets/keras_utils/thresholding.py`.
## List of saved networks
### Exact reconstruction notebook
| Model id | Params |
|:----------------------------------------------:|:------------------------------------------------------:|
| learnlet_dynamic_st_bsd500_0_55_1580806694 | the big classical network, with 256 filters + identity |
| learnlet_subclassing_st_bsd500_0_55_1582195807 | 64 filters, subclassed API, exact recon forced |
### No threshold notebook
| Model id | Params |
|:------------------------------------------:|:------------------------------------------------------:|
| learnlet_dynamic_st_bsd500_0_55_1580806694 | the big classical network, with 256 filters + identity |
### Different training noise standard deviations notebook
| Model id | Params |
|:-------------------------------------------:|:------------------------------------------------------:|
| learnlet_dynamic_st_bsd500_0_55_1580806694 | the big classical network, with 256 filters + identity |
| learnlet_dynamic_st_bsd500_20_40_1580492805 | same with training on 20;40 noise std |
| learnlet_dynamic_st_bsd500_30_1580668579 | same with training on 30 noise std |
| unet_dynamic_st_bsd500_0_55_1576668365 | big classical unet with 64 base filters and batch norm |
| unet_dynamic_st_bsd500_20.0_40.0_1581002329 | same with training on 20;40 noise std |
| unet_dynamic_st_bsd500_30.0_30.0_1581002329 | same with training on 30 noise std |
### General comparison
| Model id | Params |
|:------------------------------------------:|:------------------------------------------------------:|
| learnlet_dynamic_st_bsd500_0_55_1580806694 | the big classical network, with 256 filters + identity |
| unet_dynamic_st_bsd500_0_55_1576668365 | big classical unet with 64 base filters and batch norm |
|
utsav-akhauryREPO_NAMEunderstanding-unetsPATH_START.@understanding-unets_extracted@understanding-unets-master@README.md@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scatter/hoverlabel/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font settings for the scatter trace's hover labels.

    Auto-generated plotly graph-object plumbing: each plotly attribute of
    ``scatter.hoverlabel.font`` is exposed as a validated property whose
    storage is ``self[...]`` item access on the base class.
    """

    # class properties
    # --------------------
    _parent_path_str = "scatter.hoverlabel"
    _path_str = "scatter.hoverlabel.font"
    # Complete set of attribute names this node accepts.
    _valid_props = {
        "color",
        "colorsrc",
        "family",
        "familysrc",
        "lineposition",
        "linepositionsrc",
        "shadow",
        "shadowsrc",
        "size",
        "sizesrc",
        "style",
        "stylesrc",
        "textcase",
        "textcasesrc",
        "variant",
        "variantsrc",
        "weight",
        "weightsrc",
    }

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (e.g. 'steelblue'; any standard CSS
            color name is accepted)
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
        "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
        "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `family`.

        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val

    # lineposition
    # ------------
    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an
        "under", "over" or "through" as well as combinations e.g.
        "under+over", etc.

        The 'lineposition' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['under', 'over', 'through'] joined with '+' characters
            (e.g. 'under+over')
            OR exactly one of ['none'] (e.g. 'none')
          - A list or array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["lineposition"]

    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val

    # linepositionsrc
    # ---------------
    @property
    def linepositionsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `lineposition`.

        The 'linepositionsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["linepositionsrc"]

    @linepositionsrc.setter
    def linepositionsrc(self, val):
        self["linepositionsrc"] = val

    # shadow
    # ------
    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto"
        places minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
        for additional options.

        The 'shadow' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["shadow"]

    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val

    # shadowsrc
    # ---------
    @property
    def shadowsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `shadow`.

        The 'shadowsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["shadowsrc"]

    @shadowsrc.setter
    def shadowsrc(self, val):
        self["shadowsrc"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `size`.

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # style
    # -----
    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic
        face from its family.

        The 'style' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'italic']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["style"]

    @style.setter
    def style(self, val):
        self["style"] = val

    # stylesrc
    # --------
    @property
    def stylesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `style`.

        The 'stylesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["stylesrc"]

    @stylesrc.setter
    def stylesrc(self, val):
        self["stylesrc"] = val

    # textcase
    # --------
    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear
        in all-uppercase or all-lowercase, or with each word
        capitalized.

        The 'textcase' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'word caps', 'upper', 'lower']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["textcase"]

    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val

    # textcasesrc
    # -----------
    @property
    def textcasesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `textcase`.

        The 'textcasesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["textcasesrc"]

    @textcasesrc.setter
    def textcasesrc(self, val):
        self["textcasesrc"] = val

    # variant
    # -------
    @property
    def variant(self):
        """
        Sets the variant of the font.

        The 'variant' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'small-caps', 'all-small-caps',
                'all-petite-caps', 'petite-caps', 'unicase']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["variant"]

    @variant.setter
    def variant(self, val):
        self["variant"] = val

    # variantsrc
    # ----------
    @property
    def variantsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `variant`.

        The 'variantsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["variantsrc"]

    @variantsrc.setter
    def variantsrc(self, val):
        self["variantsrc"] = val

    # weight
    # ------
    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.

        The 'weight' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 1000]
            OR exactly one of ['normal', 'bold'] (e.g. 'bold')
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|numpy.ndarray
        """
        return self["weight"]

    @weight.setter
    def weight(self, val):
        self["weight"] = val

    # weightsrc
    # ---------
    @property
    def weightsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `weight`.

        The 'weightsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["weightsrc"]

    @weightsrc.setter
    def weightsrc(self, val):
        self["weightsrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE: this is a runtime string consumed by the base class when
        # building help/error text; its content must stay in sync with the
        # property docstrings above.
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        linepositionsrc
            Sets the source reference on Chart Studio Cloud for
            `lineposition`.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        shadowsrc
            Sets the source reference on Chart Studio Cloud for
            `shadow`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        stylesrc
            Sets the source reference on Chart Studio Cloud for
            `style`.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        textcasesrc
            Sets the source reference on Chart Studio Cloud for
            `textcase`.
        variant
            Sets the variant of the font.
        variantsrc
            Sets the source reference on Chart Studio Cloud for
            `variant`.
        weight
            Sets the weight (or boldness) of the font.
        weightsrc
            Sets the source reference on Chart Studio Cloud for
            `weight`.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        lineposition=None,
        linepositionsrc=None,
        shadow=None,
        shadowsrc=None,
        size=None,
        sizesrc=None,
        style=None,
        stylesrc=None,
        textcase=None,
        textcasesrc=None,
        variant=None,
        variantsrc=None,
        weight=None,
        weightsrc=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatter.hoverlabel.Font`
        color, colorsrc, family, familysrc, lineposition, \
linepositionsrc, shadow, shadowsrc, size, sizesrc, style, stylesrc, \
textcase, textcasesrc, variant, variantsrc, weight, weightsrc
            See the corresponding property docstrings on this class;
            each ``*src`` parameter sets the Chart Studio Cloud source
            reference for its base property. Explicit keyword values
            take precedence over values supplied via ``arg``.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        # Internal construction path: adopt the parent and skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so pops below don't mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.hoverlabel.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # For each property: explicit keyword argument wins over `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("lineposition", None)
        _v = lineposition if lineposition is not None else _v
        if _v is not None:
            self["lineposition"] = _v
        _v = arg.pop("linepositionsrc", None)
        _v = linepositionsrc if linepositionsrc is not None else _v
        if _v is not None:
            self["linepositionsrc"] = _v
        _v = arg.pop("shadow", None)
        _v = shadow if shadow is not None else _v
        if _v is not None:
            self["shadow"] = _v
        _v = arg.pop("shadowsrc", None)
        _v = shadowsrc if shadowsrc is not None else _v
        if _v is not None:
            self["shadowsrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        _v = arg.pop("style", None)
        _v = style if style is not None else _v
        if _v is not None:
            self["style"] = _v
        _v = arg.pop("stylesrc", None)
        _v = stylesrc if stylesrc is not None else _v
        if _v is not None:
            self["stylesrc"] = _v
        _v = arg.pop("textcase", None)
        _v = textcase if textcase is not None else _v
        if _v is not None:
            self["textcase"] = _v
        _v = arg.pop("textcasesrc", None)
        _v = textcasesrc if textcasesrc is not None else _v
        if _v is not None:
            self["textcasesrc"] = _v
        _v = arg.pop("variant", None)
        _v = variant if variant is not None else _v
        if _v is not None:
            self["variant"] = _v
        _v = arg.pop("variantsrc", None)
        _v = variantsrc if variantsrc is not None else _v
        if _v is not None:
            self["variantsrc"] = _v
        _v = arg.pop("weight", None)
        _v = weight if weight is not None else _v
        if _v is not None:
            self["weight"] = _v
        _v = arg.pop("weightsrc", None)
        _v = weightsrc if weightsrc is not None else _v
        if _v is not None:
            self["weightsrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@scatter@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "clustAlgor.py",
"repo_name": "msolpera/pyUPMASK",
"repo_path": "pyUPMASK_extracted/pyUPMASK-main/modules/clustAlgor.py",
"type": "Python"
}
|
import numpy as np
import sklearn.cluster as skclust
import sklearn.mixture as skmixture
from scipy.spatial.distance import cdist
from scipy import spatial
from .voronoiVols import voronoi_volumes
def DDhisto(clust_data, N_membs, n_clusters):
    """
    Cluster by binning the data with a simple N-dimensional histogram.

    Each star is labeled by the histogram cell it falls in. The number of
    bins per dimension targets an average of 'N_membs' stars per cell,
    capped so that the total number of cells does not exceed 'n_clusters'.

    Parameters
    ----------
    clust_data : ndarray, shape (N_stars, N_dim)
        Coordinates of the stars.
    N_membs : int
        Target average number of stars per histogram cell.
    n_clusters : int
        Upper bound on the total number of cells.

    Returns
    -------
    labels : ndarray of int, shape (N_stars,)
        Integer cluster label per star.
    """
    import pandas as pd

    N_stars, N_dim = clust_data.shape

    # Bug fix: np.histogramdd requires an integer bin count; the previous
    # float division value raised a TypeError on recent numpy versions
    # whenever the capping branch below was not taken. Guard with >= 1.
    N_bins = max(1, int(N_stars / N_membs))
    if N_bins**N_dim > n_clusters:
        N_bins = max(1, int(n_clusters**(1 / N_dim)))

    H, edges = np.histogramdd(clust_data, N_bins)

    # Find indexes of points within edges, per dimension
    labels_dim = []
    for dim in range(N_dim):
        labels_dim.append(list(map(str, np.digitize(
            clust_data.T[dim], edges[dim], right=True))))

    # Concatenate the per-dimension edge indexes into a unique string id
    ids = ["".join(_) for _ in zip(*labels_dim)]
    # Map the string ids onto consecutive integer labels
    labels = pd.factorize(ids)[0]

    return labels
def voronoi(clust_data, N_membs, n_clusters, N_st_max):
    """
    Voronoi assignation. Not really a clustering method: the densest
    Voronoi cells seed clusters and every star is labeled by proximity.
    """
    N_stars = clust_data.shape[0]

    # Spacing used when picking cluster seeds from the density ranking.
    if N_stars / n_clusters > N_membs:
        step = int(N_stars / n_clusters)
    else:
        step = N_membs

    # Densities are the inverse of the Voronoi cell volumes
    dens = 1. / voronoi_volumes(clust_data)

    # Every 'step'-th star in decreasing-density order seeds a cluster,
    # keeping at most 'n_clusters' seeds.
    cl_idx = np.argsort(-dens)[::step][:n_clusters]

    dist = None
    if N_stars < N_st_max:
        # Small sample: pre-compute the full pairwise distance matrix
        dist = cdist(clust_data, clust_data)

    return densLabeling(N_st_max, N_stars, clust_data, dist, cl_idx)
def kNNdens(clust_data, cl_method_pars, N_membs, n_clusters, N_st_max):
    """
    Adapted from: 'Clustering by fast search and find of density peaks',
    Rodriguez and Laio (2014)

    Stars whose local density times their distance to the nearest denser
    star ('delta') is largest become cluster seeds; every star is then
    labeled by its nearest seed (via densLabeling).

    Parameters
    ----------
    clust_data : ndarray, shape (N_stars, N_dim)
        Coordinates of the stars.
    cl_method_pars : dict
        May contain 'NN_dd', the number of nearest neighbors used for the
        density estimate (defaults to N_membs).
    N_membs : int
        Fallback value for 'NN_dd'.
    n_clusters : int
        Number of cluster seeds to keep.
    N_st_max : int
        Above this sample size the full distance matrix is not built.

    Returns
    -------
    labels : ndarray of int
        Cluster label for each star.
    """
    N_stars = clust_data.shape[0]

    try:
        NN_dd = cl_method_pars['NN_dd']
    except KeyError:
        NN_dd = N_membs

    # Find NN_dd nearest neighbors.
    tree = spatial.cKDTree(clust_data)
    # k = NN_dd + 1 because each point's closest neighbor is itself.
    inx = tree.query(clust_data, k=NN_dd + 1)
    # Mean distance to the NN_dd neighbors.
    NN_dist = inx[0].mean(1)
    # Convert to densities
    dens = 1. / NN_dist

    # For each star, find the distance to the *closest* star that has a
    # larger density (stored in 'delta'). For the star with largest
    # density, assign the distance to the most distant star.
    delta = np.zeros(dens.size)

    # Only use for arrays with less than 'Nmax' stars. Otherwise too much
    # memory is required.
    if N_stars < N_st_max:
        # Find the distances to all stars, for all stars
        dist = cdist(clust_data, clust_data)
        for i, st_dens in enumerate(dens):
            msk = dens > st_dens
            # Store the index of the star with the largest density.
            if msk.sum() == 0:
                idx_max = i
            else:
                delta[i] = dist[i][msk].min()
        # For this star, assign the largest distance.
        delta[idx_max] = delta.max()
    else:
        # Memory-friendly path: compute one cdist row at a time.
        for i, st_dens in enumerate(dens):
            # Distance from 'st' to all other stars
            dist = cdist([clust_data[i]], clust_data)
            msk = dens > st_dens
            # Store the index of the star with the largest density.
            if msk.sum() == 0:
                idx_max = i
            else:
                delta[i] = dist[0][msk].min()
        # For this star, assign the largest distance.
        delta[idx_max] = delta.max()

    # Density times delta
    mult = dens * delta
    # Indexes that sort 'mult' in descending order
    idx_s = np.argsort(-mult)
    # Indexes for clusters
    cl_idx = idx_s[:n_clusters]

    # NOTE(review): in the large-N branch 'dist' is only the last 1xN row
    # computed above; densLabeling ignores it in that case (it rebuilds a
    # KD-tree instead) — confirm if densLabeling's contract ever changes.
    labels = densLabeling(N_st_max, N_stars, clust_data, dist, cl_idx)

    return labels
def densLabeling(Nmax, N_stars, clust_data, dist, cl_idx):
    """
    Label every star with the index of its nearest cluster seed.

    For small samples the pre-computed full distance matrix 'dist' is
    used directly; otherwise a KD-tree query against the seed coordinates
    avoids holding the full matrix in memory.
    """
    if N_stars < Nmax:
        # Row j of dist[cl_idx] holds distances from seed j to all stars;
        # the closest seed per column is the label.
        return np.argmin(dist[cl_idx, :], 0)

    # Assign the label of the closest seed via a KD-tree lookup.
    seeds_tree = spatial.cKDTree(clust_data[cl_idx])
    _, labels = seeds_tree.query(clust_data)
    return labels
def RKmeans(clust_data, n_clusters):
    """
    Use R's K-means method.

    Requires 'rpy2' plus an R installation; the data are copied into the
    embedded R session and clustered with R's kmeans (50 restarts, 100
    max iterations).

    Parameters
    ----------
    clust_data : ndarray, shape (nr, nc)
        Coordinates of the stars.
    n_clusters : int
        Number of clusters requested from R's kmeans.

    Returns
    -------
    labels : ndarray
        Cluster assignments from 'fit$cluster'.
        NOTE(review): R's kmeans ids are presumably 1-based, unlike the
        0-based labels from the other methods in this module — confirm
        downstream handling.
    """
    from rpy2.robjects import r

    nr, nc = clust_data.shape
    # Copy the data into the R session as an (nr x nc) matrix.
    ocdata_px = r.matrix(clust_data, nrow=nr, ncol=nc)
    r.assign('ocdata_px', ocdata_px)
    r.assign('nclust', n_clusters)

    # This line apparently serves no purpose in the original code
    # aggregate(ocdata_px, by=list(fit$cluster), FUN=mean)

    r('fit <- kmeans(ocdata_px, nclust, nstart=50, iter.max=100)')
    r('ocdata_px <- data.frame(ocdata_px, resMclust.class=fit$cluster)')
    # r('labels_R <- ocdata_px$resMclust.class')
    # labels = np.array(list(r('labels_R')))
    labels = np.array(list(r('fit$cluster')))

    return labels
def sklearnMethods(clust_method, cl_method_pars, clust_data, n_clusters):
    """
    Find clusters in the 'clust_data' array using the selected algorithm.

    'clust_method' names a scikit-learn clustering/mixture estimator (or
    'HDBSCAN' from the hdbscan package); 'cl_method_pars' is forwarded to
    the estimator via set_params().
    """
    if clust_method == 'HDBSCAN':
        import hdbscan
        model = hdbscan.HDBSCAN()
    else:
        # Dispatch table replaces the long if/elif chain.
        constructors = {
            'KMeans': skclust.KMeans,
            'MiniBatchKMeans': skclust.MiniBatchKMeans,
            'AffinityPropagation': skclust.AffinityPropagation,
            'SpectralClustering': skclust.SpectralClustering,
            'AgglomerativeClustering': skclust.AgglomerativeClustering,
            'GaussianMixture': skmixture.GaussianMixture,
            'BayesianGaussianMixture': skmixture.BayesianGaussianMixture,
            'DBSCAN': skclust.DBSCAN,
            'OPTICS': skclust.OPTICS,
            'MeanShift': skclust.MeanShift,
            'Birch': skclust.Birch,
        }
        model = constructors[clust_method]()

    # Set parameters for the method (if any)
    if cl_method_pars:
        model.set_params(**cl_method_pars)

    mixture_methods = ('GaussianMixture', 'BayesianGaussianMixture')

    # Only these estimators need the number of clusters/components set
    if clust_method in mixture_methods:
        model.n_components = n_clusters
    elif clust_method in (
            'KMeans', 'MiniBatchKMeans', 'SpectralClustering',
            'AgglomerativeClustering'):
        model.n_clusters = n_clusters

    # Fit the model
    model.fit(clust_data)

    # Mixture models expose labels through predict(); the clustering
    # estimators store them on the fitted model.
    if clust_method in mixture_methods:
        # probs_gmm = model.predict_proba(clust_data).max(1)
        return model.predict(clust_data)
    return model.labels_
def pycl(clust_method, clust_data, n_clusters):
    """
    Cluster with one of the 'pyclustering' package's algorithms
    ('pyclKmeans' or 'pyclGA').
    """
    algo = clust_method[4:]  # strip the 'pycl' prefix

    if clust_method == 'pyclKmeans':
        from pyclustering.cluster.kmeans import kmeans
        from pyclustering.cluster.center_initializer import\
            kmeans_plusplus_initializer
        # Seed centers with k-means++
        seeds = kmeans_plusplus_initializer(
            clust_data, n_clusters).initialize()
        model = kmeans(clust_data, seeds)
        # final_centers = model.get_centers()
    elif clust_method == 'pyclGA':
        from pyclustering.cluster.ga import genetic_algorithm
        # Create instance of observer that will collect all information:
        # observer_instance = ga_observer(True, True, True)
        model = genetic_algorithm(
            clust_data, count_clusters=n_clusters, chromosome_count=100,
            population_count=20, coeff_mutation_count=.5)

    # Fit the model
    model.process()

    if algo == 'Kmeans':
        labels = model.predict(clust_data)
    elif algo == 'GA':
        # GA returns index lists per cluster; flatten into per-star labels
        labels = np.zeros(clust_data.shape[0])
        for lbl, members in enumerate(model.get_clusters()):
            labels[members] = lbl
        labels = labels.astype(int)

    return labels
# The 'HDBSCAN' method is taken from https://hdbscan.readthedocs.io/. Here's
# a nice article explaining it: https://towardsdatascience.com/
# understanding-hdbscan-and-density-based-clustering-121dbee1320e
# elif clust_method == 'HDBSCAN':
# import hdbscan
# model = hdbscan.HDBSCAN()
# The 'KMeansSwap' method is adapted from the article
# 'Efficiency of random swap clustering', Franti (2018)
# elif clust_method == 'KMeansSwap':
# model = skclust.KMeans()
# model.n_clusters = n_clusters
# model.fit(clust_data)
# inertia_old = model.inertia_
# centers = model.cluster_centers_
# for _ in range(cl_method_pars['n_runs']):
# centers2 = np.array(centers)
# idx_1 = np.random.choice(n_clusters)
# idx_2 = np.random.choice(clust_data.shape[0])
# centers2[idx_1] = clust_data[idx_2]
# model = skclust.KMeans(
# init=centers2, n_clusters=n_clusters, n_init=1, max_iter=2)
# model.fit(clust_data)
# if model.inertia_ < inertia_old:
# centers = model.cluster_centers_
# inertia_old = model.inertia_
# # Reset this parameter
# model.max_iter = 300
|
msolperaREPO_NAMEpyUPMASKPATH_START.@pyUPMASK_extracted@pyUPMASK-main@modules@clustAlgor.py@.PATH_END.py
|
{
"filename": "heartbeat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipykernel/py2/ipykernel/heartbeat.py",
"type": "Python"
}
|
"""The client and server for a basic ping-pong style heartbeat.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import errno
import os
import socket
from threading import Thread
import zmq
from jupyter_client.localinterfaces import localhost
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class Heartbeat(Thread):
    "A simple ping-pong style heartbeat that runs in a thread."
    def __init__(self, context, addr=None):
        """Set up the heartbeat.

        Parameters
        ----------
        context : zmq.Context
            ZMQ context used to create the ROUTER socket in run().
        addr : tuple or None
            (transport, ip, port) triple; defaults to ('tcp', localhost(), 0).
            A port of 0 means "pick a free one" (resolved below).
        """
        if addr is None:
            addr = ('tcp', localhost(), 0)
        Thread.__init__(self)
        self.context = context
        self.transport, self.ip, self.port = addr
        if self.port == 0:
            # Port 0 means auto-select: find a free port now so the chosen
            # port can be reported to clients before the thread starts.
            if addr[0] == 'tcp':
                s = socket.socket()
                # '*' means all interfaces to 0MQ, which is '' to socket.socket
                s.bind(('' if self.ip == '*' else self.ip, 0))
                self.port = s.getsockname()[1]
                s.close()
            elif addr[0] == 'ipc':
                # For IPC, probe for the first unused "<path>-<n>" endpoint.
                self.port = 1
                while os.path.exists("%s-%s" % (self.ip, self.port)):
                    self.port = self.port + 1
            else:
                raise ValueError("Unrecognized zmq transport: %s" % addr[0])
        self.addr = (self.ip, self.port)
        # Daemon thread: do not block interpreter shutdown.
        self.daemon = True
    def run(self):
        """Bind a ROUTER socket and echo every message back (ping-pong)."""
        self.socket = self.context.socket(zmq.ROUTER)
        self.socket.linger = 1000
        # tcp endpoints use host:port; ipc endpoints use path-port.
        c = ':' if self.transport == 'tcp' else '-'
        self.socket.bind('%s://%s' % (self.transport, self.ip) + c + str(self.port))
        while True:
            try:
                # zmq.device blocks forever, forwarding each request back
                # to its sender (same socket on both ends).
                zmq.device(zmq.QUEUE, self.socket, self.socket)
            except zmq.ZMQError as e:
                if e.errno == errno.EINTR:
                    # Interrupted system call: keep serving.
                    continue
                else:
                    raise
            else:
                break
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipykernel@py2@ipykernel@heartbeat.py@.PATH_END.py
|
{
"filename": "prepare_data.py",
"repo_name": "rpoleski/MulensModel",
"repo_path": "MulensModel_extracted/MulensModel-master/examples/run_time_tests/prepare_data.py",
"type": "Python"
}
|
"""
Prepare files to be used as tests.
"""
import numpy as np
from MulensModel import Model
MAG_ZEROPOINT = 18.
def simulate_PSPL(file_name, n_data, t_start=None, t_stop=None, u_0=None,
                  magnification_function=None):
    """Simulate a PSPL light curve and save it to *file_name*.

    Output file has three columns: time, observed magnitude, and
    magnitude uncertainty.

    Parameters :
        file_name: *str*
            Path of the output text file.
        n_data: *int*
            Number of simulated epochs.
        t_start: *float* or *None*
            Start of the simulated time range; defaults to t_0 - 4 t_E.
        t_stop: *float* or *None*
            End of the simulated time range; defaults to t_0 + 4 t_E.
        u_0: *float* or *None*
            Impact parameter; defaults to 0.01.
        magnification_function: callable or *None*
            If provided, called with the epoch array to obtain the
            magnification instead of the analytic PSPL formula.
    """
    t_0 = 2456900.
    t_E = 20.
    # Bug fix: previously u_0 was unconditionally overwritten with 0.01,
    # so the u_0 argument was silently ignored. Only apply the default
    # when the caller did not provide a value.
    if u_0 is None:
        u_0 = 0.01
    if t_start is None:
        t_start = t_0 - 4. * t_E
    if t_stop is None:
        t_stop = t_0 + 4. * t_E
    # Noise model: fractional flux error plus a magnitude error floor.
    relative_sigma = 0.03
    add_magnitude_error = 0.003
    flux_source = 3.
    flux_blend = 0.1
    times = np.sort(np.random.uniform(t_start, t_stop, n_data))
    if magnification_function is None:
        # Analytic point-source point-lens magnification.
        tau = (times - t_0) / t_E
        u2 = tau**2 + u_0**2
        magnification = (u2 + 2.) / np.sqrt(u2 * (u2 + 4.))
    else:
        magnification = magnification_function(times)
    flux = magnification * flux_source + flux_blend
    diff = np.random.normal(0., 1., n_data)
    sigma = flux * relative_sigma
    flux_observed = flux + sigma * diff
    mag_observed = MAG_ZEROPOINT - 2.5 * np.log10(flux_observed)
    sigma_observed = np.sqrt((sigma/flux_observed)**2 + add_magnitude_error**2)
    data_out = np.array([times, mag_observed, sigma_observed]).T
    np.savetxt(file_name, data_out)
if __name__ == '__main__':
    # Static PSPL datasets of increasing size (analytic magnification).
    simulate_PSPL('test_100.txt', 100)
    simulate_PSPL('test_1000.txt', 1000)
    simulate_PSPL('test_10000.txt', 10000)
    # Parallax datasets: magnification comes from a MulensModel Model
    # with annual (Earth orbital) parallax enabled.
    parameters = {'t_0': 2456900., 'u_0': 0.1, 't_E': 50.,
                  'pi_E_N': 0.6, 'pi_E_E': 0.8}
    model = Model(parameters, coords="18:00:00.00 -30:00:00.0")
    model.parallax(earth_orbital=True)
    kwargs = {'magnification_function': model.magnification,
              't_start': parameters['t_0']-80.,
              't_stop': parameters['t_0']+80.}
    simulate_PSPL('test_100_piE.txt', 100, **kwargs)
    simulate_PSPL('test_1000_piE.txt', 1000, **kwargs)
    simulate_PSPL('test_10000_piE.txt', 10000, **kwargs)
|
rpoleskiREPO_NAMEMulensModelPATH_START.@MulensModel_extracted@MulensModel-master@examples@run_time_tests@prepare_data.py@.PATH_END.py
|
{
"filename": "epub.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/document_loaders/epub.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredEPubLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Map of deprecated attribute name -> module that now provides it.
DEPRECATED_LOOKUP = {"UnstructuredEPubLoader": "langchain_community.document_loaders"}
# Importer callable that emits a deprecation warning and forwards the
# lookup to the module named in DEPRECATED_LOOKUP.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level __getattr__ (PEP 562): delegates any attribute access
    not found in this module to the deprecation-aware importer.
    """
    return _import_attribute(name)
__all__ = [
    "UnstructuredEPubLoader",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@document_loaders@epub.py@.PATH_END.py
|
{
"filename": "_enabled.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/cumulative/_enabled.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class EnabledValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``histogram.cumulative.enabled`` property."""

    def __init__(
        self, plotly_name="enabled", parent_name="histogram.cumulative", **kwargs
    ):
        # Pull the edit type out of kwargs first so callers may override it.
        edit_type = kwargs.pop("edit_type", "calc")
        super(EnabledValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@cumulative@_enabled.py@.PATH_END.py
|
{
"filename": "metalpoor_stars.py",
"repo_name": "annayqho/TheCannon",
"repo_path": "TheCannon_extracted/TheCannon-master/code/lamost/mass_age/paper_plots/metalpoor_stars.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
from math import log10, floor
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import numpy as np
def round_sig(x, sig=2):
    """Round *x* to *sig* significant figures.

    Negative values are handled by rounding the magnitude and restoring
    the sign. Zero is returned unchanged -- the original implementation
    raised a math domain error (log10(0)) for x == 0.
    """
    if x == 0:
        return x
    if x < 0:
        return -round_sig(-x, sig)
    return round(x, sig - int(floor(log10(x))) - 1)
# Plot annotation labels/units for the three stellar labels.
names = ['T_{eff},', '\log g', '[Fe/H]']
units = ['K', 'dex', 'dex']
snr_str = [r'SNR $\textless$ 50', r'50 $\textless$ SNR $\textless$ 100', r'SNR $\textgreater$ 100']
snr_str = snr_str[::-1]
cutoffs = [0, 50, 100, 10000]
cutoffs = cutoffs[::-1]
y_highs = [300, 0.5, 0.3]
x_lows = [4000, 1.1, -0.9, -0.08]
x_highs = [5300, 3.8, 0.4, 0.4]
# Cannon-derived labels, IDs, reference (APOGEE) labels, and SNRs.
all_cannon = np.load("run_2_train_on_good/optimization_experiment/best_labels.npz")['arr_0'].T
all_ids = np.load("run_2_train_on_good/all_ids.npz")['arr_0']
all_apogee = np.load("run_2_train_on_good/all_label.npz")['arr_0']
all_snr = np.load("run_2_train_on_good/SNRs.npz")['arr_0']
IDs_lamost = np.loadtxt(
    "../examples/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt",
    usecols=(0,), dtype=(str))
IDs_apogee = np.loadtxt(
    "../examples/apogee_sorted_by_ra.txt",
    usecols=(0,), dtype=(str))
labels_all_lamost = np.loadtxt(
    "../examples/test_training_overlap/lamost_sorted_by_ra_with_dr2_params.txt",
    usecols=(3,4,5), dtype=(float))
# Cross-match: row index in the LAMOST catalog for each object in all_ids.
inds = np.array([np.where(IDs_lamost==a)[0][0] for a in all_ids])
labels_lamost = labels_all_lamost[inds,:]
ids_apogee = IDs_apogee[inds]
labels_apogee = all_apogee[inds]
# Keep stars with a valid APOGEE [Fe/H] (bad values are flagged < -500).
good = labels_apogee[:,2] > -500
# BUG FIX: the original referenced undefined names `apogee` and
# `apogee_labels` (NameError); the array defined above is `labels_apogee`.
metalpoor_stars = labels_apogee[good][labels_apogee[:,2][good] < -1.8]
np.savetxt("metalpoor_stars.txt", metalpoor_stars)
#plt.hist2d(lamost[:,2][good], apogee[:,2][good], norm=LogNorm(), cmap="gray_r", bins=50)
#plt.xlabel("LAMOST [Fe/H] [dex]")
#plt.ylabel("APOGEE [Fe/H] [dex]")
#plt.title("Comparing [Fe/H] in LAMOST and APOGEE")
#plt.plot([-3,1], [-3,1])
#plt.savefig("metalpoor_stars.png")
#plt.close()
|
annayqhoREPO_NAMETheCannonPATH_START.@TheCannon_extracted@TheCannon-master@code@lamost@mass_age@paper_plots@metalpoor_stars.py@.PATH_END.py
|
{
"filename": "test_ephemerides.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/tests/moving_objects/test_ephemerides.py",
"type": "Python"
}
|
import os
import unittest
import numpy as np
import pandas as pd
from astropy.time import Time
from rubin_scheduler.data import get_data_dir
from rubin_sim.moving_objects import Orbits, PyOrbEphemerides
class TestPyOrbEphemerides(unittest.TestCase):
    """Tests of PyOrbEphemerides orbit conversion and ephemeris generation."""
    def setUp(self):
        # Load the same orbits in cometary ('Q' file, COM format) and
        # Keplerian ('A' file, KEP format) representations.
        self.testdir = os.path.join(get_data_dir(), "tests", "orbits_testdata")
        self.orbits = Orbits()
        self.orbits.read_orbits(os.path.join(self.testdir, "test_orbitsQ.des"))
        self.orbits_kep = Orbits()
        self.orbits_kep.read_orbits(os.path.join(self.testdir, "test_orbitsA.des"))
        self.ephems = PyOrbEphemerides()
        self.ephems.set_orbits(self.orbits)
        # Expected number of columns in basic / full ephemeris output.
        self.len_ephems_basic = 11
        self.len_ephems_full = 34
    def tear_down(self):
        del self.orbits
        del self.orbits_kep
        del self.ephems
    def test_which_pyoorb(self):
        # Report which pyoorb installation is being exercised.
        import pyoorb
        print(pyoorb.__file__)
    def test_set_orbits(self):
        # Test that we can set orbits.
        self.ephems.set_orbits(self.orbits)
        # Test that setting with an empty orbit object fails.
        # (Avoids hard-to-interpret errors from pyoorb).
        with self.assertRaises(ValueError):
            empty_orb = Orbits()
            empty = pd.DataFrame([], columns=self.orbits.data_cols["KEP"])
            empty_orb.set_orbits(empty)
            self.ephems.set_orbits(empty_orb)
    def test_convert_to_oorb_array(self):
        # Check that orbital elements are converted.
        self.ephems._convert_to_oorb_elem(self.orbits.orbits, self.orbits.orb_format)
        self.assertEqual(len(self.ephems.oorb_elem), len(self.orbits))
        # Element 7 is the orbit-format code (2 == COM), element 9 the
        # magnitude-type code.
        self.assertEqual(self.ephems.oorb_elem[0][7], 2)
        self.assertEqual(self.ephems.oorb_elem[0][9], 3)
        self.assertEqual(self.ephems.oorb_elem[0][1], self.orbits.orbits["q"][0])
        # Test that we can convert KEP orbital elements too.
        self.ephems._convert_to_oorb_elem(self.orbits_kep.orbits, self.orbits_kep.orb_format)
        self.assertEqual(len(self.ephems.oorb_elem), len(self.orbits_kep))
        # Format code 3 == KEP; element 1 is semi-major axis 'a'.
        self.assertEqual(self.ephems.oorb_elem[0][7], 3)
        self.assertEqual(self.ephems.oorb_elem[0][1], self.orbits_kep.orbits["a"][0])
    def test_convert_from_oorb_array(self):
        # Check that we can convert orbital elements TO oorb format and back
        # without losing info
        # (except ObjId -- we will lose that unless we use updateOrbits.)
        self.ephems._convert_to_oorb_elem(self.orbits.orbits, self.orbits.orb_format)
        new_orbits = Orbits()
        new_orbits.set_orbits(self.orbits.orbits)
        new_orbits.update_orbits(self.ephems.convert_from_oorb_elem())
        self.assertEqual(new_orbits, self.orbits)
    def test_convert_times(self):
        times = np.arange(49353, 49353 + 10, 0.5)
        eph_times = self.ephems._convert_times(times, "UTC")
        # Check that shape of eph_times is correct. (times x 2)
        self.assertEqual(eph_times.shape[0], len(times))
        self.assertEqual(eph_times.shape[1], 2)
        # Check that 'timescale' for eph_times is correct.
        # (pyoorb timescale codes: 1 == UTC, 4 == TAI)
        self.assertEqual(eph_times[0][1], 1)
        eph_times = self.ephems._convert_times(times, "TAI")
        self.assertEqual(eph_times[0][1], 4)
    def test_oorb_ephemeris(self):
        self.ephems.set_orbits(self.orbits)
        times = np.arange(49353, 49353 + 3, 0.25)
        eph_times = self.ephems._convert_times(times)
        # Basic ephemerides.
        oorb_ephs = self.ephems._generate_oorb_ephs_basic(eph_times, obscode=807, eph_mode="N")
        # Check that it returned the right sort of array.
        self.assertEqual(
            oorb_ephs.shape,
            (len(self.ephems.oorb_elem), len(times), self.len_ephems_basic),
        )
        # Full ephemerides
        oorb_ephs = self.ephems._generate_oorb_ephs_full(eph_times, obscode=807, eph_mode="N")
        # Check that it returned the right sort of array.
        self.assertEqual(
            oorb_ephs.shape,
            (len(self.ephems.oorb_elem), len(times), self.len_ephems_full),
        )
    def test_ephemeris(self):
        # Calculate and convert ephemerides.
        self.ephems.set_orbits(self.orbits)
        times = np.arange(49353, 49353 + 2, 0.3)
        eph_times = self.ephems._convert_times(times)
        oorb_ephs = self.ephems._generate_oorb_ephs_basic(eph_times, obscode=807)
        # Group by object, and check grouping.
        ephs = self.ephems._convert_oorb_ephs_basic(oorb_ephs, by_object=True)
        self.assertEqual(len(ephs), len(self.orbits))
        # Group by time, and check grouping.
        oorb_ephs = self.ephems._generate_oorb_ephs_basic(eph_times, obscode=807)
        ephs = self.ephems._convert_oorb_ephs_basic(oorb_ephs, by_object=False)
        self.assertEqual(len(ephs), len(times))
        # And test all-wrapped-up method:
        ephs_all = self.ephems.generate_ephemerides(
            times,
            obscode=807,
            eph_mode="N",
            eph_type="basic",
            time_scale="UTC",
            by_object=False,
        )
        # See https://rubinobs.atlassian.net/browse/SP-1633
        # This needs to be fixed, but on a separate ticket
        # for key in ephs_all.dtype.names:
        #    np.testing.assert_almost_equal(ephs_all[key], ephs[key])
        # Reset ephems to use KEP Orbits, and calculate new ephemerides.
        self.ephems.set_orbits(self.orbits_kep)
        oorb_ephs = self.ephems._generate_oorb_ephs_basic(eph_times, obscode=807, eph_mode="N")
        ephs_kep = self.ephems._convert_oorb_ephs_basic(oorb_ephs, by_object=True)
        self.assertEqual(len(ephs_kep), len(self.orbits_kep))
        oorb_ephs = self.ephems._generate_oorb_ephs_basic(eph_times, obscode=807, eph_mode="N")
        ephs_kep = self.ephems._convert_oorb_ephs_basic(oorb_ephs, by_object=False)
        self.assertEqual(len(ephs_kep), len(times))
        # And test all-wrapped-up method:
        ephs_all_kep = self.ephems.generate_ephemerides(
            times,
            obscode=807,
            eph_mode="N",
            eph_type="basic",
            time_scale="UTC",
            by_object=False,
        )
        # Also https://rubinobs.atlassian.net/browse/SP-1633
        # for key in ephs_all_kep.dtype.names:
        #    np.testing.assert_almost_equal(ephs_all_kep[key], ephs_kep[key])
        # Check that ephemerides calculated from the different (COM/KEP)
        # orbits are almost equal
        for column in ephs.dtype.names:
            np.testing.assert_allclose(ephs[column], ephs_kep[column], rtol=1e-5, atol=1e-4)
        # Check that the wrapped method using KEP elements and the wrapped
        # method using COM elements match.
        for column in ephs_all.dtype.names:
            np.testing.assert_allclose(ephs_all_kep[column], ephs_all[column], rtol=1e-5, atol=1e-4)
class TestJPLValues(unittest.TestCase):
    """Test the oorb generated RA/Dec values against
    JPL generated RA/Dec values."""
    def setUp(self):
        # Read orbits.
        self.orbits = Orbits()
        self.jpl_dir = os.path.join(get_data_dir(), "tests", "jpl_testdata")
        self.orbits.read_orbits(os.path.join(self.jpl_dir, "S0_n747.des"), skiprows=1)
        # Read JPL ephems.
        self.jpl = pd.read_csv(os.path.join(self.jpl_dir, "807_n747.txt"), sep=r"\s+")
        # Temp key fix
        self.jpl["obj_id"] = self.jpl["objId"]
        # Add times in TAI and UTC, because.
        t = Time(self.jpl["epoch_mjd"], format="mjd", scale="utc")
        self.jpl["mjdTAI"] = t.tai.mjd
        self.jpl["mjdUTC"] = t.utc.mjd
    def tear_down(self):
        del self.orbits
        del self.jpl
    def test_ra_dec(self):
        # We won't compare Vmag, because this also needs information
        # on trailing losses.
        times = self.jpl["mjdUTC"].unique()
        delta_ra = np.zeros(len(times), float)
        delta_dec = np.zeros(len(times), float)
        for i, t in enumerate(times):
            # Find the JPL obj_ids visible at this time.
            j = self.jpl.query("mjdUTC == @t").sort_values("obj_id")
            # Set the ephems, using the objects seen at this time.
            suborbits = self.orbits.orbits.query("obj_id in @j.obj_id").sort_values("obj_id")
            sub_orbits = Orbits()
            sub_orbits.set_orbits(suborbits)
            ephems = PyOrbEphemerides()
            ephems.set_orbits(sub_orbits)
            ephs = ephems.generate_ephemerides(
                [t],
                time_scale="UTC",
                obscode=807,
                eph_mode="N",
                eph_type="Basic",
                by_object=False,
            )
            # Record the worst-case per-epoch discrepancy (degrees).
            delta_ra[i] = np.abs(ephs["ra"] - j["ra_deg"].values).max()
            delta_dec[i] = np.abs(ephs["dec"] - j["dec_deg"].values).max()
        # Convert to mas
        delta_ra *= 3600.0 * 1000.0
        delta_dec *= 3600.0 * 1000.0
        # Much of the time we're closer than 1mas,
        # but there are a few which hit higher values.
        print("max JPL errors", np.max(delta_ra), np.max(delta_dec))
        print("std JPL errors", np.std(delta_ra), np.std(delta_dec))
        self.assertLess(np.max(delta_ra), 25)
        self.assertLess(np.max(delta_dec), 25)
        self.assertLess(np.std(delta_ra), 3)
        self.assertLess(np.std(delta_dec), 3)
if __name__ == "__main__":
unittest.main()
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@tests@moving_objects@test_ephemerides.py@.PATH_END.py
|
{
"filename": "load_dust.py",
"repo_name": "rometsch/fargocpt",
"repo_path": "fargocpt_extracted/fargocpt-master/test/dust_diffusion/load_dust.py",
"type": "Python"
}
|
import numpy as np
from fargocpt import Loader
def get_sigma_dust(outdir, N, nbins=51):
    """Compute the radial dust surface-density profile from particle radii.

    Histograms the particle radii of output *N* in *outdir* into *nbins*
    radial bins and converts counts to a surface density assuming
    azimuthal symmetry (annulus area 2*pi*r*dr).

    Returns (sigma_dust, bin midpoints, bin widths).
    """
    loader = Loader(outdir)
    radii = loader.particles.get("r", N).value
    counts, edges = np.histogram(radii, bins=nbins)
    widths = np.diff(edges)
    centers = edges[:-1] + 0.5 * widths
    sigma_dust = counts / (2 * np.pi * centers * widths)
    return sigma_dust, centers, widths
|
rometschREPO_NAMEfargocptPATH_START.@fargocpt_extracted@fargocpt-master@test@dust_diffusion@load_dust.py@.PATH_END.py
|
{
"filename": "GCPDataTee.py",
"repo_name": "CMB-S4/spt3g_software",
"repo_path": "spt3g_software_extracted/spt3g_software-master/gcp/python/GCPDataTee.py",
"type": "Python"
}
|
import struct, socket, errno, numpy, time, threading
from .. import core, dfmux
class PagerWatchdog(object):
    '''
    Module that sends a watchdog (ping) message to the GCP pager when the parent
    process is running successfully.  Modify the `data_valid` method for
    particular use cases, and call the `run` method periodically in your
    application.
    '''
    # Pager host/port and per-connection socket timeout in seconds.
    host = 'sptnet.spt'
    port = 50040
    timeout = 20
    def __init__(self, name, interval=600, sim=False):
        """Create the watchdog and send an initial ping in a background thread.

        Arguments
        ---------
        name : str
            Watchdog name registered with the pager (lowercased on the wire).
        interval : float
            Minimum seconds between pings.
        sim : bool
            If True, log but do not actually open a network connection.
        """
        self.name = name.lower()
        self.unit = '{}Watchdog'.format(name.capitalize())
        self.interval = interval
        self.sim = sim
        self.last_ping = None
        # ping on startup
        self.thread = threading.Thread(target=self.ping)
        self.thread.start()
    def ping(self):
        """
        Send a watchdog ping message to the GCP pager process.  This method is
        called by the `run` method at regular intervals whenever the
        `data_valid` method returns True.
        """
        try:
            if not self.sim:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
                sock.send('watchdog {}'.format(self.name).encode())
                resp = sock.recv(4096)
                if resp:
                    core.log_debug(
                        'Sent watchdog ping, got response {}'.format(resp.decode()),
                        unit=self.unit,
                    )
                sock.close()
        except Exception as e:
            core.log_error('Error sending watchdog ping: {}'.format(e), unit=self.unit)
            # try again in ten seconds
            self.last_ping = time.time() - self.interval + 10
        else:
            core.log_info('Sent watchdog ping', unit=self.unit)
            self.last_ping = time.time()
    def data_valid(self, *args, **kwargs):
        """
        Returns True if the watchdog should ping, otherwise False.  Arguments
        are passed to this method from the `run` method.
        """
        raise NotImplementedError
    def run(self, *args, **kwargs):
        """
        When called, issues a watchdog ping message if the interval time has passed, and
        the `data_valid` method returns True.  All input arguments are passed to the
        `data_valid` method for validation.
        """
        # only ping if ready
        if not self.data_valid(*args, **kwargs):
            return
        # only ping if another ping isn't already running
        if self.thread is not None:
            if not self.thread.is_alive():
                del self.thread
                self.thread = None
            else:
                return
        # only ping on the appropriate interval
        now = time.time()
        if self.last_ping and (now - self.last_ping < self.interval):
            return
        # ping (in a background thread so a slow network doesn't block callers)
        self.thread = threading.Thread(target=self.ping)
        self.thread.start()
@core.indexmod
class DAQWatchdog(PagerWatchdog):
    """
    Watchdog that issues a ping to the GCP pager when the DAQ is operational.
    """
    def __init__(self, calibrator=False, interval=600, sim=False):
        """
        Arguments
        ---------
        calibrator : bool
            If True, ensure that the calibrator is also running successfully
            before sending a ping.
        """
        super(DAQWatchdog, self).__init__('DAQ', interval=interval, sim=sim)
        # Timestamp of the most recent data problem (boards or calibrator).
        self.last_missing = None
        self.boards_missing = 0
        # Timestamps of the last logged board / calibrator error (None when
        # healthy) -- used to log each loss/recovery transition only once.
        self.last_log_boards = None
        self.calibrator = calibrator
        self.last_log_cal = None
    def data_valid(self, frame):
        """
        Check the incoming frame for completeness.
        * Ensure that all modules in the listed iceboards are reporting.
        * If `calibrator` is True, ensure that the calibrator sync signal is in the frame.
        """
        # always ready in sim mode
        if self.sim:
            return True
        # only ping on Timepoint frames
        if 'DfMux' not in frame:
            return False
        now = time.time()
        retval = True
        # only ping if all expected modules are present
        data = frame['DfMux']
        boards_expected = len(data.keys())
        boards_complete = sum([v.nmodules > 0 and v.Complete() for v in data.values()])
        boards_missing = boards_expected - boards_complete
        if boards_missing:
            if not self.last_log_boards or boards_missing != self.boards_missing:
                # log loss or change in missing count
                core.log_error(
                    "Missing data from {} boards in DAQ data stream".format(boards_missing),
                    unit=self.unit,
                )
                self.last_log_boards = now
                self.boards_missing = boards_missing
            self.last_missing = now
            retval = False
        elif self.last_log_boards and now - self.last_missing > 10:
            # log recovery
            core.log_notice("All boards recovered in DAQ data stream", unit=self.unit)
            self.last_log_boards = None
        # only ping if the calibrator sync signal is present
        if self.calibrator and 'CalibratorOn' not in frame:
            if not self.last_log_cal:
                # log loss
                core.log_error(
                    "Missing calibrator signal in DAQ data stream",
                    unit=self.unit,
                )
                self.last_log_cal = now
            self.last_missing = now
            retval = False
        elif self.calibrator and self.last_log_cal and now - self.last_missing > 10:
            # log recovery
            core.log_notice(
                "Calibrator signal recovered in DAQ data stream", unit=self.unit
            )
            self.last_log_cal = None
        # only ping if normal data acquisition has been going for a bit
        # (10 seconds of clean data required after any problem)
        if self.last_missing and now - self.last_missing < 10:
            retval = False
        return retval
    def __call__(self, frame):
        # Pipeline entry point: delegate to PagerWatchdog.run for rate
        # limiting and threading.
        self.run(frame)
@core.indexmod
class GCPSignalledHousekeeping(object):
    '''
    Module that collects housekeeping data when connected to. If
    collect_on_start is True (the default), injects an HK frame
    unconditionally at startup.
    '''
    def __init__(self, port=50011, collect_on_start=True):
        # Listening socket is non-blocking so the pipeline never stalls
        # waiting for a GCP connection.
        self.socket = socket.socket()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(('', port))
        self.socket.listen(5)
        self.socket.setblocking(False)
        self.collect_on_start = collect_on_start
        self.first_frame = True
    def __del__(self):
        self.socket.close()
    def __call__(self, frame):
        """Inject a Housekeeping frame before this Timepoint frame when
        signalled (or on the first frame, if collect_on_start)."""
        # Only try on timepoint frames
        if frame.type != core.G3FrameType.Timepoint:
            return
        if self.collect_on_start and self.first_frame:
            self.first_frame = False
            return [core.G3Frame(core.G3FrameType.Housekeeping), frame]
        # Check for new connections
        try:
            s, origin_ip = self.socket.accept()
        except socket.error as e:
            # EAGAIN/EWOULDBLOCK just means no one has connected.
            if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                raise
            return
        core.log_debug('Accepted housekeeping collection signal from %s:%d' % origin_ip,
                       unit='GCPSignalledHousekeeping')
        s.close()
        return [core.G3Frame(core.G3FrameType.Housekeeping), frame]
@core.indexmod
class GCPHousekeepingTee(object):
    '''
    Module that serves housekeeping information to GCP when asked. If a key
    named "DataOK" exists in the housekeeping frames, will also transmit
    that information to GCP for paging purposes.
    '''
    def __init__(self, port=50010, verbose=False):
        # make some noise at startup
        core.log_info("Initialize gcp.GCPHousekeepingTee on port %d" % port,
                      unit='GCPHousekeepingTee')
        # Start with an empty (but well-formed) packed blob so early
        # connections get a valid message before any HK frame arrives.
        self.hkblob = self.PackHKToGCP(dfmux.DfMuxHousekeepingMap())
        self.socket = socket.socket()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(('', port))
        self.socket.listen(25)
        self.socket.setblocking(False)
        self.verbose = verbose # flag for printing debugging statements
    def __del__(self):
        # Clear any pending connections. No one is getting anything now.
        # This works around some misfeatures in the Linux kernel that
        # do not occur in other, better socket implementations.
        while True:
            try:
                s, origin_ip = self.socket.accept()
            except socket.error as e:
                if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                    raise
                break
            s.close()
        self.socket.close()
    def __call__(self, frame):
        # Repack the cached blob whenever a new Housekeeping frame arrives.
        if frame.type == core.G3FrameType.Housekeeping:
            dataok = True
            if 'DataOK' in frame:
                dataok = frame['DataOK']
            self.hkblob = self.PackHKToGCP(
                frame['DfMuxHousekeeping'], dataok=dataok,
                verbose=self.verbose)
        # Check for new connections, send any interested
        # parties the same data
        cxs = []
        while True:
            try:
                s, origin_ip = self.socket.accept()
            except socket.error as e:
                if e.errno != errno.EAGAIN and \
                   e.errno != errno.EWOULDBLOCK:
                    raise
                break
            core.log_debug('Accepted connection from %s:%d' % origin_ip,
                           unit='GCPHousekeepingTee')
            cxs.append(s)
        for s in cxs:
            s.setblocking(True)
            s.sendall(self.hkblob)
            s.close()
    @staticmethod
    def PackHKToGCP(hk, dataok=True, verbose=False):
        """Pack a DfMuxHousekeepingMap into the binary layout expected by
        GCP (see HkDataStruct in GCP). All fields are little-endian except
        the leading big-endian total-length prefix."""
        if verbose:
            core.log_debug('gcp.GCPHousekeepingTee.PackHKToGCP(hk)', unit='GCPHousekeepingTee')
        buf = struct.pack('<?I', dataok, len(hk))
        # See HkDataStruct in GCP
        for ip, board in hk.items():
            # if verbose mode, print a few registers for debugging
            if verbose:
                core.log_debug("%d, %10.6f, %10.6f, %10.6f, %10.6f, %10.6f" %
                               (ip, board.temperatures['MOTHERBOARD_TEMPERATURE_ARM'],
                                board.temperatures['MOTHERBOARD_TEMPERATURE_FPGA'],
                                board.temperatures['MOTHERBOARD_TEMPERATURE_FPGA_DIE'],
                                board.temperatures['MOTHERBOARD_TEMPERATURE_PHY'],
                                board.temperatures['MOTHERBOARD_TEMPERATURE_POWER']),
                               unit='GCPHousekeepingTee')
            # Motherboard temperatures (5 floats).
            buf += struct.pack('<fffff',
                               board.temperatures['MOTHERBOARD_TEMPERATURE_ARM'],
                               board.temperatures['MOTHERBOARD_TEMPERATURE_FPGA'],
                               board.temperatures['MOTHERBOARD_TEMPERATURE_FPGA_DIE'],
                               board.temperatures['MOTHERBOARD_TEMPERATURE_PHY'],
                               board.temperatures['MOTHERBOARD_TEMPERATURE_POWER'])
            # Motherboard rail voltages (9 floats).
            buf += struct.pack('<fffffffff',
                               board.voltages['MOTHERBOARD_RAIL_VADJ'],
                               board.voltages['MOTHERBOARD_RAIL_VCC12V0'],
                               board.voltages['MOTHERBOARD_RAIL_VCC1V0'],
                               board.voltages['MOTHERBOARD_RAIL_VCC1V0_GTX'],
                               board.voltages['MOTHERBOARD_RAIL_VCC1V2'],
                               board.voltages['MOTHERBOARD_RAIL_VCC1V5'],
                               board.voltages['MOTHERBOARD_RAIL_VCC1V8'],
                               board.voltages['MOTHERBOARD_RAIL_VCC3V3'],
                               board.voltages['MOTHERBOARD_RAIL_VCC5V5'])
            # Motherboard rail currents (9 floats, same rail order).
            buf += struct.pack('<fffffffff',
                               board.currents['MOTHERBOARD_RAIL_VADJ'],
                               board.currents['MOTHERBOARD_RAIL_VCC12V0'],
                               board.currents['MOTHERBOARD_RAIL_VCC1V0'],
                               board.currents['MOTHERBOARD_RAIL_VCC1V0_GTX'],
                               board.currents['MOTHERBOARD_RAIL_VCC1V2'],
                               board.currents['MOTHERBOARD_RAIL_VCC1V5'],
                               board.currents['MOTHERBOARD_RAIL_VCC1V8'],
                               board.currents['MOTHERBOARD_RAIL_VCC3V3'],
                               board.currents['MOTHERBOARD_RAIL_VCC5V5'])
            # Fixed-width (255 byte) board name string.
            buf += struct.pack('255s', ('iceboard' + board.serial).encode())
            # Per-mezzanine status, rails, and temperatures.
            for i in [1,2]:
                buf += struct.pack('<?',
                                   board.mezz[i].present)
                buf += struct.pack('<?',
                                   board.mezz[i].power)
                buf += struct.pack('<?',
                                   board.mezz[i].squid_controller_power)
                buf += struct.pack('<fff',
                                   board.mezz[i].voltages['MEZZANINE_RAIL_VADJ'],
                                   board.mezz[i].voltages['MEZZANINE_RAIL_VCC12V0'],
                                   board.mezz[i].voltages['MEZZANINE_RAIL_VCC3V3'])
                buf += struct.pack('<fff',
                                   board.mezz[i].currents['MEZZANINE_RAIL_VADJ'],
                                   board.mezz[i].currents['MEZZANINE_RAIL_VCC12V0'],
                                   board.mezz[i].currents['MEZZANINE_RAIL_VCC3V3'])
                buf += struct.pack('<fff',
                                   board.mezz[i].temperature,
                                   board.mezz[i].squid_controller_temperature,
                                   board.mezz[i].squid_heater)
        # Prefix with total message length
        buf = struct.pack('!q', len(buf)) + buf
        return buf
@core.indexmod
class GCPBoloDataTee(object):
    '''
    Module that serves bolometer data to GCP when asked. Once a second,
    will serve the data from the previous second of bolometer data.
    If a boolean key appears in the timepoint frames named "DataOK",
    this will be sent to GCP as a data quality indicator for paging.
    '''
    def __init__(self, port=50020, verbose=False, bolometers=None):
        '''
        Send data from the given list of bolometer logical IDs to the GCP.

        Arguments
        ---------
        port : int
            TCP port on which to listen for GCP requests.
        verbose : bool
            Kept for interface compatibility (not used in this class).
        bolometers : list of str
            Logical bolometer IDs whose samples are forwarded.
        '''
        # Avoid the shared mutable-default-argument pitfall (was `bolometers=[]`).
        if bolometers is None:
            bolometers = []
        core.log_info('Listening for requests from GCP on port %d' % port, unit='GCPBoloDataTee')
        core.log_info('Selected bolometers: %s' % bolometers, unit='GCPBoloDataTee')
        self.bololist = bolometers
        self.socket = socket.socket()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(('', port))
        self.socket.listen(5)
        # Non-blocking so accept() can be polled from __call__.
        self.socket.setblocking(False)
        # Cache: {unix second: {bolo id: [samples], 'DataOK': [flags]}}
        self.data = {}
    def __call__(self, frame):
        if frame.type == core.G3FrameType.Wiring:
            # Remember the wiring map so logical IDs can be resolved to
            # board/module/channel indices below.
            self.wmap = frame['WiringMap']
        if frame.type == core.G3FrameType.Timepoint:
            sec = int(frame['EventHeader'].time/core.G3Units.s)
            # Add data from this sample to the cache for this calendar
            # second, replacing any missing detectors with -1
            if sec not in self.data:
                self.data[sec] = {b: [] for b in self.bololist}
                self.data[sec]['DataOK'] = []
            d = self.data[sec]
            for b in self.bololist:
                w = self.wmap[b]
                board = frame['DfMux'][w.board_serial]
                if board.nblocks > 1:
                    mod_idx = w.module * board.nblocks + w.channel // board.nchannels
                    chan_idx = w.channel % board.nchannels
                else:
                    mod_idx = w.module
                    chan_idx = w.channel
                try:
                    d[b].append(board[mod_idx][chan_idx])
                except KeyError:
                    d[b].append(-1)
            if 'DataOK' in frame:
                self.data[sec]['DataOK'].append(bool(frame['DataOK']))
            else:
                self.data[sec]['DataOK'].append(True)
            # Toss ancient data: we keep the last second (complete)
            # for GCP, plus the second we are currently accumulating
            if len(self.data) > 2:
                for k in sorted(self.data.keys())[:-2]:
                    del self.data[k]
            # Check for new connections once we have a buffer
            if len(self.data) == 2:
                try:
                    s, origin_ip = self.socket.accept()
                except socket.error as e:
                    if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                        raise
                    return
                core.log_debug('Accepted connection from %s:%d' % origin_ip, unit='GCPBoloDataTee')
                s.setblocking(True)
                keys = sorted(self.data.keys())
                # Serve the older (complete) second.
                s.sendall(self.PackForGCP(self.data[keys[0]]))
                s.close()
                # Delete data once enqueued
                del self.data[keys[0]]
    @staticmethod
    def PackForGCP(data):
        # Input data: dict of bolo names to samples (can be a
        # G3TimestreamMap, in principle)
        #
        # On-wire format:
        # U64 Length of buffer (big-endian)
        # U8 Data Quality Indicator: True (1) = good
        # U32 Number of detectors in list
        # U32 Number of samples in the last second
        # N copies of:
        # - 16 byte character string with detector name
        # - N_sample 32-bit signed integers with data
        #
        # All fields are little-endian, unless otherwise noted
        #
        # BUG FIX: the original indexed dict views (data.values()[0],
        # data.keys()[i]), which only works on Python 2 and raises
        # TypeError on Python 3. Build an explicit list of detector
        # names (everything except the 'DataOK' flag list) instead.
        names = [key for key in data if key != 'DataOK']
        n_samples = len(data[names[0]]) if names else len(data['DataOK'])
        buf = struct.pack('<?II', bool(numpy.all(data['DataOK'])), len(names), n_samples)
        for name in names:
            assert(len(data[name]) == n_samples)
            buf += struct.pack('16s', name.encode())
            buf += struct.pack('<%di' % n_samples, *data[name])
        # Prefix with total message length (big-endian)
        buf = struct.pack('!q', len(buf)) + buf
        return buf
|
CMB-S4REPO_NAMEspt3g_softwarePATH_START.@spt3g_software_extracted@spt3g_software-master@gcp@python@GCPDataTee.py@.PATH_END.py
|
{
"filename": "friendly.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/styles/friendly.py",
"type": "Python"
}
|
"""
pygments.styles.friendly
~~~~~~~~~~~~~~~~~~~~~~~~
A modern style based on the VIM pyte theme.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
__all__ = ['FriendlyStyle']
class FriendlyStyle(Style):
    """
    A modern style based on the VIM pyte theme.
    """
    name = 'friendly'
    # Page/background colors used by formatters.
    background_color = "#f0f0f0"
    line_number_color = "#666666"
    # Token -> style-string mapping; unlisted tokens inherit from parents.
    styles = {
        Whitespace:                "#bbbbbb",
        Comment:                   "italic #60a0b0",
        Comment.Preproc:           "noitalic #007020",
        Comment.Special:           "noitalic bg:#fff0f0",
        Keyword:                   "bold #007020",
        Keyword.Pseudo:            "nobold",
        Keyword.Type:              "nobold #902000",
        Operator:                  "#666666",
        Operator.Word:             "bold #007020",
        Name.Builtin:              "#007020",
        Name.Function:             "#06287e",
        Name.Class:                "bold #0e84b5",
        Name.Namespace:            "bold #0e84b5",
        Name.Exception:            "#007020",
        Name.Variable:             "#bb60d5",
        Name.Constant:             "#60add5",
        Name.Label:                "bold #002070",
        Name.Entity:               "bold #d55537",
        Name.Attribute:            "#4070a0",
        Name.Tag:                  "bold #062873",
        Name.Decorator:            "bold #555555",
        String:                    "#4070a0",
        String.Doc:                "italic",
        String.Interpol:           "italic #70a0d0",
        String.Escape:             "bold #4070a0",
        String.Regex:              "#235388",
        String.Symbol:             "#517918",
        String.Other:              "#c65d09",
        Number:                    "#40a070",
        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.EmphStrong:        "bold italic",
        Generic.Prompt:            "bold #c65d09",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",
        Error:                     "border:#FF0000"
    }
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@styles@friendly.py@.PATH_END.py
|
{
"filename": "transform_dataset.ipynb",
"repo_name": "astrockragh/Mangrove",
"repo_path": "Mangrove_extracted/Mangrove-main/transform/transform_dataset.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import torch, os, pickle, time
import torch_geometric as tg
from torch_geometric.data import Data
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
import os.path as osp
import networkx as nx
path='~/../../tigress/cj1223/merger_trees/isotrees/'
transform_path='~/../../scratch/gpfs/cj1223/GraphStorage/transformer'
all_cols=np.array([0,2,4,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,35]+list(range(37,60)))
```
```python
os.listdir(osp.expanduser('~/../../../scratch/gpfs/cj1223/GraphStorage/'))
```
['vlarge_all_4t_z1.0_standard_quant',
'vlarge_4t_quantile_raw_redshift_75_all',
'vlarge_all_4t_z1.0_quantile_raw',
'vlarge_all_4t_z0.3_None',
'vlarge_all_4t_z2.0_standard_quant',
'vlarge_all_4t_z2.0_None',
'redshift_scan_0',
'testid_all_4t_z2.0_None',
'vlarge_all_4t_z0.0_quantile_stand',
'vlarge_all_multi_try1',
'vlarge_4t_quantile_raw_redshift_99_all',
'vlarge_all_4t_z2.0_quantile_raw',
'vlarge_all_4t_z0.0_standard_quant',
'vlarge_all_4t_z0.5_quantile_quant',
'vlarge_4t_quantile_raw_redshift_50_all',
'vlarge_all_4t_z2.0_quantile_stand',
'vlarge_all_4t_z1.0_quantile_quant',
'transformers',
'vlarge_all_4t_z0.0_standard_raw',
'vlarge_all_4t_quantile_raw_final',
'vlarge_all_4t_z0.5_standard_stand',
'vlarge_all_4t_z1.8_quantile_raw',
'vlarge_all_4t_z0.5_standard_quant',
'vlarge_all_4t_zall_quantile_raw_trainandtest',
'vlarge_all_4t_z0.0_quantile_raw',
'old',
'vlarge_all_4t_z1.0_None',
'vlarge_all_4t_z1.0_standard_stand',
'vlarge_all_4t_z0.8_None',
'vlarge_all_4t_z1.8_None',
'vlarge_all_4t_z2.0_standard_raw',
'vlarge_4t_quantile_raw_redshift_95_all',
'testid_all_4t_z0.0_None',
'vlarge_all_4t_z3.0_None',
'vlarge_all_4t_z0.5_standard_raw',
'vlarge_all_4t_z1.5_None',
'vlarge_all_4t_z0.0_None',
'vlarge_4t_quantile_raw_redshift_85_all',
'vlarge_all_4t_z0.5_quantile_raw',
'vlarge_all_4t_z1.0_standard_raw',
'vlarge_all_4t_quantile_raw',
'testt_all_4t_z0.0_None',
'vlarge_all_smass',
'vlarge_all_4t_z0.0_quantile_quant',
'vlarge_all_4t_z0.5_quantile_stand',
'vlarge_all_4t_zall_quantile_raw',
'vlarge_all_4t_z0.0_standard_stand',
'vlarge_all_4t_z1.0_quantile_stand',
'vlarge_all_4t_z2.0_quantile_quant',
'vlarge_all_4t_z2.0_standard_stand',
'vlarge_all_4t_z0.5_None']
```python
# Snapshot datasets (raw, "None"-transform) to load and merge.
cases=['vlarge_all_4t_z0.3_None', 'vlarge_all_4t_z0.8_None', 'vlarge_all_4t_z1.5_None', 'vlarge_all_4t_z3.0_None']
# dataset=[]
xss=[]          # per-case stacked node-feature arrays
yss=[]          # per-case stacked graph-target arrays
lss=[]          # per-case arrays of nodes-per-graph
edge_indexs=[]  # per-case lists of edge_index tensors
edge_attrs=[]   # per-case lists of edge_attr tensors
splits=[]       # per-case cumulative node counts (graph boundaries)
ltot=[]         # total node count per case
for case in tqdm(cases):
    data=pickle.load(open(osp.expanduser(f'~/../../../scratch/gpfs/cj1223/GraphStorage/{case}/data.pkl'), 'rb'))
    # dataset.append(data)
    xs=[]
    ys=[]
    ls=[]
    edge_index=[]
    edge_attr=[]
    # Pull the arrays out of each torch_geometric Data object.
    for d in data:
        xs.append(d.x.numpy())
        ys.append(d.y.numpy())
        edge_index.append(d.edge_index)
        edge_attr.append(d.edge_attr)
        l=len(d.x.numpy())
        ls.append(l)
    edge_indexs.append(edge_index)
    edge_attrs.append(edge_attr)
    # Stack all graphs of this case into single arrays, recording the
    # cumulative sums needed to split them back into graphs later.
    xs=np.vstack(xs)
    ltot.append(len(xs))
    ys=np.vstack(ys)
    ls=np.array(ls)
    splits.append(np.cumsum(ls))
    xss.append(xs)
    yss.append(ys)
    lss.append(ls)
# Number of graphs per case (used later to split the stacked targets).
yls=[]
for y in yss:
    yls.append(len(y))
```
100%|███████████████████████████████████████████████████████████████████████| 4/4 [02:33<00:00, 38.30s/it]
## Scale the targets
```python
### targets
# Example SAM output file, used only to read the galaxy-property names.
ex=f'../samout/{0}_{0}_{0}/galprop_0-99.dat'
# Header rows of the galprop file: one property name per row.
pdc=pd.read_table(path+ex, skiprows=0, delimiter=',', nrows=41, header=None)
# Row indices of the four target properties.
targets=[8,11,15,23]
colst=pdc.iloc[targets,0]
cols_t = []
for i, col in enumerate(colst):
    # Drop the first 4 characters of the raw name (presumably a prefix --
    # TODO confirm) and tag with the target index for plot titles.
    cols_t.append(col[4:]+f'({i})')
# Stack the per-case target arrays into one (n_graphs_total, n_targets) array.
yss=np.vstack(yss)
```
```python
transform='quantile'
transform_path=osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/transformers/{transform}_alltarg_4t.pkl")
# Pre-fit quantile transformers, one per target column.
scaler=pickle.load(open(transform_path, 'rb'))
fig,ax=plt.subplots(nrows=2,ncols=2, figsize=(12,8))
ax=ax.flatten()
ysQuant=np.copy(yss)
for i in tqdm(range(len(cols_t))):
    # NOTE(review): the transform lines below are commented out, so
    # ysQuant currently stays identical to the raw targets yss -- confirm
    # this is intentional before relying on the "quant" split downstream.
    # new=scaler[targets[i]].transform(yss[:,i].reshape(-1,1))
    # ysQuant[:,i]=np.hstack(new)
    # ax[i].hist(yss, bins=100, density=1, histtype='step');
    ax[i].hist(yss[:,i], bins=100, density=1, histtype='step');
    # ax[i].hist(new, bins=50, density=1, histtype='step');
    ax[i].set(title=cols_t[i])
fig.tight_layout()
```
100%|███████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 90.62it/s]

```python
yssplitQuant=np.split(ysQuant, np.cumsum(yls)[:-1])
```
```python
transform='standard'
transform_path=osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/transformers/{transform}_alltarg_4t.pkl")
# Pre-fit standard scalers for the targets (loaded but not applied below
# -- see the review note in the loop).
scaler0=pickle.load(open(transform_path, 'rb'))
# transform='power'
# transform_path=osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/transformers/{transform}_alltarg_1.pkl")
# scaler1=pickle.load(open(transform_path, 'rb'))
fig,ax=plt.subplots(nrows=2,ncols=2, figsize=(25,20))
ax=ax.flatten()
ysStandard=np.copy(yss)
# ysPower=np.copy(yss)
for i in tqdm(range(len(cols_t))):
    ax[i].hist(yss[:,i], bins=100, density=1, histtype='step');
    # NOTE(review): `new` is just the raw column and the assignment into
    # ysStandard is commented out, so ysStandard stays equal to yss --
    # confirm this is intentional.
    new=yss[:,i]
    # ysStandard[:,i]=np.hstack(new)
    ax[i].hist(new, bins=50, density=1, histtype='step');
    # new=scaler1[targets[i]].transform(new)
    # ysPower[:,i]=np.hstack(new)
    # ax[i].hist(new, bins=50, density=1, histtype='step');
    ax[i].set(title=cols_t[i])
fig.tight_layout()
```
100%|███████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 48.92it/s]

```python
yssplitRaw=np.split(yss, np.cumsum(yls)[:-1])
fig,ax=plt.subplots(nrows=2,ncols=2, figsize=(25,20))
ax=ax.flatten()
for i in tqdm(range(len(cols_t))):
ax[i].hist(yssplitRaw[0][:,i], bins=100, density=1, histtype='step');
# new=(yssplitRaw[0][:,i]-np.mean(yssplitRaw[0][:,i]))/np.std(yssplitRaw[0][:,i])
# print(np.mean(yssplitRaw[0][:,i]), np.std(yssplitRaw[0][:,i]) )
# print(np.mean(new), np.std(new) )
# ax[i].hist(new, bins=50, density=1, histtype='step');
# new=scaler1[targets[i]].transform(new)
# ysPower[:,i]=np.hstack(new)
# ax[i].hist(new, bins=50, density=1, histtype='step');
ax[i].set(title=cols_t[i])
fig.tight_layout()
```
100%|██████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 181.91it/s]

```python
yssplitStandard=np.split(ysStandard, np.cumsum(yls)[:-1])
yssplitRaw=np.split(yss, np.cumsum(yls)[:-1])
```
```python
targs={'quant': yssplitQuant,
'stand': yssplitStandard,
'raw': yssplitRaw}
```
## Scale the inputs
```python
xss=np.vstack(xss)
```
```python
halos=pd.read_table(path+f'isotree_0_0_0.dat', skiprows=0, nrows=1, delimiter='\s+')
```
```python
cols_h = []
for i, col in enumerate(halos.columns[all_cols]):
if col[-1] == ')':
cols_h.append(col[:-3]+f'({i})')
else:
cols_h.append(col+f'({i})')
```
```python
##raw plot
fig,ax=plt.subplots(nrows=7,ncols=7, figsize=(15,12))
ax=ax.flatten()
for i in tqdm(range(len(cols_h))):
ax[i].hist(xss[:,i], bins=100, density=1, histtype='step');
ax[i].set(title=cols_h[i])
fig.tight_layout()
```
100%|█████████████████████████████████████████████████████████████████████| 43/43 [01:29<00:00, 2.07s/it]

```python
transform='quantile'
transform_path=osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/transformers/{transform}_allfeat_1.pkl")
# Pre-fit quantile transformers for the node features, keyed by the raw
# halo-catalogue column index (all_cols).
scaler=pickle.load(open(transform_path, 'rb'))
fig,ax=plt.subplots(nrows=7,ncols=7, figsize=(15,11))
ax=ax.flatten()
xsQuant=np.copy(xss)
for i in tqdm(range(len(cols_h))):
    # Quantile-transform each feature column and plot the transformed
    # distribution for a sanity check.
    new=scaler[all_cols[i]].transform(xss[:,i].reshape(-1,1))
    xsQuant[:,i]=np.hstack(new)
    # ax[i].hist(xs[:,i], bins=100, density=1, histtype='step');
    ax[i].hist(new, bins=50, density=1, histtype='step');
    ax[i].set(title=cols_h[i])
fig.tight_layout()
```
100%|██████████████████████████████████████████████████████████████████| 43/43 [1:58:42<00:00, 165.64s/it]

```python
# Split the transformed feature matrix back into per-case, then per-graph arrays.
xssplit=np.split(xsQuant, np.cumsum(ltot)[:-1])
graph_xs = []
for i, x in enumerate(xssplit):
    graph_xs.append(np.split(x, np.cumsum(lss[i])[:-1]))
# Target scalings to write out for these (quantile-transformed) features.
targ=['raw']
for t in targ:
    yspe=targs[t]
    for i, case in enumerate(cases):
        dat=[]
        # Rebuild one torch_geometric Data object per graph, pairing the
        # transformed features with the original edges and chosen targets.
        for n in tqdm(range(len(graph_xs[i]))):
            x = torch.tensor(graph_xs[i][n], dtype=torch.float)
            y=torch.tensor(yspe[i][n], dtype=torch.float)
            graph=Data(x=x, edge_index=edge_indexs[i][n], edge_attr=edge_attrs[i][n], y=y)
            dat.append(graph)
        # Rename e.g. "..._None" -> "..._quantile_raw" for the output dir.
        case=case[:-4]+f'{transform}'+f'_{t}'
        print(case)
        print("Saving dataset")
        if not osp.exists(osp.expanduser(f'~/../../../scratch/gpfs/cj1223/GraphStorage/{case}')):
            os.mkdir(osp.expanduser(f'~/../../../scratch/gpfs/cj1223/GraphStorage/{case}'))
        with open(osp.expanduser(f'~/../../../scratch/gpfs/cj1223/GraphStorage/{case}/data.pkl'), 'wb') as handle:
            print(handle)
            pickle.dump(dat, handle)
```
100%|███████████████████████████████████████████████████████████| 119812/119812 [00:16<00:00, 7205.59it/s]
vlarge_all_4t_z0.3_quantile_raw
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z0.3_quantile_raw/data.pkl'>
100%|███████████████████████████████████████████████████████████| 125644/125644 [00:17<00:00, 7370.02it/s]
vlarge_all_4t_z0.8_quantile_raw
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z0.8_quantile_raw/data.pkl'>
100%|██████████████████████████████████████████████████████████| 124562/124562 [00:09<00:00, 13653.04it/s]
vlarge_all_4t_z1.5_quantile_raw
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z1.5_quantile_raw/data.pkl'>
100%|████████████████████████████████████████████████████████████| 95661/95661 [00:05<00:00, 18036.80it/s]
vlarge_all_4t_z3.0_quantile_raw
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z3.0_quantile_raw/data.pkl'>
```python
data
```
### Check that everything works
```python
datat[:5], data[:5]
```
([Data(x=[57, 43], edge_index=[2, 56], edge_attr=[56], y=[4]),
Data(x=[94, 43], edge_index=[2, 93], edge_attr=[93], y=[4]),
Data(x=[50, 43], edge_index=[2, 49], edge_attr=[49], y=[4]),
Data(x=[141, 43], edge_index=[2, 140], edge_attr=[140], y=[4]),
Data(x=[81, 43], edge_index=[2, 80], edge_attr=[80], y=[4])],
[Data(x=[57, 43], edge_index=[2, 56], edge_attr=[56], y=[4]),
Data(x=[94, 43], edge_index=[2, 93], edge_attr=[93], y=[4]),
Data(x=[50, 43], edge_index=[2, 49], edge_attr=[49], y=[4]),
Data(x=[141, 43], edge_index=[2, 140], edge_attr=[140], y=[4]),
Data(x=[81, 43], edge_index=[2, 80], edge_attr=[80], y=[4])])
```python
transform='standard'
transform_path=osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/transformers/{transform}_allfeat_4t.pkl")
# Pre-fit standard scalers for the node features, keyed by the raw
# halo-catalogue column index (all_cols).
scaler0=pickle.load(open(transform_path, 'rb'))
# transform='power'
# transform_path=osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/transformers/{transform}_allfeat_1.pkl")
# scaler1=pickle.load(open(transform_path, 'rb'))
fig,ax=plt.subplots(nrows=7,ncols=7, figsize=(30,23))
ax=ax.flatten()
xsStandard=np.copy(xss)
xsPower=np.copy(xss)
for i in tqdm(range(len(cols_h))):
    # ax[i].hist(xs[:,i], bins=100, density=1, histtype='step');
    # Standardise each feature column and plot the result.
    new=scaler0[all_cols[i]].transform(xss[:,i].reshape(-1,1))
    xsStandard[:,i]=np.hstack(new)
    ax[i].hist(new, bins=50, density=1, histtype='step');
    # new=scaler1[all_cols[i]].transform(new)
    # xsPower[:,i]=np.hstack(new)
    # ax[i].hist(new, bins=50, density=1, histtype='step');
    ax[i].set(title=cols_h[i])
fig.tight_layout()
```
100%|██████████████████████████████████████████████████████████████| 43/43 [2:25:27<00:00, 202.96s/it]

```python
transform='standard'
# Split the standardised feature matrix back into per-case, then per-graph arrays.
xssplit=np.split(xsStandard, np.cumsum(ltot)[:-1])
graph_xs = []
for i, x in enumerate(xssplit):
    graph_xs.append(np.split(x, np.cumsum(lss[i])[:-1]))
# Write out every target scaling for the standardised features.
targ=['quant', 'stand', 'raw']
for t in targ:
    yspe=targs[t]
    for i, case in enumerate(cases):
        dat=[]
        # Rebuild one torch_geometric Data object per graph.
        for n in tqdm(range(len(graph_xs[i]))):
            x = torch.tensor(graph_xs[i][n], dtype=torch.float)
            y=torch.tensor(yspe[i][n], dtype=torch.float)
            graph=Data(x=x, edge_index=edge_indexs[i][n], edge_attr=edge_attrs[i][n], y=y)
            dat.append(graph)
        # Rename e.g. "..._None" -> "..._standard_stand" for the output dir.
        case=case[:-4]+f'{transform}'+f'_{t}'
        print("Saving dataset")
        if not osp.exists(osp.expanduser(f'~/../../../scratch/gpfs/cj1223/GraphStorage/{case}')):
            os.mkdir(osp.expanduser(f'~/../../../scratch/gpfs/cj1223/GraphStorage/{case}'))
        with open(osp.expanduser(f'~/../../../scratch/gpfs/cj1223/GraphStorage/{case}/data.pkl'), 'wb') as handle:
            print(handle)
            pickle.dump(dat, handle)
```
100%|███████████████████████████████████████████████████████| 108808/108808 [00:22<00:00, 4766.26it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z0.0_standard_quant/data.pkl'>
100%|███████████████████████████████████████████████████████| 123198/123198 [00:16<00:00, 7517.58it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z0.5_standard_quant/data.pkl'>
100%|███████████████████████████████████████████████████████| 126565/126565 [00:24<00:00, 5262.86it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z1.0_standard_quant/data.pkl'>
100%|███████████████████████████████████████████████████████| 118127/118127 [00:13<00:00, 9061.82it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z2.0_standard_quant/data.pkl'>
100%|███████████████████████████████████████████████████████| 108808/108808 [00:22<00:00, 4734.30it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z0.0_standard_stand/data.pkl'>
100%|███████████████████████████████████████████████████████| 123198/123198 [00:18<00:00, 6545.59it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z0.5_standard_stand/data.pkl'>
100%|███████████████████████████████████████████████████████| 126565/126565 [00:23<00:00, 5380.36it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z1.0_standard_stand/data.pkl'>
100%|███████████████████████████████████████████████████████| 118127/118127 [00:13<00:00, 8897.29it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z2.0_standard_stand/data.pkl'>
100%|███████████████████████████████████████████████████████| 108808/108808 [00:17<00:00, 6251.74it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z0.0_standard_raw/data.pkl'>
100%|███████████████████████████████████████████████████████| 123198/123198 [00:23<00:00, 5175.79it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z0.5_standard_raw/data.pkl'>
100%|███████████████████████████████████████████████████████| 126565/126565 [00:24<00:00, 5233.39it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z1.0_standard_raw/data.pkl'>
100%|███████████████████████████████████████████████████████| 118127/118127 [00:13<00:00, 8730.81it/s]
Saving dataset
<_io.BufferedWriter name='/home/cj1223/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z2.0_standard_raw/data.pkl'>
```python
datat=pickle.load(open(osp.expanduser(\
f'~/../../../scratch/gpfs/cj1223/GraphStorage/vlarge_all_4t_z2.0_standard_stand/data.pkl'), 'rb'))
```
```python
datat[:5], data[:5]
```
([Data(x=[57, 43], edge_index=[2, 56], edge_attr=[56], y=[4]),
Data(x=[94, 43], edge_index=[2, 93], edge_attr=[93], y=[4]),
Data(x=[50, 43], edge_index=[2, 49], edge_attr=[49], y=[4]),
Data(x=[141, 43], edge_index=[2, 140], edge_attr=[140], y=[4]),
Data(x=[81, 43], edge_index=[2, 80], edge_attr=[80], y=[4])],
[Data(x=[57, 43], edge_index=[2, 56], edge_attr=[56], y=[4]),
Data(x=[94, 43], edge_index=[2, 93], edge_attr=[93], y=[4]),
Data(x=[50, 43], edge_index=[2, 49], edge_attr=[49], y=[4]),
Data(x=[141, 43], edge_index=[2, 140], edge_attr=[140], y=[4]),
Data(x=[81, 43], edge_index=[2, 80], edge_attr=[80], y=[4])])
```python
```
|
astrockraghREPO_NAMEMangrovePATH_START.@Mangrove_extracted@Mangrove-main@transform@transform_dataset.ipynb@.PATH_END.py
|
{
"filename": "testDistributeByPosition2d.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/unit/Distributed/testDistributeByPosition2d.py",
"type": "Python"
}
|
#ATS:test(SELF, np=8, label="DistributeByPosition2d unit tests")
from math import *
import unittest
from Spheral import *
#===============================================================================
# Load mpi, and figure out how may domains to set up, and which domain we are.
#===============================================================================
import mpi
domainID = mpi.rank
nDomains = mpi.procs
#===============================================================================
# Distribute nodes randomly amongst domains.
#===============================================================================
def randomDistribute(thisDomain,     # domain ID to be calculated
                     nDomains,       # total number of domains
                     nNodesGlobal,   # global number of nodes in this nodelist
                     xyRangeTotal):  # total simulation volume
    """Randomly assign each of nNodesGlobal nodes to a domain, and return
    the (globalNodeIDs, nodePositions) owned by thisDomain.  Positions are
    drawn uniformly inside the rectangle xyRangeTotal = [(xmin, ymin),
    (xmax, ymax)]."""
    assert 0 <= thisDomain < nDomains
    assert nDomains > 0
    import random
    rng = random.Random()
    (xmin, ymin), (xmax, ymax) = xyRangeTotal
    ownedIDs = []
    ownedPositions = []
    for nodeID in range(nNodesGlobal):
        mpi.barrier()
        # Each rank draws a candidate owner, but rank 0's draw (broadcast
        # to everyone) is what actually decides, keeping all ranks consistent.
        owner = mpi.bcast(rng.randint(0, nDomains - 1))
        if owner == thisDomain:
            ownedIDs.append(nodeID)
            ownedPositions.append(Vector2d(rng.uniform(xmin, xmax),
                                           rng.uniform(ymin, ymax)))
    assert len(ownedIDs) == len(ownedPositions)
    # Every node must be owned by exactly one domain.
    assert mpi.allreduce(len(ownedIDs), mpi.SUM) == nNodesGlobal
    return ownedIDs, ownedPositions
#===============================================================================
# Calculate one over the smoothing scale for the given number of nodes and
# volume.
#===============================================================================
def determineH(nGlobal, xRange,
               nNodesPerh = 2.01):
    """Return the inverse smoothing-scale tensor (SymTensor2d) appropriate
    for nGlobal nodes evenly filling the rectangle xRange =
    [(xmin, ymin), (xmax, ymax)], with nNodesPerh nodes per smoothing
    length."""
    assert nGlobal > 0
    # Total area of the rectangle, and the share of it per node.
    area = (xRange[1][0] - xRange[0][0])*(xRange[1][1] - xRange[0][1])
    assert area > 0.0
    perNodeArea = area/nGlobal
    # Nominal node spacing, and the corresponding inverse smoothing scale.
    spacing = sqrt(perNodeArea)
    hinv = 1.0/(nNodesPerh*spacing)
    return SymTensor2d(hinv, 0.0,
                       0.0, hinv)
#===============================================================================
# Main testing class for the 2-D tests.
#===============================================================================
class TestDistributeByPosition2d(unittest.TestCase):
    """Check that DistributeByXPosition2d repartitions randomly scattered
    nodes into contiguous, ordered slabs in x across the MPI domains."""

    # Set up method called before test is run.
    def setUp(self):
        # Generic parameters for 2-D tests.
        n1 = 1000
        n2 = 2500
        n3 = 500
        # Three x-adjacent rectangles [(xmin, ymin), (xmax, ymax)], one
        # per NodeList.
        range1 = [(0.0, 0.0), (1.0, 1.0)]
        range2 = [(1.0, 0.0), (1.5, 1.0)]
        range3 = [(1.5, 0.0), (2.0, 1.0)]
        # Construct the NodeLists to be distributed
        self.eos = GammaLawGasMKS2d(2.0, 2.0)
        self.WT = TableKernel2d(BSplineKernel2d())
        self.nodes1 = makeFluidNodeList2d("nodes1 2d", self.eos)
        self.nodes2 = makeFluidNodeList2d("nodes2 2d", self.eos)
        self.nodes3 = makeFluidNodeList2d("nodes3 2d", self.eos)
        for (nodes, nGlobal, globalRange) in ((self.nodes1, n1, range1),
                                              (self.nodes2, n2, range2),
                                              (self.nodes3, n3, range3)):
            # Randomly scatter this NodeList's nodes across the domains.
            globalIDs, xyNodes = randomDistribute(domainID,
                                                  nDomains,
                                                  nGlobal,
                                                  globalRange)
            n = len(globalIDs)
            nodes.numInternalNodes = n
            # One uniform H tensor sized for the mean node spacing.
            Hi = determineH(nGlobal, globalRange)
            for i in range(n):
                nodes.mass()[i] = 1.0
                nodes.positions()[i] = xyNodes[i]
                nodes.Hfield()[i] = Hi
            nodes.neighbor().updateNodes()
        # Put the distributed NodeLists into a DataBase.
        self.dataBase = DataBase2d()
        self.dataBase.appendNodeList(self.nodes1)
        self.dataBase.appendNodeList(self.nodes2)
        self.dataBase.appendNodeList(self.nodes3)
        # Sanity check: the global node counts must match what we asked for.
        assert mpi.allreduce(self.nodes1.numInternalNodes, mpi.SUM) == n1
        assert mpi.allreduce(self.nodes2.numInternalNodes, mpi.SUM) == n2
        assert mpi.allreduce(self.nodes3.numInternalNodes, mpi.SUM) == n3
        return

    # Method called after test is completed.
    def tearDown(self):
        del self.nodes1, self.nodes2, self.nodes3
        return

    # The actual test itself!
    # Create a DistributeNodeByXPosition object, have it redistribute the
    # nodes, and check that each domains x distribution is contiguous and
    # in the right order.
    def testIt(self):
        print("Testing DistributeByXPosition2d on domain %i of %i domains" % \
              (domainID, nDomains))

        # Record how many nodes we're starting with.
        nNodesGlobal = []
        for nodeList in self.dataBase.nodeLists():
            nNodesGlobal.append(mpi.allreduce(nodeList.numInternalNodes,
                                              mpi.SUM))

        # Go ahead and redistribute those nodes!
        repartition = DistributeByXPosition2d()
        repartition.redistributeNodes(self.dataBase)

        # Make sure that the numbers of nodes are correct (no nodes lost
        # or duplicated by the redistribution).
        assert self.dataBase.numNodeLists == len(nNodesGlobal)
        i = 0
        for nodeList in self.dataBase.nodeLists():
            n = mpi.allreduce(nodeList.numInternalNodes, mpi.SUM)
            nGlobal = nNodesGlobal[i]
            if n != nGlobal:
                self.fail("Wrong number of nodes: %i != %i" % (n, nGlobal))
            i += 1

        # Have each domain figure out it's min and max x.
        # NOTE(review): the min/max loops range over numNodes while the
        # counts above use numInternalNodes -- confirm any extra (ghost)
        # nodes cannot perturb the per-domain x range here.
        localxmin = 1e10
        localxmax = -1e10
        for nodeList in self.dataBase.nodeLists():
            if nodeList.numInternalNodes > 0:
                localxmin = min(localxmin, min([nodeList.positions()[i].x for i in range(nodeList.numNodes)]))
                localxmax = max(localxmax, max([nodeList.positions()[i].x for i in range(nodeList.numNodes)]))

        # Now make sure that our (xmin,xmax) range is greater than any processor
        # less than us, and less than any processor greater.
        localDomRange = [(domainID, localxmin, localxmax)]
        globalDomRange = mpi.allreduce(localDomRange, mpi.SUM)
        globalDomRange.sort()
        for i in range(1, len(globalDomRange)):
            if globalDomRange[i][1] < globalDomRange[i-1][1]:
                self.fail("(proc,xmin) not in order")
            if globalDomRange[i][2] < globalDomRange[i-1][2]:
                self.fail("(proc,xmax) not in order")
#===============================================================================
# Run the tests
#===============================================================================
if __name__ == "__main__":
unittest.main()
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@unit@Distributed@testDistributeByPosition2d.py@.PATH_END.py
|
{
"filename": "B_Model_Tutorial_3_Bayes_Classifier.ipynb",
"repo_name": "jmschrei/pomegranate",
"repo_path": "pomegranate_extracted/pomegranate-master/docs/tutorials/B_Model_Tutorial_3_Bayes_Classifier.ipynb",
"type": "Jupyter Notebook"
}
|
## Bayes Classifier
author: Jacob Schreiber <br>
contact: jmschreiber91@gmail.com
Although most of the models implemented in pomegranate are unsupervised, a simple way to construct a classifier using probabilistic models is to use Bayes' rule. Specifically, given a set of probabilistic models M, one can make classifications on some data D by calculating the posterior probability of the data under each of these models.
\begin{equation}
P(M|D) = \frac{P(D|M)P(M)}{P(D)}
\end{equation}
Specifically, what this equation is saying is that one should calculate the likelihood of the data under each component, $P(D|M)$, multiply it by the prior probability of data coming from that component regardless of what the data actually is $P(M)$, and then divide by some factor.
More concretely: if you have a set of probability distributions and want to classify points as having come from one of them, you just calculate the likelihood of the data given the distribution and then multiply through by the prior for each distribution, which can just be a uniform distribution.
```python
%pylab inline
import seaborn; seaborn.set_style('whitegrid')
import torch
from pomegranate.bayes_classifier import BayesClassifier
from pomegranate.distributions import *
numpy.random.seed(0)
numpy.set_printoptions(suppress=True)
%load_ext watermark
%watermark -m -n -p numpy,scipy,torch,pomegranate
```
Populating the interactive namespace from numpy and matplotlib
numpy : 1.23.4
scipy : 1.9.3
torch : 1.13.0
pomegranate: 1.0.0
Compiler : GCC 11.2.0
OS : Linux
Release : 4.15.0-208-generic
Machine : x86_64
Processor : x86_64
CPU cores : 8
Architecture: 64bit
### Naive Bayes
The simplest form of a Bayes classifier is the naive Bayes classifier. The reason it is "naive" is that the classifier assumes that the features are all independent from each other. This assumption makes the classifier fast and interpretable.
#### Initialization and Fitting
Bayes classifiers can be initialized or fit in the same way that mixtures can. Specifically, you can either pass in learned distributions, or uninitialized distributions that are then fit.
```python
numpy.random.seed(0)
# Three well-separated 2-D Gaussian blobs (100 / 150 / 100 points).
X = numpy.concatenate([numpy.random.normal((7, 2), 1, size=(100, 2)),
                       numpy.random.normal((2, 3), 1, size=(150, 2)),
                       numpy.random.normal((7, 7), 1, size=(100, 2))])
# Integer class labels (0, 1, 2) matching the three blobs above.
y = numpy.concatenate([numpy.zeros(100), numpy.zeros(150)+1, numpy.zeros(100)+2])

plt.figure(figsize=(6, 5))
plt.scatter(X[:,0], X[:,1])
plt.axis(False)
plt.show()
```

A Gaussian naive Bayes model can be initialized like the following:
```python
d1 = Normal([1.1, 1.3], [0.3, 0.9], covariance_type='diag')
d2 = Normal([1.3, 1.8], [1.1, 1.5], covariance_type='diag')
d3 = Normal([2.1, 2.3], [0.5, 1.8], covariance_type='diag')
model = BayesClassifier([d1, d2, d3])
```
And we can make predictions for the above points just like other methods:
```python
y_hat = model.predict(X)
for i in range(3):
plt.scatter(*X[y_hat == i].T)
plt.axis(False)
plt.show()
```

Wow, looks pretty bad. Let's try fitting to the data.
```python
model = BayesClassifier([Normal(covariance_type='diag') for i in range(3)]).fit(X, y)
y_hat = model.predict(X)
for i in range(3):
plt.scatter(*X[y_hat == i].T)
plt.axis(False)
plt.show()
```

|
jmschreiREPO_NAMEpomegranatePATH_START.@pomegranate_extracted@pomegranate-master@docs@tutorials@B_Model_Tutorial_3_Bayes_Classifier.ipynb@.PATH_END.py
|
{
"filename": "null_test.py",
"repo_name": "amanchokshi/EMBERS",
"repo_path": "EMBERS_extracted/EMBERS-master/src/embers/kindle/null_test.py",
"type": "Python"
}
|
"""
Null Test
---------
perform null tests on reference rf data and reference beam models
"""
import argparse
from pathlib import Path
from embers.tile_maps.null_test import null_test
def main():
    """
    Perform a null test of the reference antennas using the :func:`~embers.tile_maps.null_test.null_test` function.

    .. code-block:: console

        $ null_test --help

    """
    # NOTE: the previous description ("Create MWA FEE beam models at
    # multiple pointings with dipoles flagged.") was copy-pasted from a
    # different CLI tool; it now matches the module's purpose.
    _parser = argparse.ArgumentParser(
        description="""
        Perform null tests on reference rf data and reference beam models.
        """
    )

    # Healpix resolution of the maps being compared.
    _parser.add_argument(
        "--nside", metavar="\b", default=32, type=int, help="Healpix nside. Default=32"
    )

    _parser.add_argument(
        "--za_max",
        metavar="\b",
        default=90,
        type=int,
        help="Maximum zenith angle upto which to perform the null test. Default: 90 deg",
    )

    _parser.add_argument(
        "--ref_model",
        metavar="\b",
        default="embers_out/tile_maps/ref_models/ref_dipole_models.npz",
        help="Reference feko healpix model created by embers.tile_maps.ref_fee_healpix. Default: embers_out/tile_maps/ref_models/ref_dipole_models.npz",
    )

    _parser.add_argument(
        "--map_dir",
        metavar="\b",
        default="embers_out/tile_maps/tile_maps/tile_maps_raw",
        help="Directory with tile_maps_raw, created by embers.tile_maps.tile_maps.project_tile_healpix. Default: embers_out/tile_maps/tile_maps/tile_maps_raw",
    )

    _parser.add_argument(
        "--out_dir",
        metavar="\b",
        default="./embers_out/tile_maps/null_test",
        help="Dir where null tests will be saved. Default=./embers_out/tile_maps/null_test",
    )

    _args = _parser.parse_args()
    _nside = _args.nside
    _za_max = _args.za_max
    _ref_model = _args.ref_model
    # Directory arguments become Path objects; ref_model is passed through
    # as given.
    _map_dir = Path(_args.map_dir)
    _out_dir = Path(_args.out_dir)

    print(f"Null tests saved to {_out_dir}")
    null_test(_nside, _za_max, _ref_model, _map_dir, _out_dir)
|
amanchokshiREPO_NAMEEMBERSPATH_START.@EMBERS_extracted@EMBERS-master@src@embers@kindle@null_test.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/tests/test_gal_derivatives/conftest.py",
"type": "Python"
}
|
from .. import pytest
from .. import cache_manager
from pyRSD.rsdfit import FittingDriver
from pyRSD import data_dir
import os
@pytest.fixture(scope='session', autouse=True)
def driver(request):
    """Session-wide FittingDriver built around the cached runPB galaxy model."""
    from pyRSD.rsd import GalaxySpectrum

    # Make the package data visible via the PYRSD_DATA env var.
    os.environ['PYRSD_DATA'] = data_dir

    # Initialize the galaxy power spectrum model.
    model_config = {
        'z': 0.55,
        'cosmo_filename': 'runPB.ini',
        'kmin': 1e-3,
        'kmax': 0.6,
        'interpolate': True,
    }
    m = GalaxySpectrum(**model_config)

    # Load the (cached) serialized model.
    with cache_manager(m, "runPB_galaxy.npy") as model:
        pass

    # Build the fitting driver from the bundled example parameter file and
    # attach the loaded model.
    params_file = os.path.join(data_dir, 'examples', 'params.dat')
    fit_driver = FittingDriver(params_file, init_model=False)
    fit_driver.model = model

    # Set the fiducial parameter values.
    fit_driver.set_fiducial()
    return fit_driver
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@tests@test_gal_derivatives@conftest.py@.PATH_END.py
|
{
"filename": "check_accuracy_of_WODEN.ipynb",
"repo_name": "JLBLine/WODEN",
"repo_path": "WODEN_extracted/WODEN-master/cmake_testing/GPU_code/source_components/check_accuracy_of_WODEN.ipynb",
"type": "Jupyter Notebook"
}
|
# Does 2 + 2 = 4?
This notebook is an attempt to quantify the errors of the float sine/cosine functions in `CUDA`, and estimate how they might impact the absolute accuracy of `WODEN`. The theory for everything is written into `test_calc_measurement_equation.c` in the `testing/cmake_testing/source_components` section of the `WODEN` documentation. We'll be trying to measure the absolute accuracy of the `sine` and `cosine` functions inside ``CUDA``.
## Make $l,m,n$ coords
First up, make some $l,m,n$ coords that add up to known angles, and check everything is kosher:
```python
import numpy as np
def find_lmn(phi_simple):
    """Return direction cosines (l, m, n) with l == m and
    l**2 + m**2 + n**2 == 1, chosen so the measurement-equation phase
    2*pi*(l + m + n - 1) equals `phi_simple` (radians)."""
    # n is the positive root of the quadratic that follows from setting
    # l = m and imposing the unit-norm constraint.
    root = np.sqrt(2)*np.sqrt(-phi_simple**2 - 4*np.pi*phi_simple + 8*np.pi**2)
    n = (root + phi_simple + 2*np.pi) / (6*np.pi)
    # Back out l (= m) from l**2 + m**2 + n**2 = 1 with l = m.
    l = np.sqrt(1 - n*n) / np.sqrt(2)
    return l, l, n
##Known list of angles that have predictable sin/cos outputs
phi_simples = [0.0, np.pi/6, np.pi/4, np.pi/3, np.pi/2, 2*np.pi/3, 3*np.pi/4, 5*np.pi/6, np.pi,
               7*np.pi/6, 5*np.pi/4]
##Known sin outputs for input phi_simples
known_sine_angles = [0.0, 0.5, np.sqrt(2)/2, np.sqrt(3)/2, 1.0, np.sqrt(3)/2, np.sqrt(2)/2, 0.5, 0.0,
                     -0.5, -np.sqrt(2)/2]
all_ls = []
all_ms = []
all_ns = []
##For known angles, calculate l,m,n coords and check they are legit
for phi_simple, expec in zip(phi_simples, known_sine_angles):
    l,m,n = find_lmn(phi_simple)
    all_ls.append(l)
    all_ms.append(m)
    all_ns.append(n)
    ##Recover the measurement-equation phase; it should match the target angle.
    phi_outcome = 2*np.pi*(l + m + n - 1)
    ##The direction cosines should lie on the unit sphere (sum == 1).
    check_lmn_sum = l*l + m*m + n*n
    print(f"Target,Recovered: angle {phi_simple:.6f},{phi_outcome:.6f} sin: {expec:.6f},{np.sin(phi_outcome):.6f}")
    print(f"l,m {l:.6f} n {n:.6f}, sum {check_lmn_sum:.4f}")
    print(f"")
```
Target,Recovered: angle 0.000000,0.000000 sin: 0.000000,0.000000
l,m 0.000000 n 1.000000, sum 1.0000
Target,Recovered: angle 0.523599,0.523599 sin: 0.500000,0.500000
l,m 0.042574 n 0.998186, sum 1.0000
Target,Recovered: angle 0.785398,0.785398 sin: 0.707107,0.707107
l,m 0.064590 n 0.995819, sum 1.0000
Target,Recovered: angle 1.047198,1.047198 sin: 0.866025,0.866025
l,m 0.087145 n 0.992377, sum 1.0000
Target,Recovered: angle 1.570796,1.570796 sin: 1.000000,1.000000
l,m 0.134070 n 0.981861, sum 1.0000
Target,Recovered: angle 2.094395,2.094395 sin: 0.866025,0.866025
l,m 0.183866 n 0.965602, sum 1.0000
Target,Recovered: angle 2.356194,2.356194 sin: 0.707107,0.707107
l,m 0.210076 n 0.954849, sum 1.0000
Target,Recovered: angle 2.617994,2.617994 sin: 0.500000,0.500000
l,m 0.237340 n 0.941987, sum 1.0000
Target,Recovered: angle 3.141593,3.141593 sin: 0.000000,0.000000
l,m 0.295876 n 0.908248, sum 1.0000
Target,Recovered: angle 3.665191,3.665191 sin: -0.500000,-0.500000
l,m 0.362273 n 0.858788, sum 1.0000
Target,Recovered: angle 3.926991,3.926991 sin: -0.707107,-0.707107
l,m 0.400368 n 0.824264, sum 1.0000
## Check calculation of $b$ is legit
We can give the baseline a length by setting $u = v = w = b$; check that the maths here still works out to give the expected results.
```python
def calc_b(phi_simple, num_mult):
    """Given the target angle `phi_simple` (radians), and
    an integer multiplier `num_mult`, find a baseline
    length `b` that results in the same sine/cosine
    angle"""
    extra_turns = 2*np.pi*num_mult
    ##A zero target angle yields zero phase for any baseline scaling,
    ##so just return a whole number of turns
    if phi_simple == 0:
        return extra_turns
    ##Otherwise scale the baseline so the phase gains `num_mult` full
    ##extra wraps on top of the target angle
    return (phi_simple + extra_turns) / phi_simple
##For known angles, calculate l,m,n coords and check they are legit
for phi_simple, expec in zip(phi_simples, known_sine_angles):
    l,m,n = find_lmn(phi_simple)
    ##Try several baseline scalings; all should recover the same sine value
    for num_mult in [1, 10, 100, 1000, 10000]:
        b = calc_b(phi_simple, num_mult)
        ##Phase with u = v = w = b
        phi_outcome = 2*np.pi*b*(l + m + n - 1)
        print(f"Sine Target,Recovered: angle {expec:.6f},{np.sin(phi_outcome):.6f} b: {b:.1f}")
        # print(f"")
        # if expec == 0:
        #     expec_div = 1
        # else:
        #     expec_div = expec
        # print((expec - np.sin(phi_outcome)) / expec_div)
```
Sine Target,Recovered: angle 0.000000,0.000000 b: 6.3
Sine Target,Recovered: angle 0.000000,0.000000 b: 62.8
Sine Target,Recovered: angle 0.000000,0.000000 b: 628.3
Sine Target,Recovered: angle 0.000000,0.000000 b: 6283.2
Sine Target,Recovered: angle 0.000000,0.000000 b: 62831.9
Sine Target,Recovered: angle 0.500000,0.500000 b: 13.0
Sine Target,Recovered: angle 0.500000,0.500000 b: 121.0
Sine Target,Recovered: angle 0.500000,0.500000 b: 1201.0
Sine Target,Recovered: angle 0.500000,0.500000 b: 12001.0
Sine Target,Recovered: angle 0.500000,0.500000 b: 120001.0
Sine Target,Recovered: angle 0.707107,0.707107 b: 9.0
Sine Target,Recovered: angle 0.707107,0.707107 b: 81.0
Sine Target,Recovered: angle 0.707107,0.707107 b: 801.0
Sine Target,Recovered: angle 0.707107,0.707107 b: 8001.0
Sine Target,Recovered: angle 0.707107,0.707107 b: 80001.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 7.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 61.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 601.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 6001.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 60001.0
Sine Target,Recovered: angle 1.000000,1.000000 b: 5.0
Sine Target,Recovered: angle 1.000000,1.000000 b: 41.0
Sine Target,Recovered: angle 1.000000,1.000000 b: 401.0
Sine Target,Recovered: angle 1.000000,1.000000 b: 4001.0
Sine Target,Recovered: angle 1.000000,1.000000 b: 40001.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 4.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 31.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 301.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 3001.0
Sine Target,Recovered: angle 0.866025,0.866025 b: 30001.0
Sine Target,Recovered: angle 0.707107,0.707107 b: 3.7
Sine Target,Recovered: angle 0.707107,0.707107 b: 27.7
Sine Target,Recovered: angle 0.707107,0.707107 b: 267.7
Sine Target,Recovered: angle 0.707107,0.707107 b: 2667.7
Sine Target,Recovered: angle 0.707107,0.707107 b: 26667.7
Sine Target,Recovered: angle 0.500000,0.500000 b: 3.4
Sine Target,Recovered: angle 0.500000,0.500000 b: 25.0
Sine Target,Recovered: angle 0.500000,0.500000 b: 241.0
Sine Target,Recovered: angle 0.500000,0.500000 b: 2401.0
Sine Target,Recovered: angle 0.500000,0.500000 b: 24001.0
Sine Target,Recovered: angle 0.000000,0.000000 b: 3.0
Sine Target,Recovered: angle 0.000000,-0.000000 b: 21.0
Sine Target,Recovered: angle 0.000000,0.000000 b: 201.0
Sine Target,Recovered: angle 0.000000,0.000000 b: 2001.0
Sine Target,Recovered: angle 0.000000,-0.000000 b: 20001.0
Sine Target,Recovered: angle -0.500000,-0.500000 b: 2.7
Sine Target,Recovered: angle -0.500000,-0.500000 b: 18.1
Sine Target,Recovered: angle -0.500000,-0.500000 b: 172.4
Sine Target,Recovered: angle -0.500000,-0.500000 b: 1715.3
Sine Target,Recovered: angle -0.500000,-0.500000 b: 17143.9
Sine Target,Recovered: angle -0.707107,-0.707107 b: 2.6
Sine Target,Recovered: angle -0.707107,-0.707107 b: 17.0
Sine Target,Recovered: angle -0.707107,-0.707107 b: 161.0
Sine Target,Recovered: angle -0.707107,-0.707107 b: 1601.0
Sine Target,Recovered: angle -0.707107,-0.707107 b: 16001.0
## WODEN vs expectation
Ok, I've run the test in `test_calc_measurement_equation.c`, which outputs a text file of visibilities
calculated
```python
import matplotlib.pyplot as plt
from copy import deepcopy
import matplotlib.ticker as mtick
##Load the accuracy-test outputs produced by the WODEN test suite
# data = np.loadtxt('../../build/cmake_testing/source_components/measurement_eq_outcomes.txt')
data = np.load('../../test_installation/absolute_accuracy/accuracy_test_outputs.npz')['all_data']
##Known sin outputs for input phi_simples
known_sine_angles = [0.0, 0.5, np.sqrt(2)/2, np.sqrt(3)/2, 1.0, np.sqrt(3)/2, np.sqrt(2)/2, 0.5, 0.0,
                     -0.5, -np.sqrt(2)/2]
##Known cosine outputs for input phi_simples
known_cosine_angles = [1.0, np.sqrt(3)/2, np.sqrt(2)/2, 0.5, 0.0,
                       -0.5, -np.sqrt(2)/2, -np.sqrt(3)/2, -1.0, -np.sqrt(3)/2, -np.sqrt(2)/2]
##LaTeX legend labels, one per tested angle
known_angles_strings = ["$0.0$", "$\pi/6$", "$\pi/4$", "$\pi/3$",
                        "$\pi/2$", "$2\pi/3$", "$3\pi/4$", "$5\pi/6$",
                        "$\pi$", "$7\pi/6$", "$5\pi/4$"]
num_baselines = 5
num_angles = len(known_sine_angles)
fig, axs = plt.subplots(2,2, figsize=(14,14))
markers = ["o", "v", "^", "<", ">", "8",
           "s", "p", "P", "*", "h", "H", "+"]
all_re_diffs = np.empty(num_baselines*num_angles)
all_im_diffs = np.empty(num_baselines*num_angles)
num_neg = []
num_pos = []
for angle_ind, known_angle in enumerate(known_angles_strings):
    ##Each angle was run over `num_baselines` baselines; slice those rows out
    slice_low = angle_ind*num_baselines
    slice_high = (angle_ind + 1)*num_baselines
    u_lens = data[slice_low:slice_high, 0]
    calc_re = data[slice_low:slice_high, 3]
    calc_im = data[slice_low:slice_high, 4]
    ##Total baseline length - assumes u = v = w in the test, so |b| = sqrt(3)u
    u_lens = np.sqrt(3*u_lens**2)
    expec_im = known_sine_angles[angle_ind]
    expec_re = known_cosine_angles[angle_ind]
    ##Guard against division by zero when the expected value is exactly zero
    if expec_re == 0:
        expec_re_div = 1.0
    else:
        expec_re_div = expec_re
    if expec_im == 0:
        expec_im_div = 1.0
    else:
        expec_im_div = expec_im
    ##Percentage offsets from the analytic expectation
    abs_diff_re = np.abs((expec_re - calc_re) / expec_re_div)*100.0
    abs_diff_im = np.abs((expec_im - calc_im) / expec_im_div)*100.0
    ##Tally how many raw offsets land either side of zero
    for diff in expec_re - calc_re:
        if diff < 0:
            num_neg.append(diff)
        else:
            num_pos.append(diff)
    for diff in expec_im - calc_im:
        if diff < 0:
            num_neg.append(diff)
        else:
            num_pos.append(diff)
    all_re_diffs[slice_low:slice_high] = abs_diff_re
    all_im_diffs[slice_low:slice_high] = abs_diff_im
    axs[0,0].plot(u_lens, abs_diff_re, color=f'C{angle_ind}',
                  marker=markers[angle_ind], label=known_angle)
    axs[0,1].plot(u_lens, abs_diff_im, color=f'C{angle_ind}',
                  marker=markers[angle_ind], label=known_angle)
    ##Also compare in amplitude/phase space
    expec_comp = expec_re + 1j*expec_im
    calc_comp = calc_re + 1j*calc_im
    abs_diff_percent = np.abs(1 - np.abs(calc_comp))*100
    phase_expec = np.angle(expec_comp)
    phase_calc = np.angle(calc_comp)
    phase_diff = np.abs(phase_expec - phase_calc)
    axs[1,0].plot(u_lens, abs_diff_percent, color=f'C{angle_ind}',
                  marker=markers[angle_ind], label=known_angle)
    axs[1,1].plot(u_lens, phase_diff*(180.0/np.pi), color=f'C{angle_ind}',
                  marker=markers[angle_ind], label=known_angle)
    # print(np.abs(expec_comp))
print(f"Num pos offsets {len(num_pos)}")
print(f"Num neg offsets {len(num_neg)}")
fontsize = 16
for ax in axs.flatten():
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.tick_params(axis='both', labelsize=fontsize)
axs[1,0].legend(ncol=2)
axs[1,0].set_xlabel('$|\mathrm{u}| \,(\lambda)$', fontsize=fontsize)
axs[1,1].set_xlabel('$|\mathrm{u}| \,(\lambda)$', fontsize=fontsize)
axs[0,0].set_ylabel('Percentage difference from expected', fontsize=fontsize)
axs[0,1].set_ylabel('Percentage difference from expected', fontsize=fontsize)
axs[1,0].set_ylabel('Percentage difference from expected', fontsize=fontsize)
axs[1,1].set_ylabel('Phase offset from expected (degrees)', fontsize=fontsize)
axs[0,0].set_title("Real", fontsize=fontsize)
axs[0,1].set_title("Imaginary", fontsize=fontsize)
axs[1,0].set_title("Absolute", fontsize=fontsize)
axs[1,1].set_title("Phase", fontsize=fontsize)
plt.show()
```
Num pos offsets 67
Num neg offsets 43

```python
from lmfit.models import PowerLawModel,LinearModel
##Fit a straight line to the percentage offsets in log-log space,
##separately for the real and imaginary parts
u_lens = data[:,0]
u_range = np.arange(1,1e6)
linear_model = LinearModel(prefix='linear_')
linear_params = linear_model.make_params()
##Mask out exact zeros before taking log10
fit_re = linear_model.fit(np.log10(all_re_diffs[all_re_diffs != 0]), linear_params, x=np.log10(u_lens[all_re_diffs != 0]))
intercept_re = fit_re.params['linear_intercept'].value
slope_re = fit_re.params['linear_slope'].value
print(f'Real intercept {intercept_re:.5f} slope {slope_re:.5f}')
##Evaluate the best-fit power law over the plotting range
best_fit_re = 10**(intercept_re + np.log10(u_range)*slope_re)
fig, axs = plt.subplots(1,2, figsize=(14,7))
axs[0].plot(u_lens, all_re_diffs, 'o', mfc='none')
axs[0].plot(u_range, best_fit_re, 'k--')
fit_im = linear_model.fit(np.log10(all_im_diffs[all_im_diffs != 0]), linear_params, x=np.log10(u_lens[all_im_diffs != 0]))
intercept_im = fit_im.params['linear_intercept'].value
slope_im = fit_im.params['linear_slope'].value
print(f'Imag intercept {intercept_im:.5f} slope {slope_im:.5f}')
best_fit_im = 10**(intercept_im + np.log10(u_range)*slope_im)
axs[1].plot(u_lens, all_im_diffs, 'o', mfc='none')
axs[1].plot(u_range, best_fit_im, 'k--')
for ax in axs:
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlabel('$|\mathrm{u}| \,(\lambda)$', fontsize=fontsize)
    ax.set_ylabel('Percentage difference from expected', fontsize=fontsize)
    ax.tick_params(axis='both', labelsize=fontsize)
plt.tight_layout()
# fig.savefig('measure_eq_results.png',bbox_inches='tight')
plt.show()
##Average the real/imag fits into a single error model, used later to
##inject baseline-length-dependent errors into simulated visibilities
intercept = (intercept_im + intercept_re) / 2
slope = (slope_im + slope_re) / 2
```
Real intercept -5.81135 slope 0.89464
Imag intercept -5.73264 slope 0.84455

# How do these errors manifest?
Ok, now we have a functional form for the percentage offset, we can make "true" visibilities, and then add this baseline length-dependent error, and see how that affects something like calibration. I had to code up the `YANDAsoft` calibration algorithm for my Shapelet paper, so copy across some functions here to run a calibration.
## Calibration functions
I'm going to set the number of antennas/baselines to the MWA because I like the MWA.
```python
##Some lovely hard coding
NUM_ANTENNAS = 128
##Number of unique baselines for 128 antennas: 128*127/2
NUM_BASELINES = 8128
##Fix the seed so the random arrays/gains are reproducible
np.random.seed(983745)
##Degrees to radians
D2R = np.pi/180.0
##MWA latitude in radians
MWA_LAT = -26.7*D2R
def get_uvw(x_length=None, y_length=None, z_length=None,
            dec=MWA_LAT, ha=0.0):
    '''Takes the baseline length in X,Y,Z (wavelengths) and calculates the u,v,w
    for MWA zenith'''
    ##Precompute the trig terms of the standard XYZ -> uvw rotation
    sin_ha, cos_ha = np.sin(ha), np.cos(ha)
    sin_dec, cos_dec = np.sin(dec), np.cos(dec)
    u = sin_ha*x_length + cos_ha*y_length
    v = -sin_dec*cos_ha*x_length + sin_dec*sin_ha*y_length + cos_dec*z_length
    w = cos_dec*cos_ha*x_length - cos_dec*sin_ha*y_length + sin_dec*z_length
    return u,v,w
def enh2uvw(east, north, height, model_num, latitiude=MWA_LAT):
    """Takes an array layout in e,n,h and returns the u,v,w
    for a zenith phase centre at given latitude

    NOTE(review): `model_num` is accepted but unused here - presumably
    kept for plotting/saving code elsewhere; confirm before removing"""
    sin_lat = np.sin(latitiude)
    cos_lat = np.cos(latitiude)
    ##Rotate local east,north,height into the equatorial X,Y,Z frame
    X = height*cos_lat - north*sin_lat
    Y = east
    Z = north*cos_lat + height*sin_lat
    all_us, all_vs, all_ws = [], [], []
    ##Visit every unique antenna pair (baseline) with ant1 < ant2
    for ant1 in range(NUM_ANTENNAS - 1):
        for ant2 in range(ant1 + 1, NUM_ANTENNAS):
            u, v, w = get_uvw(x_length=X[ant1] - X[ant2],
                              y_length=Y[ant1] - Y[ant2],
                              z_length=Z[ant1] - Z[ant2])
            all_us.append(u)
            all_vs.append(v)
            all_ws.append(w)
    return np.array(all_us), np.array(all_vs), np.array(all_ws)
def apply_gains(model, gains):
    '''Takes model visibilities and gains and applies (multiplies by) the gains to the model:
    V'_ij = g_i * V_ij * conj(g_j), with baselines ordered ant_i < ant_j'''
    num_ants = len(gains)
    updated = []
    visi = 0
    for ant_i in np.arange(num_ants - 1):
        for ant_j in range(ant_i + 1, num_ants):
            updated.append(gains[ant_i]*model[visi]*np.conjugate(gains[ant_j]))
            visi += 1
    return np.array(updated)
def minimise_using_a(visi_data, gains, model):
    '''Takes in visibility data, current gain estimates and a sky model.
    Using this it creates an 'A' array to estimate updates to the current gains.
    Returns updated gains.

    Uses plain ndarrays with the @ operator rather than the deprecated
    np.matrix class; results are numerically identical.'''
    num_visi = len(visi_data)
    num_gains = len(gains)
    ##Generate and populate the 'A' array: two rows per visibility
    ##(real then imag) and two columns per gain (real then imag)
    a_array = np.zeros((2*num_visi, 2*num_gains))
    visi_ind = 0
    for i in range(num_gains - 1):
        for j in range(i + 1, num_gains):
            vis_gj = visi_data[visi_ind]*np.conjugate(gains[j])
            vis_gi = visi_data[visi_ind]*gains[i]
            ##Real part of this visibility goes in the even row
            a_array[2*visi_ind, 2*i] = np.real(vis_gj)
            a_array[2*visi_ind, 2*i+1] = -np.imag(vis_gj)
            a_array[2*visi_ind, 2*j] = np.real(vis_gi)
            a_array[2*visi_ind, 2*j+1] = np.imag(vis_gi)
            ##Imag part goes in the following odd row
            a_array[2*visi_ind+1, 2*i] = np.imag(vis_gj)
            a_array[2*visi_ind+1, 2*i+1] = np.real(vis_gj)
            a_array[2*visi_ind+1, 2*j] = np.imag(vis_gi)
            a_array[2*visi_ind+1, 2*j+1] = -np.real(vis_gi)
            visi_ind += 1
    ##Start by giving everything a weight of 1.0 via an identity matrix
    weights = np.identity(2*num_visi)
    ##Find the difference between the real visis and the model
    ##with the gains applied
    diffs = visi_data - apply_gains(model, gains)
    ##Stack real and imag parts of the differences into a column vector,
    ##interleaved to match the row layout of a_array
    diff_array = np.zeros((2*num_visi, 1))
    diff_array[0::2, 0] = np.real(diffs)
    diff_array[1::2, 0] = np.imag(diffs)
    ##The equation used to calculate the update to the gains is
    ##x_tilde = pinv(transpose(A) W A) transpose(A) W d
    x_tilde = np.linalg.pinv(a_array.T @ weights @ a_array) @ a_array.T @ weights @ diff_array
    ##Re-pack the interleaved real/imag updates into complex gain updates
    delta_gains = x_tilde[0::2, 0] + 1j*x_tilde[1::2, 0]
    ##TODO Add in a safe-guard against massive updates to avoid death of matrix?
    ##Choose how much of the updates to apply to the gains
    update_step_size = 0.5
    return gains + update_step_size*delta_gains
def get_gain(data=None, model=None, num_iterations=10):
    '''Runs `num_iterations` rounds of the least-squares gain solver
    (`minimise_using_a`) on the given data and model, and returns the
    final per-antenna complex gains.'''
    ##Initial gains guess of one as we know we have simulations.
    ##Use NUM_ANTENNAS rather than a hard-coded 128 so the array size
    ##stays consistent with the module-level constant used elsewhere.
    gains = np.ones(NUM_ANTENNAS, dtype=complex)
    for iter in range(num_iterations):
        gains = minimise_using_a(data, gains, model)
        # print(f"On iter {iter} in calibration")
    return gains
```
## Making array layouts and models
I'm going to make random array layouts, and make visibilities with random tile gain errors. The test will be how well the calibration is able to recover the correct gain for each tile. I'll make two sets of model visibilities to calibrate with. One I'll apply the gains to, and use that as the data to be calibrated. Another, I'll add this baseline-dependent error to, and use that as a model to calibrate to. Below, I'll set up functions to achieve that.
```python
from copy import deepcopy
def make_array(model_num, east_max=1000, north_max=1000, height_max=5):
    """Makes a random array between given east, north, and height limits.
    Returns the u,v,w coords for a zenith phase centre for the array.
    Plots the east,north and resultant u,v

    model_num is only used by the (commented-out) savefig filename."""
    ##Draw NUM_ANTENNAS random positions within the given limits (wavelengths)
    east = np.random.uniform(-east_max, east_max, NUM_ANTENNAS)
    north = np.random.uniform(-north_max, north_max, NUM_ANTENNAS)
    height = np.random.uniform(-height_max, height_max, NUM_ANTENNAS)
    us, vs, ws = enh2uvw(east, north, height, model_num)
    ##Plot the layout and the resulting instantaneous u,v coverage
    fig, axs = plt.subplots(1,2, figsize=(10,6))
    axs[0].plot(east, north, 'C0o', mfc='none')
    axs[1].plot(us, vs, 'C0.', mfc='none')
    axs[0].set_xlabel('East (wavelengths)')
    axs[0].set_ylabel('North (wavelengths)')
    axs[1].set_xlabel('$u$ (wavelengths)')
    axs[1].set_ylabel('$v$ (wavelengths)')
    plt.tight_layout()
    # fig.savefig(f'plots/array_layout_{model_num:03d}.png',bbox_inches='tight')
    plt.show()
    return us, vs, ws
def make_gains(phase_err=10, gain_err=0.05):
    """Makes a set of complex per-antenna gains with a random uniform
    amplitude error of +/- `gain_err` (fractional) and a phase error
    of +/- `phase_err` degrees. The first antenna is the reference
    (gain fixed to 1 + 0j)."""
    ##Phase errors drawn in degrees, converted to radians
    rand_phases = np.random.uniform(-phase_err*(np.pi/180.0), phase_err*(np.pi/180.0), NUM_ANTENNAS)
    ##Amplitudes scattered about unity
    amps = 1 + np.random.uniform(-gain_err, gain_err, NUM_ANTENNAS)
    complex_gains = amps*np.exp(1j*rand_phases)
    ##First gain is reference gain
    complex_gains[0] = 1.0 + 0.0j
    return complex_gains
print(f"Using slope {slope:.3f} intercept {intercept:.3f}")
def simple_simulate(us, vs, ws, l, m, n, gains):
    """Using the given u,v,w coords, l,m,n coord, and set of gains,
    create a 'true' set of visibilities without any errors, and
    an 'error' set of visibilities, where we add a baseline
    length-dependent fractional error drawn from the log-space
    linear fit (`slope`, `intercept`) made above.

    Returns (true_visis, gains, model, err_visis, err_model, u_lengths)."""
    ##The measurement equation
    model = np.exp(2j*np.pi*(us*l + vs*m + ws*(n-1)))
    ##Apply the gains to the 'perfect' visibilities
    true_visis = apply_gains(model, gains)
    ##Lengths of the baselines
    u_lengths = np.sqrt(us*us + vs*vs + ws*ws)
    ##Get the fractional error based on length of baseline
    ##using the linear fit we made in log space
    errors_frac = 10**(np.log10(u_lengths)*slope + intercept)
    ##About half the errors are negative, half are positive. Make an array
    ##of half pos, half neg, and apply to fractional errors
    signs = np.random.uniform(0.0, 1.0, NUM_BASELINES)
    signs[signs < 0.5] = -1.0
    signs[signs >= 0.5] = 1.0
    ##Set the fractional errors as half neg/half pos
    errors_frac *= signs
    ##Make a copy of the model
    err_model = deepcopy(model)
    ##Add fractional errors onto the error models
    err_model += errors_frac*err_model
    ##Apply the true gains to the 'error' model
    err_visis = apply_gains(err_model, gains)
    return true_visis, gains, model, err_visis, err_model, u_lengths
```
Using slope 0.870 intercept -5.772
For this experiment, I'll try 5 different random array layouts, and generate 25 different sets of visibilities for each array layout, with each visibility having a single point source in a different direction on the sky. I'll run two calibrations for each simulation, one with a "true" model, and one with errors injected.
```python
do_cal = True
number_models = 5
num_iterations = 20
##Grid of source directions (direction cosines) to simulate
ls = np.array([-0.7, -0.3, 0.0, 0.3, 0.7])
ms = np.array([-0.7, -0.3, 0.0, 0.3, 0.7])
if do_cal:
    ##One results slab per (array layout, l, m) combination: columns are
    ##injected gains, gains recovered with true model, gains recovered
    ##with error model
    all_results = np.empty((number_models*len(ls)*len(ms), NUM_ANTENNAS, 3), dtype=complex)
    for model_num in range(number_models):
        print(f'Doing model {model_num}---------------------------------')
        us, vs, ws = make_array(model_num)
        gains = make_gains()
        for l_ind, l in enumerate(ls):
            for m_ind, m in enumerate(ms):
                print(f'Doing coord {l:.1f},{m:.1f}')
                result = model_num*len(ls)*len(ms) + l_ind*len(ms) + m_ind
                ##Find n based on l,m
                n = np.sqrt(1 - l*l - m*m)
                true_visis, true_gains, true_model, err_visis, err_model, u_lengths = simple_simulate(us, vs, ws, l, m, n, gains)
                ##Calibrate the true data against both the true model and
                ##the error-injected model
                recover_true_gains = get_gain(data=true_visis, model=true_model, num_iterations=num_iterations)
                recover_error_gains = get_gain(data=true_visis, model=err_model, num_iterations=num_iterations)
                results = np.empty((len(true_gains), 3), dtype=complex)
                results[:,0] = true_gains
                results[:,1] = recover_true_gains
                results[:,2] = recover_error_gains
                all_results[result] = results
    np.savez_compressed("notebook_gain_error_from_cal_test.npz", results=all_results)
```
Doing model 0---------------------------------

Doing coord -0.7,-0.7
Doing coord -0.7,-0.3
Doing coord -0.7,0.0
Doing coord -0.7,0.3
Doing coord -0.7,0.7
Doing coord -0.3,-0.7
Doing coord -0.3,-0.3
Doing coord -0.3,0.0
Doing coord -0.3,0.3
Doing coord -0.3,0.7
Doing coord 0.0,-0.7
Doing coord 0.0,-0.3
Doing coord 0.0,0.0
Doing coord 0.0,0.3
Doing coord 0.0,0.7
Doing coord 0.3,-0.7
Doing coord 0.3,-0.3
Doing coord 0.3,0.0
Doing coord 0.3,0.3
Doing coord 0.3,0.7
Doing coord 0.7,-0.7
Doing coord 0.7,-0.3
Doing coord 0.7,0.0
Doing coord 0.7,0.3
Doing coord 0.7,0.7
Doing model 1---------------------------------

Doing coord -0.7,-0.7
Doing coord -0.7,-0.3
Doing coord -0.7,0.0
Doing coord -0.7,0.3
Doing coord -0.7,0.7
Doing coord -0.3,-0.7
Doing coord -0.3,-0.3
Doing coord -0.3,0.0
Doing coord -0.3,0.3
Doing coord -0.3,0.7
Doing coord 0.0,-0.7
Doing coord 0.0,-0.3
Doing coord 0.0,0.0
Doing coord 0.0,0.3
Doing coord 0.0,0.7
Doing coord 0.3,-0.7
Doing coord 0.3,-0.3
Doing coord 0.3,0.0
Doing coord 0.3,0.3
Doing coord 0.3,0.7
Doing coord 0.7,-0.7
Doing coord 0.7,-0.3
Doing coord 0.7,0.0
Doing coord 0.7,0.3
Doing coord 0.7,0.7
Doing model 2---------------------------------

Doing coord -0.7,-0.7
Doing coord -0.7,-0.3
Doing coord -0.7,0.0
Doing coord -0.7,0.3
Doing coord -0.7,0.7
Doing coord -0.3,-0.7
Doing coord -0.3,-0.3
Doing coord -0.3,0.0
Doing coord -0.3,0.3
Doing coord -0.3,0.7
Doing coord 0.0,-0.7
Doing coord 0.0,-0.3
Doing coord 0.0,0.0
Doing coord 0.0,0.3
Doing coord 0.0,0.7
Doing coord 0.3,-0.7
Doing coord 0.3,-0.3
Doing coord 0.3,0.0
Doing coord 0.3,0.3
Doing coord 0.3,0.7
Doing coord 0.7,-0.7
Doing coord 0.7,-0.3
Doing coord 0.7,0.0
Doing coord 0.7,0.3
Doing coord 0.7,0.7
Doing model 3---------------------------------

Doing coord -0.7,-0.7
Doing coord -0.7,-0.3
Doing coord -0.7,0.0
Doing coord -0.7,0.3
Doing coord -0.7,0.7
Doing coord -0.3,-0.7
Doing coord -0.3,-0.3
Doing coord -0.3,0.0
Doing coord -0.3,0.3
Doing coord -0.3,0.7
Doing coord 0.0,-0.7
Doing coord 0.0,-0.3
Doing coord 0.0,0.0
Doing coord 0.0,0.3
Doing coord 0.0,0.7
Doing coord 0.3,-0.7
Doing coord 0.3,-0.3
Doing coord 0.3,0.0
Doing coord 0.3,0.3
Doing coord 0.3,0.7
Doing coord 0.7,-0.7
Doing coord 0.7,-0.3
Doing coord 0.7,0.0
Doing coord 0.7,0.3
Doing coord 0.7,0.7
Doing model 4---------------------------------

Doing coord -0.7,-0.7
Doing coord -0.7,-0.3
Doing coord -0.7,0.0
Doing coord -0.7,0.3
Doing coord -0.7,0.7
Doing coord -0.3,-0.7
Doing coord -0.3,-0.3
Doing coord -0.3,0.0
Doing coord -0.3,0.3
Doing coord -0.3,0.7
Doing coord 0.0,-0.7
Doing coord 0.0,-0.3
Doing coord 0.0,0.0
Doing coord 0.0,0.3
Doing coord 0.0,0.7
Doing coord 0.3,-0.7
Doing coord 0.3,-0.3
Doing coord 0.3,0.0
Doing coord 0.3,0.3
Doing coord 0.3,0.7
Doing coord 0.7,-0.7
Doing coord 0.7,-0.3
Doing coord 0.7,0.0
Doing coord 0.7,0.3
Doing coord 0.7,0.7
# Calibration results
I now have 625 calibration runs. Let's start off by just plotting a histogram of all the offsets from the correct gain and phase, for both the "true" and "error" models, for each calibration run. As we go, we'll collect the median and median absolute deviation of the offsets from expectation for future plottage.
```python
from scipy.stats import median_abs_deviation as mad
import matplotlib.ticker as ticker
def plot_gain_results(results, axs):
    """Given a set of calibration results, plot the outcomes
    on the given set of axes, and collect some mad/med stats

    `results` columns: 0 = injected gains, 1 = gains recovered with the
    true model, 2 = gains recovered with the error model."""
    ##Slice relevant data
    true_gains = results[:,0]
    recover_true_gains = results[:,1]
    recover_error_gains = results[:,2]
    ##Find the absolute gain offset
    gain_offset_true = ((np.abs(recover_true_gains) - np.abs(true_gains)))
    gain_offset_error = ((np.abs(recover_error_gains) - np.abs(true_gains)))
    ##Plot histogram of gains offsets
    axs[0,0].hist(gain_offset_true, histtype='step', density=True, lw=1.0)
    axs[0,1].hist(gain_offset_error, histtype='step', density=True, lw=1.0)
    ##Find the phases of the two calibrations in reference to the first tile
    phases_true_recover = np.angle(recover_true_gains) - np.angle(recover_true_gains[0])
    phases_error_recover = np.angle(recover_error_gains) - np.angle(recover_error_gains[0])
    ##Find the phase offset from the true gain phase (in degrees)
    phase_offset_true = np.angle(true_gains)/D2R - phases_true_recover/D2R
    phase_offset_error = np.angle(true_gains)/D2R - phases_error_recover/D2R
    ##Plot the phase offset histograms
    axs[1,0].hist(phase_offset_true, histtype='step', density=True, lw=1.0)
    axs[1,1].hist(phase_offset_error,histtype='step', density=True, lw=1.0)
    ##Find the median and median absolute deviation for
    ##the gain and phase of both the true and error models
    med_gain_true = np.median(gain_offset_true)
    mad_gain_true = mad(gain_offset_true)
    med_gain_error = np.median(gain_offset_error)
    mad_gain_error = mad(gain_offset_error)
    med_phase_true = np.median(phase_offset_true)
    mad_phase_true = mad(phase_offset_true)
    med_phase_error = np.median(phase_offset_error)
    mad_phase_error = mad(phase_offset_error)
    # print(gain_offset_error.min(), gain_offset_error.max(), mad_gain_error)
    return med_gain_true, mad_gain_true, med_gain_error, mad_gain_error, med_phase_true, mad_phase_true, med_phase_error, mad_phase_error
##Load up the calibration results
# results = np.load("gain_error_from_cal_test.npz")["results"]
results = np.load("notebook_gain_error_from_cal_test.npz")["results"]
fig, axs = plt.subplots(2,2, figsize=(12, 12))
##make empty lists and just stick results in because I'm being lazy
all_med_gain_true = []
all_mad_gain_true = []
all_med_gain_error = []
all_mad_gain_error = []
all_med_phase_true = []
all_mad_phase_true = []
all_med_phase_error = []
all_mad_phase_error = []
## For each calibration run, plot some histograms and collect some stats
for result in range(number_models*len(ls)*len(ms)):
# for result in range(5):
    med_gain_true, mad_gain_true, med_gain_error, mad_gain_error, med_phase_true, mad_phase_true, med_phase_error, mad_phase_error = plot_gain_results(results[result, :, :], axs)
    all_med_gain_true.append(med_gain_true)
    all_mad_gain_true.append(mad_gain_true)
    all_med_gain_error.append(med_gain_error)
    all_mad_gain_error.append(mad_gain_error)
    all_med_phase_true.append(med_phase_true)
    all_mad_phase_true.append(mad_phase_true)
    all_med_phase_error.append(med_phase_error)
    all_mad_phase_error.append(mad_phase_error)
##Tidy up axis formatting on all four panels
for ax in axs.flatten():
    ax.tick_params(axis='both', labelsize=fontsize)
    # ax.ticklabel_format(useOffset=False)
    ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.0e'))
axs[0,0].set_title("True Model", fontsize=fontsize)
axs[0,1].set_title("Error Model", fontsize=fontsize)
axs[1,0].set_title("True Model", fontsize=fontsize)
axs[1,1].set_title("Error Model", fontsize=fontsize)
axs[0,0].set_ylabel('Density', fontsize=fontsize)
axs[1,0].set_ylabel('Density', fontsize=fontsize)
axs[0,0].set_xlabel('Gain offset from expected', fontsize=fontsize)
axs[0,1].set_xlabel('Gain offset from expected', fontsize=fontsize)
axs[1,0].set_xlabel('Angle offset from expected (deg)', fontsize=fontsize)
axs[1,1].set_xlabel('Angle offset from expected (deg)', fontsize=fontsize)
##Mark zero offset for reference
for ax in axs.flatten():
    ax.axvline(0.0, linestyle='--',alpha=0.5, color='k')
plt.tight_layout()
# fig.savefig('hist_gain-phase_error_from_cal_test.png',bbox_inches='tight')
plt.show()

Ok, so there is definitely an increase in the error seen in both gain and phase, and importantly, the distribution of phase offsets looks different for the error model. Let look at the median and median offset for each calibration:
```python
fig, axs = plt.subplots(2,2, figsize=(12, 12))
num_coords = len(ls)*len(ms)
##One marker shape per array layout / gain set
markers = ['o', 's', '^', 'h', 'D']
for ind in range(len(all_med_phase_true)):
    ##Which layout/gain set this calibration run belongs to
    m_ind = int(ind // num_coords)
    ##Top-left: true-model results with med +/- mad error bars
    axs[0,0].errorbar(all_med_phase_true[ind], all_med_gain_true[ind],
                      xerr=all_mad_phase_true[ind], yerr=all_mad_gain_true[ind],
                      marker=markers[m_ind], mfc='none')
    ##Top-right: error-model results with both error bars
    axs[0,1].errorbar(all_med_phase_error[ind], all_med_gain_error[ind],
                      xerr=all_mad_phase_error[ind], yerr=all_mad_gain_error[ind],
                      marker=markers[m_ind], mfc='none',alpha=0.4,
                      mec=f"C{m_ind}", ecolor=f"C{m_ind}")
    axs[0,1].plot(all_med_phase_error[ind], all_med_gain_error[ind],
                  marker=markers[m_ind], mfc='none',
                  color=f"C{m_ind}")
    ##Bottom-left: error model with phase error bars only
    axs[1,0].errorbar(all_med_phase_error[ind], all_med_gain_error[ind],
                      xerr=all_mad_phase_error[ind],
                      marker=markers[m_ind], mfc='none',alpha=0.4,
                      mec=f"C{m_ind}", ecolor=f"C{m_ind}")
    axs[1,0].plot(all_med_phase_error[ind], all_med_gain_error[ind],
                  marker=markers[m_ind], mfc='none',
                  color=f"C{m_ind}")
    ##Bottom-right: error model medians only, no error bars
    axs[1,1].plot(all_med_phase_error[ind], all_med_gain_error[ind],
                  marker=markers[m_ind], mfc='none',
                  color=f"C{m_ind}")
fontsize = 14
axs[0,0].set_ylabel('Gain offset', fontsize=fontsize)
axs[1,0].set_ylabel('Gain offset', fontsize=fontsize)
axs[0,0].set_title('True Model', fontsize=fontsize)
axs[0,1].set_title('Error Model', fontsize=fontsize)
axs[1,0].set_title('Error Model', fontsize=fontsize)
axs[1,1].set_title('Error Model', fontsize=fontsize)
axs[1,0].set_xlabel('Phase offset (deg)', fontsize=fontsize)
axs[1,1].set_xlabel('Phase offset (deg)', fontsize=fontsize)
##Common limits and zero-reference lines
for ax in axs.flatten():
    ax.set_xlim(-0.012, 0.012)
    ax.set_ylim(-5e-4, 5e-4)
    ax.axvline(0., color='k', linestyle='--', alpha=0.4, zorder=0)
    ax.axhline(0., color='k', linestyle='--', alpha=0.4, zorder=0)
    ax.tick_params(axis='both', labelsize=fontsize)
##Zoom the bottom row in on the gain axis
axs[1,0].set_ylim(-1e-4, 1e-4)
axs[1,1].set_ylim(-1e-4, 1e-4)
plt.tight_layout()
# fig.savefig("med-mad_error_from_cal_test.png")
plt.show()
print(f"Median magnitude of median phase offset: {np.median(np.abs(all_med_phase_error)):.1e} +/- "
      f"{mad(np.abs(all_med_phase_error)):.1e}")
```

Median magnitude of median phase offset: 2.9e-04 +/- 1.5e-04
I think the last printed statement above means that the recovered phases when including errors are biased by around <= 0.01 degrees in some direction
## What about the actual MWA layout?
Ok, let's try this again with the MWA layout
```python
##read in coords from a text file
east, north, height = np.loadtxt('WODEN_array_layout.txt', unpack=True)
##Uh go for 150MHz? Wavelength of about 2m, so halve the coords to
##convert metres into wavelengths.
##Pass an explicit model number instead of the notebook `_` variable,
##which just holds the previous cell's output (enh2uvw ignores it anyway)
mwa_us, mwa_vs, mwa_ws = enh2uvw(east/2, north/2, height/2, 0)
##Plot the layout and the instantaneous u,v coverage
fig, axs = plt.subplots(1,2, figsize=(10,6))
axs[0].plot(east, north, 'C0o', mfc='none')
axs[1].plot(mwa_us, mwa_vs, 'C0.', mfc='none')
axs[0].set_xlabel('East (wavelengths)')
axs[0].set_ylabel('North (wavelengths)')
axs[1].set_xlabel('$u$ (wavelengths)')
axs[1].set_ylabel('$v$ (wavelengths)')
plt.tight_layout()
plt.show()
```

```python
%%time
##Same experiment as before, but on the real MWA layout: 5 random gain
##sets x 25 sky directions, calibrating against true and error models
number_gains = 5
num_iterations = 20
do_cal = True
if do_cal:
    ##Columns: injected gains, recovered (true model), recovered (error model)
    all_results = np.empty((number_gains*len(ls)*len(ms), NUM_ANTENNAS, 3), dtype=complex)
    for gain_num in range(number_gains):
        print(f'Doing gain set {gain_num}---------------------------------')
        gains = make_gains()
        for l_ind, l in enumerate(ls):
            for m_ind, m in enumerate(ms):
                # print(f'Doing coord {l:.1f},{m:.1f}')
                result = gain_num*len(ls)*len(ms) + l_ind*len(ms) + m_ind
                ##Find n based on l,m
                n = np.sqrt(1 - l*l - m*m)
                true_visis, true_gains, true_model, err_visis, err_model, u_lengths = simple_simulate(mwa_us, mwa_vs, mwa_ws, l, m, n, gains)
                recover_true_gains = get_gain(data=true_visis, model=true_model, num_iterations=num_iterations)
                recover_error_gains = get_gain(data=true_visis, model=err_model, num_iterations=num_iterations)
                results = np.empty((len(true_gains), 3), dtype=complex)
                results[:,0] = true_gains
                results[:,1] = recover_true_gains
                results[:,2] = recover_error_gains
                all_results[result] = results
    np.savez_compressed("notebook_MWA_gain_errors.npz", results=all_results)
```
Doing gain set 0---------------------------------
Doing gain set 1---------------------------------
Doing gain set 2---------------------------------
Doing gain set 3---------------------------------
Doing gain set 4---------------------------------
CPU times: user 1d 8h 54min 48s, sys: 3h 17min 27s, total: 1d 12h 12min 16s
Wall time: 2h 29min 29s
```python
##Load up the calibration results
results = np.load("notebook_MWA_gain_errors.npz")["results"]
fig, axs = plt.subplots(2,2, figsize=(12, 12))
##NOTE(review): this prints the stale `number_models` from the earlier
##random-layout run, not `number_gains` used below - looks like a leftover
print(number_models)
##make empty lists and just stick results in because I'm being lazy
all_med_gain_true = []
all_mad_gain_true = []
all_med_gain_error = []
all_mad_gain_error = []
all_med_phase_true = []
all_mad_phase_true = []
all_med_phase_error = []
all_mad_phase_error = []
## For each calibration run, plot some histograms and collect some stats
for result in range(number_gains*len(ls)*len(ms)):
# for result in range(5):
    med_gain_true, mad_gain_true, med_gain_error, mad_gain_error, med_phase_true, mad_phase_true, med_phase_error, mad_phase_error = plot_gain_results(results[result, :, :], axs)
    all_med_gain_true.append(med_gain_true)
    all_mad_gain_true.append(mad_gain_true)
    all_med_gain_error.append(med_gain_error)
    all_mad_gain_error.append(mad_gain_error)
    all_med_phase_true.append(med_phase_true)
    all_mad_phase_true.append(mad_phase_true)
    all_med_phase_error.append(med_phase_error)
    all_mad_phase_error.append(mad_phase_error)
##Tidy up axis formatting on all four panels
for ax in axs.flatten():
    ax.tick_params(axis='both', labelsize=fontsize)
    # ax.ticklabel_format(useOffset=False)
    ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.0e'))
axs[0,0].set_title("True Model", fontsize=fontsize)
axs[0,1].set_title("Error Model", fontsize=fontsize)
axs[1,0].set_title("True Model", fontsize=fontsize)
axs[1,1].set_title("Error Model", fontsize=fontsize)
axs[0,0].set_ylabel('Density', fontsize=fontsize)
axs[1,0].set_ylabel('Density', fontsize=fontsize)
axs[0,0].set_xlabel('Gain offset from expected', fontsize=fontsize)
axs[0,1].set_xlabel('Gain offset from expected', fontsize=fontsize)
axs[1,0].set_xlabel('Angle offset from expected (deg)', fontsize=fontsize)
axs[1,1].set_xlabel('Angle offset from expected (deg)', fontsize=fontsize)
##Mark zero offset for reference
for ax in axs.flatten():
    ax.axvline(0.0, linestyle='--',alpha=0.5, color='k')
plt.tight_layout()
# fig.savefig('hist_gain-phase_error_from_cal_test.png',bbox_inches='tight')
plt.show()
```
5

```python
fig, axs = plt.subplots(2,2, figsize=(12, 12))
num_coords = len(ls)*len(ms)
markers = ['o', 's', '^', 'h', 'D']
for ind in range(len(all_med_phase_true)):
m_ind = int(ind // num_coords)
axs[0,0].errorbar(all_med_phase_true[ind], all_med_gain_true[ind],
xerr=all_mad_phase_true[ind], yerr=all_mad_gain_true[ind],
marker=markers[m_ind], mfc='none')
axs[0,1].errorbar(all_med_phase_error[ind], all_med_gain_error[ind],
xerr=all_mad_phase_error[ind], yerr=all_mad_gain_error[ind],
marker=markers[m_ind], mfc='none',alpha=0.4,
mec=f"C{m_ind}", ecolor=f"C{m_ind}")
axs[0,1].plot(all_med_phase_error[ind], all_med_gain_error[ind],
marker=markers[m_ind], mfc='none',
color=f"C{m_ind}")
axs[1,0].errorbar(all_med_phase_error[ind], all_med_gain_error[ind],
xerr=all_mad_phase_error[ind],
marker=markers[m_ind], mfc='none',alpha=0.4,
mec=f"C{m_ind}", ecolor=f"C{m_ind}")
axs[1,0].plot(all_med_phase_error[ind], all_med_gain_error[ind],
marker=markers[m_ind], mfc='none',
color=f"C{m_ind}")
axs[1,1].plot(all_med_phase_error[ind], all_med_gain_error[ind],
marker=markers[m_ind], mfc='none',
color=f"C{m_ind}")
fontsize = 14
axs[0,0].set_ylabel('Gain offset', fontsize=fontsize)
axs[1,0].set_ylabel('Gain offset', fontsize=fontsize)
axs[0,0].set_title('True Model', fontsize=fontsize)
axs[0,1].set_title('Error Model', fontsize=fontsize)
axs[1,0].set_title('Error Model', fontsize=fontsize)
axs[1,1].set_title('Error Model', fontsize=fontsize)
axs[1,0].set_xlabel('Phase offset (deg)', fontsize=fontsize)
axs[1,1].set_xlabel('Phase offset (deg)', fontsize=fontsize)
for ax in axs.flatten():
ax.set_xlim(-0.003, 0.003)
ax.set_ylim(-2e-4, 2e-4)
ax.axvline(0., color='k', linestyle='--', alpha=0.4, zorder=0)
ax.axhline(0., color='k', linestyle='--', alpha=0.4, zorder=0)
ax.tick_params(axis='both', labelsize=fontsize)
axs[1,0].set_ylim(-4e-5, 4e-5)
axs[1,1].set_ylim(-4e-5, 4e-5)
plt.tight_layout()
# fig.savefig("med-mad_error_from_cal_test.png")
plt.show()
print(f"Median magnitude of median phase offset: {np.median(np.abs(all_med_phase_error)):.1e} +/- "
f"{mad(np.abs(all_med_phase_error)):.1e}")
print(f"Median magnitude of median gain offset: {np.median(np.abs(all_med_gain_error)):.1e} +/- "
f"{mad(np.abs(all_med_gain_error)):.1e}")
```

Median magnitude of median phase offset: 1.1e-04 +/- 6.7e-05
Median magnitude of median gain offset: 9.6e-07 +/- 5.5e-07
Overall we see the same pattern of the phase being slightly biased, but the effect is an order of magnitude smaller than for the random array layouts - most likely because the MWA has fewer long baselines.
# Multiple sources on the sky
So far I've tested calibrating a single direction on the sky. What if we have multiple directions? Do these offsets somewhat cancel each other out?
```python
%%time
number_gains = 100
num_iterations = 20
do_cal = True
if do_cal:
all_results = np.empty((number_gains, NUM_ANTENNAS, 3), dtype=complex)
for gain_num in range(number_gains):
print(f'Doing gain set {gain_num}---------------------------------')
sum_visis_true = np.zeros(NUM_BASELINES, dtype=complex)
sum_model_true = np.zeros(NUM_BASELINES, dtype=complex)
sum_model_error = np.zeros(NUM_BASELINES, dtype=complex)
gains = make_gains()
for l_ind, l in enumerate(ls):
for m_ind, m in enumerate(ms):
# print(f'Doing coord {l:.1f},{m:.1f}')
result = gain_num*len(ls)*len(ms) + l_ind*len(ms) + m_ind
##Find n based on l,m
n = np.sqrt(1 - l*l - m*m)
true_visis, true_gains, true_model, err_visis, err_model, u_lengths = simple_simulate(mwa_us, mwa_vs, mwa_ws, l, m, n, gains)
sum_visis_true += true_visis
sum_model_true += true_model
sum_model_error += err_model
recover_true_gains = get_gain(data=sum_visis_true, model=sum_model_true, num_iterations=num_iterations)
recover_error_gains = get_gain(data=sum_visis_true, model=sum_model_error, num_iterations=num_iterations)
results = np.empty((len(gains), 3), dtype=complex)
results[:,0] = gains
results[:,1] = recover_true_gains
results[:,2] = recover_error_gains
all_results[gain_num] = results
np.savez_compressed("notebook_MWA_multi-comp_gain_errors.npz", results=all_results)
```
Doing gain set 0---------------------------------
Doing gain set 1---------------------------------
Doing gain set 2---------------------------------
Doing gain set 3---------------------------------
Doing gain set 4---------------------------------
Doing gain set 5---------------------------------
Doing gain set 6---------------------------------
Doing gain set 7---------------------------------
Doing gain set 8---------------------------------
Doing gain set 9---------------------------------
Doing gain set 10---------------------------------
Doing gain set 11---------------------------------
Doing gain set 12---------------------------------
Doing gain set 13---------------------------------
Doing gain set 14---------------------------------
Doing gain set 15---------------------------------
Doing gain set 16---------------------------------
Doing gain set 17---------------------------------
Doing gain set 18---------------------------------
Doing gain set 19---------------------------------
Doing gain set 20---------------------------------
Doing gain set 21---------------------------------
Doing gain set 22---------------------------------
Doing gain set 23---------------------------------
Doing gain set 24---------------------------------
Doing gain set 25---------------------------------
Doing gain set 26---------------------------------
Doing gain set 27---------------------------------
Doing gain set 28---------------------------------
Doing gain set 29---------------------------------
Doing gain set 30---------------------------------
Doing gain set 31---------------------------------
Doing gain set 32---------------------------------
Doing gain set 33---------------------------------
Doing gain set 34---------------------------------
Doing gain set 35---------------------------------
Doing gain set 36---------------------------------
Doing gain set 37---------------------------------
Doing gain set 38---------------------------------
Doing gain set 39---------------------------------
Doing gain set 40---------------------------------
Doing gain set 41---------------------------------
Doing gain set 42---------------------------------
Doing gain set 43---------------------------------
Doing gain set 44---------------------------------
Doing gain set 45---------------------------------
Doing gain set 46---------------------------------
Doing gain set 47---------------------------------
Doing gain set 48---------------------------------
Doing gain set 49---------------------------------
Doing gain set 50---------------------------------
Doing gain set 51---------------------------------
Doing gain set 52---------------------------------
Doing gain set 53---------------------------------
Doing gain set 54---------------------------------
Doing gain set 55---------------------------------
Doing gain set 56---------------------------------
Doing gain set 57---------------------------------
Doing gain set 58---------------------------------
Doing gain set 59---------------------------------
Doing gain set 60---------------------------------
Doing gain set 61---------------------------------
Doing gain set 62---------------------------------
Doing gain set 63---------------------------------
Doing gain set 64---------------------------------
Doing gain set 65---------------------------------
Doing gain set 66---------------------------------
Doing gain set 67---------------------------------
Doing gain set 68---------------------------------
Doing gain set 69---------------------------------
Doing gain set 70---------------------------------
Doing gain set 71---------------------------------
Doing gain set 72---------------------------------
Doing gain set 73---------------------------------
Doing gain set 74---------------------------------
Doing gain set 75---------------------------------
Doing gain set 76---------------------------------
Doing gain set 77---------------------------------
Doing gain set 78---------------------------------
Doing gain set 79---------------------------------
Doing gain set 80---------------------------------
Doing gain set 81---------------------------------
Doing gain set 82---------------------------------
Doing gain set 83---------------------------------
Doing gain set 84---------------------------------
Doing gain set 85---------------------------------
Doing gain set 86---------------------------------
Doing gain set 87---------------------------------
Doing gain set 88---------------------------------
Doing gain set 89---------------------------------
Doing gain set 90---------------------------------
Doing gain set 91---------------------------------
Doing gain set 92---------------------------------
Doing gain set 93---------------------------------
Doing gain set 94---------------------------------
Doing gain set 95---------------------------------
Doing gain set 96---------------------------------
Doing gain set 97---------------------------------
Doing gain set 98---------------------------------
Doing gain set 99---------------------------------
CPU times: user 1d 2h 20min 28s, sys: 2h 38min 40s, total: 1d 4h 59min 9s
Wall time: 2h 45s
```python
##Load up the calibration results
results = np.load("notebook_MWA_multi-comp_gain_errors.npz")["results"]
fig, axs = plt.subplots(2,2, figsize=(12, 12))
print(number_models)
##make empty lists and just stick results in because I'm being lazy
all_med_gain_true = []
all_mad_gain_true = []
all_med_gain_error = []
all_mad_gain_error = []
all_med_phase_true = []
all_mad_phase_true = []
all_med_phase_error = []
all_mad_phase_error = []
## For each calibration run, plot some histograms and collect some stats
for result in range(number_gains):
# for result in range(5):
med_gain_true, mad_gain_true, med_gain_error, mad_gain_error, med_phase_true, mad_phase_true, med_phase_error, mad_phase_error = plot_gain_results(results[result, :, :], axs)
all_med_gain_true.append(med_gain_true)
all_mad_gain_true.append(mad_gain_true)
all_med_gain_error.append(med_gain_error)
all_mad_gain_error.append(mad_gain_error)
all_med_phase_true.append(med_phase_true)
all_mad_phase_true.append(mad_phase_true)
all_med_phase_error.append(med_phase_error)
all_mad_phase_error.append(mad_phase_error)
for ax in axs.flatten():
ax.tick_params(axis='both', labelsize=fontsize)
# ax.ticklabel_format(useOffset=False)
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.0e'))
axs[0,0].set_title("True Model", fontsize=fontsize)
axs[0,1].set_title("Error Model", fontsize=fontsize)
axs[1,0].set_title("True Model", fontsize=fontsize)
axs[1,1].set_title("Error Model", fontsize=fontsize)
axs[0,0].set_ylabel('Density', fontsize=fontsize)
axs[1,0].set_ylabel('Density', fontsize=fontsize)
axs[0,0].set_xlabel('Gain offset from expected', fontsize=fontsize)
axs[0,1].set_xlabel('Gain offset from expected', fontsize=fontsize)
axs[1,0].set_xlabel('Angle offset from expected (deg)', fontsize=fontsize)
axs[1,1].set_xlabel('Angle offset from expected (deg)', fontsize=fontsize)
for ax in axs.flatten():
ax.axvline(0.0, linestyle='--',alpha=0.5, color='k')
plt.tight_layout()
# fig.savefig('hist_gain-phase_error_from_cal_test.png',bbox_inches='tight')
plt.show()
```
5

```python
fig, axs = plt.subplots(2,2, figsize=(12, 12))
# num_coords = len(ls)*len(ms)
# markers = ['o', 's', '^', 'h', 'D']
marker = 'o'
colour1 = 'C0'
colour2 = 'C1'
for ind in range(len(all_med_phase_true)):
m_ind = int(ind // num_coords)
axs[0,0].errorbar(all_med_phase_true[ind], all_med_gain_true[ind],
xerr=all_mad_phase_true[ind], yerr=all_mad_gain_true[ind],
marker=marker, mfc='none',alpha=0.4,
ecolor=colour1, mec=colour2)
axs[0,0].plot(all_med_phase_true[ind], all_med_gain_true[ind],
marker=marker, mfc='none',
color=colour2)
axs[0,1].errorbar(all_med_phase_error[ind], all_med_gain_error[ind],
xerr=all_mad_phase_error[ind], yerr=all_mad_gain_error[ind],
marker=marker, mfc='none',alpha=0.4,
ecolor=colour1, mec=colour2)
axs[0,1].plot(all_med_phase_error[ind], all_med_gain_error[ind],
marker=marker, mfc='none',
color=colour2)
axs[1,0].errorbar(all_med_phase_error[ind], all_med_gain_error[ind],
xerr=all_mad_phase_error[ind],
marker=marker, mfc='none',alpha=0.4,
ecolor=colour1, mec=colour2)
axs[1,0].plot(all_med_phase_error[ind], all_med_gain_error[ind],
marker=marker, mfc='none',
color=colour2)
axs[1,1].plot(all_med_phase_error[ind], all_med_gain_error[ind],
marker=marker, mfc='none',
color=colour2)
fontsize = 14
axs[0,0].set_ylabel('Gain offset', fontsize=fontsize)
axs[1,0].set_ylabel('Gain offset', fontsize=fontsize)
axs[0,0].set_title('True Model', fontsize=fontsize)
axs[0,1].set_title('Error Model', fontsize=fontsize)
axs[1,0].set_title('Error Model', fontsize=fontsize)
axs[1,1].set_title('Error Model', fontsize=fontsize)
axs[1,0].set_xlabel('Phase offset (deg)', fontsize=fontsize)
axs[1,1].set_xlabel('Phase offset (deg)', fontsize=fontsize)
for ax in axs.flatten():
ax.set_xlim(-0.025, 0.025)
ax.set_ylim(-1.5e-4, 1.5e-4)
ax.axvline(0., color='k', linestyle='--', alpha=0.4, zorder=0)
ax.axhline(0., color='k', linestyle='--', alpha=0.4, zorder=0)
ax.tick_params(axis='both', labelsize=fontsize)
axs[1,0].set_ylim(-3e-5, 3e-5)
axs[1,1].set_ylim(-3e-5, 3e-5)
plt.tight_layout()
# fig.savefig("med-mad_error_from_cal_test.png")
plt.show()
print(f"Median magnitude of median phase offset: {np.median(np.abs(all_med_phase_error)):.1e} +/- "
f"{mad(np.abs(all_med_phase_error)):.1e}")
print(f"Median magnitude of median gain offset: {np.median(np.abs(all_med_gain_error)):.1e} +/- "
f"{mad(np.abs(all_med_gain_error)):.1e}")
```

Median magnitude of median phase offset: 7.0e-04 +/- 4.8e-04
Median magnitude of median gain offset: 1.2e-06 +/- 5.6e-07
It actually looks like the errors compound on one another and increase the overall error on the phase estimation of the gains
# Just the measurement equation alone
If we test just the measurement equation function in `WODEN` rather than a full `woden` executable run, we get the following accuracy.
```python
data = np.loadtxt('../../build/cmake_testing/source_components/measurement_eq_outcomes_double.txt')
##Known sin outputs for input phi_simples
known_sine_angles = [0.0, 0.5, np.sqrt(2)/2, np.sqrt(3)/2, 1.0, np.sqrt(3)/2, np.sqrt(2)/2, 0.5, 0.0,
-0.5, -np.sqrt(2)/2]
##Known sin outputs for input phi_simples
known_cosine_angles = [1.0, np.sqrt(3)/2, np.sqrt(2)/2, 0.5, 0.0,
-0.5, -np.sqrt(2)/2, -np.sqrt(3)/2, -1.0, -np.sqrt(3)/2, -np.sqrt(2)/2]
known_angles_strings = ["$0.0$", "$\pi/6$", "$\pi/4$", "$\pi/3$",
"$\pi/2$", "$2\pi/3$", "$3\pi/4$", "$5\pi/6$",
"$\pi$", "$7\pi/6$", "$5\pi/4$"]
num_baselines = 5
num_angles = len(known_sine_angles)
fig, axs = plt.subplots(1,2, figsize=(10,6))
markers = ["o", "v", "^", "<", ">", "8",
"s", "p", "P", "*", "h", "H", "+"]
all_re_diffs = np.empty(num_baselines*num_angles)
all_im_diffs = np.empty(num_baselines*num_angles)
num_neg = []
num_pos = []
for angle_ind, known_angle in enumerate(known_angles_strings):
slice_low = angle_ind*num_baselines
slice_high = (angle_ind + 1)*num_baselines
u_lens = data[slice_low:slice_high, 0]
# expec_re = data[slice_low, 1]
calc_re = data[slice_low:slice_high, 2]
# expec_im = data[slice_low, 3]
calc_im = data[slice_low:slice_high, 4]
# u_lens = np.sqrt(3*u_lens**2)
expec_im = known_sine_angles[angle_ind]
expec_re = known_cosine_angles[angle_ind]
if expec_re == 0:
expec_re_div = 1.0
else:
expec_re_div = expec_re
if expec_im == 0:
expec_im_div = 1.0
else:
expec_im_div = expec_im
abs_diff_re = np.abs((expec_re - calc_re) / expec_re_div)*100.0
abs_diff_im = np.abs((expec_im - calc_im) / expec_im_div)*100.0
for diff in expec_re - calc_re:
if diff < 0:
num_neg.append(diff)
else:
num_pos.append(diff)
for diff in expec_im - calc_im:
if diff < 0:
num_neg.append(diff)
else:
num_pos.append(diff)
all_re_diffs[slice_low:slice_high] = abs_diff_re
all_im_diffs[slice_low:slice_high] = abs_diff_im
axs[0].plot(u_lens, abs_diff_re, color=f'C{angle_ind}',
marker=markers[angle_ind], label=known_angle)
axs[1].plot(u_lens, abs_diff_im, color=f'C{angle_ind}',
marker=markers[angle_ind], label=known_angle)
# expec_comp = expec_re + 1j*expec_im
# calc_comp = calc_re + 1j*calc_im
# abs_diff_percent = np.abs(1 - np.abs(calc_comp))*100
# phase_expec = np.angle(expec_comp)
# phase_calc = np.angle(calc_comp)
# phase_diff = phase_expec - phase_calc
# phase_diff[phase_diff > np.pi] -= 2*np.pi
# axs[1,0].plot(u_lens, abs_diff_percent, color=f'C{angle_ind}',
# marker=markers[angle_ind], label=known_angle)
# axs[1,1].plot(u_lens, phase_diff*(180.0/np.pi), color=f'C{angle_ind}',
# marker=markers[angle_ind], label=known_angle)
# print(np.abs(expec_comp))
##Summarise the sign balance and the worst-case fractional errors
print(f"Num pos offsets {len(num_pos)}")
print(f"Num neg offsets {len(num_neg)}")
##Divide by 100 to convert the percentage differences back into fractions
print(f"Max difference in real {all_re_diffs.max()/100:.1e}")
##Bug fix: report the imaginary-part differences here (was all_re_diffs)
print(f"Max difference in imag {all_im_diffs.max()/100:.1e}")
fontsize = 16
for ax in axs.flatten():
ax.set_xscale('log')
ax.set_yscale('log')
ax.tick_params(axis='both', labelsize=fontsize)
axs[0].legend(ncol=2)
axs[0].set_xlabel('$|\mathrm{u}| \,(\lambda)$', fontsize=fontsize)
axs[1].set_xlabel('$|\mathrm{u}| \,(\lambda)$', fontsize=fontsize)
axs[0].set_ylabel('Percentage difference from expected', fontsize=fontsize)
axs[1].set_ylabel('Percentage difference from expected', fontsize=fontsize)
axs[0].set_title("Real", fontsize=fontsize)
axs[1].set_title("Imaginary", fontsize=fontsize)
for ax in axs: ax.set_ylim(1e-6, 4e-6)
plt.tight_layout()
plt.show()
```
Num pos offsets 77
Num neg offsets 33
Max difference in real 1.8e-08
Max difference in imag 1.8e-08

```python
u_lens = np.sqrt(3*data[:,0]**2)
u_range = np.arange(1e-1,1e6)
linear_model = LinearModel(prefix='linear_')
linear_params = linear_model.make_params()
fit_re = linear_model.fit(np.log10(all_re_diffs[all_re_diffs != 0]),
linear_params, x=np.log10(u_lens[all_re_diffs != 0]))
intercept = fit_re.params['linear_intercept'].value
slope = fit_re.params['linear_slope'].value
print(f'Real intercept {intercept:.5f} slope {slope:.5f}')
best_fit_re = 10**(intercept + np.log10(u_range)*slope)
fig, axs = plt.subplots(1,2, figsize=(14,7))
axs[0].plot(u_lens, all_re_diffs, 'o', mfc='none')
axs[0].plot(u_range, best_fit_re, 'k--')
fit_im = linear_model.fit(np.log10(all_im_diffs[all_im_diffs != 0]),
linear_params, x=np.log10(u_lens[all_im_diffs != 0]))
intercept = fit_im.params['linear_intercept'].value
slope = fit_im.params['linear_slope'].value
print(f'Imag intercept {intercept:.5f} slope {slope:.5f}')
best_fit_im = 10**(intercept + np.log10(u_range)*slope)
axs[1].plot(u_lens, all_im_diffs, 'o', mfc='none')
axs[1].plot(u_range, best_fit_im, 'k--')
for ax in axs:
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('$|\mathrm{u}| \,(\lambda)$', fontsize=fontsize)
ax.set_ylabel('Percentage difference from expected', fontsize=fontsize)
ax.tick_params(axis='both', labelsize=fontsize)
plt.tight_layout()
# fig.savefig('measure_eq_results.png',bbox_inches='tight')
plt.show()
```
Real intercept -6.68839 slope 0.12269
Imag intercept -7.49154 slope 0.26107

|
JLBLineREPO_NAMEWODENPATH_START.@WODEN_extracted@WODEN-master@cmake_testing@GPU_code@source_components@check_accuracy_of_WODEN.ipynb@.PATH_END.py
|
{
"filename": "example_rotation.py",
"repo_name": "danielrd6/ifscube",
"repo_path": "ifscube_extracted/ifscube-master/ifscube/examples/example_rotation.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
from ifscube import models, rotation
def create_fake_data(plot: bool = False):
    """Return a 100x100 synthetic disk-rotation velocity field with Gaussian noise.

    When *plot* is true, the noisy field is also displayed with a colour bar.
    """
    row, col = np.indices((100, 100))
    disk = models.DiskRotation(
        amplitude=250.0, c_0=3.0, p=1.25, phi_0=np.deg2rad(60.0),
        theta=np.deg2rad(30.0), v_sys=0.0, x_0=55, y_0=45)
    # Evaluate the model on the grid and add per-pixel Gaussian noise (sigma=5).
    noisy = np.random.normal(disk(col, row), 5.0)
    if plot:
        axes = plt.figure().add_subplot(111)
        image = axes.imshow(noisy, origin='lower', cmap='Spectral_r')
        plt.colorbar(image, ax=axes)
        plt.show()
    return noisy
def main():
    """Fit the disk-rotation model to synthetic data and display the results."""
    observed = create_fake_data(plot=False)
    observed[0, 0] = np.nan  # plant one bad pixel, as found in real maps
    cfg = rotation.Config('rotation.ini')
    fitter = rotation.Rotation(input_data=observed)
    # Seed the model near the values used to generate the fake data.
    cfg.model['x_0'] = 55
    cfg.model['y_0'] = 45
    cfg.model['theta'] = np.deg2rad(30)
    cfg.model['phi_0'] = np.deg2rad(60)
    fitter.update_model(cfg.model)
    fitter.update_bounds(cfg.bounds)
    fitter.update_fixed(cfg.fixed)
    fitter.fit_model(maxiter=1000)
    fitter.print_solution()
    fitter.plot_results(contours=False)


if __name__ == '__main__':
    main()
|
danielrd6REPO_NAMEifscubePATH_START.@ifscube_extracted@ifscube-master@ifscube@examples@example_rotation.py@.PATH_END.py
|
{
"filename": "mc_publish_summary.py",
"repo_name": "HERA-Team/hera_mc",
"repo_path": "hera_mc_extracted/hera_mc-main/scripts/mc_publish_summary.py",
"type": "Python"
}
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""This publishes a webpage on hera-today.
If not on qmaster, it just writes the html file.
"""
from hera_mc import cm_sysutils, cm_utils, mc
if __name__ == "__main__":
    # Hookup columns shown when the user does not supply their own subset.
    default_hookup_cols = [
        "station",
        "feed",
        "front-end",
        "node-bulkhead",
        "post-amp",
        "snap",
        "node",
    ]

    ap = mc.get_mc_argument_parser()
    # Command-line options consumed below.
    ap.add_argument(
        "-p", "--hpn", help="Part number, csv-list or [default]", default="default"
    )
    ap.add_argument(
        "-e",
        "--exact-match",
        help="Force exact matches on part numbers, not beginning N char. [False]",
        dest="exact_match",
        action="store_true",
    )
    ap.add_argument(
        "--hookup-cols",
        help="Specify a subset of parts to show comma-delimited no-space list.",
        dest="hookup_cols",
        default=default_hookup_cols,
    )
    cli_args = ap.parse_args()

    # Normalise csv-style arguments into python lists.
    cli_args.hpn = cm_utils.listify(cli_args.hpn)
    cli_args.hookup_cols = cm_utils.listify(cli_args.hookup_cols)

    # Open a database session and publish the summary page.
    db = mc.connect_to_mc_db(cli_args)
    with db.sessionmaker() as session:
        handling = cm_sysutils.Handling(session)
        handling.publish_summary(
            hlist=cli_args.hpn,
            exact_match=cli_args.exact_match,
            hookup_cols=cli_args.hookup_cols,
        )
|
HERA-TeamREPO_NAMEhera_mcPATH_START.@hera_mc_extracted@hera_mc-main@scripts@mc_publish_summary.py@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/table/hoverlabel/font/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Validator for the ``table.hoverlabel.font.lineposition`` flaglist."""

    def __init__(
        self, plotly_name="lineposition", parent_name="table.hoverlabel.font", **kwargs
    ):
        # Pop the defaults out of kwargs first so explicit overrides win,
        # then forward everything to the flaglist base validator.
        options = {
            "array_ok": kwargs.pop("array_ok", True),
            "edit_type": kwargs.pop("edit_type", "none"),
            "extras": kwargs.pop("extras", ["none"]),
            "flags": kwargs.pop("flags", ["under", "over", "through"]),
        }
        super(LinepositionValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **options, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@table@hoverlabel@font@_lineposition.py@.PATH_END.py
|
{
"filename": "_yanchor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/marker/colorbar/_yanchor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``scattermap.marker.colorbar.yanchor`` enum."""

    def __init__(
        self, plotly_name="yanchor", parent_name="scattermap.marker.colorbar", **kwargs
    ):
        # Defaults are popped from kwargs so callers may override them.
        options = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "values": kwargs.pop("values", ["top", "middle", "bottom"]),
        }
        super(YanchorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **options, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermap@marker@colorbar@_yanchor.py@.PATH_END.py
|
{
"filename": "BaseCosmology.py",
"repo_name": "igomezv/simplemc_tests",
"repo_path": "simplemc_tests_extracted/simplemc_tests-main/simplemc/cosmo/BaseCosmology.py",
"type": "Python"
}
|
import numpy as np
import scipy as sp
import scipy.integrate as intg
from scipy import constants
from scipy.misc import derivative  # NOTE: removed in SciPy >= 1.12; kept for backward compatibility

from simplemc.cosmo.paramDefs import h_par, Pr_par, s8_par
class BaseCosmology:
    """
    Base Cosmology class.

    It knows nothing about a particular parameterization of the equation of
    state or of the densities.  It does, however, know about the Hubble
    constant at z=0 OR the prefactor c/(H0*rd), which should be fit for in
    the case of "rd agnostic" fits; let it declare those parameters based on
    its settings.

    To get the angular diameter distance you need to pass it the curvature
    parameter (Omega_k basically), so you need to update it.  Also, to use
    the fs8 dataset you need to add the s8 parameter.

    We use the speed of light in km s^-1.

    NOTE: the deprecated NumPy aliases in the ``scipy`` namespace
    (``sp.sqrt`` etc.) and ``scipy.misc.derivative`` were removed in modern
    SciPy; this class now uses ``numpy`` and an explicit central difference.

    Parameters
    ----------
    h : float
        Value of the reduced Hubble parameter h = H0/100.
    """

    # Speed of light in km/s (scipy.constants.c is in m/s).
    c_ = constants.c / 1000.

    def __init__(self, h=h_par.value):
        self.Curv = 0                # curvature (Omega_k); 0 means flat
        self.rd = 149.50             # sound horizon at the drag epoch [Mpc]
        self.h = h
        self.prefact = Pr_par.value  # prefactor c/(H0*rd), used when fitted directly
        self.s8 = s8_par.value
        self.varys8 = False
        self.varyPrefactor = False
        BaseCosmology.updateParams(self, [])

    def setCurvature(self, R):
        """Set the curvature parameter (Omega_k)."""
        self.Curv = R

    def setrd(self, rd):
        """Set the sound horizon at the drag epoch in Mpc."""
        self.rd = rd

    def setVaryPrefactor(self, T=True):
        """Choose whether to fit the prefactor c/(H0*rd) instead of h."""
        self.varyPrefactor = T

    def setPrefactor(self, p):
        """Set the prefactor c/(H0*rd)."""
        self.prefact = p

    def prefactor(self):
        """Return c/(H0*rd): the fitted value, or the one derived from rd and h."""
        if self.varyPrefactor:
            return self.prefact
        else:
            return self.c_/(self.rd*self.h*100)

    def setVarys8(self, T=True):
        """Choose whether s8 is a free parameter (needed for fs8 data)."""
        self.varys8 = T

    def freeParameters(self):
        """Return the list of free Parameter instances for this model."""
        if (self.varyPrefactor):
            Pr_par.setValue(self.prefact)
            l = [Pr_par]
        else:
            h_par.setValue(self.h)
            l = [h_par]
        if (self.varys8):
            s8_par.setValue(self.s8)
            l.append(s8_par)
        return l

    def printFreeParameters(self):
        """Print the names and values of the free parameters."""
        print("Free parameters:")
        self.printParameters(self.freeParameters())

    def printParameters(self, params):
        """Print and return 'name: value = +/- error' strings for *params*."""
        l = []
        for p in params:
            print(p.name, '=', p.value, '+/-', p.error)
            l.append("{}: {} = +/- {}".format(p.name, p.value, p.error))
        return l

    def updateParams(self, pars):
        """
        Update parameters values.

        Parameters
        ----------
        pars : list
            List of instance of the Parameter class
        """
        for p in pars:
            if p.name == "h":
                self.h = p.value
            elif p.name == "Pr":
                self.setPrefactor(p.value)
                # h shouldn't matter here.
                # We do not want it to enter secondarily through
                # say neutrinos, so let's keep it sane.
                #
                # self.h=p.value*self.rd*100/self.c_
            elif p.name == 's8':
                self.s8 = p.value
        return True

    def prior_loglike(self):
        # Flat prior by default; subclasses may override.
        return 0

    def RHSquared_a(self, a):
        """
        Relative h-squared as a function of the scale factor a,
        i.e. H(z)^2/H(z=0)^2.  Subclasses MUST override this.

        Parameters
        ----------
        a : float
            Scale factor.
        """
        print("You should not instantiate BaseCosmology")
        print("BAD")
        return 0

    def Hinv_z(self, z):
        """Return H0/H(z)."""
        return 1./np.sqrt(self.RHSquared_a(1.0/(1+z)))

    def DistIntegrand_a(self, a):
        """Integrand of the comoving distance, written in the scale factor."""
        return 1./np.sqrt(self.RHSquared_a(a))/a**2

    def Da_z(self, z):
        """Dimensionless comoving distance (units of c/H0), with curvature."""
        # Integrating over a instead of z seems to be faster than
        # r = intg.quad(self.Hinv_z, 0, z).
        r = intg.quad(self.DistIntegrand_a, 1./(1+z), 1)
        r = r[0]  # assume precision is ok
        if self.Curv == 0:
            return r
        elif (self.Curv > 0):
            q = np.sqrt(self.Curv)
            # someone check this eq
            # Pure ADD has a 1+z fact, but have
            # comoving one.
            return np.sinh(r*q)/(q)
        else:
            q = np.sqrt(-self.Curv)
            return np.sin(r*q)/(q)

    def AD_z(self, z):
        """Angular diameter distance in Mpc."""
        return self.Da_z(z)*self.c_/(self.h*100)/(1+z)

    def DaOverrd(self, z):
        """D_a / rd, as used by BAO likelihoods."""
        return self.prefactor()*self.Da_z(z)

    def HIOverrd(self, z):
        """c H(z)^-1 / rd."""
        return self.prefactor()*self.Hinv_z(z)

    def DVOverrd(self, z):
        """Volume-averaged distance D_v / rd."""
        return self.prefactor()*(self.Da_z(z)**(2./3.)*(z*self.Hinv_z(z))**(1./3.))

    def distance_modulus(self, z):
        """Distance modulus (up to the constant absorbed elsewhere)."""
        # I think this should also work with varyPrefactor as long as BAO is there too
        # Note that our Da_z is comoving, so we're only
        # multiplying with a single (1+z) factor.
        return 5*np.log10(self.Da_z(z)*(1+z))

    def GrowthIntegrand_a(self, a):
        """Integrand of the growth-factor integral (Eq. 7.77 of Dodelson)."""
        return 1./(self.RHSquared_a(a)*a*a)**(1.5)

    def growth(self, z):
        """Growth factor as a function of redshift (Eq. 7.77 from Doddie)."""
        af = 1/(1.+z)
        r = intg.quad(self.GrowthIntegrand_a, 1e-7, af)
        gr = np.sqrt(self.RHSquared_a(af))*r[0]  # assuming precision is ok
        # If we have Omega_m, let's normalize that way.
        if hasattr(self, "Om"):
            gr *= 5/2.*self.Om
        return gr

    def fs8(self, z):
        """Growth-rate observable f(z)*sigma8(z)."""
        # Central difference replacing the removed scipy.misc.derivative;
        # same stencil as derivative(self.growth, z, dx=1e-6, order=3).
        dz = 1e-6
        dgrowth = (self.growth(z + dz) - self.growth(z - dz)) / (2 * dz)
        return -self.s8*(1+z)*dgrowth/self.growth(0)

    def compuAge(self, z):
        """Integrand for the age of the Universe."""
        return 1.0/((1+z)*100.0*self.h*np.sqrt(self.RHSquared_a(1.0/(1+z))))

    def Age(self):
        """Age of the Universe in Gyr."""
        # 1/H0 converted from (km/s/Mpc)^-1 to Gyr:
        # 3.24076E-20 converts km/s/Mpc to s^-1, 3.154E7 s per year.
        return intg.quad(self.compuAge, 0, 10**5)[0]/3.24076E-20/(3.154E7*1.0E9)
|
igomezvREPO_NAMEsimplemc_testsPATH_START.@simplemc_tests_extracted@simplemc_tests-main@simplemc@cosmo@BaseCosmology.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/root/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# On Python < 3.7 (no module-level __getattr__, PEP 562) or while a static
# type checker is running, import the validator eagerly; otherwise defer the
# import until first attribute access to keep plotly's import time down.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import wires up lazy __getattr__/__dir__ for this package.
    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._color.ColorValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@root@__init__.py@.PATH_END.py
|
{
"filename": "more.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/setuptools/py3/pkg_resources/_vendor/more_itertools/more.py",
"type": "Python"
}
|
import warnings
from collections import Counter, defaultdict, deque, abc
from collections.abc import Sequence
from functools import cached_property, partial, reduce, wraps
from heapq import heapify, heapreplace, heappop
from itertools import (
chain,
compress,
count,
cycle,
dropwhile,
groupby,
islice,
repeat,
starmap,
takewhile,
tee,
zip_longest,
product,
)
from math import exp, factorial, floor, log, perm, comb
from queue import Empty, Queue
from random import random, randrange, uniform
from operator import itemgetter, mul, sub, gt, lt, ge, le
from sys import hexversion, maxsize
from time import monotonic
from .recipes import (
_marker,
_zip_equal,
UnequalIterablesError,
consume,
flatten,
pairwise,
powerset,
take,
unique_everseen,
all_equal,
batched,
)
# Public API of this module; star-imports and documentation tools rely on it.
__all__ = [
    'AbortThread',
    'SequenceView',
    'UnequalIterablesError',
    'adjacent',
    'all_unique',
    'always_iterable',
    'always_reversible',
    'bucket',
    'callback_iter',
    'chunked',
    'chunked_even',
    'circular_shifts',
    'collapse',
    'combination_index',
    'combination_with_replacement_index',
    'consecutive_groups',
    'constrained_batches',
    'consumer',
    'count_cycle',
    'countable',
    'difference',
    'distinct_combinations',
    'distinct_permutations',
    'distribute',
    'divide',
    'duplicates_everseen',
    'duplicates_justseen',
    'classify_unique',
    'exactly_n',
    'filter_except',
    'filter_map',
    'first',
    'gray_product',
    'groupby_transform',
    'ichunked',
    'iequals',
    'ilen',
    'interleave',
    'interleave_evenly',
    'interleave_longest',
    'intersperse',
    'is_sorted',
    'islice_extended',
    'iterate',
    'iter_suppress',
    'last',
    'locate',
    'longest_common_prefix',
    'lstrip',
    'make_decorator',
    'map_except',
    'map_if',
    'map_reduce',
    'mark_ends',
    'minmax',
    'nth_or_last',
    'nth_permutation',
    'nth_product',
    'nth_combination_with_replacement',
    'numeric_range',
    'one',
    'only',
    'outer_product',
    'padded',
    'partial_product',
    'partitions',
    'peekable',
    'permutation_index',
    'product_index',
    'raise_',
    'repeat_each',
    'repeat_last',
    'replace',
    'rlocate',
    'rstrip',
    'run_length',
    'sample',
    'seekable',
    'set_partitions',
    'side_effect',
    'sliced',
    'sort_together',
    'split_after',
    'split_at',
    'split_before',
    'split_into',
    'split_when',
    'spy',
    'stagger',
    'strip',
    'strictly_n',
    'substrings',
    'substrings_indexes',
    'takewhile_inclusive',
    'time_limited',
    'unique_in_window',
    'unique_to_each',
    'unzip',
    'value_chain',
    'windowed',
    'windowed_complete',
    'with_iter',
    'zip_broadcast',
    'zip_equal',
    'zip_offset',
]
def chunked(iterable, n, strict=False):
    """Break *iterable* into lists of at most *n* items each.

    >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
    [[1, 2, 3], [4, 5, 6]]

    When the iterable's length is not a multiple of *n*, the final list is
    shorter:

    >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[1, 2, 3], [4, 5, 6], [7, 8]]

    To pad the final chunk instead, see the :func:`grouper` recipe.  With
    *strict* set to ``True``, a short final chunk raises ``ValueError``
    before being yielded.
    """
    # iter(callable, sentinel): call take() repeatedly until it returns [].
    iterator = iter(partial(take, n, iter(iterable)), [])
    if not strict:
        return iterator
    if n is None:
        raise ValueError('n must not be None when using strict mode.')

    def checked():
        for chunk in iterator:
            if len(chunk) != n:
                raise ValueError('iterable is not divisible by n.')
            yield chunk

    return iter(checked())
def first(iterable, default=_marker):
    """Return the first item of *iterable*, or *default* when the iterable
    is empty.

    >>> first([0, 1, 2, 3])
    0
    >>> first([], 'some default')
    'some default'

    A ``ValueError`` is raised when the iterable is empty and no *default*
    was given.  Handy for pulling an arbitrary value out of a generator of
    expensive-to-retrieve items; marginally shorter than
    ``next(iter(iterable), default)``.
    """
    for value in iterable:
        return value
    if default is not _marker:
        return default
    raise ValueError(
        'first() was called on an empty iterable, and no '
        'default value was provided.'
    )
def last(iterable, default=_marker):
    """Return the last item of *iterable*, or *default* if *iterable* is
    empty.
    >>> last([0, 1, 2, 3])
    3
    >>> last([], 'some default')
    'some default'
    If *default* is not provided and there are no items in the iterable,
    raise ``ValueError``.
    """
    try:
        # Fast path: sequences support O(1) negative indexing.
        if isinstance(iterable, Sequence):
            return iterable[-1]
        # Work around https://bugs.python.org/issue38525
        elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
            # Reversible objects: the first item of the reverse iterator.
            return next(reversed(iterable))
        else:
            # Generic iterables: a one-slot deque retains only the final item.
            return deque(iterable, maxlen=1)[-1]
    except (IndexError, TypeError, StopIteration):
        # IndexError: empty sequence/deque; StopIteration: empty reversed();
        # TypeError: presumably from unusual inputs — all treated as "empty".
        if default is _marker:
            raise ValueError(
                'last() was called on an empty iterable, and no default was '
                'provided.'
            )
        return default
def nth_or_last(iterable, n, default=_marker):
    """Return item *n* of *iterable*, falling back to its last item when it
    has fewer than ``n + 1`` items (or to *default* when it is empty).

    >>> nth_or_last([0, 1, 2, 3], 2)
    2
    >>> nth_or_last([0, 1], 2)
    1
    >>> nth_or_last([], 0, 'some default')
    'some default'

    If *default* is not provided and there are no items in the iterable,
    raise ``ValueError``.
    """
    # Take at most n + 1 items, then reuse last() for the fallback logic.
    prefix = islice(iterable, n + 1)
    return last(prefix, default=default)
class peekable:
    """Wrap an iterator to allow lookahead and prepending elements.
    Call :meth:`peek` on the result to get the value that will be returned
    by :func:`next`. This won't advance the iterator:
    >>> p = peekable(['a', 'b'])
    >>> p.peek()
    'a'
    >>> next(p)
    'a'
    Pass :meth:`peek` a default value to return that instead of raising
    ``StopIteration`` when the iterator is exhausted.
    >>> p = peekable([])
    >>> p.peek('hi')
    'hi'
    peekables also offer a :meth:`prepend` method, which "inserts" items
    at the head of the iterable:
    >>> p = peekable([1, 2, 3])
    >>> p.prepend(10, 11, 12)
    >>> next(p)
    10
    >>> p.peek()
    11
    >>> list(p)
    [11, 12, 1, 2, 3]
    peekables can be indexed. Index 0 is the item that will be returned by
    :func:`next`, index 1 is the item after that, and so on:
    The values up to the given index will be cached.
    >>> p = peekable(['a', 'b', 'c', 'd'])
    >>> p[0]
    'a'
    >>> p[1]
    'b'
    >>> next(p)
    'a'
    Negative indexes are supported, but be aware that they will cache the
    remaining items in the source iterator, which may require significant
    storage.
    To check whether a peekable is exhausted, check its truth value:
    >>> p = peekable(['a', 'b'])
    >>> if p: # peekable has items
    ...     list(p)
    ['a', 'b']
    >>> if not p: # peekable is exhausted
    ...     list(p)
    []
    """
    def __init__(self, iterable):
        # The source iterator plus a FIFO cache of items already pulled
        # from it (via peek/indexing) but not yet delivered to the caller.
        self._it = iter(iterable)
        self._cache = deque()
    def __iter__(self):
        return self
    def __bool__(self):
        # Truthy iff at least one more item is available.
        try:
            self.peek()
        except StopIteration:
            return False
        return True
    def peek(self, default=_marker):
        """Return the item that will be next returned from ``next()``.
        Return ``default`` if there are no items left. If ``default`` is not
        provided, raise ``StopIteration``.
        """
        # Pull one item into the cache if needed; the cache front is always
        # the next item next() will deliver.
        if not self._cache:
            try:
                self._cache.append(next(self._it))
            except StopIteration:
                if default is _marker:
                    raise
                return default
        return self._cache[0]
    def prepend(self, *items):
        """Stack up items to be the next ones returned from ``next()`` or
        ``self.peek()``. The items will be returned in
        first in, first out order::
            >>> p = peekable([1, 2, 3])
            >>> p.prepend(10, 11, 12)
            >>> next(p)
            10
            >>> list(p)
            [11, 12, 1, 2, 3]
        It is possible, by prepending items, to "resurrect" a peekable that
        previously raised ``StopIteration``.
            >>> p = peekable([])
            >>> next(p)
            Traceback (most recent call last):
              ...
            StopIteration
            >>> p.prepend(1)
            >>> next(p)
            1
            >>> next(p)
            Traceback (most recent call last):
              ...
            StopIteration
        """
        # extendleft() reverses its argument, so reverse first to keep
        # the items in FIFO order.
        self._cache.extendleft(reversed(items))
    def __next__(self):
        # Serve cached items (from peeks/prepends) before the source.
        if self._cache:
            return self._cache.popleft()
        return next(self._it)
    def _get_slice(self, index):
        # Normalize the slice's arguments
        step = 1 if (index.step is None) else index.step
        if step > 0:
            start = 0 if (index.start is None) else index.start
            stop = maxsize if (index.stop is None) else index.stop
        elif step < 0:
            start = -1 if (index.start is None) else index.start
            stop = (-maxsize - 1) if (index.stop is None) else index.stop
        else:
            raise ValueError('slice step cannot be zero')
        # If either the start or stop index is negative, we'll need to cache
        # the rest of the iterable in order to slice from the right side.
        if (start < 0) or (stop < 0):
            self._cache.extend(self._it)
        # Otherwise we'll need to find the rightmost index and cache to that
        # point.
        else:
            n = min(max(start, stop) + 1, maxsize)
            cache_len = len(self._cache)
            if n >= cache_len:
                self._cache.extend(islice(self._it, n - cache_len))
        # Slice the cache; cached items are not consumed by this.
        return list(self._cache)[index]
    def __getitem__(self, index):
        if isinstance(index, slice):
            return self._get_slice(index)
        cache_len = len(self._cache)
        # A negative index requires caching everything that remains; a
        # positive one only requires caching up to that position.
        if index < 0:
            self._cache.extend(self._it)
        elif index >= cache_len:
            self._cache.extend(islice(self._it, index + 1 - cache_len))
        return self._cache[index]
def consumer(func):
    """Decorator that primes a PEP-342-style "reverse iterator" (a generator
    used as a coroutine) by advancing it to its first ``yield``, so callers
    can use ``send()`` immediately.

    >>> @consumer
    ... def tally():
    ...     i = 0
    ...     while True:
    ...         print('Thing number %s is %s.' % (i, (yield)))
    ...         i += 1
    ...
    >>> t = tally()
    >>> t.send('red')
    Thing number 0 is red.
    >>> t.send('fish')
    Thing number 1 is fish.

    Without the decorator, ``next(t)`` would have to be called before
    ``t.send()`` could be used.
    """

    @wraps(func)
    def primed(*args, **kwargs):
        coroutine = func(*args, **kwargs)
        # Advance to the first yield so send() works right away.
        next(coroutine)
        return coroutine

    return primed
def ilen(iterable):
    """Return the number of items in *iterable*.

    >>> ilen(x for x in range(1000000) if x % 3 == 0)
    333334

    This consumes the iterable, so handle with care.
    """
    # enumerate(..., 1) numbers each item; the last number seen is the
    # length.  `total` stays 0 when the iterable is empty.
    total = 0
    for total, _ in enumerate(iterable, 1):
        pass
    return total
def iterate(func, start):
    """Yield *start*, then ``func(start)``, ``func(func(start))``, and so
    on, stopping if *func* raises ``StopIteration``.

    >>> from itertools import islice
    >>> list(islice(iterate(lambda x: 2*x, 1), 10))
    [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
    """
    current = start
    while True:
        yield current
        try:
            current = func(current)
        except StopIteration:
            return
def with_iter(context_manager):
    """Yield the items of the iterable produced by entering
    *context_manager*, exiting it once iteration finishes.

    For example, this closes the file once its lines are exhausted::

        upper_lines = (line.upper() for line in with_iter(open('foo')))

    Any context manager that returns an iterable can be used.
    """
    with context_manager as iterable:
        for item in iterable:
            yield item
def one(iterable, too_short=None, too_long=None):
    """Return the only item of *iterable*, raising an exception when the
    iterable is empty or holds more than one item.

    Useful for asserting that an iterable (e.g. a database query expected
    to produce a single row) really contains exactly one item.

    An empty iterable raises ``ValueError``, or *too_short* if given:

    >>> it = []
    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: too few items in iterable (expected 1)
    >>> too_short = IndexError('too few items')
    >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    IndexError: too few items

    More than one item raises ``ValueError``, or *too_long* if given:

    >>> it = ['too', 'many']
    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: Expected exactly one item in iterable, but got 'too',
    'many', and perhaps more.
    >>> too_long = RuntimeError
    >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    RuntimeError

    Note that :func:`one` advances *iterable* up to twice; see :func:`spy`
    or :func:`peekable` for less destructive inspection.
    """
    iterator = iter(iterable)

    try:
        first_value = next(iterator)
    except StopIteration as exc:
        raise (
            too_short or ValueError('too few items in iterable (expected 1)')
        ) from exc

    try:
        second_value = next(iterator)
    except StopIteration:
        return first_value

    msg = (
        'Expected exactly one item in iterable, but got {!r}, {!r}, '
        'and perhaps more.'.format(first_value, second_value)
    )
    raise too_long or ValueError(msg)
def raise_(exception, *args):
    """Construct *exception* with *args* and raise it (a statement made
    usable inside lambdas)."""
    error = exception(*args)
    raise error
def strictly_n(iterable, n, too_short=None, too_long=None):
    """Validate that *iterable* has exactly *n* items and yield them if it
    does.  With fewer than *n* items, call *too_short* with the count
    delivered; with more, call *too_long* with ``n + 1``.

    >>> iterable = ['a', 'b', 'c', 'd']
    >>> n = 4
    >>> list(strictly_n(iterable, n))
    ['a', 'b', 'c', 'd']

    The check only happens as the returned iterable is consumed.  By
    default *too_short* and *too_long* raise ``ValueError``:

    >>> list(strictly_n('ab', 3))  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: too few items in iterable (got 2)
    >>> list(strictly_n('abc', 2))  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: too many items in iterable (got at least 3)

    Supply your own callables to do something else:

    >>> def too_long(item_count):
    ...     print('The boss is going to hear about this')
    >>> it = strictly_n('abcdef', 4, too_long=too_long)
    >>> list(it)
    The boss is going to hear about this
    ['a', 'b', 'c', 'd']
    """
    if too_short is None:
        too_short = lambda item_count: raise_(
            ValueError,
            'Too few items in iterable (got {})'.format(item_count),
        )

    if too_long is None:
        too_long = lambda item_count: raise_(
            ValueError,
            'Too many items in iterable (got at least {})'.format(item_count),
        )

    iterator = iter(iterable)
    for delivered in range(n):
        try:
            item = next(iterator)
        except StopIteration:
            too_short(delivered)
            return
        yield item

    # Probe for a surplus item beyond the first n.
    try:
        next(iterator)
    except StopIteration:
        return
    too_long(n + 1)
def distinct_permutations(iterable, r=None):
    """Yield successive distinct permutations of the elements in *iterable*.
    >>> sorted(distinct_permutations([1, 0, 1]))
    [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
    Equivalent to ``set(permutations(iterable))``, except duplicates are not
    generated and thrown away. For larger input sequences this is much more
    efficient.
    Duplicate permutations arise when there are duplicated elements in the
    input iterable. The number of items returned is
    `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
    items input, and each `x_i` is the count of a distinct item in the input
    sequence.
    If *r* is given, only the *r*-length permutations are yielded.
    >>> sorted(distinct_permutations([1, 0, 1], r=2))
    [(0, 1), (1, 0), (1, 1)]
    >>> sorted(distinct_permutations(range(3), r=2))
    [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
    """
    # Algorithm: https://w.wiki/Qai
    # Both helpers close over `size` (assigned near the bottom of this
    # function) and mutate their list argument in place.  Starting from a
    # sorted list and stepping to the next lexicographic permutation
    # guarantees duplicates are never produced.
    def _full(A):
        while True:
            # Yield the permutation we have
            yield tuple(A)
            # Find the largest index i such that A[i] < A[i + 1]
            for i in range(size - 2, -1, -1):
                if A[i] < A[i + 1]:
                    break
            # If no such index exists, this permutation is the last one
            else:
                return
            # Find the largest index j greater than i such that A[i] < A[j]
            for j in range(size - 1, i, -1):
                if A[i] < A[j]:
                    break
            # Swap the value of A[i] with that of A[j], then reverse the
            # sequence from A[i + 1] to form the new permutation
            A[i], A[j] = A[j], A[i]
            A[i + 1 :] = A[: i - size : -1]  # A[i + 1:][::-1]
    # Algorithm: modified from the above to emit only length-r prefixes
    def _partial(A, r):
        # Split A into the first r items (emitted) and the remaining items
        head, tail = A[:r], A[r:]
        right_head_indexes = range(r - 1, -1, -1)
        left_tail_indexes = range(len(tail))
        while True:
            # Yield the permutation we have
            yield tuple(head)
            # Starting from the right, find the first index of the head with
            # value smaller than the maximum value of the tail - call it i.
            pivot = tail[-1]
            for i in right_head_indexes:
                if head[i] < pivot:
                    break
                pivot = head[i]
            else:
                return
            # Starting from the left, find the first value of the tail
            # with a value greater than head[i] and swap.
            for j in left_tail_indexes:
                if tail[j] > head[i]:
                    head[i], tail[j] = tail[j], head[i]
                    break
            # If we didn't find one, start from the right and find the first
            # index of the head with a value greater than head[i] and swap.
            else:
                for j in right_head_indexes:
                    if head[j] > head[i]:
                        head[i], head[j] = head[j], head[i]
                        break
            # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
            tail += head[: i - r : -1]  # head[i + 1:][::-1]
            i += 1
            head[i:], tail[:] = tail[: r - i], tail[r - i :]
    items = sorted(iterable)
    size = len(items)
    if r is None:
        r = size
    if 0 < r <= size:
        return _full(items) if (r == size) else _partial(items, r)
    # Degenerate cases: no permutations when r > size; one empty tuple
    # when r == 0.
    return iter(() if r else ((),))
def intersperse(e, iterable, n=1):
    """Introduce filler element *e* between the items of *iterable*, with
    *n* items between consecutive fillers.

    >>> list(intersperse('!', [1, 2, 3, 4, 5]))
    [1, '!', 2, '!', 3, '!', 4, '!', 5]
    >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
    [1, 2, None, 3, 4, None, 5]
    """
    if n == 0:
        raise ValueError('n must be > 0')
    if n == 1:
        # Pair every item with a leading filler, then drop the very first
        # filler: e, x_0, e, x_1, ... -> x_0, e, x_1, ...
        return islice(interleave(repeat(e), iterable), 1, None)
    # Alternate [e] with n-item chunks, drop the leading filler chunk, and
    # flatten: [x_0, x_1], [e], [x_2, x_3], ... -> x_0, x_1, e, x_2, x_3, ...
    filler = repeat([e])
    chunks = chunked(iterable, n)
    return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
    """Return, for each input iterable, the elements that appear in no
    other input iterable.

    For example, given packages with dependency sets, removing ``pkg_1``
    means ``A`` is no longer needed, and so on::

        {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}

    >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
    [['A'], ['C'], ['D']]

    Duplicates within a single input that appear in no other input are kept,
    and input order is preserved:

    >>> unique_to_each("mississippi", "missouri")
    [['p', 'p'], ['o', 'u', 'r']]

    Elements must be hashable.
    """
    materialized = [list(it) for it in iterables]

    # Count how many *distinct* inputs each element occurs in.
    membership_counts = Counter()
    for group in materialized:
        membership_counts.update(set(group))

    singletons = {
        element
        for element, count_ in membership_counts.items()
        if count_ == 1
    }
    return [
        [item for item in group if item in singletons]
        for group in materialized
    ]
def windowed(seq, n, fillvalue=None, step=1):
    """Return a sliding window of width *n* over the given iterable.
    >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
    >>> list(all_windows)
    [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
    When the window is larger than the iterable, *fillvalue* is used in place
    of missing values:
    >>> list(windowed([1, 2, 3], 4))
    [(1, 2, 3, None)]
    Each window will advance in increments of *step*:
    >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
    [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
    To slide into the iterable's items, use :func:`chain` to add filler items
    to the left:
    >>> iterable = [1, 2, 3, 4]
    >>> n = 3
    >>> padding = [None] * (n - 1)
    >>> list(windowed(chain(padding, iterable), 3))
    [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
    """
    if n < 0:
        raise ValueError('n must be >= 0')
    if n == 0:
        # A width-0 window: exactly one empty tuple.
        yield tuple()
        return
    if step < 1:
        raise ValueError('step must be >= 1')
    # The deque automatically discards the oldest item once full, so it
    # always holds the current window's contents.
    window = deque(maxlen=n)
    i = n
    # `i` counts down to the next emission point: first after the window
    # fills (n items), then after every `step` additional items.
    for _ in map(window.append, seq):
        i -= 1
        if not i:
            i = step
            yield tuple(window)
    size = len(window)
    if size == 0:
        return
    elif size < n:
        # Input shorter than one window: emit a single padded window.
        yield tuple(chain(window, repeat(fillvalue, n - size)))
    elif 0 < i < min(step, n):
        # A partial final step remains: pad it out with fillvalue.
        window += (fillvalue,) * i
        yield tuple(window)
def substrings(iterable):
    """Yield every contiguous sub-sequence of *iterable*, shortest first.

    >>> [''.join(s) for s in substrings('more')]
    ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']

    Non-string iterables can be subdivided as well:

    >>> list(substrings([0, 1, 2]))
    [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
    """
    # Emit the length-1 substrings while buffering the input.
    collected = []
    for element in iterable:
        collected.append(element)
        yield (element,)
    items = tuple(collected)
    total = len(items)
    # Then every longer run, by increasing length.
    for length in range(2, total + 1):
        for start in range(total - length + 1):
            yield items[start : start + length]
def substrings_indexes(seq, reverse=False):
    """Yield ``(substr, i, j)`` tuples for every substring of *seq*, where
    ``substr == seq[i:j]``.  Requires a sliceable *seq* (e.g. ``str``).

    >>> for item in substrings_indexes('more'):
    ...    print(item)
    ('m', 0, 1)
    ('o', 1, 2)
    ('r', 2, 3)
    ('e', 3, 4)
    ('mo', 0, 2)
    ('or', 1, 3)
    ('re', 2, 4)
    ('mor', 0, 3)
    ('ore', 1, 4)
    ('more', 0, 4)

    Set *reverse* to ``True`` to yield the same items longest-first.
    """
    lengths = range(1, len(seq) + 1)
    if reverse:
        lengths = reversed(lengths)
    return (
        (seq[start : start + length], start, start + length)
        for length in lengths
        for start in range(len(seq) - length + 1)
    )
class bucket:
    """Wrap *iterable* and return an object that buckets the iterable into
    child iterables based on a *key* function.
    >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
    >>> s = bucket(iterable, key=lambda x: x[0])  # Bucket by 1st character
    >>> sorted(list(s))  # Get the keys
    ['a', 'b', 'c']
    >>> a_iterable = s['a']
    >>> next(a_iterable)
    'a1'
    >>> next(a_iterable)
    'a2'
    >>> list(s['b'])
    ['b1', 'b2', 'b3']
    The original iterable will be advanced and its items will be cached until
    they are used by the child iterables. This may require significant storage.
    By default, attempting to select a bucket to which no items belong will
    exhaust the iterable and cache all values.
    If you specify a *validator* function, selected buckets will instead be
    checked against it.
    >>> from itertools import count
    >>> it = count(1, 2)  # Infinite sequence of odd numbers
    >>> key = lambda x: x % 10  # Bucket by last digit
    >>> validator = lambda x: x in {1, 3, 5, 7, 9}  # Odd digits only
    >>> s = bucket(it, key=key, validator=validator)
    >>> 2 in s
    False
    >>> list(s[2])
    []
    """
    def __init__(self, iterable, key, validator=None):
        self._it = iter(iterable)
        self._key = key
        # Per-key FIFO of items pulled from the source while searching for
        # a different key.
        self._cache = defaultdict(deque)
        # Without a validator, every key is considered possible.
        self._validator = validator or (lambda x: True)
    def __contains__(self, value):
        if not self._validator(value):
            return False
        try:
            # Probe the bucket by pulling one item from it.
            item = next(self[value])
        except StopIteration:
            return False
        else:
            # Put the probed item back at the front so it is not lost.
            self._cache[value].appendleft(item)
            return True
    def _get_values(self, value):
        """
        Helper to yield items from the parent iterator that match *value*.
        Items that don't match are stored in the local cache as they
        are encountered.
        """
        while True:
            # If we've cached some items that match the target value, emit
            # the first one and evict it from the cache.
            if self._cache[value]:
                yield self._cache[value].popleft()
            # Otherwise we need to advance the parent iterator to search for
            # a matching item, caching the rest.
            else:
                while True:
                    try:
                        item = next(self._it)
                    except StopIteration:
                        return
                    item_value = self._key(item)
                    if item_value == value:
                        yield item
                        break
                    elif self._validator(item_value):
                        # Only cache items whose key the validator accepts.
                        self._cache[item_value].append(item)
    def __iter__(self):
        # Iterating the bucket yields the set of keys; this exhausts the
        # source iterable, caching every item under its key.
        for item in self._it:
            item_value = self._key(item)
            if self._validator(item_value):
                self._cache[item_value].append(item)
        yield from self._cache.keys()
    def __getitem__(self, value):
        # Invalid keys yield nothing rather than scanning the source.
        if not self._validator(value):
            return iter(())
        return self._get_values(value)
def spy(iterable, n=1):
    """Return a 2-tuple of (a list of the first *n* items of *iterable*, an
    iterator equivalent to the whole of *iterable*) — a non-destructive
    look-ahead.

    >>> iterable = 'abcdefg'
    >>> head, iterable = spy(iterable)
    >>> head
    ['a']
    >>> list(iterable)
    ['a', 'b', 'c', 'd', 'e', 'f', 'g']

    Unpacking retrieves the items directly:

    >>> (first, second), iterable = spy('abcdefg', 2)
    >>> first
    'a'
    >>> second
    'b'

    Requesting more items than exist returns only what is available:

    >>> head, iterable = spy([1, 2, 3, 4, 5], 10)
    >>> head
    [1, 2, 3, 4, 5]
    >>> list(iterable)
    [1, 2, 3, 4, 5]
    """
    iterator = iter(iterable)
    # The consumed prefix is both returned (as a copy) and chained back in
    # front of the remaining items.
    head = take(n, iterator)
    return head[:], chain(head, iterator)
def interleave(*iterables):
    """Yield one item from each iterable in turn until the shortest input
    is exhausted.

    >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
    [1, 4, 6, 2, 5, 7]

    See :func:`interleave_longest` to continue past the shortest input.
    """
    # zip() stops at the shortest input; flattening its rounds interleaves.
    rounds = zip(*iterables)
    return (item for round_ in rounds for item in round_)
def interleave_longest(*iterables):
    """Yield one item from each iterable in turn, skipping inputs that are
    already exhausted.

    >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
    [1, 4, 6, 2, 5, 7, 3, 8]

    Produces the same output as :func:`roundrobin`, but may perform better
    for some inputs (in particular when the number of iterables is large).
    """
    # Pad exhausted inputs with a sentinel, then filter the sentinels out.
    padded_rounds = zip_longest(*iterables, fillvalue=_marker)
    return (
        item
        for round_ in padded_rounds
        for item in round_
        if item is not _marker
    )
def interleave_evenly(iterables, lengths=None):
    """
    Interleave multiple iterables so that their elements are evenly distributed
    throughout the output sequence.
    >>> iterables = [1, 2, 3, 4, 5], ['a', 'b']
    >>> list(interleave_evenly(iterables))
    [1, 2, 'a', 3, 4, 'b', 5]
    >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]]
    >>> list(interleave_evenly(iterables))
    [1, 6, 4, 2, 7, 3, 8, 5]
    This function requires iterables of known length. Iterables without
    ``__len__()`` can be used by manually specifying lengths with *lengths*:
    >>> from itertools import combinations, repeat
    >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']]
    >>> lengths = [4 * (4 - 1) // 2, 3]
    >>> list(interleave_evenly(iterables, lengths=lengths))
    [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c']
    Based on Bresenham's algorithm.
    """
    if lengths is None:
        try:
            lengths = [len(it) for it in iterables]
        except TypeError:
            raise ValueError(
                'Iterable lengths could not be determined automatically. '
                'Specify them with the lengths keyword.'
            )
    elif len(iterables) != len(lengths):
        raise ValueError('Mismatching number of iterables and lengths.')
    dims = len(lengths)
    # sort iterables by length, descending
    lengths_permute = sorted(
        range(dims), key=lambda i: lengths[i], reverse=True
    )
    lengths_desc = [lengths[i] for i in lengths_permute]
    iters_desc = [iter(iterables[i]) for i in lengths_permute]
    # the longest iterable is the primary one (Bresenham: the longest
    # distance along an axis)
    delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:]
    iter_primary, iters_secondary = iters_desc[0], iters_desc[1:]
    # One accumulated error term per secondary iterable; the initial value
    # staggers each stream's first emission.
    errors = [delta_primary // dims] * len(deltas_secondary)
    # Total number of items still to emit across all inputs.
    to_yield = sum(lengths)
    while to_yield:
        yield next(iter_primary)
        to_yield -= 1
        # update errors for each secondary iterable
        errors = [e - delta for e, delta in zip(errors, deltas_secondary)]
        # those iterables for which the error is negative are yielded
        # ("diagonal step" in Bresenham)
        for i, e in enumerate(errors):
            if e < 0:
                yield next(iters_secondary[i])
                to_yield -= 1
                errors[i] += delta_primary
def collapse(iterable, base_type=None, levels=None):
    """Flatten an iterable with multiple levels of nesting (e.g., a list of
    lists of tuples) into non-iterable types.

    >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
    >>> list(collapse(iterable))
    [1, 2, 3, 4, 5, 6]

    Binary and text strings are never collapsed.  Pass *base_type* to keep
    other types intact as well:

    >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
    >>> list(collapse(iterable, base_type=tuple))
    ['ab', ('cd', 'ef'), 'gh', 'ij']

    Pass *levels* to stop flattening after that many levels of nesting:

    >>> iterable = [('a', ['b']), ('c', ['d'])]
    >>> list(collapse(iterable))  # Fully flattened
    ['a', 'b', 'c', 'd']
    >>> list(collapse(iterable, levels=1))  # Only one level flattened
    ['a', ['b'], 'c', ['d']]
    """

    def walk(node, level):
        # Stop descending past the requested depth.
        if levels is not None and level > levels:
            yield node
            return
        # Strings/bytes are atoms, as is anything matching base_type.
        if isinstance(node, (str, bytes)):
            yield node
            return
        if base_type is not None and isinstance(node, base_type):
            yield node
            return
        try:
            children = iter(node)
        except TypeError:
            # Not iterable: a leaf value.
            yield node
            return
        for child in children:
            yield from walk(child, level + 1)

    yield from walk(iterable, 0)
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
    """Invoke *func* on each item in *iterable* (or on each *chunk_size*
    group of items) before yielding the item; *func*'s return value is
    discarded.

    *before* and *after* are optional zero-argument callables run before
    iteration starts and after it ends (including on early exit),
    respectively.  Useful for logging, progress bars, or anything else
    that is not functionally "pure".

    Emitting a status message:

    >>> from more_itertools import consume
    >>> func = lambda item: print('Received {}'.format(item))
    >>> consume(side_effect(func, range(2)))
    Received 0
    Received 1

    Operating on chunks of items:

    >>> pair_sums = []
    >>> func = lambda chunk: pair_sums.append(sum(chunk))
    >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
    [0, 1, 2, 3, 4, 5]
    >>> list(pair_sums)
    [1, 5, 9]
    """
    try:
        if before is not None:
            before()
        if chunk_size is not None:
            for group in chunked(iterable, chunk_size):
                func(group)
                yield from group
        else:
            for item in iterable:
                func(item)
                yield item
    finally:
        # Runs even when the consumer abandons the generator early.
        if after is not None:
            after()
def sliced(seq, n, strict=False):
    """Yield slices of length *n* from the sequence *seq*.

    >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
    [(1, 2, 3), (4, 5, 6)]

    When ``len(seq)`` is not a multiple of *n*, the final slice is shorter;
    with *strict* set to ``True`` a short final slice raises ``ValueError``
    before being yielded.

    Only works for sliceable iterables; see :func:`chunked` otherwise.
    """
    # Slice at every multiple of n until a slice comes back empty.
    slices = takewhile(len, (seq[start : start + n] for start in count(0, n)))
    if not strict:
        return slices

    def checked():
        for piece in slices:
            if len(piece) != n:
                raise ValueError("seq is not divisible by n.")
            yield piece

    return iter(checked())
def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
    """Yield lists of items from *iterable*, splitting on each item for
    which *pred* returns ``True``.

    >>> list(split_at('abcdcba', lambda x: x == 'b'))
    [['a'], ['c', 'd', 'c'], ['a']]
    >>> list(split_at(range(10), lambda n: n % 2 == 1))
    [[0], [2], [4], [6], [8], []]

    At most *maxsplit* splits are done (``-1``, the default, means no
    limit):

    >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
    [[0], [2], [4, 5, 6, 7, 8, 9]]

    Separators are dropped unless *keep_separator* is ``True``, in which
    case each is yielded as its own single-item list:

    >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
    [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    group = []
    iterator = iter(iterable)
    for item in iterator:
        if not pred(item):
            group.append(item)
            continue
        yield group
        if keep_separator:
            yield [item]
        if maxsplit == 1:
            # Budget exhausted: everything left forms the final group.
            yield list(iterator)
            return
        group = []
        maxsplit -= 1
    yield group
def split_before(iterable, pred, maxsplit=-1):
    """Yield lists of items from *iterable*, starting a new list just
    before each item for which *pred* returns ``True``.

    >>> list(split_before('OneTwo', lambda s: s.isupper()))
    [['O', 'n', 'e'], ['T', 'w', 'o']]
    >>> list(split_before(range(10), lambda n: n % 3 == 0))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]

    At most *maxsplit* splits are done (``-1``, the default, means no
    limit):

    >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    chunk = []
    iterator = iter(iterable)
    for element in iterator:
        # Only split when a chunk is already in progress — a leading
        # separator does not create an empty first chunk.
        if pred(element) and chunk:
            yield chunk
            if maxsplit == 1:
                yield [element] + list(iterator)
                return
            maxsplit -= 1
            chunk = [element]
        else:
            chunk.append(element)
    if chunk:
        yield chunk
def split_after(iterable, pred, maxsplit=-1):
    """Yield lists of items from *iterable*, ending each list with an item
    for which *pred* returns ``True``.

    >>> list(split_after('one1two2', lambda s: s.isdigit()))
    [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
    >>> list(split_after(range(10), lambda n: n % 3 == 0))
    [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]

    At most *maxsplit* splits are done (``-1``, the default, means no
    limit):

    >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
    [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    chunk = []
    iterator = iter(iterable)
    for element in iterator:
        chunk.append(element)
        if not (pred(element) and chunk):
            continue
        yield chunk
        if maxsplit == 1:
            # Budget exhausted: the rest (if any) forms the final chunk.
            remainder = list(iterator)
            if remainder:
                yield remainder
            return
        chunk = []
        maxsplit -= 1
    if chunk:
        yield chunk
def split_when(iterable, pred, maxsplit=-1):
    """Split *iterable* into pieces based on the output of *pred*.

    *pred* should be a function that takes successive pairs of items and
    returns ``True`` if the iterable should be split in between them.

    For example, to find runs of increasing numbers, split the iterable when
    element ``i`` is larger than element ``i + 1``:

        >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
        [[1, 2, 3, 3], [2, 5], [2, 4], [2]]

    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
    then there is no limit on the number of splits:

        >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
        ...                 lambda x, y: x > y, maxsplit=2))
        [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
    """
    if maxsplit == 0:
        # Splitting disabled: emit everything as one group.
        yield list(iterable)
        return
    stream = iter(iterable)
    try:
        previous = next(stream)
    except StopIteration:
        return
    group = [previous]
    for current in stream:
        # Split between the previous item and this one if pred says so.
        if pred(previous, current):
            yield group
            if maxsplit == 1:
                # Last allowed split: the current item begins the final
                # group, which absorbs the remainder of the stream.
                yield [current, *stream]
                return
            maxsplit -= 1
            group = []
        group.append(current)
        previous = current
    yield group
def split_into(iterable, sizes):
    """Yield a list of sequential items from *iterable* of length 'n' for each
    integer 'n' in *sizes*.

        >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
        [[1], [2, 3], [4, 5, 6]]

    If the sum of *sizes* is smaller than the length of *iterable*, the
    remaining items of *iterable* are not returned:

        >>> list(split_into([1,2,3,4,5,6], [2,3]))
        [[1, 2], [3, 4, 5]]

    If the sum of *sizes* is larger than the length of *iterable*, fewer items
    will be returned in the iteration that overruns *iterable* and further
    lists will be empty:

        >>> list(split_into([1,2,3,4], [1,2,3,4]))
        [[1], [2, 3], [4], []]

    When a ``None`` object is encountered in *sizes*, the returned list will
    contain items up to the end of *iterable*, the same way that
    :func:`itertools.islice` does:

        >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
        [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]

    :func:`split_into` can be useful for grouping a series of items where the
    sizes of the groups are not uniform, e.g. columns of a table where several
    adjacent columns represent one multi-part feature.
    """
    # Work on an iterator so successive islice calls consume it in order,
    # even if the caller passed a generator.
    source = iter(iterable)
    for size in sizes:
        if size is None:
            # None means "everything that's left", mirroring islice.
            yield list(source)
            return
        yield list(islice(source, size))
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
    """Yield the elements from *iterable*, followed by *fillvalue*, such that
    at least *n* items are emitted.

        >>> list(padded([1, 2, 3], '?', 5))
        [1, 2, 3, '?', '?']

    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
    number of items emitted is a multiple of *n*:

        >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
        [1, 2, 3, 4, None, None]

    If *n* is ``None``, *fillvalue* will be emitted indefinitely.
    """
    source = iter(iterable)
    if n is None:
        # No target length: pad forever after the source is exhausted.
        yield from chain(source, repeat(fillvalue))
        return
    if n < 1:
        raise ValueError('n must be at least 1')
    emitted = 0
    for element in source:
        emitted += 1
        yield element
    if next_multiple:
        # Pad up to the next multiple of n (possibly zero padding).
        deficit = (n - emitted) % n
    else:
        deficit = n - emitted
    # repeat() with a negative count yields nothing, so overlong input is fine.
    yield from repeat(fillvalue, deficit)
def repeat_each(iterable, n=2):
    """Repeat each element in *iterable* *n* times.

        >>> list(repeat_each('ABC', 3))
        ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
    """
    # Lazily expand each element into a run of n copies.
    return chain.from_iterable(repeat(element, n) for element in iterable)
def repeat_last(iterable, default=None):
    """After the *iterable* is exhausted, keep yielding its last element.

        >>> list(islice(repeat_last(range(3)), 5))
        [0, 1, 2, 2, 2]

    If the iterable is empty, yield *default* forever:

        >>> list(islice(repeat_last(range(0), 42), 5))
        [42, 42, 42, 42, 42]
    """
    # A fresh local sentinel distinguishes "iterable was empty" from any
    # value the iterable could actually produce.
    sentinel = last = object()
    for last in iterable:
        yield last
    yield from repeat(default if last is sentinel else last)
def distribute(n, iterable):
    """Distribute the items from *iterable* among *n* smaller iterables.

        >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
        >>> list(group_1)
        [1, 3, 5]
        >>> list(group_2)
        [2, 4, 6]

    If the length of *iterable* is not evenly divisible by *n*, then the
    length of the returned iterables will not be identical:

        >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
        >>> [list(c) for c in children]
        [[1, 4, 7], [2, 5], [3, 6]]

    If the length of *iterable* is smaller than *n*, then the last returned
    iterables will be empty:

        >>> children = distribute(5, [1, 2, 3])
        >>> [list(c) for c in children]
        [[1], [2], [3], [], []]

    This function uses :func:`itertools.tee` and may require significant
    storage. If you need the order of items in the smaller iterables to match
    the original iterable, see :func:`divide`.
    """
    if n < 1:
        raise ValueError('n must be at least 1')
    # Child k takes items k, k + n, k + 2n, ... from its own tee'd copy.
    copies = tee(iterable, n)
    result = []
    for offset, child in enumerate(copies):
        result.append(islice(child, offset, None, n))
    return result
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
    """Yield tuples whose elements are offset from *iterable*.

    The amount by which the `i`-th item in each tuple is offset is given by
    the `i`-th item in *offsets*.

        >>> list(stagger([0, 1, 2, 3]))
        [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
        >>> list(stagger(range(8), offsets=(0, 2, 4)))
        [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]

    By default, the sequence ends when the final element of a tuple is the
    last item in the iterable. To continue until the first element of a tuple
    is the last item in the iterable, set *longest* to ``True``:

        >>> list(stagger([0, 1, 2, 3], longest=True))
        [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]

    By default, ``None`` will be used to replace offsets beyond the end of the
    sequence. Specify *fillvalue* to use some other value.
    """
    # One tee'd copy of the input per requested offset; zip_offset does the
    # actual shifting and zipping.
    copies = tee(iterable, len(offsets))
    return zip_offset(
        *copies, offsets=offsets, longest=longest, fillvalue=fillvalue
    )
def zip_equal(*iterables):
    """``zip`` the input *iterables* together, but raise
    ``UnequalIterablesError`` if they aren't all the same length.
    >>> it_1 = range(3)
    >>> it_2 = iter('abc')
    >>> list(zip_equal(it_1, it_2))
    [(0, 'a'), (1, 'b'), (2, 'c')]
    >>> it_1 = range(3)
    >>> it_2 = iter('abcd')
    >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    more_itertools.more.UnequalIterablesError: Iterables have different
    lengths
    """
    # 0x30A00A6 is sys.hexversion for Python 3.10.0a6, the first release
    # where the builtin zip() accepts strict=True, making this redundant.
    if hexversion >= 0x30A00A6:
        warnings.warn(
            (
                'zip_equal will be removed in a future version of '
                'more-itertools. Use the builtin zip function with '
                'strict=True instead.'
            ),
            DeprecationWarning,
        )
    # _zip_equal (module-private) does the zipping and raises
    # UnequalIterablesError on a length mismatch.
    return _zip_equal(*iterables)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
    """``zip`` the input *iterables* together, but offset the `i`-th iterable
    by the `i`-th item in *offsets*.

        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]

    This can be used as a lightweight alternative to SciPy or pandas to
    analyze data sets in which some series have a lead or lag relationship.

    By default, the sequence ends when the shortest iterable is exhausted.
    To continue until the longest iterable is exhausted, set *longest* to
    ``True``:

        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]

    By default, ``None`` will be used to replace offsets beyond the end of the
    sequence. Specify *fillvalue* to use some other value.
    """
    if len(iterables) != len(offsets):
        raise ValueError("Number of iterables and offsets didn't match")
    shifted = []
    for source, offset in zip(iterables, offsets):
        if offset < 0:
            # Negative offset: pad the front so this iterable lags.
            shifted.append(chain(repeat(fillvalue, -offset), source))
        elif offset > 0:
            # Positive offset: skip the first items so this iterable leads.
            shifted.append(islice(source, offset, None))
        else:
            shifted.append(source)
    if longest:
        return zip_longest(*shifted, fillvalue=fillvalue)
    return zip(*shifted)
def sort_together(iterables, key_list=(0,), key=None, reverse=False):
    """Return the input iterables sorted together, with *key_list* as the
    priority for sorting. All iterables are trimmed to the length of the
    shortest one.

    This can be used like the sorting function in a spreadsheet. If each
    iterable represents a column of data, the key list determines which
    columns are used for sorting.

    By default, all iterables are sorted using the ``0``-th iterable:

        >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
        >>> sort_together(iterables)
        [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]

    Set a different key list to sort according to another iterable.
    Specifying multiple keys dictates how ties are broken:

        >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
        >>> sort_together(iterables, key_list=(1, 2))
        [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]

    To sort by a function of the elements of the iterable, pass a *key*
    function. Its arguments are the elements of the iterables corresponding
    to the key list:

        >>> names = ('a', 'b', 'c')
        >>> lengths = (1, 2, 3)
        >>> widths = (5, 2, 1)
        >>> def area(length, width):
        ...     return length * width
        >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
        [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]

    Set *reverse* to ``True`` to sort in descending order.

        >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
        [(3, 2, 1), ('a', 'b', 'c')]
    """
    if key is None:
        # No key function: sort rows directly by the selected columns.
        sort_key = itemgetter(*key_list)
    else:
        key_list = list(key_list)
        if len(key_list) == 1:
            # Single key column: pass that one element to the key function.
            index = key_list[0]

            def sort_key(row):
                return key(row[index])

        else:
            # Multiple key columns: unpack them as *args to the key function.
            select = itemgetter(*key_list)

            def sort_key(row):
                return key(*select(row))

    # Transpose to rows, sort them, then transpose back to columns.
    transposed = zip(*iterables)
    return list(zip(*sorted(transposed, key=sort_key, reverse=reverse)))
def unzip(iterable):
    """The inverse of :func:`zip`, this function disaggregates the elements
    of the zipped *iterable*.
    The ``i``-th iterable contains the ``i``-th element from each element
    of the zipped iterable. The first element is used to determine the
    length of the remaining elements.
    >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    >>> letters, numbers = unzip(iterable)
    >>> list(letters)
    ['a', 'b', 'c', 'd']
    >>> list(numbers)
    [1, 2, 3, 4]
    This is similar to using ``zip(*iterable)``, but it avoids reading
    *iterable* into memory. Note, however, that this function uses
    :func:`itertools.tee` and thus may require significant storage.
    """
    # Peek at the first element (without consuming it) to learn how many
    # output iterables are needed.
    head, iterable = spy(iter(iterable))
    if not head:
        # empty iterable, e.g. zip([], [], [])
        return ()
    # spy returns a one-length iterable as head
    head = head[0]
    # One tee'd copy of the input per output position.
    iterables = tee(iterable, len(head))

    # NOTE: this local function deliberately shadows operator.itemgetter
    # within unzip; unlike the operator version, it stops iteration on a
    # missing index instead of raising IndexError.
    def itemgetter(i):
        def getter(obj):
            try:
                return obj[i]
            except IndexError:
                # basically if we have an iterable like
                # iter([(1, 2, 3), (4, 5), (6,)])
                # the second unzipped iterable would fail at the third tuple
                # since it would try to access tup[1]
                # same with the third unzipped iterable and the second tuple
                # to support these "improperly zipped" iterables,
                # we create a custom itemgetter
                # which just stops the unzipped iterables
                # at first length mismatch
                # map() is a C-level iterator, not a generator, so this
                # StopIteration propagates out of it and simply ends the
                # corresponding unzipped stream.
                raise StopIteration
        return getter

    return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
def divide(n, iterable):
    """Divide the elements from *iterable* into *n* parts, maintaining
    order.

        >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
        >>> list(group_1)
        [1, 2, 3]
        >>> list(group_2)
        [4, 5, 6]

    If the length of *iterable* is not evenly divisible by *n*, then the
    length of the returned iterables will not be identical:

        >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
        >>> [list(c) for c in children]
        [[1, 2, 3], [4, 5], [6, 7]]

    If the length of the iterable is smaller than n, then the last returned
    iterables will be empty:

        >>> children = divide(5, [1, 2, 3])
        >>> [list(c) for c in children]
        [[1], [2], [3], [], []]

    This function will exhaust the iterable before returning and may require
    significant storage. If order is not important, see :func:`distribute`,
    which does not first pull the iterable into memory.
    """
    if n < 1:
        raise ValueError('n must be at least 1')
    # Sliceable inputs are used as-is; anything else is materialized.
    try:
        iterable[:0]
    except TypeError:
        seq = tuple(iterable)
    else:
        seq = iterable
    # The first `leftover` parts each get one extra element.
    size, leftover = divmod(len(seq), n)
    parts = []
    begin = 0
    for part_index in range(n):
        end = begin + size + (1 if part_index < leftover else 0)
        parts.append(iter(seq[begin:end]))
        begin = end
    return parts
def always_iterable(obj, base_type=(str, bytes)):
    """If *obj* is iterable, return an iterator over its items:

        >>> obj = (1, 2, 3)
        >>> list(always_iterable(obj))
        [1, 2, 3]

    If *obj* is not iterable, return a one-item iterable containing *obj*:

        >>> obj = 1
        >>> list(always_iterable(obj))
        [1]

    If *obj* is ``None``, return an empty iterable:

        >>> obj = None
        >>> list(always_iterable(None))
        []

    By default, binary and text strings are not considered iterable:

        >>> obj = 'foo'
        >>> list(always_iterable(obj))
        ['foo']

    If *base_type* is set, objects for which ``isinstance(obj, base_type)``
    returns ``True`` won't be considered iterable.

        >>> obj = {'a': 1}
        >>> list(always_iterable(obj))  # Iterate over the dict's keys
        ['a']
        >>> list(always_iterable(obj, base_type=dict))  # Treat dicts as a unit
        [{'a': 1}]

    Set *base_type* to ``None`` to avoid any special handling and treat
    objects Python considers iterable as iterable:

        >>> obj = 'foo'
        >>> list(always_iterable(obj, base_type=None))
        ['f', 'o', 'o']
    """
    if obj is None:
        return iter(())
    if base_type is not None and isinstance(obj, base_type):
        # Treat as a single atom even if it happens to be iterable.
        return iter((obj,))
    try:
        return iter(obj)
    except TypeError:
        # Not iterable at all: wrap it in a one-item tuple.
        return iter((obj,))
def adjacent(predicate, iterable, distance=1):
    """Return an iterable over `(bool, item)` tuples where the `item` is
    drawn from *iterable* and the `bool` indicates whether that item
    satisfies the *predicate* or is adjacent to an item that does.

    For example, to find whether items are adjacent to a ``3``:

        >>> list(adjacent(lambda x: x == 3, range(6)))
        [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]

    Set *distance* to change what counts as adjacent. For example, to find
    whether items are two places away from a ``3``:

        >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
        [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]

    This is useful for contextualizing the results of a search function.
    For example, a code comparison tool might want to identify lines that
    have changed, but also surrounding lines to give the viewer of the diff
    context.

    The predicate function will only be called once for each item in the
    iterable.

    See also :func:`groupby_transform`, which can be used with this function
    to group ranges of items with the same `bool` value.
    """
    # Allow distance=0 mainly for testing that it reproduces results with map()
    if distance < 0:
        raise ValueError('distance must be at least 0')
    items, originals = tee(iterable)
    # Pad the boolean stream on both ends so edge items have full windows.
    pad = [False] * distance
    flags = chain(pad, map(predicate, items), pad)
    # An item is "adjacent" if any flag in its surrounding window is True.
    nearby = map(any, windowed(flags, 2 * distance + 1))
    return zip(nearby, originals)
def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
    """An extension of :func:`itertools.groupby` that can apply
    transformations to the grouped data.

    * *keyfunc* is a function computing a key value for each item in
      *iterable*
    * *valuefunc* is a function that transforms the individual items from
      *iterable* after grouping
    * *reducefunc* is a function that transforms each group of items

        >>> iterable = 'aAAbBBcCC'
        >>> keyfunc = lambda k: k.upper()
        >>> valuefunc = lambda v: v.lower()
        >>> reducefunc = lambda g: ''.join(g)
        >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
        [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]

    Each optional argument defaults to an identity function if not specified.

    :func:`groupby_transform` is useful when grouping elements of an iterable
    using a separate iterable as the key. To do this, :func:`zip` the
    iterables and pass a *keyfunc* that extracts the first element and a
    *valuefunc* that extracts the second element:

        >>> from operator import itemgetter
        >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
        >>> values = 'abcdefghi'
        >>> iterable = zip(keys, values)
        >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
        >>> [(k, ''.join(g)) for k, g in grouper]
        [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]

    Note that the order of items in the iterable is significant.
    Only adjacent items are grouped together, so if you don't want any
    duplicate groups, you should sort the iterable by the key function.
    """
    groups = groupby(iterable, keyfunc)
    if valuefunc:
        # Transform the members of each group lazily.
        groups = ((key, map(valuefunc, group)) for key, group in groups)
    if reducefunc:
        # Collapse each group to a single value.
        groups = ((key, reducefunc(group)) for key, group in groups)
    return groups
class numeric_range(abc.Sequence, abc.Hashable):
    """An extension of the built-in ``range()`` function whose arguments can
    be any orderable numeric type.
    With only *stop* specified, *start* defaults to ``0`` and *step*
    defaults to ``1``. The output items will match the type of *stop*:
    >>> list(numeric_range(3.5))
    [0.0, 1.0, 2.0, 3.0]
    With only *start* and *stop* specified, *step* defaults to ``1``. The
    output items will match the type of *start*:
    >>> from decimal import Decimal
    >>> start = Decimal('2.1')
    >>> stop = Decimal('5.1')
    >>> list(numeric_range(start, stop))
    [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
    With *start*, *stop*, and *step* specified the output items will match
    the type of ``start + step``:
    >>> from fractions import Fraction
    >>> start = Fraction(1, 2)  # Start at 1/2
    >>> stop = Fraction(5, 2)  # End at 5/2
    >>> step = Fraction(1, 2)  # Count by 1/2
    >>> list(numeric_range(start, stop, step))
    [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
    If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
    >>> list(numeric_range(3, -1, -1.0))
    [3.0, 2.0, 1.0, 0.0]
    Be aware of the limitations of floating point numbers; the representation
    of the yielded numbers may be surprising.
    ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
    is a ``datetime.timedelta`` object:
    >>> import datetime
    >>> start = datetime.datetime(2019, 1, 1)
    >>> stop = datetime.datetime(2019, 1, 3)
    >>> step = datetime.timedelta(days=1)
    >>> items = iter(numeric_range(start, stop, step))
    >>> next(items)
    datetime.datetime(2019, 1, 1, 0, 0)
    >>> next(items)
    datetime.datetime(2019, 1, 2, 0, 0)
    """
    # Shared hash for all empty ranges, so they hash equal as well as
    # compare equal (see __eq__ / __hash__).
    _EMPTY_HASH = hash(range(0, 0))
    def __init__(self, *args):
        # Mirror range()'s (stop) / (start, stop) / (start, stop, step)
        # signatures. Defaults are derived from the argument types so that
        # Decimal, Fraction, datetime, etc. all work.
        argc = len(args)
        if argc == 1:
            (self._stop,) = args
            self._start = type(self._stop)(0)
            self._step = type(self._stop - self._start)(1)
        elif argc == 2:
            self._start, self._stop = args
            self._step = type(self._stop - self._start)(1)
        elif argc == 3:
            self._start, self._stop, self._step = args
        elif argc == 0:
            raise TypeError(
                'numeric_range expected at least '
                '1 argument, got {}'.format(argc)
            )
        else:
            raise TypeError(
                'numeric_range expected at most '
                '3 arguments, got {}'.format(argc)
            )
        # A zero of the step's type, used for sign tests and remainders.
        self._zero = type(self._step)(0)
        if self._step == self._zero:
            raise ValueError('numeric_range() arg 3 must not be zero')
        self._growing = self._step > self._zero
    def __bool__(self):
        # Non-empty iff at least one value lies in the half-open interval.
        if self._growing:
            return self._start < self._stop
        else:
            return self._start > self._stop
    def __contains__(self, elem):
        # Membership requires being within the half-open interval AND
        # landing exactly on a whole multiple of the step.
        if self._growing:
            if self._start <= elem < self._stop:
                return (elem - self._start) % self._step == self._zero
        else:
            if self._start >= elem > self._stop:
                return (self._start - elem) % (-self._step) == self._zero
        return False
    def __eq__(self, other):
        # Ranges compare equal when they yield the same values; all empty
        # ranges are equal regardless of their parameters (like range()).
        if isinstance(other, numeric_range):
            empty_self = not bool(self)
            empty_other = not bool(other)
            if empty_self or empty_other:
                return empty_self and empty_other  # True if both empty
            else:
                # Same first element, same step, same last element.
                return (
                    self._start == other._start
                    and self._step == other._step
                    and self._get_by_index(-1) == other._get_by_index(-1)
                )
        else:
            return False
    def __getitem__(self, key):
        # Integer indexing delegates to _get_by_index; slicing produces a
        # new numeric_range without materializing any values.
        if isinstance(key, int):
            return self._get_by_index(key)
        elif isinstance(key, slice):
            step = self._step if key.step is None else key.step * self._step
            # Clamp the slice's start/stop into our bounds, translating
            # positions into actual values.
            if key.start is None or key.start <= -self._len:
                start = self._start
            elif key.start >= self._len:
                start = self._stop
            else: # -self._len < key.start < self._len
                start = self._get_by_index(key.start)
            if key.stop is None or key.stop >= self._len:
                stop = self._stop
            elif key.stop <= -self._len:
                stop = self._start
            else: # -self._len < key.stop < self._len
                stop = self._get_by_index(key.stop)
            return numeric_range(start, stop, step)
        else:
            raise TypeError(
                'numeric range indices must be '
                'integers or slices, not {}'.format(type(key).__name__)
            )
    def __hash__(self):
        # Consistent with __eq__: first element, last element, and step.
        if self:
            return hash((self._start, self._get_by_index(-1), self._step))
        else:
            return self._EMPTY_HASH
    def __iter__(self):
        # Generate start, start + step, ... and cut off at the stop bound.
        values = (self._start + (n * self._step) for n in count())
        if self._growing:
            # partial(gt, stop)(v) is stop > v, i.e. keep while v < stop.
            return takewhile(partial(gt, self._stop), values)
        else:
            # partial(lt, stop)(v) is stop < v, i.e. keep while v > stop.
            return takewhile(partial(lt, self._stop), values)
    def __len__(self):
        return self._len
    @cached_property
    def _len(self):
        # Normalize to a growing range, then count with euclidean division.
        if self._growing:
            start = self._start
            stop = self._stop
            step = self._step
        else:
            start = self._stop
            stop = self._start
            step = -self._step
        distance = stop - start
        if distance <= self._zero:
            return 0
        else:  # distance > 0 and step > 0: regular euclidean division
            q, r = divmod(distance, step)
            # A nonzero remainder means one extra, partially-covered step.
            return int(q) + int(r != self._zero)
    def __reduce__(self):
        # Pickle support: rebuild from the three defining parameters.
        return numeric_range, (self._start, self._stop, self._step)
    def __repr__(self):
        if self._step == 1:
            return "numeric_range({}, {})".format(
                repr(self._start), repr(self._stop)
            )
        else:
            return "numeric_range({}, {}, {})".format(
                repr(self._start), repr(self._stop), repr(self._step)
            )
    def __reversed__(self):
        # Walk from the last element back past the start with a negated step.
        return iter(
            numeric_range(
                self._get_by_index(-1), self._start - self._step, -self._step
            )
        )
    def count(self, value):
        """Return the number of occurrences of *value* (always 0 or 1)."""
        return int(value in self)
    def index(self, value):
        """Return the position of *value*, or raise ``ValueError``."""
        if self._growing:
            if self._start <= value < self._stop:
                q, r = divmod(value - self._start, self._step)
                if r == self._zero:
                    return int(q)
        else:
            if self._start >= value > self._stop:
                q, r = divmod(self._start - value, -self._step)
                if r == self._zero:
                    return int(q)
        raise ValueError("{} is not in numeric range".format(value))
    def _get_by_index(self, i):
        # Negative indexes count from the end, as with regular sequences.
        if i < 0:
            i += self._len
        if i < 0 or i >= self._len:
            raise IndexError("numeric range object index out of range")
        return self._start + i * self._step
def count_cycle(iterable, n=None):
    """Cycle through the items from *iterable* up to *n* times, yielding
    the number of completed cycles along with each item. If *n* is omitted
    the process repeats indefinitely.

        >>> list(count_cycle('AB', 3))
        [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
    """
    # Materialize once so the items can be replayed on every cycle.
    items = tuple(iterable)
    if not items:
        return iter(())
    rounds = count() if n is None else range(n)
    return (
        (round_index, item) for round_index in rounds for item in items
    )
def mark_ends(iterable):
    """Yield 3-tuples of the form ``(is_first, is_last, item)``.

        >>> list(mark_ends('ABC'))
        [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]

    Use this when looping over an iterable to take special action on its
    first and/or last items:

        >>> iterable = ['Header', 100, 200, 'Footer']
        >>> total = 0
        >>> for is_first, is_last, item in mark_ends(iterable):
        ...     if is_first:
        ...         continue  # Skip the header
        ...     if is_last:
        ...         continue  # Skip the footer
        ...     total += item
        >>> print(total)
        300
    """
    stream = iter(iterable)
    try:
        current = next(stream)
    except StopIteration:
        # Empty input yields nothing.
        return
    first = True
    # Hold one item back; seeing a successor proves the held item isn't last.
    for upcoming in stream:
        yield first, False, current
        current = upcoming
        first = False
    # The held-back item is the last one (and the first, if length is 1).
    yield first, True, current
def locate(iterable, pred=bool, window_size=None):
    """Yield the index of each item in *iterable* for which *pred* returns
    ``True``.

    *pred* defaults to :func:`bool`, which will select truthy items:

        >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
        [1, 2, 4]

    Set *pred* to a custom function to, e.g., find the indexes for a
    particular item:

        >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
        [1, 3]

    If *window_size* is given, then the *pred* function will be called with
    that many items. This enables searching for sub-sequences:

        >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
        >>> pred = lambda *args: args == (1, 2, 3)
        >>> list(locate(iterable, pred=pred, window_size=3))
        [1, 5, 9]

    Use with :func:`seekable` to find indexes and then retrieve the
    associated items:

        >>> from itertools import count
        >>> from more_itertools import seekable
        >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
        >>> it = seekable(source)
        >>> pred = lambda x: x > 100
        >>> indexes = locate(it, pred=pred)
        >>> i = next(indexes)
        >>> it.seek(i)
        >>> next(it)
        106
    """
    if window_size is None:
        # compress() keeps each index whose predicate result is truthy.
        return compress(count(), map(pred, iterable))
    if window_size < 1:
        raise ValueError('window size must be at least 1')
    # Slide a window over the input; pred sees each window unpacked.
    windows = windowed(iterable, window_size, fillvalue=_marker)
    return compress(count(), starmap(pred, windows))
def longest_common_prefix(iterables):
    """Yield elements of the longest common prefix amongst given *iterables*.

        >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf']))
        'ab'
    """
    # zip() produces one "column" per position; the prefix is shared for
    # as long as every column's elements are all equal.
    columns = takewhile(all_equal, zip(*iterables))
    return (column[0] for column in columns)
def lstrip(iterable, pred):
    """Yield the items from *iterable*, but strip any from the beginning
    for which *pred* returns ``True``.

    For example, to remove a set of items from the start of an iterable:

        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
        >>> pred = lambda x: x in {None, False, ''}
        >>> list(lstrip(iterable, pred))
        [1, 2, None, 3, False, None]

    This function is analogous to :func:`str.lstrip`, and is essentially
    a wrapper for :func:`itertools.dropwhile`.
    """
    return dropwhile(pred, iterable)
def rstrip(iterable, pred):
    """Yield the items from *iterable*, but strip any from the end
    for which *pred* returns ``True``.

    For example, to remove a set of items from the end of an iterable:

        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
        >>> pred = lambda x: x in {None, False, ''}
        >>> list(rstrip(iterable, pred))
        [None, False, None, 1, 2, None, 3]

    This function is analogous to :func:`str.rstrip`.
    """
    # Matching items are held back: if a non-matching item follows, they
    # were interior and get flushed; if the input ends first, they were a
    # trailing run and are discarded.
    pending = []
    for item in iterable:
        if pred(item):
            pending.append(item)
        else:
            yield from pending
            pending.clear()
            yield item
def strip(iterable, pred):
    """Yield the items from *iterable*, but strip any from the
    beginning and end for which *pred* returns ``True``.
    For example, to remove a set of items from both ends of an iterable:
    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
    >>> pred = lambda x: x in {None, False, ''}
    >>> list(strip(iterable, pred))
    [1, 2, None, 3]
    This function is analogous to :func:`str.strip`.
    """
    # Drop matching items from the front lazily (lstrip), then buffer and
    # discard any trailing run of matching items (rstrip).
    return rstrip(lstrip(iterable, pred), pred)
class islice_extended:
    """An extension of :func:`itertools.islice` that supports negative values
    for *stop*, *start*, and *step*.

        >>> iterable = iter('abcdefgh')
        >>> list(islice_extended(iterable, -4, -1))
        ['e', 'f', 'g']

    Slices with negative values require some caching of *iterable*, but this
    function takes care to minimize the amount of memory required.

    For example, you can use a negative step with an infinite iterator:

        >>> from itertools import count
        >>> list(islice_extended(count(), 110, 99, -2))
        [110, 108, 106, 104, 102, 100]

    You can also use slice notation directly:

        >>> iterable = map(str, count())
        >>> it = islice_extended(iterable)[10:20:2]
        >>> list(it)
        ['10', '12', '14', '16', '18']
    """

    def __init__(self, iterable, *args):
        source = iter(iterable)
        # With slice arguments, wrap the source in the slicing helper;
        # otherwise pass items through untouched.
        self._iterable = (
            _islice_helper(source, slice(*args)) if args else source
        )

    def __iter__(self):
        return self

    def __next__(self):
        return next(self._iterable)

    def __getitem__(self, key):
        # Only slice keys are supported; integer indexing would require
        # consuming the iterator, which this class avoids.
        if not isinstance(key, slice):
            raise TypeError(
                'islice_extended.__getitem__ argument must be a slice'
            )
        return islice_extended(_islice_helper(self._iterable, key))
def _islice_helper(it, s):
    """Yield the items of iterator *it* selected by slice *s*, supporting
    negative *start*, *stop*, and *step* (backs islice_extended).

    Negative indices require buffering part of *it* in a deque, but only
    the minimum amount needed is held at any time.
    """
    start = s.start
    stop = s.stop
    if s.step == 0:
        raise ValueError('step argument must be a non-zero integer or None.')
    step = s.step or 1
    if step > 0:
        start = 0 if (start is None) else start
        if start < 0:
            # Consume all but the last -start items
            # (enumerate from 1 so the last cached pair also records the
            # total number of items seen).
            cache = deque(enumerate(it, 1), maxlen=-start)
            len_iter = cache[-1][0] if cache else 0
            # Adjust start to be positive
            i = max(len_iter + start, 0)
            # Adjust stop to be positive
            if stop is None:
                j = len_iter
            elif stop >= 0:
                j = min(stop, len_iter)
            else:
                j = max(len_iter + stop, 0)
            # Slice the cache
            # (the cache begins at absolute index i, so slice it from 0).
            n = j - i
            if n <= 0:
                return
            for index, item in islice(cache, 0, n, step):
                yield item
        elif (stop is not None) and (stop < 0):
            # Advance to the start position
            next(islice(it, start, start), None)
            # When stop is negative, we have to carry -stop items while
            # iterating; an item is only emitted once -stop further items
            # have been seen, proving it isn't within the stripped tail.
            cache = deque(islice(it, -stop), maxlen=-stop)
            for index, item in enumerate(it):
                cached_item = cache.popleft()
                if index % step == 0:
                    yield cached_item
                cache.append(item)
        else:
            # When both start and stop are positive we have the normal case
            yield from islice(it, start, stop, step)
    else:
        start = -1 if (start is None) else start
        if (stop is not None) and (stop < 0):
            # Consume all but the last items
            n = -stop - 1
            cache = deque(enumerate(it, 1), maxlen=n)
            len_iter = cache[-1][0] if cache else 0
            # If start and stop are both negative they are comparable and
            # we can just slice. Otherwise we can adjust start to be negative
            # and then slice.
            if start < 0:
                i, j = start, stop
            else:
                i, j = min(start - len_iter, -1), None
            for index, item in list(cache)[i:j:step]:
                yield item
        else:
            # Advance to the stop position
            if stop is not None:
                m = stop + 1
                next(islice(it, m, m), None)
            # stop is positive, so if start is negative they are not comparable
            # and we need the rest of the items.
            if start < 0:
                i = start
                n = None
            # stop is None and start is positive, so we just need items up to
            # the start index.
            elif stop is None:
                i = None
                n = start + 1
            # Both stop and start are positive, so they are comparable.
            else:
                i = None
                n = start - stop
                if n <= 0:
                    return
            # Materialize the needed span, then step through it backwards.
            cache = list(islice(it, n))
            yield from cache[i::step]
def always_reversible(iterable):
    """An extension of :func:`reversed` that supports all iterables, not
    just those which implement the ``Reversible`` or ``Sequence`` protocols.

        >>> print(*always_reversible(x for x in range(3)))
        2 1 0

    If the iterable is already reversible, this function returns the
    result of :func:`reversed()`. If the iterable is not reversible,
    this function will cache the remaining items in the iterable and
    yield them in reverse order, which may require significant storage.
    """
    try:
        return reversed(iterable)
    except TypeError:
        # Not reversible directly; fall through and materialize it.
        pass
    return reversed(list(iterable))
def consecutive_groups(iterable, ordering=lambda x: x):
    """Yield groups of consecutive items using :func:`itertools.groupby`.

    The *ordering* function determines whether two items are adjacent by
    returning their position.

    By default, the ordering function is the identity function. This is
    suitable for finding runs of numbers:

        >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
        >>> for group in consecutive_groups(iterable):
        ...     print(list(group))
        [1]
        [10, 11, 12]
        [20]
        [30, 31, 32, 33]
        [40]

    For finding runs of adjacent letters, try using the :meth:`index` method
    of a string of letters:

        >>> from string import ascii_lowercase
        >>> iterable = 'abcdfgilmnop'
        >>> ordering = ascii_lowercase.index
        >>> for group in consecutive_groups(iterable, ordering):
        ...     print(list(group))
        ['a', 'b', 'c', 'd']
        ['f', 'g']
        ['i']
        ['l', 'm', 'n', 'o', 'p']

    Each group of consecutive items is an iterator that shares its source
    with *iterable*. When an output group is advanced, the previous group
    is no longer available unless its elements are copied (e.g., into a
    ``list``):

        >>> iterable = [1, 2, 11, 12, 21, 22]
        >>> saved_groups = []
        >>> for group in consecutive_groups(iterable):
        ...     saved_groups.append(list(group))  # Copy group elements
        >>> saved_groups
        [[1, 2], [11, 12], [21, 22]]
    """

    def delta(pair):
        index, item = pair
        # Consecutive items keep a constant (index - position) difference,
        # so groupby splits exactly where a gap appears.
        return index - ordering(item)

    for _, run in groupby(enumerate(iterable), key=delta):
        yield map(itemgetter(1), run)
def difference(iterable, func=sub, *, initial=None):
    """The inverse of :func:`itertools.accumulate`. By default computes
    the first difference of *iterable* using :func:`operator.sub`:

    >>> list(difference([0, 1, 3, 6, 10]))
    [0, 1, 2, 3, 4]

    A custom *func* is applied pairwise as::

        A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...

    For example, progressive division:

    >>> list(difference([1, 2, 6, 24, 120], lambda x, y: x // y))
    [1, 2, 3, 4, 5]

    If *initial* is set, the first element is skipped when computing
    successive differences (matching ``accumulate(..., initial=...)``):

    >>> list(difference([10, 11, 13, 16], initial=10))
    [1, 2, 3]
    """
    prev_items, curr_items = tee(iterable)
    try:
        head = [next(curr_items)]
    except StopIteration:
        # Empty input: nothing to difference.
        return iter([])
    # With an initial value, the first element is an accumulation artifact
    # and is dropped from the output.
    leading = [] if initial is not None else head
    return chain(leading, map(func, curr_items, prev_items))
class SequenceView(Sequence):
    """A read-only, dynamic view of the sequence *target*.

    Analogous to Python's built-in "dictionary view" types: when the
    underlying sequence changes, so does the view.

    >>> seq = ['0', '1', '2']
    >>> view = SequenceView(seq)
    >>> seq.append('3')
    >>> view
    SequenceView(['0', '1', '2', '3'])

    Supports indexing, slicing, and length queries like the underlying
    sequence, but not assignment. Useful as a cheap alternative to
    copying, since it requires no extra storage of its own.
    """
    def __init__(self, target):
        # Only true sequences are viewable; reject anything else early.
        if not isinstance(target, Sequence):
            raise TypeError
        self._target = target
    def __getitem__(self, index):
        # Delegates directly, so slices come back in the target's type.
        return self._target[index]
    def __len__(self):
        return len(self._target)
    def __repr__(self):
        return f'{self.__class__.__name__}({self._target!r})'
class seekable:
    """Wrap an iterator to allow for seeking backward and forward. This
    progressively caches the items in the source iterable so they can be
    re-visited.
    Call :meth:`seek` with an index to seek to that position in the source
    iterable.
    To "reset" an iterator, seek to ``0``:
    >>> from itertools import count
    >>> it = seekable((str(n) for n in count()))
    >>> next(it), next(it), next(it)
    ('0', '1', '2')
    >>> it.seek(0)
    >>> next(it), next(it), next(it)
    ('0', '1', '2')
    >>> next(it)
    '3'
    You can also seek forward:
    >>> it = seekable((str(n) for n in range(20)))
    >>> it.seek(10)
    >>> next(it)
    '10'
    >>> it.relative_seek(-2) # Seeking relative to the current position
    >>> next(it)
    '9'
    >>> it.seek(20) # Seeking past the end of the source isn't a problem
    >>> list(it)
    []
    >>> it.seek(0) # Resetting works even after hitting the end
    >>> next(it), next(it), next(it)
    ('0', '1', '2')
    Call :meth:`peek` to look ahead one item without advancing the iterator:
    >>> it = seekable('1234')
    >>> it.peek()
    '1'
    >>> list(it)
    ['1', '2', '3', '4']
    >>> it.peek(default='empty')
    'empty'
    Before the iterator is at its end, calling :func:`bool` on it will return
    ``True``. After it will return ``False``:
    >>> it = seekable('5678')
    >>> bool(it)
    True
    >>> list(it)
    ['5', '6', '7', '8']
    >>> bool(it)
    False
    You may view the contents of the cache with the :meth:`elements` method.
    That returns a :class:`SequenceView`, a view that updates automatically:
    >>> it = seekable((str(n) for n in range(10)))
    >>> next(it), next(it), next(it)
    ('0', '1', '2')
    >>> elements = it.elements()
    >>> elements
    SequenceView(['0', '1', '2'])
    >>> next(it)
    '3'
    >>> elements
    SequenceView(['0', '1', '2', '3'])
    By default, the cache grows as the source iterable progresses, so beware of
    wrapping very large or infinite iterables. Supply *maxlen* to limit the
    size of the cache (this of course limits how far back you can seek).
    >>> from itertools import count
    >>> it = seekable((str(n) for n in count()), maxlen=2)
    >>> next(it), next(it), next(it), next(it)
    ('0', '1', '2', '3')
    >>> list(it.elements())
    ['2', '3']
    >>> it.seek(0)
    >>> next(it), next(it), next(it), next(it)
    ('2', '3', '4', '5')
    >>> next(it)
    '6'
    """
    def __init__(self, iterable, maxlen=None):
        self._source = iter(iterable)
        # Unbounded list cache by default; a bounded deque discards the
        # oldest items, limiting how far back seeking can reach.
        if maxlen is None:
            self._cache = []
        else:
            self._cache = deque([], maxlen)
        # _index is None while reading fresh items from the source;
        # otherwise it is the cache position the next __next__ will serve.
        self._index = None
    def __iter__(self):
        return self
    def __next__(self):
        # Replay from the cache first if a seek/peek set a read position.
        if self._index is not None:
            try:
                item = self._cache[self._index]
            except IndexError:
                # Ran off the end of the cache: resume pulling from source.
                self._index = None
            else:
                self._index += 1
                return item
        item = next(self._source)
        self._cache.append(item)
        return item
    def __bool__(self):
        # True iff another item is available; peek() restores position.
        try:
            self.peek()
        except StopIteration:
            return False
        return True
    def peek(self, default=_marker):
        # Advance one item, then step the read position back one so the
        # same item is returned by the following __next__ call.
        try:
            peeked = next(self)
        except StopIteration:
            if default is _marker:
                raise
            return default
        if self._index is None:
            # The item came fresh from the source and was just cached.
            self._index = len(self._cache)
        self._index -= 1
        return peeked
    def elements(self):
        # Live read-only view of the cached items.
        return SequenceView(self._cache)
    def seek(self, index):
        self._index = index
        # Seeking beyond what has been cached: pull items from the source
        # to catch up (no-op if the source is already exhausted).
        remainder = index - len(self._cache)
        if remainder > 0:
            consume(self, remainder)
    def relative_seek(self, count):
        # NOTE(review): uses the cache length as the "current position".
        # If called mid-replay (while _index is set), this measures from
        # the end of the cache rather than _index — confirm intended.
        index = len(self._cache)
        self.seek(max(index + count, 0))
class run_length:
    """Run-length encode and decode iterables.

    :func:`run_length.encode` compresses an iterable, yielding
    ``(item, count)`` pairs for each run of repeated items:

    >>> list(run_length.encode('abbcccdddd'))
    [('a', 1), ('b', 2), ('c', 3), ('d', 4)]

    :func:`run_length.decode` reverses the process, yielding the items of
    the decompressed iterable:

    >>> list(run_length.decode([('a', 1), ('b', 2), ('c', 3)]))
    ['a', 'b', 'b', 'c', 'c', 'c']
    """
    @staticmethod
    def encode(iterable):
        # groupby collapses each run; count it by exhausting the group.
        return (
            (value, sum(1 for _ in group))
            for value, group in groupby(iterable)
        )
    @staticmethod
    def decode(iterable):
        # Expand each (value, count) pair back into a run of values.
        return chain.from_iterable(
            repeat(value, count) for value, count in iterable
        )
def exactly_n(iterable, n, predicate=bool):
    """Return ``True`` if exactly ``n`` items in the iterable are ``True``
    according to *predicate*.

    >>> exactly_n([True, True, False], 2)
    True
    >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
    True

    The iterable is advanced only until ``n + 1`` truthy items have been
    seen, but avoid calling this on infinite iterables with fewer than
    ``n + 1`` matches.
    """
    # Stop counting as soon as n + 1 matches are found; equality with n
    # then means there were exactly n matches.
    matches = islice(filter(predicate, iterable), n + 1)
    return sum(1 for _ in matches) == n
def circular_shifts(iterable):
    """Return a list of circular shifts of *iterable*.

    >>> circular_shifts(range(4))
    [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
    """
    items = list(iterable)
    # Rotation i moves the first i items to the end; one tuple per
    # starting position.
    return [tuple(items[i:] + items[:i]) for i in range(len(items))]
def make_decorator(wrapping_func, result_index=0):
    """Return a decorator version of *wrapping_func*, a function that
    modifies an iterable. *result_index* is the position in
    *wrapping_func*'s signature where the iterable goes.

    This lets you apply itertools-style transformations at function
    definition time, augmenting what a function returns without changing
    its code. For example, a decorator version of :func:`filter` that
    keeps only truthy results:

    >>> truth_serum = make_decorator(filter, result_index=1)
    >>> @truth_serum(bool)
    ... def boolean_test():
    ...     return [0, 1, '', ' ', False, True]
    >>> list(boolean_test())
    [1, ' ', True]

    Wrappers like ``peekable`` and ``seekable`` also make practical
    decorators.
    """
    # See https://sites.google.com/site/bbayles/index/decorator_factory for
    # notes on how this works.
    def decorator(*outer_args, **outer_kwargs):
        def outer_wrapper(func):
            def inner_wrapper(*args, **kwargs):
                # Splice the decorated function's result into the
                # argument list at result_index, then delegate.
                call_args = list(outer_args)
                call_args.insert(result_index, func(*args, **kwargs))
                return wrapping_func(*call_args, **outer_kwargs)
            return inner_wrapper
        return outer_wrapper
    return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
    """Return a dictionary mapping the items of *iterable* to categories
    defined by *keyfunc*, transformed by *valuefunc*, and summarized per
    category by *reducefunc*.

    *valuefunc* defaults to the identity function; without *reducefunc*
    no summarization takes place:

    >>> sorted(map_reduce('abbccc', lambda x: x.upper()).items())
    [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]

    With *valuefunc* and *reducefunc*:

    >>> sorted(map_reduce('abbccc', lambda x: x.upper(),
    ...                   lambda x: 1, sum).items())
    [('A', 1), ('B', 2), ('C', 3)]

    All items are gathered into lists before the summarization step,
    which may require significant storage.

    The returned object is a :obj:`collections.defaultdict` with its
    ``default_factory`` set to ``None``, so it behaves like a normal
    dictionary.
    """
    if valuefunc is None:
        valuefunc = lambda x: x
    grouped = defaultdict(list)
    # Categorize and transform in one pass.
    for item in iterable:
        grouped[keyfunc(item)].append(valuefunc(item))
    # Summarize in place (keys are unchanged, so iteration is safe).
    if reducefunc is not None:
        for key in grouped:
            grouped[key] = reducefunc(grouped[key])
    grouped.default_factory = None
    return grouped
def rlocate(iterable, pred=bool, window_size=None):
    """Yield the index of each item in *iterable* for which *pred* returns
    ``True``, starting from the right and moving left.

    *pred* defaults to :func:`bool`, which selects truthy items:

    >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))
    [4, 2, 1]

    If *window_size* is given, *pred* is called with that many items,
    enabling searches for sub-sequences:

    >>> pred = lambda *args: args == (1, 2, 3)
    >>> list(rlocate([0, 1, 2, 3, 0, 1, 2, 3], pred=pred, window_size=3))
    [5, 1]

    Beware: this function won't return anything for infinite iterables.
    If *iterable* is reversible, it is reversed and searched from the
    right; otherwise it is searched from the left and the results are
    returned in reverse order (which may require significant storage).
    See :func:`locate` for other example applications.
    """
    if window_size is None:
        # Fast path: a sized, reversible input can be scanned backward
        # lazily, without materializing the match list.
        try:
            length = len(iterable)
            reversed_it = reversed(iterable)
        except TypeError:
            pass
        else:
            return (length - i - 1 for i in locate(reversed_it, pred))
    # General path: collect all matches, then reverse them.
    return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
    """Yield the items from *iterable*, replacing the items for which *pred*
    returns ``True`` with the items from the iterable *substitutes*.
    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
    >>> pred = lambda x: x == 0
    >>> substitutes = (2, 3)
    >>> list(replace(iterable, pred, substitutes))
    [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
    If *count* is given, the number of replacements will be limited:
    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
    >>> pred = lambda x: x == 0
    >>> substitutes = [None]
    >>> list(replace(iterable, pred, substitutes, count=2))
    [1, 1, None, 1, 1, None, 1, 1, 0]
    Use *window_size* to control the number of items passed as arguments to
    *pred*. This allows for locating and replacing subsequences.
    >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
    >>> window_size = 3
    >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
    >>> substitutes = [3, 4] # Splice in these items
    >>> list(replace(iterable, pred, substitutes, window_size=window_size))
    [3, 4, 5, 3, 4, 5]
    """
    if window_size < 1:
        raise ValueError('window_size must be at least 1')
    # Save the substitutes iterable, since it's used more than once
    substitutes = tuple(substitutes)
    # Add padding such that the number of windows matches the length of the
    # iterable. Note that the padding _marker objects may be passed to
    # *pred* as part of the trailing windows.
    it = chain(iterable, [_marker] * (window_size - 1))
    windows = windowed(it, window_size)
    # n counts replacements made so far, for the *count* limit.
    n = 0
    for w in windows:
        # If the current window matches our predicate (and we haven't hit
        # our maximum number of replacements), splice in the substitutes
        # and then consume the following windows that overlap with this one.
        # For example, if the iterable is (0, 1, 2, 3, 4...)
        # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
        # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
        if pred(*w):
            if (count is None) or (n < count):
                n += 1
                yield from substitutes
                # Skip the window_size - 1 windows overlapping this match.
                consume(windows, window_size - 1)
                continue
        # If there was no match (or we've reached the replacement limit),
        # yield the first item from the window.
        if w and (w[0] is not _marker):
            yield w[0]
def partitions(iterable):
    """Yield all possible order-preserving partitions of *iterable*.

    >>> for part in partitions('abc'):
    ...     print([''.join(p) for p in part])
    ['abc']
    ['a', 'bc']
    ['ab', 'c']
    ['a', 'b', 'c']

    This is unrelated to :func:`partition`.
    """
    sequence = list(iterable)
    n = len(sequence)
    # Every subset of the interior positions 1..n-1 is a set of cut
    # points; iterate subsets in increasing size (the powerset order).
    cut_sets = chain.from_iterable(
        combinations(range(1, n), r) for r in range(n)
    )
    for cuts in cut_sets:
        yield [
            sequence[start:stop]
            for start, stop in zip((0,) + cuts, cuts + (n,))
        ]
def set_partitions(iterable, k=None):
    """Yield the set partitions of *iterable* into *k* parts. Set
    partitions are not order-preserving.

    >>> for part in set_partitions('abc', 2):
    ...     print([''.join(p) for p in part])
    ['a', 'bc']
    ['ab', 'c']
    ['b', 'ac']

    If *k* is not given, every set partition (for every possible number
    of parts) is generated.
    """
    items = list(iterable)
    n = len(items)
    if k is not None:
        if k < 1:
            raise ValueError(
                "Can't partition in a negative or zero number of groups"
            )
        elif k > n:
            # More groups than items: no partitions exist.
            return

    def _helper(pool, groups):
        # Recursively partition pool into exactly `groups` parts.
        size = len(pool)
        if groups == 1:
            yield [pool]
            return
        if size == groups:
            # One item per group is the only possibility.
            yield [[x] for x in pool]
            return
        head, *rest = pool
        # Either head forms its own singleton group...
        for partition in _helper(rest, groups - 1):
            yield [[head], *partition]
        # ...or head joins each group of a partition of the rest.
        for partition in _helper(rest, groups):
            for idx in range(len(partition)):
                yield (
                    partition[:idx]
                    + [[head] + partition[idx]]
                    + partition[idx + 1 :]
                )

    if k is None:
        for size in range(1, n + 1):
            yield from _helper(items, size)
    else:
        yield from _helper(items, k)
class time_limited:
    """Yield items from *iterable* until *limit_seconds* have passed.

    If the limit expires before all items are yielded, the ``timed_out``
    attribute is set to ``True``:

    >>> from time import sleep
    >>> def generator():
    ...     yield 1
    ...     yield 2
    ...     sleep(0.2)
    ...     yield 3
    >>> it = time_limited(0.1, generator())
    >>> list(it)
    [1, 2]
    >>> it.timed_out
    True

    The elapsed time is checked only after each item is produced, so if
    producing a single item takes longer than the limit, iteration runs
    for that long and yields nothing. As a special case, a limit of zero
    never yields anything.
    """
    def __init__(self, limit_seconds, iterable):
        if limit_seconds < 0:
            raise ValueError('limit_seconds must be positive')
        self.limit_seconds = limit_seconds
        self._it = iter(iterable)
        # The clock starts at construction, not at first iteration.
        self._started = monotonic()
        self.timed_out = False
    def __iter__(self):
        return self
    def __next__(self):
        # Zero is special-cased: stop immediately without touching the
        # underlying iterable.
        if self.limit_seconds == 0:
            self.timed_out = True
            raise StopIteration
        item = next(self._it)
        # The check happens after the item is produced; an over-limit
        # item is consumed but not yielded.
        if monotonic() - self._started > self.limit_seconds:
            self.timed_out = True
            raise StopIteration
        return item
def only(iterable, default=None, too_long=None):
    """If *iterable* has exactly one item, return it. If it has zero
    items, return *default*. If it has more than one item, raise the
    exception given by *too_long* (``ValueError`` by default).

    >>> only([], default='missing')
    'missing'
    >>> only([1])
    1

    Note that :func:`only` advances *iterable* up to twice to verify
    there is a single item. See :func:`spy` or :func:`peekable` for less
    destructive alternatives.
    """
    iterator = iter(iterable)
    first_value = next(iterator, default)
    # A sentinel distinguishes "no second item" from any real value.
    sentinel = object()
    second_value = next(iterator, sentinel)
    if second_value is not sentinel:
        msg = (
            'Expected exactly one item in iterable, but got {!r}, {!r}, '
            'and perhaps more.'.format(first_value, second_value)
        )
        raise too_long or ValueError(msg)
    return first_value
class _IChunk:
def __init__(self, iterable, n):
self._it = islice(iterable, n)
self._cache = deque()
def fill_cache(self):
self._cache.extend(self._it)
def __iter__(self):
return self
def __next__(self):
try:
return next(self._it)
except StopIteration:
if self._cache:
return self._cache.popleft()
else:
raise
def ichunked(iterable, n):
    """Break *iterable* into sub-iterables with *n* elements each.

    Like :func:`chunked`, but yields iterables instead of lists. If the
    sub-iterables are read in order, the elements of *iterable* are not
    stored in memory; read out of order, :func:`itertools.tee`-style
    caching buffers elements as necessary.

    >>> from itertools import count
    >>> all_chunks = ichunked(count(), 4)
    >>> c_1, c_2 = next(all_chunks), next(all_chunks)
    >>> list(c_2)
    [4, 5, 6, 7]
    >>> list(c_1)
    [0, 1, 2, 3]
    """
    source = peekable(iter(iterable))
    sentinel = object()
    # Stop as soon as the source has no more items to chunk.
    while source.peek(sentinel) is not sentinel:
        chunk = _IChunk(source, n)
        yield chunk
        # Once the consumer moves on, buffer this chunk's unread items so
        # the shared source can be advanced safely.
        chunk.fill_cache()
def iequals(*iterables):
    """Return ``True`` if all given *iterables* are equal to each other:
    they contain the same elements in the same order.

    Useful for comparing iterables of different data types or iterables
    that do not support equality checks directly:

    >>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
    True
    >>> iequals("abc", "acb")
    False

    Not to be confused with :func:`all_equal`, which checks whether all
    elements of a single iterable are equal to each other.
    """
    # A fresh object() fillvalue can never equal a real element, so
    # length mismatches show up as unequal positions.
    fill = object()
    for values in zip_longest(*iterables, fillvalue=fill):
        head = values[0]
        if not all(head == other for other in values[1:]):
            return False
    return True
def distinct_combinations(iterable, r):
    """Yield the distinct combinations of *r* items taken from *iterable*.
    >>> list(distinct_combinations([0, 0, 1], 2))
    [(0, 0), (0, 1)]
    Equivalent to ``set(combinations(iterable))``, except duplicates are not
    generated and thrown away. For larger input sequences this is much more
    efficient.
    """
    if r < 0:
        raise ValueError('r must be non-negative')
    elif r == 0:
        yield ()
        return
    pool = tuple(iterable)
    # Explicit stack of generators, one per filled position of the
    # combination. Each generator yields (index, value) pairs filtered by
    # unique_everseen on the value, so duplicate branches are pruned
    # before they are explored.
    generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
    current_combo = [None] * r
    level = 0
    while generators:
        try:
            cur_idx, p = next(generators[-1])
        except StopIteration:
            # Current position exhausted: backtrack one level.
            generators.pop()
            level -= 1
            continue
        current_combo[level] = p
        if level + 1 == r:
            # All r positions filled: emit a snapshot of the combination.
            yield tuple(current_combo)
        else:
            # Descend: candidates for the next position come strictly
            # after the chosen index, preserving combination order.
            generators.append(
                unique_everseen(
                    enumerate(pool[cur_idx + 1 :], cur_idx + 1),
                    key=itemgetter(1),
                )
            )
            level += 1
def filter_except(validator, iterable, *exceptions):
    """Yield the items from *iterable* for which *validator* does not
    raise one of the specified *exceptions*.

    *validator* is called once per item; it should accept one argument
    and raise an exception when the item is not valid.

    >>> list(filter_except(int, ['1', '2', 'three', '4', None],
    ...                    ValueError, TypeError))
    ['1', '2', '4']

    Exceptions not listed in *exceptions* propagate as usual.
    """
    for item in iterable:
        try:
            validator(item)
        except exceptions:
            # Invalid item: drop it and move on.
            continue
        yield item
def map_except(function, iterable, *exceptions):
    """Transform each item from *iterable* with *function* and yield the
    result, unless *function* raises one of the specified *exceptions*.
    *function* is called to transform each item in *iterable*.
    It should accept one argument.
    >>> iterable = ['1', '2', 'three', '4', None]
    >>> list(map_except(int, iterable, ValueError, TypeError))
    [1, 2, 4]
    If an exception other than one given by *exceptions* is raised by
    *function*, it is raised like normal.
    """
    for item in iterable:
        try:
            # The yield sits inside the try block, so the listed
            # exceptions are suppressed whether raised by *function* or
            # thrown into the generator at this suspension point.
            yield function(item)
        except exceptions:
            pass
def map_if(iterable, pred, func, func_else=lambda x: x):
    """Evaluate each item of *iterable* with *pred*; yield ``func(item)``
    when the result is truthy, otherwise yield ``func_else(item)``.

    *pred*, *func*, and *func_else* each accept one argument. By default
    *func_else* is the identity function.

    >>> list(map_if(range(-2, 3), lambda x: x >= 0,
    ...             lambda x: x * x, lambda x: None))
    [None, None, 0, 1, 4]
    """
    for item in iterable:
        if pred(item):
            yield func(item)
        else:
            yield func_else(item)
def _sample_unweighted(iterable, k):
    # "Algorithm L" reservoir sampling, from the 1994 paper by Kim-Hung
    # Li: "Reservoir-Sampling Algorithms of Time Complexity
    # O(n(1+log(N/n)))".
    # The reservoir starts as the first k items.
    reservoir = list(islice(iterable, k))
    # w is the largest of a sample of k U(0, 1) order statistics,
    # generated via inverse transform sampling.
    w = exp(log(random()) / k)
    # The gap until the next replacement follows a geometric
    # distribution; sample it with random() and logs.
    next_index = k + floor(log(random()) / log(1 - w))
    for index, element in enumerate(iterable, k):
        if index == next_index:
            # Replace a uniformly random reservoir slot.
            reservoir[randrange(k)] = element
            # The new w is the largest of k U(0, old_w) numbers.
            w *= exp(log(random()) / k)
            next_index += floor(log(random()) / log(1 - w)) + 1
    return reservoir
def _sample_weighted(iterable, k, weights):
    # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
    # "Weighted random sampling with a reservoir".
    # Note: `weights` and `iterable` are iterators that advance in
    # lockstep; filling the reservoir consumes the first k of each, and
    # the loop below continues from item k + 1.
    # Log-transform for numerical stability for weights that are small/large
    weight_keys = (log(random()) / weight for weight in weights)
    # Fill up the reservoir (collection of samples) with the first `k`
    # weight-keys and elements, then heapify the list.
    reservoir = take(k, zip(weight_keys, iterable))
    heapify(reservoir)
    # The number of jumps before changing the reservoir is a random variable
    # with an exponential distribution. Sample it using random() and logs.
    # reservoir[0] holds the smallest (most negative) weight-key, i.e.
    # the current candidate for eviction.
    smallest_weight_key, _ = reservoir[0]
    weights_to_skip = log(random()) / smallest_weight_key
    for weight, element in zip(weights, iterable):
        if weight >= weights_to_skip:
            # The notation here is consistent with the paper, but we store
            # the weight-keys in log-space for better numerical stability.
            smallest_weight_key, _ = reservoir[0]
            t_w = exp(weight * smallest_weight_key)
            r_2 = uniform(t_w, 1)  # generate U(t_w, 1)
            weight_key = log(r_2) / weight
            # Evict the smallest weight-key in favor of the new element.
            heapreplace(reservoir, (weight_key, element))
            smallest_weight_key, _ = reservoir[0]
            weights_to_skip = log(random()) / smallest_weight_key
        else:
            weights_to_skip -= weight
    # Equivalent to [element for weight_key, element in sorted(reservoir)]
    return [heappop(reservoir)[1] for _ in range(k)]
def sample(iterable, k, weights=None):
    """Return a *k*-length list of elements chosen (without replacement)
    from *iterable*. Like :func:`random.sample`, but works on iterables
    of unknown length.

    >>> sample(range(100), 5)  # doctest: +SKIP
    [81, 60, 96, 16, 4]

    An iterable of *weights* may also be given; the relative weight of
    each item determines the probability that it is selected. Used with
    ``k=len(data)``, this generates a weighted random permutation in
    which heavier items tend to appear later.
    """
    # k == 0 short-circuits before the samplers would misbehave.
    if k == 0:
        return []
    iterable = iter(iterable)
    if weights is None:
        return _sample_unweighted(iterable, k)
    return _sample_weighted(iterable, k, iter(weights))
def is_sorted(iterable, key=None, reverse=False, strict=False):
"""Returns ``True`` if the items of iterable are in sorted order, and
``False`` otherwise. *key* and *reverse* have the same meaning that they do
in the built-in :func:`sorted` function.
>>> is_sorted(['1', '2', '3', '4', '5'], key=int)
True
>>> is_sorted([5, 4, 3, 1, 2], reverse=True)
False
If *strict*, tests for strict sorting, that is, returns ``False`` if equal
elements are found:
>>> is_sorted([1, 2, 2])
True
>>> is_sorted([1, 2, 2], strict=True)
False
The function returns ``False`` after encountering the first out-of-order
item. If there are no out-of-order items, the iterable is exhausted.
"""
compare = (le if reverse else ge) if strict else (lt if reverse else gt)
it = iterable if key is None else map(key, iterable)
return not any(starmap(compare, pairwise(it)))
class AbortThread(BaseException):
    """Raised inside a callback to stop the background thread started by
    :class:`callback_iter` (see its ``__exit__``/``_reader`` methods).
    Derives from ``BaseException``, presumably so that user-level
    ``except Exception`` handlers do not swallow it — confirm intended.
    """
    pass
class callback_iter:
    """Convert a function that uses callbacks to an iterator.
    Let *func* be a function that takes a `callback` keyword argument.
    For example:
    >>> def func(callback=None):
    ...     for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
    ...         if callback:
    ...             callback(i, c)
    ...     return 4
    Use ``with callback_iter(func)`` to get an iterator over the parameters
    that are delivered to the callback.
    >>> with callback_iter(func) as it:
    ...     for args, kwargs in it:
    ...         print(args)
    (1, 'a')
    (2, 'b')
    (3, 'c')
    The function will be called in a background thread. The ``done`` property
    indicates whether it has completed execution.
    >>> it.done
    True
    If it completes successfully, its return value will be available
    in the ``result`` property.
    >>> it.result
    4
    Notes:
    * If the function uses some keyword argument besides ``callback``, supply
      *callback_kwd*.
    * If it finished executing, but raised an exception, accessing the
      ``result`` property will raise the same exception.
    * If it hasn't finished executing, accessing the ``result``
      property from within the ``with`` block will raise ``RuntimeError``.
    * If it hasn't finished executing, accessing the ``result`` property from
      outside the ``with`` block will raise a
      ``more_itertools.AbortThread`` exception.
    * Provide *wait_seconds* to adjust how frequently the it is polled for
      output.
    """
    def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
        self._func = func
        self._callback_kwd = callback_kwd
        self._aborted = False
        self._future = None
        self._wait_seconds = wait_seconds
        # Lazily import concurrent.futures. __import__ on a dotted name
        # returns the top-level ``concurrent`` package, so the submodule
        # is reached through the ``futures`` attribute.
        # (Fixed: the previous code called __import__() with no argument,
        # which raises TypeError at construction time.)
        self._executor = __import__(
            "concurrent.futures"
        ).futures.ThreadPoolExecutor(max_workers=1)
        self._iterator = self._reader()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Signal the callback to raise AbortThread on its next call, then
        # wait for the background call to finish.
        self._aborted = True
        self._executor.shutdown()
    def __iter__(self):
        return self
    def __next__(self):
        return next(self._iterator)
    @property
    def done(self):
        # Whether the background call has finished (successfully or not).
        if self._future is None:
            return False
        return self._future.done()
    @property
    def result(self):
        if not self.done:
            raise RuntimeError('Function has not yet completed')
        return self._future.result()
    def _reader(self):
        q = Queue()
        def callback(*args, **kwargs):
            if self._aborted:
                raise AbortThread('canceled by user')
            q.put((args, kwargs))
        self._future = self._executor.submit(
            self._func, **{self._callback_kwd: callback}
        )
        # Poll the queue while the function runs, yielding each callback
        # invocation as an (args, kwargs) pair.
        while True:
            try:
                item = q.get(timeout=self._wait_seconds)
            except Empty:
                pass
            else:
                q.task_done()
                yield item
            if self._future.done():
                break
        # Drain anything enqueued between the last get() and the future
        # completing, so no callback invocations are lost.
        remaining = []
        while True:
            try:
                item = q.get_nowait()
            except Empty:
                break
            else:
                q.task_done()
                remaining.append(item)
        q.join()
        yield from remaining
def windowed_complete(iterable, n):
    """Yield ``(beginning, middle, end)`` tuples where each ``middle``
    has *n* items from *iterable*, ``beginning`` has the items before
    them, and ``end`` has the items after them.

    >>> for beginning, middle, end in windowed_complete(range(5), 3):
    ...     print(beginning, middle, end)
    () (0, 1, 2) (3, 4)
    (0,) (1, 2, 3) (4,)
    (0, 1) (2, 3, 4) ()

    *n* must be at least 0 and at most the length of *iterable*. This
    function exhausts the iterable and may require significant storage.
    """
    if n < 0:
        raise ValueError('n must be >= 0')
    # Materialize once: each output tuple reuses slices of this buffer.
    seq = tuple(iterable)
    size = len(seq)
    if n > size:
        raise ValueError('n must be <= len(seq)')
    for start in range(size - n + 1):
        stop = start + n
        yield seq[:start], seq[start:stop], seq[stop:]
def all_unique(iterable, key=None):
    """Return ``True`` if all the elements of *iterable* are unique (no
    two elements are equal).

    >>> all_unique('ABCB')
    False

    If a *key* function is given, comparisons are made on its results:

    >>> all_unique('ABCb', str.lower)
    False

    Returns as soon as the first duplicate is found. Iterables mixing
    hashable and unhashable items are supported, but unhashable items
    make the check slower.
    """
    # Hashable elements go in a set (O(1) membership); unhashable ones
    # fall back to a list scan.
    hashable_seen = set()
    unhashable_seen = []
    elements = map(key, iterable) if key else iterable
    for element in elements:
        try:
            if element in hashable_seen:
                return False
            hashable_seen.add(element)
        except TypeError:
            if element in unhashable_seen:
                return False
            unhashable_seen.append(element)
    return True
def nth_product(index, *args):
    """Equivalent to ``list(product(*args))[index]``.

    The products of *args* can be ordered lexicographically;
    :func:`nth_product` computes the product at sort position *index*
    without computing the previous products.

    >>> nth_product(8, range(2), range(2), range(2), range(2))
    (1, 0, 0, 0)

    ``IndexError`` is raised if *index* is out of range.
    """
    # Work from the least-significant (last) pool outward.
    pools = [tuple(pool) for pool in args][::-1]
    sizes = [len(pool) for pool in pools]
    total = reduce(mul, sizes)
    if index < 0:
        index += total
    if not 0 <= index < total:
        raise IndexError
    # Mixed-radix decomposition: each pool size is one digit's base.
    selected = []
    for pool, size in zip(pools, sizes):
        index, digit = divmod(index, size)
        selected.append(pool[digit])
    return tuple(reversed(selected))
def nth_permutation(iterable, r, index):
    """Equivalent to ``list(permutations(iterable, r))[index]``.

    The subsequences of *iterable* that are of length *r* where order is
    important can be ordered lexicographically. :func:`nth_permutation`
    computes the subsequence at sort position *index* directly, without
    computing the previous subsequences.

    >>> nth_permutation('ghijk', 2, 5)
    ('h', 'i')

    ``ValueError`` will be raised If *r* is negative or greater than the
    length of *iterable*; ``IndexError`` if *index* is invalid.
    """
    pool = list(iterable)
    n = len(pool)
    if r is None or r == n:
        r, c = n, factorial(n)
    elif not 0 <= r < n:
        raise ValueError
    else:
        c = perm(n, r)
    if index < 0:
        index += c
    if not 0 <= index < c:
        raise IndexError
    if not c:
        return ()
    # Express *index* in the factorial number system; each digit then
    # selects (and removes) one item from the shrinking pool.
    digits = [0] * r
    quotient = index if r == n else index * factorial(n) // c
    for radix in range(1, n + 1):
        quotient, digit = divmod(quotient, radix)
        if 0 <= n - radix < r:
            digits[n - radix] = digit
        if not quotient:
            break
    return tuple(map(pool.pop, digits))
def nth_combination_with_replacement(iterable, r, index):
    """Equivalent to
    ``list(combinations_with_replacement(iterable, r))[index]``.

    The subsequences with repetition of *iterable* that are of length *r*
    can be ordered lexicographically.
    :func:`nth_combination_with_replacement` computes the subsequence at
    sort position *index* directly, without computing the previous ones.

    >>> nth_combination_with_replacement(range(5), 3, 5)
    (0, 1, 1)

    ``ValueError`` will be raised If *r* is negative or greater than the
    length of *iterable*; ``IndexError`` if *index* is invalid.
    """
    pool = tuple(iterable)
    n = len(pool)
    if not 0 <= r <= n:
        raise ValueError
    c = comb(n + r - 1, r)
    if index < 0:
        index += c
    if not 0 <= index < c:
        raise IndexError
    selection = []
    pool_index = 0
    while r:
        r -= 1
        # Advance past every pool item whose sub-block of combinations
        # ends before *index*, shrinking *index* as we go.
        while n >= 0:
            block = comb(n + r - 1, r)
            if index < block:
                break
            n -= 1
            pool_index += 1
            index -= block
        selection.append(pool[pool_index])
    return tuple(selection)
def value_chain(*args):
    """Yield all arguments in order, expanding each iterable argument
    into its values.

    >>> list(value_chain(1, 2, 3, [4, 5, 6]))
    [1, 2, 3, 4, 5, 6]

    Binary and text strings are treated as atomic values, not iterables:

    >>> list(value_chain('12', '34', ['56', '78']))
    ['12', '34', '56', '78']

    Only one level of nesting is flattened.
    """
    for arg in args:
        if isinstance(arg, (str, bytes)):
            yield arg
        else:
            try:
                yield from arg
            except TypeError:
                # Not iterable: emit the value itself.
                yield arg
def product_index(element, *args):
    """Equivalent to ``list(product(*args)).index(element)``

    The products of *args* can be ordered lexicographically.
    :func:`product_index` computes the first index of *element* without
    computing the previous products.

    >>> product_index([8, 2], range(10), range(5))
    42

    ``ValueError`` will be raised if the given *element* isn't in the
    product of *args*.
    """
    index = 0
    # zip_longest with the module sentinel detects a length mismatch
    # between *element* and *args*.
    for item, pool in zip_longest(element, args, fillvalue=_marker):
        if item is _marker or pool is _marker:
            raise ValueError('element is not a product of args')
        choices = tuple(pool)
        index = index * len(choices) + choices.index(item)
    return index
def combination_index(element, iterable):
    """Equivalent to ``list(combinations(iterable, r)).index(element)``

    The subsequences of *iterable* that are of length *r* can be ordered
    lexicographically. :func:`combination_index` computes the index of the
    first *element*, without computing the previous combinations.

    >>> combination_index('adf', 'abcdefg')
    10

    ``ValueError`` will be raised if the given *element* isn't one of the
    combinations of *iterable*.
    """
    # Walk *iterable* once, recording the position of each successive
    # item of *element* in ``indexes``.
    element = enumerate(element)
    k, y = next(element, (None, None))
    if k is None:
        # An empty element is the first (index 0) combination.
        return 0
    indexes = []
    pool = enumerate(iterable)
    for n, x in pool:
        if x == y:
            indexes.append(n)
            tmp, y = next(element, (None, None))
            if tmp is None:
                # All items of *element* were matched.
                break
            else:
                k = tmp
    else:
        # The pool ran out before every item of *element* was matched.
        raise ValueError('element is not a combination of iterable')
    # Exhaust the pool so that n becomes the index of the last pool item.
    n, _ = last(pool, default=(n, None))
    # Convert the recorded positions into a lexicographic rank.
    # (Comment predates math.comb being available; comb() is used now.)
    index = 1
    for i, j in enumerate(reversed(indexes), start=1):
        j = n - j
        if i <= j:
            index += comb(j, i)
    return comb(n + 1, k + 1) - index
def combination_with_replacement_index(element, iterable):
    """Equivalent to
    ``list(combinations_with_replacement(iterable, r)).index(element)``

    The subsequences with repetition of *iterable* that are of length *r*
    can be ordered lexicographically.
    :func:`combination_with_replacement_index` computes the index of the
    first *element*, without computing the previous combinations with
    replacement.

    >>> combination_with_replacement_index('adf', 'abcdefg')
    20

    ``ValueError`` will be raised if the given *element* isn't one of the
    combinations with replacement of *iterable*.
    """
    element = tuple(element)
    l = len(element)  # r: the length of the target combination
    element = enumerate(element)
    k, y = next(element, (None, None))
    if k is None:
        # The empty combination is always at index 0.
        return 0
    indexes = []
    pool = tuple(iterable)
    for n, x in enumerate(pool):
        # A single pool item may match several consecutive items of
        # *element* (repetition is allowed), hence the inner while-loop.
        while x == y:
            indexes.append(n)
            tmp, y = next(element, (None, None))
            if tmp is None:
                break
            else:
                k = tmp
        if y is None:
            break
    else:
        raise ValueError(
            'element is not a combination with replacement of iterable'
        )
    n = len(pool)
    # occupations[p] counts how many times pool item p occurs in *element*.
    occupations = [0] * n
    for p in indexes:
        occupations[p] += 1
    # Accumulate the lexicographic rank from the occupation counts.
    index = 0
    cumulative_sum = 0
    for k in range(1, n):
        cumulative_sum += occupations[k - 1]
        j = l + n - 1 - k - cumulative_sum
        i = n - k
        if i <= j:
            index += comb(j, i)
    return index
def permutation_index(element, iterable):
    """Equivalent to ``list(permutations(iterable, r)).index(element)``

    The subsequences of *iterable* that are of length *r* where order is
    important can be ordered lexicographically. :func:`permutation_index`
    computes the index of the first *element* directly, without computing
    the previous permutations.

    >>> permutation_index([1, 3, 2], range(5))
    19

    ``ValueError`` will be raised if the given *element* isn't one of the
    permutations of *iterable*.
    """
    pool = list(iterable)
    index = 0
    # Mixed-radix accumulation: the radix shrinks as items are removed.
    for radix, item in zip(range(len(pool), -1, -1), element):
        pos = pool.index(item)
        del pool[pos]
        index = index * radix + pos
    return index
class countable:
    """Wrap *iterable* and keep a count of how many items have been consumed.

    The ``items_seen`` attribute starts at ``0`` and increments as the
    iterable is consumed:

    >>> iterable = map(str, range(10))
    >>> it = countable(iterable)
    >>> it.items_seen
    0
    >>> next(it), next(it)
    ('0', '1')
    >>> list(it)
    ['2', '3', '4', '5', '6', '7', '8', '9']
    >>> it.items_seen
    10
    """

    def __init__(self, iterable):
        self._it = iter(iterable)
        self.items_seen = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Only count an item once it has actually been produced; if the
        # underlying iterator raises, the counter is left unchanged.
        value = next(self._it)
        self.items_seen += 1
        return value
def chunked_even(iterable, n):
    """Break *iterable* into lists of approximately length *n*, with the
    list lengths differing by at most 1 item.

    >>> iterable = [1, 2, 3, 4, 5, 6, 7]
    >>> n = 3
    >>> list(chunked_even(iterable, n))  # List lengths: 3, 2, 2
    [[1, 2, 3], [4, 5], [6, 7]]
    >>> list(chunked(iterable, n))  # List lengths: 3, 3, 1
    [[1, 2, 3], [4, 5, 6], [7]]
    """
    get_length = getattr(iterable, '__len__', None)
    if get_length is not None:
        # Known length: chunk sizes can be computed up front.
        return _chunked_even_finite(iterable, get_length(), n)
    # Unknown length: buffer just enough items to even out the chunks.
    return _chunked_even_online(iterable, n)
def _chunked_even_online(iterable, n):
    # Buffer items from an unsized iterable; once the buffer is long
    # enough that the first n items must form a full chunk, emit them.
    max_buffered = n + (n - 2) * (n - 1)
    pending = []
    for item in iterable:
        pending.append(item)
        if len(pending) == max_buffered:
            yield pending[:n]
            pending = pending[n:]
    # Whatever remains has a known length; split it evenly.
    yield from _chunked_even_finite(pending, len(pending), n)
def _chunked_even_finite(iterable, N, n):
if N < 1:
return
# Lists are either size `full_size <= n` or `partial_size = full_size - 1`
q, r = divmod(N, n)
num_lists = q + (1 if r > 0 else 0)
q, r = divmod(N, num_lists)
full_size = q + (1 if r > 0 else 0)
partial_size = full_size - 1
num_full = N - partial_size * num_lists
num_partial = num_lists - num_full
# Yield num_full lists of full_size
partial_start_idx = num_full * full_size
if full_size > 0:
for i in range(0, partial_start_idx, full_size):
yield list(islice(iterable, i, i + full_size))
# Yield num_partial lists of partial_size
if partial_size > 0:
for i in range(
partial_start_idx,
partial_start_idx + (num_partial * partial_size),
partial_size,
):
yield list(islice(iterable, i, i + partial_size))
def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
    """A version of :func:`zip` that "broadcasts" any scalar
    (i.e., non-iterable) items into output tuples.

    >>> iterable_1 = [1, 2, 3]
    >>> iterable_2 = ['a', 'b', 'c']
    >>> scalar = '_'
    >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
    [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]

    *scalar_types* determines which types count as scalar; it defaults to
    ``(str, bytes)``.  Set it to ``None`` to treat strings as iterable:

    >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
    [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]

    If *strict* is ``True``, ``UnequalIterablesError`` is raised when the
    iterables have different lengths.
    """

    def is_scalar(obj):
        if scalar_types and isinstance(obj, scalar_types):
            return True
        try:
            iter(obj)
        except TypeError:
            return True
        return False

    if not objects:
        return
    # Scalars are written into the template once; iterable slots are
    # refilled on every iteration.
    template = [None] * len(objects)
    iterators = []
    positions = []
    for pos, obj in enumerate(objects):
        if is_scalar(obj):
            template[pos] = obj
        else:
            iterators.append(iter(obj))
            positions.append(pos)
    if not iterators:
        # Every argument was a scalar: emit them once.
        yield tuple(objects)
        return
    zipper = _zip_equal if strict else zip
    for values in zipper(*iterators):
        for pos, value in zip(positions, values):
            template[pos] = value
        yield tuple(template)
def unique_in_window(iterable, n, key=None):
    """Yield the items from *iterable* that haven't been seen within the
    last *n* items.

    >>> iterable = [0, 1, 0, 2, 3, 0]
    >>> n = 3
    >>> list(unique_in_window(iterable, n))
    [0, 1, 2, 3, 0]

    The *key* function, if provided, determines uniqueness:

    >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
    ['a', 'b', 'c', 'd', 'a']

    The items in *iterable* must be hashable.
    """
    if n <= 0:
        raise ValueError('n must be greater than 0')
    window = deque(maxlen=n)
    counts = defaultdict(int)
    for item in iterable:
        # Expire the key that is about to fall out of the window.
        if len(window) == n:
            oldest = window[0]
            if counts[oldest] == 1:
                del counts[oldest]
            else:
                counts[oldest] -= 1
        k = item if key is None else key(item)
        if k not in counts:
            yield item
        counts[k] += 1
        window.append(k)
def duplicates_everseen(iterable, key=None):
    """Yield duplicate elements after their first appearance.

    >>> list(duplicates_everseen('mississippi'))
    ['s', 'i', 's', 's', 'i', 'p', 'i']
    >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
    ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']

    This function is analogous to :func:`unique_everseen` and is subject
    to the same performance considerations.
    """
    hashable_seen = set()
    unhashable_seen = []
    for item in iterable:
        k = item if key is None else key(item)
        try:
            first_time = k not in hashable_seen
            if first_time:
                hashable_seen.add(k)
        except TypeError:
            # Unhashable keys fall back to a (slower) list scan.
            first_time = k not in unhashable_seen
            if first_time:
                unhashable_seen.append(k)
        if not first_time:
            yield item
def duplicates_justseen(iterable, key=None):
    """Yields serially-duplicate elements after their first appearance.

    >>> list(duplicates_justseen('mississippi'))
    ['s', 's', 'p']
    >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
    ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']

    This function is analogous to :func:`unique_justseen`.
    """
    for _, group in groupby(iterable, key):
        # Skip the first item of each run; yield the repeats.
        next(group, None)
        yield from group
def classify_unique(iterable, key=None):
    """Classify each element in terms of its uniqueness.

    For each element in the input iterable, return a 3-tuple consisting of:

    1. The element itself
    2. ``False`` if the element is equal to the one preceding it in the
       input, ``True`` otherwise (cf. :func:`unique_justseen`)
    3. ``False`` if this element has been seen anywhere in the input
       before, ``True`` otherwise (cf. :func:`unique_everseen`)

    >>> list(classify_unique('otto'))    # doctest: +NORMALIZE_WHITESPACE
    [('o', True,  True),
     ('t', True,  True),
     ('t', False, False),
     ('o', True,  False)]

    This function is analogous to :func:`unique_everseen` and is subject
    to the same performance considerations.
    """
    hashable_seen = set()
    unhashable_seen = []
    previous = None
    for i, element in enumerate(iterable):
        k = element if key is None else key(element)
        # The very first element is always new relative to its predecessor.
        unique_justseen = (i == 0) or (previous != k)
        previous = k
        try:
            unique_everseen = k not in hashable_seen
            if unique_everseen:
                hashable_seen.add(k)
        except TypeError:
            # Unhashable keys use a list scan instead of the set.
            unique_everseen = k not in unhashable_seen
            if unique_everseen:
                unhashable_seen.append(k)
        yield element, unique_justseen, unique_everseen
def minmax(iterable_or_value, *others, key=None, default=_marker):
    """Returns both the smallest and largest items from an iterable
    or from two or more arguments.

    >>> minmax([3, 1, 5])
    (1, 5)
    >>> minmax(4, 2, 6)
    (2, 6)

    If a *key* function is provided, it will be used to transform the input
    items for comparison.

    >>> minmax([5, 30], key=str)  # '30' sorts before '5'
    (30, 5)

    If a *default* value is provided, it will be returned if there are no
    input items; otherwise ``ValueError`` is raised.

    >>> minmax([], default=(0, 0))
    (0, 0)

    This function is based on the
    `recipe <http://code.activestate.com/recipes/577916/>`__ by
    Raymond Hettinger and takes care to minimize the number of comparisons
    performed.
    """
    iterable = (iterable_or_value, *others) if others else iterable_or_value
    it = iter(iterable)
    try:
        lo = hi = next(it)
    except StopIteration as e:
        if default is _marker:
            raise ValueError(
                '`minmax()` argument is an empty iterable. '
                'Provide a `default` value to suppress this error.'
            ) from e
        return default
    # Different branches depending on the presence of key. This saves a lot
    # of unimportant copies which would slow the "key=None" branch
    # significantly down.
    if key is None:
        # Consume items in pairs (zip_longest over the same iterator twice);
        # an odd final item is paired with *lo*, which is a no-op for the
        # running bounds.  Each pair costs 3 comparisons instead of 4.
        for x, y in zip_longest(it, it, fillvalue=lo):
            if y < x:
                # Order the pair so x <= y before updating the bounds.
                x, y = y, x
            if x < lo:
                lo = x
            if hi < y:
                hi = y
    else:
        # Same pairwise scheme, but each item's key is computed exactly
        # once and carried alongside the item.
        lo_key = hi_key = key(lo)
        for x, y in zip_longest(it, it, fillvalue=lo):
            x_key, y_key = key(x), key(y)
            if y_key < x_key:
                x, y, x_key, y_key = y, x, y_key, x_key
            if x_key < lo_key:
                lo, lo_key = x, x_key
            if hi_key < y_key:
                hi, hi_key = y, y_key
    return lo, hi
def constrained_batches(
    iterable, max_size, max_count=None, get_len=len, strict=True
):
    """Yield batches of items from *iterable* with a combined size limited
    by *max_size*.

    >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
    >>> list(constrained_batches(iterable, 10))
    [(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]

    If a *max_count* is supplied, the number of items per batch is also
    limited:

    >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
    >>> list(constrained_batches(iterable, 10, max_count = 2))
    [(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]

    If a *get_len* function is supplied, it is used instead of :func:`len`
    to determine item size.

    If *strict* is ``True``, raise ``ValueError`` if any single item is
    bigger than *max_size*; otherwise single oversize items are allowed.
    """
    if max_size <= 0:
        raise ValueError('maximum size must be greater than zero')
    batch = []
    size = count = 0
    for item in iterable:
        item_len = get_len(item)
        if strict and item_len > max_size:
            raise ValueError('item size exceeds maximum size')
        # Flush the current (non-empty) batch if this item would overflow it.
        if count and (count == max_count or size + item_len > max_size):
            yield tuple(batch)
            batch = []
            size = count = 0
        batch.append(item)
        size += item_len
        count += 1
    if batch:
        yield tuple(batch)
def gray_product(*iterables):
    """Like :func:`itertools.product`, but return tuples in an order such
    that only one element in the generated tuple changes from one iteration
    to the next.

    >>> list(gray_product('AB','CD'))
    [('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')]

    This function consumes all of the input iterables before producing
    output.  ``ValueError`` is raised if any input has fewer than two
    items.

    For information on the algorithm, see
    `this section <https://www-cs-faculty.stanford.edu/~knuth/fasc2a.ps.gz>`__
    of Donald Knuth's *The Art of Computer Programming*.
    """
    pools = tuple(tuple(pool) for pool in iterables)
    count = len(pools)
    for pool in pools:
        if len(pool) < 2:
            raise ValueError("each iterable must have two or more items")
    # "Algorithm H" from TAOCP section 7.2.1.1, page 20:
    #   indices[i]   -- current position within pools[i]
    #   focus        -- the array of "focus pointers"
    #   direction    -- per-pool direction of travel (+1 or -1)
    indices = [0] * count
    focus = list(range(count + 1))
    direction = [1] * count
    while True:
        yield tuple(pool[i] for pool, i in zip(pools, indices))
        j = focus[0]
        focus[0] = 0
        if j == count:
            break
        indices[j] += direction[j]
        # Reverse direction when this pool reaches either end.
        if indices[j] == 0 or indices[j] == len(pools[j]) - 1:
            direction[j] = -direction[j]
            focus[j] = focus[j + 1]
            focus[j + 1] = j + 1
def partial_product(*iterables):
    """Yields tuples containing one item from each iterator, with
    subsequent tuples changing a single item at a time by advancing each
    iterator until it is exhausted.  Every value of every iterable appears
    in at least one output tuple, without generating the full product.

    This may be useful, for example, when testing an expensive function.

    >>> list(partial_product('AB', 'C', 'DEF'))
    [('A', 'C', 'D'), ('B', 'C', 'D'), ('B', 'C', 'E'), ('B', 'C', 'F')]
    """
    iterators = [iter(it) for it in iterables]
    try:
        current = [next(it) for it in iterators]
    except StopIteration:
        # Some input is empty: no tuples can be formed.
        return
    yield tuple(current)
    # Advance one slot at a time, left to right.
    for position, iterator in enumerate(iterators):
        for value in iterator:
            current[position] = value
            yield tuple(current)
def takewhile_inclusive(predicate, iterable):
    """A variant of :func:`takewhile` that also yields the first element
    for which *predicate* is false.

    >>> list(takewhile_inclusive(lambda x: x < 5, [1, 4, 6, 4, 1]))
    [1, 4, 6]

    :func:`takewhile` would return ``[1, 4]``.
    """
    for item in iterable:
        yield item
        if not predicate(item):
            return
def outer_product(func, xs, ys, *args, **kwargs):
    """A generalized outer product: apply binary *func* to all pairs of
    items and return a 2D matrix with ``len(xs)`` rows and ``len(ys)``
    columns.  Extra ``*args`` and ``**kwargs`` are passed to *func*.

    Multiplication table:

    >>> list(outer_product(mul, range(1, 4), range(1, 6)))
    [(1, 2, 3, 4, 5), (2, 4, 6, 8, 10), (3, 6, 9, 12, 15)]

    Cross tabulation:

    >>> xs = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
    >>> ys = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
    >>> rows = list(zip(xs, ys))
    >>> count_rows = lambda x, y: rows.count((x, y))
    >>> list(outer_product(count_rows, sorted(set(xs)), sorted(set(ys))))
    [(2, 3, 0), (1, 0, 4)]

    Usage with ``*args`` and ``**kwargs``:

    >>> animals = ['cat', 'wolf', 'mouse']
    >>> list(outer_product(min, animals, animals, key=len))
    [('cat', 'cat', 'cat'), ('cat', 'wolf', 'wolf'), ('cat', 'wolf', 'mouse')]
    """
    # *ys* may be a one-shot iterator; materialize it so it can be reused
    # for every row and its length taken.
    ys = tuple(ys)
    values = (func(x, y, *args, **kwargs) for x, y in product(xs, ys))
    return batched(values, n=len(ys))
def iter_suppress(iterable, *exceptions):
    """Yield each of the items from *iterable*.  If the iteration raises
    one of the specified *exceptions*, that exception is suppressed and
    iteration stops.

    >>> from itertools import chain
    >>> def breaks_at_five(x):
    ...     while True:
    ...         if x >= 5:
    ...             raise RuntimeError
    ...         yield x
    ...         x += 1
    >>> it_1 = iter_suppress(breaks_at_five(1), RuntimeError)
    >>> it_2 = iter_suppress(breaks_at_five(2), RuntimeError)
    >>> list(chain(it_1, it_2))
    [1, 2, 3, 4, 2, 3, 4]
    """
    # ``yield from`` keeps the try-block around the *source* iteration
    # only; exceptions thrown into this generator are not swallowed.
    try:
        yield from iterable
    except exceptions:
        pass
def filter_map(func, iterable):
    """Apply *func* to every element of *iterable*, yielding only the
    results that are not ``None``.

    >>> elems = ['1', 'a', '2', 'b', '3']
    >>> list(filter_map(lambda s: int(s) if s.isnumeric() else None, elems))
    [1, 2, 3]
    """
    for result in map(func, iterable):
        if result is not None:
            yield result
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@setuptools@py3@pkg_resources@_vendor@more_itertools@more.py@.PATH_END.py
|
{
"filename": "_x.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choropleth/colorbar/_x.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``x`` property of ``choropleth.colorbar``."""

    def __init__(self, plotly_name="x", parent_name="choropleth.colorbar", **kwargs):
        # Default the edit type unless the caller supplied one.
        kwargs.setdefault("edit_type", "colorbars")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choropleth@colorbar@_x.py@.PATH_END.py
|
{
"filename": "cats.py",
"repo_name": "legacysurvey/imagine",
"repo_path": "imagine_extracted/imagine-main/map/cats.py",
"type": "Python"
}
|
from __future__ import print_function
from functools import lru_cache
import os
if __name__ == '__main__':
import sys
sys.path.insert(0, 'django-1.9')
os.environ['DJANGO_SETTINGS_MODULE'] = 'viewer.settings'
import django
django.setup()
from django.http import HttpResponse
from viewer import settings
try:
from django.core.urlresolvers import reverse
except:
# django 2.0
from django.urls import reverse
from map.utils import send_file, trymakedirs, get_tile_wcs, oneyear
from datetime import datetime
# ``debug`` is an alias for print() when settings.DEBUG_LOGGING is on,
# and a silent no-op otherwise.
debug = print
if not settings.DEBUG_LOGGING:
    def debug(*args, **kwargs):
        pass
# Maps each catalog tag (as used in URLs / the cat_* view functions) to the
# list of catalog versions that are valid for it.  Views check the requested
# version against this table before serving data.
catversions = {
    'hsc-dr2-cosmos': [1,],
    'dr9sv': [1,],
    'dr9sv-north': [1,],
    'dr9sv-south': [1,],
    'ls-dr9': [1,],
    'ls-dr9-north': [1,],
    'ls-dr9-south': [1,],
    'ls-dr8': [1,],
    'ls-dr8-north': [1,],
    'ls-dr8-south': [1,],
    'decals-dr7': [1,],
    'mzls+bass-dr6': [1,],
    'ls-dr67':[1,],
    'decals-dr5': [1,],
    'ngc': [1,],
    'GCs-PNe': [1,],
    'lslga': [1,],
    'sga': [1,],
    'spec': [1,],
    'spec-deep2': [1,],
    'manga': [1,],
    'bright': [1,],
    'tycho2': [1,],
    'targets-dr67': [1,],
    'targets-bgs-dr67': [1,],
    'targets-sky-dr67': [1,],
    'targets-bright-dr67': [1,],
    'targets-dark-dr67': [1,],
    'targets-cmx-dr7': [1,],
    'targets-dr8': [1,],
    'targets-sv-dr8': [1,],
    'targets-dr9-sv1-sec-bright':[1,],
    'targets-dr9-sv1-sec-dark':[1,],
    'targets-dr9-sv1-dark':[1,],
    'targets-dr9-sv1-bright':[1,],
    'targets-dr9-sv1-supp':[1,],
    'targets-dr9-sv3-bright':[1,],
    'targets-dr9-sv3-dark':[1,],
    'targets-dr9-sv3-sec-bright':[1,],
    'targets-dr9-sv3-sec-dark':[1,],
    'targets-dr9-main-bright':[1,],
    'targets-dr9-main-dark':[1,],
    'targets-dr9-main-sec-bright':[1,],
    'targets-dr9-main-sec-dark':[1,],
    'gaia-dr1': [1,],
    'gaia-dr2': [1,],
    'gaia-edr3': [1,],
    'sdss-cat': [1,],
    'phat-clusters': [1,],
    'ps1': [1,],
    'desi-tiles': [1,],
    'masks-dr8': [1,],
    'cfis-dr2': [1,],
    'photoz-dr9': [1,],
    'desi-edr-tiles': [1,],
    'desi-edr-spectra': [1,],
    'desi-dr1-tiles': [1,],
    'desi-dr1-spectra': [1,],
    'desi-daily-tiles': [1,],
    'desi-daily-spectra': [1,],
    'desi-daily-obs': [1,],
    'desi-fuji-tiles': [1,],
    'desi-fuji-spectra': [1,],
    'desi-guadalupe-tiles': [1,],
    'desi-guadalupe-spectra': [1,],
    'ls-dr10': [1,],
    'ls-dr10-south': [1,],
}
# Optional extra catalogs for local testing: if map/test_layers.py exists,
# register its catalogs at version 1.  The bare except is deliberate so
# production deployments without that module still import cleanly.
test_cats = []
try:
    from map.test_layers import test_cats as tc
    for key,pretty in tc:
        catversions[key] = [1,]
except:
    pass
def cat_cfis(req, ver, tag='cfis-dr2'):
    """Django view: return CFIS-DR2 catalog sources inside an RA,Dec box
    as JSON.

    Expects GET parameters ``ralo``, ``rahi``, ``declo``, ``dechi``
    (presumably degrees -- consistent with the other catalog views).
    Returns at most 1000 sources with coordinates, flags, magnitudes,
    plot colors and tile names.  Raises RuntimeError for an invalid
    *ver*.
    """
    import json
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    from astrometry.util.fits import fits_table, merge_tables
    from astrometry.libkd.spherematch import tree_open, tree_search_radec
    from map.views import get_layer
    import numpy as np
    # Convert the RA,Dec box to a bounding circle for the brick search.
    rc,dc,rad = radecbox_to_circle(ralo, rahi, declo, dechi)
    layer = get_layer('cfis-dr2')
    B = layer.bricks_within_range(rc, dc, rad, scale=0)
    print(len(B), 'bricks:', B.brickname)
    TT = []
    ntot = 0
    for b in B:
        # Per-brick catalog file, named after the CFIS grid cell.
        fn = os.path.join(layer.basedir, 'catalogs-dr2',
                          'CFIS.%s.%s.fits' % (b.grid1, b.grid2))
        print(fn)
        T = fits_table(fn)
        print(len(T), 'from', fn)
        if len(T) == 0:
            continue
        T.cut((T.dec >= declo) * (T.dec <= dechi))
        print(len(T), 'in Dec range')
        if ralo < rahi:
            T.cut((T.ra >= ralo) * (T.ra <= rahi))
            print(len(T), 'in RA range')
        else:
            # RA range wraps around 360 -> 0 degrees.
            T.cut(np.logical_or(T.ra >= ralo, T.ra <= rahi))
            print(len(T), 'in RA range (wrapped)')
        if len(T) == 0:
            continue
        T.brickname = np.array(['CFIS.' + b.grid1 + '.' + b.grid2] * len(T))
        TT.append(T)
        ntot += len(T)
        # Stop reading bricks once we already have enough sources.
        if ntot > 1000:
            break
    T = merge_tables(TT)
    print('Total of', len(T))
    T = T[:1000]
    color = []
    for rflag,uflag in zip(T.r_flags, T.u_flags):
        # Flag values are -99, 0, or >0; a positive flag in either band
        # appears to mark a problematic source (drawn gray below).
        ok = not((rflag > 0) or (uflag > 0))
        if ok:
            color.append('skyblue')
        else:
            color.append('gray')
    return HttpResponse(json.dumps(dict(
        rd=[(float(r), float(d)) for r,d in zip(T.ra, T.dec)],
        color=color,
        r_flags=[int(i) for i in T.r_flags],
        u_flags=[int(i) for i in T.u_flags],
        cfis_id=[int(i) for i in T.cfis_id],
        r_mag_auto=[float(m) for m in T.r_mag_auto],
        u_mag_auto=[float(m) for m in T.u_mag_auto],
        tile=[str(b) for b in T.brickname],
        )),
        content_type='application/json')
def gaia_stars_for_wcs(req):
    """Django view: for each WCS posted as JSON (``req.POST['wcs']``, a
    list of TAN-WCS parameter dicts), return the 10 brightest Gaia-DR2
    stars within that WCS footprint.

    The reply is a JSON list (one entry per input WCS) of lists of star
    dicts with ra, dec, G/BP/RP magnitudes, and pixel x,y.
    """
    import json
    from legacypipe.gaiacat import GaiaCatalog
    from astrometry.util.util import Tan
    import os
    import numpy as np
    # GaiaCatalog reads its data files from this environment variable.
    os.environ['GAIA_CAT_DIR'] = os.path.join(settings.DATA_DIR, 'gaia-dr2')
    J = json.loads(req.POST['wcs'])
    print('Got WCS values:', J)
    reply = []
    gaia = GaiaCatalog()
    for jwcs in J:
        wcs = Tan(*[float(jwcs[k]) for k in
                    ['crval1', 'crval2', 'crpix1', 'crpix2', 'cd11', 'cd12', 'cd21', 'cd22',
                     'width', 'height']])
        stars = gaia.get_catalog_in_wcs(wcs)
        # Keep the 10 brightest (smallest G magnitude) stars.
        I = np.argsort(stars.phot_g_mean_mag)
        stars.cut(I[:10])
        ok,xx,yy = wcs.radec2pixelxy(stars.ra, stars.dec)
        def clean(x):
            # JSON cannot represent NaN/Inf; map them to 0.
            if np.isfinite(x):
                return float(x)
            return 0.
        reply.append([
            dict(ra=clean(g.ra), dec=clean(g.dec),
                 g=clean(g.phot_g_mean_mag), bp=clean(g.phot_bp_mean_mag),
                 rp=clean(g.phot_rp_mean_mag), x=clean(x), y=clean(y))
            for g,x,y in zip(stars, xx, yy)])
    return HttpResponse(json.dumps(reply),
                        content_type='application/json')
def call_prospect(spectra, zbests, redrock_template_dir=None, outdir=None):
    """Render DESI *spectra* (with redshift catalog *zbests*) to an HTML
    page via ``prospect.viewer`` and return it as an HttpResponse.

    If *outdir* is given, the generated ``prospect.html`` is cached there
    and re-served on later calls; otherwise (or if *outdir* cannot be
    created) a temporary directory is used.  *redrock_template_dir*
    defaults to ``<DATA_DIR>/redrock-templates``.
    """
    import tempfile
    import os
    import prospect.viewer
    import redrock.templates
    if redrock_template_dir is None:
        redrock_template_dir = os.path.join(settings.DATA_DIR, 'redrock-templates')
    # redrock locates its templates through this environment variable.
    os.environ['RR_TEMPLATE_DIR'] = redrock_template_dir
    # Keep a reference to any TemporaryDirectory so it survives until the
    # output file has been opened below.
    tempdir = None
    if outdir is None:
        tempdir = tempfile.TemporaryDirectory()
        outdir = tempdir.name
    elif not os.path.exists(outdir):
        try:
            os.makedirs(outdir)
        except OSError:
            pass
        if not os.path.exists(outdir):
            # Bug fix: this used to set outdir = None, and the
            # os.path.join() below then raised TypeError.  Fall back to a
            # temporary directory instead.
            print('Failed to create requested output directory', outdir)
            tempdir = tempfile.TemporaryDirectory()
            outdir = tempdir.name
    outfn = os.path.join(outdir, 'prospect.html')
    if os.path.exists(outfn):
        print('Cache hit for', outfn)
        return HttpResponse(open(outfn))
    try:
        tt = 'DESI Spectr%s: TARGETID %s' % (('a' if (len(zbests) > 1) else 'um'),
                                             ', '.join(['%i'%i for i in zbests['TARGETID']]))
        prospect.viewer.plotspectra(spectra, zcatalog=zbests, html_dir=outdir, outfile=outfn,
                                    with_vi_widgets=False,
                                    with_thumb_tab=False,
                                    title=tt)
    except KeyError:
        # zbests without the expected keys: render without the model fit.
        prospect.viewer.plotspectra(spectra, zcatalog=zbests, html_dir=outdir,
                                    with_vi_widgets=False, model_from_zcat=False)
    f = open(outfn)
    return HttpResponse(f)
def cat_desi_release_spectra_detail(req, tile, fiber, release):
    """Django view: render the DESI spectrum for one (tile, fiber) from a
    spectro reduction *release* (e.g. 'daily'), using the latest
    cumulative coadd available, and pass it to prospect for display.
    """
    from glob import glob
    from desispec.io import read_spectra
    import numpy as np
    import os
    from astropy.table import Table
    import astropy
    from desispec.spectra import stack
    tile = int(tile)
    fiber = int(fiber)
    # Spectrograph/petal number: 500 fibers per petal.
    sp = fiber//500
    pat = ('/global/cfs/cdirs/desi/spectro/redux/%s/tiles/cumulative/%i/*/coadd-%i-%i-thru*.fits'
           % (release, tile, sp, tile))
    #print('Searching', pat)
    fns = glob(pat)
    fns.sort()
    # Take the most recent (last-sorted) night's coadd.
    fn = fns[-1]
    spectra = read_spectra(fn)
    keep = np.in1d(spectra.fibermap['FIBER'], [fiber])
    spectra = spectra[keep]
    zbests = []
    # I don't understand why some daily spectra seem to be 'zbest' and some 'redrock'
    # eg zbest https://www.legacysurvey.org/viewer-desi/desi-spectrum/daily/targetid39628433252155537
    # /global/cfs/cdirs/desi/spectro/redux/daily/tiles/cumulative/125/20210504/zbest
    # #- Read matching zbest file for this spectra/coadd file
    # if release in []:#'daily']:
    #     fntag = 'zbest'
    #     extname = 'ZBEST'
    # else:
    #     fntag = 'redrock'
    #     extname = 'REDSHIFTS'
    # Just gd try both
    for fntag,extname in [('redrock', 'REDSHIFTS'), ('zbest', 'ZBEST')]:
        # The redshift file sits next to the coadd, with a swapped prefix.
        zbfile = os.path.basename(fn).replace('coadd', fntag, 1)
        zbfile = os.path.join(os.path.dirname(fn), zbfile)
        if not os.path.exists(zbfile):
            continue
        zb = Table.read(zbfile, extname)
        #- Trim zb to only have TARGETIDs in filtered spectra sp
        keep = np.in1d(zb['TARGETID'], spectra.fibermap['TARGETID'])
        zb = zb[keep]
        #- spectra files can have multiple entries per TARGETID,
        #- while zbest files have only 1.  Expand to match spectra.
        #- Note: astropy.table.join changes the order
        if len(spectra.fibermap) > len(zb):
            zbx = Table()
            zbx['TARGETID'] = spectra.fibermap['TARGETID']
            zbx = astropy.table.join(zbx, zb, keys='TARGETID')
        else:
            zbx = zb
        #- Sort the zbx Table to match the order of sp['TARGETID']
        ii = np.argsort(spectra.fibermap['TARGETID'])
        jj = np.argsort(zbx['TARGETID'])
        kk = np.argsort(ii[jj])
        zbx = zbx[kk]
        zbests = zbx
    #- Confirm that we got all that expanding and sorting correct
    assert np.all(spectra.fibermap['TARGETID'] == zbests['TARGETID'])
    # print('Passing to prospect: spectra:')
    # print(spectra)
    # print('zcatalog:')
    # print(zbests)
    return call_prospect(spectra, zbests)
def desi_healpix_spectrum(req, obj, release, redrock_template_dir=None):
    """Django view helper: render the DESI spectrum for a healpix-coadded
    target.

    *obj* is a catalog row carrying targetid, survey, program, and
    healpix attributes (as returned by lookup_targetid).  *release*
    selects the spectro reduction; 'edr' and 'dr1' map to their
    underlying reduction names (fuji, iron).
    """
    from glob import glob
    from desispec.io import read_spectra
    import numpy as np
    import os
    from astropy.table import Table
    import astropy
    from desispec.spectra import stack
    import os
    # Check the cache!
    outdir = None
    if settings.DESI_PROSPECT_DIR is not None:
        outdir = os.path.join(settings.DESI_PROSPECT_DIR, release, 'targetid%i' % obj.targetid)
        fn = os.path.join(outdir, 'prospect.html')
        if os.path.exists(fn):
            print('Cache hit for', fn)
            return HttpResponse(open(fn))
        if not os.path.exists(outdir):
            try:
                os.makedirs(outdir)
            except:
                pass
        if not os.path.exists(outdir):
            # Could not create the cache directory; render uncached.
            outdir = None
    prog = obj.program.strip()
    surv = obj.survey.strip()
    hp = '%i' % obj.healpix
    # Healpix files are grouped into directories of 100.
    hp_pre = '%i' % (obj.healpix//100)
    if release == 'edr':
        basedir = '/global/cfs/cdirs/desi/public/edr/spectro/redux/fuji'
    elif release == 'dr1':
        basedir = '/global/cfs/cdirs/desi/spectro/redux/iron'
    else:
        basedir = '/global/cfs/cdirs/desi/spectro/redux/%s' % release
    fn = os.path.join(basedir, 'healpix', surv, prog, hp_pre, hp,
                      'coadd-%s-%s-%s.fits' % (surv, prog, hp))
    spectra = read_spectra(fn)
    spectra = spectra.select(targets=[obj.targetid])
    if spectra.num_targets() != 1:
        return HttpResponse('Expected to find 1 spectrum, got %i for TARGETID %i in file %s' %
                            (spectra.num_targets(), obj.targetid, fn))
    zbests = []
    #- Read matching zbest file for this spectra/coadd file
    zbfile = os.path.basename(fn).replace('coadd', 'redrock', 1)
    zbfile = os.path.join(os.path.dirname(fn), zbfile)
    zb = Table.read(zbfile, 'REDSHIFTS')
    #- Trim zb to only have TARGETIDs in filtered spectra sp
    keep = np.in1d(zb['TARGETID'], spectra.fibermap['TARGETID'])
    zb = zb[keep]
    assert(len(spectra.fibermap) == 1)
    assert(len(zb) == 1)
    zbests = zb
    #- Confirm that we got all that expanding and sorting correct
    assert np.all(spectra.fibermap['TARGETID'] == zbests['TARGETID'])
    return call_prospect(spectra, zbests, redrock_template_dir=redrock_template_dir,
                         outdir=outdir)
def get_desi_spectro_kdfile(release):
    """Return the path of the kd-tree-indexed redshift catalog file for a
    DESI spectro *release*, or None if the release is not recognized.
    """
    # (subdirectory under DATA_DIR, catalog filename) per release.
    paths = {
        'edr': ('desi-spectro-edr', 'zpix-all.kd.fits'),
        'dr1': ('desi-spectro-dr1', 'zpix-all.kd.fits'),
        'daily': ('desi-spectro-daily', 'allzbest.kd.fits'),
        'daily-obs': ('desi-spectro-daily', 'desi-obs.kd.fits'),
        'guadalupe': ('desi-spectro-guadalupe', 'zpix-all.kd.fits'),
        'fuji': ('desi-spectro-fuji', 'zpix-all.kd.fits'),
        'denali': ('desi-spectro-denali', 'zcatalog-denali-cumulative.kd.fits'),
    }
    try:
        subdir, filename = paths[release]
    except KeyError:
        return None
    return os.path.join(settings.DATA_DIR, subdir, filename)
def lookup_targetid(targetid, release):
    """Look up a single TARGETID in the given release's kd-tree catalog.

    Returns the matching table row, or None when no exact match exists.
    """
    from astrometry.libkd.spherematch import tree_open
    from astrometry.util.fits import fits_table
    import numpy as np
    fn = get_desi_spectro_kdfile(release)
    kd = tree_open(fn, 'targetid')
    print('Searching for targetid', targetid)
    inds = kd.search(np.array([targetid]).astype(np.uint64), 0.5, 0, 0)
    if len(inds) == 0:
        return None
    # The kd-search on uint64 values can return rows outside the search
    # range (uint64 vs float comparisons are weird), so read only those
    # rows and then re-check for exact TARGETID equality.
    T = fits_table(fn, rows=inds)
    exact = np.flatnonzero(T.targetid == targetid)
    if len(exact) == 0:
        return None
    T.cut(exact)
    if len(T) > 1:
        print('Matched targetids:', T.targetid)
    else:
        print('Found targetid', T.targetid[0])
    # Multiple rows can match (same target observed in several
    # survey/program combinations); arbitrarily take the first.
    return T[0]
def cat_desi_daily_spectra_detail(req, targetid):
    """Render the spectrum detail page for one targetid in the DESI
    daily reductions (tile/fiber based, unlike the healpix releases)."""
    targetid = int(targetid)
    match = lookup_targetid(targetid, 'daily')
    if match is None:
        return HttpResponse('No such targetid found in DESI daily spectra: %s' % targetid)
    return cat_desi_release_spectra_detail(req, match.tileid, match.fiber, 'daily')
def cat_desi_guadalupe_spectra_detail(req, targetid):
    """Render the spectrum detail page for one targetid in the DESI
    Guadalupe (healpix-coadded) release."""
    targetid = int(targetid)
    match = lookup_targetid(targetid, 'guadalupe')
    if match is None:
        return HttpResponse('No such targetid found in DESI Guadalupe spectra: %s' % targetid)
    return desi_healpix_spectrum(req, match, 'guadalupe')
def cat_desi_fuji_spectra_detail(req, targetid):
    """Render the spectrum detail page for one targetid in the DESI
    Fuji (healpix-coadded) release."""
    targetid = int(targetid)
    match = lookup_targetid(targetid, 'fuji')
    if match is None:
        return HttpResponse('No such targetid found in DESI Fuji spectra: %s' % targetid)
    return desi_healpix_spectrum(req, match, 'fuji')
def cat_desi_edr_spectra_detail(req, targetid):
    """Render the spectrum detail page for one DESI EDR targetid,
    serving a previously-generated prospect page when one is cached."""
    targetid = int(targetid)
    release = 'edr'
    # Fast path: check the prospect-page cache before touching the
    # kd-tree catalog at all.
    if settings.DESI_PROSPECT_DIR is not None:
        outdir = os.path.join(settings.DESI_PROSPECT_DIR, release, 'targetid%i' % targetid)
        cached = os.path.join(outdir, 'prospect.html')
        if os.path.exists(cached):
            print('Cache hit for', cached)
            return HttpResponse(open(cached))
    match = lookup_targetid(targetid, release)
    if match is None:
        return HttpResponse('No such targetid found in DESI EDR spectra: %s' % targetid)
    return desi_healpix_spectrum(req, match, release)
def cat_desi_dr1_spectra_detail(req, targetid):
    """Render the spectrum detail page for one DESI DR1 targetid,
    serving a previously-generated prospect page when one is cached."""
    targetid = int(targetid)
    release = 'dr1'
    # Fast path: check the prospect-page cache before touching the
    # kd-tree catalog at all.
    if settings.DESI_PROSPECT_DIR is not None:
        outdir = os.path.join(settings.DESI_PROSPECT_DIR, release, 'targetid%i' % targetid)
        cached = os.path.join(outdir, 'prospect.html')
        if os.path.exists(cached):
            print('Cache hit for', cached)
            return HttpResponse(open(cached))
    match = lookup_targetid(targetid, release)
    if match is None:
        return HttpResponse('No such targetid found in DESI DR1 spectra: %s' % targetid)
    # DR1 needs its own redrock template set for prospect.
    rr_templ = os.path.join(settings.DATA_DIR, 'desi-spectro-dr1', 'redrock-templates')
    return desi_healpix_spectrum(req, match, release, redrock_template_dir=rr_templ)
def cat_desi_release_spectra(req, ver, kdfn, tag, racol='ra', deccol='dec',
                             tile_clusters=None, sky=False, obs=False):
    """JSON catalog overlay of DESI spectra for one release.

    Selects rows of the kd-tree catalog `kdfn` inside the RA,Dec box of
    the request (via cat_kd).  When more than 1000 spectra match, the
    response carries cluster outlines + counts instead of (or in
    addition to) individual points.

    Parameters
    ----------
    req : Django request; RA,Dec box in GET params, optional 'tile'.
    ver : catalog version number (validated inside cat_kd).
    kdfn : kd-tree FITS catalog filename.
    tag : catalog tag name.
    racol, deccol : RA,Dec column names in the catalog.
    tile_clusters : optional list of (N,2) polygon arrays giving
        pre-computed tile-cluster boundaries, indexed by the catalog's
        tile_cluster column.
    sky : select sky fibers (TARGETID < 0) instead of real targets.
    obs : per-observation flavor -- names show exposure time, and
        targets within 1 arcsec are merged into one marker.

    Returns an application/json HttpResponse.

    Fix: the ConvexHull failure handler called bare print_exc(), which
    is a NameError; it now calls traceback.print_exc().
    """
    import json
    T = cat_kd(req, ver, tag, kdfn, racol=racol, deccol=deccol)
    if T is None:
        return HttpResponse(json.dumps(dict(rd=[], name=[], color=[])),
                            content_type='application/json')
    # Optional filter to a single tile.
    tileid = None
    if 'tile' in req.GET:
        try:
            tileid = int(req.GET['tile'])
        except:
            pass
    if tileid is not None:
        T.cut(T.tileid == tileid)
    # Sky fibers are flagged by negative TARGETID values.
    if sky:
        T.cut(T.targetid < 0)
    else:
        T.cut(T.targetid >= 0)
    J = {}
    print('Got', len(T), 'spectra')
    if len(T) > 1000:
        # Too many points to send individually: summarize dense regions
        # as polygon outlines with counts, and only send the "loose"
        # remainder (<100 per cell/cluster) as individual points.
        import numpy as np
        from scipy.spatial import ConvexHull
        ralo = float(req.GET['ralo'])
        rahi = float(req.GET['rahi'])
        declo = float(req.GET['declo'])
        dechi = float(req.GET['dechi'])
        # Handle RA wrap-around (box straddling RA=0) by shifting RAs
        # above 180 down by 360 so the box becomes contiguous.
        ra_wrap = ralo > rahi
        if ra_wrap:
            T.ra_wrap = T.ra + -360 * (T.ra > 180)
            ralo -= 360
        else:
            T.ra_wrap = T.ra
        assert(ralo < rahi)
        assert(declo < dechi)
        cluster_edges = []
        cluster_labels = []
        Iloose = []
        Iclusters = []
        # Use pre-computed cluster membership when available.
        if tile_clusters is not None:
            cl = np.unique(T.tile_cluster)
            print(len(cl), 'unique clusters')
            for cl_i in cl:
                rd = tile_clusters[cl_i]
                ra,dec = rd[:,0],rd[:,1]
                if ra_wrap:
                    ra_w = ra + -360 * (ra > 180)
                else:
                    ra_w = ra
                I = np.flatnonzero(T.tile_cluster == cl_i)
                print(len(I), 'spectra in cluster', cl_i)
                Iclusters.append((I, (ra,ra_w,dec)))
        if len(Iclusters) > 1:
            for I,(ra,ra_w,dec) in Iclusters:
                if len(I) < 100:
                    Iloose.append(I)
                    continue
                # Label position: mean of the boundary vertices inside
                # the view box, falling back to the box center.
                K = np.flatnonzero((ra_w > ralo) * (ra_w < rahi) * (dec > declo) * (dec < dechi))
                if len(K) > 0:
                    cra,cdec = np.mean(np.vstack((ra_w[K], dec[K])), axis=1)
                else:
                    cra = (ralo + rahi) / 2.
                    cdec = (declo + dechi) / 2.
                if cra < 0:
                    cra += 360.
                cluster_edges.append([(float(r),float(d)) for r,d in zip(ra,dec)])
                cluster_labels.append([float(cra), float(cdec), '%i spectra' % len(I)])
            # Clusters used: disable the fallback grid below.
            nra = ndec = 0
        else:
            # No usable clusters: fall back to a 3x3 grid of cells.
            nra = ndec = 3
        for i in range(ndec):
            for j in range(nra):
                r1 = ralo + j * (rahi - ralo) / nra
                r2 = ralo + (j+1) * (rahi - ralo) / nra
                d1 = declo + i * (dechi - declo) / ndec
                d2 = declo + (i+1) * (dechi - declo) / ndec
                I = np.flatnonzero((T.ra_wrap >= r1) * (T.ra_wrap < r2) *
                                   (T.dec >= d1) * (T.dec < d2))
                print(len(I), 'in grid cell', j, i)
                if len(I) == 0:
                    continue
                if len(I) < 100:
                    Iloose.append(I)
                    continue
                try:
                    ch = ConvexHull(np.vstack((T.ra_wrap[I], T.dec[I])).T)
                except:
                    # ConvexHull can fail on degenerate point sets;
                    # log and skip the cell.
                    import traceback
                    traceback.print_exc()
                    continue
                Ivert = I[ch.vertices]
                ch = np.vstack((T.ra[Ivert], T.dec[Ivert])).T
                # Compute the label center in wrapped coordinates.
                chw = np.vstack((T.ra_wrap[Ivert], T.dec[Ivert])).T
                c = np.mean(chw, axis=0)
                cra,cdec = c
                if cra < 0:
                    cra += 360.
                cluster_edges.append([(float(r),float(d)) for r,d in ch])
                cluster_labels.append([float(cra), float(cdec), '%i spectra' % len(I)])
        J.update(cluster_edges=cluster_edges,
                 cluster_labels=cluster_labels)
        if len(Iloose):
            # Keep only the loose points and fall through to add them
            # as individual markers below.
            I = np.hstack(Iloose)
            T.cut(I)
        else:
            return HttpResponse(json.dumps(J), content_type='application/json')
    cols = T.get_columns()
    targetids = [str(i) for i in T.targetid]
    if obs:
        from astrometry.util.starutil_numpy import mjdtodate, radectoxyz, xyztoradec
        from astrometry.libkd.spherematch import match_radec
        import numpy as np
        names = []
        colors = []
        for zw,t,mjd,tile in zip(T.zwarn, T.coadd_exptime, T.minmjd, T.tileid):
            d = mjdtodate(mjd)
            names.append('%i sec' % (int(t)))
            # Grey out observations with redshift warnings.
            if zw != 0:
                colors.append('#888888')
            else:
                colors.append('#3388ff')
        # Make names/targetids lists-of-lists so merged targets can
        # accumulate multiple entries.
        names = [[n] for n in names]
        targetids = [[t] for t in targetids]
        # Merge targets within 1": average positions, concatenate
        # names and targetids, drop the duplicate rows.
        ra = T.get(racol)
        dec = T.get(deccol)
        I,JJ,d = match_radec(ra, dec, ra, dec, 1./3600., notself=True)
        if len(I):
            # Each pair appears twice (i,j) and (j,i); keep one.
            K = np.flatnonzero(I < JJ)
            I = I[K]
            JJ = JJ[K]
            del K
            # Average positions on the unit sphere, not in RA,Dec.
            xyz1 = radectoxyz(ra[I], dec[I])
            xyz2 = radectoxyz(ra[JJ], dec[JJ])
            ra[I],dec[I] = xyztoradec((xyz1 + xyz2)/2.)
            for i,j in zip(I, JJ):
                names[i] = names[i] + names[j]
                targetids[i] = targetids[i] + targetids[j]
            keep = np.ones(len(T), bool)
            keep[JJ] = False
            T.cut(keep)
            keep = np.flatnonzero(keep)
            names = [names[i] for i in keep]
            targetids = [targetids[i] for i in keep]
            colors = [colors[i] for i in keep]
    else:
        # objtype -- FIXME -- can we unpack TARGETID enough to figure out SKY fibers?
        if 'objtype' in T.get_columns():
            objtype = T.objtype
        else:
            objtype = ['']*len(T)
        if sky:
            objtype = ['SKY']*len(T)
        names = []
        colors = []
        # Build a display name and color per spectrum from its
        # spectral classification and redshift.
        for ot,t,st,z,zerr,zw in zip(objtype, T.spectype, T.subtype, T.z, T.zerr, T.zwarn):
            c = '#3388ff'
            t = t.strip()
            nm = t
            st = st.strip()
            if st != '':
                nm += ': ' + st
            if t != 'STAR':
                nm += ', z = %.3f' % z
            ot = ot.strip()
            if ot == 'SKY':
                c = '#448888'
                nm = ot
            else:
                if t == 'STAR':
                    c = '#ff4444'
                elif t == 'GALAXY':
                    c = '#ffffff'
                elif t == 'QSO':
                    c = '#4444ff'
            if zw > 0:
                nm += ' (ZWARN=0x%x)' %zw
                c = '#888888'
            names.append(nm)
            colors.append(c)
    rd = list((float(r),float(d)) for r,d in zip(T.get(racol), T.get(deccol)))
    J.update(dict(rd=rd, name=names, color=colors,
                  targetid=targetids))
    if 'fiber' in cols:
        J.update(fiberid=[int(i) for i in T.fiber])
    if 'tileid' in cols:
        J.update(tileid=[int(i) for i in T.tileid])
    return HttpResponse(json.dumps(J), content_type='application/json')
def cat_desi_daily_spectra(req, ver):
    """Catalog overlay of DESI 'daily' reduction spectra (targets only)."""
    return cat_desi_release_spectra(req, ver,
                                    get_desi_spectro_kdfile('daily'),
                                    'desi-daily-spectra')
def cat_desi_daily_sky_spectra(req, ver):
    """Catalog overlay of DESI 'daily' reduction sky-fiber spectra."""
    return cat_desi_release_spectra(req, ver,
                                    get_desi_spectro_kdfile('daily'),
                                    'desi-daily-spectra', sky=True)
def cat_desi_daily_obs(req, ver):
    """Catalog overlay of DESI 'daily' per-observation entries."""
    return cat_desi_release_spectra(req, ver,
                                    get_desi_spectro_kdfile('daily-obs'),
                                    'desi-daily-obs', obs=True)
def cat_desi_daily_obs_detail(req, targetid):
    """Render the HTML detail page for one DESI daily observation."""
    from astrometry.util.starutil_numpy import mjdtodate
    from django.shortcuts import render
    targetid = int(targetid)
    obj = lookup_targetid(targetid, 'daily-obs')
    if obj is None:
        return HttpResponse('No such targetid found in DESI Daily observations: %s' % targetid)
    print('Target:', obj)
    obj.about()
    vals = {col: obj.get(col) for col in obj.get_columns()}
    print('Values:', vals)
    # Convert the first/last observation MJDs into datetimes, dropping
    # sub-second precision for display.
    for key, mjd in (('mindate', obj.minmjd), ('maxdate', obj.maxmjd)):
        d = mjdtodate(mjd)
        vals[key] = datetime(d.year, d.month, d.day, d.hour, d.minute, d.second)
    return render(req, 'obs.html', dict(obj=vals))
def cat_desi_guadalupe_spectra(req, ver):
    """Catalog overlay of DESI Guadalupe spectra.

    The zpix-all.kd.fits summary catalog was pre-built by merging the
    per-(survey,program) zcatalog/zpix-*.fits files from the Guadalupe
    reductions (keeping target_ra, target_dec, targetid, z, zerr, zwarn,
    spectype, subtype, healpix, objtype, plus added survey/program
    columns) and running
    desi_spectro_kdtree.create_desi_spectro_kdtree on the result with
    racol='target_ra', deccol='target_dec'.
    """
    return cat_desi_release_spectra(req, ver,
                                    get_desi_spectro_kdfile('guadalupe'),
                                    'desi-guadalupe-spectra',
                                    racol='target_ra', deccol='target_dec')
def cat_desi_fuji_spectra(req, ver):
    """Catalog overlay of DESI Fuji spectra.

    The zpix-all.kd.fits summary catalog was pre-built by merging the
    per-(survey,program) zcatalog/zpix-*.fits files (cmx/other,
    special/dark, sv1/{backup,bright,other}, sv2/{backup,bright,dark},
    sv3/{backup,bright,dark}) from the Fuji reductions and running
    create_desi_spectro_kdtree on the result with racol='target_ra',
    deccol='target_dec'.
    """
    return cat_desi_release_spectra(req, ver,
                                    get_desi_spectro_kdfile('fuji'),
                                    'desi-fuji-spectra',
                                    racol='target_ra', deccol='target_dec')
def cat_desi_edr_spectra(req, ver):
    """Catalog overlay of DESI EDR spectra, with pre-computed tile-cluster
    outlines used to summarize dense regions.

    Fix: the tile-clusters JSON file was previously opened without being
    closed (leaked file handle); now read via a context manager.
    """
    import json
    import numpy as np
    kdfn = get_desi_spectro_kdfile('edr')
    tag = 'desi-edr-spectra'
    clfn = os.path.join(settings.DATA_DIR, 'desi-spectro-edr', 'tile-clusters.json')
    with open(clfn) as f:
        raw = json.load(f)
    # Each cluster is a flat list of coords; reshape to (N,2) RA,Dec arrays.
    clusters = [np.array(cl).reshape(-1, 2) for cl in raw]
    return cat_desi_release_spectra(req, ver, kdfn, tag, racol='target_ra', deccol='target_dec',
                                    tile_clusters=clusters)
def cat_desi_dr1_spectra(req, ver):
    """Catalog overlay of DESI DR1 spectra.

    Fix: removed unused `import json` and `import numpy as np`.
    """
    kdfn = get_desi_spectro_kdfile('dr1')
    tag = 'desi-dr1-spectra'
    return cat_desi_release_spectra(req, ver, kdfn, tag, racol='target_ra', deccol='target_dec')
def cat_desi_release_tiles(req, ver, release, color_function=None):
    """JSON overlay of DESI tile outlines (circles) for one release.

    req: Django request carrying the RA,Dec view box in GET params.
    ver: catalog version, checked against catversions.
    release: release name, used in the tag and the tiles2.kd.fits path.
    color_function: optional (tile_row, survey, program) -> '#rrggbb'
        override; defaults to a per-(survey,program) palette.

    Returns an application/json HttpResponse of dicts with
    name/ra/dec/radius/color.
    """
    import json
    from astrometry.util.fits import fits_table
    from astrometry.libkd.spherematch import tree_open, tree_search_radec
    import numpy as np
    ver = int(ver)
    tag = 'desi-%s-tiles' % release
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    # DESI tile radius in degrees.
    desi_radius = 1.628
    fn = os.path.join(settings.DATA_DIR, 'desi-spectro-%s/tiles2.kd.fits' % release)
    if color_function is None:
        # Default palette keyed on (survey, program); grey for unknown combos.
        def color_function(t, surv, prog):
            cc = {
                ('sv1', 'backup'): '#999999', # HSV V=0.6
                ('sv1', 'bright'): '#996600', # darker orange
                ('sv1', 'dark'):   '#177699', # darker blue
                ('sv2', 'backup'): '#999999', # HSV V=0.7
                ('sv2', 'bright'): '#b37700', #
                ('sv2', 'dark'):   '#1b8ab3', #
                ('sv3', 'backup'): '#cccccc', # HSV V=0.8
                ('sv3', 'bright'): '#cc8800',
                ('sv3', 'dark'):   '#1f9ecc',
                ('main', 'dark'):    '#22aadd',
                ('main', 'bright'):  '#cc8800',
                ('special', 'dark'): '#77ccee',
                ('special', 'bright'): '#ffbb33',
                }.get((surv, prog), '#888888')
            return cc
    # Format a table of tiles into the JSON response.
    def result(T):
        res = []
        for t in T:
            name = 'Tile %i' % t.tileid
            details = []
            surv = t.survey.strip()
            if surv != 'unknown':
                details.append(surv)
            # Older tile tables call the program column 'faprgrm'.
            if 'program' in t.get_columns():
                prog = t.program.strip()
            else:
                prog = t.faprgrm.strip()
            if prog != 'unknown':
                details.append(prog)
            if len(details):
                name += ' (%s)' % ', '.join(details)
            cc = color_function(t, surv, prog)
            res.append(dict(name=name, ra=t.tilera, dec=t.tiledec, radius=desi_radius,
                            color=cc))
        return HttpResponse(json.dumps(dict(objs=res)),
                            content_type='application/json')
    # For wide views, skip the kd-tree search and return all tiles.
    if dechi - declo > 10:
        T = fits_table(fn)
        return result(T)
    # Otherwise search the kd-tree within a circle bounding the view box,
    # padded by the tile radius so partially-visible tiles are included.
    rc,dc,rad = radecbox_to_circle(ralo, rahi, declo, dechi)
    kd = tree_open(fn)
    I = tree_search_radec(kd, rc, dc, rad + desi_radius)
    T = fits_table(fn, rows=I)
    return result(T)
def cat_desi_daily_tiles(req, ver):
    """Tile outlines for the DESI 'daily' reductions (default colors)."""
    return cat_desi_release_tiles(req, ver, 'daily')
def cat_desi_guadalupe_tiles(req, ver):
    """Tile outlines for DESI Guadalupe (default colors).

    The tiles2.kd.fits file was pre-built with:
    startree -i .../guadalupe/tiles-guadalupe.fits -R tilera -D tiledec
        -PTk -o data/desi-spectro-guadalupe/tiles2.kd.fits
    """
    return cat_desi_release_tiles(req, ver, 'guadalupe')
def cat_desi_fuji_tiles(req, ver):
    """Tile outlines for DESI Fuji (default colors).

    The tiles2.kd.fits file was pre-built with:
    startree -i .../fuji/tiles-fuji.fits -R tilera -D tiledec
        -PTk -o data/desi-spectro-fuji/tiles2.kd.fits
    """
    return cat_desi_release_tiles(req, ver, 'fuji')
def cat_desi_edr_tiles(req, ver):
    """Tile outlines for the DESI EDR, colored by survey (sv1/sv2/sv3).

    Palette notes (dark/mid/light): sv1 2077b4/77a8d0/aac9e1,
    sv1-secondary 000000/3f3f3f/7f7f7f, sv2 2ba02b/37a436/5fb45b,
    sv3 ff7f0f/ffae73/ffcca9.  Survey/program combos present in the EDR:
    cmx-other, special-dark, sv1-{backup,bright,dark,other},
    sv2-{backup,bright,dark}, sv3-{backup,bright,dark}.
    """
    grey = '#7f7f7f'
    palette = {'sv1': '#77a8d0',
               'sv2': '#37a436',
               'sv3': '#ffae73'}
    def tilecolor(t, surv, prog):
        # sv1 'other' (secondary) tiles get grey rather than the sv1 blue.
        if (surv, prog) == ('sv1', 'other'):
            return grey
        return palette.get(surv, grey)
    return cat_desi_release_tiles(req, ver, 'edr', color_function=tilecolor)
def cat_desi_dr1_tiles(req, ver):
    """Tile outlines for DESI DR1, using the default color scheme.

    Fix: removed a dead nested `tilecolor` function -- it was defined
    but never passed to cat_desi_release_tiles (the color_function
    argument was commented out), so it had no effect.  Its palette was
    main '#3b79ab', sv1 '#77a8d0', sv2 '#37a436', sv3 '#ffae73',
    fallback '#7f7f7f', should it ever be wanted again.
    """
    return cat_desi_release_tiles(req, ver, 'dr1')
def cat_photoz_dr9(req, ver):
    '''JSON catalog of DR9 photometric redshifts within the requested
    RA,Dec box, assembled from per-sweep kd-tree files.

    I pre-processed the photo-z sweep files like this to create a kd-tree per sweep:
    pzfns = glob('/global/cscratch1/sd/rongpu/dr9_photoz/south/sweep-*.fits')
    pzfns.sort()
    for pzfn in pzfns:
        outfn = ('/global/cfs/cdirs/cosmo/webapp/viewer-dev/data/photoz/dr9-south/'
                 + os.path.basename(pzfn).replace('.fits', '.kd.fits'))
        if os.path.exists(outfn):
            print('Exists:', outfn)
            continue
        sweepfn = pzfn.replace('/global/cscratch1/sd/rongpu/dr9_photoz/south',
                               '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/south/sweep/9.0').replace(
                                   '-pz.fits', '.fits')
        PZ = fits_table(pzfn)
        if not np.any(PZ.z_phot_mean > -99):
            print('Skipping', pzfn)
            continue
        SW = fits_table(sweepfn, columns=['ra','dec'])
        assert(len(PZ) == len(SW))
        PZ.ra = SW.ra
        PZ.dec = SW.dec
        print(pzfn)
        print('Before cut:', len(PZ))
        PZ.cut(PZ.z_phot_mean > -99)
        print('After cut:', len(PZ))
        if len(PZ) == 0:
            continue
        PZ.writeto('/tmp/pz.fits')
        cmd = ('startree -i /tmp/pz.fits -o %s -PTk' % outfn)
        print(cmd)
        os.system(cmd)
    '''
    import numpy as np
    import json
    from astrometry.libkd.spherematch import tree_open, tree_search_radec
    from astrometry.util.fits import fits_table, merge_tables
    from astrometry.util.starutil_numpy import radectolb
    tag = 'photoz-dr9'
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    # Circle bounding the view box, used for the kd-tree searches.
    rc,dc,rad = radecbox_to_circle(ralo, rahi, declo, dechi)
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    TT = []
    # Sweep files tile the sky in 10 deg (RA) x 5 deg (Dec) cells;
    # enumerate the cells overlapping the view box.
    rastep = 10
    decstep = 5
    r1 = rastep * np.floor(ralo / rastep).astype(int)
    r2 = rastep * np.ceil (rahi / rastep).astype(int)
    d1 = decstep * np.floor(declo / decstep).astype(int)
    d2 = decstep * np.ceil (dechi / decstep).astype(int)
    # Too big an area?
    empty = json.dumps(dict(rd=[], phot_z_mean=[], phot_z_std=[]))
    if (d2 - d1) * (r2 - r1) > 200:
        return HttpResponse(empty, content_type='application/json')
    # North/south survey split: north covers dec >= 32.375 in the
    # northern galactic cap; south covers the rest.
    decsplit = 32.375
    for hemi in ['north', 'south']:
        for d in range(d1, d2, decstep):
            for r in range(r1, r2, rastep):
                # Sweep filenames encode signed declinations as p/m.
                dsign = 'p' if d >= 0 else 'm'
                d2sign = 'p' if (d+decstep) >= 0 else 'm'
                fn = os.path.join(settings.DATA_DIR, 'photoz', 'dr9-' + hemi,
                                  'sweep-%03i%s%03i-%03i%s%03i-pz.kd.fits' %
                                  (r, dsign, abs(d), r+rastep, d2sign, abs(d+decstep)))
                if not os.path.exists(fn):
                    print('No such file:', fn)
                    continue
                kd = tree_open(fn)
                I = tree_search_radec(kd, rc, dc, rad)
                print('Matched', len(I), 'from', fn)
                if len(I) == 0:
                    continue
                T = fits_table(fn, rows=I)
                # Trim each hemisphere's rows to its own footprint so
                # overlapping north/south sweeps do not double-count.
                ll,bb = radectolb(T.ra, T.dec)
                ngc = (bb > 0.)
                if hemi == 'north':
                    T.cut((T.dec >= decsplit) * ngc)
                else:
                    T.cut(np.logical_or(T.dec <= decsplit, np.logical_not(ngc)))
                if len(T) == 0:
                    continue
                # Finally cut the circle-search results to the exact box.
                T.cut((T.ra  >= ralo ) * (T.ra  <= rahi) *
                      (T.dec >= declo) * (T.dec <= dechi))
                if len(T) == 0:
                    continue
                TT.append(T)
    if len(TT) == 0:
        return HttpResponse(empty, content_type='application/json')
    T = merge_tables(TT)
    return HttpResponse(json.dumps(dict(
        rd=[(float(r),float(d)) for r,d in zip(T.ra, T.dec)],
        phot_z_mean=[float(z) for z in T.z_phot_mean],
        phot_z_std=[float(z) for z in T.z_phot_std],
    )), content_type='application/json')
def cat_phat_clusters(req, ver):
    """JSON catalog of PHAT (M31) star clusters in the requested RA,Dec box."""
    import json
    from astrometry.util.fits import fits_table, merge_tables
    tag = 'phat-clusters'
    ralo, rahi, declo, dechi = [float(req.GET[k])
                                for k in ('ralo', 'rahi', 'declo', 'dechi')]
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    cat = fits_table(os.path.join(settings.DATA_DIR, 'phat-clusters.fits'))
    inbox = ((cat.ra  >= ralo ) * (cat.ra  <= rahi) *
             (cat.dec >= declo) * (cat.dec <= dechi))
    cat.cut(inbox)
    payload = dict(
        name=[str(s.strip()) for s in cat.name],
        rd=[(float(o.ra), float(o.dec)) for o in cat],
        mag=[float(o.mag) for o in cat],
        young=[bool(o.young) for o in cat],
        velocity=[float(o.velocity) for o in cat],
        metallicity=[float(o.metallicity) for o in cat],
    )
    return HttpResponse(json.dumps(payload),
                        content_type='application/json')
def cat_gaia_edr3(req, ver):
    """Gaia EDR3 catalog overlay: delegates to the DR2 handler, pointed
    at the EDR3 healpix catalogs with nested indexing."""
    import legacypipe.gaiacat
    print('legacypipe.gaiacat:', legacypipe.gaiacat.__file__)
    return cat_gaia_dr2(req, ver,
                        catdir=os.path.join(settings.DATA_DIR, 'gaia-edr3'),
                        prefix='healpix', indexing='nested')
def cat_gaia_dr2(req, ver, catdir=None, prefix=None, indexing=None):
    """JSON catalog of Gaia DR2 sources in the requested RA,Dec box.

    catdir/prefix/indexing override the catalog location and healpix
    file layout, letting cat_gaia_edr3 reuse this handler.
    """
    import json
    from legacypipe.gaiacat import GaiaCatalog
    import numpy as np
    tag = 'gaia-dr2'
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    if catdir is None:
        catdir = os.path.join(settings.DATA_DIR, 'gaia-dr2')
    # GaiaCatalog locates its files via this environment variable.
    os.environ['GAIA_CAT_DIR'] = catdir
    kwa = {}
    if prefix is not None:
        kwa.update(file_prefix=prefix)
    if indexing is not None:
        kwa.update(indexing=indexing)
    gaia = GaiaCatalog(**kwa)
    cat = gaia.get_catalog_radec_box(ralo, rahi, declo, dechi)
    # Replace NaN/inf values with 0 so the columns are JSON-serializable.
    for c in ['ra','dec','phot_g_mean_mag','phot_bp_mean_mag', 'phot_rp_mean_mag',
              'pmra','pmdec','parallax',
              'pmra_error', 'pmdec_error', 'parallax_error',
              'astrometric_excess_noise']:
        val = cat.get(c)
        val[np.logical_not(np.isfinite(val))] = 0.
        cat.set(c, val)
    return HttpResponse(json.dumps(dict(
        rd=[(float(o.ra),float(o.dec)) for o in cat],
        sourceid=[str(o.source_id) for o in cat],
        gmag=[float(o.phot_g_mean_mag) for o in cat],
        bpmag=[float(o.phot_bp_mean_mag) for o in cat],
        rpmag=[float(o.phot_rp_mean_mag) for o in cat],
        pmra=[float(o.pmra) for o in cat],
        pmdec=[float(o.pmdec) for o in cat],
        parallax=[float(o.parallax) for o in cat],
        pmra_err=[float(o.pmra_error) for o in cat],
        pmdec_err=[float(o.pmdec_error) for o in cat],
        parallax_err=[float(o.parallax_error) for o in cat],
        astrometric_excess_noise=[float(o.astrometric_excess_noise) for o in cat],
    )),
    content_type='application/json')
def cat_sdss(req, ver):
    """JSON catalog of SDSS photoObj sources in the requested RA,Dec box.

    Finds SDSS fields near the box, reads their photoObj files, keeps
    primary (mode==1) objects, and reports per-object type ('P'oint /
    'C'omposite) and ugriz fluxes (PSF flux for point sources, cmodel
    flux otherwise).

    Fix: the photoObj column list contained 'objc_flags' twice.
    """
    import json
    import numpy as np
    from map.views import sdss_ccds_near
    from astrometry.util.fits import fits_table, merge_tables
    tag = 'sdss-cat'
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    rc,dc,rad = radecbox_to_circle(ralo, rahi, declo, dechi)
    # Pad the search radius by half the SDSS field diagonal (10'x14').
    rad = rad + np.hypot(10.,14.)/2./60.
    ccds = sdss_ccds_near(rc, dc, rad)
    if ccds is None:
        print('No SDSS CCDs nearby')
        return HttpResponse(json.dumps(dict(rd=[])),
                            content_type='application/json')
    print(len(ccds), 'SDSS CCDs')
    T = []
    for ccd in ccds:
        # env/BOSS_PHOTOOBJ/301/2073/3/photoObj-002073-3-0088.fits
        fn = os.path.join(settings.SDSS_BASEDIR, 'env', 'BOSS_PHOTOOBJ',
                          str(ccd.rerun), str(ccd.run), str(ccd.camcol),
                          'photoObj-%06i-%i-%04i.fits' % (ccd.run, ccd.camcol, ccd.field))
        print('Reading', fn)
        T.append(fits_table(fn, columns='ra dec objid mode objc_type objc_flags nchild tai expflux devflux psfflux cmodelflux fracdev mjd'.split()))
    T = merge_tables(T)
    T.cut((T.dec >= declo) * (T.dec <= dechi))
    # FIXME -- this RA cut does not handle RA wrap-around at 0/360.
    T.cut((T.ra >= ralo) * (T.ra <= rahi))
    # Keep only primary detections.
    T.cut(T.mode == 1)
    # objc_type 6 = point source ("star"); use PSF flux for those and
    # cmodel flux for everything else.
    types = ['P' if t == 6 else 'C' for t in T.objc_type]
    fluxes = [p if t == 6 else c for t,p,c in zip(T.objc_type, T.psfflux, T.cmodelflux)]
    return HttpResponse(json.dumps(dict(
        rd=[(float(o.ra),float(o.dec)) for o in T],
        sourcetype=types,
        fluxes = [dict(u=float(f[0]), g=float(f[1]), r=float(f[2]),
                       i=float(f[3]), z=float(f[4])) for f in fluxes],
    )),
    content_type='application/json')
def rename_cols(T):
    """Rename TARGET_RA/TARGET_DEC columns to ra/dec when the table has
    neither an 'ra' nor a 'dec' column of its own.

    Parameters
    ----------
    T : :class:`astrometry.util.fits.tabledata`
        A table data object, parsed from user upload

    Returns
    -------
    boolean
        true if renaming took place, false if otherwise
    """
    cols = T.columns()
    has_target = ('target_ra' in cols) and ('target_dec' in cols)
    has_plain = ('ra' in cols) or ('dec' in cols)
    if (not has_target) or has_plain:
        return False
    T.rename('target_ra', 'ra')
    T.rename('target_dec', 'dec')
    return True
def upload_cat(req):
    """Handle a user-uploaded custom catalog (FITS table or CSV).

    Saves the upload under settings.USER_QUERY_DIR, parses it (FITS
    first, then CSV converted to FITS), normalizes TARGET_RA/TARGET_DEC
    column names, and redirects to the viewer centered on the first row.
    Optional CATNAME/CATCOLOR primary-header cards are encoded into the
    catalog name passed along in the redirect URL.

    Fixes: dropped a pointless `t =` binding of astropy Table.write()'s
    None return; use `writeto` for consistency with the rest of this
    file (astrometry's tabledata provides both spellings).
    """
    import tempfile
    from astrometry.util.fits import fits_table
    from django.http import HttpResponseRedirect
    from map.views import index
    if req.method != 'POST':
        return HttpResponse('POST only')
    print('Files:', req.FILES)
    cat = req.FILES['catalog']
    dirnm = settings.USER_QUERY_DIR
    if not os.path.exists(dirnm):
        # Best-effort: a concurrent request may have created it already.
        try:
            os.makedirs(dirnm)
        except:
            pass
    # Reserve a unique filename, then let the upload re-create it.
    f,tmpfn = tempfile.mkstemp(suffix='.fits', dir=dirnm)
    os.close(f)
    os.unlink(tmpfn)
    print('Saving to', tmpfn)
    with open(tmpfn, 'wb+') as destination:
        for chunk in cat.chunks():
            destination.write(chunk)
    print('Wrote', tmpfn)
    errtxt = ('<html><body>%s<p>Custom catalogs must be either a: <ul>'
              + '<li><b>FITS binary table</b> with columns named "RA", "DEC" (not case sensitive) and optionally "NAME".'
              + '<li><b>CSV text file</b> with columns "RA", "DEC", and optionally "NAME" (also not case sensitive)</ul>'
              +'See <a href="https://www.legacysurvey.org/svtips/">Tips & Tricks</a> for some hints on how to produce such a catalog.</p></body></html>')
    T = None
    emsg = ''
    try:
        T = fits_table(tmpfn)
    except Exception as e:
        emsg = str(e)
    if T is None:
        # Not FITS -- try parsing as CSV and converting to FITS in place.
        try:
            from astropy.table import Table
            Table.read(tmpfn, format='ascii').write(tmpfn, overwrite=True)
            T = fits_table(tmpfn)
        except Exception as e:
            emsg += '; ' + str(e)
    if T is None:
        return HttpResponse(errtxt % ('Error: '+emsg))
    # Rename and resave columns if necessary
    if rename_cols(T):
        T.writeto(tmpfn)
    cols = T.columns()
    if not (('ra' in cols) and ('dec' in cols)):
        return HttpResponse(errtxt % '<p>Did not find column "RA" and "DEC" in table.</p>')
    # Center the viewer on the first row.
    ra,dec = T.ra[0], T.dec[0]
    catname = tmpfn.replace(dirnm, '').replace('.fits', '')
    if catname.startswith('/'):
        catname = catname[1:]
    # Optional display name / color from the FITS primary header.
    try:
        import fitsio
        primhdr = fitsio.read_header(tmpfn)
        name = primhdr.get('CATNAME', '')
        color = primhdr.get('CATCOLOR', '')
        if len(name):
            catname = catname + '-n%s' % name.strip().replace(' ','_')
        if len(color):
            catname = catname + '-c%s' % color.strip()
    except:
        pass
    from map.views import my_reverse
    return HttpResponseRedirect(my_reverse(req, index) +
                                '?ra=%.4f&dec=%.4f&catalog=%s' % (ra, dec, catname))
# Cache of per-layer "galaxies within this data release" FITS tables,
# keyed by layer name; lazily populated by get_random_galaxy().
galaxycats = {}
def get_random_galaxy(layer=None):
    """Return a random (ra, dec, name) of a known galaxy within the
    footprint of the given map layer.

    Loads (and caches in `galaxycats`) a per-layer 'galaxies-in-*' FITS
    catalog, optionally creating it on demand; falls back to a fixed
    galaxy when no catalog is available.

    Fix: for layer 'hsc2' no data-release number applies, but `drnum`
    was left unbound, raising UnboundLocalError when the catalog file
    was missing.  It is now initialized to None and guarded.
    """
    import numpy as np
    from map.views import layer_to_survey_name
    if layer is not None:
        layer = layer_to_survey_name(layer)
    global galaxycats
    # drnum stays None for layers with no associated data-release
    # number (e.g. hsc2); catalog creation is skipped in that case.
    drnum = None
    if layer == 'mzls+bass-dr6':
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-dr6.fits')
        drnum = 6
    elif layer == 'decals-dr7':
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-dr7.fits')
        drnum = 7
    elif layer == 'decals-dr5':
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-dr5.fits')
        drnum = 5
    elif layer == 'hsc2':
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-hsc2.fits')
    elif 'ls-dr8' in layer:
        drnum = 8
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-dr8.fits')
    elif layer == 'ls-dr9-north':
        drnum = 9
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-ls-dr9-north.fits')
    elif layer in ['ls-dr9-south', 'ls-dr9']:
        drnum = 9
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-ls-dr9-south.fits')
    elif 'ls-dr10-south' in layer:
        drnum = 10
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-ls-dr10-south.fits')
    elif 'ls-dr10' in layer:
        drnum = 10
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-ls-dr10.fits')
    else:
        drnum = 9
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-dr9.fits')
    if (not layer in galaxycats) and not os.path.exists(galfn):
        # Optionally build the catalog on the fly (best effort).
        if settings.CREATE_GALAXY_CATALOG and drnum is not None:
            try:
                create_galaxy_catalog(galfn, drnum)
            except:
                import traceback
                traceback.print_exc()
        if not os.path.exists(galfn):
            # Fall back to a fixed well-known galaxy.
            if drnum == 4:
                return 147.1744, 44.0812, 'NGC 2998'
            else:
                return 18.6595, -1.0210, 'NGC 442'
    if not layer in galaxycats:
        from astrometry.util.fits import fits_table
        galaxycats[layer] = fits_table(galfn)
    galaxycat = galaxycats[layer]
    i = np.random.randint(len(galaxycat))
    ra = float(galaxycat.ra[i])
    dec = float(galaxycat.dec[i])
    name = galaxycat.name[i].strip()
    return ra,dec,name
def create_galaxy_catalog(galfn, drnum, layer=None):
    """Build a 'galaxies within this data release' catalog and write it
    to `galfn`.

    Merges the NGC and IC catalogs shipped with astrometry.net, then
    keeps only objects falling within bricks of the given data release
    (drnum) or layer.  When a survey object is available, objects are
    additionally checked against the r-band nexp (exposure-count) maps.

    NOTE(review): only drnum 5/6/7 set `survey` below; for other drnum
    values with layer=None, `survey` is unbound when `bricks is None`
    and this raises -- callers guard with a try/except.
    """
    import astrometry.catalogs
    from astrometry.util.fits import fits_table, merge_tables
    import fitsio
    from astrometry.util.util import Tan
    from astrometry.libkd.spherematch import match_radec
    import numpy as np
    #fn = os.path.join(os.path.dirname(astrometry.catalogs.__file__), 'ngc2000.fits')
    fn = os.path.join(os.path.dirname(astrometry.catalogs.__file__), 'openngc-ngc.fits')
    NGC = fits_table(fn)
    print(len(NGC), 'NGC objects')
    NGC.name = np.array(['NGC %i' % n for n in NGC.ngcnum])
    NGC.delete_column('ngcnum')
    #fn = os.path.join(os.path.dirname(astrometry.catalogs.__file__), 'ic2000.fits')
    fn = os.path.join(os.path.dirname(astrometry.catalogs.__file__), 'openngc-ic.fits')
    IC = fits_table(fn)
    print(len(IC), 'IC objects')
    IC.name = np.array(['IC %i' % n for n in IC.icnum])
    IC.delete_column('icnum')
    # fn = os.path.join(settings.DATA_DIR, 'ugc.fits')
    # UGC = fits_table(fn)
    # print(len(UGC), 'UGC objects')
    # UGC.name = np.array(['UGC %i' % n for n in UGC.ugcnum])
    # UGC.delete_column('ugcnum')
    #T = merge_tables([NGC, IC, UGC])
    #T.writeto(os.path.join(settings.DATA_DIR, 'galaxy-cats.fits'))
    T = merge_tables([NGC, IC])
    # Debug copy of the merged NGC+IC catalog.
    T.writeto(os.path.join('/tmp/ngcic.fits'))
    keep = np.zeros(len(T), bool)
    from map.views import get_survey
    bricks = None
    if layer is not None:
        bricks = layer.get_bricks()
        name = layer.name
        survey = None
    else:
        name = 'dr%i' % drnum
        if drnum == 6:
            survey = get_survey('mzls+bass-dr6')
            bricks = survey.get_bricks()
            # DR6: only bricks with coverage in all of g,r,z.
            bricks.cut(bricks.has_g * bricks.has_r * bricks.has_z)
        elif drnum == 5:
            survey = get_survey('decals-dr5')
        elif drnum == 7:
            survey = get_survey('decals-dr7')
    if bricks is None:
        bricks = survey.get_bricks()
    # Keep only bricks near any NGC/IC object (0.25 deg).
    I,J,d = match_radec(bricks.ra, bricks.dec, T.ra, T.dec, 0.25, nearest=True)
    print('Matched', len(I), 'bricks near NGC objects')
    bricks.cut(I)
    for brick in bricks:
        I = np.flatnonzero((T.ra  >= brick.ra1 ) * (T.ra  < brick.ra2 ) *
                           (T.dec >= brick.dec1) * (T.dec < brick.dec2))
        print('Brick', brick.brickname, 'has', len(I), 'galaxies')
        if len(I) == 0:
            continue
        # NOTE(review): this marks all in-brick objects kept, which makes
        # the per-pixel nexp check below redundant -- possibly intentional
        # (brick membership is considered sufficient); verify.
        keep[I] = True
        if survey is None:
            continue
        # Refine using the r-band exposure-count map for the brick.
        fn = survey.find_file('nexp', brick=brick.brickname, band='r')
        if not os.path.exists(fn):
            print('Does not exist:', fn)
            continue
        nn,hdr = fitsio.read(fn, header=True)
        h,w = nn.shape
        #imgfn = survey.find_file('image', brick=brick.brickname, band='r')
        #wcs = Tan(imgfn)
        print('file', fn)
        wcs = Tan(hdr)
        ok,x,y = wcs.radec2pixelxy(T.ra[I], T.dec[I])
        # FITS pixels are 1-indexed; clip to the image bounds.
        x = np.clip((x-1).astype(int), 0, w-1)
        y = np.clip((y-1).astype(int), 0, h-1)
        n = nn[y,x]
        keep[I[n > 0]] = True
    T.cut(keep)
    fn = '/tmp/galaxies-in-%s.fits' % name
    T.writeto(fn)
    print('Wrote', fn)
    T.writeto(galfn)
def cat_targets_dr8(req, ver):
    """DESI main-survey targets selected from Legacy Surveys DR8."""
    kd = os.path.join(settings.DATA_DIR, 'targets-dr8-0.31.1-main.kd.fits')
    return cat_targets_drAB(req, ver, cats=[kd], tag='targets-dr8')
def cat_targets_sv_dr8(req, ver):
    """DESI SV targets selected from Legacy Surveys DR8 (sv1_ columns)."""
    kd = os.path.join(settings.DATA_DIR, 'targets-dr8-0.31.1-sv.kd.fits')
    return cat_targets_drAB(req, ver, cats=[kd], tag='targets-sv-dr8',
                            colprefix='sv1_')
def cat_targets_cmx_dr7(req, ver):
    """DESI commissioning (CMX) targets selected from DR7."""
    kd = os.path.join(settings.DATA_DIR, 'targets-cmx-0.27.0.kd.fits')
    return cat_targets_drAB(req, ver, cats=[kd], tag='targets-cmx-dr7',
                            color_name_func=desi_cmx_color_names)
def cat_targets_dr67(req, ver):
    """DESI targets from the combined DR6 + DR7 target selections."""
    kds = [os.path.join(settings.DATA_DIR, fn)
           for fn in ('targets-dr6-0.22.0.kd.fits',
                      'targets-dr7.1-0.29.0.kd.fits')]
    return cat_targets_drAB(req, ver, cats=kds, tag='targets-dr67')
def cat_targets_bgs_dr67(req, ver):
    '''Catalog view: DESI BGS targets on combined DR6 + DR7 imaging.'''
    catfns = [os.path.join(settings.DATA_DIR, fn)
              for fn in ('targets-dr6-0.22.0.kd.fits',
                         'targets-dr7.1-0.29.0.kd.fits')]
    return cat_targets_drAB(req, ver, cats=catfns, tag='targets-bgs-dr67',
                            bgs=True)
def cat_targets_sky_dr67(req, ver):
    '''Catalog view: DESI sky fibers on combined DR6 + DR7 imaging.'''
    catfns = [os.path.join(settings.DATA_DIR, fn)
              for fn in ('skies-dr6-0.22.0.kd.fits',
                         'skies-dr7.1-0.22.0.kd.fits')]
    return cat_targets_drAB(req, ver, cats=catfns, tag='targets-sky-dr67',
                            sky=True)
def cat_targets_bright_dr67(req, ver):
    '''Catalog view: DESI bright-program targets on DR6 + DR7 imaging.'''
    catfns = [os.path.join(settings.DATA_DIR, fn)
              for fn in ('targets-dr6-0.22.0.kd.fits',
                         'targets-dr7.1-0.29.0.kd.fits')]
    return cat_targets_drAB(req, ver, cats=catfns, tag='targets-bright-dr67',
                            bright=True)
def cat_targets_dark_dr67(req, ver):
    '''Catalog view: DESI dark-program targets on DR6 + DR7 imaging.'''
    catfns = [os.path.join(settings.DATA_DIR, fn)
              for fn in ('targets-dr6-0.22.0.kd.fits',
                         'targets-dr7.1-0.29.0.kd.fits')]
    return cat_targets_drAB(req, ver, cats=catfns, tag='targets-dark-dr67',
                            dark=True)
def cat_targets_dr8b(req, ver):
    '''Catalog view: DESI targets from the DR8b test catalog.'''
    catfn = os.path.join(settings.DATA_DIR, 'targets-dr8b-0.29.0.kd.fits')
    return cat_targets_drAB(req, ver, cats=[catfn], tag='targets-dr8b')
def cat_targets_dr8c(req, ver):
    '''Catalog view: DESI targets from the DR8c test catalog.'''
    catfn = os.path.join(settings.DATA_DIR, 'targets-dr8c-PR490.kd.fits')
    return cat_targets_drAB(req, ver, cats=[catfn], tag='targets-dr8c')
def cat_targets_dr9_sv1_sec_bright(req, ver):
    '''Catalog view: DR9 SV1 secondary targets, bright program.'''
    catfn = os.path.join(settings.DATA_DIR,
                         'targets-dr9-0.49.0-sv1-secondary-bright.kd.fits')
    return cat_targets_drAB(req, ver, cats=[catfn],
                            tag='targets-dr9-sv1-sec-bright',
                            name_func=desitarget_sv1_names, colprefix='sv1_',
                            color_name_func=None)
def cat_targets_dr9_sv1_sec_dark(req, ver):
    '''Catalog view: DR9 SV1 secondary targets, dark program.'''
    catfn = os.path.join(settings.DATA_DIR,
                         'targets-dr9-0.49.0-sv1-secondary-dark.kd.fits')
    return cat_targets_drAB(req, ver, cats=[catfn],
                            tag='targets-dr9-sv1-sec-dark',
                            name_func=desitarget_sv1_names, colprefix='sv1_',
                            color_name_func=None)
def cat_targets_healpixed(req, ver, tag, catpat, name_func=None, colprefix='', nside=8,
                          bgs=False, sky=False, bright=False, dark=False):
    '''Return a JSON catalog of DESI targets read from healpix-split kd-tree
    FITS files.

    req: Django request with GET params ralo, rahi, declo, dechi (degrees).
    ver: catalog version; must be listed in catversions[tag].
    catpat: filename pattern with a %i slot for the NESTED healpix index.
    name_func: optional callable(T, colprefix=...) returning display names.
    bgs / bright / dark: cut to rows with the corresponding targeting
    bitmask (colprefix-prefixed) column nonzero.
    (sky is accepted for signature compatibility; it is not used here.)
    '''
    import json
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    from astrometry.util.fits import fits_table, merge_tables
    from astrometry.libkd.spherematch import tree_open, tree_search_radec
    from astrometry.util.util import (healpix_rangesearch_radec,
                                      healpix_xy_to_nested,
                                      healpix_side_length_arcmin,
                                      healpix_to_radecdeg)
    from astrometry.util.starutil import degrees_between
    import numpy as np
    # Pad the search radius slightly so healpixes that barely touch the
    # RA,Dec box are not missed.
    rplus = 0.01 * healpix_side_length_arcmin(nside) / 60.
    rc,dc,rad = radecbox_to_circle(ralo, rahi, declo, dechi)
    print('RA,Dec', rc,dc, 'radius', rad, 'rplus', rplus)
    hps = healpix_rangesearch_radec(rc, dc, rad+rplus, nside)
    print('Healpixes', hps)
    TT = []
    for hp in hps:
        hr,hd = healpix_to_radecdeg(hp, nside, 0.5, 0.5)
        dist = degrees_between(hr,hd, rc,dc)
        print('Healpix center:', hr,hd)
        print('Dist from HP', hp, ':', dist)
        # Catalog files are named by the NESTED healpix index.
        hpx = healpix_xy_to_nested(hp, nside)
        fn = catpat % hpx
        if not os.path.exists(fn):
            print('No such file:', fn)
            continue
        kd = tree_open(fn)
        I = tree_search_radec(kd, rc, dc, rad)
        print('Matched', len(I), 'from', fn)
        if len(I) == 0:
            continue
        TT.append(fits_table(fn, rows=I))
    if len(TT) == 0:
        return HttpResponse(json.dumps(dict(rd=[], name=[])),
                            content_type='application/json')
    T = merge_tables(TT, columns='fillzero')
    # Cut to the RA,Dec rectangle, with a small margin.
    margin = (dechi - declo) * 0.05
    rmargin = margin / np.cos(np.deg2rad(dc))
    T.cut((T.dec > (declo-margin)) * (T.dec < (dechi+margin)))
    if ralo > rahi:
        # RA wrap-around.
        T.cut(np.logical_or(T.ra > (ralo-rmargin), T.ra < (rahi+rmargin)))
    else:
        T.cut((T.ra > (ralo-rmargin)) * (T.ra < (rahi+rmargin)))
    if bgs:
        bgs_target = T.get(colprefix + 'bgs_target')
        T.cut(bgs_target > 0)
    if bright:
        bgs_target = T.get(colprefix + 'bgs_target')
        mws_target = T.get(colprefix + 'mws_target')
        T.cut(np.logical_or(bgs_target > 0, mws_target > 0))
    if dark:
        # Bug fix: previously cut on T.desi_target, ignoring colprefix
        # (which broke prefixed catalogs such as sv1_/sv3_).
        desi_target = T.get(colprefix + 'desi_target')
        T.cut(desi_target > 0)
    names = None
    if name_func is not None:
        names = name_func(T, colprefix=colprefix)
    rtn = dict(rd=[(t.ra, t.dec) for t in T],
               # Convert targetid to string to prevent rounding errors in Javascript
               targetid=[str(t) for t in T.targetid])
    if names is not None:
        rtn.update(name=names)
    return HttpResponse(json.dumps(rtn), content_type='application/json')
def desitarget_main_names(T, colprefix='main_'):
    '''Return one comma-separated string of DESI Main Survey targeting-bit
    names per row of T.

    Bit names are resolved via the desitarget targetmask definitions from
    the (colprefix-prefixed) desi_target, bgs_target, mws_target and
    scnd_target bitmask columns; missing columns are treated as 0.  Rows
    with objtype 'SKY' or 'BAD' get those labels appended; rows matching
    no known bit fall back to the hex value of desi_target.
    '''
    from desitarget.targetmask import desi_mask, bgs_mask, mws_mask, scnd_mask
    # Bit-name key lists hoisted out of the per-row loop (they were
    # previously rebuilt for every row).
    desi_keys = ['LRG', 'ELG', 'QSO', 'QSO_HIZ', 'ELG_LOP', 'ELG_HIP', 'ELG_VLO',
                 'SKY', 'STD_FAINT', 'STD_WD', 'STD_BRIGHT', 'BAD_SKY', 'SUPP_SKY',
                 'NO_TARGET', 'BRIGHT_OBJECT', 'IN_BRIGHT_OBJECT', 'NEAR_BRIGHT_OBJECT',]
    bgs_keys = ['BGS_FAINT', 'BGS_BRIGHT', 'BGS_WISE', 'BGS_FAINT_HIP']
    mws_keys = ['MWS_BROAD', 'MWS_WD', 'MWS_NEARBY', 'MWS_BHB', 'MWS_MAIN_BLUE',
                'MWS_MAIN_RED', 'MWS_FAINT_BLUE', 'MWS_FAINT_RED', 'GAIA_STD_FAINT',
                'GAIA_STD_WD', 'GAIA_STD_BRIGHT', 'BACKUP_BRIGHT', 'BACKUP_FAINT',
                'BACKUP_VERY_FAINT']
    scnd_keys = ['UDG', 'FIRST_MALS', 'QSO_RED', 'MWS_CLUS_GAL_DEEP', 'LOW_MASS_AGN',
                 'FAINT_HPM', 'LOW_Z_TIER1', 'LOW_Z_TIER2', 'LOW_Z_TIER3', 'BHB', 'SPCV',
                 'DC3R2_GAMA', 'PSF_OUT_BRIGHT', 'PSF_OUT_DARK', 'HPM_SOUM', 'SN_HOSTS',
                 'GAL_CLUS_BCG', 'GAL_CLUS_2ND', 'GAL_CLUS_SAT', 'STRONG_LENS',
                 'WISE_VAR_QSO', 'Z5_QSO', 'MWS_MAIN_CLUSTER_SV', 'BRIGHT_HPM',
                 'WD_BINARIES_BRIGHT', 'WD_BINARIES_DARK', 'PV_BRIGHT_HIGH',
                 'PV_BRIGHT_MEDIUM', 'PV_BRIGHT_LOW', 'PV_DARK_HIGH', 'PV_DARK_MEDIUM',
                 'PV_DARK_LOW', 'GC_BRIGHT', 'GC_DARK', 'DWF_BRIGHT_HI', 'DWF_BRIGHT_LO',
                 'DWF_DARK_HI', 'DWF_DARK_LO', 'BRIGHT_TOO_LOP', 'BRIGHT_TOO_HIP',
                 'DARK_TOO_LOP', 'DARK_TOO_HIP']
    allnames = []
    cols = T.get_columns()
    for t in T:
        desi_target = int(t.get(colprefix + 'desi_target'))
        bgs_target = get_target_val(t, colprefix + 'bgs_target', cols)
        mws_target = get_target_val(t, colprefix + 'mws_target', cols)
        scnd_target = get_target_val(t, colprefix + 'scnd_target', cols)
        if 'objtype' in cols:
            obj = t.objtype
        else:
            obj = ''
        names = []
        # Look up each key's bit in its mask and test it against the
        # corresponding bitmask value for this row.
        for keys, mask, target in ((desi_keys, desi_mask, desi_target),
                                   (bgs_keys, bgs_mask, bgs_target),
                                   (mws_keys, mws_mask, mws_target),
                                   (scnd_keys, scnd_mask, scnd_target)):
            for k in keys:
                if mask[k].mask & target:
                    names.append(k)
        if obj == 'SKY':
            names.append('SKY')
        if obj == 'BAD':
            names.append('BAD')
        if len(names) == 0:
            # No recognized bits: report the raw desi_target value.
            names.append('0x%x' % desi_target)
        allnames.append(', '.join(names))
    return allnames
def cat_targets_dr9_main_sec_bright(req, ver):
    '''Catalog view: DR9 Main Survey secondary targets, bright program.

    kd-tree built with:
    startree -i .../dr9/0.58.0/targets/main/secondary/bright/targets-bright-secondary.fits
    -o data/targets-dr9-0.58.0-main-sec-bright.kd.fits -TPk
    '''
    catfn = os.path.join(settings.DATA_DIR,
                         'targets-dr9-0.58.0-main-sec-bright.kd.fits')
    return cat_targets_drAB(req, ver, cats=[catfn],
                            tag='targets-dr9-main-sec-bright',
                            name_func=desitarget_main_names, colprefix='',
                            color_name_func=None)
def cat_targets_dr9_main_sec_dark(req, ver):
    '''Catalog view: DR9 Main Survey secondary targets, dark program.

    kd-tree built with:
    startree -i .../dr9/0.58.0/targets/main/secondary/dark/targets-dark-secondary.fits
    -o data/targets-dr9-0.58.0-main-sec-dark.kd.fits -TPk
    '''
    catfn = os.path.join(settings.DATA_DIR,
                         'targets-dr9-0.58.0-main-sec-dark.kd.fits')
    return cat_targets_drAB(req, ver, cats=[catfn],
                            tag='targets-dr9-main-sec-dark',
                            name_func=desitarget_main_names, colprefix='',
                            color_name_func=None)
def cat_targets_dr9_main_dark(req, ver):
    '''Catalog view: DR9 Main Survey dark-program targets (healpix-split).

    kd-trees built with, for each healpix file:
    startree -i .../main/resolve/dark/targets-dark-hp-N.fits
    -o data/targets-dr9-0.58.0-main-dark/targets-dark-hp-N.kd.fits -TPk
    '''
    pattern = os.path.join(settings.DATA_DIR,
                           'targets-dr9-0.58.0-main-dark',
                           'targets-dark-hp-%i.kd.fits')
    return cat_targets_healpixed(req, ver, 'targets-dr9-main-dark', pattern,
                                 name_func=desitarget_main_names, colprefix='')
def cat_targets_dr9_main_bright(req, ver):
    '''Catalog view: DR9 Main Survey bright-program targets (healpix-split).

    kd-trees built with, for each healpix file:
    startree -i .../main/resolve/bright/targets-bright-hp-N.fits
    -o data/targets-dr9-0.58.0-main-bright/targets-bright-hp-N.kd.fits -TPk
    '''
    pattern = os.path.join(settings.DATA_DIR,
                           'targets-dr9-0.58.0-main-bright',
                           'targets-bright-hp-%i.kd.fits')
    return cat_targets_healpixed(req, ver, 'targets-dr9-main-bright', pattern,
                                 name_func=desitarget_main_names, colprefix='')
def cat_targets_dr9_sv3_sec_bright(req, ver):
    '''Catalog view: DR9 SV3 secondary targets, bright program.'''
    catfn = os.path.join(settings.DATA_DIR,
                         'targets-dr9-0.57.0-sv3-sec-bright.kd.fits')
    return cat_targets_drAB(req, ver, cats=[catfn],
                            tag='targets-dr9-sv3-sec-bright',
                            name_func=desitarget_sv3_names, colprefix='sv3_',
                            color_name_func=None)
def cat_targets_dr9_sv3_sec_dark(req, ver):
    '''Catalog view: DR9 SV3 secondary targets, dark program.'''
    catfn = os.path.join(settings.DATA_DIR,
                         'targets-dr9-0.57.0-sv3-sec-dark.kd.fits')
    return cat_targets_drAB(req, ver, cats=[catfn],
                            tag='targets-dr9-sv3-sec-dark',
                            name_func=desitarget_sv3_names, colprefix='sv3_',
                            color_name_func=None)
def cat_targets_dr9_sv3_dark(req, ver):
    '''Catalog view: DR9 SV3 dark-program targets (healpix-split).'''
    pattern = os.path.join(settings.DATA_DIR,
                           'targets-dr9-0.57.0-sv3-dark',
                           'sv3targets-dark-hp-%i.kd.fits')
    return cat_targets_healpixed(req, ver, 'targets-dr9-sv3-dark', pattern,
                                 name_func=desitarget_sv3_names, colprefix='sv3_')
def cat_targets_dr9_sv3_bright(req, ver):
    '''Catalog view: DR9 SV3 bright-program targets (healpix-split).'''
    pattern = os.path.join(settings.DATA_DIR,
                           'targets-dr9-0.57.0-sv3-bright',
                           'sv3targets-bright-hp-%i.kd.fits')
    return cat_targets_healpixed(req, ver, 'targets-dr9-sv3-bright', pattern,
                                 name_func=desitarget_sv3_names, colprefix='sv3_')
def cat_targets_dr9_sv1_dark(req, ver):
    '''Catalog view: DR9 SV1 dark-program targets (healpix-split).'''
    pattern = os.path.join(settings.DATA_DIR,
                           'targets-dr9-0.49.0-sv1-dark',
                           'sv1targets-dark-hp-%i.kd.fits')
    return cat_targets_healpixed(req, ver, 'targets-dr9-sv1-dark', pattern,
                                 name_func=desitarget_sv1_names, colprefix='sv1_')
def cat_targets_dr9_sv1_bright(req, ver):
    '''Catalog view: DR9 SV1 bright-program targets (healpix-split).'''
    pattern = os.path.join(settings.DATA_DIR,
                           'targets-dr9-0.49.0-sv1-bright',
                           'sv1targets-bright-hp-%i.kd.fits')
    return cat_targets_healpixed(req, ver, 'targets-dr9-sv1-bright', pattern,
                                 name_func=desitarget_sv1_names, colprefix='sv1_')
def cat_targets_dr9_sv1_supp(req, ver):
    '''Placeholder: DR9 SV1 supplemental target catalogs are not served yet.'''
    pass
# Candidate source catalogs for the SV1 views above / the supp placeholder:
# /global/cscratch1/sd/adamyers/dr9/0.47.0.dev4352/targets/sv1/resolve/dark/
# /global/cscratch1/sd/adamyers/dr9/0.47.0.dev4352/targets/sv1/resolve/bright/
# /global/cscratch1/sd/adamyers/gaiadr2/0.47.0.dev4352/targets/sv1/resolve/supp/
def desitarget_cmx_names(T):
    '''Return one comma-separated string of CMX targeting-bit names per row
    of T, from each row's cmx_target bitmask and objtype column.

    Rows with objtype 'SKY' or 'BAD' get those labels appended; unknown
    bits are silently skipped.
    '''
    # Bit -> name mapping from
    # https://github.com/desihub/desitarget/blob/master/py/desitarget/cmx/data/cmx_targetmask.yaml
    # Hoisted out of the per-row loop (it was previously rebuilt for
    # every row of T).
    cmx_bit_names = {
        0: 'STD_GAIA',
        1: 'SV0_STD_FAINT',
        2: 'SV0_STD_BRIGHT',
        3: 'STD_TEST',
        4: 'STD_CALSPEC',
        5: 'STD_DITHER',
        6: 'SV0_MWS_CLUSTER',
        7: 'SV0_MWS_CLUSTER_VERYBRIGHT',
        8: 'SV0_BGS',
        9: 'SV0_MWS',
        10: 'SV0_LRG',
        11: 'SV0_ELG',
        12: 'SV0_QSO',
        13: 'SV0_WD',
        14: 'SV0_QSO_Z5',
        15: 'BACKUP_BRIGHT',
        16: 'BACKUP_FAINT',
        18: 'M31_STD_BRIGHT',
        19: 'M31_H2PN',
        20: 'M31_GC',
        21: 'M31_QSO',
        22: 'M31_VAR',
        23: 'M31_BSPL',
        24: 'M31_M31cen',
        25: 'M31_M31out',
        26: 'ORI_STD_BRIGHT',
        27: 'ORI_QSO',
        28: 'ORI_ORI',
        29: 'ORI_HA',
        30: 'ROS_STD_BRIGHT',
        31: 'ROS_QSO',
        32: 'SKY',
        33: 'STD_FAINT',
        35: 'STD_BRIGHT',
        36: 'BAD_SKY',
        37: 'SUPP_SKY',
        38: 'ROS_ROSM17',
        39: 'ROS_ROS1',
        40: 'ROS_HA',
        41: 'ROS_ROS2',
        42: 'M33_STD_BRIGHT',
        43: 'M33_H2PN',
        44: 'M33_GC',
        45: 'M33_QSO',
        46: 'M33_M33cen',
        47: 'M33_M33out',
        53: 'MINI_SV_LRG',
        54: 'MINI_SV_ELG',
        55: 'MINI_SV_QSO',
        56: 'MINI_SV_BGS_BRIGHT',
        57: 'SV0_MWS_FAINT',
        58: 'STD_DITHER_GAIA',
    }
    names = []
    for t in T:
        cmx_target = int(t.cmx_target)
        obj = t.objtype
        # Named bits in ascending bit order, as before.
        bitnames = [cmx_bit_names[b] for b in range(64)
                    if ((1 << b) & cmx_target) and b in cmx_bit_names]
        if obj == 'SKY':
            bitnames.append('SKY')
        if obj == 'BAD':
            bitnames.append('BAD')
        names.append(', '.join(bitnames))
    return names
def get_target_val(t, nm, cols):
    '''Return int(t.get(nm)) when column *nm* is listed in *cols*, else 0.'''
    return int(t.get(nm)) if nm in cols else 0
def desitarget_sv3_names(T, colprefix='sv3_'):
    '''Return one comma-separated string of DESI SV3 targeting-bit names per
    row of T.

    Reads the (colprefix-prefixed) desi_target, bgs_target, mws_target and
    scnd_target bitmask columns; columns missing from T are treated as 0.
    Rows with objtype 'SKY' or 'BAD' get those labels appended; rows
    matching no known bit fall back to the hex value of desi_target.
    '''
    names = []
    # NOTE(review): `colors` is accumulated nowhere and unused; kept for
    # symmetry with desitarget_color_names (see also the commented return).
    colors = []
    cols = T.get_columns()
    for t in T:
        # Set-bit positions for each of the four targeting bitmasks.
        desibits = []
        bgsbits = []
        mwsbits = []
        secbits = []
        desi_target = int(t.get(colprefix + 'desi_target'))
        bgs_target = get_target_val(t, colprefix + 'bgs_target', cols)
        mws_target = get_target_val(t, colprefix + 'mws_target', cols)
        sec_target = get_target_val(t, colprefix + 'scnd_target', cols)
        if 'objtype' in cols:
            obj = t.objtype
        else:
            obj = ''
        for bit in range(64):
            if (1 << bit) & desi_target:
                desibits.append(bit)
            if (1 << bit) & bgs_target:
                bgsbits.append(bit)
            if (1 << bit) & mws_target:
                mwsbits.append(bit)
            if (1 << bit) & sec_target:
                secbits.append(bit)
        # Bit-position -> name tables; bits absent from a table map to None
        # and are filtered out below.
        # https://github.com/desihub/desitarget/blob/master/py/desitarget/sv1/data/sv1_targetmask.yaml
        desinames = [{
            0: 'LRG',
            1: 'ELG',
            2: 'QSO',
            3: 'LRG_LOWDENS',
            4: 'QSO_HIZ',
            5: 'ELG_LOP',
            6: 'ELG_HIP',
            #- Calibration targets
            32: 'SKY',
            33: 'STD_FAINT',
            34: 'STD_WD',
            35: 'STD_BRIGHT',
            36: 'BAD_SKY',
            37: 'SUPP_SKY',
            60: 'BGS_ANY',
            61: 'MWS_ANY',
            62: 'SCND_ANY',
        }.get(b) for b in desibits]
        bgsnames = [{
            0: 'BGS_FAINT',
            1: 'BGS_BRIGHT',
            2: 'BGS_WISE',
            3: 'BGS_FAINT_HIP',
        }.get(b) for b in bgsbits]
        mwsnames = [{
            0: 'MWS_BROAD',
            1: 'MWS_WD',
            2: 'MWS_NEARBY',
            #- (skip) 4: MWS_MAIN north/south splits
            6: 'MWS_BHB',
            33: 'GAIA_STD_FAINT',
            34: 'GAIA_STD_WD',
            35: 'GAIA_STD_BRIGHT',
            60: 'BACKUP_BRIGHT',
            61: 'BACKUP_FAINT',
            62: 'BACKUP_VERY_FAINT',
        }.get(b) for b in mwsbits]
        secondarynames = [{
            0: 'SCND_VETO',
            1: 'SCND_UDG',
            2: 'SCND_FIRST_MALS',
            5: 'SCND_QSO_RED',
            10: 'SCND_MWS_CLUS_GAL_DEEP',
            11: 'SCND_LOW_MASS_AGN',
            12: 'SCND_FAINT_HPM',
            15: 'SCND_LOW_Z_TIER1',
            16: 'SCND_LOW_Z_TIER2',
            17: 'SCND_LOW_Z_TIER3',
            18: 'SCND_BHB',
            19: 'SCND_SPCV',
            20: 'SCND_DC3R2_GAMA',
            25: 'SCND_PSF_OUT_BRIGHT',
            26: 'SCND_PSF_OUT_DARK',
            27: 'SCND_HPM_SOUM',
            28: 'SCND_SN_HOSTS',
            29: 'SCND_GAL_CLUS_BCG',
            30: 'SCND_GAL_CLUS_2ND',
            31: 'SCND_GAL_CLUS_SAT',
            34: 'SCND_STRONG_LENS',
            35: 'SCND_WISE_VAR_QSO',
            36: 'SCND_Z5_QSO',
            38: 'SCND_MWS_MAIN_CLUSTER_SV',
            40: 'SCND_BRIGHT_HPM',
            41: 'SCND_WD_BINARIES_BRIGHT',
            42: 'SCND_WD_BINARIES_DARK',
            43: 'SCND_PV_BRIGHT_HIGH',
            44: 'SCND_PV_BRIGHT_MEDIUM',
            45: 'SCND_PV_BRIGHT_LOW',
            46: 'SCND_PV_DARK_HIGH',
            47: 'SCND_PV_DARK_MEDIUM',
            48: 'SCND_PV_DARK_LOW',
            59: 'SCND_BRIGHT_TOO_LOP',
            60: 'SCND_BRIGHT_TOO_HIP',
            61: 'SCND_DARK_TOO_LOP',
            62: 'SCND_DARK_TOO_HIP',
        }.get(b) for b in secbits]
        # Drop bits with no table entry.
        bitnames = [n for n in desinames + bgsnames + mwsnames + secondarynames if n is not None]
        if obj == 'SKY':
            bitnames.append('SKY')
        if obj == 'BAD':
            bitnames.append('BAD')
        if len(bitnames) == 0:
            # No recognized bits: report the raw desi_target value.
            bitnames.append('0x%x' % desi_target)
        names.append(', '.join(bitnames))
    return names #, colors
def desitarget_sv1_names(T, colprefix='sv1_'):
    '''Return one comma-separated string of DESI SV1 targeting-bit names per
    row of T.

    Reads the (colprefix-prefixed) desi_target, bgs_target, mws_target and
    scnd_target bitmask columns; columns missing from T are treated as 0.
    Rows with objtype 'SKY' or 'BAD' get those labels appended; rows
    matching no known bit fall back to the hex value of desi_target.
    '''
    names = []
    # NOTE(review): `colors` is never populated; kept alongside the
    # commented-out color logic at the bottom of this function.
    colors = []
    cols = T.get_columns()
    for t in T:
        # Set-bit positions for each of the four targeting bitmasks.
        desibits = []
        bgsbits = []
        mwsbits = []
        secbits = []
        desi_target = int(t.get(colprefix + 'desi_target'))
        bgs_target = get_target_val(t, colprefix + 'bgs_target', cols)
        mws_target = get_target_val(t, colprefix + 'mws_target', cols)
        sec_target = get_target_val(t, colprefix + 'scnd_target', cols)
        if 'objtype' in cols:
            obj = t.objtype
        else:
            obj = ''
        for bit in range(64):
            if (1 << bit) & desi_target:
                desibits.append(bit)
            if (1 << bit) & bgs_target:
                bgsbits.append(bit)
            if (1 << bit) & mws_target:
                mwsbits.append(bit)
            if (1 << bit) & sec_target:
                secbits.append(bit)
        # Bit-position -> name tables; bits absent from a table map to None
        # and are filtered out below.
        # https://github.com/desihub/desitarget/blob/master/py/desitarget/sv1/data/sv1_targetmask.yaml
        desinames = [{
            0: 'LRG',
            1: 'ELG',
            2: 'QSO',
            3: 'LRG_OPT',
            4: 'LRG_IR',
            5: 'LRG_SV_OPT',
            6: 'LRG_SV_IR',
            7: 'LOWZ_FILLER',
            8: 'ELG_SV_GTOT',
            9: 'ELG_SV_GFIB',
            10: 'ELG_FDR_GTOT',
            11: 'ELG_FDR_GFIB',
            12: 'QSO_COLOR_4PASS',
            13: 'QSO_RF_4PASS',
            14: 'QSO_COLOR_8PASS',
            15: 'QSO_RF_8PASS',
            16: 'QSO_HZ_F',
            17: 'QSO_Z5',
            # (skip)
            #- North vs. South selections for different sub-classes
            #- Calibration targets
            32: 'SKY',
            33: 'STD_FAINT',
            34: 'STD_WD',
            35: 'STD_BRIGHT',
            36: 'BAD_SKY',
            37: 'SUPP_SKY',
            60: 'BGS_ANY',
            61: 'MWS_ANY',
            #62: 'SCND_ANY',
        }.get(b) for b in desibits]
        bgsnames = [{
            0: 'BGS_FAINT',
            1: 'BGS_BRIGHT',
            2: 'BGS_FAINT_EXT',
            3: 'BGS_LOWQ',
            4: 'BGS_FIBMAG',
            #- (skip) BGS North vs. South selections
            40: 'BGS_KNOWN_ANY',
        }.get(b) for b in bgsbits]
        mwsnames = [{
            0: 'MWS_MAIN_BROAD',
            1: 'MWS_WD',
            2: 'MWS_NEARBY',
            #- (skip) 4: MWS_MAIN north/south splits
            6: 'MWS_BHB',
            14: 'MWS_MAIN_FAINT',
        }.get(b) for b in mwsbits]
        secondarynames = [{
            0: 'SCND_VETO',
            1: 'SCND_UDG',
            2: 'SCND_FIRST_MALS',
            3: 'SCND_WD_BINARIES',
            4: 'SCND_LBG_TOMOG',
            5: 'SCND_QSO_RED',
            6: 'SCND_M31_KNOWN',
            7: 'SCND_M31_QSO',
            8: 'SCND_M31_STAR',
            10: 'SCND_MWS_CLUS_GAL_DEEP',
            11: 'SCND_LOW_MASS_AGN',
            12: 'SCND_FAINT_HPM',
            13: 'SCND_GW190412',
            14: 'SCND_IC134191',
            15: 'SCND_PV_BRIGHT',
            16: 'SCND_PV_DARK',
            17: 'SCND_LOW_Z',
            18: 'SCND_BHB',
            19: 'SCND_SPCV',
            20: 'SCND_DC3R2_GAMA',
            21: 'SCND_UNWISE_BLUE',
            22: 'SCND_UNWISE_GREEN',
            23: 'SCND_HETDEX_MAIN',
            24: 'SCND_HEXDEX_HP',
            27: 'SCND_HPM_SOUM',
            28: 'SCND_SN_HOSTS',
            29: 'SCND_GAL_CLUS_BCG',
            30: 'SCND_GAL_CLUS_2ND',
            31: 'SCND_GAL_CLUS_SAT',
            32: 'SCND_HSC_HIZ_SNE',
            33: 'SCND_ISM_CGM_QGP',
            34: 'SCND_STRONG_LENS',
            35: 'SCND_WISE_VAR_QSO',
            36: 'SCND_MWS_CALIB',
            37: 'SCND_BACKUP_CALIB',
            38: 'SCND_MWS_MAIN_CLUSTER_SV',
            39: 'SCND_MWS_RRLYR',
        }.get(b) for b in secbits]
        # Drop bits with no table entry.
        bitnames = [n for n in desinames + bgsnames + mwsnames + secondarynames if n is not None]
        if obj == 'SKY':
            bitnames.append('SKY')
        if obj == 'BAD':
            bitnames.append('BAD')
        if len(bitnames) == 0:
            # No recognized bits: report the raw desi_target value.
            bitnames.append('0x%x' % desi_target)
        names.append(', '.join(bitnames))
        # cc = 'white'
        # if 'QSO' in nn:
        #     cc = 'cyan'
        # elif 'LRG' in nn:
        #     cc = 'red'
        # elif 'ELG' in nn:
        #     cc = 'gray'
        # elif 'BGS' in nn:
        #     cc = 'orange'
        # colors.append(cc)
    return names #, colors
def desitarget_color_names(T, colprefix=''):
    '''Return (names, colors) for a DESI (pre-SV) target table.

    names: one comma-separated string of targeting-bit names per row, read
    from the (colprefix-prefixed) desi_target, bgs_target and mws_target
    bitmask columns.  Redundant names (eg ELG_NORTH when ELG is also set)
    are removed via the veto table below.
    colors: one CSS color string per row, chosen from the first matching
    class among QSO/LRG/ELG/BGS (default 'white').
    '''
    names = []
    colors = []
    for t in T:
        # Set-bit positions for each of the three targeting bitmasks.
        desibits = []
        bgsbits = []
        mwsbits = []
        desi_target = int(t.get(colprefix + 'desi_target'))
        bgs_target = int(t.get(colprefix + 'bgs_target'))
        mws_target = int(t.get(colprefix + 'mws_target'))
        for bit in range(64):
            if (1 << bit) & desi_target:
                desibits.append(bit)
            if (1 << bit) & bgs_target:
                bgsbits.append(bit)
            if (1 << bit) & mws_target:
                mwsbits.append(bit)
        # Bit-position -> name tables; bits absent from a table map to None
        # and are filtered out below.
        # https://github.com/desihub/desitarget/blob/master/py/desitarget/data/targetmask.yaml
        desinames = [{
            0: 'LRG',
            1: 'ELG',
            2: 'QSO',
            8: 'LRG_NORTH',
            9: 'ELG_NORTH',
            10: 'QSO_NORTH',
            16: 'LRG_SOUTH',
            17: 'ELG_SOUTH',
            18: 'QSO_SOUTH',
            32: 'SKY',
            33: 'STD_FSTAR',
            34: 'STD_WD',
            35: 'STD_BRIGHT',
            36: 'BADSKY',
            50: 'BRIGHT_OBJECT',
            51: 'IN_BRIGHT_OBJECT',
            52: 'NEAR_BRIGHT_OBJECT',
            60: 'BGS_ANY',
            61: 'MWS_ANY',
            62: 'ANCILLARY_ANY',
        }.get(b) for b in desibits]
        bgsnames = [{
            0: 'BGS_FAINT',
            1: 'BGS_BRIGHT',
            8: 'BGS_FAINT_NORTH',
            9: 'BGS_BRIGHT_NORTH',
            16: 'BGS_FAINT_SOUTH',
            17: 'BGS_BRIGHT_SOUTH',
            40: 'BGS_KNOWN_ANY',
            41: 'BGS_KNOWN_COLLIDED',
            42: 'BGS_KNOWN_SDSS',
            43: 'BGS_KNOWN_BOSS',
        }.get(b) for b in bgsbits]
        mwsnames = [{
            0: 'MWS_MAIN',
            1: 'MWS_WD',
            2: 'MWS_NEARBY',
            16: 'MWS_MAIN_VERY_FAINT',
        }.get(b) for b in mwsbits]
        bitnames = [n for n in desinames + bgsnames + mwsnames if n is not None]
        # If any of the names in value exists, remove the key in bitnames
        # Example: if 'ELG' exists, remove 'ELG_SOUTH' and 'ELG_NORTH'
        bitnames_veto = {
            'ELG_SOUTH': ['ELG'],
            'ELG_NORTH': ['ELG'],
            'QSO_SOUTH': ['QSO'],
            'QSO_NORTH': ['QSO'],
            'LRG_NORTH': ['LRG'],
            'LRG_SOUTH': ['LRG'],
            'BGS_FAINT_NORTH': ['BGS_FAINT'],
            'BGS_FAINT_SOUTH': ['BGS_FAINT'],
            'BGS_BRIGHT_NORTH': ['BGS_BRIGHT'],
            'BGS_BRIGHT_SOUTH': ['BGS_BRIGHT'],
            'BGS_ANY': ['BGS_FAINT', 'BGS_BRIGHT', 'BGS_FAINT_NORTH',
                        'BGS_BRIGHT_NORTH', 'BGS_FAINT_SOUTH', 'BGS_BRIGHT_SOUTH',
                        'BGS_KNOWN_ANY', 'BGS_KNOWN_COLLIDED', 'BGS_KNOWN_SDSS',
                        'BGS_KNOWN_BOSS'],
            'MWS_ANY': ['MWS_MAIN', 'MWS_WD', 'MWS_NEARBY', 'MWS_MAIN_VERY_FAINT'],
        }
        # Iterate over a copy (bitnames[:]) since we remove from bitnames
        # inside the loop.
        for name in bitnames[:]:
            # As described in the comment above, if any of the better_names
            # exist in bitnames, remove the current name
            if any([better_name in bitnames for better_name in bitnames_veto.get(name, [])]):
                bitnames.remove(name)
        names.append(', '.join(bitnames))
        # Color by substring match on the space-joined name string; first
        # match in QSO > LRG > ELG > BGS order wins.
        nn = ' '.join(bitnames)
        cc = 'white'
        if 'QSO' in nn:
            cc = 'cyan'
        elif 'LRG' in nn:
            cc = 'red'
        elif 'ELG' in nn:
            cc = 'gray'
        elif 'BGS' in nn:
            cc = 'orange'
        colors.append(cc)
    return names, colors
def desi_cmx_color_names(T, colprefix=None):
    '''Return (names, colors) for DESI commissioning (CMX) targets.

    names: one space-joined string of CMX bit names per row of T.cmx_target.
    colors: one CSS color per row (orange=BGS, cyan=MWS, gray=Gaia
    standards, white otherwise).
    colprefix is unused; accepted for call compatibility with
    desitarget_color_names.
    '''
    bit_names = [(0x1, 'STD_GAIA'),
                 (0x2, 'SV0_STD_BRIGHT'),
                 (0x4, 'STD_TEST'),
                 (0x8, 'CALSPEC'),
                 (0x100, 'SV0_BGS'),
                 (0x200, 'SV0_MWS'),]
    names = []
    colors = []
    for bits in T.cmx_target:
        bitnames = [name for bitval,name in bit_names if bits & bitval]
        nn = ' '.join(bitnames)
        names.append(nn)
        if 'BGS' in nn:
            cc = 'orange'
        elif 'MWS' in nn:
            cc = 'cyan'
        elif 'GAIA' in nn:
            # Bug fix: was "'Gaia' in nn", which could never match since
            # the bit names only ever contain the uppercase 'STD_GAIA'.
            cc = 'gray'
        else:
            cc = 'white'
        colors.append(cc)
    return names, colors
def cat_targets_drAB(req, ver, cats=None, tag='', bgs=False, sky=False, bright=False, dark=False, color_name_func=desitarget_color_names, colprefix='', name_func=None):
    '''
    Return a JSON catalog (HttpResponse) of DESI targets within the RA,Dec
    box given by GET parameters ralo, rahi, declo, dechi.

    cats: list of kd-tree FITS catalog filenames to search.
    color_name_func: function that selects names and colors for targets
    (eg based on targeting bit values)
    name_func: if given, its names override those from color_name_func.
    bgs / bright / dark: cut to rows with the corresponding
    (colprefix-prefixed) targeting bitmask column nonzero.
    sky: treat the catalogs as sky-fiber tables (report aperture fluxes).
    '''
    if cats is None:
        cats = []
    import json
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    from astrometry.util.fits import fits_table, merge_tables
    from astrometry.libkd.spherematch import tree_open, tree_search_radec
    import numpy as np
    rc,dc,rad = radecbox_to_circle(ralo, rahi, declo, dechi)
    # The kd-tree files are built with, eg:
    # startree -i .../targets-dr4-0.20.0.fits -o data/targets-dr4-0.20.0.kd.fits -P -k -T
    TT = []
    for fn in cats:
        kd = tree_open(fn)
        I = tree_search_radec(kd, rc, dc, rad)
        print('Matched', len(I), 'from', fn)
        if len(I) == 0:
            continue
        TT.append(fits_table(fn, rows=I))
    if len(TT) == 0:
        return HttpResponse(json.dumps(dict(rd=[], name=[])),
                            content_type='application/json')
    T = merge_tables(TT, columns='fillzero')
    if bgs:
        bgs_target = T.get(colprefix + 'bgs_target')
        T.cut(bgs_target > 0)
    if bright:
        bgs_target = T.get(colprefix + 'bgs_target')
        mws_target = T.get(colprefix + 'mws_target')
        T.cut(np.logical_or(bgs_target > 0, mws_target > 0))
    if dark:
        # Bug fix: previously cut on T.desi_target, ignoring colprefix
        # (which broke prefixed catalogs such as sv1_/sv3_).
        desi_target = T.get(colprefix + 'desi_target')
        T.cut(desi_target > 0)
    names = None
    colors = None
    if color_name_func is not None:
        names,colors = color_name_func(T, colprefix=colprefix)
    if name_func is not None:
        names = name_func(T, colprefix=colprefix)
    # targetid as strings to prevent 64-bit rounding errors in Javascript.
    rtn = dict(rd=[(t.ra, t.dec) for t in T],
               targetid=[str(t) for t in T.targetid])
    fluxes = None
    nobs = None
    if sky:
        # Sky fibers: report the first-aperture flux in each band.
        fluxes = [dict(g=float(g), r=float(r), z=float(z))
                  for (g,r,z) in zip(T.apflux_g[:,0], T.apflux_r[:,0], T.apflux_z[:,0])]
    else:
        if 'flux_g' in T.get_columns():
            if 'flux_w1' in T.get_columns():
                fluxes = [dict(g=float(g), r=float(r), z=float(z),
                               W1=float(W1), W2=float(W2))
                          for (g,r,z,W1,W2)
                          in zip(T.flux_g, T.flux_r, T.flux_z, T.flux_w1, T.flux_w2)]
            else:
                fluxes = [dict(g=float(g), r=float(r), z=float(z))
                          for (g,r,z)
                          in zip(T.flux_g, T.flux_r, T.flux_z)]
        if 'nobs_g' in T.get_columns():
            # Bug fix: a stray trailing comma previously turned this into a
            # 1-tuple wrapping the list, nesting an extra array in the JSON.
            nobs = [dict(g=int(g), r=int(r), z=int(z)) for g,r,z
                    in zip(T.nobs_g, T.nobs_r, T.nobs_z)]
    if names is not None:
        rtn.update(name=names)
    if colors is not None:
        rtn.update(color=colors)
    if nobs is not None:
        rtn.update(nobs=nobs)
    if fluxes is not None:
        rtn.update(fluxes=fluxes)
    return HttpResponse(json.dumps(rtn), content_type='application/json')
def cat_sga_parent(req, ver):
    '''Catalog view: SGA parent (v3.0) galaxy catalog.'''
    catfn = os.path.join(settings.DATA_DIR, 'sga', 'SGA-parent-v3.0.kd.fits')
    return _cat_sga(req, ver, fn=catfn, tag='sga')
def cat_sga_ellipse(req, ver):
    '''Catalog view: SGA-2020 ellipse catalog.

    Earlier versions served SGA-ellipse-v3.2 (optionally trimmed to a
    column subset with startree); we now serve the SGA-2020 kd-tree file.
    '''
    catfn = os.path.join(settings.DATA_DIR, 'sga', 'SGA-2020.kd.fits')
    print('Reading', catfn)
    return _cat_sga(req, ver, ellipse=True, fn=catfn, tag='sga')
def _cat_sga(req, ver, ellipse=False, fn=None, tag='sga'):
    '''Return a JSON catalog of SGA galaxies touching the RA,Dec box.'''
    import json
    import numpy as np
    # SGA galaxies have angular size, so we want every galaxy whose ellipse
    # touches the RA,Dec box, not just those whose centers fall inside it --
    # hence query_sga_radecbox() rather than the generic cat_kd().
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if ver not in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    T = query_sga_radecbox(fn, ralo, rahi, declo, dechi)
    if T is None:
        empty = dict(rd=[], name=[], radiusArcsec=[], abRatio=[],
                     posAngle=[], pgc=[], type=[], redshift=[])
        return HttpResponse(json.dumps(empty), content_type='application/json')
    # Largest galaxies first.
    T.cut(np.argsort(-T.radius_arcsec))
    rd = [(float(r), float(d)) for r, d in zip(T.ra, T.dec)]
    names = [g.strip() for g in T.galaxy]
    pgc = [int(p) for p in T.pgc]
    typ = [m.strip() if m != 'nan' else '' for m in T.get('morphtype')]
    radius = [float(r) for r in T.radius_arcsec.astype(np.float32)]
    ab = [float(b) for b in T.ba.astype(np.float32)]
    # Normalize position angles into [0, 180) and zero any non-finite values.
    pax = T.pa.copy().astype(np.float32)
    pax[np.logical_not(np.isfinite(pax))] = 0.
    pax[pax < 0] += 180.
    pax[pax >= 180.] -= 180.
    pa = [float(90. - f) for f in pax]
    pa_disp = [float(f) for f in pax]
    color = ['#377eb8' if ellipse else '#e41a1c'] * len(T)
    z = [float(zz) if np.isfinite(zz) else -1.
         for zz in T.z_leda.astype(np.float32)]
    groupnames = [g.strip() for g in T.group_name]
    payload = dict(rd=rd, name=names, radiusArcsec=radius,
                   groupname=groupnames,
                   abRatio=ab, posAngle=pa, pgc=pgc, type=typ,
                   redshift=z, color=color, posAngleDisplay=pa_disp)
    return HttpResponse(json.dumps(payload), content_type='application/json')
def query_sga_radecbox(fn, ralo, rahi, declo, dechi):
    '''Return SGA entries whose ellipses may touch the RA,Dec box, or None.'''
    ra, dec, radius = radecbox_to_circle(ralo, rahi, declo, dechi)
    # SGA galaxies can be large; pad the kd-tree search by a maximum
    # assumed galaxy radius (degrees).
    sga_radius = 2.0
    T = cat_query_radec(fn, ra, dec, radius + sga_radius)
    if T is None:
        return None
    wcs = radecbox_to_wcs(ralo, rahi, declo, dechi)
    H, W = wcs.shape
    # The diameter column name differs between SGA catalog flavors
    # ('diam' vs 'd26'); convert to a radius in arcsec either way.
    if 'diam' in T.get_columns():
        T.radius_arcsec = T.diam / 2. * 60.
    else:
        T.radius_arcsec = T.d26 / 2. * 60.
    # Keep entries whose circle could overlap the WCS pixel box.
    rpix = T.radius_arcsec / wcs.pixel_scale()
    ok, xx, yy = wcs.radec2pixelxy(T.ra, T.dec)
    touches = ((xx > -rpix) * (xx < W + rpix) *
               (yy > -rpix) * (yy < H + rpix))
    T.cut(touches)
    if len(T) == 0:
        return None
    return T
def cat_manga(req, ver):
    '''Return a JSON catalog of MaNGA IFU observations (DR16 drpall),
    including the hexagon outline and individual fiber positions of each IFU.
    '''
    import json
    import numpy as np
    # DR16 kd-tree built with:
    # startree -i data/manga/drpall-v2_4_3.fits -o data/manga/drpall-v2_4_3.kd.fits \
    #   -P -T -k -R ifura -D ifudec
    fn = os.path.join(settings.DATA_DIR, 'manga', 'drpall-v2_4_3.kd.fits')
    tag = 'manga'
    T = cat_kd(req, ver, tag, fn, racol='ifura', deccol='ifudec')
    if T is not None and len(T) > 0:
        # Keep only entries with a positive IFU design size.
        T.cut(T.ifudesignsize > 0)
        if len(T) == 0:
            T = None
    if T is None:
        return HttpResponse(json.dumps(dict(rd=[], name=[], mjd=[], fiber=[], plate=[])),
                            content_type='application/json')
    rd = [(float(r), float(d)) for r, d in zip(T.ifura, T.ifudec)]
    names = [t.strip() for t in T.nsa_iauname]
    plate = [int(x) for x in T.plate]
    ifudsgn = [int(x) for x in T.ifudsgn]
    z = [float(zz) for zz in T.z]
    ifusize = [int(x) for x in T.ifudesignsize]
    # IFU size (number of fibers) -> outer scale factor for the hexagon
    # outline.  Hoisted out of the loop (it was rebuilt per row).
    hex_scale = {127: 6,
                 91: 5,
                 61: 4,
                 37: 3,
                 19: 2}
    dradec = manga_ifu_offsets()
    hexes = []
    fibers = []
    for sz, (r, d) in zip(ifusize, rd):
        rr = hex_scale[sz]
        cosdec = np.cos(np.deg2rad(d))
        # Hexagon outline: the six innermost fiber offsets (dradec[1..6])
        # scaled outward; 7 vertices so the polygon closes.
        hexx = []
        for i in range(7):
            j = (i % 6) + 1
            hexx.append((float(r + (rr + 0.5) * dradec[j][0] / 3600. / cosdec),
                         float(d + (rr + 0.5) * dradec[j][1] / 3600.)))
        # Individual fiber positions: the first `sz` offsets.
        fibs = [(float(r + dr / 3600. / cosdec),
                 float(d + dd / 3600.))
                for dr, dd in dradec[:sz]]
        hexes.append(hexx)
        fibers.append(fibs)
    return HttpResponse(json.dumps(dict(rd=rd, name=names, plate=plate, ifudsgn=ifudsgn, z=z,
                                        hexes=hexes, fibers=fibers)),
                        content_type='application/json')
def manga_ifu_offsets():
    '''Return the 127 (dRA, dDec) fiber offsets, in arcsec, of the largest
    MaNGA IFU; smaller IFUs use a prefix of this list (see cat_manga,
    which also uses entries 1-6 to draw the hexagon outline).'''
    return [(0.0, 0.0), (-1.25, -2.16506), (1.25, -2.16506), (2.5, 0.0), (1.25, 2.16506), (-1.25, 2.16506), (-2.5, 0.0), (-2.5, -4.33013), (0.0, -4.33013), (2.5, -4.33013), (3.75, -2.16506), (5.0, 0.0), (3.75, 2.16506), (2.5, 4.33013), (0.0, 4.33013), (-2.5, 4.33013), (-3.75, 2.16506), (-5.0, 0.0), (-3.75, -2.16506), (-3.75, -6.49519), (-1.25, -6.49519), (1.25, -6.49519), (3.75, -6.49519), (5.0, -4.33013), (6.25, -2.16506), (7.5, 0.0), (6.25, 2.16506), (5.0, 4.33013), (3.75, 6.49519), (1.25, 6.49519), (-1.25, 6.49519), (-3.75, 6.49519), (-5.0, 4.33013), (-6.25, 2.16506), (-7.5, 0.0), (-6.25, -2.16506), (-5.0, -4.33013), (-5.0, -8.66025), (-2.5, -8.66025), (0.0, -8.66025), (2.5, -8.66025), (5.0, -8.66025), (6.25, -6.49519), (7.5, -4.33013), (8.75, -2.16506), (10.0, 0.0), (8.75, 2.16506), (7.5, 4.33013), (6.25, 6.49519), (5.0, 8.66025), (2.5, 8.66025), (0.0, 8.66025), (-2.5, 8.66025), (-5.0, 8.66025), (-6.25, 6.49519), (-7.5, 4.33013), (-8.75, 2.16506), (-10.0, 0.0), (-8.75, -2.16506), (-7.5, -4.33013), (-6.25, -6.49519), (-6.25, -10.8253), (-3.75, -10.8253), (-1.25, -10.8253), (1.25, -10.8253), (3.75, -10.8253), (6.25, -10.8253), (7.5, -8.66025), (8.75, -6.49519), (10.0, -4.33013), (11.25, -2.16506), (12.5, 0.0), (11.25, 2.16506), (10.0, 4.33013), (8.75, 6.49519), (7.5, 8.66025), (6.25, 10.8253), (3.75, 10.8253), (1.25, 10.8253), (-1.25, 10.8253), (-3.75, 10.8253), (-6.25, 10.8253), (-7.5, 8.66025), (-8.75, 6.49519), (-10.0, 4.33013), (-11.25, 2.16506), (-12.5, 0.0), (-11.25, -2.16506), (-10.0, -4.33013), (-8.75, -6.49519), (-7.5, -8.66025), (-7.5, -12.9904), (-5.0, -12.9904), (-2.5, -12.9904), (0.0, -12.9904), (2.5, -12.9904), (5.0, -12.9904), (7.5, -12.9904), (8.75, -10.8253), (10.0, -8.66025), (11.25, -6.49519), (12.5, -4.33013), (13.75, -2.16506), (15.0, 0.0), (13.75, 2.16506), (12.5, 4.33013), (11.25, 6.49519), (10.0, 8.66025), (8.75, 10.8253), (7.5, 12.9904), (5.0, 12.9904), (2.5, 12.9904), (0.0, 12.9904), (-2.5, 12.9904), (-5.0, 12.9904), (-7.5, 12.9904), (-8.75, 
10.8253), (-10.0, 8.66025), (-11.25, 6.49519), (-12.5, 4.33013), (-13.75, 2.16506), (-15.0, 0.0), (-13.75, -2.16506), (-12.5, -4.33013), (-11.25, -6.49519), (-10.0, -8.66025), (-8.75, -10.8253)]
def cat_spec(req, ver):
    '''Return a JSON catalog of SDSS DR16 spectra, optionally filtered by
    the 'plate' GET parameter.'''
    import json
    fn = os.path.join(settings.DATA_DIR, 'sdss', 'specObj-dr16-trimmed.kd.fits')
    tag = 'spec'
    T = cat_kd(req, ver, tag, fn)
    if T is None:
        empty = dict(rd=[], name=[], mjd=[], fiber=[],
                     plate=[], zwarning=[])
        return HttpResponse(json.dumps(empty), content_type='application/json')
    plate = req.GET.get('plate', None)
    if plate is not None:
        T.cut(T.plate == int(plate, 10))
    payload = dict(
        rd=[(float(r), float(d)) for r, d in zip(T.ra, T.dec)],
        name=[t.strip() for t in T.label],
        mjd=[int(x) for x in T.mjd],
        fiber=[int(x) for x in T.fiberid],
        plate=[int(x) for x in T.plate],
        zwarning=[int(x) for x in T.zwarning])
    return HttpResponse(json.dumps(payload), content_type='application/json')
def cat_masks_dr9(req, ver):
    """Return DR9 reference-source masks in the query RA,Dec box as JSON.

    Builds the legacypipe reference-source list (Tycho-2 bright stars,
    clusters, SGA large galaxies, Gaia medium/bright sources) for a WCS
    covering the box, then emits per-source position, mask radius,
    display color, name, axis ratio and position angle.  Bright sources
    get a second, half-radius entry drawn in orange.
    """
    import json
    import os
    import numpy as np
    from legacypipe.reference import get_reference_sources
    from legacypipe.survey import LegacySurveyData
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    wcs = radecbox_to_wcs(ralo, rahi, declo, dechi)
    # Point legacypipe at local copies of the reference catalogs via env vars.
    os.environ['TYCHO2_KD_DIR'] = settings.DATA_DIR
    #os.environ['LARGEGALAXIES_CAT'] = os.path.join(settings.DATA_DIR, 'sga', 'SGA-v7.0.kd.fits')
    os.environ['LARGEGALAXIES_CAT'] = os.path.join(settings.DATA_DIR, 'sga', 'SGA-ellipse-v3.0.kd.fits')
    os.environ['GAIA_CAT_DIR'] = os.path.join(settings.DATA_DIR, 'gaia-dr2')
    os.environ['GAIA_CAT_VER'] = '2'
    os.environ['GAIA_CAT_SCHEME'] = 'ring'
    os.environ['GAIA_CAT_PREFIX'] = 'chunk'
    survey = LegacySurveyData(survey_dir=os.getcwd())
    pixscale = wcs.pixel_scale()
    T,_ = get_reference_sources(survey, wcs, pixscale, None)
    # NOTE(review): T.about() is called before the None check below;
    # presumably get_reference_sources never returns None here -- confirm.
    T.about()
    if T is None:
        return HttpResponse(json.dumps(dict(rd=[], name=[], radiusArcsec=[])),
                            content_type='application/json')
    from functools import reduce
    #T.cut(reduce(np.logical_or, [T.isbright, T.iscluster, T.islargegalaxy, T.ismedium]))
    # Keep only sources belonging to at least one mask category.
    T.cut(reduce(np.logical_or, [T.isbright, T.iscluster, T.islargegalaxy, T.ismedium, T.isgaia]))
    # sort by radius to improve the layering
    T.cut(np.argsort(-T.radius))
    rd = []
    radius = []
    color = []
    ab = []
    PA = []
    PA_disp = []
    names = []
    for medium, bright,cluster,gal,dup,ptsrc,aen,ra,dec,rad,mag,zguess,freeze,refid,ba,pa in zip(
            T.ismedium, T.isbright, T.iscluster, T.islargegalaxy, T.donotfit, T.pointsource,
            T.astrometric_excess_noise, T.ra, T.dec, T.radius,
            T.mag, T.zguess, T.freezeparams, T.ref_id, T.ba, T.pa):
        rd.append((float(ra), float(dec)))
        # Mask radius in arcsec (catalog stores degrees).
        radius.append(3600. * float(rad))
        # Color encodes category: grey=duplicate, yellow=cluster,
        # green=SGA galaxy, blue=medium point source, purple=medium other.
        if dup:
            color.append('#aaaaaa')
        elif cluster:
            color.append('yellow')
        #elif bright:
        #    color.append('orange')
        elif gal:
            color.append('#33ff88')
        elif medium and ptsrc:
            color.append('#3388ff')
        elif medium:
            color.append('#8833ff')
        else:
            color.append('#888888')
        if dup:
            names.append('DUP')
        elif cluster:
            names.append('CLUSTER')
        elif gal:
            # freezeparams, ref_id
            name = 'SGA %i' % refid
            # We're not pointing to the 'model' version
            #if freeze:
            #    name += ' (frozen)'
            names.append(name)
        elif medium:
            if bright:
                name = 'BRIGHT mag=%.2f' % mag
            else:
                name = 'MEDIUM G=%.2f' % mag
            # '*' acts as boolean AND here; only report a finite zguess that
            # passes the zguess+1 < mag screen.
            if np.isfinite(zguess) * (zguess+1 < mag):
                zg = ', zguess=%.2f' % (zguess)
                name += zg
            if ptsrc:
                name += ', ptsrc'
            name += ', aen=%.2g' % aen
            oname = name
            if bright:
                name = 'MED/'+name
            names.append(name)
        else:
            if ptsrc:
                names.append('ptsrc')
            else:
                names.append('')
        # Guard against degenerate axis ratio; normalize PA into [0, 180).
        if ba == 0.:
            ba = 1.
        ab.append(float(ba))
        if not np.isfinite(pa):
            pa = 0.
        if pa < 0:
            pa += 180.
        if pa >= 180.:
            pa -= 180.
        PA.append(float(90.-pa))
        PA_disp.append(float(pa))
        if bright:
            # Double entry at half radius!
            # NOTE(review): 'oname' is only assigned in the 'medium' branch
            # above; a bright source that is a DUP/CLUSTER/SGA would reuse a
            # stale or unset value here -- confirm bright implies medium.
            rd.append((float(ra), float(dec)))
            radius.append(0.5 * 3600. * float(rad))
            color.append('orange')
            names.append(oname)
            ab.append(float(ba))
            PA.append(float(90.-pa))
            PA_disp.append(float(pa))
    return HttpResponse(json.dumps(dict(rd=rd, name=names, radiusArcsec=radius, color=color,
                                        abRatio=ab, posAngle=PA, posAngleDisplay=PA_disp)),
                        content_type='application/json')
def cat_gaia_mask(req, ver):
    """Return Gaia masking sources (DR8-style) in the query box as JSON.

    The kd-tree catalog file was prepared with:
      fitscopy data/gaia-mask.fits"[col ra;dec;ref_cat;ref_id;radius;phot_g_mean_mag;pointsource;ismedium;isbright]" data/gaia-mask-sub.fits
      startree -i data/gaia-mask-sub.fits -o data/gaia-mask.kd.fits -P -T -k
    """
    import json
    kdfn = os.path.join(settings.DATA_DIR, 'gaia-mask.kd.fits')
    T = cat_kd(req, ver, 'masks-dr8', kdfn)
    if T is None:
        return HttpResponse(json.dumps(dict(rd=[], name=[], radiusArcsec=[])),
                            content_type='application/json')
    payload = dict(
        rd=[(float(r), float(d)) for r, d in zip(T.ra, T.dec)],
        name=['G=%.2f' % g for g in T.phot_g_mean_mag],
        radiusArcsec=[3600. * float(r) for r in T.radius],
        # Bright-star masks in orange, medium sources in blue.
        color=['orange' if b else '#3388ff' for b in T.isbright],
    )
    return HttpResponse(json.dumps(payload), content_type='application/json')
def cat_hsc_dr2_cosmos(req, ver):
    """Return HSC DR2 COSMOS catalog entries in the query box as JSON."""
    import json
    import numpy as np
    kdfn = os.path.join(settings.DATA_DIR, 'hsc-dr2', 'cosmos-cat.kd.fits')
    T = cat_kd(req, ver, 'hsc-dr2-cosmos', kdfn)
    if T is None:
        return HttpResponse(json.dumps(dict(rd=[], name=[], color=[])),
                            content_type='application/json')
    # Cap the payload size for dense fields.
    if len(T) > 5000:
        T = T[:5000]
    rd = [(float(r), float(d)) for r, d in zip(T.ra, T.dec)]
    # AB magnitudes from fluxes (zeropoint 30), per band, for PSF and cmodel.
    psf = {b: 30. - 2.5 * np.log10(T.get('%s_psfflux_flux' % b)) for b in 'griz'}
    cm = {b: 30. - 2.5 * np.log10(T.get('%s_cmodel_flux' % b)) for b in 'griz'}
    # i-band extendedness: in the COSMOS region this is strictly 0.0 or 1.0.
    isgal = (T.i_extendedness_value > 0.5)
    color = ['orange' if ext else '#3388ff' for ext in isgal]
    # Report cmodel magnitudes for galaxies, PSF magnitudes for stars.
    mag = {b: np.where(isgal, cm[b], psf[b]) for b in 'griz'}
    names = ['%s g=%.2f, r=%.2f, i=%.2f, z=%.2f' %
             (('Galaxy' if ext else 'Star'), gg, rr, ii, zz)
             for ext, gg, rr, ii, zz in zip(isgal, mag['g'], mag['r'], mag['i'], mag['z'])]
    return HttpResponse(json.dumps(dict(rd=rd, name=names, color=color)),
                        content_type='application/json')
def cat_kd(req, ver, tag, fn, racol=None, deccol=None):
    """Query kd-tree catalog *fn* for sources in the request's RA,Dec box.

    Returns a fits_table cut to the box, or None if nothing matched.
    Optional racol/deccol name the RA/Dec columns when they are not
    'ra'/'dec'.
    """
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if ver not in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    # Search the kd-tree with a bounding circle, then trim to the exact box.
    rc, dc, rad = radecbox_to_circle(ralo, rahi, declo, dechi)
    T = cat_query_radec(fn, rc, dc, rad)
    if T is None:
        debug('No objects in query')
        return None
    if racol is not None:
        T.ra = T.get(racol)
    if deccol is not None:
        T.dec = T.get(deccol)
    if ralo > rahi:
        import numpy as np
        # The box wraps through RA=0.
        T.cut(np.logical_or(T.ra > ralo, T.ra < rahi) * (T.dec > declo) * (T.dec < dechi))
    else:
        T.cut((T.ra > ralo) * (T.ra < rahi) * (T.dec > declo) * (T.dec < dechi))
    return T
def radecbox_to_wcs(ralo, rahi, declo, dechi):
    """Build a TAN WCS covering the given RA,Dec box.

    The WCS is centered on the box's bounding-circle center, 1000 pixels
    wide, with RA increasing to the left (negative CD1_1) and the pixel
    scale set by the box's angular width.  Height follows from the box's
    angular height at the same scale.
    """
    # The original also imported radectoxyz/xyztoradec/numpy and made four
    # radec2pixelxy() calls whose results were discarded (debug leftovers);
    # those pure computations are removed here.
    from astrometry.util.starutil_numpy import degrees_between
    from astrometry.util.util import Tan
    rc, dc, radius = radecbox_to_circle(ralo, rahi, declo, dechi)
    # Angular width/height of the box, measured through its center.
    wd = degrees_between(ralo, dc, rahi, dc)
    hd = degrees_between(rc, declo, rc, dechi)
    W = 1000.
    pixsc = wd / W
    H = hd / pixsc
    wcs = Tan(rc, dc, (W+1.)/2., (H+1.)/2., -pixsc, 0., 0., pixsc,
              float(W), float(H))
    return wcs
def radecbox_to_circle(ralo, rahi, declo, dechi):
    """Return (ra, dec, radius_deg) of a circle containing the RA,Dec box.

    The center is the unit-sphere midpoint of two opposite corners; the
    radius is the larger of the center's distances to those corners.
    """
    from astrometry.util.starutil_numpy import radectoxyz, xyztoradec, degrees_between
    import numpy as np
    corner1 = radectoxyz(ralo, declo)
    corner2 = radectoxyz(rahi, dechi)
    # Midpoint of the corners, renormalized back onto the unit sphere.
    mid = (corner1 + corner2) / 2.
    mid /= np.sqrt(np.sum(mid**2))
    rc, dc = xyztoradec(mid)
    rc, dc = rc[0], dc[0]
    radius = max(degrees_between(rc, dc, ralo, declo),
                 degrees_between(rc, dc, rahi, dechi))
    return rc, dc, radius
def cat_query_radec(kdfn, ra, dec, radius):
    """Search kd-tree file *kdfn* for rows within *radius* deg of (ra, dec).

    Returns a fits_table of the matching rows, or None when nothing matches.
    """
    from astrometry.libkd.spherematch import tree_open, tree_search_radec
    from astrometry.util.fits import fits_table
    kd = tree_open(kdfn)
    rows = tree_search_radec(kd, ra, dec, radius)
    if len(rows) == 0:
        return None
    return fits_table(kdfn, rows=rows)
def cat_spec_deep2(req, ver):
    """Return DEEP2 DR4 spectra in the query RA,Dec box as JSON.

    Each source's display name is built from its class and subclass, plus
    the best redshift and quality flag when meaningful.  (Removed: unused
    local ``TT`` and unused ``merge_tables`` import.)
    """
    import json
    tag = 'spec-deep2'
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    from astrometry.util.fits import fits_table
    import numpy as np
    T = fits_table(os.path.join(settings.DATA_DIR, 'deep2-zcat-dr4-uniq.fits'))
    debug(len(T), 'spectra')
    if ralo > rahi:
        # RA wrap
        T.cut(np.logical_or(T.ra > ralo, T.ra < rahi) * (T.dec > declo) * (T.dec < dechi))
    else:
        T.cut((T.ra > ralo) * (T.ra < rahi) * (T.dec > declo) * (T.dec < dechi))
    debug(len(T), 'in cut')
    rd = list((float(r),float(d)) for r,d in zip(T.ra, T.dec))
    names = []
    classes = T.get('class')
    subclasses = T.subclass
    zbests = T.zbest
    zq = T.zquality
    for i in range(len(T)):
        # Title-case the class name (e.g. 'GALAXY' -> 'Galaxy').
        clazz = classes[i]
        clazz = clazz[0] + clazz[1:].lower()
        nm = clazz
        sc = subclasses[i].strip()
        if sc != 'NONE':
            nm += ' ' + sc
        # Stars with quality -1 have no meaningful redshift to report.
        if not (zq[i] == -1 and clazz.strip() == 'Star'):
            nm += ' z=%.2f, q=%i' % (zbests[i], zq[i])
        names.append(nm)
    return HttpResponse(json.dumps(dict(rd=rd, name=names)),
                        content_type='application/json')
def cat_user(req, ver):
    """Serve a user-uploaded catalog (from settings.USER_QUERY_DIR) as JSON.

    The 'cat' query parameter names the catalog file.  The table is trimmed
    either to an RA,Dec box ({ra,dec}{lo,hi}) or to an index range (start, N).
    Optional columns (name, type, g/r/z mags or flux_* columns, *nobs, objid,
    brickname, radius, color, abratio, posangle) are passed through.
    """
    from astrometry.util.fits import fits_table
    import json
    import re
    catname = str(req.GET.get('cat'))
    # 'catname' is interpolated into a filesystem path below, so restrict it
    # to word characters.  (The previous pattern, '\w?', matched *every*
    # string -- including ones containing '../' -- making this check a no-op
    # and allowing path traversal.)
    if not re.fullmatch(r'\w+', catname):
        print('Catalog "%s" did not match regex' % catname)
        return
    haverd = False
    havei = False
    if ('ralo' in req.GET and 'rahi' in req.GET and
        'declo' in req.GET and 'dechi' in req.GET):
        ralo = float(req.GET['ralo'])
        rahi = float(req.GET['rahi'])
        declo = float(req.GET['declo'])
        dechi = float(req.GET['dechi'])
        haverd = True
    elif ('start' in req.GET and 'N' in req.GET):
        start = int(req.GET['start'])
        N = int(req.GET['N'])
        havei = True
    else:
        return HttpResponse('need {ra,dec}{lo,hi} or start,N')
    fn = os.path.join(settings.USER_QUERY_DIR, catname+'.fits')
    if not os.path.exists(fn):
        print('Does not exist:', fn)
        return
    cat = fits_table(fn)
    if haverd:
        if ralo > rahi:
            import numpy as np
            # RA wrap
            cat.cut(np.logical_or(cat.ra > ralo, cat.ra < rahi) *
                    (cat.dec > declo) * (cat.dec < dechi))
        else:
            cat.cut((cat.ra > ralo) * (cat.ra < rahi) *
                    (cat.dec > declo) * (cat.dec < dechi))
        print(len(cat), 'user catalog sources after RA,Dec cut')
    elif havei:
        cat = cat[start:start+N]
    rd = list(zip(cat.ra.astype(float), cat.dec.astype(float)))
    D = dict(rd=rd)
    cols = cat.columns()
    if 'name' in cols:
        D.update(names=cat.name.tolist())
    if 'type' in cols:
        try:
            # One-letter source types; check they survive JSON encoding
            # before committing them to the payload.
            v = list([t[0] for t in cat.get('type')])
            json.dumps(v)
            D.update(sourcetype=v)
        except:
            print('failed to convert column "type". Traceback:')
            import traceback
            traceback.print_exc()
    if 'g' in cols and 'r' in cols and 'z' in cols:
        # Magnitude columns: convert to linear fluxes (AB zeropoint 22.5).
        D.update(fluxes=[dict(g=float(g), r=float(r), z=float(z))
                         for g,r,z in zip(10.**((cat.g - 22.5)/-2.5),
                                          10.**((cat.r - 22.5)/-2.5),
                                          10.**((cat.z - 22.5)/-2.5))])
    else:
        # Otherwise pass through whichever flux_* columns are present.
        fluxbands = []
        fluxes = []
        for band in 'griz':
            if 'flux_'+band in cols:
                fluxbands.append(band)
                fluxes.append(cat.get('flux_'+band))
        if len(fluxbands) > 0:
            allfluxes = []
            for srcfluxes in zip(*fluxes):
                allfluxes.append(dict(zip(fluxbands, [float(f) for f in srcfluxes])))
            D.update(fluxes = allfluxes)
    if 'gnobs' in cols and 'rnobs' in cols and 'znobs' in cols:
        D.update(nobs=[dict(g=int(g), r=int(r), z=int(z))
                       for g,r,z in zip(cat.gnobs, cat.rnobs, cat.znobs)])
    if 'objid' in cols:
        D.update(objids=[int(x) for x in cat.objid])
    if 'brickname' in cols:
        D.update(bricknames=cat.brickname.tolist())
    if 'radius' in cols:
        D.update(radius=list([float(r) for r in cat.radius]))
    if 'color' in cols:
        D.update(color=list([c.strip() for c in cat.color]))
    if 'abratio' in cols:
        D.update(abratio=list([float(r) for r in cat.abratio]))
    if 'posangle' in cols:
        D.update(posangle=list([float(r) for r in cat.posangle]))
    # NaN is not valid JSON; emit nulls instead.
    return HttpResponse(json.dumps(D).replace('NaN','null'),
                        content_type='application/json')
def desi_fiberassign_filename(tileid):
    """Path of the fiberassign file for a DESI tile, sharded by tile-id prefix."""
    tilestr = '%06i' % tileid
    return os.path.join(settings.DATA_DIR, 'desi-tiles', tilestr[:3],
                        'fiberassign-%s.fits.gz' % tilestr)
def cat_desi_all_tiles(req, subset, ver):
    """Return DESI main-survey tile centers overlapping the query box as JSON.

    *subset* may be 'dark' or 'bright' to restrict by program; any other
    value returns all in-DESI tiles.  Each tile is emitted as a named
    circle of radius 1.6 deg.
    """
    import numpy as np
    import json
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    tag = 'desi-all-tiles'
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    from astropy.table import Table
    # NOTE(review): relative path, unlike the settings.DATA_DIR joins used
    # elsewhere in this file -- confirm the working-directory assumption.
    t = Table.read('data/tiles-main.ecsv')
    from astrometry.util.fits import fits_table
    T = fits_table()
    T.tileid = t['TILEID'].data
    T.ra = t['RA'].data
    T.dec = t['DEC'].data
    T.in_desi = t['IN_DESI'].data
    T.program = t['PROGRAM'].data
    T.cut(T.in_desi)
    # Pad the box so tiles whose centers lie just outside still appear.
    margin = 0.8
    # not exactly right...
    cosdec = np.cos(np.deg2rad((declo+dechi)/2.))
    r0 = ralo - margin/cosdec
    r1 = rahi + margin/cosdec
    d0 = declo - margin
    d1 = dechi + margin
    T.cut((T.dec > d0) * (T.dec < d1))
    if ralo > rahi:
        # RA wrap
        T.cut(np.logical_or(T.ra > r0, T.ra < r1))
    else:
        T.cut((T.ra > r0) * (T.ra < r1))
    if subset == 'dark':
        T.cut(T.program == 'DARK')
    elif subset == 'bright':
        T.cut(T.program == 'BRIGHT')
    # rd = list((float(r),float(d)) for r,d in zip(T.ra, T.dec))
    # tid = list(int(t) for t in T.tileid)
    # rtn = dict(rd=rd, tileid=list(tid), program=list(T.program))
    # return HttpResponse(json.dumps(rtn), content_type='application/json')
    objs = []
    for r,d,prog,tid in zip(T.ra, T.dec, T.program, T.tileid):
        objs.append(dict(name='Tile %i (%s)' % (tid, prog),
                         ra=r,
                         dec=d,
                         radius=1.6))
    return HttpResponse(json.dumps({'objs':objs}), content_type='application/json')
def cat_desi_tile(req, ver):
    """Return the targets of one DESI tile's fiberassign file as JSON.

    Query params: 'tile' (decimal tile id, required) and optionally an
    RA,Dec box ({ra,dec}{lo,hi}) to trim the target list.  Target-selection
    bitmask names are decoded for SV3, SV1, commissioning, or main-survey
    target columns, whichever is present.
    """
    from astrometry.util.fits import fits_table
    import json
    import re
    # NOTE(review): 're' appears unused in this function.
    tile = int(req.GET.get('tile','0'), 10)
    if tile == 0:
        return 'bad tile'
    haverd = False
    if ('ralo' in req.GET and 'rahi' in req.GET and
        'declo' in req.GET and 'dechi' in req.GET):
        ralo = float(req.GET['ralo'])
        rahi = float(req.GET['rahi'])
        declo = float(req.GET['declo'])
        dechi = float(req.GET['dechi'])
        haverd = True
    fn = desi_fiberassign_filename(tile)
    if not os.path.exists(fn):
        print('Does not exist:', fn)
        return
    print('Reading', fn)
    cat = fits_table(fn)
    # Expose the target coordinates under the generic names used below.
    cat.ra = cat.target_ra
    cat.dec = cat.target_dec
    if haverd:
        if ralo > rahi:
            import numpy as np
            # RA wrap
            cat.cut(np.logical_or(cat.ra > ralo, cat.ra < rahi) *
                    (cat.dec > declo) * (cat.dec < dechi))
        else:
            cat.cut((cat.ra > ralo) * (cat.ra < rahi) *
                    (cat.dec > declo) * (cat.dec < dechi))
        print(len(cat), 'DESI tile sources after RA,Dec cut')
    rd = list(zip(cat.ra.astype(float), cat.dec.astype(float)))
    D = dict(rd=rd)
    cols = cat.columns()
    print('Columns:', cols)
    # Decode target bits from whichever survey phase this tile belongs to.
    if 'sv3_desi_target' in cols:
        bitnames = desitarget_sv3_names(cat)
        D.update(bits=bitnames)
    elif 'sv1_desi_target' in cols:
        bitnames = desitarget_sv1_names(cat)
        D.update(bits=bitnames)
    elif 'cmx_target' in cols:
        bitnames = desitarget_cmx_names(cat)
        D.update(bits=bitnames)
    # Main targets
    elif 'desi_target' in cols:
        from desitarget.targets import desi_mask, bgs_mask, mws_mask, scnd_mask
        bitnames = []
        for desi,bgs,mws,scnd in zip(cat.desi_target, cat.bgs_target, cat.mws_target, cat.scnd_target):
            sec = scnd_mask.names(scnd)
            sec = ['sec:'+k for k in sec]
            names = desi_mask.names(desi) + bgs_mask.names(bgs) + mws_mask.names(mws) + sec
            # Drop redundant/noisy bit names from the display.
            for kill in ['LRG_SOUTH', 'ELG_SOUTH', 'QSO_SOUTH', 'ELG_VLO_SOUTH', 'SCND_ANY',
                         'MWS_MAIN_BLUE_SOUTH', ]:
                if kill in names:
                    names.remove(kill)
            bitnames.append(', '.join(names))
        D.update(bits=bitnames)
    if 'targetid' in cols:
        # Target ids are sent as strings (presumably because they exceed
        # JavaScript's safe-integer range -- confirm).
        D.update(targetid=['%i'%i for i in cat.targetid])
    if 'fiber' in cols:
        D.update(fiberid=[int(i) for i in cat.fiber])
    # NaN is not valid JSON; emit nulls.
    return HttpResponse(json.dumps(D).replace('NaN','null'),
                        content_type='application/json')
def cat_bright(req, ver):
    """Bright-star catalog overlay."""
    fn = os.path.join(settings.DATA_DIR, 'bright.fits')
    return cat(req, ver, 'bright', fn)
def cat_tycho2(req, ver):
    """Return Tycho-2 stars in the query box as JSON, with Simbad links."""
    #return cat(req, ver, 'tycho2',
    #           os.path.join(settings.DATA_DIR, 'tycho2.fits'))
    import json
    T = cat_kd(req, ver, 'tycho2', os.path.join(settings.DATA_DIR, 'tycho2-sub.kd.fits'))
    if T is None:
        rtn = dict(rd=[], name=[])
    else:
        rd = list((float(r),float(d)) for r,d in zip(T.ra, T.dec))
        rtn = dict(rd=rd)
        if 'name' in T.columns():
            names = [t.strip() for t in T.name]
            # Note: rtn['name'] aliases this list, so the in-place edits
            # in the loop below are reflected in the response.
            rtn['name'] = names
            for i,name in enumerate(names):
                try:
                    # Parse name as "Tycho-2 ###-###-###", then form Simbad link from the parsed
                    # numbers.
                    words = name.split()
                    nums = words[1].split('-')
                    tyc1 = int(nums[0])
                    tyc2 = int(nums[1])
                    tyc3 = int(nums[2])
                    url = 'https://simbad.cds.unistra.fr/simbad/sim-id?Ident=TYC++%i+%i+%i&NbIdent=1' % (tyc1, tyc2, tyc3)
                    names[i] = '<a href="%s">%s</a>' % (url, name)
                except:
                    # Names that do not parse keep their plain text.
                    pass
    return HttpResponse(json.dumps(rtn), content_type='application/json')
def cat_ngc(req, ver):
    """NGC/IC catalog overlay."""
    fn = os.path.join(settings.DATA_DIR, 'ngcic.fits')
    return cat(req, ver, 'ngc', fn)
def cat_GCs_PNe(req, ver):
    """Globular clusters / planetary nebulae overlay (NGC star-clusters file)."""
    from astrometry.util.fits import fits_table
    import numpy as np
    #T.alt_name = np.array(['' if n.startswith('N/A') else n.strip() for n in T.commonnames])
    clusters = fits_table(os.path.join(settings.DATA_DIR,'NGC-star-clusters.fits'))
    # Expose position angle and axis ratio under the names cat() expects.
    clusters.posAngle = clusters.pa
    clusters.abRatio = clusters.ba
    return cat(req, ver, 'GCs-PNe', None, T=clusters)
def cat_ps1(req, ver):
    """Pan-STARRS1 catalog overlay.

    Serves the EDR-region catalog file when the query box overlaps the EDR
    footprint (RA 241-246, Dec 6.5-11.5); otherwise the general catalog.
    """
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    # We have the EDR region and a block around 0,0.
    # (The original mixed 'and' with boolean '*' multiplication; all four
    # clauses must hold, so use a plain 'and' chain -- same behavior.)
    if (rahi > 241) and (ralo < 246) and (dechi >= 6.5) and (declo < 11.5):
        return cat(req, ver, 'ps1',
                   os.path.join(settings.DATA_DIR,'ps1-cat-edr.fits'))
    return cat(req, ver, 'ps1',
               os.path.join(settings.DATA_DIR,'ps1-cat.fits'))
def cat(req, ver, tag, fn, T=None):
    """Generic catalog overlay: cut a table to the RA,Dec box, emit JSON.

    When *T* is not given, the table is read from *fn*.  Optional columns
    (name, alt_name, radius, posAngle+abRatio) are passed through when
    present; PS1 tables are labeled by detection count.
    """
    import json
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])
    ver = int(ver)
    if ver not in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    from astrometry.util.fits import fits_table
    import numpy as np
    if T is None:
        T = fits_table(fn)
    debug(len(T), 'catalog objects')
    if ralo > rahi:
        # Box wraps through RA=0.
        T.cut(np.logical_or(T.ra > ralo, T.ra < rahi) * (T.dec > declo) * (T.dec < dechi))
    else:
        T.cut((T.ra > ralo) * (T.ra < rahi) * (T.dec > declo) * (T.dec < dechi))
    debug(len(T), 'in cut')
    rtn = dict(rd=[(float(r), float(d)) for r, d in zip(T.ra, T.dec)])
    # PS1 catalogs have no names: label by detection count.
    if 'ndetections' in T.columns():
        T.name = np.array(['%i' % n for n in T.ndetections])
    if 'name' in T.columns():
        rtn['name'] = [t.strip() for t in T.name]
    # bright stars, GCs/PNe
    if 'alt_name' in T.columns():
        rtn['altname'] = [t.strip() for t in T.alt_name]
    if 'radius' in T.columns():
        rtn['radiusArcsec'] = [float(f) for f in T.radius * 3600.]
    if 'posAngle' in T.columns() and 'abRatio' in T.columns():
        rtn['posAngle'] = [float(f) for f in T.posAngle]
        rtn['abRatio'] = [float(f) for f in T.abRatio]
    return HttpResponse(json.dumps(rtn), content_type='application/json')
def any_cat(req, name, ver, zoom, x, y, **kwargs):
    """Catalog-tile endpoint for an arbitrary named layer (served uncached)."""
    from map.views import get_layer
    if get_layer(name) is None:
        return HttpResponse('no such layer')
    return cat_decals(req, ver, zoom, x, y, tag=name, docache=False)
def cat_decals(req, ver, zoom, x, y, tag='decals', docache=True):
    """Return the catalog for one map tile (zoom/x/y) as JSON.

    Tiles below zoom 12 are always empty.  When *docache* is set, results
    are written to (and served from) an on-disk cache keyed by
    version/zoom/x/y; otherwise a throwaway temp file is used.

    Fix: the original rebound the name 'json' to the serialized string,
    shadowing the json module; the string now has its own name.
    """
    import json
    zoom = int(zoom)
    if zoom < 12:
        return HttpResponse(json.dumps(dict(rd=[])),
                            content_type='application/json')
    try:
        wcs, W, H, zoomscale, zoom, x, y = get_tile_wcs(zoom, x, y)
    except RuntimeError as e:
        print('e:', e)
        return HttpResponse(e.strerror)
    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))
    basedir = settings.DATA_DIR
    sendfile_kwargs = dict()
    if docache:
        cachefn = os.path.join(basedir, 'cats-cache', tag,
                               '%i/%i/%i/%i.cat.json' % (ver, zoom, x, y))
        if os.path.exists(cachefn):
            return send_file(cachefn, 'application/json',
                             modsince=req.META.get('HTTP_IF_MODIFIED_SINCE'),
                             expires=oneyear)
        sendfile_kwargs.update(expires=oneyear)
    else:
        import tempfile
        f, cachefn = tempfile.mkstemp(suffix='.json')
        os.close(f)
        sendfile_kwargs.update(unlink=True)
    from map.views import get_layer
    layer = get_layer(tag)
    cat, hdr = layer.get_catalog_in_wcs(wcs)
    if cat is None:
        rd = []
        types = []
        fluxes = []
        bricknames = []
        objids = []
        nobs = []
    else:
        rd = list(zip(cat.ra, cat.dec))
        # First letter of 'type' suffices to distinguish source types.
        types = list([t[0] for t in cat.get('type')])
        havebands = []
        havefluxes = []
        havenobs = []
        cols = cat.get_columns()
        for band in 'griz':
            if 'flux_'+band in cols:
                havebands.append(band)
                havefluxes.append(cat.get('flux_' + band))
                havenobs.append(cat.get('nobs_' + band))
        # Per-source dicts of band -> flux and band -> number of observations.
        fluxes = []
        for F in zip(*havefluxes):
            fluxes.append(dict([(b, float(f)) for b, f in zip(havebands, F)]))
        nobs = []
        for N in zip(*havenobs):
            nobs.append(dict([(b, int(f)) for b, f in zip(havebands, N)]))
        bricknames = list(cat.brickname)
        objids = [int(x) for x in cat.objid]
    json_text = json.dumps(dict(rd=rd, sourcetype=types, fluxes=fluxes, nobs=nobs,
                                bricknames=bricknames, objids=objids))
    if docache:
        trymakedirs(cachefn)
    with open(cachefn, 'w') as f:
        f.write(json_text)
    return send_file(cachefn, 'application/json', **sendfile_kwargs)
@lru_cache(maxsize=1)
def get_desi_tiles():
    """Return {tileid: (ra, dec)} of DESI tiles (read once, then cached)."""
    from astrometry.util.fits import fits_table
    #path = os.path.join(settings.DATA_DIR, 'desi-tiles.fits')
    path = os.path.join(settings.DATA_DIR, 'desi-spectro-daily', 'tiles2.kd.fits')
    tiles = fits_table(path)
    return {tid: (r, d) for tid, r, d in zip(tiles.tileid, tiles.ra, tiles.dec)}
def get_desi_tile_radec(tileid, fiberid=None):
    """Return the (ra, dec) center of a DESI tile, or None if unknown.

    *fiberid* is accepted for API compatibility but is currently ignored:
    the per-fiber lookup (which read the fiberassign file for TILERA/TILEDEC
    and the fiber's target coordinates) was disabled in favor of the cached
    tile-center table, and in the original code sat unreachable after the
    first return.  That dead code is removed here; behavior is unchanged.
    """
    tilerd = get_desi_tiles()
    return tilerd.get(tileid, None)
if __name__ == '__main__':
    # Ad-hoc driver: one-off data-preparation recipes (kept in dead
    # 'if False:' branches for reference) plus test requests against the
    # Django app via its test Client.
    import sys
    if False:
        # Regenerate the galaxies-in-dr10 catalog.
        from map.views import get_layer
        #galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-hsc2.fits')
        #layer = get_layer('hsc2')
        #create_galaxy_catalog(galfn, None, layer=layer)
        galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-dr10.fits')
        layer = get_layer('ls-dr10')
        create_galaxy_catalog(galfn, None, layer=layer)
        sys.exit(0)
    # galfn = os.path.join(settings.DATA_DIR, 'galaxies-in-dr9.fits')
    # layer = get_layer('ls-dr9-north')
    # create_galaxy_catalog(galfn, None, layer=layer)
    # sys.exit(0)
    if False:
        # Create SDSS DR16 Spectra file (specObj-dr16-trimmed.kd.fits):
        from astrometry.util.fits import fits_table
        import numpy as np
        T=fits_table('/global/cfs/cdirs/cosmo/data/sdss/dr16/sdss/spectro/redux/specObj-dr16.fits',
                     columns=['plate','mjd','fiberid','plug_ra','plug_dec','class','subclass','z','zwarning'])
        print('Read', len(T))
        T.rename('plug_ra', 'ra')
        T.rename('plug_dec','dec')
        # Build display labels: class, cleaned subclass, and redshift for
        # galaxies/QSOs.
        labels = []
        for t in T:
            sub = t.subclass
            sub = sub.split()
            sub = ' '.join([s for s in sub if s[0] != '('])
            cla = t.get('class').strip()
            txt = cla
            if len(sub):
                txt += ' (' + sub + ')'
            if cla in ['GALAXY', 'QSO']:
                txt += ' z=%.3f' % t.z
            labels.append(txt)
        T.label = np.array(labels)
        print('Writing trimmed...')
        T.writeto('data/sdss/specObj-dr16-trimmed.fits', columns=['ra','dec','plate','mjd','fiberid','z','zwarning','label'])
        print('Creating kdtree...')
        os.system('startree -i data/sdss/specObj-dr16-trimmed.fits -o data/sdss/specObj-dr16-trimmed.kd.fits -T -k -P')
        sys.exit(0)
    #t = lookup_targetid(39627788403084375)
    #print('Targetid:', t)
    #t.about()
    # Exercise one endpoint via the Django test client and dump the
    # response body to a file named 'out'.
    from django.test import Client
    c = Client()
    #r = c.get('/sga/1/cat.json?ralo=259.2787&rahi=259.7738&declo=35.9422&dechi=36.1656')
    #r = c.get('/sga/1/cat.json?ralo=259.5726&rahi=260.0677&declo=35.9146&dechi=36.1382')
    #r = c.get('/usercatalog/1/cat.json?ralo=350.0142&rahi=350.0761&declo=-9.6430&dechi=-9.6090&cat=tmppboi50xv')
    ## should contain NGC 6349
    #r = c.get('/dr8-south/1/12/3572/2187.cat.json')
    #r = c.get('/dr8-north/1/14/8194/5895.cat.json')
    #r = c.get('/dr8/1/14/8194/5895.cat.json')
    #r = c.get('/decals-dr7/1/14/8639/7624.cat.json')
    #r = c.get('/mzls+bass-dr6/1/14/7517/6364.cat.json')
    #r = c.get('/targets-dr9-main-dark/1/cat.json?ralo=189.1391&rahi=189.2628&declo=27.5179&dechi=27.5791')
    #r = c.get('/desi-spec-daily/1/cat.json?ralo=154.1814&rahi=154.3175&declo=-2.6274&dechi=-2.5515')
    #r = c.get('/targets-dr9-main-dark/1/cat.json?ralo=189.1391&rahi=189.2628&declo=27.5179&dechi=27.5791')
    #r = c.get('/desi-tile/1/cat.json?ralo=238.1458&rahi=238.4181&declo=-0.0750&dechi=0.0748&tile=1000')
    #r = c.get('/desi-tile/1/cat.json?ralo=190.9733&rahi=191.6270&declo=10.1426&dechi=10.5060&tile=8786')
    #r = c.get('/targets-dr9-main-dark/1/cat.json?ralo=189.1391&rahi=189.2628&declo=27.5179&dechi=27.5791')
    #r = c.get('/desi-spectrum/daily/targetid39628158961452744')
    #r = c.get('/desi-spectrum/daily/targetid39633049259870731')
    # tile = 6834
    # print('Tile', tile, 'rd', get_desi_tile_radec(tile))
    # sys.exit(0)
    #r = c.get('/desi-spectrum/guadalupe/targetid39633049259870731')
    #r = c.get('/desi-spectrum/daily/targetid43977408013222855')
    #r = c.get('/ls-dr9/1/15/29479/18709.cat.json')
    #r = c.get('/usercatalog/1/cat.json?ralo=61.2789&rahi=61.3408&declo=-74.8711&dechi=-74.8622&cat=tmpbclfdga8')
    #r = c.get('/desi-spectrum/edr/targetid39627883857055540')
    #r = c.get('/desi-spec-dr1/1/cat.json?ralo=149.1504&rahi=149.3979&declo=68.5631&dechi=68.6128')
    #r = c.get('/desi-spec-daily/1/cat.json?ralo=146.9512&rahi=147.0131&declo=13.2602&dechi=13.2932')
    #r = c.get('/desi-spec-daily-sky/1/cat.json?ralo=146.9512&rahi=147.0131&declo=13.2602&dechi=13.2932')
    #r = c.get('/desi-obs-daily/1/cat.json?ralo=146.9298&rahi=147.0535&declo=13.2322&dechi=13.2983')
    #r = c.get('/desi-obs/daily/targetid39628104741683680')
    #r = c.get('/desi-obs/daily/targetid2411699042779148')
    #r = c.get('/desi-obs-daily/1/cat.json?ralo=218.6108&rahi=218.6418&declo=30.9829&dechi=30.9974')
    #r = c.get('/desi-spectrum/daily/targetid2305843037000968814')
    r = c.get('/desi-spectrum/daily/targetid39627920582379819')
    f = open('out', 'wb')
    for x in r:
        f.write(x)
    f.close()
    sys.exit(0)
    # Unreachable below (after sys.exit): older merge/test snippets.
    #print('Random galaxy:', get_random_galaxy(layer='mzls+bass-dr4'))
    #create_galaxy_catalog('/tmp/dr8.fits', 8)
    from astrometry.util.fits import *
    T6 = fits_table('data/galaxies-in-dr6.fits')
    T7 = fits_table('data/galaxies-in-dr7.fits')
    T8 = merge_tables([T6, T7], columns='fillzero')
    T8.writeto('data/galaxies-in-dr8.fits')
    #specObj-dr14.fits
    #T = fits_table('/project/projectdirs/cosmo/data/sdss/dr14/sdss/spectro/redux/specObj-dr14.fits')
    from django.test import Client
    c = Client()
    #c.get('/usercatalog/1/cat.json?ralo=200.2569&rahi=200.4013&declo=47.4930&dechi=47.5823&cat=tmpajwai3dx')
    sys.exit(0)
|
legacysurveyREPO_NAMEimaginePATH_START.@imagine_extracted@imagine-main@map@cats.py@.PATH_END.py
|
{
"filename": "_field_common.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pyrsistent/py3/pyrsistent/_field_common.py",
"type": "Python"
}
|
from pyrsistent._checked_types import (
CheckedPMap,
CheckedPSet,
CheckedPVector,
CheckedType,
InvariantException,
_restore_pickle,
get_type,
maybe_parse_user_type,
maybe_parse_many_user_types,
)
from pyrsistent._checked_types import optional as optional_type
from pyrsistent._checked_types import wrap_invariant
import inspect
def set_fields(dct, bases, name):
    """Collect _PField attributes into dct[name].

    Field dicts inherited from *bases* are merged first (in base order),
    then fields declared directly in *dct* are moved into the merged dict
    and removed as plain class attributes.
    """
    inherited = []
    for base in bases:
        inherited.extend(base.__dict__.get(name, {}).items())
    dct[name] = dict(inherited)
    for key, value in list(dct.items()):
        if isinstance(value, _PField):
            dct[name][key] = value
            del dct[key]
def check_global_invariants(subject, invariants):
    """Run all global *invariants* against *subject*; raise if any fail."""
    failed = []
    for invariant in invariants:
        holds, error_code = invariant(subject)
        if not holds:
            failed.append(error_code)
    if failed:
        raise InvariantException(tuple(failed), (), 'Global invariant failed')
def serialize(serializer, format, value):
    """Serialize *value*; CheckedType values self-serialize when no custom serializer is set."""
    use_self_serialize = (serializer is PFIELD_NO_SERIALIZER
                          and isinstance(value, CheckedType))
    if use_self_serialize:
        return value.serialize(format)
    return serializer(format, value)
def check_type(destination_cls, field, name, value):
    """Raise PTypeError unless *value* matches one of the field's allowed types.

    Fields with an empty type set accept any value.
    """
    if not field.type:
        return
    if any(isinstance(value, get_type(t)) for t in field.type):
        return
    actual_type = type(value)
    message = "Invalid type for field {0}.{1}, was {2}".format(
        destination_cls.__name__, name, actual_type.__name__)
    raise PTypeError(destination_cls, name, field.type, actual_type, message)
def is_type_cls(type_cls, field_type):
    """True if the field type's first entry subclasses *type_cls*.

    A field type that is a plain ``set`` always counts as a match.
    """
    if type(field_type) is set:
        return True
    types = tuple(field_type)
    return bool(types) and issubclass(get_type(types[0]), type_cls)
def is_field_ignore_extra_complaint(type_cls, field, ignore_extra):
    """Whether the field's factory should receive the ignore_extra kwarg.

    ignore_extra defaults to False; the reflection cost of inspecting the
    factory signature is only paid when it is actually set.
    """
    return (bool(ignore_extra)
            and is_type_cls(type_cls, field.type)
            and 'ignore_extra' in inspect.signature(field.factory).parameters)
class _PField(object):
    """Internal specification of a single record field.

    Bundles the allowed types, invariant, initial value, mandatory flag,
    factory and serializer for one field.
    """
    __slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer')

    def __init__(self, type, invariant, initial, mandatory, factory, serializer):
        self.type = type
        self.invariant = invariant
        self.initial = initial
        self.mandatory = mandatory
        self._factory = factory
        self.serializer = serializer

    @property
    def factory(self):
        """The field's factory.

        When no factory was specified and the field has exactly one allowed
        type that is a CheckedType, that type's create() is used instead.
        """
        no_custom_factory = self._factory is PFIELD_NO_FACTORY
        if no_custom_factory and len(self.type) == 1:
            only_type = get_type(tuple(self.type)[0])
            if issubclass(only_type, CheckedType):
                return only_type.create
        return self._factory
# Sentinels used by field() to mean "parameter not specified".
PFIELD_NO_TYPE = ()                            # no type restriction
PFIELD_NO_INVARIANT = lambda _: (True, None)   # invariant that always holds
PFIELD_NO_FACTORY = lambda x: x                # identity factory
PFIELD_NO_INITIAL = object()                   # unique marker: no initial value
PFIELD_NO_SERIALIZER = lambda _, value: value  # identity serializer
def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL,
          mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER):
    """
    Field specification factory for :py:class:`PRecord`.

    :param type: a type or iterable with types that are allowed for this field
    :param invariant: a function specifying an invariant that must hold for the field
    :param initial: value of field if not specified when instantiating the record
    :param mandatory: boolean specifying if the field is mandatory or not
    :param factory: function called when field is set.
    :param serializer: function that returns a serialized version of the field
    """
    # A bare type is distinguished from a container of types here (rather
    # than in maybe_parse_user_type et al.) because this concerns field()'s
    # argspec, not the valid ways to spell a type.  isinstance -- not
    # issubclass -- is used so container subclasses such as namedtuples
    # still behave as containers of types.
    if isinstance(type, (list, set, tuple)):
        types = set(maybe_parse_many_user_types(type))
    else:
        types = set(maybe_parse_user_type(type))

    if invariant != PFIELD_NO_INVARIANT and callable(invariant):
        invariant_function = wrap_invariant(invariant)
    else:
        invariant_function = invariant

    spec = _PField(type=types, invariant=invariant_function, initial=initial,
                   mandatory=mandatory, factory=factory, serializer=serializer)
    _check_field_parameters(spec)
    return spec
def _check_field_parameters(field):
    """Validate the components of a field specification.

    Raises ``TypeError`` when the declared types, initial value, or any of
    the callable hooks are malformed.
    """
    # Every declared type must be an actual type or a forward-reference string.
    for candidate in field.type:
        if not (isinstance(candidate, type) or isinstance(candidate, str)):
            raise TypeError('Type parameter expected, not {0}'.format(type(candidate)))

    # A concrete (non-callable) initial value must match one of the declared
    # types, when any types were declared at all.
    has_initial = field.initial is not PFIELD_NO_INITIAL
    if has_initial and not callable(field.initial) and field.type:
        if not any(isinstance(field.initial, t) for t in field.type):
            raise TypeError('Initial has invalid type {0}'.format(type(field.initial)))

    # The three behavioural hooks must all be callable.
    for hook, label in ((field.invariant, 'Invariant'),
                        (field.factory, 'Factory'),
                        (field.serializer, 'Serializer')):
        if not callable(hook):
            raise TypeError('{0} must be callable'.format(label))
class PTypeError(TypeError):
    """
    Raised when trying to assign a value with a type that doesn't match the declared type.
    Attributes:
    source_class -- The class of the record
    field -- Field name
    expected_types -- Types allowed for the field
    actual_type -- The non matching type
    """
    def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
        super(PTypeError, self).__init__(*args, **kwargs)
        # Retain the offending context so callers can build precise messages.
        self.source_class = source_class
        self.field = field
        self.expected_types = expected_types
        self.actual_type = actual_type
# Suffix appended to the auto-generated sequence-field type names, keyed by
# the checked base class (see `_make_seq_field_type`).
SEQ_FIELD_TYPE_SUFFIXES = {
    CheckedPVector: "PVector",
    CheckedPSet: "PSet",
}
# Global dictionary to hold auto-generated field types: used for unpickling
_seq_field_types = {}
def _restore_seq_field_pickle(checked_class, item_type, data):
    """Unpickling function for auto-generated PVec/PSet field types."""
    # The memoized type must already exist (re-created on import), so a plain
    # lookup is sufficient to rebuild the instance.
    type_ = _seq_field_types[checked_class, item_type]
    return _restore_pickle(type_, data)
def _types_to_names(types):
    """Convert a tuple of types to a human-readable string."""
    # Build e.g. "IntStr" from (int, str); each name is capitalized.
    names = []
    for typ in types:
        names.append(get_type(typ).__name__.capitalize())
    return "".join(names)
def _make_seq_field_type(checked_class, item_type, item_invariant):
    """Create a subclass of the given checked class with the given item type."""
    # Memoize on (checked_class, item_type) so that repeated field declarations
    # share one type and pickling round-trips find it again.
    type_ = _seq_field_types.get((checked_class, item_type))
    if type_ is not None:
        return type_
    class TheType(checked_class):
        __type__ = item_type
        __invariant__ = item_invariant
        def __reduce__(self):
            # Pickle via the module-level restore helper, since this class is
            # generated dynamically and not importable by name.
            return (_restore_seq_field_pickle,
                    (checked_class, item_type, list(self)))
    suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class]
    TheType.__name__ = _types_to_names(TheType._checked_types) + suffix
    _seq_field_types[checked_class, item_type] = TheType
    return TheType
def _sequence_field(checked_class, item_type, optional, initial,
                    invariant=PFIELD_NO_INVARIANT,
                    item_invariant=PFIELD_NO_INVARIANT):
    """
    Create checked field for either ``PSet`` or ``PVector``.
    :param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
    :param item_type: The required type for the items in the set.
    :param optional: If true, ``None`` can be used as a value for
        this field.
    :param initial: Initial value to pass to factory.
    :param invariant: Field-level invariant, passed through to ``field``.
    :param item_invariant: Invariant applied to each item of the sequence.
    :return: A ``field`` containing a checked class.
    """
    TheType = _make_seq_field_type(checked_class, item_type, item_invariant)
    if optional:
        # `None` passes through untouched; anything else is coerced via the
        # checked type's factory (forwarding record-creation options).
        def factory(argument, _factory_fields=None, ignore_extra=False):
            if argument is None:
                return None
            else:
                return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra)
    else:
        factory = TheType.create
    return field(type=optional_type(TheType) if optional else TheType,
                 factory=factory, mandatory=True,
                 invariant=invariant,
                 initial=factory(initial))
def pset_field(item_type, optional=False, initial=(),
               invariant=PFIELD_NO_INVARIANT,
               item_invariant=PFIELD_NO_INVARIANT):
    """Create a checked ``PSet`` field.

    :param item_type: The required type for the items in the set.
    :param optional: If true, ``None`` is accepted as a value for this field.
    :param initial: Initial value passed to the factory when the field is
        not given a value.
    :param invariant: Field-level invariant, passed through to ``field``.
    :param item_invariant: Invariant applied to each set item.
    :return: A ``field`` containing a ``CheckedPSet`` of the given type.
    """
    return _sequence_field(
        CheckedPSet,
        item_type,
        optional,
        initial,
        invariant=invariant,
        item_invariant=item_invariant,
    )
def pvector_field(item_type, optional=False, initial=(),
                  invariant=PFIELD_NO_INVARIANT,
                  item_invariant=PFIELD_NO_INVARIANT):
    """Create a checked ``PVector`` field.

    :param item_type: The required type for the items in the vector.
    :param optional: If true, ``None`` is accepted as a value for this field.
    :param initial: Initial value passed to the factory when the field is
        not given a value.
    :param invariant: Field-level invariant, passed through to ``field``.
    :param item_invariant: Invariant applied to each vector item.
    :return: A ``field`` containing a ``CheckedPVector`` of the given type.
    """
    return _sequence_field(
        CheckedPVector,
        item_type,
        optional,
        initial,
        invariant=invariant,
        item_invariant=item_invariant,
    )
# Invariant that always passes; note it reports "" (not None) as the message.
_valid = lambda item: (True, "")
# Global dictionary to hold auto-generated field types: used for unpickling
_pmap_field_types = {}
def _restore_pmap_field_pickle(key_type, value_type, data):
    """Unpickling function for auto-generated PMap field types."""
    # The memoized type must already exist (re-created on import).
    type_ = _pmap_field_types[key_type, value_type]
    return _restore_pickle(type_, data)
def _make_pmap_field_type(key_type, value_type):
    """Create a subclass of CheckedPMap with the given key and value types."""
    # Memoize on (key_type, value_type) so repeated declarations share one
    # type and pickling round-trips find it again.
    type_ = _pmap_field_types.get((key_type, value_type))
    if type_ is not None:
        return type_
    class TheMap(CheckedPMap):
        __key_type__ = key_type
        __value_type__ = value_type
        def __reduce__(self):
            # Pickle via the module-level restore helper, since this class is
            # generated dynamically and not importable by name.
            return (_restore_pmap_field_pickle,
                    (self.__key_type__, self.__value_type__, dict(self)))
    TheMap.__name__ = "{0}To{1}PMap".format(
        _types_to_names(TheMap._checked_key_types),
        _types_to_names(TheMap._checked_value_types))
    _pmap_field_types[key_type, value_type] = TheMap
    return TheMap
def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
    """
    Create a checked ``PMap`` field.
    :param key_type: The required type for the keys of the map.
    :param value_type: The required type for the values of the map.
    :param optional: If true, ``None`` can be used as a value for
        this field.
    :param invariant: Pass-through to ``field``.
    :return: A ``field`` containing a ``CheckedPMap``.
    """
    TheMap = _make_pmap_field_type(key_type, value_type)
    if optional:
        # `None` passes through untouched; anything else is coerced via the
        # checked map's factory.
        def factory(argument):
            if argument is None:
                return None
            else:
                return TheMap.create(argument)
    else:
        factory = TheMap.create
    return field(mandatory=True, initial=TheMap(),
                 type=optional_type(TheMap) if optional else TheMap,
                 factory=factory, invariant=invariant)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pyrsistent@py3@pyrsistent@_field_common.py@.PATH_END.py
|
{
"filename": "_shadowsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sunburst/outsidetextfont/_shadowsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    # Auto-generated plotly validator for the `shadowsrc` property of
    # `sunburst.outsidetextfont`; `SrcValidator` validates column-source refs.
    def __init__(
        self, plotly_name="shadowsrc", parent_name="sunburst.outsidetextfont", **kwargs
    ):
        super(ShadowsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sunburst@outsidetextfont@_shadowsrc.py@.PATH_END.py
|
{
"filename": "_kernels.py",
"repo_name": "MikeSWang/Harmonia",
"repo_path": "Harmonia_extracted/Harmonia-master/harmonia/reader/_kernels.py",
"type": "Python"
}
|
"""
Spherical kernels (:mod:`~harmonia.reader.kernels`)
===========================================================================
Evaluate integration kernels for computing spherical Fourier coupling
coefficients.
"""
import numpy as np
from harmonia.algorithms.bases import spherical_besselj, spherical_harmonic
def angular_kernel(theta, phi, mu, nu, mask=None):
    r"""Evaluate the angular coupling kernel.
    Parameters
    ----------
    theta, phi : float, array_like
        Angular coordinates :math:`\theta`, :math:`\phi`.
    mu, nu : tuple(int, int, int)
        Coefficient triplet index.
    mask : callable or None, optional
        Mask as a function of angular coordinates (default is `None`).
    Returns
    -------
    kernel : complex :class:`numpy.ndarray`
        Angular coupling kernel value.
    """
    # Product of the conjugated `mu` harmonic with the `nu` harmonic.
    kernel = \
        spherical_harmonic(mu[0], mu[1], theta, phi, conj=True) \
        * spherical_harmonic(nu[0], nu[1], theta, phi)
    if callable(mask):
        # The mask is called with coordinates stacked column-wise (theta, phi).
        kernel *= mask(np.column_stack([theta, phi]))
    return kernel
def radial_kernel(r, mu, nu, k_mu, k_nu, selection=None, weight=None,
                  bias_evolution=None, clustering_evolution=None,
                  z_from_r=None, chi_of_z=None):
    """Evaluate the radial coupling kernel.
    Parameters
    ----------
    r : float, array_like
        Radial coordinate.
    mu, nu : tuple(int, int, int)
        Coefficient triplet index.
    k_mu, k_nu : float
        Discrete wave number corresponding to index `mu` or `nu`.
    selection, weight : callable or None, optional
        Selection or weight as a function of the radial coordinate
        (default is `None`).
    bias_evolution, clustering_evolution : callable or None, optional
        Bias and clustering evolution as a function of redshift
        normalised to unity at the current epoch (default is `None`).
    z_from_r : callable or None, optional
        Comoving distance-to-redshift conversion in a variable
        cosmological model (default is `None`).
    chi_of_z : callable or None, optional
        Comoving redshift-to-distance conversion in a fiducial
        cosmological model (default is `None`).
    Returns
    -------
    kernel : float :class:`numpy.ndarray`
        Radial coupling kernel value.
    Raises
    ------
    TypeError
        If `z_from_r` is not callable when any of `bias_evolution`,
        `clustering_evolution` and `chi_of_z` is.
    """
    # All redshift-dependent ingredients require a distance-to-redshift map.
    if any(map(callable, [chi_of_z, clustering_evolution, bias_evolution])) \
            and not callable(z_from_r):
        raise TypeError(
            "`z_from_r` must be callable when any of "
            "`bias_evolution`, `clustering_evolution` and `chi_of_z` is."
        )
    if not callable(chi_of_z):
        r_tilde = r
    else:
        # Distance in the fiducial cosmology corresponding to true distance r.
        r_tilde = chi_of_z(z_from_r(r))
    # Note the `mu` Bessel function takes the fiducial distance, the `nu`
    # one the true distance.
    kernel = \
        spherical_besselj(mu[0], k_mu * r_tilde) \
        * spherical_besselj(nu[0], k_nu * r)
    if callable(selection):
        kernel *= selection(r)
    if callable(weight):
        kernel *= weight(r_tilde)
    if callable(clustering_evolution):
        kernel *= clustering_evolution(z_from_r(r))
    if callable(bias_evolution):
        kernel *= bias_evolution(z_from_r(r))
    return kernel
def RSD_kernel(r, mu, nu, k_mu, k_nu, selection=None, weight=None,
               weight_derivative=None, growth_evolution=None,
               clustering_evolution=None, z_from_r=None, chi_of_z=None,
               differential_AP_distortion=None):
    """Evaluate the RSD coupling kernel.
    Parameters
    ----------
    r : float, array_like
        Radial coordinate.
    mu, nu : tuple(int, int, int)
        Coefficient triplet index.
    k_mu, k_nu : float
        Discrete wave number corresponding to index `mu` or `nu`.
    selection, weight, weight_derivative : callable or None, optional
        Selection, weight or weight derivative as a function of the
        radial coordinate (default is `None`).
    growth_evolution, clustering_evolution : callable or None, optional
        Growth rate evolution or clustering evolution as a function of
        redshift normalised to unity at the current epoch (default is
        `None`).
    z_from_r : callable or None, optional
        Cosmological comoving distance-to-redshift conversion (default
        is `None`).
    chi_of_z : callable or None, optional
        Fiducial comoving redshift-to-distance conversion (default is
        `None`).
    differential_AP_distortion : callable or None, optional
        Differential AP distortion as a function of redshift (default
        is `None`).
    Returns
    -------
    kernel : float, array_like
        RSD coupling kernel value.
    Raises
    ------
    TypeError
        If `z_from_r` is not callable when any of `growth_evolution`,
        `clustering_evolution` and `chi_of_z` is.
    TypeError
        If `weight` and `weight_derivative` are not both None or callable.
    TypeError
        If `chi_of_z` and `AP_distortion` are not both None or callable.
    """
    # Redshift-dependent ingredients require a distance-to-redshift map.
    if any(map(callable, [chi_of_z, growth_evolution, clustering_evolution])) \
            and not callable(z_from_r):
        raise TypeError(
            "`z_from_r` must be callable when any of "
            "`growth_evolution`, `clustering_evolution` and `chi_of_z` is."
        )
    # Weighting and AP correction are each all-or-nothing pairs of callables.
    apply_weight = any(map(callable, [weight, weight_derivative]))
    weight_applicable = all(map(callable, [weight, weight_derivative]))
    if apply_weight and not weight_applicable:
        raise TypeError(
            "`weight` and `weight_derivative` must both be callable "
            "in order to apply weight."
        )
    apply_AP = any(map(callable, [chi_of_z, differential_AP_distortion]))
    AP_applicable = all(map(callable, [chi_of_z, differential_AP_distortion]))
    if apply_AP and not AP_applicable:
        raise TypeError(
            "`chi_of_z` and `differential_AP_distortion` must both be "
            "callable in order to apply AP correction."
        )
    kernel = spherical_besselj(nu[0], k_nu*r, derivative=True)
    if apply_AP:
        r_tilde = chi_of_z(z_from_r(r))
        kernel *= differential_AP_distortion(z_from_r(r))
    else:
        r_tilde = r
    if callable(selection):
        kernel *= selection(r)
    if apply_weight:
        # Product rule applied to the weighted Bessel function of index `mu`.
        kernel *= (
            weight_derivative(r_tilde)
            * spherical_besselj(mu[0], k_mu * r_tilde) \
            + k_mu * weight(r_tilde) \
            * spherical_besselj(mu[0], k_mu * r_tilde, derivative=True)
        )
    else:
        kernel *= k_mu * spherical_besselj(mu[0], k_mu * r, derivative=True)
    if callable(growth_evolution):
        kernel *= growth_evolution(z_from_r(r))
    if callable(clustering_evolution):
        kernel *= clustering_evolution(z_from_r(r))
    return kernel
def shot_noise_kernel(r, mu, nu, k_mu, k_nu, selection=None, weight=None):
    """Evaluate the two-point correlator shot noise kernel.
    Parameters
    ----------
    r : float, array_like
        Radial coordinate.
    mu, nu : tuple(int, int, int)
        Coefficient triplet index.
    k_mu, k_nu : float
        Discrete wave number corresponding to index `mu` or `nu`.
    selection, weight : callable or None, optional
        Selection or weight as a function of the radial coordinate
        (default is `None`).
    Returns
    -------
    kernel : float, array_like
        Two-point correlator shot noise kernel value.
    """
    kernel = \
        spherical_besselj(mu[0], k_mu * r) \
        * spherical_besselj(nu[0], k_nu * r)
    if callable(selection):
        kernel *= selection(r)
    if callable(weight):
        # The weight enters squared as the kernel is for a two-point quantity.
        kernel *= weight(r) ** 2
    return kernel
|
MikeSWangREPO_NAMEHarmoniaPATH_START.@Harmonia_extracted@Harmonia-master@harmonia@reader@_kernels.py@.PATH_END.py
|
{
"filename": "build_remedy_call_for_date.py",
"repo_name": "grzeimann/Remedy",
"repo_path": "Remedy_extracted/Remedy-master/build_remedy_call_for_date.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 14:47:05 2019
@author: gregz
"""
import glob
import os.path as op
import sys
import tarfile
import numpy as np
from astropy.io import fits
# Scan one night's VIRUS tarballs for science frames matching a target and
# write launcher call lines that reduce them with quick_reduction.py.
rootdir = '/work/03946/hetdex/maverick'
inst = 'virus'

# Command-line arguments: date folder to scan, calibration file passed through
# to quick_reduction.py, target-name substring to match, and the number of
# launcher call lines to split the reductions into.
date = sys.argv[1]
cal = sys.argv[2]
target = sys.argv[3]
ncalls = int(sys.argv[4])

tarfolders = sorted(glob.glob(op.join(rootdir, date, inst,
                                      '%s0000*.tar' % inst)))

# Template filled with (date, observation number, calibration file).
call = ('python3 /work/03730/gregz/maverick/Remedy/quick_reduction.py %s %i '
        '47 %s -nd 8 -fp '
        '/work/03730/gregz/maverick/fplaneall.txt -nD')

tarlist = []
dates = [op.basename(op.dirname(op.dirname(tarf))) for tarf in tarfolders]
# NOTE: the loop variable is deliberately NOT named `date` so the argv value
# above is not shadowed.
for obs_date, tarfolder in zip(dates, tarfolders):
    # Walk each observation tarball member-by-member until its first science
    # frame is found, and record it if the header OBJECT matches the target.
    T = tarfile.open(tarfolder, 'r')
    try:
        while True:
            try:
                a = T.next()
            except Exception:  # corrupt archive: report and move on
                print('This file had an issue: %s' % tarfolder)
                break
            if a is None:  # end of archive without a science frame
                break
            name = a.name
            if name[-5:] != '.fits':
                continue
            try:
                b = fits.open(T.extractfile(a))
                Target = b[0].header['OBJECT']
            except Exception:
                # Skip unreadable members instead of reusing stale header
                # values from a previously opened file.
                print('Failed to open fits file from %s' % tarfolder)
                continue
            exptime = b[0].header.get('EXPTIME', 0.0)
            prog = b[0].header.get('QPROG', 'None')
            obs = int(op.basename(tarfolder)[-11:-4])
            if name[-8:-5] == 'sci':
                if target in Target:
                    tarlist.append([obs, name[-8:-5], Target, prog, exptime,
                                    obs_date])
                break  # only the first science frame per tarball is needed
    finally:
        T.close()

print('Number of calls: %i' % len(tarlist))
with open('%s_calls' % tarlist[0][-1], 'w') as out_file:
    # Split the matched observations into ncalls roughly equal chunks; each
    # output line is a "; "-joined series of quick_reduction.py invocations.
    for chunk in np.array_split(tarlist, ncalls):
        calls = [call % (ch[-1], int(ch[0]), cal) for ch in chunk]
        call_str = '; '.join(calls)
        out_file.write(call_str + '\n')
|
grzeimannREPO_NAMERemedyPATH_START.@Remedy_extracted@Remedy-master@build_remedy_call_for_date.py@.PATH_END.py
|
{
"filename": "sf_error.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/special/sf_error.py",
"type": "Python"
}
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.special` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
# Names re-exported for backwards compatibility; attribute access is routed
# through `__getattr__` below, which emits a deprecation warning.
__all__ = [  # noqa: F822
    'SpecialFunctionWarning',
    'SpecialFunctionError'
]
def __dir__():
    # Restrict dir() on this shim module to the deprecated public names.
    return __all__
def __getattr__(name):
    # Forward attribute lookups to the private `_sf_error` module while
    # emitting the standard SciPy deprecation warning.
    return _sub_module_deprecation(sub_package="special", module="sf_error",
                                   private_modules=["_sf_error"], all=__all__,
                                   attribute=name)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@special@sf_error.py@.PATH_END.py
|
{
"filename": "skimage_measure_fit.py",
"repo_name": "igrins/plp",
"repo_path": "plp_extracted/plp-master/igrins/procedures/skimage_measure_fit.py",
"type": "Python"
}
|
import math
import warnings
import numpy as np
from scipy import optimize
def _check_data_dim(data, dim):
if data.ndim != 2 or data.shape[1] != dim:
raise ValueError('Input data must have shape (N, %d).' % dim)
class BaseModel(object):
    """Base class for fit models.

    Subclasses populate ``params`` via their ``estimate`` method.
    """
    def __init__(self):
        self.params = None
    @property
    def _params(self):
        # Backwards-compatibility alias kept only to warn old callers.
        warnings.warn('`_params` attribute is deprecated, '
                      'use `params` instead.')
        return self.params
class LineModel(BaseModel):
    """Total least squares estimator for 2D lines.
    Lines are parameterized using polar coordinates as functional model::
        dist = x * cos(theta) + y * sin(theta)
    This parameterization is able to model vertical lines in contrast to the
    standard line model ``y = a*x + b``.
    This estimator minimizes the squared distances from all points to the
    line::
        min{ sum((dist - x_i * cos(theta) + y_i * sin(theta))**2) }
    A minimum number of 2 points is required to solve for the parameters.
    Attributes
    ----------
    params : tuple
        Line model parameters in the following order `dist`, `theta`.
    """
    def estimate(self, data):
        """Estimate line model from data using total least squares.
        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.
        """
        _check_data_dim(data, dim=2)
        X0 = data.mean(axis=0)
        if data.shape[0] == 2:  # well determined
            theta = np.arctan2(data[1, 1] - data[0, 1],
                               data[1, 0] - data[0, 0])
        elif data.shape[0] > 2:  # over-determined
            data = data - X0
            # first principal component
            _, _, v = np.linalg.svd(data)
            theta = np.arctan2(v[0, 1], v[0, 0])
        else:  # under-determined
            raise ValueError('At least 2 input points needed.')
        # angle perpendicular to line angle
        theta = (theta + np.pi / 2) % np.pi
        # line always passes through mean
        dist = X0[0] * math.cos(theta) + X0[1] * math.sin(theta)
        self.params = (dist, theta)
    def residuals(self, data):
        """Determine residuals of data to model.
        For each point the shortest distance to the line is returned.
        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.
        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        _check_data_dim(data, dim=2)
        dist, theta = self.params
        x = data[:, 0]
        y = data[:, 1]
        # Signed perpendicular distance from each point to the line.
        return dist - (x * math.cos(theta) + y * math.sin(theta))
    def predict_x(self, y, params=None):
        """Predict x-coordinates using the estimated model.
        Parameters
        ----------
        y : array
            y-coordinates.
        params : (2, ) array, optional
            Optional custom parameter set.
        Returns
        -------
        x : array
            Predicted x-coordinates.
        """
        if params is None:
            params = self.params
        dist, theta = params
        # NOTE(review): divides by cos(theta) — for a horizontal line
        # (theta = pi/2) this divides by ~0; callers must avoid that case.
        return (dist - y * math.sin(theta)) / math.cos(theta)
    def predict_y(self, x, params=None):
        """Predict y-coordinates using the estimated model.
        Parameters
        ----------
        x : array
            x-coordinates.
        params : (2, ) array, optional
            Optional custom parameter set.
        Returns
        -------
        y : array
            Predicted y-coordinates.
        """
        if params is None:
            params = self.params
        dist, theta = params
        # NOTE(review): divides by sin(theta) — for a vertical line
        # (theta = 0) this divides by ~0; callers must avoid that case.
        return (dist - x * math.cos(theta)) / math.sin(theta)
class CircleModel(BaseModel):
    """Total least squares estimator for 2D circles.
    The functional model of the circle is::
        r**2 = (x - xc)**2 + (y - yc)**2
    This estimator minimizes the squared distances from all points to the
    circle::
        min{ sum((r - sqrt((x_i - xc)**2 + (y_i - yc)**2))**2) }
    A minimum number of 3 points is required to solve for the parameters.
    Attributes
    ----------
    params : tuple
        Circle model parameters in the following order `xc`, `yc`, `r`.
    """
    def estimate(self, data):
        """Estimate circle model from data using total least squares.
        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.
        """
        _check_data_dim(data, dim=2)
        x = data[:, 0]
        y = data[:, 1]
        # pre-allocate jacobian for all iterations
        A = np.zeros((3, data.shape[0]), dtype=np.double)
        # same for all iterations: r
        A[2, :] = -1
        def dist(xc, yc):
            # Euclidean distance of every data point from a candidate centre.
            return np.sqrt((x - xc)**2 + (y - yc)**2)
        def fun(params):
            # Residual vector: signed distance of each point from the circle.
            xc, yc, r = params
            return dist(xc, yc) - r
        def Dfun(params):
            # Jacobian of `fun` w.r.t. (xc, yc, r); one column per data point.
            xc, yc, r = params
            d = dist(xc, yc)
            A[0, :] = -(x - xc) / d
            A[1, :] = -(y - yc) / d
            # same for all iterations, so not changed in each iteration
            #A[2, :] = -1
            return A
        # Initial guess: centroid of the points and the mean radius.
        xc0 = x.mean()
        yc0 = y.mean()
        r0 = dist(xc0, yc0).mean()
        params0 = (xc0, yc0, r0)
        params, _ = optimize.leastsq(fun, params0, Dfun=Dfun, col_deriv=True)
        self.params = params
    def residuals(self, data):
        """Determine residuals of data to model.
        For each point the shortest distance to the circle is returned.
        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.
        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        _check_data_dim(data, dim=2)
        xc, yc, r = self.params
        x = data[:, 0]
        y = data[:, 1]
        return r - np.sqrt((x - xc)**2 + (y - yc)**2)
    def predict_xy(self, t, params=None):
        """Predict x- and y-coordinates using the estimated model.
        Parameters
        ----------
        t : array
            Angles in circle in radians. Angles start to count from positive
            x-axis to positive y-axis in a right-handed system.
        params : (3, ) array, optional
            Optional custom parameter set.
        Returns
        -------
        xy : (..., 2) array
            Predicted x- and y-coordinates.
        """
        if params is None:
            params = self.params
        xc, yc, r = params
        x = xc + r * np.cos(t)
        y = yc + r * np.sin(t)
        return np.concatenate((x[..., None], y[..., None]), axis=t.ndim)
class EllipseModel(BaseModel):
    """Total least squares estimator for 2D ellipses.
    The functional model of the ellipse is::
        xt = xc + a*cos(theta)*cos(t) - b*sin(theta)*sin(t)
        yt = yc + a*sin(theta)*cos(t) + b*cos(theta)*sin(t)
        d = sqrt((x - xt)**2 + (y - yt)**2)
    where ``(xt, yt)`` is the closest point on the ellipse to ``(x, y)``. Thus
    d is the shortest distance from the point to the ellipse.
    This estimator minimizes the squared distances from all points to the
    ellipse::
        min{ sum(d_i**2) } = min{ sum((x_i - xt)**2 + (y_i - yt)**2) }
    Thus you have ``2 * N`` equations (x_i, y_i) for ``N + 5`` unknowns (t_i,
    xc, yc, a, b, theta), which gives you an effective redundancy of ``N - 5``.
    The ``params`` attribute contains the parameters in the following order::
        xc, yc, a, b, theta
    A minimum number of 5 points is required to solve for the parameters.
    Attributes
    ----------
    params : tuple
        Ellipse model parameters in the following order `xc`, `yc`, `a`,
        `b`, `theta`.
    """
    def estimate(self, data):
        """Estimate ellipse model from data using total least squares.
        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.
        """
        _check_data_dim(data, dim=2)
        x = data[:, 0]
        y = data[:, 1]
        N = data.shape[0]
        # pre-allocate jacobian for all iterations
        A = np.zeros((N + 5, 2 * N), dtype=np.double)
        # same for all iterations: xc, yc
        A[0, :N] = -1
        A[1, N:] = -1
        diag_idxs = np.diag_indices(N)
        def fun(params):
            # Residuals of all points against their closest ellipse points;
            # x- and y-residuals are concatenated into one vector.
            xyt = self.predict_xy(params[5:], params[:5])
            fx = x - xyt[:, 0]
            fy = y - xyt[:, 1]
            return np.append(fx, fy)
        def Dfun(params):
            # Jacobian of `fun` w.r.t. (xc, yc, a, b, theta, t_1..t_N).
            xc, yc, a, b, theta = params[:5]
            t = params[5:]
            ct = np.cos(t)
            st = np.sin(t)
            ctheta = math.cos(theta)
            stheta = math.sin(theta)
            # derivatives for fx, fy in the following order:
            # xc, yc, a, b, theta, t_i
            # fx
            A[2, :N] = - ctheta * ct
            A[3, :N] = stheta * st
            A[4, :N] = a * stheta * ct + b * ctheta * st
            A[5:, :N][diag_idxs] = a * ctheta * st + b * stheta * ct
            # fy
            A[2, N:] = - stheta * ct
            A[3, N:] = - ctheta * st
            A[4, N:] = - a * ctheta * ct + b * stheta * st
            A[5:, N:][diag_idxs] = a * stheta * st - b * ctheta * ct
            return A
        # initial guess of parameters using a circle model
        params0 = np.empty((N + 5, ), dtype=np.double)
        xc0 = x.mean()
        yc0 = y.mean()
        r0 = np.sqrt((x - xc0)**2 + (y - yc0)**2).mean()
        params0[:5] = (xc0, yc0, r0, 0, 0)
        params0[5:] = np.arctan2(y - yc0, x - xc0)
        params, _ = optimize.leastsq(fun, params0, Dfun=Dfun, col_deriv=True)
        # Only the 5 ellipse parameters are kept; the per-point t_i are
        # nuisance parameters.
        self.params = params[:5]
    def residuals(self, data):
        """Determine residuals of data to model.
        For each point the shortest distance to the ellipse is returned.
        Parameters
        ----------
        data : (N, 2) array
            N points with ``(x, y)`` coordinates, respectively.
        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        _check_data_dim(data, dim=2)
        xc, yc, a, b, theta = self.params
        ctheta = math.cos(theta)
        stheta = math.sin(theta)
        x = data[:, 0]
        y = data[:, 1]
        N = data.shape[0]
        def fun(t, xi, yi):
            # Squared distance from (xi, yi) to the ellipse point at angle t.
            ct = math.cos(t)
            st = math.sin(t)
            xt = xc + a * ctheta * ct - b * stheta * st
            yt = yc + a * stheta * ct + b * ctheta * st
            return (xi - xt)**2 + (yi - yt)**2
        # def Dfun(t, xi, yi):
        #     ct = math.cos(t)
        #     st = math.sin(t)
        #     xt = xc + a * ctheta * ct - b * stheta * st
        #     yt = yc + a * stheta * ct + b * ctheta * st
        #     dfx_t = - 2 * (xi - xt) * (- a * ctheta * st
        #                                - b * stheta * ct)
        #     dfy_t = - 2 * (yi - yt) * (- a * stheta * st
        #                                + b * ctheta * ct)
        #     return [dfx_t + dfy_t]
        residuals = np.empty((N, ), dtype=np.double)
        # initial guess for parameter t of closest point on ellipse
        t0 = np.arctan2(y - yc, x - xc) - theta
        # determine shortest distance to ellipse for each point
        for i in range(N):
            xi = x[i]
            yi = y[i]
            # faster without Dfun, because of the python overhead
            t, _ = optimize.leastsq(fun, t0[i], args=(xi, yi))
            residuals[i] = np.sqrt(fun(t, xi, yi))
        return residuals
    def predict_xy(self, t, params=None):
        """Predict x- and y-coordinates using the estimated model.
        Parameters
        ----------
        t : array
            Angles in circle in radians. Angles start to count from positive
            x-axis to positive y-axis in a right-handed system.
        params : (5, ) array, optional
            Optional custom parameter set.
        Returns
        -------
        xy : (..., 2) array
            Predicted x- and y-coordinates.
        """
        if params is None:
            params = self.params
        xc, yc, a, b, theta = params
        ct = np.cos(t)
        st = np.sin(t)
        ctheta = math.cos(theta)
        stheta = math.sin(theta)
        x = xc + a * ctheta * ct - b * stheta * st
        y = yc + a * stheta * ct + b * ctheta * st
        return np.concatenate((x[..., None], y[..., None]), axis=t.ndim)
def ransac(data, model_class, min_samples, residual_threshold,
           is_data_valid=None, is_model_valid=None,
           max_trials=100, stop_sample_num=np.inf, stop_residuals_sum=0):
    """Fit a model to data with the RANSAC (random sample consensus) algorithm.
    RANSAC is an iterative algorithm for the robust estimation of parameters
    from a subset of inliers from the complete data set. Each iteration
    performs the following tasks:
    1. Select `min_samples` random samples from the original data and check
       whether the set of data is valid (see `is_data_valid`).
    2. Estimate a model to the random subset
       (`model_cls.estimate(*data[random_subset]`) and check whether the
       estimated model is valid (see `is_model_valid`).
    3. Classify all data as inliers or outliers by calculating the residuals
       to the estimated model (`model_cls.residuals(*data)`) - all data samples
       with residuals smaller than the `residual_threshold` are considered as
       inliers.
    4. Save estimated model as best model if number of inlier samples is
       maximal. In case the current estimated model has the same number of
       inliers, it is only considered as the best model if it has less sum of
       residuals.
    These steps are performed either a maximum number of times or until one of
    the special stop criteria are met. The final model is estimated using all
    inlier samples of the previously determined best model.
    Parameters
    ----------
    data : [list, tuple of] (N, D) array
        Data set to which the model is fitted, where N is the number of data
        points and D the dimensionality of the data.
        If the model class requires multiple input data arrays (e.g. source and
        destination coordinates of ``skimage.transform.AffineTransform``),
        they can be optionally passed as tuple or list. Note, that in this case
        the functions ``estimate(*data)``, ``residuals(*data)``,
        ``is_model_valid(model, *random_data)`` and
        ``is_data_valid(*random_data)`` must all take each data array as
        separate arguments.
    model_class : object
        Object with the following object methods:
         * ``estimate(*data)``
         * ``residuals(*data)``
    min_samples : int
        The minimum number of data points to fit a model to. Must satisfy
        ``0 < min_samples <= N``.
    residual_threshold : float
        Maximum distance for a data point to be classified as an inlier.
    is_data_valid : function, optional
        This function is called with the randomly selected data before the
        model is fitted to it: `is_data_valid(*random_data)`.
    is_model_valid : function, optional
        This function is called with the estimated model and the randomly
        selected data: `is_model_valid(model, *random_data)`, .
    max_trials : int, optional
        Maximum number of iterations for random sample selection.
    stop_sample_num : int, optional
        Stop iteration if at least this number of inliers are found.
    stop_residuals_sum : float, optional
        Stop iteration if sum of residuals is less equal than this threshold.
    Returns
    -------
    model : object
        Best model with largest consensus set.
    inliers : (N, ) array
        Boolean mask of inliers classified as ``True``.
    Raises
    ------
    ValueError
        If `min_samples` is not in the range ``(0, N]``.
    References
    ----------
    .. [1] "RANSAC", Wikipedia, http://en.wikipedia.org/wiki/RANSAC
    Examples
    --------
    Generate ellipse data without tilt and add noise:
    >>> t = np.linspace(0, 2 * np.pi, 50)
    >>> a = 5
    >>> b = 10
    >>> xc = 20
    >>> yc = 30
    >>> x = xc + a * np.cos(t)
    >>> y = yc + b * np.sin(t)
    >>> data = np.column_stack([x, y])
    >>> np.random.seed(seed=1234)
    >>> data += np.random.normal(size=data.shape)
    Add some faulty data:
    >>> data[0] = (100, 100)
    >>> data[1] = (110, 120)
    >>> data[2] = (120, 130)
    >>> data[3] = (140, 130)
    Estimate ellipse model using all available data:
    >>> model = EllipseModel()
    >>> model.estimate(data)
    >>> model.params # doctest: +SKIP
    array([ -3.30354146e+03,  -2.87791160e+03,   5.59062118e+03,
             7.84365066e+00,   7.19203152e-01])
    Estimate ellipse model using RANSAC:
    >>> ransac_model, inliers = ransac(data, EllipseModel, 5, 3, max_trials=50)
    >>> ransac_model.params # doctest: +SKIP
    array([ 20.12762373,  29.73563063,   4.81499637,  10.4743584 ,   0.05217117])
    >>> inliers # doctest: +SKIP
    array([False, False, False, False,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True], dtype=bool)
    Robustly estimate geometric transformation:
    >>> from skimage.transform import SimilarityTransform
    >>> np.random.seed(0)
    >>> src = 100 * np.random.rand(50, 2)
    >>> model0 = SimilarityTransform(scale=0.5, rotation=1,
    ...                              translation=(10, 20))
    >>> dst = model0(src)
    >>> dst[0] = (10000, 10000)
    >>> dst[1] = (-100, 100)
    >>> dst[2] = (50, 50)
    >>> model, inliers = ransac((src, dst), SimilarityTransform, 2, 10)
    >>> inliers # doctest: +SKIP
    array([False, False, False,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True], dtype=bool)
    """
    best_model = None
    best_inlier_num = 0
    best_inlier_residuals_sum = np.inf
    best_inliers = None

    if not isinstance(data, (list, tuple)):
        data = [data]
    # make sure data is list and not tuple, so it can be modified below
    data = list(data)

    # number of samples
    N = data[0].shape[0]
    # Guard against degenerate requests up front; previously a too-large
    # `min_samples` silently produced duplicated sample points.
    if not 0 < min_samples <= N:
        raise ValueError("`min_samples` must be in range (0, %d]." % N)

    for _ in range(max_trials):
        # Choose the random sample set WITHOUT replacement: drawing the same
        # data point twice (as `np.random.randint` could) yields degenerate
        # minimal sets and wasted model estimations.
        random_idxs = np.random.choice(N, min_samples, replace=False)
        samples = [d[random_idxs] for d in data]

        # check if random sample set is valid
        if is_data_valid is not None and not is_data_valid(*samples):
            continue

        # estimate model for current random sample set
        sample_model = model_class()
        sample_model.estimate(*samples)

        # check if estimated model is valid
        if is_model_valid is not None and not is_model_valid(sample_model,
                                                             *samples):
            continue

        sample_model_residuals = np.abs(sample_model.residuals(*data))
        # consensus set / inliers
        sample_model_inliers = sample_model_residuals < residual_threshold
        sample_model_residuals_sum = np.sum(sample_model_residuals**2)

        # choose as new best model if number of inliers is maximal
        sample_inlier_num = np.sum(sample_model_inliers)
        if (
            # more inliers
            sample_inlier_num > best_inlier_num
            # same number of inliers but less "error" in terms of residuals
            or (sample_inlier_num == best_inlier_num
                and sample_model_residuals_sum < best_inlier_residuals_sum)
        ):
            best_model = sample_model
            best_inlier_num = sample_inlier_num
            best_inlier_residuals_sum = sample_model_residuals_sum
            best_inliers = sample_model_inliers
            if (
                best_inlier_num >= stop_sample_num
                or best_inlier_residuals_sum <= stop_residuals_sum
            ):
                break

    # estimate final model using all inliers
    if best_inliers is not None:
        # select inliers for each data array
        for i in range(len(data)):
            data[i] = data[i][best_inliers]
        best_model.estimate(*data)

    return best_model, best_inliers
|
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@igrins@procedures@skimage_measure_fit.py@.PATH_END.py
|
{
"filename": "srsgan.py",
"repo_name": "xwzhang98/SREmulator",
"repo_path": "SREmulator_extracted/SREmulator-main/map2map/map2map/models/srsgan.py",
"type": "Python"
}
|
from math import log2
import torch
import torch.nn as nn
from .narrow import narrow_by
from .resample import Resampler
class G(nn.Module):
    """StyleGAN2-like generator built from a stack of ``HBlock``s.

    Parameters
    ----------
    in_chan : number of input channels
    out_chan : number of output channels
    scale_factor : total upsampling factor (expected to be a power of 2)
    chan_base, chan_min, chan_max : control the channel schedule per block
    cat_noise : concatenate noise instead of adding it (see ``AddNoise``)
    """

    def __init__(
        self,
        in_chan,
        out_chan,
        scale_factor=16,
        chan_base=512,
        chan_min=64,
        chan_max=512,
        cat_noise=False,
        **kwargs
    ):
        super().__init__()
        self.scale_factor = scale_factor
        num_blocks = round(log2(self.scale_factor))
        assert chan_min <= chan_max

        def chan(b):
            # halve the base width per block, clamped to [chan_min, chan_max]
            return min(max(chan_base >> b, chan_min), chan_max)

        self.block0 = nn.Sequential(
            nn.Conv3d(in_chan, chan(0), 1),
            nn.LeakyReLU(0.2, True),
        )
        self.blocks = nn.ModuleList(
            HBlock(chan(b), chan(b + 1), out_chan, cat_noise)
            for b in range(num_blocks)
        )

    def forward(self, x):
        y = x  # direct upsampling from the input
        x = self.block0(x)
        # y = None  # no direct upsampling from the input
        for block in self.blocks:
            x, y = block(x, y)
        return y
class HBlock(nn.Module):
    """The "H" block of the StyleGAN2 generator.

    x_p                     y_p
     |                       |
    convolution      linear upsample
     |                       |
     >--- projection ------->+
     |                       |
     v                       v
    x_n                     y_n

    See Fig. 7 (b) upper in https://arxiv.org/abs/1912.04958

    Upsampling are all linear, not transposed convolution.

    Parameters
    ----------
    prev_chan : number of channels of x_p
    next_chan : number of channels of x_n
    out_chan : number of channels of y_p and y_n
    cat_noise: concatenate noise if True, otherwise add noise

    Notes
    -----
    next_size = 2 * prev_size - 6
    """
    def __init__(self, prev_chan, next_chan, out_chan, cat_noise):
        super().__init__()
        # Linear x2 upsampler; the same module instance is used both inside
        # self.conv (feature branch) and directly on y in forward().
        self.upsample = Resampler(3, 2)
        # Feature branch: noise -> upsample -> conv3 -> noise -> conv3.
        # When cat_noise is True each AddNoise appends one extra channel,
        # hence the `+ int(cat_noise)` on the conv input widths.
        self.conv = nn.Sequential(
            AddNoise(cat_noise, chan=prev_chan),
            self.upsample,
            nn.Conv3d(prev_chan + int(cat_noise), next_chan, 3),
            nn.LeakyReLU(0.2, True),
            AddNoise(cat_noise, chan=next_chan),
            nn.Conv3d(next_chan + int(cat_noise), next_chan, 3),
            nn.LeakyReLU(0.2, True),
        )
        # 1x1 projection from feature space to the out_chan output map.
        self.proj = nn.Sequential(
            nn.Conv3d(next_chan, out_chan, 1),
            nn.LeakyReLU(0.2, True),
        )
    def forward(self, x, y):
        x = self.conv(x)  # narrow by 3
        if y is None:
            # First block: start the output branch from the projection.
            y = self.proj(x)
        else:
            y = self.upsample(y)  # narrow by 1
            # Crop 2 more so y matches x (narrowed by 3 in total).
            y = narrow_by(y, 2)
            y = y + self.proj(x)
        return x, y
class AddNoise(nn.Module):
    """Inject per-sample Gaussian noise into a feature map.

    If ``cat`` is True the noise is appended as an extra channel; otherwise
    it is scaled by a learnable per-channel standard deviation (initialized
    to zero) and added to the input.  The number of channels ``chan`` should
    be 1 (StyleGAN2) or that of the input (StyleGAN).
    """

    def __init__(self, cat, chan=1):
        super().__init__()
        self.cat = cat
        if not self.cat:
            # learned noise amplitude, one scalar per channel, starts at 0
            self.std = nn.Parameter(torch.zeros([chan]))

    def forward(self, x):
        # single-channel noise with the same batch and spatial shape as x
        noise = torch.randn_like(x[:, :1])
        if self.cat:
            return torch.cat([x, noise], dim=1)
        # reshape std to (chan, 1, ..., 1) so it broadcasts over space
        amplitude = self.std.view((-1,) + (1,) * (x.dim() - 2))
        return x + amplitude * noise
class D(nn.Module):
    """StyleGAN2-like discriminator: a 1x1 input conv, a chain of
    downsampling residual blocks, and a 1x1 head producing one channel.

    Parameters
    ----------
    in_chan : number of input channels
    out_chan : accepted for interface symmetry with ``G`` but not used
    scale_factor : total downsampling factor (expected to be a power of 2)
    chan_base, chan_min, chan_max : control the channel schedule per block
    """

    def __init__(
        self,
        in_chan,
        out_chan,
        scale_factor=16,
        chan_base=512,
        chan_min=64,
        chan_max=512,
        **kwargs
    ):
        super().__init__()
        self.scale_factor = scale_factor
        num_blocks = round(log2(self.scale_factor))
        assert chan_min <= chan_max

        def chan(b):
            # shrink the base width for positive b, widen it for negative b,
            # then clamp to [chan_min, chan_max]
            width = chan_base >> b if b >= 0 else chan_base << -b
            return min(max(width, chan_min), chan_max)

        self.block0 = nn.Sequential(
            nn.Conv3d(in_chan, chan(num_blocks), 1),
            nn.LeakyReLU(0.2, True),
        )
        self.blocks = nn.ModuleList(
            ResBlock(chan(b + 1), chan(b))
            for b in reversed(range(num_blocks))
        )
        self.block9 = nn.Sequential(
            nn.Conv3d(chan(0), chan(-1), 1),
            nn.LeakyReLU(0.2, True),
            nn.Conv3d(chan(-1), 1, 1),
        )

    def forward(self, x):
        x = self.block0(x)
        for block in self.blocks:
            x = block(x)
        return self.block9(x)
class ResBlock(nn.Module):
    """The residual block of the StyleGAN2 discriminator.

    Downsampling are all linear, not strided convolution.

    Notes
    -----
    next_size = (prev_size - 4) // 2
    """

    def __init__(self, prev_chan, next_chan):
        super().__init__()
        # Main branch: two valid (unpadded) 3x3x3 convolutions.
        self.conv = nn.Sequential(
            nn.Conv3d(prev_chan, prev_chan, 3),
            nn.LeakyReLU(0.2, True),
            nn.Conv3d(prev_chan, next_chan, 3),
            nn.LeakyReLU(0.2, True),
        )
        # 1x1 channel projection for the identity branch.
        self.skip = nn.Conv3d(prev_chan, next_chan, 1)
        self.downsample = Resampler(3, 0.5)

    def forward(self, x):
        residual = self.conv(x)
        # Crop the skip branch by 2 so it matches the conv branch, which the
        # two unpadded 3x3x3 convolutions narrowed by 2.
        shortcut = narrow_by(self.skip(x), 2)
        return self.downsample(shortcut + residual)
|
xwzhang98REPO_NAMESREmulatorPATH_START.@SREmulator_extracted@SREmulator-main@map2map@map2map@models@srsgan.py@.PATH_END.py
|
{
"filename": "detectors.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/galsim/detectors.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
import numpy as np
import sys
import warnings
from .image import Image
from .errors import GalSimRangeError, GalSimValueError, GalSimIncompatibleValuesError, galsim_warn
def applyNonlinearity(self, NLfunc, *args):
    """
    Remap the pixel values of this `Image` in place through ``NLfunc``.

    This models detector response non-linearities, most notably 'classical'
    (voltage) non-linearity in the charge-to-voltage conversion, which arises
    as charge integrates at the junction capacitance of the pixel node and
    attenuates bright pixels.  The image should already contain both the
    astronomical signal and the background level, and effects occurring
    earlier in the signal chain (e.g. dark current, persistence) should
    already have been applied.  Pixel values should normally be in electrons
    when modeling classical non-linearity; other units are acceptable for
    other use cases.

    ``NLfunc`` may be any callable (a lambda, a `galsim.LookupTable`, or a
    user-defined function) that maps a 2d NumPy array to an array of the same
    shape; any extra ``*args`` are forwarded to it.  It must return the final
    image with nonlinearity included -- in the limit of no nonlinearity it
    returns the original image, NOT zero.

    Examples::

        >>> img.applyNonlinearity(lambda x: x + (1.e-7)*(x**2))
        >>> img.applyNonlinearity(lambda x, b1, b2: x - b1*x*x + b2*x*x*x, 1.e-7, 1.e-10)

    Parameters:
        NLfunc:     The function that maps the input image pixel values to the
                    output image pixel values.
        *args:      Any subsequent arguments are passed along to NLfunc.
    """
    # Hand NLfunc the bare array, since not every callable can act on an Image.
    transformed = NLfunc(self.array, *args)
    if not isinstance(transformed, np.ndarray):
        raise GalSimValueError("NLfunc does not return a NumPy array.", NLfunc)
    if self.array.shape != transformed.shape:
        raise GalSimValueError("NLfunc does not return a NumPy array of the same shape as input.",
                               NLfunc)
    # Assign in place so the Image's buffer (not just the local name) updates.
    self.array[:, :] = transformed
def addReciprocityFailure(self, exp_time, alpha, base_flux):
    r"""
    Modify this `Image` in place to include reciprocity failure.

    Reciprocity is the inverse relation between the incident flux (I) of a
    source and the exposure time (t) needed to produce a given detector
    response (p), i.e. p = I*t.  At low flux levels HgCdTe detectors used for
    near-infrared astronomy deviate from this relation: the pixel response to
    a high flux is relatively larger than to a low flux.  Following the
    empirical H2RG results (see Fig. 1 and Fig. 2 of
    http://arxiv.org/abs/1106.1090), the effect is modeled as a power law,

    .. math::
        \frac{p_R}{p} = \left(\frac{p/t}{p^\prime/t^\prime}\right)^\frac{\alpha}{\log(10)},

    where :math:`p^\prime/t^\prime` is the count rate (electrons/second) at
    which the detector gain is calibrated to its nominal value, and
    :math:`\alpha` (in units of per decade) depends on the detector and its
    operating temperature.  This power-law form follows from the common
    logarithmic relation via :math:`p_R/p \approx 1 + \log(p_R/p)` (valid
    since :math:`p_R/p` stays close to unity over a wide flux range) and is
    better behaved at low flux.

    Because of how this function is defined, the image must be non-negative
    for the result to be well-defined; negative pixels come out as 'nan'.
    The image should be in electrons -- or, if in ADU, the value passed as
    ``exp_time`` should be the exposure time divided by the nominal gain.
    The image should include both signal and background; nonlinearity should
    be added after this effect.

    Parameters:
        exp_time:   The exposure time (t) in seconds, which goes into the
                    expression for reciprocity failure given in the docstring.
        alpha:      The alpha parameter in the expression for reciprocity
                    failure, in units of 'per decade'.
        base_flux:  The flux (:math:`p^\prime/t^\prime`) at which the gain is
                    calibrated to have its nominal value.
    """
    # All three inputs must be non-negative; check them in order.
    for par_name, par_value in (('alpha', alpha), ('exp_time', exp_time),
                                ('base_flux', base_flux)):
        if par_value < 0.:
            raise GalSimRangeError("Invalid value of %s, must be >= 0" % par_name,
                                   par_value, 0, None)
    if np.any(self.array < 0):
        galsim_warn("One or more pixel values are negative and will be set as 'nan'.")
    p0 = exp_time * base_flux
    power = alpha / np.log(10)
    with warnings.catch_warnings():
        # Negative pixels would raise a RuntimeWarning from the power law; the
        # galsim_warn above is more informative, so silence the duplicate.
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        self.applyNonlinearity(lambda x, x0, a: (x**(a + 1)) / (x0**a), p0, power)
def applyIPC(self, IPC_kernel, edge_treatment='extend', fill_value=None, kernel_nonnegativity=True,
             kernel_normalization=True):
    """
    Applies the effect of interpixel capacitance to the `Image` instance.

    In NIR detectors, the quantity that is sensed is not the charge as in CCDs,
    but a voltage that relates to the charge present within each pixel.  The
    voltage read at a given pixel location is influenced by the charges present
    in the neighboring pixel locations due to capacitive coupling of sense
    nodes.

    This interpixel capacitance is approximated as a linear effect that can be
    described by a 3x3 kernel that is convolved with the image.  The kernel
    must be an `Image` instance and could be intrinsically anisotropic.  A
    sensible kernel must have non-negative entries and must be normalized such
    that the sum of the elements is 1, in order to conserve the total charge.
    The (1,1) element of the kernel is the contribution to the voltage read at
    a pixel from the electrons in the pixel to its bottom-left, the (1,2)
    element of the kernel is the contribution from the charges to its left and
    so on.

    The argument 'edge_treatment' specifies how the edges of the image should
    be treated, which could be in one of the three ways:

    1. 'extend': The kernel is convolved with the zero-padded image, leading to
       a larger intermediate image.  The central portion of this image is
       returned.  [default]
    2. 'crop': The kernel is convolved with the image, with the kernel inside
       the image completely.  Pixels at the edges, where the center of the
       kernel could not be placed, are set to the value specified by
       'fill_value'.  If 'fill_value' is not specified or set to 'None', then
       the pixel values in the original image are retained.  The user can make
       the edges invalid by setting fill_value to numpy.nan.
    3. 'wrap': The kernel is convolved with the image, assuming periodic
       boundary conditions.

    The size of the image array remains unchanged in all three cases.

    Parameters:
        IPC_kernel:             A 3x3 `Image` instance that is convolved with
                                the `Image` instance
        edge_treatment:         Specifies the method of handling edges and
                                should be one of 'crop', 'extend' or 'wrap'.
                                See above for details.  [default: 'extend']
        fill_value:             Specifies the value (including nan) to fill the
                                edges with when edge_treatment is 'crop'.  If
                                unspecified or set to 'None', the original
                                pixel values are retained at the edges.  If
                                edge_treatment is not 'crop', then this is
                                ignored.
        kernel_nonnegativity:   Specify whether the kernel should have only
                                non-negative entries.  [default: True]
        kernel_normalization:   Specify whether to check and enforce correct
                                normalization for the kernel.  [default: True]
    """
    # IPC kernel has to be a 3x3 Image
    ipc_kernel = IPC_kernel.array
    if not ipc_kernel.shape==(3,3):
        raise GalSimValueError("IPC kernel must be an Image instance of size 3x3.", IPC_kernel)
    # Check for non-negativity of the kernel
    if kernel_nonnegativity and (ipc_kernel<0).any():
        raise GalSimValueError("IPC kernel must not contain negative entries", IPC_kernel)
    # Check and enforce correct normalization for the kernel.  Note that this
    # rebinds the local name IPC_kernel to a new, rescaled Image; ipc_kernel
    # (the raw array) is not used again after this point.
    if kernel_normalization and abs(ipc_kernel.sum()-1) > 10.*np.finfo(ipc_kernel.dtype.type).eps:
        galsim_warn("The entries in the IPC kernel did not sum to 1. Scaling the kernel to "
                    "ensure correct normalization.")
        IPC_kernel = IPC_kernel/ipc_kernel.sum()
    # edge_treatment can be 'extend', 'wrap' or 'crop'
    if edge_treatment=='crop':
        # Simply re-label the array of the Image instance
        pad_array = self.array
    elif edge_treatment=='extend':
        # Copy the array of the Image instance and pad with zeros
        pad_array = np.zeros((self.array.shape[0]+2,self.array.shape[1]+2))
        pad_array[1:-1,1:-1] = self.array
    elif edge_treatment=='wrap':
        # Copy the array of the Image instance and pad with zeros initially
        pad_array = np.zeros((self.array.shape[0]+2,self.array.shape[1]+2))
        pad_array[1:-1,1:-1] = self.array
        # and wrap around the edges
        pad_array[0,:] = pad_array[-2,:]
        pad_array[-1,:] = pad_array[1,:]
        pad_array[:,0] = pad_array[:,-2]
        pad_array[:,-1] = pad_array[:,1]
    else:
        raise GalSimValueError("Invalid edge_treatment.", edge_treatment,
                               ('extend', 'wrap', 'crop'))
    # Generating different segments of the padded array.
    # NOTE(review): the first axis appears to index rows (y) increasing upward
    # -- 'top' takes the larger first-index slice -- consistent with the
    # Image-origin comments below; confirm against the Image array convention.
    center = pad_array[1:-1,1:-1]
    top = pad_array[2:,1:-1]
    bottom = pad_array[:-2,1:-1]
    left = pad_array[1:-1,:-2]
    right = pad_array[1:-1,2:]
    topleft = pad_array[2:,:-2]
    bottomright = pad_array[:-2,2:]
    topright = pad_array[2:,2:]
    bottomleft = pad_array[:-2,:-2]
    # Ensure that the origin is (1,1); use a view so the caller's kernel Image
    # keeps its own origin.
    kernel = IPC_kernel.view()
    kernel.setOrigin(1,1)
    # Generating the output array, with 2 rows and 2 columns lesser than the padded array
    # Image values have been used to make the code look more intuitive
    out_array = kernel(1,3)*topleft + kernel(2,3)*top + kernel(3,3)*topright + \
        kernel(1,2)*left + kernel(2,2)*center + kernel(3,2)*right + \
        kernel(1,1)*bottomleft + kernel(2,1)*bottom + kernel(3,1)*bottomright
    if edge_treatment=='crop':
        # Interior pixels get the convolved values; the one-pixel border is
        # either retained (fill_value None) or overwritten with fill_value.
        self.array[1:-1,1:-1] = out_array
        #Explicit edge effects handling with filling the edges with the value given in fill_value
        if fill_value is not None:
            self.array[0,:] = fill_value
            self.array[-1,:] = fill_value
            self.array[:,0] = fill_value
            self.array[:,-1] = fill_value
    else:
        self.array[:,:] = out_array
def applyPersistence(self, imgs, coeffs):
    """
    Add persistence signal from earlier exposures to this `Image` in place.

    Persistence refers to the retention of a small fraction of the signal
    after resetting the imager pixel elements; the persistence signal of a
    previous exposure remains in the pixel even after several detector resets,
    most likely due to charge traps in the material.  Laboratory tests on the
    Roman Space Telescope CMOS detectors show that if exposures and readouts
    are taken in a fixed cadence, the persistence signal can be given as a
    linear combination of prior pixel values added to the current image.

    Each image in ``imgs`` is scaled by the matching entry of ``coeffs`` and
    accumulated onto this image.  The pixel values of the `Image` instances in
    the list must correspond to the electron counts before the readout.  This
    routine does NOT keep track of realistic dither patterns: during image
    simulation the user maintains the queue of previous images outside this
    routine, inserting the latest image at the front of the list and dropping
    the oldest, while ``coeffs`` usually stays constant.

    Parameters:
        imgs:   A list of previous `Image` instances that still persist.
        coeffs: A list of floats that specifies the retention factors for the
                corresponding `Image` instances listed in 'imgs'.
    """
    if len(imgs) != len(coeffs):
        raise GalSimIncompatibleValuesError("The length of 'imgs' and 'coeffs' must be the same",
                                            imgs=imgs, coeffs=coeffs)
    # Accumulate each persisting image, scaled by its retention factor.
    for retention, previous_img in zip(coeffs, imgs):
        self += retention * previous_img
def quantize(self):
    """
    Round every pixel value to the nearest integer, preserving the data type.

    At certain stages in the astronomical image generation process, detectors
    effectively round to the nearest integer; exactly where depends on the
    type of device (CCD vs. NIR detector).  For H2RG detectors, quantization
    happens in two stages: first when detecting a certain number of photons
    (the sum of background and signal multiplied by the QE, including
    reciprocity failure), and again at analog-to-digital conversion -- after
    intervening processes (e.g. nonlinearity, IPC, read noise) that can
    produce non-integer pixel values.

    Because quantization need not be the last step in the process, this
    routine does not change the type of the image to 'int'.  Users can easily
    do so afterwards::

        >>> image.quantize()
        >>> int_image = galsim.Image(image, dtype=int)
    """
    # Rounding is just a particular pixel-value remapping.
    self.applyNonlinearity(np.round)
# Monkey-patch the routines above onto the Image class so they can be called
# as methods, e.g. ``img.applyNonlinearity(f)``.
Image.applyNonlinearity = applyNonlinearity
Image.addReciprocityFailure = addReciprocityFailure
Image.applyIPC = applyIPC
Image.applyPersistence = applyPersistence
Image.quantize = quantize
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@galsim@detectors.py@.PATH_END.py
|
{
"filename": "test_extended_source.py",
"repo_name": "threeML/astromodels",
"repo_path": "astromodels_extracted/astromodels-master/astromodels/tests/test_extended_source.py",
"type": "Python"
}
|
from __future__ import print_function
# this prevent a crash in macos. If does not import threeML first the code crashes
# with a segmantiation violation (Need to investigate more)s
try:
from threeML import *
except:
pass
import astropy.io.fits as fits
import astropy.units as u
from astropy import wcs
import numpy as np
import pytest
from astromodels.core.model import Model
from astromodels.core.model_parser import clone_model
from astromodels.core.spectral_component import SpectralComponent
from astromodels.functions import *
from astromodels.functions import Log_parabola, Powerlaw
from astromodels.functions.function import _known_functions
from astromodels.sources.extended_source import ExtendedSource
__author__ = "henrikef"
def make_test_template(ra, dec, fitsfile):
    """Write a test template map to ``fitsfile``: a 40-pixel (0.8 deg) wide
    square centered approximately on (ra, dec), normalized so that its
    integral over solid angle is 1 (values are per steradian)."""
    test_wcs = False
    if test_wcs:
        # this is an alternative way to build the header from WCS:
        w = wcs.WCS(naxis=2)
        w.wcs.crpix = [100, 100]
        w.wcs.cdelt = np.array([-0.02, 0.02])
        w.wcs.crval = [ra, dec]
        w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
        dOmega = (
            (abs(w.wcs.cdelt[0] * w.wcs.cdelt[1]) * u.degree * u.degree)
            .to(u.steradian)
            .value
        )
        header = w.to_header()
    else:
        # Build the header by hand: 200x200 pixels of 0.02 deg, TAN projection
        # with the reference pixel at the center.
        cdelt1, cdelt2 = -0.02, 0.02
        cards = {
            "SIMPLE": "T",
            "BITPIX": -32,
            "NAXIS": 2,
            "NAXIS1": 200,
            "NAXIS2": 200,
            "DATE": "2018-11-13",
            "CUNIT1": "deg",
            "CRVAL1": ra,
            "CRPIX1": 100,
            "CDELT1": cdelt1,
            "CUNIT2": "deg",
            "CRVAL2": dec,
            "CRPIX2": 100,
            "CDELT2": cdelt2,
            "CTYPE1": "RA---TAN",
            "CTYPE2": "DEC--TAN",
        }
        # pixel solid angle in steradian
        dOmega = (
            (abs(cdelt1 * cdelt2) * u.degree * u.degree)
            .to(u.steradian)
            .value
        )
        header = fits.Header(cards)
    # Unit square of 40x40 pixels in the middle of the map...
    data = np.zeros([200, 200])
    data[80:120, 80:120] = 1
    # ...normalized to unit integral over solid angle.
    total = np.sum(data)
    data = data / total / dOmega
    fits.PrimaryHDU(data=data, header=header).writeto(fitsfile, overwrite=True)
def test_constructor():
    """Test ExtendedSource construction, bad-argument errors, and the default
    (free) state of the position parameters."""
    # RA, Dec and L,B of the same point in the sky
    ra, dec = (125.6, -75.3)
    l, b = (288.44190139183564, -20.717313145391525)
    # This should throw an error as we are using Powerlaw instead of Powerlaw()
    with pytest.raises(RuntimeError):
        _ = ExtendedSource("my_source", Gaussian_on_sphere, Powerlaw)
    # This should throw an error because we should use a 2D function for the spatial shape
    with pytest.raises(RuntimeError):
        _ = ExtendedSource("my_source", Powerlaw(), Powerlaw())
    # Init with RA, Dec
    shape = Gaussian_on_sphere()
    source1 = ExtendedSource("my_source", shape, Powerlaw())
    shape.lon0 = ra * u.degree
    shape.lat0 = dec * u.degree
    assert source1.spatial_shape.lon0.value == ra
    assert source1.spatial_shape.lat0.value == dec
    # Verify that the position is free by default
    assert source1.spatial_shape.lon0.free
    # Bug fix: this previously re-tested lon0; lat0 must be checked as well.
    assert source1.spatial_shape.lat0.free
def test_call():
    """For every known 2D (non-prior) spatial function, build a two-component
    ExtendedSource and check that calling it factorizes into the sum of the
    spectral components times the spatial shape."""
    # Multi-component
    po1 = Powerlaw()
    po2 = Powerlaw()
    c1 = SpectralComponent("component1", po1)
    c2 = SpectralComponent("component2", po2)
    ra, dec = (125.6, -75.3)

    def test_one(class_type, name):
        # Build a source from one spatial-shape class and exercise it.
        print("testing %s ..." % name)
        if name != "SpatialTemplate_2D":
            shape = class_type()
            source = ExtendedSource("test_source_%s" % name, shape, components=[c1, c2])
            shape.lon0 = ra * u.degree
            shape.lat0 = dec * u.degree
        else:
            # The template shape needs a FITS file on disk to load from.
            make_test_template(ra, dec, "__test.fits")
            shape = class_type(fits_file="__test.fits")
            source = ExtendedSource("test_source_%s" % name, shape, components=[c1, c2])
            shape.K = 1.0
        assert np.all(source.spectrum.component1([1, 2, 3]) == po1([1, 2, 3]))
        assert np.all(source.spectrum.component2([1, 2, 3]) == po2([1, 2, 3]))
        one = source.spectrum.component1([1, 2, 3])
        two = source.spectrum.component2([1, 2, 3])
        # check spectral components
        assert np.all(
            np.abs(one + two - source.get_spatially_integrated_flux([1, 2, 3])) == 0
        )
        # check spectral and spatial components
        total = source([ra, ra, ra], [dec, dec, dec], [1, 2, 3])
        spectrum = one + two
        spatial = source.spatial_shape([ra, ra, ra], [dec, dec, dec])
        assert np.all(np.abs(total - spectrum * spatial) == 0)
        # Same check slightly off-center, where the spatial factor differs.
        total = source([ra * 1.01] * 3, [dec * 1.01] * 3, [1, 2, 3])
        spectrum = one + two
        spatial = source.spatial_shape([ra * 1.01] * 3, [dec * 1.01] * 3)
        assert np.all(np.abs(total - spectrum * spatial) == 0)

    for key in _known_functions:
        if key in ["Latitude_galactic_diffuse"]:
            # not testing latitude galactic diffuse for now.
            continue
        this_function = _known_functions[key]
        if key in ["SpatialTemplate_2D"]:
            test_one(this_function, key)
        elif this_function._n_dim == 2 and not this_function().is_prior:
            test_one(this_function, key)

    with pytest.raises(AssertionError):
        # this will fail because the Latitude_galactic_diffuse function isn't normalized.
        test_one(
            _known_functions["Latitude_galactic_diffuse"],
            "Latitude_galactic_diffuse",
        )
def test_call_with_units():
    """Same factorization checks as test_call but with astropy units on every
    input, plus a round-trip through Model serialization via clone_model."""
    # Multi-component
    po1 = Powerlaw()
    po2 = Powerlaw()
    c1 = SpectralComponent("component1", po1)
    c2 = SpectralComponent("component2", po2)
    ra, dec = (125.6, -75.3)

    def test_one(class_type, name):
        # Build a source from one spatial-shape class and exercise it.
        print("testing %s ..." % name)
        if name != "SpatialTemplate_2D":
            shape = class_type()
            source = ExtendedSource(
                "test_source_%s" % name,
                spatial_shape=shape,
                components=[c1, c2],
            )
            shape.lon0 = ra * u.degree
            shape.lat0 = dec * u.degree
        else:
            # The template shape needs a FITS file on disk to load from.
            make_test_template(ra, dec, "__test.fits")
            shape = class_type(fits_file="__test.fits")
            source = ExtendedSource(
                "test_source_%s" % name,
                spatial_shape=shape,
                components=[c1, c2],
            )
            shape.K = 1.0
        assert np.all(
            source.spectrum.component1([1, 2, 3] * u.keV) == po1([1, 2, 3] * u.keV)
        )
        assert np.all(
            source.spectrum.component2([1, 2, 3] * u.keV) == po2([1, 2, 3] * u.keV)
        )
        one = source.spectrum.component1([1, 2, 3] * u.keV)
        two = source.spectrum.component2([1, 2, 3] * u.keV)
        # check spectral components
        assert np.all(
            np.abs(one + two - source.get_spatially_integrated_flux([1, 2, 3] * u.keV))
            == 0
        )
        # check spectral and spatial components
        # spatial = source.spatial_shape( ra*u.deg,dec*u.deg )
        spatial = source.spatial_shape([ra, ra, ra] * u.deg, [dec, dec, dec] * u.deg)
        total = source([ra, ra, ra] * u.deg, [dec, dec, dec] * u.deg, [1, 2, 3] * u.keV)
        spectrum = one + two
        assert np.all(np.abs(total - spectrum * spatial) == 0)
        # Same check slightly off-center, where the spatial factor differs.
        total = source(
            [ra * 1.01] * 3 * u.deg, [dec * 1.01] * 3 * u.deg, [1, 2, 3] * u.keV
        )
        spectrum = one + two
        spatial = source.spatial_shape(
            [ra * 1.01] * 3 * u.deg, [dec * 1.01] * 3 * u.deg
        )
        assert np.all(np.abs(total - spectrum * spatial) == 0)
        # A cloned model must evaluate to exactly the same values.
        model = Model(source)
        new_model = clone_model(model)
        new_total = new_model["test_source_%s" % name](
            [ra * 1.01] * 3 * u.deg, [dec * 1.01] * 3 * u.deg, [1, 2, 3] * u.keV
        )
        assert np.all(np.abs(total - new_total) == 0)

    for key in _known_functions:
        if key in ["Latitude_galactic_diffuse"]:
            # not testing latitude galactic diffuse for now.
            continue
        this_function = _known_functions[key]
        if key in ["SpatialTemplate_2D"]:
            test_one(this_function, key)
        elif this_function._n_dim == 2 and not this_function().is_prior:
            test_one(this_function, key)

    with pytest.raises(AssertionError):
        # this will fail because the Latitude_galactic_diffuse function isn't normalized.
        test_one(
            _known_functions["Latitude_galactic_diffuse"],
            "Latitude_galactic_diffuse",
        )
def test_free_param():
    """Check that ``free_parameters`` tracks the ``free`` flag of every
    spectral and spatial parameter of an ExtendedSource."""
    spectrum = Log_parabola()
    source = ExtendedSource(
        "test_source",
        spatial_shape=Gaussian_on_sphere(),
        spectral_shape=spectrum,
    )
    parameters = [
        spectrum.alpha,
        spectrum.beta,
        spectrum.piv,
        spectrum.K,
        source.spatial_shape.lat0,
        source.spatial_shape.lon0,
        source.spatial_shape.sigma,
    ]
    # Freeze everything: no free parameters should remain.
    for parameter in parameters:
        parameter.free = False
    assert len(source.free_parameters) == 0
    # Thaw one parameter at a time and watch the count grow accordingly.
    for count, parameter in enumerate(parameters, start=1):
        parameter.free = True
        assert len(source.free_parameters) == count
|
threeMLREPO_NAMEastromodelsPATH_START.@astromodels_extracted@astromodels-master@astromodels@tests@test_extended_source.py@.PATH_END.py
|
{
"filename": "fft_window.py",
"repo_name": "cosmodesi/pypower",
"repo_path": "pypower_extracted/pypower-main/pypower/fft_window.py",
"type": "Python"
}
|
"""
Implementation of window function estimation, following https://github.com/cosmodesi/GC_derivations,
and https://fr.overleaf.com/read/hpgbwqzmtcxn.
"""
import time
import numpy as np
from scipy import special
from . import mpi, utils
from .fftlog import PowerToCorrelation
from .utils import _make_array
from .fft_power import MeshFFTPower, get_real_Ylm, _transform_rslab, _get_real_dtype, _format_positions, _format_all_weights, project_to_basis, PowerSpectrumMultipoles, PowerSpectrumWedges, normalization, unnormalized_shotnoise
from .wide_angle import BaseMatrix, Projection, PowerSpectrumOddWideAngleMatrix
from .mesh import CatalogMesh, _get_mesh_attrs, _wrap_positions
def Si(x):
    """Sine integral :math:`\\mathrm{Si}(x) = \\int_0^x \\sin(t)/t \\, dt`.

    ``scipy.special.sici`` returns the pair (Si, Ci); only Si is needed here.
    """
    sine_integral, _cosine_integral = special.sici(x)
    return sine_integral
# Derivative of correlation function w.r.t. k-bins, precomputed with sympy; full, low-s or low-a limit
_registered_correlation_function_tophat_derivatives = {}
_registered_correlation_function_tophat_derivatives[0] = (lambda s, a: (-a * np.cos(a * s) / s + np.sin(a * s) / s**2) / (2 * np.pi**2 * s),
lambda s, a: -a**9 * s**6 / (90720 * np.pi**2) + a**7 * s**4 / (1680 * np.pi**2) - a**5 * s**2 / (60 * np.pi**2) + a**3 / (6 * np.pi**2))
_registered_correlation_function_tophat_derivatives[1] = (lambda s, a: ((-a * np.sin(a * s) - 2 * np.cos(a * s) / s) / s**2 + 2 / s**3) / (2 * np.pi**2),
lambda s, a: -a**10 * s**7 / (907200 * np.pi**2) + a**8 * s**5 / (13440 * np.pi**2) - a**6 * s**3 / (360 * np.pi**2) + a**4 * s / (24 * np.pi**2))
_registered_correlation_function_tophat_derivatives[2] = (lambda s, a: -(a * s * np.cos(a * s) - 4 * np.sin(a * s) + 3 * Si(a * s)) / (2 * np.pi**2 * s**3),
lambda s, a: -a**9 * s**6 / (136080 * np.pi**2) + a**7 * s**4 / (2940 * np.pi**2) - a**5 * s**2 / (150 * np.pi**2))
_registered_correlation_function_tophat_derivatives[3] = (lambda s, a: -(8 / s**3 + (a * s**2 * np.sin(a * s) + 7 * s * np.cos(a * s) - 15 * np.sin(a * s) / a) / s**4) / (2 * np.pi**2),
lambda s, a: -a**10 * s**7 / (1663200 * np.pi**2) + a**8 * s**5 / (30240 * np.pi**2) - a**6 * s**3 / (1260 * np.pi**2))
_registered_correlation_function_tophat_derivatives[4] = (lambda s, a: (-a * s**3 * np.cos(a * s) + 11 * s**2 * np.sin(a * s) + 15 * s**2 * Si(a * s) / 2 + 105 * s * np.cos(a * s) / (2 * a) - 105 * np.sin(a * s) / (2 * a**2)) / (2 * np.pi**2 * s**5),
lambda s, a: -a**9 * s**6 / (374220 * np.pi**2) + a**7 * s**4 / (13230 * np.pi**2))
_registered_correlation_function_tophat_derivatives[5] = (lambda s, a: (16 / s**3 + (-a * s**4 * np.sin(a * s) - 16 * s**3 * np.cos(a * s) + 105 * s**2 * np.sin(a * s) / a + 315 * s * np.cos(a * s) / a**2 - 315 * np.sin(a * s) / a**3) / s**6) / (2 * np.pi**2),
lambda s, a: -a**10 * s**7 / (5405400 * np.pi**2) + a**8 * s**5 / (166320 * np.pi**2))
def _get_attr_in_inst(obj, name, insts=(None,)):
# Search for ``name`` in instances of name ``insts`` of obj
for inst in insts:
if inst is None:
if hasattr(obj, name):
return getattr(obj, name)
else:
if hasattr(obj, inst) and hasattr(getattr(obj, inst), name):
return getattr(getattr(obj, inst), name)
def get_correlation_function_tophat_derivative(kedges, ell=0, k=None, **kwargs):
    r"""
    Return a list of callable corresponding to the derivative of the correlation function
    w.r.t. :math:`k`-bins.

    Parameters
    ----------
    kedges : array
        :math:`k`-edges of the :math:`k`-bins.

    ell : int, default=0
        Multipole order.

    k : array, default=None
        If ``None``, calculation will be analytic, which will work if ``ell`` in [0, 2, 4], or sympy package is installed
        (such analytic integration with sympy may take several seconds).
        If not ``None``, this is the :math:`k` log-spaced array for numerical FFTlog integration.

    kwargs : dict
        If ``k`` is not ``None``, other arguments for :class:`fftlog.PowerToCorrelation`.

    Returns
    -------
    toret : list
        List of callables, taking configuration-space separation ``s`` as input.
    """
    if k is None:
        # Analytic route: use precomputed expressions when available, else
        # derive them on the fly with sympy.
        if ell in _registered_correlation_function_tophat_derivatives:
            fun, fun_lows = _registered_correlation_function_tophat_derivatives[ell]
        else:
            try:
                import sympy as sp
            except ImportError as exc:
                # Bug fix: the message previously read 'to for analytic computation'.
                raise ImportError('Install sympy for analytic computation') from exc
            k, s, a = sp.symbols('k s a', real=True, positive=True)
            integrand = sp.simplify(k**2 * sp.expand_func(sp.jn(ell, k * s)))
            # i^ell; we take in the imaginary part of the odd power spectrum multipoles
            expr = (-1)**(ell // 2) / (2 * sp.pi**2) * sp.integrate(integrand, (k, 0, a))
            expr_lows = sp.series(expr, x=s, x0=0, n=8).removeO()
            modules = ['numpy', {'Si': Si}]
            fun = sp.lambdify((s, a), expr, modules=modules)
            fun_lows = sp.lambdify((s, a), expr_lows, modules=modules)

        def _make_fun(kmin, kmax):
            # The closed-form expressions divide by the bin edge for some ell,
            # so switch to the series expansion when an edge is ~0.
            funa = fun_lows if np.abs(kmin) < 1e-4 else fun
            funb = fun_lows if np.abs(kmax) < 1e-4 else fun

            def _fun(s):
                toret = np.empty_like(s)
                # Small separations also use the series expansion for stability.
                mask = s < 1e-1
                toret[mask] = fun_lows(s[mask], kmax) - fun_lows(s[mask], kmin)
                toret[~mask] = funb(s[~mask], kmax) - funa(s[~mask], kmin)
                return toret

            return _fun

        toret = []
        for kmin, kmax in zip(kedges[:-1], kedges[1:]):
            toret.append(_make_fun(kmin, kmax))
        return toret

    # Numerical route: Hankel-transform a top-hat over each bin with FFTlog.
    fftlog = PowerToCorrelation(k, ell=ell, complex=False, **kwargs)

    def _make_fun(sep, fun):
        # Bind sep/fun at definition time to avoid the late-binding-closure pitfall.
        return lambda s: np.interp(s, sep, fun)

    toret = []
    for kmin, kmax in zip(kedges[:-1], kedges[1:]):
        tophat = np.zeros_like(k)
        tophat[(k >= kmin) & (k <= kmax)] = 1.
        sep, fun = fftlog(tophat)
        toret.append(_make_fun(sep, fun))
    return toret
class PowerSpectrumFFTWindowMatrix(BaseMatrix):
    """Window matrix, relating "theory" input to "observed" output."""

    def __init__(self, matrix, xin, xout, projsin, projsout, nmodes, wnorm=1., wnorm_ref=None, attrs=None, mpicomm=None):
        """
        Initialize :class:`PowerSpectrumFFTWindowMatrix`.

        Parameters
        ----------
        matrix : array
            2D array representing window matrix.

        xin : array, list
            List of input "theory" coordinates.
            If single array, assumed to be the same for all input projections ``projsin``.

        xout : list
            List of output "theory" coordinates.
            If single array, assumed to be the same for all output projections ``projsout``.

        projsin : list
            List of input "theory" projections.

        projsout : list
            List of output "observed" projections.

        nmodes : array
            Number of modes in each bin.

        wnorm : float, default=1.
            Window function normalization.

        wnorm_ref : float, default=None
            Normalization of the reference power spectrum estimate; if provided, it is used
            as the matrix weight passed to :class:`BaseMatrix` (else ``wnorm`` is used).

        attrs : dict, default=None
            Dictionary of other attributes.

        mpicomm : MPI communicator, default=None
            The MPI communicator, only used when saving (:meth:`save`) matrix.
        """
        # Weight used by BaseMatrix machinery (e.g. when concatenating matrices)
        weight = wnorm_ref if wnorm_ref is not None else wnorm
        super(PowerSpectrumFFTWindowMatrix, self).__init__(matrix, xin, xout, projsin, projsout, weightsout=nmodes, weight=weight, attrs=attrs)
        self.cvalue = self.value  # let us just keep the original (complex) value somewhere
        value = []
        nout = 0
        for iout, xout in enumerate(self.xout):
            slout = slice(nout, nout + len(xout))
            tmp = self.cvalue[:, slout]
            # Keep real part for even output multipoles, imaginary part for odd ones
            tmp = tmp.real if self.projsout[iout].ell % 2 == 0 else tmp.imag
            value.append(tmp)
            nout = slout.stop
        self.value = np.concatenate(value, axis=-1)
        self.wnorm = float(wnorm)
        self.wnorm_ref = float(weight)
        self.mpicomm = mpicomm

    @property
    def nmodes(self):
        # Alias for the output weights: number of modes in each output bin
        return self.weightsout

    @nmodes.setter
    def nmodes(self, nmodes):
        self.weightsout = nmodes

    @classmethod
    def from_power(cls, power, xin, projin=(0, 0), **kwargs):
        """
        Create window function from input :class:`PowerSpectrumMultipoles`.

        Parameters
        ----------
        power : PowerSpectrumMultipoles
            Power spectrum measurement to convert into :class:`PowerSpectrumFFTWindowMatrix`.

        xin : float
            Input "theory" bin.

        projin : tuple, Projection, default=(0, 0)
            Input "theory" projection, i.e. (multipole, wide-angle order) tuple.

        kwargs : dict
            Other arguments for :meth:`__init__` (e.g. ``wnorm_ref``, ``mpicomm``).

        Returns
        -------
        matrix : PowerSpectrumFFTWindowMatrix
        """
        # Single input bin / projection: the resulting matrix is a single row
        xin = [np.asarray([xin])]
        projsin = [projin]
        ells = getattr(power, 'ells', [0])  # in case of PowerSpectrumWedges, only 0
        projsout = [Projection(ell=ell, wa_order=None) for ell in ells]
        xout = [np.array([modes.ravel() for modes in power.modes]).T] * len(projsout)  # modes are k for PowerSpectrumMultipoles, (k, mu) for PowerSpectrumWedges
        for xx in xout:  # np.squeeze(xx, axis=-1) raises an error when last axis not of size 1
            if xx.shape[-1] == 1: xx.shape = -1
        weights = [power.nmodes.ravel()] * len(projsout)
        matrix = np.atleast_2d(power.power.ravel())
        attrs = power.attrs.copy()
        attrs['edges'] = power.edges
        return cls(matrix, xin, xout, projsin, projsout, weights, wnorm=power.wnorm, attrs=attrs, **kwargs)

    def __getstate__(self):
        """Return this class state dictionary."""
        state = super(PowerSpectrumFFTWindowMatrix, self).__getstate__()
        # Also persist the complex matrix and the normalizations
        for name in ['cvalue', 'wnorm', 'wnorm_ref']:
            state[name] = getattr(self, name)
        return state

    def __setstate__(self, state):
        """Set this class state."""
        name = 'poles'
        if name in state:  # actually a MeshFFTWindow object, which has wedges or poles
            state = state[name]
        super(PowerSpectrumFFTWindowMatrix, self).__setstate__(state)

    def resum_input_odd_wide_angle(self, **kwargs):
        """
        Resum odd wide-angle orders.
        Input ``kwargs`` will be passed to :attr:`PowerSpectrumOddWideAngleMatrix`.
        """
        projsin = [proj for proj in self.projsin if proj.wa_order == 0]
        if projsin == self.projsin: return
        # The theory wide-angle expansion uses first point line-of-sight
        matrix = PowerSpectrumOddWideAngleMatrix(self.xin[0], projsin=projsin, projsout=self.projsin, los='firstpoint', **kwargs)
        self.__dict__.update(self.join(matrix, self).__dict__)
        return self
class MeshFFTWindow(MeshFFTPower):
    """
    Class that computes window function from input mesh(es), using global or local line-of-sight, see:

    - https://github.com/cosmodesi/GC_derivations
    - https://fr.overleaf.com/read/hpgbwqzmtcxn

    Attributes
    ----------
    poles : PowerSpectrumFFTWindowMatrix
        Window matrix.
    """
def __init__(self, mesh1=None, mesh2=None, edgesin=None, projsin=None, power_ref=None, edges=None, ells=None, los=None, periodic=False, boxcenter=None,
             compensations=None, wnorm=None, shotnoise=None, shotnoise_nonorm=None, edgesin_type='smooth', mode_oversampling=None, **kwargs):
    r"""
    Initialize :class:`MeshFFTWindow`.

    Parameters
    ----------
    mesh1 : CatalogMesh, RealField, default=None
        First mesh.

    mesh2 : CatalogMesh, RealField, default=None
        In case of cross-correlation, second mesh, with same size and physical extent (``boxsize`` and ``boxcenter``) that ``mesh1``.

    edgesin : dict, array, list
        An array of :math:`k`-edges which defines the theory :math:`k`-binning; corresponding derivatives will be computed
        (see ``edgesin_type``); or a dictionary of such array for each theory projection.
        Else a list of derivatives (callable) of theory correlation function w.r.t. each theory basis vector, e.g. each in :math:`k`-bin;
        or a dictionary of such list for each theory projection.
        If ``periodic`` is ``True``, this should correspond to the derivatives of theory *power spectrum* (instead of correlation function)
        w.r.t. each theory basis vector, e.g. each in :math:`k` bin.

    projsin : list, default=None
        List of :class:`Projection` instances or (multipole, wide-angle order) tuples.
        If ``None``, and ``power_ref`` is provided, the list of projections is set
        to be able to compute window convolution of theory power spectrum multipoles of orders ``power_ref.ells``.

    power_ref : CatalogFFTPower, MeshFFTPower, PowerSpectrumWedges, PowerSpectrumMultipoles, default=None
        "Reference" power spectrum estimation, e.g. of the actual data.
        It is used to set default values for ``edges``, ``ells``, ``los``, ``boxcenter``, ``compensations`` and ``wnorm`` if those are ``None``.

    edges : tuple, array, default=None
        If ``los`` is local (``None``), :math:`k`-edges for :attr:`poles`.
        Else, one can also provide :math:`\mu`-edges (hence a tuple ``(kedges, muedges)``) for :attr:`wedges`.
        If ``kedges`` is ``None``, defaults to edges containing unique :math:`k` (norm) values, see :func:`find_unique_edges`.
        ``kedges`` may be a dictionary, with keys 'min' (minimum :math:`k`, defaults to 0), 'max' (maximum :math:`k`, defaults to ``np.pi/(boxsize/nmesh)``),
        'step' (if not provided :func:`find_unique_edges` is used to find unique :math:`k` (norm) values between 'min' and 'max').
        For both :math:`k` and :math:`\mu`, binning is inclusive on the low end and exclusive on the high end, i.e. ``edges[i] <= x < edges[i+1]``.
        However, last :math:`\mu`-bin is inclusive on both ends: ``edges[-2] <= mu <= edges[-1]``.
        Therefore, with e.g. :math:`\mu`-edges ``[0.2, 0.4, 1.0]``, the last :math:`\mu`-bin includes modes at :math:`\mu = 1.0`.
        Similarly, with :math:`\mu`-edges ``[0.2, 0.4, 0.8]``, the last :math:`\mu`-bin includes modes at :math:`\mu = 0.8`.
        If ``None``, defaults to the edges used in estimation of ``power_ref``.

    ells : list, tuple, default=(0, 2, 4)
        Output multipole orders.
        If ``None``, defaults to the multipoles used in estimation of ``power_ref``.

    los : string, array, default=None
        If ``los`` is 'firstpoint' (resp. 'endpoint'), use local (varying) first point (resp. end point) line-of-sight.
        Else, may be 'x', 'y' or 'z', for one of the Cartesian axes.
        Else, a 3-vector.
        If ``None``, defaults to the line-of-sight used in estimation of ``power_ref``.

    periodic : bool, default=False
        If ``True``, selection function is assumed uniform, periodic.
        In this case, ``mesh1`` may be ``None``; in this case ``nmesh`` and ``boxsize`` default to that of ``power_ref``,
        else may be set with ``kwargs``.

    boxcenter : float, array, default=None
        Box center; defaults to 0.
        Used only if provided ``mesh1`` and ``mesh2`` are not ``CatalogMesh``.
        If ``None``, defaults to the value used in estimation of ``power_ref``.

    compensations : list, tuple, string, default=None
        Compensations to apply to mesh to (optionally) correct for particle-mesh assignment scheme;
        e.g. 'cic' (resp. 'cic-sn') for cic assignment scheme, with (resp. without) interlacing.
        In case ``mesh2`` is not ``None`` (cross-correlation), provide a list (or tuple) of two such strings
        (for ``mesh1`` and ``mesh2``, respectively).
        Used only if provided ``mesh1`` or ``mesh2`` are not ``CatalogMesh``.

    wnorm : float, default=None
        Window function normalization.
        If ``None``, defaults to the value used in estimation of ``power_ref``,
        rescaled to the input random weights --- which yields a correct normalization of the window function
        for the power spectrum estimation ``power_ref``.
        If ``power_ref`` provided, use internal estimate obtained with :func:`normalization` --- which is wrong
        (the normalization :attr:`poles.wnorm` can be reset a posteriori using the above recipe).

    shotnoise : float, default=None
        Window function shot noise, to use instead of internal estimate, which is 0 in case of cross-correlation
        or both ``mesh1`` and ``mesh2`` are :class:`pmesh.pm.RealField`,
        and in case of auto-correlation is obtained by dividing :func:`unnormalized_shotnoise`
        of ``mesh1`` by window function normalization.

    edgesin_type : str, default='smooth'
        Technique to transpose ``edgesin`` to Fourier space, relevant only if ``periodic`` is ``False``.
        'smooth' uses :func:`get_correlation_function_tophat_derivative`;
        'fourier-grid' paints ``edgesin`` on the Fourier mesh (akin to the periodic case), then takes the FFT.

    mode_oversampling : int, default=None
        If > 0, artificially increase the resolution of the input mesh by a factor ``2 * mode_oversampling + 1``.
        In practice, shift the coordinates of the coordinates of the input grid by ``np.arange(-mode_oversampling, mode_oversampling + 1)``
        along each of x, y, z axes.
        This reduces "discrete grid binning effects".
        If ``None``, defaults to the value used in estimation of ``power_ref``.

    kwargs : dict
        Arguments for :class:`ParticleMesh` in case ``mesh1`` is not provided (as may be the case if ``periodic`` is ``True``),
        typically ``boxsize``, ``nmesh``, ``mpicomm``.
    """
    t0 = time.time()
    # Inherit defaults from the reference power spectrum estimate, when provided
    if power_ref is not None:
        if edges is None: edges = _get_attr_in_inst(power_ref, 'edges', insts=(None, 'wedges', 'poles'))
        attrs_ref = _get_attr_in_inst(power_ref, 'attrs', insts=(None, 'wedges', 'poles'))
        if los is None:
            los_type = attrs_ref['los_type']
            los = attrs_ref['los']
            if los_type != 'global': los = los_type
        if boxcenter is None: boxcenter = attrs_ref['boxcenter']
        if compensations is None: compensations = attrs_ref['compensations']
        if ells is None: ells = _get_attr_in_inst(power_ref, 'ells', insts=(None, 'poles'))
        if mode_oversampling is None: mode_oversampling = attrs_ref.get('mode_oversampling', 0)
    self._set_los(los)
    self._set_ells(ells)
    self._set_periodic(periodic)
    if mode_oversampling is None: mode_oversampling = 0
    if mesh1 is None:
        # Periodic case: build a bare ParticleMesh from power_ref attributes and/or kwargs
        if not self.periodic:
            raise ValueError('mesh1 can be "None" only if periodic = True')
        attrs_pm = {'dtype': 'f8', 'mpicomm': mpi.COMM_WORLD}
        if power_ref is not None:
            attrs_pm.update(boxsize=attrs_ref['boxsize'], nmesh=attrs_ref['nmesh'], dtype=attrs_ref.get('dtype', attrs_pm['dtype']))
        attrs_pm.update(kwargs)
        # Translate our keyword names to pmesh's
        translate = {'boxsize': 'BoxSize', 'nmesh': 'Nmesh', 'mpicomm': 'comm'}
        attrs_pm = {translate.get(key, key): value for key, value in attrs_pm.items()}
        from pmesh.pm import ParticleMesh
        mesh1 = ParticleMesh(**attrs_pm)
    self._set_compensations(compensations)
    self._set_mesh(mesh1, mesh2=mesh2, boxcenter=boxcenter)
    self._set_projsin(projsin)
    self._set_edges(edges)
    self._set_xin(edgesin, edgesin_type=edgesin_type)
    self.wnorm_ref = 1.
    if power_ref is not None:
        self.wnorm_ref = _get_attr_in_inst(power_ref, 'wnorm', insts=(None, 'wedges', 'poles'))
    self.wnorm = wnorm
    if wnorm is None:
        if self.periodic:
            self.wnorm = 1.
        else:
            if power_ref is not None:
                # Rescale the reference normalization by the ratio of summed weights
                ialpha2 = np.prod([self.attrs[name] / power_ref.attrs[name] for name in ['sum_data_weights1', 'sum_data_weights2']])
                self.wnorm = ialpha2 * self.wnorm_ref
            else:
                self._set_normalization(self.wnorm, mesh1, mesh2)
    self._set_shotnoise(shotnoise, shotnoise_nonorm=shotnoise_nonorm, mesh1=mesh1, mesh2=mesh2)
    self.mode_oversampling = int(mode_oversampling)
    self.attrs.update(self._get_attrs())
    t1 = time.time()
    if self.mpicomm.rank == 0:
        self.log_info('Meshes prepared in elapsed time {:.2f} s.'.format(t1 - t0))
        self.log_info('Running mesh calculation.')
    self.run()
    t2 = time.time()
    if self.mpicomm.rank == 0:
        self.log_info('Mesh calculations performed in elapsed time {:.2f} s.'.format(t2 - t1))
        self.log_info('Window function computed in elapsed time {:.2f} s.'.format(t2 - t0))
def _set_periodic(self, periodic=False):
    """Set :attr:`periodic`; a periodic (uniform) selection function requires a global line-of-sight."""
    self.periodic = periodic
    if self.los_type != 'global' and self.periodic:
        raise ValueError('Cannot set "periodic" if line-of-sight is local.')
def _set_mesh(self, mesh1, mesh2=None, boxcenter=None):
    """Set up the particle mesh; in the periodic case only a bare :class:`ParticleMesh` is required."""
    from pmesh.pm import ParticleMesh
    if not self.periodic:
        # Non-uniform selection function: defer to the MeshFFTPower machinery
        super(MeshFFTWindow, self)._set_mesh(mesh1, mesh2=mesh2, boxcenter=boxcenter)
        return
    self.attrs = {}
    self.autocorr = True
    self.pm = mesh1 if isinstance(mesh1, ParticleMesh) else mesh1.pm
    self.mpicomm = self.pm.comm
    if boxcenter is None: boxcenter = 0.
    self.boxcenter = _make_array(boxcenter, 3, dtype='f8')
def _set_projsin(self, projsin):
    """Set the list of input "theory" projections :attr:`projsin`, defaulting to those needed for :attr:`ells`."""
    if projsin is None:
        if self.ells is None:
            raise ValueError('If no output multipoles requested, provide "projsin"')
        # Even multipoles up to max(ells), all at zeroth wide-angle order
        projsin = [(ell, 0) for ell in range(0, max(self.ells) + 1, 2)]
        if self.los_type in ['firstpoint', 'endpoint']:
            # Local line-of-sight: additionally include first-order wide-angle projections
            projsin = projsin + PowerSpectrumOddWideAngleMatrix.propose_out(projsin, wa_orders=1)
    self.projsin = [Projection(proj) for proj in projsin]
    has_wide_angle = any(proj.wa_order != 0 for proj in self.projsin)
    if self.los_type == 'global' and has_wide_angle:
        raise ValueError('With global line-of-sight, input wide_angle order = 0 only is supported')
def _set_xin(self, edgesin, edgesin_type='fourier-grid'):
    """
    Set input "theory" coordinates :attr:`xin` and derivatives :attr:`deriv` from ``edgesin``:
    either a list of callables (derivatives provided directly), or :math:`k`-edges from which
    tophat derivatives are built according to ``edgesin_type``.
    """
    self.edgesin_type = edgesin_type.lower()
    allowed_edgesin_types = ['smooth']
    # 'fourier-grid' (painting on the Fourier mesh) is only supported with global line-of-sight
    if self.los_type == 'global': allowed_edgesin_types.append('fourier-grid')
    if self.edgesin_type not in allowed_edgesin_types:
        raise ValueError('edgesin_type must be one of {}'.format(allowed_edgesin_types))
    # Normalize edgesin to a {projection: edges-or-callables} dictionary
    if not isinstance(edgesin, dict):
        edgesin = {proj: edgesin for proj in self.projsin}
    else:
        edgesin = {Projection(proj): edge for proj, edge in edgesin.items()}
    self.xin, self.deriv = {}, {}
    for proj in self.projsin:
        if proj not in edgesin:
            raise ValueError('Projection {} not in edgesin'.format(proj))
        iscallable = [callable(f) for f in edgesin[proj]]
        if any(iscallable):
            if not all(iscallable): raise ValueError('Provide callables or floats only for edgesin')
            # Derivatives provided directly; label them by integer index
            self.deriv[proj] = edgesin[proj]
            self.xin[proj] = np.arange(len(self.deriv[proj]))
        else:
            edges = np.asarray(edgesin[proj])
            # Effective k of each bin: volume-weighted average of k over the spherical shell
            self.xin[proj] = 3. / 4. * (edges[1:]**4 - edges[:-1]**4) / (edges[1:]**3 - edges[:-1]**3)
            if self.periodic or self.edgesin_type == 'fourier-grid':
                # Tophat directly in Fourier space

                def _make_fun(low, high):
                    return lambda k: 1. * ((k >= low) & (k < high))

                self.deriv[proj] = [_make_fun(*lh) for lh in zip(edges[:-1], edges[1:])]
            else:
                # Analytic / FFTlog transposition of the tophat to configuration space
                self.deriv[proj] = get_correlation_function_tophat_derivative(edges, ell=proj.ell)
def _get_q(self, ellout, mout, projin):
    # Called for local (varying) line-of-sight only
    # This corresponds to Q defined in https://fr.overleaf.com/read/hpgbwqzmtcxn
    # ellout is \ell, mout is m, projin = (\ell^\prime, m^\prime)
    from pmesh.pm import RealField, ComplexField
    Ylmout = get_real_Ylm(ellout, mout)
    # Sum over all m' of the input multipole
    Ylmins = [get_real_Ylm(projin.ell, m) for m in range(-projin.ell, projin.ell + 1)]
    rfield = RealField(self.pm)
    cfield = ComplexField(self.pm)
    toret = RealField(self.pm)
    toret[:] = 0.
    # Shot noise only contributes to ellout = ell' = 0 at zeroth wide-angle order
    remove_shotnoise = ellout == projin.ell == projin.wa_order == 0
    if remove_shotnoise:
        shotnoise = self.shotnoise * self.wnorm / self.nmesh.prod(dtype='f8') / (4 * np.pi)  # normalization of Ylmin * Ylmout
    for Ylmin in Ylmins:
        # Weight mesh1 by Ylmin(xhat) * Ylmout(xhat), and 1/|x|^n for wide-angle order n
        for islab, slab in enumerate(rfield.slabs):
            slab[:] = self.rfield1[islab] * Ylmin(self.xhat[0][islab], self.xhat[1][islab], self.xhat[2][islab]) * Ylmout(self.xhat[0][islab], self.xhat[1][islab], self.xhat[2][islab])
            if projin.wa_order != 0: slab[:] /= self.xnorm[islab]**projin.wa_order
        rfield.r2c(out=cfield)
        # Cross-correlate with the (compensated) second field in Fourier space
        for islab in range(cfield.shape[0]):
            cfield[islab, ...] = cfield[islab].conj() * self.cfield2[islab]
        cfield.c2r(out=rfield)
        for islab, slab in enumerate(rfield.slabs):
            # No 1/N^6 factor due to pmesh convention
            if remove_shotnoise:
                # Subtract shot noise at zero separation only
                mask_zero = True
                for ii in slab.i: mask_zero = mask_zero & (ii == 0)
                slab[mask_zero] -= shotnoise
            slab[:] *= 4 * np.pi / (2 * projin.ell + 1) * Ylmin(self.xwhat[0][islab], self.xwhat[1][islab], self.xwhat[2][islab])
        toret[:] += rfield[:]
    return toret
def _run_periodic(self, projin, deriv):
    """
    Periodic (uniform selection) case: paint ``deriv`` (evaluated on the Fourier grid),
    times the Legendre polynomial of order ``projin.ell``, and bin it into
    :attr:`wedges` and (if multipoles are requested) :attr:`poles`.
    """
    legendre = special.legendre(projin.ell)
    for islab, slab in enumerate(self.qfield.slabs):
        tmp = deriv(self.knorm[islab])
        if projin.ell:
            # mu = khat . los
            mu = sum(xx[islab] * ll for xx, ll in zip(self.khat, self.los))
            tmp *= legendre(mu)
        slab[:] = tmp
    result, result_poles = project_to_basis(self.qfield, self.edges, ells=self.ells, los=self.los, mode_oversampling=self.mode_oversampling)
    # Format the power results into :class:`PowerSpectrumWedges` instance
    kwargs = {'wnorm': self.wnorm, 'shotnoise_nonorm': 0., 'attrs': self.attrs}
    k, mu, power, nmodes, power_zero = result
    self.wedges = PowerSpectrumWedges(modes=(k, mu), edges=self.edges, power_nonorm=power, power_zero_nonorm=power_zero, nmodes=nmodes, **kwargs)
    if result_poles:
        # Format the power results into :class:`PowerSpectrumMultipoles` instance
        k, power, nmodes, power_zero = result_poles
        self.poles = PowerSpectrumMultipoles(modes=k, edges=self.edges[0], power_nonorm=power, power_zero_nonorm=power_zero, nmodes=nmodes, ells=self.ells, **kwargs)
def _run_global_los(self, projin, deriv):
    r"""
    Window contribution for one input projection with a global (fixed) line-of-sight;
    fills :attr:`wedges` and, if multipoles are requested, :attr:`poles`.

    Parameters
    ----------
    projin : Projection
        Input "theory" projection :math:`\ell^\prime` (wide-angle order 0 here).

    deriv : callable
        Derivative of the theory correlation function w.r.t. the current theory basis
        vector, :math:`\xi^{\ell^{\prime}, \beta \ell^\prime}(s^w)`; if ``edgesin_type``
        is 'fourier-grid', a Fourier-space function of :math:`k` instead.
    """
    from pmesh.pm import ParticleMesh, ComplexField
    legendre = special.legendre(projin.ell)
    if self.edgesin_type == 'fourier-grid':
        if projin.ell % 2:
            # Odd poles: imaginary contribution, need a full complex mesh
            dtype = self.dtype if 'complex' in self.dtype.name else 'c{:d}'.format(self.dtype.itemsize * 2)
            # Bug fix: attribute is "boxsize" (as used below and in run()), not "boxSize"
            # NOTE(review): np=self.np (process mesh layout) — confirm it is set by MeshFFTPower
            pm = ParticleMesh(BoxSize=self.boxsize, Nmesh=self.nmesh, dtype=dtype, comm=self.mpicomm, np=self.np)
            qfield = ComplexField(pm)
        else:
            qfield = ComplexField(self.pm)
        # Paint deriv(k) * Legendre_{ell'}(khat . los) on the Fourier mesh, then FFT
        for islab, slab in enumerate(qfield.slabs):
            tmp = deriv(self.knorm[islab])
            if projin.ell:
                mu = sum(xx[islab] * ll for xx, ll in zip(self.khat, self.los))
                tmp *= legendre(mu)
            slab[:] = tmp
        qfield = qfield.c2r()
        volume = self.boxsize.prod()
        for islab, slab in enumerate(qfield.slabs):
            slab[:] *= self.qfield[islab] / volume
    else:
        # 'smooth': multiply the selection two-point function by deriv(s) * Legendre_{ell'}(shat . los)
        qfield = self.qfield.copy()
        for islab, slab in enumerate(qfield.slabs):
            tmp = deriv(self.xwnorm[islab])
            if projin.ell:
                mu = sum(xx[islab] * ll for xx, ll in zip(self.xwhat, self.los))
                tmp *= legendre(mu)
            slab[:] *= tmp
    wfield = qfield.r2c()
    result, result_poles = project_to_basis(wfield, self.edges, ells=self.ells, los=self.los, mode_oversampling=self.mode_oversampling)
    # Format the power results into :class:`PowerSpectrumWedges` instance
    kwargs = {'wnorm': self.wnorm, 'shotnoise_nonorm': 0., 'attrs': self.attrs}
    k, mu, power, nmodes, power_zero = result
    power, power_zero = (self.nmesh.prod(dtype='f8')**2 * tmp.conj() for tmp in (power, power_zero))
    self.wedges = PowerSpectrumWedges(modes=(k, mu), edges=self.edges, power_nonorm=power, power_zero_nonorm=power_zero, nmodes=nmodes, **kwargs)
    if result_poles:
        # Format the power results into :class:`PowerSpectrumMultipoles` instance
        k, power, nmodes, power_zero = result_poles
        power, power_zero = (self.nmesh.prod(dtype='f8')**2 * tmp.conj() for tmp in (power, power_zero))
        self.poles = PowerSpectrumMultipoles(modes=k, edges=self.edges[0], power_nonorm=power, power_zero_nonorm=power_zero, nmodes=nmodes, ells=self.ells, **kwargs)
def _run_local_los(self, projin, deriv):
    # We perform the sum of Q defined in https://fr.overleaf.com/read/hpgbwqzmtcxn
    # projin is (\ell^\prime, n)
    # deriv is \xi^{(n)}_{\ell^{\prime},\beta \ell^\prime}(s^w)
    from pmesh.pm import RealField, ComplexField
    result = []
    ells = sorted(set(self.ells))
    # Paint the theory-correlation derivative on the separation mesh
    # (the 1/|x|^n wide-angle weight is applied in _get_q, not here)
    dfield = RealField(self.pm)
    for islab, slab in enumerate(dfield.slabs):
        slab[:] = deriv(self.xwnorm[islab])
    for ellout in ells:
        # Accumulate sum over m of Ylm(khat) * FFT[Q_{ellout, m} * dfield]
        wfield = ComplexField(self.pm)
        wfield[:] = 0.
        for mout in range(-ellout, ellout + 1):
            Ylm = get_real_Ylm(ellout, mout)
            qfield = self._get_q(ellout=ellout, mout=mout, projin=projin)
            qfield[:] *= dfield[:]
            cfield = qfield.r2c()
            for islab, slab in enumerate(cfield.slabs):
                slab[:] *= 4 * np.pi * Ylm(self.khat[0][islab], self.khat[1][islab], self.khat[2][islab])
            wfield[:] += cfield[:]
        proj_result = project_to_basis(wfield, self.edges, antisymmetric=bool(ellout % 2), mode_oversampling=self.mode_oversampling)[0]
        # Keep (power, power_zero); k and nmodes are the same for every ellout
        result.append(tuple(np.ravel(proj_result[ii]) for ii in [2, -1]))
        k, nmodes = proj_result[0], proj_result[3]
    del dfield
    power, power_zero = (self.nmesh.prod(dtype='f8')**2 * np.array([result[ells.index(ell)][ii] for ell in self.ells]).conj() for ii in range(2))
    # 'endpoint' line-of-sight was computed by swapping the meshes; undo with complex conjugation
    if self.swap: power, power_zero = (tmp.conj() for tmp in (power, power_zero))
    k, nmodes = np.ravel(k), np.ravel(nmodes)
    kwargs = {'wnorm': self.wnorm, 'shotnoise_nonorm': 0., 'attrs': self.attrs}
    self.poles = PowerSpectrumMultipoles(modes=k, edges=self.edges[0], power_nonorm=power, power_zero_nonorm=np.ravel(power_zero), nmodes=nmodes, ells=self.ells, **kwargs)
def run(self):
    """
    Compute the window matrix: set up real/Fourier grids, apply mesh compensations,
    then loop over input projections and theory basis vectors, filling :attr:`poles`
    (and :attr:`wedges` when the line-of-sight is global).
    """
    from pmesh.pm import RealField, ComplexField

    def _wrap_rslab(rslab):
        # We do not use the same conventions as pmesh:
        # rslab < 0 is sent back to [boxsize/2, boxsize]
        toret = []
        for ii, rr in enumerate(rslab):
            mask = rr > self.boxsize[ii] / 2.
            rr[mask] -= self.boxsize[ii]
            toret.append(rr)
        return toret

    def _safe_divide(num, denom):
        # Element-wise division, returning 0 where denom is 0 (e.g. the zero mode)
        with np.errstate(divide='ignore', invalid='ignore'):
            toret = num / denom
        toret[denom == 0.] = 0.
        return toret

    if self.periodic:
        self.qfield = ComplexField(self.pm)
    if self.periodic or self.edgesin_type == 'fourier-grid':
        # The Fourier-space grid
        self.khat = [kk.real.astype('f8') for kk in ComplexField(self.pm).slabs.optx]
        self.knorm = np.sqrt(sum(kk**2 for kk in self.khat))
        self.khat = [_safe_divide(kk, self.knorm) for kk in self.khat]
    else:
        # The configuration-space separation grid, wrapped to [-boxsize/2, boxsize/2]
        self.xwhat = [xx.real.astype('f8') for xx in _wrap_rslab(_transform_rslab(RealField(self.pm).slabs.optx, self.boxsize))]  # this should just give self.mesh1.slabs.optx
        self.xwnorm = np.sqrt(sum(xx**2 for xx in self.xwhat))
        self.xwhat = [_safe_divide(xx, self.xwnorm) for xx in self.xwhat]
    if self.los_type == 'global':  # global (fixed) line-of-sight
        if self.periodic:
            run_projin = self._run_periodic
        else:
            cfield2 = cfield1 = self._to_complex(self.mesh1, copy=True)  # copy because will be modified in-place
            del self.mesh1
            # We will apply all compensation transfer functions to cfield1
            compensations = [self.compensations[0]] if self.autocorr else self.compensations
            self._compensate(cfield1, *compensations)
            if not self.autocorr:
                cfield2 = self._to_complex(self.mesh2, copy=False)
                del self.mesh2
            for islab in range(cfield1.shape[0]):
                cfield1[islab, ...] = cfield1[islab].conj() * cfield2[islab]
            # Back to configuration space: two-point function of the selection meshes
            self.qfield = cfield1.c2r()
            shotnoise = self.shotnoise * self.wnorm / self.nmesh.prod(dtype='f8')
            for i, c in zip(self.qfield.slabs.i, self.qfield.slabs):
                mask_zero = True
                for ii in i: mask_zero = mask_zero & (ii == 0)
                c[mask_zero] -= shotnoise  # remove shot noise (zero-separation cell only)
            del cfield2, cfield1
            run_projin = self._run_global_los
    else:  # local (varying) line-of-sight
        self.swap = self.los_type == 'endpoint'
        if self.swap: self.mesh1, self.mesh2 = self.mesh2, self.mesh1  # swap meshes + complex conjugaison at the end of run()
        self.rfield1 = self._to_real(self.mesh1)
        if self.autocorr:
            self.cfield2 = self._to_complex(self.mesh1, copy=True)  # copy because will be modified in-place
            compensations = [self.compensations[0]] * 2
        else:
            self.cfield2 = self._to_complex(self.mesh2, copy=True)  # copy because will be modified in-place
            compensations = self.compensations
        # We apply all compensation transfer functions to cfield2
        self._compensate(self.cfield2, *compensations)
        del self.mesh2, self.mesh1
        # Positions of mesh cells w.r.t. the observer (for line-of-sight directions)
        offset = self.boxcenter - self.boxsize / 2.
        self.xhat = [xx.real.astype('f8') + offset[ii] for ii, xx in enumerate(_transform_rslab(self.rfield1.slabs.optx, self.boxsize))]
        self.xnorm = np.sqrt(sum(xx**2 for xx in self.xhat))
        self.xhat = [_safe_divide(xx, self.xnorm) for xx in self.xhat]
        # The Fourier-space grid
        self.khat = [kk.real.astype('f8') for kk in self.cfield2.slabs.optx]
        knorm = np.sqrt(sum(kk**2 for kk in self.khat))
        self.khat = [_safe_divide(kk, knorm) for kk in self.khat]
        del knorm
        run_projin = self._run_local_los
    # Loop over input projections and theory basis vectors, stacking the single-row matrices
    poles, wedges = [], []
    for projin in self.projsin:
        poles_x, wedges_x = [], []
        for iin, xin in enumerate(self.xin[projin]):
            run_projin(projin, self.deriv[projin][iin])
            if self.ells:
                poles_x.append(PowerSpectrumFFTWindowMatrix.from_power(self.poles, xin, projin, wnorm_ref=self.wnorm_ref, mpicomm=self.mpicomm))
            if self.los_type == 'global':
                wedges_x.append(PowerSpectrumFFTWindowMatrix.from_power(self.wedges, xin, projin, wnorm_ref=self.wnorm_ref, mpicomm=self.mpicomm))
        if poles_x:
            poles.append(PowerSpectrumFFTWindowMatrix.concatenate_x(*poles_x, axis='in'))
        if wedges_x:
            wedges.append(PowerSpectrumFFTWindowMatrix.concatenate_x(*wedges_x, axis='in'))
    if poles:
        self.poles = PowerSpectrumFFTWindowMatrix.concatenate_proj(*poles, axis='in')
    if wedges:
        self.wedges = PowerSpectrumFFTWindowMatrix.concatenate_proj(*wedges, axis='in')
    # Free the (large) mesh/grid attributes
    for name in ['mesh1', 'mesh2', 'rfield1', 'cfield2', 'qfield']:
        if hasattr(self, name): delattr(self, name)
@classmethod
def concatenate_proj(cls, *others):
    """Return a copy of ``others[0]`` whose window matrices concatenate the input projections of all ``others``."""
    if len(others) == 1 and utils.is_sequence(others[0]):
        others = others[0]
    new = others[0].copy()
    for name in ('poles', 'wedges'):
        if not hasattr(others[0], name):
            continue
        matrices = [getattr(other, name) for other in others]
        setattr(new, name, PowerSpectrumFFTWindowMatrix.concatenate_proj(*matrices, axis='in'))
    return new
@classmethod
def concatenate_x(cls, *others):
    """Return a copy of ``others[0]`` whose window matrices concatenate the input coordinates of all ``others``."""
    if len(others) == 1 and utils.is_sequence(others[0]):
        others = others[0]
    new = others[0].copy()
    for name in ('poles', 'wedges'):
        if not hasattr(others[0], name):
            continue
        matrices = [getattr(other, name) for other in others]
        setattr(new, name, PowerSpectrumFFTWindowMatrix.concatenate_x(*matrices, axis='in'))
    return new
def __setstate__(self, state):
    """Set this class state; rebuild window matrices from their stored states."""
    # super of MeshFFTPower on purpose: we want BaseClass.__setstate__(state)
    super(MeshFFTPower, self).__setstate__(state)
    for name in ('wedges', 'poles'):
        if name not in state:
            continue
        setattr(self, name, PowerSpectrumFFTWindowMatrix.from_state(state[name]))
class CatalogFFTWindow(MeshFFTWindow):
    """Wrapper on :class:`MeshFFTWindow` to estimate window function from input random positions and weights."""

    def __init__(self, randoms_positions1=None, randoms_positions2=None,
                 randoms_weights1=None, randoms_weights2=None,
                 edgesin=None, projsin=None, edges=None, ells=None, power_ref=None,
                 los=None, nmesh=None, boxsize=None, boxcenter=None, cellsize=None, boxpad=2., wrap=False, dtype=None,
                 resampler=None, interlacing=None, position_type='xyz', weight_type='auto', weight_attrs=None,
                 wnorm=None, shotnoise=None, shotnoise_nonorm=None, edgesin_type='smooth', mode_oversampling=None, mpiroot=None, mpicomm=mpi.COMM_WORLD):
        r"""
        Initialize :class:`CatalogFFTWindow`, i.e. estimate power spectrum window matrix.

        Note
        ----
        To compute the cross-window of samples 1 and 2, provide ``randoms_positions2``.
        To compute (with the correct shot noise estimate) the auto-window of randoms 1, but with 2 weights, provide ``randoms_positions1``,
        ``randoms_weights1`` and ``randoms_weights2``.

        Parameters
        ----------
        randoms_positions1 : list, array, default=None
            Positions in the first randoms catalog. Typically of shape (3, N) or (N, 3).

        randoms_positions2 : list, array, default=None
            Optionally (for cross-correlation), positions in the second randoms catalog. See ``randoms_positions1``.

        randoms_weights1 : array of shape (N,), default=None
            Optionally, weights in the first randoms catalog.

        randoms_weights2 : array of shape (N,), default=None
            Optionally (for cross-correlation), weights in the second randoms catalog.

        edgesin : dict, array, list
            An array of :math:`k`-edges which defines the theory :math:`k`-binning; corresponding derivatives will be computed
            using :func:`get_correlation_function_tophat_derivative`; or a dictionary of such array for each theory projection.
            Else a list of derivatives (callable) of theory correlation function w.r.t. each theory basis vector, e.g. each in :math:`k`-bin;
            or a dictionary of such list for each theory projection.

        projsin : list, default=None
            List of :class:`Projection` instances or (multipole, wide-angle order) tuples.
            If ``None``, and ``power_ref`` is provided, the list of projections is set
            to be able to compute window convolution of theory power spectrum multipoles of orders ``power_ref.ells``.

        power_ref : PowerSpectrumMultipoles, default=None
            "Reference" power spectrum estimation, e.g. of the actual data.
            It is used to set default values for ``edges``, ``ells``, ``los``, ``boxsize``, ``boxcenter``, ``nmesh``,
            ``interlacing``, ``resampler``, ``wnorm`` and ``mode_oversampling`` if those are ``None``.

        edges : tuple, array, default=None
            If ``los`` is local (``None``), :math:`k`-edges for :attr:`poles`.
            Else, one can also provide :math:`\mu`-edges (hence a tuple ``(kedges, muedges)``) for :attr:`wedges`.
            If ``kedges`` is ``None``, defaults to edges containing unique :math:`k` (norm) values, see :func:`find_unique_edges`.
            ``kedges`` may be a dictionary, with keys 'min' (minimum :math:`k`, defaults to 0), 'max' (maximum :math:`k`, defaults to ``np.pi/(boxsize/nmesh)``),
            'step' (if not provided :func:`find_unique_edges` is used to find unique :math:`k` (norm) values between 'min' and 'max').
            For both :math:`k` and :math:`\mu`, binning is inclusive on the low end and exclusive on the high end, i.e. ``bins[i] <= x < bins[i+1]``.
            However, last :math:`\mu`-bin is inclusive on both ends: ``bins[-2] <= mu <= bins[-1]``.
            Therefore, with e.g. :math:`\mu`-edges ``[0.2, 0.4, 1.0]``, the last :math:`\mu`-bin includes modes at :math:`\mu = 1.0`.
            Similarly, with :math:`\mu`-edges ``[0.2, 0.4, 0.8]``, the last :math:`\mu`-bin includes modes at :math:`\mu = 0.8`.
            If ``None``, defaults to the edges used in estimation of ``power_ref``.

        ells : list, tuple, default=(0, 2, 4)
            Output multipole orders.

        los : string, array, default=None
            If ``los`` is 'firstpoint' (resp. 'endpoint'), use local (varying) first point (resp. end point) line-of-sight.
            Else, may be 'x', 'y' or 'z', for one of the Cartesian axes.
            Else, a 3-vector.
            If ``None``, defaults to line-of-sight used in estimation of ``power_ref``.

        nmesh : array, int, default=None
            Mesh size, i.e. number of mesh nodes along each axis.
            If ``None``, defaults to the value used in estimation of ``power_ref``.

        boxsize : array, float, default=None
            Physical size of the box along each axis.
            If ``None``, defaults to the value used in estimation of ``power_ref``.

        boxcenter : array, float, default=None
            Box center, defaults to center of the Cartesian box enclosing all input positions.
            If ``None``, defaults to the value used in estimation of ``power_ref``.

        cellsize : array, float, default=None
            Physical size of mesh cells.
            If not ``None``, and mesh size ``nmesh`` is not ``None``, used to set ``boxsize`` as ``nmesh * cellsize``.
            If ``nmesh`` is ``None``, it is set as (the nearest integer(s) to) ``boxsize / cellsize``.

        boxpad : float, default=2.
            When ``boxsize`` is determined from input positions, take ``boxpad`` times the smallest box enclosing positions as ``boxsize``.

        wrap : bool, default=False
            Whether to wrap input positions in [0, boxsize[.
            If ``False`` and input positions do not fit in the box size, raise a :class:`ValueError`.

        dtype : string, dtype, default=None
            The data type to use for input positions and weights and the mesh.
            If ``None``, defaults to the value used in estimation of ``power_ref`` if provided, else 'f8'.

        resampler : string, ResampleWindow, default=None
            Resampler used to assign particles to the mesh.
            Choices are ['ngp', 'cic', 'tcs', 'pcs'].
            If ``None``, defaults to the value used in estimation of ``power_ref``.

        interlacing : bool, int, default=None
            Whether to use interlacing to reduce aliasing when painting the particles on the mesh.
            If positive int, the interlacing order (minimum: 2).
            If ``None``, defaults to the value used in estimation of ``power_ref``.

        position_type : string, default='xyz'
            Type of input positions, one of:

                - "pos": Cartesian positions of shape (N, 3)
                - "xyz": Cartesian positions of shape (3, N)
                - "rdd": RA/Dec in degree, distance of shape (3, N)

            If ``position_type`` is "pos", positions are of (real) type ``dtype``, and ``mpiroot`` is ``None``,
            no internal copy of positions will be made, hence saving some memory.

        weight_type : string, default='auto'
            The type of weighting to apply to provided weights. One of:

                - ``None``: no weights are applied.
                - "product_individual": each pair is weighted by the product of weights :math:`w_{1} w_{2}`.
                - "auto": automatically choose weighting based on input ``weights1`` and ``weights2``,
                  i.e. ``None`` when ``weights1`` and ``weights2`` are ``None``,
                  else "product_individual".

            If floating weights are of (real) type ``dtype`` and ``mpiroot`` is ``None``,
            no internal copy of weights will be made, hence saving some memory.

        weight_attrs : dict, default=None
            Dictionary of weighting scheme attributes. In case ``weight_type`` is "inverse_bitwise",
            one can provide "nrealizations", the total number of realizations (*including* current one;
            defaulting to the number of bits in input weights plus one);
            "noffset", the offset to be added to the bitwise counts in the denominator (defaulting to 1)
            and "default_value", the default value of weights if the denominator is zero (defaulting to 0).

        wnorm : float, default=None
            Window function normalization.
            If ``None``, defaults to the value used in estimation of ``power_ref``,
            rescaled to the input random weights --- which yields a correct normalization of the window function
            for the power spectrum estimation ``power_ref``.
            If ``power_ref`` provided, use internal estimate obtained with :func:`normalization` --- which is wrong
            (the normalization :attr:`poles.wnorm` can be reset a posteriori using the above recipe).

        shotnoise : float, default=None
            Window function shot noise, to use instead of internal estimate, which is 0 in case of cross-correlation
            and in case of auto-correlation is obtained by dividing :func:`unnormalized_shotnoise` by window function normalization.

        edgesin_type : str, default='smooth'
            Technique to transpose ``edgesin`` to Fourier space.
            'smooth' uses :func:`get_correlation_function_tophat_derivative`;
            'fourier-grid' paints ``edgesin`` on the Fourier mesh, then takes the FFT.

        mode_oversampling : int, default=None
            If > 0, artificially increase the resolution of the input mesh by a factor ``2 * mode_oversampling + 1``.
            In practice, shift the coordinates of the coordinates of the input grid by ``np.arange(-mode_oversampling, mode_oversampling + 1)``
            along each of x, y, z axes.
            This reduces "discrete grid binning effects".
            If ``None``, defaults to the value used in estimation of ``power_ref``.

        mpiroot : int, default=None
            If ``None``, input positions and weights are assumed to be scatted across all ranks.
            Else the MPI rank where input positions and weights are gathered.

        mpicomm : MPI communicator, default=mpi.COMM_WORLD
            The MPI communicator.
        """
        # Collect explicitly-provided mesh attributes; missing ones are filled
        # in below from the reference power spectrum estimation, if any.
        mesh_names = ['nmesh', 'boxsize', 'boxcenter']
        loc = locals()
        mesh_attrs = {name: loc[name] for name in mesh_names if loc[name] is not None}
        if power_ref is not None:
            attrs_ref = _get_attr_in_inst(power_ref, 'attrs', insts=(None, 'poles', 'wedges'))
            for name in mesh_names:
                mesh_attrs.setdefault(name, attrs_ref[name])
            if interlacing is None:
                # Reference estimation stores per-mesh attributes with 1-based suffixes.
                interlacing = tuple(attrs_ref['interlacing{:d}'.format(i + 1)] for i in range(2))
            if resampler is None:
                resampler = tuple(attrs_ref['resampler{:d}'.format(i + 1)] for i in range(2))
            if dtype is None: dtype = attrs_ref.get('dtype', 'f8')
        if dtype is None: dtype = 'f8'
        rdtype = _get_real_dtype(dtype)
        if cellsize is not None:  # if cellsize is provided, remove default nmesh or boxsize value from old_matrix instance.
            mesh_attrs['cellsize'] = cellsize
            if nmesh is None: mesh_attrs.pop('nmesh')
            elif boxsize is None: mesh_attrs.pop('boxsize')
        # Format input positions; bpositions gathers all catalogs for the box determination below.
        loc = locals()
        bpositions, positions = [], {}
        for name in ['randoms_positions1', 'randoms_positions2']:
            tmp = _format_positions(loc[name], position_type=position_type, dtype=rdtype, mpicomm=mpicomm, mpiroot=mpiroot)
            if tmp is not None: bpositions.append(tmp)
            label = name.replace('randoms_positions', 'R')
            positions[label] = tmp
        autocorr = positions['R2'] is None
        weights = {name: loc[name] for name in ['randoms_weights1', 'randoms_weights2']}
        weights, bweights, n_bitwise_weights, weight_attrs = _format_all_weights(dtype=rdtype, weight_type=weight_type, weight_attrs=weight_attrs, mpicomm=mpicomm, mpiroot=mpiroot, **weights)
        # Auto-window of randoms 1 but with two weight sets (see Note in docstring):
        # treated as a cross-correlation of two meshes sharing the same positions.
        self.same_shotnoise = autocorr and (weights['R2'] is not None)
        autocorr &= not self.same_shotnoise
        # Get box encompassing all catalogs
        nmesh, boxsize, boxcenter = _get_mesh_attrs(**mesh_attrs, positions=bpositions, boxpad=boxpad, check=not wrap, mpicomm=mpicomm)
        if resampler is None: resampler = 'tsc'
        if interlacing is None: interlacing = 2
        # Normalize resampler/interlacing to one value per mesh.
        if not isinstance(resampler, tuple):
            resampler = (resampler,) * 2
        if not isinstance(interlacing, tuple):
            interlacing = (interlacing,) * 2
        if wrap:
            for name, position in positions.items():
                if position is not None:
                    positions[name] = _wrap_positions(position, boxsize, boxcenter - boxsize / 2.)
        # NOTE(review): recipe kept for reference — rescaling of the reference
        # normalization by the randoms/data weight ratio (see ``wnorm`` docstring).
        # if wnorm is None and power_ref is not None:
        #     wsum = [mpicomm.allreduce(sum(weights['R1']) if weights['R1'] is not None else len(positions['R1']))]*2
        #     if not autocorr: wsum[1] = mpicomm.allreduce(sum(weights['R2']) if weights['R2'] is not None else len(positions['R2']))
        #     ialpha2 = np.prod([wsum[ii]/attrs_ref[name] for ii, name in enumerate(['sum_data_weights1', 'sum_data_weights2'])])
        #     wnorm = ialpha2 * _get_attr_in_inst(power_ref, 'wnorm', insts=(None, 'poles', 'wedges'))
        # Get catalog meshes
        def get_mesh(data_positions, data_weights=None, **kwargs):
            # Positions are already formatted to Cartesian (N, 3), hence position_type='pos'.
            return CatalogMesh(data_positions, data_weights=data_weights,
                               nmesh=nmesh, boxsize=boxsize, boxcenter=boxcenter,
                               position_type='pos', dtype=dtype, mpicomm=mpicomm, **kwargs)

        mesh1 = get_mesh(positions['R1'], data_weights=weights['R1'], resampler=resampler[0], interlacing=interlacing[0])
        mesh2 = None
        if not autocorr:
            if self.same_shotnoise:
                # Share positions (and weights, if second set missing) between the two meshes.
                for name in ['R']:
                    positions[name + '2'] = positions[name + '1']
                    if weights[name + '2'] is None: weights[name + '2'] = weights[name + '1']
            mesh2 = get_mesh(positions['R2'], data_weights=weights['R2'], resampler=resampler[1], interlacing=interlacing[1])
        # Now, run window function estimation
        super(CatalogFFTWindow, self).__init__(mesh1=mesh1, mesh2=mesh2, edgesin=edgesin, projsin=projsin, power_ref=power_ref, edges=edges, ells=ells, los=los, wnorm=wnorm, shotnoise=shotnoise, shotnoise_nonorm=shotnoise_nonorm, edgesin_type=edgesin_type, mode_oversampling=mode_oversampling)
|
cosmodesiREPO_NAMEpypowerPATH_START.@pypower_extracted@pypower-main@pypower@fft_window.py@.PATH_END.py
|
{
"filename": "test_extint128.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_core/tests/test_extint128.py",
"type": "Python"
}
|
import itertools
import contextlib
import operator
import pytest
import numpy as np
import numpy._core._multiarray_tests as mt
from numpy.testing import assert_raises, assert_equal
# Exact bounds of the ranges exercised by the extended-precision helpers.
INT64_MAX = np.iinfo(np.int64).max
INT64_MIN = np.iinfo(np.int64).min
INT64_MID = 2**32  # mid-bit-width anchor for 64-bit carry edge cases

# int128 is not two's complement, the sign bit is separate
INT128_MAX = 2**128 - 1
INT128_MIN = -INT128_MAX
INT128_MID = 2**64  # mid-bit-width anchor for 128-bit carry edge cases

# Sample values clustered around the extremes, the mid-points and zero,
# to probe overflow/carry boundaries in the checked-arithmetic routines.
INT64_VALUES = (
    [INT64_MIN + j for j in range(20)] +
    [INT64_MAX - j for j in range(20)] +
    [INT64_MID + j for j in range(-20, 20)] +
    [2*INT64_MID + j for j in range(-20, 20)] +
    [INT64_MID//2 + j for j in range(-20, 20)] +
    list(range(-70, 70))
)

INT128_VALUES = (
    [INT128_MIN + j for j in range(20)] +
    [INT128_MAX - j for j in range(20)] +
    [INT128_MID + j for j in range(-20, 20)] +
    [2*INT128_MID + j for j in range(-20, 20)] +
    [INT128_MID//2 + j for j in range(-20, 20)] +
    list(range(-70, 70)) +
    [False]  # negative zero
)

# Strictly positive 64-bit values, used as divisors in the division tests.
INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
@contextlib.contextmanager
def exc_iter(*args):
    """
    Iterate over the Cartesian product of *args; if the body raises,
    re-raise as an AssertionError annotated with the current iterate.
    """
    # One-element list acts as a mutable cell, letting the generator below
    # publish its current item to the exception handler in this frame.
    current = [None]

    def walk():
        for item in itertools.product(*args):
            current[0] = item
            yield item

    try:
        yield walk()
    except Exception:
        import traceback
        msg = "At: %r\n%s" % (repr(current[0]),
                              traceback.format_exc())
        raise AssertionError(msg)
def test_safe_binop():
    # Checked add/sub/mul must match Python's exact arithmetic inside the
    # int64 range and raise OverflowError outside it.
    ops = [
        (operator.add, 1),
        (operator.sub, 2),
        (operator.mul, 3)
    ]
    with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
        for (pyop, opcode), a, b in it:
            expected = pyop(a, b)
            if INT64_MIN <= expected <= INT64_MAX:
                got = mt.extint_safe_binop(a, b, opcode)
                if expected != got:
                    # assert_equal is slow
                    assert_equal(got, expected)
            else:
                assert_raises(OverflowError, mt.extint_safe_binop, a, b, opcode)
def test_to_128():
    # Widening a 64-bit value to 128 bits must round-trip exactly.
    with exc_iter(INT64_VALUES) as it:
        for (value,) in it:
            widened = mt.extint_to_128(value)
            if value != widened:
                assert_equal(widened, value)
def test_to_64():
    # Narrowing round-trips inside the int64 range and overflows outside it.
    with exc_iter(INT128_VALUES) as it:
        for (value,) in it:
            if INT64_MIN <= value <= INT64_MAX:
                narrowed = mt.extint_to_64(value)
                if value != narrowed:
                    assert_equal(narrowed, value)
            else:
                assert_raises(OverflowError, mt.extint_to_64, value)
def test_mul_64_64():
    # 64x64 -> 128-bit multiply must agree with Python's exact product.
    with exc_iter(INT64_VALUES, INT64_VALUES) as it:
        for a, b in it:
            expected = a * b
            got = mt.extint_mul_64_64(a, b)
            if expected != got:
                assert_equal(got, expected)
def test_add_128():
    # 128-bit add: exact inside the representable range, OverflowError outside.
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for a, b in it:
            expected = a + b
            if INT128_MIN <= expected <= INT128_MAX:
                got = mt.extint_add_128(a, b)
                if expected != got:
                    assert_equal(got, expected)
            else:
                assert_raises(OverflowError, mt.extint_add_128, a, b)
def test_sub_128():
    # 128-bit subtract: exact inside the representable range, OverflowError outside.
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for a, b in it:
            expected = a - b
            if INT128_MIN <= expected <= INT128_MAX:
                got = mt.extint_sub_128(a, b)
                if expected != got:
                    assert_equal(got, expected)
            else:
                assert_raises(OverflowError, mt.extint_sub_128, a, b)
def test_neg_128():
    # Negation is exact for every sample value (separate sign bit, so the
    # range is symmetric and no extreme value is unrepresentable).
    with exc_iter(INT128_VALUES) as it:
        for (value,) in it:
            expected = -value
            got = mt.extint_neg_128(value)
            if expected != got:
                assert_equal(got, expected)
def test_shl_128():
    # Shift-left by one acts on the magnitude, truncated to 128 bits,
    # with the sign carried separately.
    mask = 2**128 - 1
    with exc_iter(INT128_VALUES) as it:
        for (value,) in it:
            magnitude = (abs(value) << 1) & mask
            expected = -magnitude if value < 0 else magnitude
            got = mt.extint_shl_128(value)
            if expected != got:
                assert_equal(got, expected)
def test_shr_128():
    # Shift-right by one acts on the magnitude (rounds toward zero),
    # with the sign carried separately.
    with exc_iter(INT128_VALUES) as it:
        for (value,) in it:
            magnitude = abs(value) >> 1
            expected = -magnitude if value < 0 else magnitude
            got = mt.extint_shr_128(value)
            if expected != got:
                assert_equal(got, expected)
def test_gt_128():
    # Greater-than must agree with Python's own ordering of the values.
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for a, b in it:
            expected = a > b
            got = mt.extint_gt_128(a, b)
            if expected != got:
                assert_equal(got, expected)
@pytest.mark.slow
def test_divmod_128_64():
    # Check 128-by-64-bit division against Python's divmod, emulating
    # C-style truncation (quotient toward zero, remainder with numerator's
    # sign) for negative numerators.
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for a, b in it:
            if a >= 0:
                c, cr = divmod(a, b)
            else:
                c, cr = divmod(-a, b)
                c = -c
                cr = -cr

            d, dr = mt.extint_divmod_128_64(a, b)

            # Fast path: only fall through to the (slow) assert_equal calls
            # on mismatch. The remainder check is cr != dr; the original
            # `d != dr` typo compared quotient with remainder, which forced
            # the slow path whenever the two merely differed.
            if c != d or cr != dr or b*d + dr != a:
                assert_equal(d, c)
                assert_equal(dr, cr)
                assert_equal(b*d + dr, a)
def test_floordiv_128_64():
    # Floor division by a positive 64-bit divisor must match Python's //.
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for a, b in it:
            expected = a // b
            got = mt.extint_floordiv_128_64(a, b)
            if expected != got:
                assert_equal(got, expected)
def test_ceildiv_128_64():
    # Ceiling division: for positive b, ceil(a/b) == (a + b - 1) // b.
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for a, b in it:
            expected = (a + b - 1) // b
            got = mt.extint_ceildiv_128_64(a, b)
            if expected != got:
                assert_equal(got, expected)
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_core@tests@test_extint128.py@.PATH_END.py
|
{
"filename": "ReadMe.md",
"repo_name": "christophmschaefer/miluphcuda",
"repo_path": "miluphcuda_extracted/miluphcuda-main/test_cases/rotating_sphere/ReadMe.md",
"type": "Markdown"
}
|
### Rotating sphere test case
Standard SPH cannot handle rigid rotations.
This test case is one of the standard tests to check if the TENSORIAL_CORRECTION
implementation works.
How to run it:
1. copy parameter.h to the root source directory of miluphcuda (usually cp parameter.h ../../)
2. compile
3. copy the binary to this directory
4. start run.sh in this directory
The initial particle distribution is a solid sphere with R=1, rotating with angular velocity omega_z = (2*pi/100) * c_s/R, where c_s denotes the
sound speed, which is set to 1.
The test runs only for some minutes. In the end, either check the file **conserved_quantities.log** for the total
angular momentum (column 1 is time, column 12 is the absolute value of the total angular momentum of all particles), or
take a look at **angular_momentum.png**.
|
christophmschaeferREPO_NAMEmiluphcudaPATH_START.@miluphcuda_extracted@miluphcuda-main@test_cases@rotating_sphere@ReadMe.md@.PATH_END.py
|
{
"filename": "contents.md",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/docs/contents.md",
"type": "Markdown"
}
|
<!--
# Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-->
```{toctree}
:maxdepth: 1
:caption: Getting Started
getting_started/quickstart
```
```{toctree}
:maxdepth: 1
:caption: User Guide
user_guide/performance_tuning
user_guide/architecture
user_guide/model_repository
customization_guide/repository_agents
user_guide/model_configuration
user_guide/request_cancellation
user_guide/optimization
user_guide/ragged_batching
user_guide/rate_limiter
user_guide/model_analyzer
user_guide/model_management
user_guide/custom_operations
user_guide/decoupled_models
user_guide/response_cache
user_guide/metrics
user_guide/trace
user_guide/jetson
user_guide/v1_to_v2
customization_guide/deploy
```
```{toctree}
:maxdepth: 1
:caption: Debugging
user_guide/debugging_guide
user_guide/faq
```
```{toctree}
:maxdepth: 1
:caption: Protocol Guides
protocol/README
customization_guide/inference_protocols
protocol/extension_binary_data
protocol/extension_classification
protocol/extension_generate
protocol/extension_logging
protocol/extension_model_configuration
protocol/extension_model_repository
protocol/extension_schedule_policy
protocol/extension_sequence
protocol/extension_shared_memory
protocol/extension_statistics
protocol/extension_trace
protocol/extension_parameters
```
```{toctree}
:maxdepth: 1
:caption: Customization Guide
customization_guide/build
customization_guide/compose
customization_guide/test
```
```{toctree}
:maxdepth: 1
:caption: Examples
examples/jetson/README
examples/jetson/concurrency_and_dynamic_batching/README
```
```{toctree}
:maxdepth: 1
:caption: Client
client/README
_reference/tritonclient_api.rst
client/src/java/README
client/src/grpc_generated/go/README
client/src/grpc_generated/javascript/README
client/src/grpc_generated/java/README
```
```{toctree}
:maxdepth: 1
:caption: Performance Analyzer
perf_analyzer/README
perf_analyzer/docs/README
perf_analyzer/docs/install
perf_analyzer/docs/quick_start
perf_analyzer/docs/cli
perf_analyzer/docs/inference_load_modes
perf_analyzer/docs/input_data
perf_analyzer/docs/measurements_metrics
perf_analyzer/docs/benchmarking
perf_analyzer/genai-perf/README
perf_analyzer/genai-perf/docs/compare
perf_analyzer/genai-perf/docs/embeddings
perf_analyzer/genai-perf/docs/files
perf_analyzer/genai-perf/docs/lora
perf_analyzer/genai-perf/docs/multi_modal
perf_analyzer/genai-perf/docs/rankings
perf_analyzer/genai-perf/docs/tutorial
perf_analyzer/genai-perf/examples/tutorial
```
```{toctree}
:maxdepth: 1
:caption: Python Backend
python_backend/README
python_backend/inferentia/README
python_backend/examples/auto_complete/README
python_backend/examples/bls/README
python_backend/examples/bls_decoupled/README
python_backend/examples/custom_metrics/README
python_backend/examples/decoupled/README
python_backend/examples/instance_kind/README
python_backend/examples/jax/README
python_backend/examples/preprocessing/README
```
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@docs@contents.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "CosmoLike/cocoa",
"repo_path": "cocoa_extracted/cocoa-main/Cocoa/projects/README.md",
"type": "Markdown"
}
|
# Table of contents
1. [The Projects Folder](#appendix_projects_folder)
2. [Adapting the COCOA_LSST_Y1 repository to a new project](#appendix_lsst_y1_new)
1. [Minor changes: the easy way](#appendix_lsst_y1_new_small)
2. [Minor changes: the hard way](#appendix_lsst_y1_new_small2)
3. [Major changes](#appendix_lsst_y1_new_major)
# The Projects Folder <a name="appendix_projects_folder"></a>
The `projects` folder includes all the projects linked to Cosmolike; they can also help organize general investigations even if they don't use Cosmolike directly.
Projects should be hosted on independent GitHub repositories; our convention is to name the repository cocoa_XXX, where XXX is the intended project name. Projects that utilize Cosmolike need to have more or less the following structure, taken from a [LSST_Y1 project](https://github.com/CosmoLike/cocoa_lsst_y1)
+-- cocoa_lsst_y1
| +-- likelihood
| | +-- _cosmolike_prototype_base.py
| | +-- lsst_3x2pt.py
| | +-- lsst_3x2pt.yaml
| | +-- lsst_2x2pt.py
| | +-- lsst_2x2pt.yaml
| | +-- lsst_clustering.py
| | +-- lsst_clustering.yaml
| | +-- lsst_cosmic_shear.py
| | +-- lsst_cosmic_shear.yaml
| +-- scripts
| | +-- compile_lsst_y1
| | +-- start_lsst_y1
| | +-- stop_lsst_y1
| +-- data
| | +-- LSST_Y1.dataset
| | +-- datavector.txt
| | +-- covariance.txt
| | +-- nzlens.txt
| | +-- nzsource.txt
| | +-- mask.mask
| +-- interface
| | +-- MakefileCosmolike
| | +-- cosmolike_lsst_y1_interface.py
| | +-- interface.cpp
| | +-- interface.hpp
| +-- chains
| | +-- README
| +-- EXAMPLE_EVALUATE_1.YAML
| +-- EXAMPLE_MCMC_1.YAML
# Adapting the COCOA_LSST_Y1 repository to a new project <a name="appendix_lsst_y1_new"></a>
Adapting the LSST_Y1 folder to construct a new project involves many small core changes and a few major ones. They are tedious but straightforward. The easiest way to apply the minor core changes to the code is via the bash script *transfer_project.sh*.
## Minor changes: the easy way <a name="appendix_lsst_y1_new_small"></a>
To properly use the bash script *transfer_project.sh*, users must set the following variables at the beginning of the file:
OLD_PROJECT="lsst_y1"
OLD_SURVEY="LSST"
NEW_PROJECT="des_y3"
NEW_SURVEY="DES"
After that, just type
$(cocoa)(.local) bash transfer_project.sh
## Minor changes: the hard way <a name="appendix_lsst_y1_new_small2"></a>
### Create the new project
**Step 1:** Choose a project name (e.g., project XXX), and copy the `LSST_Y1` project using the command below
$(cocoa)(.local) cp $ROOTDIR/projects/lsst_y1/ $ROOTDIR/projects/xxx
**Step 2:** Remove the git repository associated with LSST_Y1 project
$(cocoa)(.local) rm -rf $ROOTDIR/projects/$NEW_PROJECT/.git/
### Changes in the interface folder
**Step 1:** Change the file `$ROOTDIR/projects/XXX/interface/MakefileCosmolike` following the instructions below
(...)
CSOURCES += \
(...)
${ROOTDIR}/external_modules/code/cosmolike/pt_cfastpt.c \
// add additional files from /external_modules/code/cosmolike that is needed
(...)
OBJECTC += \
(...)
./pt_cfastpt.o \
// add additional files from /external_modules/code/cosmolike that is needed
(...)
all: shared
// change cosmolike_lsst_y1_interface.so to cosmolike_XXX_interface.so in the line below
shared: cosmolike_lsst_y1_interface.so
(...)
// change cosmolike_lsst_y1_interface.so to cosmolike_XXX_interface.so in the line below
cosmolike_lsst_y1_interface.so: $(OBJECTC) $(CSOURCES) interface.cpp
$(CXX) $(CXXFLAGS) -DCOBAYA_SAMPLER -shared -fPIC -o $@ $(OBJECTC) interface.cpp $(LDFLAGS)
@rm *.o
(...)
// change cosmolike_lsst_y1_interface.so to cosmolike_XXX_interface.so in the line below
clean:
@rm -rf cosmolike_lsst_y1_interface.so cosmolike_lsst_y1_interface.so.dSYM *.o
**Step 2:** Change the name of the File `$ROOTDIR/projects/XXX/interface/cosmolike_lsst_y1_interface.py` using the command below
$(cocoa)(.local) mv $ROOTDIR/projects/XXX/interface/cosmolike_lsst_y1_interface.py $ROOTDIR/projects/XXX/interface/cosmolike_XXX_interface.py
**Step 3** Changes in the newly created file `$ROOTDIR/projects/XXX/interface/cosmolike_XXX_interface.py`
def __bootstrap__():
(...)
// change cosmolike_lsst_y1_interface.so to cosmolike_XXX_interface.so in the line below
__file__ = pkg_resources.resource_filename(__name__,'cosmolike_lsst_y1_interface.so')
**Step 4** Change the file `$ROOTDIR/projects/XXX/interface/interface.cpp` following the instructions below
(...)
// change cosmolike_lsst_y1_interface to cosmolike_XXX_interface in the line below
PYBIND11_MODULE(cosmolike_lsst_y1_interface, m)
{
// change the description below
m.doc() = "CosmoLike Interface for LSST_Y1 3x2pt Module";
(...)
}
### Changes in the script folder
**Step 1:** Change the name of the file `$ROOTDIR/projects/XXX/scripts/compile_lsst_y1` using the command below
$(cocoa)(.local) mv $ROOTDIR/projects/XXX/scripts/compile_lsst_y1 $ROOTDIR/projects/XXX/scripts/compile_XXX
**Step 2:** Change the name of the file `$ROOTDIR/projects/XXX/scripts/start_lsst_y1` using the command below
$(cocoa)(.local) mv $ROOTDIR/projects/XXX/scripts/start_lsst_y1 $ROOTDIR/projects/XXX/scripts/start_XXX
**Step 3:** Change the name of the file `$ROOTDIR/projects/XXX/scripts/stop_lsst_y1` using the command below
$(cocoa)(.local) mv $ROOTDIR/projects/XXX/scripts/stop_lsst_y1 $ROOTDIR/projects/XXX/scripts/stop_XXX
**Step 4:** Change the file `$ROOTDIR/projects/XXX/scripts/compile_lsst_y1` following the instructions below
(...)
// change $ROOTDIR/projects/lsst_y1/interface to $ROOTDIR/projects/XXX/interface in the line below
cd $ROOTDIR/projects/lsst_y1/interface
**Step 5:** Change the file `$ROOTDIR/projects/XXX/scripts/start_lsst_y1` following the instructions below
(...)
// change $ROOTDIR/projects/lsst_y1/interface to $ROOTDIR/projects/XXX/interface in the line below
addvar LD_LIBRARY_PATH $ROOTDIR/projects/lsst_y1/interface
// change $ROOTDIR/projects/lsst_y1/interface to $ROOTDIR/projects/XXX/interface in the line below
addvar PYTHONPATH $ROOTDIR/projects/lsst_y1/interface
### Changes in the likelihood folder
**Step 1:** Change the file `$ROOTDIR/projects/XXX/likelihood/_cosmolike_prototype_base.py` following the instructions below
(...)
// change cosmolike_lsst_y1_interface to cosmolike_XXX_interface in the line below
import cosmolike_lsst_y1_interface as ci
(...)
def set_source_related(self, **params_values):
ci.set_nuisance_shear_calib(
M = [
params_values.get(p, None) for p in [
// change LSST_ to the name of the survey associated w/ XXX)
"LSST_M"+str(i+1) for i in range(self.source_ntomo)
]
]
)
ci.set_nuisance_shear_photoz(
bias = [
params_values.get(p, None) for p in [
// change LSST_ to the name of the survey associated w/ XXX)
"LSST_DZ_S"+str(i+1) for i in range(self.source_ntomo)
]
]
)
ci.set_nuisance_ia(
A1 = [
params_values.get(p, None) for p in [
// change LSST_ to the name of the survey associated w/ XXX)
"LSST_A1_"+str(i+1) for i in range(self.source_ntomo)
]
],
A2 = [
params_values.get(p, None) for p in [
// change LSST_ to the name of the survey associated w/ XXX)
"LSST_A2_"+str(i+1) for i in range(self.source_ntomo)
]
],
B_TA = [
params_values.get(p, None) for p in [
// change LSST_ to the name of the survey associated w/ XXX)
"LSST_BTA_"+str(i+1) for i in range(self.source_ntomo)
]
],
)
(...)
def set_lens_related(self, **params_values):
ci.set_nuisance_bias(
B1 = [
params_values.get(p, None) for p in [
// change DES_ to the name of the survey associated w/ XXX)
"LSST_B1_"+str(i+1) for i in range(self.lens_ntomo)
]
],
B2 = [
params_values.get(p, None) for p in [
// change DES_ to the name of the survey associated w/ XXX)
"LSST_B2_"+str(i+1) for i in range(self.lens_ntomo)
]
],
B_MAG = [
params_values.get(p, None) for p in [
// change DES_ to the name of the survey associated w/ XXX)
"LSST_BMAG_"+str(i+1) for i in range(self.lens_ntomo)
]
]
)
ci.set_nuisance_clustering_photoz(
bias = [
params_values.get(p, None) for p in [
// change DES_ to the name of the survey associated w/ XXX)
"LSST_DZ_L"+str(i+1) for i in range(self.lens_ntomo)
]
]
)
ci.set_point_mass(
PMV = [
params_values.get(p, None) for p in [
// change DES_ to the name of the survey associated w/ XXX)
"LSST_PM"+str(i+1) for i in range(self.lens_ntomo)
]
]
)
(...)
def set_baryon_related(self, **params_values):
// change LSST_ to the name of the survey associated w/ XXX)
self.baryon_pcs_qs[0] = params_values.get("LSST_BARYON_Q1", None)
self.baryon_pcs_qs[1] = params_values.get("LSST_BARYON_Q2", None)
self.baryon_pcs_qs[2] = params_values.get("LSST_BARYON_Q3", None)
self.baryon_pcs_qs[3] = params_values.get("LSST_BARYON_Q4", None)
If the project name `XXX` contains more than the experiment name, we suggest replacing `LSST_` with just the experiment name. For example, if `XXX = DES_Y3`, then adopt `DES_DZ_L1` for the name of the redshift shift on lens bin 1. The convention adopted must be followed when changing the files `params_des_cosmic_shear.yaml` and `params_des_3x2pt.yaml`.
**Step 2:** Change the file `$ROOTDIR/projects/XXX/likelihood/lsst_3x2pt.py` following the instructions below
// change lsst_y1 to XXX in the line below
from cobaya.likelihoods.lsst_y1._cosmolike_prototype_base import _cosmolike_prototype_base
// change cosmolike_lsst_y1_interface to cosmolike_XXX_interface in the line below
import cosmolike_lsst_y1_interface as ci
**Step 3:** Change the file `$ROOTDIR/projects/XXX/likelihood/lsst_3x2pt.yaml` following the instructions below
(...)
// change LSST_Y1.dataset to XXX.dataset in the line below (adopted convention: .dataset file name = project name all in CAPS)
data_file: LSST_Y1.dataset
(...)
// change params_lsst_3x2pt to params_XXX_3x2pt in the line below
params: !defaults [params_lsst_3x2pt]
**Step 4:** Rename the file `params_lsst_3x2pt.yaml` to `params_XXX_3x2pt.yaml`. Also, rename the associated parameter names,
replacing the `LSST_` prefix as shown below.
XXX_DZ_S1:
prior:
dist: norm
loc: 0.0
scale: 0.005
ref:
dist: norm
loc: 0.0
scale: 0.005
proposal: 0.005
latex: \Delta z_\mathrm{s, XXX}^1
Similar changes must be made in `params_XXX_cosmic_shear.yaml`. Note that changes either in the number of lenses or source bins will demand the introduction of new parameters in
`params_XXX_cosmic_shear.yaml` and `params_XXX_3x2pt.yaml`
### Changes in the data folder
**Step 1:** Rename the `.dataset` file. Our adopted convention is: `.dataset` file name = project name capitalized
$(cocoa)(.local) cp $ROOTDIR/projects/LSST_Y1/data/LSST_Y1.dataset $ROOTDIR/projects/XXX/data/XXX.dataset
**Step 2:** Update `XXX.dataset` file with the names of the new data vector, covariance, n(z), binning, mask...
data_file = XXX_nonlim
cov_file = cov_XXX
mask_file = 3x2pt_baseline.mask
nz_lens_file = lens_XXX.nz
nz_source_file = source_XXX.nz
lensing_overlap_cut = 0.0
lens_ntomo = 5
source_ntomo = 5
n_theta = 26
IA_model = 4
theta_min_arcmin = 2.5
theta_max_arcmin = 900.
#baryon_pca_file = pca.txt
## Major changes: <a name="appendix_lsst_y1_new_major"></a>
* Computation of a new covariance matrix using either [CosmoCov](https://github.com/CosmoLike/CosmoCov) or [CosmoCovFourier](https://github.com/CosmoLike/CosmoCov_Fourier)
* Simulation of new `n(z)` for lenses and sources
* Updates to the Cosmolike C++ interface so the appropriate routines can be called from the Python likelihood
* Updates to the Cosmolike Python likelihoods and their associated Yaml files. These include, for example, `/likelihood/lsst_3x2pt.py` and `/likelihood/lsst_3x2pt.yaml`
|
CosmoLikeREPO_NAMEcocoaPATH_START.@cocoa_extracted@cocoa-main@Cocoa@projects@README.md@.PATH_END.py
|
{
"filename": "makesky.py",
"repo_name": "mdwarfgeek/tres-tools",
"repo_path": "tres-tools_extracted/tres-tools-master/makesky.py",
"type": "Python"
}
|
import numpy
from poly import *
def makesky(wave, flux, deg):
    """Fit and evaluate a smooth Legendre sky model along a spectrum.

    Parameters
    ----------
    wave : array_like
        Wavelength grid.  The endpoints define the normalisation range,
        so this presumably must be monotonically ordered -- TODO confirm
        with callers.
    flux : array_like
        Sky flux samples; non-finite entries are ignored in the fit.
    deg : int
        Degree of the Legendre polynomial fit.

    Returns
    -------
    ndarray
        The fitted sky model evaluated on the full `wave` grid.
    """
    # Map wavelength onto the normalised interval [-1, 1] expected by
    # the Legendre basis.
    lo = wave[0]
    hi = wave[-1]
    x = (2*wave - (lo + hi)) / (hi - lo)
    # Keep only finite flux samples for the fit.
    good = numpy.isfinite(flux)
    # Iteratively clipped Legendre fit (helper from the `poly` module).
    coef = cliplegfit(x[good], flux[good], deg)
    # Evaluate the fitted model everywhere, including clipped samples.
    return numpy.polynomial.legendre.legval(x, coef)
|
mdwarfgeekREPO_NAMEtres-toolsPATH_START.@tres-tools_extracted@tres-tools-master@makesky.py@.PATH_END.py
|
{
"filename": "overview.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/examples/auto_complete/overview.md",
"type": "Markdown"
}
|
# Auto Complete
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/examples/auto_complete/overview"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.sandbox.google.com/github/tensorflow/codelabs/blob/main/KerasNLP/io2023_workshop.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
## Introduction
Large language models (LLMs) are a class of machine learning models that are
trained to generate text based on large datasets. They can be used for natural
language processing (NLP) tasks, including text generation, question answering,
and machine translation. They are based on Transformer architecture and are
trained on massive amounts of text data, often involving billions of words. Even
LLMs of a smaller scale, such as GPT-2, can perform impressively. Converting
TensorFlow models to a lighter, faster, and low-power model allows for us to run
generative AI models on-device, with benefits of better user security because
data will never leave your device.
This runbook shows you how to build an Android app with TensorFlow Lite to run a
Keras LLM and provides suggestions for model optimization using quantizing
techniques, which otherwise would require a much larger amount of memory and
greater computational power to run.
We have open sourced our
[Android app framework](https://github.com/tensorflow/examples/tree/master/lite/examples/generative_ai/)
that any compatible TFLite LLMs can plug into. Here are two demos:
* In Figure 1, we used a Keras GPT-2 model to perform text completion tasks on
device.
* In Figure 2, we converted a version of instruction-tuned
[PaLM model](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html)
(1.5 billion parameters) to TFLite and executed through TFLite runtime.
<center>
{: width="400px"}
<figcaption><b>Figure 1: </b>Example of running the Keras GPT-2 model (converted
from this [Codelab](https://codelabs.developers.google.com/kerasnlp-tflite)) on
device to perform text completion on Pixel 7. Demo shows the real latency with
no speedup.</figcaption>
</center>
<center>
{: width="400px"}
</p>
<figcaption><b>Figure 2: </b>Example of running a version of
[PaLM model](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html)
with 1.5 billion parameters. Demo is recorded on Pixel 7 Pro without playback
speedup.</figcaption>
</center>
## Guides
### Model authoring
For this demonstration, we will use KerasNLP to get the GPT-2 model. KerasNLP is
a library that contains state-of-the-art pretrained models for natural language
processing tasks, and can support users through their entire development cycle.
You can see the list of models available in the
[KerasNLP repository](https://keras.io/api/keras_hub/models/). The workflows are
built from modular components that have state-of-the-art preset weights and
architectures when used out-of-the-box and are easily customizable when more
control is needed. Creating the GPT-2 model can be done with the following
steps:
```python
gpt2_tokenizer = keras_nlp.models.GPT2Tokenizer.from_preset("gpt2_base_en")
gpt2_preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
"gpt2_base_en",
sequence_length=256,
add_end_token=True,
)
gpt2_lm =
keras_nlp.models.GPT2CausalLM.from_preset(
"gpt2_base_en",
preprocessor=gpt2_preprocessor
)
```
One commonality among these three lines of code is the `from_preset()` method,
which will instantiate the part of Keras API from a preset architecture and/or
weights, therefore loading the pre-trained model. From this code snippet, you’ll
also notice three modular components:
1. **Tokenizer**: converts a raw string input into integer token IDs suitable
for a Keras Embedding layer. GPT-2 uses the byte-pair encoding (BPE)
tokenizer specifically.
2. **Preprocessor**: layer for tokenizing and packing inputs to be fed into a
Keras model. Here, the preprocessor will pad the tensor of token IDs to a
specified length (256) after tokenization.
3. **Backbone**: Keras model that follows the SoTA transformer backbone
architecture and has the preset weights.
Additionally, you can check out the full GPT-2 model implementation on
[GitHub](https://github.com/keras-team/keras-nlp/tree/master/keras_nlp/models/gpt2).
### Model conversion
TensorFlow Lite is a mobile library for deploying models on mobile,
microcontrollers, and other edge devices. The first step is to convert a Keras
model to a more compact TensorFlow Lite format using the TensorFlow Lite
**converter**, and then use the TensorFlow Lite **interpreter**, which is highly
optimized for mobile devices, to run the converted model.
<img src="https://www.tensorflow.org/lite/examples/auto_complete/images/tflite_workflow.png" class="attempt-right" />
Start with the `generate()` function from `GPT2CausalLM` that performs the
conversion. Wrap the `generate()` function to create a concrete TensorFlow
function:
```python
@tf.function
def generate(prompt, max_length):
"""
Args:
prompt: input prompt to the LLM in string format
max_length: the max length of the generated tokens
"""
return gpt2_lm.generate(prompt, max_length)
concrete_func = generate.get_concrete_function(tf.TensorSpec([], tf.string), 100)
```
Note that you can also use `from_keras_model()` from
[`TFLiteConverter`](https://www.tensorflow.org/api_docs/python/tf/lite/TFLiteConverter#from_keras_model)
in order to perform the conversion.
Now define a helper function that will run inference with an input and a TFLite
model. TensorFlow text ops are not built-in ops in the TFLite runtime, so you
will need to add these custom ops in order for the interpreter to make inference
on this model. This helper function accepts an input and a function that
performs the conversion, namely the `generator()` function defined above.
```python
def run_inference(input, generate_tflite):
interp = interpreter.InterpreterWithCustomOps(
model_content=generate_tflite,
custom_op_registerers=
tf_text.tflite_registrar.SELECT_TFTEXT_OPS
)
interp.get_signature_list()
generator = interp.get_signature_runner('serving_default')
output = generator(prompt=np.array([input]))
```
You can convert the model now:
```python
gpt2_lm.jit_compile = False
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[concrete_func],
gpt2_lm)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TFLite ops
tf.lite.OpsSet.SELECT_TF_OPS, # enable TF ops
]
converter.allow_custom_ops = True
converter.target_spec.experimental_select_user_tf_ops = [
"UnsortedSegmentJoin",
"UpperBound"
]
converter._experimental_guarantee_all_funcs_one_use = True
generate_tflite = converter.convert()
run_inference("I'm enjoying a", generate_tflite)
```
### Quantization
TensorFlow Lite has implemented an optimization technique called
**quantization** which can reduce model size and accelerate inference. Through
the quantization process, 32-bit floats are mapped to smaller 8-bit integers,
therefore reducing the model size by a factor of 4 for more efficient execution
on modern hardwares. There are several ways to do quantization in TensorFlow.
You can visit the
[TFLite Model optimization](https://www.tensorflow.org/lite/performance/model_optimization)
and
[TensorFlow Model Optimization Toolkit](https://www.tensorflow.org/model_optimization)
pages for more information. The types of quantizations are explained briefly
below.
Here, you will use the
[post-training dynamic range quantization](https://www.tensorflow.org/lite/performance/post_training_quant)
on the GPT-2 model by setting the converter optimization flag to
`tf.lite.Optimize.DEFAULT`, and the rest of the conversion process is the same
as detailed before. We tested that with this quantization technique the latency
is around 6.7 seconds on Pixel 7 with max output length set to 100.
```python
gpt2_lm.jit_compile = False
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[concrete_func],
gpt2_lm)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TFLite ops
tf.lite.OpsSet.SELECT_TF_OPS, # enable TF ops
]
converter.allow_custom_ops = True
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.experimental_select_user_tf_ops = [
"UnsortedSegmentJoin",
"UpperBound"
]
converter._experimental_guarantee_all_funcs_one_use = True
quant_generate_tflite = converter.convert()
run_inference("I'm enjoying a", quant_generate_tflite)
```
**Dynamic Range**
Dynamic range quantization is the recommended starting point for optimizing
on-device models. It can achieve about a 4x reduction in the model size, and is
a recommended starting point as it provides reduced memory usage and faster
computation without you having to provide a representative dataset for
calibration. This type of quantization statically quantizes only the weights
from floating point to 8-bit integer at conversion time.
**FP16**
Floating point models can also be optimized by quantizing the weights to float16
type. The advantages of
[float16 quantization](https://www.tensorflow.org/lite/performance/post_training_float16_quant)
are reducing the model size by up to half (as all weights become half their
size), causing minimal loss in accuracy, and supporting GPU delegates that can
operate directly on float16 data (which results in faster computation than on
float32 data). A model converted to float16 weights can still run on the CPU
without additional modifications. The float16 weights are upsampled to float32
before the first inference, which permits a reduction in model size in exchange
for a minimal impact to latency and accuracy.
**Full Integer Quantization**
[Full integer quantization](https://www.tensorflow.org/lite/performance/post_training_integer_quant)
both converts the 32 bit floating point numbers, including weights and
activations, to the nearest 8 bit integers. This type of quantization results in
a smaller model with increased inference speed, which is incredibly valuable
when using microcontrollers. This mode is recommended when activations are
sensitive to the quantization.
### Android App integration
You can follow this
[Android example](https://github.com/tensorflow/examples/tree/master/lite/examples/generative_ai)
to integrate your TFLite model into an Android App.
### Prerequisites
If you have not already, install
[Android Studio](https://developer.android.com/studio/index.html), following the
instructions on the website.
* Android Studio 2022.2.1 or above.
* An Android device or Android emulator with more than 4G memory
### Building and Running with Android Studio
* Open Android Studio, and from the Welcome screen, select **Open an existing
Android Studio project**.
* From the Open File or Project window that appears, navigate to and select
the
[`lite/examples/generative_ai/android`](https://github.com/tensorflow/examples/tree/master/lite/examples/generative_ai/android)
directory from wherever you cloned the TensorFlow Lite sample GitHub repo.
* You may also need to install various platforms and tools according to error
messages.
* Rename the converted .tflite model to `autocomplete.tflite` and copy it into
`app/src/main/assets/` folder.
* Select menu **Build -> Make Project** to build the app. (Ctrl+F9, depending
on your version).
* Click menu **Run -> Run 'app'**. (Shift+F10, depending on your version)
Alternatively, you can also use the
[gradle wrapper](https://docs.gradle.org/current/userguide/gradle_wrapper.html#gradle_wrapper)
to build it in the command line. Please refer to the
[Gradle documentation](https://docs.gradle.org/current/userguide/command_line_interface.html)
for more information.
### (Optional) Building the .aar file
By default the app automatically downloads the needed `.aar` files. But if you
want to build your own, switch to `app/libs/build_aar/` folder run
`./build_aar.sh`. This script will pull in the necessary ops from TensorFlow
Text and build the aar for Select TF operators.
After compilation, a new file `tftext_tflite_flex.aar` is generated. Replace the
.aar file in `app/libs/` folder and re-build the app.
Note that you still need to include the standard `tensorflow-lite` aar in your
gradle file.
### Context window size
<img src="https://www.tensorflow.org/lite/examples/auto_complete/images/context_window.png" class="attempt-right" />
The app has a changeable parameter ‘context window size’, which is needed
because LLMs today generally have a fixed context size which limits how many
words/tokens can be fed into the model as ‘prompt’ (note that ‘word’ is not
necessarily equivalent to ‘token’ in this case, due to different tokenization
methods). This number is important because:
* Setting it too small, the model will not have enough context to generate
meaningful output
* Setting it too big, the model will not have enough room to work with (since
the output sequence is inclusive of the prompt)
You can experiment with it, but setting it to ~50% of output sequence length is
a good start.
## Safety and Responsible AI
As noted in the original
[OpenAI GPT-2 announcement](https://openai.com/research/better-language-models),
there are
[notable caveats and limitations](https://github.com/openai/gpt-2#some-caveats)
with the GPT-2 model. In fact, LLMs today generally have some well-known
challenges such as hallucinations, fairness, and bias; this is because these
models are trained on real-world data, which make them reflect real world
issues.
This codelab is created only to demonstrate how to create an app powered by LLMs
with TensorFlow tooling. The model produced in this codelab is for educational
purposes only and not intended for production usage.
LLM production usage requires thoughtful selection of training datasets and
comprehensive safety mitigations. One such functionality offered in this Android
app is the profanity filter, which rejects bad user inputs or model outputs. If
any inappropriate language is detected, the app will in return reject that
action. To learn more about Responsible AI in the context of LLMs, make sure to
watch the Safe and Responsible Development with Generative Language Models
technical session at Google I/O 2023 and check out the
[Responsible AI Toolkit](https://www.tensorflow.org/responsible_ai).
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@examples@auto_complete@overview.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "AstroVPK/kali",
"repo_path": "kali_extracted/kali-master/python/__init__.py",
"type": "Python"
}
|
AstroVPKREPO_NAMEkaliPATH_START.@kali_extracted@kali-master@python@__init__.py@.PATH_END.py
|
|
{
"filename": "_twodim_base_impl.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/lib/_twodim_base_impl.py",
"type": "Python"
}
|
""" Basic functions for manipulating 2d arrays
"""
import functools
import operator
from numpy._core._multiarray_umath import _array_converter
from numpy._core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, intp, empty, promote_types,
diagonal, nonzero, indices
)
from numpy._core.overrides import finalize_array_function_like, set_module
from numpy._core import overrides
from numpy._core import iinfo
from numpy.lib._stride_tricks_impl import broadcast_to
# Public names exported by this module.
__all__ = [
    'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
    'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
    'tril_indices_from', 'triu_indices', 'triu_indices_from', ]


# Dispatcher factory pre-bound to module='numpy' so decorated functions
# report their public ``numpy.<name>`` location in dispatch errors.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def _flip_dispatcher(m):
    # Relevant arguments for __array_function__ dispatch of fliplr/flipud.
    return (m,)


@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
    """
    Reverse the order of elements along axis 1 (left/right).

    For a 2-D array this reverses the entries of every row; columns are
    preserved but appear in reverse order.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed.  Because a view is
        returned, the operation is :math:`\\mathcal O(1)`.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    flip : Flip array in one or more dimensions.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to ``m[:, ::-1]`` or ``np.flip(m, axis=1)``.
    Requires the array to be at least 2-D.

    Examples
    --------
    >>> import numpy as np
    >>> np.fliplr(np.diag([1., 2., 3.]))
    array([[0., 0., 1.],
           [0., 2., 0.],
           [3., 0., 0.]])
    """
    arr = asanyarray(m)
    if arr.ndim >= 2:
        # Negative step over axis 1 yields a zero-copy reversed view.
        return arr[:, ::-1]
    raise ValueError("Input must be >= 2-d.")
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
    """
    Reverse the order of elements along axis 0 (up/down).

    For a 2-D array this reverses the entries of every column; rows are
    preserved but appear in reverse order.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 1-D.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed.  Because a view is
        returned, the operation is :math:`\\mathcal O(1)`.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    flip : Flip array in one or more dimensions.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.
    Requires the array to be at least 1-D.

    Examples
    --------
    >>> import numpy as np
    >>> np.flipud([1, 2])
    array([2, 1])
    >>> np.flipud(np.diag([1., 2., 3.]))
    array([[0., 0., 3.],
           [0., 2., 0.],
           [1., 0., 0.]])
    """
    arr = asanyarray(m)
    if arr.ndim >= 1:
        # Negative step over axis 0 yields a zero-copy reversed view.
        return arr[::-1, ...]
    raise ValueError("Input must be >= 1-d.")
@finalize_array_function_like
@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.
    order : {'C', 'F'}, optional
        Whether the output should be stored in row-major (C-style) or
        column-major (Fortran-style) order in memory.
    device : str, optional
        The device on which to place the created array. Default: None.
        For Array-API interoperability only, so must be ``"cpu"`` if passed.

        .. versionadded:: 2.0.0
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    I : ndarray of shape (N,M)
        An array where all elements are equal to zero, except for the `k`-th
        diagonal, whose values are equal to one.

    See Also
    --------
    identity : (almost) equivalent function
    diag : diagonal 2-D array from a 1-D array specified by the user.

    Examples
    --------
    >>> import numpy as np
    >>> np.eye(2, dtype=int)
    array([[1, 0],
           [0, 1]])
    >>> np.eye(3, k=1)
    array([[0., 1., 0.],
           [0., 0., 1.],
           [0., 0., 0.]])
    """
    if like is not None:
        # Defer to the like-aware dispatched implementation.
        return _eye_with_like(
            like, N, M=M, k=k, dtype=dtype, order=order, device=device
        )
    if M is None:
        M = N
    m = zeros((N, M), dtype=dtype, order=order, device=device)
    if k >= M:
        # Requested diagonal lies entirely outside the array: all zeros.
        return m
    # Ensure M and k are integers, so we don't get any surprise casting
    # results in the expressions `M-k` and `M+1` used below. This avoids
    # a problem with inputs with type (for example) np.uint64.
    M = operator.index(M)
    k = operator.index(k)
    if k >= 0:
        i = k
    else:
        i = (-k) * M
    # Flat stride M+1 moves one row down and one column right per step,
    # tracing the k-th diagonal; the row slice m[:M-k] bounds how many
    # ones are written.
    m[:M-k].flat[i::M+1] = 1
    return m


# `like=`-dispatching wrapper used by the `like is not None` branch above.
_eye_with_like = array_function_dispatch()(eye)
def _diag_dispatcher(v, k=None):
    # Relevant arguments for __array_function__ dispatch of diag/diagflat.
    return (v,)


@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    For a 2-D input, return a copy of its `k`-th diagonal; for a 1-D
    input, return a 2-D array with the input placed on the `k`-th
    diagonal.  See the more detailed documentation for ``numpy.diagonal``
    regarding copy-versus-view semantics when extracting.

    Parameters
    ----------
    v : array_like
        1-D or 2-D input array.
    k : int, optional
        Diagonal in question; default 0 (the main diagonal).  Use
        ``k > 0`` for diagonals above the main diagonal, ``k < 0`` for
        diagonals below it.

    Returns
    -------
    out : ndarray
        The extracted diagonal or the constructed diagonal array.

    See Also
    --------
    diagonal : Return specified diagonals.
    diagflat : Create a 2-D array with the flattened input as a diagonal.
    trace : Sum along diagonals.
    triu : Upper triangle of an array.
    tril : Lower triangle of an array.

    Examples
    --------
    >>> import numpy as np
    >>> np.diag([1, 2, 3])
    array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]])
    >>> np.diag(np.arange(9).reshape((3, 3)))
    array([0, 4, 8])
    """
    v = asanyarray(v)
    if v.ndim == 2:
        return diagonal(v, k)
    if v.ndim != 1:
        raise ValueError("Input must be 1- or 2-d.")
    # Construct an (n, n) array with `v` along the k-th diagonal.
    n = v.shape[0] + abs(k)
    out = zeros((n, n), v.dtype)
    # Flat offset of the diagonal's first element; stride n+1 advances
    # one row down and one column right per written element.
    start = k if k >= 0 else (-k) * n
    out[:n - k].flat[start::n + 1] = v
    return out
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
    """
    Create a two-dimensional array with the flattened input as a diagonal.

    Parameters
    ----------
    v : array_like
        Input data, which is flattened and set as the `k`-th
        diagonal of the output.
    k : int, optional
        Diagonal to set; 0, the default, corresponds to the "main" diagonal,
        a positive (negative) `k` giving the number of the diagonal above
        (below) the main.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    See Also
    --------
    diag : MATLAB work-alike for 1-D and 2-D arrays.
    diagonal : Return specified diagonals.
    trace : Sum along diagonals.

    Examples
    --------
    >>> import numpy as np
    >>> np.diagflat([[1,2], [3,4]])
    array([[1, 0, 0, 0],
           [0, 2, 0, 0],
           [0, 0, 3, 0],
           [0, 0, 0, 4]])
    >>> np.diagflat([1,2], 1)
    array([[0, 1, 0],
           [0, 0, 2],
           [0, 0, 0]])
    """
    # The converter records the input's class so the result can be wrapped
    # back to a matching type at the end.
    conv = _array_converter(v)
    v, = conv.as_arrays(subok=False)
    v = v.ravel()
    s = len(v)
    # Output is square with side large enough to hold the offset diagonal.
    n = s + abs(k)
    res = zeros((n, n), v.dtype)
    if (k >= 0):
        # Element i lands at (row i, col i+k) -> flat index i*n + i + k.
        i = arange(0, n-k, dtype=intp)
        fi = i+k+i*n
    else:
        # Element i lands at (row i-k, col i) -> flat index (i-k)*n + i.
        i = arange(0, n+k, dtype=intp)
        fi = i+(i-k)*n
    res.flat[fi] = v
    return conv.wrap(res)
@finalize_array_function_like
@set_module('numpy')
def tri(N, M=None, k=0, dtype=float, *, like=None):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns in the array.
        By default, `M` is taken equal to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        ``k = 0`` is the main diagonal, while ``k < 0`` is below it,
        and ``k > 0`` is above. The default is 0.
    dtype : dtype, optional
        Data type of the returned array. The default is float.
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    tri : ndarray of shape (N, M)
        Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.

    Examples
    --------
    >>> import numpy as np
    >>> np.tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])
    """
    if like is not None:
        return _tri_with_like(like, N, M=M, k=k, dtype=dtype)
    cols = N if M is None else M
    # Entry (i, j) is one when i >= j - k; the outer comparison builds the
    # whole boolean mask in one shot, using the narrowest safe int dtypes.
    mask = greater_equal.outer(
        arange(N, dtype=_min_int(0, N)),
        arange(-k, cols - k, dtype=_min_int(-k, cols - k)))
    # astype with copy=False avoids a copy when bool output was requested.
    return mask.astype(dtype, copy=False)


_tri_with_like = array_function_dispatch()(tri)
def _trilu_dispatcher(m, k=None):
    # Relevant arguments for __array_function__ dispatch of tril/triu.
    return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of `m` with every element above the `k`-th diagonal set
    to zero.  For inputs with ``ndim`` greater than 2, the operation is
    applied to the final two axes.

    Parameters
    ----------
    m : array_like, shape (..., M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is
        the main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    tril : ndarray, shape (..., M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle

    Examples
    --------
    >>> import numpy as np
    >>> np.tril([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], -1)
    array([[ 0,  0,  0],
           [ 4,  0,  0],
           [ 7,  8,  0],
           [10, 11, 12]])
    """
    arr = asanyarray(m)
    # Boolean lower-triangle mask over the trailing two axes; it
    # broadcasts against any leading (batch) dimensions of `arr`.
    keep = tri(*arr.shape[-2:], k=k, dtype=bool)
    return where(keep, arr, zeros(1, arr.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of `m` with every element below the `k`-th diagonal set
    to zero.  For inputs with ``ndim`` greater than 2, the operation is
    applied to the final two axes.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array

    Examples
    --------
    >>> import numpy as np
    >>> np.triu([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], -1)
    array([[ 1,  2,  3],
           [ 4,  5,  6],
           [ 0,  8,  9],
           [ 0,  0, 12]])
    """
    arr = asanyarray(m)
    # Strictly-below-the-diagonal mask (note k-1): where it is True the
    # element is zeroed, otherwise the original value is kept.
    drop = tri(*arr.shape[-2:], k=k-1, dtype=bool)
    return where(drop, zeros(1, arr.dtype), arr)
def _vander_dispatcher(x, N=None, increasing=None):
    # Relevant arguments for __array_function__ dispatch of vander.
    return (x,)


# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
    """
    Generate a Vandermonde matrix.

    Each column of the output is the input vector raised element-wise to
    a power.  With ``increasing=False`` (the default) column ``i`` holds
    ``x**(N - i - 1)``; with ``increasing=True`` the powers run from
    ``x**0`` up to ``x**(N - 1)``.  Such a matrix with a geometric
    progression in each row is named for Alexandre-Theophile Vandermonde.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns in the output. If `N` is not specified, a
        square array is returned (``N = len(x)``).
    increasing : bool, optional
        Order of the powers of the columns. If True, the powers increase
        from left to right; if False (the default) they are reversed.

    Returns
    -------
    out : ndarray
        Vandermonde matrix of shape ``(len(x), N)``.

    See Also
    --------
    polynomial.polynomial.polyvander

    Examples
    --------
    >>> import numpy as np
    >>> np.vander([1, 2, 3, 5], 3)
    array([[ 1,  1,  1],
           [ 4,  2,  1],
           [ 9,  3,  1],
           [25,  5,  1]])
    >>> np.vander([1, 2, 3, 5], increasing=True)
    array([[  1,   1,   1,   1],
           [  1,   2,   4,   8],
           [  1,   3,   9,  27],
           [  1,   5,  25, 125]])
    """
    x = asarray(x)
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array or sequence.")
    if N is None:
        N = len(x)
    out = empty((len(x), N), dtype=promote_types(x.dtype, int))
    # Fill through a reversed view for decreasing powers so both orders
    # share the same construction logic below.
    view = out if increasing else out[:, ::-1]
    if N > 0:
        view[:, 0] = 1
    if N > 1:
        view[:, 1:] = x[:, None]
        # A cumulative product along each row turns the repeated x
        # columns into successive powers x**1, x**2, ...
        multiply.accumulate(view[:, 1:], out=view[:, 1:], axis=1)
    return out
def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None,
weights=None):
yield x
yield y
# This terrible logic is adapted from the checks in histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N == 2:
yield from bins # bins=[x, y]
else:
yield bins
yield weights
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x, y : array_like, shape (N,)
        The x and y coordinates of the points to be histogrammed.
    bins : int or array_like or [int, int] or [array, array], optional
        The bin specification: a single int (number of bins for both
        dimensions) or edge array shared by both dimensions, or a
        two-element sequence giving the bin count and/or edge array for
        each dimension separately (mixed pairs such as ``[int, array]``
        are allowed).
    range : array_like, shape(2, 2), optional
        ``[[xmin, xmax], [ymin, ymax]]``: the leftmost and rightmost bin
        edges along each dimension when not given explicitly via `bins`.
        Samples outside this range are treated as outliers and not
        tallied.
    density : bool, optional
        If False (the default), return the number of samples in each
        bin.  If True, return the probability *density* function at the
        bin, ``bin_count / sample_count / bin_area``.
    weights : array_like, shape(N,), optional
        Weights ``w_i`` applied to each sample ``(x_i, y_i)``.  Weights
        are normalized to 1 if `density` is True; otherwise the returned
        histogram holds the sum of the weights in each bin.

    Returns
    -------
    H : ndarray, shape(nx, ny)
        The bi-dimensional histogram of samples `x` and `y`.  `x` is
        histogrammed along the first axis and `y` along the second —
        note this is not the Cartesian plotting convention, so `H` is
        typically transposed for visualization.
    xedges : ndarray, shape(nx+1,)
        The bin edges along the first dimension.
    yedges : ndarray, shape(ny+1,)
        The bin edges along the second dimension.

    See Also
    --------
    histogram : 1D histogram
    histogramdd : Multidimensional histogram

    Notes
    -----
    When `density` is True, the returned histogram is the sample
    density, defined such that the sum over bins of
    ``bin_value * bin_area`` is 1.
    """
    from numpy import histogramdd

    if len(x) != len(y):
        raise ValueError('x and y must have the same length.')

    # Decide whether ``bins`` is a per-axis specification (length 2) or
    # a single shared specification (a scalar, or an edge array of any
    # other length).
    try:
        n_spec = len(bins)
    except TypeError:
        n_spec = 1

    if n_spec != 1 and n_spec != 2:
        # A single edge array: reuse the very same array for both axes.
        xedges = yedges = asarray(bins)
        bins = [xedges, yedges]

    hist, edges = histogramdd([x, y], bins, range, density, weights)
    return hist, edges[0], edges[1]
@set_module('numpy')
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    `mask_func` is expected to behave like `triu`/`tril`: called as
    ``mask_func(a, k)`` on a square array ``a`` of size ``(n, n)`` it
    returns a new array with zeros in certain locations.  This function
    returns the indices of the positions such a call leaves non-zero.

    Parameters
    ----------
    n : int
        The returned indices will be valid to access arrays of shape
        (n, n).
    mask_func : callable
        A function with a `triu`/`tril`-like call signature:
        ``mask_func(x, k)`` returns a boolean array shaped like `x`,
        where `k` is an optional argument.
    k : scalar
        An optional argument passed through to `mask_func` (interpreted
        as a diagonal offset by functions like `triu` and `tril`).

    Returns
    -------
    indices : tuple of arrays.
        The `n` arrays of indices corresponding to the locations where
        ``mask_func(np.ones((n, n)), k)`` is True.

    See Also
    --------
    triu, tril, triu_indices, tril_indices

    Examples
    --------
    >>> import numpy as np
    >>> iu = np.mask_indices(3, np.triu)
    >>> np.arange(9).reshape(3, 3)[iu]
    array([0, 1, 2, 4, 5, 8])
    """
    # Apply the mask to an all-ones probe array; whatever survives marks
    # the locations whose indices we return.
    probe = ones((n, n), int)
    masked = mask_func(probe, k)
    return nonzero(masked != 0)
@set_module('numpy')
def tril_indices(n, k=0, m=None):
    """
    Return the indices for the lower-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned
        indices will be valid.
    k : int, optional
        Diagonal offset (see `tril` for details).
    m : int, optional
        The column dimension of the arrays for which the returned
        arrays will be valid.  By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple of arrays
        The row and column indices, respectively.  The row indices are
        sorted in non-decreasing order, and the corresponding column
        indices are strictly increasing within each row.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu

    Examples
    --------
    >>> import numpy as np
    >>> np.tril_indices(3)
    (array([0, 1, 1, 2, 2, 2]), array([0, 0, 1, 0, 1, 2]))
    """
    # Build a boolean mask selecting the lower triangle, then read off
    # the row/column index of every True position in C order.
    mask = tri(n, m, k=k, dtype=bool)
    row_ix, col_ix = indices(mask.shape, sparse=True)
    return (broadcast_to(row_ix, mask.shape)[mask],
            broadcast_to(col_ix, mask.shape)[mask])
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
    """
    Return the indices for the lower-triangle of `arr`.

    See `tril_indices` for full details.

    Parameters
    ----------
    arr : array_like
        The indices will be valid for arrays whose dimensions are the
        same as those of `arr` (which must be 2-d).
    k : int, optional
        Diagonal offset (see `tril` for details).

    Raises
    ------
    ValueError
        If `arr` is not two-dimensional.

    See Also
    --------
    tril_indices, tril, triu_indices_from

    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[np.tril_indices_from(a)]
    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    # Delegate to tril_indices using the trailing two axis lengths.
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return tril_indices(n_rows, k=k, m=n_cols)
@set_module('numpy')
def triu_indices(n, k=0, m=None):
    """
    Return the indices for the upper-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The size of the arrays for which the returned indices will
        be valid.
    k : int, optional
        Diagonal offset (see `triu` for details).
    m : int, optional
        The column dimension of the arrays for which the returned
        arrays will be valid.  By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple, shape(2) of ndarrays
        The row and column indices, respectively.  The row indices are
        sorted in non-decreasing order, and the corresponding column
        indices are strictly increasing within each row.

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril

    Examples
    --------
    >>> import numpy as np
    >>> np.triu_indices(3)
    (array([0, 0, 0, 1, 1, 2]), array([0, 1, 2, 1, 2, 2]))
    """
    # ``tri`` with offset ``k - 1`` selects everything up to (and
    # including) the diagonal just below the requested one; its
    # complement is exactly the upper triangle starting at diagonal k.
    mask = ~tri(n, m, k=k - 1, dtype=bool)
    row_ix, col_ix = indices(mask.shape, sparse=True)
    return (broadcast_to(row_ix, mask.shape)[mask],
            broadcast_to(col_ix, mask.shape)[mask])
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
    """
    Return the indices for the upper-triangle of `arr`.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : ndarray, shape(N, N)
        The indices will be valid for arrays whose dimensions are the
        same as those of `arr` (which must be 2-d).
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    triu_indices_from : tuple, shape(2) of ndarray, shape(N)
        Indices for the upper-triangle of `arr`.

    Raises
    ------
    ValueError
        If `arr` is not two-dimensional.

    See Also
    --------
    triu_indices, triu, tril_indices_from

    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[np.triu_indices_from(a)]
    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    # Delegate to triu_indices using the trailing two axis lengths.
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return triu_indices(n_rows, k=k, m=n_cols)
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@lib@_twodim_base_impl.py@.PATH_END.py
|
{
"filename": "eta_phi.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/vector_mediator/form_factors/eta_phi.py",
"type": "Python"
}
|
from dataclasses import dataclass
from typing import Union, overload
import numpy as np
import numpy.typing as npt
from hazma import parameters
from .cross_sections import cross_section_x_x_to_p_v
from .utils import ComplexArray, RealArray, breit_wigner_fw
from .widths import width_v_to_v_p
@dataclass(frozen=True)
class FormFactorEtaPhi:
    """
    Class for storing the parameters needed to compute the form factor for
    V-eta-phi. See arXiv:1911.11147 for details on the default values.
    """

    # Resonance masses and widths (GeV) of the two fitted states, with the
    # corresponding fit amplitudes and phases.
    # NOTE(review): these ndarray defaults are class-level objects shared by
    # all instances; safe only as long as they are never mutated in place.
    masses: npt.NDArray[np.float64] = np.array([1.67, 2.14])
    widths: npt.NDArray[np.float64] = np.array([0.122, 0.0435])
    amps: npt.NDArray[np.float64] = np.array([0.175, 0.00409])
    phase_factors: npt.NDArray[np.complex128] = np.exp(1j * np.array([0.0, 2.19]))
    def __form_factor(
        self,
        *,
        s: npt.NDArray[np.float64],
        gvss: float,
    ):
        """
        Compute the V-eta-phi form-factor.

        Uses the parameterization from arXiv:1911.11147.

        Parameters
        ----------
        s: np.ndarray
            Square(s) of the center-of-mass energy in GeV^2.
        gvss: float
            Coupling of vector to strange quarks.
        """
        # Overall coupling factor for the strange-quark current.
        cs = -3.0 * gvss
        # Sum of Breit-Wigner resonances weighted by the fitted amplitudes
        # and phases, scaled by sqrt(s).
        return (
            cs
            * np.sum(
                self.amps
                * self.phase_factors
                * breit_wigner_fw(s, self.masses, self.widths, reshape=True),
                axis=1,
            )
            * np.sqrt(s)
        )
    @overload
    def form_factor(self, *, q: float, gvss: float) -> complex:
        ...
    @overload
    def form_factor(self, *, q: RealArray, gvss: float) -> ComplexArray:
        ...
    def form_factor(
        self, *, q: Union[float, RealArray], gvss: float
    ) -> Union[complex, ComplexArray]:
        """
        Compute the V-eta-phi form factor.

        Parameters
        ----------
        q: Union[float, npt.NDArray[np.float64]]
            Center-of-mass energy in MeV.
        gvss: float
            Coupling of vector to strange quarks.

        Returns
        -------
        ff: Union[complex, npt.NDArray[np.complex128]]
            Form factor from V-eta-phi. A bare complex is returned for
            scalar input, an array otherwise.
        """
        # Convert MeV -> GeV; promote scalar input to a length-1 array so
        # the masked computation below is uniform.
        if hasattr(q, "__len__"):
            qq = 1e-3 * np.array(q)
        else:
            qq = 1e-3 * np.array([q])
        # Only energies above the eta+phi threshold give a non-zero result.
        mask = qq > (parameters.eta_mass + parameters.phi_mass) * 1e-3
        ff = np.zeros_like(qq, dtype=np.complex128)
        ff[mask] = self.__form_factor(s=qq[mask] ** 2, gvss=gvss)
        if len(ff) == 1 and not hasattr(q, "__len__"):
            return ff[0]
        return ff
    def width(self, *, mv, gvss):
        """Partial width for the decay of a vector of mass ``mv`` (MeV) into
        eta + phi, evaluated from the form factor at q = mv."""
        ff = self.form_factor(q=mv, gvss=gvss)
        mvector = parameters.phi_mass
        mscalar = parameters.eta_mass
        return width_v_to_v_p(mv, ff, mvector, mscalar)
    def cross_section(self, *, cme, mx, mv, gvss, gamv):
        """Cross section for x xbar -> eta + phi at center-of-mass energy
        ``cme`` (MeV), mediated by a vector of mass ``mv`` and width ``gamv``."""
        ff = self.form_factor(q=cme, gvss=gvss)
        mvector = parameters.phi_mass
        mscalar = parameters.eta_mass
        s = cme**2
        return cross_section_x_x_to_p_v(s, mx, mscalar, mvector, ff, mv, gamv)
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@vector_mediator@form_factors@eta_phi.py@.PATH_END.py
|
{
"filename": "_tickformatstops.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/marker/colorbar/_tickformatstops.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for ``histogram.marker.colorbar.tickformatstops``.

    Auto-generated plotly.py validator: delegates to
    ``CompoundArrayValidator`` with the ``Tickformatstop`` element class
    and its per-property documentation.
    """
    def __init__(
        self,
        plotly_name="tickformatstops",
        parent_name="histogram.marker.colorbar",
        **kwargs,
    ):
        # ``data_class_str``/``data_docs`` may be overridden by the caller
        # via ``kwargs``; otherwise the generated defaults below are used.
        super(TickformatstopsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            dtickrange
                range [*min*, *max*], where "min", "max" -
                dtick values which describe some zoom level, it
                is possible to omit "min" or "max" value by
                passing "null"
            enabled
                Determines whether or not this stop is used. If
                `false`, this stop is ignored even within its
                `dtickrange`.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            value
                string - dtickformat for described zoom level,
                the same as "tickformat"
""",
            ),
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@marker@colorbar@_tickformatstops.py@.PATH_END.py
|
{
"filename": "tableSaxParser.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/utils/tap/xmlparser/tableSaxParser.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
TAP plus
=============
@author: Juan Carlos Segovia
@contact: juan.carlos.segovia@sciops.esa.int
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 30 jun. 2016
"""
import xml.sax
from astroquery.utils.tap.model.tapcolumn import TapColumn
from astroquery.utils.tap.model.taptable import TapTableMeta
from astroquery.utils.tap.xmlparser import utils as Utils
# Parser states: the handler below is in exactly one of these while
# walking the TAP tableset XML document.
READING_SCHEMA = 10
READING_TABLE = 20
READING_TABLE_COLUMN = 30
class TableSaxParser(xml.sax.ContentHandler):
    '''
    SAX content handler that parses a TAP ``tableset`` XML document into a
    list of ``TapTableMeta`` objects, each populated with its ``TapColumn``
    entries.

    Parsing is a small state machine driven by the module-level READING_*
    constants: schema level -> table level -> column level.
    '''
    def __init__(self):
        '''
        Constructor. All parsing state lives in the private attributes
        initialized by ``__internal_init``.
        '''
        self.__internal_init()
    def __internal_init(self):
        # Reset all parser state so the handler can be reused.
        self.__concatData = False  # True while characters() should buffer text
        self.__charBuffer = []  # text fragments of the current element
        self.__tables = []  # accumulated TapTableMeta results
        self.__status = 0  # current READING_* state
        self.__currentSchemaName = None
        self.__currentTable = None
        self.__currentColumn = None
    def __create_string_from_buffer(self):
        # Join the buffered character data into a single string.
        return Utils.util_create_string_from_buffer(self.__charBuffer)
    def __check_item_id(self, itemId, tmpValue):
        # Case-insensitive comparison between an expected tag name and the
        # tag name reported by the SAX parser.
        if str(itemId).lower() == str(tmpValue).lower():
            return True
        return False
    def __start_reading_data(self):
        # Begin buffering character data, discarding any previous content.
        self.__concatData = True
        del self.__charBuffer[:]
    def __stop_reading_data(self):
        self.__concatData = False
    def parseData(self, data):
        '''
        Parse a TAP tableset XML document.

        Parameters
        ----------
        data : file-like object
            Stream containing the XML document to parse.

        Returns
        -------
        list of TapTableMeta
            The tables found in the document.
        '''
        del self.__tables[:]
        self.__status = READING_SCHEMA
        xml.sax.parse(data, self)
        return self.__tables
    def startElement(self, name, attrs):
        # SAX callback: dispatch on the current state machine state.
        if self.__status == READING_SCHEMA:
            self.__reading_schema(name, attrs)
        elif self.__status == READING_TABLE:
            self.__reading_table(name, attrs)
        elif self.__status == READING_TABLE_COLUMN:
            self.__reading_table_column(name, attrs)
    def endElement(self, name):
        # SAX callback: dispatch on the current state machine state.
        if self.__status == READING_SCHEMA:
            self.__end_schema(name)
        elif self.__status == READING_TABLE:
            self.__end_table(name)
        elif self.__status == READING_TABLE_COLUMN:
            self.__end_table_column(name)
    def characters(self, content):
        # SAX callback: buffer text only inside elements of interest.
        if self.__concatData:
            self.__charBuffer.append(content)
    def __reading_schema(self, name, attrs):
        # Schema level: capture the schema name and detect table starts.
        if self.__check_item_id("name", name):
            self.__start_reading_data()
        if self.__check_item_id("table", name):
            self.__status = READING_TABLE
            self.__currentTable = TapTableMeta()
            self.__currentTable.schema = self.__currentSchemaName
            # Optional ESA TAP+ extension attribute with the table size.
            if 'esatapplus:size_bytes' in attrs:
                self.__currentTable.size_bytes = int(attrs.getValue('esatapplus:size_bytes'))
    def __end_schema(self, name):
        if self.__check_item_id("name", name):
            self.__currentSchemaName = self.__create_string_from_buffer()
            self.__stop_reading_data()
    def __reading_table(self, name, attrs):
        # Table level: capture name/description and detect column starts.
        if self.__check_item_id("name", name):
            self.__start_reading_data()
        elif self.__check_item_id("description", name):
            self.__start_reading_data()
        elif self.__check_item_id("column", name):
            self.__status = READING_TABLE_COLUMN
            self.__currentColumn = TapColumn(attrs.getValue('esatapplus:flags'))
    def __end_table(self, name):
        if self.__check_item_id("name", name):
            self.__stop_reading_data()
            self.__currentTable.name = self.__create_string_from_buffer()
        elif self.__check_item_id("description", name):
            self.__stop_reading_data()
            self.__currentTable.description = self.__create_string_from_buffer()
        elif self.__check_item_id("table", name):
            # Table finished: store it and return to schema level.
            self.__tables.append(self.__currentTable)
            self.__status = READING_SCHEMA
    def __reading_table_column(self, name, attrs):
        # Column level: all of these sub-elements carry plain text content.
        if self.__check_item_id("name", name):
            self.__start_reading_data()
        elif self.__check_item_id("description", name):
            self.__start_reading_data()
        elif self.__check_item_id("unit", name):
            self.__start_reading_data()
        elif self.__check_item_id("ucd", name):
            self.__start_reading_data()
        elif self.__check_item_id("utype", name):
            self.__start_reading_data()
        elif self.__check_item_id("datatype", name):
            self.__start_reading_data()
        elif self.__check_item_id("flag", name):
            self.__start_reading_data()
    def __end_table_column(self, name):
        # Store the buffered text into the matching TapColumn attribute.
        if self.__check_item_id("name", name):
            self.__currentColumn.name = self.__create_string_from_buffer()
            self.__stop_reading_data()
        elif self.__check_item_id("description", name):
            self.__currentColumn.description = self.__create_string_from_buffer()
            self.__stop_reading_data()
        elif self.__check_item_id("unit", name):
            self.__currentColumn.unit = self.__create_string_from_buffer()
            self.__stop_reading_data()
        elif self.__check_item_id("ucd", name):
            self.__currentColumn.ucd = self.__create_string_from_buffer()
            self.__stop_reading_data()
        elif self.__check_item_id("utype", name):
            self.__currentColumn.utype = self.__create_string_from_buffer()
            self.__stop_reading_data()
        elif self.__check_item_id("datatype", name):
            self.__currentColumn.data_type = self.__create_string_from_buffer()
            self.__stop_reading_data()
        elif self.__check_item_id("flag", name):
            self.__currentColumn.flag = self.__create_string_from_buffer()
            self.__stop_reading_data()
        if self.__check_item_id("column", name):
            # Column finished: attach it and return to table level.
            self.__status = READING_TABLE
            self.__currentTable.add_column(self.__currentColumn)
    def __show_attributes(self, attrs):
        # Debug helper: attribute names of the current element.
        return str(attrs.getNames())
    def __nothing(self, name, attrs):
        # No-op handler placeholder.
        pass
    def get_table(self):
        '''Return the first parsed table, or None if nothing was parsed.'''
        if len(self.__tables) < 1:
            return None
        return self.__tables[0]
    def get_tables(self):
        '''Return the list of all parsed tables.'''
        return self.__tables
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@utils@tap@xmlparser@tableSaxParser.py@.PATH_END.py
|
{
"filename": "problem.py",
"repo_name": "tigerchenlu98/rebound",
"repo_path": "rebound_extracted/rebound-main/python_examples/horizons/problem.py",
"type": "Python"
}
|
import matplotlib; matplotlib.use("pdf")
import matplotlib.pyplot as plt
import rebound
import socket
import sys
import os.path
import os

# Cache the simulation so repeated runs do not re-query NASA Horizons.
filename = "cache.bin"
solar_system_objects = ["Sun", "Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune", "C/2014 Q2"]

if os.path.isfile(filename):
    # Try to load simulation from file
    sim = rebound.Simulation(filename)
else:
    sim = rebound.Simulation()
    # Get data from NASA Horizons
    try:
        sim.add(solar_system_objects)
    except socket.error:
        print("A socket error occured. Maybe Horizons is down?")
        sys.exit(0)  # we ignore the error and exit
    sim.move_to_com()
    # Configure simulation
    sim.integrator = "whfast"
    # BUG FIX: the original assigned ``sim.set_dt = 0.01``, which only
    # created an unused attribute; ``dt`` is the actual timestep attribute.
    sim.dt = 0.01
    # Let's save it for next time
    # Note: sim.save_to_file() only saves the particle data, not the integrator settings, etc.
    sim.save_to_file(filename)

sim.status()

import numpy as np
Nout = 1000
times = np.linspace(0., 16.*np.pi, Nout)  # 8 years (one year is 2*pi in G=1 units)
x = np.zeros((sim.N, Nout))
y = np.zeros((sim.N, Nout))
ps = sim.particles

# Record the xy position of every particle at each output time.
for ti, t in enumerate(times):
    sim.integrate(t)
    for i, p in enumerate(ps):
        x[i][ti] = p.x
        y[i][ti] = p.y

fig = plt.figure(figsize=(11, 5))

def plot(zoom):
    """Plot all trajectories on the current axes ``ax``, labelling the Sun
    and any body whose final position lies in the outer part of the frame."""
    ax.set_xlim([-zoom, zoom])
    ax.set_ylim([-zoom, zoom])
    ax.set_xlabel("x [AU]")
    ax.set_ylabel("y [AU]")
    # BUG FIX: the original used the Python 2 builtin ``xrange``, which does
    # not exist on Python 3 (the rest of the script is Python 3).
    for i in range(0, sim.N):
        plt.plot(x[i], y[i])
        if x[i][-1]*x[i][-1]+y[i][-1]*y[i][-1] > 0.01*zoom*zoom or i == 0:
            ax.annotate(solar_system_objects[i], xy=(x[i][-1], y[i][-1]), horizontalalignment="center")

ax = plt.subplot(121)
plot(zoom=24.)
ax = plt.subplot(122)
plot(zoom=1.2)
plt.savefig("orbits.pdf")
|
tigerchenlu98REPO_NAMEreboundPATH_START.@rebound_extracted@rebound-main@python_examples@horizons@problem.py@.PATH_END.py
|
{
"filename": "test_factorize.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/datetimes/methods/test_factorize.py",
"type": "Python"
}
|
import numpy as np
import pytest
from pandas import (
DatetimeIndex,
Index,
date_range,
factorize,
)
import pandas._testing as tm
class TestDatetimeIndexFactorize:
    """Tests for DatetimeIndex.factorize / pd.factorize on datetime data."""

    def test_factorize(self):
        index = DatetimeIndex(
            ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"]
        )
        expected_codes = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
        expected_uniques = DatetimeIndex(["2014-01", "2014-02", "2014-03"])

        # Already-sorted input: sort=False and sort=True agree.
        codes, uniques = index.factorize()
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_index_equal(uniques, expected_uniques)
        assert uniques.freq == expected_uniques.freq

        codes, uniques = index.factorize(sort=True)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_index_equal(uniques, expected_uniques)
        assert uniques.freq == expected_uniques.freq

        # tz must be preserved
        index = index.tz_localize("Asia/Tokyo")
        expected_uniques = expected_uniques.tz_localize("Asia/Tokyo")

        codes, uniques = index.factorize()
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_index_equal(uniques, expected_uniques)
        assert uniques.freq == expected_uniques.freq

        # Unsorted input: sort=True yields sorted uniques ...
        shuffled = DatetimeIndex(
            ["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"]
        )
        expected_codes = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
        expected_uniques = DatetimeIndex(["2014-01", "2014-02", "2014-03"])

        codes, uniques = shuffled.factorize(sort=True)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_index_equal(uniques, expected_uniques)
        assert uniques.freq == expected_uniques.freq

        # ... while sort=False keeps first-appearance order.
        expected_codes = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
        expected_uniques = DatetimeIndex(["2014-03", "2014-02", "2014-01"])

        codes, uniques = shuffled.factorize()
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_index_equal(uniques, expected_uniques)
        assert uniques.freq == expected_uniques.freq

    def test_factorize_preserves_freq(self):
        # GH#38120 freq should be preserved
        monthly = date_range("2000-01", periods=4, freq="ME", tz="Asia/Tokyo")
        expected_codes = np.array([0, 1, 2, 3], dtype=np.intp)

        codes, uniques = monthly.factorize()
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_index_equal(uniques, monthly)
        assert uniques.freq == monthly.freq

        # Same result through the top-level pd.factorize.
        codes, uniques = factorize(monthly)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_index_equal(uniques, monthly)
        assert uniques.freq == monthly.freq

    def test_factorize_tz(self, tz_naive_fixture, index_or_series):
        # GH#13750
        tz = tz_naive_fixture
        hourly = date_range("2016-11-05", freq="h", periods=100, tz=tz)
        obj = index_or_series(hourly.repeat(5))

        codes, uniques = obj.factorize()
        tm.assert_numpy_array_equal(
            codes, np.arange(100, dtype=np.intp).repeat(5)
        )
        expected_uniques = hourly._with_freq(None)
        tm.assert_index_equal(uniques, expected_uniques)
        assert uniques.freq == expected_uniques.freq

    def test_factorize_dst(self, index_or_series):
        # GH#13750 — both a DST-transition week and an ordinary summer week.
        for start in ("2016-11-06", "2016-06-13"):
            hourly = date_range(start, freq="h", periods=12, tz="US/Eastern")
            obj = index_or_series(hourly)

            codes, uniques = obj.factorize()
            tm.assert_numpy_array_equal(codes, np.arange(12, dtype=np.intp))
            tm.assert_index_equal(uniques, hourly)
            if index_or_series is Index:
                assert uniques.freq == hourly.freq

    @pytest.mark.parametrize("sort", [True, False])
    def test_factorize_no_freq_non_nano(self, tz_naive_fixture, sort):
        # GH#51978 case that does not go through the fastpath based on
        # non-None freq
        tz = tz_naive_fixture
        shuffled = date_range("2016-11-06", freq="h", periods=5, tz=tz)[[0, 4, 1, 3, 2]]
        exp_codes, exp_uniques = shuffled.factorize(sort=sort)

        # Non-nanosecond units must produce the same codes, with uniques
        # converted to the same unit; likewise via Series.factorize.
        as_seconds = shuffled.as_unit("s")
        for candidate in (as_seconds, as_seconds.to_series()):
            res_codes, res_uniques = candidate.factorize(sort=sort)
            tm.assert_numpy_array_equal(res_codes, exp_codes)
            tm.assert_index_equal(res_uniques, exp_uniques.as_unit("s"))
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@indexes@datetimes@methods@test_factorize.py@.PATH_END.py
|
{
"filename": "test_verifyCalib.py",
"repo_name": "lsst/cp_verify",
"repo_path": "cp_verify_extracted/cp_verify-main/tests/test_verifyCalib.py",
"type": "Python"
}
|
# This file is part of cp_verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
import unittest
import lsst.utils.tests
import lsst.cp.verify as cpVerify
from lsst.ip.isr import CrosstalkCalib
class VerifyCrosstalkTestCase(lsst.utils.tests.TestCase):
    """Unit test for stats code - crosstalk cases.
    """

    @staticmethod
    def _run_verify(coeffs, valid):
        """Build a two-amp CrosstalkCalib with the given coefficient and
        validity matrices, run the verify task on it, and return the calib
        together with the resulting statistics dictionary."""
        calib = CrosstalkCalib(nAmp=2)
        calib.coeffs = np.array(coeffs)
        calib.coeffErr = np.array([[0.0, 1e-5], [1e-5, 0.0]])
        calib.coeffNum = np.array([[0, 100], [100, 0]])
        calib.coeffValid = np.array(valid, dtype=bool)

        task = cpVerify.CpVerifyCrosstalkTask(config=cpVerify.CpVerifyCrosstalkConfig())
        run_result = task.run(calib, dimensions={'instrument': 'fakeCam', 'detector': 'det00'})
        return calib, run_result.outputStats

    def test_crosstalk(self):
        """Test a subset of the output values to identify that the
        image stat methods haven't changed.
        """
        # Significant coefficients flagged valid: verification must fail.
        calib, stats = self._run_verify(
            coeffs=[[0.0, 1e-4], [1e-4, 0.0]],
            valid=[[False, True], [True, False]],
        )
        self.assertEqual(stats['DET']['N_AMP'], calib.nAmp)
        self.assertEqual(stats['DET']['N_VALID'], 2)
        self.assertFalse(stats['SUCCESS'])
        self.assertFalse(stats['VERIFY']['NO_SIGNIFICANT_DETECTION'])

    def test_crosstalkNull(self):
        """Test a subset of the output values to identify that the
        image stat methods haven't changed.
        """
        # Tiny coefficients, none flagged valid: verification must pass.
        calib, stats = self._run_verify(
            coeffs=[[0.0, 1e-6], [1e-6, 0.0]],
            valid=[[False, False], [False, False]],
        )
        self.assertEqual(stats['DET']['N_AMP'], calib.nAmp)
        self.assertEqual(stats['DET']['N_VALID'], 0)
        self.assertTrue(stats['SUCCESS'])
        self.assertTrue(stats['VERIFY']['NO_SIGNIFICANT_DETECTION'])
# Standard lsst.utils.tests boilerplate; the base class supplies the checks.
class MemoryTester(lsst.utils.tests.MemoryTestCase):
    pass
def setup_module(module):
    """Pytest hook: initialize the lsst.utils test framework for this module."""
    lsst.utils.tests.init()
# Allow running this test module directly with `python test_verifyCalib.py`.
if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()
|
lsstREPO_NAMEcp_verifyPATH_START.@cp_verify_extracted@cp_verify-main@tests@test_verifyCalib.py@.PATH_END.py
|
{
"filename": "DataPropagation.py",
"repo_name": "wlxu/RelicClass",
"repo_path": "RelicClass_extracted/RelicClass-master/RealSpaceInterface/Calc2D/DataPropagation.py",
"type": "Python"
}
|
import numpy as np
# Legacy single-field version; uses one-dimensional interpolation.
def PropagateDatawithListOld(k, FValue, zredindex, transferFunctionlist):
    """Apply the transfer function at redshift index *zredindex* to the
    k-space field *FValue* and return the result with FValue's shape."""
    flat_k = k.ravel()
    flat_field = FValue.ravel()
    propagated = transferFunctionlist[zredindex](flat_k) * flat_field
    return propagated.reshape(FValue.shape)
def PropagateDatawithList(k, FValue, zredindex, transferFunctionlist):
    """Propagate *FValue* for every field in *transferFunctionlist*.

    Returns a dict mapping each field name to the propagated array
    (same shape as FValue), using the interpolator at *zredindex*.
    """
    flat_k = k.ravel()
    flat_field = FValue.ravel()
    return {
        field: (interp[zredindex](flat_k) * flat_field).reshape(FValue.shape)
        for field, interp in transferFunctionlist.items()
    }
# Uses two-dimensional interpolation and propagates all redshifts at once
# (fastest option, but with high memory consumption).
def PropagateAllData(k, FValue, allzred, transferFunction):
    """Propagate the k-space field to every redshift in *allzred*.

    Returns a complex array of shape (len(allzred),) + FValue.shape.
    """
    n_z = len(allzred)
    n_kx, n_ky = FValue.shape
    allFValue = np.ones((n_z, n_kx, n_ky), dtype=complex)
    for row in range(n_kx):
        # One 2-D interpolation call fills this kx row for all redshifts.
        allFValue[:, row, :] = transferFunction(allzred, k[row]) * FValue[row]
    return allFValue
# Uses 2-dimensional interpolation for a single redshift (slowest, but can be
# useful if the set of redshifts changes very often).
def PropagateData(k, FValue, zred, transferFunction):
    """Propagate the k-space field *FValue* to redshift *zred*.

    Bug fix: the original assigned results to (and returned) an undefined
    name ``allFValue`` instead of ``FValuenew``, so every call raised
    NameError.  Results now go into ``FValuenew``, which is returned.
    """
    FValuenew = np.ones(FValue.shape, dtype=complex)
    for kxindex in range(FValue.shape[0]):
        FValuenew[kxindex, :] = transferFunction(zred, k[kxindex]) * FValue[kxindex]
    return FValuenew
|
wlxuREPO_NAMERelicClassPATH_START.@RelicClass_extracted@RelicClass-master@RealSpaceInterface@Calc2D@DataPropagation.py@.PATH_END.py
|
{
"filename": "codeofconduct.md",
"repo_name": "21cmfast/21cmEMU",
"repo_path": "21cmEMU_extracted/21cmEMU-main/docs/codeofconduct.md",
"type": "Markdown"
}
|
```{include} ../CODE_OF_CONDUCT.md
```
|
21cmfastREPO_NAME21cmEMUPATH_START.@21cmEMU_extracted@21cmEMU-main@docs@codeofconduct.md@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergeo/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `bgcolorsrc` property of `scattergeo.hoverlabel`."""

    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="scattergeo.hoverlabel", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before forwarding.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergeo@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "MakeMasterContbars.py",
"repo_name": "Keck-DataReductionPipelines/KCWI_DRP",
"repo_path": "KCWI_DRP_extracted/KCWI_DRP-master/kcwidrp/primitives/MakeMasterContbars.py",
"type": "Python"
}
|
from keckdrpframework.primitives.base_img import BaseImg
from kcwidrp.primitives.kcwi_file_primitives import kcwi_fits_reader, \
kcwi_fits_writer, strip_fname , get_unique_STATEID_master_name
import os
import ccdproc
import numpy as np
from astropy.stats import mad_std
class MakeMasterContbars(BaseImg):
    """
    Stack continuum bars frames into master continuum bars
    Generate a master cont bars frame based on the instrument config parameter
    contbars_min_nframes, which defaults to 1 for the BLUE channel and 3 for
    the RED channel. It is assumed that each frame is well-exposed and the
    combine method 'median' will be used to mitigate cosmic rays (especially
    for the RED channel). A high sigma clipping of 2.0 is used to help with
    the CRs.
    Uses the ccdproc.combine routine to perform the stacking.
    Writes out a \*_mcbars.fits file and records a master cont bars frame in
    the proc table, no matter how many frames are combined.
    """
    def __init__(self, action, context):
        BaseImg.__init__(self, action, context)
        # Use the shared pipeline logger so output interleaves with the rest
        # of the framework's messages.
        self.logger = context.pipeline_logger
    def _pre_condition(self):
        """
        Checks if we can build a stacked frame based on the processing table
        """
        # get list of arc frames
        # NOTE(review): the message says "stack_arcs" but this primitive
        # stacks CONTBARS frames -- presumably copied from the arc stacker.
        self.logger.info("Checking precondition for stack_arcs")
        self.combine_list = self.context.proctab.search_proctab(
            frame=self.action.args.ccddata, target_type='CONTBARS',
            target_group=self.action.args.groupid)
        self.logger.info(f"pre condition got {len(self.combine_list)},"
                         f" expecting {self.action.args.min_files}")
        # do we meet the criterion?
        if len(self.combine_list) >= self.action.args.min_files:
            return True
        else:
            return False
    def _perform(self):
        """
        Returns an Argument() with the parameters that depends on this operation
        """
        args = self.action.args
        # Median combine with an upper 2-sigma clip mitigates cosmic rays.
        method = 'median'
        sig_up = 2.0
        # Output suffix and new proctab type come from the action arguments.
        suffix = args.new_type.lower()
        log_string = MakeMasterContbars.__module__
        combine_list = list(self.combine_list['filename'])
        if self.action.args.min_files > 1:
            # Multi-frame case: read each intensity image and stack them.
            stack = []
            stackf = []
            for cbs in combine_list:
                # get arc intensity (int) image file name in redux directory
                stackf.append(strip_fname(cbs) + '_int.fits')
                cbsfn = os.path.join(self.config.instrument.cwd, self.config.instrument.output_directory, stackf[-1])
                # using [0] gets just the image data
                stack.append(kcwi_fits_reader(cbsfn)[0])
            stacked = ccdproc.combine(stack, method=method, sigma_clip=True,
                                      sigma_clip_low_thresh=None,
                                      sigma_clip_high_thresh=sig_up,
                                      sigma_clip_func=np.ma.median,
                                      sigma_clip_dev_func=mad_std)
            stacked.unit = stack[0].unit
            # Record provenance of the stack in the FITS header.
            stacked.header['IMTYPE'] = args.new_type
            stacked.header['NSTACK'] = (len(combine_list),
                                        'number of images stacked')
            stacked.header['STCKMETH'] = (method, 'method used for stacking')
            stacked.header['STCKSIGU'] = (sig_up,
                                          'Upper sigma rejection for stacking')
            for ii, fname in enumerate(stackf):
                stacked.header['STACKF%d' % (ii + 1)] = (fname,
                                                         "stack input file")
            stacked.header['HISTORY'] = log_string
            self.action.args.ccddata = stacked
            # get master arc output name
            mcbars_name = strip_fname(combine_list[0]) + '_' + suffix + '.fits'
            kcwi_fits_writer(stacked, output_file=mcbars_name,
                             output_dir=self.config.instrument.output_directory)
            self.context.proctab.update_proctab(frame=stacked, suffix=suffix,
                                                newtype=args.new_type,
                                                filename=self.action.args.name) ### HERE
            # self.action.args.name = stacked.header['OFNAME']
        else:
            # Single-frame case: pass the input through, updating only the
            # headers, and still write a master file / proctab entry.
            mcbars_name = strip_fname(combine_list[0]) + '_' + suffix + '.fits'
            self.action.args.ccddata.header['IMTYPE'] = args.new_type
            self.action.args.ccddata.header['HISTORY'] = log_string
            kcwi_fits_writer(self.action.args.ccddata, output_file=mcbars_name,
                             output_dir=self.config.instrument.output_directory)
            self.context.proctab.update_proctab(frame=self.action.args.ccddata,
                                                suffix=suffix,
                                                newtype=args.new_type,
                                                filename=self.action.args.name)
        self.logger.info(log_string)
        # Persist the updated processing table to disk.
        self.context.proctab.write_proctab(tfil=self.config.instrument.procfile)
        return self.action.args
    # END: class MakeMasterContbars()
|
Keck-DataReductionPipelinesREPO_NAMEKCWI_DRPPATH_START.@KCWI_DRP_extracted@KCWI_DRP-master@kcwidrp@primitives@MakeMasterContbars.py@.PATH_END.py
|
{
"filename": "pool__quantize__p.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage-python/pool__quantize__p.md",
"type": "Markdown"
}
|
Create a pool from a file and quantize it while loading the data. This compresses the size of the initial dataset and makes it possible to load huge datasets that cannot otherwise fit into RAM.
{% note info %}
The input data should contain only numerical features (other types are not currently supported).
{% endnote %}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage-python@pool__quantize__p.md@.PATH_END.py
|
{
"filename": "traces.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioReco/eventbrowser/apps/traces.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function # , unicode_literals
from dash import html, no_update
import NuRadioReco.eventbrowser.apps.trace_plots.rec_electric_field_trace
import NuRadioReco.eventbrowser.apps.trace_plots.rec_electric_field_spectrum
import NuRadioReco.eventbrowser.apps.trace_plots.channel_time_trace
import NuRadioReco.eventbrowser.apps.trace_plots.channel_spectrum
import NuRadioReco.eventbrowser.apps.trace_plots.multi_channel_plot
from dash.dependencies import State, Input, Output
from NuRadioReco.eventbrowser.app import app
import logging
# Module logger; inherit the verbosity configured on the parent NuRadioReco
# logger so the event browser follows the global log level.
logger = logging.getLogger('traces')
parent_logger = logging.getLogger('NuRadioReco')
logger.setLevel(parent_logger.level)
# Page layout: a hidden trigger div followed by three rows of panels --
# e-field trace/spectrum, channel trace/spectrum, and individual channels.
# Each panel heading carries a Show/Hide button wired to a callback below.
layout = html.Div([
    html.Div(id='trigger-trace', style={'display': 'none'}),
    html.Div([
        html.Div([
            html.Div([
                'Electric Field Traces',
                html.Button('Show', id='toggle_efield_traces', n_clicks=0, style={'float':'right'})
            ], className='panel-heading'),
            html.Div(NuRadioReco.eventbrowser.apps.trace_plots.rec_electric_field_trace.layout,
                     className='panel-body', id='efield_traces_layout')
        ], className='panel panel-default', style={'flex': '1'}),
        html.Div([
            html.Div([
                'Electric Field Spectrum',
                ' (y-scale: ',
                html.Button(id='efield-spectrum-log-linear-switch', children='linear'),
                ')',
                html.Button('Show', id='toggle_efield_spectrum', n_clicks=0, style={'float':'right'})
            ], className='panel-heading'),
            html.Div(NuRadioReco.eventbrowser.apps.trace_plots.rec_electric_field_spectrum.layout,
                     className='panel-body', id='efield_spectrum_layout')
        ], className='panel panel-default', style={'flex': '1'})
    ], style={'display': 'flex'}),
    html.Div([
        html.Div([
            html.Div([
                'Channel Traces',
                html.Button('Show', id='toggle_channel_traces', n_clicks=0, style={'float':'right'})
            ], className='panel-heading'),
            html.Div(NuRadioReco.eventbrowser.apps.trace_plots.channel_time_trace.layout,
                     className='panel-body', id='channel_traces_layout')
        ], className='panel panel-default', style={'flex': '1'}),
        html.Div([
            html.Div([
                'Channel Spectrum',
                ' (y-scale: ',
                html.Button(id='channel-spectrum-log-linear-switch', children='linear'),
                ')',
                html.Button('Show', id='toggle_channel_spectrum', n_clicks=0, style={'float':'right'})
            ], className='panel-heading'),
            html.Div(NuRadioReco.eventbrowser.apps.trace_plots.channel_spectrum.layout,
                     className='panel-body', id='channel_spectrum_layout')
        ], className='panel panel-default', style={'flex': '1'})
    ], style={'display': 'flex'}),
    html.Div([
        html.Div([
            html.Div('Individual Channels', className='panel-heading'),
            html.Div(NuRadioReco.eventbrowser.apps.trace_plots.multi_channel_plot.layout, className='panel-body')
        ], className='panel panel-default', style={'flex': '1'})
    ], style={'display': 'flex'})
])
@app.callback(
    [Output('channel_traces_layout', 'style'),
     Output('toggle_channel_traces', 'children')],
    [Input('toggle_channel_traces', 'n_clicks')],
    State('toggle_channel_traces', 'children'),
    prevent_initial_call=True
)
def toggle_channel_trace_plot(button_clicks, showhide):
    """Show or hide the channel time-trace panel and flip the button label.

    Fix: the original passed ``prevent_initial_callbacks`` (an app-level
    setting) instead of the per-callback ``prevent_initial_call`` keyword,
    so the option had no effect on this callback.
    """
    if showhide == 'Hide':
        return {'flex': '1', 'display': 'none'}, 'Show'
    return {'flex': '1'}, 'Hide'
@app.callback(
    [Output('channel_spectrum_layout', 'style'),
     Output('toggle_channel_spectrum', 'children')],
    [Input('toggle_channel_spectrum', 'n_clicks')],
    State('toggle_channel_spectrum', 'children'),
    prevent_initial_call=True
)
def toggle_channel_spectrum_plot(button_clicks, showhide):
    """Show or hide the channel spectrum panel and flip the button label.

    Fix: the original passed ``prevent_initial_callbacks`` (an app-level
    setting) instead of the per-callback ``prevent_initial_call`` keyword,
    so the option had no effect on this callback.
    """
    if showhide == 'Hide':
        return {'flex': '1', 'display': 'none'}, 'Show'
    return {'flex': '1'}, 'Hide'
@app.callback(
    [Output('efield_traces_layout', 'style'),
     Output('toggle_efield_traces', 'children')],
    [Input('toggle_efield_traces', 'n_clicks')],
    State('toggle_efield_traces', 'children'),
    prevent_initial_call=True
)
def toggle_efield_traces_plot(button_clicks, showhide):
    """Show or hide the electric-field trace panel and flip the button label.

    Fix: the original passed ``prevent_initial_callbacks`` (an app-level
    setting) instead of the per-callback ``prevent_initial_call`` keyword,
    so the option had no effect on this callback.
    """
    if showhide == 'Hide':
        return {'flex': '1', 'display': 'none'}, 'Show'
    return {'flex': '1'}, 'Hide'
@app.callback(
    [Output('efield_spectrum_layout', 'style'),
     Output('toggle_efield_spectrum', 'children')],
    [Input('toggle_efield_spectrum', 'n_clicks')],
    State('toggle_efield_spectrum', 'children'),
    prevent_initial_call=True
)
def toggle_efield_spectrum_plot(button_clicks, showhide):
    """Show or hide the electric-field spectrum panel and flip the button label.

    Fix: the original passed ``prevent_initial_callbacks`` (an app-level
    setting) instead of the per-callback ``prevent_initial_call`` keyword,
    so the option had no effect on this callback.
    """
    if showhide == 'Hide':
        return {'flex': '1', 'display': 'none'}, 'Show'
    return {'flex': '1'}, 'Hide'
# callback to change frequency spectra between linear and log scale
@app.callback(
    [Output('efield-spectrum', 'figure', allow_duplicate=True),  # 'duplicate' callback - requires dash >= 2.9
     Output('channel-spectrum', 'figure', allow_duplicate=True),  # 'duplicate' callback - requires dash >= 2.9
     Output('time-traces', 'figure', allow_duplicate=True),
     Output('channel-spectrum-log-linear-switch', 'children'),
     Output('efield-spectrum-log-linear-switch', 'children')
     ],
    [Input('channel-spectrum-log-linear-switch', 'n_clicks'),
     Input('efield-spectrum-log-linear-switch', 'n_clicks')],
    [State('channel-spectrum-log-linear-switch', 'children'),
     State('efield-spectrum', 'figure'),
     State('channel-spectrum', 'figure'),
     State('time-traces', 'figure'),
     ], prevent_initial_call=True
)
def toggle_linear_log_scale(button_clicks, button2_clicks, button_current_value, efield_spectrum, channel_spectrum, multichannel_plot):
    """Toggle the y-axis of all spectrum figures between linear and log.

    The new scale is the opposite of the current button label; both switch
    buttons are kept in sync.  Figures that are not laid out yet (missing
    'layout'/'yaxis' keys) are left untouched via ``no_update``.
    Cleanup: the original bound the caught KeyError to an unused name ``e``.
    """
    new_value = 'log' if button_current_value == 'linear' else 'linear'
    outputs = []
    for spectrum_plot in (efield_spectrum, channel_spectrum):
        try:
            spectrum_plot['layout']['yaxis']['type'] = new_value
            outputs.append(spectrum_plot)
        except KeyError:
            outputs.append(no_update)
    try:
        # In the multi-channel figure the numbered y-axes ('yaxis2', ...)
        # alternate; the even-numbered ones hold the spectra, so only those
        # are rescaled (the bare 'yaxis' key is skipped via len(key) > 5).
        yaxes = [key for key in multichannel_plot['layout'] if 'yaxis' in key and len(key) > 5]
        for yaxis in yaxes:
            if int(yaxis.split('yaxis')[-1]) % 2 == 0:
                multichannel_plot['layout'][yaxis]['type'] = new_value
        outputs.append(multichannel_plot)
    except KeyError:
        outputs.append(no_update)
    # One label per switch button, both set to the new scale.
    outputs.append(new_value)
    outputs.append(new_value)
    return outputs
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioReco@eventbrowser@apps@traces.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/TidalPy/utilities/dimensions/__init__.py",
"type": "Python"
}
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@TidalPy@utilities@dimensions@__init__.py@.PATH_END.py
|
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolargl/line/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the `color` property of `scatterpolargl.line`."""

    def __init__(
        self, plotly_name="color", parent_name="scatterpolargl.line", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before forwarding.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolargl@line@_color.py@.PATH_END.py
|
{
"filename": "test_ddt_log_norm_likelihood.py",
"repo_name": "sibirrer/hierArc",
"repo_path": "hierArc_extracted/hierArc-main/test/test_Likelihood/test_LensLikelihood/test_ddt_log_norm_likelihood.py",
"type": "Python"
}
|
import numpy as np
import numpy.testing as npt
from hierarc.Likelihood.LensLikelihood.ddt_lognorm_likelihood import (
DdtLogNormLikelihood,
)
from scipy.stats import lognorm
import pytest
class TestTDLikelihoodLogNorm(object):
    """Compare DdtLogNormLikelihood against scipy's lognorm distribution."""

    def setup_method(self):
        # Log-normal posterior parameters for the time-delay distance.
        self.z_L = 0.8
        self.z_S = 3.0
        self.ddt_mu = 3.5
        self.ddt_sigma = 0.2
        self.kwargs_post = {
            "z_lens": self.z_L,
            "z_source": self.z_S,
            "ddt_mu": self.ddt_mu,
            "ddt_sigma": self.ddt_sigma,
        }
        # Evaluation grid and the scipy reference distribution.
        self.ddt_grid = np.arange(1, 10000, 50)
        self.scipy_lognorm = lognorm(scale=np.exp(self.ddt_mu), s=self.ddt_sigma)
        self.ll_object = DdtLogNormLikelihood(**self.kwargs_post)

    def test_log_likelihood(self):
        """The likelihood must match scipy's logpdf up to the dropped
        constant term 0.5 * log(2 * pi)."""
        computed = self.ll_object.log_likelihood(self.ddt_grid)
        reference = self.scipy_lognorm.logpdf(self.ddt_grid)
        npt.assert_almost_equal(
            computed, reference + 0.5 * np.log(2 * np.pi), decimal=7
        )
# Allow running this test module directly with python.
if __name__ == "__main__":
    pytest.main()
|
sibirrerREPO_NAMEhierArcPATH_START.@hierArc_extracted@hierArc-main@test@test_Likelihood@test_LensLikelihood@test_ddt_log_norm_likelihood.py@.PATH_END.py
|
{
"filename": "_x0.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/waterfall/_x0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class X0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the `x0` property of `waterfall` traces."""

    def __init__(self, plotly_name="x0", parent_name="waterfall", **kwargs):
        # Pull the overridable defaults out of kwargs before forwarding.
        edit_type = kwargs.pop("edit_type", "calc+clearAxisTypes")
        role = kwargs.pop("role", "info")
        super(X0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@waterfall@_x0.py@.PATH_END.py
|
{
"filename": "python.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pytest/py3/_pytest/python.py",
"type": "Python"
}
|
"""Python test discovery, setup and run of test functions."""
import dataclasses
import enum
import fnmatch
import inspect
import itertools
import os
import sys
import types
import warnings
from collections import Counter
from collections import defaultdict
from functools import partial
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Dict
from typing import Generator
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Pattern
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
import _pytest
from _pytest import fixtures
from _pytest import nodes
from _pytest._code import filter_traceback
from _pytest._code import getfslineno
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import TerminalRepr
from _pytest._code.code import Traceback
from _pytest._io import TerminalWriter
from _pytest._io.saferepr import saferepr
from _pytest.compat import ascii_escaped
from _pytest.compat import assert_never
from _pytest.compat import final
from _pytest.compat import get_default_arg_names
from _pytest.compat import get_real_func
from _pytest.compat import getimfunc
from _pytest.compat import getlocation
from _pytest.compat import is_async_function
from _pytest.compat import is_generator
from _pytest.compat import LEGACY_PATH
from _pytest.compat import NOTSET
from _pytest.compat import safe_getattr
from _pytest.compat import safe_isclass
from _pytest.compat import STRING_TYPES
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.deprecated import check_ispytest
from _pytest.deprecated import INSTANCE_COLLECTOR
from _pytest.deprecated import NOSE_SUPPORT_METHOD
from _pytest.fixtures import FuncFixtureInfo
from _pytest.main import Session
from _pytest.mark import MARK_GEN
from _pytest.mark import ParameterSet
from _pytest.mark.structures import get_unpacked_marks
from _pytest.mark.structures import Mark
from _pytest.mark.structures import MarkDecorator
from _pytest.mark.structures import normalize_mark_list
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.pathlib import bestrelpath
from _pytest.pathlib import fnmatch_ex
from _pytest.pathlib import import_path
from _pytest.pathlib import ImportPathMismatchError
from _pytest.pathlib import parts
from _pytest.pathlib import visit
from _pytest.scope import Scope
from _pytest.warning_types import PytestCollectionWarning
from _pytest.warning_types import PytestReturnNotNoneWarning
from _pytest.warning_types import PytestUnhandledCoroutineWarning
if TYPE_CHECKING:
from typing_extensions import Literal
from _pytest.scope import _ScopeName
_PYTEST_DIR = Path(_pytest.__file__).parent
def pytest_addoption(parser: Parser) -> None:
    """Register command-line options and ini settings for Python test discovery."""
    group = parser.getgroup("general")
    group.addoption(
        "--fixtures",
        "--funcargs",
        action="store_true",
        dest="showfixtures",
        default=False,
        help="Show available fixtures, sorted by plugin appearance "
        "(fixtures with leading '_' are only shown with '-v')",
    )
    group.addoption(
        "--fixtures-per-test",
        action="store_true",
        dest="show_fixtures_per_test",
        default=False,
        help="Show fixtures per test",
    )
    parser.addini(
        "python_files",
        type="args",
        # NOTE: default is also used in AssertionRewritingHook.
        default=["test_*.py", "*_test.py"],
        help="Glob-style file patterns for Python test module discovery",
    )
    parser.addini(
        "python_classes",
        type="args",
        default=["Test"],
        help="Prefixes or glob names for Python test class discovery",
    )
    parser.addini(
        "python_functions",
        type="args",
        default=["test"],
        help="Prefixes or glob names for Python test function and method discovery",
    )
    parser.addini(
        "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
        type="bool",
        default=False,
        help="Disable string escape non-ASCII characters, might cause unwanted "
        "side effects(use at your own risk)",
    )
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
    """Handle the --fixtures / --fixtures-per-test command-line modes.

    Returns an exit code when one of the listing modes ran, otherwise None
    so normal test collection proceeds.
    """
    opts = config.option
    if opts.showfixtures:
        showfixtures(config)
        return 0
    if opts.show_fixtures_per_test:
        show_fixtures_per_test(config)
        return 0
    return None
def pytest_generate_tests(metafunc: "Metafunc") -> None:
    """Expand every @pytest.mark.parametrize marker into concrete call specs."""
    parametrize_markers = metafunc.definition.iter_markers(name="parametrize")
    for marker in parametrize_markers:
        metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker)
def pytest_configure(config: Config) -> None:
    """Register the markers provided by this plugin (parametrize, usefixtures)."""
    config.addinivalue_line(
        "markers",
        "parametrize(argnames, argvalues): call a test function multiple "
        "times passing in different arguments in turn. argvalues generally "
        "needs to be a list of values if argnames specifies only one name "
        "or a list of tuples of values if argnames specifies multiple names. "
        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
        "decorated test function, one with arg1=1 and another with arg1=2."
        "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info "
        "and examples.",
    )
    config.addinivalue_line(
        "markers",
        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
        "all of the specified fixtures. see "
        "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ",
    )
def async_warn_and_skip(nodeid: str) -> None:
    """Warn that the async test *nodeid* cannot run natively, then skip it.

    Bug fix: the original called ``msg.format(nodeid)`` but the message
    contained no ``{}`` placeholder, so the format call was a no-op and the
    warning never identified the offending test.  The nodeid is now part of
    the message.
    """
    msg = f"async def functions are not natively supported and have been skipped: {nodeid}\n"
    msg += (
        "You need to install a suitable plugin for your async framework, for example:\n"
    )
    msg += "  - anyio\n"
    msg += "  - pytest-asyncio\n"
    msg += "  - pytest-tornasync\n"
    msg += "  - pytest-trio\n"
    msg += "  - pytest-twisted"
    # The warning carries the details; the skip reason points the user at it.
    warnings.warn(PytestUnhandledCoroutineWarning(msg))
    skip(reason="async def function and no async plugin installed (see warnings)")
@hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]:
    """Call the underlying test function with its resolved fixture arguments.

    Skips (with a warning) async tests when no async plugin handled them,
    and warns when a test returns a non-None value.  Returns True to tell
    the hook caller that the test was executed.
    """
    testfunction = pyfuncitem.obj
    if is_async_function(testfunction):
        async_warn_and_skip(pyfuncitem.nodeid)
    # Only pass the fixtures the test actually declared as parameters.
    funcargs = pyfuncitem.funcargs
    testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
    result = testfunction(**testargs)
    if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
        # An un-awaited coroutine/async generator slipped through: no async
        # plugin consumed it, so treat it like an unsupported async test.
        async_warn_and_skip(pyfuncitem.nodeid)
    elif result is not None:
        warnings.warn(
            PytestReturnNotNoneWarning(
                f"Expected None, but {pyfuncitem.nodeid} returned {result!r}, which will be an error in a "
                "future version of pytest.  Did you mean to use `assert` instead of `return`?"
            )
        )
    return True
def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Optional["Module"]:
    """Collect ``.py`` files that match the configured discovery patterns.

    Files given directly on the command line (init paths) are always
    collected; others must match ``python_files`` or be an ``__init__.py``.
    """
    if file_path.suffix != ".py":
        return None
    if not parent.session.isinitpath(file_path):
        patterns = parent.config.getini("python_files") + ["__init__.py"]
        if not path_matches_patterns(file_path, patterns):
            return None
    ihook = parent.session.gethookproxy(file_path)
    module: Module = ihook.pytest_pycollect_makemodule(
        module_path=file_path, parent=parent
    )
    return module
def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool:
    """Return True if *path* matches at least one glob in *patterns*."""
    for pattern in patterns:
        if fnmatch_ex(pattern, path):
            return True
    return False
def pytest_pycollect_makemodule(module_path: Path, parent) -> "Module":
    """Create the collector for a Python file.

    ``__init__.py`` files become Package collectors (they root a package);
    every other file becomes a plain Module collector.
    """
    if module_path.name != "__init__.py":
        mod: Module = Module.from_parent(parent, path=module_path)
        return mod
    pkg: Package = Package.from_parent(parent, path=module_path)
    return pkg
@hookimpl(trylast=True)
def pytest_pycollect_makeitem(
    collector: Union["Module", "Class"], name: str, obj: object
) -> Union[None, nodes.Item, nodes.Collector, List[Union[nodes.Item, nodes.Collector]]]:
    """Fallback collector factory: turn a module/class attribute into nodes.

    Runs trylast so third-party plugins get first pick.  Returns a Class
    collector, a Function (xfail stub for removed yield-tests), a list of
    Functions, or None when the attribute is not collectable.
    """
    assert isinstance(collector, (Class, Module)), type(collector)
    # Nothing was collected elsewhere, let's do it here.
    if safe_isclass(obj):
        if collector.istestclass(obj, name):
            klass: Class = Class.from_parent(collector, name=name, obj=obj)
            return klass
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it.
        obj = getattr(obj, "__func__", obj)
        # We need to try and unwrap the function if it's a functools.partial
        # or a functools.wrapped.
        # We mustn't if it's been wrapped with mock.patch (python 2 only).
        if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))):
            filename, lineno = getfslineno(obj)
            warnings.warn_explicit(
                message=PytestCollectionWarning(
                    "cannot collect %r because it is not a function." % name
                ),
                category=None,
                filename=str(filename),
                lineno=lineno + 1,
            )
        elif getattr(obj, "__test__", True):
            if is_generator(obj):
                # Yield-tests are gone since pytest 4.0: collect a stub that
                # is marked xfail(run=False) and warns at collection time.
                res: Function = Function.from_parent(collector, name=name)
                reason = "yield tests were removed in pytest 4.0 - {name} will be ignored".format(
                    name=name
                )
                res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
                res.warn(PytestCollectionWarning(reason))
                return res
            else:
                # Regular test function: may expand to several items via
                # parametrization.
                return list(collector._genfunctions(name, obj))
    return None
class PyobjMixin(nodes.Node):
    """this mix-in inherits from Node to carry over the typing information
    as its intended to always mix in before a node
    its position in the mro is unaffected"""
    # When True, markers attached to the underlying Python object are copied
    # onto the node the first time ``obj`` is accessed (see the property).
    _ALLOW_MARKERS = True
    @property
    def module(self):
        """Python module object this node was collected from (can be None)."""
        node = self.getparent(Module)
        return node.obj if node is not None else None
    @property
    def cls(self):
        """Python class object this node was collected from (can be None)."""
        node = self.getparent(Class)
        return node.obj if node is not None else None
    @property
    def instance(self):
        """Python instance object the function is bound to.
        Returns None if not a test method, e.g. for a standalone test function,
        a staticmethod, a class or a module.
        """
        node = self.getparent(Function)
        return getattr(node.obj, "__self__", None) if node is not None else None
    @property
    def obj(self):
        """Underlying Python object (lazily resolved and cached in _obj)."""
        obj = getattr(self, "_obj", None)
        if obj is None:
            self._obj = obj = self._getobj()
            # XXX evil hack
            # used to avoid Function marker duplication
            if self._ALLOW_MARKERS:
                self.own_markers.extend(get_unpacked_marks(self.obj))
                # This assumes that `obj` is called before there is a chance
                # to add custom keys to `self.keywords`, so no fear of overriding.
                self.keywords.update((mark.name, mark) for mark in self.own_markers)
        return obj
    @obj.setter
    def obj(self, value):
        self._obj = value
    def _getobj(self):
        """Get the underlying Python object. May be overwritten by subclasses."""
        # TODO: Improve the type of `parent` such that assert/ignore aren't needed.
        assert self.parent is not None
        obj = self.parent.obj  # type: ignore[attr-defined]
        return getattr(obj, self.name)
    def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str:
        """Return Python path relative to the containing module."""
        chain = self.listchain()
        chain.reverse()
        parts = []
        for node in chain:
            name = node.name
            if isinstance(node, Module):
                # Drop the .py extension from the module segment.
                name = os.path.splitext(name)[0]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        parts.reverse()
        return ".".join(parts)
    def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
        """Return (path, line number, dotted path) used for test reporting."""
        # XXX caching?
        obj = self.obj
        compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None)
        if isinstance(compat_co_firstlineno, int):
            # nose compatibility
            file_path = sys.modules[obj.__module__].__file__
            assert file_path is not None
            # Prefer the source .py over a compiled .pyc for reporting.
            if file_path.endswith(".pyc"):
                file_path = file_path[:-1]
            path: Union["os.PathLike[str]", str] = file_path
            lineno = compat_co_firstlineno
        else:
            path, lineno = getfslineno(obj)
        modpath = self.getmodpath()
        assert isinstance(lineno, int)
        return path, lineno, modpath
# As an optimization, these builtin attribute names are pre-ignored when
# iterating over an object during collection -- the pytest_pycollect_makeitem
# hook is not called for them.
# fmt: off
class _EmptyClass: pass  # noqa: E701
IGNORED_ATTRIBUTES = frozenset.union(  # noqa: E305
    frozenset(),
    # Module.
    dir(types.ModuleType("empty_module")),
    # Some extra module attributes the above doesn't catch.
    {"__builtins__", "__file__", "__cached__"},
    # Class.
    dir(_EmptyClass),
    # Instance.
    dir(_EmptyClass()),
)
# The helper class exists only to enumerate default attributes above.
del _EmptyClass
# fmt: on
class PyCollector(PyobjMixin, nodes.Collector):
    """Base class for collectors that generate test items from a Python object."""

    def funcnamefilter(self, name: str) -> bool:
        # True if `name` matches a prefix or glob from the `python_functions` ini option.
        return self._matches_prefix_or_glob_option("python_functions", name)

    def isnosetest(self, obj: object) -> bool:
        """Look for the __test__ attribute, which is applied by the
        @nose.tools.istest decorator.
        """
        # We explicitly check for "is True" here to not mistakenly treat
        # classes with a custom __getattr__ returning something truthy (like a
        # function) as test classes.
        return safe_getattr(obj, "__test__", False) is True

    def classnamefilter(self, name: str) -> bool:
        # True if `name` matches a prefix or glob from the `python_classes` ini option.
        return self._matches_prefix_or_glob_option("python_classes", name)

    def istestfunction(self, obj: object, name: str) -> bool:
        """Return True if `obj` should be collected as a test function."""
        if self.funcnamefilter(name) or self.isnosetest(obj):
            if isinstance(obj, (staticmethod, classmethod)):
                # staticmethods and classmethods need to be unwrapped.
                obj = safe_getattr(obj, "__func__", False)
            # Fixtures are never collected as tests, even if name-matched.
            return callable(obj) and fixtures.getfixturemarker(obj) is None
        else:
            return False

    def istestclass(self, obj: object, name: str) -> bool:
        """Return True if `obj` should be collected as a test class."""
        return self.classnamefilter(name) or self.isnosetest(obj)

    def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool:
        """Check if the given name matches the prefix or glob-pattern defined
        in ini configuration."""
        for option in self.config.getini(option_name):
            if name.startswith(option):
                return True
            # Check that name looks like a glob-string before calling fnmatch
            # because this is called for every name in each collected module,
            # and fnmatch is somewhat expensive to call.
            elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
                name, option
            ):
                return True
        return False

    def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
        """Collect children by scanning the object's (and its bases') __dict__."""
        # An explicit __test__ = False opts the whole object out of collection.
        if not getattr(self.obj, "__test__", True):
            return []

        # Avoid random getattrs and peek in the __dict__ instead.
        dicts = [getattr(self.obj, "__dict__", {})]
        if isinstance(self.obj, type):
            for basecls in self.obj.__mro__:
                dicts.append(basecls.__dict__)

        # In each class, nodes should be definition ordered.
        # __dict__ is definition ordered.
        seen: Set[str] = set()
        dict_values: List[List[Union[nodes.Item, nodes.Collector]]] = []
        ihook = self.ihook
        for dic in dicts:
            values: List[Union[nodes.Item, nodes.Collector]] = []
            # Note: seems like the dict can change during iteration -
            # be careful not to remove the list() without consideration.
            for name, obj in list(dic.items()):
                if name in IGNORED_ATTRIBUTES:
                    continue
                if name in seen:
                    continue
                seen.add(name)
                res = ihook.pytest_pycollect_makeitem(
                    collector=self, name=name, obj=obj
                )
                if res is None:
                    continue
                elif isinstance(res, list):
                    values.extend(res)
                else:
                    values.append(res)
            dict_values.append(values)

        # Between classes in the class hierarchy, reverse-MRO order -- nodes
        # inherited from base classes should come before subclasses.
        result = []
        for values in reversed(dict_values):
            result.extend(values)
        return result

    def _genfunctions(self, name: str, funcobj) -> Iterator["Function"]:
        """Yield Function items for `funcobj`: one per parametrized call spec,
        or a single unparametrized Function if no parametrization applies."""
        modulecol = self.getparent(Module)
        assert modulecol is not None
        module = modulecol.obj
        clscol = self.getparent(Class)
        cls = clscol and clscol.obj or None

        definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
        fixtureinfo = definition._fixtureinfo

        # pytest_generate_tests impls call metafunc.parametrize() which fills
        # metafunc._calls, the outcome of the hook.
        metafunc = Metafunc(
            definition=definition,
            fixtureinfo=fixtureinfo,
            config=self.config,
            cls=cls,
            module=module,
            _ispytest=True,
        )
        methods = []
        if hasattr(module, "pytest_generate_tests"):
            methods.append(module.pytest_generate_tests)
        if cls is not None and hasattr(cls, "pytest_generate_tests"):
            methods.append(cls().pytest_generate_tests)
        self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))

        if not metafunc._calls:
            yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
        else:
            # Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs.
            fm = self.session._fixturemanager
            fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)

            # Add_funcarg_pseudo_fixture_def may have shadowed some fixtures
            # with direct parametrization, so make sure we update what the
            # function really needs.
            fixtureinfo.prune_dependency_tree()

            for callspec in metafunc._calls:
                subname = f"{name}[{callspec.id}]"
                yield Function.from_parent(
                    self,
                    name=subname,
                    callspec=callspec,
                    fixtureinfo=fixtureinfo,
                    keywords={callspec.id: True},
                    originalname=name,
                )
class Module(nodes.File, PyCollector):
    """Collector for test classes and functions in a Python module."""

    def _getobj(self):
        # The Python object backing this node is the imported module.
        return self._importtestmodule()

    def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
        # Register xunit-style setup/teardown as hidden fixtures before the
        # generic PyCollector collection runs.
        self._inject_setup_module_fixture()
        self._inject_setup_function_fixture()
        self.session._fixturemanager.parsefactories(self)
        return super().collect()

    def _inject_setup_module_fixture(self) -> None:
        """Inject a hidden autouse, module scoped fixture into the collected module object
        that invokes setUpModule/tearDownModule if either or both are available.

        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        has_nose = self.config.pluginmanager.has_plugin("nose")
        setup_module = _get_first_non_fixture_func(
            self.obj, ("setUpModule", "setup_module")
        )
        if setup_module is None and has_nose:
            # The name "setup" is too common - only treat as fixture if callable.
            setup_module = _get_first_non_fixture_func(self.obj, ("setup",))
            if not callable(setup_module):
                setup_module = None
        teardown_module = _get_first_non_fixture_func(
            self.obj, ("tearDownModule", "teardown_module")
        )
        if teardown_module is None and has_nose:
            teardown_module = _get_first_non_fixture_func(self.obj, ("teardown",))
            # Same as "setup" above - only treat as fixture if callable.
            if not callable(teardown_module):
                teardown_module = None
        if setup_module is None and teardown_module is None:
            return

        @fixtures.fixture(
            autouse=True,
            scope="module",
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_module_fixture_{self.obj.__name__}",
        )
        def xunit_setup_module_fixture(request) -> Generator[None, None, None]:
            if setup_module is not None:
                _call_with_optional_argument(setup_module, request.module)
            yield
            if teardown_module is not None:
                _call_with_optional_argument(teardown_module, request.module)

        self.obj.__pytest_setup_module = xunit_setup_module_fixture

    def _inject_setup_function_fixture(self) -> None:
        """Inject a hidden autouse, function scoped fixture into the collected module object
        that invokes setup_function/teardown_function if either or both are available.

        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
        teardown_function = _get_first_non_fixture_func(
            self.obj, ("teardown_function",)
        )
        if setup_function is None and teardown_function is None:
            return

        @fixtures.fixture(
            autouse=True,
            scope="function",
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_function_fixture_{self.obj.__name__}",
        )
        def xunit_setup_function_fixture(request) -> Generator[None, None, None]:
            if request.instance is not None:
                # in this case we are bound to an instance, so we need to let
                # setup_method handle this
                yield
                return
            if setup_function is not None:
                _call_with_optional_argument(setup_function, request.function)
            yield
            if teardown_function is not None:
                _call_with_optional_argument(teardown_function, request.function)

        self.obj.__pytest_setup_function = xunit_setup_function_fixture

    def _importtestmodule(self):
        """Import this node's module, turning import-time failures into
        CollectError with a readable report."""
        # We assume we are only called once per module.
        importmode = self.config.getoption("--import-mode")
        try:
            mod = import_path(self.path, mode=importmode, root=self.config.rootpath)
        except SyntaxError as e:
            raise self.CollectError(
                ExceptionInfo.from_current().getrepr(style="short")
            ) from e
        except ImportPathMismatchError as e:
            raise self.CollectError(
                "import file mismatch:\n"
                "imported module %r has this __file__ attribute:\n"
                "  %s\n"
                "which is not the same as the test file we want to collect:\n"
                "  %s\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules" % e.args
            ) from e
        except ImportError as e:
            exc_info = ExceptionInfo.from_current()
            # Unless very verbose, strip internal frames from the traceback.
            if self.config.getoption("verbose") < 2:
                exc_info.traceback = exc_info.traceback.filter(filter_traceback)
            exc_repr = (
                exc_info.getrepr(style="short")
                if exc_info.traceback
                else exc_info.exconly()
            )
            formatted_tb = str(exc_repr)
            raise self.CollectError(
                "ImportError while importing test module '{path}'.\n"
                "Hint: make sure your test modules/packages have valid Python names.\n"
                "Traceback:\n"
                "{traceback}".format(path=self.path, traceback=formatted_tb)
            ) from e
        except skip.Exception as e:
            if e.allow_module_level:
                raise
            raise self.CollectError(
                "Using pytest.skip outside of a test will skip the entire module. "
                "If that's your intention, pass `allow_module_level=True`. "
                "If you want to skip a specific test or an entire class, "
                "use the @pytest.mark.skip or @pytest.mark.skipif decorators."
            ) from e
        self.config.pluginmanager.consider_module(mod)
        return mod
class Package(Module):
    """Collector for files and directories in a Python package -- directories
    with an `__init__.py` file."""

    def __init__(
        self,
        fspath: Optional[LEGACY_PATH],
        parent: nodes.Collector,
        # NOTE: following args are unused:
        config=None,
        session=None,
        nodeid=None,
        path: Optional[Path] = None,
    ) -> None:
        # NOTE: Could be just the following, but kept as-is for compat.
        # nodes.FSCollector.__init__(self, fspath, parent=parent)
        session = parent.session
        nodes.FSCollector.__init__(
            self,
            fspath=fspath,
            path=path,
            parent=parent,
            config=config,
            session=session,
            nodeid=nodeid,
        )
        # The node's path is the package's __init__.py, so its display name is
        # the containing directory's name.
        self.name = self.path.parent.name

    def setup(self) -> None:
        # Not using fixtures to call setup_module here because autouse fixtures
        # from packages are not called automatically (#4085).
        setup_module = _get_first_non_fixture_func(
            self.obj, ("setUpModule", "setup_module")
        )
        if setup_module is not None:
            _call_with_optional_argument(setup_module, self.obj)

        teardown_module = _get_first_non_fixture_func(
            self.obj, ("tearDownModule", "teardown_module")
        )
        if teardown_module is not None:
            func = partial(_call_with_optional_argument, teardown_module, self.obj)
            self.addfinalizer(func)

    def _recurse(self, direntry: "os.DirEntry[str]") -> bool:
        """Return True if the given directory entry should be descended into."""
        if direntry.name == "__pycache__":
            return False
        fspath = Path(direntry.path)
        ihook = self.session.gethookproxy(fspath.parent)
        if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
            return False
        return True

    def _collectfile(
        self, fspath: Path, handle_dupes: bool = True
    ) -> Sequence[nodes.Collector]:
        """Collect `fspath` via the pytest_collect_file hook, honoring ignore
        rules and (optionally) duplicate-path filtering."""
        assert (
            fspath.is_file()
        ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
            fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink()
        )
        ihook = self.session.gethookproxy(fspath)
        if not self.session.isinitpath(fspath):
            if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
                return ()
        if handle_dupes:
            keepduplicates = self.config.getoption("keepduplicates")
            if not keepduplicates:
                duplicate_paths = self.config.pluginmanager._duplicatepaths
                if fspath in duplicate_paths:
                    return ()
                else:
                    duplicate_paths.add(fspath)
        return ihook.pytest_collect_file(file_path=fspath, parent=self)  # type: ignore[no-any-return]

    def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
        this_path = self.path.parent

        # Always collect the __init__ first.
        if path_matches_patterns(self.path, self.config.getini("python_files")):
            yield Module.from_parent(self, path=self.path)

        pkg_prefixes: Set[Path] = set()
        for direntry in visit(str(this_path), recurse=self._recurse):
            path = Path(direntry.path)

            # We will visit our own __init__.py file, in which case we skip it.
            if direntry.is_file():
                if direntry.name == "__init__.py" and path.parent == this_path:
                    continue

            # Skip entries inside already-seen sub-packages; those are
            # collected by their own Package node (except the __init__.py that
            # identifies the package itself).
            parts_ = parts(direntry.path)
            if any(
                str(pkg_prefix) in parts_ and pkg_prefix / "__init__.py" != path
                for pkg_prefix in pkg_prefixes
            ):
                continue

            if direntry.is_file():
                yield from self._collectfile(path)
            elif not direntry.is_dir():
                # Broken symlink or invalid/missing file.
                continue
            elif path.joinpath("__init__.py").is_file():
                pkg_prefixes.add(path)
def _call_with_optional_argument(func, arg) -> None:
    """Invoke ``func``, passing ``arg`` only if the callable accepts one
    positional argument; otherwise call it with no arguments.

    Used for xunit-style setup/teardown functions, which may optionally take
    the module/class/function they are attached to.
    """
    accepted = func.__code__.co_argcount
    if inspect.ismethod(func):
        # The bound ``self`` does not count as a caller-supplied argument.
        accepted -= 1
    if accepted == 0:
        func()
    else:
        func(arg)
def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> Optional[object]:
    """Return the first attribute of *obj* among *names* that is usable as a
    setup/teardown xunit-style function.

    Attributes marked as pytest fixtures are skipped, to avoid calling them
    twice.
    """
    for candidate_name in names:
        candidate: Optional[object] = getattr(obj, candidate_name, None)
        if candidate is None:
            continue
        if fixtures.getfixturemarker(candidate) is None:
            return candidate
    return None
class Class(PyCollector):
    """Collector for test methods (and nested classes) in a Python class."""

    @classmethod
    def from_parent(cls, parent, *, name, obj=None, **kw):
        """The public constructor."""
        # Note: `obj` is accepted here but not forwarded.
        return super().from_parent(name=name, parent=parent, **kw)

    def newinstance(self):
        # A fresh instance of the collected test class.
        return self.obj()

    def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
        # An explicit __test__ = False opts the class out of collection.
        if not safe_getattr(self.obj, "__test__", True):
            return []
        # Classes with custom constructors cannot be instantiated by pytest,
        # so warn and refuse to collect them.
        if hasinit(self.obj):
            assert self.parent is not None
            self.warn(
                PytestCollectionWarning(
                    "cannot collect test class %r because it has a "
                    "__init__ constructor (from: %s)"
                    % (self.obj.__name__, self.parent.nodeid)
                )
            )
            return []
        elif hasnew(self.obj):
            assert self.parent is not None
            self.warn(
                PytestCollectionWarning(
                    "cannot collect test class %r because it has a "
                    "__new__ constructor (from: %s)"
                    % (self.obj.__name__, self.parent.nodeid)
                )
            )
            return []

        self._inject_setup_class_fixture()
        self._inject_setup_method_fixture()

        self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
        return super().collect()

    def _inject_setup_class_fixture(self) -> None:
        """Inject a hidden autouse, class scoped fixture into the collected class object
        that invokes setup_class/teardown_class if either or both are available.

        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
        teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",))
        if setup_class is None and teardown_class is None:
            return

        @fixtures.fixture(
            autouse=True,
            scope="class",
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
        )
        def xunit_setup_class_fixture(cls) -> Generator[None, None, None]:
            if setup_class is not None:
                func = getimfunc(setup_class)
                _call_with_optional_argument(func, self.obj)
            yield
            if teardown_class is not None:
                func = getimfunc(teardown_class)
                _call_with_optional_argument(func, self.obj)

        self.obj.__pytest_setup_class = xunit_setup_class_fixture

    def _inject_setup_method_fixture(self) -> None:
        """Inject a hidden autouse, function scoped fixture into the collected class object
        that invokes setup_method/teardown_method if either or both are available.

        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        has_nose = self.config.pluginmanager.has_plugin("nose")
        setup_name = "setup_method"
        setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
        emit_nose_setup_warning = False
        # Fall back to nose's "setup"/"teardown" names, remembering to emit a
        # deprecation warning when the fixture actually runs.
        if setup_method is None and has_nose:
            setup_name = "setup"
            emit_nose_setup_warning = True
            setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
        teardown_name = "teardown_method"
        teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
        emit_nose_teardown_warning = False
        if teardown_method is None and has_nose:
            teardown_name = "teardown"
            emit_nose_teardown_warning = True
            teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
        if setup_method is None and teardown_method is None:
            return

        @fixtures.fixture(
            autouse=True,
            scope="function",
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
        )
        def xunit_setup_method_fixture(self, request) -> Generator[None, None, None]:
            # Here `self` is the test class instance, not the Class collector.
            method = request.function
            if setup_method is not None:
                func = getattr(self, setup_name)
                _call_with_optional_argument(func, method)
                if emit_nose_setup_warning:
                    warnings.warn(
                        NOSE_SUPPORT_METHOD.format(
                            nodeid=request.node.nodeid, method="setup"
                        ),
                        stacklevel=2,
                    )
            yield
            if teardown_method is not None:
                func = getattr(self, teardown_name)
                _call_with_optional_argument(func, method)
                if emit_nose_teardown_warning:
                    warnings.warn(
                        NOSE_SUPPORT_METHOD.format(
                            nodeid=request.node.nodeid, method="teardown"
                        ),
                        stacklevel=2,
                    )

        self.obj.__pytest_setup_method = xunit_setup_method_fixture
# Exposed as `pytest.Instance` via the module-level __getattr__ defined below.
class InstanceDummy:
    """Instance used to be a node type between Class and Function. It has been
    removed in pytest 7.0. Some plugins exist which reference `pytest.Instance`
    only to ignore it; this dummy class keeps them working. This will be removed
    in pytest 8."""
def __getattr__(name: str) -> object:
    """Module-level ``__getattr__`` (PEP 562) serving the deprecated
    ``Instance`` attribute.

    Accessing ``Instance`` returns :class:`InstanceDummy` and emits the
    deprecation warning; any other unknown attribute raises AttributeError.
    """
    if name == "Instance":
        # Bug fix: `2` was previously passed positionally, landing in the
        # `category` parameter (where it was silently ignored because the
        # message is already a Warning instance). It is meant as the
        # stacklevel, so the warning points at the user's attribute access.
        warnings.warn(INSTANCE_COLLECTOR, stacklevel=2)
        return InstanceDummy
    raise AttributeError(f"module {__name__} has no attribute {name}")
def hasinit(obj: object) -> bool:
    """Return True if *obj* defines its own (non-default) ``__init__``."""
    init: object = getattr(obj, "__init__", None)
    return bool(init) and init != object.__init__
def hasnew(obj: object) -> bool:
    """Return True if *obj* defines its own (non-default) ``__new__``."""
    new: object = getattr(obj, "__new__", None)
    return bool(new) and new != object.__new__
@final
@dataclasses.dataclass(frozen=True)
class IdMaker:
    """Make IDs for a parametrization."""

    __slots__ = (
        "argnames",
        "parametersets",
        "idfn",
        "ids",
        "config",
        "nodeid",
        "func_name",
    )

    # The argnames of the parametrization.
    argnames: Sequence[str]
    # The ParameterSets of the parametrization.
    parametersets: Sequence[ParameterSet]
    # Optionally, a user-provided callable to make IDs for parameters in a
    # ParameterSet.
    idfn: Optional[Callable[[Any], Optional[object]]]
    # Optionally, explicit IDs for ParameterSets by index.
    ids: Optional[Sequence[Optional[object]]]
    # Optionally, the pytest config.
    # Used for controlling ASCII escaping, and for calling the
    # :hook:`pytest_make_parametrize_id` hook.
    config: Optional[Config]
    # Optionally, the ID of the node being parametrized.
    # Used only for clearer error messages.
    nodeid: Optional[str]
    # Optionally, the ID of the function being parametrized.
    # Used only for clearer error messages.
    func_name: Optional[str]

    def make_unique_parameterset_ids(self) -> List[str]:
        """Make a unique identifier for each ParameterSet, that may be used to
        identify the parametrization in a node ID.

        Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is
        - user-provided id, if given
        - else an id derived from the value, applicable for certain types
        - else <argname><parameterset index>
        The counter suffix is appended only in case a string wouldn't be unique
        otherwise.
        """
        resolved_ids = list(self._limit_ids(self._resolve_ids(), limit=500))
        # All IDs must be unique!
        if len(resolved_ids) != len(set(resolved_ids)):
            # Record the number of occurrences of each ID.
            id_counts = Counter(resolved_ids)
            # Map the ID to its next suffix.
            id_suffixes: Dict[str, int] = defaultdict(int)
            # Suffix non-unique IDs to make them unique.
            for index, id in enumerate(resolved_ids):
                if id_counts[id] > 1:
                    resolved_ids[index] = f"{id}{id_suffixes[id]}"
                    id_suffixes[id] += 1
        return resolved_ids

    def _limit_ids(self, ids, limit=500):
        """Truncate ids longer than *limit*, disambiguating equal truncated
        prefixes with a per-prefix "-<n>" counter suffix."""
        prefix_count = {}
        # Reserve some room for the disambiguating suffix (presumably why 6 is
        # subtracted -- keeps the final id within the original limit).
        limit -= 6
        assert limit > 0
        for idval in ids:
            if len(idval) > limit:
                prefix = idval[:limit]
                idx = prefix_count.get(prefix, -1) + 1
                prefix_count[prefix] = idx
                idval = "{}-{}".format(prefix, idx)
            yield idval

    def _resolve_ids(self) -> Iterable[str]:
        """Resolve IDs for all ParameterSets (may contain duplicates)."""
        for idx, parameterset in enumerate(self.parametersets):
            if parameterset.id is not None:
                # ID provided directly - pytest.param(..., id="...")
                yield parameterset.id
            elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
                # ID provided in the IDs list - parametrize(..., ids=[...]).
                yield self._idval_from_value_required(self.ids[idx], idx)
            else:
                # ID not provided - generate it.
                yield "-".join(
                    self._idval(val, argname, idx)
                    for val, argname in zip(parameterset.values, self.argnames)
                )

    def _idval(self, val: object, argname: str, idx: int) -> str:
        """Make an ID for a parameter in a ParameterSet."""
        # Resolution order: user callable, then hook, then the value itself,
        # finally the generic <argname><idx> fallback.
        idval = self._idval_from_function(val, argname, idx)
        if idval is not None:
            return idval
        idval = self._idval_from_hook(val, argname)
        if idval is not None:
            return idval
        idval = self._idval_from_value(val)
        if idval is not None:
            return idval
        return self._idval_from_argname(argname, idx)

    def _idval_from_function(
        self, val: object, argname: str, idx: int
    ) -> Optional[str]:
        """Try to make an ID for a parameter in a ParameterSet using the
        user-provided id callable, if given."""
        if self.idfn is None:
            return None
        try:
            id = self.idfn(val)
        except Exception as e:
            prefix = f"{self.nodeid}: " if self.nodeid is not None else ""
            msg = "error raised while trying to determine id of parameter '{}' at position {}"
            msg = prefix + msg.format(argname, idx)
            raise ValueError(msg) from e
        if id is None:
            return None
        return self._idval_from_value(id)

    def _idval_from_hook(self, val: object, argname: str) -> Optional[str]:
        """Try to make an ID for a parameter in a ParameterSet by calling the
        :hook:`pytest_make_parametrize_id` hook."""
        if self.config:
            id: Optional[str] = self.config.hook.pytest_make_parametrize_id(
                config=self.config, val=val, argname=argname
            )
            return id
        return None

    def _idval_from_value(self, val: object) -> Optional[str]:
        """Try to make an ID for a parameter in a ParameterSet from its value,
        if the value type is supported."""
        if isinstance(val, STRING_TYPES):
            return _ascii_escaped_by_config(val, self.config)
        elif val is None or isinstance(val, (float, int, bool, complex)):
            return str(val)
        elif isinstance(val, Pattern):
            return ascii_escaped(val.pattern)
        elif val is NOTSET:
            # Fallback to default. Note that NOTSET is an enum.Enum.
            pass
        elif isinstance(val, enum.Enum):
            return str(val)
        elif isinstance(getattr(val, "__name__", None), str):
            # Name of a class, function, module, etc.
            name: str = getattr(val, "__name__")
            return name
        return None

    def _idval_from_value_required(self, val: object, idx: int) -> str:
        """Like _idval_from_value(), but fails if the type is not supported."""
        id = self._idval_from_value(val)
        if id is not None:
            return id

        # Fail.
        if self.func_name is not None:
            prefix = f"In {self.func_name}: "
        elif self.nodeid is not None:
            prefix = f"In {self.nodeid}: "
        else:
            prefix = ""
        msg = (
            f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. "
            "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__."
        )
        fail(msg, pytrace=False)

    @staticmethod
    def _idval_from_argname(argname: str, idx: int) -> str:
        """Make an ID for a parameter in a ParameterSet from the argument name
        and the index of the ParameterSet."""
        return str(argname) + str(idx)
@final
@dataclasses.dataclass(frozen=True)
class CallSpec2:
    """A planned parameterized invocation of a test function.

    Calculated during collection for a given test function's Metafunc.
    Once collection is over, each callspec is turned into a single Item
    and stored in item.callspec.
    """

    # arg name -> arg value which will be passed to the parametrized test
    # function (direct parameterization).
    funcargs: Dict[str, object] = dataclasses.field(default_factory=dict)
    # arg name -> arg value which will be passed to a fixture of the same name
    # (indirect parametrization).
    params: Dict[str, object] = dataclasses.field(default_factory=dict)
    # arg name -> arg index.
    indices: Dict[str, int] = dataclasses.field(default_factory=dict)
    # Used for sorting parametrized resources.
    _arg2scope: Dict[str, Scope] = dataclasses.field(default_factory=dict)
    # Parts which will be added to the item's name in `[..]` separated by "-".
    _idlist: List[str] = dataclasses.field(default_factory=list)
    # Marks which will be applied to the item.
    marks: List[Mark] = dataclasses.field(default_factory=list)

    def setmulti(
        self,
        *,
        valtypes: Mapping[str, "Literal['params', 'funcargs']"],
        argnames: Iterable[str],
        valset: Iterable[object],
        id: str,
        marks: Iterable[Union[Mark, MarkDecorator]],
        scope: Scope,
        param_index: int,
    ) -> "CallSpec2":
        """Return a new CallSpec2 extending this one with another
        parametrize() application (this instance is frozen and unchanged).

        :raises ValueError: If an argname was already parametrized.
        """
        funcargs = self.funcargs.copy()
        params = self.params.copy()
        indices = self.indices.copy()
        arg2scope = self._arg2scope.copy()
        for arg, val in zip(argnames, valset):
            if arg in params or arg in funcargs:
                raise ValueError(f"duplicate parametrization of {arg!r}")
            valtype_for_arg = valtypes[arg]
            if valtype_for_arg == "params":
                params[arg] = val
            elif valtype_for_arg == "funcargs":
                funcargs[arg] = val
            else:
                assert_never(valtype_for_arg)
            indices[arg] = param_index
            arg2scope[arg] = scope
        return CallSpec2(
            funcargs=funcargs,
            params=params,
            indices=indices,
            _arg2scope=arg2scope,
            _idlist=[*self._idlist, id],
            marks=[*self.marks, *normalize_mark_list(marks)],
        )

    def getparam(self, name: str) -> object:
        """Return the indirect-parametrization value for `name`.

        :raises ValueError: If `name` was not parametrized indirectly.
        """
        try:
            return self.params[name]
        except KeyError as e:
            raise ValueError(name) from e

    @property
    def id(self) -> str:
        # The complete parametrization id, e.g. the "x-1" in "test_foo[x-1]".
        return "-".join(self._idlist)
@final
class Metafunc:
"""Objects passed to the :hook:`pytest_generate_tests` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
"""
    def __init__(
        self,
        definition: "FunctionDefinition",
        fixtureinfo: fixtures.FuncFixtureInfo,
        config: Config,
        cls=None,
        module=None,
        *,
        _ispytest: bool = False,
    ) -> None:
        # Private constructor: pytest creates Metafunc instances itself and
        # hands them to pytest_generate_tests implementations.
        check_ispytest(_ispytest)

        #: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
        self.definition = definition

        #: Access to the :class:`pytest.Config` object for the test session.
        self.config = config

        #: The module object where the test function is defined in.
        self.module = module

        #: Underlying Python test function.
        self.function = definition.obj

        #: Set of fixture names required by the test function.
        self.fixturenames = fixtureinfo.names_closure

        #: Class object where the test function is defined in or ``None``.
        self.cls = cls

        self._arg2fixturedefs = fixtureinfo.name2fixturedefs

        # Result of parametrize().
        self._calls: List[CallSpec2] = []
    def parametrize(
        self,
        argnames: Union[str, Sequence[str]],
        argvalues: Iterable[Union[ParameterSet, Sequence[object], object]],
        indirect: Union[bool, Sequence[str]] = False,
        ids: Optional[
            Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]]
        ] = None,
        scope: "Optional[_ScopeName]" = None,
        *,
        _param_mark: Optional[Mark] = None,
    ) -> None:
        """Add new invocations to the underlying test function using the list
        of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to setup expensive resources
        see about setting indirect to do it rather than at test setup time.

        Can be called multiple times per test function (but only on different
        argument names), in which case each call parametrizes all previous
        parametrizations, e.g.

        ::

            unparametrized:         t
            parametrize ["x", "y"]: t[x], t[y]
            parametrize [1, 2]:     t[x-1], t[x-2], t[y-1], t[y-2]

        :param argnames:
            A comma-separated string denoting one or more argument names, or
            a list/tuple of argument strings.
        :param argvalues:
            The list of argvalues determines how often a test is invoked with
            different argument values.

            If only one argname was specified argvalues is a list of values.
            If N argnames were specified, argvalues must be a list of
            N-tuples, where each tuple-element specifies a value for its
            respective argname.
        :param indirect:
            A list of arguments' names (subset of argnames) or a boolean.
            If True the list contains all names from the argnames. Each
            argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.
        :param ids:
            Sequence of (or generator for) ids for ``argvalues``,
            or a callable to return part of the id for each argvalue.

            With sequences (and generators like ``itertools.count()``) the
            returned ids should be of type ``string``, ``int``, ``float``,
            ``bool``, or ``None``.
            They are mapped to the corresponding index in ``argvalues``.
            ``None`` means to use the auto-generated id.

            If it is a callable it will be called for each entry in
            ``argvalues``, and the return value is used as part of the
            auto-generated id for the whole set (where parts are joined with
            dashes ("-")).
            This is useful to provide more specific ids for certain items, e.g.
            dates.  Returning ``None`` will use an auto-generated id.

            If no ids are provided they will be generated automatically from
            the argvalues.
        :param scope:
            If specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        argnames, parametersets = ParameterSet._for_parametrize(
            argnames,
            argvalues,
            self.function,
            self.config,
            nodeid=self.definition.nodeid,
        )
        del argvalues

        if "request" in argnames:
            fail(
                "'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
                pytrace=False,
            )

        if scope is not None:
            scope_ = Scope.from_user(
                scope, descr=f"parametrize() call in {self.function.__name__}"
            )
        else:
            # No explicit scope: infer it from any indirectly-parametrized
            # fixtures' own scopes.
            scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)

        self._validate_if_using_arg_names(argnames, indirect)

        arg_values_types = self._resolve_arg_value_types(argnames, indirect)

        # Use any already (possibly) generated ids with parametrize Marks.
        if _param_mark and _param_mark._param_ids_from:
            generated_ids = _param_mark._param_ids_from._param_ids_generated
            if generated_ids is not None:
                ids = generated_ids

        ids = self._resolve_parameter_set_ids(
            argnames, ids, parametersets, nodeid=self.definition.nodeid
        )

        # Store used (possibly generated) ids with parametrize Marks.
        # Note: `generated_ids` is always bound here because this condition
        # repeats the guard of the block that assigns it above.
        if _param_mark and _param_mark._param_ids_from and generated_ids is None:
            object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)

        # Create the new calls: if we are parametrize() multiple times (by applying the decorator
        # more than once) then we accumulate those calls generating the cartesian product
        # of all calls.
        newcalls = []
        for callspec in self._calls or [CallSpec2()]:
            for param_index, (param_id, param_set) in enumerate(
                zip(ids, parametersets)
            ):
                newcallspec = callspec.setmulti(
                    valtypes=arg_values_types,
                    argnames=argnames,
                    valset=param_set.values,
                    id=param_id,
                    marks=param_set.marks,
                    scope=scope_,
                    param_index=param_index,
                )
                newcalls.append(newcallspec)
        self._calls = newcalls
    def _resolve_parameter_set_ids(
        self,
        argnames: Sequence[str],
        ids: Optional[
            Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]]
        ],
        parametersets: Sequence[ParameterSet],
        nodeid: str,
    ) -> List[str]:
        """Resolve the actual ids for the given parameter sets.

        :param argnames:
            Argument names passed to ``parametrize()``.
        :param ids:
            The `ids` parameter of the ``parametrize()`` call (see docs).
        :param parametersets:
            The parameter sets, each containing a set of values corresponding
            to ``argnames``.
        :param nodeid:
            The nodeid of the definition item that generated this
            parametrization.
        :returns:
            List with ids for each parameter set given.
        """
        if ids is None:
            idfn = None
            ids_ = None
        elif callable(ids):
            # A callable produces a per-value id fragment.
            idfn = ids
            ids_ = None
        else:
            # A sequence of explicit ids; validate its length first.
            idfn = None
            ids_ = self._validate_ids(ids, parametersets, self.function.__name__)
        id_maker = IdMaker(
            argnames,
            parametersets,
            idfn,
            ids_,
            self.config,
            nodeid=nodeid,
            func_name=self.function.__name__,
        )
        return id_maker.make_unique_parameterset_ids()
def _validate_ids(
self,
ids: Iterable[Optional[object]],
parametersets: Sequence[ParameterSet],
func_name: str,
) -> List[Optional[object]]:
try:
num_ids = len(ids) # type: ignore[arg-type]
except TypeError:
try:
iter(ids)
except TypeError as e:
raise TypeError("ids must be a callable or an iterable") from e
num_ids = len(parametersets)
# num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
if num_ids != len(parametersets) and num_ids != 0:
msg = "In {}: {} parameter sets specified, with different number of ids: {}"
fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False)
return list(itertools.islice(ids, num_ids))
def _resolve_arg_value_types(
self,
argnames: Sequence[str],
indirect: Union[bool, Sequence[str]],
) -> Dict[str, "Literal['params', 'funcargs']"]:
"""Resolve if each parametrized argument must be considered a
parameter to a fixture or a "funcarg" to the function, based on the
``indirect`` parameter of the parametrized() call.
:param List[str] argnames: List of argument names passed to ``parametrize()``.
:param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
:rtype: Dict[str, str]
A dict mapping each arg name to either:
* "params" if the argname should be the parameter of a fixture of the same name.
* "funcargs" if the argname should be a parameter to the parametrized test function.
"""
if isinstance(indirect, bool):
valtypes: Dict[str, Literal["params", "funcargs"]] = dict.fromkeys(
argnames, "params" if indirect else "funcargs"
)
elif isinstance(indirect, Sequence):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
fail(
"In {}: indirect fixture '{}' doesn't exist".format(
self.function.__name__, arg
),
pytrace=False,
)
valtypes[arg] = "params"
else:
fail(
"In {func}: expected Sequence or boolean for indirect, got {type}".format(
type=type(indirect).__name__, func=self.function.__name__
),
pytrace=False,
)
return valtypes
def _validate_if_using_arg_names(
self,
argnames: Sequence[str],
indirect: Union[bool, Sequence[str]],
) -> None:
"""Check if all argnames are being used, by default values, or directly/indirectly.
:param List[str] argnames: List of argument names passed to ``parametrize()``.
:param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
:raises ValueError: If validation fails.
"""
default_arg_names = set(get_default_arg_names(self.function))
func_name = self.function.__name__
for arg in argnames:
if arg not in self.fixturenames:
if arg in default_arg_names:
fail(
"In {}: function already takes an argument '{}' with a default value".format(
func_name, arg
),
pytrace=False,
)
else:
if isinstance(indirect, Sequence):
name = "fixture" if arg in indirect else "argument"
else:
name = "fixture" if indirect else "argument"
fail(
f"In {func_name}: function uses no {name} '{arg}'",
pytrace=False,
)
def _find_parametrized_scope(
    argnames: Sequence[str],
    arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]],
    indirect: Union[bool, Sequence[str]],
) -> Scope:
    """Find the most appropriate scope for a parametrized call.

    Any direct (non-fixture) argument forces "function" scope; when every
    argument is indirect, the narrowest scope among the used fixtures wins.
    Related to issue #1832, based on code posted by @Kingdread.
    """
    if isinstance(indirect, Sequence):
        everything_indirect = len(indirect) == len(argnames)
    else:
        everything_indirect = bool(indirect)
    if not everything_indirect:
        # At least one direct funcarg -> must be function-scoped.
        return Scope.Function
    fixturedefs = arg2fixturedefs or {}
    used_scopes = [
        fixturedef[0]._scope
        for name, fixturedef in fixturedefs.items()
        if name in argnames
    ]
    # Narrowest scope among the fixtures actually parametrized here.
    return min(used_scopes, default=Scope.Function)
def _ascii_escaped_by_config(val: Union[str, bytes], config: Optional[Config]) -> str:
    """ASCII-escape *val* unless escaping was disabled via the ini option."""
    escape_disabled = bool(
        config is not None
        and config.getini(
            "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
        )
    )
    if escape_disabled:
        # TODO: If escaping is turned off and the user passes bytes,
        # will return a bytes. For now we ignore this but the
        # code *probably* doesn't handle this case.
        return val  # type: ignore
    return ascii_escaped(val)
def _pretty_fixture_path(func) -> str:
    """Return a short display path for *func*'s definition location.

    Locations inside pytest itself are abbreviated to ``.../_pytest/<rel>``;
    everything else is shown relative to the current working directory.
    """
    cwd = Path.cwd()
    location = Path(getlocation(func, str(cwd)))
    try:
        relative = location.relative_to(_PYTEST_DIR)
    except ValueError:
        return bestrelpath(cwd, location)
    return str(Path("...", "_pytest") / relative)
def show_fixtures_per_test(config):
    """Run a wrapped session that reports the fixtures used by each test item."""
    from _pytest.main import wrap_session
    return wrap_session(config, _show_fixtures_per_test)
def _show_fixtures_per_test(config: Config, session: Session) -> None:
    """Collect the session and print, for every collected item, the fixtures
    it uses together with their definition location and docstring."""
    import _pytest.config
    session.perform_collect()
    curdir = Path.cwd()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")
    def get_best_relpath(func) -> str:
        # Location of *func* rendered relative to the current directory.
        loc = getlocation(func, str(curdir))
        return bestrelpath(curdir, Path(loc))
    def write_fixture(fixture_def: fixtures.FixtureDef[object]) -> None:
        # Print one fixture's name, pretty path and docstring (first
        # paragraph only unless verbose).
        argname = fixture_def.argname
        if verbose <= 0 and argname.startswith("_"):
            # Leading-underscore fixtures are hidden unless -v was given.
            return
        prettypath = _pretty_fixture_path(fixture_def.func)
        tw.write(f"{argname}", green=True)
        tw.write(f" -- {prettypath}", yellow=True)
        tw.write("\n")
        fixture_doc = inspect.getdoc(fixture_def.func)
        if fixture_doc:
            write_docstring(
                tw, fixture_doc.split("\n\n")[0] if verbose <= 0 else fixture_doc
            )
        else:
            tw.line(" no docstring available", red=True)
    def write_item(item: nodes.Item) -> None:
        # Not all items have _fixtureinfo attribute.
        info: Optional[FuncFixtureInfo] = getattr(item, "_fixtureinfo", None)
        if info is None or not info.name2fixturedefs:
            # This test item does not use any fixtures.
            return
        tw.line()
        tw.sep("-", f"fixtures used by {item.name}")
        # TODO: Fix this type ignore.
        tw.sep("-", f"({get_best_relpath(item.function)})") # type: ignore[attr-defined]
        # dict key not used in loop but needed for sorting.
        for _, fixturedefs in sorted(info.name2fixturedefs.items()):
            assert fixturedefs is not None
            if not fixturedefs:
                continue
            # Last item is expected to be the one used by the test item.
            write_fixture(fixturedefs[-1])
    for session_item in session.items:
        write_item(session_item)
def showfixtures(config: Config) -> Union[int, ExitCode]:
    """Run a wrapped session that lists every available fixture."""
    from _pytest.main import wrap_session
    return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config: Config, session: Session) -> None:
    """Collect the session and print every known fixture, grouped by the
    module that defines it, with scope, location and docstring."""
    import _pytest.config
    session.perform_collect()
    curdir = Path.cwd()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")
    fm = session._fixturemanager
    available = []
    # (argname, location) pairs already emitted, to drop duplicates.
    seen: Set[Tuple[str, str]] = set()
    for argname, fixturedefs in fm._arg2fixturedefs.items():
        assert fixturedefs is not None
        if not fixturedefs:
            continue
        for fixturedef in fixturedefs:
            loc = getlocation(fixturedef.func, str(curdir))
            if (fixturedef.argname, loc) in seen:
                continue
            seen.add((fixturedef.argname, loc))
            # Tuple layout chosen so that available.sort() orders by baseid
            # length first, then module, path and name.
            available.append(
                (
                    len(fixturedef.baseid),
                    fixturedef.func.__module__,
                    _pretty_fixture_path(fixturedef.func),
                    fixturedef.argname,
                    fixturedef,
                )
            )
    available.sort()
    currentmodule = None
    for baseid, module, prettypath, argname, fixturedef in available:
        if currentmodule != module:
            # New module: print a separator header (pytest-internal modules
            # get no header of their own).
            if not module.startswith("_pytest."):
                tw.line()
                tw.sep("-", f"fixtures defined from {module}")
                currentmodule = module
        if verbose <= 0 and argname.startswith("_"):
            # Leading-underscore fixtures are hidden unless -v was given.
            continue
        tw.write(f"{argname}", green=True)
        if fixturedef.scope != "function":
            tw.write(" [%s scope]" % fixturedef.scope, cyan=True)
        tw.write(f" -- {prettypath}", yellow=True)
        tw.write("\n")
        doc = inspect.getdoc(fixturedef.func)
        if doc:
            write_docstring(tw, doc.split("\n\n")[0] if verbose <= 0 else doc)
        else:
            tw.line("    no docstring available", red=True)
    tw.line()
def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None:
    """Write *doc* to *tw* line by line, prefixing each line with *indent*."""
    lines = doc.split("\n")
    for text in lines:
        tw.line(f"{indent}{text}")
class Function(PyobjMixin, nodes.Item):
    """Item responsible for setting up and executing a Python test function.
    :param name:
        The full function name, including any decorations like those
        added by parametrization (``my_func[my_param]``).
    :param parent:
        The parent Node.
    :param config:
        The pytest Config object.
    :param callspec:
        If given, this function has been parametrized and the callspec contains
        meta information about the parametrization.
    :param callobj:
        If given, the object which will be called when the Function is invoked,
        otherwise the callobj will be obtained from ``parent`` using ``originalname``.
    :param keywords:
        Keywords bound to the function object for "-k" matching.
    :param session:
        The pytest Session object.
    :param fixtureinfo:
        Fixture information already resolved at this fixture node.
    :param originalname:
        The attribute name to use for accessing the underlying function object.
        Defaults to ``name``. Set this if name is different from the original name,
        for example when it contains decorations like those added by parametrization
        (``my_func[my_param]``).
    """
    # Disable since functions handle it themselves.
    _ALLOW_MARKERS = False
    def __init__(
        self,
        name: str,
        parent,
        config: Optional[Config] = None,
        callspec: Optional[CallSpec2] = None,
        callobj=NOTSET,
        keywords: Optional[Mapping[str, Any]] = None,
        session: Optional[Session] = None,
        fixtureinfo: Optional[FuncFixtureInfo] = None,
        originalname: Optional[str] = None,
    ) -> None:
        super().__init__(name, parent, config=config, session=session)
        if callobj is not NOTSET:
            self.obj = callobj
        #: Original function name, without any decorations (for example
        #: parametrization adds a ``"[...]"`` suffix to function names), used to access
        #: the underlying function object from ``parent`` (in case ``callobj`` is not given
        #: explicitly).
        #:
        #: .. versionadded:: 3.0
        self.originalname = originalname or name
        # Note: when FunctionDefinition is introduced, we should change ``originalname``
        # to a readonly property that returns FunctionDefinition.name.
        self.own_markers.extend(get_unpacked_marks(self.obj))
        if callspec:
            self.callspec = callspec
            # Marks attached to this specific parameter set become marks of
            # the item itself.
            self.own_markers.extend(callspec.marks)
        # todo: this is a hell of a hack
        # https://github.com/pytest-dev/pytest/issues/4569
        # Note: the order of the updates is important here; indicates what
        # takes priority (ctor argument over function attributes over markers).
        # Take own_markers only; NodeKeywords handles parent traversal on its own.
        self.keywords.update((mark.name, mark) for mark in self.own_markers)
        self.keywords.update(self.obj.__dict__)
        if keywords:
            self.keywords.update(keywords)
        if fixtureinfo is None:
            # Resolve the fixture closure for this function when the caller
            # did not provide it up front.
            fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                self, self.obj, self.cls, funcargs=True
            )
        self._fixtureinfo: FuncFixtureInfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()
    @classmethod
    def from_parent(cls, parent, **kw): # todo: determine sound type limitations
        """The public constructor."""
        return super().from_parent(parent=parent, **kw)
    def _initrequest(self) -> None:
        # Fresh funcarg cache and fixture request object for this item.
        self.funcargs: Dict[str, object] = {}
        self._request = fixtures.FixtureRequest(self, _ispytest=True)
    @property
    def function(self):
        """Underlying python 'function' object."""
        return getimfunc(self.obj)
    def _getobj(self):
        assert self.parent is not None
        if isinstance(self.parent, Class):
            # Each Function gets a fresh class instance.
            parent_obj = self.parent.newinstance()
        else:
            parent_obj = self.parent.obj # type: ignore[attr-defined]
        return getattr(parent_obj, self.originalname)
    @property
    def _pyfuncitem(self):
        """(compatonly) for code expecting pytest-2.2 style request objects."""
        return self
    def runtest(self) -> None:
        """Execute the underlying test function."""
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)
    def setup(self) -> None:
        # Resolve and inject all fixtures the test function needs.
        self._request._fillfixtures()
    def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
        # Trim traceback entries outside the test function itself, unless
        # --fulltrace was requested.
        if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
            code = _pytest._code.Code.from_function(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                # Progressively weaker cuts when the precise one had no effect.
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    ntraceback = ntraceback.filter(filter_traceback)
                    if not ntraceback:
                        ntraceback = traceback
            ntraceback = ntraceback.filter(excinfo)
            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame.
            if self.config.getoption("tbstyle", "auto") == "auto":
                if len(ntraceback) > 2:
                    ntraceback = Traceback(
                        entry
                        if i == 0 or i == len(ntraceback) - 1
                        else entry.with_repr_style("short")
                        for i, entry in enumerate(ntraceback)
                    )
            return ntraceback
        return excinfo.traceback
    # TODO: Type ignored -- breaks Liskov Substitution.
    def repr_failure( # type: ignore[override]
        self,
        excinfo: ExceptionInfo[BaseException],
    ) -> Union[str, TerminalRepr]:
        # Honor --tb=<style>; "auto" maps to the long format at this point.
        style = self.config.getoption("tbstyle", "auto")
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
class FunctionDefinition(Function):
    """This class is a stop gap solution until we evolve to have actual function
    definition nodes and manage to get rid of ``metafunc``."""
    def runtest(self) -> None:
        # A definition node only carries metadata; executing it is a bug.
        raise RuntimeError("function definitions are not supposed to be run as tests")
    # Alias so that attempting to set up a definition node fails just as
    # loudly as attempting to run it.
    setup = runtest
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pytest@py3@_pytest@python.py@.PATH_END.py
|
{
"filename": "pygrplib.t.py",
"repo_name": "sherpa/sherpa",
"repo_path": "sherpa_extracted/sherpa-main/extern/grplib-4.9/python/pygrplib.t.py",
"type": "Python"
}
|
# May 3, 2007
# pygrplib.t.py
# this file
# pygrplib.t
# unix sh script, to activate grpglue.t.py for running regression testing
# OUTFILE and/or INFILE
# global variables exported in grpglue.t
#------------ Beginning of Template ----------------------
# import the package
from group import *
import numpy
import os #used for environment vars
from pycrates import * #used for file input
#rename the function to get the environment variable for readability
getenv = os.environ.get
# get infile and outfile by invoking environment variables
# exported from "pygrpglue.t"
# NOTE(review): the "== None" / "!= None" comparisons below are Python 2
# style; behavior is the same as "is None" / "is not None" here.
TESTID = getenv('TESTID')
if( TESTID == None ):
    print ("No TESTID specified\n")
OUTFILE = getenv('OUTFILE')
if( OUTFILE != None):
    OutFilePtr = open (OUTFILE, 'w')
    # NOTE(review): open() raises on failure rather than returning None, so
    # this check can never fire; kept for fidelity with the original template.
    if( OutFilePtr == None):
        print ("Unable to open %s\n" % OUTFILE)
INFILE = getenv('INFILE')
if( INFILE != None):
    # read_file comes from pycrates; presumably returns a crate object --
    # whether it can return None is not visible here (TODO confirm).
    InFilePtr = read_file(INFILE)
    if (InFilePtr == None):
        print ("Unable to open %s\n" % INFILE)
BINFILE = getenv('BINFILE')
if( BINFILE != None):
    BinFilePtr = read_file(BINFILE)
    if (BinFilePtr == None):
        print ("Unable to open %s\n" % BINFILE)
#------------ End of Template ----------------------
# !! 2
# Below are pecific subroutines for regression testing
#=============================================================================
#
# --- Subroutines ---
#
#=============================================================================
# !!4
#=============================================================================
#
# --- Main Routine ---
#
#=============================================================================
#Python does not have a native switch statement, so the code below
#tries to duplicate the C switch format/behavior
class switch(object):
    """Poor-man's C-style ``switch`` statement.

    Usage: ``for case in switch(value): if case('a'): ...`` -- iterating
    yields the :meth:`match` method exactly once; once a case has matched,
    subsequent ``case(...)`` calls fall through (return True) until a
    ``break``.
    """
    def __init__(self, value):
        # The value being switched on.
        self.value = value
        # Set once a case matches, so later case() calls fall through.
        self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # BUG FIX: the original did ``raise StopIteration`` here, which
        # PEP 479 (Python 3.7+) converts into a RuntimeError when it escapes
        # a generator.  A plain return ends the generator with identical
        # iteration behavior on all Python versions.
        return
    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            # Falling through from an earlier match, or the default case.
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
# Switch statement for TESTID: run the regression case selected via the
# TESTID environment variable and dump the resulting grouping/quality arrays
# (plus the input columns) to OUTFILE for comparison with reference output.
#
# FIX: the original used Python 2 ``print >> OutFilePtr, x`` statements,
# which are a syntax error under Python 3.  They are converted here to
# ``print(x, file=OutFilePtr)``, which produces identical output (value
# followed by a newline); the top of this file already uses function-style
# print calls.
for case in switch(TESTID):
    if case('test1'):
        i_stopspec = numpy.array([0,0,0,1,1,0,0,0,0,0,0])
        (o_group,o_qual) = grpNumCounts(numCounts=1000, countsArray=copy_colvals(InFilePtr,'COUNTS'), tabStops=i_stopspec, maxLength=0)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'COUNTS')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test2'):
        i_stopspec = numpy.array([0,0,0,1,1,0,0,0,0,0,0])
        i_numchans = len(copy_colvals(InFilePtr, 'COUNTS'))
        (o_group,o_qual) = grpNumBins(i_numchans, 5)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test3'):
        i_numchans = len(copy_colvals(InFilePtr, 'COUNTS'))
        (o_group,o_qual) = grpBinWidth(i_numchans, 4)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test4'):
        i_stopspec = numpy.array([0,0,0,0,0,0,0,0,0,0,0])
        (o_group,o_qual) = grpSnr(copy_colvals(InFilePtr, 'COUNTS'), 50, 0, i_stopspec, errorCol=copy_colvals(InFilePtr, 'STAT_ERR'))
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'COUNTS')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test5'):
        i_stopspec = numpy.array([0,0,0,0,0,0,0,0,0,0,0])
        (o_group,o_qual) = grpAdaptive(copy_colvals(InFilePtr, 'COUNTS'), 700, 3, i_stopspec)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'COUNTS')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test6'):
        (o_group,o_qual) = grpAdaptiveSnr(copy_colvals(InFilePtr, 'COUNTS'), 17.0)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'COUNTS')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test7'):
        (o_group,o_qual) = grpMaxSlope(copy_colvals(InFilePtr, 'CHANNEL'), copy_colvals(InFilePtr, 'COUNTS'), 50)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'COUNTS')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test8'):
        (o_group,o_qual) = grpMinSlope(copy_colvals(InFilePtr, 'CHANNEL'), copy_colvals(InFilePtr, 'COUNTS'), 20)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'COUNTS')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test9'):
        i_binlow = numpy.array([1,4,7,10])
        i_binhigh = numpy.array([3,6,9,11])
        i_stopspec = numpy.array([0,0,0,0,1,0,0,0,0,0,0])
        (o_group,o_qual) = grpBin(copy_colvals(InFilePtr, 'CHANNEL'), i_binlow, i_binhigh, i_stopspec)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'COUNTS')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test10'):
        (o_group,o_qual) = grpBinFile(copy_colvals(InFilePtr, 'CHANNEL'), copy_colvals(BinFilePtr, 'CHANNEL'), \
            copy_colvals(BinFilePtr, 'GROUPING'), copy_colvals(BinFilePtr, 'QUALITY'))
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'COUNTS')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test11'):
        i_binlow = numpy.array([0,50,100,150,200,250,300,350,600,700,800,900])
        i_binhigh = numpy.array([50,100,150,200,250,300,350,400,700,800,900,1000])
        i_stopspec = numpy.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
        (o_group,o_qual) = grpBin(copy_colvals(InFilePtr, 'PI'), i_binlow, i_binhigh, i_stopspec)
        print(str(copy_colvals(InFilePtr, 'CHANNEL')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'PI')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case('test12'):
        i_binlow = numpy.array([3.0])
        i_binhigh = numpy.array([7.5])
        (o_group,o_qual) = grpBin(copy_colvals(InFilePtr, 'BOO'), i_binlow, i_binhigh)
        print(str(copy_colvals(InFilePtr, 'pi')), file=OutFilePtr)
        print(str(copy_colvals(InFilePtr, 'BOO')), file=OutFilePtr)
        print(str(o_group), file=OutFilePtr)
        print(str(o_qual), file=OutFilePtr)
        break
    if case(): # default
        print ("Invalid TESTID")
OutFilePtr.close()
|
sherpaREPO_NAMEsherpaPATH_START.@sherpa_extracted@sherpa-main@extern@grplib-4.9@python@pygrplib.t.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Autostronomy/AutoProf",
"repo_path": "AutoProf_extracted/AutoProf-main/autoprof/autoprofutils/__init__.py",
"type": "Python"
}
|
from .Diagnostic_Plots import *
from .ImageTransform import *
from .SharedFunctions import *
|
AutostronomyREPO_NAMEAutoProfPATH_START.@AutoProf_extracted@AutoProf-main@autoprof@autoprofutils@__init__.py@.PATH_END.py
|
{
"filename": "_namelength.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2d/hoverlabel/_namelength.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NamelengthValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for ``histogram2d.hoverlabel.namelength``.

    Defaults (overridable via **kwargs): ``array_ok=True``,
    ``edit_type="none"``, ``min=-1``.
    """
    def __init__(
        self, plotly_name="namelength", parent_name="histogram2d.hoverlabel", **kwargs
    ):
        # Python 3 zero-argument super() -- same MRO lookup as the old
        # explicit super(NamelengthValidator, self) form.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            min=kwargs.pop("min", -1),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2d@hoverlabel@_namelength.py@.PATH_END.py
|
{
"filename": "createExtendedSourceFromTemplate.py",
"repo_name": "threeML/astromodels",
"repo_path": "astromodels_extracted/astromodels-master/examples/createExtendedSourceFromTemplate.py",
"type": "Python"
}
|
# code to show how to create an extended source via uploading a FITs image template
# author: Andrea Albert (aalbert@slac.stanford.edu)
# date: Oct 26, 2016
from threeML import *
# the class SpatialTemplate_2D expects a FITs file that contains a header with the following info: reference pixels (e.g. 'CRPIX1'), pixels step in degrees (e.g. 'CDELT1'), RA and DEC values at reference pixel (e.g. 'CRVAL1')
# initialize shape object
shape = SpatialTemplate_2D()
# load in template file
# by default the extension number is set to zero (ihdu = 0)
shape.load_file("exampleDMtemplate.fits",ihdu=0)
# just for example let's assume a powerlaw spectrum
spectrum = Powerlaw()
# combine the spatial template and spectrum into one extended source
source = ExtendedSource("M31",spatial_shape=shape,spectral_shape=spectrum)
# The code assumes the template is normalized to 1 sr. If it isn't be default then you should set the optional normalization (K) appropriately. The example template is already normalized to 1 sr so we'll keep K set to 1. Note K is set to 1 and fixed by default, we include the following commands as an example of how to manipulate K
shape.K = 1.
shape.K.fix = True
# The following are example commands that get called during fitting
# get the edges of the template
(min_ra,max_ra),(min_dec,max_dec) = shape.get_boundaries()
# return the values at various pixels at locations (x,y). Note the code assumes x=RA (degrees) and y=DEC(degrees). Note the code will return a value of 0 is the pixel is outside the template ROI...in this example only the 2nd pixel will have a non-zero value
val = shape.evaluate(x=[1.,10.,10.],y=[1.,40.,89.],K=1)
|
threeMLREPO_NAMEastromodelsPATH_START.@astromodels_extracted@astromodels-master@examples@createExtendedSourceFromTemplate.py@.PATH_END.py
|
{
"filename": "test_galactics.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/test/suite/codes_tests/test_galactics.py",
"type": "Python"
}
|
import os
import os.path
import numpy
import platform
from amuse.community import *
from amuse.test.amusetest import TestWithMPI
from amuse.community.galactics.interface import GalactICsInterface, GalactICs
# Change the default for some GalactICs(-Interface) keyword arguments:
# (empty by default; uncomment the line below to see worker output directly)
default_options = dict()
# default_options = dict(redirection = "none")
class GalactICsInterfaceTests(TestWithMPI):
    """Low-level tests of the GalactICsInterface code wrapper.
    NOTE(review): the hard-coded numeric expectations below appear to be
    regression reference values from a previous trusted run -- verify against
    the model definition before changing them.
    """
    def test1(self):
        # Smoke test: initialize, disable bulge/disk generation, commit and
        # clean up; every call is expected to return error code 0.
        print("Testing GalactICsInterface initialization")
        instance = GalactICsInterface(**default_options)
        self.assertEqual(instance.initialize_code(), 0)
        self.assertEqual(instance.set_output_path(instance.get_output_directory()), 0)
        self.assertEqual(instance.set_generate_bulge_flag(False), 0)
        self.assertEqual(instance.set_generate_disk_flag(False), 0)
        self.assertEqual(instance.set_order_of_multipole_expansion(0), 0)
        self.assertEqual(instance.commit_parameters(), 0)
        self.assertEqual(instance.cleanup_code(), 0)
        instance.stop()
    def test2(self):
        # Round-trip parameter setters/getters; getters return
        # (value, error_code) pairs.
        print("Testing GalactICsInterface parameters")
        instance = GalactICsInterface(**default_options)
        self.assertEqual(instance.initialize_code(), 0)
        self.assertEqual(instance.set_output_path(os.path.join(instance.get_output_directory(), "test")), 0)
        self.assertEqual(instance.set_generate_halo_flag(False), 0)
        self.assertEqual(instance.set_disk_do_center_flag(False), 0)
        self.assertEqual(instance.set_number_of_grid_intervals(50000), 0)
        self.assertEqual(instance.set_disk_random_seed(-1234), 0)
        self.assertEqual(instance.set_halo_outer_radius(250.0), 0)
        self.assertEqual(instance.set_bulge_streaming_fraction(0.4), 0)
        self.assertEqual([False, 0], list(instance.get_generate_halo_flag().values()))
        self.assertEqual([False, 0], list(instance.get_disk_do_center_flag().values()))
        self.assertEqual([50000, 0], list(instance.get_number_of_grid_intervals().values()))
        self.assertEqual([-1234, 0], list(instance.get_disk_random_seed().values()))
        self.assertEqual([250.0, 0], list(instance.get_halo_outer_radius().values()))
        self.assertEqual([0.4, 0], list(instance.get_bulge_streaming_fraction().values()))
        self.assertEqual(instance.cleanup_code(), 0)
        instance.stop()
    def slowtest3(self):
        # "slowtest" prefix: presumably excluded from the default "test*"
        # collection and only run when invoked explicitly -- TODO confirm.
        print("Testing GalactICsInterface generate_particles")
        n_particles_halo = 100
        n_particles_bulge = 100
        n_particles_disk = 100
        number_of_particles = n_particles_disk + n_particles_bulge + n_particles_halo
        instance = GalactICsInterface(**default_options)
        self.assertEqual(instance.initialize_code(), 0)
        self.assertEqual(instance.set_output_path(instance.get_output_directory()), 0)
        self.assertEqual(instance.set_halo_number_of_particles(n_particles_halo), 0)
        self.assertEqual(instance.set_bulge_number_of_particles(n_particles_bulge), 0)
        self.assertEqual(instance.set_disk_number_of_particles(n_particles_disk), 0)
        self.assertEqual(instance.commit_parameters(), 0)
        self.assertEqual(list(instance.get_number_of_particles_updated().values()), [0, 0])
        self.assertEqual(instance.generate_particles(), 0)
        self.assertEqual(list(instance.get_number_of_particles_updated().values()), [number_of_particles, 0])
        # Reference component masses; particles are ordered disk, bulge, halo
        # with equal per-particle mass within each component.
        mass_disk, mass_bulge, mass_halo = 26.578816771507263, 14.632800221443176, 1184.2350006103516
        masses, errors = instance.get_mass(range(number_of_particles))
        self.assertEqual(errors, numpy.zeros(number_of_particles))
        self.assertAlmostRelativeEquals(masses, numpy.concatenate((
            numpy.ones(n_particles_disk)*mass_disk/n_particles_disk,
            numpy.ones(n_particles_bulge)*mass_bulge/n_particles_bulge,
            numpy.ones(n_particles_halo)*mass_halo/n_particles_halo,
        )), 3)
        x_positions, y_positions, z_positions, errors = instance.get_position(range(number_of_particles))
        self.assertEqual(errors, numpy.zeros(number_of_particles))
        # Positions are centered on the origin per component.
        self.assertAlmostEqual(numpy.array([numpy.mean(x_positions), numpy.mean(y_positions),
            numpy.mean(z_positions)]), numpy.array([0.0]*3), 5)
        self.assertAlmostRelativeEquals(numpy.array([
            numpy.mean(abs(x_positions[:n_particles_disk])),
            numpy.mean(abs(y_positions[:n_particles_disk])),
            numpy.mean(abs(z_positions[:n_particles_disk]))]),
            numpy.array([7.3994484072923656, 7.1570298135280606, 0.33854196755215527]), 3)
        self.assertAlmostRelativeEquals(numpy.array([
            numpy.mean(abs(x_positions[n_particles_disk:n_particles_disk+n_particles_bulge])),
            numpy.mean(abs(y_positions[n_particles_disk:n_particles_disk+n_particles_bulge])),
            numpy.mean(abs(z_positions[n_particles_disk:n_particles_disk+n_particles_bulge]))]),
            numpy.array([1.244429082274437, 1.1639373835548759, 0.8550614269822836]), 3)
        self.assertAlmostRelativeEquals(numpy.array([
            numpy.mean(abs(x_positions[-n_particles_halo:])),
            numpy.mean(abs(y_positions[-n_particles_halo:])),
            numpy.mean(abs(z_positions[-n_particles_halo:]))]),
            numpy.array([94.242819476127622, 88.41320479869843, 85.234394512176507]), 3)
        x_velocities, y_velocities, z_velocities, errors = instance.get_velocity(range(number_of_particles))
        self.assertEqual(errors, numpy.zeros(number_of_particles))
        self.assertAlmostEqual(numpy.array([numpy.mean(x_velocities), numpy.mean(y_velocities),
            numpy.mean(z_velocities)]), numpy.array([0.0]*3))
        self.assertAlmostRelativeEquals(numpy.array([
            numpy.mean(abs(x_velocities[:n_particles_disk])),
            numpy.mean(abs(y_velocities[:n_particles_disk])),
            numpy.mean(abs(z_velocities[:n_particles_disk]))]),
            numpy.array([1.5026254250109197, 1.5649469271302223, 0.20230436498299242]), 5)
        self.assertAlmostRelativeEquals(numpy.array([
            numpy.mean(abs(x_velocities[n_particles_disk:])),
            numpy.mean(abs(y_velocities[n_particles_disk:])),
            numpy.mean(abs(z_velocities[n_particles_disk:]))]),
            numpy.array([0.99470628838986164, 0.95913934175856408, 0.9359876788407564]), 5)
        self.assertEqual(instance.cleanup_code(), 0)
        instance.stop()
    def test4(self):
        # Halo-only particle generation with statistical checks on the result.
        print("Testing GalactICsInterface generate_particles")
        number_of_particles_halo = 1000
        instance = GalactICsInterface(**default_options)
        self.assertEqual(instance.initialize_code(), 0)
        self.assertEqual(instance.set_output_path(instance.get_output_directory()), 0)
        self.assertEqual(instance.set_halo_number_of_particles(number_of_particles_halo), 0)
        self.assertEqual(instance.set_generate_bulge_flag(False), 0)
        self.assertEqual(instance.set_generate_disk_flag(False), 0)
        self.assertEqual(instance.set_order_of_multipole_expansion(0), 0)
        self.assertEqual(instance.commit_parameters(), 0)
        self.assertEqual(list(instance.get_number_of_particles_updated().values()), [0, 0])
        self.assertEqual(instance.generate_particles(), 0)
        self.assertEqual(list(instance.get_number_of_particles_updated().values()), [number_of_particles_halo, 0])
        masses, errors = instance.get_mass(range(number_of_particles_halo))
        self.assertEqual(errors, numpy.zeros(number_of_particles_halo))
        # All halo particles share the same mass.
        self.assertAlmostRelativeEquals(masses, numpy.ones(number_of_particles_halo)*masses[0])
        total_mass = masses.sum()
        if platform.processor() == 'ppc64le':
            # on ppc64le, the model generation has small differences from intel
            # change expected pos
            expected_mean_pos = numpy.array([73.5628, 76.251034, 75.53434])
        else:
            expected_mean_pos = numpy.array([73.768384103536604, 76.03533643054962, 75.176319462463255])
        expected_mean_vel = numpy.array([0.92904859858192501, 0.94953939936682585, 0.92897711758688095])
        x_positions, y_positions, z_positions, errors = instance.get_position(range(number_of_particles_halo))
        self.assertEqual(errors, numpy.zeros(number_of_particles_halo))
        self.assertAlmostEqual(numpy.array([numpy.mean(x_positions), numpy.mean(y_positions),
            numpy.mean(z_positions)]), numpy.array([0.0]*3), 5)
        self.assertAlmostRelativeEquals(numpy.array([numpy.mean(abs(x_positions)), numpy.mean(abs(y_positions)),
            numpy.mean(abs(z_positions))]), expected_mean_pos, 3)
        x_velocities, y_velocities, z_velocities, errors = instance.get_velocity(range(number_of_particles_halo))
        self.assertEqual(errors, numpy.zeros(number_of_particles_halo))
        self.assertAlmostEqual(numpy.array([numpy.mean(x_velocities), numpy.mean(y_velocities),
            numpy.mean(z_velocities)]), numpy.array([0.0]*3))
        self.assertAlmostRelativeEquals(numpy.array([numpy.mean(abs(x_velocities)), numpy.mean(abs(y_velocities)),
            numpy.mean(abs(z_velocities))]), expected_mean_vel, 2)
        self.assertEqual(instance.cleanup_code(), 0)
        instance.stop()
class GalactICsTests(TestWithMPI):
    """Unit tests for the GalactICs galaxy-model generator interface.

    Covers code initialization, parameter round-tripping (with and
    without a unit converter), particle generation, reproducibility
    under random seeds, and the interface state machine.
    """

    # N-body to SI converter used by the converter-aware tests.
    default_unit_converter = nbody_system.nbody_to_si(1.0 | units.kpc, 1.0e6 | units.MSun)

    def test1(self):
        # Basic start/stop cycle: halo-only model, no multipole expansion.
        print("Testing GalactICs initialization")
        instance = GalactICs(**default_options)
        instance.initialize_code()
        instance.parameters.generate_bulge_flag = False
        instance.parameters.generate_disk_flag = False
        instance.parameters.order_of_multipole_expansion = 0
        instance.commit_parameters()
        instance.cleanup_code()
        instance.stop()

    def test2(self):
        print("Testing GalactICs parameters (with unit converter)")
        instance = GalactICs(self.default_unit_converter, **default_options)
        instance.initialize_code()
        # Boolean flags: default True, and settable.
        for par, value in [('generate_halo_flag', True), ('generate_disk_flag', True),
                ('generate_bulge_flag', True), ('halo_do_center_flag', True),
                ('bulge_do_center_flag', True), ('disk_do_center_flag', True)]:
            self.assertTrue(value is getattr(instance.parameters, par))
            setattr(instance.parameters, par, not value)
            self.assertFalse(value is getattr(instance.parameters, par))
        # Integer-valued parameters: check defaults, then overwrite with 1.
        for par, value in [('number_of_grid_intervals', 90000), ('order_of_multipole_expansion', 10),
                ('number_of_radial_steps_correction_fns_disk_df', 10),
                ('number_of_iterations_disk_df', 50), ('halo_number_of_particles', 200000),
                ('bulge_number_of_particles', 50000), ('disk_number_of_particles', 100000),
                ('halo_random_seed', -1), ('bulge_random_seed', -1), ('disk_random_seed', -1)]:
            self.assertEqual(value, getattr(instance.parameters, par))
            setattr(instance.parameters, par, 1)
            self.assertEqual(1, getattr(instance.parameters, par))
        # Dimensional parameters are reported in SI via the unit converter.
        for par, value in [('halo_outer_radius', 300.0 | nbody_system.length),
                ('halo_scale_velocity', 3.26331115 | nbody_system.speed),
                ('halo_scale_radius', 6.06699419 | nbody_system.length),
                ('halo_truncation_width', 100.0 | nbody_system.length)]:
            self.assertEqual(instance.unit_converter.to_si(value),
                getattr(instance.parameters, par))
            setattr(instance.parameters, par, 3.0 | value.unit)
            self.assertEqual(instance.unit_converter.to_si(3.0 | value.unit),
                getattr(instance.parameters, par))
        self.assertEqual(os.path.join(instance.get_output_directory()), instance.parameters.output_directory)
        instance.parameters.output_directory = 'test'
        self.assertEqual("test", instance.parameters.output_directory)
        instance.cleanup_code()
        instance.stop()

    def test3(self):
        # Same dimensional parameters, in plain N-body units (no converter).
        print("Testing GalactICs parameters (nbody units, no converter)")
        instance = GalactICs(**default_options)
        instance.initialize_code()
        for par, value in [('halo_outer_radius', 300.0 | nbody_system.length),
                ('halo_scale_velocity', 3.26331115 | nbody_system.speed),
                ('halo_scale_radius', 6.06699419 | nbody_system.length),
                ('halo_truncation_width', 100.0 | nbody_system.length)]:
            self.assertEqual(value, getattr(instance.parameters, par))
            setattr(instance.parameters, par, 3.0 | value.unit)
            self.assertEqual(3.0 | value.unit, getattr(instance.parameters, par))
        instance.cleanup_code()
        instance.stop()

    def slowtest4(self):
        # Full disk+bulge+halo generation; slow, so excluded from the
        # default test run (name does not start with 'test').
        print("Testing GalactICs generate_particles")
        n_particles_halo = 100
        n_particles_bulge = 100
        n_particles_disk = 100
        number_of_particles = n_particles_disk + n_particles_bulge + n_particles_halo
        instance = GalactICs(**default_options)
        instance.initialize_code()
        instance.parameters.disk_number_of_particles = n_particles_disk
        instance.parameters.bulge_number_of_particles = n_particles_bulge
        instance.parameters.halo_number_of_particles = n_particles_halo
        instance.commit_parameters()
        instance.generate_particles()
        self.assertEqual(len(instance.particles), number_of_particles)
        # Reference values; presumably from an earlier trusted run of the
        # code -- TODO confirm provenance.
        self.assertAlmostRelativeEquals(instance.particles.total_mass(), 1225.4466176 | nbody_system.mass, 3)
        self.assertAlmostRelativeEquals(instance.particles.kinetic_energy(), 2564.69894361 | nbody_system.energy, 3)
        self.assertAlmostRelativeEquals(instance.particles.potential_energy(G=nbody_system.G), -4531.58416742 | nbody_system.energy, 3)
        self.assertAlmostRelativeEquals(instance.particles.virial_radius(), 165.694750127 | nbody_system.length, 3)
        instance.cleanup_code()
        instance.stop()

    def test5(self):
        # Halo-only generation: check totals and the per-component subsets.
        print("Testing GalactICs generate_particles")
        instance = GalactICs(**default_options)
        instance.initialize_code()
        instance.parameters.halo_number_of_particles = 1000
        instance.parameters.generate_bulge_flag = False
        instance.parameters.generate_disk_flag = False
        instance.parameters.order_of_multipole_expansion = 0
        instance.commit_parameters()
        instance.generate_particles()
        self.assertEqual(len(instance.particles), 1000)
        accuracy = 3
        mass_halo = 1178.89297009 | nbody_system.mass
        expected_kinetic_energy = 2418.49730735 | nbody_system.energy
        self.assertAlmostRelativeEquals(instance.particles.total_mass(), mass_halo, accuracy)
        self.assertAlmostRelativeEquals(instance.particles.kinetic_energy(), expected_kinetic_energy, accuracy)
        self.assertEqual(len(instance.halo_particles), 1000)
        self.assertEqual(len(instance.disk_particles), 0)
        self.assertEqual(len(instance.bulge_particles), 0)
        instance.cleanup_code()
        instance.stop()

    def test6(self):
        # Reproducibility: the same seed must reproduce the same particle
        # set; a different seed must give a different (but statistically
        # similar) realization.
        print("Testing GalactICs generate_particles: generate multiple sets")
        number_of_particles = 1000
        instance = GalactICs(**default_options)
        instance.initialize_code()
        instance.parameters.halo_number_of_particles = number_of_particles
        instance.parameters.generate_bulge_flag = False
        instance.parameters.generate_disk_flag = False
        instance.parameters.order_of_multipole_expansion = 0
        instance.parameters.halo_random_seed = -1.0
        instance.commit_parameters()
        instance.generate_particles()
        set1 = instance.particles.copy()
        self.assertEqual(len(set1), number_of_particles)
        instance.generate_particles()
        set2 = instance.particles.copy()
        self.assertEqual(len(set2), number_of_particles)
        # GalactICs' random-number generator is re-seeded with 'halo_random_seed'
        # each time, and the result should be the same:
        for attribute in ["mass", "x", "y", "z", "vx", "vy", "vz"]:
            self.assertEqual(getattr(set1, attribute), getattr(set2, attribute))
        instance.parameters.halo_random_seed = -42.0
        instance.generate_particles()
        # halo_random_seed changed: draw a different random set of particles
        set3 = instance.particles.copy()
        self.assertEqual(len(set3), number_of_particles)
        self.assertEqual(set1.mass, set3.mass)
        self.assertRaises(self.failureException, self.assertEqual, set1.x, set3.x)
        # Different realizations should still agree statistically (1 digit).
        self.assertAlmostRelativeEquals(abs(set1.x).median(), abs(set3.x).median(), 1)
        self.assertAlmostRelativeEquals(abs(set1.vy).median(), abs(set3.vy).median(), 1)
        instance.cleanup_code()
        instance.stop()

    def test7(self):
        # Walk the interface state machine manually first, then rely on
        # the automatic state transitions.
        print("Testing GalactICs state")
        number_of_particles = 1000
        print("First do everything manually:")
        instance = GalactICs(**default_options)
        self.assertEqual(instance.get_name_of_current_state(), 'UNINITIALIZED')
        instance.initialize_code()
        self.assertEqual(instance.get_name_of_current_state(), 'INITIALIZED')
        instance.parameters.halo_number_of_particles = number_of_particles
        instance.parameters.generate_bulge_flag = False
        instance.parameters.generate_disk_flag = False
        instance.parameters.order_of_multipole_expansion = 0
        instance.commit_parameters()
        self.assertEqual(instance.get_name_of_current_state(), 'EDIT')
        instance.overridden().generate_particles()
        self.assertEqual(instance.get_name_of_current_state(), 'UPDATE')
        instance.invoke_state_change_updated()
        self.assertEqual(instance.get_name_of_current_state(), 'RUN')
        self.assertEqual(len(instance.particles), number_of_particles)
        instance.cleanup_code()
        self.assertEqual(instance.get_name_of_current_state(), 'END')
        instance.stop()
        print("initialize_code(), (re)commit_parameters(), update_particle_set(), "
            "and cleanup_code() should be called automatically:")
        instance = GalactICs(**default_options)
        self.assertEqual(instance.get_name_of_current_state(), 'UNINITIALIZED')
        instance.parameters.halo_number_of_particles = number_of_particles
        instance.parameters.generate_bulge_flag = False
        instance.parameters.generate_disk_flag = False
        instance.parameters.order_of_multipole_expansion = 0
        self.assertEqual(instance.get_name_of_current_state(), 'INITIALIZED')
        self.assertEqual(instance.get_number_of_particles_updated(), 0)
        self.assertEqual(instance.get_name_of_current_state(), 'EDIT')
        instance.parameters.halo_random_seed = -42.0
        self.assertEqual(instance.get_name_of_current_state(), 'CHANGE_PARAMETERS_EDIT')
        self.assertEqual(instance.get_number_of_particles_updated(), 0)
        self.assertEqual(instance.get_name_of_current_state(), 'EDIT')
        instance.generate_particles()
        self.assertEqual(instance.get_name_of_current_state(), 'RUN')
        self.assertEqual(len(instance.particles), number_of_particles)
        self.assertEqual(instance.get_number_of_particles_updated(), 0)
        instance.stop()
        self.assertEqual(instance.get_name_of_current_state(), 'STOPPED')
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@test@suite@codes_tests@test_galactics.py@.PATH_END.py
|
{
"filename": "extract_errors.py",
"repo_name": "mcataneo/mochi_class_public",
"repo_path": "mochi_class_public_extracted/mochi_class_public-main/python/extract_errors.py",
"type": "Python"
}
|
# From the dumped stdout and stderr of a nosetests test_class.py, extract all
# the failed steps.
# Usage: python extract_errors.py output
from __future__ import print_function
import sys
import os
def main(path):
    """
    Create a shorter file containing only the errors from nosetests.

    Scans the dumped nosetests output at *path* for blocks delimited by
    long dashed separator lines, keeps only blocks that contain an error
    line (one starting with 'E'), echoes them to stdout, and writes them
    to ``path + '_errors'``.

    Parameters
    ----------
    path : str
        Path to the captured nosetests stdout/stderr dump. Must exist.
    """
    assert os.path.isfile(path)
    trimmed_path = path + '_errors'
    contains_error = False
    with open(path, 'r') as source:
        text = source.readlines()
    # Context manager so the output file is closed even if an error occurs
    # mid-scan (the original leaked the handle on exceptions).
    with open(trimmed_path, 'w') as destination:
        start = 0
        for index, line in enumerate(text):
            if line.find('------------------') != -1:
                # Bounds guard: the original look-ahead text[index+2]
                # raised IndexError when a dashed line appeared within two
                # lines of the end of the file.
                if index + 2 >= len(text):
                    continue
                if text[index+2].find('----------------') != -1:
                    stop = index-1
                    # Check that an error is contained
                    if stop > 0:
                        for i in range(start, stop+1):
                            if text[i].startswith('E'):
                                contains_error = True
                        if contains_error:
                            print('Found an error')
                            for i in range(start, stop+1):
                                print(text[i], end=' ')
                                destination.write(text[i])
                    start = index
                    contains_error = False
                elif text[index+2].find('=================') != -1:
                    # Summary separator reached: nothing more to scan.
                    break
                else:
                    pass
if __name__ == "__main__":
    # Expect exactly one argument: the nosetests output file to trim.
    print(sys.argv)
    if len(sys.argv) != 2:
        print('Please specify the output file to analyse')
        exit()
    else:
        main(sys.argv[-1])
|
mcataneoREPO_NAMEmochi_class_publicPATH_START.@mochi_class_public_extracted@mochi_class_public-main@python@extract_errors.py@.PATH_END.py
|
{
"filename": "Atmo_spectro.py",
"repo_name": "bretonr/Icarus",
"repo_path": "Icarus_extracted/Icarus-master/Icarus/Atmosphere/Atmo_spectro.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function, division
__all__ = ["Atmo_grid_spectro"]
from ..Utils.import_modules import *
from .. import Utils
from .Atmo import Atmo_grid
######################## class Atmo_grid_spectro ########################
class Atmo_grid_spectro(Atmo_grid):
    """Atmo_grid_spectro

    This class handles the atmosphere grid containing a spectral
    dimension. The grid stores log specific intensities indexed as
    grid[temperature, logg, mu, wavelength].
    """
    def __init__(self, flns, wave_cut=[3000,11000], linlog=False):
        """__init__

        flns: list of atmosphere-model filenames; log(g) and temperature
            are parsed from each filename (see Flux_init).
        wave_cut: lower-upper wavelength cut [wave_low, wave_up].
        linlog (=False): if True, rebin the data to be linear in log space.
        """
        self.flns = flns
        self.Flux_init(flns, wave_cut=wave_cut, linlog=linlog)

    def Flux_init(self, flns, wave_cut=None, linlog=False):
        """Flux_init(flns, wave_cut=None, linlog=False)
        Reads a band file and construct a grid.

        Calculates:
            logtemp: effective temperatures. logtemp.shape = (ntemp)
            logg: log of surface gravity. logg.shape = (nlogg)
            mu: cos(angle) of emission direction. mu.shape = (nmu)
            grid: the grid of specific intensities. grid.shape = (ntemp,nlogg,nmu)

        wave_cut: Allows to define a lower-upper cut in wavelength [wave_low, wave_up].
        linlog (=False): If true, will rebin the data to be linear in the log space.

        >>> self.Flux_init()
        """
        lst = []
        for i in np.arange(len(flns)):
            # Get the log(g) and temp value from the filename
            lst.append( [i, float(flns[i].split('-')[1]), float(flns[i].split('lte')[1].split('-')[0])*100.] )
        # Sort by temperature, then log(g), so the flat file list can be
        # reshaped into a (ntemp, nlogg) grid below.
        Utils.Misc.List_sort(lst, [2,1])
        lst = np.array(lst)
        self.logtemp = np.log(list(set(lst[:,2])))
        self.logtemp.sort()
        n_temp = self.logtemp.shape[0]
        self.logg = np.array(list(set(lst[:,1])))
        self.logg.sort()
        n_logg = self.logg.shape[0]
        # The grid must be complete: exactly one file per (temp, logg) pair.
        if n_temp*n_logg != lst.shape[0]:
            print("There is a mismatch in the number of log(g) and "
                "temp grid points")
            return
        grid = []
        mu = []
        wav = []
        for l in lst[:,0]:
            tmp = self.Flux_init_singlefile(flns[int(l)], wave_cut=wave_cut, linlog=linlog)
            grid.append(tmp[0])
            mu.append(tmp[1])
            wav.append(tmp[2])
        # All model files must share the same mu and wavelength sampling.
        # np.array() raises ValueError on ragged (inconsistent-length)
        # input; the original bare `except:` also hid unrelated errors.
        try:
            mu = np.array(mu)
            wav = np.array(wav)
            if mu.std(0).sum() > 1.e-6:
                print('mu has different values')
                return
            else:
                self.mu = mu[0]
            if wav.std(0).sum() > 1.e-6:
                print('wav has different values')
                return
            else:
                self.wav = wav[0]
        except ValueError:
            print('mu or wav has inconsistent number of elements')
            return
        grid = np.array(grid)
        grid.shape = n_temp, n_logg, self.mu.shape[0], self.wav.shape[0]
        self.grid = grid
        return

    def Flux_init_singlefile(self, fln, wave_cut=None, linlog=False):
        """Flux_init_singlefile(fln, wave_cut=None, linlog=False)
        Reads a band file and construct a grid.

        wave_cut: Allows to define a lower-upper cut in wavelength [wave_low, wave_up].
        linlog (=False): If true, will rebin the data to be linear in the log space.

        Returns (grid, mu, wav).
        """
        # Context manager so the file handle is always closed (the
        # original opened the file and never closed it).
        with open(fln, 'r') as f:
            lines = f.read()
        # The model files use Fortran-style 'D' exponents; convert so
        # float() can parse them.
        lines = lines.replace('D+','E+')
        lines = lines.replace('D-','E-')
        lines = lines.splitlines()
        # Read the mu values
        mu = np.array(lines[3].split()+lines[4].split()+lines[5].split()+lines[6].split(),dtype=float)
        # Read the info line for each grid point
        hdr = []
        grid = []
        # The first grid point is "special"
        hdr.append(lines[1].split())
        grid.append(lines[8].split()+lines[9].split()+lines[10].split()+lines[11].split())
        # Now the other lines
        for i in np.arange(12,len(lines),6):
            hdr.append(lines[i].split())
            grid.append(lines[i+2].split()+lines[i+3].split()+lines[i+4].split()+lines[i+5].split())
        hdr = np.array(hdr,dtype=float)
        # The wavelength is contained in the first column of the grid element headers.
        wav = hdr[:,0]
        # Log of the intensities; C is presumably the speed of light
        # (imported via import_modules), C*100 converting units -- TODO
        # confirm the unit convention.
        grid = np.log(np.array(grid,dtype=float).T/(C*100)*wav**2)
        # There is no point in keeping grid values for mu < 0. We discard them.
        grid = grid[mu > 0.]
        mu = mu[mu > 0.]
        if wave_cut is not None:
            inds = (wav > wave_cut[0]) * (wav < wave_cut[1])
            grid = grid.take(inds, axis=-1)
            wav = wav[inds]
        if linlog:
            # Resample onto a wavelength grid linear in log space, with
            # linear interpolation between the original samples.
            new_wav, self.v, self.z = Utils.Series.Resample_linlog(wav)
            ws, inds = Utils.Series.Getaxispos_vector(wav, new_wav)
            wav = new_wav
            grid = grid.take(inds, axis=-1)*(1-ws) + grid.take(inds+1, axis=-1)*ws
        return grid, mu, wav

    def Interp_orig(self, val_temp, val_logg, val_mu):
        """
        Obsolete!!!

        Trilinear interpolation of the log-intensity grid in
        (temperature, log g, mu), then multiplied by the projection
        factor mu and the limb-darkening correction. Kept for reference.
        """
        grid = self.grid
        logtemp = self.logtemp
        logg = self.logg
        mu = self.mu
        w1temp, jtemp = self.Getaxispos(logtemp,val_temp)
        w1logg, jlogg = self.Getaxispos(logg,val_logg)
        w1mu, jmu = self.Getaxispos(mu,val_mu)
        w1temp.shape = w1temp.size,1
        w1logg.shape = w1logg.size,1
        w1mu.shape = w1mu.size,1
        w0mu = 1.-w1mu
        w0temp = 1.-w1temp
        w0logg = 1.-w1logg
        fl = w0logg*(w0temp*(w0mu*grid[jtemp,jlogg,jmu] \
                            +w1mu*grid[jtemp,jlogg,jmu+1]) \
                    +w1temp*(w0mu*grid[jtemp+1,jlogg,jmu] \
                            +w1mu*grid[jtemp+1,jlogg,jmu+1])) \
            +w1logg*(w0temp*(w0mu*grid[jtemp,jlogg+1,jmu] \
                            +w1mu*grid[jtemp,jlogg+1,jmu+1]) \
                    +w1temp*(w0mu*grid[jtemp+1,jlogg+1,jmu] \
                            +w1mu*grid[jtemp+1,jlogg+1,jmu+1]))
        val_mu = val_mu.reshape((val_mu.size,1))
        flux = np.exp(fl) * val_mu * self.Limb_darkening(val_mu, self.wav)
        return flux

    def Limb_darkening(self, mu, wav):
        """Limb_darkening(mu, wav)
        Returns the limb darkening factor given the cos(angle)
        of emission, mu, and the wavelength, wav, in angstroms.

        Note: The limb darkening law is from
            Hestroffer and Magnan, A&A, 1998, 333, 338
        """
        # We calculate the alpha power-law index, given the wavelength.
        # Lambda has to be in micrometer, hence the 1e4 factor.
        alpha = -0.023 + 0.292*(1e4/wav)
        return 1 - mu*(1-mu**alpha)
######################## class Atmo_grid_spectro ########################
|
bretonrREPO_NAMEIcarusPATH_START.@Icarus_extracted@Icarus-master@Icarus@Atmosphere@Atmo_spectro.py@.PATH_END.py
|
{
"filename": "_enabled.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/volume/colorbar/tickformatstop/_enabled.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class EnabledValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``enabled`` property of
    ``volume.colorbar.tickformatstop``.
    """

    def __init__(
        self,
        plotly_name="enabled",
        parent_name="volume.colorbar.tickformatstop",
        **kwargs,
    ):
        # Default edit type is "calc" unless explicitly overridden.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@volume@colorbar@tickformatstop@_enabled.py@.PATH_END.py
|
{
"filename": "TinyReconstruction.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioReco/test/tiny_reconstruction/TinyReconstruction.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import os
import sys
import datetime
import matplotlib
import matplotlib.pyplot as plt
from NuRadioReco.utilities import units
from NuRadioReco.detector import detector
import NuRadioReco.modules.io.coreas.readCoREAS
import NuRadioReco.modules.io.coreas.simulationSelector
import NuRadioReco.modules.efieldToVoltageConverter
import NuRadioReco.modules.ARIANNA.hardwareResponseIncorporator
import NuRadioReco.modules.channelGenericNoiseAdder
import NuRadioReco.modules.trigger.simpleThreshold
import NuRadioReco.modules.channelBandPassFilter
import NuRadioReco.modules.eventTypeIdentifier
import NuRadioReco.modules.channelStopFilter
import NuRadioReco.modules.channelSignalReconstructor
import NuRadioReco.modules.correlationDirectionFitter
import NuRadioReco.modules.voltageToEfieldConverter
import NuRadioReco.modules.electricFieldSignalReconstructor
import NuRadioReco.modules.electricFieldBandPassFilter
import NuRadioReco.modules.voltageToAnalyticEfieldConverter
import NuRadioReco.modules.channelResampler
import NuRadioReco.modules.electricFieldResampler
import NuRadioReco.modules.io.eventWriter
# Logging level
import logging
from NuRadioReco.modules.base import module
# Module-level logger for this reconstruction script.
logger = module.setup_logger(name='NuRadioReco', level=logging.WARNING)
# Non-interactive plotting backend so the script also runs headless.
matplotlib.use('agg')
plt.switch_backend('agg')
"""
Here, we show an example reconstruction of CoREAS data. A variety of modules
are being used. Please refer to details in the modules themselves.
Input parameters (all with a default provided)
---------------------
Command line input:
python FullReconstruction.py station_id input_file detector_file templates
station_id: int
station id to be used, default 32
input_file: str
CoREAS simulation file, default example data
detector_file: str
path to json detector database, default given
template_path: str
path to signal templates, default given
"""
dir_path = os.path.dirname(os.path.realpath(__file__))  # get the directory of this file

try:
    station_id = int(sys.argv[1])  # specify station id
    input_file = sys.argv[2]  # file with coreas simulations
except (IndexError, ValueError):
    # Missing or non-integer arguments: fall back to the example data.
    # (The previous bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt.)
    logger.warning("Usage: python FullReconstruction.py station_id input_file detector templates")
    station_id = 32
    input_file = os.path.join(dir_path, "../../examples/example_data/example_event.h5")
    logger.warning("Using default station {}".format(32))

if(station_id == 32):
    triggered_channels = [0, 1, 2, 3]
    used_channels_efield = [0, 1, 2, 3]
    used_channels_fit = [0, 1, 2, 3]
    channel_pairs = ((0, 2), (1, 3))
else:
    # NOTE(review): the channel lists above stay undefined for other
    # station ids, so the reconstruction below would fail with NameError.
    logger.warning("Default channels not defined for station_id != 32")

try:
    detector_file = sys.argv[3]
    logger.info("Using {0} as detector ".format(detector_file))
except IndexError:
    # Third argument absent: use the bundled example detector description.
    logger.warning("Using default file for detector")
    detector_file = os.path.join(dir_path, "../../examples/example_data/arianna_station_32.json")

det = detector.Detector(json_filename=detector_file)  # detector file
det.update(datetime.datetime(2018, 10, 1))
# initialize all modules that are needed for processing
# provide input parameters that are to remain constant during processing
readCoREAS = NuRadioReco.modules.io.coreas.readCoREAS.readCoREAS()
readCoREAS.begin([input_file], station_id, n_cores=10, max_distance=None, seed=0)
simulationSelector = NuRadioReco.modules.io.coreas.simulationSelector.simulationSelector()
simulationSelector.begin()
efieldToVoltageConverter = NuRadioReco.modules.efieldToVoltageConverter.efieldToVoltageConverter()
efieldToVoltageConverter.begin(debug=False)
hardwareResponseIncorporator = NuRadioReco.modules.ARIANNA.hardwareResponseIncorporator.hardwareResponseIncorporator()
# Fixed noise seed so the test output is reproducible.
channelGenericNoiseAdder = NuRadioReco.modules.channelGenericNoiseAdder.channelGenericNoiseAdder()
channelGenericNoiseAdder.begin(seed=1)
triggerSimulator = NuRadioReco.modules.trigger.simpleThreshold.triggerSimulator()
triggerSimulator.begin()
channelBandPassFilter = NuRadioReco.modules.channelBandPassFilter.channelBandPassFilter()
channelBandPassFilter.begin()
eventTypeIdentifier = NuRadioReco.modules.eventTypeIdentifier.eventTypeIdentifier()
channelStopFilter = NuRadioReco.modules.channelStopFilter.channelStopFilter()
# Signal/noise windows (in ns) for the signal-property reconstruction.
channelSignalReconstructor = NuRadioReco.modules.channelSignalReconstructor.channelSignalReconstructor()
channelSignalReconstructor.begin(signal_window_start=20 * units.ns, signal_window_length=80 * units.ns, noise_window_start=150 * units.ns, noise_window_length=200 * units.ns)
correlationDirectionFitter = NuRadioReco.modules.correlationDirectionFitter.correlationDirectionFitter()
voltageToEfieldConverter = NuRadioReco.modules.voltageToEfieldConverter.voltageToEfieldConverter()
electricFieldSignalReconstructor = NuRadioReco.modules.electricFieldSignalReconstructor.electricFieldSignalReconstructor()
electricFieldSignalReconstructor.begin()
voltageToAnalyticEfieldConverter = NuRadioReco.modules.voltageToAnalyticEfieldConverter.voltageToAnalyticEfieldConverter()
voltageToAnalyticEfieldConverter.begin()
electricFieldResampler = NuRadioReco.modules.electricFieldResampler.electricFieldResampler()
electricFieldResampler.begin()
electricFieldBandPassFilter = NuRadioReco.modules.electricFieldBandPassFilter.electricFieldBandPassFilter()
channelResampler = NuRadioReco.modules.channelResampler.channelResampler()
channelResampler.begin()
# Output writer for the reconstructed events (.nur file).
eventWriter = NuRadioReco.modules.io.eventWriter.eventWriter()
output_filename = "MC_example_station_{}.nur".format(station_id)
eventWriter.begin(output_filename)
event_counter = 0
# Loop over all events in file as initialized in readCoREAS and perform analysis
for iE, evt in enumerate(readCoREAS.run(detector=det)):
    logger.warning("Processing event number {}".format(event_counter))
    logger.info("processing event {:d} with id {:d}".format(iE, evt.get_id()))
    station = evt.get_station(station_id)
    if simulationSelector.run(evt, station.get_sim_station(), det):
        # Simulation chain: fold the field through the detector response
        # and add noise before triggering.
        efieldToVoltageConverter.run(evt, station, det)
        hardwareResponseIncorporator.run(evt, station, det, sim_to_data=True)
        channelGenericNoiseAdder.run(evt, station, det, type="rayleigh", amplitude=20 * units.mV)
        # NOTE(review): 'number_concidences' matches the module's keyword
        # spelling; do not "fix" it here without changing the module too.
        triggerSimulator.run(evt, station, det, number_concidences=2, threshold=100 * units.mV)
        if station.get_trigger('default_simple_threshold').has_triggered():
            # Reconstruction chain: filter, reconstruct signal properties,
            # fit the arrival direction and unfold the electric field.
            channelBandPassFilter.run(evt, station, det, passband=[80 * units.MHz, 500 * units.MHz], filter_type='butter', order=10)
            eventTypeIdentifier.run(evt, station, "forced", 'cosmic_ray')
            channelStopFilter.run(evt, station, det)
            channelBandPassFilter.run(evt, station, det, passband=[60 * units.MHz, 600 * units.MHz], filter_type='rectangular')
            channelSignalReconstructor.run(evt, station, det)
            hardwareResponseIncorporator.run(evt, station, det)
            correlationDirectionFitter.run(evt, station, det, n_index=1., channel_pairs=channel_pairs)
            voltageToEfieldConverter.run(evt, station, det, use_channels=used_channels_efield)
            electricFieldBandPassFilter.run(evt, station, det, passband=[80 * units.MHz, 300 * units.MHz])
            electricFieldSignalReconstructor.run(evt, station, det)
            voltageToAnalyticEfieldConverter.run(evt, station, det, use_channels=used_channels_efield, bandpass=[80 * units.MHz, 500 * units.MHz], use_MC_direction=False)
            # Downsample before writing to keep the output file small.
            channelResampler.run(evt, station, det, sampling_rate=1 * units.GHz)
            electricFieldResampler.run(evt, station, det, sampling_rate=1 * units.GHz)
            eventWriter.run(evt)
    event_counter += 1
    # This is only a tiny smoke test: stop after a few events.
    if event_counter > 2:
        break
nevents = eventWriter.end()
logger.warning("Finished processing, {} events".format(event_counter))
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioReco@test@tiny_reconstruction@TinyReconstruction.py@.PATH_END.py
|
{
"filename": "periods.py",
"repo_name": "lgrcia/paper-nuance",
"repo_path": "paper-nuance_extracted/paper-nuance-main/workflows/tess_injection_recovery/scripts/periods.py",
"type": "Python"
}
|
import numpy as np
import yaml
from nuance.utils import clean_periods
# Build the trial-period grid for the search: a linear grid taken from
# the workflow config, with periods related to the stellar rotation
# period removed by clean_periods (see nuance.utils).
# The YAML file is now read through a context manager; the original
# `open(...)` call leaked the file handle.
with open(snakemake.input[0], "r") as fh:
    info = yaml.safe_load(fh)
periods = np.linspace(*snakemake.config["search"]["periods_linspace"])
periods = clean_periods(periods, info["star_period"])
output = snakemake.output[0]
np.save(output, periods)
|
lgrciaREPO_NAMEpaper-nuancePATH_START.@paper-nuance_extracted@paper-nuance-main@workflows@tess_injection_recovery@scripts@periods.py@.PATH_END.py
|
{
"filename": "ueda2003.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/input/litdata/ueda2003.py",
"type": "Python"
}
|
"""
Ueda, Y., Akiyama, M., Ohta, K., & Miyaji, T. 2003, ApJ, 598, 886
Notes
-----
There are three different models here:
(1) Pure Luminosity Evolution (`ple`)
(2) Pure Density Evolution (`pde`)
(3) Luminosity Dependent Density Evolution (`ldde`)
The cosmology assumed was (H0, Om, Ol) = (70, 0.3, 0.7)
"""
import numpy as np
# Evolution model used when the caller does not request one explicitly.
default_evolution = 'ldde'

# Validity range of the fits: 41.5 <= log10(L) <= 46.5 in the
# rest-frame 2-10 keV band.
qsolf_info = \
{
    'logLmin': 41.5,
    'logLmax': 46.5,
    'band': (2., 10.),
    'band_units': 'keV',
}

# Best-fit parameters: Pure Luminosity Evolution (PLE) model.
qsolf_ple_pars = \
{
    'A': 14.1e-6,
    'logLstar': 43.66,
    'gamma1': 0.82,
    'gamma2': 2.37,
    'p1': 2.7,
    'p2': 0.0,
    'zc': 1.15,
    'evolution': 'ple',
}

# Symmetrized 1-sigma errors for the PLE parameters (asymmetric errors
# noted inline; tiny values stand in for "held fixed").
qsolf_ple_err = \
{
    'A': 1.0e-6,
    'logLstar': 0.17,
    'gamma1': 0.13,
    'gamma2': 0.16,
    'p1': 0.21,  # should be +0.17/-0.25
    'p2': 1e-10,  # held fixed
    'zc': 0.145,  # should be +0.2/-0.07
}

# Best-fit parameters: Pure Density Evolution (PDE) model.
qsolf_pde_pars = \
{
    'A': 2.64e-6,
    'logLstar': 44.11,
    'gamma1': 0.93,
    'gamma2': 2.23,
    'p1': 4.2,
    'p2': 0.0,
    'zc': 1.14,
    'evolution': 'pde',
}

qsolf_pde_err = \
{
    'A': 0.18e-6,
    'logLstar': 0.23,
    'gamma1': 0.13,
    'gamma2': 0.15,
    'p1': 0.32,
    'p2': 1e-10,  # held fixed
    'zc': 0.145,  # should be +0.13/-0.16
}

# Best-fit parameters: Luminosity-Dependent Density Evolution (LDDE).
qsolf_ldde_pars = \
{
    'A': 5.04e-6,
    'logLstar': 43.94,
    'gamma1': 0.86,
    'gamma2': 2.23,
    'p1': 4.23,
    'p2': -1.5,
    'zc': 1.9,
    'logLa': 44.6,
    'alpha': 0.335,
    'evolution': 'ldde',
}

qsolf_ldde_err = \
{
    'A': 0.33e-6,
    'logLstar': 0.23,  # Should be +0.21/-0.26
    'gamma1': 0.15,
    'gamma2': 0.13,
    'p1': 0.39,
    'p2': 1e-10,  # held fixed
    'zc': 1e-10,  # held fixed
    'logLa': 1e-10,  # held fixed
    'alpha': 0.07,
}

# Lookup tables: evolution-model name -> default parameters / errors.
kwargs_by_evolution = \
{
    'ple': qsolf_ple_pars,
    'pde': qsolf_pde_pars,
    'ldde': qsolf_ldde_pars,
}

errs_by_evolution = \
{
    'ple': qsolf_ple_err,
    'pde': qsolf_pde_err,
    'ldde': qsolf_ldde_err,
}
def _parse_kwargs(**kwargs):
    """
    Fill in model parameters for the requested evolution model.

    Returns a kwargs dict holding the best-fit parameters for the chosen
    'evolution' model ('ple', 'pde' or 'ldde'), with any caller-supplied
    values taking precedence. With no arguments, returns the defaults of
    `default_evolution`.
    """
    if not kwargs:
        # Copy so callers can never mutate the module-level defaults.
        kwargs = dict(kwargs_by_evolution[default_evolution])
    elif 'evolution' in kwargs:
        # Copy the defaults before updating: the original updated the
        # shared module-level dict in place, so overrides from one call
        # permanently contaminated the defaults for all later calls.
        kw = dict(kwargs_by_evolution[kwargs['evolution']])
        kw.update(kwargs)
        kwargs = kw
    elif 'evolution' not in kwargs:
        kwargs['evolution'] = default_evolution
    return kwargs
_eofz_f1 = lambda z, p1: (1. + z)**p1
_eofz_f2 = lambda z, p1, p2, zc: _eofz_f1(zc, p1) * ((1. + z) / (1. + zc))**p2
def _zc_of_L(L, **kwargs):
"""
Compute cutoff redshift for luminosity-dependent density evolution.
"""
La = 10**kwargs['logLa']
if L < La:
zc_ast = kwargs['zc'] * (L / La)**kwargs['alpha']
elif L >= La:
zc_ast = kwargs['zc']
return zc_ast
def _evolution_factor_pde(z, **kwargs):
    """
    Pure density evolution factor: a broken power law in (1 + z) that
    switches from index p1 to index p2 at the cutoff redshift zc.
    """
    below_cutoff = z < kwargs['zc']
    if below_cutoff:
        return _eofz_f1(z, kwargs['p1'])
    return _eofz_f2(z, kwargs['p1'], kwargs['p2'], kwargs['zc'])
def _evolution_factor_ldde(z, L, **kwargs):
    """
    Luminosity-dependent density evolution factor: the PDE factor
    evaluated with a cutoff redshift that depends on L. Accepts a scalar
    or an array for L.
    """
    try:
        # Scalar path. When L is an array, the comparison inside
        # _zc_of_L raises ValueError (ambiguous truth value), which is
        # caught below to take the element-wise path.
        kwargs['zc'] = _zc_of_L(L, **kwargs)
        eofz = _evolution_factor_pde(z, **kwargs)
    except ValueError:
        # Array path: compute the cutoff redshift per luminosity.
        # NOTE(review): np.zeros_like inherits L's dtype -- integer L
        # would silently truncate; callers appear to pass floats.
        eofz = np.zeros_like(L)
        zcarr = np.array([_zc_of_L(LL ,**kwargs) for LL in L])
        for i, zcval in enumerate(zcarr):
            kwargs['zc'] = zcval
            eofz[i] = _evolution_factor_pde(z, **kwargs)
    return eofz
def _DoublePowerLaw(L, **kwargs):
# Defaults from PDE model
Lstar = 10**kwargs['logLstar']
return kwargs['A'] / ((L / Lstar)**kwargs['gamma1'] \
+ (L / Lstar)**kwargs['gamma2'])
def LuminosityFunction(L, z, **kwargs):
    """
    Compute the 2-10 keV quasar luminosity function.

    Parameters
    ----------
    L : int, float, np.ndarray
        Luminosity of interest [erg / s]
    z : int, float
        Redshift of interest
    kwargs['evolution'] : str
        "ple": Pure Luminosity Evolution (Eq. 11)
        "pde": Pure Density Evolution (Eq. 12)
        "ldde": Luminosity-Dependent Density Evolution (Eqs. 16-17)
    """
    kwargs = _parse_kwargs(**kwargs)
    model = kwargs['evolution']
    if model == 'ple':
        # Luminosity evolution: shift L by the evolution factor, then
        # evaluate the redshift-independent double power law.
        shifted_L = L / _evolution_factor_pde(z, **kwargs)
        return _DoublePowerLaw(shifted_L, **kwargs)
    if model == 'pde':
        # Density evolution: scale the z = 0 double power law.
        return _DoublePowerLaw(L, **kwargs) * _evolution_factor_pde(z, **kwargs)
    if model == 'ldde':
        # Density evolution with a luminosity-dependent cutoff redshift.
        return _DoublePowerLaw(L, **kwargs) * _evolution_factor_ldde(z, L, **kwargs)
    raise ValueError('Unrecognized evolution model: {!s}'.format(model))
def Spectrum():
    """Placeholder; not implemented."""
    pass
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@input@litdata@ueda2003.py@.PATH_END.py
|
{
"filename": "luminosity_model--BPL_3param--parameter_error_estimation.py",
"repo_name": "DebduttaPaul/luminosity_function_of_sGRBs",
"repo_path": "luminosity_function_of_sGRBs_extracted/luminosity_function_of_sGRBs-master/luminosity_model--BPL_3param--parameter_error_estimation.py",
"type": "Python"
}
|
from __future__ import division
from astropy.io import ascii
from astropy.table import Table
from scipy.optimize import curve_fit
from scipy.integrate import quad, simps
import debduttaS_functions as mf
import specific_functions as sf
import time, pickle, pprint
import numpy as np
import matplotlib.pyplot as plt
plt.rc('axes', linewidth = 2)
plt.rc('font', family = 'serif', serif = 'cm10')
plt.rc('text', usetex = True)
plt.rcParams['text.latex.preamble'] = [r'\boldmath']
####################################################################################################################################################
# Physical constants, unit conversions, histogram settings and plotting defaults.
P = np.pi # Dear old pi!
CC = 0.73 # Cosmological constant.
L_norm = 1e52 # in ergs.s^{-1}.
T90_cut = 2 # in sec.
cm_per_Mpc = 3.0857 * 1e24 # Conversion factor: centimetres per megaparsec.
erg_per_keV = 1.6022 * 1e-9 # Conversion factor: erg per keV.
logL_bin = 1.0 # Luminosity histogram bin width, in log10(L / L_norm).
logL_min = -5.0 # Lower edge of the luminosity histogram, log10(L / L_norm).
logL_max = +5.1 # Upper edge of the luminosity histogram, log10(L / L_norm).
z_min = 1e-1 # Minimum redshift of the simulated grid.
z_max = 1e+1 # Maximum redshift of the simulated grid.
padding = 8 # The padding of the axes labels.
size_font = 16 # The fontsize in the images.
marker_size = 7 # The size of markers in scatter plots.
al = 0.8 # The brightness of plots.
####################################################################################################################################################
####################################################################################################################################################
constraints = 4 # Passed to mf.reduced_chisquared — presumably the number of fitted parameters; verify against that helper.
#~ n = 1.0
#~ n = 1.5
n = 2.0 # Selects the delayed-CSFR table ('CSFR_delayed--n={n}.txt') loaded below.
####################################################################################################################################################
k_table = ascii.read( './../../tables/k_correction.txt', format = 'fixed_width' ) ; global z_sim, dL_sim, k_Fermi, k_Swift
z_sim = k_table['z'].data
dL_sim = k_table['dL'].data
k_BATSE = k_table['k_BATSE'].data
k_Fermi = k_table['k_Fermi'].data
k_Swift = k_table['k_Swift'].data
ind_zMin = mf.nearest(z_sim, z_min)
ind_zMax = mf.nearest(z_sim, z_max)
z_sim = z_sim[ ind_zMin : ind_zMax]
dL_sim = dL_sim[ ind_zMin : ind_zMax]
k_BATSE = k_BATSE[ind_zMin : ind_zMax]
k_Fermi = k_Fermi[ind_zMin : ind_zMax]
k_Swift = k_Swift[ind_zMin : ind_zMax]
volume_tab = ascii.read( './../../tables/rho_star_dot.txt', format = 'fixed_width' ) ; global volume_term
volume_term = volume_tab['vol'].data ; volume_term = volume_term[ind_zMin : ind_zMax]
Phi_table = ascii.read( './../../tables/CSFR_delayed--n={0:.1f}.txt'.format(n), format = 'fixed_width' ) ; global Phi
Phi = Phi_table['CSFR_delayed'].data ; Phi = Phi[ind_zMin : ind_zMax]
threshold_data = ascii.read( './../../tables/thresholds.txt', format = 'fixed_width' )
L_cut__Fermi = threshold_data['L_cut__Fermi'].data ; L_cut__Fermi = L_cut__Fermi[ind_zMin : ind_zMax]
L_cut__Swift = threshold_data['L_cut__Swift'].data ; L_cut__Swift = L_cut__Swift[ind_zMin : ind_zMax]
L_cut__BATSE = threshold_data['L_cut__BATSE'].data ; L_cut__BATSE = L_cut__BATSE[ind_zMin : ind_zMax]
L_cut__ACZTI = threshold_data['L_cut__CZTI'].data ; L_cut__ACZTI = L_cut__ACZTI[ind_zMin : ind_zMax]
L_vs_z__known_short = ascii.read( './../../tables/L_vs_z__known_short.txt', format = 'fixed_width' )
L_vs_z__Fermi_short = ascii.read( './../../tables/L_vs_z__Fermi_short.txt', format = 'fixed_width' )
L_vs_z__FermE_short = ascii.read( './../../tables/L_vs_z__FermE_short.txt', format = 'fixed_width' )
L_vs_z__Swift_short = ascii.read( './../../tables/L_vs_z__Swift_short.txt', format = 'fixed_width' )
L_vs_z__other_short = ascii.read( './../../tables/L_vs_z__other_short.txt', format = 'fixed_width' )
L_vs_z__BATSE_short = ascii.read( './../../tables/L_vs_z__BATSE_short.txt', format = 'fixed_width' )
known_short_redshift = L_vs_z__known_short[ 'measured z'].data
known_short_Luminosity = L_vs_z__known_short[ 'Luminosity [erg/s]'].data
known_short_Luminosity_error = L_vs_z__known_short[ 'Luminosity_error [erg/s]'].data
Fermi_short_redshift = L_vs_z__Fermi_short[ 'pseudo z' ].data
Fermi_short_Luminosity = L_vs_z__Fermi_short[ 'Luminosity [erg/s]'].data
Fermi_short_Luminosity_error = L_vs_z__Fermi_short[ 'Luminosity_error [erg/s]'].data
FermE_short_redshift = L_vs_z__FermE_short[ 'pseudo z'].data
FermE_short_Luminosity = L_vs_z__FermE_short[ 'Luminosity [erg/s]'].data
FermE_short_Luminosity_error = L_vs_z__FermE_short[ 'Luminosity_error [erg/s]'].data
Swift_short_redshift = L_vs_z__Swift_short[ 'pseudo z' ].data
Swift_short_Luminosity = L_vs_z__Swift_short[ 'Luminosity [erg/s]'].data
Swift_short_Luminosity_error = L_vs_z__Swift_short[ 'Luminosity_error [erg/s]'].data
other_short_redshift = L_vs_z__other_short[ 'measured z'].data
other_short_Luminosity = L_vs_z__other_short[ 'Luminosity [erg/s]'].data
other_short_Luminosity_error = L_vs_z__other_short[ 'Luminosity_error [erg/s]'].data
BATSE_short_redshift = L_vs_z__BATSE_short[ 'pseudo z'].data
BATSE_short_Luminosity = L_vs_z__BATSE_short[ 'Luminosity [erg/s]'].data
BATSE_short_Luminosity_error = L_vs_z__BATSE_short[ 'Luminosity_error [erg/s]'].data
# Drop "other"-sample bursts with essentially zero luminosity (failed estimates).
inds_to_delete = np.where(other_short_Luminosity < 1e-16 )[0]
print 'other GRBs, deleted : ', inds_to_delete.size
other_short_redshift = np.delete( other_short_redshift , inds_to_delete )
other_short_Luminosity = np.delete( other_short_Luminosity, inds_to_delete )
other_short_Luminosity_error = np.delete( other_short_Luminosity_error, inds_to_delete )
# Drop Swift bursts lying below the redshift-dependent Swift detection threshold.
inds_to_delete = []
for j, z in enumerate( Swift_short_redshift ):
	# Nearest simulated-redshift grid point to this burst's redshift.
	array = np.abs( z_sim - z )
	ind = np.where( array == array.min() )[0]
	if ( Swift_short_Luminosity[j] - L_cut__Swift[ind] ) < 0 :
		inds_to_delete.append( j )
inds_to_delete = np.array( inds_to_delete )
print 'Swift GRBs, deleted : ', inds_to_delete.size, '\n'
Swift_short_redshift = np.delete( Swift_short_redshift , inds_to_delete )
Swift_short_Luminosity = np.delete( Swift_short_Luminosity , inds_to_delete )
Swift_short_Luminosity_error = np.delete( Swift_short_Luminosity_error, inds_to_delete )
# Drop Fermi and Swift bursts whose (pseudo) redshift exceeds the simulated range.
inds_to_delete = np.where( Fermi_short_redshift > z_max )[0]
print 'Fermi GRBs, deleted : ', inds_to_delete.size
Fermi_short_redshift = np.delete( Fermi_short_redshift , inds_to_delete )
Fermi_short_Luminosity = np.delete( Fermi_short_Luminosity , inds_to_delete )
Fermi_short_Luminosity_error = np.delete( Fermi_short_Luminosity_error, inds_to_delete )
inds_to_delete = np.where( Swift_short_redshift > z_max )[0]
print 'Swift GRBs, deleted : ', inds_to_delete.size, '\n'
Swift_short_redshift = np.delete( Swift_short_redshift , inds_to_delete )
Swift_short_Luminosity = np.delete( Swift_short_Luminosity , inds_to_delete )
Swift_short_Luminosity_error = np.delete( Swift_short_Luminosity_error, inds_to_delete )
# Summary of sample sizes after the cuts.
print 'Number of "known" GRBs : ', known_short_redshift.size
print 'Number of "Fermi" GRBs : ', Fermi_short_redshift.size
print 'Number of "FermE" GRBs : ', FermE_short_redshift.size
print 'Number of "Swift" GRBs : ', Swift_short_redshift.size
print 'Number of "other" GRBs : ', other_short_redshift.size, '\n'
Fermi_short_Luminosity = np.concatenate( [ known_short_Luminosity , Fermi_short_Luminosity , FermE_short_Luminosity ] )
Fermi_short_Luminosity_error = np.concatenate( [ known_short_Luminosity_error , Fermi_short_Luminosity_error , FermE_short_Luminosity_error ] )
N__Fermi = Fermi_short_Luminosity.size
x__Fermi_short, y__Fermi_short, y__Fermi_short_poserr, y__Fermi_short_negerr = sf.my_histogram_with_errorbars( np.log10(Fermi_short_Luminosity/L_norm), np.log10( (Fermi_short_Luminosity + Fermi_short_Luminosity_error) / L_norm ) - np.log10(Fermi_short_Luminosity/L_norm), np.log10( (Fermi_short_Luminosity + Fermi_short_Luminosity_error) / L_norm ) - np.log10(Fermi_short_Luminosity/L_norm), logL_bin*1.0, logL_min, logL_max )
y__Fermi_short_error = np.maximum(y__Fermi_short_negerr, y__Fermi_short_poserr)+1
print 'Total number, Fermi : ', N__Fermi
Swift_short_Luminosity = np.concatenate( [ other_short_Luminosity , Swift_short_Luminosity ] )
Swift_short_Luminosity_error = np.concatenate( [ other_short_Luminosity_error , Swift_short_Luminosity_error ] )
# To add artificial errors, of percentage : f
f = 45.0
Swift_short_Luminosity_error = Swift_short_Luminosity_error + (f/100)*Swift_short_Luminosity
N__Swift = Swift_short_Luminosity.size
x__Swift_short, y__Swift_short, y__Swift_short_poserr, y__Swift_short_negerr = sf.my_histogram_with_errorbars( np.log10(Swift_short_Luminosity/L_norm), np.log10( (Swift_short_Luminosity + Swift_short_Luminosity_error) / L_norm ) - np.log10(Swift_short_Luminosity/L_norm), np.log10( (Swift_short_Luminosity + Swift_short_Luminosity_error) / L_norm ) - np.log10(Swift_short_Luminosity/L_norm), logL_bin*1.0, logL_min, logL_max )
y__Swift_short_error = np.maximum(y__Swift_short_negerr, y__Swift_short_poserr)+1
print 'Total number, Swift : ', N__Swift
print 'Total number, Fermi & Swift : ', N__Fermi + N__Swift, '\n'
inds_to_delete = []
for j, z in enumerate( BATSE_short_redshift ):
array = np.abs( z_sim - z )
ind = np.where( array == array.min() )[0]
if ( BATSE_short_Luminosity[j] - L_cut__BATSE[ind] ) < 0 :
inds_to_delete.append( j )
inds_to_delete = np.array( inds_to_delete )
print 'Number of BATSE GRBs : ', BATSE_short_Luminosity.size
print 'BATSE GRBs, deleted : ', inds_to_delete.size
BATSE_short_redshift = np.delete( BATSE_short_redshift , inds_to_delete )
BATSE_short_Luminosity = np.delete( BATSE_short_Luminosity , inds_to_delete )
BATSE_short_Luminosity_error = np.delete( BATSE_short_Luminosity_error, inds_to_delete )
# To add artificial errors, of percentage : f
f = 48.0
BATSE_short_Luminosity_error = BATSE_short_Luminosity_error + (f/100)*BATSE_short_Luminosity
N__BATSE = BATSE_short_Luminosity.size
x__BATSE_short, y__BATSE_short, y__BATSE_short_poserr, y__BATSE_short_negerr = sf.my_histogram_with_errorbars( np.log10(BATSE_short_Luminosity/L_norm), np.log10( (BATSE_short_Luminosity + BATSE_short_Luminosity_error) / L_norm ) - np.log10(BATSE_short_Luminosity/L_norm), np.log10( (BATSE_short_Luminosity + BATSE_short_Luminosity_error) / L_norm ) - np.log10(BATSE_short_Luminosity/L_norm), logL_bin*1.0, logL_min, logL_max )
y__BATSE_short_error = np.maximum(y__BATSE_short_negerr, y__BATSE_short_poserr)+1
print ' Number, BATSE : ', N__BATSE
print '\n'
print 'Fermi error percentage: ', np.mean(Fermi_short_Luminosity_error/Fermi_short_Luminosity)*100
print 'Swift error percentage: ', np.mean(Swift_short_Luminosity_error/Swift_short_Luminosity)*100
print 'BATSE error percentage: ', np.mean(BATSE_short_Luminosity_error/BATSE_short_Luminosity)*100
print '\n'
Luminosity_mids = x__Fermi_short
Luminosity_mins = L_norm * ( 10 ** ( Luminosity_mids - logL_bin/2 ) )
Luminosity_maxs = L_norm * ( 10 ** ( Luminosity_mids + logL_bin/2 ) )
L_lo = Luminosity_mins.min()
L_hi = Luminosity_maxs.max()
print '\n\n'
####################################################################################################################################################
###############################################################################################################################################s
def model_BPL__Fermi( x__Fermi_short, Gamma, nu1, nu2, coeff ):
	"""
	Model the normalized luminosity distribution of Fermi sGRBs for a
	broken power-law (BPL) luminosity function.

	Parameters
	----------
	x__Fermi_short : np.ndarray
		Bin centres in log10(L / L_norm); not used in the body but kept so
		the signature matches a curve_fit-style caller.
	Gamma : float
		Index applied to the detection term (dL^2 * k)^(-Gamma).
	nu1, nu2 : float
		Faint-end and bright-end BPL indices.
	coeff : float
		Break luminosity in units of L_norm.

	Returns
	-------
	np.ndarray
		Model counts per luminosity bin, normalized to unit sum.
	"""
	CSFR = Phi * volume_term  # delayed CSFR times the comoving-volume term
	L_b = ( L_norm * coeff ) * np.ones(z_sim.size)  # break luminosity on the z grid
	# Redshift-dependent detection weight and its integral over the z grid.
	den_int = dL_sim**2 * k_Fermi
	den_int = den_int ** (-Gamma)
	deno = simps( den_int, z_sim )
	# Analytic integral of the normalized BPL over the full range [L_lo, L_hi]:
	# faint-side piece (below the break) plus bright-side piece (above it).
	denominator = ( ( 1 - ((L_lo/L_b)**(Gamma-nu1+1)) ) / (Gamma-nu1+1) ) + ( ( ((L_hi/L_b)**(Gamma-nu2+1)) - 1 ) / (Gamma-nu2+1) )
	N_vs_L__model = np.zeros(Luminosity_mids.size)
	for j, L1 in enumerate( Luminosity_mins ):
		# Lower integration limit: the larger of the bin edge and the
		# redshift-dependent Fermi detection threshold.
		inds = np.where( L_cut__Fermi <= L1 )[0]
		Lmin = L_cut__Fermi.copy()
		Lmin[inds] = L1
		L2 = Luminosity_maxs[j]
		Lmax = L2 * np.ones(z_sim.size)
		# Piecewise analytic integral of the BPL over [Lmin, Lmax], split by
		# where the break L_b falls relative to the bin [L1, L2].
		integral_over_L = L_b.copy()
		ind_low = np.where( L_b <= L1 )[0]
		ind_mid = np.where( (L1 < L_b) & (L_b < L2) )[0]
		ind_high = np.where( L2 <= L_b )[0]
		integral_over_L[ind_low] = ( ( ((Lmax/L_b)[ind_low] )**(Gamma-nu2+1) ) - ( ((Lmin/L_b)[ind_low ])**(Gamma-nu2+1) ) ) / (Gamma-nu2+1)
		integral_over_L[ind_mid] = ( ( 1 - ( ((Lmin/L_b)[ind_mid])**(Gamma-nu1+1) ) ) / (Gamma-nu1+1) ) + ( ( ( ((Lmax/L_b)[ind_mid])**(Gamma-nu2+1) ) - 1 ) / (Gamma-nu2+1) )
		integral_over_L[ind_high] = ( ( ((Lmax/L_b)[ind_high])**(Gamma-nu1+1) ) - ( ((Lmin/L_b)[ind_high])**(Gamma-nu1+1) ) ) / (Gamma-nu1+1)
		# BUG FIX: this result was previously assigned to a misspelled name
		# ("integral_overL") and silently discarded, so the z-dependent BPL
		# normalization never entered the model.
		integral_over_L = integral_over_L / denominator
		# Guard against negative values from bins entirely below threshold.
		ind = np.where( integral_over_L <= 0 )[0]
		integral_over_L[ind] = 0
		integrand = ( CSFR * den_int/deno ) * integral_over_L
		N_vs_L__model[j] = simps( integrand, z_sim )
	# Normalize to unit sum so the caller can scale by the total counts.
	norm = np.sum(N_vs_L__model)
	N_vs_L__model = N_vs_L__model / norm
	return N_vs_L__model
def find_discrepancy( model, observed ):
	"""Return the sum of squared residuals between model and observed counts."""
	residuals = model - observed
	return np.sum( residuals ** 2 )
####################################################################################################################################################
####################################################################################################################################################
print '################################################################################'
print '\n\n'
Gamma__Fermi = 0.001
#~ ## n = 1.0
#~ Fermi__nu1_array = np.array( [0.01, 0.23, 0.48, 0.68, 0.69, 0.70, 0.71] )
#~ Fermi__nu2_array = np.array( [1.65, 1.66, 1.67, 1.86, 2.86, 2.93, 2.94, 2.95] )
#~ Fermi__Lb__array = np.array( [0.85, 0.86, 0.87, 1.52, 2.92, 3.09, 3.10, 3.11, 3.12] )
#~ ## n = 1.5
#~ Fermi__nu1_array = np.array( [0.01, 0.23, 0.38, 0.58, 0.59, 0.60, 0.61, 0.62] )
#~ Fermi__nu2_array = np.array( [1.65, 1.66, 1.67, 1.85, 2.85, 2.87, 2.88, 2.89, 2.90] )
#~ Fermi__Lb__array = np.array( [0.84, 0.85, 0.86, 1.46, 2.71, 2.81, 2.82] )
## n = 2.0
Fermi__nu1_array = np.array( [0.01, 0.23, 0.34, 0.55, 0.56, 0.57, 0.58] )
Fermi__nu2_array = np.array( [1.65, 1.66, 1.67, 1.85, 2.85, 2.87, 2.88] )
Fermi__Lb__array = np.array( [0.85, 0.86, 0.87, 1.45, 2.71, 2.76, 2.77] )
Fermi__nu1_size = Fermi__nu1_array.size
Fermi__nu2_size = Fermi__nu2_array.size
Fermi__Lb__size = Fermi__Lb__array.size
print 'nu1_array: ', Fermi__nu1_array
print 'nu2_array: ', Fermi__nu2_array
print 'Lb__array: ', Fermi__Lb__array, '\n'
grid_of_discrepancy__Fermi = np.zeros( (Fermi__nu1_size, Fermi__nu2_size, Fermi__Lb__size ) )
grid_of_rdcdchisqrd__Fermi = grid_of_discrepancy__Fermi.copy()
print 'Grid of {0:d} (nu1) X {1:d} (nu2) X {2:d} (Lb) = {3:d}.'.format( Fermi__nu1_size, Fermi__nu2_size, Fermi__Lb__size, grid_of_rdcdchisqrd__Fermi.size), '\n'
t0 = time.time()
for c1, nu1 in enumerate(Fermi__nu1_array):
for c2, nu2 in enumerate(Fermi__nu2_array):
for cLb, coeff in enumerate(Fermi__Lb__array):
model_fit__Fermi = model_BPL__Fermi( x__Fermi_short, Gamma__Fermi, nu1, nu2, coeff ) * N__Fermi
grid_of_discrepancy__Fermi[c1, c2, cLb] = find_discrepancy( model_fit__Fermi, y__Fermi_short )
grid_of_rdcdchisqrd__Fermi[c1, c2, cLb] = mf.reduced_chisquared( model_fit__Fermi, y__Fermi_short, y__Fermi_short_error, constraints )[2]
print 'Done in {:.3f} seconds.'.format( time.time() - t0 ), '\n\n'
output = open( './../tables/pkl/Fermi--rdcdchisqrd--1.pkl', 'wb' )
pickle.dump( grid_of_rdcdchisqrd__Fermi, output )
output.close()
output = open( './../tables/pkl/Fermi--discrepancy--1.pkl', 'wb' )
pickle.dump( grid_of_discrepancy__Fermi, output )
output.close()
ind_discrepancy_min__Fermi = np.unravel_index( grid_of_discrepancy__Fermi.argmin(), grid_of_discrepancy__Fermi.shape )
nu1__Fermi = Fermi__nu1_array[ind_discrepancy_min__Fermi[0]]
nu2__Fermi = Fermi__nu2_array[ind_discrepancy_min__Fermi[1]]
Lb___Fermi = Fermi__Lb__array[ind_discrepancy_min__Fermi[2]]
print 'Minimum discrepancy of {0:.3f} at nu1 = {1:.2f}, nu2 = {2:.2f}, Lb = {3:.2f}'.format( grid_of_discrepancy__Fermi[ind_discrepancy_min__Fermi], nu1__Fermi, nu2__Fermi, Lb___Fermi )
print 'Reduced-chisquared of {0:.3f}.'.format( grid_of_rdcdchisqrd__Fermi[ind_discrepancy_min__Fermi]), '\n'
ind_rdcdchisqrd_min__Fermi = np.unravel_index( grid_of_rdcdchisqrd__Fermi.argmin(), grid_of_rdcdchisqrd__Fermi.shape )
nu1__Fermi = Fermi__nu1_array[ind_rdcdchisqrd_min__Fermi[0]]
nu2__Fermi = Fermi__nu2_array[ind_rdcdchisqrd_min__Fermi[1]]
Lb___Fermi = Fermi__Lb__array[ind_rdcdchisqrd_min__Fermi[2]]
print 'Minimum reduced-chisquared of {0:.3f} at nu1 = {1:.2f}, nu2 = {2:.2f}, Lb = {3:.2f}'.format( grid_of_rdcdchisqrd__Fermi[ind_rdcdchisqrd_min__Fermi], nu1__Fermi, nu2__Fermi, Lb___Fermi )
print 'Reduced-chisquared of {0:.3f}.'.format( grid_of_rdcdchisqrd__Fermi[ind_rdcdchisqrd_min__Fermi]), '\n\n'
grid_of_chisquared__Fermi = grid_of_rdcdchisqrd__Fermi * 7
chisquared_at_solution = grid_of_chisquared__Fermi[ind_discrepancy_min__Fermi]
chisquared_for_1sigma = chisquared_at_solution + 3.53
print 'Chi-squared at 1-sigma: ', np.round(chisquared_for_1sigma, 3), '\n'
print np.round( grid_of_chisquared__Fermi[ :, ind_discrepancy_min__Fermi[1], ind_discrepancy_min__Fermi[2] ], 3 )
print np.round( grid_of_chisquared__Fermi[ ind_discrepancy_min__Fermi[0], :, ind_discrepancy_min__Fermi[2] ], 3 )
print np.round( grid_of_chisquared__Fermi[ ind_discrepancy_min__Fermi[0], ind_discrepancy_min__Fermi[1], : ], 3 )
print '\n\n'
print '################################################################################'
|
DebduttaPaulREPO_NAMEluminosity_function_of_sGRBsPATH_START.@luminosity_function_of_sGRBs_extracted@luminosity_function_of_sGRBs-master@luminosity_model--BPL_3param--parameter_error_estimation.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/py/LSS/DESI_ke/utils.py",
"type": "Python"
}
|
from subprocess import check_output
def run_command(cmd, noid=False):
    """Run a shell command and return its stdout parsed as an int.

    Parameters
    ----------
    cmd : str
        Command line to execute; split on whitespace (no shell is involved).
    noid : bool, optional
        If True, run the command for its side effects only, discard its
        output, and return 0.

    Returns
    -------
    int
        The command's stdout (newlines stripped) converted to an integer,
        or 0 when ``noid`` is True.

    Raises
    ------
    subprocess.CalledProcessError
        If the command exits with a non-zero status.
    ValueError
        If the output is not a valid integer (and ``noid`` is False).
    """
    print('Command: {}'.format(cmd))
    if noid:
        # Output is irrelevant; still execute the command.
        check_output(cmd.split())
        return 0
    out = check_output(cmd.split()).decode('utf-8').replace('\n', '')
    return int(out)
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@py@LSS@DESI_ke@utils.py@.PATH_END.py
|
{
"filename": "cprint.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/util/cprint.py",
"type": "Python"
}
|
"""
Cross-platform color text printing
Based on colorama (see pyqtgraph/util/colorama/README.txt)
"""
import sys
from .colorama.win32 import windll
from .colorama.winterm import WinColor, WinStyle, WinTerm
_WIN = sys.platform.startswith('win')
if windll is not None:
winterm = WinTerm()
else:
_WIN = False
def winset(reset=False, fore=None, back=None, style=None, stderr=False):
    # Apply console attributes through the win32 terminal wrapper.
    # Only meaningful on Windows, where `winterm` is initialized above;
    # callers guard on _WIN before invoking this.
    if reset:
        winterm.reset_all()
    if fore is not None:
        winterm.fore(fore, stderr)
    if back is not None:
        winterm.back(back, stderr)
    if style is not None:
        winterm.style(style, stderr)
# Lookup tables mapping our integer color codes to ANSI escape strings
# (POSIX terminals) and to win32 attribute kwargs for winset() (Windows).
ANSI = {}
WIN = {}
for i,color in enumerate(['BLACK', 'RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'WHITE']):
    # Export module-level constants: COLOR (0-7), BR_COLOR (8-15), BACK_COLOR (40-47).
    globals()[color] = i
    globals()['BR_' + color] = i + 8
    globals()['BACK_' + color] = i + 40
    ANSI[i] = "\033[%dm" % (30+i)
    ANSI[i+8] = "\033[2;%dm" % (30+i)
    ANSI[i+40] = "\033[%dm" % (40+i)
    # colorama's WinColor names the last entry GREY rather than WHITE.
    color = 'GREY' if color == 'WHITE' else color
    WIN[i] = {'fore': getattr(WinColor, color), 'style': WinStyle.NORMAL}
    WIN[i+8] = {'fore': getattr(WinColor, color), 'style': WinStyle.BRIGHT}
    WIN[i+40] = {'back': getattr(WinColor, color)}
RESET = -1
ANSI[RESET] = "\033[0m"
WIN[RESET] = {'reset': True}
def cprint(stream, *args, **kwds):
    """
    Print with color. Examples::

        # colors are BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE
        cprint('stdout', RED, 'This is in red. ', RESET, 'and this is normal\n')

        # Adding BR_ before the color makes it bright
        cprint('stdout', BR_GREEN, 'This is bright green.\n', RESET)

        # Adding BACK_ changes background color
        cprint('stderr', BACK_BLUE, WHITE, 'This is white-on-blue.', -1)

        # Integers 0-7 for normal, 8-15 for bright, and 40-47 for background.
        # -1 to reset.
        cprint('stderr', 1, 'This is in red.', -1)

    Parameters
    ----------
    stream : str or file-like
        'stdout', 'stderr', or any object with a ``write`` method.
    *args
        Interleaved strings and integer color codes.
    **kwds
        ``stream`` may override the positional stream name; ``stderr``
        (bool) marks the error console when a file-like object is passed.
    """
    if isinstance(stream, str):
        # BUG FIX: the positional stream name was previously discarded
        # (overwritten by kwds.get('stream', 'stdout')), which silently
        # redirected cerr()/cprint('stderr', ...) to stdout. Honor the
        # positional name, keeping the keyword as an explicit override.
        name = kwds.get('stream', stream)
        err = name == 'stderr'
        stream = getattr(sys, name)
    else:
        err = kwds.get('stderr', False)

    if hasattr(stream, 'isatty') and stream.isatty():
        if _WIN:
            # Interactive Windows console: translate codes to win32 calls.
            for arg in args:
                if isinstance(arg, str):
                    stream.write(arg)
                else:
                    winset(stderr=err, **WIN[arg])
        else:
            # Interactive POSIX terminal: emit ANSI escape sequences.
            for arg in args:
                if isinstance(arg, str):
                    stream.write(arg)
                else:
                    stream.write(ANSI[arg])
    else:
        # Not a terminal (file, pipe): drop the color codes entirely.
        for arg in args:
            if isinstance(arg, str):
                stream.write(arg)
def cout(*args):
    """Shorthand for cprint('stdout', ...): write colored text to stdout."""
    cprint('stdout', *args)
def cerr(*args):
    """Shorthand for cprint('stderr', ...): write colored text to stderr."""
    cprint('stderr', *args)
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@util@cprint.py@.PATH_END.py
|
{
"filename": "_plist.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pyrsistent/py3/pyrsistent/_plist.py",
"type": "Python"
}
|
from collections.abc import Sequence, Hashable
from numbers import Integral
from functools import reduce
from typing import Generic, TypeVar
T_co = TypeVar('T_co', covariant=True)
class _PListBuilder(object):
    """
    Mutable helper for building a plist front-to-back, avoiding the
    final reversal that cons-only construction would require.
    """
    __slots__ = ('_head', '_tail')

    def __init__(self):
        self._head = _EMPTY_PLIST
        self._tail = _EMPTY_PLIST

    def _append(self, elem, constructor):
        node = constructor(elem)
        if self._tail:
            # Extend the (not yet published) chain by mutating the tail node.
            self._tail.rest = node
            self._tail = node
        else:
            # First element: head and tail refer to the same node.
            self._head = node
            self._tail = node
        return self._head

    def append_elem(self, elem):
        return self._append(elem, lambda e: PList(e, _EMPTY_PLIST))

    def append_plist(self, pl):
        return self._append(pl, lambda l: l)

    def build(self):
        return self._head
class _PListBase(object):
    """Shared implementation for PList nodes and the empty-list singleton.

    Subclasses supply ``first`` and ``rest`` attributes and truthiness
    (the empty list is falsy); everything here is expressed in those terms.
    """
    __slots__ = ('__weakref__',)

    # Selected implementations can be taken straight from the Sequence
    # class, other are less suitable. Especially those that work with
    # index lookups.
    count = Sequence.count
    index = Sequence.index

    def __reduce__(self):
        # Pickling support: rebuild from a plain list of the elements.
        return plist, (list(self),)

    def __len__(self):
        """
        Return the length of the list, computed by traversing it.

        This is obviously O(n) but with the current implementation
        where a list is also a node the overhead of storing the length
        in every node would be quite significant.
        """
        return sum(1 for _ in self)

    def __repr__(self):
        return "plist({0})".format(list(self))
    __str__ = __repr__

    def cons(self, elem):
        """
        Return a new list with elem inserted as new head.

        >>> plist([1, 2]).cons(3)
        plist([3, 1, 2])
        """
        return PList(elem, self)

    def mcons(self, iterable):
        """
        Return a new list with all elements of iterable repeatedly cons:ed to the current list.
        NB! The elements will be inserted in the reverse order of the iterable.
        Runs in O(len(iterable)).

        >>> plist([1, 2]).mcons([3, 4])
        plist([4, 3, 1, 2])
        """
        head = self
        for elem in iterable:
            head = head.cons(elem)
        return head

    def reverse(self):
        """
        Return a reversed version of list. Runs in O(n) where n is the length of the list.

        >>> plist([1, 2, 3]).reverse()
        plist([3, 2, 1])

        Also supports the standard reversed function.

        >>> reversed(plist([1, 2, 3]))
        plist([3, 2, 1])
        """
        result = plist()
        head = self
        while head:
            result = result.cons(head.first)
            head = head.rest
        return result
    __reversed__ = reverse

    def split(self, index):
        """
        Split the list at position specified by index. Returns a tuple containing the
        list up until index and the list after the index. Runs in O(index).

        >>> plist([1, 2, 3, 4]).split(2)
        (plist([1, 2]), plist([3, 4]))
        """
        lb = _PListBuilder()
        right_list = self
        i = 0
        while right_list and i < index:
            lb.append_elem(right_list.first)
            right_list = right_list.rest
            i += 1
        if not right_list:
            # Just a small optimization in the cases where no split occurred
            return self, _EMPTY_PLIST
        return lb.build(), right_list

    def __iter__(self):
        # Walk the chain of nodes, yielding each head value.
        li = self
        while li:
            yield li.first
            li = li.rest

    def __lt__(self, other):
        # Lexicographic comparison via tuples; only defined between plists.
        if not isinstance(other, _PListBase):
            return NotImplemented
        return tuple(self) < tuple(other)

    def __eq__(self, other):
        """
        Traverses the lists, checking equality of elements.

        This is an O(n) operation, but preserves the standard semantics of list equality.
        """
        if not isinstance(other, _PListBase):
            return NotImplemented
        self_head = self
        other_head = other
        while self_head and other_head:
            if not self_head.first == other_head.first:
                return False
            self_head = self_head.rest
            other_head = other_head.rest
        # Equal only if both lists were exhausted simultaneously.
        return not self_head and not other_head

    def __getitem__(self, index):
        # Don't use this data structure if you plan to do a lot of indexing, it is
        # very inefficient! Use a PVector instead!
        if isinstance(index, slice):
            # A tail slice (l[n:]) reuses the existing structure directly.
            if index.start is not None and index.stop is None and (index.step is None or index.step == 1):
                return self._drop(index.start)
            # Take the easy way out for all other slicing cases, not much structural reuse possible anyway
            return plist(tuple(self)[index])
        if not isinstance(index, Integral):
            raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
        if index < 0:
            # NB: O(n)!
            index += len(self)
        try:
            return self._drop(index).first
        except AttributeError as e:
            # `.first` on the empty-list singleton raises AttributeError,
            # which signals that the index walked past the end.
            raise IndexError("PList index out of range") from e

    def _drop(self, count):
        # Advance past `count` nodes and return the remaining tail.
        if count < 0:
            raise IndexError("PList index out of range")
        head = self
        while count > 0:
            head = head.rest
            count -= 1
        return head

    def __hash__(self):
        # Hash must agree with __eq__, so hash the tuple of elements.
        return hash(tuple(self))

    def remove(self, elem):
        """
        Return new list with first element equal to elem removed. O(k) where k is the position
        of the element that is removed.

        Raises ValueError if no matching element is found.

        >>> plist([1, 2, 1]).remove(1)
        plist([2, 1])
        """
        builder = _PListBuilder()
        head = self
        while head:
            if head.first == elem:
                # Reuse the untouched tail after the removed element.
                return builder.append_plist(head.rest)
            builder.append_elem(head.first)
            head = head.rest
        raise ValueError('{0} not found in PList'.format(elem))
class PList(Generic[T_co], _PListBase):
    """
    Classical Lisp style singly linked list. Adding elements to the head using cons is O(1).
    Element access is O(k) where k is the position of the element in the list. Taking the
    length of the list is O(n).

    Fully supports the Sequence and Hashable protocols including indexing and slicing but
    if you need fast random access go for the PVector instead.

    Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to
    create an instance.

    Some examples:

    >>> x = plist([1, 2])
    >>> y = x.cons(3)
    >>> x
    plist([1, 2])
    >>> y
    plist([3, 1, 2])
    >>> y.first
    3
    >>> y.rest == x
    True
    >>> y[:2]
    plist([3, 1])
    """
    __slots__ = ('first', 'rest')

    def __new__(cls, first, rest):
        # Uses __new__ rather than __init__: the two slots are assigned
        # exactly once here and never rebound by this class afterwards.
        instance = super(PList, cls).__new__(cls)
        instance.first = first
        instance.rest = rest
        return instance

    def __bool__(self):
        # A node always represents a non-empty list.
        return True
    __nonzero__ = __bool__
# PList implements the full Sequence and Hashable interfaces via _PListBase,
# so register it as a virtual subclass of both ABCs.
Sequence.register(PList)
Hashable.register(PList)
class _EmptyPList(_PListBase):
    """Falsy singleton terminating every PList chain."""
    __slots__ = ()

    def __bool__(self):
        return False
    __nonzero__ = __bool__

    @property
    def first(self):
        # AttributeError (not IndexError) on purpose: _PListBase.__getitem__
        # catches it to detect out-of-range indexing.
        raise AttributeError("Empty PList has no first")

    @property
    def rest(self):
        # The tail of the empty list is the empty list itself, letting
        # traversal code treat it uniformly with ordinary nodes.
        return self
Sequence.register(_EmptyPList)
Hashable.register(_EmptyPList)

# Shared singleton used as the terminator ("nil") of every plist.
_EMPTY_PLIST = _EmptyPList()
def plist(iterable=(), reverse=False):
    """
    Creates a new persistent list containing all elements of iterable.
    Optional parameter reverse specifies if the elements should be inserted in
    reverse order or not.

    >>> plist([1, 2, 3])
    plist([1, 2, 3])
    >>> plist([1, 2, 3], reverse=True)
    plist([3, 2, 1])
    """
    # cons builds back-to-front, so feed the elements reversed to
    # preserve their order (or as-is when a reversed result is wanted).
    elems = iterable if reverse else reversed(list(iterable))
    result = _EMPTY_PLIST
    for elem in elems:
        result = result.cons(elem)
    return result
def l(*elements):
    """
    Creates a new persistent list containing all arguments.

    >>> l(1, 2, 3)
    plist([1, 2, 3])
    """
    # Thin vararg wrapper; plist() preserves the insertion order.
    return plist(elements)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pyrsistent@py3@pyrsistent@_plist.py@.PATH_END.py
|
{
"filename": "input_pipeline.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/examples/vae/input_pipeline.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for VAE dataset."""
import jax
import jax.numpy as jnp
import tensorflow as tf
import tensorflow_datasets as tfds
def build_train_set(batch_size, ds_builder):
  """Builds the training dataset.

  Args:
    batch_size: Number of examples per batch.
    ds_builder: A `tfds` dataset builder whose TRAIN split is used.

  Returns:
    An iterator yielding numpy batches of flattened float32 images,
    repeating indefinitely.
  """
  train_ds = ds_builder.as_dataset(split=tfds.Split.TRAIN)
  train_ds = train_ds.map(prepare_image)
  train_ds = train_ds.cache()
  train_ds = train_ds.repeat()
  # Shuffle buffer of 50000 — presumably sized to cover the whole train
  # split; confirm against the dataset actually used.
  train_ds = train_ds.shuffle(50000)
  train_ds = train_ds.batch(batch_size)
  train_ds = iter(tfds.as_numpy(train_ds))
  return train_ds
def build_test_set(ds_builder):
  """Builds the test dataset as a single device-resident array."""
  test_ds = ds_builder.as_dataset(split=tfds.Split.TEST)
  # One batch of 10000 — assumes the test split fits in a single batch
  # of that size; confirm for the dataset in use.
  test_ds = test_ds.map(prepare_image).batch(10000)
  test_ds = jnp.array(list(test_ds)[0])
  test_ds = jax.device_put(test_ds)
  return test_ds
def prepare_image(x):
  """Casts an example's image to float32 and flattens it to a 1-D vector."""
  x = tf.cast(x['image'], tf.float32)
  x = tf.reshape(x, (-1,))
  return x
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@examples@vae@input_pipeline.py@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    # Generated Plotly validator for the `scatter3d.hoverlabel.bgcolorsrc`
    # property — a column-source reference for per-point background colors.
    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="scatter3d.hoverlabel", **kwargs
    ):
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "none" is the generated default edit type; callers may
            # override it via kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "IftachSadeh/ANNZ",
"repo_path": "ANNZ_extracted/ANNZ-master/examples/scripts/__init__.py",
"type": "Python"
}
|
import commonImports
import fitsFuncs
import generalSettings
import helperFuncs
|
IftachSadehREPO_NAMEANNZPATH_START.@ANNZ_extracted@ANNZ-master@examples@scripts@__init__.py@.PATH_END.py
|
{
"filename": "visualizer.py",
"repo_name": "Jammy2211/PyAutoLens",
"repo_path": "PyAutoLens_extracted/PyAutoLens-main/autolens/interferometer/model/visualizer.py",
"type": "Python"
}
|
import autofit as af
from autolens.interferometer.model.plotter_interface import (
PlotterInterfaceInterferometer,
)
from autogalaxy import exc
class VisualizerInterferometer(af.Visualizer):
    """Visualization entry points for interferometer model fits, invoked by PyAutoFit."""

    @staticmethod
    def visualize_before_fit(
        analysis,
        paths: af.AbstractPaths,
        model: af.AbstractPriorModel,
    ):
        """
        PyAutoFit calls this function immediately before the non-linear search begins.

        It visualizes objects which do not change throughout the model fit like the dataset.

        Parameters
        ----------
        paths
            The paths object which manages all paths, e.g. where the non-linear search outputs are stored,
            visualization and the pickled objects used by the aggregator output by this function.
        model
            The model object, which includes model components representing the galaxies that are fitted to
            the imaging data.
        """
        plotter_interface = PlotterInterfaceInterferometer(
            image_path=paths.image_path, title_prefix=analysis.title_prefix
        )

        # The dataset does not change during the fit, so it is plotted once up front.
        plotter_interface.interferometer(dataset=analysis.interferometer)

        # Overlay the positions used by the positions likelihood on the dirty image.
        if analysis.positions_likelihood is not None:
            plotter_interface.image_with_positions(
                image=analysis.dataset.dirty_image,
                positions=analysis.positions_likelihood.positions,
            )

        if analysis.adapt_images is not None:
            plotter_interface.adapt_images(adapt_images=analysis.adapt_images)

    @staticmethod
    def visualize(
        analysis,
        paths: af.DirectoryPaths,
        instance: af.ModelInstance,
        during_analysis: bool,
    ):
        """
        Outputs images of the maximum log likelihood model inferred by the model-fit. This function is called
        throughout the non-linear search at input intervals, and therefore provides on-the-fly visualization of how
        well the model-fit is going.

        The visualization performed by this function includes:

        - Images of the best-fit `Tracer`, including the images of each of its galaxies.

        - Images of the best-fit `FitInterferometer`, including the model-image, residuals and chi-squared of its fit
          to the imaging data.

        - The adapt-images of the model-fit showing how the galaxies are used to represent different galaxies in
          the dataset.

        - If adapt features are used to scale the noise, a `FitInterferometer` with these features turned off may be
          output, to indicate how much these features are altering the dataset.

        The images output by this function are customized using the file `config/visualize/plots.yaml`.

        Parameters
        ----------
        paths
            The paths object which manages all paths, e.g. where the non-linear search outputs are stored,
            visualization, and the pickled objects used by the aggregator output by this function.
        instance
            An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
            via a non-linear search).
        during_analysis
            If True the visualization is being performed midway through the non-linear search before it is finished,
            which may change which images are output.
        """
        fit = analysis.fit_from(instance=instance)

        if analysis.positions_likelihood is not None:
            analysis.positions_likelihood.output_positions_info(
                output_path=paths.output_path, tracer=fit.tracer
            )

        # If the inversion's reconstruction cannot be computed there is nothing
        # meaningful to plot for this instance, so bail out early.
        if fit.inversion is not None:
            try:
                fit.inversion.reconstruction
            except exc.InversionException:
                return

        plotter_interface = PlotterInterfaceInterferometer(
            image_path=paths.image_path, title_prefix=analysis.title_prefix
        )
        # Best-effort: a failed inversion during plotting should not abort the search.
        try:
            plotter_interface.fit_interferometer(
                fit=fit, during_analysis=during_analysis
            )
        except exc.InversionException:
            pass

        # Plot the tracer with linear light profiles converted to standard profiles.
        tracer = fit.tracer_linear_light_profiles_to_light_profiles

        plotter_interface.tracer(
            tracer=tracer, grid=fit.grids.uniform, during_analysis=during_analysis
        )
        plotter_interface.galaxies(
            galaxies=tracer.galaxies,
            grid=fit.grids.uniform,
            during_analysis=during_analysis,
        )
        if fit.inversion is not None:
            # Best-effort: tolerate indexing issues inside inversion plotting.
            try:
                plotter_interface.inversion(
                    inversion=fit.inversion, during_analysis=during_analysis
                )
            except IndexError:
                pass
|
Jammy2211REPO_NAMEPyAutoLensPATH_START.@PyAutoLens_extracted@PyAutoLens-main@autolens@interferometer@model@visualizer.py@.PATH_END.py
|
{
"filename": "base.py",
"repo_name": "icrar/daliuge",
"repo_path": "daliuge_extracted/daliuge-master/daliuge-translator/dlg/dropmake/utils/heft/base.py",
"type": "Python"
}
|
# This file was ported and adapted by chen.wu@icrar.org from:
# https://github.com/mrocklin/heft/blob/master/heft/core.py
# The original copyright statement is as below:
# Copyright (c) 2013 Matthew Rocklin
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of HEFT nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
"""
Heterogeneous Earliest Finish Time -- A static scheduling heuristic
Performance-effective and low-complexity task scheduling
for heterogeneous computing
by
Topcuoglu, Haluk; Hariri, Salim Wu, M
IEEE Transactions on Parallel and Distributed Systems 2002
Cast of Characters:
agent - resources (e.g. machines)
task - the task to be allocated
orders - dict {agent: [tasks-run-on-agent-in-order]}
taskson - dict {task: agent-on-which-task-is-run}
prec - dict {task: (tasks which directly precede task)}
succ - dict {task: (tasks which directly succeed task)}
compcost - function :: task, agent -> time to compute task on agent
commcost - function :: task, task, agent, agent -> time to transfer results
of one task needed by another between two agents
[1]. http://en.wikipedia.org/wiki/Heterogeneous_Earliest_Finish_Time
Significant changes for DALiuGE involve the support for tasks that require
multiple cores. The original HEFT algorithm assumes each task
consumes exactly one processor at a time
"""
import itertools as it
from collections import namedtuple
from functools import partial
from itertools import chain
# One scheduled execution: `task` runs on some agent from `start` to `end`.
Event = namedtuple("Event", "task start end")
def reverse_dict(d):
    """Invert a dependence mapping.

    Every value appearing under a key of *d* becomes a key of the result,
    mapped to the tuple of original keys (in iteration order) that listed it.

    >>> reverse_dict({'a': (1, 2), 'b': (2, 3), 'c': ()})
    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    """
    inverted = {}
    for source, targets in d.items():
        for target in targets:
            inverted.setdefault(target, tuple())
            inverted[target] += (source,)
    return inverted
def find_task_event(task_name, orders_dict):
    """Return the Event whose task equals *task_name*, scanning every agent's
    order list; None when no such event exists."""
    for events in orders_dict.values():
        for event in events:
            if event.task == task_name:
                return event
def wbar(ni, agents, compcost):
    """Mean computation cost of task *ni* averaged over every agent."""
    total = 0
    for agent in agents:
        total += compcost(ni, agent)
    return total / len(agents)
def cbar(ni, nj, agents, commcost):
    """Mean communication cost between tasks *ni* and *nj*, averaged over all
    ordered pairs of distinct agents (0 when there is a single agent)."""
    n = len(agents)
    if n == 1:
        return 0
    total = 0
    for a1 in agents:
        for a2 in agents:
            if a1 == a2:
                continue
            total += commcost(ni, nj, a1, a2)
    # n * (n - 1) ordered pairs of distinct agents.
    return 1.0 * total / (n * (n - 1))
def ranku(ni, agents, succ, compcost, commcost):
    """Upward rank of task *ni*: its average compute cost plus the most
    expensive (communication + rank) path through its successors.

    Mirrors the HEFT definition; see
    http://en.wikipedia.org/wiki/Heterogeneous_Earliest_Finish_Time
    """
    w = partial(wbar, agents=agents, compcost=compcost)
    c = partial(cbar, agents=agents, commcost=commcost)
    rank = partial(
        ranku, agents=agents, succ=succ, compcost=compcost, commcost=commcost
    )
    successors = succ.get(ni)
    if not successors:
        # Exit task: rank is just the average computation cost.
        return w(ni)
    return w(ni) + max(c(ni, nj) + rank(nj) for nj in successors)
def endtime(task, events):
    """Return the end time of *task* within *events*, or None when absent."""
    matches = (e.end for e in events if e.task == task)
    return next(matches, None)
def find_first_gap(agent_orders, desired_start_time, duration):
    """Find the first gap in an agent's list of tasks.

    Returns the earliest start time, no sooner than *desired_start_time*,
    at which a task of length *duration* fits between the agent's already
    scheduled events (or after the last of them).

    Note: under DAG-preserving execution the task actually fires at
    `desired_start_time` regardless of gap insertion, which is the main
    departure from the original HEFT formulation (see module docstring).
    """
    # TODO change to a "DAG preserved" first gap
    # TODO return an infinite large value if the DoP constraint is not met

    # No scheduled work: the task can start as soon as it is ready.
    if not agent_orders:
        return desired_start_time

    # Walk consecutive events, tracking the end of the previous one
    # (0 before any event), and take the first gap that is wide enough.
    prev_end = 0
    for event in agent_orders:
        candidate = max(desired_start_time, prev_end)
        if event.start - candidate > duration:
            return candidate
        prev_end = event.end

    # No gap found: schedule after the last event, or when ready.
    return max(agent_orders[-1].end, desired_start_time)
def start_time(task, orders, taskson, prec, commcost, compcost, agent):
    """Earliest time *task* could begin executing on *agent*, accounting for
    when each predecessor finishes and the cost of shipping its result over."""
    duration = compcost(task, agent)
    comm_ready = 0
    if task in prec:
        ready_times = []
        for p in prec[task]:
            finished = endtime(p, orders[taskson[p]])
            ready_times.append(finished + commcost(p, task, taskson[p], agent))
        comm_ready = max(ready_times)
    return find_first_gap(orders[agent], comm_ready, duration)
def allocate(task, orders, taskson, prec, compcost, commcost):
    """Assign *task* to the agent with the earliest finish time.

    Mutates *orders* (per-agent event lists, kept sorted by start time)
    and *taskson* (task -> agent) in place.
    """
    st = partial(start_time, task, orders, taskson, prec, commcost, compcost)

    def finish(machine):
        # Finish time on a machine = earliest start there + compute cost.
        return st(machine) + compcost(task, machine)

    # Greedy choice: the machine with the earliest finish time wins.
    # TODO the new greediness should be based on "DoP" since all start times
    # will be the same (the desired_start_time); bigger leftover is better.
    agent = min(orders.keys(), key=finish)
    begin = st(agent)
    orders[agent].append(Event(task, begin, finish(agent)))
    # Keep each agent's schedule ordered by start time; a different data
    # structure could maintain this ordering at lower cost.
    orders[agent].sort(key=lambda e: e.start)
    taskson[task] = agent
def makespan(orders):
    """Finish time of the last scheduled task across all agents."""
    last_ends = [events[-1].end for events in orders.values() if events]
    return max(last_ends)
def schedule(succ, agents, compcost, commcost):
    """Schedule a task DAG onto worker agents.

    inputs:

    succ - DAG of tasks {a: (b, c)} where b and c follow a
    agents - set of agents that can perform work
    compcost - function :: task, agent -> runtime
    commcost - function :: j1, j2, a1, a2 -> communication time

    Returns (orders, taskson).
    """
    rank = partial(
        ranku, agents=agents, succ=succ, compcost=compcost, commcost=commcost
    )
    prec = reverse_dict(succ)

    # Collect every task mentioned anywhere in the DAG.
    all_tasks = set(succ)
    for targets in succ.values():
        all_tasks.update(targets)
    by_rank = sorted(all_tasks, key=rank)

    orders = {agent: [] for agent in agents}
    taskson = {}
    # Allocate in decreasing rank order (highest-priority tasks first).
    for task in reversed(by_rank):
        allocate(task, orders, taskson, prec, compcost, commcost)
    return orders, taskson
|
icrarREPO_NAMEdaliugePATH_START.@daliuge_extracted@daliuge-master@daliuge-translator@dlg@dropmake@utils@heft@base.py@.PATH_END.py
|
{
"filename": "decoder.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/toml/py2/toml/decoder.py",
"type": "Python"
}
|
import datetime
import io
from os import linesep
import re
import sys
from toml.tz import TomlTz
# Python 2/3 compatibility aliases: the Python-2 names used throughout this
# module are mapped onto their Python-3 builtins when running on Python 3.
if sys.version_info < (3,):
    _range = xrange  # noqa: F821
else:
    unicode = str
    _range = range
    basestring = str
    unichr = chr
def _detect_pathlib_path(p):
if (3, 4) <= sys.version_info:
import pathlib
if isinstance(p, pathlib.PurePath):
return True
return False
def _ispath(p):
    """True for anything load() should treat as a filesystem path:
    byte/text strings or pathlib paths."""
    return isinstance(p, (bytes, basestring)) or _detect_pathlib_path(p)
def _getpath(p):
if (3, 6) <= sys.version_info:
import os
return os.fspath(p)
if _detect_pathlib_path(p):
return str(p)
return p
try:
    FNFError = FileNotFoundError
except NameError:
    # Python 2 has no FileNotFoundError; IOError is the closest equivalent.
    FNFError = IOError

# Matches TOML local times: HH:MM:SS with an optional .fff[fff] fraction.
TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
class TomlDecodeError(ValueError):
    """Base toml Exception / Error, carrying the position of the failure."""

    def __init__(self, msg, doc, pos):
        """Store *msg* plus the line/column derived from offset *pos* in *doc*."""
        line = doc.count('\n', 0, pos) + 1
        col = pos - doc.rfind('\n', 0, pos)
        super(TomlDecodeError, self).__init__(
            '{} (line {} column {} char {})'.format(msg, line, col, pos)
        )
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.lineno = line
        self.colno = col
# Matches a TOML number, which allows underscores for readability
# (e.g. 1_000_000); each underscore must sit between two digits.
_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
class CommentValue(object):
    """Wrap a parsed value together with the comment attached to it."""

    def __init__(self, val, comment, beginline, _dict):
        self.val = val
        # A comment on its own line keeps a newline separator; a trailing
        # comment is separated from the value by a single space.
        sep = "\n" if beginline else " "
        self.comment = sep + comment
        self._dict = _dict

    def __getitem__(self, key):
        return self.val[key]

    def __setitem__(self, key, value):
        self.val[key] = value

    def dump(self, dump_value_func):
        """Render the value via *dump_value_func*, re-attaching the comment."""
        rendered = dump_value_func(self.val)
        if isinstance(self.val, self._dict):
            # Tables get the comment before them, on its own line.
            return self.comment + "\n" + unicode(rendered)
        return unicode(rendered) + self.comment
def _strictly_valid_num(n):
n = n.strip()
if not n:
return False
if n[0] == '_':
return False
if n[-1] == '_':
return False
if "_." in n or "._" in n:
return False
if len(n) == 1:
return True
if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']:
return False
if n[0] == '+' or n[0] == '-':
n = n[1:]
if len(n) > 1 and n[0] == '0' and n[1] != '.':
return False
if '__' in n:
return False
return True
def load(f, _dict=dict, decoder=None):
    """Parses named file or files as toml and returns a dictionary

    Args:
        f: Path to the file to open, array of files to read into single dict
           or a file descriptor
        _dict: (optional) Specifies the class of the returned toml dictionary
        decoder: The decoder to use

    Returns:
        Parsed toml file represented as a dictionary

    Raises:
        TypeError -- When f is invalid type
        TomlDecodeError: Error while decoding toml
        IOError / FileNotFoundError -- When an array with no valid (existing)
        (Python 2 / Python 3) file paths is passed
    """

    if _ispath(f):
        # Single path: read and decode it.
        with io.open(_getpath(f), encoding='utf-8') as ffile:
            return loads(ffile.read(), _dict, decoder)
    elif isinstance(f, list):
        from os import path as op
        from warnings import warn
        # At least one of the listed files must exist.
        if not [path for path in f if op.exists(path)]:
            error_msg = "Load expects a list to contain filenames only."
            error_msg += linesep
            error_msg += ("The list needs to contain the path of at least one "
                          "existing file.")
            raise FNFError(error_msg)
        if decoder is None:
            decoder = TomlDecoder(_dict)
        d = decoder.get_empty_table()
        # Merge every existing file into one dict; warn about missing ones.
        for l in f:  # noqa: E741
            if op.exists(l):
                d.update(load(l, _dict, decoder))
            else:
                warn("Non-existent filename in list with at least one valid "
                     "filename")
        return d
    else:
        # Assume a file-like object; a missing read() means a bad argument.
        try:
            return loads(f.read(), _dict, decoder)
        except AttributeError:
            raise TypeError("You can only load a file descriptor, filename or "
                            "list")
# Bare (unquoted) TOML keys / table names: ASCII letters, digits, '_' and '-'.
_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
def loads(s, _dict=dict, decoder=None):
    """Parses string as toml

    Args:
        s: String to be parsed
        _dict: (optional) Specifies the class of the returned toml dictionary

    Returns:
        Parsed toml file represented as a dictionary

    Raises:
        TypeError: When a non-string is passed
        TomlDecodeError: Error while decoding toml
    """

    implicitgroups = []
    if decoder is None:
        decoder = TomlDecoder(_dict)
    retval = decoder.get_empty_table()
    currentlevel = retval
    if not isinstance(s, basestring):
        raise TypeError("Expecting something like a string")

    if not isinstance(s, unicode):
        s = s.decode('utf8')

    original = s
    sl = list(s)
    # Pass-1 scanner state: open-bracket depth, string state, key-name state.
    openarr = 0
    openstring = False
    openstrchar = ""
    multilinestr = False
    arrayoftables = False
    beginline = True
    keygroup = False
    dottedkey = False
    keyname = 0          # 0 = not in a key, 1 = inside name, 2 = after name
    key = ''
    prev_key = ''
    line_no = 1

    # Pass 1: scan character by character, blanking comments, validating key
    # syntax and tracking string/array/table state so pass 2 can be line based.
    for i, item in enumerate(sl):
        # NOTE(review): a lone '\r' as the very last character would raise
        # IndexError here.
        if item == '\r' and sl[i + 1] == '\n':
            sl[i] = ' '
            continue
        if keyname:
            key += item
            if item == '\n':
                raise TomlDecodeError("Key name found without value."
                                      " Reached end of line.", original, i)
            if openstring:
                if item == openstrchar:
                    # Quote closes the key only if not escaped by an odd
                    # number of preceding backslashes.
                    oddbackslash = False
                    k = 1
                    while i >= k and sl[i - k] == '\\':
                        oddbackslash = not oddbackslash
                        k += 1
                    if not oddbackslash:
                        keyname = 2
                        openstring = False
                        openstrchar = ""
                continue
            elif keyname == 1:
                if item.isspace():
                    keyname = 2
                    continue
                elif item == '.':
                    dottedkey = True
                    continue
                elif item.isalnum() or item == '_' or item == '-':
                    continue
                elif (dottedkey and sl[i - 1] == '.' and
                      (item == '"' or item == "'")):
                    openstring = True
                    openstrchar = item
                    continue
            elif keyname == 2:
                if item.isspace():
                    if dottedkey:
                        nextitem = sl[i + 1]
                        if not nextitem.isspace() and nextitem != '.':
                            keyname = 1
                    continue
                if item == '.':
                    dottedkey = True
                    nextitem = sl[i + 1]
                    if not nextitem.isspace() and nextitem != '.':
                        keyname = 1
                    continue
            if item == '=':
                keyname = 0
                prev_key = key[:-1].rstrip()
                key = ''
                dottedkey = False
            else:
                raise TomlDecodeError("Found invalid character in key name: '" +
                                      item + "'. Try quoting the key name.",
                                      original, i)
        # Toggle single-quoted string state; three quotes toggle multiline.
        if item == "'" and openstrchar != '"':
            k = 1
            try:
                while sl[i - k] == "'":
                    k += 1
                    if k == 3:
                        break
            except IndexError:
                pass
            if k == 3:
                multilinestr = not multilinestr
                openstring = multilinestr
            else:
                openstring = not openstring
            if openstring:
                openstrchar = "'"
            else:
                openstrchar = ""
        # Toggle double-quoted string state, respecting backslash escapes.
        if item == '"' and openstrchar != "'":
            oddbackslash = False
            k = 1
            tripquote = False
            try:
                while sl[i - k] == '"':
                    k += 1
                    if k == 3:
                        tripquote = True
                        break
                if k == 1 or (k == 3 and tripquote):
                    while sl[i - k] == '\\':
                        oddbackslash = not oddbackslash
                        k += 1
            except IndexError:
                pass
            if not oddbackslash:
                if tripquote:
                    multilinestr = not multilinestr
                    openstring = multilinestr
                else:
                    openstring = not openstring
            if openstring:
                openstrchar = '"'
            else:
                openstrchar = ""
        # Blank out comments (outside strings/tables) so pass 2 never sees them.
        if item == '#' and (not openstring and not keygroup and
                            not arrayoftables):
            j = i
            comment = ""
            try:
                while sl[j] != '\n':
                    comment += s[j]
                    sl[j] = ' '
                    j += 1
            except IndexError:
                break
            if not openarr:
                decoder.preserve_comment(line_no, prev_key, comment, beginline)
        # '[' at line start opens a table / array-of-tables header; elsewhere
        # it opens a value array.
        if item == '[' and (not openstring and not keygroup and
                            not arrayoftables):
            if beginline:
                if len(sl) > i + 1 and sl[i + 1] == '[':
                    arrayoftables = True
                else:
                    keygroup = True
            else:
                openarr += 1
        if item == ']' and not openstring:
            if keygroup:
                keygroup = False
            elif arrayoftables:
                if sl[i - 1] == ']':
                    arrayoftables = False
            else:
                openarr -= 1
        if item == '\n':
            if openstring or multilinestr:
                if not multilinestr:
                    raise TomlDecodeError("Unbalanced quotes", original, i)
                # Multiline string closed right at the newline: shuffle the
                # closing quote characters so the line split stays consistent.
                if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
                        sl[i - 2] == sl[i - 1])):
                    sl[i] = sl[i - 1]
                    if sl[i - 3] == sl[i - 1]:
                        sl[i - 3] = ' '
            elif openarr:
                # Arrays may span lines: flatten the newline away.
                sl[i] = ' '
            else:
                beginline = True
                line_no += 1
        elif beginline and sl[i] != ' ' and sl[i] != '\t':
            beginline = False
            if not keygroup and not arrayoftables:
                if sl[i] == '=':
                    raise TomlDecodeError("Found empty keyname. ", original, i)
                keyname = 1
                key += item
    if keyname:
        raise TomlDecodeError("Key name found without value."
                              " Reached end of file.", original, len(s))
    if openstring:  # reached EOF and have an unterminated string
        raise TomlDecodeError("Unterminated string found."
                              " Reached end of file.", original, len(s))
    s = ''.join(sl)
    s = s.split('\n')
    # Pass-2 state for values that continue across lines.
    multikey = None
    multilinestr = ""
    multibackslash = False
    pos = 0
    # Pass 2: parse the cleaned text line by line into the result dict.
    for idx, line in enumerate(s):
        if idx > 0:
            pos += len(s[idx - 1]) + 1

        decoder.embed_comments(idx, currentlevel)

        if not multilinestr or multibackslash or '\n' not in multilinestr:
            line = line.strip()
        if line == "" and (not multikey or multibackslash):
            continue
        if multikey:
            # Continuation of a multiline string started on a previous line.
            # NOTE(review): both branches append identically; the distinction
            # only matters for the backslash bookkeeping further down.
            if multibackslash:
                multilinestr += line
            else:
                multilinestr += line
            multibackslash = False
            closed = False
            if multilinestr[0] == '[':
                closed = line[-1] == ']'
            elif len(line) > 2:
                closed = (line[-1] == multilinestr[0] and
                          line[-2] == multilinestr[0] and
                          line[-3] == multilinestr[0])
            if closed:
                try:
                    value, vtype = decoder.load_value(multilinestr)
                except ValueError as err:
                    raise TomlDecodeError(str(err), original, pos)
                currentlevel[multikey] = value
                multikey = None
                multilinestr = ""
            else:
                # An odd run of trailing backslashes means a line
                # continuation; strip the backslash, otherwise keep the \n.
                k = len(multilinestr) - 1
                while k > -1 and multilinestr[k] == '\\':
                    multibackslash = not multibackslash
                    k -= 1
                if multibackslash:
                    multilinestr = multilinestr[:-1]
                else:
                    multilinestr += "\n"
            continue
        if line[0] == '[':
            # Table header: [a.b] or array-of-tables [[a.b]].
            arrayoftables = False
            if len(line) == 1:
                raise TomlDecodeError("Opening key group bracket on line by "
                                      "itself.", original, pos)
            if line[1] == '[':
                arrayoftables = True
                line = line[2:]
                splitstr = ']]'
            else:
                line = line[1:]
                splitstr = ']'
            i = 1
            quotesplits = decoder._get_split_on_quotes(line)
            quoted = False
            # Find the closing bracket that is not inside quotes.
            for quotesplit in quotesplits:
                if not quoted and splitstr in quotesplit:
                    break
                i += quotesplit.count(splitstr)
                quoted = not quoted
            line = line.split(splitstr, i)
            if len(line) < i + 1 or line[-1].strip() != "":
                raise TomlDecodeError("Key group not on a line by itself.",
                                      original, pos)
            groups = splitstr.join(line[:-1]).split('.')
            i = 0
            # Re-join dotted pieces that fell inside quoted group names.
            while i < len(groups):
                groups[i] = groups[i].strip()
                if len(groups[i]) > 0 and (groups[i][0] == '"' or
                                           groups[i][0] == "'"):
                    groupstr = groups[i]
                    j = i + 1
                    while ((not groupstr[0] == groupstr[-1]) or
                           len(groupstr) == 1):
                        j += 1
                        if j > len(groups) + 2:
                            raise TomlDecodeError("Invalid group name '" +
                                                  groupstr + "' Something " +
                                                  "went wrong.", original, pos)
                        groupstr = '.'.join(groups[i:j]).strip()
                    groups[i] = groupstr[1:-1]
                    groups[i + 1:j] = []
                else:
                    if not _groupname_re.match(groups[i]):
                        raise TomlDecodeError("Invalid group name '" +
                                              groups[i] + "'. Try quoting it.",
                                              original, pos)
                i += 1
            # Descend (creating tables as needed) to the named table.
            currentlevel = retval
            for i in _range(len(groups)):
                group = groups[i]
                if group == "":
                    raise TomlDecodeError("Can't have a keygroup with an empty "
                                          "name", original, pos)
                try:
                    currentlevel[group]
                    if i == len(groups) - 1:
                        if group in implicitgroups:
                            implicitgroups.remove(group)
                            if arrayoftables:
                                raise TomlDecodeError("An implicitly defined "
                                                      "table can't be an array",
                                                      original, pos)
                        elif arrayoftables:
                            currentlevel[group].append(decoder.get_empty_table()
                                                       )
                        else:
                            raise TomlDecodeError("What? " + group +
                                                  " already exists?" +
                                                  str(currentlevel),
                                                  original, pos)
                except TypeError:
                    # Parent is a list (array of tables): use its last entry.
                    currentlevel = currentlevel[-1]
                    if group not in currentlevel:
                        currentlevel[group] = decoder.get_empty_table()
                        if i == len(groups) - 1 and arrayoftables:
                            currentlevel[group] = [decoder.get_empty_table()]
                except KeyError:
                    if i != len(groups) - 1:
                        implicitgroups.append(group)
                    currentlevel[group] = decoder.get_empty_table()
                    if i == len(groups) - 1 and arrayoftables:
                        currentlevel[group] = [decoder.get_empty_table()]
                currentlevel = currentlevel[group]
                if arrayoftables:
                    try:
                        currentlevel = currentlevel[-1]
                    except KeyError:
                        pass
        elif line[0] == "{":
            if line[-1] != "}":
                raise TomlDecodeError("Line breaks are not allowed in inline"
                                      "objects", original, pos)
            try:
                decoder.load_inline_object(line, currentlevel, multikey,
                                           multibackslash)
            except ValueError as err:
                raise TomlDecodeError(str(err), original, pos)
        elif "=" in line:
            try:
                ret = decoder.load_line(line, currentlevel, multikey,
                                        multibackslash)
            except ValueError as err:
                raise TomlDecodeError(str(err), original, pos)
            if ret is not None:
                # A multiline string started: carry its state to later lines.
                multikey, multilinestr, multibackslash = ret
    return retval
def _load_date(val):
microsecond = 0
tz = None
try:
if len(val) > 19:
if val[19] == '.':
if val[-1].upper() == 'Z':
subsecondval = val[20:-1]
tzval = "Z"
else:
subsecondvalandtz = val[20:]
if '+' in subsecondvalandtz:
splitpoint = subsecondvalandtz.index('+')
subsecondval = subsecondvalandtz[:splitpoint]
tzval = subsecondvalandtz[splitpoint:]
elif '-' in subsecondvalandtz:
splitpoint = subsecondvalandtz.index('-')
subsecondval = subsecondvalandtz[:splitpoint]
tzval = subsecondvalandtz[splitpoint:]
else:
tzval = None
subsecondval = subsecondvalandtz
if tzval is not None:
tz = TomlTz(tzval)
microsecond = int(int(subsecondval) *
(10 ** (6 - len(subsecondval))))
else:
tz = TomlTz(val[19:])
except ValueError:
tz = None
if "-" not in val[1:]:
return None
try:
if len(val) == 10:
d = datetime.date(
int(val[:4]), int(val[5:7]),
int(val[8:10]))
else:
d = datetime.datetime(
int(val[:4]), int(val[5:7]),
int(val[8:10]), int(val[11:13]),
int(val[14:16]), int(val[17:19]), microsecond, tz)
except ValueError:
return None
return d
def _load_unicode_escapes(v, hexbytes, prefix):
    # Decode \uXXXX / \UXXXXXXXX escapes.
    # v: text preceding the first occurrence of `prefix`;
    # hexbytes: the pieces that followed each occurrence of `prefix`;
    # the hex digits heading each piece are decoded to one character and the
    # rest of the piece is appended unchanged.
    skip = False
    i = len(v) - 1
    # An odd run of trailing backslashes means the first prefix was itself
    # escaped and must be kept literally.
    while i > -1 and v[i] == '\\':
        skip = not skip
        i -= 1
    for hx in hexbytes:
        if skip:
            # Re-attach the escaped prefix verbatim; recompute whether the
            # NEXT prefix is escaped from this piece's trailing backslashes.
            skip = False
            i = len(hx) - 1
            while i > -1 and hx[i] == '\\':
                skip = not skip
                i -= 1
            v += prefix
            v += hx
            continue
        hxb = ""
        i = 0
        hxblen = 4
        if prefix == "\\U":
            hxblen = 8
        hxb = ''.join(hx[i:i + hxblen]).lower()
        if hxb.strip('0123456789abcdef'):
            raise ValueError("Invalid escape sequence: " + hxb)
        # Reject UTF-16 surrogates (U+D800..U+DFFF): hex 'd8'..'df'.
        if hxb[0] == "d" and hxb[1].strip('01234567'):
            raise ValueError("Invalid escape sequence: " + hxb +
                             ". Only scalar unicode points are allowed.")
        v += unichr(int(hxb, 16))
        v += unicode(hx[len(hxb):])
    return v
# Unescape TOML string values.
# content after the \
_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
# What it should be replaced by
_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
# Used for substitution: escape letter -> real character.
_escape_to_escapedchars = dict(zip(_escapes, _escapedchars))
def _unescape(v):
    """Unescape characters in a TOML string."""
    i = 0
    backslash = False
    while i < len(v):
        if backslash:
            backslash = False
            if v[i] in _escapes:
                # Replace the two-character escape with its real character;
                # `i` now points just past the replacement, so don't advance.
                v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
            elif v[i] == '\\':
                # Escaped backslash: drop the first one of the pair.
                v = v[:i - 1] + v[i:]
            elif v[i] == 'u' or v[i] == 'U':
                # Unicode escapes were decoded earlier; step over the marker.
                i += 1
            else:
                raise ValueError("Reserved escape sequence used")
            continue
        elif v[i] == '\\':
            backslash = True
        i += 1
    return v
class InlineTableDict(object):
    """Marker base class: dict subclasses of this type represent inline tables."""
class TomlDecoder(object):
    """Decodes TOML text into dictionaries; used by load()/loads()."""

    def __init__(self, _dict=dict):
        # _dict: mapping class used for every table created while decoding
        # (plain dict by default).
        self._dict = _dict
    def get_empty_table(self):
        """Return a new, empty table using the configured dict class."""
        return self._dict()
    def get_empty_inline_table(self):
        """Return a new, empty inline table (a dict tagged as inline)."""
        class DynamicInlineTableDict(self._dict, InlineTableDict):
            """Concrete sentinel subclass for inline tables.

            It is a subclass of _dict which is passed in dynamically at load
            time

            It is also a subclass of InlineTableDict
            """

        return DynamicInlineTableDict()
    def load_inline_object(self, line, currentlevel, multikey=False,
                           multibackslash=False):
        # Parse an inline table `{ k = v, ... }` into `currentlevel`.
        # Split on commas first; pieces whose value doesn't look like a
        # complete TOML value are merged back with the next piece (the comma
        # was inside a value).
        candidate_groups = line[1:-1].split(",")
        groups = []
        if len(candidate_groups) == 1 and not candidate_groups[0].strip():
            candidate_groups.pop()
        while len(candidate_groups) > 0:
            candidate_group = candidate_groups.pop(0)
            try:
                _, value = candidate_group.split('=', 1)
            except ValueError:
                raise ValueError("Invalid inline table encountered")
            value = value.strip()
            # Accept values that start/end like a full string, number,
            # boolean, array or nested inline table.
            if ((value[0] == value[-1] and value[0] in ('"', "'")) or (
                    value[0] in '-0123456789' or
                    value in ('true', 'false') or
                    (value[0] == "[" and value[-1] == "]") or
                    (value[0] == '{' and value[-1] == '}'))):
                groups.append(candidate_group)
            elif len(candidate_groups) > 0:
                # Incomplete value: rejoin with the next comma-split chunk.
                candidate_groups[0] = (candidate_group + "," +
                                       candidate_groups[0])
            else:
                raise ValueError("Invalid inline table value encountered")
        for group in groups:
            status = self.load_line(group, currentlevel, multikey,
                                    multibackslash)
            if status is not None:
                break
    def _get_split_on_quotes(self, line):
        # Split `line` so the returned pieces alternate between text outside
        # quotes and text inside quotes (starting outside).
        doublequotesplits = line.split('"')
        quoted = False
        quotesplits = []
        if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]:
            # A single-quoted string starts before the first double quote;
            # re-attach double-quote fragments that fall inside it.
            singlequotesplits = doublequotesplits[0].split("'")
            doublequotesplits = doublequotesplits[1:]
            while len(singlequotesplits) % 2 == 0 and len(doublequotesplits):
                singlequotesplits[-1] += '"' + doublequotesplits[0]
                doublequotesplits = doublequotesplits[1:]
                if "'" in singlequotesplits[-1]:
                    singlequotesplits = (singlequotesplits[:-1] +
                                         singlequotesplits[-1].split("'"))
            quotesplits += singlequotesplits
        for doublequotesplit in doublequotesplits:
            if quoted:
                quotesplits.append(doublequotesplit)
            else:
                # Outside double quotes, single quotes still delimit strings.
                quotesplits += doublequotesplit.split("'")
            quoted = not quoted
        return quotesplits
    def load_line(self, line, currentlevel, multikey, multibackslash):
        # Parse one `key = value` line into `currentlevel`.  Returns None when
        # the assignment is complete, or (multikey, multilinestr,
        # multibackslash) when the value opens a multiline string that later
        # lines must finish.
        i = 1
        quotesplits = self._get_split_on_quotes(line)
        quoted = False
        # Locate the first '=' that is not inside quotes.
        for quotesplit in quotesplits:
            if not quoted and '=' in quotesplit:
                break
            i += quotesplit.count('=')
            quoted = not quoted
        pair = line.split('=', i)
        strictly_valid = _strictly_valid_num(pair[-1])
        if _number_with_underscores.match(pair[-1]):
            pair[-1] = pair[-1].replace('_', '')
        # If the trailing piece still isn't a recognizable value (string,
        # array, table, bool, number, date or time), the '=' we split on was
        # inside the value: re-split one '=' further along and retry.
        while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
                                 pair[-1][0] != "'" and pair[-1][0] != '"' and
                                 pair[-1][0] != '[' and pair[-1][0] != '{' and
                                 pair[-1].strip() != 'true' and
                                 pair[-1].strip() != 'false'):
            try:
                float(pair[-1])
                break
            except ValueError:
                pass
            if _load_date(pair[-1]) is not None:
                break
            if TIME_RE.match(pair[-1]):
                break
            i += 1
            prev_val = pair[-1]
            pair = line.split('=', i)
            if prev_val == pair[-1]:
                raise ValueError("Invalid date or number")
            if strictly_valid:
                strictly_valid = _strictly_valid_num(pair[-1])
        pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()]
        if '.' in pair[0]:
            # Dotted key: descend into (creating) intermediate tables.
            if '"' in pair[0] or "'" in pair[0]:
                quotesplits = self._get_split_on_quotes(pair[0])
                quoted = False
                levels = []
                for quotesplit in quotesplits:
                    if quoted:
                        levels.append(quotesplit)
                    else:
                        levels += [level.strip() for level in
                                   quotesplit.split('.')]
                    quoted = not quoted
            else:
                levels = pair[0].split('.')
            while levels[-1] == "":
                levels = levels[:-1]
            for level in levels[:-1]:
                if level == "":
                    continue
                if level not in currentlevel:
                    currentlevel[level] = self.get_empty_table()
                currentlevel = currentlevel[level]
            pair[0] = levels[-1].strip()
        elif (pair[0][0] == '"' or pair[0][0] == "'") and \
                (pair[0][-1] == pair[0][0]):
            # Quoted key: strip the quotes and unescape the contents.
            pair[0] = _unescape(pair[0][1:-1])
        k, koffset = self._load_line_multiline_str(pair[1])
        if k > -1:
            # Value opens a multiline string; track trailing backslashes for
            # line-continuation handling by the caller.
            while k > -1 and pair[1][k + koffset] == '\\':
                multibackslash = not multibackslash
                k -= 1
            if multibackslash:
                multilinestr = pair[1][:-1]
            else:
                multilinestr = pair[1] + "\n"
            multikey = pair[0]
        else:
            value, vtype = self.load_value(pair[1], strictly_valid)
        try:
            # A successful lookup means the key was already set.
            currentlevel[pair[0]]
            raise ValueError("Duplicate keys!")
        except TypeError:
            raise ValueError("Duplicate keys!")
        except KeyError:
            if multikey:
                return multikey, multilinestr, multibackslash
            else:
                currentlevel[pair[0]] = value
    def _load_line_multiline_str(self, p):
        # Detect whether `p` opens a multiline ('''/""") string that is NOT
        # closed on this line.  Returns (last_index, offset) when it does,
        # (-1, offset) otherwise; `offset` accounts for any string-array
        # prefix stripped before the check.
        poffset = 0
        if len(p) < 3:
            return -1, poffset
        if p[0] == '[' and (p.strip()[-1] != ']' and
                            self._load_array_isstrarray(p)):
            # Unclosed string array: examine only its last element.
            newp = p[1:].strip().split(',')
            while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'":
                # Re-join comma splits that landed inside a string.
                newp = newp[:-2] + [newp[-2] + ',' + newp[-1]]
            newp = newp[-1]
            poffset = len(p) - len(newp)
            p = newp
        if p[0] != '"' and p[0] != "'":
            return -1, poffset
        if p[1] != p[0] or p[2] != p[0]:
            return -1, poffset
        # Already closed on this line by a matching triple quote.
        if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]:
            return -1, poffset
        return len(p) - 1, poffset
    def load_value(self, v, strictly_valid=True):
        # Convert the raw text of a TOML value into (python_value, type_name),
        # where type_name is one of: bool, str, array, inline_object, time,
        # date, int, float.
        if not v:
            raise ValueError("Empty value is invalid")
        if v == 'true':
            return (True, "bool")
        elif v.lower() == 'true':
            raise ValueError("Only all lowercase booleans allowed")
        elif v == 'false':
            return (False, "bool")
        elif v.lower() == 'false':
            raise ValueError("Only all lowercase booleans allowed")
        elif v[0] == '"' or v[0] == "'":
            quotechar = v[0]
            testv = v[1:].split(quotechar)
            triplequote = False
            triplequotecount = 0
            if len(testv) > 1 and testv[0] == '' and testv[1] == '':
                testv = testv[2:]
                triplequote = True
            closed = False
            # Validate quoting: nothing may follow the closing quote, and
            # quotes preceded by an odd backslash run are escaped.
            for tv in testv:
                if tv == '':
                    if triplequote:
                        triplequotecount += 1
                    else:
                        closed = True
                else:
                    oddbackslash = False
                    try:
                        i = -1
                        j = tv[i]
                        while j == '\\':
                            oddbackslash = not oddbackslash
                            i -= 1
                            j = tv[i]
                    except IndexError:
                        pass
                    if not oddbackslash:
                        if closed:
                            raise ValueError("Found tokens after a closed " +
                                             "string. Invalid TOML.")
                        else:
                            if not triplequote or triplequotecount > 1:
                                closed = True
                            else:
                                triplequotecount = 0
            if quotechar == '"':
                # Basic strings: validate escape sequences, then decode
                # unicode escapes and the simple escapes.
                escapeseqs = v.split('\\')[1:]
                backslash = False
                for i in escapeseqs:
                    if i == '':
                        backslash = not backslash
                    else:
                        if i[0] not in _escapes and (i[0] != 'u' and
                                                     i[0] != 'U' and
                                                     not backslash):
                            raise ValueError("Reserved escape sequence used")
                        if backslash:
                            backslash = False
                for prefix in ["\\u", "\\U"]:
                    if prefix in v:
                        hexbytes = v.split(prefix)
                        v = _load_unicode_escapes(hexbytes[0], hexbytes[1:],
                                                  prefix)
                v = _unescape(v)
            # Strip triple quotes down to single before removing the outer
            # pair below.
            if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or
                                                     v[1] == v[2]):
                v = v[2:-2]
            return (v[1:-1], "str")
        elif v[0] == '[':
            return (self.load_array(v), "array")
        elif v[0] == '{':
            inline_object = self.get_empty_inline_table()
            self.load_inline_object(v, inline_object)
            return (inline_object, "inline_object")
        elif TIME_RE.match(v):
            h, m, s, _, ms = TIME_RE.match(v).groups()
            time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0)
            return (time, "time")
        else:
            parsed_date = _load_date(v)
            if parsed_date is not None:
                return (parsed_date, "date")
            if not strictly_valid:
                raise ValueError("Weirdness with leading zeroes or "
                                 "underscores in your number.")
            itype = "int"
            neg = False
            if v[0] == '-':
                neg = True
                v = v[1:]
            elif v[0] == '+':
                v = v[1:]
            v = v.replace('_', '')
            lowerv = v.lower()
            # A '.' or a non-hex exponent marks a float.
            if '.' in v or ('x' not in v and ('e' in v or 'E' in v)):
                if '.' in v and v.split('.', 1)[1] == '':
                    raise ValueError("This float is missing digits after "
                                     "the point")
                if v[0] not in '0123456789':
                    raise ValueError("This float doesn't have a leading "
                                     "digit")
                v = float(v)
                itype = "float"
            elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'):
                v = float(v)
                itype = "float"
            if itype == "int":
                # Base 0 lets int() honor the 0x/0o/0b radix prefixes.
                v = int(v, 0)
            if neg:
                return (0 - v, itype)
            return (v, itype)
def bounded_string(self, s):
    """Return True if *s* is a properly delimited string.

    A string is bounded when it is empty, or when its last character
    matches its opening quote and that closing quote is not escaped by
    an odd run of backslashes immediately before it.
    """
    if not s:
        return True
    if s[0] != s[-1]:
        return False
    # Walk backwards over the run of backslashes that precedes the closing
    # quote; an odd count means the quote is escaped and the string is open.
    escaped = False
    pos = len(s) - 2
    while pos > 0 and s[pos] == "\\":
        escaped = not escaped
        pos -= 1
    return not escaped
def _load_array_isstrarray(self, a):
a = a[1:-1].strip()
if a != '' and (a[0] == '"' or a[0] == "'"):
return True
return False
def load_array(self, a):
    """Parse the TOML array literal *a* (including brackets) into a list.

    Handles flat arrays, arrays of inline tables and nested arrays.
    Raises ValueError ("Not a homogeneous array") when elements do not
    all share a single type.
    """
    atype = None
    retval = []
    a = a.strip()
    # Branch 1: flat arrays and arrays of inline tables (no nested '['
    # before the first element).
    if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
        strarray = self._load_array_isstrarray(a)
        if not a[1:-1].strip().startswith('{'):
            a = a[1:-1].split(',')
        else:
            # a is an inline object, we must find the matching parenthesis
            # to define groups
            new_a = []
            start_group_index = 1
            end_group_index = 2
            open_bracket_count = 1 if a[start_group_index] == '{' else 0
            in_str = False
            while end_group_index < len(a[1:]):
                if a[end_group_index] == '"' or a[end_group_index] == "'":
                    if in_str:
                        # Only leave the in-string state when the quote is
                        # not escaped by an odd run of backslashes.
                        backslash_index = end_group_index - 1
                        while (backslash_index > -1 and
                               a[backslash_index] == '\\'):
                            in_str = not in_str
                            backslash_index -= 1
                    in_str = not in_str
                if not in_str and a[end_group_index] == '{':
                    open_bracket_count += 1
                if in_str or a[end_group_index] != '}':
                    end_group_index += 1
                    continue
                elif a[end_group_index] == '}' and open_bracket_count > 1:
                    open_bracket_count -= 1
                    end_group_index += 1
                    continue

                # Increase end_group_index by 1 to get the closing bracket
                end_group_index += 1
                new_a.append(a[start_group_index:end_group_index])

                # The next start index is at least after the closing
                # bracket, a closing bracket can be followed by a comma
                # since we are in an array.
                start_group_index = end_group_index + 1
                while (start_group_index < len(a[1:]) and
                       a[start_group_index] != '{'):
                    start_group_index += 1
                end_group_index = start_group_index + 1
            a = new_a
        b = 0
        if strarray:
            # Re-join pieces that were split on commas occurring *inside*
            # quoted strings: keep merging neighbours until the piece is a
            # well-bounded string.
            while b < len(a) - 1:
                ab = a[b].strip()
                while (not self.bounded_string(ab) or
                       (len(ab) > 2 and
                        ab[0] == ab[1] == ab[2] and
                        ab[-2] != ab[0] and
                        ab[-3] != ab[0])):
                    a[b] = a[b] + ',' + a[b + 1]
                    ab = a[b].strip()
                    if b < len(a) - 2:
                        a = a[:b + 1] + a[b + 2:]
                    else:
                        a = a[:b + 1]
                b += 1
    else:
        # Branch 2: nested arrays — split on commas only at bracket depth 0.
        al = list(a[1:-1])
        a = []
        openarr = 0
        j = 0
        for i in _range(len(al)):
            if al[i] == '[':
                openarr += 1
            elif al[i] == ']':
                openarr -= 1
            elif al[i] == ',' and not openarr:
                a.append(''.join(al[j:i]))
                j = i + 1
        a.append(''.join(al[j:]))
    # Recursively parse each element and enforce type homogeneity.
    for i in _range(len(a)):
        a[i] = a[i].strip()
        if a[i] != '':
            nval, ntype = self.load_value(a[i])
            if atype:
                if ntype != atype:
                    raise ValueError("Not a homogeneous array")
            else:
                atype = ntype
            retval.append(nval)
    return retval
def preserve_comment(self, line_no, key, comment, beginline):
    """Hook for subclasses that wish to record comments; no-op here."""
def embed_comments(self, idx, currentlevel):
    """Hook for subclasses that re-attach saved comments; no-op here."""
class TomlPreserveCommentDecoder(TomlDecoder):
    """TOML decoder that records comments so they can be re-emitted later."""

    def __init__(self, _dict=dict):
        # Maps line number -> (key, comment text, comment-starts-line flag)
        self.saved_comments = {}
        super(TomlPreserveCommentDecoder, self).__init__(_dict)

    def preserve_comment(self, line_no, key, comment, beginline):
        """Remember the comment found on *line_no* for later embedding."""
        self.saved_comments[line_no] = (key, comment, beginline)

    def embed_comments(self, idx, currentlevel):
        """Wrap currentlevel[key] in a CommentValue carrying the saved comment."""
        if idx not in self.saved_comments:
            return

        key, comment, beginline = self.saved_comments[idx]
        currentlevel[key] = CommentValue(currentlevel[key], comment, beginline,
                                         self._dict)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@toml@py2@toml@decoder.py@.PATH_END.py
|
{
"filename": "makesetup.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/ObitSD/python/makesetup.py",
"type": "Python"
}
|
# Python/Obit build utility
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2005
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: bcotton@nrao.edu.
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Create setup.py file from setupdata.py file with
# compiler/linker instructions (see Makefile)
# to build and install interface module, e.g.:
# CFLAGS='-g -O2 -fPIC -I/usr/include/glib-2.0 ... '
# CPPFLAGS=' -I../include -I/usr/local/include/python2.3 ...'
# LDFLAGS=' -L/home/bcotton/opt/xmlrpc/lib ... '
# LIBS='../lib/LINUX/libObit.a -lcfitsio -lm ... '
#
# Run setup.py as:
# python setup.py build install --install-lib=.
import os
# Read details left by Makefile
import setupdata
# Variables needed
packageName = 'Obit'
packageVer = '1.0'
compileArgs = []       # -D preprocessor defines
incDirs = []           # -I include directories
libDirs = []           # -L library search directories
runtimeLibDirs = []    # -Wl,-rpath runtime library directories
libs = []              # -l library names

# Parse include directories from setupdata.CFLAGS
tt = setupdata.CFLAGS
# Cleanup line: fold newlines and escaped blanks so split() works
tt = tt.replace("\n", "_"); tt = tt.replace("\\ ", "_")
t = tt.split()
for x in t:
    if x[0:2] == '-I':
        incDirs.append(x[2:])

# Parse includes, defines and the package version from setupdata.CPPFLAGS
tt = setupdata.CPPFLAGS
tt = tt.replace("\n", "_"); tt = tt.replace("\\ ", "_")
t = tt.split()
for x in t:
    if x[0:2] == '-I':
        incDirs.append(x[2:])
    elif x[0:7] == '-DHELLO':
        pass  # placeholder define from the Makefile; deliberately ignored
    elif x[0:2] == '-D':
        compileArgs.append(x)
        # if x[0:16]=='-DPACKAGE_NAME=\"':
        #     packageName = x[16:len(x)-1]
        if x[0:19] == '-DPACKAGE_VERSION=\"':
            packageVer = x[19:len(x) - 1]

# Parse libraries from setupdata.LIBS
tt = setupdata.LIBS
tt = tt.replace("\n", "_"); tt = tt.replace("\\ ", "_")
t = tt.split()
# assume package library name
libs = ['ObitSD']
for x in t:
    if x[0:2] == '-l':
        libs.append(x[2:])
    elif x[0:3] == '-Wl':  # Ignore linker options
        pass
    elif x[0:2] == '-L':
        libDirs.append(x[2:])
    else:  # better be a path to a library file, e.g. ../lib/LINUX/libObit.a
        libDirs.append(os.path.dirname(x))
        # strip the leading "lib" and the extension to get the -l name
        libs.append(os.path.basename(x)[3:].split('.')[0])

# Parse library search paths and rpaths from setupdata.LDFLAGS
tt = setupdata.LDFLAGS
tt = tt.replace("\n", "_"); tt = tt.replace("\\ ", "_")
t = tt.split()
for x in t:
    if x[0:2] == '-L':
        libDirs.append(x[2:])
    elif x[0:11] == '-Wl,-rpath,':
        runtimeLibDirs.append(x[11:])

# Dump it out.  Use open() — the file() builtin no longer exists in
# Python 3 — and a with-block so the file is closed even if a write fails.
with open("setup.py", "w") as outfile:
    outfile.write('from distutils.core import setup, Extension' + os.linesep)
    outfile.write('setup( name=\"' + packageName + '\", version=\"' + packageVer + '\",' + os.linesep)
    outfile.write('      ext_modules=[Extension(\"' + packageName + '\",' + os.linesep)
    outfile.write('                   [\'' + packageName + '_wrap.c\'],' + os.linesep)
    outfile.write('                   extra_compile_args=' + str(compileArgs) + ',' + os.linesep)
    outfile.write('                   library_dirs=' + str(libDirs) + ',' + os.linesep)
    outfile.write('                   libraries=' + str(libs) + ',' + os.linesep)
    outfile.write('                   runtime_library_dirs=' + str(runtimeLibDirs) + ')],' + os.linesep)
    outfile.write('      include_dirs=' + str(incDirs) + os.linesep)
    outfile.write(')')
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@ObitSD@python@makesetup.py@.PATH_END.py
|
{
"filename": "_len.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/parcats/line/colorbar/_len.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `parcats.line.colorbar.len` attribute (number >= 0)."""

    def __init__(
        self, plotly_name="len", parent_name="parcats.line.colorbar", **kwargs
    ):
        # Pull the defaults out of kwargs first so caller overrides win.
        edit_type = kwargs.pop("edit_type", "colorbars")
        min_value = kwargs.pop("min", 0)
        super(LenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=min_value,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@parcats@line@colorbar@_len.py@.PATH_END.py
|
{
"filename": "power.py",
"repo_name": "Harry45/DESEMU",
"repo_path": "DESEMU_extracted/DESEMU-main/jax_cosmo/power.py",
"type": "Python"
}
|
# This module computes power spectra
import jax
import jax.numpy as jnp
import jax_cosmo.background as bkgrd
import jax_cosmo.constants as const
import jax_cosmo.transfer as tklib
from jax_cosmo.scipy.integrate import romb
from jax_cosmo.scipy.integrate import simps
from jax_cosmo.scipy.interpolate import interp
from jax_cosmo.utils import load_pkl, a2z
from jax_cosmo.emulator import prediction_pklin_jax, prediction_gf_jax
from jax.experimental import checkify
# Enable double precision: the cosmological integrals below lose accuracy
# in float32.
jax.config.update("jax_enable_x64", True)

# if jax.__version__ == "0.3.25":
#     from jax.config import config
# else:
#     from jax import config
# config.update("jax_enable_x64", True)

__all__ = ["primordial_matter_power", "linear_matter_power", "nonlinear_matter_power"]

# -------------------------------------------------------------------------------
# Setup used for building the emulator - these should be fixed
# -------------------------------------------------------------------------------
# priors for the cosmological parameters (the ranges the emulator was
# trained on; inputs outside these bounds are extrapolations)
COSMO_PRIORS = {
    "sigma8": {"distribution": "uniform", "loc": 0.6, "scale": 0.4},
    "Omega_cdm": {"distribution": "uniform", "loc": 0.07, "scale": 0.43},
    "Omega_b": {"distribution": "uniform", "loc": 0.028, "scale": 0.027},
    "h": {"distribution": "uniform", "loc": 0.64, "scale": 0.18},
    "n_s": {"distribution": "uniform", "loc": 0.87, "scale": 0.2},
}

# number of nodes for redshifts
NZ = 20

# number of nodes for the linear matter power spectrum
NK = 30

# minimum wavenumber
KMIN = 1e-4

# maximum wavenumber
KMAX = 50

# minimum redshift
ZMIN = 0.0

# maximum redshift
ZMAX = 3.0

# paths where the GPs are stored
PATH_QUANT = "quantities"
# PATH_QUANT = "/mnt/zfsusers/phys2286/projects/DESEMU/notebooks/quantities"

# the important quantities (linear matter spectrum and growth factor)
# NOTE: these pickles are loaded at import time and must exist on disk.
QUANT_PKLIN = [load_pkl(PATH_QUANT, f"pklin_{i}") for i in range(NK)]
QUANT_GF = [load_pkl(PATH_QUANT, f"gf_{i}") for i in range(NZ - 1)]

# the grids used in the training of the emulator
ZGRID = jnp.linspace(ZMIN, ZMAX, NZ)
KGRID = jnp.geomspace(KMIN, KMAX, NK)

# option to use the emulator or not
USE_EMU = True
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
def linear_matter_power_emu(cosmo, k: jnp.ndarray, a=1.0) -> jnp.ndarray:
    """
    Calculates the linear matter power spectrum using the emulator.

    The emulator input is the 5D parameter vector
    [sigma8, Omega_cdm, Omega_b, h, n_s], assembled here from *cosmo*.

    Args:
        cosmo: cosmology object providing sigma8, Omega_c, Omega_b, h, n_s
        k (jnp.ndarray): wavenumbers at which to evaluate the spectrum
        a (float): scale factor (default 1.0, i.e. redshift 0)

    Returns:
        jnp.ndarray: the linear matter power spectrum for the queried
        wavenumbers and scale factor.
    """
    k = jnp.atleast_1d(k)
    a = jnp.atleast_1d(a)
    redshift = a2z(a)
    testpoint = jnp.array(
        [cosmo.sigma8, cosmo.Omega_c, cosmo.Omega_b, cosmo.h, cosmo.n_s]
    )

    # this is the linear matter power spectrum at redshift 0
    pklin_jax_0 = prediction_pklin_jax(testpoint, QUANT_PKLIN)

    # this is the growth factor between 0 and 3 (20 values)
    gf_jax = prediction_gf_jax(testpoint, QUANT_GF)

    # interpolate the power spectrum for the new wavenumbers (log-log space)
    pklin_jax_0 = jnp.interp(jnp.log(k), jnp.log(KGRID), jnp.log(pklin_jax_0))
    pklin_jax_0 = jnp.exp(pklin_jax_0)

    # this is the linear matter power spectrum for the queried scale factor.
    # NOTE(review): gf_jax is applied as a plain factor — presumably the
    # emulator already predicts the growth of P(k) (i.e. D(z)^2); confirm.
    pklin_jax_z = pklin_jax_0 * jnp.interp(redshift, ZGRID, gf_jax)
    return pklin_jax_z
def primordial_matter_power(cosmo, k):
    """Primordial power spectrum, P(k) = k**n_s.

    Parameters
    ----------
    cosmo : object with an ``n_s`` attribute (scalar spectral index)
    k : array_like
        Wave number in h Mpc^{-1}

    Returns
    -------
    array_like
        The power-law primordial spectrum evaluated at *k*.
    """
    spectral_index = cosmo.n_s
    return k ** spectral_index
def linear_matter_power(cosmo, k, a=1.0, transfer_fn=tklib.Eisenstein_Hu, **kwargs):
    r"""Computes the linear matter power spectrum.

    Parameters
    ----------
    cosmo: cosmology object
        Supplies sigma8 and the parameters used by the transfer function
    k: array_like
        Wave number in h Mpc^{-1}
    a: array_like, optional
        Scale factor (def: 1.0)
    transfer_fn: transfer_fn(cosmo, k, **kwargs)
        Transfer function

    Returns
    -------
    pk: array_like
        Linear matter power spectrum at the specified scale
        and scale factor.
    """
    k = jnp.atleast_1d(k)
    a = jnp.atleast_1d(a)
    g = bkgrd.growth_factor(cosmo, a)
    t = transfer_fn(cosmo, k, **kwargs)

    # Normalise so that sigma(R=8 Mpc/h, z=0) matches cosmo.sigma8
    pknorm = cosmo.sigma8**2 / sigmasqr(cosmo, 8.0, transfer_fn, **kwargs)

    pk = primordial_matter_power(cosmo, k) * t**2 * g**2

    # Apply normalisation
    pk = pk * pknorm

    return pk.squeeze()
def sigmasqr(cosmo, R, transfer_fn, kmin=0.0001, kmax=1000.0, ksteps=5, **kwargs):
    """Computes the energy of the fluctuations within a sphere of R h^{-1} Mpc

    .. math::

       \\sigma^2(R)= \\frac{1}{2 \\pi^2} \\int_0^\\infty \\frac{dk}{k} k^3 P(k,z) W^2(kR)

    where

    .. math::

       W(kR) = \\frac{3j_1(kR)}{kR}
    """
    # NOTE(review): `ksteps` is unused — resolution is set by romb's divmax.
    # NOTE(review): the integration bounds are log10(k) while the integrand
    # exponentiates with exp(); this matches upstream jax_cosmo but looks
    # inconsistent — confirm before changing.
    def int_sigma(logk):
        k = jnp.exp(logk)
        x = k * R
        # Fourier transform of the spherical top-hat window
        w = 3.0 * (jnp.sin(x) - x * jnp.cos(x)) / (x * x * x)
        pk = transfer_fn(cosmo, k, **kwargs) ** 2 * primordial_matter_power(cosmo, k)
        return k * (k * w) ** 2 * pk

    y = romb(int_sigma, jnp.log10(kmin), jnp.log10(kmax), divmax=7)
    return 1.0 / (2.0 * jnp.pi**2.0) * y
def linear(cosmo, k, a, transfer_fn):
    """Trivial wrapper: return the *linear* matter power spectrum.

    Exists so the linear spectrum can be passed wherever a
    ``nonlinear_fn``-style callable is expected.
    """
    return linear_matter_power(cosmo, k, a=a, transfer_fn=transfer_fn)
def _halofit_parameters(cosmo, a, transfer_fn):
    r"""Computes the non linear scale,
    effective spectral index,
    spectral curvature

    Returns (k_nl, n_eff, C), one entry per requested scale factor.
    """
    # Step 1: Finding the non linear scale for which sigma(R)=1
    # That's our search range for the non linear scale
    logr = jnp.linspace(jnp.log(1e-4), jnp.log(1e1), 256)

    # TODO: implement a better root finding algorithm to compute the non linear scale
    @jax.vmap
    def R_nl(a):
        def int_sigma(logk):
            k = jnp.exp(logk)
            r = jnp.exp(logr)
            # Outer product evaluates sigma(R) on the whole logr grid at once
            y = jnp.outer(k, r)
            if USE_EMU:
                pk = linear_matter_power_emu(cosmo, k)
            else:
                pk = linear_matter_power(cosmo, k, transfer_fn=transfer_fn)
            g = bkgrd.growth_factor(cosmo, jnp.atleast_1d(a))
            return (
                jnp.expand_dims(pk * k**3, axis=1)
                * jnp.exp(-(y**2))
                / (2.0 * jnp.pi**2)
                * g**2
            )

        sigma = simps(int_sigma, jnp.log(1e-4), jnp.log(1e4), 256)
        # Invert sigma(logr) = 1 by interpolation on the tabulated curve
        root = interp(jnp.atleast_1d(1.0), sigma, logr)
        return jnp.exp(root).clip(
            1e-6
        )  # To ensure that the root is not too close to zero

    # Compute non linear scale
    k_nl = 1.0 / R_nl(jnp.atleast_1d(a)).squeeze()

    # Step 2: Retrieve the spectral index and spectral curvature
    def integrand(logk):
        k = jnp.exp(logk)
        y = jnp.outer(k, 1.0 / k_nl)
        if USE_EMU:
            pk = linear_matter_power_emu(cosmo, k)
        else:
            pk = linear_matter_power(cosmo, k, transfer_fn=transfer_fn)
        g = jnp.expand_dims(bkgrd.growth_factor(cosmo, jnp.atleast_1d(a)), 0)
        res = (
            jnp.expand_dims(pk * k**3, axis=1)
            * jnp.exp(-(y**2))
            * g**2
            / (2.0 * jnp.pi**2)
        )
        # Derivatives of sigma^2 w.r.t. the Gaussian smoothing scale
        dneff_dlogk = 2 * res * y**2
        dC_dlogk = 4 * res * (y**2 - y**4)
        return jnp.stack([dneff_dlogk, dC_dlogk], axis=1)

    res = simps(integrand, jnp.log(1e-4), jnp.log(1e4), 256)

    n_eff = res[0] - 3.0
    C = res[0] ** 2 + res[1]

    return k_nl, n_eff, C
def halofit(cosmo, k, a, transfer_fn, prescription="takahashi2012"):
    r"""Computes the non linear halofit correction to the matter power spectrum.

    Parameters
    ----------
    cosmo: cosmology object
    k: array_like
        Wave number in h Mpc^{-1}
    a: array_like, optional
        Scale factor (def: 1.0)
    transfer_fn: callable
        Transfer function (used when the emulator is disabled)
    prescription: str, optional
        Either 'smith2003' or 'takahashi2012'

    Returns
    -------
    pk: array_like
        Non linear matter power spectrum at the specified scale
        and scale factor.

    Notes
    -----
    The non linear corrections are implemented following :cite:`2003:smith`
    """
    a = jnp.atleast_1d(a)

    # Compute the linear power spectrum
    # NOTE(review): these print() calls run on every call (and during jax
    # tracing) — drop them if this function is jit-compiled.
    if USE_EMU:
        print("Using the emulator")
        pklin = linear_matter_power_emu(cosmo, k, a)
    else:
        print("Not using the emulator")
        pklin = linear_matter_power(cosmo, k, a, transfer_fn)

    # Compute non linear scale, effective spectral index and curvature
    k_nl, n, C = _halofit_parameters(cosmo, a, transfer_fn)

    om_m = bkgrd.Omega_m_a(cosmo, a)
    om_de = bkgrd.Omega_de_a(cosmo, a)
    w = bkgrd.w(cosmo, a)
    frac = om_de / (1.0 - om_m)

    if prescription == "smith2003":
        # eq C9 to C18
        a_n = 10 ** (
            1.4861
            + 1.8369 * n
            + 1.6762 * n**2
            + 0.7940 * n**3
            + 0.1670 * n**4
            - 0.6206 * C
        )
        b_n = 10 ** (0.9463 + 0.9466 * n + 0.3084 * n**2 - 0.9400 * C)
        c_n = 10 ** (-0.2807 + 0.6669 * n + 0.3214 * n**2 - 0.0793 * C)
        gamma_n = 0.8649 + 0.2989 * n + 0.1631 * C
        alpha_n = 1.3884 + 0.3700 * n - 0.1452 * n**2
        beta_n = 0.8291 + 0.9854 * n + 0.3401 * n**2
        mu_n = 10 ** (-3.5442 + 0.1908 * n)
        nu_n = 10 ** (0.9585 + 1.2857 * n)
    elif prescription == "takahashi2012":
        # recalibrated coefficients of the takahashi2012 prescription
        a_n = 10 ** (
            1.5222
            + 2.8553 * n
            + 2.3706 * n**2
            + 0.9903 * n**3
            + 0.2250 * n**4
            - 0.6038 * C
            + 0.1749 * om_de * (1 + w)
        )
        b_n = 10 ** (
            -0.5642 + 0.5864 * n + 0.5716 * n**2 - 1.5474 * C + 0.2279 * om_de * (1 + w)
        )
        c_n = 10 ** (0.3698 + 2.0404 * n + 0.8161 * n**2 + 0.5869 * C)
        gamma_n = 0.1971 - 0.0843 * n + 0.8460 * C
        alpha_n = jnp.abs(6.0835 + 1.3373 * n - 0.1959 * n**2 - 5.5274 * C)
        beta_n = (
            2.0379
            - 0.7354 * n
            + 0.3157 * n**2
            + 1.2490 * n**3
            + 0.3980 * n**4
            - 0.1682 * C
        )
        mu_n = 0.0
        nu_n = 10 ** (5.2105 + 3.6902 * n)
    else:
        raise NotImplementedError

    f1a = om_m ** (-0.0732)
    f2a = om_m ** (-0.1423)
    f3a = om_m**0.0725
    f1b = om_m ** (-0.0307)
    f2b = om_m ** (-0.0585)
    f3b = om_m ** (0.0743)

    if prescription == "takahashi2012":
        f1 = f1b
        f2 = f2b
        f3 = f3b
    elif prescription == "smith2003":
        # blend the two coefficient sets with the dark-energy fraction
        f1 = frac * f1b + (1 - frac) * f1a
        f2 = frac * f2b + (1 - frac) * f2a
        f3 = frac * f3b + (1 - frac) * f3a
    else:
        raise NotImplementedError

    f = lambda x: x / 4.0 + x**2 / 8.0

    # Dimensionless linear power Delta^2(k)
    d2l = k**3 * pklin / (2.0 * jnp.pi**2)

    y = k / k_nl

    # Eq C2 (quasi-linear term)
    d2q = d2l * ((1.0 + d2l) ** beta_n / (1 + alpha_n * d2l)) * jnp.exp(-f(y))

    # Halo (one-halo) term
    d2hprime = (
        a_n * y ** (3 * f1) / (1.0 + b_n * y**f2 + (c_n * f3 * y) ** (3.0 - gamma_n))
    )

    d2h = d2hprime / (1.0 + mu_n / y + nu_n / y**2)

    # Eq. C1
    d2nl = d2q + d2h

    # Convert back from Delta^2(k) to P(k)
    pk_nl = 2.0 * jnp.pi**2 / k**3 * d2nl

    return pk_nl.squeeze()
def nonlinear_matter_power(
    cosmo, k, a=1.0, transfer_fn=tklib.Eisenstein_Hu, nonlinear_fn=halofit
):
    """Computes the non-linear matter power spectrum.

    A thin dispatcher: the actual correction is delegated to
    *nonlinear_fn* (halofit by default), giving every non-linear
    prescription a common call signature.
    """
    return nonlinear_fn(cosmo, k, a, transfer_fn=transfer_fn)
|
Harry45REPO_NAMEDESEMUPATH_START.@DESEMU_extracted@DESEMU-main@jax_cosmo@power.py@.PATH_END.py
|
{
"filename": "ppxf_example_integral_field_high_redshift.ipynb",
"repo_name": "micappe/ppxf_examples",
"repo_path": "ppxf_examples_extracted/ppxf_examples-main/ppxf_example_integral_field_high_redshift.ipynb",
"type": "Jupyter Notebook"
}
|
# pPXF: Fitting integral-field data at high redshift

## Description
Usage example for the procedure pPXF originally described in
[Cappellari & Emsellem (2004)](http://adsabs.harvard.edu/abs/2004PASP..116..138C),
substantially upgraded in
[Cappellari (2017)](http://adsabs.harvard.edu/abs/2017MNRAS.466..798C)
and with the inclusion of photometry and linear constraints in
[Cappellari (2023)](https://ui.adsabs.harvard.edu/abs/2023MNRAS.526.3273C).
### MODIFICATION HISTORY
* V1.0.0: Michele Cappellari, Oxford, 7 June 2024
___
```python
from pathlib import Path
from urllib import request
import numpy as np
from astropy.io import fits
from scipy import signal, ndimage
import matplotlib.pyplot as plt
from ppxf.ppxf import ppxf, robust_sigma
import ppxf.ppxf_util as util
import ppxf.sps_util as lib
from vorbin.voronoi_2d_binning import voronoi_2d_binning
from plotbin.plot_velfield import plot_velfield
from plotbin.display_pixels import display_pixels
```
## Function to read the MUSE cube
```python
class read_data_cube:
    def __init__(self, filename, lam_range, redshift, sn_min=1):
        """Read data cube, de-redshift, log rebin and compute coordinates of each spaxel."""
        self.read_fits_file(filename)

        # Only use the specified rest-frame wavelength range
        wave = self.wave/(1 + redshift)  # de-redshift the spectrum
        w = (wave > lam_range[0]) & (wave < lam_range[1])
        wave = wave[w]
        cube = self.cube[w, ...]
        cubevar = self.cubevar[w, ...]

        # NOTE: this local `signal` shadows the imported scipy `signal` module
        signal = np.nanmedian(cube, 0)
        noise = np.sqrt(np.nanmedian(cubevar, 0))

        # Create coordinates centred on the brightest spaxel
        jm = np.argmax(signal)
        row, col = map(np.ravel, np.indices(cube.shape[-2:]))
        x = (col - col[jm])*self.pixsize
        y = (row - row[jm])*self.pixsize

        # Transform cube into 2-dim array of spectra
        npix = cube.shape[0]
        spectra = cube.reshape(npix, -1)      # create array of spectra [npix, nx*ny]
        variance = cubevar.reshape(npix, -1)  # create array of variance [npix, nx*ny]

        c = 299792.458  # speed of light in km/s
        velscale = np.min(c*np.diff(np.log(wave)))  # Preserve smallest velocity step
        lam_range_temp = [np.min(wave), np.max(wave)]
        spectra, ln_lam_gal, velscale = util.log_rebin(lam_range_temp, spectra, velscale=velscale)

        good = select_galaxy_spaxels(signal/noise, sn_min)

        # Coordinates and signal for all spaxels of original cube (for plotting)
        self.signal_all = signal.ravel()
        self.x_all = x
        self.y_all = y

        # Coordinates and spectra only for spaxels with enough signal
        self.good = good
        self.spectra = spectra[:, good]
        self.variance = variance[:, good]
        self.x = x[good]
        self.y = y[good]
        self.signal = signal.flat[good]
        self.noise = noise.flat[good]
        self.col = col + 1  # start counting from 1
        self.row = row + 1
        self.velscale = velscale
        self.ln_lam_gal = ln_lam_gal
        self.fwhm_gal = self.fwhm_gal/(1 + redshift)  # de-redshift the resolution too

###############################################################################

    def read_fits_file(self, filename):
        """
        Read MUSE cube, noise, wavelength, spectral FWHM and pixel size.

        It must return the cube and cubevar as (npix, nx, ny) and wave as (npix,)

        IMPORTANT: This is not a general function! Its details depend on the
        way the data were stored in the FITS file and the available keywords in
        the FITS header. One may have to adapt the function to properly read
        the FITS file under analysis.
        """
        hdu = fits.open(filename)
        head = hdu["data"].header
        cube = hdu["data"].data
        cubevar = hdu["stat"].data  # Variance per spectral pixel

        # Only use the specified rest-frame wavelength range
        wave = head['CRVAL3'] + head['CD3_3']*np.arange(cube.shape[0])

        self.cube = cube
        self.cubevar = cubevar
        self.wave = wave
        self.fwhm_gal = 2.62  # Median FWHM = 2.62Å. Range: 2.51--2.88 (ESO instrument manual).
        self.pixsize = abs(head["CD2_2"])*3600  # 0.2"

###############################################################################
def select_galaxy_spaxels(sn, sn_min):
    """Return indices of the largest blob of connected spaxels with S/N > sn_min.

    A 3x3 median filter suppresses isolated noisy spaxels before
    thresholding; connected-component labelling then keeps only the
    biggest contiguous region, assumed to be the galaxy.
    """
    smooth_mask = signal.medfilt(sn, 3) > sn_min
    labels, _ = ndimage.label(smooth_mask)     # label 0 is the background
    blob_sizes = np.bincount(labels.flat)[1:]  # sizes of each blob, skip background
    largest = np.argmax(blob_sizes) + 1        # +1 restores the label offset
    return np.flatnonzero(labels == largest)
```
## Function to iteratively clip the outliers
```python
def clip_outliers(galaxy, bestfit, mask):
    """
    Repeat the fit after clipping pixels deviating more than 3*sigma in
    relative error until the bad pixels don't change any more. This function
    uses eq.(34) of Cappellari (2023)
    https://ui.adsabs.harvard.edu/abs/2023MNRAS.526.3273C
    """
    while True:
        # Optimal linear scaling of the best fit to the data (eq. 34)
        scale = galaxy[mask] @ bestfit[mask]/np.sum(bestfit[mask]**2)
        resid = scale*bestfit[mask] - galaxy[mask]
        err = robust_sigma(resid, zero=1)
        ok_old = mask
        # NOTE(review): the threshold compares the *unscaled* bestfit with the
        # data while `err` was measured on scaled residuals — presumably fine
        # because scale ~ 1 after a pPXF fit, but worth confirming.
        mask = np.abs(bestfit - galaxy) < 3*err
        if np.array_equal(mask, ok_old):
            break
    return mask
```
## Function to fit the stellar kinematics
```python
def ppxf_fit_and_clean(templates, galaxy, velscale, start, mask0, lam, lam_temp, plot=True, quiet=False):
    """
    This is a simple pPXF wrapper. It performs two pPXF fits: the first one
    serves to estimate the scatter in the spectrum and identify the outlier
    pixels. The second fit uses the mask obtained from the first fit to exclude
    the outliers. The general approach used in this function is described in
    Sec.6.5 of Cappellari (2023) https://ui.adsabs.harvard.edu/abs/2023MNRAS.526.3273C
    """
    mask = mask0.copy()
    # First pass: fit with the input mask to estimate the residual scatter
    pp = ppxf(templates, galaxy, np.ones_like(galaxy), velscale, start,
              moments=2, degree=4, mdegree=-1, lam=lam, lam_temp=lam_temp,
              mask=mask, quiet=quiet)
    if plot:
        plt.figure(figsize=(20, 3))
        plt.subplot(121)
        pp.plot()
        plt.title("Initial pPXF fit before outliers removal")

    # Iteratively flag pixels deviating more than 3*sigma from the first fit
    mask = clip_outliers(galaxy, pp.bestfit, mask)

    # Add clipped pixels to the original masked emission lines regions and repeat the fit
    mask &= mask0
    pp = ppxf(templates, galaxy, np.ones_like(galaxy), velscale, start,
              moments=2, degree=4, mdegree=-1, lam=lam, lam_temp=lam_temp,
              mask=mask, quiet=quiet)

    # Optimal single template: weighted combination of the input templates
    pp.optimal_template = templates.reshape(templates.shape[0], -1) @ pp.weights

    # Empirical S/N estimated from the residuals of the good pixels
    resid = (pp.galaxy - pp.bestfit)[pp.goodpixels]
    pp.sn = np.nanmedian(pp.galaxy[pp.goodpixels])/robust_sigma(resid)

    if plot:
        plt.subplot(122)
        pp.plot()

    return pp
```
## Data input and Voronoi binning
Load the MUSE data cube
```python
lam_range_temp = [3540, 5500]  # rest-frame wavelength range fitted (Angstrom)
redshift = 0.43  # Initial redshift estimate
sn_min = 0.5  # This is to remove residuals-dominated spectra before Voronoi binning
```
Read the input data. If the data file does not exists, download it from my GitHub page.
```python
# Download the example MUSE cube on the first run, then read and prepare it
objfile = Path('MACS1206_7876.fits')
if not objfile.is_file():
    url = "https://raw.githubusercontent.com/micappe/ppxf_examples/main/" + objfile.name
    request.urlretrieve(url, objfile)

s = read_data_cube(objfile, lam_range_temp, redshift)
```
Spatially Voronoi bin the data to a target $S/N=5$ per bin using the
[VorBin](https://pypi.org/project/vorbin/) method and software of
[Cappellari & Copin (2003)](https://ui.adsabs.harvard.edu/abs/2003MNRAS.342..345C/abstract)
```python
plt.figure(figsize=(4, 5))
target_sn = 5  # target S/N per Voronoi bin
bin_num = voronoi_2d_binning(s.x, s.y, s.signal, s.noise, target_sn, plot=1, quiet=1)[0]
plt.tight_layout()
```

## Setup stellar templates
pPXF can be used with any set of SPS population templates. However, I am
currently providing (with permission) ready-to-use template files for four
SPS. One can just uncomment one of the four models below. The included files
are only a subset of the SPS that can be produced with the models, and one
should use the relevant software/website to produce different sets of SPS
templates if needed.
1. If you use the [fsps v3.2](https://github.com/cconroy20/fsps) SPS model
templates, please also cite in your paper
[Conroy et al. (2009)](https://ui.adsabs.harvard.edu/abs/2009ApJ...699..486C) and
[Conroy et al. (2010)](https://ui.adsabs.harvard.edu/abs/2010ApJ...712..833C).
2. If you use the [GALAXEV v2020](http://www.bruzual.org/bc03/) SPS model
templates, please also cite in your paper
[Bruzual & Charlot (2003)](https://ui.adsabs.harvard.edu/abs/2003MNRAS.344.1000B).
3. If you use the [E-MILES](http://miles.iac.es/) SPS model templates,
please also cite in your paper
[Vazdekis et al. (2016)](https://ui.adsabs.harvard.edu/abs/2016MNRAS.463.3409V).
<font color="red">WARNING: The E-MILES models only include SPS with age > 63 Myr and
are not recommended for highly star forming galaxies.</font>
4. If you use the [X-Shooter Spectral Library (XSL)](http://xsl.u-strasbg.fr/)
SPS model templates, please also cite in your paper
[Verro et al. (2022)](https://ui.adsabs.harvard.edu/abs/2022A%26A...661A..50V).
<font color="red">WARNING: The XSL models only include SPS with age > 50 Myr and
are not recommended for highly star forming galaxies.</font>
```python
# sps_name = 'fsps'
# sps_name = 'galaxev'
sps_name = 'emiles'
# sps_name = 'xsl'

# Locate the chosen SPS template file shipped with pPXF; download if missing
ppxf_dir = Path(lib.__file__).parent
basename = f"spectra_{sps_name}_9.0.npz"
filename = ppxf_dir / 'sps_models' / basename
if not filename.is_file():
    url = "https://raw.githubusercontent.com/micappe/ppxf_data/main/" + basename
    request.urlretrieve(url, filename)

# Match the templates to the galaxy velocity scale and spectral resolution
sps = lib.sps_lib(filename, s.velscale, s.fwhm_gal)
stars_templates = sps.templates
```
## pPXF Fit to global spectrum
```python
start = [0, 200.]  # (km/s), starting guess for [V,sigma]
mask0 = s.ln_lam_gal > 0  # Do not use an initial mask in this case. This could be used to mask known emission lines
lam_gal = np.exp(s.ln_lam_gal)  # linear wavelengths of the log-rebinned spectra
```
If `single_template` is set to `True`, a single template is created using the
average global spectrum of the galaxy. This single template is then employed
to fit the kinematics across all Voronoi bins. On the other hand, if
`single_template` is set to `False`, the entire library of SPS templates is
fitted to each Voronoi bin individually. Opting for a single optimal template
is generally more robust for data with low signal-to-noise ratios ($S/N$),
but it can introduce bias when there are very strong population gradients.
Although, in most scenarios, the results should not differ significantly
between the two approaches.
```python
single_template = True

if single_template:
    # Fit the high-S/N global spectrum once and reuse its optimal template
    galaxy = np.nanmean(s.spectra[:, s.signal/s.noise > 5], 1)  # Global spectrum
    p0 = ppxf_fit_and_clean(stars_templates, galaxy, s.velscale, start, mask0, lam_gal, sps.lam_temp)
    optimal_template = p0.optimal_template
    txt = f"Global spectrum; $\\sigma$={p0.sol[1]:.0f} km/s; S/N={p0.sn:.1f}"
    print(txt + '\n' + '#'*78)
    plt.title(txt)
else:
    # Fit the full SPS library to every bin instead
    optimal_template = stars_templates
```
Best Fit: Vel sigma
comp. 0: 14 189
chi2/DOF: 1.054; DOF: 2769; degree = 4; mdegree = 0
method = capfit; Jac calls: 2; Func calls: 8; Status: 2
linear_method = lsq_box; Nonzero Templates (>0.1%): 3/150
Best Fit: Vel sigma
comp. 0: 9 186
chi2/DOF: 0.3476; DOF: 2710; degree = 4; mdegree = 0
method = capfit; Jac calls: 2; Func calls: 8; Status: 2
linear_method = lsq_box; Nonzero Templates (>0.1%): 4/150
Global spectrum; $\sigma$=186 km/s; S/N=33.8
##############################################################################

## pPXF fit to all Voronoi bins
Fit the kinematics of all Voronoi bins. Also store the centroid of the
Voronoi bins for plotting.
```python
nbins = np.unique(bin_num).size
velbin, sigbin, xbin, ybin = np.zeros((4, nbins))
for j in range(nbins):
    plot = True if j < 3 else False  # Only plot the first few spectra
    w = bin_num == j
    galaxy = np.nanmean(s.spectra[:, w], 1)  # mean spectrum of the Voronoi bin
    pp = ppxf_fit_and_clean(optimal_template, galaxy, s.velscale, start, mask0, lam_gal, sps.lam_temp, plot=plot, quiet=not plot)
    velbin[j], sigbin[j] = pp.sol
    xbin[j], ybin[j] = np.mean(s.x[w]), np.mean(s.y[w])  # bin centroid for plotting
    if plot:
        txt = f"Voronoi bin {j + 1} / {nbins}; SPS: {sps_name}; $\\sigma$={sigbin[j]:.0f} km/s; S/N={pp.sn:.1f}"
        print(txt + '\n' + '#'*78)
        plt.title(txt)
```
Best Fit: Vel sigma
comp. 0: 30 196
chi2/DOF: 6.319; DOF: 2769; degree = 4; mdegree = 0
method = capfit; Jac calls: 3; Func calls: 11; Status: 2
linear_method = lsq_box; Nonzero Templates (>0.1%): 1/1
Templates weights:
2.21
Best Fit: Vel sigma
comp. 0: 31 197
chi2/DOF: 5.131; DOF: 2742; degree = 4; mdegree = 0
method = capfit; Jac calls: 3; Func calls: 11; Status: 2
linear_method = lsq_box; Nonzero Templates (>0.1%): 1/1
Templates weights:
2.22
Voronoi bin 1 / 37; SPS: emiles; $\sigma$=197 km/s; S/N=18.9
##############################################################################
Best Fit: Vel sigma
comp. 0: 51 151
chi2/DOF: 5.025; DOF: 2769; degree = 4; mdegree = 0
method = capfit; Jac calls: 3; Func calls: 11; Status: 2
linear_method = lsq_box; Nonzero Templates (>0.1%): 1/1
Templates weights:
1.23
Best Fit: Vel sigma
comp. 0: 48 150
chi2/DOF: 4.024; DOF: 2730; degree = 4; mdegree = 0
method = capfit; Jac calls: 3; Func calls: 11; Status: 2
linear_method = lsq_box; Nonzero Templates (>0.1%): 1/1
Templates weights:
1.24
Voronoi bin 2 / 37; SPS: emiles; $\sigma$=150 km/s; S/N=12.9
##############################################################################
Best Fit: Vel sigma
comp. 0: 91 169
chi2/DOF: 4.378; DOF: 2769; degree = 4; mdegree = 0
method = capfit; Jac calls: 3; Func calls: 11; Status: 2
linear_method = lsq_box; Nonzero Templates (>0.1%): 1/1
Templates weights:
1.19
Best Fit: Vel sigma
comp. 0: 93 169
chi2/DOF: 3.751; DOF: 2748; degree = 4; mdegree = 0
method = capfit; Jac calls: 3; Func calls: 11; Status: 2
linear_method = lsq_box; Nonzero Templates (>0.1%): 1/1
Templates weights:
1.18
Voronoi bin 3 / 37; SPS: emiles; $\sigma$=169 km/s; S/N=12.3
##############################################################################



Plot the output kinematics maps
```python
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
plt.subplots_adjust(wspace=0.5)

# Left panel: velocity field, centred on the median velocity
plt.subplot(121)
velbin -= np.median(velbin)
vmax = np.percentile(abs(velbin), 95)
mag = -2.5*np.log10(s.signal_all/np.max(s.signal_all))
plt.tricontour(s.x_all, s.y_all, mag, levels=np.arange(20), linewidths=1, colors='gray');  # 1 mag contours
plot_velfield(xbin, ybin, velbin, colorbar=1, cmap='sauron', label='V (km/s)', vmin=-vmax, vmax=vmax)
display_pixels(s.x, s.y, np.zeros_like(s.x), cmap='gray')
plt.title(objfile.stem)

# Right panel: velocity dispersion map
plt.subplot(122)
vmin, vmax = np.percentile(sigbin, [5, 95])
plt.tricontour(s.x_all, s.y_all, mag, levels=np.arange(20), linewidths=1, colors='gray');  # 1 mag contours
plot_velfield(xbin, ybin, sigbin, colorbar=1, cmap='inferno', label=r'$\sigma$ (km/s)', vmin=vmin, vmax=vmax)
display_pixels(s.x, s.y, np.zeros_like(s.x), cmap='gray')
plt.title(objfile.stem)
plt.pause(5);
```

|
micappeREPO_NAMEppxf_examplesPATH_START.@ppxf_examples_extracted@ppxf_examples-main@ppxf_example_integral_field_high_redshift.ipynb@.PATH_END.py
|
{
"filename": "_outsidetextfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sunburst/_outsidetextfont.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OutsidetextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``sunburst.outsidetextfont`` compound property.

    Auto-generated plotly validator: all validation logic lives in the
    ``CompoundValidator`` base class; this subclass only supplies the
    property name, parent trace name, data class name, and the
    user-facing help text for the nested font attributes.
    """

    def __init__(self, plotly_name="outsidetextfont", parent_name="sunburst", **kwargs):
        super(OutsidetextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # kwargs.pop(...) lets callers override the generated defaults
            data_class_str=kwargs.pop("data_class_str", "Outsidetextfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for color .
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for family .
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for size .
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sunburst@_outsidetextfont.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "mavrix93/LightCurvesClassifier",
"repo_path": "LightCurvesClassifier_extracted/LightCurvesClassifier-master/lcc/stars_processing/deciders/__init__.py",
"type": "Python"
}
|
from .neuron_decider import NeuronDecider
from .supervised_deciders import (QDADec, LDADec, TreeDec, GaussianNBDec, GradBoostDec,
SVCDec, AdaBoostDec, ExtraTreesDec, RandomForestDec)
from .custom_decider import CustomDecider
|
mavrix93REPO_NAMELightCurvesClassifierPATH_START.@LightCurvesClassifier_extracted@LightCurvesClassifier-master@lcc@stars_processing@deciders@__init__.py@.PATH_END.py
|
{
"filename": "style.py",
"repo_name": "desy-multimessenger/nuztf",
"repo_path": "nuztf_extracted/nuztf-main/nuztf/style.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# coding: utf-8
import logging
import os
import shutil
import subprocess

import matplotlib.pyplot as plt
import seaborn as sns
from ztfquery.io import LOCALSOURCE
logger = logging.getLogger(__name__)

sns.set_style("white")

# Use LaTeX text rendering when a `latex` binary is on PATH; otherwise fall
# back to matplotlib defaults with a warning.  shutil.which() replaces the
# previous subprocess.check_output(["which", "latex"]) probe, which raised an
# uncaught FileNotFoundError at import time on systems without a `which`
# binary (e.g. Windows) and spawned a subprocess for a simple PATH lookup.
if shutil.which("latex") is not None:
    plt.rc("text", usetex=True)
    plt.rc("text.latex", preamble=r"\usepackage{romanbar}")
else:
    logger.warning(
        "No Latex installation found. Proceeding without, but plots may look weird."
    )
    plt.rcParams["font.family"] = "sans-serif"

# Directory where plots are written; created on first import.
# exist_ok=True makes creation race-free (vs. exists() followed by makedirs()).
plot_dir = os.path.join(LOCALSOURCE, "plots")
os.makedirs(plot_dir, exist_ok=True)

# Shared figure defaults: resolution, font sizes, and sizes in inches
# derived from a base width via the golden ratio.
dpi = 300

fontsize = 7.0
big_fontsize = 10.0

golden_ratio = 1.618

base_width = 4.0
base_height = base_width / golden_ratio

margin_width = 0.5 * base_width
margin_height = margin_width / golden_ratio

full_width = 1.5 * base_width
full_height_landscape = full_width / golden_ratio
full_height_a4 = 11.75 / 8.25 * full_width  # A4 page aspect (11.75in x 8.25in)

cmap = "rocket"
|
desy-multimessengerREPO_NAMEnuztfPATH_START.@nuztf_extracted@nuztf-main@nuztf@style.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/TidalPy/Extending/burnman/material/custom/__init__.py",
"type": "Python"
}
|
from .ice import (
LowPressureIceConst,
HighPressureIceConst,
UnterbornIce,
Water,
HighPressureIce,
IceX_Fu2010,
IceVII_Fu2010,
IceIh_Fu2010
)
from .pyrite import Pyrite
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@TidalPy@Extending@burnman@material@custom@__init__.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.