metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "__init__.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/period/__init__.py",
"type": "Python"
}
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@indexes@period@__init__.py@.PATH_END.py
|
|
{
"filename": "localdb.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/recipe_system/cal_service/localdb.py",
"type": "Python"
}
|
# Defines the LocalDB class for calibration returns. This is a high-level
# interface to the local calibration manager, and provides an API for
# modifying the database on disk.
from os import path, makedirs
from .caldb import CalDB, CalReturn
from .calrequestlib import get_cal_requests, generate_md5_digest
try:
from . import localmanager
localmanager_available = True
except ImportError as e:
localmanager_available = False
import_error = str(e)
DEFAULT_DB_NAME = "cal_manager.db"


class LocalDB(CalDB):
    """
    The class handling a calibration database stored on disk, via the
    LocalManager class. In addition to the methods required to interface
    with DRAGONS data reduction pipelines, other methods are used to
    provide a full API and effect actions of the "caldb" script.

    An attempt to create an instance of this class without the LocalManager
    being importable will result in an error.

    Attributes
    ----------
    dbfile : str
        name of the file on disk holding the database
    _calmgr : LocalManager instance
        the local calibration manager that will handle the requests
    """
    def __init__(self, dbfile, name=None, valid_caltypes=None, procmode=None,
                 get_cal=True, store_cal=True, log=None, force_init=False):
        """
        Parameters
        ----------
        dbfile : str
            path to the database file (may contain "~"); if it is a
            directory, DEFAULT_DB_NAME is appended
        name : str, optional
            human-readable name for the database; defaults to the
            user-supplied *dbfile* (before "~"-expansion)
        valid_caltypes / procmode / get_cal / store_cal / log
            passed through to the CalDB base class
        force_init : bool
            create and initialize the database file if it does not exist?

        Raises
        ------
        ValueError
            if the local calibration manager cannot be imported
        """
        if not localmanager_available:
            # Fix: original message lacked a space between "as" and
            # "localmanager", producing "aslocalmanager".
            raise ValueError(f"Cannot initialize local database {name} as "
                             "localmanager could not be imported.\n"
                             f"{import_error}")

        if name is None:  # Do this first so "~" is in the name
            name = dbfile
        dbfile = path.expanduser(dbfile)
        # A directory (existing, or indicated by a trailing separator) means
        # "put the default-named database file inside it".
        if path.isdir(dbfile) or dbfile.endswith(path.sep):
            dbfile = path.join(dbfile, DEFAULT_DB_NAME)
            name = path.join(name, DEFAULT_DB_NAME)
        super().__init__(name=name, get_cal=get_cal, store_cal=store_cal,
                         log=log, valid_caltypes=valid_caltypes,
                         procmode=procmode)
        self.dbfile = dbfile
        self._calmgr = localmanager.LocalManager(dbfile)
        if not path.exists(dbfile) and force_init:
            self.log.stdinfo(f"Local database file {dbfile} does not exist. "
                             "Initializing.")
            if not path.exists(path.dirname(dbfile)):
                makedirs(path.dirname(dbfile))
            self.init()

    def _get_calibrations(self, adinputs, caltype=None, procmode=None,
                          howmany=1):
        """
        Retrieve calibrations of the requested type for the input AD objects.

        Each returned file has its md5 checksum verified against the value
        stored in the database; mismatching files are not returned.

        Returns
        -------
        CalReturn
            one entry per input: None, or (filename(s), database name).
            A single filename string if *howmany* == 1, else a list.
        """
        self.log.debug(f"Querying {self.name} for {caltype}")
        cal_requests = get_cal_requests(adinputs, caltype, procmode=procmode,
                                        is_local=True)
        cals = []
        for rq in cal_requests:
            local_cals = self._calmgr.calibration_search(rq, howmany=howmany)
            if not local_cals[0]:
                cals.append(None)
                continue
            good_cals = []
            for calurl, calmd5 in zip(*local_cals):
                calfile = calurl[7:]  # strip "file://"
                cached_md5 = generate_md5_digest(calfile)
                if calmd5 == cached_md5:
                    self.log.debug(f"{rq.filename}: retrieved {calfile}")
                    good_cals.append(calfile)
                else:
                    self.log.warning(f"md5 checksum of {calfile} does not match."
                                     " Not returning this calibration")
            # Append list if >1 requested, else just the filename string
            if good_cals:
                cals.append(good_cals if howmany != 1 else good_cals[0])
            else:
                cals.append(None)
        return CalReturn([None if cal is None else (cal, self.name)
                          for cal in cals])

    def _store_calibration(self, cal, caltype=None):
        """Store the calibration. The LocalDB is not interested in science"""
        if not self.store_cal:
            return
        # Science frames are deliberately not ingested.
        if caltype is not None and "science" in caltype:
            self.log.stdinfo(f"{self.name}: NOT storing {cal} as {caltype}")
            return
        if not path.exists(cal):
            raise OSError(f"File {cal} does not exist.")
        if caltype is not None:
            self.log.stdinfo(f"{self.name}: Storing {cal} as {caltype}")
        self._calmgr.ingest_file(cal)

    # The following methods provide an API to modify the database, by
    # initializing it, removing a named calibration, and listing the files
    # it contains.
    def init(self, wipe=False):
        """
        Initialize a calibration database. Callers will usually only want to do
        this once. But if called again, it will wipe the old database.

        Parameters
        ----------
        wipe : <bool>, optional
            If the database exists and this parameter is `True`, the file will
            be removed and recreated before initializing

        Raises
        ------
        IOError
            If the file exists and there a system error when trying to
            remove it (eg. lack of permissions).
        LocalManagerError
            If the file exists and `wipe` was `False`
        """
        return self._calmgr.init_database(wipe=wipe)

    def add_cal(self, calfile):
        """Ingest a single calibration file into the database."""
        self._store_calibration(calfile)

    def add_directory(self, path, walk=False):
        """
        Ingest one or more files from a given directory, optionally searching
        all subdirectories. This is not used by primitives in the DRAGONS
        data reduction pipelines.

        Parameters
        ----------
        path : str
            directory containing files to be ingested
        walk : bool
            add files from all subdirectories?
        """
        # NOTE: the parameter shadows os.path within this method; it is only
        # forwarded, so the module functions are not needed here.
        self._calmgr.ingest_directory(path, walk=walk, log=None)

    def remove_cal(self, calfile):
        """
        Removes a calibration file from the database. Note that only the filename
        is relevant. All duplicate copies in the database will be removed.

        Parameters
        ----------
        calfile : <str>
            Path to the file. It can be either absolute or relative
        """
        return self._calmgr.remove_file(path.basename(calfile))

    def list_files(self):
        """
        List all files in the local calibration database. This is not used by
        primitives in the DRAGONS data reduction pipelines.

        Returns
        -------
        LocalManager.list_files: <generator>.
            (See class docstring for example of how to use this generator.)

        Raises
        ------
        LocalManagerError
            Raised when unable to read database.
        """
        return self._calmgr.list_files()
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@recipe_system@cal_service@localdb.py@.PATH_END.py
|
{
"filename": "test_rename_axis.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/series/methods/test_rename_axis.py",
"type": "Python"
}
|
import pytest
from pandas import (
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
class TestSeriesRenameAxis:
    def test_rename_axis_mapper(self):
        # GH 19978
        midx = MultiIndex.from_product([["a", "b", "c"], [1, 2]],
                                       names=["ll", "nn"])
        series = Series(list(range(len(midx))), index=midx)

        renamed = series.rename_axis(index={"ll": "foo"})
        assert renamed.index.names == ["foo", "nn"]

        renamed = series.rename_axis(index=str.upper, axis=0)
        assert renamed.index.names == ["LL", "NN"]

        renamed = series.rename_axis(index=["foo", "goo"])
        assert renamed.index.names == ["foo", "goo"]

        with pytest.raises(TypeError, match="unexpected"):
            series.rename_axis(columns="wrong")

    def test_rename_axis_inplace(self, datetime_series):
        # GH 15704
        expected = datetime_series.rename_axis("foo")
        result = datetime_series
        # In-place rename must return None and mutate the original.
        assert result.rename_axis("foo", inplace=True) is None
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
    def test_rename_axis_none(self, kwargs):
        # GH 25034
        idx = Index(list("abc"), name="foo")
        ser = Series([1, 2, 3], index=idx)

        result = ser.rename_axis(**kwargs)
        # Passing an explicit None clears the name; no arguments is a no-op.
        expected_index = idx.rename(None) if kwargs else idx
        expected = Series([1, 2, 3], index=expected_index)
        tm.assert_series_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@series@methods@test_rename_axis.py@.PATH_END.py
|
{
"filename": "make_superdark.py",
"repo_name": "jotaylor/acdc-hst",
"repo_path": "acdc-hst_extracted/acdc-hst-main/src/acdc/superdark/make_superdark.py",
"type": "Python"
}
|
import numpy as np
import argparse
from astropy.io import fits
import os
import glob
from acdc.database.query_cos_dark import files_by_mjd
def sum_data(mjdstart, mjdend):
    """
    Sum COS dark exposures between two MJDs, per FUV segment.

    Returns the time-normalized superdarks (counts/s) and total exposure
    times for segments FUVA and FUVB.
    """
    df = files_by_mjd(mjdstart, mjdend)
    exptime_by_seg = {"FUVA": 0, "FUVB": 0}
    dark_by_seg = {"FUVA": np.zeros((1024, 16384)),
                   "FUVB": np.zeros((1024, 16384))}
    for _, row in df.iterrows():
        fileloc = row["fileloc"]
        segment = row["segment"]
        filename = os.path.basename(fileloc)
        pid = os.path.dirname(fileloc).split("/")[-1]
        # Look up the matching counts image in the COSMO archive.
        cosmofile = glob.glob(os.path.join("/grp/hst/cos2/cosmo/", pid, filename.replace("corrtag", "counts")+"*"))[0]
        print(cosmofile)
        exptime = fits.getval(cosmofile, "exptime", 1)
        data = fits.getdata(cosmofile)
        key = "FUVA" if segment == "FUVA" else "FUVB"
        exptime_by_seg[key] += exptime
        dark_by_seg[key] += data
    superdark_a = dark_by_seg["FUVA"] / exptime_by_seg["FUVA"]
    superdark_b = dark_by_seg["FUVB"] / exptime_by_seg["FUVB"]
    return (superdark_a, superdark_b,
            exptime_by_seg["FUVA"], exptime_by_seg["FUVB"])
def make_superdark(mjdstart, mjdend):
    """
    Build and write a superdark FITS file for the given MJD range.

    The output file contains a primary HDU recording the MJD range and one
    image extension per FUV segment, each holding the time-normalized
    superdark with the total exposure time in its header.
    """
    superdark_a, superdark_b, total_time_a, total_time_b = sum_data(mjdstart, mjdend)
    hdr = fits.Header()
    hdr["MJDSTART"] = mjdstart
    hdr["MJDEND"] = mjdend
    primary = fits.PrimaryHDU(header=hdr)
    hdr1 = fits.Header()
    hdr1["EXTNAME"] = "SUPERDARK_FUVA"
    hdr1["EXPTIME"] = total_time_a
    sci1 = fits.ImageHDU(superdark_a, header=hdr1, name="SUPERDARK_FUVA")
    hdr2 = fits.Header()
    # Bug fix: this was "SUPERDARK_FUVA" (copy-paste), mislabeling the
    # FUVB extension header.
    hdr2["EXTNAME"] = "SUPERDARK_FUVB"
    hdr2["EXPTIME"] = total_time_b
    sci2 = fits.ImageHDU(superdark_b, header=hdr2, name="SUPERDARK_FUVB")
    hdu = fits.HDUList([primary, sci1, sci2])
    outname = f"superdark_{mjdstart}_{mjdend}.fits"
    hdu.writeto(outname, overwrite=True)
    print(f"Wrote file {outname}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # type=float so MJD values are numeric (previously they were passed
    # through as strings, ending up as string values in the FITS header).
    parser.add_argument(dest="mjdstart", type=float,
                        help="Starting MJD time to select files")
    parser.add_argument(dest="mjdend", type=float,
                        help="Ending MJD time to select files")
    args = parser.parse_args()
    make_superdark(args.mjdstart, args.mjdend)
|
jotaylorREPO_NAMEacdc-hstPATH_START.@acdc-hst_extracted@acdc-hst-main@src@acdc@superdark@make_superdark.py@.PATH_END.py
|
{
"filename": "art3d.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py3/mpl_toolkits/mplot3d/art3d.py",
"type": "Python"
}
|
# art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
# Minor additions by Ben Axelrod <baxelrod@coroware.com>
"""
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
"""
import math
import numpy as np
from contextlib import contextmanager
from matplotlib import (
artist, cbook, colors as mcolors, lines, text as mtext,
path as mpath)
from matplotlib.collections import (
Collection, LineCollection, PolyCollection, PatchCollection, PathCollection)
from matplotlib.colors import Normalize
from matplotlib.patches import Patch
from . import proj3d
def _norm_angle(a):
"""Return the given angle normalized to -180 < *a* <= 180 degrees."""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
def _norm_text_angle(a):
"""Return the given angle normalized to -90 < *a* <= 90 degrees."""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
def get_dir_vector(zdir):
"""
Return a direction vector.
Parameters
----------
zdir : {'x', 'y', 'z', None, 3-tuple}
The direction. Possible values are:
- 'x': equivalent to (1, 0, 0)
- 'y': equivalent to (0, 1, 0)
- 'z': equivalent to (0, 0, 1)
- *None*: equivalent to (0, 0, 0)
- an iterable (x, y, z) is converted to an array
Returns
-------
x, y, z : array
The direction vector.
"""
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif np.iterable(zdir) and len(zdir) == 3:
return np.array(zdir)
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
    """
    Text object with 3D position and direction.

    Parameters
    ----------
    x, y, z : float
        The position of the text.
    text : str
        The text string to display.
    zdir : {'x', 'y', 'z', None, 3-tuple}
        The direction of the text. See `.get_dir_vector` for a description of
        the values.

    Other Parameters
    ----------------
    **kwargs
        All other parameters are passed on to `~matplotlib.text.Text`.
    """
    def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
        mtext.Text.__init__(self, x, y, text, **kwargs)
        self.set_3d_properties(z, zdir)

    def get_position_3d(self):
        """Return the (x, y, z) position of the text."""
        return self._x, self._y, self._z

    def set_position_3d(self, xyz, zdir=None):
        """
        Set the (*x*, *y*, *z*) position of the text.

        Parameters
        ----------
        xyz : (float, float, float)
            The position in 3D space.
        zdir : {'x', 'y', 'z', None, 3-tuple}
            The direction of the text. If unspecified, the *zdir* will not be
            changed. See `.get_dir_vector` for a description of the values.
        """
        super().set_position(xyz[:2])
        self.set_z(xyz[2])
        if zdir is not None:
            self._dir_vec = get_dir_vector(zdir)

    def set_z(self, z):
        """
        Set the *z* position of the text.

        Parameters
        ----------
        z : float
        """
        self._z = z
        self.stale = True

    def set_3d_properties(self, z=0, zdir='z'):
        """
        Set the *z* position and direction of the text.

        Parameters
        ----------
        z : float
            The z-position in 3D space.
        zdir : {'x', 'y', 'z', 3-tuple}
            The direction of the text. Default: 'z'.
            See `.get_dir_vector` for a description of the values.
        """
        self._z = z
        self._dir_vec = get_dir_vector(zdir)
        self.stale = True

    @artist.allow_rasterization
    def draw(self, renderer):
        # Project the anchor point and a second point offset along the text
        # direction; the angle between the two projections gives the on-screen
        # rotation of the text.
        position3d = np.array((self._x, self._y, self._z))
        proj = proj3d._proj_trans_points(
            [position3d, position3d + self._dir_vec], self.axes.M)
        dx = proj[0][1] - proj[0][0]
        dy = proj[1][1] - proj[1][0]
        angle = math.degrees(math.atan2(dy, dx))
        # Temporarily overwrite the 2D position and rotation so the base Text
        # class draws at the projected location; _setattr_cm restores the
        # original attribute values on exit.
        with cbook._setattr_cm(self, _x=proj[0][0], _y=proj[1][0],
                               _rotation=_norm_text_angle(angle)):
            mtext.Text.draw(self, renderer)
        self.stale = False

    def get_tightbbox(self, renderer=None):
        # Overwriting the 2d Text behavior which is not valid for 3d.
        # For now, just return None to exclude from layout calculation.
        return None
def text_2d_to_3d(obj, z=0, zdir='z'):
    """
    Convert a `.Text` to a `.Text3D` object.

    Parameters
    ----------
    z : float
        The z-position in 3D space.
    zdir : {'x', 'y', 'z', 3-tuple}
        The direction of the text. Default: 'z'.
        See `.get_dir_vector` for a description of the values.
    """
    # Upgrade in place: swap the class, then install the 3D state.
    obj.__class__ = Text3D
    obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
    """
    3D line object.

    .. note:: Use `get_data_3d` to obtain the data associated with the line.
    `~.Line2D.get_data`, `~.Line2D.get_xdata`, and `~.Line2D.get_ydata` return
    the x- and y-coordinates of the projected 2D-line, not the x- and y-data of
    the 3D-line. Similarly, use `set_data_3d` to set the data, not
    `~.Line2D.set_data`, `~.Line2D.set_xdata`, and `~.Line2D.set_ydata`.
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        """
        Parameters
        ----------
        xs : array-like
            The x-data to be plotted.
        ys : array-like
            The y-data to be plotted.
        zs : array-like
            The z-data to be plotted.
        *args, **kwargs
            Additional arguments are passed to `~matplotlib.lines.Line2D`.
        """
        # The 2D base class gets empty data; the real data lives in
        # self._verts3d and is projected into 2D at draw time.
        super().__init__([], [], *args, **kwargs)
        self.set_data_3d(xs, ys, zs)

    def set_3d_properties(self, zs=0, zdir='z'):
        """
        Set the *z* position and direction of the line.

        Parameters
        ----------
        zs : float or array of floats
            The location along the *zdir* axis in 3D space to position the
            line.
        zdir : {'x', 'y', 'z'}
            Plane to plot line orthogonal to. Default: 'z'.
            See `.get_dir_vector` for a description of the values.
        """
        xs = self.get_xdata()
        ys = self.get_ydata()
        # Scalar zs is broadcast so every point gets the same z value.
        zs = cbook._to_unmasked_float_array(zs).ravel()
        zs = np.broadcast_to(zs, len(xs))
        # juggle_axes (defined elsewhere in this module) reorders the triple
        # so that *zdir* is the depth axis.
        self._verts3d = juggle_axes(xs, ys, zs, zdir)
        self.stale = True

    def set_data_3d(self, *args):
        """
        Set the x, y and z data

        Parameters
        ----------
        x : array-like
            The x-data to be plotted.
        y : array-like
            The y-data to be plotted.
        z : array-like
            The z-data to be plotted.

        Notes
        -----
        Accepts x, y, z arguments or a single array-like (x, y, z)
        """
        if len(args) == 1:
            args = args[0]
        for name, xyz in zip('xyz', args):
            if not np.iterable(xyz):
                raise RuntimeError(f'{name} must be a sequence')
        self._verts3d = args
        self.stale = True

    def get_data_3d(self):
        """
        Get the current data

        Returns
        -------
        verts3d : length-3 tuple or array-like
            The current data as a tuple or array-like.
        """
        return self._verts3d

    @artist.allow_rasterization
    def draw(self, renderer):
        # Project the stored 3D vertices into 2D view coordinates and hand
        # them to the Line2D machinery for actual rendering.
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
        self.set_data(xs, ys)
        super().draw(renderer)
        self.stale = False
def line_2d_to_3d(line, zs=0, zdir='z'):
    """
    Convert a `.Line2D` to a `.Line3D` object.

    Parameters
    ----------
    zs : float
        The location along the *zdir* axis in 3D space to position the line.
    zdir : {'x', 'y', 'z'}
        Plane to plot line orthogonal to. Default: 'z'.
        See `.get_dir_vector` for a description of the values.
    """
    # In-place class swap followed by installation of the 3D vertex data.
    line.__class__ = Line3D
    line.set_3d_properties(zs, zdir)
def _path_to_3d_segment(path, zs=0, zdir='z'):
    """Convert a path to a 3D segment."""
    zvals = np.broadcast_to(zs, len(path))
    points = path.iter_segments(simplify=False, curves=False)
    # Pair each 2D vertex with its z value and reorder per *zdir*.
    return [juggle_axes(x, y, z, zdir)
            for ((x, y), _code), z in zip(points, zvals)]
def _paths_to_3d_segments(paths, zs=0, zdir='z'):
    """Convert paths from a collection object to 3D segments."""
    if np.iterable(zs):
        # Explicit z arrays must match the number of paths exactly.
        if len(zs) != len(paths):
            raise ValueError('Number of z-coordinates does not match paths.')
    else:
        zs = np.broadcast_to(zs, len(paths))
    return [_path_to_3d_segment(p, z, zdir) for p, z in zip(paths, zs)]
def _path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
    """Convert a path to a 3D segment with path codes."""
    zvals = np.broadcast_to(zs, len(path))
    points = path.iter_segments(simplify=False, curves=False)
    seg3d = []
    codes = []
    # Collect vertices (reordered per *zdir*) and codes in lockstep.
    for ((x, y), code), z in zip(points, zvals):
        seg3d.append(juggle_axes(x, y, z, zdir))
        codes.append(code)
    return seg3d, codes
def _paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
    """
    Convert paths from a collection object to 3D segments with path codes.
    """
    zvals = np.broadcast_to(zs, len(paths))
    pairs = [_path_to_3d_segment_with_codes(p, z, zdir)
             for p, z in zip(paths, zvals)]
    if not pairs:
        return [], []
    segments, codes = zip(*pairs)
    return list(segments), list(codes)
class Collection3D(Collection):
    """A collection of 3D paths."""

    def do_3d_projection(self):
        """Project the points according to renderer matrix."""
        # Project each stored (N, 3) vertex array into view coordinates.
        xyzs_list = [proj3d.proj_transform(*vs.T, self.axes.M)
                     for vs, _ in self._3dverts_codes]
        # Rebuild the 2D paths from the projected x/y, reusing the stored
        # path codes.
        self._paths = [mpath.Path(np.column_stack([xs, ys]), cs)
                       for (xs, ys, _), (_, cs) in zip(xyzs_list, self._3dverts_codes)]
        zs = np.concatenate([zs for _, _, zs in xyzs_list])
        # The minimum projected z is used for artist draw-order sorting;
        # fall back to a large sentinel when there are no vertices.
        return zs.min() if len(zs) else 1e9
def collection_2d_to_3d(col, zs=0, zdir='z'):
    """Convert a `.Collection` to a `.Collection3D` object."""
    # One z value per path; scalars are broadcast to all paths.
    zs = np.broadcast_to(zs, len(col.get_paths()))
    # For each path, append its z to the 2D vertices to get (N, 3) points,
    # reorder the axes per *zdir*, and keep the original codes.
    col._3dverts_codes = [
        (np.column_stack(juggle_axes(
            *np.column_stack([p.vertices, np.broadcast_to(z, len(p.vertices))]).T,
            zdir)),
         p.codes)
        for p, z in zip(col.get_paths(), zs)]
    # Dynamically create (and cache) a "<OriginalClass>3D" subclass mixing in
    # Collection3D, and swap the instance's class to it.
    col.__class__ = cbook._make_class_factory(Collection3D, "{}3D")(type(col))
class Line3DCollection(LineCollection):
    """
    A collection of 3D lines.
    """

    def set_sort_zpos(self, val):
        """Set the position to use for z-sorting."""
        self._sort_zpos = val
        self.stale = True

    def set_segments(self, segments):
        """
        Set 3D segments.
        """
        self._segments3d = segments
        # Clear the 2D segments; they are recomputed from the 3D data at
        # projection time.
        super().set_segments([])

    def do_3d_projection(self):
        """
        Project the points according to renderer matrix.
        """
        xyslist = [proj3d._proj_trans_points(points, self.axes.M)
                   for points in self._segments3d]
        segments_2d = [np.column_stack([xs, ys]) for xs, ys, zs in xyslist]
        LineCollection.set_segments(self, segments_2d)
        # FIXME
        # Return the minimum projected z over all segments for draw-order
        # sorting (1e9 sentinel when there are no segments).
        minz = 1e9
        for xs, ys, zs in xyslist:
            minz = min(minz, min(zs))
        return minz
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
    """Convert a `.LineCollection` to a `.Line3DCollection` object."""
    # Build the 3D segment data first, then upgrade the class in place.
    segs = _paths_to_3d_segments(col.get_paths(), zs, zdir)
    col.__class__ = Line3DCollection
    col.set_segments(segs)
class Patch3D(Patch):
    """
    3D patch object.
    """
    def __init__(self, *args, zs=(), zdir='z', **kwargs):
        """
        Parameters
        ----------
        verts :
        zs : float
            The location along the *zdir* axis in 3D space to position the
            patch.
        zdir : {'x', 'y', 'z'}
            Plane to plot patch orthogonal to. Default: 'z'.
            See `.get_dir_vector` for a description of the values.
        """
        super().__init__(*args, **kwargs)
        # NOTE(review): *zs* is passed where set_3d_properties expects
        # *verts* (and *zdir* where it expects *zs*). With the default
        # zs=() this is vacuous (empty vertex list); looks like callers are
        # expected to call set_3d_properties again with real vertices —
        # confirm before changing.
        self.set_3d_properties(zs, zdir)

    def set_3d_properties(self, verts, zs=0, zdir='z'):
        """
        Set the *z* position and direction of the patch.

        Parameters
        ----------
        verts :
        zs : float
            The location along the *zdir* axis in 3D space to position the
            patch.
        zdir : {'x', 'y', 'z'}
            Plane to plot patch orthogonal to. Default: 'z'.
            See `.get_dir_vector` for a description of the values.
        """
        # One z per vertex; scalars are broadcast.
        zs = np.broadcast_to(zs, len(verts))
        self._segment3d = [juggle_axes(x, y, z, zdir)
                           for ((x, y), z) in zip(verts, zs)]

    def get_path(self):
        # The projected 2D path; set by do_3d_projection.
        return self._path2d

    def do_3d_projection(self):
        s = self._segment3d
        xs, ys, zs = zip(*s)
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs,
                                                        self.axes.M)
        self._path2d = mpath.Path(np.column_stack([vxs, vys]))
        # Minimum projected z is used for draw-order sorting.
        return min(vzs)
class PathPatch3D(Patch3D):
    """
    3D PathPatch object.
    """
    def __init__(self, path, *, zs=(), zdir='z', **kwargs):
        """
        Parameters
        ----------
        path :
        zs : float
            The location along the *zdir* axis in 3D space to position the
            path patch.
        zdir : {'x', 'y', 'z', 3-tuple}
            Plane to plot path patch orthogonal to. Default: 'z'.
            See `.get_dir_vector` for a description of the values.
        """
        # Not super().__init__!
        # Skip Patch3D.__init__ on purpose: its set_3d_properties call has a
        # different signature than ours (we require *path*).
        Patch.__init__(self, **kwargs)
        self.set_3d_properties(path, zs, zdir)

    def set_3d_properties(self, path, zs=0, zdir='z'):
        """
        Set the *z* position and direction of the path patch.

        Parameters
        ----------
        path :
        zs : float
            The location along the *zdir* axis in 3D space to position the
            path patch.
        zdir : {'x', 'y', 'z', 3-tuple}
            Plane to plot path patch orthogonal to. Default: 'z'.
            See `.get_dir_vector` for a description of the values.
        """
        # Reuse the Patch3D vertex handling, then keep the path codes so the
        # projected 2D path preserves moveto/lineto structure.
        Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
        self._code3d = path.codes

    def do_3d_projection(self):
        s = self._segment3d
        xs, ys, zs = zip(*s)
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs,
                                                        self.axes.M)
        # Unlike Patch3D, rebuild the path with the preserved codes.
        self._path2d = mpath.Path(np.column_stack([vxs, vys]), self._code3d)
        # Minimum projected z is used for draw-order sorting.
        return min(vzs)
def _get_patch_verts(patch):
    """Return a list of vertices for the path of a patch."""
    transform = patch.get_patch_transform()
    polygons = patch.get_path().to_polygons(transform)
    # A patch path may decompose into several polygons; only the first is
    # used (empty array when there are none).
    return polygons[0] if len(polygons) else np.array([])
def patch_2d_to_3d(patch, z=0, zdir='z'):
    """Convert a `.Patch` to a `.Patch3D` object."""
    # Extract the polygon vertices before the class swap, then install them
    # as 3D state on the upgraded instance.
    vertices = _get_patch_verts(patch)
    patch.__class__ = Patch3D
    patch.set_3d_properties(vertices, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
    """Convert a `.PathPatch` to a `.PathPatch3D` object."""
    path = pathpatch.get_path()
    trans = pathpatch.get_patch_transform()
    # Apply the patch transform now, while it is still meaningful.
    # (Renamed from "mpath", which shadowed the module-level
    # matplotlib.path import alias used elsewhere in this file.)
    transformed_path = trans.transform_path(path)
    pathpatch.__class__ = PathPatch3D
    pathpatch.set_3d_properties(transformed_path, z, zdir)
class Patch3DCollection(PatchCollection):
    """
    A collection of 3D patches.
    """

    def __init__(self, *args, zs=0, zdir='z', depthshade=True, **kwargs):
        """
        Create a collection of flat 3D patches with its normal vector
        pointed in *zdir* direction, and located at *zs* on the *zdir*
        axis. 'zs' can be a scalar or an array-like of the same length as
        the number of patches in the collection.

        Constructor arguments are the same as for
        :class:`~matplotlib.collections.PatchCollection`. In addition,
        keywords *zs=0* and *zdir='z'* are available.

        Also, the keyword argument *depthshade* is available to indicate
        whether to shade the patches in order to give the appearance of depth
        (default is *True*). This is typically desired in scatter plots.
        """
        # Set before super().__init__ so color-resolution during base init
        # already sees the flag.
        self._depthshade = depthshade
        super().__init__(*args, **kwargs)
        self.set_3d_properties(zs, zdir)

    def get_depthshade(self):
        """Return whether depth shading is applied to collection members."""
        return self._depthshade

    def set_depthshade(self, depthshade):
        """
        Set whether depth shading is performed on collection members.

        Parameters
        ----------
        depthshade : bool
            Whether to shade the patches in order to give the appearance of
            depth.
        """
        self._depthshade = depthshade
        self.stale = True

    def set_sort_zpos(self, val):
        """Set the position to use for z-sorting."""
        self._sort_zpos = val
        self.stale = True

    def set_3d_properties(self, zs, zdir):
        """
        Set the *z* positions and direction of the patches.

        Parameters
        ----------
        zs : float or array of floats
            The location or locations to place the patches in the collection
            along the *zdir* axis.
        zdir : {'x', 'y', 'z'}
            Plane to plot patches orthogonal to.
            All patches must have the same direction.
            See `.get_dir_vector` for a description of the values.
        """
        # Force the collection to initialize the face and edgecolors
        # just in case it is a scalarmappable with a colormap.
        self.update_scalarmappable()
        offsets = self.get_offsets()
        if len(offsets) > 0:
            xs, ys = offsets.T
        else:
            xs = []
            ys = []
        self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
        # Identity ordering until a projection computes real depths.
        self._z_markers_idx = slice(-1)
        # Projected z values; None until do_3d_projection runs.
        self._vzs = None
        self.stale = True

    def do_3d_projection(self):
        xs, ys, zs = self._offsets3d
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs,
                                                        self.axes.M)
        # Cache depths for the depth-shading of colors in the getters below.
        self._vzs = vzs
        super().set_offsets(np.column_stack([vxs, vys]))
        if vzs.size > 0:
            return min(vzs)
        else:
            return np.nan

    def _maybe_depth_shade_and_sort_colors(self, color_array):
        # Apply depth-based alpha only when depths are known and shading is
        # enabled; then reorder colors to match the z-sorted markers.
        color_array = (
            _zalpha(color_array, self._vzs)
            if self._vzs is not None and self._depthshade
            else color_array
        )
        if len(color_array) > 1:
            color_array = color_array[self._z_markers_idx]
        return mcolors.to_rgba_array(color_array, self._alpha)

    def get_facecolor(self):
        return self._maybe_depth_shade_and_sort_colors(super().get_facecolor())

    def get_edgecolor(self):
        # We need this check here to make sure we do not double-apply the depth
        # based alpha shading when the edge color is "face" which means the
        # edge colour should be identical to the face colour.
        if cbook._str_equal(self._edgecolors, 'face'):
            return self.get_facecolor()
        return self._maybe_depth_shade_and_sort_colors(super().get_edgecolor())
class Path3DCollection(PathCollection):
    """
    A collection of 3D paths.
    """

    def __init__(self, *args, zs=0, zdir='z', depthshade=True, **kwargs):
        """
        Create a collection of flat 3D paths with its normal vector
        pointed in *zdir* direction, and located at *zs* on the *zdir*
        axis. 'zs' can be a scalar or an array-like of the same length as
        the number of paths in the collection.

        Constructor arguments are the same as for
        :class:`~matplotlib.collections.PathCollection`. In addition,
        keywords *zs=0* and *zdir='z'* are available.

        Also, the keyword argument *depthshade* is available to indicate
        whether to shade the patches in order to give the appearance of depth
        (default is *True*). This is typically desired in scatter plots.
        """
        self._depthshade = depthshade
        # Flag used by set_sizes/set_linewidth to distinguish calls made by
        # the base draw machinery from user calls (see those methods).
        self._in_draw = False
        super().__init__(*args, **kwargs)
        self.set_3d_properties(zs, zdir)
        # Depth-ordered 2D offsets; populated by do_3d_projection.
        self._offset_zordered = None

    def draw(self, renderer):
        # Draw with the depth-sorted offsets swapped in, and mark that any
        # size/linewidth updates during the draw come from the base class.
        with self._use_zordered_offset():
            with cbook._setattr_cm(self, _in_draw=True):
                super().draw(renderer)

    def set_sort_zpos(self, val):
        """Set the position to use for z-sorting."""
        self._sort_zpos = val
        self.stale = True

    def set_3d_properties(self, zs, zdir):
        """
        Set the *z* positions and direction of the paths.

        Parameters
        ----------
        zs : float or array of floats
            The location or locations to place the paths in the collection
            along the *zdir* axis.
        zdir : {'x', 'y', 'z'}
            Plane to plot paths orthogonal to.
            All paths must have the same direction.
            See `.get_dir_vector` for a description of the values.
        """
        # Force the collection to initialize the face and edgecolors
        # just in case it is a scalarmappable with a colormap.
        self.update_scalarmappable()
        offsets = self.get_offsets()
        if len(offsets) > 0:
            xs, ys = offsets.T
        else:
            xs = []
            ys = []
        self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
        # In the base draw methods we access the attributes directly which
        # means we cannot resolve the shuffling in the getter methods like
        # we do for the edge and face colors.
        #
        # This means we need to carry around a cache of the unsorted sizes and
        # widths (postfixed with 3d) and in `do_3d_projection` set the
        # depth-sorted version of that data into the private state used by the
        # base collection class in its draw method.
        #
        # Grab the current sizes and linewidths to preserve them.
        self._sizes3d = self._sizes
        self._linewidths3d = np.array(self._linewidths)
        xs, ys, zs = self._offsets3d
        # Sort the points based on z coordinates
        # Performance optimization: Create a sorted index array and reorder
        # points and point properties according to the index array
        self._z_markers_idx = slice(-1)
        self._vzs = None
        self.stale = True

    def set_sizes(self, sizes, dpi=72.0):
        super().set_sizes(sizes, dpi)
        # Only refresh the unsorted cache for user calls; calls made during
        # draw pass already-sorted sizes which must not overwrite the cache.
        if not self._in_draw:
            self._sizes3d = sizes

    def set_linewidth(self, lw):
        super().set_linewidth(lw)
        # Same caching rule as set_sizes.
        if not self._in_draw:
            self._linewidths3d = np.array(self._linewidths)

    def get_depthshade(self):
        """Return whether depth shading is applied to collection members."""
        return self._depthshade

    def set_depthshade(self, depthshade):
        """
        Set whether depth shading is performed on collection members.

        Parameters
        ----------
        depthshade : bool
            Whether to shade the patches in order to give the appearance of
            depth.
        """
        self._depthshade = depthshade
        self.stale = True

    def do_3d_projection(self):
        xs, ys, zs = self._offsets3d
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs,
                                                        self.axes.M)
        # Sort the points based on z coordinates
        # Performance optimization: Create a sorted index array and reorder
        # points and point properties according to the index array
        z_markers_idx = self._z_markers_idx = np.argsort(vzs)[::-1]
        self._vzs = vzs

        # we have to special case the sizes because of code in collections.py
        # as the draw method does
        #      self.set_sizes(self._sizes, self.figure.dpi)
        # so we cannot rely on doing the sorting on the way out via get_*
        if len(self._sizes3d) > 1:
            self._sizes = self._sizes3d[z_markers_idx]
        if len(self._linewidths3d) > 1:
            self._linewidths = self._linewidths3d[z_markers_idx]
        PathCollection.set_offsets(self, np.column_stack((vxs, vys)))

        # Re-order items
        vzs = vzs[z_markers_idx]
        vxs = vxs[z_markers_idx]
        vys = vys[z_markers_idx]

        # Store ordered offset for drawing purpose
        self._offset_zordered = np.column_stack((vxs, vys))

        return np.min(vzs) if vzs.size else np.nan

    @contextmanager
    def _use_zordered_offset(self):
        # Temporarily install the depth-sorted offsets for the duration of
        # a draw, restoring the unsorted offsets afterwards.
        if self._offset_zordered is None:
            # Do nothing
            yield
        else:
            # Swap offset with z-ordered offset
            old_offset = self._offsets
            super().set_offsets(self._offset_zordered)
            try:
                yield
            finally:
                self._offsets = old_offset

    def _maybe_depth_shade_and_sort_colors(self, color_array):
        # Apply depth-based alpha only when depths are known and shading is
        # enabled; then reorder colors to match the z-sorted markers.
        color_array = (
            _zalpha(color_array, self._vzs)
            if self._vzs is not None and self._depthshade
            else color_array
        )
        if len(color_array) > 1:
            color_array = color_array[self._z_markers_idx]
        return mcolors.to_rgba_array(color_array, self._alpha)

    def get_facecolor(self):
        return self._maybe_depth_shade_and_sort_colors(super().get_facecolor())

    def get_edgecolor(self):
        # We need this check here to make sure we do not double-apply the depth
        # based alpha shading when the edge color is "face" which means the
        # edge colour should be identical to the face colour.
        if cbook._str_equal(self._edgecolors, 'face'):
            return self.get_facecolor()
        return self._maybe_depth_shade_and_sort_colors(super().get_edgecolor())
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
    """
    Convert a `.PatchCollection` into a `.Patch3DCollection` object
    (or a `.PathCollection` into a `.Path3DCollection` object).

    Parameters
    ----------
    zs : float or array of floats
        The location or locations to place the patches in the collection along
        the *zdir* axis. Default: 0.
    zdir : {'x', 'y', 'z'}
        The axis in which to place the patches. Default: "z".
        See `.get_dir_vector` for a description of the values.
    depthshade
        Whether to shade the patches to give a sense of depth. Default: *True*.
    """
    # PathCollection must be tested first: it is checked before the more
    # general PatchCollection case.
    if isinstance(col, PathCollection):
        col.__class__ = Path3DCollection
        col._offset_zordered = None
    elif isinstance(col, PatchCollection):
        col.__class__ = Patch3DCollection
    # Manually install the state normally set by the 3D-class __init__,
    # since __init__ is bypassed by the in-place class swap.
    col._depthshade = depthshade
    col._in_draw = False
    col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
    """
    A collection of 3D polygons.
    .. note::
        **Filling of 3D polygons**
        There is no simple definition of the enclosed surface of a 3D polygon
        unless the polygon is planar.
        In practice, Matplotlib fills the 2D projection of the polygon. This
        gives a correct filling appearance only for planar polygons. For all
        other polygons, you'll find orientations in which the edges of the
        polygon intersect in the projection. This will lead to an incorrect
        visualization of the 3D area.
        If you need filled areas, it is recommended to create them via
        `~mpl_toolkits.mplot3d.axes3d.Axes3D.plot_trisurf`, which creates a
        triangulation and thus generates consistent surfaces.
    """
    def __init__(self, verts, *args, zsort='average', shade=False,
                 lightsource=None, **kwargs):
        """
        Parameters
        ----------
        verts : list of (N, 3) array-like
            The sequence of polygons [*verts0*, *verts1*, ...] where each
            element *verts_i* defines the vertices of polygon *i* as a 2D
            array-like of shape (N, 3).
        zsort : {'average', 'min', 'max'}, default: 'average'
            The calculation method for the z-order.
            See `~.Poly3DCollection.set_zsort` for details.
        shade : bool, default: False
            Whether to shade *facecolors* and *edgecolors*. When activating
            *shade*, *facecolors* and/or *edgecolors* must be provided.
            .. versionadded:: 3.7
        lightsource : `~matplotlib.colors.LightSource`, optional
            The lightsource to use when *shade* is True.
            .. versionadded:: 3.7
        *args, **kwargs
            All other parameters are forwarded to `.PolyCollection`.
        Notes
        -----
        Note that this class does a bit of magic with the _facecolors
        and _edgecolors properties.
        """
        if shade:
            # Shade before the base-class __init__ consumes the (possibly
            # rewritten) facecolors/edgecolors kwargs.
            normals = _generate_normals(verts)
            facecolors = kwargs.get('facecolors', None)
            if facecolors is not None:
                kwargs['facecolors'] = _shade_colors(
                    facecolors, normals, lightsource
                )
            edgecolors = kwargs.get('edgecolors', None)
            if edgecolors is not None:
                kwargs['edgecolors'] = _shade_colors(
                    edgecolors, normals, lightsource
                )
            if facecolors is None and edgecolors is None:
                raise ValueError(
                    "You must provide facecolors, edgecolors, or both for "
                    "shade to work.")
        super().__init__(verts, *args, **kwargs)
        if isinstance(verts, np.ndarray):
            if verts.ndim != 3:
                raise ValueError('verts must be a list of (N, 3) array-like')
        else:
            if any(len(np.shape(vert)) != 2 for vert in verts):
                raise ValueError('verts must be a list of (N, 3) array-like')
        self.set_zsort(zsort)
        self._codes3d = None
    # Reductions used to collapse a polygon's vertex depths to one z value.
    _zsort_functions = {
        'average': np.average,
        'min': np.min,
        'max': np.max,
    }
    def set_zsort(self, zsort):
        """
        Set the calculation method for the z-order.
        Parameters
        ----------
        zsort : {'average', 'min', 'max'}
            The function applied on the z-coordinates of the vertices in the
            viewer's coordinate system, to determine the z-order.
        """
        self._zsortfunc = self._zsort_functions[zsort]
        self._sort_zpos = None
        self.stale = True
    def get_vector(self, segments3d):
        """Optimize points for projection."""
        if len(segments3d):
            xs, ys, zs = np.vstack(segments3d).T
        else:  # vstack can't stack zero arrays.
            xs, ys, zs = [], [], []
        # Append a row of ones so the flattened points can be pushed through
        # the 4x4 projection matrix in a single matmul (homogeneous coords).
        ones = np.ones(len(xs))
        self._vec = np.array([xs, ys, zs, ones])
        # Slices that recover each polygon from the flattened vertex list.
        indices = [0, *np.cumsum([len(segment) for segment in segments3d])]
        self._segslices = [*map(slice, indices[:-1], indices[1:])]
    def set_verts(self, verts, closed=True):
        """
        Set 3D vertices.
        Parameters
        ----------
        verts : list of (N, 3) array-like
            The sequence of polygons [*verts0*, *verts1*, ...] where each
            element *verts_i* defines the vertices of polygon *i* as a 2D
            array-like of shape (N, 3).
        closed : bool, default: True
            Whether the polygon should be closed by adding a CLOSEPOLY
            connection at the end.
        """
        self.get_vector(verts)
        # 2D verts will be updated at draw time
        super().set_verts([], False)
        self._closed = closed
    def set_verts_and_codes(self, verts, codes):
        """Set 3D vertices with path codes."""
        # set vertices with closed=False to prevent PolyCollection from
        # setting path codes
        self.set_verts(verts, closed=False)
        # and set our own codes instead.
        self._codes3d = codes
    def set_3d_properties(self):
        """Snapshot the current 2D color state into the 3D-side attributes."""
        # Force the collection to initialize the face and edgecolors
        # just in case it is a scalarmappable with a colormap.
        self.update_scalarmappable()
        self._sort_zpos = None
        self.set_zsort('average')
        self._facecolor3d = PolyCollection.get_facecolor(self)
        self._edgecolor3d = PolyCollection.get_edgecolor(self)
        self._alpha3d = PolyCollection.get_alpha(self)
        self.stale = True
    def set_sort_zpos(self, val):
        """Set the position to use for z-sorting."""
        self._sort_zpos = val
        self.stale = True
    def do_3d_projection(self):
        """
        Perform the 3D projection for this object.

        Projects the stored 3D vertices with the axes' current view matrix,
        depth-sorts the polygons (furthest first), pushes the sorted 2D
        vertices and colors into the base class, and returns a scalar z used
        for artist-level z-ordering.
        """
        if self._A is not None:
            # force update of color mapping because we re-order them
            # below. If we do not do this here, the 2D draw will call
            # this, but we will never port the color mapped values back
            # to the 3D versions.
            #
            # We hold the 3D versions in a fixed order (the order the user
            # passed in) and sort the 2D version by view depth.
            self.update_scalarmappable()
            if self._face_is_mapped:
                self._facecolor3d = self._facecolors
            if self._edge_is_mapped:
                self._edgecolor3d = self._edgecolors
        txs, tys, tzs = proj3d._proj_transform_vec(self._vec, self.axes.M)
        xyzlist = [(txs[sl], tys[sl], tzs[sl]) for sl in self._segslices]
        # This extra fuss is to re-order face / edge colors
        cface = self._facecolor3d
        cedge = self._edgecolor3d
        if len(cface) != len(xyzlist):
            # presumably a single shared color; tile it per polygon so the
            # zip below pairs one color with each face — TODO confirm
            cface = cface.repeat(len(xyzlist), axis=0)
        if len(cedge) != len(xyzlist):
            if len(cedge) == 0:
                cedge = cface
            else:
                cedge = cedge.repeat(len(xyzlist), axis=0)
        if xyzlist:
            # sort by depth (furthest drawn first)
            z_segments_2d = sorted(
                ((self._zsortfunc(zs), np.column_stack([xs, ys]), fc, ec, idx)
                 for idx, ((xs, ys, zs), fc, ec)
                 in enumerate(zip(xyzlist, cface, cedge))),
                key=lambda x: x[0], reverse=True)
            _, segments_2d, self._facecolors2d, self._edgecolors2d, idxs = \
                zip(*z_segments_2d)
        else:
            segments_2d = []
            self._facecolors2d = np.empty((0, 4))
            self._edgecolors2d = np.empty((0, 4))
            idxs = []
        if self._codes3d is not None:
            # Re-order the per-polygon path codes to match the depth sort.
            codes = [self._codes3d[idx] for idx in idxs]
            PolyCollection.set_verts_and_codes(self, segments_2d, codes)
        else:
            PolyCollection.set_verts(self, segments_2d, self._closed)
        if len(self._edgecolor3d) != len(cface):
            # Edge colors cannot be matched one-to-one with faces, so keep
            # them in their original (unsorted) order.
            self._edgecolors2d = self._edgecolor3d
        # Return zorder value
        if self._sort_zpos is not None:
            zvec = np.array([[0], [0], [self._sort_zpos], [1]])
            ztrans = proj3d._proj_transform_vec(zvec, self.axes.M)
            return ztrans[2][0]
        elif tzs.size > 0:
            # FIXME: Some results still don't look quite right.
            # In particular, examine contourf3d_demo2.py
            # with az = -54 and elev = -45.
            return np.min(tzs)
        else:
            return np.nan
    def set_facecolor(self, colors):
        # docstring inherited
        super().set_facecolor(colors)
        # Keep the 3D-side copy in sync; it is the sort source at draw time.
        self._facecolor3d = PolyCollection.get_facecolor(self)
    def set_edgecolor(self, colors):
        # docstring inherited
        super().set_edgecolor(colors)
        self._edgecolor3d = PolyCollection.get_edgecolor(self)
    def set_alpha(self, alpha):
        # docstring inherited
        artist.Artist.set_alpha(self, alpha)
        try:
            self._facecolor3d = mcolors.to_rgba_array(
                self._facecolor3d, self._alpha)
        except (AttributeError, TypeError, IndexError):
            pass
        try:
            # NOTE(review): the face result above is stored back into
            # _facecolor3d, but the edge result goes into the 2D
            # _edgecolors attribute — confirm this asymmetry is intended.
            self._edgecolors = mcolors.to_rgba_array(
                self._edgecolor3d, self._alpha)
        except (AttributeError, TypeError, IndexError):
            pass
        self.stale = True
    def get_facecolor(self):
        # docstring inherited
        # self._facecolors2d is not initialized until do_3d_projection
        if not hasattr(self, '_facecolors2d'):
            self.axes.M = self.axes.get_proj()
            self.do_3d_projection()
        return np.asarray(self._facecolors2d)
    def get_edgecolor(self):
        # docstring inherited
        # self._edgecolors2d is not initialized until do_3d_projection
        if not hasattr(self, '_edgecolors2d'):
            self.axes.M = self.axes.get_proj()
            self.do_3d_projection()
        return np.asarray(self._edgecolors2d)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
    """
    Convert a `.PolyCollection` into a `.Poly3DCollection` object.

    Parameters
    ----------
    col : `.PolyCollection`
        The collection to convert; it is modified in place by re-assigning
        its ``__class__``.
    zs : float or array of floats
        The location or locations to place the polygons in the collection along
        the *zdir* axis. Default: 0.
    zdir : {'x', 'y', 'z'}
        The axis in which to place the patches. Default: 'z'.
        See `.get_dir_vector` for a description of the values.
    """
    # Lift the 2D paths into 3D segments before re-branding the instance,
    # since Poly3DCollection.set_verts_and_codes expects 3D input.
    segments_3d, codes = _paths_to_3d_segments_with_codes(
        col.get_paths(), zs, zdir)
    col.__class__ = Poly3DCollection
    col.set_verts_and_codes(segments_3d, codes)
    col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
    """
    Reorder coordinates so that 2D *xs*, *ys* can be plotted in the plane
    orthogonal to *zdir*. *zdir* is normally 'x', 'y' or 'z'. However, if
    *zdir* starts with a '-' it is interpreted as a compensation for
    `rotate_axes`.
    """
    reordered = {'x': (zs, xs, ys), 'y': (xs, zs, ys)}
    if zdir in reordered:
        return reordered[zdir]
    if zdir[0] == '-':
        # A leading '-' requests the inverse handling via rotate_axes.
        return rotate_axes(xs, ys, zs, zdir)
    return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
    """
    Reorder coordinates so that the axes are rotated with *zdir* along
    the original z axis. Prepending the axis with a '-' does the
    inverse transform, so *zdir* can be 'x', '-x', 'y', '-y', 'z' or '-z'.
    """
    forward = {'x', '-y'}
    backward = {'-x', 'y'}
    if zdir in forward:
        return ys, zs, xs
    if zdir in backward:
        return zs, xs, ys
    # 'z', '-z', or anything unrecognized: identity.
    return xs, ys, zs
def _zalpha(colors, zs):
    """Scale the alpha channel of *colors* by depth (nearer = more opaque)."""
    # FIXME: This only works well if the points for *zs* are well-spaced
    # in all three dimensions. Otherwise, at certain orientations,
    # the min and max zs are very close together.
    # Should really normalize against the viewing depth.
    if not len(colors) or not len(zs):
        return np.zeros((0, 4))
    depth_norm = Normalize(min(zs), max(zs))
    attenuation = 1 - 0.7 * depth_norm(zs)
    rgba = np.broadcast_to(mcolors.to_rgba_array(colors), (len(zs), 4))
    faded = rgba.copy()
    faded[:, 3] *= attenuation
    return faded
def _generate_normals(polygons):
"""
Compute the normals of a list of polygons, one normal per polygon.
Normals point towards the viewer for a face with its vertices in
counterclockwise order, following the right hand rule.
Uses three points equally spaced around the polygon. This method assumes
that the points are in a plane. Otherwise, more than one shade is required,
which is not supported.
Parameters
----------
polygons : list of (M_i, 3) array-like, or (..., M, 3) array-like
A sequence of polygons to compute normals for, which can have
varying numbers of vertices. If the polygons all have the same
number of vertices and array is passed, then the operation will
be vectorized.
Returns
-------
normals : (..., 3) array
A normal vector estimated for the polygon.
"""
if isinstance(polygons, np.ndarray):
# optimization: polygons all have the same number of points, so can
# vectorize
n = polygons.shape[-2]
i1, i2, i3 = 0, n//3, 2*n//3
v1 = polygons[..., i1, :] - polygons[..., i2, :]
v2 = polygons[..., i2, :] - polygons[..., i3, :]
else:
# The subtraction doesn't vectorize because polygons is jagged.
v1 = np.empty((len(polygons), 3))
v2 = np.empty((len(polygons), 3))
for poly_i, ps in enumerate(polygons):
n = len(ps)
i1, i2, i3 = 0, n//3, 2*n//3
v1[poly_i, :] = ps[i1, :] - ps[i2, :]
v2[poly_i, :] = ps[i2, :] - ps[i3, :]
return np.cross(v1, v2)
def _shade_colors(color, normals, lightsource=None):
    """
    Shade *color* using normal vectors given by *normals*,
    assuming a *lightsource* (using default position if not given).
    *color* can also be an array of the same length as *normals*.
    """
    if lightsource is None:
        # chosen for backwards-compatibility
        lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712)
    # Normalize the normals and dot them with the light direction; a
    # zero-length normal divides to NaN, which is silenced here and
    # handled via the mask below.
    with np.errstate(invalid="ignore"):
        shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True))
                 @ lightsource.direction)
    mask = ~np.isnan(shade)
    if mask.any():
        # convert dot product to allowed shading fractions
        in_norm = mcolors.Normalize(-1, 1)
        out_norm = mcolors.Normalize(0.3, 1).inverse
        def norm(x):
            # Map dot products in [-1, 1] to brightness fractions in [0.3, 1].
            return out_norm(in_norm(x))
        shade[~mask] = 0
        color = mcolors.to_rgba_array(color)
        # shape of color should be (M, 4) (where M is number of faces)
        # shape of shade should be (M,)
        # colors should have final shape of (M, 4)
        alpha = color[:, 3]
        colors = norm(shade)[:, np.newaxis] * color
        # Shading scales RGB only; restore the original alpha channel.
        colors[:, 3] = alpha
    else:
        # All normals degenerate: return the input colors unshaded.
        colors = np.asanyarray(color).copy()
    return colors
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py3@mpl_toolkits@mplot3d@art3d.py@.PATH_END.py
|
{
"filename": "demo_ROS_viper.py",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/demos/python/ros/demo_ROS_viper.py",
"type": "Python"
}
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2023 projectchrono.org
# All right reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Aaron Young
# =============================================================================
#
# Demo showing the integration of ROS with the Viper rover model in python
#
# =============================================================================
import pychrono as ch
import pychrono.robot as robot
import pychrono.ros as chros
def main():
    """Run the Viper rover demo with ROS integration.

    Builds a Chrono NSC system with a fixed ground box, initializes a Viper
    rover with a DC-motor driver, registers ROS handlers for the clock,
    driver inputs, and rover state, then steps the simulation for 30 s of
    simulated time (1 ms steps).
    """
    # Create Chrono system
    system = ch.ChSystemNSC()
    system.SetGravitationalAcceleration(ch.ChVector3d(0, 0, -9.81))
    ch.ChCollisionModel.SetDefaultSuggestedEnvelope(0.0025)
    ch.ChCollisionModel.SetDefaultSuggestedMargin(0.0025)
    # Create ground body
    ground_mat = ch.ChContactMaterialNSC()
    ground = ch.ChBodyEasyBox(20, 20, 1, 1000, True, True, ground_mat)
    ground.SetPos(ch.ChVector3d(0, 0, -1))
    ground.SetFixed(True)
    ground.GetVisualShape(0).SetTexture(ch.GetChronoDataFile("textures/concrete.jpg"))
    system.Add(ground)
    # Create Viper rover
    driver = robot.ViperDCMotorControl()
    rover = robot.Viper(system)
    rover.SetDriver(driver)
    rover.Initialize(ch.ChFramed(ch.ChVector3d(0, -0.2, 0), ch.ChQuaterniond(1, 0, 0, 0)))
    # Create ROS manager: publishes the clock and rover state, subscribes
    # to driver inputs at 25 Hz.
    ros_manager = chros.ChROSPythonManager()
    ros_manager.RegisterHandler(chros.ChROSClockHandler())
    ros_manager.RegisterHandler(chros.ChROSViperDCMotorControlHandler(25, driver, "~/input/driver_inputs"))
    ros_manager.RegisterHandler(chros.ChROSBodyHandler(25, rover.GetChassis().GetBody(), "~/output/viper/state"))
    ros_manager.Initialize()
    # Simulation loop
    time = 0
    time_step = 1e-3
    time_end = 30
    while time < time_end:
        time = system.GetChTime()
        rover.Update()
        # Stop the loop if the ROS manager reports a failed update
        # (e.g. ROS shutdown).
        if not ros_manager.Update(time, time_step):
            break
        system.DoStepDynamics(time_step)
# Script entry point.
if __name__ == "__main__":
    main()
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@demos@python@ros@demo_ROS_viper.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/community/bse/__init__.py",
"type": "Python"
}
|
from .interface import Bse
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@community@bse@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "jolideco/jolideco",
"repo_path": "jolideco_extracted/jolideco-main/jolideco/priors/patches/__init__.py",
"type": "Python"
}
|
from .core import GMMPatchPrior, MultiScalePrior
from .gmm import GMM_REGISTRY, GaussianMixtureModel
__all__ = ["GaussianMixtureModel", "GMMPatchPrior", "MultiScalePrior", "GMM_REGISTRY"]
|
jolidecoREPO_NAMEjolidecoPATH_START.@jolideco_extracted@jolideco-main@jolideco@priors@patches@__init__.py@.PATH_END.py
|
{
"filename": "lax_numpy_einsum_test.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/tests/lax_numpy_einsum_test.py",
"type": "Python"
}
|
# Copyright 2018 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from functools import partial
import itertools
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import dtypes
from jax import lax
import jax.numpy as jnp
import jax._src.test_util as jtu
jax.config.parse_flags_with_absl()
class EinsumTest(jtu.JaxTestCase):
  """Tests that jnp.einsum agrees with np.einsum across contraction specs."""
  def _check(self, s, *ops):
    """Assert jnp.einsum matches np.einsum for spec *s* on *ops*."""
    a = np.einsum(s, *ops)
    b = jnp.einsum(s, *ops, precision=lax.Precision.HIGHEST)
    self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
  def test_three_operands_1(self):
    r = self.rng()
    x = r.randn(3)
    y = r.randn(4)
    z = r.randn(5)
    s = 'i,j,k->ijk'
    self._check(s, x, y, z)
  # NOTE(review): identical to test_three_operands_1 (same spec and shapes);
  # likely meant to exercise a different contraction — confirm or remove.
  def test_three_operands_2(self):
    r = self.rng()
    x = r.randn(3)
    y = r.randn(4)
    z = r.randn(5)
    s = 'i,j,k->ijk'
    self._check(s, x, y, z)
  def test_two_operands_1(self):
    r = self.rng()
    x = r.randn(3, 4)
    y = r.randn(4)
    s = 'ij,j->i'
    self._check(s, x, y)
  def test_two_operands_2(self):
    r = self.rng()
    x = r.randn(3, 4, 5)
    y = r.randn(4)
    s = 'ijk,j->i'
    self._check(s, x, y)
  def test_two_operands_3(self):
    r = self.rng()
    x = r.randn(3, 4, 3)
    y = r.randn(3)
    s = 'iji,i->j'
    self._check(s, x, y)
  def test_two_operands_4(self):
    r = self.rng()
    x = r.randn(3, 4)
    y = r.randn(3, 4)
    s = 'ij,ij->'
    self._check(s, x, y)
  def test_two_operands_5(self):
    r = self.rng()
    x = r.randn(10, 2, 3)
    y = r.randn(3, 4)
    s = 'nij,jk->nik'
    self._check(s, x, y)
  def test_two_operands_6(self):
    # based on https://github.com/jax-ml/jax/issues/37#issuecomment-448572187
    r = self.rng()
    x = r.randn(2, 1)
    y = r.randn(2, 3, 4)
    s = 'sa,shb->shab'
    self._check(s, x, y)
  def test_one_operand_1(self):
    r = self.rng()
    x = r.randn(3, 4, 5)
    s = 'ijk->j'
    self._check(s, x)
  def test_one_operand_2(self):
    r = self.rng()
    x = r.randn(3, 4, 5)
    s = 'ijk->kij'
    self._check(s, x)
  def test_one_operand_3(self):
    r = self.rng()
    x = r.randn(3, 4, 5)
    s = 'ijk->ki'
    self._check(s, x)
  # NOTE(review): identical to test_one_operand_3 — confirm or remove.
  def test_one_operand_4(self):
    r = self.rng()
    x = r.randn(3, 4, 5)
    s = 'ijk->ki'
    self._check(s, x)
  def test_one_operand_5(self):
    r = self.rng()
    x = r.randn(2, 3, 4, 5)
    s = '...ijk->...ki'
    self._check(s, x)
  def test_one_operand_6(self):
    r = self.rng()
    x = r.randn(3, 4, 5)
    s = '...ijk->ki'
    self._check(s, x)
  def test_one_operand_7(self):
    r = self.rng()
    x = r.randn(3, 3)
    s = 'ii->'
    self._check(s, x)
  def test_one_operand_8(self):
    r = self.rng()
    x = r.randn(3, 3)
    s = 'ij->'
    self._check(s, x)
  def test_one_operand_9(self):
    r = self.rng()
    x = r.randn(3, 3, 3)
    s = 'iii->'
    self._check(s, x)
  def test_one_operand_10(self):
    r = self.rng()
    x = r.randn(3, 3)
    s = 'ii->i'
    self._check(s, x)
  def test_one_operand_11(self):
    r = self.rng()
    x = r.randn(3, 3, 4)
    s = 'iij->i'
    self._check(s, x)
  def test_one_operand_12(self):
    r = self.rng()
    x = r.randn(3, 3, 3)
    s = 'iii->i'
    self._check(s, x)
  def test_one_operand_13(self):
    r = self.rng()
    x = r.randn(3, 3, 5, 4, 4)
    s = 'iijkk->i'
    self._check(s, x)
  def test_one_operand_14(self):
    r = self.rng()
    x = r.randn(3, 3, 5, 4, 4)
    s = 'iijkk->ik'
    self._check(s, x)
  def test_one_operand_15(self):
    r = self.rng()
    x = r.randn(3, 3, 5, 4, 4)
    s = 'iijkl->il'
    self._check(s, x)
  def test_one_operand_16(self):
    r = self.rng()
    x = r.randn(3, 3)
    s = 'ij->ij'
    self._check(s, x)
  def test_tf_unsupported_1(self):
    # from https://www.tensorflow.org/api_docs/python/tf/einsum
    r = self.rng()
    x = r.randn(2, 3, 5, 1)
    y = r.randn(3, 4, 5, 1)
    s = 'ij...,jk...->ik...'
    self._check(s, x, y)
  def test_tf_unsupported_2(self):
    # from https://www.tensorflow.org/api_docs/python/tf/einsum
    r = self.rng()
    x = r.randn(2, 3, 3)
    y = r.randn(4)
    s = 'ijj,k->ik'
    self._check(s, x, y)
  def test_tf_unsupported_3(self):
    # from https://www.tensorflow.org/api_docs/python/tf/einsum
    r = self.rng()
    x = r.randn(2, 3)
    y = r.randn(2, 3)
    z = r.randn(3, 4)
    s = 'ij,ij,jk->ik'
    self._check(s, x, y, z)
  # these tests are based on https://github.com/dask/dask/pull/3412/files
  @parameterized.named_parameters(
      {"testcase_name": f"_{einstr}_dtype={dtype.__name__}",
       "einstr": einstr, "dtype": dtype}
      for einstr in [
          'abc,bad->abcd',
          'abcdef,bcdfg->abcdeg',
          'ea,fb,abcd,gc,hd->efgh',
          'ab,b',
          'aa',
          'a,a->',
          'a,a->a',
          'a,a',
          'a,b',
          'a,b,c',
          'a',
          'ba,b',
          'ba,b->',
          'defab,fedbc->defac',
          'ab...,bc...->ac...',
          'a...a',
          'abc...->cba...',
          '...ab->...a',
          'a...a->a...',
          # Following 2 from # https://stackoverflow.com/a/19203475/1611416
          '...abc,...abcd->...d',
          'ab...,b->ab...',
          # https://github.com/dask/dask/pull/3412#discussion_r182413444
          'aa->a',
          'ab,ab,c->c',
          'aab,bc->ac',
          'aab,bcc->ac',
          'fdf,cdd,ccd,afe->ae',
          'fff,fae,bef,def->abd',
      ]
      for dtype in [jnp.float32, jnp.int32, jnp.complex64, jnp.bool_])
  def test_from_dask(self, einstr, dtype):
    r = jtu.rand_default(self.rng())
    if '->' in einstr:
      # NOTE(review): result_names is never used below — only the input
      # side of the spec is needed to derive shapes.
      input_str, result_names = einstr.split('->')
    else:
      input_str = einstr
    input_names = input_str.split(',')
    # Assign each index letter a dimension from a repeating 2, 3, 4 cycle;
    # '...' is expanded to two extra named axes ('0' and '1').
    dims = itertools.cycle([2, 3, 4])
    shapes = defaultdict(lambda: next(dims))
    input_shapes = [tuple(shapes[c] for c in names.replace('...', '01'))
                    for names in input_names]
    operands = [r(shape, dtype) for shape in input_shapes]
    self._check(einstr, *operands)
  def test_ordered_front_batch_dim_case(self):
    x = np.ones((1,8,20,4))
    y = np.ones((1,8,20,4))
    s = 'ijkl,ijml->ijkm'
    self._check(s, x, y)
  def test_einsum_path(self):
    # just check examples from np.einsum_path docstring
    a = self.rng().rand(2, 2)
    b = self.rng().rand(2, 5)
    c = self.rng().rand(5, 2)
    path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
    self.assertEqual(str(path_info[0]), "['einsum_path', (1, 2), (0, 1)]")
    self.assertEqual(path_info[1].split('\n')[0],
                     '  Complete contraction:  ij,jk,kl->il')
    # check this doesn't crash
    I = self.rng().rand(10, 10, 10, 10)
    C = self.rng().rand(10, 10)
    np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, optimize='greedy')
  @jax.default_matmul_precision("float32")
  def test_einsum_kpmurphy_example(self):
    # code from an email with @murphyk
    N, C, D, K, T = 2, 3, 4, 5, 6
    r = self.rng()
    S = r.randn(N, T, K)
    W = r.randn(K, D)
    V = r.randn(D, C)
    # Reference result computed with explicit nested loops.
    L = np.zeros((N, C))
    for n in range(N):
      for c in range(C):
        s = 0
        for d in range(D):
          for k in range(K):
            for t in range(T):
              s += S[n,t,k] * W[k,d] * V[d,c]
        L[n,c] = s
    path = jnp.einsum_path('ntk,kd,dc->nc', S, W, V, optimize='optimal')[0]
    self.assertAllClose(L, jnp.einsum('ntk,kd,dc->nc', S, W, V, optimize=path),
                        check_dtypes=False)
  def test_contraction_broadcasting(self):
    r = self.rng()
    x = r.randn(3, 4, 5)
    y = r.randn(3, 1, 6)
    s = 'cij,cjk->cik'
    self._check(s, x, y)
  def test_batch_broadcasting(self):
    r = self.rng()
    x = r.randn(1, 4, 5)
    y = r.randn(3, 5, 6)
    s = 'cij,cjk->cik'
    self._check(s, x, y)
  def test_batch_and_contraction_broadcasting(self):
    r = self.rng()
    x = r.randn(1, 4, 5)
    y = r.randn(3, 1, 6)
    s = 'cij,cjk->cik'
    self._check(s, x, y)
  def test_broadcasting_issue_2189(self):
    r = self.rng()
    x = r.randn(2, 1, 3, 3)
    y = r.randn(2, 4, 3)
    s = '...ij,...j'
    self._check(s, x, y)
  def test_no_unnecessary_transpose(self):
    r = self.rng()
    x = r.randn(2, 2, 2)
    y = r.randn(2, 2)
    # The traced program should contain no transpose op for this spec.
    jaxpr = jax.make_jaxpr(partial(jnp.einsum, "ijk,kl->ijl"))(x, y)
    self.assertNotIn('transpose', str(jaxpr))
  def test_preferred_element_type(self):
    r = self.rng()
    x = r.randn(2, 2).astype('bfloat16')
    y = r.randn(2).astype('bfloat16')
    pattern = "ij,j->i"
    f1 = partial(jnp.einsum, pattern)
    jaxpr = jax.make_jaxpr(f1)(x, y)
    self.assertLen(jaxpr.eqns, 1)
    self.assertEqual(jaxpr.eqns[0].params['preferred_element_type'],
                     dtypes.result_type(x, y))
    f2 = partial(jnp.einsum, pattern, preferred_element_type='float32')
    jaxpr = jax.make_jaxpr(f2)(x, y)
    self.assertLen(jaxpr.eqns, 1)
    self.assertEqual(jaxpr.eqns[0].params['preferred_element_type'], 'float32')
  def test_inf_nan(self):
    x = np.array([[[np.inf, np.inf],
                   [  1.0,    1.0]]])
    out = jnp.einsum('baa->ba', x)
    expected = np.einsum('baa->ba', x)
    self.assertAllClose(out, expected, check_dtypes=False)
  @jtu.sample_product(
    lhs_dtype=jtu.dtypes.numeric,
    rhs_dtype=jtu.dtypes.numeric,
  )
  @jax.numpy_dtype_promotion('standard')
  def test_einsum_mixed_precision(self, lhs_dtype, rhs_dtype):
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng((10,), lhs_dtype), rng((10,), rhs_dtype)]
    f_jax = partial(jnp.einsum, 'a,a->a')
    # The traced program should be a single dot_general, optionally
    # followed by a dtype conversion.
    jaxpr = jax.make_jaxpr(f_jax)(*args_maker())
    self.assertIn(
        [eqn.primitive for eqn in jaxpr.eqns],
        [
            [lax.dot_general_p],
            [lax.dot_general_p, lax.convert_element_type_p],
        ])
    # Check result and expected dtype for all combinations
    f_np = jtu.promote_like_jnp(partial(np.einsum, 'a,a->a'))
    self._CheckAgainstNumpy(f_np, f_jax, args_maker, check_dtypes=True)
  @jtu.sample_product(
    [
      {'signature': 'i->', 'shapes': [(3,)]},
      {'signature': 'ii->i', 'shapes': [(4, 4)]},
      {'signature': 'ij,jk', 'shapes': [(3, 4), (4, 3)]},
      {'signature': 'ij,jkl,klm', 'shapes': [(2, 2), (2, 3, 4), (3, 4, 2)]},
    ],
    optimize=[True, False, 'optimal', 'auto', 'greedy', 'eager'],
    dtype=[np.dtype('float32')],
  )
  @jtu.skip_on_devices('tpu')
  def test_einsum_optimization_modes(self, signature, shapes, optimize, dtype):
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype) for shape in shapes]
    jnp_fun = partial(jnp.einsum, signature, optimize=optimize)
    np_fun = partial(np.einsum, signature)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, rtol=1E-4)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=1E-4)
  @jtu.sample_product(
    [
      {'signature': 'i->', 'shapes': [(3,)]},
      {'signature': 'ii->i', 'shapes': [(4, 4)]},
      {'signature': 'ij,jk', 'shapes': [(3, 4), (4, 3)]},
      {'signature': 'ij,jkl,klm', 'shapes': [(2, 2), (2, 3, 4), (3, 4, 2)]},
    ],
    optimize=[True, False, 'optimal', 'auto', 'greedy', 'eager'],
    dtype=[np.dtype('float32')],
  )
  @jtu.skip_on_devices('tpu')
  def test_einsum_path_optimization_modes(self, signature, shapes, optimize, dtype):
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype) for shape in shapes]
    def jnp_fun(*args, signature=signature, optimize=optimize):
      # Precompute the contraction path, then reuse it in the einsum call.
      path, _ = jnp.einsum_path(signature, *args, optimize=optimize)
      return jnp.einsum(signature, *args, optimize=path)
    np_fun = partial(np.einsum, signature)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, rtol=1E-4)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=1E-4)
# Script entry point: run the suite with JAX's test loader.
if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@lax_numpy_einsum_test.py@.PATH_END.py
|
{
"filename": "test_stretch.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/emcee_ES/tests/integration/test_stretch.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import pytest
from emcee import moves
from .test_proposal import _test_normal, _test_uniform
__all__ = [
"test_normal_stretch",
"test_uniform_stretch",
"test_nsplits_stretch",
]
@pytest.mark.parametrize("blobs", [True, False])
def test_normal_stretch(blobs, **kwargs):
    """Check the stretch move on a normal target, with and without blobs."""
    options = dict(kwargs, blobs=blobs)
    _test_normal(moves.StretchMove(), **options)
def test_uniform_stretch(**kwargs):
    """Check the stretch move against a uniform target density."""
    move = moves.StretchMove()
    _test_uniform(move, **kwargs)
def test_nsplits_stretch(**kwargs):
    """Check the stretch move when the ensemble is split five ways."""
    move = moves.StretchMove(nsplits=5)
    _test_normal(move, **kwargs)
def test_randomize_stretch(**kwargs):
    """Check the stretch move with randomized ensemble splits."""
    move = moves.StretchMove(randomize_split=True)
    _test_normal(move, **kwargs)
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@emcee_ES@tests@integration@test_stretch.py@.PATH_END.py
|
{
"filename": "test_datacube.py",
"repo_name": "sdss/mangadap",
"repo_path": "mangadap_extracted/mangadap-main/mangadap/tests/test_datacube.py",
"type": "Python"
}
|
import pytest
from IPython import embed
import numpy
from astropy.io import fits
from mangadap.config.manga import MaNGAConfig
from mangadap.proc.reductionassessments import ReductionAssessmentDef
from mangadap.util.covariance import Covariance
from mangadap.datacube import MaNGADataCube
from mangadap.tests.util import data_test_file, remote_data_file, requires_remote
@requires_remote
def test_sres_ext():
    """Check selection of the spectral-resolution extension from a DRP file."""
    cfg = MaNGAConfig(7815, 3702)
    file = remote_data_file(filename=cfg.file_name)
    hdu = fits.open(file)
    # With no extension requested, LSFPRE is preferred.
    assert MaNGAConfig.spectral_resolution_extension(hdu) == 'LSFPRE', \
            'Bad spectral resolution extension selection'
    assert MaNGAConfig.spectral_resolution_extension(hdu, ext='SPECRES') == 'SPECRES', \
            'Bad spectral resolution extension selection'
    # An unknown extension name must yield None, not raise.
    assert MaNGAConfig.spectral_resolution_extension(hdu, ext='junk') is None, \
            'Should return None for a bad extension name.'
@requires_remote
def test_read():
    """Smoke-test reading a log-binned MaNGA datacube and its basic attributes."""
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file())
    assert cube.log, 'Should read the log-binned version by default.'
    assert cube.wcs is not None, 'WCS should be defined.'
    assert cube.shape[:2] == cube.spatial_shape, 'Spatial shape should be first two axes.'
    assert cube.nspec == numpy.prod(cube.spatial_shape), 'Definition of number of spectra changed.'
    assert cube.sres is not None, 'Spectral resolution data was not constructed.'
    assert cube.sres_ext == 'LSFPRE', 'Should default to LSFPRE extension.'
    assert abs(cube.pixelscale - cube._get_pixelscale()) < 1e-6, 'Bad match in pixel scale.'
    # NOTE: This is worse than it should be because of how the WCS in MaNGA is defined.
    assert numpy.all(numpy.absolute(cube.wave - cube._get_wavelength_vector()) < 2e-4), \
            'Bad calculation of wavelength vector.'
    assert cube.covar is None, 'Covariance should not have been read'
@requires_remote
def test_read_correl():
    """Check reading a cube together with its correlation matrix extension."""
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file(),
                                       covar_ext='GCORREL')
    assert isinstance(cube.covar, Covariance), 'Incorrect type for covariance.'
    assert cube.covar.shape == (cube.nspec,cube.nspec), 'Covariance has incorrect shape.'
    assert cube.covar.is_correlation, 'Covariance object should be in a correlation mode.'
    # Check that the variances are all unity (or close to it when it's defined)
    unique_var = numpy.unique(cube.covar.var)
    assert numpy.allclose(unique_var[unique_var>0], 1.), 'Bad variance values'
@requires_remote
def test_wcs():
    """Check the on-sky coordinates computed from the cube WCS and offsets."""
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file())
    x, y = cube.mean_sky_coordinates(offset=None)
    assert x[0,0] > x[-1,0], 'RA should increase from large to small indices'
    assert y[0,0] < y[0,-1], 'DEC should increase from small to small indices'
    # The target coordinates from the header should land on the central spaxel.
    assert numpy.unravel_index(numpy.argmin( numpy.square(x - cube.prihdr['OBJRA'])
                                            + numpy.square(y - cube.prihdr['OBJDEC'])), x.shape) \
                == (21,21), 'Object should be at cube center.'
    # Offsetting by the corner coordinate must zero out that corner.
    x, y = cube.mean_sky_coordinates(center_coo=(x[0,0], y[0,0]))
    assert numpy.isclose(x[0,0], 0.0) and numpy.isclose(y[0,0], 0.0), 'Offset incorrect'
    x, y = cube.mean_sky_coordinates()
    assert abs(x[21,21]) < 1e-2 and abs(y[21,21]) < 1e-2, 'Offset incorrect'
@requires_remote
def test_copyto():
    """Check copy_to_array / copy_to_masked_array shapes, masking, and selection."""
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file())
    flux = cube.copy_to_array()
    assert not isinstance(flux, numpy.ma.MaskedArray), 'Should output normal array'
    assert flux.shape[0] == cube.nspec, 'Should be flattened into a 2D array.'
    assert flux.shape[1] == cube.nwave, 'Should be flattened into a 2D array.'
    # Apply a wavelength mask
    waverange = [5000, 7000]
    flux = cube.copy_to_array(waverange=waverange)
    indx = (cube.wave > waverange[0]) & (cube.wave < waverange[1])
    assert flux.shape[1] == numpy.sum(indx), 'Wavelength range masking failed'
    # Find the spaxels with non-zero signal
    method = ReductionAssessmentDef()
    # methods = available_reduction_assessments()
    # i = numpy.where([m['key'] == 'SNRG' for m in methods])[0]
    # assert len(i) == 1, 'Could not find correct reduction assessment definition.'
    sig, var, snr = cube.flux_stats(response_func=method.response)
    indx = ((sig > 0) & numpy.invert(numpy.ma.getmaskarray(sig))).data.ravel()
    ngood = numpy.sum(indx)
    # Select the spaxels with non-zero signal
    flux = cube.copy_to_array(waverange=waverange, select_bins=indx)
    assert flux.shape[0] == ngood, 'Bin selection failed'
    # Get the masked array
    flux = cube.copy_to_masked_array()
    assert isinstance(flux, numpy.ma.MaskedArray), 'Should output a masked array'
    assert flux.shape[0] == cube.nspec, 'Should be flattened into a 2D array.'
    assert flux.shape[1] == cube.nwave, 'Should be flattened into a 2D array.'
    # Select the spaxels with non-zero signal
    flux = cube.copy_to_masked_array(select_bins=indx)
    assert flux.shape[0] == ngood, 'Bin selection failed'
    # Try to get the inverse variance
    i = cube.nspec//2 + cube.spatial_shape[1]//2
    ivar = cube.copy_to_masked_array(attr='ivar')
    assert ivar.shape == (cube.nspec, cube.nwave), 'Bad ivar shape'
    assert numpy.array_equal(cube.ivar[numpy.unravel_index(i, cube.spatial_shape)],
                             ivar[i].data), 'Did not pull ivar data.'
    # Try to get the spectral resolution
    sres = cube.copy_to_masked_array(attr='sres')
    assert sres.shape == (cube.nspec, cube.nwave), 'Bad sres shape'
    assert numpy.array_equal(cube.sres[numpy.unravel_index(i, cube.spatial_shape)],
                             sres[i].data), 'Did not pull sres data.'
@requires_remote
def test_stats():
    """Exercise on-sky bin areas and flux statistics on remote MaNGA data."""
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file())

    # Build a mock bin map that groups the spaxels into 2x2 blocks.
    # NOTE(review): both reshape axes use spatial_shape[0]; fine for square
    # cubes, but confirm if non-square cubes are ever passed here.
    bin_map = numpy.arange(cube.nspec/4, dtype=int).reshape(cube.spatial_shape[0]//2,
                                                            cube.spatial_shape[0]//2)
    bin_map = numpy.repeat(numpy.repeat(bin_map, 2, axis=0), 2, axis=1)

    # Each 2x2 bin is expected to cover unit on-sky area.
    bins, area = cube.binned_on_sky_area(bin_map)
    assert numpy.array_equal(bins, numpy.arange(cube.nspec/4)), 'Bad bin list'
    assert numpy.allclose(area, 1.), 'Bad area calculation'

    # Central wavelength under three different weighting schemes.
    method = ReductionAssessmentDef()
    cen_wave = cube.central_wavelength(response_func=method.response,
                                       flag=cube.do_not_use_flags())
    assert numpy.isclose(cen_wave, 4638.0), 'Central wavelength changed.'
    cen_wave = cube.central_wavelength(waverange=[4000,8000], flag=cube.do_not_use_flags(),
                                       fluxwgt=True)
    assert numpy.isclose(cen_wave, 5895.7), 'Central wavelength changed.'
    cen_wave = cube.central_wavelength(waverange=[4000,8000], flag=cube.do_not_use_flags(),
                                       per_pixel=False)
    assert numpy.isclose(cen_wave, 6044.9), 'Central wavelength changed.'

    # Flux statistics should come back as masked maps.
    sig, var, snr = cube.flux_stats(response_func=method.response)
    assert sig.shape == cube.spatial_shape, 'Should be shaped as a map.'
    assert isinstance(sig, numpy.ma.MaskedArray), 'Expected masked arrays'
    assert numpy.ma.amax(snr) > 60, 'S/N changed'

    # Repeat with the linearly sampled cube and compare the statistics.
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file(), log=False)
    _sig, _var, _snr = cube.flux_stats(response_func=method.response)
    # TODO: Not sure why these are not closer.
    assert numpy.absolute(numpy.ma.median((sig-_sig)/_sig)) < 0.01, \
            'Signal should be the same to better than 1%.'
    assert numpy.absolute(numpy.ma.median((var-_var)/_var)) < 0.03, \
            'Variance should be the same to better than 3%.'
    assert numpy.absolute(numpy.ma.median((snr-_snr)/_snr)) < 0.02, \
            'S/N should be the same to better than 2%.'
@requires_remote
def test_read_lin():
    """Reading with log=False must yield linear wavelength sampling."""
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file(), log=False)
    assert not cube.log, 'Wavelength sampling should be linear'
    # A linear grid has a constant channel step, so the step dispersion is 0.
    assert numpy.isclose(numpy.std(numpy.diff(cube.wave)), 0.), \
            'Wavelength sampling should be linear'
@requires_remote
def test_from_config():
    """Instantiate a datacube from an ini configuration file."""
    cube = MaNGADataCube.from_config(data_test_file('datacube.ini'))
    # Redshift and ellipticity must match the values stored in the file.
    assert cube.meta['z'] == 0.0293823, 'Bad config file read'
    assert cube.meta['ell'] == 0.110844, 'Bad config file read'
@requires_remote
def test_load_rss():
    """Smoke test: the RSS counterpart of the cube can be loaded."""
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file())
    cube.load_rss()
@requires_remote
def test_covariance():
    """Exercise direct and approximate covariance calculations."""
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file())

    # Requesting a covariance matrix before loading the RSS must fail.
    with pytest.raises(ValueError):
        cube.covariance_matrix(1000)

    cube.load_rss()

    # Single-channel covariance matrix.
    C = cube.covariance_matrix(1000)
    assert C.shape == (1764, 1764), 'Bad covariance shape'

    # Convert to a correlation matrix; defined diagonal entries must be unity.
    C.to_correlation()
    unique_var = numpy.unique(numpy.diag(C.toarray()))
    assert numpy.allclose(unique_var[unique_var>0], 1.), 'Bad correlation diagonal'

    # Multi-channel covariance cube.
    C = cube.covariance_cube(channels=[1000,2000])
    assert numpy.array_equal(C.input_indx, [1000,2000]), 'Bad matrix indices'
    assert C.shape == (1764, 1764, 2), 'Bad covariance shape'

    # Round-trip the correlation conversion on the multi-channel object.
    C.to_correlation()
    C.revert_correlation()

    # Approximate calculations must reproduce the direct variances.
    approxC = cube.approximate_correlation_matrix()
    approxC = cube.approximate_covariance_matrix(1000)
    approxC = cube.approximate_covariance_cube(channels=[1000,2000])
    assert numpy.allclose(approxC.variance(), C.variance()), 'Variances should be the same.'
|
sdssREPO_NAMEmangadapPATH_START.@mangadap_extracted@mangadap-main@mangadap@tests@test_datacube.py@.PATH_END.py
|
{
"filename": "imports.py",
"repo_name": "catrionamurray/occultence",
"repo_path": "occultence_extracted/occultence-main/occultence/imports.py",
"type": "Python"
}
|
import numpy as np
import copy
import astropy
from astropy.time import Time
from astropy.timeseries import TimeSeries, aggregate_downsample
import astropy.units as u
from .utils import *
# import fitsio
import math
import matplotlib.pyplot as plt
import pandas as pd
import os
import glob
from astropy.io import fits, ascii
from astropy.stats import sigma_clip
import pickle
import operator
import george
from scipy.stats import binned_statistic
from scipy.interpolate import interp1d
from scipy.optimize import minimize
from astropy.timeseries import BoxLeastSquares
import warnings, textwrap
# copied from chromatic (credit Zach Berta-Thompson):
def custom_formatwarning(message, *args, **kwargs):
    """Format a warning as a single friendly line, ignoring file/line details.

    Parameters
    ----------
    message : Warning or str
        The warning message to display.
    *args, **kwargs
        Accepted for signature compatibility with `warnings.formatwarning`;
        ignored.

    Returns
    -------
    str
        The decorated, dedented warning text.
    """
    # Dedent so that warnings written as indented multi-line strings in the
    # source display cleanly.  (The original called .strip() twice; once is
    # sufficient.)
    return f"🤖⚠ {textwrap.dedent(str(message)).strip()} ⚠🤖\n\n"


original_warning_format = warnings.formatwarning


def cheerfully_suggest(*args, **kwargs):
    """Issue a warning rendered with the friendly one-line format.

    Temporarily swaps in `custom_formatwarning`, emits the warning, and
    restores the previous formatter even if `warnings.warn` raises (e.g.
    when warnings are configured to be treated as errors).
    """
    warnings.formatwarning = custom_formatwarning
    try:
        warnings.warn(*args, **kwargs)
    finally:
        warnings.formatwarning = original_warning_format
|
catrionamurrayREPO_NAMEoccultencePATH_START.@occultence_extracted@occultence-main@occultence@imports.py@.PATH_END.py
|
{
"filename": "_b0.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/carpet/_b0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class B0Validator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``carpet.b0`` property."""

    def __init__(self, plotly_name="b0", parent_name="carpet", **kwargs):
        # Pull the edit type out of the keyword arguments first, defaulting
        # to "calc" (recompute the figure on change).
        edit_type = kwargs.pop("edit_type", "calc")
        super(B0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@carpet@_b0.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "aeictf/EPiCA",
"repo_path": "EPiCA_extracted/EPiCA-main/README.md",
"type": "Markdown"
}
|
Information
Currently under construction 🚧🏗; the first stable release is expected in January 2024.
This package is an implementation of Pierre Connes' algorithm (Connes 1985).
EPiCA (Exoplanetary Pierre Connes Algorithm)
The current version works only with summed-up ESPRESSO pipeline CCFs.
Requirements
The algorithm requires astropy and numpy.
It also requires the CCF files in the .fits format as taken from ESO archive.
Installation
The easiest is to install epica using pip:
pip install epica
Otherwise, you can download the current repository and install the package manually:
cd EPiCA/
python setup.py install
|
aeictfREPO_NAMEEPiCAPATH_START.@EPiCA_extracted@EPiCA-main@README.md@.PATH_END.py
|
{
"filename": "config.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/analysis/config.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import html
import json
import logging
from collections import defaultdict
from collections.abc import Mapping
from enum import Enum
from pathlib import Path
from typing import List, Optional
import yaml
from pydantic import BaseModel, ConfigDict
from gammapy.makers import MapDatasetMaker
from gammapy.utils.scripts import read_yaml, to_yaml, write_yaml
from gammapy.utils.types import AngleType, EnergyType, PathType, TimeType
__all__ = ["AnalysisConfig"]
CONFIG_PATH = Path(__file__).resolve().parent / "config"
DOCS_FILE = CONFIG_PATH / "docs.yaml"
log = logging.getLogger(__name__)
def deep_update(d, u):
    """Recursively update a nested dictionary.

    Nested mappings present in both ``d`` and ``u`` are merged key by key
    rather than replaced wholesale; any other value in ``u`` overwrites the
    corresponding entry in ``d``.  ``d`` is modified in place and returned.

    Taken from: https://stackoverflow.com/a/3233356/19802442
    """
    for key, value in u.items():
        d[key] = (deep_update(d.get(key, {}), value)
                  if isinstance(value, Mapping) else value)
    return d
class ReductionTypeEnum(str, Enum):
    """Dimensionality of the data reduction."""

    spectrum = "1d"
    cube = "3d"


class FrameEnum(str, Enum):
    """Supported sky coordinate frames."""

    icrs = "icrs"
    galactic = "galactic"


class RequiredHDUEnum(str, Enum):
    """HDU names that may be required for an observation."""

    events = "events"
    gti = "gti"
    aeff = "aeff"
    bkg = "bkg"
    edisp = "edisp"
    psf = "psf"
    rad_max = "rad_max"


class BackgroundMethodEnum(str, Enum):
    """Available background estimation methods."""

    reflected = "reflected"
    fov = "fov_background"
    ring = "ring"


class SafeMaskMethodsEnum(str, Enum):
    """Available safe-mask methods."""

    aeff_default = "aeff-default"
    aeff_max = "aeff-max"
    edisp_bias = "edisp-bias"
    offset_max = "offset-max"
    bkg_peak = "bkg-peak"


class MapSelectionEnum(str, Enum):
    """Map types that can be selected for dataset production."""

    counts = "counts"
    exposure = "exposure"
    background = "background"
    psf = "psf"
    edisp = "edisp"
class GammapyBaseConfig(BaseModel):
    """Base class for all Gammapy configuration sections (pydantic model).

    Enforces strict validation: unknown keys are rejected, assignments are
    re-validated, defaults are validated, and enum members are stored by
    value.
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        validate_assignment=True,
        extra="forbid",
        validate_default=True,
        use_enum_values=True,
    )

    def _repr_html_(self):
        # Rich display for notebooks; fall back to escaped plain text when a
        # subclass does not implement `to_html`.
        try:
            return self.to_html()
        except AttributeError:
            return f"<pre>{html.escape(str(self))}</pre>"
class SkyCoordConfig(GammapyBaseConfig):
    """Sky coordinate defined by frame, longitude, and latitude."""

    frame: Optional[FrameEnum] = None
    lon: Optional[AngleType] = None
    lat: Optional[AngleType] = None


class EnergyAxisConfig(GammapyBaseConfig):
    """Energy axis defined by its range and number of bins."""

    min: Optional[EnergyType] = None
    max: Optional[EnergyType] = None
    nbins: Optional[int] = None


class SpatialCircleConfig(GammapyBaseConfig):
    """Circular sky region (center plus radius)."""

    frame: Optional[FrameEnum] = None
    lon: Optional[AngleType] = None
    lat: Optional[AngleType] = None
    radius: Optional[AngleType] = None


class EnergyRangeConfig(GammapyBaseConfig):
    """Energy interval."""

    min: Optional[EnergyType] = None
    max: Optional[EnergyType] = None


class TimeRangeConfig(GammapyBaseConfig):
    """Time interval."""

    start: Optional[TimeType] = None
    stop: Optional[TimeType] = None


class FluxPointsConfig(GammapyBaseConfig):
    """Settings for flux-point estimation."""

    energy: EnergyAxisConfig = EnergyAxisConfig()
    source: str = "source"
    parameters: dict = {"selection_optional": "all"}


class LightCurveConfig(GammapyBaseConfig):
    """Settings for light-curve estimation."""

    time_intervals: TimeRangeConfig = TimeRangeConfig()
    energy_edges: EnergyAxisConfig = EnergyAxisConfig()
    source: str = "source"
    parameters: dict = {"selection_optional": "all"}


class FitConfig(GammapyBaseConfig):
    """Settings for the fit (energy range only)."""

    fit_range: EnergyRangeConfig = EnergyRangeConfig()


class ExcessMapConfig(GammapyBaseConfig):
    """Settings for excess-map production."""

    correlation_radius: AngleType = "0.1 deg"
    parameters: dict = {}
    energy_edges: EnergyAxisConfig = EnergyAxisConfig()


class BackgroundConfig(GammapyBaseConfig):
    """Background estimation settings."""

    method: Optional[BackgroundMethodEnum] = None
    exclusion: Optional[PathType] = None
    parameters: dict = {}


class SafeMaskConfig(GammapyBaseConfig):
    """Safe-mask settings."""

    methods: List[SafeMaskMethodsEnum] = [SafeMaskMethodsEnum.aeff_default]
    parameters: dict = {}


class EnergyAxesConfig(GammapyBaseConfig):
    """Reconstructed and true energy axes."""

    energy: EnergyAxisConfig = EnergyAxisConfig(min="1 TeV", max="10 TeV", nbins=5)
    energy_true: EnergyAxisConfig = EnergyAxisConfig(
        min="0.5 TeV", max="20 TeV", nbins=16
    )


class SelectionConfig(GammapyBaseConfig):
    """Event selection settings."""

    offset_max: AngleType = "2.5 deg"


class WidthConfig(GammapyBaseConfig):
    """Map width and height."""

    width: AngleType = "5 deg"
    height: AngleType = "5 deg"


class WcsConfig(GammapyBaseConfig):
    """WCS geometry settings."""

    skydir: SkyCoordConfig = SkyCoordConfig()
    binsize: AngleType = "0.02 deg"
    width: WidthConfig = WidthConfig()
    binsize_irf: AngleType = "0.2 deg"


class GeomConfig(GammapyBaseConfig):
    """Full geometry definition (WCS, selection, energy axes)."""

    wcs: WcsConfig = WcsConfig()
    selection: SelectionConfig = SelectionConfig()
    axes: EnergyAxesConfig = EnergyAxesConfig()


class DatasetsConfig(GammapyBaseConfig):
    """Settings controlling dataset production."""

    type: ReductionTypeEnum = ReductionTypeEnum.spectrum
    stack: bool = True
    geom: GeomConfig = GeomConfig()
    map_selection: List[MapSelectionEnum] = MapDatasetMaker.available_selection
    background: BackgroundConfig = BackgroundConfig()
    safe_mask: SafeMaskConfig = SafeMaskConfig()
    on_region: SpatialCircleConfig = SpatialCircleConfig()
    containment_correction: bool = True


class ObservationsConfig(GammapyBaseConfig):
    """Observation selection settings."""

    datastore: PathType = Path("$GAMMAPY_DATA/hess-dl3-dr1/")
    obs_ids: List[int] = []
    obs_file: Optional[PathType] = None
    obs_cone: SpatialCircleConfig = SpatialCircleConfig()
    obs_time: TimeRangeConfig = TimeRangeConfig()
    required_irf: List[RequiredHDUEnum] = ["aeff", "edisp", "psf", "bkg"]


class LogConfig(GammapyBaseConfig):
    """Logging settings; passed directly to `logging.basicConfig`."""

    level: str = "info"
    filename: Optional[PathType] = None
    filemode: Optional[str] = None
    format: Optional[str] = None
    datefmt: Optional[str] = None


class GeneralConfig(GammapyBaseConfig):
    """General settings (logging, output location, parallelism)."""

    log: LogConfig = LogConfig()
    outdir: str = "."
    n_jobs: int = 1
    datasets_file: Optional[PathType] = None
    models_file: Optional[PathType] = None
class AnalysisConfig(GammapyBaseConfig):
    """Gammapy analysis configuration."""

    general: GeneralConfig = GeneralConfig()
    observations: ObservationsConfig = ObservationsConfig()
    datasets: DatasetsConfig = DatasetsConfig()
    fit: FitConfig = FitConfig()
    flux_points: FluxPointsConfig = FluxPointsConfig()
    excess_map: ExcessMapConfig = ExcessMapConfig()
    light_curve: LightCurveConfig = LightCurveConfig()

    def __str__(self):
        """Display settings in pretty YAML format."""
        info = self.__class__.__name__ + "\n\n\t"
        data = self.to_yaml()
        data = data.replace("\n", "\n\t")
        info += data
        return info.expandtabs(tabsize=4)

    @classmethod
    def read(cls, path):
        """Read from YAML file.

        Parameters
        ----------
        path : str
            input filepath
        """
        config = read_yaml(path)
        # Any "metadata" key is not part of the schema; drop it before
        # validation so strict models do not reject the file.
        config.pop("metadata", None)
        return AnalysisConfig(**config)

    @classmethod
    def from_yaml(cls, config_str):
        """Create from YAML string.

        Parameters
        ----------
        config_str : str
            yaml str
        """
        settings = yaml.safe_load(config_str)
        return AnalysisConfig(**settings)

    def write(self, path, overwrite=False):
        """Write to YAML file.

        Parameters
        ----------
        path : `pathlib.Path` or str
            Path to write files.
        overwrite : bool, optional
            Overwrite existing file. Default is False.
        """
        yaml_str = self.to_yaml()
        write_yaml(yaml_str, path, overwrite=overwrite)

    def to_yaml(self):
        """Convert to YAML string."""
        # Round-trip through JSON so pydantic types are serialized to plain
        # built-ins before the YAML dump.
        data = json.loads(self.model_dump_json())
        return to_yaml(data)

    def set_logging(self):
        """Set logging config.

        Calls ``logging.basicConfig``, i.e. adjusts global logging state.
        """
        self.general.log.level = self.general.log.level.upper()
        logging.basicConfig(**self.general.log.model_dump())
        log.info("Setting logging config: {!r}".format(self.general.log.model_dump()))

    def update(self, config=None):
        """Update config with provided settings.

        Non-default settings from ``config`` are merged over the
        non-default settings of this configuration, and a new
        `AnalysisConfig` is returned.

        Parameters
        ----------
        config : str or `AnalysisConfig` object, optional
            Configuration settings provided in dict() syntax. Default is None.
        """
        if isinstance(config, str):
            other = AnalysisConfig.from_yaml(config)
        elif isinstance(config, AnalysisConfig):
            other = config
        else:
            # NOTE(review): despite the `config=None` default, omitting the
            # argument (or passing None) raises here — confirm intended.
            raise TypeError(f"Invalid type: {config}")
        config_new = deep_update(
            self.model_dump(exclude_defaults=True),
            other.model_dump(exclude_defaults=True),
        )
        return AnalysisConfig(**config_new)

    @staticmethod
    def _get_doc_sections():
        """Return dictionary with commented docs from docs file."""
        doc = defaultdict(str)
        with open(DOCS_FILE) as f:
            # Skip YAML document separators and group the remaining lines by
            # their most recent "# Section: <name>" header.
            for line in filter(lambda line: not line.startswith("---"), f):
                line = line.strip("\n")
                if line.startswith("# Section: "):
                    keyword = line.replace("# Section: ", "")
                # NOTE(review): assumes the first non-"---" line is a
                # "# Section:" header; otherwise `keyword` is unbound here.
                doc[keyword] += line + "\n"
        return doc
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@analysis@config.py@.PATH_END.py
|
{
"filename": "_paramtreecfg.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/examples/_paramtreecfg.py",
"type": "Python"
}
|
import numpy as np
from pyqtgraph.parametertree.parameterTypes import QtEnumParameter as enum
from pyqtgraph.Qt import QtWidgets
# Qt file-dialog class, used below to enumerate its enum options.
dlg = QtWidgets.QFileDialog

# Configuration for the parameter-tree example: maps each parameter type
# name to the extra options demonstrated for that type.  Each inner dict
# maps an option name to the config of the widget used to edit it.
cfg = {
    'list': {
        'limits': {
            'type': 'checklist',
            'limits': ['a', 'b', 'c']
        }
    },
    # File-parameter options; enum limits are read from QFileDialog.
    'file': {
        'acceptMode': {
            'type': 'list',
            'limits': list(enum(dlg.AcceptMode, dlg).enumMap)
        },
        'fileMode': {
            'type': 'list',
            'limits': list(enum(dlg.FileMode, dlg).enumMap)
        },
        'viewMode': {
            'type': 'list',
            'limits': list(enum(dlg.ViewMode, dlg).enumMap)
        },
        'dialogLabel': {
            'type': 'list',
            'limits': list(enum(dlg.DialogLabel, dlg).enumMap)
        },
        'relativeTo': {
            'type': 'str',
            'value': None
        },
        'directory': {
            'type': 'str',
            'value': None
        },
        'windowTitle': {
            'type': 'str',
            'value': None
        },
        'nameFilter': {
            'type': 'str',
            'value': None
        }
    },
    'float': {
        'Float Information': {
            'type': 'str',
            'readonly': True,
            'value': 'Note that all options except "finite" also apply to "int" parameters',
        },
        'step': {
            'type': 'float',
            'limits': [0, None],
            'value': 1,
        },
        'limits': {
            'type': 'list',
            'limits': {'[0, None]': [0, None], '[1, 5]': [1, 5]},
        },
        'suffix': {
            'type': 'list',
            'limits': ['Hz', 's', 'm'],
        },
        'siPrefix': {
            'type': 'bool',
            'value': True
        },
        'finite': {
            'type': 'bool',
            'value': True,
        },
        'dec': {
            'type': 'bool',
            'value': False,
        },
        'minStep': {
            'type': 'float',
            'value': 1.0e-12,
        },
    },
    'checklist': {
        'limits': {
            'type': 'checklist',
            'limits': ['one', 'two', 'three', 'four'],
        },
        'exclusive': {
            'type': 'bool',
            'value': False,
        },
        'delay': {
            'type': 'float',
            'value': 1.0,
            'limits': [0, None]
        }
    },
    'pen': {
        'Pen Information': {
            'type': 'str',
            'value': 'Click the button to see options',
            'readonly': True,
        },
    },
    'slider': {
        'step': {
            'type': 'float',
            'limits': [0, None],
            'value': 1, },
        'format': {
            'type': 'str',
            'value': '{0:>3}',
        },
        'precision': {
            'type': 'int',
            'value': 2,
            'limits': [1, None],
        },
        'span': {
            'type': 'list',
            'limits': {'linspace(-pi, pi)': np.linspace(-np.pi, np.pi), 'arange(10)**2': np.arange(10) ** 2},
        },
        'How to Set': {
            'type': 'list',
            'limits': ['Use span', 'Use step + limits'],
        }
    },
    'action': {
        'shortcut': {
            'type': 'str',
            'value': "Ctrl+Shift+P",
        },
        'icon': {
            'type': 'file',
            'value': None,
            'nameFilter': "Images (*.png *.jpg *.bmp *.jpeg *.svg)",
        },
    },
    'calendar': {
        'format': {
            'type': 'str',
            'value': 'MM DD',
        }
    },
    # Meta options shared by every parameter type.
    'Applies to All Types': {
        'Extra Information': {
            'type': 'text',
            'value': 'These apply to all parameters. Watch how this text box is altered by any setting you change.',
            'default': 'These apply to all parameters. Watch how this text box is altered by any setting you change.',
            'readonly': True,
        },
        'readonly': {
            'type': 'bool',
            'value': True,
        },
        'removable': {
            'type': 'bool',
            'tip': 'Adds a context menu option to remove this parameter',
            'value': False,
        },
        'visible': {
            'type': 'bool',
            'value': True,
        },
        'disabled': {
            'type': 'bool',
            'value': False,
        },
        'title': {
            'type': 'str',
            'value': 'Meta Options',
        },
        'default': {
            'tip': 'The default value that gets set when clicking the arrow in the right column',
            'type': 'str',
        },
        'expanded': {
            'type': 'bool',
            'value': True,
        },
    },
    # Types shown without extra settable options.
    'No Extra Options': {
        'text': 'Unlike the other parameters shown, these don\'t have extra settable options.\n' \
                + 'Note: "int" *does* have the same options as float, mentioned above',
        'int': 10,
        'str': 'Hi, world!',
        'color': '#fff',
        'bool': False,
        'colormap': None,
        'progress': 50,
        'font': 'Inter',
    }
}
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@examples@_paramtreecfg.py@.PATH_END.py
|
{
"filename": "_legendwidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolar/_legendwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``scatterpolar.legendwidth`` property."""

    def __init__(self, plotly_name="legendwidth", parent_name="scatterpolar", **kwargs):
        # Legend widths are non-negative; edits only restyle the plot.
        edit_type = kwargs.pop("edit_type", "style")
        minimum = kwargs.pop("min", 0)
        super(LegendwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolar@_legendwidth.py@.PATH_END.py
|
{
"filename": "abstract.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/autofit/non_linear/paths/abstract.py",
"type": "Python"
}
|
import logging
import os
import re
import shutil
import zipfile
from abc import ABC, abstractmethod
from configparser import NoSectionError
from os import path
from pathlib import Path
from typing import Optional
import numpy as np
from autoconf import conf
from autofit.mapper.identifier import Identifier, IdentifierField
from autofit.non_linear.samples.summary import SamplesSummary
from autofit.text import text_util
from autofit.tools.util import open_, zip_directory
logger = logging.getLogger(__name__)

# Inserts a split point before each interior capital letter, used to turn
# CamelCase class names into snake_case (e.g. "DynestyStatic" -> "dynesty_static").
pattern = re.compile(r"(?<!^)(?=[A-Z])")
class AbstractPaths(ABC):
    """Abstract base class managing the on-disk output structure of a search."""

    def __init__(
        self,
        name: Optional[str] = None,
        path_prefix: Optional[os.PathLike] = None,
        is_identifier_in_paths=True,
        parent: Optional["AbstractPaths"] = None,
        unique_tag: Optional[str] = None,
        identifier: Optional[str] = None,
        image_path_suffix: str = "",
    ):
        """
        Manages the path structure for `NonLinearSearch` output, for analyses both not using and using the search
        API. Use via non-linear searches requires manual input of paths, whereas the search API manages this using the
        search attributes.

        The output path within which the *Paths* objects path structure is contained is set via PyAutoConf, using the
        command:

        from autoconf import conf
        conf.instance = conf.Config(output_path="path/to/output")

        If we assume all the input strings above are used with the following example names:

        name = "name"
        path_prefix = "folder_0/folder_1"

        The output path of the `NonLinearSearch` results will be:

        /path/to/output/folder_0/folder_1/name

        Parameters
        ----------
        name
            The name of the non-linear search, which is used as a folder name after the ``path_prefix``. For searchs
            this name is the ``name``.
        path_prefix
            A prefixed path that appears after the output_path but before the name variable.
        is_identifier_in_paths
            If True output path and symlink path terminate with an identifier generated from the
            search and model
        parent
            The parent paths object of this paths object.
        unique_tag
            A unique tag for the search, used to differentiate between searches with the same name.
        identifier
            A custom identifier for the search, if this is not None it will be used instead of the automatically
            generated identifier
        image_path_suffix
            A suffix which is appended to the image path. This is used to differentiate between different
            image outputs, for example the image of the starting point of an MLE.
        """
        self.name = name or ""
        self.path_prefix = path_prefix or ""
        self.unique_tag = unique_tag
        self._non_linear_name = None
        self.__custom_identifier = identifier
        self.__identifier = None
        self.is_identifier_in_paths = is_identifier_in_paths
        self._parent = None
        # Assign via the (abstract) property setter so subclasses can hook it.
        self.parent = parent
        try:
            self.remove_files = conf.instance["general"]["output"]["remove_files"]
            # HPC mode always removes the unzipped output to limit disk usage.
            if conf.instance["general"]["hpc"]["hpc_mode"]:
                self.remove_files = True
        except NoSectionError as e:
            logger.exception(e)
        self.image_path_suffix = image_path_suffix

    @property
    @abstractmethod
    def samples(self):
        pass

    def save_parent_identifier(self):
        # No-op by default; overridden where a parent identifier is persisted.
        pass

    def save_unique_tag(self, is_grid_search=False):
        # No-op by default; overridden where the unique tag is persisted.
        pass

    def __str__(self):
        return str(self.output_path)

    def __repr__(self):
        return f"<{self.__class__.__name__} {self}>"

    @property
    def parent(self) -> "AbstractPaths":
        """
        The search performed before this search. For example, a search
        that is then compared to searches during a grid search.
        """
        return self._parent

    @parent.setter
    @abstractmethod
    def parent(self, parent: "AbstractPaths"):
        pass

    @property
    @abstractmethod
    def is_grid_search(self) -> bool:
        pass

    def for_sub_analysis(self, analysis_name: str):
        # Sub-analyses get their own child directory named after them.
        return self.create_child(name=analysis_name)

    @abstractmethod
    def create_child(
        self,
        name: Optional[str] = None,
        path_prefix: Optional[str] = None,
        is_identifier_in_paths: Optional[bool] = None,
        identifier: Optional[str] = None,
    ) -> "AbstractPaths":
        """
        Create a paths object which is the child of some parent
        paths object. This is done during a GridSearch so that
        results can be stored in the correct directory.

        Parameters
        ----------
        name
        path_prefix
        is_identifier_in_paths
            If False then this path's identifier will not be
            added to its output path.
        identifier

        Returns
        -------
        A new paths object
        """

    # Descriptor fields whose values feed into the identifier hash.
    search = IdentifierField()
    model = IdentifierField()
    unique_tag = IdentifierField()

    @property
    def non_linear_name(self):
        # Lazily derive a snake_case name from the search class name
        # (e.g. "DynestyStatic" -> "dynesty_static").
        if self._non_linear_name is None:
            if self.search is not None:
                self._non_linear_name = pattern.sub(
                    "_", type(self.search).__name__
                ).lower()
        return self._non_linear_name

    @property
    def _identifier(self):
        # A custom identifier always takes precedence; otherwise one is
        # generated (lazily, once) from the search, model, and unique tag.
        if self.__custom_identifier is not None:
            return self.__custom_identifier
        if self.__identifier is None:
            if None in (self.model, self.search):
                logger.debug(
                    "Generating identifier without both model and search having been set."
                )
            identifier_list = [self.search, self.model]
            if self.unique_tag is not None:
                identifier_list.append(self.unique_tag)
            self.__identifier = Identifier(identifier_list)
        return self.__identifier

    @_identifier.setter
    def _identifier(self, identifier):
        self.__identifier = identifier

    @property
    def identifier(self):
        return str(self._identifier)

    def save_identifier(self):
        # Persist a human-readable description of the identifier alongside
        # the results.
        with open_(self.output_path / ".identifier", "w+") as f:
            f.write(self._identifier.description)

    @property
    def search_internal_path(self) -> Path:
        """
        The path to the samples folder.
        """
        os.makedirs(self._files_path / "search_internal", exist_ok=True)
        return self._files_path / "search_internal"

    @property
    def image_path(self) -> Path:
        """
        The path to the image folder.
        """
        if not os.path.exists(self.output_path / f"image{self.image_path_suffix}"):
            os.makedirs(self.output_path / f"image{self.image_path_suffix}")
        return self.output_path / f"image{self.image_path_suffix}"

    @property
    def profile_path(self) -> Path:
        """
        The path to the profile folder.
        """
        return self.output_path / "profile"

    @property
    def output_path(self) -> Path:
        """
        The path to the output information for a search.
        """
        # filter(None, ...) drops empty components (e.g. unset prefix/tag).
        strings = list(
            filter(
                None,
                [
                    str(conf.instance.output_path),
                    str(self.path_prefix),
                    self.unique_tag,
                    str(self.name),
                ],
            )
        )
        if self.is_identifier_in_paths:
            strings.append(self.identifier)
        return Path(path.join("", *strings))

    @property
    def _files_path(self) -> Path:
        """
        This is private for a reason, use the save_json etc. methods to save and load json
        """
        files_path = self.output_path / "files"
        try:
            os.makedirs(files_path, exist_ok=True)
        except FileExistsError:
            pass
        return files_path

    def zip_remove(self):
        """
        Copy files from the sym linked search folder then remove the sym linked folder.
        """
        self._zip()

    def _zip(self):
        # Archive the output directory; optionally delete the unzipped copy.
        try:
            zip_directory(self.output_path, self._zip_path)
            if self.remove_files:
                shutil.rmtree(
                    self.output_path,
                    ignore_errors=True,
                )
        except FileNotFoundError:
            pass

    def zip_remove_nuclear(self):
        """
        When multiple model-fits are performed using the same `path_prefix` and `name`,
        the results are populated in the same folder with different unique identifiers.

        By accident, one may perform runs where additional results are placed
        in these folders which are not wanted for the subsequent analysis.

        Removing these results from the directory can be cumbersome, as determining
        the unwanted results based on their unique identifier requires visually inspecting
        them.

        These unwanted results can also make manipulating the results via the database
        problematic, as one may need to again filter based on unique identifier.

        When a run is performed in nuclear mode, all results in every folder are
        deleted except the results corresponding to the unique identifier of that run.
        Therefore, provided the user is 100% certain that the run corresponds to the
        results they want to keep, nuclear mode can be used to remove all unwanted results.

        For example, suppose a folder has 5 results, 4 of which are unwanted and 1 which is
        wanted. If nuclear mode runs, and the model-fit is set up correctly such that the
        identifier created corresponds to the wanted result, all 4 unwanted results
        will be deleted.

        To enable nuclear mode, set the environment variable ``PYAUTOFIT_NUCLEAR_MODE=1``.

        Nuclear mode is dangerous, and must be used with CAUTION AND CARE!
        """
        if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1":
            file_path = Path(os.path.split(self.output_path)[0])
            file_list = os.listdir(file_path)
            # Keep only entries that do NOT contain this run's identifier.
            file_list = [file for file in file_list if self.identifier not in file]
            for file in file_list:
                file_to_remove = file_path / file
                # Try removing as a file, then as a directory.
                try:
                    os.remove(file_to_remove)
                    logger.info(f"NUCLEAR MODE -- Removed {file_to_remove}")
                except (IsADirectoryError, FileNotFoundError):
                    pass
                try:
                    shutil.rmtree(file_to_remove)
                    logger.info(f"NUCLEAR MODE -- Removed {file_to_remove}")
                except (NotADirectoryError, FileNotFoundError):
                    pass

    def restore(self):
        """
        Copy files from the ``.zip`` file to the samples folder.
        """
        if path.exists(self._zip_path):
            # Replace any existing unzipped output with the archive contents.
            shutil.rmtree(self.output_path, ignore_errors=True)
            try:
                try:
                    with zipfile.ZipFile(self._zip_path, "r") as f:
                        f.extractall(self.output_path)
                except FileExistsError:
                    pass
            except zipfile.BadZipFile as e:
                raise zipfile.BadZipFile(
                    f"Unable to restore the zip file at the path {self._zip_path}"
                ) from e
            try:
                os.remove(self._zip_path)
            except FileNotFoundError:
                pass

    def __eq__(self, other):
        # NOTE: equality ignores the identifier; two paths with the same
        # prefix/name/search type compare equal.
        return isinstance(other, AbstractPaths) and all(
            [
                self.path_prefix == other.path_prefix,
                self.name == other.name,
                self.non_linear_name == other.non_linear_name,
            ]
        )

    @property
    def _zip_path(self) -> str:
        return f"{self.output_path}.zip"

    @abstractmethod
    def save_json(self, name, object_dict: dict, prefix: str = ""):
        pass

    @abstractmethod
    def load_json(self, name, prefix: str = "") -> dict:
        pass

    @abstractmethod
    def save_array(self, name, array: np.ndarray):
        pass

    @abstractmethod
    def load_array(self, name) -> np.ndarray:
        pass

    @abstractmethod
    def save_fits(self, name: str, hdu, prefix: str = ""):
        pass

    @abstractmethod
    def load_fits(self, name: str, prefix: str = ""):
        pass

    @abstractmethod
    def save_object(self, name: str, obj: object, prefix: str = ""):
        pass

    @abstractmethod
    def load_object(self, name: str, prefix: str = ""):
        pass

    @abstractmethod
    def remove_object(self, name: str):
        pass

    @abstractmethod
    def is_object(self, name: str) -> bool:
        pass

    def save_search_internal(self, obj):
        raise NotImplementedError

    def load_search_internal(self):
        raise NotImplementedError

    def remove_search_internal(self):
        raise NotImplementedError

    @property
    @abstractmethod
    def is_complete(self) -> bool:
        pass

    @abstractmethod
    def completed(self):
        pass

    @abstractmethod
    def save_all(self, search_config_dict=None, info=None):
        pass

    @abstractmethod
    def load_samples(self):
        """
        Load samples from the database
        """

    @abstractmethod
    def save_samples(self, samples):
        """
        Save samples to the database
        """

    def save_samples_summary(self, samples_summary: SamplesSummary):
        """
        Save samples summary to the database.
        """

    def load_samples_summary(self) -> SamplesSummary:
        """
        Load samples summary from the database.
        """

    @abstractmethod
    def save_latent_samples(self, latent_samples):
        """
        Save latent variables. These are values computed from an instance and output
        during analysis.
        """

    @abstractmethod
    def load_samples_info(self):
        pass

    def save_summary(
        self,
        samples,
        latent_samples,
        log_likelihood_function_time,
    ):
        # Write human-readable result and search summaries to the output
        # directory; latent results are only written when present.
        result_info = text_util.result_info_from(
            samples=samples,
        )
        filename = self.output_path / "model.results"
        with open_(filename, "w") as f:
            f.write(result_info)
        if latent_samples:
            result_info = text_util.result_info_from(
                samples=latent_samples,
            )
            filename = self.output_path / "latent.results"
            with open_(filename, "w") as f:
                f.write(result_info)
        text_util.search_summary_to_file(
            samples=samples,
            log_likelihood_function_time=log_likelihood_function_time,
            filename=self.output_path / "search.summary",
        )

    @property
    def _samples_file(self) -> Path:
        return self._files_path / "samples.csv"

    @property
    def _latent_variables_file(self) -> Path:
        return self._files_path / "latent.csv"

    @property
    def _covariance_file(self) -> Path:
        return self._files_path / "covariance.csv"

    @property
    def _info_file(self) -> Path:
        return self._files_path / "samples_info.json"
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@autofit@non_linear@paths@abstract.py@.PATH_END.py
|
{
"filename": "test_umath.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_core/tests/test_umath.py",
"type": "Python"
}
|
import platform
import warnings
import fnmatch
import itertools
import pytest
import sys
import operator
from fractions import Fraction
from functools import reduce
from collections import namedtuple
import numpy._core.umath as ncu
from numpy._core import _umath_tests as ncu_tests, sctypes
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
_gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM, IS_MUSL,
IS_PYPY, HAS_REFCOUNT
)
from numpy.testing._private.utils import _glibc_older_than
UFUNCS = [obj for obj in np._core.umath.__dict__.values()
if isinstance(obj, np.ufunc)]
UFUNCS_UNARY = [
uf for uf in UFUNCS if uf.nin == 1
]
UFUNCS_UNARY_FP = [
uf for uf in UFUNCS_UNARY if 'f->f' in uf.types
]
UFUNCS_BINARY = [
uf for uf in UFUNCS if uf.nin == 2
]
UFUNCS_BINARY_ACC = [
uf for uf in UFUNCS_BINARY if hasattr(uf, "accumulate") and uf.nout == 1
]
def interesting_binop_operands(val1, val2, dtype):
    """
    Yield "interesting" operand pairs covering common ufunc code paths:

    * scalar inputs
    * zero-dimensional arrays (scalar-array fast paths)
    * long contiguous arrays (SIMD) with the value of interest placed at
      several positions from either end
    * oddly strided arrays which may not be SIMD compatible

    Unaligned access and mixed dtypes are not covered; those are normally
    handled by the casting/buffering machinery.

    Yields ``(op1, op2, extractor, identifier)`` where ``extractor`` pulls
    the element of interest out of a result with the shape of ``op1 op op2``.
    Written as a generator (not a fixture) so each case is produced lazily.
    """
    fill_value = 1  # could be a parameter, but maybe not an optional one?

    lhs = np.full(10003, dtype=dtype, fill_value=fill_value)
    rhs = np.full(10003, dtype=dtype, fill_value=fill_value)
    lhs[0] = val1
    rhs[0] = val2

    def whole(res):
        return res

    yield lhs[0], rhs[0], whole, "scalars"
    yield lhs[0, ...], rhs[0, ...], whole, "scalar-arrays"

    # reset array values to fill_value:
    lhs[0] = fill_value
    rhs[0] = fill_value

    for pos in (0, 1, 2, 3, 4, 5, -1, -2, -3, -4):
        lhs[pos] = val1
        rhs[pos] = val2

        def at_pos(res):
            return res[pos]

        yield lhs, rhs, at_pos, f"off-{pos}"
        yield lhs, rhs[pos], at_pos, f"off-{pos}-with-scalar"

        lhs[pos] = fill_value
        rhs[pos] = fill_value

    for stride in (-1, 113):
        view1 = lhs[::stride]
        view2 = rhs[::stride]
        view1[10] = val1
        view2[10] = val2

        def at_ten(res):
            return res[10]

        yield view1, view2, at_ten, f"stride-{stride}"

        view1[10] = fill_value
        view2[10] = fill_value
def on_powerpc():
    """True if we are running on a Power PC platform."""
    if platform.processor() == 'powerpc':
        return True
    return platform.machine().startswith('ppc')
def bad_arcsinh():
    """The blocklisted trig functions are not accurate on aarch64/PPC for
    complex256. Rather than dig through the actual problem skip the
    test. This should be fixed when we can move past glibc2.17
    which is the version in manylinux2014
    """
    # Pick a probe value per platform; everywhere else the accuracy
    # problem does not apply, so report "not bad".
    if platform.machine() == 'aarch64':
        probe = 1.78e-10
    elif on_powerpc():
        probe = 2.16e-10
    else:
        return False

    real_result = np.arcsinh(np.float128(probe))
    complex_result = np.arcsinh(np.complex256(probe)).real
    # The eps for float128 is 1-e33, so this is way bigger
    return abs((real_result / complex_result) - 1.0) > 1e-23
class _FilterInvalids:
    """Mixin that silences invalid-value FP errors around each test."""

    def setup_method(self):
        # np.seterr returns the previous settings; stash them so the
        # teardown can restore the exact prior state.
        self.olderr = np.seterr(invalid='ignore')

    def teardown_method(self):
        # Restore whatever error state was active before the test.
        np.seterr(**self.olderr)
class TestConstants:
    """Check the float constants exposed by the umath module.

    The third positional argument of ``assert_allclose`` is ``rtol``.
    """

    def test_pi(self):
        assert_allclose(ncu.pi, 3.141592653589793, 1e-15)

    def test_e(self):
        assert_allclose(ncu.e, 2.718281828459045, 1e-15)

    def test_euler_gamma(self):
        assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
class TestOut:
    """Tests for the ``out=`` and ``subok=`` arguments of ufunc calls:
    identity of returned outputs, tuple-form requirements for
    multi-output ufuncs, and ``__array_wrap__`` subclass handling.
    """

    def test_out_subok(self):
        # Positional out, keyword out, and 1-tuple out must all return
        # the exact object that was passed in.
        for subok in (True, False):
            a = np.array(0.5)
            o = np.empty(())

            r = np.add(a, 2, o, subok=subok)
            assert_(r is o)
            r = np.add(a, 2, out=o, subok=subok)
            assert_(r is o)
            r = np.add(a, 2, out=(o,), subok=subok)
            assert_(r is o)

            # frexp has two outputs; either may be supplied or None.
            d = np.array(5.7)
            o1 = np.empty(())
            o2 = np.empty((), dtype=np.int32)

            r1, r2 = np.frexp(d, o1, None, subok=subok)
            assert_(r1 is o1)
            r1, r2 = np.frexp(d, None, o2, subok=subok)
            assert_(r2 is o2)
            r1, r2 = np.frexp(d, o1, o2, subok=subok)
            assert_(r1 is o1)
            assert_(r2 is o2)

            r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
            assert_(r1 is o1)
            r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
            assert_(r2 is o2)
            r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
            assert_(r1 is o1)
            assert_(r2 is o2)

            with assert_raises(TypeError):
                # Out argument must be tuple, since there are multiple outputs.
                r1, r2 = np.frexp(d, out=o1, subok=subok)

            # Malformed out arguments: wrong counts, wrong types, empty
            # tuples, and non-array entries must raise.
            assert_raises(TypeError, np.add, a, 2, o, o, subok=subok)
            assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok)
            assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok)
            assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
            assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
            assert_raises(TypeError, np.add, a, 2, [], subok=subok)
            assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
            assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)

            # A read-only output array must be rejected.
            o.flags.writeable = False
            assert_raises(ValueError, np.add, a, 2, o, subok=subok)
            assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
            assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)

    def test_out_wrap_subok(self):
        # Results should be wrapped into the subclass only when subok=True.
        class ArrayWrap(np.ndarray):
            __array_priority__ = 10

            def __new__(cls, arr):
                return np.asarray(arr).view(cls).copy()

            def __array_wrap__(self, arr, context=None, return_scalar=False):
                return arr.view(type(self))

        for subok in (True, False):
            a = ArrayWrap([0.5])

            r = np.add(a, 2, subok=subok)
            if subok:
                assert_(isinstance(r, ArrayWrap))
            else:
                assert_(type(r) == np.ndarray)

            r = np.add(a, 2, None, subok=subok)
            if subok:
                assert_(isinstance(r, ArrayWrap))
            else:
                assert_(type(r) == np.ndarray)

            r = np.add(a, 2, out=None, subok=subok)
            if subok:
                assert_(isinstance(r, ArrayWrap))
            else:
                assert_(type(r) == np.ndarray)

            r = np.add(a, 2, out=(None,), subok=subok)
            if subok:
                assert_(isinstance(r, ArrayWrap))
            else:
                assert_(type(r) == np.ndarray)

            # Multi-output ufunc: the output that was NOT supplied as an
            # explicit array is the one subject to wrapping.
            d = ArrayWrap([5.7])
            o1 = np.empty((1,))
            o2 = np.empty((1,), dtype=np.int32)

            r1, r2 = np.frexp(d, o1, subok=subok)
            if subok:
                assert_(isinstance(r2, ArrayWrap))
            else:
                assert_(type(r2) == np.ndarray)

            r1, r2 = np.frexp(d, o1, None, subok=subok)
            if subok:
                assert_(isinstance(r2, ArrayWrap))
            else:
                assert_(type(r2) == np.ndarray)

            r1, r2 = np.frexp(d, None, o2, subok=subok)
            if subok:
                assert_(isinstance(r1, ArrayWrap))
            else:
                assert_(type(r1) == np.ndarray)

            r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
            if subok:
                assert_(isinstance(r2, ArrayWrap))
            else:
                assert_(type(r2) == np.ndarray)

            r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
            if subok:
                assert_(isinstance(r1, ArrayWrap))
            else:
                assert_(type(r1) == np.ndarray)

            with assert_raises(TypeError):
                # Out argument must be tuple, since there are multiple outputs.
                r1, r2 = np.frexp(d, out=o1, subok=subok)

    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_out_wrap_no_leak(self):
        # Regression test for gh-26545
        class ArrSubclass(np.ndarray):
            pass

        arr = np.arange(10).view(ArrSubclass)

        arr *= 1
        assert sys.getrefcount(arr) == 2
class TestComparisons:
    """Comparison-ufunc tests: agreement with Python operators across
    dtypes, object-identity handling, and exact comparisons between
    integers too large for float.
    """
    import operator

    @pytest.mark.parametrize('dtype', sctypes['uint'] + sctypes['int'] +
                             sctypes['float'] + [np.bool])
    @pytest.mark.parametrize('py_comp,np_comp', [
        (operator.lt, np.less),
        (operator.le, np.less_equal),
        (operator.gt, np.greater),
        (operator.ge, np.greater_equal),
        (operator.eq, np.equal),
        (operator.ne, np.not_equal)
    ])
    def test_comparison_functions(self, dtype, py_comp, np_comp):
        # Initialize input arrays
        if dtype == np.bool:
            a = np.random.choice(a=[False, True], size=1000)
            b = np.random.choice(a=[False, True], size=1000)
            scalar = True
        else:
            a = np.random.randint(low=1, high=10, size=1000).astype(dtype)
            b = np.random.randint(low=1, high=10, size=1000).astype(dtype)
            scalar = 5
        np_scalar = np.dtype(dtype).type(scalar)
        a_lst = a.tolist()
        b_lst = b.tolist()

        # (Binary) Comparison (x1=array, x2=array)
        comp_b = np_comp(a, b).view(np.uint8)
        comp_b_list = [int(py_comp(x, y)) for x, y in zip(a_lst, b_lst)]

        # (Scalar1) Comparison (x1=scalar, x2=array)
        comp_s1 = np_comp(np_scalar, b).view(np.uint8)
        comp_s1_list = [int(py_comp(scalar, x)) for x in b_lst]

        # (Scalar2) Comparison (x1=array, x2=scalar)
        comp_s2 = np_comp(a, np_scalar).view(np.uint8)
        comp_s2_list = [int(py_comp(x, scalar)) for x in a_lst]

        # Sequence: Binary, Scalar1 and Scalar2
        assert_(comp_b.tolist() == comp_b_list,
                f"Failed comparison ({py_comp.__name__})")
        assert_(comp_s1.tolist() == comp_s1_list,
                f"Failed comparison ({py_comp.__name__})")
        assert_(comp_s2.tolist() == comp_s2_list,
                f"Failed comparison ({py_comp.__name__})")

    def test_ignore_object_identity_in_equal(self):
        # Check comparing identical objects whose comparison
        # is not a simple boolean, e.g., arrays that are compared elementwise.
        a = np.array([np.array([1, 2, 3]), None], dtype=object)
        assert_raises(ValueError, np.equal, a, a)

        # Check error raised when comparing identical non-comparable objects.
        class FunkyType:
            def __eq__(self, other):
                raise TypeError("I won't compare")

        a = np.array([FunkyType()])
        assert_raises(TypeError, np.equal, a, a)

        # Check identity doesn't override comparison mismatch.
        a = np.array([np.nan], dtype=object)
        assert_equal(np.equal(a, a), [False])

    def test_ignore_object_identity_in_not_equal(self):
        # Check comparing identical objects whose comparison
        # is not a simple boolean, e.g., arrays that are compared elementwise.
        a = np.array([np.array([1, 2, 3]), None], dtype=object)
        assert_raises(ValueError, np.not_equal, a, a)

        # Check error raised when comparing identical non-comparable objects.
        class FunkyType:
            def __ne__(self, other):
                raise TypeError("I won't compare")

        a = np.array([FunkyType()])
        assert_raises(TypeError, np.not_equal, a, a)

        # Check identity doesn't override comparison mismatch.
        a = np.array([np.nan], dtype=object)
        assert_equal(np.not_equal(a, a), [True])

    def test_error_in_equal_reduce(self):
        # gh-20929
        # make sure np.equal.reduce raises a TypeError if an array is passed
        # without specifying the dtype
        a = np.array([0, 0])
        assert_equal(np.equal.reduce(a, dtype=bool), True)
        assert_raises(TypeError, np.equal.reduce, a)

    def test_object_dtype(self):
        assert np.equal(1, [1], dtype=object).dtype == object
        assert np.equal(1, [1], signature=(None, None, "O")).dtype == object

    def test_object_nonbool_dtype_error(self):
        # bool output dtype is fine of course:
        assert np.equal(1, [1], dtype=bool).dtype == bool

        # but the following are examples do not have a loop:
        with pytest.raises(TypeError, match="No loop matching"):
            np.equal(1, 1, dtype=np.int64)

        with pytest.raises(TypeError, match="No loop matching"):
            np.equal(1, 1, sig=(None, None, "l"))

    @pytest.mark.parametrize("dtypes", ["qQ", "Qq"])
    @pytest.mark.parametrize('py_comp, np_comp', [
        (operator.lt, np.less),
        (operator.le, np.less_equal),
        (operator.gt, np.greater),
        (operator.ge, np.greater_equal),
        (operator.eq, np.equal),
        (operator.ne, np.not_equal)
    ])
    @pytest.mark.parametrize("vals", [(2**60, 2**60+1), (2**60+1, 2**60)])
    def test_large_integer_direct_comparison(
            self, dtypes, py_comp, np_comp, vals):
        # NOTE(review): the ``vals`` parameter is not used below — the
        # values appear hard-coded; confirm whether it should feed a1/a2.
        # Note that float(2**60) + 1 == float(2**60).
        a1 = np.array([2**60], dtype=dtypes[0])
        a2 = np.array([2**60 + 1], dtype=dtypes[1])
        expected = py_comp(2**60, 2**60+1)

        assert py_comp(a1, a2) == expected
        assert np_comp(a1, a2) == expected
        # Also check the scalars:
        s1 = a1[0]
        s2 = a2[0]
        assert isinstance(s1, np.integer)
        assert isinstance(s2, np.integer)
        # The Python operator here is mainly interesting:
        assert py_comp(s1, s2) == expected
        assert np_comp(s1, s2) == expected

    @pytest.mark.parametrize("dtype", np.typecodes['UnsignedInteger'])
    @pytest.mark.parametrize('py_comp_func, np_comp_func', [
        (operator.lt, np.less),
        (operator.le, np.less_equal),
        (operator.gt, np.greater),
        (operator.ge, np.greater_equal),
        (operator.eq, np.equal),
        (operator.ne, np.not_equal)
    ])
    @pytest.mark.parametrize("flip", [True, False])
    def test_unsigned_signed_direct_comparison(
            self, dtype, py_comp_func, np_comp_func, flip):
        if flip:
            py_comp = lambda x, y: py_comp_func(y, x)
            np_comp = lambda x, y: np_comp_func(y, x)
        else:
            py_comp = py_comp_func
            np_comp = np_comp_func

        arr = np.array([np.iinfo(dtype).max], dtype=dtype)
        expected = py_comp(int(arr[0]), -1)

        assert py_comp(arr, -1) == expected
        assert np_comp(arr, -1) == expected

        scalar = arr[0]
        assert isinstance(scalar, np.integer)
        # The Python operator here is mainly interesting:
        assert py_comp(scalar, -1) == expected
        assert np_comp(scalar, -1) == expected
class TestAdd:
    """Regression tests for np.add reductions."""

    def test_reduce_alignment(self):
        # gh-9876: pairwise_sum_@TYPE@ must cope with oddly aligned
        # strided data. On x86 the 8-byte 'b' field counts as aligned at
        # a 4-byte offset inside the struct.
        rec = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
        rec['a'] = -1
        assert_equal(rec['b'].sum(), 0)
class TestDivision:
    """Division tests: Python-compatible integer //, boundary/overflow
    behavior, reductions, timedelta division, and float/complex cases.
    """

    def test_division_int(self):
        # int division should follow Python
        x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
        if 5 / 10 == 0.5:
            assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
                                   -0.05, -0.1, -0.9, -1, -1.2])
        else:
            assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
        assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
        assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize("dtype,ex_val", itertools.product(
        sctypes['int'] + sctypes['uint'], (
            (
                # dividend
                "np.array(range(fo.max-lsize, fo.max)).astype(dtype),"
                # divisors
                "np.arange(lsize).astype(dtype),"
                # scalar divisors
                "range(15)"
            ),
            (
                # dividend
                "np.arange(fo.min, fo.min+lsize).astype(dtype),"
                # divisors
                "np.arange(lsize//-2, lsize//2).astype(dtype),"
                # scalar divisors
                "range(fo.min, fo.min + 15)"
            ), (
                # dividend
                "np.array(range(fo.max-lsize, fo.max)).astype(dtype),"
                # divisors
                "np.arange(lsize).astype(dtype),"
                # scalar divisors
                "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]"
            )
        )
    ))
    def test_division_int_boundary(self, dtype, ex_val):
        # ex_val is a string evaluated below with fo/lsize/neg in scope.
        fo = np.iinfo(dtype)
        neg = -1 if fo.min < 0 else 1
        # Large enough to test SIMD loops and remainder elements
        lsize = 512 + 7

        a, b, divisors = eval(ex_val)
        a_lst, b_lst = a.tolist(), b.tolist()

        # Reference semantics: 0 for division by zero, and the dtype min
        # for the min // -1 overflow case.
        c_div = lambda n, d: (
            0 if d == 0 else (
                fo.min if (n and n == fo.min and d == -1) else n//d
            )
        )
        with np.errstate(divide='ignore'):
            ac = a.copy()
            ac //= b
            div_ab = a // b
        div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)]

        msg = "Integer arrays floor division check (//)"
        assert all(div_ab == div_lst), msg
        msg_eq = "Integer arrays floor division check (//=)"
        assert all(ac == div_lst), msg_eq

        for divisor in divisors:
            ac = a.copy()
            with np.errstate(divide='ignore', over='ignore'):
                div_a = a // divisor
                ac //= divisor
            div_lst = [c_div(i, divisor) for i in a_lst]

            assert all(div_a == div_lst), msg
            assert all(ac == div_lst), msg_eq

        with np.errstate(divide='raise', over='raise'):
            if 0 in b:
                # Verify overflow case
                with pytest.raises(FloatingPointError,
                        match="divide by zero encountered in floor_divide"):
                    a // b
            else:
                a // b

            if fo.min and fo.min in a:
                with pytest.raises(FloatingPointError,
                        match='overflow encountered in floor_divide'):
                    a // -1
            elif fo.min:
                a // -1

            with pytest.raises(FloatingPointError,
                    match="divide by zero encountered in floor_divide"):
                a // 0
            with pytest.raises(FloatingPointError,
                    match="divide by zero encountered in floor_divide"):
                ac = a.copy()
                ac //= 0

            # Empty array // 0 must not raise.
            np.array([], dtype=dtype) // 0

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize("dtype,ex_val", itertools.product(
        sctypes['int'] + sctypes['uint'], (
            "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)",
            "np.array([fo.min, 1, -2, 1, 1, 2, -3]).astype(dtype)",
            "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)",
            "np.array(range(fo.max-(100*7), fo.max, 7)).astype(dtype)",
        )
    ))
    def test_division_int_reduce(self, dtype, ex_val):
        fo = np.iinfo(dtype)
        a = eval(ex_val)
        lst = a.tolist()
        c_div = lambda n, d: (
            0 if d == 0 or (n and n == fo.min and d == -1) else n//d
        )

        with np.errstate(divide='ignore'):
            div_a = np.floor_divide.reduce(a)
        div_lst = reduce(c_div, lst)
        msg = "Reduce floor integer division check"
        assert div_a == div_lst, msg

        with np.errstate(divide='raise', over='raise'):
            with pytest.raises(FloatingPointError,
                    match="divide by zero encountered in reduce"):
                np.floor_divide.reduce(np.arange(-100, 100).astype(dtype))
            if fo.min:
                with pytest.raises(FloatingPointError,
                        match='overflow encountered in reduce'):
                    np.floor_divide.reduce(
                        np.array([fo.min, 1, -1], dtype=dtype)
                    )

    @pytest.mark.parametrize(
            "dividend,divisor,quotient",
            [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12),
             (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12),
             (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12),
             (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12),
             (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1),
             (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0),
             (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')),
             (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')),
             (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')),
             (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),
             (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),
             (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')),
             (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')),
             ])
    def test_division_int_timedelta(self, dividend, divisor, quotient):
        # If either divisor is 0 or quotient is Nat, check for division by 0
        if divisor and (isinstance(quotient, int) or not np.isnat(quotient)):
            msg = "Timedelta floor division check"
            assert dividend // divisor == quotient, msg

            # Test for arrays as well
            msg = "Timedelta arrays floor division check"
            dividend_array = np.array([dividend]*5)
            quotient_array = np.array([quotient]*5)
            assert all(dividend_array // divisor == quotient_array), msg
        else:
            if IS_WASM:
                pytest.skip("fp errors don't work in wasm")
            with np.errstate(divide='raise', invalid='raise'):
                with pytest.raises(FloatingPointError):
                    dividend // divisor

    def test_division_complex(self):
        # check that implementation is correct
        msg = "Complex division implementation check"
        x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
        assert_almost_equal(x**2/x, x, err_msg=msg)
        # check overflow, underflow
        msg = "Complex division overflow/underflow check"
        x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
        y = x**2/x
        assert_almost_equal(y/x, [1, 1], err_msg=msg)

    def test_zero_division_complex(self):
        with np.errstate(invalid="ignore", divide="ignore"):
            x = np.array([0.0], dtype=np.complex128)
            y = 1.0/x
            assert_(np.isinf(y)[0])
            y = complex(np.inf, np.nan)/x
            assert_(np.isinf(y)[0])
            y = complex(np.nan, np.inf)/x
            assert_(np.isinf(y)[0])
            y = complex(np.inf, np.inf)/x
            assert_(np.isinf(y)[0])
            y = 0.0/x
            assert_(np.isnan(y)[0])

    def test_floor_division_complex(self):
        # check that floor division, divmod and remainder raises type errors
        x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
        with pytest.raises(TypeError):
            x // 7
        with pytest.raises(TypeError):
            np.divmod(x, 7)
        with pytest.raises(TypeError):
            np.remainder(x, 7)

    def test_floor_division_signed_zero(self):
        # Check that the sign bit is correctly set when dividing positive and
        # negative zero by one.
        x = np.zeros(10)
        assert_equal(np.signbit(x//1), 0)
        assert_equal(np.signbit((-x)//1), 1)

    @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
            reason="gh-22982")
    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
    def test_floor_division_errors(self, dtype):
        fnan = np.array(np.nan, dtype=dtype)
        fone = np.array(1.0, dtype=dtype)
        fzer = np.array(0.0, dtype=dtype)
        finf = np.array(np.inf, dtype=dtype)
        # divide by zero error check
        with np.errstate(divide='raise', invalid='ignore'):
            assert_raises(FloatingPointError, np.floor_divide, fone, fzer)
        with np.errstate(divide='ignore', invalid='raise'):
            np.floor_divide(fone, fzer)

        # The following already contain a NaN and should not warn
        with np.errstate(all='raise'):
            np.floor_divide(fnan, fone)
            np.floor_divide(fone, fnan)
            np.floor_divide(fnan, fzer)
            np.floor_divide(fzer, fnan)

    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
    def test_floor_division_corner_cases(self, dtype):
        # test corner cases like 1.0//0.0 for errors and return vals
        x = np.zeros(10, dtype=dtype)
        y = np.ones(10, dtype=dtype)
        fnan = np.array(np.nan, dtype=dtype)
        fone = np.array(1.0, dtype=dtype)
        fzer = np.array(0.0, dtype=dtype)
        finf = np.array(np.inf, dtype=dtype)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in floor_divide")
            div = np.floor_divide(fnan, fone)
            assert(np.isnan(div)), "div: %s" % div
            div = np.floor_divide(fone, fnan)
            assert(np.isnan(div)), "div: %s" % div
            div = np.floor_divide(fnan, fzer)
            assert(np.isnan(div)), "div: %s" % div
        # verify 1.0//0.0 computations return inf
        with np.errstate(divide='ignore'):
            z = np.floor_divide(y, x)
            assert_(np.isinf(z).all())
def floor_divide_and_remainder(x, y):
    """Return ``(x // y, x % y)`` computed via the two separate ufuncs."""
    quotient = np.floor_divide(x, y)
    remainder = np.remainder(x, y)
    return quotient, remainder
def _signs(dt):
    """Signs to test for dtype char ``dt``: unsigned types get only +1."""
    return (+1,) if dt in np.typecodes['UnsignedInteger'] else (+1, -1)
class TestRemainder:
def test_remainder_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floor_divide_and_remainder, np.divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)
b = np.array(sg2*19, dtype=dt2)
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_remainder_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
# convert exact integer results from Python to float so that
# signed zero can be used, it is checked.
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floor_divide_and_remainder, np.divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
div, rem = op(fa, fb)
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
def test_float_remainder_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floor_divide_and_remainder, np.divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)
b = np.array(sg2*6e-8, dtype=dt2)
div, rem = op(a, b)
# Equal assertion should hold when fmod is used
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.xfail(sys.platform.startswith("darwin"),
reason="MacOS seems to not give the correct 'invalid' warning for "
"`fmod`. Hopefully, others always do.")
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_float_divmod_errors(self, dtype):
# Check valid errors raised for divmod and remainder
fzero = np.array(0.0, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
fnan = np.array(np.nan, dtype=dtype)
# since divmod is combination of both remainder and divide
# ops it will set both dividebyzero and invalid flags
with np.errstate(divide='raise', invalid='ignore'):
assert_raises(FloatingPointError, np.divmod, fone, fzero)
with np.errstate(divide='ignore', invalid='raise'):
assert_raises(FloatingPointError, np.divmod, fone, fzero)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, np.divmod, fzero, fzero)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, np.divmod, finf, finf)
with np.errstate(divide='ignore', invalid='raise'):
assert_raises(FloatingPointError, np.divmod, finf, fzero)
with np.errstate(divide='raise', invalid='ignore'):
# inf / 0 does not set any flags, only the modulo creates a NaN
np.divmod(finf, fzero)
@pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
reason="gh-22982")
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.xfail(sys.platform.startswith("darwin"),
reason="MacOS seems to not give the correct 'invalid' warning for "
"`fmod`. Hopefully, others always do.")
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
@pytest.mark.parametrize('fn', [np.fmod, np.remainder])
def test_float_remainder_errors(self, dtype, fn):
fzero = np.array(0.0, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
fnan = np.array(np.nan, dtype=dtype)
# The following already contain a NaN and should not warn.
with np.errstate(all='raise'):
with pytest.raises(FloatingPointError,
match="invalid value"):
fn(fone, fzero)
fn(fnan, fzero)
fn(fzero, fnan)
fn(fone, fnan)
fn(fnan, fone)
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
def test_float_remainder_overflow(self):
a = np.finfo(np.float64).tiny
with np.errstate(over='ignore', invalid='ignore'):
div, mod = np.divmod(4, a)
np.isinf(div)
assert_(mod == 0)
with np.errstate(over='raise', invalid='ignore'):
assert_raises(FloatingPointError, np.divmod, 4, a)
with np.errstate(invalid='raise', over='ignore'):
assert_raises(FloatingPointError, np.divmod, 4, a)
def test_float_divmod_corner_cases(self):
# check nan cases
for dt in np.typecodes['Float']:
fnan = np.array(np.nan, dtype=dt)
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
div, rem = np.divmod(fone, fzer)
assert(np.isinf(div)), 'dt: %s, div: %s' % (dt, rem)
assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)
div, rem = np.divmod(fzer, fzer)
assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)
assert_(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem)
div, rem = np.divmod(finf, finf)
assert(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem)
assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)
div, rem = np.divmod(finf, fzer)
assert(np.isinf(div)), 'dt: %s, rem: %s' % (dt, rem)
assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)
div, rem = np.divmod(fnan, fone)
assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem)
assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem)
div, rem = np.divmod(fone, fnan)
assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem)
assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem)
div, rem = np.divmod(fnan, fzer)
assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem)
assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem)
def test_float_remainder_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = np.remainder(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = np.remainder(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "invalid value encountered in fmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = np.remainder(fone, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
# MSVC 2008 returns NaN here, so disable the check.
#rem = np.remainder(fone, finf)
#assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
rem = np.remainder(finf, fone)
fmod = np.fmod(finf, fone)
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
rem = np.remainder(finf, finf)
fmod = np.fmod(finf, fone)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(finf, fzer)
fmod = np.fmod(finf, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(fone, fnan)
fmod = np.fmod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(fnan, fzer)
fmod = np.fmod(fnan, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem))
rem = np.remainder(fnan, fone)
fmod = np.fmod(fnan, fone)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem))
class TestDivisionIntegerOverflowsAndDivideByZero:
    """Integer division overflow (min // -1) and divide-by-zero tests.

    ``overflow_results`` maps each operation to the expected result in
    the same-dtype ("nocast") and promoted ("casted") cases.
    """
    result_type = namedtuple('result_type',
            ['nocast', 'casted'])
    helper_lambdas = {
        'zero': lambda dtype: 0,
        'min': lambda dtype: np.iinfo(dtype).min,
        'neg_min': lambda dtype: -np.iinfo(dtype).min,
        'min-zero': lambda dtype: (np.iinfo(dtype).min, 0),
        'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0),
    }
    overflow_results = {
        np.remainder: result_type(
            helper_lambdas['zero'], helper_lambdas['zero']),
        np.fmod: result_type(
            helper_lambdas['zero'], helper_lambdas['zero']),
        operator.mod: result_type(
            helper_lambdas['zero'], helper_lambdas['zero']),
        operator.floordiv: result_type(
            helper_lambdas['min'], helper_lambdas['neg_min']),
        np.floor_divide: result_type(
            helper_lambdas['min'], helper_lambdas['neg_min']),
        np.divmod: result_type(
            helper_lambdas['min-zero'], helper_lambdas['neg_min-zero'])
    }

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
    def test_signed_division_overflow(self, dtype):
        to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype)
        for op1, op2, extractor, operand_identifier in to_check:
            with pytest.warns(RuntimeWarning, match="overflow encountered"):
                res = op1 // op2

            assert res.dtype == op1.dtype
            assert extractor(res) == np.iinfo(op1.dtype).min

            # Remainder is well defined though, and does not warn:
            res = op1 % op2
            assert res.dtype == op1.dtype
            assert extractor(res) == 0
            # Check fmod as well:
            res = np.fmod(op1, op2)
            assert extractor(res) == 0

            # Divmod warns for the division part:
            with pytest.warns(RuntimeWarning, match="overflow encountered"):
                res1, res2 = np.divmod(op1, op2)

            assert res1.dtype == res2.dtype == op1.dtype
            assert extractor(res1) == np.iinfo(op1.dtype).min
            assert extractor(res2) == 0

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
    def test_divide_by_zero(self, dtype):
        # Note that the return value cannot be well defined here, but NumPy
        # currently uses 0 consistently. This could be changed.
        to_check = interesting_binop_operands(1, 0, dtype)
        for op1, op2, extractor, operand_identifier in to_check:
            with pytest.warns(RuntimeWarning, match="divide by zero"):
                res = op1 // op2

            assert res.dtype == op1.dtype
            assert extractor(res) == 0

            with pytest.warns(RuntimeWarning, match="divide by zero"):
                res1, res2 = np.divmod(op1, op2)

            assert res1.dtype == res2.dtype == op1.dtype
            assert extractor(res1) == 0
            assert extractor(res2) == 0

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize("dividend_dtype", sctypes['int'])
    @pytest.mark.parametrize("divisor_dtype", sctypes['int'])
    @pytest.mark.parametrize("operation",
            [np.remainder, np.fmod, np.divmod, np.floor_divide,
             operator.mod, operator.floordiv])
    @np.errstate(divide='warn', over='warn')
    def test_overflows(self, dividend_dtype, divisor_dtype, operation):
        # SIMD tries to perform the operation on as many elements as possible
        # that is a multiple of the register's size. We resort to the
        # default implementation for the leftover elements.
        # We try to cover all paths here.
        arrays = [np.array([np.iinfo(dividend_dtype).min]*i,
                           dtype=dividend_dtype) for i in range(1, 129)]
        divisor = np.array([-1], dtype=divisor_dtype)
        # If dividend is a larger type than the divisor (`else` case),
        # then, result will be a larger type than dividend and will not
        # result in an overflow for `divmod` and `floor_divide`.
        if np.dtype(dividend_dtype).itemsize >= np.dtype(
                divisor_dtype).itemsize and operation in (
                    np.divmod, np.floor_divide, operator.floordiv):
            with pytest.warns(
                    RuntimeWarning,
                    match="overflow encountered in"):
                result = operation(
                            dividend_dtype(np.iinfo(dividend_dtype).min),
                            divisor_dtype(-1)
                         )
                assert result == self.overflow_results[operation].nocast(
                        dividend_dtype)

            # Arrays
            for a in arrays:
                # In case of divmod, we need to flatten the result
                # column first as we get a column vector of quotient and
                # remainder and a normal flatten of the expected result.
                with pytest.warns(
                        RuntimeWarning,
                        match="overflow encountered in"):
                    result = np.array(operation(a, divisor)).flatten('f')
                    expected_array = np.array(
                        [self.overflow_results[operation].nocast(
                            dividend_dtype)]*len(a)).flatten()
                    assert_array_equal(result, expected_array)
        else:
            # Scalars
            result = operation(
                        dividend_dtype(np.iinfo(dividend_dtype).min),
                        divisor_dtype(-1)
                     )
            assert result == self.overflow_results[operation].casted(
                    dividend_dtype)

            # Arrays
            for a in arrays:
                # See above comment on flatten
                result = np.array(operation(a, divisor)).flatten('f')
                expected_array = np.array(
                    [self.overflow_results[operation].casted(
                        dividend_dtype)]*len(a)).flatten()
                assert_array_equal(result, expected_array)
class TestCbrt:
    """Tests for ``np.cbrt`` (real-valued cube root)."""

    def test_cbrt_scalar(self):
        # cbrt must invert cubing for a negative float32 scalar.
        assert_almost_equal(np.cbrt(np.float32(-2.5)**3), -2.5)

    def test_cbrt(self):
        vals = np.array([1., 2., -3., np.inf, -np.inf])
        # Round-trip: cbrt(v**3) recovers v, including negatives and infinities.
        assert_almost_equal(np.cbrt(vals**3), vals)
        # IEEE special values pass through: NaN -> NaN, +/-inf -> +/-inf.
        assert_(np.isnan(np.cbrt(np.nan)))
        assert_equal(np.cbrt(np.inf), np.inf)
        assert_equal(np.cbrt(-np.inf), -np.inf)
class TestPower:
    """Power operator / ``np.power`` across float, complex and integer dtypes,
    including special values, error cases and the scalar-exponent fast paths.
    """

    def test_power_float(self):
        """Integer and fractional exponents on floats; x**0.5 vs np.sqrt."""
        x = np.array([1., 2., 3.])
        assert_equal(x**0, [1., 1., 1.])
        assert_equal(x**1, x)
        assert_equal(x**2, [1., 4., 9.])
        y = x.copy()
        y **= 2
        assert_equal(y, [1., 4., 9.])
        assert_almost_equal(x**(-1), [1., 0.5, 1./3])
        assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])

        # x**0.5 must agree with np.sqrt for assorted alignments/sizes
        # (``_gen_alignment_data`` is a module-level helper).
        for out, inp, msg in _gen_alignment_data(dtype=np.float32,
                                                 type='unary',
                                                 max_size=11):
            exp = [ncu.sqrt(i) for i in inp]
            assert_almost_equal(inp**(0.5), exp, err_msg=msg)
            np.sqrt(inp, out=out)
            assert_equal(out, exp, err_msg=msg)

        for out, inp, msg in _gen_alignment_data(dtype=np.float64,
                                                 type='unary',
                                                 max_size=7):
            exp = [ncu.sqrt(i) for i in inp]
            assert_almost_equal(inp**(0.5), exp, err_msg=msg)
            np.sqrt(inp, out=out)
            assert_equal(out, exp, err_msg=msg)

    def test_power_complex(self):
        """Complex powers: integer, negative, fractional, and large exponents."""
        x = np.array([1+2j, 2+3j, 3+4j])
        assert_equal(x**0, [1., 1., 1.])
        assert_equal(x**1, x)
        assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
        assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
        assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
        assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
        assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
        assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
                                      (-117-44j)/15625])
        assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
                                       ncu.sqrt(3+4j)])
        # Normalize by the first element so magnitudes stay comparable.
        norm = 1./((x**14)[0])
        assert_almost_equal(x**14 * norm,
                [i * norm for i in [-76443+16124j, 23161315+58317492j,
                                    5583548873 + 2465133864j]])

        # Ticket #836
        def assert_complex_equal(x, y):
            assert_array_equal(x.real, y.real)
            assert_array_equal(x.imag, y.imag)

        for z in [complex(0, np.inf), complex(1, np.inf)]:
            z = np.array([z], dtype=np.complex128)
            with np.errstate(invalid="ignore"):
                assert_complex_equal(z**1, z)
                assert_complex_equal(z**2, z*z)
                assert_complex_equal(z**3, z*z*z)

    def test_power_zero(self):
        """0**p for complex zero base: zero, one, or complex NaN by exponent."""
        # ticket #1271
        zero = np.array([0j])
        one = np.array([1+0j])
        cnan = np.array([complex(np.nan, np.nan)])
        # FIXME cinf not tested.
        #cinf = np.array([complex(np.inf, 0)])

        def assert_complex_equal(x, y):
            x, y = np.asarray(x), np.asarray(y)
            assert_array_equal(x.real, y.real)
            assert_array_equal(x.imag, y.imag)

        # positive powers
        for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
            assert_complex_equal(np.power(zero, p), zero)

        # zero power
        assert_complex_equal(np.power(zero, 0), one)
        with np.errstate(invalid="ignore"):
            assert_complex_equal(np.power(zero, 0+1j), cnan)

            # negative power
            for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
                assert_complex_equal(np.power(zero, -p), cnan)
            assert_complex_equal(np.power(zero, -1+0.2j), cnan)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_zero_power_nonzero(self):
        """0**z warns only when Re(z) <= 0 (and z nonzero); result is NaN+NaNj."""
        # Testing 0^{Non-zero} issue 18378
        zero = np.array([0.0+0.0j])
        cnan = np.array([complex(np.nan, np.nan)])

        def assert_complex_equal(x, y):
            assert_array_equal(x.real, y.real)
            assert_array_equal(x.imag, y.imag)

        #Complex powers with positive real part will not generate a warning
        assert_complex_equal(np.power(zero, 1+4j), zero)
        assert_complex_equal(np.power(zero, 2-3j), zero)
        #Testing zero values when real part is greater than zero
        assert_complex_equal(np.power(zero, 1+1j), zero)
        assert_complex_equal(np.power(zero, 1+0j), zero)
        assert_complex_equal(np.power(zero, 1-1j), zero)
        #Complex powers will negative real part or 0 (provided imaginary
        # part is not zero) will generate a NAN and hence a RUNTIME warning
        with pytest.warns(expected_warning=RuntimeWarning) as r:
            assert_complex_equal(np.power(zero, -1+1j), cnan)
            assert_complex_equal(np.power(zero, -2-3j), cnan)
            assert_complex_equal(np.power(zero, -7+0j), cnan)
            assert_complex_equal(np.power(zero, 0+1j), cnan)
            assert_complex_equal(np.power(zero, 0-1j), cnan)
        # exactly one warning per call above
        assert len(r) == 5

    def test_fast_power(self):
        """Scalar float exponent fast path keeps dtype and never aliases x."""
        x = np.array([1, 2, 3], np.int16)
        res = x**2.0
        assert_((x**2.00001).dtype is res.dtype)
        assert_array_equal(res, [1, 4, 9])
        # check the inplace operation on the casted copy doesn't mess with x
        assert_(not np.may_share_memory(res, x))
        assert_array_equal(x, [1, 2, 3])
        # Check that the fast path ignores 1-element not 0-d arrays
        res = x ** np.array([[[2]]])
        assert_equal(res.shape, (1, 1, 3))

    def test_integer_power(self):
        # 15**15 fits in int64; exercises the integer pow loop.
        a = np.array([15, 15], 'i8')
        b = np.power(a, a)
        assert_equal(b, [437893890380859375, 437893890380859375])

    def test_integer_power_with_integer_zero_exponent(self):
        # x**0 == 1 for all integer dtypes, including negative bases.
        dtypes = np.typecodes['Integer']
        for dt in dtypes:
            arr = np.arange(-10, 10, dtype=dt)
            assert_equal(np.power(arr, 0), np.ones_like(arr))

        dtypes = np.typecodes['UnsignedInteger']
        for dt in dtypes:
            arr = np.arange(10, dtype=dt)
            assert_equal(np.power(arr, 0), np.ones_like(arr))

    def test_integer_power_of_1(self):
        dtypes = np.typecodes['AllInteger']
        for dt in dtypes:
            arr = np.arange(10, dtype=dt)
            assert_equal(np.power(1, arr), np.ones_like(arr))

    def test_integer_power_of_zero(self):
        dtypes = np.typecodes['AllInteger']
        for dt in dtypes:
            arr = np.arange(1, 10, dtype=dt)
            assert_equal(np.power(0, arr), np.zeros_like(arr))

    def test_integer_to_negative_power(self):
        # Negative exponents are invalid for integer dtypes -> ValueError.
        dtypes = np.typecodes['Integer']
        for dt in dtypes:
            a = np.array([0, 1, 2, 3], dtype=dt)
            b = np.array([0, 1, 2, -3], dtype=dt)
            one = np.array(1, dtype=dt)
            minusone = np.array(-1, dtype=dt)
            assert_raises(ValueError, np.power, a, b)
            assert_raises(ValueError, np.power, a, minusone)
            assert_raises(ValueError, np.power, one, b)
            assert_raises(ValueError, np.power, one, minusone)

    def test_float_to_inf_power(self):
        # x**(+/-inf): |x|>1 -> inf/0, |x|==1 -> 1, infinite base -> inf/0.
        for dt in [np.float32, np.float64]:
            a = np.array([1, 1, 2, 2, -2, -2, np.inf, -np.inf], dt)
            b = np.array([np.inf, -np.inf, np.inf, -np.inf,
                          np.inf, -np.inf, np.inf, -np.inf], dt)
            r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt)
            assert_equal(np.power(a, b), r)

    def test_power_fast_paths(self):
        """Accuracy (<=1 ulp) of the exponent==2.0 and ==0.5 fast paths."""
        # gh-26055
        for dt in [np.float32, np.float64]:
            a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt)
            expected = np.array([0.0, 1.21, 4., 1.44e+26, 100, np.inf, np.inf])
            result = np.power(a, 2.)
            assert_array_max_ulp(result, expected.astype(dt), maxulp=1)

            a = np.array([0, 1.1, 2, 12e12], dt)
            expected = np.sqrt(a).astype(dt)
            result = np.power(a, 0.5)
            assert_array_max_ulp(result, expected, maxulp=1)
class TestFloat_power:
    """``np.float_power`` promotes every input to at least float64."""

    def test_type_conversion(self):
        # Input typecodes paired with the promoted result typecode that
        # float_power must produce (d for everything up to double,
        # g/D/G for extended and complex inputs).
        pairs = zip('?bhilBHILefdgFDG', 'ddddddddddddgDDG')
        for code_in, code_out in pairs:
            err = "dtin: %s, dtout: %s" % (code_in, code_out)
            operand = np.ones(1, dtype=code_in)
            promoted = np.float_power(operand, operand)
            assert_(promoted.dtype.name == np.dtype(code_out).name, err)
class TestLog2:
    """``np.log2``: exact powers of two, integer exponents, special values."""

    @pytest.mark.parametrize('dt', ['f', 'd', 'g'])
    def test_log2_values(self, dt):
        # log2 of exact powers of two is exact in every float dtype.
        x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
        y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        xf = np.array(x, dtype=dt)
        yf = np.array(y, dtype=dt)
        assert_almost_equal(np.log2(xf), yf)

    @pytest.mark.parametrize("i", range(1, 65))
    def test_log2_ints(self, i):
        # a good log2 implementation should provide this,
        # might fail on OS with bad libm
        v = np.log2(2.**i)
        assert_equal(v, float(i), err_msg='at exponent %d' % i)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_log2_special(self):
        """Special values: 1 -> 0, inf -> inf, NaN/negative -> NaN with warning."""
        assert_equal(np.log2(1.), 0.)
        assert_equal(np.log2(np.inf), np.inf)
        assert_(np.isnan(np.log2(np.nan)))

        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.log2(-1.)))
            assert_(np.isnan(np.log2(-np.inf)))
            assert_equal(np.log2(0.), -np.inf)
            # each of the three calls above must have warned exactly once,
            # in order: invalid, invalid, divide-by-zero
            assert_(w[0].category is RuntimeWarning)
            assert_(w[1].category is RuntimeWarning)
            assert_(w[2].category is RuntimeWarning)
class TestExp2:
    """Basic value checks for ``np.exp2``."""

    def test_exp2_values(self):
        exponents = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
        # exp2 of small integers is exact in single, double and extended.
        for code in ['f', 'd', 'g']:
            arg = np.array(exponents, dtype=code)
            want = np.array(expected, dtype=code)
            assert_almost_equal(np.exp2(arg), want)
class TestLogAddExp2(_FilterInvalids):
    """``np.logaddexp2``: log2(2**x + 2**y) computed without overflow.

    Inherits warning filtering from ``_FilterInvalids`` (defined earlier in
    this file).
    """

    # Need test for intermediate precisions
    def test_logaddexp2_values(self):
        # logaddexp2(log2 x, log2 y) == log2(x + y) for small exact values;
        # precision (decimal places) scales with the dtype.
        x = [1, 2, 3, 4, 5]
        y = [5, 4, 3, 2, 1]
        z = [6, 6, 6, 6, 6]
        for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
            xf = np.log2(np.array(x, dtype=dt))
            yf = np.log2(np.array(y, dtype=dt))
            zf = np.log2(np.array(z, dtype=dt))
            assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)

    def test_logaddexp2_range(self):
        # Huge-magnitude inputs: result is (approximately) the larger operand,
        # with no intermediate overflow.
        x = [1000000, -1000000, 1000200, -1000200]
        y = [1000200, -1000200, 1000000, -1000000]
        z = [1000200, -1000000, 1000200, -1000000]
        for dt in ['f', 'd', 'g']:
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)

    def test_inf(self):
        # Infinite operands must not raise 'invalid' (e.g. inf - inf paths).
        inf = np.inf
        x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
        y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
        z = [inf, inf, inf, -inf, inf, inf, 1, 1]
        with np.errstate(invalid='raise'):
            for dt in ['f', 'd', 'g']:
                logxf = np.array(x, dtype=dt)
                logyf = np.array(y, dtype=dt)
                logzf = np.array(z, dtype=dt)
                assert_equal(np.logaddexp2(logxf, logyf), logzf)

    def test_nan(self):
        # NaN propagates regardless of the other operand.
        assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
        assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
        assert_(np.isnan(np.logaddexp2(np.nan, 0)))
        assert_(np.isnan(np.logaddexp2(0, np.nan)))
        assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))

    def test_reduce(self):
        # -inf (log2 of 0) is the identity element of the reduction.
        assert_equal(np.logaddexp2.identity, -np.inf)
        assert_equal(np.logaddexp2.reduce([]), -np.inf)
        assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf)
        assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0)
class TestLog:
    """``np.log``: basic values, dtype maxima, strided inputs, and
    high-precision complex reference values."""

    def test_log_values(self):
        # log of powers of two against n * ln(2).
        x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
        y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        for dt in ['f', 'd', 'g']:
            log2_ = 0.69314718055994530943
            xf = np.array(x, dtype=dt)
            yf = np.array(y, dtype=dt)*log2_
            assert_almost_equal(np.log(xf), yf)

        # test aliasing(issue #17761)
        x = np.array([2, 0.937500, 3, 0.947500, 1.054697])
        xf = np.log(x)
        assert_almost_equal(np.log(x, out=x), xf)

    def test_log_values_maxofdtype(self):
        # test log() of max for dtype does not raise
        dtypes = [np.float32, np.float64]
        # This is failing at least on linux aarch64 (see gh-25460), and on most
        # other non x86-64 platforms checking `longdouble` isn't too useful as
        # it's an alias for float64.
        if platform.machine() == 'x86_64':
            dtypes += [np.longdouble]

        for dt in dtypes:
            with np.errstate(all='raise'):
                x = np.finfo(dt).max
                np.log(x)

    def test_log_strides(self):
        """Strided SIMD paths agree with the contiguous result to 2 ulp."""
        np.random.seed(42)
        strides = np.array([-4,-3,-2,-1,1,2,3,4])
        sizes = np.arange(2,100)
        for ii in sizes:
            x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii))
            x_special = x_f64.copy()
            # plant exact 1.0 values (log(1) == 0) among random inputs
            x_special[3:-1:4] = 1.0
            y_true = np.log(x_f64)
            y_special = np.log(x_special)
            for jj in strides:
                assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2)
                assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2)

    # Reference values were computed with mpmath, with mp.dps = 200.
    @pytest.mark.parametrize(
        'z, wref',
        [(1 + 1e-12j, 5e-25 + 1e-12j),
         (1.000000000000001 + 3e-08j,
          1.5602230246251546e-15 + 2.999999999999996e-08j),
         (0.9999995000000417 + 0.0009999998333333417j,
          7.831475869017683e-18 + 0.001j),
         (0.9999999999999996 + 2.999999999999999e-08j,
          5.9107901499372034e-18 + 3e-08j),
         (0.99995000042 - 0.009999833j,
          -7.015159763822903e-15 - 0.009999999665816696j)],
    )
    def test_log_precision_float64(self, z, wref):
        # complex log near |z| == 1, where naive formulas lose precision
        w = np.log(z)
        assert_allclose(w, wref, rtol=1e-15)

    # Reference values were computed with mpmath, with mp.dps = 200.
    @pytest.mark.parametrize(
        'z, wref',
        [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12+3e-06j)),
         (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)),
         (np.complex64(0.9999999 + 1e-06j),
          np.complex64(-1.192088e-07+1.0000001e-06j))],
    )
    def test_log_precision_float32(self, z, wref):
        w = np.log(z)
        assert_allclose(w, wref, rtol=1e-6)
class TestExp:
    """Value and strided-input checks for ``np.exp``."""

    def test_exp_values(self):
        # exp(n * ln 2) must reproduce the exact powers of two.
        targets = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
        exponents = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        for code in ['f', 'd', 'g']:
            ln2 = 0.69314718055994530943
            want = np.array(targets, dtype=code)
            args = np.array(exponents, dtype=code) * ln2
            assert_almost_equal(np.exp(args), want)

    def test_exp_strides(self):
        # Strided SIMD paths must agree with the contiguous result to 2 ulp.
        np.random.seed(42)
        step_set = np.array([-4, -3, -2, -1, 1, 2, 3, 4])
        for length in np.arange(2, 100):
            data = np.float64(
                np.random.uniform(low=0.01, high=709.1, size=length))
            reference = np.exp(data)
            for step in step_set:
                assert_array_almost_equal_nulp(
                    np.exp(data[::step]), reference[::step], nulp=2)
class TestSpecialFloats:
    """IEEE special-value behaviour (NaN, +/-inf, +/-0) and floating-point
    exception raising for the transcendental ufuncs, across float16/32/64
    (and longdouble where applicable)."""

    def test_exp_values(self):
        # exp: NaN -> NaN, inf -> inf, -inf -> 0; none of these may raise
        # under/overflow.
        with np.errstate(under='raise', over='raise'):
            x = [np.nan,  np.nan, np.inf, 0.]
            y = [np.nan, -np.nan, np.inf, -np.inf]
            for dt in ['e', 'f', 'd', 'g']:
                xf = np.array(x, dtype=dt)
                yf = np.array(y, dtype=dt)
                assert_equal(np.exp(yf), xf)

    # See: https://github.com/numpy/numpy/issues/19192
    @pytest.mark.xfail(
        _glibc_older_than("2.17"),
        reason="Older glibc versions may not raise appropriate FP exceptions"
    )
    def test_exp_exceptions(self):
        """Finite arguments past the dtype range must raise over/underflow."""
        with np.errstate(over='raise'):
            assert_raises(FloatingPointError, np.exp, np.float16(11.0899))
            assert_raises(FloatingPointError, np.exp, np.float32(100.))
            assert_raises(FloatingPointError, np.exp, np.float32(1E19))
            assert_raises(FloatingPointError, np.exp, np.float64(800.))
            assert_raises(FloatingPointError, np.exp, np.float64(1E19))

        with np.errstate(under='raise'):
            assert_raises(FloatingPointError, np.exp, np.float16(-17.5))
            assert_raises(FloatingPointError, np.exp, np.float32(-1000.))
            assert_raises(FloatingPointError, np.exp, np.float32(-1E19))
            assert_raises(FloatingPointError, np.exp, np.float64(-1000.))
            assert_raises(FloatingPointError, np.exp, np.float64(-1E19))

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_log_values(self):
        """Special values and FP exceptions for log, log2, log10, log1p."""
        with np.errstate(all='ignore'):
            x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan]
            y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0]
            y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0]
            for dt in ['e', 'f', 'd', 'g']:
                xf = np.array(x, dtype=dt)
                yf = np.array(y, dtype=dt)
                yf1p = np.array(y1p, dtype=dt)
                assert_equal(np.log(yf), xf)
                assert_equal(np.log2(yf), xf)
                assert_equal(np.log10(yf), xf)
                assert_equal(np.log1p(yf1p), xf)

        # log of the pole (0, or -1 for log1p) raises divide-by-zero
        with np.errstate(divide='raise'):
            for dt in ['e', 'f', 'd']:
                assert_raises(FloatingPointError, np.log,
                              np.array(0.0, dtype=dt))
                assert_raises(FloatingPointError, np.log2,
                              np.array(0.0, dtype=dt))
                assert_raises(FloatingPointError, np.log10,
                              np.array(0.0, dtype=dt))
                assert_raises(FloatingPointError, np.log1p,
                              np.array(-1.0, dtype=dt))

        # log outside the domain raises 'invalid'
        with np.errstate(invalid='raise'):
            for dt in ['e', 'f', 'd']:
                assert_raises(FloatingPointError, np.log,
                              np.array(-np.inf, dtype=dt))
                assert_raises(FloatingPointError, np.log,
                              np.array(-1.0, dtype=dt))
                assert_raises(FloatingPointError, np.log2,
                              np.array(-np.inf, dtype=dt))
                assert_raises(FloatingPointError, np.log2,
                              np.array(-1.0, dtype=dt))
                assert_raises(FloatingPointError, np.log10,
                              np.array(-np.inf, dtype=dt))
                assert_raises(FloatingPointError, np.log10,
                              np.array(-1.0, dtype=dt))
                assert_raises(FloatingPointError, np.log1p,
                              np.array(-np.inf, dtype=dt))
                assert_raises(FloatingPointError, np.log1p,
                              np.array(-2.0, dtype=dt))

        # See https://github.com/numpy/numpy/issues/18005
        with assert_no_warnings():
            a = np.array(1e9, dtype='float32')
            np.log(a)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize('dtype', ['e', 'f', 'd', 'g'])
    def test_sincos_values(self, dtype):
        # sin/cos of NaN and +/-inf are all NaN
        with np.errstate(all='ignore'):
            x = [np.nan, np.nan, np.nan, np.nan]
            y = [np.nan, -np.nan, np.inf, -np.inf]
            xf = np.array(x, dtype=dtype)
            yf = np.array(y, dtype=dtype)
            assert_equal(np.sin(yf), xf)
            assert_equal(np.cos(yf), xf)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.xfail(
        sys.platform.startswith("darwin"),
        reason="underflow is triggered for scalar 'sin'"
    )
    def test_sincos_underflow(self):
        # a tiny-but-normal argument must not trigger a spurious underflow
        with np.errstate(under='raise'):
            underflow_trigger = np.array(
                float.fromhex("0x1.f37f47a03f82ap-511"),
                dtype=np.float64
            )
            np.sin(underflow_trigger)
            np.cos(underflow_trigger)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize('callable', [np.sin, np.cos])
    @pytest.mark.parametrize('dtype', ['e', 'f', 'd'])
    @pytest.mark.parametrize('value', [np.inf, -np.inf])
    def test_sincos_errors(self, callable, dtype, value):
        # sin/cos of +/-inf raises 'invalid'
        with np.errstate(invalid='raise'):
            assert_raises(FloatingPointError, callable,
                          np.array([value], dtype=dtype))

    @pytest.mark.parametrize('callable', [np.sin, np.cos])
    @pytest.mark.parametrize('dtype', ['f', 'd'])
    @pytest.mark.parametrize('stride', [-1, 1, 2, 4, 5])
    def test_sincos_overlaps(self, callable, dtype, stride):
        # in-place output overlapping a strided input must match the
        # out-of-place result
        N = 100
        M = N // abs(stride)
        rng = np.random.default_rng(42)
        x = rng.standard_normal(N, dtype)
        y = callable(x[::stride])
        callable(x[::stride], out=x[:M])
        assert_equal(x[:M], y)

    @pytest.mark.parametrize('dt', ['e', 'f', 'd', 'g'])
    def test_sqrt_values(self, dt):
        # sqrt: NaN -> NaN, inf -> inf, -inf -> NaN, 0 -> 0
        with np.errstate(all='ignore'):
            x = [np.nan, np.nan, np.inf, np.nan, 0.]
            y = [np.nan, -np.nan, np.inf, -np.inf, 0.]
            xf = np.array(x, dtype=dt)
            yf = np.array(y, dtype=dt)
            assert_equal(np.sqrt(yf), xf)

        # with np.errstate(invalid='raise'):
        #     assert_raises(
        #         FloatingPointError, np.sqrt, np.array(-100., dtype=dt)
        #     )

    def test_abs_values(self):
        # abs strips the sign of everything, including -NaN, -inf and -0.
        x = [np.nan,  np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0]
        y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0]
        for dt in ['e', 'f', 'd', 'g']:
            xf = np.array(x, dtype=dt)
            yf = np.array(y, dtype=dt)
            assert_equal(np.abs(yf), xf)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_square_values(self):
        # square: NaN -> NaN, +/-inf -> inf; finite overflow raises
        x = [np.nan,  np.nan, np.inf, np.inf]
        y = [np.nan, -np.nan, np.inf, -np.inf]
        with np.errstate(all='ignore'):
            for dt in ['e', 'f', 'd', 'g']:
                xf = np.array(x, dtype=dt)
                yf = np.array(y, dtype=dt)
                assert_equal(np.square(yf), xf)

        with np.errstate(over='raise'):
            assert_raises(FloatingPointError, np.square,
                          np.array(1E3, dtype='e'))
            assert_raises(FloatingPointError, np.square,
                          np.array(1E32, dtype='f'))
            assert_raises(FloatingPointError, np.square,
                          np.array(1E200, dtype='d'))

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_reciprocal_values(self):
        # reciprocal maps 0 <-> inf (preserving sign); 1/-0 raises
        # divide-by-zero when requested
        with np.errstate(all='ignore'):
            x = [np.nan,  np.nan, 0.0, -0.0, np.inf, -np.inf]
            y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]
            for dt in ['e', 'f', 'd', 'g']:
                xf = np.array(x, dtype=dt)
                yf = np.array(y, dtype=dt)
                assert_equal(np.reciprocal(yf), xf)

        with np.errstate(divide='raise'):
            for dt in ['e', 'f', 'd', 'g']:
                assert_raises(FloatingPointError, np.reciprocal,
                              np.array(-0.0, dtype=dt))

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_tan(self):
        # tan: NaN -> NaN, +/-0 passes through, +/-inf -> NaN ('invalid')
        with np.errstate(all='ignore'):
            in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf]
            out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan]
            for dt in ['e', 'f', 'd']:
                in_arr = np.array(in_, dtype=dt)
                out_arr = np.array(out, dtype=dt)
                assert_equal(np.tan(in_arr), out_arr)

        with np.errstate(invalid='raise'):
            for dt in ['e', 'f', 'd']:
                assert_raises(FloatingPointError, np.tan,
                              np.array(np.inf, dtype=dt))
                assert_raises(FloatingPointError, np.tan,
                              np.array(-np.inf, dtype=dt))

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_arcsincos(self):
        # arcsin/arccos: anything outside [-1, 1] (incl. inf) is NaN/'invalid'
        with np.errstate(all='ignore'):
            in_ = [np.nan, -np.nan, np.inf, -np.inf]
            out = [np.nan, np.nan, np.nan, np.nan]
            for dt in ['e', 'f', 'd']:
                in_arr = np.array(in_, dtype=dt)
                out_arr = np.array(out, dtype=dt)
                assert_equal(np.arcsin(in_arr), out_arr)
                assert_equal(np.arccos(in_arr), out_arr)

        for callable in [np.arcsin, np.arccos]:
            for value in [np.inf, -np.inf, 2.0, -2.0]:
                for dt in ['e', 'f', 'd']:
                    with np.errstate(invalid='raise'):
                        assert_raises(FloatingPointError, callable,
                                      np.array(value, dtype=dt))

    def test_arctan(self):
        # arctan is defined everywhere; only NaN propagates
        with np.errstate(all='ignore'):
            in_ = [np.nan, -np.nan]
            out = [np.nan, np.nan]
            for dt in ['e', 'f', 'd']:
                in_arr = np.array(in_, dtype=dt)
                out_arr = np.array(out, dtype=dt)
                assert_equal(np.arctan(in_arr), out_arr)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_sinh(self):
        # sinh is odd: +/-inf pass through; large finite args overflow
        in_ = [np.nan, -np.nan, np.inf, -np.inf]
        out = [np.nan, np.nan, np.inf, -np.inf]
        for dt in ['e', 'f', 'd']:
            in_arr = np.array(in_, dtype=dt)
            out_arr = np.array(out, dtype=dt)
            assert_equal(np.sinh(in_arr), out_arr)

        with np.errstate(over='raise'):
            assert_raises(FloatingPointError, np.sinh,
                          np.array(12.0, dtype='e'))
            assert_raises(FloatingPointError, np.sinh,
                          np.array(120.0, dtype='f'))
            assert_raises(FloatingPointError, np.sinh,
                          np.array(1200.0, dtype='d'))

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.skipif('bsd' in sys.platform,
            reason="fallback implementation may not raise, see gh-2487")
    def test_cosh(self):
        # cosh is even: both infinities map to +inf
        in_ = [np.nan, -np.nan, np.inf, -np.inf]
        out = [np.nan, np.nan, np.inf, np.inf]
        for dt in ['e', 'f', 'd']:
            in_arr = np.array(in_, dtype=dt)
            out_arr = np.array(out, dtype=dt)
            assert_equal(np.cosh(in_arr), out_arr)

        with np.errstate(over='raise'):
            assert_raises(FloatingPointError, np.cosh,
                          np.array(12.0, dtype='e'))
            assert_raises(FloatingPointError, np.cosh,
                          np.array(120.0, dtype='f'))
            assert_raises(FloatingPointError, np.cosh,
                          np.array(1200.0, dtype='d'))

    def test_tanh(self):
        # tanh saturates at +/-1 for infinite arguments
        in_ = [np.nan, -np.nan, np.inf, -np.inf]
        out = [np.nan, np.nan, 1.0, -1.0]
        for dt in ['e', 'f', 'd']:
            in_arr = np.array(in_, dtype=dt)
            out_arr = np.array(out, dtype=dt)
            assert_array_max_ulp(np.tanh(in_arr), out_arr, 3)

    def test_arcsinh(self):
        # arcsinh is defined on all of R: infinities pass through
        in_ = [np.nan, -np.nan, np.inf, -np.inf]
        out = [np.nan, np.nan, np.inf, -np.inf]
        for dt in ['e', 'f', 'd']:
            in_arr = np.array(in_, dtype=dt)
            out_arr = np.array(out, dtype=dt)
            assert_equal(np.arcsinh(in_arr), out_arr)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_arccosh(self):
        # arccosh domain is [1, inf): 1 -> 0, anything below is NaN/'invalid'
        with np.errstate(all='ignore'):
            in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0]
            out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan]
            for dt in ['e', 'f', 'd']:
                in_arr = np.array(in_, dtype=dt)
                out_arr = np.array(out, dtype=dt)
                assert_equal(np.arccosh(in_arr), out_arr)

        for value in [0.0, -np.inf]:
            with np.errstate(invalid='raise'):
                for dt in ['e', 'f', 'd']:
                    assert_raises(FloatingPointError, np.arccosh,
                                  np.array(value, dtype=dt))

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_arctanh(self):
        # arctanh domain is (-1, 1): poles at +/-1, NaN outside
        with np.errstate(all='ignore'):
            in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0]
            out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan]
            for dt in ['e', 'f', 'd']:
                in_arr = np.array(in_, dtype=dt)
                out_arr = np.array(out, dtype=dt)
                assert_equal(np.arctanh(in_arr), out_arr)

        for value in [1.01, np.inf, -np.inf, 1.0, -1.0]:
            with np.errstate(invalid='raise', divide='raise'):
                for dt in ['e', 'f', 'd']:
                    assert_raises(FloatingPointError, np.arctanh,
                                  np.array(value, dtype=dt))

        # Make sure glibc < 2.18 atanh is not used, issue 25087
        assert np.signbit(np.arctanh(-1j).real)

    # See: https://github.com/numpy/numpy/issues/20448
    @pytest.mark.xfail(
        _glibc_older_than("2.17"),
        reason="Older glibc versions may not raise appropriate FP exceptions"
    )
    def test_exp2(self):
        # exp2: inf -> inf, -inf -> 0; huge finite args raise over/underflow
        with np.errstate(all='ignore'):
            in_ = [np.nan, -np.nan, np.inf, -np.inf]
            out = [np.nan, np.nan, np.inf, 0.0]
            for dt in ['e', 'f', 'd']:
                in_arr = np.array(in_, dtype=dt)
                out_arr = np.array(out, dtype=dt)
                assert_equal(np.exp2(in_arr), out_arr)

        for value in [2000.0, -2000.0]:
            with np.errstate(over='raise', under='raise'):
                for dt in ['e', 'f', 'd']:
                    assert_raises(FloatingPointError, np.exp2,
                                  np.array(value, dtype=dt))

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_expm1(self):
        # expm1: inf -> inf, -inf -> -1 (not 0); overflow only for 'e'/'f'
        with np.errstate(all='ignore'):
            in_ = [np.nan, -np.nan, np.inf, -np.inf]
            out = [np.nan, np.nan, np.inf, -1.0]
            for dt in ['e', 'f', 'd']:
                in_arr = np.array(in_, dtype=dt)
                out_arr = np.array(out, dtype=dt)
                assert_equal(np.expm1(in_arr), out_arr)

        for value in [200.0, 2000.0]:
            with np.errstate(over='raise'):
                for dt in ['e', 'f']:
                    assert_raises(FloatingPointError, np.expm1,
                                  np.array(value, dtype=dt))

    # test to ensure no spurious FP exceptions are raised due to SIMD
    # Escape lists: ufuncs for which the given input class legitimately
    # raises, so they are excluded from the "no warning" check below.
    INF_INVALID_ERR = [
        np.cos, np.sin, np.tan, np.arccos, np.arcsin, np.spacing, np.arctanh
    ]
    NEG_INVALID_ERR = [
        np.log, np.log2, np.log10, np.log1p, np.sqrt, np.arccosh,
        np.arctanh
    ]
    ONE_INVALID_ERR = [
        np.arctanh,
    ]
    LTONE_INVALID_ERR = [
        np.arccosh,
    ]
    BYZERO_ERR = [
        np.log, np.log2, np.log10, np.reciprocal, np.arccosh
    ]

    @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP)
    @pytest.mark.parametrize("dtype", ('e', 'f', 'd'))
    @pytest.mark.parametrize("data, escape", (
        ([0.03], LTONE_INVALID_ERR),
        ([0.03]*32, LTONE_INVALID_ERR),
        # neg
        ([-1.0], NEG_INVALID_ERR),
        ([-1.0]*32, NEG_INVALID_ERR),
        # flat
        ([1.0], ONE_INVALID_ERR),
        ([1.0]*32, ONE_INVALID_ERR),
        # zero
        ([0.0], BYZERO_ERR),
        ([0.0]*32, BYZERO_ERR),
        ([-0.0], BYZERO_ERR),
        ([-0.0]*32, BYZERO_ERR),
        # nan
        ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR),
        ([0.5, 0.5, 0.5, np.nan]*32, LTONE_INVALID_ERR),
        ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR),
        ([np.nan, 1.0, 1.0, 1.0]*32, ONE_INVALID_ERR),
        ([np.nan], []),
        ([np.nan]*32, []),
        # inf
        ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR),
        ([0.5, 0.5, 0.5, np.inf]*32, INF_INVALID_ERR + LTONE_INVALID_ERR),
        ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR),
        ([np.inf, 1.0, 1.0, 1.0]*32, INF_INVALID_ERR),
        ([np.inf], INF_INVALID_ERR),
        ([np.inf]*32, INF_INVALID_ERR),
        # ninf
        ([0.5, 0.5, 0.5, -np.inf],
         NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR),
        ([0.5, 0.5, 0.5, -np.inf]*32,
         NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR),
        ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR),
        ([-np.inf, 1.0, 1.0, 1.0]*32, NEG_INVALID_ERR + INF_INVALID_ERR),
        ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR),
        ([-np.inf]*32, NEG_INVALID_ERR + INF_INVALID_ERR),
    ))
    def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape):
        """No spurious FP warnings from SIMD lanes on special-value inputs."""
        if escape and ufunc in escape:
            return
        # FIXME: NAN raises FP invalid exception:
        #  - ceil/float16 on MSVC:32-bit
        #  - spacing/float16 on almost all platforms
        if ufunc in (np.spacing, np.ceil) and dtype == 'e':
            return
        array = np.array(data, dtype=dtype)
        with assert_no_warnings():
            ufunc(array)

    @pytest.mark.parametrize("dtype", ('e', 'f', 'd'))
    def test_divide_spurious_fpexception(self, dtype):
        dt = np.dtype(dtype)
        dt_info = np.finfo(dt)
        subnorm = dt_info.smallest_subnormal
        # Verify a bug fix caused due to filling the remaining lanes of the
        # partially loaded dividend SIMD vector with ones, which leads to
        # raising an overflow warning when the divisor is denormal.
        # see https://github.com/numpy/numpy/issues/25097
        with assert_no_warnings():
            np.zeros(128 + 1, dtype=dt) / subnorm
class TestFPClass:
    """Classification ufuncs (isnan/isinf/signbit/isfinite) on strided and
    non-contiguous inputs, including subnormals and signed zeros/NaNs."""

    @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1,
                                        2, 4, 5, 6, 7, 8, 9, 10])
    def test_fpclass(self, stride):
        # Last two entries are subnormals (float64 resp. float32).
        arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d')
        arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f')
        nan     = np.array([True, True, False, False, False, False, False, False, False, False])
        inf     = np.array([False, False, True, True, False, False, False, False, False, False])
        sign    = np.array([False, True, False, True, True, False, True, False, False, True])
        finite  = np.array([False, False, False, False, True, True, True, True, True, True])
        assert_equal(np.isnan(arr_f32[::stride]), nan[::stride])
        assert_equal(np.isnan(arr_f64[::stride]), nan[::stride])
        assert_equal(np.isinf(arr_f32[::stride]), inf[::stride])
        assert_equal(np.isinf(arr_f64[::stride]), inf[::stride])
        if platform.machine() == 'riscv64':
            # On RISC-V, many operations that produce NaNs, such as converting
            # a -NaN from f64 to f32, return a canonical NaN.  The canonical
            # NaNs are always positive. See section 11.3 NaN Generation and
            # Propagation of the RISC-V Unprivileged ISA for more details.
            # We disable the sign test on riscv64 for -np.nan as we
            # cannot assume that its sign will be honoured in these tests.
            arr_f64_rv = np.copy(arr_f64)
            arr_f32_rv = np.copy(arr_f32)
            arr_f64_rv[1] = -1.0
            arr_f32_rv[1] = -1.0
            assert_equal(np.signbit(arr_f32_rv[::stride]), sign[::stride])
            assert_equal(np.signbit(arr_f64_rv[::stride]), sign[::stride])
        else:
            assert_equal(np.signbit(arr_f32[::stride]), sign[::stride])
            assert_equal(np.signbit(arr_f64[::stride]), sign[::stride])
        assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride])
        assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride])

    @pytest.mark.parametrize("dtype", ['d', 'f'])
    def test_fp_noncontiguous(self, dtype):
        """isnan/isinf/signbit/isfinite with every mix of (non)contiguous
        input and output buffers, plus an n-d strided case."""
        data = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0,
                         1.0, -0.0, 0.0, 2.2251e-308,
                         -2.2251e-308], dtype=dtype)
        nan = np.array([True, True, False, False, False, False,
                        False, False, False, False])
        inf = np.array([False, False, True, True, False, False,
                        False, False, False, False])
        sign = np.array([False, True, False, True, True, False,
                         True, False, False, True])
        finite = np.array([False, False, False, False, True, True,
                           True, True, True, True])
        out = np.ndarray(data.shape, dtype='bool')
        ncontig_in = data[1::3]
        ncontig_out = out[1::3]
        contig_in = np.array(ncontig_in)

        if platform.machine() == 'riscv64':
            # Disable the -np.nan signbit tests on riscv64.  See comments in
            # test_fpclass for more details.
            data_rv = np.copy(data)
            data_rv[1] = -1.0
            ncontig_sign_in = data_rv[1::3]
            contig_sign_in = np.array(ncontig_sign_in)
        else:
            ncontig_sign_in = ncontig_in
            contig_sign_in = contig_in

        assert_equal(ncontig_in.flags.c_contiguous, False)
        assert_equal(ncontig_out.flags.c_contiguous, False)
        assert_equal(contig_in.flags.c_contiguous, True)
        assert_equal(ncontig_sign_in.flags.c_contiguous, False)
        assert_equal(contig_sign_in.flags.c_contiguous, True)
        # ncontig in, ncontig out
        assert_equal(np.isnan(ncontig_in, out=ncontig_out), nan[1::3])
        assert_equal(np.isinf(ncontig_in, out=ncontig_out), inf[1::3])
        assert_equal(np.signbit(ncontig_sign_in, out=ncontig_out), sign[1::3])
        assert_equal(np.isfinite(ncontig_in, out=ncontig_out), finite[1::3])
        # contig in, ncontig out
        assert_equal(np.isnan(contig_in, out=ncontig_out), nan[1::3])
        assert_equal(np.isinf(contig_in, out=ncontig_out), inf[1::3])
        assert_equal(np.signbit(contig_sign_in, out=ncontig_out), sign[1::3])
        assert_equal(np.isfinite(contig_in, out=ncontig_out), finite[1::3])
        # ncontig in, contig out
        assert_equal(np.isnan(ncontig_in), nan[1::3])
        assert_equal(np.isinf(ncontig_in), inf[1::3])
        assert_equal(np.signbit(ncontig_sign_in), sign[1::3])
        assert_equal(np.isfinite(ncontig_in), finite[1::3])
        # contig in, contig out, nd stride
        data_split = np.array(np.array_split(data, 2))
        nan_split = np.array(np.array_split(nan, 2))
        inf_split = np.array(np.array_split(inf, 2))
        sign_split = np.array(np.array_split(sign, 2))
        finite_split = np.array(np.array_split(finite, 2))
        assert_equal(np.isnan(data_split), nan_split)
        assert_equal(np.isinf(data_split), inf_split)
        if platform.machine() == 'riscv64':
            data_split_rv = np.array(np.array_split(data_rv, 2))
            assert_equal(np.signbit(data_split_rv), sign_split)
        else:
            assert_equal(np.signbit(data_split), sign_split)
        assert_equal(np.isfinite(data_split), finite_split)
class TestLDExp:
    """Strided ldexp: mantissa * 2**exponent written through a strided out=."""

    @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4])
    @pytest.mark.parametrize("dtype", ['f', 'd'])
    def test_ldexp(self, dtype, stride):
        # Mantissa/exponent pairs are chosen so every product is exactly 1.0.
        mantissas = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype)
        exponents = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i')
        result = np.zeros(8, dtype=dtype)
        expected = np.ones(8, dtype=dtype)[::stride]
        computed = np.ldexp(mantissas[::stride], exponents[::stride],
                            out=result[::stride])
        assert_equal(computed, expected)
        # The out= buffer must have been written through the same stride.
        assert_equal(result[::stride], expected)
class TestFRExp:
    """Strided frexp with explicit out= buffers for mantissa and exponent."""

    @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4])
    @pytest.mark.parametrize("dtype", ['f', 'd'])
    @pytest.mark.skipif(not sys.platform.startswith('linux'),
                        reason="np.frexp gives different answers for NAN/INF on windows and linux")
    @pytest.mark.xfail(IS_MUSL, reason="gh23049")
    def test_frexp(self, dtype, stride):
        values = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype)
        want_mant = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype)
        want_exp = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i')
        # Pre-fill the out= buffers with sentinels so untouched slots show up.
        buf_mant = np.ones(8, dtype=dtype)
        buf_exp = 2*np.ones(8, dtype='i')
        got_mant, got_exp = np.frexp(values[::stride],
                                     out=(buf_mant[::stride], buf_exp[::stride]))
        assert_equal(want_mant[::stride], got_mant)
        assert_equal(want_exp[::stride], got_exp)
        # The strided views of the buffers must hold the same results.
        assert_equal(buf_mant[::stride], want_mant[::stride])
        assert_equal(buf_exp[::stride], want_exp[::stride])
# Unary ufuncs with SIMD loops, mapped to their test parameters.
# func : [maxulperror, low, high]
#   maxulperror -- maximum ULP error tolerated against a longdouble reference
#                  (0 means the result must match exactly)
#   low, high   -- range of the random inputs drawn by TestAVXUfuncs
avx_ufuncs = {'sqrt'       :[1,  0.,   100.],
              'absolute'   :[0, -100., 100.],
              'reciprocal' :[1,  1.,   100.],
              'square'     :[1, -100., 100.],
              'rint'       :[0, -100., 100.],
              'floor'      :[0, -100., 100.],
              'ceil'       :[0, -100., 100.],
              'trunc'      :[0, -100., 100.]}
class TestAVXUfuncs:
    """Accuracy of SIMD-dispatched unary ufuncs against a longdouble reference."""

    def test_avx_based_ufunc(self):
        stride_set = np.array([-4, -3, -2, -1, 1, 2, 3, 4])
        np.random.seed(42)
        for name, (max_ulp, lo, hi) in avx_ufuncs.items():
            # Sweep sizes 1..31 so partially filled (masked) SIMD vectors are
            # exercised as well as full ones.
            for size in range(1, 32):
                ufunc = getattr(np, name)
                x32 = np.random.uniform(low=lo, high=hi,
                                        size=size).astype(np.float32)
                x64 = x32.astype(np.float64)
                ref = ufunc(x32.astype(np.longdouble))
                if max_ulp == 0:
                    # Exact functions (rint/floor/ceil/trunc/absolute).
                    assert_equal(ufunc(x32), ref.astype(np.float32))
                    assert_equal(ufunc(x64), ref.astype(np.float64))
                else:
                    assert_array_max_ulp(ufunc(x32),
                                         ref.astype(np.float32),
                                         maxulp=max_ulp)
                    assert_array_max_ulp(ufunc(x64),
                                         ref.astype(np.float64),
                                         maxulp=max_ulp)
                # Strided views exercise the gather paths of the SIMD loops.
                if size > 1:
                    ref32 = ufunc(x32)
                    ref64 = ufunc(x64)
                    for step in stride_set:
                        assert_equal(ufunc(x64[::step]), ref64[::step])
                        assert_equal(ufunc(x32[::step]), ref32[::step])
class TestAVXFloat32Transcendental:
def test_exp_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
def test_log_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
def test_sincos_float32(self):
np.random.seed(42)
N = 1000000
M = np.int_(N/20)
index = np.random.randint(low=0, high=N, size=M)
x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
if not _glibc_older_than("2.17"):
# test coverage for elements > 117435.992f for which glibc is used
# this is known to be problematic on old glibc, so skip it there
x_f32[index] = np.float32(10E+10*np.random.rand(M))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
# test aliasing(issue #17761)
tx_f32 = x_f32.copy()
assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2)
assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2)
def test_strided_float32(self):
np.random.seed(42)
strides = np.array([-4,-3,-2,-1,1,2,3,4])
sizes = np.arange(2,100)
for ii in sizes:
x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
x_f32_large = x_f32.copy()
x_f32_large[3:-1:4] = 120000.0
exp_true = np.exp(x_f32)
log_true = np.log(x_f32)
sin_true = np.sin(x_f32_large)
cos_true = np.cos(x_f32_large)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2)
class TestLogAddExp(_FilterInvalids):
    """np.logaddexp: log(exp(x) + exp(y)) computed without overflow."""

    def test_logaddexp_values(self):
        # log(a) (+) log(b) should reproduce log(a + b).
        a = [1, 2, 3, 4, 5]
        b = [5, 4, 3, 2, 1]
        total = [6, 6, 6, 6, 6]
        for code, digits in zip(['f', 'd', 'g'], [6, 15, 15]):
            la = np.log(np.array(a, dtype=code))
            lb = np.log(np.array(b, dtype=code))
            lt = np.log(np.array(total, dtype=code))
            assert_almost_equal(np.logaddexp(la, lb), lt, decimal=digits)

    def test_logaddexp_range(self):
        # Huge magnitudes: the result is dominated by the larger argument.
        a = [1000000, -1000000, 1000200, -1000200]
        b = [1000200, -1000200, 1000000, -1000000]
        expected = [1000200, -1000000, 1000200, -1000000]
        for code in ['f', 'd', 'g']:
            assert_almost_equal(np.logaddexp(np.array(a, dtype=code),
                                             np.array(b, dtype=code)),
                                np.array(expected, dtype=code))

    def test_inf(self):
        inf = np.inf
        a = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
        b = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
        expected = [inf, inf, inf, -inf, inf, inf, 1, 1]
        # Infinite operands must not trigger an invalid-FP exception.
        with np.errstate(invalid='raise'):
            for code in ['f', 'd', 'g']:
                assert_equal(np.logaddexp(np.array(a, dtype=code),
                                          np.array(b, dtype=code)),
                             np.array(expected, dtype=code))

    def test_nan(self):
        # Any nan operand propagates.
        for a, b in [(np.nan, np.inf), (np.inf, np.nan), (np.nan, 0),
                     (0, np.nan), (np.nan, np.nan)]:
            assert_(np.isnan(np.logaddexp(a, b)))

    def test_reduce(self):
        # The identity is -inf (log of 0), also returned for an empty reduce.
        assert_equal(np.logaddexp.identity, -np.inf)
        assert_equal(np.logaddexp.reduce([]), -np.inf)
class TestLog1p:
    """log1p: accurate log(1 + x), plus its IEEE special cases."""

    def test_log1p(self):
        # Agree with plain log at points where both are well-conditioned.
        for x, ref in ((0.2, 1.2), (1e-6, 1 + 1e-6)):
            assert_almost_equal(ncu.log1p(x), ncu.log(ref))

    def test_special(self):
        # log1p(-1) diverges to -inf; inputs below -1 (incl. -inf) are invalid.
        with np.errstate(invalid="ignore", divide="ignore"):
            for x, expected in ((np.nan, np.nan), (np.inf, np.inf),
                                (-1., -np.inf), (-2., np.nan),
                                (-np.inf, np.nan)):
                assert_equal(ncu.log1p(x), expected)
class TestExpm1:
    """expm1: accurate exp(x) - 1, special cases, and tiny complex inputs."""

    def test_expm1(self):
        for x in (0.2, 1e-6):
            assert_almost_equal(ncu.expm1(x), ncu.exp(x)-1)

    def test_special(self):
        # Signed zeros are preserved; expm1(-inf) saturates at -1.
        for x, expected in ((np.inf, np.inf), (0., 0.), (-0., -0.),
                            (np.inf, np.inf), (-np.inf, -1.)):
            assert_equal(ncu.expm1(x), expected)

    def test_complex(self):
        # For tiny arguments expm1(x) ~ x, in both real and complex dtypes.
        x = np.asarray(1e-12)
        assert_allclose(x, ncu.expm1(x))
        x = x.astype(np.complex128)
        assert_allclose(x, ncu.expm1(x))
class TestHypot:
    """hypot: Euclidean distance, including its reduction."""

    def test_simple(self):
        assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
        assert_almost_equal(ncu.hypot(0, 0), 0)

    def test_reduce(self):
        # Pythagorean triples reduce exactly; the empty reduce is the identity 0.
        for values, expected in (([3.0, 4.0], 5.0),
                                 ([3.0, 4.0, 0], 5.0),
                                 ([9.0, 12.0, 20.0], 25.0)):
            assert_almost_equal(ncu.hypot.reduce(values), expected)
        assert_equal(ncu.hypot.reduce([]), 0.0)
def assert_hypot_isnan(x, y):
    # hypot must propagate nan without raising an invalid-FP warning.
    with np.errstate(invalid='ignore'):
        result = ncu.hypot(x, y)
        assert_(np.isnan(result),
                "hypot(%s, %s) is %s, not nan" % (x, y, result))
def assert_hypot_isinf(x, y):
    # hypot with an infinite operand must be inf, with no invalid-FP warning.
    with np.errstate(invalid='ignore'):
        result = ncu.hypot(x, y)
        assert_(np.isinf(result),
                "hypot(%s, %s) is %s, not inf" % (x, y, result))
class TestHypotSpecialValues:
    """IEEE special-value behaviour of hypot."""

    def test_nan_outputs(self):
        assert_hypot_isnan(np.nan, np.nan)
        assert_hypot_isnan(np.nan, 1)

    def test_nan_outputs2(self):
        # An infinite operand dominates, even when paired with nan.
        for a, b in [(np.nan, np.inf), (np.inf, np.nan), (np.inf, 0),
                     (0, np.inf), (np.inf, np.inf), (np.inf, 23.0)]:
            assert_hypot_isinf(a, b)

    def test_no_fpe(self):
        # hypot(inf, 0) must not emit a floating-point warning.
        assert_no_warnings(ncu.hypot, np.inf, 0)
def assert_arctan2_isnan(x, y):
    # arctan2 must propagate nan.
    result = ncu.arctan2(x, y)
    assert_(np.isnan(result),
            "arctan(%s, %s) is %s, not nan" % (x, y, result))
def assert_arctan2_ispinf(x, y):
    # Result must be positive infinity.
    result = ncu.arctan2(x, y)
    assert_(np.isinf(result) and result > 0,
            "arctan(%s, %s) is %s, not +inf" % (x, y, result))
def assert_arctan2_isninf(x, y):
    # Result must be negative infinity.
    result = ncu.arctan2(x, y)
    assert_(np.isinf(result) and result < 0,
            "arctan(%s, %s) is %s, not -inf" % (x, y, result))
def assert_arctan2_ispzero(x, y):
    # Result must be exactly +0.0 (zero with a clear sign bit).
    result = ncu.arctan2(x, y)
    assert_(result == 0 and not np.signbit(result),
            "arctan(%s, %s) is %s, not +0" % (x, y, result))
def assert_arctan2_isnzero(x, y):
    # Result must be exactly -0.0 (zero with the sign bit set).
    result = ncu.arctan2(x, y)
    assert_(result == 0 and np.signbit(result),
            "arctan(%s, %s) is %s, not -0" % (x, y, result))
class TestArctan2SpecialValues:
    """Special-value behaviour of arctan2 (C99-style atan2)."""

    def test_one_one(self):
        # atan2(1, 1) returns pi/4.
        for y, x, expected in [(1, 1, 0.25 * np.pi),
                               (-1, 1, -0.25 * np.pi),
                               (1, -1, 0.75 * np.pi)]:
            assert_almost_equal(ncu.arctan2(y, x), expected)

    def test_zero_nzero(self):
        # atan2(+-0, -0) returns +-pi.
        assert_almost_equal(ncu.arctan2(ncu.PZERO, ncu.NZERO), np.pi)
        assert_almost_equal(ncu.arctan2(ncu.NZERO, ncu.NZERO), -np.pi)

    def test_zero_pzero(self):
        # atan2(+-0, +0) returns +-0.
        assert_arctan2_ispzero(ncu.PZERO, ncu.PZERO)
        assert_arctan2_isnzero(ncu.NZERO, ncu.PZERO)

    def test_zero_negative(self):
        # atan2(+-0, x) returns +-pi for x < 0.
        assert_almost_equal(ncu.arctan2(ncu.PZERO, -1), np.pi)
        assert_almost_equal(ncu.arctan2(ncu.NZERO, -1), -np.pi)

    def test_zero_positive(self):
        # atan2(+-0, x) returns +-0 for x > 0.
        assert_arctan2_ispzero(ncu.PZERO, 1)
        assert_arctan2_isnzero(ncu.NZERO, 1)

    def test_positive_zero(self):
        # atan2(y, +-0) returns +pi/2 for y > 0.
        for x in (ncu.PZERO, ncu.NZERO):
            assert_almost_equal(ncu.arctan2(1, x), 0.5 * np.pi)

    def test_negative_zero(self):
        # atan2(y, +-0) returns -pi/2 for y < 0.
        for x in (ncu.PZERO, ncu.NZERO):
            assert_almost_equal(ncu.arctan2(-1, x), -0.5 * np.pi)

    def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, -np.inf), np.pi)
        assert_almost_equal(ncu.arctan2(-1, -np.inf), -np.pi)

    def test_any_pinf(self):
        # atan2(+-y, +infinity) returns +-0 for finite y > 0.
        assert_arctan2_ispzero(1, np.inf)
        assert_arctan2_isnzero(-1, np.inf)

    def test_inf_any(self):
        # atan2(+-infinity, x) returns +-pi/2 for finite x.
        assert_almost_equal(ncu.arctan2(np.inf, 1), 0.5 * np.pi)
        assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)

    def test_inf_ninf(self):
        # atan2(+-infinity, -infinity) returns +-3*pi/4.
        assert_almost_equal(ncu.arctan2(np.inf, -np.inf), 0.75 * np.pi)
        assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)

    def test_inf_pinf(self):
        # atan2(+-infinity, +infinity) returns +-pi/4.
        assert_almost_equal(ncu.arctan2(np.inf, np.inf), 0.25 * np.pi)
        assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)

    def test_nan_any(self):
        # atan2(nan, x) returns nan for any x, including inf
        for y, x in [(np.nan, np.inf), (np.inf, np.nan), (np.nan, np.nan)]:
            assert_arctan2_isnan(y, x)
class TestLdexp:
    """ldexp(m, e) == m * 2**e for every accepted exponent dtype."""

    def _check_ldexp(self, tp):
        # 2 * 2**3 == 16 for float32/float64/longdouble mantissas.
        for mant_dtype in (np.float32, np.float64, np.longdouble):
            assert_almost_equal(
                ncu.ldexp(np.array(2., mant_dtype), np.array(3, tp)), 16.)

    def test_ldexp(self):
        # The default Python int type should work
        assert_almost_equal(ncu.ldexp(2., 3), 16.)
        # The following int types should all be accepted
        for exp_type in (np.int8, np.int16, np.int32, 'i', 'l'):
            self._check_ldexp(exp_type)

    def test_ldexp_overflow(self):
        # silence warning emitted on overflow
        with np.errstate(over="ignore"):
            imax = np.iinfo(np.dtype('l')).max
            imin = np.iinfo(np.dtype('l')).min
            # Saturate to inf on exponent overflow, to 0 on underflow.
            assert_equal(ncu.ldexp(2., imax), np.inf)
            assert_equal(ncu.ldexp(2., imin), 0)
class TestMaximum(_FilterInvalids):
    """np.maximum: nan propagation, complex ordering, strides, reductions."""

    def test_reduce(self):
        seq_up = np.arange(11)
        seq_down = seq_up[::-1]
        reduce_ = np.maximum.reduce
        # Integer dtypes: the maximum of 0..10 regardless of order.
        for code in np.typecodes['AllInteger']:
            assert_equal(reduce_(seq_up.astype(code)), 10)
            assert_equal(reduce_(seq_down.astype(code)), 10)
        # Float dtypes: ditto, and any nan poisons the reduction.
        for code in np.typecodes['AllFloat']:
            up = seq_up.astype(code)
            down = seq_down.astype(code)
            assert_equal(reduce_(up), 10)
            assert_equal(reduce_(down), 10)
            up[::2] = np.nan
            down[::2] = np.nan
            assert_equal(reduce_(up), np.nan)
            assert_equal(reduce_(down), np.nan)

    def test_reduce_complex(self):
        # Complex comparison is lexicographic on (real, imag).
        assert_equal(np.maximum.reduce([1, 2j]), 1)
        assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)

    def test_float_nans(self):
        nan = np.nan
        # nan in either operand yields nan.
        assert_equal(np.maximum(np.array([0, nan, nan]),
                                np.array([nan, 0, nan])),
                     np.array([nan, nan, nan]))

    def test_object_nans(self):
        # Multiple checks to give this a chance to
        # fail if cmp is used instead of rich compare.
        # Failure cannot be guaranteed.
        for _ in range(1):
            x = np.array(float('nan'), object)
            y = 1.0
            z = np.array(float('nan'), object)
            assert_(np.maximum(x, y) == 1.0)
            assert_(np.maximum(z, y) == 1.0)

    def test_complex_nans(self):
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
            left = np.array([0, cnan, cnan], dtype=complex)
            right = np.array([cnan, 0, cnan], dtype=complex)
            assert_equal(np.maximum(left, right),
                         np.array([nan, nan, nan], dtype=complex))

    def test_object_array(self):
        smaller = np.arange(5, dtype=object)
        larger = smaller + 1
        assert_equal(np.maximum(smaller, larger), larger)

    def test_strided_array(self):
        arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
        arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
        maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0])
        out = np.ones(8)
        out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0])
        # Contiguous inputs, then various stride combinations incl. out=.
        assert_equal(np.maximum(arr1, arr2), maxtrue)
        assert_equal(np.maximum(arr1[::2], arr2[::2]), maxtrue[::2])
        assert_equal(np.maximum(arr1[:4:], arr2[::2]),
                     np.array([-2.0, np.nan, 10.0, 1.0]))
        assert_equal(np.maximum(arr1[::3], arr2[:3:]),
                     np.array([-2.0, 0.0, np.nan]))
        assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]),
                     np.array([-2.0, 10., np.nan]))
        assert_equal(out, out_maxtrue)

    def test_precision(self):
        for dt in [np.float16, np.float32, np.float64, np.longdouble]:
            info = np.finfo(dt)
            d1 = dt(0.1)
            d1_next = np.nextafter(d1, np.inf)
            # (v1, v2, expected maximum) -- nan propagates.
            cases = [(info.min, -np.inf, info.min),
                     (info.max, -np.inf, info.max),
                     (d1, d1_next, d1_next),
                     (info.max, np.nan, np.nan)]
            for v1, v2, expected in cases:
                assert_equal(np.maximum([v1], [v2]), [expected])
                assert_equal(np.maximum.reduce([v1, v2]), expected)
class TestMinimum(_FilterInvalids):
    """np.minimum: nan propagation, complex ordering, strides, reductions."""

    def test_reduce(self):
        seq_up = np.arange(11)
        seq_down = seq_up[::-1]
        reduce_ = np.minimum.reduce
        # Integer dtypes: the minimum of 0..10 regardless of order.
        for code in np.typecodes['AllInteger']:
            assert_equal(reduce_(seq_up.astype(code)), 0)
            assert_equal(reduce_(seq_down.astype(code)), 0)
        # Float dtypes: ditto, and any nan poisons the reduction.
        for code in np.typecodes['AllFloat']:
            up = seq_up.astype(code)
            down = seq_down.astype(code)
            assert_equal(reduce_(up), 0)
            assert_equal(reduce_(down), 0)
            up[::2] = np.nan
            down[::2] = np.nan
            assert_equal(reduce_(up), np.nan)
            assert_equal(reduce_(down), np.nan)

    def test_reduce_complex(self):
        # Complex comparison is lexicographic on (real, imag).
        assert_equal(np.minimum.reduce([1, 2j]), 2j)
        assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)

    def test_float_nans(self):
        nan = np.nan
        # nan in either operand yields nan.
        assert_equal(np.minimum(np.array([0, nan, nan]),
                                np.array([nan, 0, nan])),
                     np.array([nan, nan, nan]))

    def test_object_nans(self):
        # Multiple checks to give this a chance to
        # fail if cmp is used instead of rich compare.
        # Failure cannot be guaranteed.
        for _ in range(1):
            x = np.array(float('nan'), object)
            y = 1.0
            z = np.array(float('nan'), object)
            assert_(np.minimum(x, y) == 1.0)
            assert_(np.minimum(z, y) == 1.0)

    def test_complex_nans(self):
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
            left = np.array([0, cnan, cnan], dtype=complex)
            right = np.array([cnan, 0, cnan], dtype=complex)
            assert_equal(np.minimum(left, right),
                         np.array([nan, nan, nan], dtype=complex))

    def test_object_array(self):
        smaller = np.arange(5, dtype=object)
        larger = smaller + 1
        assert_equal(np.minimum(smaller, larger), smaller)

    def test_strided_array(self):
        arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
        arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
        mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf])
        out = np.ones(8)
        out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0])
        # Contiguous inputs, then various stride combinations incl. out=.
        assert_equal(np.minimum(arr1, arr2), mintrue)
        assert_equal(np.minimum(arr1[::2], arr2[::2]), mintrue[::2])
        assert_equal(np.minimum(arr1[:4:], arr2[::2]),
                     np.array([-4.0, np.nan, 0.0, 0.0]))
        assert_equal(np.minimum(arr1[::3], arr2[:3:]),
                     np.array([-4.0, -1.0, np.nan]))
        assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]),
                     np.array([-4.0, 1.0, np.nan]))
        assert_equal(out, out_mintrue)

    def test_precision(self):
        for dt in [np.float16, np.float32, np.float64, np.longdouble]:
            info = np.finfo(dt)
            d1 = dt(0.1)
            d1_next = np.nextafter(d1, np.inf)
            # (v1, v2, expected minimum) -- nan propagates.
            cases = [(info.min, np.inf, info.min),
                     (info.max, np.inf, info.max),
                     (d1, d1_next, d1),
                     (info.min, np.nan, np.nan)]
            for v1, v2, expected in cases:
                assert_equal(np.minimum([v1], [v2]), [expected])
                assert_equal(np.minimum.reduce([v1, v2]), expected)
class TestFmax(_FilterInvalids):
    """np.fmax: like maximum, but a nan loses to any number."""

    def test_reduce(self):
        seq_up = np.arange(11)
        seq_down = seq_up[::-1]
        reduce_ = np.fmax.reduce
        for code in np.typecodes['AllInteger']:
            assert_equal(reduce_(seq_up.astype(code)), 10)
            assert_equal(reduce_(seq_down.astype(code)), 10)
        for code in np.typecodes['AllFloat']:
            up = seq_up.astype(code)
            down = seq_down.astype(code)
            assert_equal(reduce_(up), 10)
            assert_equal(reduce_(down), 10)
            # fmax ignores nans, so the largest surviving element (9) wins.
            up[::2] = np.nan
            down[::2] = np.nan
            assert_equal(reduce_(up), 9)
            assert_equal(reduce_(down), 9)

    def test_reduce_complex(self):
        # Complex comparison is lexicographic on (real, imag).
        assert_equal(np.fmax.reduce([1, 2j]), 1)
        assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)

    def test_float_nans(self):
        nan = np.nan
        # nan only survives when both operands are nan.
        assert_equal(np.fmax(np.array([0, nan, nan]),
                             np.array([nan, 0, nan])),
                     np.array([0, 0, nan]))

    def test_complex_nans(self):
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
            left = np.array([0, cnan, cnan], dtype=complex)
            right = np.array([cnan, 0, cnan], dtype=complex)
            assert_equal(np.fmax(left, right),
                         np.array([0, 0, nan], dtype=complex))

    def test_precision(self):
        for dt in [np.float16, np.float32, np.float64, np.longdouble]:
            info = np.finfo(dt)
            d1 = dt(0.1)
            d1_next = np.nextafter(d1, np.inf)
            # (v1, v2, expected fmax) -- nan is dropped, not propagated.
            cases = [(info.min, -np.inf, info.min),
                     (info.max, -np.inf, info.max),
                     (d1, d1_next, d1_next),
                     (info.max, np.nan, info.max)]
            for v1, v2, expected in cases:
                assert_equal(np.fmax([v1], [v2]), [expected])
                assert_equal(np.fmax.reduce([v1, v2]), expected)
class TestFmin(_FilterInvalids):
    """np.fmin: like minimum, but a nan loses to any number."""

    def test_reduce(self):
        seq_up = np.arange(11)
        seq_down = seq_up[::-1]
        reduce_ = np.fmin.reduce
        for code in np.typecodes['AllInteger']:
            assert_equal(reduce_(seq_up.astype(code)), 0)
            assert_equal(reduce_(seq_down.astype(code)), 0)
        for code in np.typecodes['AllFloat']:
            up = seq_up.astype(code)
            down = seq_down.astype(code)
            assert_equal(reduce_(up), 0)
            assert_equal(reduce_(down), 0)
            # fmin ignores nans, so the smallest surviving element (1) wins.
            up[::2] = np.nan
            down[::2] = np.nan
            assert_equal(reduce_(up), 1)
            assert_equal(reduce_(down), 1)

    def test_reduce_complex(self):
        # Complex comparison is lexicographic on (real, imag).
        assert_equal(np.fmin.reduce([1, 2j]), 2j)
        assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)

    def test_float_nans(self):
        nan = np.nan
        # nan only survives when both operands are nan.
        assert_equal(np.fmin(np.array([0, nan, nan]),
                             np.array([nan, 0, nan])),
                     np.array([0, 0, nan]))

    def test_complex_nans(self):
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
            left = np.array([0, cnan, cnan], dtype=complex)
            right = np.array([cnan, 0, cnan], dtype=complex)
            assert_equal(np.fmin(left, right),
                         np.array([0, 0, nan], dtype=complex))

    def test_precision(self):
        for dt in [np.float16, np.float32, np.float64, np.longdouble]:
            info = np.finfo(dt)
            d1 = dt(0.1)
            d1_next = np.nextafter(d1, np.inf)
            # (v1, v2, expected fmin) -- nan is dropped, not propagated.
            cases = [(info.min, np.inf, info.min),
                     (info.max, np.inf, info.max),
                     (d1, d1_next, d1),
                     (info.min, np.nan, info.min)]
            for v1, v2, expected in cases:
                assert_equal(np.fmin([v1], [v2]), [expected])
                assert_equal(np.fmin.reduce([v1, v2]), expected)
class TestBool:
    """Boolean ufunc behaviour: rejected ops, truth tables, reductions."""

    def test_exceptions(self):
        a = np.ones(1, dtype=np.bool)
        # Arithmetic negation and subtraction are undefined for booleans.
        for bad_op in (np.negative, np.positive):
            assert_raises(TypeError, bad_op, a)
        assert_raises(TypeError, np.subtract, a, a)

    def test_truth_table_logical(self):
        # 2, 3 and 4 serves as true values
        input1 = [0, 0, 3, 2]
        input2 = [0, 4, 0, 2]
        codes = (np.typecodes['AllFloat']
                 + np.typecodes['AllInteger']
                 + '?')  # boolean
        for dtype in map(np.dtype, codes):
            arg1 = np.asarray(input1, dtype=dtype)
            arg2 = np.asarray(input2, dtype=dtype)
            # maximum/minimum/not_equal mirror or/and/xor on truth values.
            for funcs, expected in (
                    ((np.logical_or, np.maximum),
                     [False, True, True, True]),
                    ((np.logical_and, np.minimum),
                     [False, False, False, True]),
                    ((np.logical_xor, np.not_equal),
                     [False, True, True, False])):
                for func in funcs:
                    assert_equal(func(arg1, arg2).astype(bool), expected)

    def test_truth_table_bitwise(self):
        arg1 = [False, False, True, True]
        arg2 = [False, True, False, True]
        assert_equal(np.bitwise_or(arg1, arg2), [False, True, True, True])
        assert_equal(np.bitwise_and(arg1, arg2), [False, False, False, True])
        assert_equal(np.bitwise_xor(arg1, arg2), [False, True, True, False])

    def test_reduce(self):
        none = np.array([0, 0, 0, 0], bool)
        some = np.array([1, 0, 1, 1], bool)
        every = np.array([1, 1, 1, 1], bool)
        empty = np.array([], bool)
        # Reductions agree with the Python builtins / element-count parity.
        for arr in (none, some, every, empty):
            assert_equal(np.logical_and.reduce(arr), all(arr))
            assert_equal(np.logical_or.reduce(arr), any(arr))
            assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
class TestBitwiseUFuncs:
    """bitwise and/or/xor/not/count across bool, all integers and object."""

    # Bit widths of every integer dtype, used to size bitwise_count inputs.
    _all_ints_bits = [
        np.dtype(c).itemsize * 8 for c in np.typecodes["AllInteger"]]
    bitwise_types = [
        np.dtype(c) for c in '?' + np.typecodes["AllInteger"] + 'O']
    bitwise_bits = [
        2,  # boolean type
        *_all_ints_bits,  # All integers
        max(_all_ints_bits) + 1,  # Object_ type
    ]

    def test_values(self):
        # Truth tables on the all-zeros and all-ones bit patterns.
        for dt in self.bitwise_types:
            zeros = np.array([0], dtype=dt)
            ones = np.array([-1]).astype(dt)
            msg = "dt = '%s'" % dt.char
            assert_equal(np.bitwise_not(zeros), ones, err_msg=msg)
            assert_equal(np.bitwise_not(ones), zeros, err_msg=msg)
            operand_pairs = ((zeros, zeros), (zeros, ones),
                             (ones, zeros), (ones, ones))
            for op, table in ((np.bitwise_or, (zeros, ones, ones, ones)),
                              (np.bitwise_xor, (zeros, ones, ones, zeros)),
                              (np.bitwise_and, (zeros, zeros, zeros, ones))):
                for (a, b), expected in zip(operand_pairs, table):
                    assert_equal(op(a, b), expected, err_msg=msg)

    def test_types(self):
        # Results keep the operand dtype.
        for dt in self.bitwise_types:
            zeros = np.array([0], dtype=dt)
            msg = "dt = '%s'" % dt.char
            assert_(np.bitwise_not(zeros).dtype == dt, msg)
            for op in (np.bitwise_or, np.bitwise_xor, np.bitwise_and):
                assert_(op(zeros, zeros).dtype == dt, msg)

    def test_identity(self):
        assert_(np.bitwise_or.identity == 0, 'bitwise_or')
        assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
        assert_(np.bitwise_and.identity == -1, 'bitwise_and')

    def test_reduction(self):
        binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and)
        # Single-element reductions return the element itself.
        for dt in self.bitwise_types:
            zeros = np.array([0], dtype=dt)
            ones = np.array([-1]).astype(dt)
            for f in binary_funcs:
                msg = "dt: '%s', f: '%s'" % (dt, f)
                assert_equal(f.reduce(zeros), zeros, err_msg=msg)
                assert_equal(f.reduce(ones), ones, err_msg=msg)
        # Test empty reduction, no object dtype
        for dt in self.bitwise_types[:-1]:
            # No object array types
            empty = np.array([], dtype=dt)
            for f in binary_funcs:
                msg = "dt: '%s', f: '%s'" % (dt, f)
                tgt = np.array(f.identity).astype(dt)
                res = f.reduce(empty)
                assert_equal(res, tgt, err_msg=msg)
                assert_(res.dtype == tgt.dtype, msg)
        # Empty object arrays use the identity. Note that the types may
        # differ, the actual type used is determined by the assign_identity
        # function and is not the same as the type returned by the identity
        # method.
        for f in binary_funcs:
            msg = "dt: '%s'" % (f,)
            empty = np.array([], dtype=object)
            assert_equal(f.reduce(empty), f.identity, err_msg=msg)
        # Non-empty object arrays do not use the identity
        for f in binary_funcs:
            msg = "dt: '%s'" % (f,)
            btype = np.array([True], dtype=object)
            assert_(type(f.reduce(btype)) is bool, msg)

    @pytest.mark.parametrize("input_dtype_obj, bitsize",
                             zip(bitwise_types, bitwise_bits))
    def test_bitwise_count(self, input_dtype_obj, bitsize):
        input_dtype = input_dtype_obj.type
        # popcount of 2**i - 1 (i low bits set) must be exactly i.
        for i in range(1, bitsize):
            num = 2**i - 1
            msg = f"bitwise_count for {num}"
            assert i == np.bitwise_count(input_dtype(num)), msg
            # Signed/object types count the bits of the absolute value.
            if np.issubdtype(
                    input_dtype, np.signedinteger) or input_dtype == np.object_:
                assert i == np.bitwise_count(input_dtype(-num)), msg
        a = np.array([2**i-1 for i in range(1, bitsize)], dtype=input_dtype)
        expected = np.arange(1, bitsize, dtype=input_dtype)
        msg = f"array bitwise_count for {input_dtype}"
        assert all(np.bitwise_count(a) == expected), msg
class TestInt:
    """logical_not writing through a strided boolean out= view."""

    def test_logical_not(self):
        x = np.ones(10, dtype=np.int16)
        backing = np.ones(10 * 2, dtype=bool)
        expected = backing.copy()
        expected[::2] = False
        strided_view = backing[::2]
        # The strided out= view receives the (all-False) result ...
        assert_array_equal(np.logical_not(x, out=strided_view), False)
        # ... while the untouched elements of the backing array stay True.
        assert_array_equal(backing, expected)
class TestFloatingPoint:
    """Sanity flag: the umath module must report floating-point support."""

    def test_floating_point(self):
        has_fp_support = ncu.FLOATING_POINT_SUPPORT
        assert_equal(has_fp_support, 1)
class TestDegrees:
    """radians -> degrees conversion."""

    def test_degrees(self):
        for rad, deg in ((np.pi, 180.0), (-0.5*np.pi, -90.0)):
            assert_almost_equal(ncu.degrees(rad), deg)
class TestRadians:
    """degrees -> radians conversion."""

    def test_radians(self):
        for deg, rad in ((180.0, np.pi), (-90.0, -0.5*np.pi)):
            assert_almost_equal(ncu.radians(deg), rad)
class TestHeavside:
    """Heaviside step function with an explicit value at x == 0."""

    def test_heaviside(self):
        x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
        expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
        expected1 = expectedhalf.copy()
        expected1[0, 2] = 1
        # float64: h(0) = 0.5 and h(0) = 1 respectively.
        assert_equal(ncu.heaviside(x, 0.5), expectedhalf)
        assert_equal(ncu.heaviside(x, 1.0), expected1)
        # Same checks in float32.
        x = x.astype(np.float32)
        assert_equal(ncu.heaviside(x, np.float32(0.5)),
                     expectedhalf.astype(np.float32))
        assert_equal(ncu.heaviside(x, np.float32(1.0)),
                     expected1.astype(np.float32))
class TestSign:
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_sign_complex(self):
a = np.array([
np.inf, -np.inf, complex(0, np.inf), complex(0, -np.inf),
complex(np.inf, np.inf), complex(np.inf, -np.inf), # nan
np.nan, complex(0, np.nan), complex(np.nan, np.nan), # nan
0.0, # 0.
3.0, -3.0, -2j, 3.0+4.0j, -8.0+6.0j
])
out = np.zeros(a.shape, a.dtype)
tgt = np.array([
1., -1., 1j, -1j,
] + [complex(np.nan, np.nan)] * 5 + [
0.0,
1.0, -1.0, -1j, 0.6+0.8j, -0.8+0.6j])
with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_(res is out)
assert_equal(res, tgt)
def test_sign_dtype_object(self):
# In reference to github issue #6229
foo = np.array([-.1, 0, .1])
a = np.sign(foo.astype(object))
b = np.sign(foo)
assert_array_equal(a, b)
def test_sign_dtype_nan_object(self):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
# FIXME: a not used
a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
class TestMinMax:
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
for dt, sz in [(np.float32, 15), (np.float64, 7)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
for i in range(inp.size):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: '%r\n%s' % (inp, msg)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
assert_(np.isnan(inp.max()), msg=emsg)
assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
inp[i] = -1e10
assert_equal(inp.min(), -1e10, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
def test_reduce_reorder(self):
# gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
# and put it before the call to an intrinsic function that causes
# invalid status to be set. Also make sure warnings are not emitted
for n in (2, 4, 8, 16, 32):
for dt in (np.float32, np.float16, np.complex64):
for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
assert_equal(np.min(r), np.nan)
def test_minimize_no_warns(self):
a = np.minimum(np.nan, 1)
assert_equal(a, np.nan)
class TestAbsoluteNegative:
    """SIMD paths of np.absolute / np.negative: alignment sweeps, IEEE
    special values, misaligned data, and non-contiguous input/output."""

    def test_abs_neg_blocked(self):
        # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 5)]:
            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
                                                     max_size=sz):
                # abs into the provided out= buffer matches scalar absolute.
                tgt = [ncu.absolute(i) for i in inp]
                np.absolute(inp, out=out)
                assert_equal(out, tgt, err_msg=msg)
                assert_((out >= 0).all())

                # negation into out= matches elementwise -1 * x.
                tgt = [-1*(i) for i in inp]
                np.negative(inp, out=out)
                assert_equal(out, tgt, err_msg=msg)

                # Place nan / +-inf at every position of a negated ramp.
                for v in [np.nan, -np.inf, np.inf]:
                    for i in range(inp.size):
                        d = np.arange(inp.size, dtype=dt)
                        inp[:] = -d
                        inp[i] = v
                        # abs(-inf) == +inf; nan and +inf pass through abs.
                        d[i] = -v if v == -np.inf else v
                        assert_array_equal(np.abs(inp), d, err_msg=msg)
                        np.abs(inp, out=out)
                        assert_array_equal(out, d, err_msg=msg)

                        # Unary minus agrees with multiplication by -1.
                        assert_array_equal(-inp, -1*inp, err_msg=msg)
                        d = -1 * inp
                        np.negative(inp, out=out)
                        assert_array_equal(out, d, err_msg=msg)

    def test_lower_align(self):
        # check data that is not aligned to element size
        # i.e doubles are aligned to 4 bytes on i386
        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
        assert_equal(np.abs(d), d)
        assert_equal(np.negative(d), -d)
        # In-place and out=-aliased variants must also accept misaligned data.
        np.negative(d, out=d)
        np.negative(np.ones_like(d), out=d)
        np.abs(d, out=d)
        np.abs(np.ones_like(d), out=d)

    @pytest.mark.parametrize("dtype", ['d', 'f', 'int32', 'int64'])
    @pytest.mark.parametrize("big", [True, False])
    def test_noncontiguous(self, dtype, big):
        # Negation across every contiguity combination of input and output.
        data = np.array([-1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.5, 2.5, -6,
                            6, -2.2251e-308, -8, 10], dtype=dtype)
        expect = np.array([1.0, -1.0, 0.0, -0.0, -2.2251e-308, 2.5, -2.5, 6,
                            -6, 2.2251e-308, 8, -10], dtype=dtype)
        if big:
            # Larger arrays take the vectorized (non-scalar-tail) code path.
            data = np.repeat(data, 10)
            expect = np.repeat(expect, 10)
        out = np.ndarray(data.shape, dtype=dtype)
        ncontig_in = data[1::2]
        ncontig_out = out[1::2]
        contig_in = np.array(ncontig_in)
        # contig in, contig out
        assert_array_equal(np.negative(contig_in), expect[1::2])
        # contig in, ncontig out
        assert_array_equal(np.negative(contig_in, out=ncontig_out),
                                expect[1::2])
        # ncontig in, contig out
        assert_array_equal(np.negative(ncontig_in), expect[1::2])
        # ncontig in, ncontig out
        assert_array_equal(np.negative(ncontig_in, out=ncontig_out),
                                expect[1::2])
        # contig in, contig out, nd stride
        data_split = np.array(np.array_split(data, 2))
        expect_split = np.array(np.array_split(expect, 2))
        assert_equal(np.negative(data_split), expect_split)
class TestPositive:
    """np.positive: identity on numeric dtypes, TypeError on types that
    have no meaningful unary ``+``."""

    def test_valid(self):
        for dtype in (int, float, complex, object):
            data = np.arange(5, dtype=dtype)
            assert_equal(data, np.positive(data), err_msg=str(dtype))

    def test_invalid(self):
        invalid_inputs = (
            True,
            np.datetime64('2000-01-01'),
            np.array(['foo'], dtype=str),
            np.array(['bar'], dtype=object),
        )
        for bad in invalid_inputs:
            with assert_raises(TypeError):
                np.positive(bad)
class TestSpecialMethods:
    def test_wrap(self):
        """A non-ndarray input's __array_wrap__ must be called on the ufunc
        result and receive a (ufunc, args, output-index) context."""
        class with_wrap:
            def __array__(self, dtype=None, copy=None):
                return np.zeros(1)
            def __array_wrap__(self, arr, context, return_scalar):
                # Stash what we were called with so the test can inspect it.
                r = with_wrap()
                r.arr = arr
                r.context = context
                return r
        a = with_wrap()
        x = ncu.minimum(a, a)
        assert_equal(x.arr, np.zeros(1))
        # context is (ufunc, input arguments, output index)
        func, args, i = x.context
        assert_(func is ncu.minimum)
        assert_equal(len(args), 2)
        assert_equal(args[0], a)
        assert_equal(args[1], a)
        assert_equal(i, 0)
    def test_wrap_out(self):
        """__array_wrap__ must see the same arguments no matter how ``out``
        is spelled (positional, keyword, tuple, or defaulted to None)."""
        # Calling convention for out should not affect how special methods are
        # called
        class StoreArrayPrepareWrap(np.ndarray):
            _wrap_args = None
            _prepare_args = None
            def __new__(cls):
                return np.zeros(()).view(cls)
            def __array_wrap__(self, obj, context, return_scalar):
                # record the ufunc's input arguments (context[1])
                self._wrap_args = context[1]
                return obj
            @property
            def args(self):
                # We need to ensure these are fetched at the same time, before
                # any other ufuncs are called by the assertions
                return self._wrap_args
            def __repr__(self):
                return "a" # for short test output
        def do_test(f_call, f_expected):
            # run one ufunc call and compare the recorded wrap args
            a = StoreArrayPrepareWrap()
            f_call(a)
            w = a.args
            expected = f_expected(a)
            try:
                assert w == expected
            except AssertionError as e:
                # assert_equal produces truly useless error messages
                raise AssertionError("\n".join([
                    "Bad arguments passed in ufunc call",
                    " expected: {}".format(expected),
                    " __array_wrap__ got: {}".format(w)
                ]))
        # method not on the out argument
        do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
        do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
        do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
        do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
        # method on the out argument
        do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
        do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
        do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
        # Also check the where mask handling:
        do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0))
        do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a))
    def test_wrap_with_iterable(self):
        """Regression test for gh-1026: wrapping must still work when the
        other operand is a plain iterable that gets broadcast."""
        # test fix for bug #1026:
        class with_wrap(np.ndarray):
            __array_priority__ = 10
            def __new__(cls):
                return np.asarray(1).view(cls).copy()
            def __array_wrap__(self, arr, context, return_scalar):
                return arr.view(type(self))
        a = with_wrap()
        x = ncu.multiply(a, (1, 2, 3))
        assert_(isinstance(x, with_wrap))
        assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
assert_(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_priority(self):
class A:
def __array__(self, dtype=None, copy=None):
return np.zeros(1)
def __array_wrap__(self, arr, context, return_scalar):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
assert_(type(f(x, x)) is np.ndarray)
assert_(type(f(x, a)) is A)
assert_(type(f(x, b)) is B)
assert_(type(f(x, c)) is C)
assert_(type(f(a, x)) is A)
assert_(type(f(b, x)) is B)
assert_(type(f(c, x)) is C)
assert_(type(f(a, a)) is A)
assert_(type(f(a, b)) is B)
assert_(type(f(b, a)) is B)
assert_(type(f(b, b)) is B)
assert_(type(f(b, c)) is C)
assert_(type(f(c, b)) is C)
assert_(type(f(c, c)) is C)
assert_(type(ncu.exp(a) is A))
assert_(type(ncu.exp(b) is B))
assert_(type(ncu.exp(c) is C))
def test_failing_wrap(self):
class A:
def __array__(self, dtype=None, copy=None):
return np.zeros(2)
def __array_wrap__(self, arr, context, return_scalar):
raise RuntimeError
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
assert_raises(RuntimeError, ncu.maximum.reduce, a)
    def test_failing_out_wrap(self):
        """Regression test: when wrapping one output raises, the other
        (already wrapped) output must not be freed twice (segfault)."""
        singleton = np.array([1.0])
        class Ok(np.ndarray):
            def __array_wrap__(self, obj, context, return_scalar):
                return singleton
        class Bad(np.ndarray):
            def __array_wrap__(self, obj, context, return_scalar):
                raise RuntimeError
        ok = np.empty(1).view(Ok)
        bad = np.empty(1).view(Bad)
        # double-free (segfault) of "ok" if "bad" raises an exception
        for i in range(10):
            assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
def test_none_wrap(self):
# Tests that issue #8507 is resolved. Previously, this would segfault
class A:
def __array__(self, dtype=None, copy=None):
return np.zeros(1)
def __array_wrap__(self, arr, context=None, return_scalar=False):
return None
a = A()
assert_equal(ncu.maximum(a, a), None)
def test_default_prepare(self):
class with_wrap:
__array_priority__ = 10
def __array__(self, dtype=None, copy=None):
return np.zeros(1)
def __array_wrap__(self, arr, context, return_scalar):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_array_too_many_args(self):
class A:
def __array__(self, dtype, context, copy=None):
return np.zeros(1)
a = A()
assert_raises_regex(TypeError, '2 required positional', np.sum, a)
    def test_ufunc_override(self):
        """__array_ufunc__ must win over __array_priority__, whether the
        overriding object is an input or the ``out=`` operand."""
        # check override works even with instance with high priority.
        class A:
            def __array_ufunc__(self, func, method, *inputs, **kwargs):
                return self, func, method, inputs, kwargs
        class MyNDArray(np.ndarray):
            __array_priority__ = 100
        a = A()
        b = np.array([1]).view(MyNDArray)
        res0 = np.multiply(a, b)
        res1 = np.multiply(b, b, out=a)
        # self
        assert_equal(res0[0], a)
        assert_equal(res1[0], a)
        assert_equal(res0[1], np.multiply)
        assert_equal(res1[1], np.multiply)
        assert_equal(res0[2], '__call__')
        assert_equal(res1[2], '__call__')
        assert_equal(res0[3], (a, b))
        assert_equal(res1[3], (b, b))
        assert_equal(res0[4], {})
        # the out operand is normalized into the kwargs as a tuple
        assert_equal(res1[4], {'out': (a,)})
    def test_ufunc_override_mro(self):
        """Dispatch order of __array_ufunc__: a subclass override is tried
        before its superclass' even when positioned later; once an operand
        returns something other than NotImplemented, dispatch stops; if all
        return NotImplemented, TypeError is raised.  The C/CSub counters
        verify exactly which overrides were consulted."""
        # Some multi arg functions for testing.
        def tres_mul(a, b, c):
            return a * b * c
        def quatro_mul(a, b, c, d):
            return a * b * c * d
        # Make these into ufuncs.
        three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
        four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
        class A:
            def __array_ufunc__(self, func, method, *inputs, **kwargs):
                return "A"
        class ASub(A):
            def __array_ufunc__(self, func, method, *inputs, **kwargs):
                return "ASub"
        class B:
            def __array_ufunc__(self, func, method, *inputs, **kwargs):
                return "B"
        class C:
            def __init__(self):
                self.count = 0
            def __array_ufunc__(self, func, method, *inputs, **kwargs):
                self.count += 1
                return NotImplemented
        class CSub(C):
            def __array_ufunc__(self, func, method, *inputs, **kwargs):
                self.count += 1
                return NotImplemented
        a = A()
        a_sub = ASub()
        b = B()
        c = C()
        # Standard
        res = np.multiply(a, a_sub)
        assert_equal(res, "ASub")
        res = np.multiply(a_sub, b)
        assert_equal(res, "ASub")
        # With 1 NotImplemented
        res = np.multiply(c, a)
        assert_equal(res, "A")
        assert_equal(c.count, 1)
        # Check our counter works, so we can trust tests below.
        res = np.multiply(c, a)
        assert_equal(c.count, 2)
        # Both NotImplemented.
        c = C()
        c_sub = CSub()
        assert_raises(TypeError, np.multiply, c, c_sub)
        assert_equal(c.count, 1)
        assert_equal(c_sub.count, 1)
        c.count = c_sub.count = 0
        assert_raises(TypeError, np.multiply, c_sub, c)
        assert_equal(c.count, 1)
        assert_equal(c_sub.count, 1)
        c.count = 0
        assert_raises(TypeError, np.multiply, c, c)
        assert_equal(c.count, 1)
        c.count = 0
        assert_raises(TypeError, np.multiply, 2, c)
        assert_equal(c.count, 1)
        # Ternary testing.
        assert_equal(three_mul_ufunc(a, 1, 2), "A")
        assert_equal(three_mul_ufunc(1, a, 2), "A")
        assert_equal(three_mul_ufunc(1, 2, a), "A")
        assert_equal(three_mul_ufunc(a, a, 6), "A")
        assert_equal(three_mul_ufunc(a, 2, a), "A")
        assert_equal(three_mul_ufunc(a, 2, b), "A")
        assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
        assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
        c.count = 0
        assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
        assert_equal(c.count, 1)
        c.count = 0
        # ASub answers before c is ever consulted
        assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
        assert_equal(c.count, 0)
        c.count = 0
        assert_equal(three_mul_ufunc(a, b, c), "A")
        assert_equal(c.count, 0)
        c_sub.count = 0
        assert_equal(three_mul_ufunc(a, b, c_sub), "A")
        assert_equal(c_sub.count, 0)
        assert_equal(three_mul_ufunc(1, 2, b), "B")
        assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
        assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)
        assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)
        # Quaternary testing.
        assert_equal(four_mul_ufunc(a, 1, 2, 3), "A")
        assert_equal(four_mul_ufunc(1, a, 2, 3), "A")
        assert_equal(four_mul_ufunc(1, 1, a, 3), "A")
        assert_equal(four_mul_ufunc(1, 1, 2, a), "A")
        assert_equal(four_mul_ufunc(a, b, 2, 3), "A")
        assert_equal(four_mul_ufunc(1, a, 2, b), "A")
        assert_equal(four_mul_ufunc(b, 1, a, 3), "B")
        assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
        assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
        c = C()
        c_sub = CSub()
        assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
        assert_equal(c.count, 1)
        c.count = 0
        assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
        assert_equal(c_sub.count, 1)
        assert_equal(c.count, 1)
        c2 = C()
        c.count = c_sub.count = 0
        # the subclass instance is consulted first; the two C instances
        # of the same (super)class then count only once between them
        assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)
        assert_equal(c_sub.count, 1)
        assert_equal(c.count, 1)
        assert_equal(c2.count, 0)
        c.count = c2.count = c_sub.count = 0
        assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)
        assert_equal(c_sub.count, 1)
        assert_equal(c.count, 0)
        assert_equal(c2.count, 1)
def test_ufunc_override_methods(self):
class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
# __call__
a = A()
with assert_raises(TypeError):
np.multiply.__call__(1, a, foo='bar', answer=42)
res = np.multiply.__call__(1, a, subok='bar', where=42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], '__call__')
assert_equal(res[3], (1, a))
assert_equal(res[4], {'subok': 'bar', 'where': 42})
# __call__, wrong args
assert_raises(TypeError, np.multiply, a)
assert_raises(TypeError, np.multiply, a, a, a, a)
assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')
assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0])
# reduce, positional args
res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
'axis': 'axis0'})
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
keepdims='keep0', initial='init0',
where='where0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
'axis': 'axis0',
'initial': 'init0',
'where': 'where0'})
# reduce, output equal to None removed, but not other explicit ones,
# even if they are at their default value.
res = np.multiply.reduce(a, 0, None, None, False)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
assert_equal(res[4], {'axis': 0, 'keepdims': True})
res = np.multiply.reduce(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
res = np.multiply.reduce(a, 0, None, None, False, 2, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'initial': 2, 'where': True})
# np._NoValue ignored for initial
res = np.multiply.reduce(a, 0, None, None, False,
np._NoValue, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'where': True})
# None kept for initial, True for where.
res = np.multiply.reduce(a, 0, None, None, False, None, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'initial': None, 'where': True})
# reduce, wrong args
assert_raises(ValueError, np.multiply.reduce, a, out=())
assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
# accumulate, pos args
res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# accumulate, kwargs
res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# accumulate, output equal to None removed.
res = np.multiply.accumulate(a, 0, None, None)
assert_equal(res[4], {'axis': 0, 'dtype': None})
res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
# accumulate, wrong args
assert_raises(ValueError, np.multiply.accumulate, a, out=())
assert_raises(ValueError, np.multiply.accumulate, a,
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.accumulate, a,
'axis0', axis='axis0')
# reduceat, pos args
res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], (a, [4, 2]))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# reduceat, kwargs
res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], (a, [4, 2]))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# reduceat, output equal to None removed.
res = np.multiply.reduceat(a, [4, 2], 0, None, None)
assert_equal(res[4], {'axis': 0, 'dtype': None})
res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
assert_equal(res[4], {'axis': None, 'dtype': None})
# reduceat, wrong args
assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=())
assert_raises(ValueError, np.multiply.reduce, a, [4, 2],
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, [4, 2],
'axis0', axis='axis0')
# outer
res = np.multiply.outer(a, 42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'outer')
assert_equal(res[3], (a, 42))
assert_equal(res[4], {})
# outer, wrong args
assert_raises(TypeError, np.multiply.outer, a)
assert_raises(TypeError, np.multiply.outer, a, a, a, a)
assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
# at
res = np.multiply.at(a, [4, 2], 'b0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'at')
assert_equal(res[3], (a, [4, 2], 'b0'))
# at, wrong args
assert_raises(TypeError, np.multiply.at, a)
assert_raises(TypeError, np.multiply.at, a, a, a, a)
    def test_ufunc_override_out(self):
        """``out=`` must be normalized into a tuple in the kwargs that
        __array_ufunc__ receives, and default (None) outputs dropped."""
        class A:
            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                return kwargs
        class B:
            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                return kwargs
        a = A()
        b = B()
        # out given positionally and by keyword, with overriding operands in
        # every position
        res0 = np.multiply(a, b, 'out_arg')
        res1 = np.multiply(a, b, out='out_arg')
        res2 = np.multiply(2, b, 'out_arg')
        res3 = np.multiply(3, b, out='out_arg')
        res4 = np.multiply(a, 4, 'out_arg')
        res5 = np.multiply(a, 5, out='out_arg')
        assert_equal(res0['out'][0], 'out_arg')
        assert_equal(res1['out'][0], 'out_arg')
        assert_equal(res2['out'][0], 'out_arg')
        assert_equal(res3['out'][0], 'out_arg')
        assert_equal(res4['out'][0], 'out_arg')
        assert_equal(res5['out'][0], 'out_arg')
        # ufuncs with multiple output modf and frexp.
        res6 = np.modf(a, 'out0', 'out1')
        res7 = np.frexp(a, 'out0', 'out1')
        assert_equal(res6['out'][0], 'out0')
        assert_equal(res6['out'][1], 'out1')
        assert_equal(res7['out'][0], 'out0')
        assert_equal(res7['out'][1], 'out1')
        # While we're at it, check that default output is never passed on.
        assert_(np.sin(a, None) == {})
        assert_(np.sin(a, out=None) == {})
        assert_(np.sin(a, out=(None,)) == {})
        assert_(np.modf(a, None) == {})
        assert_(np.modf(a, None, None) == {})
        assert_(np.modf(a, out=(None, None)) == {})
        with assert_raises(TypeError):
            # Out argument must be tuple, since there are multiple outputs.
            np.modf(a, out=None)
        # don't give positional and output argument, or too many arguments.
        # wrong number of arguments in the tuple is an error too.
        assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
        assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
        assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
        assert_raises(TypeError, np.multiply, a, out=())
        assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
        assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
        assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
        assert_raises(ValueError, np.modf, a, out=('one',))
    def test_ufunc_override_where(self):
        """The ``where=`` operand participates in dispatch: an override that
        does not unwrap it (OverriddenArrayOld) ends in a TypeError, while
        one that does (OverriddenArrayNew) succeeds."""
        class OverriddenArrayOld(np.ndarray):
            def _unwrap(self, objs):
                # Convert own-class instances back to ndarray; bail out with
                # NotImplemented on any other non-ndarray type.
                cls = type(self)
                result = []
                for obj in objs:
                    if isinstance(obj, cls):
                        obj = np.array(obj)
                    elif type(obj) != np.ndarray:
                        return NotImplemented
                    result.append(obj)
                return result
            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                inputs = self._unwrap(inputs)
                if inputs is NotImplemented:
                    return NotImplemented
                kwargs = kwargs.copy()
                if "out" in kwargs:
                    kwargs["out"] = self._unwrap(kwargs["out"])
                    if kwargs["out"] is NotImplemented:
                        return NotImplemented
                r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
                if r is not NotImplemented:
                    r = r.view(type(self))
                return r
        class OverriddenArrayNew(OverriddenArrayOld):
            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                # Additionally unwrap the where= mask before delegating.
                kwargs = kwargs.copy()
                if "where" in kwargs:
                    kwargs["where"] = self._unwrap((kwargs["where"], ))
                    if kwargs["where"] is NotImplemented:
                        return NotImplemented
                    else:
                        kwargs["where"] = kwargs["where"][0]
                r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
                if r is not NotImplemented:
                    r = r.view(type(self))
                return r
        ufunc = np.negative
        array = np.array([1, 2, 3])
        where = np.array([True, False, True])
        expected = ufunc(array, where=where)
        with pytest.raises(TypeError):
            ufunc(array, where=where.view(OverriddenArrayOld))
        result_1 = ufunc(
            array,
            where=where.view(OverriddenArrayNew)
        )
        assert isinstance(result_1, OverriddenArrayNew)
        # only compare elements selected by the mask; the rest are undefined
        assert np.all(np.array(result_1) == expected, where=where)
        result_2 = ufunc(
            array.view(OverriddenArrayNew),
            where=where.view(OverriddenArrayNew)
        )
        assert isinstance(result_2, OverriddenArrayNew)
        assert np.all(np.array(result_2) == expected, where=where)
def test_ufunc_override_exception(self):
class A:
def __array_ufunc__(self, *a, **kwargs):
raise ValueError("oops")
a = A()
assert_raises(ValueError, np.negative, 1, out=a)
assert_raises(ValueError, np.negative, a)
assert_raises(ValueError, np.divide, 1., a)
    def test_ufunc_override_not_implemented(self):
        """When every __array_ufunc__ returns NotImplemented, the TypeError
        message must name the ufunc, the method, and the operand types."""
        class A:
            def __array_ufunc__(self, *args, **kwargs):
                return NotImplemented
        # fnmatch-style wildcards (*) stand in for the object reprs
        msg = ("operand type(s) all returned NotImplemented from "
               "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
            np.negative(A())
        msg = ("operand type(s) all returned NotImplemented from "
               "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
               "out=(1,)): 'A', 'object', 'int'")
        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
            np.add(A(), object(), out=1)
def test_ufunc_override_disabled(self):
class OptOut:
__array_ufunc__ = None
opt_out = OptOut()
# ufuncs always raise
msg = "operand 'OptOut' does not support ufuncs"
with assert_raises_regex(TypeError, msg):
np.add(opt_out, 1)
with assert_raises_regex(TypeError, msg):
np.add(1, opt_out)
with assert_raises_regex(TypeError, msg):
np.negative(opt_out)
# opt-outs still hold even when other arguments have pathological
# __array_ufunc__ implementations
class GreedyArray:
def __array_ufunc__(self, *args, **kwargs):
return self
greedy = GreedyArray()
assert_(np.negative(greedy) is greedy)
with assert_raises_regex(TypeError, msg):
np.add(greedy, opt_out)
with assert_raises_regex(TypeError, msg):
np.add(greedy, 1, out=opt_out)
    def test_gufunc_override(self):
        """Generalized ufuncs (here inner1d) take a different code path but
        must honor __array_ufunc__ identically, including for ``out=``."""
        # gufunc are just ufunc instances, but follow a different path,
        # so check __array_ufunc__ overrides them properly.
        class A:
            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                return self, ufunc, method, inputs, kwargs
        inner1d = ncu_tests.inner1d
        a = A()
        res = inner1d(a, a)
        assert_equal(res[0], a)
        assert_equal(res[1], inner1d)
        assert_equal(res[2], '__call__')
        assert_equal(res[3], (a, a))
        assert_equal(res[4], {})
        # override object as the out argument
        res = inner1d(1, 1, out=a)
        assert_equal(res[0], a)
        assert_equal(res[1], inner1d)
        assert_equal(res[2], '__call__')
        assert_equal(res[3], (1, 1))
        assert_equal(res[4], {'out': (a,)})
        # wrong number of arguments in the tuple is an error too.
        assert_raises(TypeError, inner1d, a, out='two')
        assert_raises(TypeError, inner1d, a, a, 'one', out='two')
        assert_raises(TypeError, inner1d, a, a, 'one', 'two')
        assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
        assert_raises(ValueError, inner1d, a, a, out=())
def test_ufunc_override_with_super(self):
# NOTE: this class is used in doc/source/user/basics.subclassing.rst
# if you make any changes here, do update it there too.
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
args = []
in_no = []
for i, input_ in enumerate(inputs):
if isinstance(input_, A):
in_no.append(i)
args.append(input_.view(np.ndarray))
else:
args.append(input_)
outputs = out
out_no = []
if outputs:
out_args = []
for j, output in enumerate(outputs):
if isinstance(output, A):
out_no.append(j)
out_args.append(output.view(np.ndarray))
else:
out_args.append(output)
kwargs['out'] = tuple(out_args)
else:
outputs = (None,) * ufunc.nout
info = {}
if in_no:
info['inputs'] = in_no
if out_no:
info['outputs'] = out_no
results = super().__array_ufunc__(ufunc, method,
*args, **kwargs)
if results is NotImplemented:
return NotImplemented
if method == 'at':
if isinstance(inputs[0], A):
inputs[0].info = info
return
if ufunc.nout == 1:
results = (results,)
results = tuple((np.asarray(result).view(A)
if output is None else output)
for result, output in zip(results, outputs))
if results and isinstance(results[0], A):
results[0].info = info
return results[0] if len(results) == 1 else results
class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if any(isinstance(input_, A) for input_ in inputs):
return "A!"
else:
return NotImplemented
d = np.arange(5.)
# 1 input, 1 output
a = np.arange(5.).view(A)
b = np.sin(a)
check = np.sin(d)
assert_(np.all(check == b))
assert_equal(b.info, {'inputs': [0]})
b = np.sin(d, out=(a,))
assert_(np.all(check == b))
assert_equal(b.info, {'outputs': [0]})
assert_(b is a)
a = np.arange(5.).view(A)
b = np.sin(a, out=a)
assert_(np.all(check == b))
assert_equal(b.info, {'inputs': [0], 'outputs': [0]})
# 1 input, 2 outputs
a = np.arange(5.).view(A)
b1, b2 = np.modf(a)
assert_equal(b1.info, {'inputs': [0]})
b1, b2 = np.modf(d, out=(None, a))
assert_(b2 is a)
assert_equal(b1.info, {'outputs': [1]})
a = np.arange(5.).view(A)
b = np.arange(5.).view(A)
c1, c2 = np.modf(a, out=(a, b))
assert_(c1 is a)
assert_(c2 is b)
assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]})
# 2 input, 1 output
a = np.arange(5.).view(A)
b = np.arange(5.).view(A)
c = np.add(a, b, out=a)
assert_(c is a)
assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]})
# some tests with a non-ndarray subclass
a = np.arange(5.)
b = B()
assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_raises(TypeError, np.add, a, b)
a = a.view(A)
assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!")
assert_(np.add(a, b) == "A!")
# regression check for gh-9102 -- tests ufunc.reduce implicitly.
d = np.array([[1, 2, 3], [1, 2, 3]])
a = d.view(A)
c = a.any()
check = d.any()
assert_equal(c, check)
assert_(c.info, {'inputs': [0]})
c = a.max()
check = d.max()
assert_equal(c, check)
assert_(c.info, {'inputs': [0]})
b = np.array(0).view(A)
c = a.max(out=b)
assert_equal(c, check)
assert_(c is b)
assert_(c.info, {'inputs': [0], 'outputs': [0]})
check = a.max(axis=0)
b = np.zeros_like(check).view(A)
c = a.max(axis=0, out=b)
assert_equal(c, check)
assert_(c is b)
assert_(c.info, {'inputs': [0], 'outputs': [0]})
# simple explicit tests of reduce, accumulate, reduceat
check = np.add.reduce(d, axis=1)
c = np.add.reduce(a, axis=1)
assert_equal(c, check)
assert_(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.reduce(a, 1, None, b)
assert_equal(c, check)
assert_(c is b)
assert_(c.info, {'inputs': [0], 'outputs': [0]})
check = np.add.accumulate(d, axis=0)
c = np.add.accumulate(a, axis=0)
assert_equal(c, check)
assert_(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.accumulate(a, 0, None, b)
assert_equal(c, check)
assert_(c is b)
assert_(c.info, {'inputs': [0], 'outputs': [0]})
indices = [0, 2, 1]
check = np.add.reduceat(d, indices, axis=1)
c = np.add.reduceat(a, indices, axis=1)
assert_equal(c, check)
assert_(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.reduceat(a, indices, 1, None, b)
assert_equal(c, check)
assert_(c is b)
assert_(c.info, {'inputs': [0], 'outputs': [0]})
# and a few tests for at
d = np.array([[1, 2, 3], [1, 2, 3]])
check = d.copy()
a = d.copy().view(A)
np.add.at(check, ([0, 1], [0, 2]), 1.)
np.add.at(a, ([0, 1], [0, 2]), 1.)
assert_equal(a, check)
assert_(a.info, {'inputs': [0]})
b = np.array(1.).view(A)
a = d.copy().view(A)
np.add.at(a, ([0, 1], [0, 2]), b)
assert_equal(a, check)
assert_(a.info, {'inputs': [0, 2]})
def test_array_ufunc_direct_call(self):
# This is mainly a regression test for gh-24023 (shouldn't segfault)
a = np.array(1)
with pytest.raises(TypeError):
a.__array_ufunc__()
# No kwargs means kwargs may be NULL on the C-level
with pytest.raises(TypeError):
a.__array_ufunc__(1, 2)
# And the same with a valid call:
res = a.__array_ufunc__(np.add, "__call__", a, a)
assert_array_equal(res, a + a)
    def test_ufunc_docstring(self):
        """__doc__ on a ufunc must be settable and deletable both via the
        attribute and via the instance __dict__, falling back to the
        built-in docstring once removed."""
        original_doc = np.add.__doc__
        new_doc = "new docs"
        # PyPy does not populate __module__/__qualname__ in the dict
        expected_dict = (
            {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"}
        )
        np.add.__doc__ = new_doc
        assert np.add.__doc__ == new_doc
        assert np.add.__dict__["__doc__"] == new_doc
        del np.add.__doc__
        assert np.add.__doc__ == original_doc
        assert np.add.__dict__ == expected_dict
        # manipulating the instance dict directly must behave the same
        np.add.__dict__["other"] = 1
        np.add.__dict__["__doc__"] = new_doc
        assert np.add.__doc__ == new_doc
        del np.add.__dict__["__doc__"]
        assert np.add.__doc__ == original_doc
        del np.add.__dict__["other"]
        assert np.add.__dict__ == expected_dict
class TestChoose:
    """Minimal sanity check for np.choose with a boolean index array."""

    def test_mixed(self):
        index = np.array([True, True])
        choices = (np.array([True, True]), 1)
        # index value True (== 1) selects the scalar choice everywhere
        assert_equal(np.choose(index, choices), np.array([1, 1]))
class TestRationalFunctions:
def test_lcm(self):
self._test_lcm_inner(np.int16)
self._test_lcm_inner(np.uint16)
def test_lcm_object(self):
self._test_lcm_inner(np.object_)
def test_gcd(self):
self._test_gcd_inner(np.int16)
self._test_lcm_inner(np.uint16)
def test_gcd_object(self):
self._test_gcd_inner(np.object_)
def _test_lcm_inner(self, dtype):
# basic use
a = np.array([12, 120], dtype=dtype)
b = np.array([20, 200], dtype=dtype)
assert_equal(np.lcm(a, b), [60, 600])
if not issubclass(dtype, np.unsignedinteger):
# negatives are ignored
a = np.array([12, -12, 12, -12], dtype=dtype)
b = np.array([20, 20, -20, -20], dtype=dtype)
assert_equal(np.lcm(a, b), [60]*4)
# reduce
a = np.array([3, 12, 20], dtype=dtype)
assert_equal(np.lcm.reduce([3, 12, 20]), 60)
# broadcasting, and a test including 0
a = np.arange(6).astype(dtype)
b = 20
assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
def _test_gcd_inner(self, dtype):
# basic use
a = np.array([12, 120], dtype=dtype)
b = np.array([20, 200], dtype=dtype)
assert_equal(np.gcd(a, b), [4, 40])
if not issubclass(dtype, np.unsignedinteger):
# negatives are ignored
a = np.array([12, -12, 12, -12], dtype=dtype)
b = np.array([20, 20, -20, -20], dtype=dtype)
assert_equal(np.gcd(a, b), [4]*4)
# reduce
a = np.array([15, 25, 35], dtype=dtype)
assert_equal(np.gcd.reduce(a), 5)
# broadcasting, and a test including 0
a = np.arange(6).astype(dtype)
b = 20
assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5])
def test_lcm_overflow(self):
# verify that we don't overflow when a*b does overflow
big = np.int32(np.iinfo(np.int32).max // 11)
a = 2*big
b = 5*big
assert_equal(np.lcm(a, b), 10*big)
def test_gcd_overflow(self):
for dtype in (np.int32, np.int64):
# verify that we don't overflow when taking abs(x)
# not relevant for lcm, where the result is unrepresentable anyway
a = dtype(np.iinfo(dtype).min) # negative power of two
q = -(a // 4)
assert_equal(np.gcd(a, q*3), q)
assert_equal(np.gcd(a, -q*3), q)
def test_decimal(self):
from decimal import Decimal
a = np.array([1, 1, -1, -1]) * Decimal('0.20')
b = np.array([1, -1, 1, -1]) * Decimal('0.12')
assert_equal(np.gcd(a, b), 4*[Decimal('0.04')])
assert_equal(np.lcm(a, b), 4*[Decimal('0.60')])
def test_float(self):
# not well-defined on float due to rounding errors
assert_raises(TypeError, np.gcd, 0.3, 0.4)
assert_raises(TypeError, np.lcm, 0.3, 0.4)
def test_huge_integers(self):
# Converting to an array first is a bit different as it means we
# have an explicit object dtype:
assert_equal(np.array(2**200), 2**200)
# Special promotion rules should ensure that this also works for
# two Python integers (even if slow).
# (We do this for comparisons, as the result is always bool and
# we also special case array comparisons with Python integers)
np.equal(2**200, 2**200)
# But, we cannot do this when it would affect the result dtype:
with pytest.raises(OverflowError):
np.gcd(2**100, 3**100)
# Asking for `object` explicitly is fine, though:
assert np.gcd(2**100, 3**100, dtype=object) == 1
# As of now, the below work, because it is using arrays (which
# will be object arrays)
a = np.array(2**100 * 3**5)
b = np.array([2**100 * 5**7, 2**50 * 3**10])
assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5])
assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])
def test_inf_and_nan(self):
inf = np.array([np.inf], dtype=np.object_)
assert_raises(ValueError, np.gcd, inf, 1)
assert_raises(ValueError, np.gcd, 1, inf)
assert_raises(ValueError, np.gcd, np.nan, inf)
assert_raises(TypeError, np.gcd, 4, float(np.inf))
class TestRoundingFunctions:
    """np.floor/np.ceil/np.trunc must delegate like the math-module
    functions: through __floor__/__ceil__/__trunc__ when present,
    otherwise through __float__, and must preserve the input dtype."""

    def test_object_direct(self):
        """Objects providing the magic methods are used directly."""
        class HasMagic:
            def __floor__(self):
                return 1

            def __ceil__(self):
                return 2

            def __trunc__(self):
                return 3

        arr = np.array([HasMagic(), HasMagic()])
        assert_equal(np.floor(arr), [1, 1])
        assert_equal(np.ceil(arr), [2, 2])
        assert_equal(np.trunc(arr), [3, 3])

    def test_object_indirect(self):
        """Without the magic methods, floor/ceil fall back to __float__;
        trunc does not (consistent with math.trunc)."""
        class FloatOnly:
            def __float__(self):
                return -2.5

        arr = np.array([FloatOnly(), FloatOnly()])
        assert_equal(np.floor(arr), [-3, -3])
        assert_equal(np.ceil(arr), [-2, -2])
        with pytest.raises(TypeError):
            np.trunc(arr)  # consistent with math.trunc

    def test_fraction(self):
        frac = Fraction(-4, 3)
        assert_equal(np.floor(frac), -2)
        assert_equal(np.ceil(frac), -1)
        assert_equal(np.trunc(frac), -1)

    @pytest.mark.parametrize('func', [np.floor, np.ceil, np.trunc])
    @pytest.mark.parametrize('dtype', [np.bool, np.float64, np.float32,
                                       np.int64, np.uint32])
    def test_output_dtype(self, func, dtype):
        """Integral-valued input must round-trip unchanged, keeping dtype."""
        arr = np.array([-2, 0, 4, 8]).astype(dtype)
        rounded = func(arr)
        assert_equal(arr, rounded)
        assert rounded.dtype == dtype
class TestComplexFunctions:
    """Accuracy, consistency and branch-cut checks for the complex-capable
    transcendental ufuncs listed in ``funcs``."""

    funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
             np.arctanh, np.sin, np.cos, np.tan, np.exp,
             np.exp2, np.log, np.sqrt, np.log10, np.log2,
             np.log1p]

    def test_it(self):
        # Real input and the equivalent complex input must agree:
        # identical real part and a zero imaginary part.
        for f in self.funcs:
            if f is np.arccosh:
                x = 1.5  # arccosh is only real-valued for x >= 1
            else:
                x = .5
            fr = f(x)
            fz = f(complex(x))
            assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
            assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)

    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
    def test_precisions_consistent(self):
        # Single, double and long-double results must agree up to the
        # precision of the narrower type.
        z = 1 + 1j
        for f in self.funcs:
            fcf = f(np.csingle(z))
            fcd = f(np.cdouble(z))
            fcl = f(np.clongdouble(z))
            assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f)
            assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f)

    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
    def test_branch_cuts(self):
        # check branch cuts and continuity on them
        _check_branch_cut(np.log, -0.5, 1j, 1, -1, True)
        _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)
        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
        _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)

        _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)
        _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)
        _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True)
        _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True)
        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)
        _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)

        # check against bogus branch cuts: assert continuity between quadrants
        _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1)
        _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1)
        _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)

        _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)
        _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1)
        _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1)

    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
    def test_branch_cuts_complex64(self):
        # Same checks as test_branch_cuts, but in single precision.
        # check branch cuts and continuity on them
        _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)
        _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)
        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
        _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)

        _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
        _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
        _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
        _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)
        _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)

        # check against bogus branch cuts: assert continuity between quadrants
        _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
        _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
        _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)

        _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
        _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)
        _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)

    def test_against_cmath(self):
        # Cross-check every func that has a cmath counterpart at the four
        # unit-square corners off the real/imaginary axes.
        import cmath

        points = [-1-1j, -1+1j, +1-1j, +1+1j]
        name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
                    'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
        atol = 4*np.finfo(complex).eps
        for func in self.funcs:
            fname = func.__name__.split('.')[-1]
            cname = name_map.get(fname, fname)
            try:
                cfunc = getattr(cmath, cname)
            except AttributeError:
                # No cmath equivalent (e.g. exp2, log1p) -- skip.
                continue
            for p in points:
                a = complex(func(np.complex128(p)))
                b = cfunc(p)
                assert_(
                    abs(a - b) < atol,
                    "%s %s: %s; cmath: %s" % (fname, p, a, b)
                )

    @pytest.mark.xfail(
        # manylinux2014 uses glibc2.17
        _glibc_older_than("2.18"),
        reason="Older glibc versions are imprecise (maybe passes with SIMD?)"
    )
    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
    @pytest.mark.parametrize('dtype', [
        np.complex64, np.complex128, np.clongdouble
    ])
    def test_loss_of_precision(self, dtype):
        """Check loss of precision in complex arc* functions"""
        if dtype is np.clongdouble and platform.machine() != 'x86_64':
            # Failures on musllinux, aarch64, s390x, ppc64le (see gh-17554)
            pytest.skip('Only works reliably for x86-64 and recent glibc')

        # Check against known-good functions

        info = np.finfo(dtype)
        real_dtype = dtype(0.).real.dtype
        eps = info.eps

        def check(x, rtol):
            # Compare each arc* function against its small-argument
            # identity (e.g. arcsinh(x) == Im(arcsin(ix))).
            x = x.astype(real_dtype)

            z = x.astype(dtype)
            d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
                                       'arcsinh'))

            z = (1j*x).astype(dtype)
            d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
                                       'arcsin'))

            z = x.astype(dtype)
            d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
                                       'arctanh'))

            z = (1j*x).astype(dtype)
            d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
                                       'arctan'))

        # The switchover was chosen as 1e-3; hence there can be up to
        # ~eps/1e-3 of relative cancellation error before it

        x_series = np.logspace(-20, -3.001, 200)
        x_basic = np.logspace(-2.999, 0, 10, endpoint=False)

        if dtype is np.clongdouble:
            if bad_arcsinh():
                pytest.skip("Trig functions of np.clongdouble values known "
                            "to be inaccurate on aarch64 and PPC for some "
                            "compilation configurations.")
            # It's not guaranteed that the system-provided arc functions
            # are accurate down to a few epsilons. (Eg. on Linux 64-bit)
            # So, give more leeway for long complex tests here:
            check(x_series, 50.0*eps)
        else:
            check(x_series, 2.1*eps)
        check(x_basic, 2.0*eps/1e-3)

        # Check a few points against precomputed high-accuracy references.

        z = np.array([1e-5*(1+1j)], dtype=dtype)
        p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
        d = np.absolute(1-np.arctanh(z)/p)
        assert_(np.all(d < 1e-15))

        p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
        d = np.absolute(1-np.arcsinh(z)/p)
        assert_(np.all(d < 1e-15))

        p = 9.999999999333333333e-6j + 1.000000000066666666e-5
        d = np.absolute(1-np.arctan(z)/p)
        assert_(np.all(d < 1e-15))

        p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
        d = np.absolute(1-np.arcsin(z)/p)
        assert_(np.all(d < 1e-15))

        # Check continuity across switchover points

        def check(func, z0, d=1):
            z0 = np.asarray(z0, dtype=dtype)
            zp = z0 + abs(z0) * d * eps * 2
            zm = z0 - abs(z0) * d * eps * 2
            assert_(np.all(zp != zm), (zp, zm))

            # NB: the cancellation error at the switchover is at least eps
            good = (abs(func(zp) - func(zm)) < 2*eps)
            assert_(np.all(good), (func, z0[~good]))

        # NOTE(review): np.arcsinh appears twice in this tuple; the second
        # entry looks like it may have been meant to be np.arccosh --
        # confirm against upstream intent before changing.
        for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan):
            pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3)
                   if rp != 0 or ip != 0]
            check(func, pts, 1)
            check(func, pts, 1j)
            check(func, pts, 1+1j)

    @np.errstate(all="ignore")
    def test_promotion_corner_cases(self):
        # Low-precision scalar inputs must keep (or get) a low-precision
        # result dtype.
        for func in self.funcs:
            assert func(np.float16(1)).dtype == np.float16
            # Integer to low precision float promotion is a dubious choice:
            assert func(np.uint8(1)).dtype == np.float16
            assert func(np.int16(1)).dtype == np.float32
class TestAttributes:
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
assert_(add.ntypes >= 18) # don't fail if types added
assert_('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
def test_doc(self):
# don't bother checking the long list of kwargs, which are likely to
# change
assert_(ncu.add.__doc__.startswith(
"add(x1, x2, /, out=None, *, where=True"))
assert_(ncu.frexp.__doc__.startswith(
"frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
class TestSubclass:
    """Ufuncs must accept and return ndarray subclasses."""

    def test_subclass_op(self):

        class ZeroFilled(np.ndarray):
            # Object-dtype subclass whose instances start out all zeros.
            def __new__(cls, shape):
                obj = np.ndarray.__new__(cls, shape, dtype=object)
                obj.fill(0)
                return obj

        arr = ZeroFilled((3, 4))
        # 0 + 0 == 0 elementwise, so the sum equals the original array.
        assert_equal(arr + arr, arr)
class TestFrompyfunc:
    """Behaviour of the ``identity`` argument to np.frompyfunc."""

    def test_identity(self):
        def mul(a, b):
            return a * b

        # identity=1: empty and multi-axis reductions are well defined.
        with_value = np.frompyfunc(mul, nin=2, nout=1, identity=1)
        assert_equal(with_value.reduce([2, 3, 4]), 24)
        assert_equal(with_value.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
        assert_equal(with_value.reduce([]), 1)

        # identity=None: reorderable, but an empty reduce has no value.
        reorderable = np.frompyfunc(mul, nin=2, nout=1, identity=None)
        assert_equal(reorderable.reduce([2, 3, 4]), 24)
        assert_equal(reorderable.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
        assert_raises(ValueError, lambda: reorderable.reduce([]))

        # no identity: neither multi-axis nor empty reductions are allowed.
        plain = np.frompyfunc(mul, nin=2, nout=1)
        assert_equal(plain.reduce([2, 3, 4]), 24)
        assert_raises(ValueError,
                      lambda: plain.reduce(np.ones((2, 2)), axis=(0, 1)))
        assert_raises(ValueError, lambda: plain.reduce([]))
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
                      dtype=complex):
    """
    Check for a branch cut in a function.

    Assert that `x0` lies on a branch cut of function `f` and `f` is
    continuous from the direction `dx`: approaching from `+dx` must give
    (nearly) `f(x0)`, while approaching from `-dx` must give `f(x0)` with
    its real/imaginary parts flipped according to `re_sign`/`im_sign`.

    Parameters
    ----------
    f : func
        Function to check
    x0 : array-like
        Point on branch cut
    dx : array-like
        Direction to check continuity in
    re_sign, im_sign : {1, -1}
        Change of sign of the real or imaginary part expected
    sig_zero_ok : bool
        Whether to check if the branch cut respects signed zero (if applicable)
    dtype : dtype
        Dtype to check (should be complex)
    """
    x0 = np.atleast_1d(x0).astype(dtype)
    dx = np.atleast_1d(dx).astype(dtype)

    # Step size and tolerance scale with the precision of the dtype
    # ('F' is complex64; everything else is treated as double precision).
    if np.dtype(dtype).char == 'F':
        scale = np.finfo(dtype).eps * 1e2
        atol = np.float32(1e-2)
    else:
        scale = np.finfo(dtype).eps * 1e3
        atol = 1e-4

    # Sample just on either side of the cut, stepping along unit-dx,
    # proportionally to |x0|.
    y0 = f(x0)
    yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
    ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))

    assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
    assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
    assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
    assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))

    if sig_zero_ok:
        # check that signed zeros also work as a displacement: replacing
        # the zero component with a negative zero must land on the other
        # side of the cut, just like the explicit -dx step above.
        jr = (x0.real == 0) & (dx.real != 0)
        ji = (x0.imag == 0) & (dx.imag != 0)
        if np.any(jr):
            x = x0[jr]
            x.real = ncu.NZERO
            ym = f(x)
            assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))
            assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))

        if np.any(ji):
            x = x0[ji]
            x.imag = ncu.NZERO
            ym = f(x)
            assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))
            assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))
def test_copysign():
    """copysign must transfer the sign bit, including onto zeros and NaNs."""
    assert_(np.copysign(1, -1) == -1)
    with np.errstate(divide="ignore"):
        # Signed zeros compare equal; dividing exposes their sign.
        neg_zero = np.copysign(0, -1)
        pos_zero = np.copysign(0, 1)
        assert_(1 / neg_zero < 0)
        assert_(1 / pos_zero > 0)
    # Only the sign bit of a NaN is affected.
    assert_(np.signbit(np.copysign(np.nan, -1)))
    assert_(not np.signbit(np.copysign(np.nan, 1)))
def _test_nextafter(t):
    """Shared np.nextafter checks for the floating-point type *t*."""
    one, two, zero = t(1), t(2), t(0)
    eps = np.finfo(t).eps
    # Stepping up from 1.0 moves by exactly one ulp (== eps at 1.0).
    assert_(np.nextafter(one, two) - one == eps)
    # Stepping down from 1.0 must decrease the value.
    assert_(np.nextafter(one, zero) - one < 0)
    # A NaN in either operand propagates.
    for a, b in ((np.nan, one), (one, np.nan)):
        assert_(np.isnan(np.nextafter(a, b)))
    # Equal endpoints: the value is returned unchanged.
    assert_(np.nextafter(one, one) == one)
def test_nextafter():
    # Run the shared nextafter checks for double precision.
    return _test_nextafter(np.float64)
def test_nextafterf():
    # Run the shared nextafter checks for single precision.
    return _test_nextafter(np.float32)
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
                    reason="long double is same as double")
@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
                   reason="IBM double double")
def test_nextafterl():
    # Extended precision, only where longdouble genuinely differs from
    # double and is not IBM double-double.
    return _test_nextafter(np.longdouble)
def test_nextafter_0():
for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)):
# The value of tiny for double double is NaN, so we need to pass the
# assert
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(t).tiny):
tiny = np.finfo(t).tiny
assert_(
0. < direction * np.nextafter(t(0), t(direction)) < tiny)
assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0)
def _test_spacing(t):
    """Shared np.spacing checks for the floating-point type *t*."""
    eps = np.finfo(t).eps
    with np.errstate(invalid='ignore'):
        # spacing(1.0) is by definition the machine epsilon.
        assert_equal(np.spacing(t(1)), eps)
        # Non-finite inputs have no neighbouring value: result is NaN.
        for special in (t(np.nan), t(np.inf), -t(np.inf)):
            assert_(np.isnan(np.spacing(special)))
        # Huge but finite values still have a nonzero ulp.
        assert_(np.spacing(t(1e30)) != 0)
def test_spacing():
    # Run the shared spacing checks for double precision.
    return _test_spacing(np.float64)
def test_spacingf():
    # Run the shared spacing checks for single precision.
    return _test_spacing(np.float32)
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
                    reason="long double is same as double")
@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
                   reason="IBM double double")
def test_spacingl():
    # Extended precision, only where longdouble genuinely differs from
    # double and is not IBM double-double.
    return _test_spacing(np.longdouble)
def test_spacing_gfortran():
    """Compare np.spacing against values from Fortran's SPACING().

    Reference produced by this program, built with gfortran 4.3.3 on
    linux 32 bits:

        PROGRAM test_spacing
          INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
          INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)

          WRITE(*,*) spacing(0.00001_DBL)
          WRITE(*,*) spacing(1.0_DBL)
          WRITE(*,*) spacing(1000._DBL)
          WRITE(*,*) spacing(10500._DBL)

          WRITE(*,*) spacing(0.00001_SGL)
          WRITE(*,*) spacing(1.0_SGL)
          WRITE(*,*) spacing(1000._SGL)
          WRITE(*,*) spacing(10500._SGL)
        END PROGRAM
    """
    inputs = [1e-5, 1, 1000, 10500]
    # dtype -> (reference spacings, comparison precision in decimals)
    expected = {
        np.float32: ([9.09494702E-13,
                      1.19209290E-07,
                      6.10351563E-05,
                      9.76562500E-04], 10),
        np.float64: ([1.69406589450860068E-021,
                      2.22044604925031308E-016,
                      1.13686837721616030E-013,
                      1.81898940354585648E-012], 20),
    }
    for dt, (ref, decimal) in expected.items():
        x = np.array(inputs, dtype=dt)
        assert_array_almost_equal(np.spacing(x), ref, decimal=decimal)
def test_nextafter_vs_spacing():
    """nextafter's upward step must equal spacing at the same point."""
    # XXX: spacing does not handle long double yet, so only single and
    # double precision are exercised.
    for t, base in itertools.product((np.float32, np.float64),
                                     (1, 1e-5, 1000)):
        value = t(base)
        upward = t(base + 1)
        assert_(np.nextafter(value, upward) - value == np.spacing(value))
def test_pos_nan():
    """np.nan must be a positive NaN (sign bit clear)."""
    assert_(not np.signbit(np.nan))
def test_reduceat():
"""Test bug in reduceat when structured arrays are not copied."""
db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])
a = np.empty([100], dtype=db)
a['name'] = 'Simple'
a['time'] = 10
a['value'] = 100
indx = [0, 7, 15, 25]
h2 = []
val1 = indx[0]
for val2 in indx[1:]:
h2.append(np.add.reduce(a['value'][val1:val2]))
val1 = val2
h2.append(np.add.reduce(a['value'][val1:]))
h2 = np.array(h2)
# test buffered -- this should work
h1 = np.add.reduceat(a['value'], indx)
assert_array_almost_equal(h1, h2)
# This is when the error occurs.
# test no buffer
np.setbufsize(32)
h1 = np.add.reduceat(a['value'], indx)
np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT)
assert_array_almost_equal(h1, h2)
def test_reduceat_empty():
    """reduceat with no indices must give an empty, dtype-preserving result."""
    def check(arr, indices, axis, expected_shape):
        out = np.add.reduceat(arr, indices, axis=axis)
        assert_equal(out.dtype, arr.dtype)
        assert_equal(out.shape, expected_shape)

    # Fully empty input and indices.
    check(np.array([], 'f8'), np.array([], 'i4'), 0, (0,))
    # Zero-sized results along either axis of a 2-D input.
    matrix = np.ones((5, 2))
    check(matrix, [], 0, (0, 2))
    check(matrix, [], 1, (5, 0))
def test_complex_nan_comparisons():
    """Ordering and equality comparisons with a complex NaN are always False."""
    nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
    fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
            complex(1, 1), complex(-1, -1), complex(0, 0)]

    with np.errstate(invalid='ignore'):
        for xval, yval in itertools.product(nans + fins, repeat=2):
            x = np.array([xval])
            y = np.array([yval])
            if np.isfinite(x) and np.isfinite(y):
                # Only pairs involving at least one NaN are of interest.
                continue

            assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
            assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
            assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
            assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
            assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
def test_rint_big_int():
    """rint must leave an exactly-representable big integer unchanged.

    Regression test for https://github.com/numpy/numpy/issues/6685
    (np.rint gave wrong results for large integers on Windows 32-bit
    and with MKL).
    """
    val = 4607998452777363968
    # Sanity check: the value round-trips through float exactly.
    assert_equal(val, int(float(val)))
    # Rounding to the nearest integer is therefore an identity.
    assert_equal(val, np.rint(val))
@pytest.mark.parametrize('ftype', [np.float32, np.float64])
def test_memoverlap_accumulate(ftype):
    # Reproduces bug https://github.com/numpy/numpy/issues/15597:
    # accumulate could produce wrong results when input and output
    # memory overlap.
    arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype)
    # Expected running maximum / minimum of `arr`.
    out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype)
    out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype)
    assert_equal(np.maximum.accumulate(arr), out_max)
    assert_equal(np.minimum.accumulate(arr), out_min)
@pytest.mark.parametrize("ufunc, dtype", [
    (ufunc, t[0])
    for ufunc in UFUNCS_BINARY_ACC
    for t in ufunc.types
    if t[-1] == '?' and t[0] not in 'DFGMmO'
])
def test_memoverlap_accumulate_cmp(ufunc, dtype):
    """accumulate loops producing bool must match a pure-Python reference."""
    if ufunc.signature:
        pytest.skip('For generic signatures only')
    # Several sizes -- presumably chosen to hit different internal code
    # paths (scalar vs. vectorized loops); confirm against upstream.
    for size in (2, 8, 32, 64, 128, 256):
        arr = np.array([0, 1, 1]*size, dtype=dtype)
        acc = ufunc.accumulate(arr, dtype='?')
        acc_u8 = acc.view(np.uint8)
        # Ground truth computed element by element in Python.
        exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8)
        assert_equal(exp, acc_u8)
@pytest.mark.parametrize("ufunc, dtype", [
    (ufunc, t[0])
    for ufunc in UFUNCS_BINARY_ACC
    for t in ufunc.types
    if t[0] == t[1] and t[0] == t[-1] and t[0] not in 'DFGMmO?'
])
def test_memoverlap_accumulate_symmetric(ufunc, dtype):
    """accumulate with matching in/out dtypes must match a Python reference."""
    if ufunc.signature:
        pytest.skip('For generic signatures only')
    # FP errors (overflow etc.) are irrelevant here; only values matter.
    with np.errstate(all='ignore'):
        for size in (2, 8, 32, 64, 128, 256):
            arr = np.array([0, 1, 2]*size).astype(dtype)
            acc = ufunc.accumulate(arr, dtype=dtype)
            exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype)
            assert_equal(exp, acc)
def test_signaling_nan_exceptions():
    """np.isnan on a signalling NaN must not raise FP warnings."""
    # Buffer holds the bit pattern of a float32 signalling NaN
    # (presumably quiet bit clear -- see the test name).
    snan_bytes = b'\x00\xe0\xbf\xff'
    with assert_no_warnings():
        snan = np.ndarray(shape=(), dtype='float32', buffer=snan_bytes)
        np.isnan(snan)
@pytest.mark.parametrize("arr", [
    np.arange(2),
    np.matrix([0, 1]),
    np.matrix([[0, 1], [2, 5]]),
])
def test_outer_subclass_preserve(arr):
    # for gh-8661: ufunc.outer must preserve the subclass of its inputs,
    # including for np.matrix inputs.
    class foo(np.ndarray):
        pass
    actual = np.multiply.outer(arr.view(foo), arr.view(foo))
    assert actual.__class__.__name__ == 'foo'
def test_outer_bad_subclass():
class BadArr1(np.ndarray):
def __array_finalize__(self, obj):
# The outer call reshapes to 3 dims, try to do a bad reshape.
if self.ndim == 3:
self.shape = self.shape + (1,)
class BadArr2(np.ndarray):
def __array_finalize__(self, obj):
if isinstance(obj, BadArr2):
# outer inserts 1-sized dims. In that case disturb them.
if self.shape[-1] == 1:
self.shape = self.shape[::-1]
for cls in [BadArr1, BadArr2]:
arr = np.ones((2, 3)).view(cls)
with assert_raises(TypeError) as a:
# The first array gets reshaped (not the second one)
np.add.outer(arr, [1, 2])
# This actually works, since we only see the reshaping error:
arr = np.ones((2, 3)).view(cls)
assert type(np.add.outer([1, 2], arr)) is cls
def test_outer_exceeds_maxdims():
deep = np.ones((1,) * 33)
with assert_raises(ValueError):
np.add.outer(deep, deep)
def test_bad_legacy_ufunc_silent_errors():
    """An error raised inside a legacy ufunc loop must surface through
    every ufunc method: the plain call, outer, reduce, reduceat,
    accumulate and at."""
    # legacy ufuncs can't report errors and NumPy can't check if the GIL
    # is released. So NumPy has to check after the GIL is released just to
    # cover all bases. `np.power` uses/used to use this.
    arr = np.arange(3).astype(np.float64)

    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
        ncu_tests.always_error(arr, arr)

    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
        # not contiguous means the fast-path cannot be taken
        non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2]
        ncu_tests.always_error(non_contig, arr)

    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
        ncu_tests.always_error.outer(arr, arr)

    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
        ncu_tests.always_error.reduce(arr)

    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
        ncu_tests.always_error.reduceat(arr, [0, 1])

    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
        ncu_tests.always_error.accumulate(arr)

    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
        ncu_tests.always_error.at(arr, [0, 1, 2], arr)
@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]])
def test_bad_legacy_gufunc_silent_errors(x1):
    # Verify that an exception raised in a gufunc loop propagates correctly.
    # The signature of always_error_gufunc is '(i),()->()'.
    # Parametrized over an ndarray and a plain list to also cover the
    # input-conversion path.
    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
        ncu_tests.always_error_gufunc(x1, 0.0)
class TestAddDocstring:
    """ncu.add_docstring: re-applying an identical docstring succeeds,
    replacing an existing one raises RuntimeError."""

    # -OO strips docstrings, and PyPy does not modify tp_doc, so both
    # environments are skipped.
    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
    @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc")
    def test_add_same_docstring(self):
        # test for attributes (which are C-level defined)
        ncu.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__)

        # And typical functions:
        def func():
            """docstring"""
            return

        ncu.add_docstring(func, func.__doc__)

    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
    def test_different_docstring_fails(self):
        # test for attributes (which are C-level defined)
        with assert_raises(RuntimeError):
            ncu.add_docstring(np.ndarray.flat, "different docstring")

        # And typical functions:
        def func():
            """docstring"""
            return

        with assert_raises(RuntimeError):
            ncu.add_docstring(func, "different docstring")
class TestAdd_newdoc_ufunc:
    """Argument validation of the deprecated ncu._add_newdoc_ufunc."""

    @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning")
    def test_ufunc_arg(self):
        # First argument must be a ufunc; passing np.add raises ValueError
        # (presumably because np.add already has a docstring -- confirm).
        assert_raises(TypeError, ncu._add_newdoc_ufunc, 2, "blah")
        assert_raises(ValueError, ncu._add_newdoc_ufunc, np.add, "blah")

    @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning")
    def test_string_arg(self):
        # Second argument must be a str.
        assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3)
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_core@tests@test_umath.py@.PATH_END.py
|
{
"filename": "_annotation.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/scene/_annotation.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Annotation(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.scene"
_path_str = "layout.scene.annotation"
_valid_props = {
"align",
"arrowcolor",
"arrowhead",
"arrowside",
"arrowsize",
"arrowwidth",
"ax",
"ay",
"bgcolor",
"bordercolor",
"borderpad",
"borderwidth",
"captureevents",
"font",
"height",
"hoverlabel",
"hovertext",
"name",
"opacity",
"showarrow",
"standoff",
"startarrowhead",
"startarrowsize",
"startstandoff",
"templateitemname",
"text",
"textangle",
"valign",
"visible",
"width",
"x",
"xanchor",
"xshift",
"y",
"yanchor",
"yshift",
"z",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the `text` within the box. Has
an effect only if `text` spans two or more lines (i.e. `text`
contains one or more <br> HTML tags) or if an explicit width is
set to override the text width.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# arrowcolor
# ----------
@property
def arrowcolor(self):
"""
Sets the color of the annotation arrow.
The 'arrowcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["arrowcolor"]
@arrowcolor.setter
def arrowcolor(self, val):
self["arrowcolor"] = val
# arrowhead
# ---------
@property
def arrowhead(self):
"""
Sets the end annotation arrow head style.
The 'arrowhead' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 8]
Returns
-------
int
"""
return self["arrowhead"]
@arrowhead.setter
def arrowhead(self, val):
self["arrowhead"] = val
# arrowside
# ---------
@property
def arrowside(self):
"""
Sets the annotation arrow head position.
The 'arrowside' property is a flaglist and may be specified
as a string containing:
- Any combination of ['end', 'start'] joined with '+' characters
(e.g. 'end+start')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["arrowside"]
@arrowside.setter
def arrowside(self, val):
self["arrowside"] = val
# arrowsize
# ---------
@property
def arrowsize(self):
"""
Sets the size of the end annotation arrow head, relative to
`arrowwidth`. A value of 1 (default) gives a head about 3x as
wide as the line.
The 'arrowsize' property is a number and may be specified as:
- An int or float in the interval [0.3, inf]
Returns
-------
int|float
"""
return self["arrowsize"]
@arrowsize.setter
def arrowsize(self, val):
self["arrowsize"] = val
# arrowwidth
# ----------
@property
def arrowwidth(self):
"""
Sets the width (in px) of annotation arrow line.
The 'arrowwidth' property is a number and may be specified as:
- An int or float in the interval [0.1, inf]
Returns
-------
int|float
"""
return self["arrowwidth"]
@arrowwidth.setter
def arrowwidth(self, val):
self["arrowwidth"] = val
# ax
# --
@property
def ax(self):
"""
Sets the x component of the arrow tail about the arrow head (in
pixels).
The 'ax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["ax"]
@ax.setter
def ax(self, val):
self["ax"] = val
# ay
# --
@property
def ay(self):
"""
Sets the y component of the arrow tail about the arrow head (in
pixels).
The 'ay' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["ay"]
@ay.setter
def ay(self, val):
self["ay"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the annotation.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the color of the border enclosing the annotation `text`.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderpad
# ---------
@property
def borderpad(self):
"""
Sets the padding (in px) between the `text` and the enclosing
border.
The 'borderpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderpad"]
@borderpad.setter
def borderpad(self, val):
self["borderpad"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
    """
    Width (in px) of the border drawn around the annotation
    `text`.

    The 'borderwidth' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["borderwidth"]

@borderwidth.setter
def borderwidth(self, value):
    self["borderwidth"] = value
# captureevents
# -------------
@property
def captureevents(self):
    """
    Whether the annotation text box captures mouse move/click
    events instead of letting them pass through to data points
    behind it. Defaults to False unless `hovertext` is provided;
    the `plotly_clickannotation` event without `hovertext`
    requires this to be enabled explicitly.

    The 'captureevents' property must be a bool (True or False).

    Returns
    -------
    bool
    """
    return self["captureevents"]

@captureevents.setter
def captureevents(self, value):
    self["captureevents"] = value
# font
# ----
@property
def font(self):
    """
    The annotation text font.

    The 'font' property may be given as an instance of
    :class:`plotly.graph_objs.layout.scene.annotation.Font` or as
    a dict of compatible properties (``color``, ``family``,
    ``size``) that is passed to the Font constructor.

    Returns
    -------
    plotly.graph_objs.layout.scene.annotation.Font
    """
    return self["font"]

@font.setter
def font(self, value):
    self["font"] = value
# height
# ------
@property
def height(self):
    """
    Explicit height for the text box. null (default) lets the
    text determine the box height; taller text is clipped.

    The 'height' property is a number in the interval [1, inf].

    Returns
    -------
    int|float
    """
    return self["height"]

@height.setter
def height(self, value):
    self["height"] = value
# hoverlabel
# ----------
@property
def hoverlabel(self):
    """
    Styling for this annotation's hover label.

    The 'hoverlabel' property may be given as an instance of
    :class:`plotly.graph_objs.layout.scene.annotation.Hoverlabel`
    or as a dict of compatible properties (``bgcolor``,
    ``bordercolor``, ``font``) that is passed to the Hoverlabel
    constructor.

    Returns
    -------
    plotly.graph_objs.layout.scene.annotation.Hoverlabel
    """
    return self["hoverlabel"]

@hoverlabel.setter
def hoverlabel(self, value):
    self["hoverlabel"] = value
# hovertext
# ---------
@property
def hovertext(self):
    """
    Text shown when hovering over this annotation. If omitted or
    blank, no hover label appears.

    The 'hovertext' property is a string (a number is converted
    to a string).

    Returns
    -------
    str
    """
    return self["hovertext"]

@hovertext.setter
def hovertext(self, value):
    self["hovertext"] = value
# name
# ----
@property
def name(self):
    """
    Template item name. When used in a template, named items are
    created in the output figure in addition to any items it
    already has in this array; they can be modified by making an
    item whose `templateitemname` matches this `name` (including
    `visible: false` or `enabled: false` to hide it). Has no
    effect outside of a template.

    The 'name' property is a string (a number is converted to a
    string).

    Returns
    -------
    str
    """
    return self["name"]

@name.setter
def name(self, value):
    self["name"] = value
# opacity
# -------
@property
def opacity(self):
    """
    Opacity of the annotation (text + arrow).

    The 'opacity' property is a number in the interval [0, 1].

    Returns
    -------
    int|float
    """
    return self["opacity"]

@opacity.setter
def opacity(self, value):
    self["opacity"] = value
# showarrow
# ---------
@property
def showarrow(self):
    """
    Whether the annotation is drawn with an arrow. If True,
    `text` is placed near the arrow's tail; if False, `text`
    lines up with the `x` and `y` provided.

    The 'showarrow' property must be a bool (True or False).

    Returns
    -------
    bool
    """
    return self["showarrow"]

@showarrow.setter
def showarrow(self, value):
    self["showarrow"] = value
# standoff
# --------
@property
def standoff(self):
    """
    Distance, in pixels, to move the end arrowhead away from the
    position it points at (e.g. to point at a marker's edge
    independent of zoom). This shortens the arrow from the
    `ax` / `ay` vector, unlike `xshift` / `yshift` which move
    everything by this amount.

    The 'standoff' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["standoff"]

@standoff.setter
def standoff(self, value):
    self["standoff"] = value
# startarrowhead
# --------------
@property
def startarrowhead(self):
    """
    Start annotation arrow head style.

    The 'startarrowhead' property is an integer (a float is cast
    to an int) in the interval [0, 8].

    Returns
    -------
    int
    """
    return self["startarrowhead"]

@startarrowhead.setter
def startarrowhead(self, value):
    self["startarrowhead"] = value
# startarrowsize
# --------------
@property
def startarrowsize(self):
    """
    Size of the start annotation arrow head relative to
    `arrowwidth`. A value of 1 (default) gives a head about 3x as
    wide as the line.

    The 'startarrowsize' property is a number in the interval
    [0.3, inf].

    Returns
    -------
    int|float
    """
    return self["startarrowsize"]

@startarrowsize.setter
def startarrowsize(self, value):
    self["startarrowsize"] = value
# startstandoff
# -------------
@property
def startstandoff(self):
    """
    Distance, in pixels, to move the start arrowhead away from
    the position it points at (e.g. to point at a marker's edge
    independent of zoom). This shortens the arrow from the
    `ax` / `ay` vector, unlike `xshift` / `yshift` which move
    everything by this amount.

    The 'startstandoff' property is a number in the interval
    [0, inf].

    Returns
    -------
    int|float
    """
    return self["startstandoff"]

@startstandoff.setter
def startstandoff(self, value):
    self["startstandoff"] = value
# templateitemname
# ----------------
@property
def templateitemname(self):
    """
    Reference to a named item in this array in the template.
    Named template items are created even without a matching item
    in the input figure; one can be modified by making an item
    with `templateitemname` matching its `name` (including
    `visible: false` or `enabled: false` to hide it). With no
    template or no matching item, this item is hidden unless
    explicitly shown with `visible: true`.

    The 'templateitemname' property is a string (a number is
    converted to a string).

    Returns
    -------
    str
    """
    return self["templateitemname"]

@templateitemname.setter
def templateitemname(self, value):
    self["templateitemname"] = value
# text
# ----
@property
def text(self):
    """
    Text associated with this annotation. Plotly understands a
    subset of HTML tags: newline (<br>), bold (<b></b>), italics
    (<i></i>), hyperlinks (<a href='...'></a>); <em>, <sup>,
    <sub> and <span> are also supported.

    The 'text' property is a string (a number is converted to a
    string).

    Returns
    -------
    str
    """
    return self["text"]

@text.setter
def text(self, value):
    self["text"] = value
# textangle
# ---------
@property
def textangle(self):
    """
    Angle at which the `text` is drawn with respect to the
    horizontal.

    The 'textangle' property is an angle (in degrees) given as a
    number between -180 and 180; values outside this range are
    converted to the equivalent value (e.g. 270 becomes -90).

    Returns
    -------
    int|float
    """
    return self["textangle"]

@textangle.setter
def textangle(self, value):
    self["textangle"] = value
# valign
# ------
@property
def valign(self):
    """
    Vertical alignment of the `text` within the box. Only has an
    effect when an explicit height overrides the text height.

    The 'valign' property is an enumeration; one of:
    ['top', 'middle', 'bottom']

    Returns
    -------
    Any
    """
    return self["valign"]

@valign.setter
def valign(self, value):
    self["valign"] = value
# visible
# -------
@property
def visible(self):
    """
    Whether this annotation is visible.

    The 'visible' property must be a bool (True or False).

    Returns
    -------
    bool
    """
    return self["visible"]

@visible.setter
def visible(self, value):
    self["visible"] = value
# width
# -----
@property
def width(self):
    """
    Explicit width for the text box. null (default) lets the text
    determine the box width; wider text is clipped. There is no
    automatic wrapping — use <br> to start a new line.

    The 'width' property is a number in the interval [1, inf].

    Returns
    -------
    int|float
    """
    return self["width"]

@width.setter
def width(self, value):
    self["width"] = value
# x
# -
@property
def x(self):
    """
    The annotation's x position.

    The 'x' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["x"]

@x.setter
def x(self, value):
    self["x"] = value
# xanchor
# -------
@property
def xanchor(self):
    """
    Horizontal position anchor of the text box: binds the `x`
    position to the "left", "center" or "right" of the
    annotation. For example, with `x` of 1, `xref` "paper" and
    `xanchor` "right", the right-most portion of the annotation
    lines up with the right-most edge of the plotting area. If
    "auto", the anchor is equivalent to "center" for
    data-referenced annotations or when there is an arrow; for
    paper-referenced annotations with no arrow the closest side
    is picked.

    The 'xanchor' property is an enumeration; one of:
    ['auto', 'left', 'center', 'right']

    Returns
    -------
    Any
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, value):
    self["xanchor"] = value
# xshift
# ------
@property
def xshift(self):
    """
    Shifts the whole annotation and arrow right (positive) or
    left (negative) by this many pixels.

    The 'xshift' property is a number (int or float).

    Returns
    -------
    int|float
    """
    return self["xshift"]

@xshift.setter
def xshift(self, value):
    self["xshift"] = value
# y
# -
@property
def y(self):
    """
    The annotation's y position.

    The 'y' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["y"]

@y.setter
def y(self, value):
    self["y"] = value
# yanchor
# -------
@property
def yanchor(self):
    """
    Vertical position anchor of the text box: binds the `y`
    position to the "top", "middle" or "bottom" of the
    annotation. For example, with `y` of 1, `yref` "paper" and
    `yanchor` "top", the top-most portion of the annotation lines
    up with the top-most edge of the plotting area. If "auto",
    the anchor is equivalent to "middle" for data-referenced
    annotations or when there is an arrow; for paper-referenced
    annotations with no arrow the closest side is picked.

    The 'yanchor' property is an enumeration; one of:
    ['auto', 'top', 'middle', 'bottom']

    Returns
    -------
    Any
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, value):
    self["yanchor"] = value
# yshift
# ------
@property
def yshift(self):
    """
    Shifts the whole annotation and arrow up (positive) or down
    (negative) by this many pixels.

    The 'yshift' property is a number (int or float).

    Returns
    -------
    int|float
    """
    return self["yshift"]

@yshift.setter
def yshift(self, value):
    self["yshift"] = value
# z
# -
@property
def z(self):
    """
    The annotation's z position.

    The 'z' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["z"]

@z.setter
def z(self, value):
    self["z"] = value
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Machine-generated summary of every property on this class; the
    # returned text is embedded verbatim in __init__'s docstring, so
    # its content must not be edited by hand.
    return """\
        align
            Sets the horizontal alignment of the `text` within the
            box. Has an effect only if `text` spans two or more
            lines (i.e. `text` contains one or more <br> HTML tags)
            or if an explicit width is set to override the text
            width.
        arrowcolor
            Sets the color of the annotation arrow.
        arrowhead
            Sets the end annotation arrow head style.
        arrowside
            Sets the annotation arrow head position.
        arrowsize
            Sets the size of the end annotation arrow head,
            relative to `arrowwidth`. A value of 1 (default) gives
            a head about 3x as wide as the line.
        arrowwidth
            Sets the width (in px) of annotation arrow line.
        ax
            Sets the x component of the arrow tail about the arrow
            head (in pixels).
        ay
            Sets the y component of the arrow tail about the arrow
            head (in pixels).
        bgcolor
            Sets the background color of the annotation.
        bordercolor
            Sets the color of the border enclosing the annotation
            `text`.
        borderpad
            Sets the padding (in px) between the `text` and the
            enclosing border.
        borderwidth
            Sets the width (in px) of the border enclosing the
            annotation `text`.
        captureevents
            Determines whether the annotation text box captures
            mouse move and click events, or allows those events to
            pass through to data points in the plot that may be
            behind the annotation. By default `captureevents` is
            False unless `hovertext` is provided. If you use the
            event `plotly_clickannotation` without `hovertext` you
            must explicitly enable `captureevents`.
        font
            Sets the annotation text font.
        height
            Sets an explicit height for the text box. null
            (default) lets the text set the box height. Taller text
            will be clipped.
        hoverlabel
            :class:`plotly.graph_objects.layout.scene.annotation.Ho
            verlabel` instance or dict with compatible properties
        hovertext
            Sets text to appear when hovering over this annotation.
            If omitted or blank, no hover label will appear.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        opacity
            Sets the opacity of the annotation (text + arrow).
        showarrow
            Determines whether or not the annotation is drawn with
            an arrow. If True, `text` is placed near the arrow's
            tail. If False, `text` lines up with the `x` and `y`
            provided.
        standoff
            Sets a distance, in pixels, to move the end arrowhead
            away from the position it is pointing at, for example
            to point at the edge of a marker independent of zoom.
            Note that this shortens the arrow from the `ax` / `ay`
            vector, in contrast to `xshift` / `yshift` which moves
            everything by this amount.
        startarrowhead
            Sets the start annotation arrow head style.
        startarrowsize
            Sets the size of the start annotation arrow head,
            relative to `arrowwidth`. A value of 1 (default) gives
            a head about 3x as wide as the line.
        startstandoff
            Sets a distance, in pixels, to move the start arrowhead
            away from the position it is pointing at, for example
            to point at the edge of a marker independent of zoom.
            Note that this shortens the arrow from the `ax` / `ay`
            vector, in contrast to `xshift` / `yshift` which moves
            everything by this amount.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        text
            Sets the text associated with this annotation. Plotly
            uses a subset of HTML tags to do things like newline
            (<br>), bold (<b></b>), italics (<i></i>), hyperlinks
            (<a href='...'></a>). Tags <em>, <sup>, <sub> <span>
            are also supported.
        textangle
            Sets the angle at which the `text` is drawn with
            respect to the horizontal.
        valign
            Sets the vertical alignment of the `text` within the
            box. Has an effect only if an explicit height is set to
            override the text height.
        visible
            Determines whether or not this annotation is visible.
        width
            Sets an explicit width for the text box. null (default)
            lets the text set the box width. Wider text will be
            clipped. There is no automatic wrapping; use <br> to
            start a new line.
        x
            Sets the annotation's x position.
        xanchor
            Sets the text box's horizontal position anchor This
            anchor binds the `x` position to the "left", "center"
            or "right" of the annotation. For example, if `x` is
            set to 1, `xref` to "paper" and `xanchor` to "right"
            then the right-most portion of the annotation lines up
            with the right-most edge of the plotting area. If
            "auto", the anchor is equivalent to "center" for data-
            referenced annotations or if there is an arrow, whereas
            for paper-referenced with no arrow, the anchor picked
            corresponds to the closest side.
        xshift
            Shifts the position of the whole annotation and arrow
            to the right (positive) or left (negative) by this many
            pixels.
        y
            Sets the annotation's y position.
        yanchor
            Sets the text box's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the annotation. For example, if `y` is set
            to 1, `yref` to "paper" and `yanchor` to "top" then the
            top-most portion of the annotation lines up with the
            top-most edge of the plotting area. If "auto", the
            anchor is equivalent to "middle" for data-referenced
            annotations or if there is an arrow, whereas for paper-
            referenced with no arrow, the anchor picked corresponds
            to the closest side.
        yshift
            Shifts the position of the whole annotation and arrow
            up (positive) or down (negative) by this many pixels.
        z
            Sets the annotation's z position.
        """
def __init__(
    self,
    arg=None,
    align=None,
    arrowcolor=None,
    arrowhead=None,
    arrowside=None,
    arrowsize=None,
    arrowwidth=None,
    ax=None,
    ay=None,
    bgcolor=None,
    bordercolor=None,
    borderpad=None,
    borderwidth=None,
    captureevents=None,
    font=None,
    height=None,
    hoverlabel=None,
    hovertext=None,
    name=None,
    opacity=None,
    showarrow=None,
    standoff=None,
    startarrowhead=None,
    startarrowsize=None,
    startstandoff=None,
    templateitemname=None,
    text=None,
    textangle=None,
    valign=None,
    visible=None,
    width=None,
    x=None,
    xanchor=None,
    xshift=None,
    y=None,
    yanchor=None,
    yshift=None,
    z=None,
    **kwargs
):
    """
    Construct a new Annotation object

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`plotly.graph_objs.layout.scene.Annotation`.
        Values given via an explicit keyword argument below take
        precedence over the same key in ``arg``.
    align, arrowcolor, arrowhead, arrowside, arrowsize, \
arrowwidth, ax, ay, bgcolor, bordercolor, borderpad, \
borderwidth, captureevents, font, height, hoverlabel, \
hovertext, name, opacity, showarrow, standoff, startarrowhead, \
startarrowsize, startstandoff, templateitemname, text, \
textangle, valign, visible, width, x, xanchor, xshift, y, \
yanchor, yshift, z
        Initial values for the corresponding properties; see the
        property docstrings on this class for the meaning and
        accepted values of each.

    Returns
    -------
    Annotation
    """
    super(Annotation, self).__init__("annotations")

    # Internal fast path: when constructed by a parent object the
    # child shares the parent's data and skips all validation.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict we can consume with pop().
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.layout.scene.Annotation
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.Annotation`"""
        )

    # Handle skip_invalid / _validate flags
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate the data dict: an explicit keyword argument wins
    # over the same key in `arg`; None values are not assigned.
    for prop_name, explicit in (
        ("align", align),
        ("arrowcolor", arrowcolor),
        ("arrowhead", arrowhead),
        ("arrowside", arrowside),
        ("arrowsize", arrowsize),
        ("arrowwidth", arrowwidth),
        ("ax", ax),
        ("ay", ay),
        ("bgcolor", bgcolor),
        ("bordercolor", bordercolor),
        ("borderpad", borderpad),
        ("borderwidth", borderwidth),
        ("captureevents", captureevents),
        ("font", font),
        ("height", height),
        ("hoverlabel", hoverlabel),
        ("hovertext", hovertext),
        ("name", name),
        ("opacity", opacity),
        ("showarrow", showarrow),
        ("standoff", standoff),
        ("startarrowhead", startarrowhead),
        ("startarrowsize", startarrowsize),
        ("startstandoff", startstandoff),
        ("templateitemname", templateitemname),
        ("text", text),
        ("textangle", textangle),
        ("valign", valign),
        ("visible", visible),
        ("width", width),
        ("x", x),
        ("xanchor", xanchor),
        ("xshift", xshift),
        ("y", y),
        ("yanchor", yanchor),
        ("yshift", yshift),
        ("z", z),
    ):
        # Always pop from `arg` so leftovers flow to _process_kwargs.
        from_arg = arg.pop(prop_name, None)
        chosen = explicit if explicit is not None else from_arg
        if chosen is not None:
            self[prop_name] = chosen

    # Process unknown kwargs (and any unrecognized keys left in arg)
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@layout@scene@_annotation.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/table/__init__.py",
"type": "Python"
}
|
# Validator classes for properties of the `table` trace type.
#
# On Python < 3.7 every validator submodule is imported eagerly.  On
# 3.7+ the same names are exposed lazily through PEP 562 module
# __getattr__ (via relative_import) to keep plotly's import time low.
import sys

if sys.version_info < (3, 7):
    from ._visible import VisibleValidator
    from ._uirevision import UirevisionValidator
    from ._uid import UidValidator
    from ._stream import StreamValidator
    from ._name import NameValidator
    from ._metasrc import MetasrcValidator
    from ._meta import MetaValidator
    from ._idssrc import IdssrcValidator
    from ._ids import IdsValidator
    from ._hoverlabel import HoverlabelValidator
    from ._hoverinfosrc import HoverinfosrcValidator
    from ._hoverinfo import HoverinfoValidator
    from ._header import HeaderValidator
    from ._domain import DomainValidator
    from ._customdatasrc import CustomdatasrcValidator
    from ._customdata import CustomdataValidator
    from ._columnwidthsrc import ColumnwidthsrcValidator
    from ._columnwidth import ColumnwidthValidator
    from ._columnordersrc import ColumnordersrcValidator
    from ._columnorder import ColumnorderValidator
    from ._cells import CellsValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns (__all__, __getattr__, __dir__); each
    # "._module.Name" entry is resolved on first attribute access.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._visible.VisibleValidator",
            "._uirevision.UirevisionValidator",
            "._uid.UidValidator",
            "._stream.StreamValidator",
            "._name.NameValidator",
            "._metasrc.MetasrcValidator",
            "._meta.MetaValidator",
            "._idssrc.IdssrcValidator",
            "._ids.IdsValidator",
            "._hoverlabel.HoverlabelValidator",
            "._hoverinfosrc.HoverinfosrcValidator",
            "._hoverinfo.HoverinfoValidator",
            "._header.HeaderValidator",
            "._domain.DomainValidator",
            "._customdatasrc.CustomdatasrcValidator",
            "._customdata.CustomdataValidator",
            "._columnwidthsrc.ColumnwidthsrcValidator",
            "._columnwidth.ColumnwidthValidator",
            "._columnordersrc.ColumnordersrcValidator",
            "._columnorder.ColumnorderValidator",
            "._cells.CellsValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@table@__init__.py@.PATH_END.py
|
{
"filename": "_insidetextorientation.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/sunburst/_insidetextorientation.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class InsidetextorientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for `sunburst.insidetextorientation`."""

    def __init__(
        self, plotly_name="insidetextorientation", parent_name="sunburst", **kwargs
    ):
        # Schema defaults; an explicit kwarg from the caller wins over these.
        allowed = kwargs.pop("values", ["horizontal", "radial", "tangential", "auto"])
        edit_type = kwargs.pop("edit_type", "plot")
        super(InsidetextorientationValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@sunburst@_insidetextorientation.py@.PATH_END.py
|
{
"filename": "ps_fit.py",
"repo_name": "TianlaiProject/tlpipe",
"repo_path": "tlpipe_extracted/tlpipe-master/tlpipe/timestream/ps_fit.py",
"type": "Python"
}
|
"""Calibration by strong point source fitting.
Inheritance diagram
-------------------
.. inheritance-diagram:: PsFit
:parts: 2
"""
import os
import numpy as np
import ephem
import aipy as a
import timestream_task
from tlpipe.container.timestream import Timestream
from caput import mpiutil
from tlpipe.utils.path_util import output_path
from tlpipe.core import constants as const
import tlpipe.plot
import matplotlib.pyplot as plt
def fit(vis_obs, vis_mask, vis_sim, start_ind, end_ind, num_shift, idx, plot_fit, fig_prefix, iteration, tag_output_iter, bls_plt, freq_plt):
    # Least-squares fit of a single complex gain G between the observed and
    # simulated visibility of one (freq, pol, baseline), trying a range of
    # time shifts of the observed data and keeping the shift with the
    # smallest reduced chi^2.  Returns (gain, shift); returns (1.0, 0) when
    # there is no unmasked data to fit.
    vis_obs = np.ma.array(vis_obs, mask=vis_mask)
    num_nomask = vis_obs.count()
    if num_nomask == 0: # no valid vis data
        return 1.0, 0
    # idx identifies this fit: frequency index, polarization index, baseline (i, j)
    fi, pi, (i, j) = idx
    gains = []
    chi2s = []
    # candidate time shifts, centred on zero (Python 2: xrange, integer division)
    shifts = xrange(-num_shift/2, num_shift/2+1)
    for si in shifts:
        # vis = vis_obs[start_ind+si:end_ind+si].astype(np.complex128) # improve precision
        vis = vis_obs[start_ind+si:end_ind+si]
        num_nomask = vis.count()
        if num_nomask == 0: # no valid vis data
            continue
        else:
            # closed-form least-squares solution:
            # G = (sim^H . obs) / (sim^H . sim); masked entries are ignored
            xx = np.ma.dot(vis_sim.conj(), vis_sim)
            xy = np.ma.dot(vis_sim.conj(), vis)
            gain = xy / xx
            vis_cal = gain * vis_sim
            err = vis - vis_cal
            chi2 = np.ma.dot(err.conj(), err).real
            gains.append(gain)
            # reduced chi^2 so different shifts (different mask counts) are comparable
            chi2s.append(chi2/num_nomask)
    if len(gains) == 0: # no valid vis data
        return 1.0, 0
    chi2s = np.array(chi2s)
    # monotone chi^2 over the whole shift range suggests the minimum lies
    # outside the searched window — warn, but still take the best point
    if np.allclose(chi2s, np.sort(chi2s)):
        if mpiutil.rank0:
            print 'Warn: chi2 increasing for %s...' % (idx,)
    if np.allclose(chi2s, np.sort(chi2s)[::-1]):
        if mpiutil.rank0:
            print 'Warn: chi2 decreasing for %s...' % (idx,)
    # pick the shift with minimum reduced chi^2
    ind = np.argmin(chi2s)
    gain = gains[ind]
    # chi2 = chi2s[ind]
    si = shifts[ind]
    # unshifted observed data, amplitude-matched to the simulation for plotting
    obs_data = np.ma.array(vis_obs[start_ind:end_ind], mask=vis_mask[start_ind:end_ind])
    factor = np.ma.max(np.ma.abs(obs_data)) / np.max(np.abs(vis_sim))
    obs_data = obs_data / factor # make amp close to each other
    # calibrated data at the best shift
    vis_cal = np.ma.array(vis_obs[start_ind+si:end_ind+si], mask=vis_mask[start_ind+si:end_ind+si]) / gain
    if si != 0 and mpiutil.rank0:
        print 'shift %d for %s...' % (si, idx)
    # optional diagnostic plot, only for the requested freqs/baselines
    if plot_fit and (fi in freq_plt and (i, j) in bls_plt):
        # plot the fit
        plt.figure()
        plt.subplot(311)
        plt.plot(obs_data.real, label='obs, real')
        if not vis_cal is np.ma.masked: # in case gain is --
            plt.plot(vis_cal.real, label='cal, real')
        plt.plot(vis_sim.real, label='sim, real')
        plt.legend(loc='best')
        plt.subplot(312)
        plt.plot(obs_data.imag, label='obs, imag')
        if not vis_cal is np.ma.masked: # in case gain is --
            plt.plot(vis_cal.imag, label='cal, imag')
        plt.plot(vis_sim.imag, label='sim, imag')
        plt.legend(loc='best')
        plt.subplot(313)
        plt.plot(np.abs(obs_data), label='obs, abs')
        if not vis_cal is np.ma.masked: # in case gain is --
            plt.plot(np.abs(vis_cal), label='cal, abs')
        plt.plot(np.abs(vis_sim), label='sim, abs')
        plt.legend(loc='best')
        fig_name = '%s_%d_%d_%d_%d.png' % (fig_prefix, fi, pi, i, j)
        if tag_output_iter:
            fig_name = output_path(fig_name, iteration=iteration)
        else:
            fig_name = output_path(fig_name)
        plt.savefig(fig_name)
        plt.close()
    return gain, si
class PsFit(timestream_task.TimestreamTask):
    """Calibration by strong point source fitting.

    This works by minimize

    .. math:: \\chi^2 = \| V_{ij}^{\\text{obs}}(t + \\Delta t) - G_{ij} V_{ij}^{\\text{sim}}(t) \|^2

    Its solution is

    .. math:: G_{ij} = \\frac{V_{ij}^{\\text{sim} \\dagger} V_{ij}^{\\text{obs}}}{V_{ij}^{\\text{sim} \\dagger} V_{ij}^{\\text{sim}}}

    """

    # default task parameters (merged with the base class's by the framework)
    params_init = {
                    'calibrator': 'cas',
                    'catalog': 'misc', # or helm,nvss
                    'span': 1200.0, # second
                    'shift': 600.0, # second
                    'plot_fit': False, # plot the smoothing fit
                    'fig_name': 'fit/fit',
                    'bl_incl': 'all', # or a list of include (bl1, bl2)
                    'bl_excl': [],
                    'freq_incl': 'all', # or a list of include freq idx
                    'freq_excl': [],
                  }

    # parameter-file prefix for this task
    prefix = 'pf_'

    def process(self, ts):
        # Calibrate `ts` in place: simulate the calibrator transit, fit a
        # per-(freq, pol, baseline) gain with `fit`, and divide it out.
        assert isinstance(ts, Timestream), '%s only works for Timestream object' % self.__class__.__name__

        calibrator = self.params['calibrator']
        catalog = self.params['catalog']
        span = self.params['span']
        shift = self.params['shift']
        plot_fit = self.params['plot_fit']
        fig_prefix = self.params['fig_name']
        tag_output_iter = self.params['tag_output_iter']
        bl_incl = self.params['bl_incl']
        bl_excl = self.params['bl_excl']
        freq_incl = self.params['freq_incl']
        freq_excl = self.params['freq_excl']

        # distribute data over MPI ranks along the baseline axis
        ts.redistribute('baseline')

        # select which baselines / frequencies get diagnostic plots
        if bl_incl == 'all':
            bls_plt = [ tuple(bl) for bl in ts.local_bl ]
        else:
            bls_plt = [ bl for bl in bl_incl if not bl in bl_excl ]

        if freq_incl == 'all':
            freq_plt = range(ts.freq.shape[0])
        else:
            freq_plt = [ fi for fi in freq_incl if not fi in freq_excl ]

        feedno = ts['feedno'][:].tolist()
        freq = ts['freq'][:]
        nfreq = len(freq)
        pol = ts['pol'][:].tolist()
        bl = ts.local_bl[:] # local bls
        bls = [ tuple(b) for b in bl ]

        # calibrator
        srclist, cutoff, catalogs = a.scripting.parse_srcs(calibrator, catalog)
        cat = a.src.get_catalog(srclist, cutoff, catalogs)
        assert(len(cat) == 1), 'Allow only one calibrator'
        s = cat.values()[0]
        if mpiutil.rank0:
            print 'Calibrating for source %s with' % calibrator,
            print 'strength', s._jys, 'Jy',
            print 'measured at', s.mfreq, 'GHz',
            print 'with index', s.index

        # get transit time of calibrator
        # array
        aa = ts.array
        aa.set_jultime(ts['jul_date'][0]) # the first obs time point
        next_transit = aa.next_transit(s)
        transit_time = a.phs.ephem2juldate(next_transit) # Julian date
        if transit_time > ts['jul_date'][-1]:
            # NOTE(review): the +8h offset presumably converts to the local
            # time zone for the error message — confirm against observatory
            local_next_transit = ephem.Date(next_transit + 8.0 * ephem.hour)
            raise RuntimeError('Data does not contain local transit time %s of source %s' % (local_next_transit, calibrator))

        # the first transit index
        transit_inds = [ np.searchsorted(ts['jul_date'][:], transit_time) ]
        # find all other transit indices
        aa.set_jultime(ts['jul_date'][0] + 1.0) # maybe should use a sidereal day which is a litter shorter than 1.0 ???
        transit_time = a.phs.ephem2juldate(aa.next_transit(s)) # Julian date
        cnt = 2
        while(transit_time <= ts['jul_date'][-1]):
            transit_inds.append(np.searchsorted(ts['jul_date'][:], transit_time))
            aa.set_jultime(ts['jul_date'][0] + 1.0*cnt)
            transit_time = a.phs.ephem2juldate(aa.next_transit(s)) # Julian date
            cnt += 1

        if mpiutil.rank0:
            print 'transit inds: ', transit_inds

        ### now only use the first transit point to do the cal
        ### may need to improve in the future
        transit_ind = transit_inds[0]
        int_time = ts.attrs['inttime'] # second
        # fit window: +/- `span` seconds around transit, clipped to the data
        start_ind = max(0, transit_ind - np.int(span / int_time))
        end_ind = min(len(ts.local_time), transit_ind + np.int(span / int_time))
        # max shift (in samples) tried by `fit`, never larger than the window
        num_shift = np.int(shift / int_time)
        num_shift = min(num_shift, end_ind - start_ind)

        ############################################
        # if ts.is_cylinder:
        #     ts.local_vis[:] = ts.local_vis.conj() # now for cylinder array
        ############################################

        vis = ts.local_vis
        vis_mask = ts.local_vis_mask
        # vis[ts.local_vis_mask] = complex(np.nan, np.nan) # set masked vis to nan

        nt = end_ind - start_ind
        # vis_sim = np.zeros((nt,)+vis.shape[1:], dtype=np.complex128) # to hold the simulated vis, use float64 to have better precision
        vis_sim = np.zeros((nt,)+vis.shape[1:], dtype=vis.dtype)

        # get beam solid angle (suppose it is the same for all feeds)
        Omega_ij = aa[0].beam.Omega
        # Jy -> K conversion times beam terms; NOTE: 1Jy = 1.0e-26 W m^-2 Hz^-1
        pre_factor = 1.0e-26 * (const.c**2 / (2 * const.k_B * (1.0e6*freq)**2) / Omega_ij)

        # simulate the calibrator visibility at every time step in the window
        for ind, ti in enumerate(xrange(start_ind, end_ind)):
            aa.set_jultime(ts['jul_date'][ti])
            s.compute(aa)
            # get fluxes vs. freq of the calibrator
            Sc = s.get_jys()
            # get the topocentric coordinate of the calibrator at the current time
            s_top = s.get_crds('top', ncrd=3)
            aa.sim_cache(cat.get_crds('eq', ncrd=3)) # for compute bm_response and sim
            # for pi in range(len(pol)):
            for pi in xrange(2): # only cal for xx, yy
                aa.set_active_pol(pol[pi])
                # assume all have the same beam responce, speed the calculation
                # resp1 = aa[0].bm_response(s_top, pol=pol[pi][0]).transpose()
                # resp2 = aa[0].bm_response(s_top, pol=pol[pi][1]).transpose()
                # bmij = resp1 * np.conjugate(resp2)
                bmij = aa.bm_response(0, 0).reshape(-1)
                factor = pre_factor * Sc * bmij
                for bi, (i, j) in enumerate(bls):
                    ai = feedno.index(i)
                    aj = feedno.index(j)
                    uij = aa.gen_uvw(ai, aj, src='z')[:, 0, :] # (rj - ri)/lambda
                    # bmij = aa.bm_response(ai, aj).reshape(-1)
                    # geometric fringe phase times flux/beam factor
                    vis_sim[ind, :, pi, bi] = factor * np.exp(-2.0J * np.pi * np.dot(s_top, uij)) # Unit: K
                    # vis_sim[ind, :, pi, bi] = Sc * bmij * np.exp(-2.0J * np.pi * np.dot(s_top, uij))

        mpiutil.barrier()

        # iterate over freq
        for fi in xrange(nfreq):
            # for pi in xrange(len(pol)):
            for pi in xrange(2): # only cal for xx, yy
                for bi, (i, j) in enumerate(bls):
                    gain, si = fit(vis[:, fi, pi, bi], vis_mask[:, fi, pi, bi], vis_sim[:, fi, pi, bi], start_ind, end_ind, num_shift, (fi, pi, (i, j)), plot_fit, fig_prefix, self.iteration, tag_output_iter, bls_plt, freq_plt)
                    # cal for vis
                    ts.local_vis[:, fi, pi, bi] = np.roll(vis[:, fi, pi, bi], -si) / gain # NOTE the use of -si
                    ts.local_vis_mask[:, fi, pi, bi] = np.roll(vis_mask[:, fi, pi, bi], -si) # NOTE the use of -si

        mpiutil.barrier()

        return super(PsFit, self).process(ts)
|
TianlaiProjectREPO_NAMEtlpipePATH_START.@tlpipe_extracted@tlpipe-master@tlpipe@timestream@ps_fit.py@.PATH_END.py
|
{
"filename": "_bounds.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/map/_bounds.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
# NOTE(review): this class follows plotly's machine-generated graph-object
# layout; keep edits mechanical so it stays consistent with its siblings.
class Bounds(_BaseLayoutHierarchyType):

    # class properties
    # --------------------
    _parent_path_str = "layout.map"
    _path_str = "layout.map.bounds"
    # properties accepted by this node
    _valid_props = {"east", "north", "south", "west"}

    # east
    # ----
    @property
    def east(self):
        """
        Sets the maximum longitude of the map (in degrees East) if
        `west`, `south` and `north` are declared.

        The 'east' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["east"]

    @east.setter
    def east(self, val):
        self["east"] = val

    # north
    # -----
    @property
    def north(self):
        """
        Sets the maximum latitude of the map (in degrees North) if
        `east`, `west` and `south` are declared.

        The 'north' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["north"]

    @north.setter
    def north(self, val):
        self["north"] = val

    # south
    # -----
    @property
    def south(self):
        """
        Sets the minimum latitude of the map (in degrees North) if
        `east`, `west` and `north` are declared.

        The 'south' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["south"]

    @south.setter
    def south(self, val):
        self["south"] = val

    # west
    # ----
    @property
    def west(self):
        """
        Sets the minimum longitude of the map (in degrees East) if
        `east`, `south` and `north` are declared.

        The 'west' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["west"]

    @west.setter
    def west(self, val):
        self["west"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        east
            Sets the maximum longitude of the map (in degrees East)
            if `west`, `south` and `north` are declared.
        north
            Sets the maximum latitude of the map (in degrees North)
            if `east`, `west` and `south` are declared.
        south
            Sets the minimum latitude of the map (in degrees North)
            if `east`, `west` and `north` are declared.
        west
            Sets the minimum longitude of the map (in degrees East)
            if `east`, `south` and `north` are declared.
        """

    def __init__(
        self, arg=None, east=None, north=None, south=None, west=None, **kwargs
    ):
        """
        Construct a new Bounds object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.map.Bounds`
        east
            Sets the maximum longitude of the map (in degrees East)
            if `west`, `south` and `north` are declared.
        north
            Sets the maximum latitude of the map (in degrees North)
            if `east`, `west` and `south` are declared.
        south
            Sets the minimum latitude of the map (in degrees North)
            if `east`, `west` and `north` are declared.
        west
            Sets the minimum longitude of the map (in degrees East)
            if `east`, `south` and `north` are declared.

        Returns
        -------
        Bounds
        """
        super(Bounds, self).__init__("bounds")

        # internal construction path: parent supplies itself and no
        # further validation/population is needed
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.map.Bounds
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.map.Bounds`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # explicit keyword arguments take precedence over entries in `arg`
        _v = arg.pop("east", None)
        _v = east if east is not None else _v
        if _v is not None:
            self["east"] = _v
        _v = arg.pop("north", None)
        _v = north if north is not None else _v
        if _v is not None:
            self["north"] = _v
        _v = arg.pop("south", None)
        _v = south if south is not None else _v
        if _v is not None:
            self["south"] = _v
        _v = arg.pop("west", None)
        _v = west if west is not None else _v
        if _v is not None:
            self["west"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@map@_bounds.py@.PATH_END.py
|
{
"filename": "_histnorm.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram2dcontour/_histnorm.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HistnormValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for `histogram2dcontour.histnorm`."""

    def __init__(
        self, plotly_name="histnorm", parent_name="histogram2dcontour", **kwargs
    ):
        # Schema defaults, overridable by the caller through kwargs.
        allowed = kwargs.pop(
            "values",
            ["", "percent", "probability", "density", "probability density"],
        )
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        super(HistnormValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=allowed,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram2dcontour@_histnorm.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/choropleth/hoverlabel/font/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for `choropleth.hoverlabel.font.color`."""

    def __init__(
        self, plotly_name="color", parent_name="choropleth.hoverlabel.font", **kwargs
    ):
        # Pull schema defaults out of kwargs so explicit overrides win.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@choropleth@hoverlabel@font@_color.py@.PATH_END.py
|
{
"filename": "list.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/langchain_core/output_parsers/list.py",
"type": "Python"
}
|
from __future__ import annotations
import csv
import re
from abc import abstractmethod
from collections import deque
from collections.abc import AsyncIterator, Iterator
from io import StringIO
from typing import Optional as Optional
from typing import TypeVar, Union
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.transform import BaseTransformOutputParser
T = TypeVar("T")
def droplastn(iter: Iterator[T], n: int) -> Iterator[T]:
    """Yield every element of *iter* except the final ``n``.

    Args:
        iter: The iterator to consume.
        n: How many trailing elements to withhold.

    Yields:
        Each element delayed by ``n`` positions, so the last ``n``
        elements are never emitted.
    """
    # Hold back up to n elements; once the window exceeds n, the oldest
    # entry cannot be among the last n, so it is safe to release.
    held: deque[T] = deque()
    for element in iter:
        held.append(element)
        if len(held) > n:
            yield held.popleft()
class ListOutputParser(BaseTransformOutputParser[list[str]]):
    """Parse the output of an LLM call to a list."""

    @property
    def _type(self) -> str:
        return "list"

    @abstractmethod
    def parse(self, text: str) -> list[str]:
        """Parse the output of an LLM call.

        Args:
            text: The output of an LLM call.

        Returns:
            A list of strings.
        """

    def parse_iter(self, text: str) -> Iterator[re.Match]:
        """Parse the output of an LLM call.

        Args:
            text: The output of an LLM call.

        Yields:
            A match object for each part of the output.

        Subclasses may override; the base implementation raises
        NotImplementedError, which _transform/_atransform use as a signal
        to fall back to batch `parse` on the growing buffer.
        """
        raise NotImplementedError

    def _transform(
        self, input: Iterator[Union[str, BaseMessage]]
    ) -> Iterator[list[str]]:
        # Streaming parse: accumulate chunks and emit each completed list
        # item as soon as it can no longer change.
        buffer = ""
        for chunk in input:
            if isinstance(chunk, BaseMessage):
                # extract text
                chunk_content = chunk.content
                if not isinstance(chunk_content, str):
                    continue
                chunk = chunk_content
            # add current chunk to buffer
            buffer += chunk
            # parse buffer into a list of parts
            try:
                done_idx = 0
                # yield only complete parts: the final match is withheld
                # (via droplastn) because more text may still extend it
                for m in droplastn(self.parse_iter(buffer), 1):
                    done_idx = m.end()
                    yield [m.group(1)]
                buffer = buffer[done_idx:]
            except NotImplementedError:
                parts = self.parse(buffer)
                # yield only complete parts
                if len(parts) > 1:
                    for part in parts[:-1]:
                        yield [part]
                    buffer = parts[-1]
        # yield the last part
        for part in self.parse(buffer):
            yield [part]

    async def _atransform(
        self, input: AsyncIterator[Union[str, BaseMessage]]
    ) -> AsyncIterator[list[str]]:
        # Async mirror of _transform; keep the two implementations in sync.
        buffer = ""
        async for chunk in input:
            if isinstance(chunk, BaseMessage):
                # extract text
                chunk_content = chunk.content
                if not isinstance(chunk_content, str):
                    continue
                chunk = chunk_content
            # add current chunk to buffer
            buffer += chunk
            # parse buffer into a list of parts
            try:
                done_idx = 0
                # yield only complete parts (see _transform)
                for m in droplastn(self.parse_iter(buffer), 1):
                    done_idx = m.end()
                    yield [m.group(1)]
                buffer = buffer[done_idx:]
            except NotImplementedError:
                parts = self.parse(buffer)
                # yield only complete parts
                if len(parts) > 1:
                    for part in parts[:-1]:
                        yield [part]
                    buffer = parts[-1]
        # yield the last part
        for part in self.parse(buffer):
            yield [part]


ListOutputParser.model_rebuild()
class CommaSeparatedListOutputParser(ListOutputParser):
    """Parse the output of an LLM call to a comma-separated list."""

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Report whether this langchain object is serializable.

        Always returns True.
        """
        return True

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Return the namespace of the langchain object.

        Returns:
            ["langchain", "output_parsers", "list"].
        """
        return ["langchain", "output_parsers", "list"]

    def get_format_instructions(self) -> str:
        """Return the format instructions for the comma-separated list output."""
        return (
            "Your response should be a list of comma separated values, "
            "eg: `foo, bar, baz` or `foo,bar,baz`"
        )

    def parse(self, text: str) -> list[str]:
        """Split *text* into a flat list of comma-separated values.

        Args:
            text: The output of an LLM call.

        Returns:
            A list of strings.
        """
        try:
            # csv handles quoted values containing commas correctly
            rows = csv.reader(
                StringIO(text), quotechar='"', delimiter=",", skipinitialspace=True
            )
            return [cell for row in rows for cell in row]
        except csv.Error:
            # keep old logic for backup
            return [part.strip() for part in text.split(",")]

    @property
    def _type(self) -> str:
        return "comma-separated-list"
class NumberedListOutputParser(ListOutputParser):
    """Parse a numbered list."""

    pattern: str = r"\d+\.\s([^\n]+)"
    """The pattern to match a numbered list item."""

    def get_format_instructions(self) -> str:
        return (
            "Your response should be a numbered list with each item on a new line. "
            "For example: \n\n1. foo\n\n2. bar\n\n3. baz"
        )

    def parse(self, text: str) -> list[str]:
        """Extract the text of every numbered item in *text*.

        Args:
            text: The output of an LLM call.

        Returns:
            A list of strings, one per matched item.
        """
        # the pattern has exactly one group, so this is equivalent to findall
        return [match.group(1) for match in re.finditer(self.pattern, text)]

    def parse_iter(self, text: str) -> Iterator[re.Match]:
        """Lazily match numbered items in *text*.

        Args:
            text: The output of an LLM call.

        Yields:
            A match object for each item found.
        """
        return re.finditer(self.pattern, text)

    @property
    def _type(self) -> str:
        return "numbered-list"
class MarkdownListOutputParser(ListOutputParser):
    """Parse a Markdown list."""

    pattern: str = r"^\s*[-*]\s([^\n]+)$"
    """The pattern to match a Markdown list item."""

    def get_format_instructions(self) -> str:
        """Return the format instructions for the Markdown list output."""
        return "Your response should be a markdown list, " "eg: `- foo\n- bar\n- baz`"

    def parse(self, text: str) -> list[str]:
        """Extract the text of every Markdown bullet in *text*.

        Args:
            text: The output of an LLM call.

        Returns:
            A list of strings, one per matched bullet.
        """
        # single-group pattern: collecting group(1) matches findall's output
        return [match.group(1) for match in re.finditer(self.pattern, text, re.MULTILINE)]

    def parse_iter(self, text: str) -> Iterator[re.Match]:
        """Lazily match Markdown bullets in *text*.

        Args:
            text: The output of an LLM call.

        Yields:
            A match object for each bullet found.
        """
        return re.finditer(self.pattern, text, re.MULTILINE)

    @property
    def _type(self) -> str:
        return "markdown-list"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@langchain_core@output_parsers@list.py@.PATH_END.py
|
{
"filename": "metaestimators.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/utils/metaestimators.py",
"type": "Python"
}
|
"""Utilities for meta-estimators."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABCMeta, abstractmethod
from contextlib import suppress
from typing import Any, List
import numpy as np
from ..base import BaseEstimator
from ..utils import _safe_indexing
from ..utils._tags import get_tags
from ._available_if import available_if
__all__ = ["available_if"]
class _BaseComposition(BaseEstimator, metaclass=ABCMeta):
    """Handles parameter management for classifiers composed of named estimators."""

    # subclasses store their (name, estimator) pairs here
    steps: List[Any]

    @abstractmethod
    def __init__(self):
        pass

    def _get_params(self, attr, deep=True):
        # Return own params, plus — when deep — the named estimators stored
        # under `attr` and each estimator's params prefixed "name__key".
        out = super().get_params(deep=deep)
        if not deep:
            return out

        estimators = getattr(self, attr)
        try:
            out.update(estimators)
        except (TypeError, ValueError):
            # Ignore TypeError for cases where estimators is not a list of
            # (name, estimator) and ignore ValueError when the list is not
            # formatted correctly. This is to prevent errors when calling
            # `set_params`. `BaseEstimator.set_params` calls `get_params` which
            # can error for invalid values for `estimators`.
            return out

        for name, estimator in estimators:
            if hasattr(estimator, "get_params"):
                for key, value in estimator.get_params(deep=True).items():
                    out["%s__%s" % (name, key)] = value
        return out

    def _set_params(self, attr, **params):
        # Ensure strict ordering of parameter setting:
        # 1. All steps
        if attr in params:
            setattr(self, attr, params.pop(attr))
        # 2. Replace items with estimators in params
        items = getattr(self, attr)
        if isinstance(items, list) and items:
            # Get item names used to identify valid names in params
            # `zip` raises a TypeError when `items` does not contains
            # elements of length 2
            with suppress(TypeError):
                item_names, _ = zip(*items)
                for name in list(params.keys()):
                    if "__" not in name and name in item_names:
                        self._replace_estimator(attr, name, params.pop(name))

        # 3. Step parameters and other initialisation arguments
        super().set_params(**params)
        return self

    def _replace_estimator(self, attr, name, new_val):
        # assumes `name` is a valid estimator name
        # copy, swap the matching pair, then reassign so `attr` is replaced
        # wholesale rather than mutated in place
        new_estimators = list(getattr(self, attr))
        for i, (estimator_name, _) in enumerate(new_estimators):
            if estimator_name == name:
                new_estimators[i] = (name, new_val)
                break
        setattr(self, attr, new_estimators)

    def _validate_names(self, names):
        # Reject duplicate names, names that shadow constructor params,
        # and names containing the "__" nesting delimiter.
        if len(set(names)) != len(names):
            raise ValueError("Names provided are not unique: {0!r}".format(list(names)))
        invalid_names = set(names).intersection(self.get_params(deep=False))
        if invalid_names:
            raise ValueError(
                "Estimator names conflict with constructor arguments: {0!r}".format(
                    sorted(invalid_names)
                )
            )
        invalid_names = [name for name in names if "__" in name]
        if invalid_names:
            raise ValueError(
                "Estimator names must not contain __: got {0!r}".format(invalid_names)
            )
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels.

    Slice X, y according to indices for cross-validation, but take care of
    precomputed kernel-matrices or pairwise affinities / distances.

    If ``estimator._pairwise is True``, X needs to be square and
    we slice rows and columns. If ``train_indices`` is not None,
    we slice rows using ``indices`` (assumed the test set) and columns
    using ``train_indices``, indicating the training set.

    Labels y will always be indexed only along the first axis.

    Parameters
    ----------
    estimator : object
        Estimator to determine whether we should slice only rows or rows and
        columns.

    X : array-like, sparse matrix or iterable
        Data to be indexed. If ``estimator._pairwise is True``,
        this needs to be a square array-like or sparse matrix.

    y : array-like, sparse matrix or iterable
        Targets to be indexed.

    indices : array of int
        Rows to select from X and y.
        If ``estimator._pairwise is True`` and ``train_indices is None``
        then ``indices`` will also be used to slice columns.

    train_indices : array of int or None, default=None
        If ``estimator._pairwise is True`` and ``train_indices is not None``,
        then ``train_indices`` will be use to slice the columns of X.

    Returns
    -------
    X_subset : array-like, sparse matrix or list
        Indexed data.

    y_subset : array-like, sparse matrix or list
        Indexed targets.
    """
    if get_tags(estimator).input_tags.pairwise:
        # X is a precomputed square kernel matrix: slice rows AND columns
        if not hasattr(X, "shape"):
            raise ValueError(
                "Precomputed kernels or affinity matrices have "
                "to be passed as arrays or sparse matrices."
            )
        if X.shape[0] != X.shape[1]:
            raise ValueError("X should be a square kernel matrix")
        col_indices = indices if train_indices is None else train_indices
        X_subset = X[np.ix_(indices, col_indices)]
    else:
        X_subset = _safe_indexing(X, indices)

    # targets are always sliced along the first axis only
    y_subset = None if y is None else _safe_indexing(y, indices)

    return X_subset, y_subset
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@utils@metaestimators.py@.PATH_END.py
|
{
"filename": "RadFil_Tutorial.ipynb",
"repo_name": "catherinezucker/radfil",
"repo_path": "radfil_extracted/radfil-master/RadFil_Tutorial.ipynb",
"type": "Jupyter Notebook"
}
|
# RadFil Tutorial
This tutorial walks through all the steps you need in order to build and fit your own radial density profiles. Feel free to run it in Python 2.7, Python 3.4, Python 3.5, or Python 3.6. For this tutorial, our filament of choice is the Musca infrared dark cloud, whose density profile has already been analyzed in [Cox et al. 2016](http://www.aanda.org/articles/aa/pdf/2016/06/aa27068-15.pdf) using an independent radial density profile code. We are going to apply the `RadFil` code to the same published column density map used in the Cox et al. 2016 study ("HGBS_musca_column_density_map.fits") which can be downloaded from the Herschel Gould Belt Survey archives [here](http://www.herschel.fr/cea/gouldbelt/en/Phocea/Vie_des_labos/Ast/ast_visu.php?id_ast=66). The data for the tutorial is also stored locally in the Tutorial_Data folder.
For the most common workflow, the two basic ingredients we need to build profiles using `RadFil` are a fits image and a fits mask for your filament. The third ingredient is the filament spine, across which we will sample the profile. If you already have an existing spine for your filament (e.g. from DisPerSE) you can input the spine into `RadFil`. If not, you can ask `RadFil` to create a spine for you by performing medial axis skeletonization on your inputted mask; this is done via the [FilFinder package](https://github.com/e-koch/FilFinder).
Even if you input your own spine, in most cases a mask is required because `RadFil` searches for the pixel of maximum column density along each cut, bounded by the mask, and then shifts the profile to the maximum value. This ensures that your resulting profile is always centered at r=0 pc. In the rare case that you have a spine and want to shift the profile (but don't have a mask), you can indicate a maximum radial distance from the spine within which to search for the peak column density, and `RadFil` will make your mask for you. We will get to that a bit later. Let's get started!
First, we are going to read in our fits image and fits mask (created by applying a simple contour at a level of 2.25e+21 $\rm cm^{-2}$ to the fits image) via astropy. If you have it, be sure to read in the header in addition to the image array, as `RadFil` uses that to determine the image scale. The size of the image array and the mask array must be identical.
```python
%matplotlib inline
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import LogNorm
from radfil import radfil_class, styles
from astropy import units as u
import numpy as np
fil_image,fil_header=fits.getdata("./Tutorial_Data/HGBS_musca_column_density_map.fits", header=True)
fil_image=fil_image
fil_mask=fits.getdata("./Tutorial_Data/Musca_mask.fits").astype(bool)
#plot our image and mask
fig, ax = plt.subplots(figsize=(20,10), ncols = 2)
ax[0].imshow(fil_image,origin='lower',cmap='Greys',norm=LogNorm())
ax[0].set_title("Image Data")
ax[1].imshow(fil_mask, origin="lower",cmap='Greys_r')
ax[1].set_title("Image Mask")
```
Text(0.5, 1.0, 'Image Mask')
/anaconda3/lib/python3.6/site-packages/matplotlib/colors.py:1028: RuntimeWarning: invalid value encountered in less_equal
mask |= resdat <= 0

Now let's set up our `RadFil` object. The first required argument is the image array (type numpy.ndarray). The second required argument is a mask array of the same shape (type numpy.ndarray); the only time the mask is not required is if you provide a filament spine via the "filspine" argument upon instantiating the object. A fits header and a distance to the filament (in pc) are optional, but are necessary if you want to carry out the analysis in physical units and not pixel units. We are going to adopt the same distance for Musca as in Cox et al. 2016 (200 pc) and carry out the analysis in physical units.
```python
radobj=radfil_class.radfil(fil_image, mask=fil_mask, header=fil_header, distance=200)
```
Because we don't have a spine, we're going to make one using the `FilFinder` package from [Koch & Rosolowsky (2015)](https://academic.oup.com/mnras/article-abstract/452/4/3435/1058975/Filament-identification-through-mathematical?redirectedFrom=fulltext), which can be downloaded [here](https://github.com/e-koch/FilFinder). Make sure you are running at least version 1.6. `FilFinder` creates filament spines by reducing the image mask to a one-pixel wide topological representation of the mask using medial axis skeletonization. The only additional parameter we need to provide FilFinder is the beamwidth of our image in arcseconds, which in Musca's case is 36.3". The verbose argument indicates whether you want `FilFinder` to output all the skeletonization plots.
```python
radobj.make_fil_spine(beamwidth=36.3,verbose=False)
fits.writeto("./Tutorial_Data/Musca_spine.fits", radobj.filspine.astype(int), overwrite=True)
```
/anaconda3/lib/python3.6/site-packages/fil_finder-2.0.dev877-py3.6.egg/fil_finder/length.py:699: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
skeleton[list(zip(*inters[k - 1]))] = 2
Creating the filament spine could take a while, because the image is very large. Once it's done, your radobj will have an attribute called filspine, which is the boolean array demarcating the one-pixel wide spine. Once you have created your spine the first time, just write it out as a fits image and read it into your radobj object above (as the filspine parameter) the next time around to avoid having to create the spine again. Here's what the spine looks like:
```python
fig=plt.figure()
ax=fig.gca()
ax.imshow(radobj.filspine.astype(int), cmap="binary_r", origin='lower')
ax.set_title("Filament Spine",fontsize=12)
ax.set_xticks([])
ax.set_yticks([])
fig.set_dpi(500)
```

Running the make_fil_spine command also returns the length of the filament, which can be accessed under the radobj.length attribute:
```python
print("The Musca filament is {:.1f} long".format(radobj.length))
```
The Musca filament is 8.1 pc long
Now that we have everything we need, let's briefly walk through how `RadFil` builds these profiles. `RadFil` starts by smoothing the filament spine outputted by the FilFinder program (or DiSperSe), taking the derivative across the spine, and making cuts perpendicular to tangent lines sampled evenly across the smoothed spine. The spine smoothing is necessary because the original spines often do not have a smooth continuous derivative, so perpendicular cuts made locally might not reflect the global curvature of the filament. Smoothing the spines and sampling the cuts is a multi-step process. We start by collecting all pixels in filspine belonging to our original spine and use python’s networkx module to sort the array by the order in which they lie along the spine path. As the filament spines are often non-monotonic, we parameterize them using not only their x and y values, but also this order along the path "t". We then use scipy.interpolate’s splprep function to determine a B-spline representation of the spine, applying a smoothness condition to remove kinks in the original spine. Next, we use scipy.interpolate’s splev function to take the first derivative of the spine and evaluate the local curvature across its entire length. Finally, we make fine cuts across the spine by selecting tangent points at well-sampled intervals, parameterizing the tangent lines at these points, and then taking cuts perpendicular to these tangent lines. For each pixel the cut passes through, the profile is sampled at the radial distance closest to the center of the pixel.
We build the profiles using the build_profile method, which takes a few optional arguments. Here is a brief explanation of the major features:
- **samp_int** -- determines how finely you'd like to make cuts across the spine. Inputted as an integer, this indicates the approximate distance between cuts in pixels (samp_int=25 means the cuts are ~25 pixels apart, or 25 x imgscale in pc). You can always determine the image scale of your image by typing "radobj.imgscale," and it will return the value in pc (or pixel units, if no distance is provided). Since radobj.imgscale for Musca=.00387, we are sampling ~ every 0.10 pc in the cell below
- **bins** -- indicates whether you want to average the profile heights in discrete radial bins, or whether the profiles should remain unbinned. If you choose to bin, you can enter bins as an integer (indicating the number of bins, distributed evenly from the minimum radial distance sampled to the maximum radial distance sample in all cuts) or as an array indicating the bin edges. If "none", no binning is applied. *Note that if you choose to bin, the fitting will be performed on the single (bin-averaged) profile.* Otherwise, it will be performed on the entire ensemble of cuts
- **shift** -- whether the center of the profile along each cut should be shifted to the pixel with the maximum column density. `RadFil` searches the region of the cut confined within the filament mask to determine which one has the peak column density. That pixel then becomes r=0 pc and the profiles are built out from there. The default is True.
- **pts_mask** -- use this argument if you want to exclude certain spine points from the profile analysis. For instance, if you have a filament that loops around on itself at either end (producing overlapping cuts) and you want to exclude this region from the profile, provide pts_mask as a 2D boolean array of the same shape as the image, where the loop region is False and all other regions are True.
- **cutdist** -- this is used in cases where you input a spine without a mask and want to shift the profile. If this happens, you have to enter "cutdist"; this creates a mask for you by finding all points within a distance "cutdist" of the spine. Then, if shift=True, all peak column density pixels will be confined to within cutdist of your spine
For now we are just going to build a profile with a) sampling interval of 25 b) shifting (the default) c) no point masking and d) no binning.
```python
radobj.build_profile(samp_int=25)
plt.gcf().set_size_inches(18.5, 10.5)
```
No binning is applied.

In the figure above, the background grayscale shows the underlying column density of the filament within the mask. The thick red line is the smoothed spine of the filament. The perpendicular red cuts are taken at approximately 0.10 pc intervals tangent to the smoothed spine. Because "shift=True" by default, each cut is shifted to the peak column density along the cut, which are marked via the blue scatter points.
<!--The top figure in the above panel shows the smoothed spine as a thick red line running down the original filament mask, with the perpendicular cuts across the spine also shown in red. RadFil searches for the pixel with maximum column density along each of the perpendicular red cuts ("local width lines") which are marked with a blue dot. RadFil then shifts the center of the profile to this pixel, and builds the radial column density profile out from that point.
The bottom profile overlays the radial column density profile for each of the red cuts in the top panel. To create the "master" representative profile shown in red, RadFil linearly interpolates between the points in each of the original profiles, bins them by radial distance from the spine, and takes the median column density in each bin. You can change the number of bins that the profile is divided into (from -cutdist to +cutdist) by changing the numbins parameter above! -->
Now that we have built the profile, we have access to a lot of additional attributes. To access the pixel values for the smoothed spine (the B-spline), for instance, we can use radobj.xspline and radboj.yspline. We can also retrieve information on the radial distances and density profiles of each cut via the "dictionary_cuts" attribute. To access the radial distances of each cut, we can use "radobj.dictionary_cuts['distance']" which will return a list the same size as the number of cuts. To access the profile heights at each of the distances, we can use "radobj.dictionary_cuts['profile']". To access the "master profile" (i.e. the average profile in the case of binning, or every single point in the array of profiles concatenated into a list for no binning), we can use radobj.masterx and radobj.mastery.
```python
fig=plt.figure(figsize=(10,5))
#plot the profile of each cut
for i in range(0,len(radobj.dictionary_cuts['distance'])):
plt.plot(radobj.dictionary_cuts['distance'][i], radobj.dictionary_cuts['profile'][i],c='gray',alpha=0.15)
plt.xlim(-2,2)
plt.legend(fontsize=15)
plt.xlabel("Radial Distance (pc)",fontsize=22)
plt.ylabel(r"Column Density (cm$^{-2}$)",fontsize=22)
plt.tick_params(axis='both', which='major', labelsize=16)
```
No handles with labels found to put in legend.

Another useful attribute retrievable after building the profile is the "mask_width" attribute. This returns a list containing the width of each "local width cut," i.e. the length of each of the red cuts across the spine, confined within the filament mask. If you take an average of these values, it gives you an average mask-based width of the filament. This can be relevant in cases where you cannot reliably fit a profile, but still want to approximate a width of some contour.
```python
print("The average mask-based width of Musca is {:.2f} pc".format(np.nanmedian(radobj.dictionary_cuts['mask_width'])))
```
The average mask-based width of Musca is 0.34 pc
Now that we have the master profile, we are ready for fitting using the fit_profile method. `RadFil` comes with two built in models for fitting: a Plummer-like model and a Gaussian model. The Plummer-like model taken is from Cox et al. (2016) and parameterized as:
$$\rm N(r) = \frac{N_0}{[{1+{(\frac{r}{R_{flat}})^2}]}^{\; \frac{p-1}{2}}}$$
where $\rm N_0$ is the amplitude, p is the power index, and $\rm R_{flat}$ is the inner flattening radius.
The Gaussian model is your standard Gaussian:
$$\rm N(r)=a \times \exp{[\frac{-{(r-\mu)}^2}{2\sigma^2}]}$$
where a is the amplitude, $\sigma$ is the standard deviation, and $\mu$ is the mean.
For both models we can also choose whether we would like to fit and subtract a background. For now, we are going to fit both a Plummer-like fit and a Gaussian fit after doing some background subtraction. We have two options for background subtraction which we control via the 'bgdegree" argument:
- bgdegree=0 indicates you want to fit a flat line (zeroeth order polynomial)
- bgdegree=1 indicates you want to fit a sloping line (first order polynomial). This is the default.
We choose the sloping background option (bgdegree=1). This will fit a line on either side of your profile within the bounds of the "bgdist" parameter, which specifies the inner and outer radial distances over which to perform the background fit. We adopt inner and outer background radii of 1 and 2 pc. Thus, we input bgdist=(1,2). Finally, after performing the subtraction, we will fit the Plummer-like function out to a distance of 1 pc, which we indicate via the fitdist=1 argument. Note that we could also adopt an asymmetric fitting range, by giving fitdist a tuple, e.g. fitdist=(-1,2), which would fit the radial profile between -1 pc and 2 pc. In every case, the range of radii over which the background and Plummer-functions are fit are highlighted in green and blue in the figures below.
```python
radobj.fit_profile(fitfunc="Plummer",fitdist=1.0,bgdist=(1.0,2.0),bgdegree=1, beamwidth=36.3)
```
==== Plummer-like ====
amplitude: 3.766E+21
p: 2.224
R_flat: 0.076
('Physical Size of the Beam:', <Quantity 0.03519747 pc>)
/Users/catherinezucker/radfil/radfil/radfil_class.py:1098: UserWarning: The deconvolution procedure is not robust. Calculating deconvolved widths for the same data convolved with different beams will not produce identical values
warnings.warn("The deconvolution procedure is not robust. Calculating deconvolved widths for the same data convolved with different beams will not produce identical values")
<radfil.radfil_class.radfil at 0x1c29576898>

Both are astropy.modeling objects and can be manipulated via the normal channels.
```python
for (name,value) in zip(radobj.profilefit.param_names,radobj.profilefit.parameters):
print("The best-fit {} is {}".format(name,value))
```
The best-fit amplitude is 3.7661685289264087e+21
The best-fit powerIndex is 2.224050939382413
The best-fit flatteningRadius is 0.0762059013789237
Note that if you want access the covariance matrix for the parameters you can do so by typing radobj.param_cov. This will return an NxN array where N is the number of fitted parameters.
```python
print("The covariance matrix for our fit is given by...")
print(radobj.param_cov)
```
The covariance matrix for our fit is given by...
[[ 2.61278386e+38 -7.02449317e+16 1.16106079e+16]
[-7.02449317e+16 1.27190492e-04 -1.09837616e-05]
[ 1.16106079e+16 -1.09837616e-05 1.17745755e-06]]
You can get an estimate of the statistical error by taking the square root of the diagonal elements of this matrix. RadFil has a standard error attribute that does this for you...
```python
for (name,error) in zip(radobj.profilefit.param_names,radobj.std_error):
print("The statistical uncertainty on the best-fit {} is {}".format(name,error))
```
The statistical uncertainty on the best-fit amplitude is 1.6164107966862752e+19
The statistical uncertainty on the best-fit powerIndex is 0.0112778762017101
The statistical uncertainty on the best-fit flatteningRadius is 0.001085107160081439
However, in many cases the systematic uncertainty will dominate the uncertainty you calculate. RadFil has a built in method for estimating the systematic uncertainty due to the choice of background subtraction radii and fitting radii. Given two lists (one containing background subtraction radii options and the other containing fitting radii options), RadFil will compute all possible combinations and determine the best-fit value in each case.
```python
%%capture
bgdist_list=[[1,2], [2,3], [2,4], [3,4]]
fitdist_list=[0.5, 1.0, 1.5, 2.0]
radobj.calculate_systematic_uncertainty(fitfunc='Plummer',fitdist_list=fitdist_list,bgdist_list=bgdist_list)
```
After running this function, RadFil stores a dictionary as an attribute (called "radfil_trials"), with the keys of the dictionary corresponding to different model paramters. Accessing each key returns a pandas dataframe, where the columns are the fitting radii and the rows are the background subtraction radii. You can access any cell using its row and column strings, or print the entire dataframe to screen:
```python
print(radobj.radfil_trials.keys())
print("The best-fit powerIndex for a fitting radius of 1 pc and background subtraction radii between 1 and 2 pc is {}".format(radobj.radfil_trials['powerIndex']['1.0']['[1, 2]']))
```
dict_keys(['amplitude', 'powerIndex', 'flatteningRadius'])
The best-fit powerIndex for a fitting radius of 1 pc and background subtraction radii between 1 and 2 pc is 2.224050939382413
If you want to calculate the systematic uncertainties using this method, you could (for example) take the standard deviation of the best-fit values for each parameter, summarized in each dataframe!
```python
for name in radobj.profilefit.param_names:
print("The systematic uncertainty on the {} parameter is {}".format(name,np.std(np.array(radobj.radfil_trials[name]))))
```
The systematic uncertainty on the amplitude parameter is 5.47443582922503e+19
The systematic uncertainty on the powerIndex parameter is 0.1274064386797959
The systematic uncertainty on the flatteningRadius parameter is 0.009334135533916063
**The values for R_flat and p given in Cox et al. 2016 are R_flat=0.08 pc, p=2.2 +/- 0.3, while we find R_flat=0.08 and p=2.22, which are practically identical to the Cox values and well within the margin of error.**
Next, we are going to fit the inner width of the filament with a Gaussian, using the same background subtraction parameters as in the Plummer-like fit above. However, we will adopt a new fitting distance. In Cox et al. 2016, they fit out to a distance of 0.05 pc (private communication), so we will also fit out to 0.05 pc in order to reproduce their method.
```python
radobj.fit_profile(fitfunc="Gaussian",fitdist=0.05,bgdist=(1.0,2.0),bgdegree=1,beamwidth=36.3)
```
==== Gaussian ====
amplitude: 3.824E+21
mean: 0.000
width: 0.068
('Physical Size of the Beam:', <Quantity 0.03519747 pc>)
/Users/catherinezucker/radfil/radfil/radfil_class.py:1062: UserWarning: The deconvolution procedure is not robust. Calculating deconvolved widths for the same data convolved with different beams will not produce identical values
warnings.warn("The deconvolution procedure is not robust. Calculating deconvolved widths for the same data convolved with different beams will not produce identical values")
<radfil.radfil_class.radfil at 0x1c29576898>

In addition to providing the best fit standard deviation, running the fit_profile method also automatically calculates the FWHM (both deconvolved and non-deconvolved with the beam). These can be accessed via the radobj.FWHM and radobj.FWHM_deconv attributes. In order to calculate the deconvolved FWHM, you must input the beamwidth in the make_fil_spine method, or (if you enter a pre-computed spine) in the fit_profile method. Otherwise the deconvolved FWHM will be set to nan. To calculate the deconvolved width, we use the formula from [Konyves et al. 2015](https://www.aanda.org/articles/aa/pdf/2015/12/aa25861-15.pdf): $FWHM_{deconv}=\sqrt{FWHM^2 - HPBW^2}$, where HPBW is the half-power beamwidth.
```python
print("The deconvolved FWHM is {:.3f} pc and the non-deconvolved FWHM is {:.3f}".format(radobj.FWHM_deconv,radobj.FWHM))
```
The deconvolved FWHM is 0.155 pc and the non-deconvolved FWHM is 0.159
**The published Cox et al. 2016 deconvolved FWHM value is 0.14 +/- 0.03 pc while our deconvolved FWHM value is 0.16, so again, our results agree very well with the published value and are within their margin of error.**
____
The resulting plots can be retrieved and plotted to the user's own matplotlib axis instance. This gives the user the freedom to edit the axis labels, adjust the figure size, and adjust the ticks. This is done using the plotter class in `RadFil`.
```python
radobj_plotter = radobj.plotter()
```
For example, the plot for the spline and the cuts can be reproduced to an axis with proper wcs projection, and change properties like the x and y axis label text, size, etc:
```python
from astropy import wcs
fig = plt.figure()
# Set up the wcs axis.
ax = fig.gca(projection = wcs.WCS(fil_header))
ax.coords[0].set_axislabel('R.A. [J2000]',fontsize=30)
ax.coords[0].set_major_formatter('hh:mm')
ax.coords[1].set_axislabel('Dec. [J2000]',fontsize=30)
ax.coords[0].set_ticklabel(size=20)
ax.coords[1].set_ticklabel(size=20)
fig.set_size_inches(18.5, 10.5)
# Plot!!
radobj_plotter.plotCuts(ax)
```

The user can also retrieve the plots generated during fitting and adjust various properties of the plot. Below we retrieve the background fit plot, set the axis labels, set the x axis range, and set the size of the tick labels.
```python
fig = plt.figure(figsize = (12., 6.))
ax = fig.gca()
radobj_plotter.plotFits(ax, 'bg')
ax.set_xlabel('Distance [pc]',fontsize=30)
ax.set_ylabel('Column Density [cm$^{-2}$]',fontsize=30)
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlim(-2.5,2.5)
```
(-2.5, 2.5)

We can do the same thing for the fitted model. Below we retrieve the model fit plot, and again set the axis labels, set the x axis range, and set the size of the tick labels.
```python
fig = plt.figure(figsize = (12., 6.))
ax = fig.gca()
radobj_plotter.plotFits(ax, 'model')
ax.set_xlabel('Distance [pc]',fontsize=30)
ax.set_ylabel('Column Density [cm$^{-2}$]',fontsize=30)
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlim(-2.5,2.5)
```
(-2.5, 2.5)

And we can do them both together!
```python
fig,(ax1,ax2) = plt.subplots(2,figsize = (12,12))
radobj_plotter.plotFits(ax1, 'bg')
ax1.set_ylabel('Column Density [cm$^{-2}$]',fontsize=22)
ax1.set_xlim(-2.5,2.5)
ax1.tick_params(axis='y', which='major', labelsize=20)
ax1.set_xticklabels([])
radobj_plotter.plotFits(ax2, 'model')
ax2.set_xlabel('Radial Distance (pc)',fontsize=22)
ax2.set_ylabel('Column Density [cm$^{-2}$]',fontsize=22)
ax2.set_xlim(-2.5,2.5)
ax2.tick_params(axis='both', which='major', labelsize=20)
fig.subplots_adjust(hspace=0.1)
```

### Have any questions about this tutorial? Open an issue on github, or email the co-authors (catherine.zucker@cfa.harvard.edu and hopechen@utexas.edu)
|
catherinezuckerREPO_NAMEradfilPATH_START.@radfil_extracted@radfil-master@RadFil_Tutorial.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/lib/matplotlib/sphinxext/__init__.py",
"type": "Python"
}
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@lib@matplotlib@sphinxext@__init__.py@.PATH_END.py
|
|
{
"filename": "write_parset_tables.py",
"repo_name": "sdss/mangadap",
"repo_path": "mangadap_extracted/mangadap-main/docs/scripts/write_parset_tables.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""
Dynamically build the rst documentation of the bitmasks.
"""
import time
from importlib import resources
#-----------------------------------------------------------------------------
def write_parset(parset_class, opath, class_link=True):
    """Render a ParSet class as a reST table and write it to disk.

    The output file is named after the lower-cased class name, with an
    ``.rst`` extension, and is placed in ``opath``.

    Parameters
    ----------
    parset_class : type
        Class (not an instance) with a no-argument constructor and a
        ``to_rst_table`` method producing the table lines.
    opath : pathlib.Path
        Directory receiving the output file.
    class_link : bool, optional
        Forwarded to ``to_rst_table``; controls whether the table links
        back to the class documentation.
    """
    table_rows = parset_class().to_rst_table(header=False,
                                             class_link=class_link,
                                             nested=False)
    out_file = opath / f'{parset_class.__name__.lower()}.rst'
    # Path.write_text opens, writes, and closes in one step.
    out_file.write_text('\n'.join(table_rows))
if __name__ == '__main__':
    # Time the full table-generation run; the elapsed time is reported at
    # the end.
    t = time.perf_counter()

    # Output goes to docs/tables under the repository root, which is
    # located relative to the installed mangadap package.
    root = resources.files('mangadap').parent
    path = root / 'docs' / 'tables'
    if not path.is_dir():
        path.mkdir(parents=True)

    # Tables to write:
    #
    # from mangadap.config.analysisplan import AnalysisPlan
    # write_parset(AnalysisPlan, path, class_link=False)

    # Imports are deliberately local to the entry point, immediately before
    # each write_parset call, so the script only pays the import cost for
    # the classes actually documented.
    from mangadap.par.artifactdb import ArtifactPar
    write_parset(ArtifactPar, path, class_link=False)

    from mangadap.par.emissionlinedb import EmissionLinePar
    write_parset(EmissionLinePar, path, class_link=False)

    from mangadap.proc.reductionassessments import ReductionAssessmentDef
    write_parset(ReductionAssessmentDef, path, class_link=False)

    from mangadap.proc.spatiallybinnedspectra import SpatiallyBinnedSpectraDef
    write_parset(SpatiallyBinnedSpectraDef, path, class_link=False)

    from mangadap.proc.spatialbinning import RadialBinningPar, VoronoiBinningPar, SquareBinningPar
    write_parset(RadialBinningPar, path, class_link=False)
    write_parset(VoronoiBinningPar, path, class_link=False)
    write_parset(SquareBinningPar, path, class_link=False)

    from mangadap.proc.spectralstack import SpectralStackPar
    write_parset(SpectralStackPar, path, class_link=False)

    from mangadap.proc.templatelibrary import TemplateLibraryDef
    write_parset(TemplateLibraryDef, path, class_link=False)

    from mangadap.proc.stellarcontinuummodel import StellarContinuumModelDef
    write_parset(StellarContinuumModelDef, path, class_link=False)

    from mangadap.proc.ppxffit import PPXFFitPar
    write_parset(PPXFFitPar, path, class_link=False)

    from mangadap.proc.emissionlinemoments import EmissionLineMomentsDef
    write_parset(EmissionLineMomentsDef, path, class_link=False)

    from mangadap.proc.bandpassfilter import BandPassFilterPar
    write_parset(BandPassFilterPar, path, class_link=False)

    from mangadap.proc.emissionlinemodel import EmissionLineModelDef
    write_parset(EmissionLineModelDef, path, class_link=False)

    # from mangadap.proc.elric import ElricPar
    # write_parset(ElricPar, path, class_link=False)

    from mangadap.proc.sasuke import SasukePar
    write_parset(SasukePar, path, class_link=False)

    from mangadap.proc.spectralindices import SpectralIndicesDef
    write_parset(SpectralIndicesDef, path, class_link=False)

    print('Elapsed time: {0} seconds'.format(time.perf_counter() - t))
|
sdssREPO_NAMEmangadapPATH_START.@mangadap_extracted@mangadap-main@docs@scripts@write_parset_tables.py@.PATH_END.py
|
{
"filename": "rp.py",
"repo_name": "AMReX-Astro/MAESTROeX",
"repo_path": "MAESTROeX_extracted/MAESTROeX-main/sphinx_docs/rp.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import os
import re
import sys
import textwrap
from more_itertools import unique_everseen
# reST grid-table scaffolding used by make_rest_table() when printing the
# runtime-parameter tables.  The ENTRY format widths (38 / 55 / 13) define
# the column widths of each data row.
MAIN_HEADER = """
+----------------------------------------+---------------------------------------------------------+---------------+
| parameter | description | default value |
+========================================+=========================================================+===============+
"""

# Horizontal rule separating consecutive parameter entries in the table.
SEPARATOR = """
+----------------------------------------+---------------------------------------------------------+---------------+
"""

# One table row: parameter name, a single wrapped description line, default.
ENTRY = """
| {:38} | {:55} | {:13} |
"""

# Maximum line width used when wrapping description text into table rows;
# matches the description-column width in ENTRY.
WRAP_LEN = 55
class Parameter:
    """Container for a single runtime parameter parsed from a _parameter file.

    Instances are populated attribute-by-attribute by the parser in
    ``make_rest_table`` and sorted by ``"<category>.<var>"``.
    """

    def __init__(self):
        self.var = ""        # parameter name
        self.default = ""    # default value, verbatim from the input file
        # Accumulated description text.  The parser assigns a string and
        # make_rest_table calls p.description.strip(), so the default is a
        # string ("" rather than the previous, inconsistent empty list).
        self.description = ""
        self.category = ""   # category heading the parameter appears under
        self.namespace = ""  # namespace declared via an "@namespace" command

    def value(self):
        """Return the sort key, ``"<category>.<var>"``."""
        return self.category + "." + self.var

    def __lt__(self, other):
        # Order parameters by category, then by name.
        return self.value() < other.value()
def make_rest_table(param_files):
    """Parse runtime-parameter files and print reST tables to stdout.

    Each input file may declare namespaces via ``@namespace`` commands,
    category headings framed by ``#------`` lines, ``#``-prefixed
    description comments, and parameter-definition lines.  One table per
    namespace/category group is printed using the module-level MAIN_HEADER,
    ENTRY, and SEPARATOR templates.

    Parameters
    ----------
    param_files : list of str
        Paths to the ``_parameter`` files to parse.
    """
    params_list = []
    for pf in param_files:

        # each file is a category
        # NOTE(review): this initial value is overwritten by "" a few lines
        # below before it is ever used -- confirm whether the directory-based
        # category is still intended.
        category = os.path.basename(os.path.dirname(pf))

        # open the file
        try:
            f = open(pf)
        except OSError:
            sys.exit(f"ERROR: {pf} does not exist")

        descr = r""
        category = ""

        # read in the file
        line = f.readline()
        while line:

            # we assume that parameters have an optional descriptive
            # heading before them without any blank line between the
            # description and the parameter definition. Therefore,
            # if we encounter a blank line, zero out the description.
            if line.strip() == "":
                descr = r""
                line = f.readline()
                continue

            if line.startswith("@"):
                # this is a command -- we only know namespace
                fields = line.split()
                if fields[0].startswith("@namespace"):
                    namespace = fields[1].strip()
                    # a new namespace resets the current category
                    category = ""
                line = f.readline()
                continue

            # look for category definition
            if line.startswith("#------"):
                # the next line should be the category definition,
                # formatted as "# <label>: <category name>"
                line = f.readline()
                index = line.find(":")
                category = line[index+1:]
                # following this is another #---------
                line = f.readline()
                if not line.startswith("#------"):
                    sys.exit("ERROR: category block not formatted correctly")
                line = f.readline()
                continue

            # find the description
            if line.startswith("#"):
                # handle descriptions here; "@@" marks text to drop before
                # accumulating the comment into the running description
                tmp = line[1:].rstrip()
                if tmp.endswith("@@"):
                    tmp = tmp.replace("@@", "")
                descr += tmp
                line = f.readline()
                continue
            else:
                current_param = Parameter()
                # this splits the line into separate fields. A field
                # is a single word or a pair in parentheses like "(a,
                # b)"
                fields = re.findall(r'[\w\"\+\.-]+|\([\w+\.-]+\s*,\s*[\w\+\.-]+\)', line)
                current_param.var = fields[0]
                if current_param.var.startswith("("):
                    # for "(name, alias)" pairs, keep only the first word
                    current_param.var = re.findall(r"\w+", fields[0])[0]
                current_param.default = fields[2]
                current_param.description = descr
                current_param.category = category.strip()
                current_param.namespace = namespace.strip()
                # the description has been consumed; start fresh
                descr = r""

            # store the current parameter in the list
            params_list.append(current_param)

            line = f.readline()

    # unique namespaces, preserving first-seen order before sorting below
    namespaces = list(unique_everseen([q.namespace for q in params_list]))

    for nm in sorted(namespaces):

        # print the heading
        heading_name = fr"namespace: ``{nm}``"
        nmlen = len(heading_name)
        print(".. _sec:runtime-parameters-tables:\n")
        print(heading_name)
        print(nmlen*"-" + "\n")

        # now group by category
        categories = list(unique_everseen([q.category for q in params_list if q.namespace == nm]))

        for c in categories:
            # print the subheading
            if c != "":
                print(f"**{c}**\n")

            params = [q for q in params_list if q.namespace == nm and q.category == c]

            # print the index stuff
            fmt = [f"{nm}.{q.var}" for q in params]
            print(".. index:: {}\n\n".format(", ".join(fmt)))

            print(MAIN_HEADER.strip())
            for p in params:
                # wrap the description so each line fits the table column;
                # a parameter with no description still gets one empty row
                desc = list(textwrap.wrap(p.description.strip(), WRAP_LEN))
                if not desc:
                    desc = [""]
                for n, d in enumerate(desc):
                    if n == 0:
                        # first row carries the name and default value
                        print(ENTRY.format("``"+p.var+"``", d, p.default).strip())
                    else:
                        # continuation rows carry only the wrapped description
                        print(ENTRY.format(" ", d, " ").strip())
                print(SEPARATOR.strip())

            print("\n\n")
if __name__ == "__main__":

    # find all of the _parameter files
    # NOTE(review): the path is relative to the current working directory;
    # presumably the script is run from the sphinx_docs/ directory -- confirm.
    param_files = ["../Source/param/_cpp_parameters"]

    make_rest_table(param_files)
|
AMReX-AstroREPO_NAMEMAESTROeXPATH_START.@MAESTROeX_extracted@MAESTROeX-main@sphinx_docs@rp.py@.PATH_END.py
|
{
"filename": "surgery.md",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/docs_nnx/guides/surgery.md",
"type": "Markdown"
}
|
---
jupytext:
formats: ipynb,md:myst
text_representation:
extension: .md
format_name: myst
format_version: 0.13
jupytext_version: 1.13.8
---
# Model surgery
Model surgery is the act of modifying an existing neural network's building blocks and parameters — for example, replacing layers, manipulating parameters or state, or even "monkey patching". In this guide, you will learn how to perform model surgery in Flax NNX using several real-world scenarios:
* __Pythonic `nnx.Module` manipulation__: Using Pythonic ways to manipulate sub-`Module`s given a model.
* __Manipulation of an abstract model or state__: A key trick for playing with `flax.nnx.Module`s and states without memory allocation.
* __Checkpoint surgery from a raw state to model__: How to manipulate parameter states when they are incompatible with existing model code.
* __Partial initialization__: How to initialize only a part of the model from scratch using a naive method or a memory-efficient method.
```{code-cell} ipython3
from typing import *
from pprint import pprint
import functools
import jax
from jax import lax, numpy as jnp, tree_util as jtu
from jax.sharding import PartitionSpec, Mesh, NamedSharding
from jax.experimental import mesh_utils
import flax
from flax import nnx
import flax.traverse_util
import numpy as np
import orbax.checkpoint as orbax
key = jax.random.key(0)
```
```{code-cell} ipython3
class TwoLayerMLP(nnx.Module):
def __init__(self, dim, rngs: nnx.Rngs):
self.linear1 = nnx.Linear(dim, dim, rngs=rngs)
self.linear2 = nnx.Linear(dim, dim, rngs=rngs)
def __call__(self, x):
x = self.linear1(x)
return self.linear2(x)
```
## Pythonic `nnx.Module` manipulation
It is easier to perform model surgery when:
1) You already have a fully fleshed-out model loaded with correct parameters; and
2) You don't intend to change your model definition code.
You can perform a variety of Pythonic operations on its sub-`Module`s, such as sub-`Module` swapping, `Module` sharing, variable sharing, and monkey-patching:
```{code-cell} ipython3
model = TwoLayerMLP(4, rngs=nnx.Rngs(0))
x = jax.random.normal(jax.random.key(42), (3, 4))
np.testing.assert_allclose(model(x), model.linear2(model.linear1(x)))
# Sub-`Module` swapping.
original1, original2 = model.linear1, model.linear2
model.linear1, model.linear2 = model.linear2, model.linear1
np.testing.assert_allclose(model(x), original1(original2(x)))
# `Module` sharing (tying all weights together).
model = TwoLayerMLP(4, rngs=nnx.Rngs(0))
model.linear2 = model.linear1
assert not hasattr(nnx.state(model), 'linear2')
np.testing.assert_allclose(model(x), model.linear1(model.linear1(x)))
# Variable sharing (weight-tying).
model = TwoLayerMLP(4, rngs=nnx.Rngs(0))
model.linear1.kernel = model.linear2.kernel # the bias parameter is kept separate
assert hasattr(nnx.state(model), 'linear2')
assert hasattr(nnx.state(model)['linear2'], 'bias')
assert not hasattr(nnx.state(model)['linear2'], 'kernel')
# Monkey-patching.
model = TwoLayerMLP(4, rngs=nnx.Rngs(0))
def awesome_layer(x): return x
model.linear2 = awesome_layer
np.testing.assert_allclose(model(x), model.linear1(x))
```
## Creating an abstract model or state without memory allocation
To do more complex model surgery, the key technique you can use is creating and manipulating an abstract model or state without allocating any real parameter data. This makes trial iteration faster and removes any concern on memory constraints.
To create an abstract model:
* Create a function that returns a valid Flax NNX model; and
* Run `nnx.eval_shape` (not `jax.eval_shape`) upon it.
Now you can use `nnx.split` as usual to get its abstract state. Note that all fields that should be `jax.Array`s in a real model are now of an abstract `jax.ShapeDtypeStruct` type with only shape/dtype/sharding information.
```{code-cell} ipython3
abs_model = nnx.eval_shape(lambda: TwoLayerMLP(4, rngs=nnx.Rngs(0)))
gdef, abs_state = nnx.split(abs_model)
pprint(abs_state)
```
When you fill every `nnx.VariableState` pytree leaf's `value` attributes with real `jax.Array`s, the abstract model becomes equivalent to a real model.
```{code-cell} ipython3
model = TwoLayerMLP(4, rngs=nnx.Rngs(0))
abs_state['linear1']['kernel'].value = model.linear1.kernel
abs_state['linear1']['bias'].value = model.linear1.bias
abs_state['linear2']['kernel'].value = model.linear2.kernel
abs_state['linear2']['bias'].value = model.linear2.bias
nnx.update(abs_model, abs_state)
np.testing.assert_allclose(abs_model(x), model(x)) # They are equivalent now!
```
## Checkpoint surgery
With the abstract state technique in hand, you can perform arbitrary manipulation on any checkpoint - or runtime parameter pytree - to make them fit with your given model code, and then call `nnx.update` to merge them.
This can be helpful if you are trying to significantly change the model code - for example, when migrating from Flax Linen to Flax NNX - and old weights are no longer naturally compatible.
Let's run a simple example here:
```{code-cell} ipython3
# Save a version of model into a checkpoint
checkpointer = orbax.PyTreeCheckpointer()
old_model = TwoLayerMLP(4, rngs=nnx.Rngs(0))
checkpointer.save('/tmp/nnx-surgery-state', nnx.state(old_model), force=True)
```
In this new model, the sub-`Module`s are renamed from `linear(1|2)` to `layer(1|2)`. Since the pytree structure has changed, it is impossible to directly load the old checkpoint with the new model state structure:
```{code-cell} ipython3
class ModifiedTwoLayerMLP(nnx.Module):
def __init__(self, dim, rngs: nnx.Rngs):
self.layer1 = nnx.Linear(dim, dim, rngs=rngs) # no longer linear1!
self.layer2 = nnx.Linear(dim, dim, rngs=rngs)
def __call__(self, x):
x = self.layer1(x)
return self.layer2(x)
abs_model = nnx.eval_shape(lambda: ModifiedTwoLayerMLP(4, rngs=nnx.Rngs(0)))
try:
with_item = checkpointer.restore('/tmp/nnx-surgery-state', item=nnx.state(abs_model))
print(with_item)
except Exception as e:
print(f'This will throw error: {type(e)}: {e}')
```
However, you can load the parameter pytree as a raw dictionary, perform the renames, and generate a new state that is guaranteed to be compatible with your new model definition.
```{code-cell} ipython3
def process_raw_dict(raw_state_dict):
flattened = nnx.traversals.flatten_mapping(raw_state_dict)
# Cut the '.value' postfix on every leaf path.
flattened = {(path[:-1] if path[-1] == 'value' else path): value
for path, value in flattened.items()}
return nnx.traversals.unflatten_mapping(flattened)
# Make your local change on the checkpoint dictionary.
raw_dict = checkpointer.restore('/tmp/nnx-surgery-state')
pprint(raw_dict)
raw_dict['layer1'] = raw_dict.pop('linear1')
raw_dict['layer2'] = raw_dict.pop('linear2')
# Fit it into the model state.
abs_model = nnx.eval_shape(lambda: ModifiedTwoLayerMLP(4, rngs=nnx.Rngs(0)))
graph_def, state = nnx.split(abs_model)
state.replace_by_pure_dict(process_raw_dict(raw_dict))
restored_model = nnx.merge(graph_def, state)
np.testing.assert_allclose(restored_model(jnp.ones((3, 4))), old_model(jnp.ones((3, 4))))
```
## Partial initialization
In some cases - such as with LoRA (Low-Rank Adaptation) - you may want to randomly initialize only *part of* your model parameters. This can be achieved through:
- Naive partial initialization; or
- Memory-efficient partial initialization.
+++
### Naive partial initialization
To do naive partial initialization, you can just initialize the whole model, then swap the pre-trained parameters in. However, this approach may allocate additional memory midway if your modification requires re-creating module parameters that you will later discard. Below is an example of this.
> **Note:** You can use `jax.live_arrays()` to check all the arrays live in memory at any given time. This call can be “messed up” when you run a single Jupyter notebook cell multiple times (due to garbage-collection of old Python variables). However, restarting the Python kernel in the notebook and running the code from scratch will always yield the same output.
```{code-cell} ipython3
# Some pretrained model state
old_state = nnx.state(TwoLayerMLP(4, rngs=nnx.Rngs(0)))
simple_model = nnx.eval_shape(lambda: TwoLayerMLP(4, rngs=nnx.Rngs(42)))
print(f'Number of jax arrays in memory at start: {len(jax.live_arrays())}')
# In this line, extra kernel and bias is created inside the new LoRALinear!
# They are wasted, because you are going to use the kernel and bias in `old_state` anyway.
simple_model.linear1 = nnx.LoRALinear(4, 4, lora_rank=3, rngs=nnx.Rngs(42))
print(f'Number of jax arrays in memory midway: {len(jax.live_arrays())}'
' (4 new created in LoRALinear - kernel, bias, lora_a & lora_b)')
nnx.update(simple_model, old_state)
print(f'Number of jax arrays in memory at end: {len(jax.live_arrays())}'
' (2 discarded - only lora_a & lora_b are used in model)')
```
### Memory-efficient partial initialization
To do memory-efficient partial initialization, use `nnx.jit`'s efficiently compiled code to make sure only the state parameters you need are initialized:
```{code-cell} ipython3
# Some pretrained model state
old_state = nnx.state(TwoLayerMLP(4, rngs=nnx.Rngs(0)))
# Use `nnx.jit` (which wraps `jax.jit`) to automatically skip unused arrays - memory efficient!
@nnx.jit(donate_argnums=0)
def partial_init(old_state, rngs):
model = TwoLayerMLP(4, rngs=rngs)
# Create a new state.
model.linear1 = nnx.LoRALinear(4, 4, lora_rank=3, rngs=rngs)
# Add the existing state.
nnx.update(model, old_state)
return model
print(f'Number of JAX Arrays in memory at start: {len(jax.live_arrays())}')
# Note that `old_state` will be deleted after this `partial_init` call.
good_model = partial_init(old_state, nnx.Rngs(42))
print(f'Number of JAX Arrays in memory at end: {len(jax.live_arrays())}'
' (2 new created - lora_a and lora_b)')
```
```{code-cell} ipython3
```
```{code-cell} ipython3
```
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@docs_nnx@guides@surgery.md@.PATH_END.py
|
{
"filename": "old_build_frbs.py",
"repo_name": "FRBs/FRB",
"repo_path": "FRB_extracted/FRB-main/frb/builds/old_build_frbs.py",
"type": "Python"
}
|
""" Module to generate individual FRB files """
import importlib_resources
import numpy as np
from astropy import units
from astropy.coordinates import SkyCoord
from frb import frb
def frb_121102():
    """
    Build the FRB object for FRB 121102 and write it to JSON.

    All of the data currently comes from Tendulkar et al. 2017
    https://ui.adsabs.harvard.edu/abs/2017ApJ...834L...7T/abstract
    or Chatterjee et al. 2017

    Update for Marcote et al. 2017
      05h31m58.7013s +/- 4 mas
      +33d08m52.5536s +/- 2.3 mas
    """
    frb121102 = frb.FRB('FRB20121102', 'J053158.7013+330852.5536',
                        558.1*units.pc/units.cm**3,
                        z_frb=0.19273, repeater=True)
    # Galactic ISM contribution to the DM (NE2001 model)
    frb121102.set_DMISM()
    # RM
    frb121102.RM = 1.e5 * units.rad / units.m**2
    # Pulse properties
    frb121102.set_pulse(1*units.GHz,
                        Wi=3.0*units.ms,
                        Wi_err=0.5*units.ms,
                        tscatt=0.024*units.ms)  # no errors given
    # Error ellipse: statistical then systematic
    frb121102.set_ee(0.004, 0.002, theta=90., cl=68.)
    frb121102.set_ee(a=0.0015, b=0.0015, theta=0., cl=68., stat=False)  # Marcote priv. corr
    # References
    frb121102.refs = ['Tendulkar2017', 'Marcote2017']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb121102.write_to_json(path=path)
    # Test
    #frb121102.from_json('FRB121102.json')
def frb_180301():
    """
    Build the FRB object for FRB 20180301 and write it to JSON.

    Data from Bhandari+2021.
    """
    frbname = 'FRB20180301'
    FRB_180301_coord = SkyCoord("06h12m54.44s +04d40m15.8s", frame='icrs')  # Bhandari+2021
    frb180301 = frb.FRB(frbname, FRB_180301_coord,
                        536 * units.pc / units.cm ** 3,
                        z_frb=0.33044, repeater=True)  # Slack posting
    # Error ellipse (Statistical)
    frb180301.set_ee(0.01142, 0.00825, 0., 68.)
    # Error ellipse (Systematic)
    frb180301.set_ee(0.619, 0.603, 0., 68., stat=False)
    # Error in DM
    frb180301.DM_err = 13 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb180301.set_DMISM()
    # RM (not yet measured/recorded)
    # frb190102.RM = 10 * units.rad / units.m**2
    # References
    frb180301.refs = ['Bhandari2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb180301.write_to_json(path=path)
def frb_180916():
    """
    Build the FRB object for FRB 180916.J0158+65 and write it to JSON.

    All of the data currently comes from Marcote et al. 2020
    https://ui.adsabs.harvard.edu/abs/2020Natur.577..190M/abstract
    """
    coord = SkyCoord("01h58m00.75017s 65d43m00.3152s", frame='icrs')
    name = 'FRB20180916'
    frb180916 = frb.FRB(name, coord,
                        348.76*units.pc / units.cm**3,
                        z_frb=0.0337, repeater=True)
    # Error ellipse: statistical then systematic
    frb180916.set_ee(0.0011, 0.0011, theta=0., cl=68.)
    frb180916.set_ee(a=0.002, b=0.002, theta=0., cl=68., stat=False)  # Marcote priv. corr
    # Error in DM
    frb180916.DM_err = 0.10 * units.pc / units.cm**3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb180916.set_DMISM()
    # RM and fluence
    frb180916.fluence = 2.53*units.Jy*units.ms  # Brightest
    frb180916.RM = -114.6 * units.rad / units.m**2  # From CHIME/FRB 2019
    frb180916.RM_err = 0.6 * units.rad / units.m**2
    # Pulse properties
    frb180916.set_pulse(1*units.GHz,
                        Wi=1.66*units.ms,
                        Wi_err=0.05*units.ms,
                        tscatt=0.0027*units.ms)  # no errors given
    # References
    frb180916.refs = ['Marcote2020']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb180916.write_to_json(path=path)
def frb_180924():
    """
    Build the FRB object for FRB 180924 and write it to JSON.

    All of the data currently comes from Bannister et al. 2019
    https://ui.adsabs.harvard.edu/abs/2019Sci...365..565B/abstract

    Now including updated values on FRB position, DM, pulse width
    and scattering time from Day et al. 2020.
    Uncertainties in FRB localization updated by Day et al. 2021.
    """
    frb180924 = frb.FRB('FRB20180924', 'J214425.255-405400.10',
                        362.16*units.pc / units.cm**3,
                        z_frb=0.3212, repeater=False)
    # Error ellipse: statistical then systematic
    frb180924.set_ee(a=0.07, b=0.07, theta=0., cl=68.)  # Statistical (Day+2020)
    frb180924.set_ee(a=0.1611, b=0.1611, theta=0., cl=68., stat=False)  # Systematic (Day+2021)
    # Error in DM (Day 2019)
    frb180924.DM_err = 0.01 * units.pc / units.cm**3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb180924.set_DMISM()
    # RM, fluence and polarization
    frb180924.fluence = 16 * units.Jy * units.ms  # From Bhandari+20
    frb180924.fluence_err = 1 * units.Jy * units.ms  # -- //--
    frb180924.RM = 22 * units.rad / units.m**2
    frb180924.RM_err = 2 * units.rad / units.m**2
    frb180924.lpol = 80.  # linear polarization fraction, %
    frb180924.lpol_err = 10.
    # Pulse properties
    frb180924.set_pulse(1.2725*units.GHz,
                        Wi=0.09*units.ms,
                        Wi_err=0.04*units.ms,
                        tscatt=0.68*units.ms,
                        tscatt_err=0.03*units.ms)
    # References
    frb180924.refs = ['Bannister2019', 'Day2020', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb180924.write_to_json(path=path)
def frb_181112():
    """
    Generate the JSON file for FRB 181112.

    All of the data comes from Prochaska+2019, Science, in press.
    FRB position and localization updated by Day+2021.
    """
    frb181112 = frb.FRB('FRB20181112',
                        'J214923.63-525815.4',
                        589.27 * units.pc / units.cm**3,
                        z_frb=0.4755, repeater=False)
    # Error in DM
    frb181112.DM_err = 0.03 * units.pc / units.cm**3
    # Error ellipse -- Updated by Day+2021
    frb181112.set_ee(a=555.30/1e3, b=152.93/1e3, theta=120.15, cl=68.)  # Statistical (Prochaska+2019)
    frb181112.set_ee(a=3.2*1.79, b=0.8*1.79, theta=120.15, cl=68., stat=False)  # Systematic
    # RM and fluence
    frb181112.RM = 10.5 * units.rad / units.m**2
    frb181112.RM_err = 0.4 * units.rad / units.m**2
    frb181112.fluence = 20.2 * units.Jy * units.ms
    frb181112.fluence_err = 0.1 * units.Jy * units.ms
    # Pulse properties
    frb181112.set_pulse(1.2725*units.GHz,
                        Wi=0.016*units.ms,
                        Wi_err=0.001*units.ms,
                        tscatt=0.021*units.ms,
                        tscatt_err=0.001*units.ms)
    # Galactic ISM contribution to the DM (NE2001 model)
    frb181112.set_DMISM()
    # References
    frb181112.refs = ['Prochaska2019', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb181112.write_to_json(path=path)
def frb_190102():
    """
    Build the FRB object for FRB 190102 and write it to JSON.

    Bhandari+20, ApJL --
    Now including updated values on FRB position, DM, pulse width
    and scattering time from Day et al. 2020.
    Update systematic uncertainty in FRB localization from Day+2021.
    """
    fname = 'FRB20190102'
    #wv_oiii = 6466.48
    #z_OIII = wv_oiii / 5008.239 - 1
    frb190102 = frb.FRB(fname, 'J212939.76-792832.5',  # Day+20
                        364.545 * units.pc / units.cm**3,
                        z_frb=0.2912, repeater=False)  # Updated redshift
    # Error ellipse [REQUIRED]
    frb190102.set_ee(0.21, 0.17, theta=0., cl=68.)  # Statistical (Day+2020)
    frb190102.set_ee(0.936, 0.7876, theta=0., cl=68., stat=False)  # Systematic (Day+2021)
    # Error in DM
    frb190102.DM_err = 0.004 * units.pc / units.cm**3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb190102.set_DMISM()
    # RM and fluence
    frb190102.fluence = 14 * units.Jy * units.ms  # From Bhandari+20
    frb190102.fluence_err = 1 * units.Jy * units.ms  # -- //--
    frb190102.RM = -105 * units.rad / units.m**2
    frb190102.RM_err = 1 * units.rad / units.m**2
    # Pulse properties
    frb190102.set_pulse(1.2725*units.GHz,
                        Wi=0.053*units.ms,
                        Wi_err=0.002*units.ms,
                        tscatt=0.041*units.ms,
                        tscatt_err=0.003*units.ms)
    # References
    frb190102.refs = ['Bhandari2020', 'Day2020', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb190102.write_to_json(path=path)
def frb_190523():
    """
    Build the FRB object for FRB 190523 and write it to JSON.

    Ravi+19, Nature --
    https://ui.adsabs.harvard.edu/abs/2019Natur.572..352R/abstract
    """
    fname = 'FRB20190523'
    frb190523 = frb.FRB(fname, 'J134815.6+722811',
                        760.8 * units.pc / units.cm**3,
                        z_frb=0.660, repeater=False)
    # Error ellipse [REQUIRED]
    frb190523.set_ee(4, 1.5, cl=68., theta=340.)  # Halving the 3x8'' at 95% reported by Ravi et al.
    # Error in DM
    frb190523.DM_err = 0.6 * units.pc / units.cm**3
    # FRB properties
    frb190523.fluence = 280 * units.Jy * units.ms
    #frb180924.fluence_err = 1 * units.Jy * units.ms
    frb190523.tau = 1.4 * units.ms
    frb190523.tau_err = 0.2 * units.ms
    # Galactic ISM contribution to the DM (NE2001 model)
    frb190523.set_DMISM()
    # Pulse properties
    frb190523.set_pulse(1.*units.GHz,
                        Wi=0.42*units.ms,
                        Wi_err=0.05*units.ms,
                        tscatt=1.4*units.ms,
                        tscatt_err=0.2*units.ms)
    # References
    frb190523.refs = ['Ravi2019']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb190523.write_to_json(path=path)
def frb_190608():
    """
    Build the FRB object for FRB 190608 and write it to JSON.

    Bhandari+20, ApJL,
    Day+20 https://ui.adsabs.harvard.edu/abs/2020MNRAS.497.3335D/abstract
    Macquart al. Nature
    Update systematic uncertainty in FRB localization from Day+2021.
    """
    fname = 'FRB20190608'
    frb190608 = frb.FRB(fname, "J221604.77-075353.7",  # Pulled from Slack on 2020 Mar 18
                        340.05 * units.pc / units.cm**3,
                        z_frb=0.1177805, repeater=False)  # Taken from the SDSS table
    # Error ellipse [REQUIRED]
    frb190608.set_ee(0.19, 0.18, theta=90., cl=68.)  # Statistical (Day+2020)
    frb190608.set_ee(0.33, 0.30, theta=90., cl=68., stat=False)  # Systematic
    # Error in DM
    frb190608.DM_err = 0.6 * units.pc / units.cm**3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb190608.set_DMISM()
    # RM and fluence
    frb190608.fluence = 26 * units.Jy * units.ms  # From Macquart+20
    frb190608.fluence_err = 4 * units.Jy * units.ms  # -- //--
    frb190608.RM = 353 * units.rad / units.m**2  # From Day+20
    frb190608.RM_err = 2 * units.rad / units.m**2
    # Pulse properties
    frb190608.set_pulse(1.2725*units.GHz,
                        Wi=1.1*units.ms,
                        Wi_err=0.2*units.ms,
                        tscatt=3.3*units.ms,
                        tscatt_err=0.2*units.ms)
    # References
    frb190608.refs = ['Bhandari2020', 'Day2020', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb190608.write_to_json(path=path)
def frb_190611():
    """
    Build the FRB object for FRB 190611 and write it to JSON.

    Macquart al. Nature
    Day et al. 2020 https://ui.adsabs.harvard.edu/abs/2020MNRAS.497.3335D/abstract
    Update systematic uncertainty in FRB localization from Day+2021.
    """
    FRB_190611_coord = SkyCoord('J212258.94-792351.3',  # Day+2021
                                unit=(units.hourangle, units.deg))
    frb190611 = frb.FRB('FRB20190611', FRB_190611_coord,
                        332.63 * units.pc / units.cm**3,
                        z_frb=0.3778, repeater=False)  # Bright
    # Error ellipse: statistical then systematic
    frb190611.set_ee(0.34, 0.32, theta=0., cl=68.)  # Statistical (Day+2020)
    frb190611.set_ee(1.12, 1.07, theta=0., cl=68., stat=False)  # Systematic (Day+2021)
    # Error in DM
    frb190611.DM_err = 0.04 * units.pc / units.cm**3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb190611.set_DMISM()
    # RM and fluence
    frb190611.RM = 20 * units.rad / units.m**2  # From Day+20
    frb190611.RM_err = 4 * units.rad / units.m**2
    frb190611.fluence = 10 * units.Jy * units.ms  # From Macquart+20
    frb190611.fluence_err = 2 * units.Jy * units.ms
    # Pulse properties
    frb190611.set_pulse(1.2725*units.GHz,
                        Wi=0.09*units.ms,
                        Wi_err=0.02*units.ms,
                        tscatt=0.18*units.ms,
                        tscatt_err=0.02*units.ms)
    # References
    frb190611.refs = ['MacQuart2020', 'Day2020', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb190611.write_to_json(path=path)
def frb_190614():
    """
    Build the FRB object for FRB 20190614 (no redshift) and write it to JSON.

    Data from Law+2020:
    https://ui.adsabs.harvard.edu/abs/2020ApJ...899..161L/abstract
    """
    coord = SkyCoord(ra=65.07552, dec=73.70674, unit='deg')
    this_frb = frb.FRB('FRB20190614', coord,
                       959 * units.pc / units.cm ** 3, repeater=False)
    # Localization error ellipse (required)
    this_frb.set_ee(a=0.8, b=0.4, theta=67., cl=68.)
    # DM uncertainty
    this_frb.DM_err = 1 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    this_frb.set_DMISM()
    # Burst fluence
    this_frb.fluence = 0.62 * units.Jy * units.ms
    this_frb.fluence_err = 0.07 * units.Jy * units.ms
    # Literature references
    this_frb.refs = ['Law2020']
    # Serialize to frb/data/FRBs
    out_path = importlib_resources.files('frb.data.FRBs')
    this_frb.write_to_json(path=out_path)
def frb_190711():
    """
    Build the FRB object for FRB 190711 and write it to JSON.

    MacQuart+20, Day+2020
    https://ui.adsabs.harvard.edu/abs/2020MNRAS.497.3335D/abstract
    Updated FRB localization + error from Day+2021.
    """
    fname = 'FRB20190711'
    frb190711 = frb.FRB(fname, 'J215740.62-802128.8',  # MacQuarter+2020, Day+2020
                        587.9 * units.pc / units.cm ** 3,  # Day+2020
                        z_frb=0.52172, repeater=True)
    # Error ellipse: statistical then systematic
    frb190711.set_ee(0.12, 0.075, theta=90., cl=68.)  # Statistical (Day+2021)
    frb190711.set_ee(0.646, 0.563, theta=90., cl=68., stat=False)  # Systematic (Day+2021)
    # Error in DM
    frb190711.DM_err = 1 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb190711.set_DMISM()
    # RM and fluence -- Day+2020
    frb190711.RM = 9 * units.rad / units.m**2  # Day+20
    frb190711.RM_err = 2 * units.rad / units.m**2
    frb190711.fluence = 34 * units.Jy * units.ms  # Macquart+20
    frb190711.fluence_err = 3 * units.Jy * units.ms
    # References
    frb190711.refs = ['MacQuart2020', 'Day2020', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb190711.write_to_json(path=path)
def frb_190714():
    """
    Build the FRB object for FRB 190714 and write it to JSON.

    Day+2020 https://ui.adsabs.harvard.edu/abs/2020MNRAS.497.3335D/abstract
    Heintz+2020
    Updated FRB localization + error from Day+2021.
    """
    # Taken from Slack; Cherie posted on 19 June 2020
    # Updated as per Day 2021
    FRB_190714_coord = SkyCoord('J121555.13-130115.6',
                                unit=(units.hourangle, units.deg))
    frb190714 = frb.FRB('FRB20190714', FRB_190714_coord,
                        504.13 * units.pc / units.cm ** 3,
                        z_frb=0.2365, repeater=False)
    # Error ellipse (Day+2021): statistical then systematic
    frb190714.set_ee(0.17, 0.10, theta=90., cl=68.)  # Statistical
    frb190714.set_ee(0.5191, 0.376, theta=90., cl=68., stat=False)  # Systematic
    # Error in DM
    frb190714.DM_err = 0.1 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb190714.set_DMISM()
    # Fluence
    frb190714.fluence = 12 * units.Jy * units.ms  # From Cherie on Slack (2/10 - 2020)
    frb190714.fluence_err = 2 * units.Jy * units.ms
    # References
    frb190714.refs = ['Heintz2020', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb190714.write_to_json(path=path)
def frb_191001():
    """
    Build the FRB object for FRB 191001 and write it to JSON.

    Bhandari+2020b https://ui.adsabs.harvard.edu/abs/2020arXiv200812488B/abstract
    Updated FRB localization + error from Day+2021.
    """
    # Taken from Bhandari, Table 1
    # Updated Day 2021
    FRB_191001_coord = SkyCoord("21h33m24.313s -54d44m51.86s", frame='icrs')
    frb191001 = frb.FRB('FRB20191001', FRB_191001_coord,
                        507.90 * units.pc / units.cm ** 3,
                        z_frb=0.2340, repeater=False)
    # Error ellipse [REQUIRED]
    frb191001.set_ee(0.13, 0.08, theta=90., cl=68.)  # Statistical -- Day+2021
    frb191001.set_ee(0.1737, 0.160, theta=90., cl=68., stat=False)  # Systematic
    # Error in DM
    frb191001.DM_err = 0.07 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb191001.set_DMISM()
    # RM and fluence (Bhandari+20b)
    frb191001.RM = 55.5 * units.rad / units.m**2
    frb191001.RM_err = 0.9 * units.rad / units.m**2
    frb191001.fluence = 143 * units.Jy * units.ms
    frb191001.fluence_err = 15 * units.Jy * units.ms
    # Pulse properties
    frb191001.set_pulse(0.920*units.GHz,
                        Wi=0.22*units.ms,
                        Wi_err=0.03*units.ms,
                        tscatt=3.3*units.ms,
                        tscatt_err=0.2*units.ms)
    # References
    frb191001.refs = ['Bhandari2020b', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb191001.write_to_json(path=path)
def frb_191228():
    """
    Build the FRB object for FRB 191228 (no redshift) and write it to JSON.

    Bhandari+2021
    Updated FRB localization + error from Day+2021.
    """
    frbname = 'FRB20191228'
    FRB_191228_coord = SkyCoord('22h57m43.30s -29d35m38.7s', frame='icrs')  # Taken from Slack on 11 Oct 2020
    frb191228 = frb.FRB(frbname, FRB_191228_coord,
                        298 * units.pc / units.cm ** 3,
                        repeater=False)  # First Slack posting
    # Error ellipse (Day+2021): statistical then systematic
    frb191228.set_ee(0.34, 0.34, 0., 68.)
    frb191228.set_ee(0.830, 0.823, 0., 68., stat=False)
    # Error in DM
    frb191228.DM_err = 0.05 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb191228.set_DMISM()
    # RM (not yet measured/recorded)
    # frb190102.RM = 10 * units.rad / units.m**2
    # frb190102.RM_err = 1 * units.rad / units.m**2
    # References
    frb191228.refs = ['Bhandari2021', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb191228.write_to_json(path=path)
def frb_20200120E():
    """
    Build the FRB object for FRB 20200120E and write it to JSON.

    M81 + globular cluster.
    """
    frbname = 'FRB20200120E'
    FRB_20200120E_coord = SkyCoord("09h57m54.68s +68d49m08.0s", frame='icrs')  # From EVN localisation
    frb20200120E = frb.FRB(frbname, FRB_20200120E_coord,
                           87.818 * units.pc / units.cm ** 3, z_frb=0.0008, repeater=True)
    # Error ellipse (Statistical)
    frb20200120E.set_ee(0.4, 0.4, 0., 68.)
    # Error ellipse (Systematic)
    frb20200120E.set_ee(0., 0., 0., 68., stat=False)
    # Error in DM
    frb20200120E.DM_err = 0.007 * units.pc / units.cm ** 3
    # RM
    frb20200120E.RM = -29.8 * units.rad / units.m**2
    frb20200120E.RM_err = 0.5 * units.rad / units.m**2
    # Galactic ISM contribution to the DM (NE2001 model)
    frb20200120E.set_DMISM()
    # (no additional RM entries)
    # frb190102.RM = 10 * units.rad / units.m**2
    # frb190102.RM_err = 1 * units.rad / units.m**2
    # References
    frb20200120E.refs = ['Bhardwaj2021', 'Kirsten2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb20200120E.write_to_json(path=path)
def frb_171020():
    """
    Build the FRB object for FRB 20171020 and write it to JSON.

    Data from Shannon+2018 and Mahony+2018.
    """
    position = SkyCoord("22h15m18.55s -19d40m11.23s", frame='icrs')  # From Shannon+18
    this_frb = frb.FRB('FRB20171020', position,
                       114.1 * units.pc / units.cm ** 3,
                       z_frb=0.00867, repeater=False)
    # Error ellipse: statistical, then systematic
    this_frb.set_ee(600, 600, 0., 68.)
    this_frb.set_ee(0., 0., 0., 68., stat=False)
    # DM uncertainty
    this_frb.DM_err = 0.2 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    this_frb.set_DMISM()
    # Literature references
    this_frb.refs = ['Shannon2018', 'Mahony2018']
    # Serialize to frb/data/FRBs
    this_frb.write_to_json(path=importlib_resources.files('frb.data.FRBs'))
def frb_200430():
    """
    Build the FRB object for FRB 200430 and write it to JSON.

    Heintz+2020
    Bhandari+2021
    Updated FRB localization + error from Day+2021.
    """
    frbname = 'FRB20200430'
    FRB_200430_coord = SkyCoord('15h18m49.54s +12d22m36.3s', frame='icrs')
    frb200430 = frb.FRB(frbname, FRB_200430_coord,
                        380 * units.pc / units.cm ** 3,  # First Slack posting
                        z_frb=0.161,
                        repeater=False)
    # Error ellipse (Statistical)
    frb200430.set_ee(0.24, 0.17, 0., 68.)
    # Error ellipse (Systematic)
    frb200430.set_ee(0.98, 0.251, 0., 68., stat=False)
    # Error in DM (not yet recorded)
    #frb191001.DM_err = 1 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb200430.set_DMISM()
    # RM (not yet measured/recorded)
    # frb190102.RM = 10 * units.rad / units.m**2
    # frb190102.RM_err = 1 * units.rad / units.m**2
    # References
    frb200430.refs = ['Bhandari2021', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb200430.write_to_json(path=path)
def frb_200906():
    """
    Build the FRB object for FRB 200906 and write it to JSON.

    Bhandari+2021
    Updated FRB localization + error from Day+2021.
    """
    frbname = 'FRB20200906'
    FRB_200906_coord = SkyCoord("03h33m59.08s -14d04m59.46s", frame='icrs')  # Day+2021
    frb200906 = frb.FRB(frbname, FRB_200906_coord,
                        577.84 * units.pc / units.cm**3,
                        z_frb=0.36879,
                        repeater=False)  # Slack posting
    # Error ellipse (Statistical)
    frb200906.set_ee(0.11, 0.102, 0., 68.)
    # Error ellipse (Systematic)
    frb200906.set_ee(0.55, 0.340, 0., 68., stat=False)
    # Error in DM
    frb200906.DM_err = 0.02 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb200906.set_DMISM()
    # RM (not yet measured/recorded)
    # frb190102.RM = 10 * units.rad / units.m**2
    # frb190102.RM_err = 1 * units.rad / units.m**2
    # References
    frb200906.refs = ['Bhandari2021', 'Day2021']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb200906.write_to_json(path=path)
def frb_201124():
    """
    Build the FRB object for FRB 20201124 and write it to JSON.

    Sources are ATELs only so far; no refs list is set yet.
    """
    # ATEL 14603 (VLBI)
    FRB_201124_coord = SkyCoord("05h08m03.5077s 26d03m38.504s", frame='icrs')
    frb201124 = frb.FRB('FRB20201124', FRB_201124_coord,
                        411. * units.pc / units.cm ** 3,
                        z_frb=0.0982, repeater=True)
    # Error ellipse [REQUIRED]
    frb201124.set_ee(0.004, 0.004, theta=0., cl=68.)  # ATEL
    # Error in DM (not yet recorded)
    #frb191001.DM_err = 0.07 * units.pc / units.cm ** 3
    # Galactic ISM contribution to the DM (NE2001 model)
    frb201124.set_DMISM()
    # RM (Kumar+21)
    frb201124.RM = -613 * units.rad / units.m**2
    frb201124.RM_err = 2 * units.rad / units.m**2
    # RM and fluence (Bhandari+20b) -- placeholders, not yet set
    #frb201124.RM = 55.5 * units.rad / units.m**2
    #frb201124.RM_err = 0.9 * units.rad / units.m**2
    #frb201124.fluence = 143 * units.Jy * units.ms
    #frb201124.fluence_err = 15 * units.Jy * units.ms
    # Pulse properties -- placeholders, not yet set
    #frb191001.set_pulse(0.920*units.GHz,
    #                    Wi=0.22*units.ms,
    #                    Wi_err=0.03*units.ms,
    #                    tscatt=3.3*units.ms,
    #                    tscatt_err=0.2*units.ms)
    # References -- not yet set (ATELs only)
    #frb191001.refs = ['Bhandari2020b']
    # Write the JSON file into frb/data/FRBs
    path = importlib_resources.files('frb.data.FRBs')
    frb201124.write_to_json(path=path)
def main(inflg='all'):
    """Build the per-FRB JSON files selected by a bit flag.

    Parameters
    ----------
    inflg : str or int, optional
        'all' (default) runs every builder below.  Otherwise an int
        (or anything `int()` accepts) used as a bit mask: bit ii runs
        the builder annotated with 2**ii.
    """
    if inflg == 'all':
        # All 25 flag bits set -- same value as the old
        # np.sum(np.array([2**ii for ii in range(25)])) but without numpy.
        flg = 2**25 - 1
    else:
        flg = int(inflg)
    # 121102
    if flg & (2**0):  # 1
        frb_121102()
    # 180924
    if flg & (2**1):  # 2
        frb_180924()
    # 181112
    if flg & (2**2):  # 4
        frb_181112()
    # 190523  (comment previously misread "195023")
    if flg & (2**3):  # 8
        frb_190523()
    # 190608
    if flg & (2**4):  # 16
        frb_190608()
    # 190102
    if flg & (2**5):  # 32
        frb_190102()
    # 190711
    if flg & (2**6):  # 64
        frb_190711()
    # 180916
    if flg & (2**7):  # 128
        frb_180916()
    # 190611
    if flg & (2**8):  # 256
        frb_190611()
    # FRB 190614
    if flg & (2**9):  # 512
        frb_190614()
    # FRB 190714
    if flg & (2**10):  # 1024
        frb_190714()
    # FRB 191001
    if flg & (2**11):  # 2048
        frb_191001()
    # FRB 201124a
    if flg & (2**12):  # 4096
        frb_201124()
    # FRB 180301
    if flg & (2**13):  # 8192
        frb_180301()
    # FRB 191228
    if flg & (2**14):  # 16384
        frb_191228()
    # FRB 200430
    if flg & (2**15):  # 32768
        frb_200430()
    # FRB 200906
    if flg & (2**16):  # 65536
        frb_200906()
    # FRB 20200120E  (comment previously misread "200906")
    if flg & (2**17):  # 131072
        frb_20200120E()
    # FRB 20171020
    if flg & (2**18):  # 262144
        frb_171020()
# Command line execution
# Only for testing
# Use the Build script to build
if __name__ == '__main__':
    # Ad hoc testing: uncomment the builder(s) you want to run
    #frb_190611()
    #frb_190711()
    #frb_190714()
    #frb_191001()
    #frb_180301()
    #frb_200906()
    #frb_191228()
    #frb_171020()
    frb_20200120E()
    #frb_201124()
|
FRBsREPO_NAMEFRBPATH_START.@FRB_extracted@FRB-main@frb@builds@old_build_frbs.py@.PATH_END.py
|
{
"filename": "test_r0_to_dl1.py",
"repo_name": "cta-observatory/cta-lstchain",
"repo_path": "cta-lstchain_extracted/cta-lstchain-main/lstchain/reco/tests/test_r0_to_dl1.py",
"type": "Python"
}
|
from ctapipe.containers import ArrayEventContainer
import numpy as np
from lstchain.reco.r0_to_dl1 import r0_to_dl1, rescale_dl1_charge
from lstchain.io import standard_config
from copy import copy, deepcopy
def test_rescale_dl1_charge():
    """rescale_dl1_charge must multiply every telescope image in-place."""
    event = ArrayEventContainer()
    tel_ids = [1, 3]
    # Keep an untouched copy of each random image for comparison.
    original_images = {tid: np.random.rand(1855) for tid in tel_ids}
    for tid, img in original_images.items():
        event.dl1.tel[tid].image = copy(img)
    factor = np.random.rand() * 10
    rescale_dl1_charge(event, factor)
    for tid, img in original_images.items():
        np.testing.assert_allclose(event.dl1.tel[tid].image, img * factor)
def test_r0_to_dl1_nsb_tuning(tmp_path, mc_gamma_testfile):
    """Smoke test: run the R0->DL1 chain with waveform NSB tuning enabled."""
    config = deepcopy(standard_config)
    # Restrict to a single telescope to keep the test fast.
    config['source_config']['EventSource']['allowed_tels'] = [1]
    config['waveform_nsb_tuning']['nsb_tuning'] = True
    config['waveform_nsb_tuning']['spe_location'] = None  ## If None, the default file data/SinglePhE_ResponseInPhE_expo2Gaus.dat is used
    r0_to_dl1(mc_gamma_testfile, custom_config=config, output_filename=tmp_path / "tmp.h5")
|
cta-observatoryREPO_NAMEcta-lstchainPATH_START.@cta-lstchain_extracted@cta-lstchain-main@lstchain@reco@tests@test_r0_to_dl1.py@.PATH_END.py
|
{
"filename": "text.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/legacy/preprocessing/text.py",
"type": "Python"
}
|
"""Deprecated text preprocessing APIs from Keras 1."""
import collections
import hashlib
import json
import warnings
import numpy as np
from keras.src.api_export import keras_export
@keras_export("keras._legacy.preprocessing.text.text_to_word_sequence")
def text_to_word_sequence(
input_text,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=" ",
):
"""DEPRECATED."""
if lower:
input_text = input_text.lower()
translate_dict = {c: split for c in filters}
translate_map = str.maketrans(translate_dict)
input_text = input_text.translate(translate_map)
seq = input_text.split(split)
return [i for i in seq if i]
@keras_export("keras._legacy.preprocessing.text.one_hot")
def one_hot(
input_text,
n,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=" ",
analyzer=None,
):
"""DEPRECATED."""
return hashing_trick(
input_text,
n,
hash_function=hash,
filters=filters,
lower=lower,
split=split,
analyzer=analyzer,
)
@keras_export("keras._legacy.preprocessing.text.hashing_trick")
def hashing_trick(
text,
n,
hash_function=None,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=" ",
analyzer=None,
):
"""DEPRECATED."""
if hash_function is None:
hash_function = hash
elif hash_function == "md5":
def hash_function(w):
return int(hashlib.md5(w.encode()).hexdigest(), 16)
if analyzer is None:
seq = text_to_word_sequence(
text, filters=filters, lower=lower, split=split
)
else:
seq = analyzer(text)
return [(hash_function(w) % (n - 1) + 1) for w in seq]
@keras_export("keras._legacy.preprocessing.text.Tokenizer")
class Tokenizer:
    """DEPRECATED.

    Legacy (Keras 1) text tokenization utility, kept for backwards
    compatibility. Builds a word-to-integer vocabulary from texts and
    converts texts to integer sequences or document-term matrices.
    Index 0 is reserved and never assigned to a word.
    """
    def __init__(
        self,
        num_words=None,
        filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
        lower=True,
        split=" ",
        char_level=False,
        oov_token=None,
        analyzer=None,
        **kwargs,
    ):
        # Legacy support: `nb_words` was the Keras 1 name for `num_words`.
        if "nb_words" in kwargs:
            warnings.warn(
                "The `nb_words` argument in `Tokenizer` "
                "has been renamed `num_words`."
            )
            num_words = kwargs.pop("nb_words")
        document_count = kwargs.pop("document_count", 0)
        if kwargs:
            raise TypeError("Unrecognized keyword arguments: " + str(kwargs))
        # word -> total occurrence count, in first-seen order
        self.word_counts = collections.OrderedDict()
        # word -> number of documents the word appears in
        self.word_docs = collections.defaultdict(int)
        self.filters = filters
        self.split = split
        self.lower = lower
        self.num_words = num_words
        self.document_count = document_count
        self.char_level = char_level
        self.oov_token = oov_token
        # word index -> number of documents the word appears in
        self.index_docs = collections.defaultdict(int)
        self.word_index = {}
        self.index_word = {}
        self.analyzer = analyzer
    def fit_on_texts(self, texts):
        """Build `word_index`/`index_word` from an iterable of texts.

        Elements may be strings or (when `char_level` is set, or the
        element itself is a list) pre-tokenized sequences used as-is.
        Words are ranked by frequency; the OOV token, if any, is forced
        to index 1.
        """
        for text in texts:
            self.document_count += 1
            if self.char_level or isinstance(text, list):
                if self.lower:
                    if isinstance(text, list):
                        text = [text_elem.lower() for text_elem in text]
                    else:
                        text = text.lower()
                seq = text
            else:
                if self.analyzer is None:
                    seq = text_to_word_sequence(
                        text,
                        filters=self.filters,
                        lower=self.lower,
                        split=self.split,
                    )
                else:
                    seq = self.analyzer(text)
            for w in seq:
                if w in self.word_counts:
                    self.word_counts[w] += 1
                else:
                    self.word_counts[w] = 1
            for w in set(seq):
                # In how many documents each word occurs
                self.word_docs[w] += 1
        wcounts = list(self.word_counts.items())
        wcounts.sort(key=lambda x: x[1], reverse=True)
        # forcing the oov_token to index 1 if it exists
        if self.oov_token is None:
            sorted_voc = []
        else:
            sorted_voc = [self.oov_token]
        sorted_voc.extend(wc[0] for wc in wcounts)
        # note that index 0 is reserved, never assigned to an existing word
        self.word_index = dict(
            zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))
        )
        self.index_word = {c: w for w, c in self.word_index.items()}
        for w, c in list(self.word_docs.items()):
            self.index_docs[self.word_index[w]] = c
    def fit_on_sequences(self, sequences):
        """Update document counts from already-integer-encoded sequences."""
        self.document_count += len(sequences)
        for seq in sequences:
            seq = set(seq)
            for i in seq:
                self.index_docs[i] += 1
    def texts_to_sequences(self, texts):
        """Return the list form of `texts_to_sequences_generator(texts)`."""
        return list(self.texts_to_sequences_generator(texts))
    def texts_to_sequences_generator(self, texts):
        """Yield one list of word indices per input text.

        Words outside the top `num_words` (or unknown words) map to the
        OOV index when an `oov_token` is set, and are dropped otherwise.
        """
        num_words = self.num_words
        oov_token_index = self.word_index.get(self.oov_token)
        for text in texts:
            if self.char_level or isinstance(text, list):
                if self.lower:
                    if isinstance(text, list):
                        text = [text_elem.lower() for text_elem in text]
                    else:
                        text = text.lower()
                seq = text
            else:
                if self.analyzer is None:
                    seq = text_to_word_sequence(
                        text,
                        filters=self.filters,
                        lower=self.lower,
                        split=self.split,
                    )
                else:
                    seq = self.analyzer(text)
            vect = []
            for w in seq:
                i = self.word_index.get(w)
                if i is not None:
                    if num_words and i >= num_words:
                        if oov_token_index is not None:
                            vect.append(oov_token_index)
                    else:
                        vect.append(i)
                elif self.oov_token is not None:
                    vect.append(oov_token_index)
            yield vect
    def sequences_to_texts(self, sequences):
        """Return the list form of `sequences_to_texts_generator(sequences)`."""
        return list(self.sequences_to_texts_generator(sequences))
    def sequences_to_texts_generator(self, sequences):
        """Yield one space-joined string per index sequence (inverse of
        `texts_to_sequences`, subject to the same OOV/num_words rules)."""
        num_words = self.num_words
        oov_token_index = self.word_index.get(self.oov_token)
        for seq in sequences:
            vect = []
            for num in seq:
                word = self.index_word.get(num)
                if word is not None:
                    if num_words and num >= num_words:
                        if oov_token_index is not None:
                            vect.append(self.index_word[oov_token_index])
                    else:
                        vect.append(word)
                elif self.oov_token is not None:
                    vect.append(self.index_word[oov_token_index])
            vect = " ".join(vect)
            yield vect
    def texts_to_matrix(self, texts, mode="binary"):
        """Encode `texts` as a (len(texts), num_words) numpy matrix."""
        sequences = self.texts_to_sequences(texts)
        return self.sequences_to_matrix(sequences, mode=mode)
    def sequences_to_matrix(self, sequences, mode="binary"):
        """Encode index sequences as a matrix.

        `mode` is one of "binary", "count", "freq" or "tfidf"; "tfidf"
        requires the tokenizer to have been fit (needs document counts).
        """
        if not self.num_words:
            if self.word_index:
                num_words = len(self.word_index) + 1
            else:
                raise ValueError(
                    "Specify a dimension (`num_words` argument), "
                    "or fit on some text data first."
                )
        else:
            num_words = self.num_words
        if mode == "tfidf" and not self.document_count:
            raise ValueError(
                "Fit the Tokenizer on some data before using tfidf mode."
            )
        x = np.zeros((len(sequences), num_words))
        for i, seq in enumerate(sequences):
            if not seq:
                continue
            counts = collections.defaultdict(int)
            for j in seq:
                if j >= num_words:
                    continue
                counts[j] += 1
            for j, c in list(counts.items()):
                if mode == "count":
                    x[i][j] = c
                elif mode == "freq":
                    x[i][j] = c / len(seq)
                elif mode == "binary":
                    x[i][j] = 1
                elif mode == "tfidf":
                    # Use weighting scheme 2 in
                    # https://en.wikipedia.org/wiki/Tf%E2%80%93idf
                    tf = 1 + np.log(c)
                    idf = np.log(
                        1
                        + self.document_count / (1 + self.index_docs.get(j, 0))
                    )
                    x[i][j] = tf * idf
                else:
                    raise ValueError("Unknown vectorization mode:", mode)
        return x
    def get_config(self):
        """Return a JSON-serializable config; vocabulary tables are stored
        as JSON strings inside the config dict."""
        json_word_counts = json.dumps(self.word_counts)
        json_word_docs = json.dumps(self.word_docs)
        json_index_docs = json.dumps(self.index_docs)
        json_word_index = json.dumps(self.word_index)
        json_index_word = json.dumps(self.index_word)
        return {
            "num_words": self.num_words,
            "filters": self.filters,
            "lower": self.lower,
            "split": self.split,
            "char_level": self.char_level,
            "oov_token": self.oov_token,
            "document_count": self.document_count,
            "word_counts": json_word_counts,
            "word_docs": json_word_docs,
            "index_docs": json_index_docs,
            "index_word": json_index_word,
            "word_index": json_word_index,
        }
    def to_json(self, **kwargs):
        """Serialize the tokenizer to a JSON string (see
        `tokenizer_from_json` for the inverse)."""
        config = self.get_config()
        tokenizer_config = {
            "class_name": self.__class__.__name__,
            "config": config,
        }
        return json.dumps(tokenizer_config, **kwargs)
@keras_export("keras._legacy.preprocessing.text.tokenizer_from_json")
def tokenizer_from_json(json_string):
    """DEPRECATED. Rebuild a `Tokenizer` from the JSON made by `to_json`."""
    payload = json.loads(json_string)
    config = payload.get("config")
    # The vocabulary tables are stored as JSON strings inside the config.
    word_counts = json.loads(config.pop("word_counts"))
    word_docs = json.loads(config.pop("word_docs"))
    # json.dumps() turns integer dict keys into strings; restore them.
    index_docs = {
        int(k): v for k, v in json.loads(config.pop("index_docs")).items()
    }
    index_word = {
        int(k): v for k, v in json.loads(config.pop("index_word")).items()
    }
    word_index = json.loads(config.pop("word_index"))
    tokenizer = Tokenizer(**config)
    tokenizer.word_counts = word_counts
    tokenizer.word_docs = word_docs
    tokenizer.index_docs = index_docs
    tokenizer.word_index = word_index
    tokenizer.index_word = index_word
    return tokenizer
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@legacy@preprocessing@text.py@.PATH_END.py
|
{
"filename": "test_nonlinearity.py",
"repo_name": "litebird/litebird_sim",
"repo_path": "litebird_sim_extracted/litebird_sim-master/test/test_nonlinearity.py",
"type": "Python"
}
|
import numpy as np
import litebird_sim as lbs
from astropy.time import Time
def test_add_quadratic_nonlinearity():
    """Check that the three APIs for applying quadratic non-linearity agree.

    The same quadratic non-linearity is applied through (1) the
    `Simulation.apply_quadratic_nonlin` method, (2) the observation-level
    wrapper and (3) the per-detector low-level function, each on its own
    copy of a TOD of ones; the three results must be identical and must
    match the analytic expectation tod * (1 + g_one_over_k * tod).
    """
    # Test function to check consistency of wrappers and low level functions
    start_time = Time("2025-02-02T00:00:00")
    mission_time_days = 1
    sampling_hz = 1
    dets = [
        lbs.DetectorInfo(name="det_A", sampling_rate_hz=sampling_hz),
        lbs.DetectorInfo(name="det_B", sampling_rate_hz=sampling_hz),
    ]
    sim = lbs.Simulation(
        base_path="nonlin_example",
        start_time=start_time,
        duration_s=mission_time_days * 24 * 3600.0,
        random_seed=12345,
    )
    sim.create_observations(
        detectors=dets,
        split_list_over_processes=False,
    )
    # Creating fiducial TODs: one copy of a unit TOD per API level under test
    sim.observations[0].nl_2_self = np.ones_like(sim.observations[0].tod)
    sim.observations[0].nl_2_obs = np.ones_like(sim.observations[0].tod)
    sim.observations[0].nl_2_det = np.ones_like(sim.observations[0].tod)
    # Define non-linear parameters for the detectors.
    sim.observations[0].g_one_over_k = np.ones(len(dets)) * 1e-3
    # Applying non-linearity using the `Simulation` class method
    sim.apply_quadratic_nonlin(
        component="nl_2_self",
    )
    # Applying non-linearity on the given TOD component of an `Observation` object
    lbs.non_linearity.apply_quadratic_nonlin_to_observations(
        observations=sim.observations,
        component="nl_2_obs",
    )
    # Applying non-linearity on the TOD arrays of the individual detectors.
    for idx, tod in enumerate(sim.observations[0].nl_2_det):
        lbs.non_linearity.apply_quadratic_nonlin_for_one_detector(
            tod_det=tod,
            g_one_over_k=sim.observations[0].g_one_over_k[idx],
        )
    # Check if the three non-linear tods are equal
    np.testing.assert_array_equal(
        sim.observations[0].nl_2_self, sim.observations[0].nl_2_obs
    )
    np.testing.assert_array_equal(
        sim.observations[0].nl_2_self, sim.observations[0].nl_2_det
    )
    # Check if non-linearity is applied correctly: the input TOD was all
    # ones, so the expected output is tod * (1 + g_one_over_k * tod).
    sim.observations[0].tod_origin = np.ones_like(sim.observations[0].tod)
    np.testing.assert_array_equal(
        sim.observations[0].nl_2_self[0],
        sim.observations[0].tod_origin[0]
        * (1 + sim.observations[0].g_one_over_k[0] * sim.observations[0].tod_origin[0]),
    )
|
litebirdREPO_NAMElitebird_simPATH_START.@litebird_sim_extracted@litebird_sim-master@test@test_nonlinearity.py@.PATH_END.py
|
{
"filename": "fastpm.py",
"repo_name": "changhoonhahn/pySpectrum",
"repo_path": "pySpectrum_extracted/pySpectrum-master/run/fastpm/fastpm.py",
"type": "Python"
}
|
#!/bin/python
'''
calculate the powerspectrum and bipsectrum for QPM halo box
'''
import os
import h5py
import numpy as np
# -- nbodykit --
import nbodykit.lab as NBlab
# -- pyspectrum --
from pyspectrum import util as UT
from pyspectrum import plots as Plots
from pyspectrum import pyspectrum as pySpec
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# Global matplotlib styling applied to every figure this script writes:
# LaTeX text rendering, serif fonts, thicker axes/ticks, no legend frame.
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
def fastPM(z, str_flag='', mh_lim=15., Lbox=205., Nmax=40, Ncut=3, step=3):
    ''' calculate the powerspectrum and bispectrum of the fastPM catalog.

    Parameters
    ----------
    z : float
        redshift of the halo catalog snapshot.
    str_flag : str
        optional suffix in the catalog file name (e.g. '_calibratebias').
    mh_lim : float
        halo mass limit in units of 10^10 Msun; only halos with
        Mh > mh_lim are kept.
    Lbox : float
        box size in Mpc/h.
    Nmax, Ncut, step : int
        triangle-configuration parameters forwarded to pySpec.Bk_periodic.

    Notes
    -----
    All intermediate products (mass-limited hdf5 catalog, P(k) and B(k)
    tables, png figures) are cached on disk and reused when present.
    '''
    dir_fpm = os.path.join(UT.dat_dir(), 'fastpm')
    f_halo = ('halocat_FastPM_40step_N250_IC500_B2_z%.2f%s.txt' % (z, str_flag))
    f_mlim = ('halocat_FastPM_40step_N250_IC500_B2_z%.2f%s.mlim%.fe10' % (z, str_flag, mh_lim))
    f_hdf5 = ('%s/%s.hdf5' % (dir_fpm, f_mlim))
    f_pell = ('%s/pySpec.Plk.%s.Lbox%.f.Ngrid360.dat' % (dir_fpm, f_mlim, Lbox))
    f_pnkt = ('%s/pySpec.Plk.%s.Lbox%.f.Ngrid360.nbodykit.dat' % (dir_fpm, f_mlim, Lbox))
    f_b123 = ('%s/pySpec.Bk.%s.Lbox%.f.Ngrid360.step%i.Ncut%i.Nmax%i.dat' % (dir_fpm, f_mlim, Lbox, step, Ncut, Nmax))
    kf = 2.*np.pi/Lbox  # fundamental mode of the box
    if not os.path.isfile(f_hdf5):
        # read in the plain-text halo catalog (mass, position, velocity)
        dat_halo = np.loadtxt(os.path.join(dir_fpm, f_halo), unpack=True, usecols=[0,1,2,3,7,8,9])
        mh = dat_halo[0]
        Nhalo = len(mh)
        print('%i halos in %.f Mpc/h box' % (Nhalo, Lbox))
        print('%f < M_h/10^10Msun < %f' % (mh.min(), mh.max()))
        xyz = np.zeros((Nhalo,3))
        xyz[:,0] = dat_halo[1]
        xyz[:,1] = dat_halo[2]
        xyz[:,2] = dat_halo[3]
        print('%f < x < %f' % (xyz[:,0].min(), xyz[:,0].max()))
        print('%f < y < %f' % (xyz[:,1].min(), xyz[:,1].max()))
        print('%f < z < %f' % (xyz[:,2].min(), xyz[:,2].max()))
        vxyz = np.zeros((Nhalo,3))
        vxyz[:,0] = dat_halo[4]
        vxyz[:,1] = dat_halo[5]
        vxyz[:,2] = dat_halo[6]
        # impose the halo mass limit
        # (bug fix: was hard-coded to 15. regardless of the mh_lim argument)
        mlim = (mh > mh_lim)
        Nhalo = np.sum(mlim)
        print('%i halos in %.f Mpc/h box with Mh > %f' % (Nhalo, Lbox, mh_lim))
        mh = mh[mlim]
        xyz = xyz[mlim,:]
        vxyz = vxyz[mlim,:]
        # cache the mass-limited catalog; context manager guarantees closure
        with h5py.File(f_hdf5, 'w') as f:
            f.create_dataset('xyz', data=xyz)
            f.create_dataset('vxyz', data=vxyz)
            f.create_dataset('mhalo', data=mh)
    else:
        # read the cached catalog; Dataset[...] replaces the `.value`
        # attribute that was removed in h5py 3.0 (also fixes the file
        # never being closed in this branch)
        with h5py.File(f_hdf5, 'r') as f:
            xyz = f['xyz'][...]
            vxyz = f['vxyz'][...]
            mh = f['mhalo'][...]
        Nhalo = xyz.shape[0]
        print('%i halos in %.f Mpc/h box with Mh > %f' % (Nhalo, Lbox, mh_lim))
    nhalo = float(Nhalo) / Lbox**3  # comoving number density
    print('number density = %f' % nhalo)
    print('1/nbar = %f' % (1./nhalo))
    # calculate powerspectrum
    if not os.path.isfile(f_pell):
        delta = pySpec.FFTperiodic(xyz.T, fft='fortran', Lbox=Lbox, Ngrid=360, silent=False)
        delta_fft = pySpec.reflect_delta(delta, Ngrid=360)
        # calculate powerspectrum monopole (shot-noise corrected)
        k, p0k, cnts = pySpec.Pk_periodic(delta_fft)
        k *= kf
        p0k = p0k/(kf**3) - 1./nhalo
        # save to file
        hdr = ('pySpectrum P_l=0(k). Nhalo=%i, Lbox=%.f, k_f=%.5e, SN=%.5e' % (Nhalo, Lbox, kf, 1./nhalo))
        hdr += '\n k, p0k, counts'
        np.savetxt(f_pell, np.array([k, p0k, cnts]).T, fmt='%.5e %.5e %.5e', delimiter='\t', header=hdr)
    else:
        k, p0k, cnts = np.loadtxt(f_pell, skiprows=1, unpack=True, usecols=[0,1,2])
    # calculate P(k) using nbodykit for sanity check
    if not os.path.isfile(f_pnkt):
        cosmo = NBlab.cosmology.Planck15
        halo_data = {}
        halo_data['Position'] = xyz
        halo_data['Velocity'] = vxyz
        halo_data['Mass'] = mh
        print("putting it into array catalog")
        halos = NBlab.ArrayCatalog(halo_data, BoxSize=np.array([Lbox, Lbox, Lbox]))
        print("putting it into halo catalog")
        halos = NBlab.HaloCatalog(halos, cosmo=cosmo, redshift=z, mdef='vir')
        print("putting it into mesh")
        mesh = halos.to_mesh(window='tsc', Nmesh=360, compensated=True, position='Position')
        print("calculating powerspectrum" )
        r = NBlab.FFTPower(mesh, mode='1d', dk=kf, kmin=kf, poles=[0,2,4])
        poles = r.poles
        plk = {'k': poles['k']}
        for ell in [0, 2, 4]:
            P = (poles['power_%d' % ell].real)
            if ell == 0:
                P = P - poles.attrs['shotnoise'] # subtract shotnoise from monopole
            plk['p%dk' % ell] = P
        plk['shotnoise'] = poles.attrs['shotnoise'] # save shot noise term
        # header
        hdr = ('pySpectrum P_l(k). Nhalo=%i, Lbox=%.f, k_f=%.5e, SN=%.5e' % (Nhalo, Lbox, kf, plk['shotnoise']))
        hdr += '\n k, p0k, p2k, p4k'
        # save to file
        np.savetxt(f_pnkt, np.array([plk['k'], plk['p0k'], plk['p2k'], plk['p4k']]).T, header=hdr)
    else:
        _k, _p0k, _p2k, _p4k = np.loadtxt(f_pnkt, skiprows=1, unpack=True, usecols=[0,1,2,3])
        plk = {}
        plk['k'] = _k
        plk['p0k'] = _p0k
        plk['p2k'] = _p2k
        plk['p4k'] = _p4k
    # calculate bispectrum
    if not os.path.isfile(f_b123):
        # bug fix: forward Nmax/Ncut/step instead of hard-coding 40/3/3 —
        # previously the arguments only affected the output file name,
        # not the actual calculation
        bispec = pySpec.Bk_periodic(xyz.T, Lbox=Lbox, Ngrid=360, Nmax=Nmax, Ncut=Ncut, step=step, fft='pyfftw', nthreads=1, silent=False)
        i_k = bispec['i_k1']
        j_k = bispec['i_k2']
        l_k = bispec['i_k3']
        p0k1 = bispec['p0k1']
        p0k2 = bispec['p0k2']
        p0k3 = bispec['p0k3']
        b123 = bispec['b123']
        b123_sn = bispec['b123_sn']
        q123 = bispec['q123']
        counts = bispec['counts']
        # save to file
        hdr = 'pyspectrum bispectrum calculation test. k_f = 2pi/%.1f' % Lbox
        hdr += '\n i_k1, i_k2, i_k3, p0k1, p0k2, p0k3, bk, qk, counts, bk_shotnoise'
        np.savetxt(f_b123,
                np.array([i_k, j_k, l_k, p0k1, p0k2, p0k3, b123, q123, counts, b123_sn]).T,
                fmt='%i %i %i %.5e %.5e %.5e %.5e %.5e %.5e %.5e',
                delimiter='\t', header=hdr)
    else:
        i_k, j_k, l_k, p0k1, p0k2, p0k3, b123, q123, counts, b123_sn = np.loadtxt(f_b123,
                skiprows=1, unpack=True, usecols=range(10))
    # plot powerspectrum: pySpectrum vs nbodykit vs the bispectrum code's P0
    fig = plt.figure(figsize=(5,5))
    sub = fig.add_subplot(111)
    sub.plot(k, p0k, c='k', lw=1, label='pySpectrum')
    sub.plot(plk['k'], plk['p0k'], c='C1', lw=1, label='nbodykit')
    iksort = np.argsort(i_k)
    sub.plot(i_k[iksort] * kf, p0k1[iksort], c='k', lw=1, ls='--', label='bispectrum code')
    sub.legend(loc='lower left', fontsize=20)
    sub.set_ylabel('$P_0(k)$', fontsize=25)
    sub.set_ylim([1e0, 1e4])
    sub.set_yscale('log')
    sub.set_xlabel('$k$', fontsize=25)
    sub.set_xlim([1e-2, 10.])
    sub.set_xscale('log')
    fig.savefig(f_pell.replace('.dat', '.png'), bbox_inches='tight')
    # plot bispectrum shape (triangle-configuration) panels
    nbin = 31
    x_bins = np.linspace(0., 1., nbin+1)
    y_bins = np.linspace(0.5, 1., (nbin//2) + 1)
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    Bgrid = Plots._BorQgrid(l_k.astype(float)/i_k.astype(float), j_k.astype(float)/i_k.astype(float), q123, counts, x_bins, y_bins)
    bplot = plt.pcolormesh(x_bins, y_bins, Bgrid.T, vmin=0, vmax=1, cmap='RdBu')
    cbar = plt.colorbar(bplot, orientation='vertical')
    sub.set_title(r'$Q(k_1, k_2, k_3)$ FastPM halo catalog', fontsize=25)
    sub.set_xlabel('$k_3/k_1$', fontsize=25)
    sub.set_ylabel('$k_2/k_1$', fontsize=25)
    fig.savefig(f_b123.replace('.dat', '.Qk_shape.png'), bbox_inches='tight')
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    Bgrid = Plots._BorQgrid(l_k.astype(float)/i_k.astype(float), j_k.astype(float)/i_k.astype(float), b123, counts, x_bins, y_bins)
    bplot = plt.pcolormesh(x_bins, y_bins, Bgrid.T, norm=LogNorm(vmin=1e6, vmax=1e8), cmap='RdBu')
    cbar = plt.colorbar(bplot, orientation='vertical')
    sub.set_title(r'$B(k_1, k_2, k_3)$ FastPM halo catalog', fontsize=25)
    sub.set_xlabel('$k_3/k_1$', fontsize=25)
    sub.set_ylabel('$k_2/k_1$', fontsize=25)
    fig.savefig(f_b123.replace('.dat', '.Bk_shape.png'), bbox_inches='tight')
    # plot bispectrum amplitude vs triangle index
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    sub.scatter(range(len(b123)), q123, c='k', s=1)
    sub.set_xlabel(r'$k_1 > k_2 > k_3$ triangle index', fontsize=25)
    sub.set_xlim([0, len(b123)])
    sub.set_ylabel(r'$Q(k_1, k_2, k_3)$', fontsize=25)
    sub.set_ylim([0., 1.])
    fig.savefig(f_b123.replace('.dat', '.Qk.png'), bbox_inches='tight')
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    sub.scatter(range(len(b123)), b123, c='k', s=1)
    sub.set_xlabel(r'$k_1 > k_2 > k_3$ triangle index', fontsize=25)
    sub.set_xlim([0, len(b123)])
    sub.set_ylabel(r'$B(k_1, k_2, k_3)$', fontsize=25)
    sub.set_yscale('log')
    fig.savefig(f_b123.replace('.dat', '.Bk.png'), bbox_inches='tight')
    return None
def TNG(z, mh_lim=15., Lbox=205., Nmax=40, Ncut=3, step=3):
    ''' calculate the powerspectrum and bispectrum of the Illustris
    TNG300Dark halo catalog.

    Parameters
    ----------
    z : float
        redshift of the halo catalog snapshot.
    mh_lim : float
        halo mass limit in units of 10^10 Msun; only halos with
        Mh > mh_lim are kept.
    Lbox : float
        box size in Mpc/h.
    Nmax, Ncut, step : int
        triangle-configuration parameters forwarded to pySpec.Bk_periodic.

    Notes
    -----
    Mirrors `fastPM`; all intermediate products are cached on disk and
    reused when present.
    '''
    dir_fpm = os.path.join(UT.dat_dir(), 'fastpm')
    f_halo = ('halocat_TNG300Dark_z%.2f.txt' % z)
    f_mlim = ('halocat_TNG300Dark_z%.2f.mlim%.fe10' % (z, mh_lim))
    f_hdf5 = ('%s/%s.hdf5' % (dir_fpm, f_mlim))
    f_pell = ('%s/pySpec.Plk.%s.Lbox%.f.Ngrid360.dat' % (dir_fpm, f_mlim, Lbox))
    f_pnkt = ('%s/pySpec.Plk.%s.Lbox%.f.Ngrid360.nbodykit.dat' % (dir_fpm, f_mlim, Lbox))
    f_b123 = ('%s/pySpec.Bk.%s.Lbox%.f.Ngrid360.step%i.Ncut%i.Nmax%i.dat' % (dir_fpm, f_mlim, Lbox, step, Ncut, Nmax))
    kf = 2.*np.pi/Lbox  # fundamental mode of the box
    if not os.path.isfile(f_hdf5):
        # read in the plain-text halo catalog (mass, position, velocity)
        dat_halo = np.loadtxt(os.path.join(dir_fpm, f_halo), unpack=True, usecols=[0,1,2,3,7,8,9])
        mh = dat_halo[0]
        Nhalo = len(mh)
        print('%i halos in %.f Mpc/h box' % (Nhalo, Lbox))
        print('%f < M_h/10^10Msun < %f' % (mh.min(), mh.max()))
        xyz = np.zeros((Nhalo,3))
        xyz[:,0] = dat_halo[1]
        xyz[:,1] = dat_halo[2]
        xyz[:,2] = dat_halo[3]
        vxyz = np.zeros((Nhalo,3))
        vxyz[:,0] = dat_halo[4]
        vxyz[:,1] = dat_halo[5]
        vxyz[:,2] = dat_halo[6]
        # impose the halo mass limit
        # (bug fix: was hard-coded to 15. regardless of the mh_lim argument)
        mlim = (mh > mh_lim)
        Nhalo = np.sum(mlim)
        print('%i halos in %.f Mpc/h box with Mh > %f' % (Nhalo, Lbox, mh_lim))
        mh = mh[mlim]
        xyz = xyz[mlim,:]
        vxyz = vxyz[mlim,:]
        # cache the mass-limited catalog; context manager guarantees closure
        with h5py.File(f_hdf5, 'w') as f:
            f.create_dataset('xyz', data=xyz)
            f.create_dataset('vxyz', data=vxyz)
            f.create_dataset('mhalo', data=mh)
    else:
        # read the cached catalog; Dataset[...] replaces the `.value`
        # attribute that was removed in h5py 3.0 (also fixes the file
        # never being closed in this branch)
        with h5py.File(f_hdf5, 'r') as f:
            xyz = f['xyz'][...]
            vxyz = f['vxyz'][...]
            mh = f['mhalo'][...]
        Nhalo = xyz.shape[0]
        print('%i halos in %.f Mpc/h box with Mh > %f' % (Nhalo, Lbox, mh_lim))
    nhalo = float(Nhalo) / Lbox**3  # comoving number density
    print('number density = %f' % nhalo)
    print('1/nbar = %f' % (1./nhalo))
    # calculate powerspectrum
    if not os.path.isfile(f_pell):
        delta = pySpec.FFTperiodic(xyz.T, fft='fortran', Lbox=Lbox, Ngrid=360, silent=False)
        delta_fft = pySpec.reflect_delta(delta, Ngrid=360)
        # calculate powerspectrum monopole (shot-noise corrected)
        k, p0k, cnts = pySpec.Pk_periodic(delta_fft)
        k *= kf
        p0k = p0k/(kf**3) - 1./nhalo
        # save to file
        hdr = ('pySpectrum P_l=0(k). Nhalo=%i, Lbox=%.f, k_f=%.5e, SN=%.5e' % (Nhalo, Lbox, kf, 1./nhalo))
        hdr += '\n k, p0k, counts'
        np.savetxt(f_pell, np.array([k, p0k, cnts]).T, fmt='%.5e %.5e %.5e', delimiter='\t', header=hdr)
    else:
        k, p0k, cnts = np.loadtxt(f_pell, skiprows=1, unpack=True, usecols=[0,1,2])
    # calculate P(k) using nbodykit for sanity check
    if not os.path.isfile(f_pnkt):
        cosmo = NBlab.cosmology.Planck15
        halo_data = {}
        halo_data['Position'] = xyz
        halo_data['Velocity'] = vxyz
        halo_data['Mass'] = mh
        print("putting it into array catalog")
        halos = NBlab.ArrayCatalog(halo_data, BoxSize=np.array([Lbox, Lbox, Lbox]))
        print("putting it into halo catalog")
        halos = NBlab.HaloCatalog(halos, cosmo=cosmo, redshift=z, mdef='vir')
        print("putting it into mesh")
        mesh = halos.to_mesh(window='tsc', Nmesh=360, compensated=True, position='Position')
        print("calculating powerspectrum" )
        r = NBlab.FFTPower(mesh, mode='1d', dk=kf, kmin=kf, poles=[0,2,4])
        poles = r.poles
        plk = {'k': poles['k']}
        for ell in [0, 2, 4]:
            P = (poles['power_%d' % ell].real)
            if ell == 0:
                P = P - poles.attrs['shotnoise'] # subtract shotnoise from monopole
            plk['p%dk' % ell] = P
        plk['shotnoise'] = poles.attrs['shotnoise'] # save shot noise term
        # header
        hdr = ('pySpectrum P_l(k). Nhalo=%i, Lbox=%.f, k_f=%.5e, SN=%.5e' % (Nhalo, Lbox, kf, plk['shotnoise']))
        hdr += '\n k, p0k, p2k, p4k'
        # save to file
        np.savetxt(f_pnkt, np.array([plk['k'], plk['p0k'], plk['p2k'], plk['p4k']]).T, header=hdr)
    else:
        _k, _p0k, _p2k, _p4k = np.loadtxt(f_pnkt, skiprows=1, unpack=True, usecols=[0,1,2,3])
        plk = {}
        plk['k'] = _k
        plk['p0k'] = _p0k
        plk['p2k'] = _p2k
        plk['p4k'] = _p4k
    # calculate bispectrum
    if not os.path.isfile(f_b123):
        # bug fix: forward Nmax/Ncut/step instead of hard-coding 40/3/3 —
        # previously the arguments only affected the output file name,
        # not the actual calculation
        bispec = pySpec.Bk_periodic(xyz.T, Lbox=Lbox, Ngrid=360, Nmax=Nmax, Ncut=Ncut, step=step, fft='pyfftw', nthreads=1, silent=False)
        i_k = bispec['i_k1']
        j_k = bispec['i_k2']
        l_k = bispec['i_k3']
        p0k1 = bispec['p0k1']
        p0k2 = bispec['p0k2']
        p0k3 = bispec['p0k3']
        b123 = bispec['b123']
        b123_sn = bispec['b123_sn']
        q123 = bispec['q123']
        counts = bispec['counts']
        # save to file
        hdr = 'pyspectrum bispectrum calculation test. k_f = 2pi/%.1f' % Lbox
        hdr += '\n i_k1, i_k2, i_k3, p0k1, p0k2, p0k3, bk, qk, counts, bk_shotnoise'
        np.savetxt(f_b123,
                np.array([i_k, j_k, l_k, p0k1, p0k2, p0k3, b123, q123, counts, b123_sn]).T,
                fmt='%i %i %i %.5e %.5e %.5e %.5e %.5e %.5e %.5e',
                delimiter='\t', header=hdr)
    else:
        i_k, j_k, l_k, p0k1, p0k2, p0k3, b123, q123, counts, b123_sn = np.loadtxt(f_b123,
                skiprows=1, unpack=True, usecols=range(10))
    # plot powerspectrum: pySpectrum vs nbodykit vs the bispectrum code's P0
    fig = plt.figure(figsize=(5,5))
    sub = fig.add_subplot(111)
    sub.plot(k, p0k, c='k', lw=1, label='pySpectrum')
    sub.plot(plk['k'], plk['p0k'], c='C1', lw=1, label='nbodykit')
    iksort = np.argsort(i_k)
    sub.plot(i_k[iksort] * kf, p0k1[iksort], c='k', lw=1, ls='--', label='bispectrum code')
    sub.legend(loc='lower left', fontsize=20)
    sub.set_ylabel('$P_0(k)$', fontsize=25)
    #sub.set_ylim([1e2, 3e4])
    sub.set_yscale('log')
    sub.set_xlabel('$k$', fontsize=25)
    sub.set_xlim([1e-2, 10.])
    sub.set_xscale('log')
    fig.savefig(f_pell.replace('.dat', '.png'), bbox_inches='tight')
    # plot bispectrum shape (triangle-configuration) panels
    nbin = 31
    x_bins = np.linspace(0., 1., nbin+1)
    y_bins = np.linspace(0.5, 1., (nbin//2) + 1)
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    Bgrid = Plots._BorQgrid(l_k.astype(float)/i_k.astype(float), j_k.astype(float)/i_k.astype(float), q123, counts, x_bins, y_bins)
    bplot = plt.pcolormesh(x_bins, y_bins, Bgrid.T, vmin=0, vmax=1, cmap='RdBu')
    cbar = plt.colorbar(bplot, orientation='vertical')
    sub.set_title(r'$Q(k_1, k_2, k_3)$ Illustris TNG halo catalog', fontsize=25)
    sub.set_xlabel('$k_3/k_1$', fontsize=25)
    sub.set_ylabel('$k_2/k_1$', fontsize=25)
    fig.savefig(f_b123.replace('.dat', '.Qk_shape.png'), bbox_inches='tight')
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    Bgrid = Plots._BorQgrid(l_k.astype(float)/i_k.astype(float), j_k.astype(float)/i_k.astype(float), b123, counts, x_bins, y_bins)
    bplot = plt.pcolormesh(x_bins, y_bins, Bgrid.T, norm=LogNorm(vmin=1e6, vmax=1e8), cmap='RdBu')
    cbar = plt.colorbar(bplot, orientation='vertical')
    sub.set_title(r'$B(k_1, k_2, k_3)$ Illustris TNG halo catalog', fontsize=25)
    sub.set_xlabel('$k_3/k_1$', fontsize=25)
    sub.set_ylabel('$k_2/k_1$', fontsize=25)
    fig.savefig(f_b123.replace('.dat', '.Bk_shape.png'), bbox_inches='tight')
    # plot bispectrum amplitude vs triangle index
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    sub.scatter(range(len(b123)), q123, c='k', s=1)
    sub.set_xlabel(r'$k_1 > k_2 > k_3$ triangle index', fontsize=25)
    sub.set_xlim([0, len(b123)])
    sub.set_ylabel(r'$Q(k_1, k_2, k_3)$', fontsize=25)
    sub.set_ylim([0., 1.])
    fig.savefig(f_b123.replace('.dat', '.Qk.png'), bbox_inches='tight')
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    sub.scatter(range(len(b123)), b123, c='k', s=1)
    sub.set_xlabel(r'$k_1 > k_2 > k_3$ triangle index', fontsize=25)
    sub.set_xlim([0, len(b123)])
    sub.set_ylabel(r'$B(k_1, k_2, k_3)$', fontsize=25)
    sub.set_yscale('log')
    fig.savefig(f_b123.replace('.dat', '.Bk.png'), bbox_inches='tight')
    return None
def fastpm_v_tng(z, str_flag='', mh_lim=15., Lbox=205., Nmax=40, Ncut=3, step=3):
    ''' make plots that compare the powerspectrum and bispectrum of
    fastpm and illustris tng.

    Reads the cached spectra written by `fastPM` and `TNG`, so those must
    have been run first with matching arguments.
    '''
    dir_fpm = os.path.join(UT.dat_dir(), 'fastpm')
    f_fpm = ('halocat_FastPM_40step_N250_IC500_B2_z%.2f%s.mlim%.fe10' % (z, str_flag, mh_lim))
    f_tng = ('halocat_TNG300Dark_z%.2f.mlim%.fe10' % (z, mh_lim))
    print('FastPM file: %s' % f_fpm)
    print('Illustris TNG file: %s' % f_tng)
    # file-name builders for the cached spectra of either catalog
    f_pell = lambda f_halo: ('%s/pySpec.Plk.%s.Lbox%.f.Ngrid360.dat' % (dir_fpm, f_halo, Lbox))
    f_pnkt = lambda f_halo: ('%s/pySpec.Plk.%s.Lbox%.f.Ngrid360.nbodykit.dat' % (dir_fpm, f_halo, Lbox))
    f_b123 = lambda f_halo: ('%s/pySpec.Bk.%s.Lbox%.f.Ngrid360.step%i.Ncut%i.Nmax%i.dat' % (dir_fpm, f_halo, Lbox, step, Ncut, Nmax))
    kf = 2.*np.pi/Lbox  # fundamental mode of the box
    # P(k) comparison
    fig = plt.figure(figsize=(5,5))
    sub = fig.add_subplot(111)
    k, p0k = np.loadtxt(f_pell(f_fpm), unpack=True, skiprows=1, usecols=[0,1])
    sub.plot(k, p0k, c='k', lw=1, label='FastPM')
    k, p0k_fpm = np.loadtxt(f_pnkt(f_fpm), unpack=True, skiprows=1, usecols=[0,1])
    sub.plot(k, p0k_fpm, c='k', ls='--', lw=1)
    k, p0k = np.loadtxt(f_pell(f_tng), unpack=True, skiprows=1, usecols=[0,1])
    sub.plot(k, p0k, c='C1', lw=1, label='Illustris TNG')
    k, p0k_tng = np.loadtxt(f_pnkt(f_tng), unpack=True, skiprows=1, usecols=[0,1])
    sub.plot(k, p0k_tng, c='C1', ls='--', lw=1)
    # print the fractional large-scale (k < 0.2) P(k) difference
    # (bug fix: this was a Python 2 print statement; under Python 3 it
    # parsed as `print(...)[mask] - 1.` and raised a TypeError)
    print((p0k_fpm / p0k_tng)[k < 0.2] - 1.)
    sub.legend(loc='lower left', fontsize=20)
    sub.set_ylabel('$P_0(k)$', fontsize=25)
    sub.set_yscale('log')
    sub.set_ylim([1e0, 1e4])
    sub.set_xlabel('$k$', fontsize=25)
    sub.set_xscale('log')
    sub.set_xlim([1e-2, 10.])
    fig.savefig(os.path.join(dir_fpm, 'p0k_fpm_tng_z%.2f%s.png' % (z, str_flag)), bbox_inches='tight')
    # B(k1,k2,k3) comparison over a restricted k range
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    for fh, lbl, c in zip([f_fpm, f_tng], ['FastPM', 'Illustris TNG'], ['k', 'C1']):
        i_k, j_k, l_k, p0k1, p0k2, p0k3, b123, q123, counts, b123_sn = np.loadtxt(f_b123(fh),
                skiprows=1, unpack=True, usecols=range(10))
        klim = ((i_k * kf <= 1.) & (i_k * kf >= 0.01) &
                (j_k * kf <= 1.) & (j_k * kf >= 0.01) &
                (l_k * kf <= 1.) & (l_k * kf >= 0.01))
        i_k, j_k, l_k = i_k[klim], j_k[klim], l_k[klim]
        ijl = UT.ijl_order(i_k, j_k, l_k, typ='GM') # order of triangles
        sub.scatter(range(np.sum(klim)), b123[klim][ijl], c=c, s=5, label=lbl)
        sub.plot(range(np.sum(klim)), b123[klim][ijl], c=c)
    sub.legend(loc='upper right', markerscale=4, handletextpad=0., fontsize=20)
    sub.set_ylabel('$B(k_1, k_2, k_3)$', fontsize=25)
    sub.set_yscale('log')
    sub.set_ylim([1e3, 5e6])
    sub.set_xlabel(r'$k_1 \le k_2 \le k_3$ triangle indices', fontsize=25)
    sub.set_xlim([0, np.sum(klim)])
    fig.savefig(os.path.join(dir_fpm, 'bk_fpm_tng_z%.2f%s.png' % (z, str_flag)), bbox_inches='tight')
    # Q(k1,k2,k3) comparison (lower k bound only)
    fig = plt.figure(figsize=(10,5))
    sub = fig.add_subplot(111)
    for fh, lbl, c in zip([f_fpm, f_tng], ['FastPM', 'Illustris TNG'], ['k', 'C1']):
        i_k, j_k, l_k, p0k1, p0k2, p0k3, b123, q123, counts, b123_sn = np.loadtxt(f_b123(fh),
                skiprows=1, unpack=True, usecols=range(10))
        klim = ((i_k *kf >= 0.01) & (j_k *kf >= 0.01) & (l_k *kf >= 0.01))
        i_k, j_k, l_k = i_k[klim], j_k[klim], l_k[klim]
        ijl = UT.ijl_order(i_k, j_k, l_k, typ='GM') # order of triangles
        sub.scatter(range(np.sum(klim)), q123[klim][ijl], c=c, s=5, label=lbl)
        sub.plot(range(np.sum(klim)), q123[klim][ijl], c=c)
    sub.legend(loc='upper right', markerscale=4, handletextpad=0., fontsize=20)
    sub.set_ylabel('$Q(k_1, k_2, k_3)$', fontsize=25)
    sub.set_ylim([0., 1.])
    sub.set_xlabel(r'$k_1 \le k_2 \le k_3$ triangle indices', fontsize=25)
    sub.set_xlim([0, np.sum(klim)])
    fig.savefig(os.path.join(dir_fpm, 'qk_fpm_tng_z%.2f%s.png' % (z, str_flag)), bbox_inches='tight')
    return None
# Compare the bias-calibrated FastPM run against Illustris TNG at each
# snapshot redshift. The commented-out calls generate the cached spectra
# that fastpm_v_tng reads; uncomment them on a first run.
if __name__=="__main__": 
    for z in [0., 0.52, 1.04, 2.]: 
        #fastPM(z)
        #fastPM(z, str_flag='_calibratebias')
        #TNG(z)
        #fastpm_v_tng(z, Lbox=205., Nmax=40, Ncut=3, step=3)
        fastpm_v_tng(z, str_flag='_calibratebias', Lbox=205., Nmax=40, Ncut=3, step=3)
|
changhoonhahnREPO_NAMEpySpectrumPATH_START.@pySpectrum_extracted@pySpectrum-master@run@fastpm@fastpm.py@.PATH_END.py
|
{
"filename": "short_uvmap.py",
"repo_name": "micbia/serenet",
"repo_path": "serenet_extracted/serenet-main/utils_data/short_uvmap.py",
"type": "Python"
}
|
import numpy as np, pickle

# Load the full uv-coverage dictionary, keyed by redshift formatted as
# '%.3f' plus an extra 'Nant' entry (number of antennae).
uvs = pickle.load(open('uvmap_128_z7-20.pkl', 'rb'))

# Restrict the maps to the redshift range actually used: z = 7 ... 11.5
# in steps of 0.001 (the 11.5005 upper bound makes arange include 11.5).
redshift = np.arange(7, 11.5005, 0.001)

uvs_short = {}
for z in redshift:
    uvs_short['%.3f' % z] = uvs['%.3f' % z]
uvs_short['Nant'] = uvs['Nant']

# Bug fix: the original script dumped the *full* dictionary `uvs`, so the
# shortened map was never actually written. Dump `uvs_short` instead.
pickle.dump(uvs_short, open('uvmap_128_z%d-%d.pkl' % (redshift.min(), redshift.max()), 'wb'))
|
micbiaREPO_NAMEserenetPATH_START.@serenet_extracted@serenet-main@utils_data@short_uvmap.py@.PATH_END.py
|
{
"filename": "base_optimizer.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/optimizers/base_optimizer.py",
"type": "Python"
}
|
import re
import warnings
from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.optimizers.schedules import learning_rate_schedule
from keras.src.saving import serialization_lib
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils import tracking
from keras.src.utils.naming import auto_name
class BaseOptimizer(KerasSaveable):
    """Abstract optimizer base class.
    If you intend to create your own optimization algorithm, please inherit from
    this class and override the following methods:
    - `build`: Create your optimizer-related variables, such as momentum
        variables in the SGD optimizer.
    - `update_step`: Implement your optimizer's variable updating logic.
    - `get_config`: serialization of the optimizer.
    Example:
    ```python
    class SGD(Optimizer):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.momentum = 0.9
        def build(self, variables):
            super().build(variables)
            self.momentums = []
            for variable in variables:
                self.momentums.append(
                    self.add_variable_from_reference(
                        reference_variable=variable, name="momentum"
                    )
                )
        def update_step(self, gradient, variable, learning_rate):
            learning_rate = ops.cast(learning_rate, variable.dtype)
            gradient = ops.cast(gradient, variable.dtype)
            m = self.momentums[self._get_variable_index(variable)]
            self.assign(
                m,
                ops.subtract(
                    ops.multiply(m, ops.cast(self.momentum, variable.dtype)),
                    ops.multiply(gradient, learning_rate),
                ),
            )
            self.assign_add(variable, m)
        def get_config(self):
            config = super().get_config()
            config.update(
                {
                    "momentum": self.momentum,
                    "nesterov": self.nesterov,
                }
            )
            return config
    ```
    """
    def __init__(
        self,
        learning_rate,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name=None,
        **kwargs,
    ):
        # `_lock` must be assigned first: `__setattr__` uses its presence to
        # detect that `super().__init__()` ran before any state is attached.
        self._lock = False
        if kwargs.pop("decay", None) is not None:
            warnings.warn(
                "Argument `decay` is no longer supported and will be ignored."
            )
        if kwargs:
            raise ValueError(f"Argument(s) not recognized: {kwargs}")
        if name is None:
            name = auto_name(self.__class__.__name__)
        self.name = name
        self.weight_decay = weight_decay
        self.clipnorm = clipnorm
        self.global_clipnorm = global_clipnorm
        self.clipvalue = clipvalue
        self.use_ema = use_ema
        self.loss_scale_factor = loss_scale_factor
        self.gradient_accumulation_steps = gradient_accumulation_steps
        if gradient_accumulation_steps:
            # NOTE(review): the message promises an integer, but only the
            # ">= 2" part is actually enforced by this check.
            if not gradient_accumulation_steps >= 2:
                raise ValueError(
                    "`gradient_accumulation_steps` must be an integer >= 2. "
                    "Received: gradient_accumulation_steps="
                    f"{gradient_accumulation_steps}"
                )
        if use_ema:
            # Verify the arguments related to EMA.
            if ema_momentum > 1 or ema_momentum < 0:
                raise ValueError(
                    "`ema_momentum` must be in the range [0, 1]. "
                    f"Received: ema_momentum={ema_momentum}"
                )
            if ema_overwrite_frequency and (
                not isinstance(ema_overwrite_frequency, int)
                or ema_overwrite_frequency < 1
            ):
                raise ValueError(
                    "`ema_overwrite_frequency` must be an integer >= 1 or "
                    "None. Received: ema_overwrite_frequency="
                    f"{ema_overwrite_frequency}"
                )
        self.ema_momentum = ema_momentum
        self.ema_overwrite_frequency = ema_overwrite_frequency
        # The three gradient-clipping modes are mutually exclusive.
        clip_args_sum = sum(
            a is not None for a in [clipnorm, clipvalue, global_clipnorm]
        )
        if clip_args_sum > 1:
            raise ValueError(
                "Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can "
                f"be set. Received: clipnorm={clipnorm}, "
                f"clipvalue={clipvalue}, global_clipnorm={global_clipnorm}"
            )
        self.built = False
        # Set up variable tracking.
        self._variables = []
        self._trainable_variables = []
        self._tracker = tracking.Tracker(
            {
                "variables": (
                    lambda x: isinstance(x, backend.Variable),
                    self._variables,
                ),
            }
        )
        self._trainable_variables_indices = {}
        # Create iteration variable
        # Note: dtype="int" will resolve to int32 in JAX
        # (since int64 is disallowed in JAX) and to int64 in TF.
        with backend.name_scope(self.name, caller=self):
            iterations = backend.Variable(
                0,
                name="iteration",
                dtype="int",
                trainable=False,
                aggregation="only_first_replica",
            )
        self._track_variable(iterations)
        self._iterations = iterations
        # Create learning rate (schedule or variable)
        if isinstance(
            learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            self._learning_rate = learning_rate
        elif callable(learning_rate):
            self._learning_rate = learning_rate
        else:
            if not isinstance(learning_rate, float):
                raise ValueError(
                    "Argument `learning_rate` should be float, or an instance "
                    "of LearningRateSchedule, or a callable "
                    "(that takes in the current iteration value "
                    "and returns the corresponding learning rate value). "
                    f"Received instead: learning_rate={learning_rate}"
                )
            with backend.name_scope(self.name, caller=self):
                learning_rate = backend.Variable(
                    learning_rate,
                    name="learning_rate",
                    dtype=backend.floatx(),
                    trainable=False,
                    aggregation="only_first_replica",
                )
            self._track_variable(learning_rate)
            self._learning_rate = learning_rate
    @property
    def iterations(self):
        """Number of *effective* optimizer steps taken so far.
        With gradient accumulation enabled this is the raw iteration counter
        floor-divided by `gradient_accumulation_steps`.
        """
        if self.gradient_accumulation_steps:
            return ops.floor_divide(
                self._iterations, self.gradient_accumulation_steps
            )
        return self._iterations
    def _track_variable(self, variable):
        # Register an optimizer-owned variable with the tracker so that it
        # appears in `self._variables` (and therefore `self.variables`).
        self._tracker.add_to_store("variables", variable)
    @tracking.no_automatic_dependency_tracking
    def build(self, variables):
        """Create optimizer state (EMA copies, accumulators) for `variables`."""
        if self.use_ema:
            self._model_variables_moving_average = []
        if self.gradient_accumulation_steps:
            self._accumulated_gradients = []
        for i, variable in enumerate(variables):
            self._trainable_variables_indices[self._var_key(variable)] = i
            if self.use_ema:
                self._model_variables_moving_average.append(
                    self.add_variable_from_reference(
                        variable,
                        name="average",
                    )
                )
            if self.gradient_accumulation_steps:
                self._accumulated_gradients.append(
                    self.add_variable_from_reference(
                        variable,
                        name="gradient_accumulator",
                    )
                )
        self._trainable_variables = variables[:]
        self.built = True
    def _var_key(self, variable):
        # Helper function to get a stable ID and the variable instance mapping.
        return id(variable)
    @property
    def variables(self):
        # Shallow copy so callers cannot mutate the internal tracking list.
        return self._variables[:]
    def _get_variable_index(self, variable):
        # Position of `variable` in the list the optimizer was built with.
        return self._trainable_variables_indices[self._var_key(variable)]
    def add_variable(
        self,
        shape,
        initializer="zeros",
        dtype=None,
        aggregation="mean",
        name=None,
    ):
        """Create, track and return a new non-trainable optimizer variable."""
        self._check_super_called()
        initializer = initializers.get(initializer)
        with backend.name_scope(self.name, caller=self):
            variable = backend.Variable(
                initializer=initializer,
                shape=shape,
                dtype=dtype,
                trainable=False,
                aggregation=aggregation,
                name=name,
            )
        self._track_variable(variable)
        return variable
    def add_variable_from_reference(
        self, reference_variable, name=None, initializer="zeros"
    ):
        """Add an all-zeros variable with the shape and dtype of a reference
        variable.
        """
        name = name or "var"
        # Derive a unique, filesystem-safe name from the reference variable's
        # path (Keras variables) or name (e.g. `tf.Variable`, which has no
        # `path` attribute and may contain ":" in its name).
        if hasattr(reference_variable, "path"):
            name = reference_variable.path.replace("/", "_") + "_" + name
        else:
            name = (
                str(reference_variable.name).replace("/", "_").replace(":", "_")
                + "_"
                + name
            )
        return self.add_variable(
            shape=reference_variable.shape,
            initializer=initializer,
            dtype=reference_variable.dtype,
            name=name,
        )
    def _check_variables_are_known(self, variables):
        # Reject any variable that was not part of the build-time list.
        for v in variables:
            if self._var_key(v) not in self._trainable_variables_indices:
                raise ValueError(
                    f"Unknown variable: {v}. This optimizer can only "
                    "be called for the variables it was originally built with. "
                    "When working with a new set of variables, you should "
                    "recreate a new optimizer instance."
                )
    def assign(self, variable, value):
        """Assign a value to a variable.
        This should be used in optimizers instead of `variable.assign(value)` to
        support backend specific optimizations.
        Note that the variable can be a model variable or an optimizer variable;
        it can be a backend native variable or a Keras variable.
        Args:
            variable: The variable to update.
            value: The value to assign to the variable.
        """
        variable.assign(value)
    def assign_add(self, variable, value):
        """Add a value to a variable.
        This should be used in optimizers instead of
        `variable.assign_add(value)` to support backend specific optimizations.
        Note that the variable can be a model variable or an optimizer variable;
        it can be a backend native variable or a Keras variable.
        Args:
            variable: The variable to update.
            value: The value to add to the variable.
        """
        variable.assign_add(value)
    def assign_sub(self, variable, value):
        """Subtract a value from a variable.
        This should be used in optimizers instead of
        `variable.assign_sub(value)` to support backend specific optimizations.
        Note that the variable can be a model variable or an optimizer variable;
        it can be a backend native variable or a Keras variable.
        Args:
            variable: The variable to update.
            value: The value to subtract from the variable.
        """
        variable.assign_sub(value)
    def update_step(self, gradient, variable, learning_rate):
        # Subclasses must implement the per-variable update rule.
        raise NotImplementedError
    def apply_gradients(self, grads_and_vars):
        """Apply `(gradient, variable)` pairs; thin wrapper around `apply`."""
        grads, trainable_variables = zip(*grads_and_vars)
        self.apply(grads, trainable_variables)
        # Return iterations for compat with tf.keras.
        return self._iterations
    def apply(self, grads, trainable_variables=None):
        """Update trainable variables according to provided gradient values.
        `grads` should be a list of gradient tensors
        with 1:1 mapping to the list of variables the optimizer was built with.
        `trainable_variables` can be provided
        on the first call to build the optimizer.
        """
        if len(grads) == 0:
            # It is possible that the grad is empty. In this case,
            # `apply_gradients` is a no-op.
            return
        if trainable_variables is None:
            if not self.built:
                raise ValueError(
                    "When passing `grads` without `variables`, the optimizer "
                    "must already be built on a list of variables. "
                    "Call `optimizer.build(trainable_variables)` first. "
                )
            if len(grads) != len(self._trainable_variables_indices):
                # NOTE(review): "one-to-on" in the message below is a typo for
                # "one-to-one".
                raise ValueError(
                    "When passing `grads` as a list of gradient tensors, the "
                    f"gradients must match `optimizer.variables` one-to-on. "
                    f"Received a list of {len(grads)} gradients, but the "
                    f"optimizer is tracking {len(self._trainable_variables)} "
                    "trainable variables."
                )
            trainable_variables = self._trainable_variables
        else:
            trainable_variables = list(trainable_variables)
            # Optionally build optimizer.
            if not self.built:
                with backend.name_scope(self.name, caller=self):
                    self.build(trainable_variables)
                self.built = True
            self._check_variables_are_known(trainable_variables)
        with backend.name_scope(self.name, caller=self):
            # Overwrite targeted variables directly with their gradients if
            # their `overwrite_with_gradient` is set.
            grads, trainable_variables = (
                self._overwrite_variables_directly_with_gradients(
                    grads, trainable_variables
                )
            )
            # Filter empty gradients.
            grads, trainable_variables = self._filter_empty_gradients(
                grads, trainable_variables
            )
            if len(list(grads)) == 0:
                return
            # Unscale gradients.
            scale = self.loss_scale_factor
            if scale is not None:
                grads = [g if g is None else g / scale for g in grads]
            # Apply gradient updates.
            self._backend_apply_gradients(grads, trainable_variables)
            # Apply variable constraints after applying gradients.
            for variable in trainable_variables:
                if variable.constraint is not None:
                    variable.assign(variable.constraint(variable))
    def _backend_apply_gradients(self, grads, trainable_variables):
        """Apply method that can be overridden by different backends.
        JAX overrides it in order to deal with statelessness in gradient
        accumulation and EMA handling.
        The below implementation is intended to be generally backend-agnostic,
        but may not work with all backends.
        This method does 4 things:
        - Call the optimizer's update_step() to update trainable variables
        and optimizer variables.
        - Update EMA variables, if EMA is configured.
        - Update gradient accumulators, if gradient accumulation is configured.
        - Update the iteration counter.
        """
        if self.gradient_accumulation_steps:
            is_update_step = (
                self._iterations + 1
            ) % self.gradient_accumulation_steps == 0
            # `trainable_variables` might have been filtered in previous
            # processing steps, so we need to ensure the correct mapping between
            # `self._accumulated_gradients` and `trainable_variables`
            acc_grads = [
                self._accumulated_gradients[self._get_variable_index(v)]
                for v in trainable_variables
            ]
            def _update_step_fn(grads, trainable_variables):
                # Run update step with accumulated grads + reset accumulators
                steps = self.gradient_accumulation_steps
                grads = [
                    (g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
                ]
                # Apply clipping and weight decay.
                grads = self._clip_gradients(grads)
                self._apply_weight_decay(trainable_variables)
                self._backend_update_step(
                    grads, trainable_variables, self.learning_rate
                )
                self._backend_reset_gradient_accumulators()
            # On non-update steps only the accumulators are incremented.
            ops.cond(
                is_update_step,
                lambda: _update_step_fn(grads, trainable_variables),
                lambda: self._backend_increment_gradient_accumulators(
                    grads, acc_grads
                ),
            )
        else:
            # Apply clipping and weight decay.
            grads = self._clip_gradients(grads)
            self._apply_weight_decay(trainable_variables)
            # Run update step.
            self._backend_update_step(
                grads, trainable_variables, self.learning_rate
            )
        if self.use_ema:
            self._update_model_variables_moving_average(
                self._trainable_variables
            )
            if self.ema_overwrite_frequency:
                # Only when self.ema_overwrite_frequency is not None, we
                # overwrite the model variables.
                should_overwrite_model_vars = (
                    self.iterations + 1
                ) % self.ema_overwrite_frequency == 0
                ops.cond(
                    should_overwrite_model_vars,
                    lambda: self._overwrite_model_variables_with_average_value(
                        self._trainable_variables
                    ),
                    lambda: None,
                )
        # Update iteration counter.
        self._iterations.assign_add(1)
    def _backend_update_step(self, grads, trainable_variables, learning_rate):
        """Collective update_step that can be overridden by the backend.
        It is overridden by torch for performance reasons, and
        by TF to support tf.distribute.
        """
        for grad, var in zip(grads, trainable_variables):
            self.update_step(grad, var, learning_rate)
    def _backend_reset_gradient_accumulators(self):
        # Zero every accumulator after an effective update step.
        for g_acc in self._accumulated_gradients:
            g_acc.assign(ops.zeros(g_acc.shape, dtype=g_acc.dtype))
    def _backend_increment_gradient_accumulators(self, grads, acc_grads):
        # Add the current gradients into the running accumulators.
        new_g_accs = [(g + acc_g) for g, acc_g in zip(grads, acc_grads)]
        for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
            g_acc.assign(n_g_acc)
    def stateless_apply(self, optimizer_variables, grads, trainable_variables):
        """Functional counterpart of `apply` for stateless execution (e.g. JAX).
        Instead of mutating state in place, runs `apply` inside a
        `StatelessScope` and returns the updated
        `(trainable_variables, optimizer_variables)` value lists.
        """
        self._check_super_called()
        if not self.built:
            raise ValueError(
                f"To call `stateless_apply`, {self.__class__.__name__} "
                "must be built (i.e. its variables must have been created). "
                "You can build it via `optimizer.build(trainable_variables)`."
            )
        if len(optimizer_variables) != len(self.variables):
            raise ValueError(
                "Argument `optimizer_variables` must be a list of tensors "
                f"corresponding 1:1 to {self.__class__.__name__}().variables. "
                f"Received list with length {len(optimizer_variables)}, but "
                f"expected {len(self.variables)} variables."
            )
        if len(trainable_variables) != len(self._trainable_variables):
            raise ValueError(
                "Argument `optimizer_variables` must be a list of tensors "
                "corresponding 1:1 to the trainable variables list that "
                "the optimizer was built with. Received "
                f"len(trainable_variables) == {len(trainable_variables)} "
                "whereas the optimizer was built with "
                f"{len(self._trainable_variables)} variables."
            )
        # Gather variable mapping
        mapping = list(
            zip(self._trainable_variables, trainable_variables)
        ) + list(zip(self.variables, optimizer_variables))
        # Call in stateless scope
        with backend.StatelessScope(state_mapping=mapping) as scope:
            self.apply(grads)
        # Gather updated variables
        trainable_variables = []
        for v in self._trainable_variables:
            new_v = scope.get_current_value(v)
            if new_v is not None:
                trainable_variables.append(new_v)
            else:
                trainable_variables.append(v)
        optimizer_variables = []
        for v in self.variables:
            new_v = scope.get_current_value(v)
            if new_v is not None:
                optimizer_variables.append(new_v)
            else:
                optimizer_variables.append(v)
        return trainable_variables, optimizer_variables
    def scale_loss(self, loss):
        """Scale the loss before computing gradients.
        Scales the loss before gradients are computed in a `train_step`. This
        is primarily useful during mixed precision training to prevent numeric
        underflow.
        """
        if self.loss_scale_factor is not None:
            return loss * self.loss_scale_factor
        return loss
    @property
    def learning_rate(self):
        """Current learning rate value (resolves schedules and callables)."""
        return self._get_current_learning_rate()
    @learning_rate.setter
    def learning_rate(self, learning_rate):
        # Remember a previously tracked LR variable so it can be untracked
        # if the new learning rate is not a variable.
        if isinstance(self._learning_rate, backend.Variable):
            prev_lr_var = self._learning_rate
        else:
            prev_lr_var = None
        if isinstance(
            learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            self._learning_rate = learning_rate
        elif callable(learning_rate):
            self._learning_rate = learning_rate
        else:
            if isinstance(
                self._learning_rate, learning_rate_schedule.LearningRateSchedule
            ):
                raise TypeError(
                    "This optimizer was created with a `LearningRateSchedule`"
                    " object as its `learning_rate` constructor argument, "
                    "hence its learning rate is not settable. If you need the"
                    " learning rate to be settable, you should instantiate "
                    "the optimizer with a float `learning_rate` argument."
                )
            # NOTE(review): if the previous learning rate was a plain callable
            # (not a Variable), it has no `assign` and this line would raise —
            # TODO confirm this path is intentional.
            self._learning_rate.assign(learning_rate)
        if prev_lr_var is not None and not isinstance(
            self._learning_rate, backend.Variable
        ):
            # Untrack learning rate variable
            self._untrack_variable(prev_lr_var)
    def set_weights(self, weights):
        """Set the weights of the optimizer."""
        if not self.built:
            raise ValueError(
                "You are calling `set_weights()` on an optimizer that has not "
                "yet been built. Please call "
                "`optimizer.build(trainable_variables)` to create the "
                "optimizer weights before calling `set_weights()`."
            )
        for variable, weight in zip(self._variables, weights):
            if variable.shape != weight.shape:
                raise ValueError(
                    f"Optimizer variable {self._var_key(variable)} has shape "
                    f"{str(variable.shape)} not compatible with provided "
                    f"weight shape {str(weight.shape)}."
                )
            variable.assign(weight)
    def save_own_variables(self, store):
        """Get the state of this optimizer object."""
        # Keys are stringified positional indices into `self.variables`.
        for i, variable in enumerate(self.variables):
            store[str(i)] = variable.numpy()
    def load_own_variables(self, store):
        """Set the state of this optimizer object."""
        if len(store.keys()) != len(self.variables):
            msg = (
                f"Skipping variable loading for optimizer '{self.name}', "
                f"because it has {len(self.variables)} variables whereas "
                f"the saved optimizer has {len(store.keys())} variables. "
            )
            if len(self.variables) == 0:
                msg += (
                    "This is likely because the optimizer has not been "
                    "called/built yet."
                )
            warnings.warn(msg, stacklevel=2)
            return
        for i, variable in enumerate(self.variables):
            variable.assign(store[str(i)])
    def _get_current_learning_rate(self):
        # Resolve the stored learning rate: evaluate a schedule at the current
        # iteration, call a plain callable, or return the value/variable as-is.
        if isinstance(
            self._learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            return self._learning_rate(self._iterations)
        elif callable(self._learning_rate):
            return self._learning_rate()
        return self._learning_rate
    def _overwrite_variables_directly_with_gradients(self, grads, vars):
        """Overwrite the variables directly by their gradients.
        This method is designed for a special case where we want to overwrite
        the variable directly with its computed gradient. For example, in float8
        training, new `scale` and `amax_history` are computed as gradients, and
        we want to overwrite them directly instead of following the typical
        procedure such as gradient descent with a learning rate, gradient
        clipping and weight decaying.
        After the update, the processed pairs will be filtered out.
        """
        # Shortcut for `tf.Variable` because it doesn't have a
        # `overwrite_with_gradient` attr
        if any(not hasattr(v, "overwrite_with_gradient") for v in vars):
            return grads, vars
        # Shallow copies
        filtered_grads = list(grads)
        filtered_vars = list(vars)
        # Iterate from right to left for safe popping
        for i in range(len(filtered_grads) - 1, -1, -1):
            g, v = filtered_grads[i], filtered_vars[i]
            if v.overwrite_with_gradient:
                if self.gradient_accumulation_steps:
                    # Utilize a stateless manner for JAX compatibility
                    steps = self.gradient_accumulation_steps
                    is_update_step = (self._iterations + 1) % steps == 0
                    acc_g = self._accumulated_gradients[
                        self._get_variable_index(v)
                    ]
                    # `ops.maximum` is utilized for gradient accumulation for
                    # `overwrite_with_gradient=True` variables
                    # (the lambdas below close over loop variables but are
                    # invoked immediately by `ops.cond`, so late binding is
                    # not an issue here).
                    new_g_acc = ops.cond(
                        is_update_step,
                        lambda: ops.zeros(g.shape, dtype=g.dtype),
                        lambda: ops.maximum(g, acc_g),
                    )
                    new_g = ops.cond(
                        is_update_step,
                        lambda: ops.maximum(g, acc_g),
                        lambda: g,
                    )
                    new_v = ops.cond(
                        is_update_step, lambda: new_g, lambda: v.value
                    )
                    v.assign(new_v)
                    acc_g.assign(new_g_acc)
                else:
                    v.assign(g)
                filtered_grads.pop(i)
                filtered_vars.pop(i)
        return filtered_grads, filtered_vars
    def _filter_empty_gradients(self, grads, vars):
        # Drop (grad, var) pairs whose gradient is None, warning about them.
        filtered_grads = list(grads)
        filtered_vars = list(vars)
        missing_grad_vars = []
        # Iterate from right to left for safe popping
        for i in range(len(filtered_grads) - 1, -1, -1):
            if filtered_grads[i] is None:
                filtered_grads.pop(i)
                v = filtered_vars.pop(i)
                try:
                    missing_grad_vars.append(v.path)
                except AttributeError:
                    # `tf.Variable` doesn't have `path` attr.
                    missing_grad_vars.append(v.name)
        if not filtered_grads:
            raise ValueError("No gradients provided for any variable.")
        if missing_grad_vars:
            warnings.warn(
                "Gradients do not exist for variables "
                f"{list(reversed(missing_grad_vars))} when minimizing the loss."
                " If using `model.compile()`, did you forget to provide a "
                "`loss` argument?"
            )
        return filtered_grads, filtered_vars
    def _clip_gradients(self, grads):
        # Apply whichever (single) clipping mode was configured; `__init__`
        # guarantees at most one of the three is set.
        if self.clipnorm and self.clipnorm > 0:
            return [
                self._clip_by_norm(g) if g is not None else g for g in grads
            ]
        elif self.global_clipnorm and self.global_clipnorm > 0:
            return clip_by_global_norm(grads, self.global_clipnorm)
        elif self.clipvalue and self.clipvalue > 0:
            v = self.clipvalue
            return [ops.clip(g, -v, v) if g is not None else g for g in grads]
        else:
            return grads
    def exclude_from_weight_decay(self, var_list=None, var_names=None):
        """Exclude variables from weight decay.
        This method must be called before the optimizer's `build` method is
        called. You can set specific variables to exclude out, or set a list of
        strings as the anchor words, if any of which appear in a variable's
        name, then the variable is excluded.
        Args:
            var_list: A list of `Variable`s to exclude from weight decay.
            var_names: A list of strings. If any string in `var_names` appear
                in the model variable's name, then this model variable is
                excluded from weight decay. For example, `var_names=['bias']`
                excludes all bias variables from weight decay.
        """
        # NOTE(review): this guard checks `_built`, but this class only ever
        # sets `built` (no underscore), so the check can never trigger here —
        # TODO confirm intent.
        if hasattr(self, "_built") and self._built:
            raise ValueError(
                "`exclude_from_weight_decay()` can only be configured before "
                "the optimizer is built."
            )
        # Use a `set` for the ids of `var_list` to speed up the searching
        if var_list:
            self._exclude_from_weight_decay = set(
                self._var_key(variable) for variable in var_list
            )
        else:
            self._exclude_from_weight_decay = set()
        # Precompile the pattern for `var_names` to speed up the searching
        if var_names and len(var_names) > 0:
            self._exclude_from_weight_decay_pattern = re.compile(
                "|".join(set(var_names))
            )
        else:
            self._exclude_from_weight_decay_pattern = None
        # Reset cache
        self._exclude_from_weight_decay_cache = dict()
    def _use_weight_decay(self, variable):
        """Return whether weight decay should be applied to `variable`."""
        variable_id = self._var_key(variable)
        # Immediately return the value if `variable_id` hits the cache
        if not hasattr(self, "_exclude_from_weight_decay_cache"):
            self._exclude_from_weight_decay_cache = dict()
        if variable_id in self._exclude_from_weight_decay_cache:
            return self._exclude_from_weight_decay_cache[variable_id]
        # Determine whether the variable should apply weight decay or not
        exclude_from_weight_decay = getattr(
            self, "_exclude_from_weight_decay", set()
        )
        exclude_from_weight_decay_pattern = getattr(
            self, "_exclude_from_weight_decay_pattern", None
        )
        if variable_id in exclude_from_weight_decay:
            self._exclude_from_weight_decay_cache[variable_id] = False
            return False
        if exclude_from_weight_decay_pattern is not None:
            if (
                re.search(exclude_from_weight_decay_pattern, variable.name)
                is not None
            ):
                self._exclude_from_weight_decay_cache[variable_id] = False
                return False
        self._exclude_from_weight_decay_cache[variable_id] = True
        return True
    def _apply_weight_decay(self, variables):
        if self.weight_decay is None:
            return
        for variable in variables:
            if self._use_weight_decay(variable):
                # Decay is scaled by the learning rate:
                # variable <- variable - variable * wd * lr
                lr = ops.cast(self.learning_rate, variable.dtype)
                wd = ops.cast(self.weight_decay, variable.dtype)
                variable.assign(variable - variable * wd * lr)
    def _check_super_called(self):
        # `_lock` is the first attribute set by `__init__`; its absence means
        # the subclass forgot to call `super().__init__()`.
        if not hasattr(self, "_lock"):
            raise RuntimeError(
                f"In optimizer '{self.__class__.__name__}', you forgot to call "
                "`super().__init__()` as the first statement "
                "in the `__init__()` method. "
                "Go add it!"
            )
    def _update_model_variables_moving_average(self, trainable_variables):
        """Update the stored moving average using the latest value."""
        if self.use_ema:
            for var, average in zip(
                trainable_variables, self._model_variables_moving_average
            ):
                # Momentum is forced to 0 on the very first iteration so the
                # average is seeded with the variable's initial value.
                not_first_step = ops.not_equal(self.iterations, 0)
                momentum = (
                    ops.cast(not_first_step, var.dtype) * self.ema_momentum
                )
                average.assign(momentum * average + (1 - momentum) * var)
    def _overwrite_model_variables_with_average_value(
        self, trainable_variables
    ):
        """Overwrite model variables with its moving average."""
        if len(trainable_variables) != len(
            self._model_variables_moving_average
        ):
            raise ValueError(
                f"The length of model variables ({len(trainable_variables)}) "
                "to override does not match the length of model variables "
                "stored in the optimizer "
                f"({len(self._model_variables_moving_average)}). Please "
                "check if the optimizer was called on your model."
            )
        for var, average_var in zip(
            trainable_variables, self._model_variables_moving_average
        ):
            var.assign(average_var)
    def finalize_variable_values(self, var_list):
        """Set the final value of model's trainable variables.
        Sometimes there are some extra steps before ending the variable updates,
        such as overriding the model variables with its average value.
        Args:
            var_list: list of model variables.
        """
        if self.use_ema:
            # If the optimizer uses EMA, then when finalizing, we replace the
            # model variable value with its moving average stored inside
            # optimizer.
            self._overwrite_model_variables_with_average_value(var_list)
    def _obj_type(self):
        # Category label used by the `KerasSaveable` machinery — see base class.
        return "Optimizer"
    def get_config(self):
        """Returns the config of the optimizer.
        An optimizer config is a Python dictionary (serializable)
        containing the configuration of an optimizer.
        The same optimizer can be reinstantiated later
        (without any saved state) from this configuration.
        Subclass optimizer should override this method to include other
        hyperparameters.
        Returns:
            Python dictionary.
        """
        if isinstance(
            self._learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            learning_rate = learning_rate_schedule.serialize(
                self._learning_rate
            )
        elif isinstance(self._learning_rate, backend.Variable):
            learning_rate = float(self._learning_rate.numpy())
        elif ops.is_tensor(self._learning_rate):
            learning_rate = float(self._learning_rate)
        elif callable(self._learning_rate):
            learning_rate = serialization_lib.serialize_keras_object(
                self._learning_rate
            )
        else:
            # NOTE(review): a learning rate that matches none of the above is
            # silently replaced by the constant 0.5 in the config.
            learning_rate = 0.5
        config = {
            "name": self.name,
            "learning_rate": learning_rate,
            "weight_decay": self.weight_decay,
            "clipnorm": self.clipnorm,
            "global_clipnorm": self.global_clipnorm,
            "clipvalue": self.clipvalue,
            "use_ema": self.use_ema,
            "ema_momentum": self.ema_momentum,
            "ema_overwrite_frequency": self.ema_overwrite_frequency,
            "loss_scale_factor": self.loss_scale_factor,
            "gradient_accumulation_steps": self.gradient_accumulation_steps,
        }
        return config
    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Creates an optimizer from its config.
        This method is the reverse of `get_config`, capable of instantiating the
        same optimizer from the config dictionary.
        Args:
            config: A Python dictionary, typically the output of get_config.
            custom_objects: A Python dictionary mapping names to additional
                user-defined Python objects needed to recreate this optimizer.
        Returns:
            An optimizer instance.
        """
        if "learning_rate" in config:
            # A dict here is a serialized schedule/callable; deserialize it.
            if isinstance(config["learning_rate"], dict):
                config["learning_rate"] = (
                    serialization_lib.deserialize_keras_object(
                        config["learning_rate"], custom_objects=custom_objects
                    )
                )
        return cls(**config)
    def __setattr__(self, name, value):
        # Prevent users from attaching state to the
        # layer before `super()` is called -- since that
        # state would silently not be tracked.
        if name != "_lock":
            self._check_super_called()
        # Track Variables.
        if hasattr(self, "_tracker"):
            value = self._tracker.track(value)
        return super().__setattr__(name, value)
    def _clip_by_norm(self, values, axes=None):
        # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
        l2sum = ops.sum(ops.square(values), axes, keepdims=True)
        pred = l2sum > 0
        # Two-tap tf.where trick to bypass NaN gradients
        l2sum_safe = ops.where(pred, l2sum, ops.ones_like(l2sum))
        l2norm = ops.where(pred, ops.sqrt(l2sum_safe), l2sum)
        intermediate = ops.multiply(values, self.clipnorm)
        values_clip = ops.convert_to_tensor(intermediate) / ops.maximum(
            l2norm, self.clipnorm
        )
        return values_clip
    def _untrack_variable(self, variable):
        # Untracking requires the tracker to be unlocked; restore the previous
        # lock state afterwards.
        previous_lock_state = self._tracker.locked
        self._tracker.unlock()
        self._tracker.untrack(variable)
        if previous_lock_state is True:
            self._tracker.lock()
base_optimizer_keyword_args = """name: String. The name to use
for momentum accumulator weights created by
the optimizer.
weight_decay: Float. If set, weight decay is applied.
clipnorm: Float. If set, the gradient of each weight is individually
clipped so that its norm is no higher than this value.
clipvalue: Float. If set, the gradient of each weight is clipped to be
no higher than this value.
global_clipnorm: Float. If set, the gradient of all weights is clipped
so that their global norm is no higher than this value.
use_ema: Boolean, defaults to `False`.
If `True`, exponential moving average
(EMA) is applied. EMA consists of computing an exponential moving
average of the weights of the model (as the weight values change
after each training batch), and periodically overwriting the
weights with their moving average.
ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`.
This is the momentum to use when computing
the EMA of the model's weights:
`new_average = ema_momentum * old_average + (1 - ema_momentum) *
current_variable_value`.
ema_overwrite_frequency: Int or None, defaults to None. Only used if
`use_ema=True`. Every `ema_overwrite_frequency` steps of iterations,
we overwrite the model variable by its moving average.
If None, the optimizer
does not overwrite model variables in the middle of training,
and you need to explicitly overwrite the variables
at the end of training by calling
`optimizer.finalize_variable_values()` (which updates the model
variables in-place). When using the built-in `fit()` training loop,
this happens automatically after the last epoch,
and you don't need to do anything.
loss_scale_factor: Float or `None`. If a float, the scale factor will
be multiplied the loss before computing gradients, and the inverse
of the scale factor will be multiplied by the gradients before
updating variables. Useful for preventing underflow during
mixed precision training. Alternately,
`keras.optimizers.LossScaleOptimizer` will
automatically set a loss scale factor.
gradient_accumulation_steps: Int or `None`. If an int, model & optimizer
variables will not be updated at every step; instead they will be
updated every `gradient_accumulation_steps` steps, using the average
value of the gradients since the last update. This is known as
"gradient accumulation". This can be useful
when your batch size is very small, in order to reduce gradient
noise at each update step. EMA frequency will look at "accumulated"
iterations value (optimizer steps // gradient_accumulation_steps).
Learning rate schedules will look at "real" iterations value
(optimizer steps).
"""
def global_norm(value_list):
    """Return the global (joint) L2 norm of all non-None tensors in
    `value_list`: sqrt(sum_i ||v_i||^2)."""
    # Collect the squared L2 norm of every tensor, skipping missing entries.
    per_tensor_sq = []
    for value in value_list:
        if value is not None:
            per_tensor_sq.append(ops.sum(ops.square(value)))
    # Total the per-tensor contributions, then take the square root.
    total_sq = ops.sum(ops.stack(per_tensor_sq))
    return ops.sqrt(total_sq)
def clip_by_global_norm(value_list, clip_norm):
    """Rescale the tensors in `value_list` so their joint L2 norm does not
    exceed `clip_norm`; `None` entries pass through unchanged."""
    total_norm = global_norm(value_list)
    # Rescaling ratio: clip_norm / max(total_norm, clip_norm). Equals 1.0
    # (a no-op) whenever total_norm <= clip_norm.
    scale_for_finite = clip_norm * ops.minimum(1.0 / total_norm, 1.0 / clip_norm)
    # If total_norm is any finite number, this is a no-op. For inf/-inf/NaN,
    # (total_norm - total_norm) is NaN, poisoning the scale so the bad norm
    # propagates to every clipped value.
    scale = scale_for_finite + (total_norm - total_norm)
    return [None if v is None else v * scale for v in value_list]
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@optimizers@base_optimizer.py@.PATH_END.py
|
{
"filename": "api.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/analysis_modules/star_analysis/api.py",
"type": "Python"
}
|
# Tombstone module: star_analysis was removed from yt. Importing this module
# raises immediately, pointing users at the yt_attic repository and docs where
# the retired code is archived.
from yt.utilities.exceptions import YTModuleRemoved
raise YTModuleRemoved(
    "star_analysis",
    "https://github.com/yt-project/yt_attic",
    "https://yt-attic.readthedocs.io/",
)
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@analysis_modules@star_analysis@api.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/image/hoverlabel/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Eagerly import the validator classes on Python < 3.7 (which lacks module
# __getattr__, PEP 562) and for static type checkers; otherwise register them
# lazily via plotly's `relative_import` helper, which defers each submodule
# import until the attribute is first accessed.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._namelengthsrc import NamelengthsrcValidator
    from ._namelength import NamelengthValidator
    from ._font import FontValidator
    from ._bordercolorsrc import BordercolorsrcValidator
    from ._bordercolor import BordercolorValidator
    from ._bgcolorsrc import BgcolorsrcValidator
    from ._bgcolor import BgcolorValidator
    from ._alignsrc import AlignsrcValidator
    from ._align import AlignValidator
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._namelengthsrc.NamelengthsrcValidator",
            "._namelength.NamelengthValidator",
            "._font.FontValidator",
            "._bordercolorsrc.BordercolorsrcValidator",
            "._bordercolor.BordercolorValidator",
            "._bgcolorsrc.BgcolorsrcValidator",
            "._bgcolor.BgcolorValidator",
            "._alignsrc.AlignsrcValidator",
            "._align.AlignValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@image@hoverlabel@__init__.py@.PATH_END.py
|
{
"filename": "test_svm_ibqk07.py",
"repo_name": "spacetelescope/drizzlepac",
"repo_path": "drizzlepac_extracted/drizzlepac-main/tests/hap/test_svm_ibqk07.py",
"type": "Python"
}
|
""" This module tests full pipeline SVM processing of a WFC3 dataset containing both IR and UVIS data.
The two detectors do NOT have the same WCS solution.
"""
import datetime
import os
import math
import numpy as np
import pytest
from drizzlepac.haputils import astroquery_utils as aqutils
from drizzlepac import runsinglehap
from astropy.io import fits, ascii
from astropy.table import Table
from pathlib import Path
"""
test_svm_ibqk07.py
This test file can be executed in the following manner:
$ pytest -s --basetemp=/internal/hladata/yourUniqueDirectoryHere test_svm_ibqk07.py >& ibqk07.log &
$ tail -f ibqk07.log
* Note: When running this test, the `--basetemp` directory should be set to a unique
existing directory to avoid deleting previous test output.
* The POLLER_FILE exists in the tests/hap directory with the PyTests.
* If running manually with `--basetemp`, the ibqk07.log file will still be written to the
originating directory.
"""
# Poller file (lives alongside the tests in tests/hap) naming the input exposures
POLLER_FILE = "wfc3_bqk_07_input.out"

# Substrings expected in the WCSNAME of each detector's products
WCS_IR_SUB_NAME = "HSC30"
WCS_UVIS_SUB_NAME = "FIT_SVM_GAIA"

# Expected source counts per total detection catalog
expected_total_point_sources = {
    "hst_12557_07_wfc3_ir_total_ibqk07_point-cat.ecsv": 2,
    "hst_12557_07_wfc3_uvis_total_ibqk07_point-cat.ecsv": 13}

expected_total_segment_sources= {
    "hst_12557_07_wfc3_ir_total_ibqk07_segment-cat.ecsv": 2,
    "hst_12557_07_wfc3_uvis_total_ibqk07_segment-cat.ecsv": 19}

# Fractional slack allowed on the expected source counts
tolerance = 0.25

# Reference mean MagAp2 values per filter-level catalog
MEAN_CAT_MAGAP2_POINT = {
    "hst_12557_07_wfc3_ir_f160w_ibqk07_point-cat.ecsv": 24.27,
    "hst_12557_07_wfc3_uvis_f555w_ibqk07_point-cat.ecsv": 25.36}

MEAN_CAT_MAGAP2_SEGMENT = {
    "hst_12557_07_wfc3_uvis_f555w_ibqk07_segment-cat.ecsv": 24.97,
    "hst_12557_07_wfc3_ir_f160w_ibqk07_segment-cat.ecsv": 18.76}

# Allowed deviation (passed as rtol to np.isclose) for the mean magnitudes
POINT_DIFF = 0.5
SEGMENT_DIFF = 0.5
@pytest.fixture(scope="module")
def read_csv_for_filenames():
    # Read the CSV poller file residing in the tests directory to extract the individual visit FLT/FLC filenames
    path = os.path.join(os.path.dirname(__file__), POLLER_FILE)
    table = ascii.read(path, format="no_header")
    # The poller file has no header row; the filename is the first column.
    filename_column = table.colnames[0]
    filenames = list(table[filename_column])
    print("\nread_csv_for_filenames. Filesnames from poller: {}".format(filenames))

    return filenames
@pytest.fixture(scope="module")
def gather_data_for_processing(read_csv_for_filenames, tmp_path_factory):
    """Download the FLT/FLC exposures named in the poller file and return
    the set of filenames to process.

    A fresh working directory is created (and made current) for the test.
    Only broad IPPPSS wildcard queries are sent to MAST; any retrieved
    files that are not named in the poller file are deleted afterwards.
    """
    # Create working directory specified for the test
    curdir = tmp_path_factory.mktemp(os.path.basename(__file__))
    os.chdir(curdir)

    # Establish FLC/FLT lists and obtain the requested data
    flc_flag = ""
    flt_flag = ""
    # In order to obtain individual FLC or FLT images from MAST (if the files are not reside on disk) which
    # may be part of an ASN, use only IPPPSS with a wildcard. The unwanted images have to be removed
    # after-the-fact.
    for fn in read_csv_for_filenames:
        if fn.lower().endswith("flc.fits") and flc_flag == "":
            flc_flag = fn[0:6] + "*"
        elif fn.lower().endswith("flt.fits") and flt_flag == "":
            flt_flag = fn[0:6] + "*"

        # If both flags have been set, then break out the loop early. It may be
        # that all files have to be checked which means the for loop continues
        # until its natural completion.
        if flc_flag and flt_flag:
            break

    # Get test data through astroquery - only retrieve the pipeline processed FLC and/or FLT files
    # (e.g., j*_flc.fits) as necessary. The logic here and the above for loop is an attempt to
    # avoid downloading too many images which are not needed for processing.
    flcfiles = []
    fltfiles = []
    if flc_flag:
        flcfiles = aqutils.retrieve_observation(flc_flag, suffix=["FLC"], product_type="pipeline")
    if flt_flag:
        fltfiles = aqutils.retrieve_observation(flt_flag, suffix=["FLT"], product_type="pipeline")

    flcfiles.extend(fltfiles)

    # Keep only the files which exist in BOTH lists for processing
    files_to_process = set(read_csv_for_filenames).intersection(set(flcfiles))

    # Identify unwanted files from the download list and remove from disk.
    # FIX: the try/except is now *inside* the loop so that one failed
    # deletion no longer aborts cleanup of the remaining files (previously
    # the handler also referenced the loop variable, which was unbound if
    # the loop had not started).
    files_to_remove = set(read_csv_for_filenames).symmetric_difference(set(flcfiles))
    for ftr in files_to_remove:
        try:
            os.remove(ftr)
        except Exception as x_cept:
            print("")
            print("Exception encountered: {}.".format(x_cept))
            print("The file {} could not be deleted from disk. ".format(ftr))
            print("Remove files which are not used for processing from disk manually.")

    print("\ngather_data_for_processing. Gathered data: {}".format(files_to_process))

    return files_to_process
@pytest.fixture(scope="module")
def gather_output_data(construct_manifest_filename):
    """Return all output filenames recorded in the pipeline manifest."""
    with open(construct_manifest_filename, 'r') as fout:
        files = [entry.rstrip("\n") for entry in fout.readlines()]
    print("\ngather_output_data.  Output data files: {}".format(files))
    return files
@pytest.fixture(scope="module")
def construct_manifest_filename(read_csv_for_filenames):
    """Build the expected manifest filename from the first input's header.

    The name has the form <instrument>_<prop>_<visit>_manifest.txt, where
    prop/visit come from characters 1-3 and 4-5 of ROOTNAME.
    """
    first_input = read_csv_for_filenames[0]
    inst = fits.getval(first_input, "INSTRUME", ext=0).lower()
    root = fits.getval(first_input, "ROOTNAME", ext=0).lower()
    manifest_filename = "_".join((inst, root[1:4], root[4:6], "manifest.txt"))
    print("\nconstruct_manifest_filename. Manifest filename: {}".format(manifest_filename))
    return manifest_filename
@pytest.fixture(scope="module", autouse=True)
def svm_setup(gather_data_for_processing):
    """Run the SVM pipeline (runsinglehap) once for the whole module.

    This is the time-consuming "Act" step; every test below examines the
    products it leaves in the working directory.
    """
    # Act: Process the input data by executing runsinglehap - time consuming activity
    current_dt = datetime.datetime.now()
    print(str(current_dt))
    print("\nsvm_setup fixture")

    # Read the "poller file" and download the input files, as necessary
    input_names = gather_data_for_processing

    # Run the SVM processing
    path = os.path.join(os.path.dirname(__file__), POLLER_FILE)
    try:
        status = runsinglehap.perform(path)

    # Catch anything that happens and report it. This is meant to catch unexpected errors and
    # generate sufficient output exception information so algorithmic problems can be addressed.
    except Exception as except_details:
        print(except_details)
        # BUG FIX: format the message before handing it to pytest.fail().
        # Previously `path` was passed as the second positional argument,
        # which is pytest.fail's boolean `pytrace` flag, and the "{}"
        # placeholder was never filled in.
        pytest.fail("\nsvm_setup. Exception Visit: {}\n".format(path))

    current_dt = datetime.datetime.now()
    print(str(current_dt))
# TESTS
def test_svm_manifest_name(construct_manifest_filename):
print("\ntest_svm_manifest.")
# Construct the manifest filename from the header of an input file in the list and check it exists.
path = Path(construct_manifest_filename)
print("\ntest_svm_manifest. Filename: {}".format(path))
# Ensure the manifest file uses the proper naming convention
assert(path.is_file())
def test_svm_wcs_ir(gather_output_data):
    """The IR total detection product must carry the expected WCS name."""
    print("\ntest_svm_wcs_ir.")
    # Total detection product (TDP) for the IR detector
    candidates = [out for out in gather_output_data
                  if "ir_total" in out.lower() and out.lower().endswith("drz.fits")]

    # Check the WCS solution is as expected
    wcsname = fits.getval(candidates[0], "WCSNAME", ext=1).upper()
    print("\ntest_svm_wcs_ir.  WCSNAME: {} Output file: {}".format(wcsname, candidates[0]))
    assert WCS_IR_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {candidates[0]}."
def test_svm_wcs_ir_all(gather_output_data):
    """Every IR drizzled product must share one and the same WCS solution."""
    print("\ntest_svm_wcs_ir_all.")
    ir_products = [out for out in gather_output_data
                   if "_ir_" in out.lower() and out.lower().endswith("drz.fits")]
    wcsnames = [fits.getval(product, "WCSNAME", ext=1).upper() for product in ir_products]
    assert len(set(wcsnames)) == 1, f"WCSNAMES are not all the same for the IR detector: {wcsnames}"
def test_svm_wcs_uvis(gather_output_data):
    """The UVIS total detection product must carry the expected WCS name."""
    print("\ntest_svm_wcs_uvis.")
    # Total detection product (TDP) for the UVIS detector
    candidates = [out for out in gather_output_data
                  if "uvis_total" in out.lower() and out.lower().endswith("drc.fits")]

    # Check the WCS solution is as expected
    wcsname = fits.getval(candidates[0], "WCSNAME", ext=1).upper()
    print("\ntest_svm_wcs_uvis.  WCSNAME: {} Output file: {}".format(wcsname, candidates[0]))
    assert WCS_UVIS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {candidates[0]}."
def test_svm_wcs_uvis_all(gather_output_data):
    """Every UVIS drizzled product must share one and the same WCS solution."""
    print("\ntest_svm_wcs_uvis_all.")
    uvis_products = [out for out in gather_output_data
                     if "_uvis_" in out.lower() and out.lower().endswith("drc.fits")]
    wcsnames = [fits.getval(product, "WCSNAME", ext=1).upper() for product in uvis_products]
    assert len(set(wcsnames)) == 1, f"WCSNAMES are not all the same for the UVIS detector: {wcsnames}"
# Due to the way the catalogs are filtered, check the size of the total catalog and one of the filter
# catalogs separately. The total catalog has the row removed for each source where the constituent
# filter catalogs *ALL* have flag>5 for the source. Rows are NOT removed from the filter table based on
# flag values.
@pytest.mark.skip(reason="Modifying tests and cannot reproduce failed result at this time - need for RC.")
def test_svm_point_total_cat(gather_output_data):
    # Check the output catalogs should contain the correct number of sources -- allows for a broad tolerance
    print("\ntest_svm_point_total_cat.")
    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("point-cat.ecsv")]

    # Row count per total point catalog
    num_sources = {tdp:len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}

    valid_cats = {}
    # Match each expected catalog to the file actually produced and check
    # its source count within a fractional `tolerance` of the reference.
    for tdp in expected_total_point_sources.keys():
        for file in tdp_files:
            if tdp in file:
                tol_limit = tolerance * expected_total_point_sources[tdp]
                valid_cats[tdp] = (file, np.isclose(expected_total_point_sources[tdp], num_sources[file], atol=tol_limit))
                break
    # Catalogs whose count fell outside the tolerance
    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
    assert len(bad_cats) == 0, f"Total Point Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_point_sources}"
@pytest.mark.skip(reason="Modifying tests and cannot reproduce failed result at this time - need for RC.")
def test_svm_segment_total_cat(gather_output_data):
    # Check the output catalogs should contain the correct number of sources -- allows for a broad tolerance
    print("\ntest_svm_segment_total_cat.")
    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("segment-cat.ecsv")]

    # Row count per total segment catalog
    num_sources = {tdp:len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}

    valid_cats = {}
    # Match each expected catalog to the file actually produced and check
    # its source count within a fractional `tolerance` of the reference.
    for tdp in expected_total_segment_sources.keys():
        for file in tdp_files:
            if tdp in file:
                tol_limit = tolerance * expected_total_segment_sources[tdp]
                valid_cats[tdp] = (file, np.isclose(expected_total_segment_sources[tdp], num_sources[file], atol=tol_limit))
                break
    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
    assert len(bad_cats) == 0, f"Total Segment Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_segment_sources}"
def test_svm_point_cat_meanmag(gather_output_data):
    # Filter-level point catalogs only (the "total" catalogs are excluded)
    cat_files = [files.lower() for files in gather_output_data if files.lower().endswith("point-cat.ecsv") and files.lower().find("total") < 0]

    # Compute the mean of the MagAp2 in the filtered catalogs and do not include flagged bad data
    Mag2_mean = {}
    for cat in cat_files:
        table = ascii.read(cat)
        Mag2_array = table['MagAp2'].data
        # -9999.0 is the sentinel meaning "no valid measurements"
        Mag2_mean[cat] = -9999.0
        if len(Mag2_array[Mag2_array > -9999.0]) > 0:
            Mag2_mean[cat] = Mag2_array[Mag2_array > -9999.0].mean()

    good_cats = {}
    # Compare each measured mean against its stored reference value,
    # allowing POINT_DIFF relative tolerance.
    for cat in MEAN_CAT_MAGAP2_POINT.keys():
        for file in cat_files:
            if cat == file:
                good_cats[cat] = (np.isclose(MEAN_CAT_MAGAP2_POINT[cat], Mag2_mean[cat], rtol=POINT_DIFF), Mag2_mean[cat])
                break
    bad_cats = [cat for cat in good_cats if not good_cats[cat][0]]
    assert len(bad_cats) == 0, f"Point Catalog(s) {bad_cats} had {good_cats} sources, expected {MEAN_CAT_MAGAP2_POINT}"
def test_svm_segment_cat_meanmag(gather_output_data):
    # Filter-level segment catalogs only (the "total" catalogs are excluded)
    cat_files = [files.lower() for files in gather_output_data if files.lower().endswith("segment-cat.ecsv") and files.lower().find("total") < 0]

    # Compute the mean of the MagAp2 in the filtered catalogs and do not include flagged bad data
    Mag2_mean = {}
    for cat in cat_files:
        table = ascii.read(cat)
        Mag2_array = table['MagAp2'].data
        # -9999.0 is the sentinel meaning "no valid measurements"
        Mag2_mean[cat] = -9999.0
        if len(Mag2_array[Mag2_array > -9999.0]) > 0:
            Mag2_mean[cat] = Mag2_array[Mag2_array > -9999.0].mean()

    good_cats = {}
    # Compare each measured mean against its stored reference value,
    # allowing SEGMENT_DIFF relative tolerance.
    for cat in MEAN_CAT_MAGAP2_SEGMENT.keys():
        for file in cat_files:
            if cat == file:
                good_cats[cat] = (np.isclose(MEAN_CAT_MAGAP2_SEGMENT[cat], Mag2_mean[cat], rtol=SEGMENT_DIFF), Mag2_mean[cat])
                break
    bad_cats = [cat for cat in good_cats if not good_cats[cat][0]]
    assert len(bad_cats) == 0, f"Segment Catalog(s) {bad_cats} had {good_cats} sources, expected {MEAN_CAT_MAGAP2_SEGMENT}"
|
spacetelescopeREPO_NAMEdrizzlepacPATH_START.@drizzlepac_extracted@drizzlepac-main@tests@hap@test_svm_ibqk07.py@.PATH_END.py
|
{
"filename": "emma.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/input/litdata/emma.py",
"type": "Python"
}
|
# From Moster et al. (2010), Table 7: best-fit parameters for the
# redshift-dependent stellar-mass--halo-mass relation (1-sigma
# uncertainties quoted in parentheses).
logM_0 = 11.88    # (0.01)  log10 characteristic halo mass at z=0
mu = 0.019        # (0.002) redshift evolution of the characteristic mass
N_0 = 0.0282      # (0.0003) normalization at z=0
nu = -0.72        # (0.06)  redshift evolution of the normalization
gamma_0 = 0.556   # (0.001) slope parameter at z=0
gamma_1 = -0.26   # (0.05)  redshift evolution of gamma
beta_0 = 1.06     # (0.06)  slope parameter at z=0
beta_1 = 0.17     # (0.12)  redshift evolution of beta
# Model 1: HOD population. Star-forming-fraction parameters A/B evolve as
# power laws in (1+z), C/D linearly in z; the luminosity-function
# normalization is constant; SMHM parameters are the Moster et al. (2010)
# Table 7 values defined above.
model1 = \
{
    'pop_sfr_model':'hod',

    # star-forming fraction - a, b dpl z dependance, other linear
    'pop_sf_A': 'pq[7]',
    'pq_func[7]': 'pl',
    'pq_func_var[7]': '1+z',
    'pq_func_par0[7]': -1.5,
    'pq_func_par1[7]': 1.0,
    'pq_func_par2[7]': 0.4,

    'pop_sf_B': 'pq[8]',
    'pq_func[8]': 'pl',
    'pq_func_var[8]': '1+z',
    'pq_func_par0[8]': -10.7,
    'pq_func_par1[8]': 1.0,
    'pq_func_par2[8]': -0.1,

    'pop_sf_C': 'pq[9]',
    'pq_func[9]': 'linear',
    'pq_func_var[9]': 'z',
    'pq_func_par0[9]': 1.8,  #const
    'pq_func_par1[9]': 0,    #offset
    'pq_func_par2[9]': 0.8,  #m

    'pop_sf_D': 'pq[10]',
    'pq_func[10]': 'linear',
    'pq_func_var[10]': 'z',
    'pq_func_par0[10]': 0.5, #const
    'pq_func_par1[10]': 0,   #offset
    'pq_func_par2[10]': 1.0, #m

    #for LF
    'pop_lf': 'pq[4]',
    'pq_func[4]': 'linear',
    'pq_func_var[4]': 'z',
    'pq_func_par0[4]': 3e-4,
    'pq_func_par1[4]': 0,
    'pq_func_par2[4]': 0,

    #for SMF - default parameters for dpl
    #beta
    'pop_smhm_beta': 'pq[0]',
    'pq_func[0]': 'linear',
    'pq_func_var[0]': 'z',
    'pq_func_par0[0]': beta_0,
    'pq_func_par1[0]': 0,
    'pq_func_par2[0]': beta_1,

    #norm
    'pop_smhm_n': 'pq[1]',
    'pq_func[1]': 'pl',
    'pq_func_var[1]': '1+z',
    'pq_func_par0[1]': N_0,
    'pq_func_par1[1]': 1.0,
    'pq_func_par2[1]': nu,

    #gamma
    'pop_smhm_gamma': 'pq[2]',
    'pq_func[2]': 'pl',
    'pq_func_var[2]': '1+z',
    'pq_func_par0[2]': gamma_0,
    'pq_func_par1[2]': 1.0,
    'pq_func_par2[2]': gamma_1,

    #peak mass
    'pop_smhm_m': 'pq[3]',
    'pq_func[3]': 'pl_10',
    'pq_func_var[3]': '1+z',
    'pq_func_par0[3]': logM_0,
    'pq_func_par1[3]': 1.0,
    'pq_func_par2[3]': mu,

    #SFR - added with log10(_1)
    'pop_sfr_1': 'pq[5]',
    'pq_func[5]': 'linear',
    'pq_func_var[5]': 't',
    'pq_func_par0[5]': 0.84,
    'pq_func_par1[5]': 0.,
    'pq_func_par2[5]': -0.026,

    'pop_sfr_2': 'pq[6]',
    'pq_func[6]': 'linear',
    'pq_func_var[6]': 't',
    'pq_func_par0[6]': 6.51,
    'pq_func_par1[6]': 0.,
    'pq_func_par2[6]': -0.11,
}
#SMF values updated from MCMC best fits
model2 = \
{
'pop_sfr_model':'hod',
#star-forming fraction - a, b dpl z dependance, other linear
'pop_sf_A': 'pq[7]',
'pq_func[7]': 'pl',
'pq_func_var[7]': '1+z',
'pq_func_par0[7]': -3.5,
'pq_func_par1[7]': 1.0,
'pq_func_par2[7]': 0.1,
'pop_sf_B': 'pq[8]',
'pq_func[8]': 'pl',
'pq_func_var[8]': '1+z',
'pq_func_par0[8]': -10.2,
'pq_func_par1[8]': 1.0,
'pq_func_par2[8]': 0.1,
'pop_sf_C': 'pq[9]',
'pq_func[9]': 'linear',
'pq_func_var[9]': 'z',
'pq_func_par0[9]': 2.1, #const
'pq_func_par1[9]': 0, #offset
'pq_func_par2[9]': 0.6, #m
'pop_sf_D': 'pq[10]',
'pq_func[10]': 'linear',
'pq_func_var[10]': 'z',
'pq_func_par0[10]': 1.05, #const
'pq_func_par1[10]': 0, #offset
'pq_func_par2[10]': 0.6, #m
#for LF
'pop_lf': 'pq[4]',
'pq_func[4]': 'linear',
'pq_func_var[4]': 'z',
'pq_func_par0[4]': 1.52e-06,
'pq_func_par1[4]': 0.13,
'pq_func_par2[4]': 6.69e-05,
#for SMF - best fit parameters for dpl
#beta
'pop_smhm_beta': 'pq[0]',
'pq_func[0]': 'linear',
'pq_func_var[0]': 'z',
'pq_func_par0[0]': 0.8828985178317218,
'pq_func_par1[0]': 0,
'pq_func_par2[0]': -0.03363387618820308,
#norm
'pop_smhm_n': 'pq[1]',
'pq_func[1]': 'pl',
'pq_func_var[1]': '1+z',
'pq_func_par0[1]': 0.010358061397412294,
'pq_func_par1[1]': 1.0,
'pq_func_par2[1]': 0.28690793780049106,
#gamma
'pop_smhm_gamma': 'pq[2]',
'pq_func[2]': 'pl',
'pq_func_var[2]': '1+z',
'pq_func_par0[2]': 0.5633902051902832,
'pq_func_par1[2]': 1.0,
'pq_func_par2[2]': 0.18194904277970236,
#peak mass
'pop_smhm_m': 'pq[3]',
'pq_func[3]': 'pl_10',
'pq_func_var[3]': '1+z',
'pq_func_par0[3]': 11.750289778904255,
'pq_func_par1[3]': 1.0,
'pq_func_par2[3]': 1.855774245368317,
#SFR - added with log10(_1)
'pop_sfr_1': 'pq[5]',
'pq_func[5]': 'linear',
'pq_func_var[5]': 't',
'pq_func_par0[5]': 0.84,
'pq_func_par1[5]': 0.,
'pq_func_par2[5]': -0.026,
'pop_sfr_2': 'pq[6]',
'pq_func[6]': 'linear',
'pq_func_var[6]': 't',
'pq_func_par0[6]': 6.51,
'pq_func_par1[6]': 0.,
'pq_func_par2[6]': -0.11,
}
#SMF values updated from MCMC best fits
model3 = \
{
'pop_sfr_model':'hod',
#star-forming fraction - all dpl z dependance
'pop_sf_A': 'pq[7]',
'pq_func[7]': 'pl',
'pq_func_var[7]': '1+z',
'pq_func_par0[7]': -3.5,
'pq_func_par1[7]': 1.0,
'pq_func_par2[7]': 0.1,
'pop_sf_B': 'pq[8]',
'pq_func[8]': 'pl',
'pq_func_var[8]': '1+z',
'pq_func_par0[8]': -10.2,
'pq_func_par1[8]': 1.0,
'pq_func_par2[8]': 0.1,
'pop_sf_C': 'pq[9]',
'pq_func[9]': 'pl',
'pq_func_var[9]': '1+z',
'pq_func_par0[9]': 2.1,
'pq_func_par1[9]': 1.0,
'pq_func_par2[9]': 0.4,
'pop_sf_D': 'pq[10]',
'pq_func[10]': 'pl',
'pq_func_var[10]': '1+z',
'pq_func_par0[10]': 1.05,
'pq_func_par1[10]': 1.0,
'pq_func_par2[10]': 0.4,
#for LF
'pop_lf': 'pq[4]',
'pq_func[4]': 'linear',
'pq_func_var[4]': 'z',
'pq_func_par0[4]': 1.52e-06,
'pq_func_par1[4]': 0.13,
'pq_func_par2[4]': 6.69e-05,
#for SMF - best fit parameters for dpl
#beta
'pop_smhm_beta': 'pq[0]',
'pq_func[0]': 'linear',
'pq_func_var[0]': 'z',
'pq_func_par0[0]': 0.8828985178317218,
'pq_func_par1[0]': 0,
'pq_func_par2[0]': -0.03363387618820308,
#norm
'pop_smhm_n': 'pq[1]',
'pq_func[1]': 'pl',
'pq_func_var[1]': '1+z',
'pq_func_par0[1]': 0.010358061397412294,
'pq_func_par1[1]': 1.0,
'pq_func_par2[1]': 0.28690793780049106,
#gamma
'pop_smhm_gamma': 'pq[2]',
'pq_func[2]': 'pl',
'pq_func_var[2]': '1+z',
'pq_func_par0[2]': 0.5633902051902832,
'pq_func_par1[2]': 1.0,
'pq_func_par2[2]': 0.18194904277970236,
#peak mass
'pop_smhm_m': 'pq[3]',
'pq_func[3]': 'pl_10',
'pq_func_var[3]': '1+z',
'pq_func_par0[3]': 11.750289778904255,
'pq_func_par1[3]': 1.0,
'pq_func_par2[3]': 1.855774245368317,
#SFR - added with log10(_1)
'pop_sfr_1': 'pq[5]',
'pq_func[5]': 'linear',
'pq_func_var[5]': 't',
'pq_func_par0[5]': 0.84,
'pq_func_par1[5]': 0.,
'pq_func_par2[5]': -0.026,
'pop_sfr_2': 'pq[6]',
'pq_func[6]': 'linear',
'pq_func_var[6]': 't',
'pq_func_par0[6]': 6.51,
'pq_func_par1[6]': 0.,
'pq_func_par2[6]': -0.11,
}
model4 = \
{
'pop_sfr_model':'hod',
#star-forming fraction - a, b linear z dependance, c, d constant
'pop_sf_A': 'pq[7]',
'pq_func[7]': 'linear',
'pq_func_var[7]': 'z',
'pq_func_par0[7]': -1.2, #const
'pq_func_par1[7]': 0.5,
'pq_func_par2[7]': -0.4, #m
'pop_sf_B': 'pq[8]',
'pq_func[8]': 'linear',
'pq_func_var[8]': 'z',
'pq_func_par0[8]': -10.7,
'pq_func_par1[8]': 0.0,
'pq_func_par2[8]': -0.1,
'pop_sf_C': 'pq[9]',
'pq_func[9]': 'linear',
'pq_func_var[9]': 'z',
'pq_func_par0[9]': 2.0, #const
'pq_func_par1[9]': 0,
'pq_func_par2[9]': 0,
'pop_sf_D': 'pq[10]',
'pq_func[10]': 'linear',
'pq_func_var[10]': 'z',
'pq_func_par0[10]': 1.0, #const
'pq_func_par1[10]': 0,
'pq_func_par2[10]': 0,
#for LF
'pop_lf': 'pq[4]',
'pq_func[4]': 'linear',
'pq_func_var[4]': 'z',
'pq_func_par0[4]': 3e-4,
'pq_func_par1[4]': 0,
'pq_func_par2[4]': 0,
#for SMF - default parameters for dpl
#beta
'pop_smhm_beta': 'pq[0]',
'pq_func[0]': 'linear',
'pq_func_var[0]': 'z',
'pq_func_par0[0]': beta_0,
'pq_func_par1[0]': 0,
'pq_func_par2[0]': beta_1,
#norm
'pop_smhm_n': 'pq[1]',
'pq_func[1]': 'pl',
'pq_func_var[1]': '1+z',
'pq_func_par0[1]': N_0,
'pq_func_par1[1]': 1.0,
'pq_func_par2[1]': nu,
#gamma
'pop_smhm_gamma': 'pq[2]',
'pq_func[2]': 'pl',
'pq_func_var[2]': '1+z',
'pq_func_par0[2]': gamma_0,
'pq_func_par1[2]': 1.0,
'pq_func_par2[2]': gamma_1,
#peak mass
'pop_smhm_m': 'pq[3]',
'pq_func[3]': 'pl_10',
'pq_func_var[3]': '1+z',
'pq_func_par0[3]': logM_0,
'pq_func_par1[3]': 1.0,
'pq_func_par2[3]': mu,
#SFR - added with log10(_1)
'pop_sfr_1': 'pq[5]',
'pq_func[5]': 'linear',
'pq_func_var[5]': 't',
'pq_func_par0[5]': 0.84,
'pq_func_par1[5]': 0.,
'pq_func_par2[5]': -0.026,
'pop_sfr_2': 'pq[6]',
'pq_func[6]': 'linear',
'pq_func_var[6]': 't',
'pq_func_par0[6]': 6.51,
'pq_func_par1[6]': 0.,
'pq_func_par2[6]': -0.11,
}
#Best fit values for sf_fract and SMF
model5 = \
{
'pop_sfr_model':'hod',
#star-forming fraction - a, b dpl z dependance, other linear
'pop_sf_A': 'pq[7]',
'pq_func[7]': 'pl',
'pq_func_var[7]': '1+z',
'pq_func_par0[7]': -1.1847036287831372,
'pq_func_par1[7]': 1.0,
'pq_func_par2[7]': 0.41859919954205005,
'pop_sf_B': 'pq[8]',
'pq_func[8]': 'pl',
'pq_func_var[8]': '1+z',
'pq_func_par0[8]': -10.70135313806234,
'pq_func_par1[8]': 1.0,
'pq_func_par2[8]': -0.022495626306877965,
'pop_sf_C': 'pq[9]',
'pq_func[9]': 'linear',
'pq_func_var[9]': 'z',
'pq_func_par0[9]': 1.7148405951932986, #const
'pq_func_par1[9]': 0, #offset
'pq_func_par2[9]': 0.8788516704318952, #m
'pop_sf_D': 'pq[10]',
'pq_func[10]': 'linear',
'pq_func_var[10]': 'z',
'pq_func_par0[10]': 0.5368355259379274, #const
'pq_func_par1[10]': 0, #offset
'pq_func_par2[10]': 0.9642440270517514, #m
#for LF
'pop_lf': 'pq[4]',
'pq_func[4]': 'linear',
'pq_func_var[4]': 'z',
'pq_func_par0[4]': 3e-4,
'pq_func_par1[4]': 0,
'pq_func_par2[4]': 0,
#for SMF - best fit parameters for dpl
#beta
'pop_smhm_beta': 'pq[0]',
'pq_func[0]': 'linear',
'pq_func_var[0]': 'z',
'pq_func_par0[0]': 0.8828985178317218,
'pq_func_par1[0]': 0,
'pq_func_par2[0]': -0.03363387618820308,
#norm
'pop_smhm_n': 'pq[1]',
'pq_func[1]': 'pl',
'pq_func_var[1]': '1+z',
'pq_func_par0[1]': 0.010358061397412294,
'pq_func_par1[1]': 1.0,
'pq_func_par2[1]': 0.28690793780049106,
#gamma
'pop_smhm_gamma': 'pq[2]',
'pq_func[2]': 'pl',
'pq_func_var[2]': '1+z',
'pq_func_par0[2]': 0.5633902051902832,
'pq_func_par1[2]': 1.0,
'pq_func_par2[2]': 0.18194904277970236,
#peak mass
'pop_smhm_m': 'pq[3]',
'pq_func[3]': 'pl_10',
'pq_func_var[3]': '1+z',
'pq_func_par0[3]': 11.750289778904255,
'pq_func_par1[3]': 1.0,
'pq_func_par2[3]': 1.855774245368317,
#SFR - added with log10(_1)
'pop_sfr_1': 'pq[5]',
'pq_func[5]': 'linear',
'pq_func_var[5]': 't',
'pq_func_par0[5]': 0.84,
'pq_func_par1[5]': 0.,
'pq_func_par2[5]': -0.026,
'pop_sfr_2': 'pq[6]',
'pq_func[6]': 'linear',
'pq_func_var[6]': 't',
'pq_func_par0[6]': 6.51,
'pq_func_par1[6]': 0.,
'pq_func_par2[6]': -0.11,
}
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@input@litdata@emma.py@.PATH_END.py
|
{
"filename": "shared_to_item.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/utils/tap/model/shared_to_item.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
TAP plus
=============
@author: Javier Duran
@contact: javier.duran@sciops.esa.int
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 28 sep. 2018
"""
class TapSharedToItem:
    """A single TAP "shared to" entry.

    Wraps the SAX attribute set of a shared-item XML element, exposing
    the identifier of the share target, the share type and the share mode.
    """

    def __init__(self, attrs):
        """Build the item from a SAX attribute set.

        Parameters
        ----------
        attrs : SAX attributes object
            Must provide values for 'shareTo', 'shareType' and 'shareMode'.
        """
        self.__attributes = attrs
        self.id = attrs.getValue("shareTo")
        self.type = attrs.getValue("shareType")
        self.mode = attrs.getValue("shareMode")

    def __str__(self):
        parts = (f"Shared to item: {self.id}",
                 f"\tType: {self.type}",
                 f"\tMode: {self.mode}")
        return "\n".join(parts)
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@utils@tap@model@shared_to_item.py@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/violin/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `violin.y` data-array property."""

    def __init__(self, plotly_name="y", parent_name="violin", **kwargs):
        super(YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Changing y triggers a recalculation and clears cached axis types
            edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@violin@_y.py@.PATH_END.py
|
{
"filename": "constants.py",
"repo_name": "discsim/frank",
"repo_path": "frank_extracted/frank-master/frank/constants.py",
"type": "Python"
}
|
# Frankenstein: 1D disc brightness profile reconstruction from Fourier data
# using non-parametric Gaussian Processes
#
# Copyright (C) 2019-2020 R. Booth, J. Jennings, M. Tazzari
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>
#
"""This module contains useful conversion constants."""
import numpy as np
rad_to_arcsec = 3600 * 180 / np.pi
sterad_to_arcsec = rad_to_arcsec ** 2
deg_to_rad = np.pi / 180
|
discsimREPO_NAMEfrankPATH_START.@frank_extracted@frank-master@frank@constants.py@.PATH_END.py
|
{
"filename": "test_dataset.py",
"repo_name": "ska-sa/katdal",
"repo_path": "katdal_extracted/katdal-master/katdal/test/test_dataset.py",
"type": "Python"
}
|
###############################################################################
# Copyright (c) 2018-2019,2021-2024, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Tests for :py:mod:`katdal.dataset`."""
import logging
import numpy as np
import pytest
from katpoint import Antenna, Target, Timestamp, rad2deg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from katdal.categorical import CategoricalData
from katdal.dataset import (DEFAULT_VIRTUAL_SENSORS, DataSet, Subarray,
_selection_to_list, parse_url_or_path)
from katdal.sensordata import SensorCache
from katdal.spectral_window import SpectralWindow
# Two antenna description strings parsed by katpoint.Antenna
ANTENNAS = [
    Antenna('m000, -30:42:39.8, 21:26:38.0, 1086.6, 13.5, -8.264 -207.29 8.5965'),
    Antenna('m063, -30:42:39.8, 21:26:38.0, 1086.6, 13.5, -3419.5845 -1840.48 16.3825')
]

# Correlation products: H/V autocorrelations of each antenna plus the
# HH and VV cross-correlations between the two antennas
CORRPRODS = [
    ('m000h', 'm000h'), ('m000v', 'm000v'),
    ('m063h', 'm063h'), ('m063v', 'm063v'),
    ('m000h', 'm063h'), ('m000v', 'm063v')
]

SUBARRAY = Subarray(ANTENNAS, CORRPRODS)

# 16-channel spectral window: 856 MHz bandwidth centred on 1284 MHz
SPW = SpectralWindow(
    centre_freq=1284e6, channel_width=0, num_chans=16, sideband=1, bandwidth=856e6
)
class MinimalDataSet(DataSet):
    """Minimal data set containing a series of slews and tracks.

    The timestamps are divided evenly into compound scans (one per target).
    Each compound scan consists of a 1-dump slew followed by a track.

    This has to be a derived class instead of a factory function or fixture
    because :class:`DataSet` is abstract. (XXX Actually it only needs
    timestamps to be implemented for these tests to work, so it is nearly
    there.)

    Parameters
    ----------
    targets : list of :class:`katpoint.Target`
    timestamps : array of float
    subarray : :class:`katdal.dataset.Subarray`
    spectral_window : :class:`katdal.spectral_window.SpectralWindow`
    """

    def __init__(self, targets, timestamps, subarray=SUBARRAY, spectral_window=SPW):
        super().__init__(name='test', ref_ant='array')
        num_dumps = len(timestamps)
        num_chans = spectral_window.num_chans
        num_corrprods = len(subarray.corr_products)
        # Assumes at least two equally-spaced timestamps
        dump_period = timestamps[1] - timestamps[0]

        num_compscans = len(targets)
        num_dumps_per_compscan = num_dumps // num_compscans
        assert num_dumps_per_compscan * num_compscans == num_dumps, \
            "len(timestamps) should be an integer multiple of len(targets)"
        # Dump index where each compound scan starts, and boundaries incl. end
        compscan_starts = np.arange(0, num_dumps, num_dumps_per_compscan)
        compscan_events = np.r_[compscan_starts, num_dumps]
        # Each slew contains just 1 dump to make things simple
        scan_events = sorted(np.r_[compscan_starts, compscan_starts + 1, num_dumps])

        target_sensor = CategoricalData(targets, compscan_events)

        def constant_sensor(value):
            # Sensor holding a single value for the entire observation
            return CategoricalData([value], [0, num_dumps])

        self.subarrays = [subarray]
        self.spectral_windows = [spectral_window]
        sensors = {}
        sensors['Observation/spw_index'] = constant_sensor(0)
        sensors['Observation/subarray_index'] = constant_sensor(0)

        for ant in subarray.ants:
            sensors[f'Antennas/{ant.name}/antenna'] = constant_sensor(ant)
            # Per-antenna pointing: az/el towards the current target over
            # each compound scan's time segment, concatenated into one array
            ant_az = []
            ant_el = []
            for segment, target in target_sensor.segments():
                az, el = target.azel(timestamps[segment], ant)
                ant_az.append(az)
                ant_el.append(el)
            sensors[f'Antennas/{ant.name}/az'] = np.concatenate(ant_az)
            sensors[f'Antennas/{ant.name}/el'] = np.concatenate(ant_el)
        array_ant = subarray.ants[0].array_reference_antenna()
        sensors['Antennas/array/antenna'] = constant_sensor(array_ant)

        compscan_sensor = CategoricalData(range(num_compscans), compscan_events)
        label_sensor = CategoricalData(['track'] * num_compscans, compscan_events)
        sensors['Observation/target'] = target_sensor
        sensors['Observation/compscan_index'] = compscan_sensor
        sensors['Observation/target_index'] = compscan_sensor
        sensors['Observation/label'] = label_sensor
        # Scans alternate slew -> track within every compound scan
        scan_sensor = CategoricalData(range(2 * num_compscans), scan_events)
        state_sensor = CategoricalData(['slew', 'track'] * num_compscans, scan_events)
        sensors['Observation/scan_index'] = scan_sensor
        sensors['Observation/scan_state'] = state_sensor

        self._timestamps = timestamps
        # Keep everything selected initially (all dumps/channels/corrprods)
        self._time_keep = np.full(num_dumps, True, dtype=bool)
        self._freq_keep = np.full(num_chans, True, dtype=bool)
        self._corrprod_keep = np.full(num_corrprods, True, dtype=bool)
        self.dump_period = dump_period
        # Observation extends half a dump beyond the first/last centroid
        self.start_time = Timestamp(timestamps[0] - 0.5 * dump_period)
        self.end_time = Timestamp(timestamps[-1] + 0.5 * dump_period)
        self.sensor = SensorCache(sensors, timestamps, dump_period,
                                  keep=self._time_keep,
                                  virtual=DEFAULT_VIRTUAL_SENSORS)
        self.catalogue.add(targets)
        self.catalogue.antenna = array_ant
        self.select(spw=0, subarray=0)

    @property
    def timestamps(self):
        # Timestamps filtered by the current time selection
        return self._timestamps[self._time_keep]
def test_parse_url_or_path():
    """Check URL and filesystem-path handling of parse_url_or_path."""
    # Normal URLs and empty strings pass right through
    for passthrough in ('https://archive/file', ''):
        assert parse_url_or_path(passthrough).geturl() == passthrough
    # Relative paths are turned into absolute paths and gain a 'file' scheme
    relative_url = parse_url_or_path('dir/filename.rdb')
    assert relative_url.scheme == 'file'
    prefix, middle, suffix = relative_url.path.rpartition('dir/filename.rdb')
    assert len(prefix) > 0
    assert middle == 'dir/filename.rdb'
    assert len(suffix) == 0
    # Absolute paths remain the same (just gaining a 'file' scheme)
    absolute_url = parse_url_or_path('/dir/filename.rdb')
    assert absolute_url.scheme == 'file'
    assert absolute_url.path == '/dir/filename.rdb'
def test_selection_to_list():
    """Check _selection_to_list on empty, string, sequence and scalar inputs."""
    # Each case maps an input selection to the expected list.
    cases = [
        # Empty selections of either flavour
        ('', []),
        ([], []),
        # Comma-separated names are split (and stripped), sequences listified
        ('a,b,c', ['a', 'b', 'c']),
        ('a, b,c', ['a', 'b', 'c']),
        (['a', 'b', 'c'], ['a', 'b', 'c']),
        (('a', 'b', 'c'), ['a', 'b', 'c']),
        ('a', ['a']),
        # Arbitrary objects pass through, while scalars are wrapped in a list
        ([1, 2, 3], [1, 2, 3]),
        (1, [1]),
    ]
    for selection, expected in cases:
        assert _selection_to_list(selection) == expected
    # Named groups expand via keyword arguments
    assert _selection_to_list('all', all=['a', 'b']) == ['a', 'b']
class TestVirtualSensors:
    """Compare the dataset's virtual sensors against direct katpoint results."""

    def setup_method(self):
        self.target = Target('PKS1934-638, radec, 19:39, -63:42')
        # Pick a time when the source is up as that seems more realistic
        self.timestamps = 1234667890.0 + 1.0 * np.arange(10)
        self.dataset = MinimalDataSet([self.target], self.timestamps)
        self.antennas = self.dataset.subarrays[0].ants
        self.array_ant = self.dataset.sensor.get('Antennas/array/antenna')[0]

    def test_timestamps(self):
        """MJD and LST sensors match direct conversions of the timestamps."""
        expected_mjd = Timestamp(self.timestamps[0]).to_mjd()
        assert self.dataset.mjd[0] == expected_mjd
        expected_lst = self.array_ant.local_sidereal_time(self.timestamps)
        # Convert LST from radians (katpoint) to hours (katdal)
        assert_array_equal(self.dataset.lst, expected_lst * (12 / np.pi))

    def test_pointing(self):
        """Pointing sensors (az/el, ra/dec, parangle, target x/y) match katpoint."""
        azimuth, elevation = self.target.azel(self.timestamps, self.antennas[1])
        assert_array_equal(self.dataset.az[:, 1], rad2deg(azimuth))
        assert_array_equal(self.dataset.el[:, 1], rad2deg(elevation))
        right_asc, declination = self.target.radec(self.timestamps, self.antennas[0])
        assert_array_almost_equal(self.dataset.ra[:, 0], rad2deg(right_asc), decimal=5)
        assert_array_almost_equal(self.dataset.dec[:, 0], rad2deg(declination), decimal=5)
        parangle = self.target.parallactic_angle(self.timestamps, self.antennas[0])
        # TODO: Check why this is so poor... see SR-1882 for progress on this
        assert_array_almost_equal(self.dataset.parangle[:, 0], rad2deg(parangle), decimal=0)
        tx, ty = self.target.sphere_to_plane(azimuth, elevation,
                                             self.timestamps, self.antennas[1])
        assert_array_equal(self.dataset.target_x[:, 1], rad2deg(tx))
        assert_array_equal(self.dataset.target_y[:, 1], rad2deg(ty))

    def test_uvw(self):
        """Baseline (u, v, w) equals the difference of the per-antenna values."""
        uvw0 = self.target.uvw(self.antennas[0], self.timestamps, self.array_ant)
        uvw1 = self.target.uvw(self.antennas[1], self.timestamps, self.array_ant)
        expected = [ant0 - ant1 for ant0, ant1 in zip(uvw0, uvw1)]
        # Both the H (index 4) and V (index 5) products of this baseline
        # share the same (u, v, w) coordinates.
        for corrprod in (4, 5):
            actual = (self.dataset.u[:, corrprod],
                      self.dataset.v[:, corrprod],
                      self.dataset.w[:, corrprod])
            for got, want in zip(actual, expected):
                assert_array_equal(got, want)
@pytest.fixture
def dataset():
    """A basic dataset used to test the selection mechanism."""
    descriptions = [
        # It would have been nice to have radec = 19:39, -63:42 but then
        # selection by description string does not work because the catalogue's
        # description string pads it out to radec = 19:39:00.00, -63:42:00.0.
        # (XXX Maybe fix Target comparison in katpoint to support this?)
        'J1939-6342 | PKS1934-638, radec bpcal, 19:39:25.03, -63:42:45.6',
        'J1939-6342, radec gaincal, 19:39:25.03, -63:42:45.6',
        'J0408-6545 | PKS 0408-65, radec bpcal, 4:08:20.38, -65:45:09.1',
        'J1346-6024 | Cen B, radec, 13:46:49.04, -60:24:29.4',
    ]
    targets = [Target(description) for description in descriptions]
    # Ensure that len(timestamps) is an integer multiple of len(targets)
    timestamps = 1234667890.0 + 1.0 * np.arange(12)
    return MinimalDataSet(targets, timestamps)
def test_selecting_antenna(dataset):
    """Deselecting m000 leaves only the m063 auto-correlation products."""
    dataset.select(ants='~m000')
    expected = [('m063h', 'm063h'), ('m063v', 'm063v')]
    assert_array_equal(dataset.corr_products, expected)
@pytest.mark.parametrize(
    'scans,expected_dumps',
    [
        # Select by scan state (with optional negation)
        ('track', [1, 2, 4, 5, 7, 8, 10, 11]),
        ('slew', [0, 3, 6, 9]),
        ('~slew', [1, 2, 4, 5, 7, 8, 10, 11]),
        (('track', 'slew'), np.arange(12)),
        (('track', '~slew'), [1, 2, 4, 5, 7, 8, 10, 11]),
        # Select by scan index
        (1, [1, 2]),
        ([1, 3, 4], [1, 2, 4, 5, 6]),
        # Empty selections
        ('scan', []),
        (-1, []),
        (100, []),
    ]
)
def test_select_scans(dataset, scans, expected_dumps):
    """Selecting scans by state or index yields the expected dumps."""
    dataset.select(scans=scans)
    assert_array_equal(dataset.dumps, expected_dumps)
@pytest.mark.parametrize(
    'compscans,expected_dumps',
    [
        # Select by compound scan label
        ('track', np.arange(12)),
        # Select by compound scan index
        (1, [3, 4, 5]),
        ([1, 3], [3, 4, 5, 9, 10, 11]),
        # Empty selections
        ('~track', []),
        (-1, []),
        (100, []),
    ]
)
def test_select_compscans(dataset, compscans, expected_dumps):
    """Selecting compound scans by label or index yields the expected dumps."""
    dataset.select(compscans=compscans)
    assert_array_equal(dataset.dumps, expected_dumps)
@pytest.mark.parametrize(
    'targets,expected_dumps',
    [
        # Select by (alternate) name, singly or in groups
        ('PKS1934-638', [0, 1, 2]),
        ('J1939-6342', [0, 1, 2, 3, 4, 5]),
        (('J0408-6545', 'Cen B'), [6, 7, 8, 9, 10, 11]),
        # Select by full description string or Target object
        ('J1939-6342, radec gaincal, 19:39:25.03, -63:42:45.6', [3, 4, 5]),
        (Target('J1939-6342, radec gaincal, 19:39:25.03, -63:42:45.6'), [3, 4, 5]),
        (
            [
                Target('J1939-6342, radec gaincal, 19:39:25.03, -63:42:45.6'),
                'J1346-6024 | Cen B, radec, 13:46:49.04, -60:24:29.4',
            ],
            [3, 4, 5, 9, 10, 11]
        ),
        # Select by target index
        (1, [3, 4, 5]),
        ([1, 3], [3, 4, 5, 9, 10, 11]),
        # Empty selections
        ('Moon', []),
        ('J1939-6342, radec gaincal, 19:39, -63:42', []),
        (Target('Sun, special'), []),
    ]
)
def test_select_targets(caplog, dataset, targets, expected_dumps):
    """Selecting targets by name, description, object or index works."""
    with caplog.at_level(logging.WARNING, logger='katdal.dataset'):
        dataset.select(targets=targets)
    if not expected_dumps:
        # Unknown targets are skipped with a warning instead of raising.
        assert 'Skipping unknown selected target' in caplog.text
    assert_array_equal(dataset.dumps, expected_dumps)
@pytest.mark.parametrize(
    'target_tags,expected_dumps',
    [
        # Select by single tag, list/tuple of tags or comma-separated tags
        ('radec', np.arange(12)),
        ('bpcal', [0, 1, 2, 6, 7, 8]),
        ('gaincal', [3, 4, 5]),
        ('gaincal,incorrect_tag', [3, 4, 5]),
        (['gaincal'], [3, 4, 5]),
        ('bpcal,gaincal', [0, 1, 2, 3, 4, 5, 6, 7, 8]),
        (('bpcal', 'gaincal'), [0, 1, 2, 3, 4, 5, 6, 7, 8]),
        # Empty selection
        ('incorrect_tag', []),
    ]
)
def test_select_target_tags(caplog, dataset, target_tags, expected_dumps):
    """Selecting targets by tag yields the expected dumps (unknown tags warn)."""
    with caplog.at_level(logging.WARNING, logger='katdal.dataset'):
        dataset.select(target_tags=target_tags)
    if 'incorrect_tag' in target_tags:
        # Unknown tags are skipped with a warning instead of raising.
        assert 'Skipping unknown selected target tag' in caplog.text
    assert_array_equal(dataset.dumps, expected_dumps)
|
ska-saREPO_NAMEkatdalPATH_START.@katdal_extracted@katdal-master@katdal@test@test_dataset.py@.PATH_END.py
|
{
"filename": "test_fitsblob.py",
"repo_name": "transientskp/pyse",
"repo_path": "pyse_extracted/pyse-master/test/test_fitsblob.py",
"type": "Python"
}
|
"""
Try the in memory fits stream Accessor
"""
import os
import unittest
from astropy.io.fits import open as fitsopen
from sourcefinder.accessors import open as tkpopen
from sourcefinder.testutil.decorators import requires_data
from sourcefinder.accessors.fitsimageblob import FitsImageBlob
from .conftest import DATAPATH
FITS_FILE = os.path.join(DATAPATH, 'accessors/aartfaac.fits')
@requires_data(FITS_FILE)
class PyfitsFitsImage(unittest.TestCase):
    """Exercise the FITS accessors on an in-memory AARTFAAC HDU list."""

    def setUp(self):
        # Keep the astropy HDU list around so it can be fed to FitsImageBlob.
        # (Fixed misspelling: was 'hudelist'.)
        self.hdulist = fitsopen(FITS_FILE)

    def tearDown(self):
        # Close the HDU list so the underlying file handle is not leaked.
        self.hdulist.close()

    def test_tkp_open(self):
        """Opening the file via the sourcefinder entry point yields an accessor."""
        accessor = tkpopen(FITS_FILE)
        self.assertIsNotNone(accessor)

    def test_fits_blob_accessor(self):
        """Constructing FitsImageBlob from an open HDU list yields an accessor."""
        accessor = FitsImageBlob(self.hdulist)
        self.assertIsNotNone(accessor)
|
transientskpREPO_NAMEpysePATH_START.@pyse_extracted@pyse-master@test@test_fitsblob.py@.PATH_END.py
|
{
"filename": "tpu_sharding.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/tpu/tpu_sharding.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for sharding during TPU compilation."""
from tensorflow.python.framework import tensor_shape
_DEFAULT_NUMBER_OF_SHARDS = 1
_DEFAULT_SHARD_DIMENSION = 0
# TODO(b/36777903) change other parts of tpu.py to use this class.
class ShardingPolicy(object):
  """An object used to hold the sharding policy for a Tensor."""

  def __init__(self):
    self._number_of_shards = None
    self._number_of_partitions = 1
    self._shard_dimension = None
    # Once frozen (see freeze()), the policy can no longer be modified.
    self._frozen = False

  def __str__(self):
    if self.number_of_shards is None or self.shard_dimension is None:
      return "ShardingPolicy(unset)"
    else:
      return ("ShardingPolicy(%d shards dimension %d)" %
              (self.number_of_shards, self.shard_dimension))

  def _fill_default_values(self):
    # Replace any unset fields with the module-level defaults.
    if self._number_of_shards is None:
      self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
    if self._shard_dimension is None:
      self._shard_dimension = tensor_shape.as_dimension(
          _DEFAULT_SHARD_DIMENSION)

  def freeze(self):
    """Prevents further modification to the sharding policy.

    Any values that have not been set when freeze is called are set to
    defaults. If the ShardingPolicy is already frozen, this is a NoOp.
    """
    if not self._frozen:
      self._fill_default_values()
      self._frozen = True

  @property
  def number_of_shards(self):
    """Returns the number of shards in the policy or None if unspecified."""
    return self._number_of_shards

  def set_number_of_shards(self, number_of_shards):
    """Sets the number of shards for the current policy.

    If the policy has been frozen then number_of_shards must match the
    existing setting.

    Args:
      number_of_shards: The number of shards to use in the policy.

    Raises:
      ValueError: If the policy has been frozen and number_of_shards
        differs from the frozen value; or number_of_shards <= 0.
    """
    if self._frozen:
      if self._number_of_shards != number_of_shards:
        raise ValueError(
            f"Can't set sharding policy to use {number_of_shards} shards since "
            f"it has been frozen to use {self._number_of_shards}")
    else:
      if number_of_shards > 0:
        self._number_of_shards = number_of_shards
      else:
        raise ValueError(
            f"Can't set sharding policy to use {number_of_shards} shards; "
            "value must be > 0")

  @property
  def number_of_partitions(self):
    """Returns the number of partitions of the policy or None if unspecified."""
    return self._number_of_partitions

  def set_number_of_partitions(self, number_of_partitions):
    """Sets the number of partitions for the current policy.

    If the policy has been frozen then shard_dimension must match the
    existing setting.

    Args:
      number_of_partitions: The number of partitions to use in the policy.

    Raises:
      ValueError: If the policy has been frozen and shard_dimension
        differs from the frozen value.
    """
    if self._frozen:
      if self._number_of_partitions != number_of_partitions:
        raise ValueError(
            f"Can't set number_of_partitions to {number_of_partitions} since "
            f"it has been frozen to use {self._number_of_partitions}.")
    else:
      self._number_of_partitions = number_of_partitions

  @property
  def shard_dimension(self):
    """Returns the shard dimension of the policy or None if unspecified."""
    return self._shard_dimension

  def set_shard_dimension(self, shard_dimension):
    """Sets the shard dimension for the current policy.

    If the policy has been frozen then shard_dimension must match the
    existing setting.

    Args:
      shard_dimension: The shard dimension to use in the policy.

    Raises:
      ValueError: If the policy has been frozen and shard_dimension
        differs from the frozen value, or shard_dimension can't be
        interpreted as a Dimension.
    """
    if self._frozen:
      if self._shard_dimension != shard_dimension:
        # Use an f-string for consistency with the other error messages in
        # this class (was %-formatting).
        raise ValueError(
            f"Can't set shard dimension to {shard_dimension} since it has "
            f"been frozen to use {self._shard_dimension}.")
    else:
      self._shard_dimension = tensor_shape.as_dimension(shard_dimension)

  def merge(self, other):
    """Merges the policy of another policy into the current policy.

    Args:
      other: The policy to merge into this one.

    Raises:
      ValueError: If this policy has been frozen and the merge conflicts with
        the frozen policy.
    """
    # NOTE(review): number_of_partitions is not merged here — confirm that
    # this asymmetry with shards/dimension is intentional.
    if other.number_of_shards is not None:
      self.set_number_of_shards(other.number_of_shards)
    if other.shard_dimension is not None:
      self.set_shard_dimension(other.shard_dimension)

  def get_unpartitioned_shape(self, shape):
    """Returns the shape of an unpartitioned Tensor.

    When given the shape of a 'sharded-size' Tensor, returns the shape
    of the full shape of its unpartitioned Tensor.

    Args:
      shape: The shape of the sharded Tensor.

    Returns:
      The shape of the unpartitioned version of the Tensor.

    Raises:
      ValueError: if shape has unknown sharded dimension
    """
    shape = tensor_shape.as_shape(shape)
    dims = shape.as_list()
    if (self._shard_dimension is None or self._number_of_partitions is None or
        not dims):
      return None
    if dims[self._shard_dimension] is None:
      raise ValueError(f"Shape {shape.as_list()} must have a fixed size for "
                       f"dimension {self._shard_dimension} that is known. ")
    if self._number_of_partitions > 1:
      dims[self._shard_dimension] *= self._number_of_partitions
    return tensor_shape.as_shape(dims)

  def get_sharded_shape(self, shape, shard_index=None):
    """Returns the shape of a shard of a full Tensor.

    When given the shape of a 'full-size' Tensor, returns the shape of
    the sub-Tensor after it has been sharded. Freezes the policy if it
    has not yet been frozen.

    Args:
      shape: The shape of the full-size Tensor to be sharded.
      shard_index: The index of the shard whose shape should be returned.
        shard_index can be None for sharding policies that use the same shape
        for every shard.

    Returns:
      The shape of the sharded version of the Tensor.

    Raises:
      ValueError: If shard_index is None when shards are of different
        shapes; or shard_index is not None and
        !(0<=shard_index<number_of_shards); or shape does not have at
        least self.shard_dimension+1 dimensions; or the value of
        shape's shard dimension is not a multiple of
        self.number_of_shards
    """
    if self._shard_dimension is None or self._number_of_shards is None:
      # Don't raise an error if the config is unset.
      return None
    if shard_index is not None:
      if shard_index < 0 or shard_index >= self.number_of_shards:
        raise ValueError(
            f"Requested shard_index {shard_index}, but shard_index must be in "
            f"[0,{self._number_of_shards}).")
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
      # Don't do anything when there's only one shard.
      return shape
    ndims = shape.ndims
    if ndims is None:
      raise ValueError(f"Shape {shape} must be a known shape.")
    if ndims <= self._shard_dimension:
      raise ValueError(
          f"Shape {shape.as_list()} does not contain shard_dimension "
          f"{self._shard_dimension}")
    dims = shape.as_list()
    if dims[self._shard_dimension] is None:
      raise ValueError(
          f"Shape {shape.as_list()} must have a fixed size for dimension "
          f"{self._shard_dimension} that is known at construction time.")
    if (dims[self._shard_dimension] % self._number_of_shards) != 0:
      raise ValueError(
          f"Shape {shape.as_list()} cannot be sharded {self._number_of_shards} "
          f"ways along dimension {self._shard_dimension}")
    dims[self._shard_dimension] //= self._number_of_shards
    return tensor_shape.TensorShape(dims)

  def _unshard_shape(self, shape):
    """Return the unsharded shape that would generate a given sharded shape.

    Args:
      shape: the sharded shape to unshard

    Returns:
      The unsharded shape.

    Raises:
      ValueError: if shape is unknown or does not contain
        self.shard_dimension
      TypeError: if shape is not convertible to a TensorShape
    """
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
      # Don't do anything when there's only one shard.
      return shape
    ndims = shape.ndims
    if ndims is None:
      raise ValueError(f"Shape {shape} must be statically known.")
    if ndims <= self._shard_dimension:
      raise ValueError(f"Shape {shape.as_list()} does not contain "
                       f"shard_dimension {self._shard_dimension}. "
                       f"Rank is too small.")
    dims = shape.as_list()
    dims[self._shard_dimension] *= self._number_of_shards
    return tensor_shape.TensorShape(dims)

  def get_unsharded_shape(self, shapes):
    """Returns the shape of an unsharded Tensor given a list of shards.

    When given a list of shapes of shards, returns the shape of the
    unsharded Tensor that would generate the shards. Sets defaults for the
    policy if number_of_shards or shard_dimension is None.

    Args:
      shapes: The shapes of the Tensor shards to be combined.

    Returns:
      The shape of the unsharded version of the Tensor.

    Raises:
      ValueError: if shapes is not a list of length
        self.number_of_shards; or any element of shapes is not a valid
        shape consistent with the sharding policy; or the list of
        shapes is not a valid sharding of a full shape.
      TypeError: if an element of shapes is not convertible to a
        TensorShape
    """
    self._fill_default_values()
    if len(shapes) != self.number_of_shards:
      raise ValueError(
          f"Shapes {shapes} is length {len(shapes)} but must be a list of "
          f"length number_of_shards={self.number_of_shards}")
    unsharded_shapes = [self._unshard_shape(s) for s in shapes]
    for i in range(self.number_of_shards - 1):
      if not unsharded_shapes[i].is_compatible_with(
          unsharded_shapes[self.number_of_shards - 1]):
        raise ValueError(
            f"Sharded shapes {shapes} are not consistent shards of a full shape "
            f"sharded {self.number_of_shards} ways along "
            f"dimension {self.shard_dimension}.")
    return unsharded_shapes[0]
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@tpu@tpu_sharding.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "ebachelet/pyLIMA",
"repo_path": "pyLIMA_extracted/pyLIMA-master/pyLIMA/pyLIMASS/__init__.py",
"type": "Python"
}
|
from .pyLIMASS import SourceLensProbabilities
__all__ = ["SourceLensProbabilities"]
|
ebacheletREPO_NAMEpyLIMAPATH_START.@pyLIMA_extracted@pyLIMA-master@pyLIMA@pyLIMASS@__init__.py@.PATH_END.py
|
{
"filename": "test_like.ipynb",
"repo_name": "threeML/hawc_hal",
"repo_path": "hawc_hal_extracted/hawc_hal-master/notebooks/test_like.ipynb",
"type": "Jupyter Notebook"
}
|
```python
from threeML import *
from hawc_hal import HAL, HealpixConeROI
import os
os.environ['OMP_NUM_THREADS'] = "1"
os.environ['MKL_NUM_THREADS'] = "1"
%matplotlib notebook
```
Configuration read from /home/giacomov/.threeML/threeML_config.yml
```python
ra_crab, dec_crab = 83.633083, 22.014500
roi = HealpixConeROI(3.0, 12.0, ra=ra_crab, dec=dec_crab)
hawc = HAL("HAWC",
"/home/giacomov/science/hawc/data/maptree_1024.root",
"/home/giacomov/science/hawc/data/response.root",
roi)
hawc.set_active_measurements(1, 9)
hawc.display()
```
Creating singleton for /home/giacomov/science/hawc/data/response.root
Region of Interest:
--------------------
HealpixConeROI: Center (R.A., Dec) = (83.633, 22.015), data radius = 3.000 deg, model radius: 12.000 deg
Flat sky projection:
----------------------
Width x height: 142 x 142 px
Pixel sizes: 0.17 deg
Response:
----------
Response file: /home/giacomov/science/hawc/data/response.root
Number of dec bins: 106
Number of energy/nHit planes per dec bin_name: 10
Map Tree:
----------
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Bin</th>
<th>Nside</th>
<th>Scheme</th>
<th>Obs counts</th>
<th>Bkg counts</th>
<th>obs/bkg</th>
<th>Pixels in ROI</th>
<th>Area (deg^2)</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>1024</td>
<td>RING</td>
<td>964234222.0</td>
<td>9.640280e+08</td>
<td>1.000214</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>1024</td>
<td>RING</td>
<td>63464148.0</td>
<td>6.338158e+07</td>
<td>1.001303</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>2</th>
<td>2</td>
<td>1024</td>
<td>RING</td>
<td>20357059.0</td>
<td>2.029720e+07</td>
<td>1.002949</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>3</th>
<td>3</td>
<td>1024</td>
<td>RING</td>
<td>5589697.0</td>
<td>5.554641e+06</td>
<td>1.006311</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>4</th>
<td>4</td>
<td>1024</td>
<td>RING</td>
<td>719476.0</td>
<td>7.072707e+05</td>
<td>1.017257</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>5</th>
<td>5</td>
<td>1024</td>
<td>RING</td>
<td>162229.0</td>
<td>1.559630e+05</td>
<td>1.040176</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>6</th>
<td>6</td>
<td>1024</td>
<td>RING</td>
<td>30084.0</td>
<td>2.797803e+04</td>
<td>1.075272</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>7</th>
<td>7</td>
<td>1024</td>
<td>RING</td>
<td>17787.0</td>
<td>1.656240e+04</td>
<td>1.073938</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>8</th>
<td>8</td>
<td>1024</td>
<td>RING</td>
<td>4909.0</td>
<td>4.454568e+03</td>
<td>1.102015</td>
<td>8627</td>
<td>28.28354</td>
</tr>
<tr>
<th>9</th>
<td>9</td>
<td>1024</td>
<td>RING</td>
<td>6607.0</td>
<td>6.144495e+03</td>
<td>1.075271</td>
<td>8627</td>
<td>28.28354</td>
</tr>
</tbody>
</table>
</div>
This Map Tree contains 1017.288 transits in the first bin
Total data size: 1.38 Mb
Active energy/nHit planes:
---------------------------
[1, 2, 3, 4, 5, 6, 7, 8, 9]
```python
spectrum = Log_parabola()
source = PointSource("CrabNebula", ra=ra_crab, dec=dec_crab, spectral_shape=spectrum)
# NOTE: if you use units, you have to set up the values for the parameters
# AFTER you create the source, because during creation the function Log_parabola
# gets its units
source.position.ra.bounds = (ra_crab - 0.5, ra_crab + 0.5)
source.position.dec.bounds = (dec_crab - 0.5, dec_crab + 0.5)
spectrum.piv = 10 * u.TeV # Pivot energy
spectrum.piv.fix = True
spectrum.K = 1e-14 / (u.TeV * u.cm**2 * u.s) # norm (in 1/(keV cm2 s))
spectrum.K.bounds = (1e-25, 1e-19) # without units energies are in keV
spectrum.beta = 0 # log parabolic beta
spectrum.beta.bounds = (-4., 2.)
spectrum.alpha = -2.5 # log parabolic alpha (index)
spectrum.alpha.bounds = (-4., 2.)
model = Model(source)
data = DataList(hawc)
```
```python
jl = JointLikelihood(model, data, verbose=False)
```
```python
jl.set_minimizer("minuit")
#%lprun -f FlatSkyToHealpixTransform.__call__
#%lprun -f hawc.get_log_like _ = jl.fit(quiet=False)
%prun _ = jl.fit()
# 12 s
# CrabNebula.spectrum.main.Log_parabola.K (1.044 +/- 0.017) x 10^-22 1 / (cm2 keV s)
# CrabNebula.spectrum.main.Log_parabola.alpha -2.807 +/- 0.018
# CrabNebula.spectrum.main.Log_parabola.beta (1.750 +/- 0.11) x 10^-1
```
Best fit values:
WARNING FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>result</th>
<th>unit</th>
</tr>
<tr>
<th>parameter</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.K</th>
<td>(1.044 -0.017 +0.018) x 10^-22</td>
<td>1 / (cm2 keV s)</td>
</tr>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.alpha</th>
<td>-2.791 +/- 0.020</td>
<td></td>
</tr>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.beta</th>
<td>(1.720 +/- 0.12) x 10^-1</td>
<td></td>
</tr>
</tbody>
</table>
</div>
Correlation matrix:
<table id="table140026022180048">
<tr><td>1.00</td><td>-0.17</td><td>0.67</td></tr>
<tr><td>-0.17</td><td>1.00</td><td>-0.77</td></tr>
<tr><td>0.67</td><td>-0.77</td><td>1.00</td></tr>
</table>
Values of -log(likelihood) at the minimum:
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>-log(likelihood)</th>
</tr>
</thead>
<tbody>
<tr>
<th>HAWC</th>
<td>20257.502536</td>
</tr>
<tr>
<th>total</th>
<td>20257.502536</td>
</tr>
</tbody>
</table>
</div>
Values of statistical measures:
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>statistical measures</th>
</tr>
</thead>
<tbody>
<tr>
<th>AIC</th>
<td>40521.005350</td>
</tr>
<tr>
<th>BIC</th>
<td>40549.100783</td>
</tr>
</tbody>
</table>
</div>
```python
gf = GoodnessOfFit(jl)
```
```python
%load_ext line_profiler
```
```python
from hawc_hal.convolved_source import ConvolvedPointSource
from hawc_hal.response.response import HAWCResponse, ResponseBin
from hawc_hal.psf_fast import PSFWrapper
```
```python
import cProfile
command = """gf.by_mc(100)"""
cProfile.runctx( command, globals(), locals(), filename="profiling.profile" )
#%lprun -f PSFWrapper.__init__ gof, param, likes = gf.by_mc(10)
```
VBox(children=(HTML(value=u'Goodness of fit computation : '), HTML(value=u''), FloatProgress(value=0.0)))
```python
#print(gof)
#likes.plot()
```
```python
#param.loc[(slice(None), ['CrabNebula.spectrum.main.Log_parabola.alpha']), 'value'].plot()
```
```python
%load_ext line_profiler
```
```python
source.position.ra.free = True
source.position.dec.free = True
jl.set_minimizer("minuit")
command = """jl.fit()"""
cProfile.runctx( command, globals(), locals(), filename="freepos.profile" )
#best_fit, like_frame = jl.fit()
# CrabNebula.position.ra (8.362 +/- 0.00026) x 10 deg
# CrabNebula.position.dec (2.202 +/- 0.00024) x 10 deg
# CrabNebula.spectrum.main.Log_parabola.K (9.970 +/- 0.17) x 10^-23 1 / (cm2 keV s)
# CrabNebula.spectrum.main.Log_parabola.alpha -2.798 +/- 0.021
# CrabNebula.spectrum.main.Log_parabola.beta (1.590 +/- 0.13) x 10^-1
```
Best fit values:
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>result</th>
<th>unit</th>
</tr>
<tr>
<th>parameter</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>CrabNebula.position.ra</th>
<td>(8.371 +/- 0.00035) x 10</td>
<td>deg</td>
</tr>
<tr>
<th>CrabNebula.position.dec</th>
<td>(2.196 +/- 0.0004) x 10</td>
<td>deg</td>
</tr>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.K</th>
<td>(1.075 +/- 0.018) x 10^-22</td>
<td>1 / (cm2 keV s)</td>
</tr>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.alpha</th>
<td>-2.795 +/- 0.019</td>
<td></td>
</tr>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.beta</th>
<td>(1.780 +/- 0.12) x 10^-1</td>
<td></td>
</tr>
</tbody>
</table>
</div>
Correlation matrix:
<table id="table140218620644432">
<tr><td>1.00</td><td>0.60</td><td>-0.04</td><td>-0.03</td><td>0.01</td></tr>
<tr><td>0.60</td><td>1.00</td><td>-0.05</td><td>-0.03</td><td>0.01</td></tr>
<tr><td>-0.04</td><td>-0.05</td><td>1.00</td><td>-0.17</td><td>0.67</td></tr>
<tr><td>-0.03</td><td>-0.03</td><td>-0.17</td><td>1.00</td><td>-0.77</td></tr>
<tr><td>0.01</td><td>0.01</td><td>0.67</td><td>-0.77</td><td>1.00</td></tr>
</table>
Values of -log(likelihood) at the minimum:
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>-log(likelihood)</th>
</tr>
</thead>
<tbody>
<tr>
<th>HAWC</th>
<td>19568.932327</td>
</tr>
<tr>
<th>total</th>
<td>19568.932327</td>
</tr>
</tbody>
</table>
</div>
Values of statistical measures:
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>statistical measures</th>
</tr>
</thead>
<tbody>
<tr>
<th>AIC</th>
<td>39147.865349</td>
</tr>
<tr>
<th>BIC</th>
<td>39194.690839</td>
</tr>
</tbody>
</table>
</div>
```python
_ = jl.get_errors()
# CrabNebula.position.ra (8.36201 -0.00027 +0.00026) x 10 deg
# CrabNebula.position.dec (2.20206 -0.00025 +0.00023) x 10 deg
# CrabNebula.spectrum.main.Log_parabola.K (1.010 +/- 0.017) x 10^-22 1 / (cm2 keV s)
# CrabNebula.spectrum.main.Log_parabola.alpha -2.797 +/- 0.019
# CrabNebula.spectrum.main.Log_parabola.beta (1.630 +/- 0.11) x 10^-1
```
```python
%prun -D contour.profile jl.get_contours(model.CrabNebula.position.ra, 83.610, 83.630, 16, model.CrabNebula.position.dec, 22.010, 22.030, 16)
import matplotlib.pyplot as plt
plt.plot([dec_crab], [ra_crab], 'x')
# Parallel: 183.5
# 225 / 225 in 249.2 s (0:00:00 remaining)
```
VBox(children=(HTML(value=u'Profiling likelihood : '), HTML(value=u''), FloatProgress(value=0.0)))
*** Profile stats marshalled to file u'contour.profile'.
WARNING UserWarning: No contour levels were found within the data range.
[<matplotlib.lines.Line2D at 0x7f872fb2a110>]

```python
print spectrum(1.0 * u.TeV).to(1/(u.TeV * u.cm**2 * u.s))
```
```python
source.position.ra = ra_crab
source.position.ra.free = True
source.position.dec = dec_crab
source.position.dec.free = True
for parameter in model.parameters.values():
if parameter.fix:
continue
if parameter.is_normalization:
parameter.set_uninformative_prior(Log_uniform_prior)
else:
parameter.set_uninformative_prior(Uniform_prior)
bs = BayesianAnalysis(model, data)
%prun -D bayes.profile bs.sample(30, 10, 10)
# 71.3
# 38.3
```
WARNING RuntimeWarning: External parameter HAWC_bkg_renorm already exist in the model. Overwriting it...
VBox(children=(HTML(value=u'Burn-in : '), HTML(value=u''), FloatProgress(value=0.0)))
VBox(children=(HTML(value=u'Sampling : '), HTML(value=u''), FloatProgress(value=0.0)))
Mean acceptance fraction: 0.37333333333333335
Maximum a posteriori probability (MAP) point:
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>result</th>
<th>unit</th>
</tr>
<tr>
<th>parameter</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>CrabNebula.position.ra</th>
<td>(8.363 +/- 0.009) x 10</td>
<td>deg</td>
</tr>
<tr>
<th>CrabNebula.position.dec</th>
<td>(2.205 -0.009 +0.008) x 10</td>
<td>deg</td>
</tr>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.K</th>
<td>(1.00 -0.05 +0.04) x 10^-22</td>
<td>1 / (cm2 keV s)</td>
</tr>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.alpha</th>
<td>-2.85 -0.13 +0.12</td>
<td></td>
</tr>
<tr>
<th>CrabNebula.spectrum.main.Log_parabola.beta</th>
<td>(1.76 -0.14 +0.09) x 10^-1</td>
<td></td>
</tr>
</tbody>
</table>
</div>
Values of -log(posterior) at the minimum:
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>-log(posterior)</th>
</tr>
</thead>
<tbody>
<tr>
<th>HAWC</th>
<td>-19579.855146</td>
</tr>
<tr>
<th>total</th>
<td>-19579.855146</td>
</tr>
</tbody>
</table>
</div>
Values of statistical measures:
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>statistical measures</th>
</tr>
</thead>
<tbody>
<tr>
<th>AIC</th>
<td>39169.710987</td>
</tr>
<tr>
<th>BIC</th>
<td>39216.536478</td>
</tr>
<tr>
<th>DIC</th>
<td>46104.965950</td>
</tr>
<tr>
<th>PDIC</th>
<td>2373.982072</td>
</tr>
</tbody>
</table>
</div>
*** Profile stats marshalled to file u'bayes.profile'.
```python
_ = bs.corner_plot()
```
```python
```
|
threeMLREPO_NAMEhawc_halPATH_START.@hawc_hal_extracted@hawc_hal-master@notebooks@test_like.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "dask/dask-image",
"repo_path": "dask-image_extracted/dask-image-main/dask_image/ndfourier/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import numbers
import dask.array as da
from . import _utils
# Public API of the ndfourier subpackage: Fourier-domain filters.
__all__ = [
    "fourier_gaussian",
    "fourier_shift",
    "fourier_uniform",
]
def fourier_gaussian(image, sigma, n=-1, axis=-1):
    """
    Multi-dimensional Gaussian fourier filter.

    The array is multiplied with the fourier transform of a Gaussian
    kernel.

    Parameters
    ----------
    image : array_like
        The input image.
    sigma : float or sequence
        The sigma of the Gaussian kernel. If a float, `sigma` is the same for
        all axes. If a sequence, `sigma` has to contain one value for each
        axis.
    n : int, optional
        If `n` is negative (default), then the image is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the image is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.

    Returns
    -------
    fourier_gaussian : Dask Array

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = misc.ascent()
    >>> image = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_gaussian(image, sigma=4)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    # Validate inputs and broadcast ``sigma`` to one value per axis.
    image, sigma, n, axis = _utils._norm_args(image, sigma, n=n, axis=axis)

    # Angular frequency coordinates along each axis of the transform.
    freqs = _utils._get_ang_freq_grid(
        image.shape,
        chunks=image.chunks,
        n=n,
        axis=axis,
        dtype=sigma.dtype
    )

    # The Gaussian is separable, so apply exp(-sigma^2 * w^2 / 2)
    # one axis at a time.
    result = image.copy()
    for ax, freq in enumerate(freqs):
        coeff = (sigma[ax] ** 2) / -2
        kernel = da.exp(coeff * (freq * freq))
        kernel = _utils._reshape_nd(kernel, ndim=image.ndim, axis=ax)
        result *= kernel

    return result
def fourier_shift(image, shift, n=-1, axis=-1):
    """
    Multi-dimensional fourier shift filter.

    The array is multiplied with the fourier transform of a shift operation.

    Parameters
    ----------
    image : array_like
        The input image.
    shift : float or sequence
        The shift to apply along each axis.
        If a float, `shift` is the same for all axes. If a sequence, `shift`
        has to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the image is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the image is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.

    Returns
    -------
    fourier_shift : Dask Array

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> import numpy.fft
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = misc.ascent()
    >>> image = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_shift(image, shift=200)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    # A real dtype cannot represent the phase factors; promote to complex.
    if issubclass(image.dtype.type, numbers.Real):
        image = image.astype(complex)

    # Validate inputs and broadcast ``shift`` to one value per axis.
    image, shift, n, axis = _utils._norm_args(image, shift, n=n, axis=axis)

    # Imaginary unit expressed in the image's (complex) dtype.
    J = image.dtype.type(1j)

    # Angular frequency coordinates along each axis of the transform.
    freqs = _utils._get_ang_freq_grid(
        image.shape,
        chunks=image.chunks,
        n=n,
        axis=axis,
        dtype=shift.dtype
    )

    # A spatial shift is a linear phase ramp exp(-i * shift * w) in
    # the frequency domain; apply it separably, one axis at a time.
    result = image.copy()
    for ax, freq in enumerate(freqs):
        ramp = da.exp((-J) * shift[ax] * freq)
        ramp = _utils._reshape_nd(ramp, ndim=image.ndim, axis=ax)
        result *= ramp

    return result
def fourier_uniform(image, size, n=-1, axis=-1):
    """
    Multi-dimensional uniform fourier filter.

    The array is multiplied with the fourier transform of a box of given
    size.

    Parameters
    ----------
    image : array_like
        The input image.
    size : float or sequence
        The size of the box used for filtering.
        If a float, `size` is the same for all axes. If a sequence, `size` has
        to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the image is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the image is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.

    Returns
    -------
    fourier_uniform : Dask Array
        The filtered image.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = misc.ascent()
    >>> image = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_uniform(image, size=20)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    # Validate inputs and broadcast ``size`` to one value per axis.
    image, size, n, axis = _utils._norm_args(image, size, n=n, axis=axis)

    # Ordinary (not angular) frequency coordinates along each axis.
    freqs = _utils._get_freq_grid(
        image.shape,
        chunks=image.chunks,
        n=n,
        axis=axis,
        dtype=size.dtype
    )

    # A length-``size`` box transforms to a sinc; apply it separably,
    # one axis at a time.
    result = image.copy()
    for ax, freq in enumerate(freqs):
        box = da.sinc(size[ax] * freq)
        box = _utils._reshape_nd(box, ndim=image.ndim, axis=ax)
        result *= box

    return result
|
daskREPO_NAMEdask-imagePATH_START.@dask-image_extracted@dask-image-main@dask_image@ndfourier@__init__.py@.PATH_END.py
|
{
"filename": "test_formats.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/base_class/test_formats.py",
"type": "Python"
}
|
import numpy as np
import pytest
from pandas._config import using_string_dtype
import pandas._config.config as cf
from pandas import Index
import pandas._testing as tm
class TestIndexRendering:
    """Tests for the string representation (``repr``) of object-dtype ``Index``."""

    def test_repr_is_valid_construction_code(self):
        """The repr of a plain Index round-trips through ``eval``."""
        # for the case of Index, where the repr is traditional rather than
        # stylized
        idx = Index(["a", "b"])
        res = eval(repr(idx))
        tm.assert_index_equal(res, idx)

    @pytest.mark.xfail(using_string_dtype(), reason="repr different")
    @pytest.mark.parametrize(
        "index,expected",
        [
            # ASCII
            # short
            (
                Index(["a", "bb", "ccc"]),
                """Index(['a', 'bb', 'ccc'], dtype='object')""",
            ),
            # multiple lines
            (
                Index(["a", "bb", "ccc"] * 10),
                "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "
                "'bb', 'ccc', 'a', 'bb', 'ccc',\n"
                "       'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "
                "'bb', 'ccc', 'a', 'bb', 'ccc',\n"
                "       'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"
                "      dtype='object')",
            ),
            # truncated
            (
                Index(["a", "bb", "ccc"] * 100),
                "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n"
                "       ...\n"
                "       'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"
                "      dtype='object', length=300)",
            ),
            # Non-ASCII
            # short
            (
                Index(["あ", "いい", "ううう"]),
                """Index(['あ', 'いい', 'ううう'], dtype='object')""",
            ),
            # multiple lines
            (
                Index(["あ", "いい", "ううう"] * 10),
                (
                    "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
                    "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
                    "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう'],\n"
                    "      dtype='object')"
                ),
            ),
            # truncated
            (
                Index(["あ", "いい", "ううう"] * 100),
                (
                    "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
                    "'あ', 'いい', 'ううう', 'あ',\n"
                    "       ...\n"
                    "       'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう'],\n"
                    "      dtype='object', length=300)"
                ),
            ),
        ],
    )
    def test_string_index_repr(self, index, expected):
        """Default repr of string Indexes (short, wrapped, and truncated)."""
        result = repr(index)
        assert result == expected

    @pytest.mark.xfail(using_string_dtype(), reason="repr different")
    @pytest.mark.parametrize(
        "index,expected",
        [
            # short
            (
                Index(["あ", "いい", "ううう"]),
                ("Index(['あ', 'いい', 'ううう'], dtype='object')"),
            ),
            # multiple lines
            (
                Index(["あ", "いい", "ううう"] * 10),
                (
                    "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう'],\n"
                    "      dtype='object')"
                    ""
                ),
            ),
            # truncated
            (
                Index(["あ", "いい", "ううう"] * 100),
                (
                    "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ',\n"
                    "       ...\n"
                    "       'ううう', 'あ', 'いい', 'ううう', 'あ', "
                    "'いい', 'ううう', 'あ', 'いい',\n"
                    "       'ううう'],\n"
                    "      dtype='object', length=300)"
                ),
            ),
        ],
    )
    def test_string_index_repr_with_unicode_option(self, index, expected):
        """Repr with east-asian-width-aware line wrapping enabled."""
        # Enable Unicode option -----------------------------------------
        with cf.option_context("display.unicode.east_asian_width", True):
            result = repr(index)
        assert result == expected

    def test_repr_summary(self):
        """A long numeric Index is summarized with an ellipsis."""
        with cf.option_context("display.max_seq_items", 10):
            result = repr(Index(np.arange(1000)))
        assert len(result) < 200
        assert "..." in result

    def test_summary_bug(self):
        # GH#3869
        ind = Index(["{other}%s", "~:{range}:0"], name="A")
        result = ind._summary()
        # shouldn't be formatted accidentally.
        assert "~:{range}:0" in result
        assert "{other}%s" in result

    def test_index_repr_bool_nan(self):
        # GH32146
        # NOTE(review): variable names were previously swapped
        # (actual repr stored in ``exp2``); renamed for clarity.
        idx = Index([True, False, np.nan], dtype=object)
        result = repr(idx)
        expected = "Index([True, False, nan], dtype='object')"
        assert result == expected
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@indexes@base_class@test_formats.py@.PATH_END.py
|
{
"filename": "7-IndividualLine_Tutorial.ipynb",
"repo_name": "folsomcp/specpolFlow",
"repo_path": "specpolFlow_extracted/specpolFlow-main/docs-jb/Tutorials/7-IndividualLine_Tutorial.ipynb",
"type": "Jupyter Notebook"
}
|
# How to analyze individual spectral lines
Many of the tools in SpecpolFlow can be applied to a single line in an observed spectrum. In this example we analyze a few emission lines in the spectrum of a T Tauri star.
First, import SpecpolFlow and some packages that will help with data visualization.
```python
import specpolFlow as pol
import pandas as pd
import matplotlib.pyplot as plt
```
## 1. Create individual line file
In this tutorial, we use the UPENA normalized spectrum ([IndividualLine_tutorialfiles/1423137pn.s](https://github.com/folsomcp/specpolFlow/blob/main/docs-jb/Tutorials/IndividualLine_tutorialfiles/1423137pn.s)) of the active cool star BP Tau.
We first load in the spectrum.
```python
spec = pol.read_spectrum("IndividualLine_tutorialfiles/1423137pn.s")
```
To extract an individual line from the spectrum, we have to define the transition wavelength `lambda0` and a range with respect to the line center. In the example below, we select the HeI line at 667.815 nm in a 0.6 nm window.
```python
# Create individual line profile
lambda0 = 667.815 # He i 667.815 nm
geff = 1.0 # He i 667.815 nm, used later for calculating Bz
lwidth = 0.3
prof = spec.individual_line(lambda0 = lambda0, lwidth = lwidth)
```
The `individual_line` function returns `prof`, which is really an LSD profile object, and inherits all the attributes from the LSD class. Therefore, you can use all the functionality discussed in the [LSD Class Tutorial](5-LSDClass_Tutorial.ipynb) (i.e., one can normalize, slice, or shift the line profile).
Below, we visualize the individual line using the `plot` class function.
```python
fig, axes = prof.plot()
axes[-1].axhline(1, color ='k', ls = '--')
```
<matplotlib.lines.Line2D at 0x7f040b391a50>

## 2. Calculate Bz
To calculate the longitudinal field (Bz) of the individual line, we can use the <a href='../API/LSD_API.html#specpolFlow.LSD.calc_bz'>`calc_bz`</a> function of the LSD class.
```python
# Define the limits for the continuum normalization
cmin = -50.0; cmax = 80.0
# Compute Bz in a 45 km/s window
Bz, fig = prof.calc_bz(norm = 'auto', cog = 'I',
velrange = [cmin, cmax], bzwidth = 45.0,
geff = geff, lambda0 = lambda0,
plot = True)
```
using AUTO method for the normalization
using the median of the continuum outside of the line

We can then display the results using the pandas dataframe. For this particular line, we find Bz $= 1.2 \pm 0.1$ kG.
```python
Bz_df = pd.DataFrame(data = [Bz])
# Simple display of the pandas dataframe
Bz_df.style
```
<style type="text/css">
</style>
<table id="T_76101_">
<thead>
<tr>
<th class="blank level0" > </th>
<th class="col_heading level0 col0" >V bz (G)</th>
<th class="col_heading level0 col1" >V bz sig (G)</th>
<th class="col_heading level0 col2" >V FAP</th>
<th class="col_heading level0 col3" >N1 bz (G)</th>
<th class="col_heading level0 col4" >N1 bz sig (G)</th>
<th class="col_heading level0 col5" >N1 FAP</th>
<th class="col_heading level0 col6" >N2 bz (G)</th>
<th class="col_heading level0 col7" >N2 bz sig (G)</th>
<th class="col_heading level0 col8" >N2 FAP</th>
<th class="col_heading level0 col9" >norm_method</th>
<th class="col_heading level0 col10" >norm</th>
<th class="col_heading level0 col11" >cog_method</th>
<th class="col_heading level0 col12" >cog</th>
<th class="col_heading level0 col13" >int. range start</th>
<th class="col_heading level0 col14" >int. range end</th>
</tr>
</thead>
<tbody>
<tr>
<th id="T_76101_level0_row0" class="row_heading level0 row0" >0</th>
<td id="T_76101_row0_col0" class="data row0 col0" >1191.652271</td>
<td id="T_76101_row0_col1" class="data row0 col1" >130.184898</td>
<td id="T_76101_row0_col2" class="data row0 col2" >0.000000</td>
<td id="T_76101_row0_col3" class="data row0 col3" >-34.686996</td>
<td id="T_76101_row0_col4" class="data row0 col4" >130.041356</td>
<td id="T_76101_row0_col5" class="data row0 col5" >0.564259</td>
<td id="T_76101_row0_col6" class="data row0 col6" >0.000000</td>
<td id="T_76101_row0_col7" class="data row0 col7" >0.000000</td>
<td id="T_76101_row0_col8" class="data row0 col8" >0.000000</td>
<td id="T_76101_row0_col9" class="data row0 col9" >auto</td>
<td id="T_76101_row0_col10" class="data row0 col10" >0.931285</td>
<td id="T_76101_row0_col11" class="data row0 col11" >I</td>
<td id="T_76101_row0_col12" class="data row0 col12" >21.173209</td>
<td id="T_76101_row0_col13" class="data row0 col13" >-23.826791</td>
<td id="T_76101_row0_col14" class="data row0 col14" >66.173209</td>
</tr>
</tbody>
</table>
## 3. Normalize the individual line
In the figure above, we see that `calc_bz` improved the continuum normalization, taking the median of the continuum outside of the line.
Therefore, we propagate this normalization in the `prof` object before saving the individual line profile into a file.
```python
# Renormalize prof using the continuum value obtained by calc_bz
prof_norm = prof.norm(Bz['norm'])
# Display the profile
fig, axes = prof_norm.plot()
axes[-1].axhline(1, color = 'k', ls = '--')
```
<matplotlib.lines.Line2D at 0x7f0401fed2a0>

## 4. Saving individual lines
Finally, you can save the individual line into a file using the `save` function, shown below. This file will have a text format like an LSD profile.
```python
prof_norm.save("Output/1423137.Helium")
```
## 5. Dealing with order overlaps
Now, let's do the same for the Halpha line. However, there is a complication: the Halpha line is split across two orders in this echelle spectrum.
```python
# reading the spectrum
spec = pol.read_spectrum("IndividualLine_tutorialfiles/1423137pn.s")
# defining the line parameters
lambda0 = 656.281 ; geff = 1.0 # Halpha
lwidth = 700. * lambda0 / 2.99e5 #convert a 700 km/s width into a width in nm
# getting the profile for that line
prof = spec.individual_line(lambda0 = lambda0, lwidth = lwidth)
# Define the limits for the continuum normalization
# Compute Bz in a 200 km/s window around the cog
cogRange = [-400., 400.]
intRange = 200.
Bz, fig = prof.calc_bz(norm = 'auto', cog = 'I',
velrange = cogRange, bzwidth = intRange,
geff = geff, lambda0 = lambda0,
plot = True)
# Create a dataframe to record the result of the different methods in this section
d = {'Merge type':'None'}
d.update(Bz)
df_results = pd.DataFrame(data = [d])
df_results.style
```
using AUTO method for the normalization
using the median of the continuum outside of the line
/tmp/ipykernel_980312/871814974.py:13: UserWarning:
The velocity array is not monotonically increasing.
There might be an order overlap in the region of the observation used.
calc_bz will sort the LSD profile in velocity order.
Make sure this is what you want -- merge orders before running calc_bz()!
Bz, fig = prof.calc_bz(norm = 'auto', cog = 'I',
<style type="text/css">
</style>
<table id="T_db87a_">
<thead>
<tr>
<th class="blank level0" > </th>
<th class="col_heading level0 col0" >Merge type</th>
<th class="col_heading level0 col1" >V bz (G)</th>
<th class="col_heading level0 col2" >V bz sig (G)</th>
<th class="col_heading level0 col3" >V FAP</th>
<th class="col_heading level0 col4" >N1 bz (G)</th>
<th class="col_heading level0 col5" >N1 bz sig (G)</th>
<th class="col_heading level0 col6" >N1 FAP</th>
<th class="col_heading level0 col7" >N2 bz (G)</th>
<th class="col_heading level0 col8" >N2 bz sig (G)</th>
<th class="col_heading level0 col9" >N2 FAP</th>
<th class="col_heading level0 col10" >norm_method</th>
<th class="col_heading level0 col11" >norm</th>
<th class="col_heading level0 col12" >cog_method</th>
<th class="col_heading level0 col13" >cog</th>
<th class="col_heading level0 col14" >int. range start</th>
<th class="col_heading level0 col15" >int. range end</th>
</tr>
</thead>
<tbody>
<tr>
<th id="T_db87a_level0_row0" class="row_heading level0 row0" >0</th>
<td id="T_db87a_row0_col0" class="data row0 col0" >None</td>
<td id="T_db87a_row0_col1" class="data row0 col1" >72.964936</td>
<td id="T_db87a_row0_col2" class="data row0 col2" >17.843167</td>
<td id="T_db87a_row0_col3" class="data row0 col3" >0.000000</td>
<td id="T_db87a_row0_col4" class="data row0 col4" >7.405907</td>
<td id="T_db87a_row0_col5" class="data row0 col5" >17.843162</td>
<td id="T_db87a_row0_col6" class="data row0 col6" >0.435065</td>
<td id="T_db87a_row0_col7" class="data row0 col7" >0.000000</td>
<td id="T_db87a_row0_col8" class="data row0 col8" >0.000000</td>
<td id="T_db87a_row0_col9" class="data row0 col9" >0.000000</td>
<td id="T_db87a_row0_col10" class="data row0 col10" >auto</td>
<td id="T_db87a_row0_col11" class="data row0 col11" >1.101100</td>
<td id="T_db87a_row0_col12" class="data row0 col12" >I</td>
<td id="T_db87a_row0_col13" class="data row0 col13" >9.725353</td>
<td id="T_db87a_row0_col14" class="data row0 col14" >-190.274647</td>
<td id="T_db87a_row0_col15" class="data row0 col15" >209.725353</td>
</tr>
</tbody>
</table>

The calc_bz function gave us a warning about there probably being different spectral orders here. And the plot of results is relatively messy.
### 5.1 Option 1: Selecting only one order
The simplest solution to this problem is to only use one spectral order for the Bz calculation. In the case of this Halpha line, both orders include the center of the line that we are interested in. So we can calculate Bz from the two orders separately.
To get the spectral orders that include a wavelength, or range of wavelengths, we can use the <a href='../API/Spectrum_API.html#specpolFlow.Spectrum.get_orders_in_range'>`get_orders_in_range`</a> function of the `Spectrum` class. This returns a list containing `Spectrum` objects for each order.
```python
## Selecting only one order
orders = spec.get_orders_in_range(lambda0)
print('There are {} orders with Halpha in them'.format(len(orders)))
# Illustration of the order overlap
fig, ax = plt.subplots(1,1)
ax.plot(orders[0].wl, orders[0].specI, label = 'Lower order')
ax.plot(orders[1].wl, orders[1].specI, label = 'Higher order')
ax.set_xlabel('Wavelength (nm)')
ax.set_ylabel('Stokes I / Ic')
ax.legend(loc = 0)
# loop over the orders
type = ['lower only', 'higher only']
i = 0
for ord in orders:
# get the profile for the line in this order
prof = ord.individual_line(lambda0 = lambda0, lwidth = lwidth)
# compute Bz, in the defined velocity window around the cog
Bz, fig = prof.calc_bz(norm = 'auto', cog = 'I',
velrange = cogRange, bzwidth = intRange,
geff = geff, lambda0 = lambda0,
plot = True)
d = {'Merge type':type[i]}
d.update(Bz)
df = pd.DataFrame(data=[d])
df_results = pd.concat([df_results,df], ignore_index=True)
i += 1
# display the Bz result as a dataframe
df_results.style
```
There are 2 orders with Halpha in them
using AUTO method for the normalization
using the median of the continuum outside of the line
using AUTO method for the normalization
using the median of the continuum outside of the line
<style type="text/css">
</style>
<table id="T_d9778_">
<thead>
<tr>
<th class="blank level0" > </th>
<th class="col_heading level0 col0" >Merge type</th>
<th class="col_heading level0 col1" >V bz (G)</th>
<th class="col_heading level0 col2" >V bz sig (G)</th>
<th class="col_heading level0 col3" >V FAP</th>
<th class="col_heading level0 col4" >N1 bz (G)</th>
<th class="col_heading level0 col5" >N1 bz sig (G)</th>
<th class="col_heading level0 col6" >N1 FAP</th>
<th class="col_heading level0 col7" >N2 bz (G)</th>
<th class="col_heading level0 col8" >N2 bz sig (G)</th>
<th class="col_heading level0 col9" >N2 FAP</th>
<th class="col_heading level0 col10" >norm_method</th>
<th class="col_heading level0 col11" >norm</th>
<th class="col_heading level0 col12" >cog_method</th>
<th class="col_heading level0 col13" >cog</th>
<th class="col_heading level0 col14" >int. range start</th>
<th class="col_heading level0 col15" >int. range end</th>
</tr>
</thead>
<tbody>
<tr>
<th id="T_d9778_level0_row0" class="row_heading level0 row0" >0</th>
<td id="T_d9778_row0_col0" class="data row0 col0" >None</td>
<td id="T_d9778_row0_col1" class="data row0 col1" >72.964936</td>
<td id="T_d9778_row0_col2" class="data row0 col2" >17.843167</td>
<td id="T_d9778_row0_col3" class="data row0 col3" >0.000000</td>
<td id="T_d9778_row0_col4" class="data row0 col4" >7.405907</td>
<td id="T_d9778_row0_col5" class="data row0 col5" >17.843162</td>
<td id="T_d9778_row0_col6" class="data row0 col6" >0.435065</td>
<td id="T_d9778_row0_col7" class="data row0 col7" >0.000000</td>
<td id="T_d9778_row0_col8" class="data row0 col8" >0.000000</td>
<td id="T_d9778_row0_col9" class="data row0 col9" >0.000000</td>
<td id="T_d9778_row0_col10" class="data row0 col10" >auto</td>
<td id="T_d9778_row0_col11" class="data row0 col11" >1.101100</td>
<td id="T_d9778_row0_col12" class="data row0 col12" >I</td>
<td id="T_d9778_row0_col13" class="data row0 col13" >9.725353</td>
<td id="T_d9778_row0_col14" class="data row0 col14" >-190.274647</td>
<td id="T_d9778_row0_col15" class="data row0 col15" >209.725353</td>
</tr>
<tr>
<th id="T_d9778_level0_row1" class="row_heading level0 row1" >1</th>
<td id="T_d9778_row1_col0" class="data row1 col0" >lower only</td>
<td id="T_d9778_row1_col1" class="data row1 col1" >94.012588</td>
<td id="T_d9778_row1_col2" class="data row1 col2" >25.428151</td>
<td id="T_d9778_row1_col3" class="data row1 col3" >0.000003</td>
<td id="T_d9778_row1_col4" class="data row1 col4" >5.209152</td>
<td id="T_d9778_row1_col5" class="data row1 col5" >25.428140</td>
<td id="T_d9778_row1_col6" class="data row1 col6" >0.221315</td>
<td id="T_d9778_row1_col7" class="data row1 col7" >0.000000</td>
<td id="T_d9778_row1_col8" class="data row1 col8" >0.000000</td>
<td id="T_d9778_row1_col9" class="data row1 col9" >0.000000</td>
<td id="T_d9778_row1_col10" class="data row1 col10" >auto</td>
<td id="T_d9778_row1_col11" class="data row1 col11" >1.104800</td>
<td id="T_d9778_row1_col12" class="data row1 col12" >I</td>
<td id="T_d9778_row1_col13" class="data row1 col13" >14.197130</td>
<td id="T_d9778_row1_col14" class="data row1 col14" >-185.802870</td>
<td id="T_d9778_row1_col15" class="data row1 col15" >214.197130</td>
</tr>
<tr>
<th id="T_d9778_level0_row2" class="row_heading level0 row2" >2</th>
<td id="T_d9778_row2_col0" class="data row2 col0" >higher only</td>
<td id="T_d9778_row2_col1" class="data row2 col1" >54.213318</td>
<td id="T_d9778_row2_col2" class="data row2 col2" >21.662775</td>
<td id="T_d9778_row2_col3" class="data row2 col3" >0.000007</td>
<td id="T_d9778_row2_col4" class="data row2 col4" >3.435552</td>
<td id="T_d9778_row2_col5" class="data row2 col5" >21.662771</td>
<td id="T_d9778_row2_col6" class="data row2 col6" >0.803308</td>
<td id="T_d9778_row2_col7" class="data row2 col7" >0.000000</td>
<td id="T_d9778_row2_col8" class="data row2 col8" >0.000000</td>
<td id="T_d9778_row2_col9" class="data row2 col9" >0.000000</td>
<td id="T_d9778_row2_col10" class="data row2 col10" >auto</td>
<td id="T_d9778_row2_col11" class="data row2 col11" >1.096300</td>
<td id="T_d9778_row2_col12" class="data row2 col12" >I</td>
<td id="T_d9778_row2_col13" class="data row2 col13" >13.943177</td>
<td id="T_d9778_row2_col14" class="data row2 col14" >-186.056823</td>
<td id="T_d9778_row2_col15" class="data row2 col15" >213.943177</td>
</tr>
</tbody>
</table>



### 5.2 Option 2: merging the orders
An alternative solution is to merge the spectral orders before calculating Bz. Combining orders is more complex, but it can optimize S/N, and it can be useful when the wavelength range of interest extends beyond the edge of an order.
We can combine spectral orders with the <a href='../API/Spectrum_API.html#specpolFlow.Spectrum.merge_orders'>`merge_orders`</a> function of the `Spectrum` class. This function provides two methods of combining orders `'trim'` and `'coadd'`. This function only really works properly if the continuum levels are consistent between orders.
In `merge_orders` with `mode = 'trim'`, the function just uses the first order up to the midpoint of the region where the orders overlap, then it uses the second order beyond that midpoint. This approach is relatively simple and essentially throws away a bit of signal. But it is more robust against errors in continuum normalization (or spectrum extraction) at the edges of spectral orders, and these errors are relatively common.
In `merge_orders` with `mode = 'coadd'`, the function essentially averages the orders, weighted by the uncertainties on individual pixels (specifically by $1/\sigma^2$). To do this it needs to interpolate in wavelength, and it uses the wavelengths from the first order, interpolating the second order onto that grid. This mode requires reliable uncertainties for the Stokes I spectrum. This approach has the advantage that it optimizes the S/N in the merged spectrum. But it is more vulnerable to continuum normalization errors at the edges of orders, since an error in either order will cause problems for the merged result.
Here we try out both methods to compare the results. In this case the normalization is pretty good in the region we care about, so the `'coadd'` mode gives a more precise result.
```python
## Merging using the trim method
merge = spec.merge_orders(mode = 'trim', midpoint = 0.5)
prof = merge.individual_line(lambda0 = lambda0, lwidth = lwidth)
Bz, fig = prof.calc_bz(norm = 'auto', cog = 'I',
velrange = cogRange, bzwidth = intRange,
geff = geff, lambda0 = lambda0,
plot = True)
d = {'Merge type':'trim at mid-point'}
d.update(Bz)
df = pd.DataFrame(data = [d])
df_results = pd.concat([df_results,df], ignore_index=True)
## Merging using the coadd method
merge = spec.merge_orders(mode='coadd')
prof = merge.individual_line(lambda0 = lambda0, lwidth = lwidth)
Bz, fig = prof.calc_bz(norm = 'auto', cog = 'I',
velrange = cogRange, bzwidth = intRange,
geff = geff, lambda0 = lambda0,
plot = True)
d = {'Merge type':'coadd'}
d.update(Bz)
df = pd.DataFrame(data = [d])
df_results = pd.concat([df_results,df], ignore_index = True)
# Display the Bz result as a dataframe
df_results.style
```
using AUTO method for the normalization
using the median of the continuum outside of the line
using AUTO method for the normalization
using the median of the continuum outside of the line
<style type="text/css">
</style>
<table id="T_2b1bb_">
<thead>
<tr>
<th class="blank level0" > </th>
<th class="col_heading level0 col0" >Merge type</th>
<th class="col_heading level0 col1" >V bz (G)</th>
<th class="col_heading level0 col2" >V bz sig (G)</th>
<th class="col_heading level0 col3" >V FAP</th>
<th class="col_heading level0 col4" >N1 bz (G)</th>
<th class="col_heading level0 col5" >N1 bz sig (G)</th>
<th class="col_heading level0 col6" >N1 FAP</th>
<th class="col_heading level0 col7" >N2 bz (G)</th>
<th class="col_heading level0 col8" >N2 bz sig (G)</th>
<th class="col_heading level0 col9" >N2 FAP</th>
<th class="col_heading level0 col10" >norm_method</th>
<th class="col_heading level0 col11" >norm</th>
<th class="col_heading level0 col12" >cog_method</th>
<th class="col_heading level0 col13" >cog</th>
<th class="col_heading level0 col14" >int. range start</th>
<th class="col_heading level0 col15" >int. range end</th>
</tr>
</thead>
<tbody>
<tr>
<th id="T_2b1bb_level0_row0" class="row_heading level0 row0" >0</th>
<td id="T_2b1bb_row0_col0" class="data row0 col0" >None</td>
<td id="T_2b1bb_row0_col1" class="data row0 col1" >72.964936</td>
<td id="T_2b1bb_row0_col2" class="data row0 col2" >17.843167</td>
<td id="T_2b1bb_row0_col3" class="data row0 col3" >0.000000</td>
<td id="T_2b1bb_row0_col4" class="data row0 col4" >7.405907</td>
<td id="T_2b1bb_row0_col5" class="data row0 col5" >17.843162</td>
<td id="T_2b1bb_row0_col6" class="data row0 col6" >0.435065</td>
<td id="T_2b1bb_row0_col7" class="data row0 col7" >0.000000</td>
<td id="T_2b1bb_row0_col8" class="data row0 col8" >0.000000</td>
<td id="T_2b1bb_row0_col9" class="data row0 col9" >0.000000</td>
<td id="T_2b1bb_row0_col10" class="data row0 col10" >auto</td>
<td id="T_2b1bb_row0_col11" class="data row0 col11" >1.101100</td>
<td id="T_2b1bb_row0_col12" class="data row0 col12" >I</td>
<td id="T_2b1bb_row0_col13" class="data row0 col13" >9.725353</td>
<td id="T_2b1bb_row0_col14" class="data row0 col14" >-190.274647</td>
<td id="T_2b1bb_row0_col15" class="data row0 col15" >209.725353</td>
</tr>
<tr>
<th id="T_2b1bb_level0_row1" class="row_heading level0 row1" >1</th>
<td id="T_2b1bb_row1_col0" class="data row1 col0" >lower only</td>
<td id="T_2b1bb_row1_col1" class="data row1 col1" >94.012588</td>
<td id="T_2b1bb_row1_col2" class="data row1 col2" >25.428151</td>
<td id="T_2b1bb_row1_col3" class="data row1 col3" >0.000003</td>
<td id="T_2b1bb_row1_col4" class="data row1 col4" >5.209152</td>
<td id="T_2b1bb_row1_col5" class="data row1 col5" >25.428140</td>
<td id="T_2b1bb_row1_col6" class="data row1 col6" >0.221315</td>
<td id="T_2b1bb_row1_col7" class="data row1 col7" >0.000000</td>
<td id="T_2b1bb_row1_col8" class="data row1 col8" >0.000000</td>
<td id="T_2b1bb_row1_col9" class="data row1 col9" >0.000000</td>
<td id="T_2b1bb_row1_col10" class="data row1 col10" >auto</td>
<td id="T_2b1bb_row1_col11" class="data row1 col11" >1.104800</td>
<td id="T_2b1bb_row1_col12" class="data row1 col12" >I</td>
<td id="T_2b1bb_row1_col13" class="data row1 col13" >14.197130</td>
<td id="T_2b1bb_row1_col14" class="data row1 col14" >-185.802870</td>
<td id="T_2b1bb_row1_col15" class="data row1 col15" >214.197130</td>
</tr>
<tr>
<th id="T_2b1bb_level0_row2" class="row_heading level0 row2" >2</th>
<td id="T_2b1bb_row2_col0" class="data row2 col0" >higher only</td>
<td id="T_2b1bb_row2_col1" class="data row2 col1" >54.213318</td>
<td id="T_2b1bb_row2_col2" class="data row2 col2" >21.662775</td>
<td id="T_2b1bb_row2_col3" class="data row2 col3" >0.000007</td>
<td id="T_2b1bb_row2_col4" class="data row2 col4" >3.435552</td>
<td id="T_2b1bb_row2_col5" class="data row2 col5" >21.662771</td>
<td id="T_2b1bb_row2_col6" class="data row2 col6" >0.803308</td>
<td id="T_2b1bb_row2_col7" class="data row2 col7" >0.000000</td>
<td id="T_2b1bb_row2_col8" class="data row2 col8" >0.000000</td>
<td id="T_2b1bb_row2_col9" class="data row2 col9" >0.000000</td>
<td id="T_2b1bb_row2_col10" class="data row2 col10" >auto</td>
<td id="T_2b1bb_row2_col11" class="data row2 col11" >1.096300</td>
<td id="T_2b1bb_row2_col12" class="data row2 col12" >I</td>
<td id="T_2b1bb_row2_col13" class="data row2 col13" >13.943177</td>
<td id="T_2b1bb_row2_col14" class="data row2 col14" >-186.056823</td>
<td id="T_2b1bb_row2_col15" class="data row2 col15" >213.943177</td>
</tr>
<tr>
<th id="T_2b1bb_level0_row3" class="row_heading level0 row3" >3</th>
<td id="T_2b1bb_row3_col0" class="data row3 col0" >trim at mid-point</td>
<td id="T_2b1bb_row3_col1" class="data row3 col1" >54.213318</td>
<td id="T_2b1bb_row3_col2" class="data row3 col2" >21.662775</td>
<td id="T_2b1bb_row3_col3" class="data row3 col3" >0.000007</td>
<td id="T_2b1bb_row3_col4" class="data row3 col4" >3.435552</td>
<td id="T_2b1bb_row3_col5" class="data row3 col5" >21.662771</td>
<td id="T_2b1bb_row3_col6" class="data row3 col6" >0.803308</td>
<td id="T_2b1bb_row3_col7" class="data row3 col7" >0.000000</td>
<td id="T_2b1bb_row3_col8" class="data row3 col8" >0.000000</td>
<td id="T_2b1bb_row3_col9" class="data row3 col9" >0.000000</td>
<td id="T_2b1bb_row3_col10" class="data row3 col10" >auto</td>
<td id="T_2b1bb_row3_col11" class="data row3 col11" >1.096300</td>
<td id="T_2b1bb_row3_col12" class="data row3 col12" >I</td>
<td id="T_2b1bb_row3_col13" class="data row3 col13" >13.943177</td>
<td id="T_2b1bb_row3_col14" class="data row3 col14" >-186.056823</td>
<td id="T_2b1bb_row3_col15" class="data row3 col15" >213.943177</td>
</tr>
<tr>
<th id="T_2b1bb_level0_row4" class="row_heading level0 row4" >4</th>
<td id="T_2b1bb_row4_col0" class="data row4 col0" >coadd</td>
<td id="T_2b1bb_row4_col1" class="data row4 col1" >69.328145</td>
<td id="T_2b1bb_row4_col2" class="data row4 col2" >15.158243</td>
<td id="T_2b1bb_row4_col3" class="data row4 col3" >0.000000</td>
<td id="T_2b1bb_row4_col4" class="data row4 col4" >5.479275</td>
<td id="T_2b1bb_row4_col5" class="data row4 col5" >15.158239</td>
<td id="T_2b1bb_row4_col6" class="data row4 col6" >0.999346</td>
<td id="T_2b1bb_row4_col7" class="data row4 col7" >0.000000</td>
<td id="T_2b1bb_row4_col8" class="data row4 col8" >0.000000</td>
<td id="T_2b1bb_row4_col9" class="data row4 col9" >0.000000</td>
<td id="T_2b1bb_row4_col10" class="data row4 col10" >auto</td>
<td id="T_2b1bb_row4_col11" class="data row4 col11" >1.098296</td>
<td id="T_2b1bb_row4_col12" class="data row4 col12" >I</td>
<td id="T_2b1bb_row4_col13" class="data row4 col13" >14.040448</td>
<td id="T_2b1bb_row4_col14" class="data row4 col14" >-185.959552</td>
<td id="T_2b1bb_row4_col15" class="data row4 col15" >214.040448</td>
</tr>
</tbody>
</table>


The Halpha line is somewhat complicated to analyze, in that it has a very broad emission component from regions not associated with the magnetic field. This makes the correct velocity range to use for integration in the Bz calculation ambiguous.
For a somewhat cleaner example that is still in a region where two spectral orders overlap, we can look at the He I 587.56 nm line.
```python
# Read the observed polarized spectrum from disk
spec = pol.read_spectrum("IndividualLine_tutorialfiles/1423137pn.s")
# Define the line parameters.
# He I 587.56 nm is really a multiplet, so geff is rather approximate.
lambda0 = 587.56
geff = 1.0
lwidth = 500. * lambda0 / 2.99e5  # width in nm from a 500 km/s width
# Get the profile for that line from the (default-merged) spectrum
prof = spec.individual_line(lambda0=lambda0, lwidth=lwidth)
# Velocity limits for the continuum normalization / cog search,
# and compute Bz in a 50 km/s window around the cog
cogRange = [-100., 150.]
intRange = 50.
Bz = prof.calc_bz(norm='auto', cog='I',
                  velrange=cogRange, bzwidth=intRange,
                  geff=geff, lambda0=lambda0, plot=False)
# Start a dataframe recording the result of each method in this section
d = {'Merge type': 'None'}
d.update(Bz)
df_results = pd.DataFrame(data=[d])

# Analyze each of the two overlapping orders on its own.
# The loop variables are named spec_order / merge_label so they do not
# shadow the Python builtins `ord` and `type`.
orders = spec.get_orders_in_range(lambda0)
for spec_order, merge_label in zip(orders, ['lower only', 'higher only']):
    # Get the profile for the line in this order only
    prof = spec_order.individual_line(lambda0=lambda0, lwidth=lwidth)
    Bz = prof.calc_bz(norm='auto', cog='I',
                      velrange=cogRange, bzwidth=intRange,
                      geff=geff, lambda0=lambda0, plot=False)
    d = {'Merge type': merge_label}
    d.update(Bz)
    df = pd.DataFrame(data=[d])
    df_results = pd.concat([df_results, df], ignore_index=True)

# Merging using the trim method
merge = spec.merge_orders(mode='trim', midpoint=0.5)
prof = merge.individual_line(lambda0=lambda0, lwidth=lwidth)
Bz = prof.calc_bz(norm='auto', cog='I',
                  velrange=cogRange, bzwidth=intRange,
                  geff=geff, lambda0=lambda0, plot=False)
d = {'Merge type': 'trim at mid-point'}
d.update(Bz)
df = pd.DataFrame(data=[d])
df_results = pd.concat([df_results, df], ignore_index=True)

# Merging using the coadd method (plot this one for inspection)
merge = spec.merge_orders(mode='coadd')
prof = merge.individual_line(lambda0=lambda0, lwidth=lwidth)
Bz, fig = prof.calc_bz(norm='auto', cog='I',
                       velrange=cogRange, bzwidth=intRange,
                       geff=geff, lambda0=lambda0, plot=True)
d = {'Merge type': 'coadd'}
d.update(Bz)
df = pd.DataFrame(data=[d])
df_results = pd.concat([df_results, df], ignore_index=True)

# Display the Bz result as a dataframe
df_results.style
```
using AUTO method for the normalization
using the median of the continuum outside of the line
using AUTO method for the normalization
using the median of the continuum outside of the line
using AUTO method for the normalization
using the median of the continuum outside of the line
using AUTO method for the normalization
using the median of the continuum outside of the line
using AUTO method for the normalization
using the median of the continuum outside of the line
/tmp/ipykernel_980312/2408171684.py:13: UserWarning:
The velocity array is not monotonically increasing.
There might be an order overlap in the region of the observation used.
calc_bz will sort the LSD profile in velocity order.
Make sure this is what you want -- merge orders before running calc_bz()!
Bz = prof.calc_bz(norm = 'auto', cog = 'I',
<style type="text/css">
</style>
<table id="T_11610_">
<thead>
<tr>
<th class="blank level0" > </th>
<th class="col_heading level0 col0" >Merge type</th>
<th class="col_heading level0 col1" >V bz (G)</th>
<th class="col_heading level0 col2" >V bz sig (G)</th>
<th class="col_heading level0 col3" >V FAP</th>
<th class="col_heading level0 col4" >N1 bz (G)</th>
<th class="col_heading level0 col5" >N1 bz sig (G)</th>
<th class="col_heading level0 col6" >N1 FAP</th>
<th class="col_heading level0 col7" >N2 bz (G)</th>
<th class="col_heading level0 col8" >N2 bz sig (G)</th>
<th class="col_heading level0 col9" >N2 FAP</th>
<th class="col_heading level0 col10" >norm_method</th>
<th class="col_heading level0 col11" >norm</th>
<th class="col_heading level0 col12" >cog_method</th>
<th class="col_heading level0 col13" >cog</th>
<th class="col_heading level0 col14" >int. range start</th>
<th class="col_heading level0 col15" >int. range end</th>
</tr>
</thead>
<tbody>
<tr>
<th id="T_11610_level0_row0" class="row_heading level0 row0" >0</th>
<td id="T_11610_row0_col0" class="data row0 col0" >None</td>
<td id="T_11610_row0_col1" class="data row0 col1" >699.503139</td>
<td id="T_11610_row0_col2" class="data row0 col2" >66.521645</td>
<td id="T_11610_row0_col3" class="data row0 col3" >0.000000</td>
<td id="T_11610_row0_col4" class="data row0 col4" >49.584163</td>
<td id="T_11610_row0_col5" class="data row0 col5" >66.504701</td>
<td id="T_11610_row0_col6" class="data row0 col6" >0.000385</td>
<td id="T_11610_row0_col7" class="data row0 col7" >0.000000</td>
<td id="T_11610_row0_col8" class="data row0 col8" >0.000000</td>
<td id="T_11610_row0_col9" class="data row0 col9" >0.000000</td>
<td id="T_11610_row0_col10" class="data row0 col10" >auto</td>
<td id="T_11610_row0_col11" class="data row0 col11" >0.991090</td>
<td id="T_11610_row0_col12" class="data row0 col12" >I</td>
<td id="T_11610_row0_col13" class="data row0 col13" >18.941158</td>
<td id="T_11610_row0_col14" class="data row0 col14" >-31.058842</td>
<td id="T_11610_row0_col15" class="data row0 col15" >68.941158</td>
</tr>
<tr>
<th id="T_11610_level0_row1" class="row_heading level0 row1" >1</th>
<td id="T_11610_row1_col0" class="data row1 col0" >lower only</td>
<td id="T_11610_row1_col1" class="data row1 col1" >822.700767</td>
<td id="T_11610_row1_col2" class="data row1 col2" >91.691815</td>
<td id="T_11610_row1_col3" class="data row1 col3" >0.000000</td>
<td id="T_11610_row1_col4" class="data row1 col4" >-17.652850</td>
<td id="T_11610_row1_col5" class="data row1 col5" >91.658895</td>
<td id="T_11610_row1_col6" class="data row1 col6" >0.031436</td>
<td id="T_11610_row1_col7" class="data row1 col7" >0.000000</td>
<td id="T_11610_row1_col8" class="data row1 col8" >0.000000</td>
<td id="T_11610_row1_col9" class="data row1 col9" >0.000000</td>
<td id="T_11610_row1_col10" class="data row1 col10" >auto</td>
<td id="T_11610_row1_col11" class="data row1 col11" >0.992395</td>
<td id="T_11610_row1_col12" class="data row1 col12" >I</td>
<td id="T_11610_row1_col13" class="data row1 col13" >25.706350</td>
<td id="T_11610_row1_col14" class="data row1 col14" >-24.293650</td>
<td id="T_11610_row1_col15" class="data row1 col15" >75.706350</td>
</tr>
<tr>
<th id="T_11610_level0_row2" class="row_heading level0 row2" >2</th>
<td id="T_11610_row2_col0" class="data row2 col0" >higher only</td>
<td id="T_11610_row2_col1" class="data row2 col1" >793.994679</td>
<td id="T_11610_row2_col2" class="data row2 col2" >90.964654</td>
<td id="T_11610_row2_col3" class="data row2 col3" >0.000000</td>
<td id="T_11610_row2_col4" class="data row2 col4" >220.357069</td>
<td id="T_11610_row2_col5" class="data row2 col5" >90.937349</td>
<td id="T_11610_row2_col6" class="data row2 col6" >0.002410</td>
<td id="T_11610_row2_col7" class="data row2 col7" >0.000000</td>
<td id="T_11610_row2_col8" class="data row2 col8" >0.000000</td>
<td id="T_11610_row2_col9" class="data row2 col9" >0.000000</td>
<td id="T_11610_row2_col10" class="data row2 col10" >auto</td>
<td id="T_11610_row2_col11" class="data row2 col11" >0.990170</td>
<td id="T_11610_row2_col12" class="data row2 col12" >I</td>
<td id="T_11610_row2_col13" class="data row2 col13" >25.578820</td>
<td id="T_11610_row2_col14" class="data row2 col14" >-24.421180</td>
<td id="T_11610_row2_col15" class="data row2 col15" >75.578820</td>
</tr>
<tr>
<th id="T_11610_level0_row3" class="row_heading level0 row3" >3</th>
<td id="T_11610_row3_col0" class="data row3 col0" >trim at mid-point</td>
<td id="T_11610_row3_col1" class="data row3 col1" >797.884180</td>
<td id="T_11610_row3_col2" class="data row3 col2" >91.419382</td>
<td id="T_11610_row3_col3" class="data row3 col3" >0.000000</td>
<td id="T_11610_row3_col4" class="data row3 col4" >221.513065</td>
<td id="T_11610_row3_col5" class="data row3 col5" >91.391673</td>
<td id="T_11610_row3_col6" class="data row3 col6" >0.002410</td>
<td id="T_11610_row3_col7" class="data row3 col7" >0.000000</td>
<td id="T_11610_row3_col8" class="data row3 col8" >0.000000</td>
<td id="T_11610_row3_col9" class="data row3 col9" >0.000000</td>
<td id="T_11610_row3_col10" class="data row3 col10" >auto</td>
<td id="T_11610_row3_col11" class="data row3 col11" >0.993520</td>
<td id="T_11610_row3_col12" class="data row3 col12" >I</td>
<td id="T_11610_row3_col13" class="data row3 col13" >25.587490</td>
<td id="T_11610_row3_col14" class="data row3 col14" >-24.412510</td>
<td id="T_11610_row3_col15" class="data row3 col15" >75.587490</td>
</tr>
<tr>
<th id="T_11610_level0_row4" class="row_heading level0 row4" >4</th>
<td id="T_11610_row4_col0" class="data row4 col0" >coadd</td>
<td id="T_11610_row4_col1" class="data row4 col1" >805.924582</td>
<td id="T_11610_row4_col2" class="data row4 col2" >60.001692</td>
<td id="T_11610_row4_col3" class="data row4 col3" >0.000000</td>
<td id="T_11610_row4_col4" class="data row4 col4" >116.774813</td>
<td id="T_11610_row4_col5" class="data row4 col5" >59.981432</td>
<td id="T_11610_row4_col6" class="data row4 col6" >0.097482</td>
<td id="T_11610_row4_col7" class="data row4 col7" >0.000000</td>
<td id="T_11610_row4_col8" class="data row4 col8" >0.000000</td>
<td id="T_11610_row4_col9" class="data row4 col9" >0.000000</td>
<td id="T_11610_row4_col10" class="data row4 col10" >auto</td>
<td id="T_11610_row4_col11" class="data row4 col11" >0.990281</td>
<td id="T_11610_row4_col12" class="data row4 col12" >I</td>
<td id="T_11610_row4_col13" class="data row4 col13" >25.660364</td>
<td id="T_11610_row4_col14" class="data row4 col14" >-24.339636</td>
<td id="T_11610_row4_col15" class="data row4 col15" >75.660364</td>
</tr>
</tbody>
</table>

|
folsomcpREPO_NAMEspecpolFlowPATH_START.@specpolFlow_extracted@specpolFlow-main@docs-jb@Tutorials@7-IndividualLine_Tutorial.ipynb@.PATH_END.py
|
{
"filename": "cpu_eigh_lapack_syev.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/internal_test_util/export_back_compat_test_data/cpu_eigh_lapack_syev.py",
"type": "Python"
}
|
# Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa
import datetime
from numpy import array, float32, complex64
data_2023_03_17 = dict(
# Pasted from the test output (see back_compat_test.py module docstring)
f32=dict(
testdata_version=1,
platform='cpu',
custom_call_targets=['lapack_ssyevd'],
serialized_date=datetime.date(2023, 3, 17),
inputs=(),
expected_outputs=(array([[-0.6185769 , -0.20142993 , -0.09725195 , 0.62983674 ,
-0.07926044 , 0.3605001 , -0.019093221 , -0.18446997 ],
[-0.47070873 , 0.29325768 , -0.19454119 , -0.6394365 ,
0.0622955 , 0.33249345 , 0.28112718 , -0.22856665 ],
[-0.32284075 , -0.12361939 , 0.20547704 , -0.18307868 ,
0.47294614 , -0.3170349 , -0.6373532 , -0.27266347 ],
[-0.17497246 , -0.079641335 , 0.15042791 , -0.15416273 ,
-0.815209 , -0.38054234 , -0.083263926 , -0.31676024 ],
[-0.027104253 , -0.26490977 , 0.32271704 , 0.08653544 ,
0.30305928 , -0.33998996 , 0.6926741 , -0.360857 ],
[ 0.12076397 , 0.43288827 , -0.64385164 , 0.2652551 ,
0.09482376 , -0.37435007 , 0.00091664493, -0.40495378 ],
[ 0.26863196 , 0.51607686 , 0.53846526 , 0.16969058 ,
-0.021670295 , 0.35755336 , -0.113144726 , -0.4490505 ],
[ 0.4165004 , -0.57262254 , -0.2814425 , -0.17463988 ,
-0.01698498 , 0.3613705 , -0.12186296 , -0.49314725 ]],
dtype=float32), array([-2.4598808e+01, -3.3105560e-05, -3.1002426e-05, -1.0103593e-05,
-1.0022322e-05, 4.0141886e-06, 9.5510331e-06, 2.7659882e+02],
dtype=float32)),
mlir_module_text=r"""
module @jit__lambda_ {
func.func public @main() -> (tensor<8x8xf32> {jax.result_info = "[0]"}, tensor<8xf32> {jax.result_info = "[1]"}) {
%0 = stablehlo.iota dim = 0 : tensor<64xf32>
%1 = stablehlo.reshape %0 : (tensor<64xf32>) -> tensor<8x8xf32>
%2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<8x8xf32>) -> tensor<8x8xf32>
%3 = stablehlo.add %1, %2 : tensor<8x8xf32>
%4 = stablehlo.constant dense<2.000000e+00> : tensor<f32>
%5 = stablehlo.broadcast_in_dim %4, dims = [] : (tensor<f32>) -> tensor<8x8xf32>
%6 = stablehlo.divide %3, %5 : tensor<8x8xf32>
%7 = call @tril(%6) : (tensor<8x8xf32>) -> tensor<8x8xf32>
%8 = stablehlo.constant dense<1> : tensor<i32>
%9 = stablehlo.constant dense<1> : tensor<i32>
%10 = stablehlo.constant dense<8> : tensor<i32>
%11 = stablehlo.custom_call @lapack_ssyevd(%8, %9, %10, %7) {api_version = 2 : i32, operand_layouts = [dense<> : tensor<0xindex>, dense<> : tensor<0xindex>, dense<> : tensor<0xindex>, dense<[0, 1]> : tensor<2xindex>], output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 3, operand_tuple_indices = []>], result_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<0> : tensor<1xindex>, dense<> : tensor<0xindex>, dense<0> : tensor<1xindex>, dense<0> : tensor<1xindex>]} : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<8x8xf32>) -> tuple<tensor<8x8xf32>, tensor<8xf32>, tensor<i32>, tensor<177xf32>, tensor<43xi32>>
%12 = stablehlo.get_tuple_element %11[0] : (tuple<tensor<8x8xf32>, tensor<8xf32>, tensor<i32>, tensor<177xf32>, tensor<43xi32>>) -> tensor<8x8xf32>
%13 = stablehlo.get_tuple_element %11[1] : (tuple<tensor<8x8xf32>, tensor<8xf32>, tensor<i32>, tensor<177xf32>, tensor<43xi32>>) -> tensor<8xf32>
%14 = stablehlo.get_tuple_element %11[2] : (tuple<tensor<8x8xf32>, tensor<8xf32>, tensor<i32>, tensor<177xf32>, tensor<43xi32>>) -> tensor<i32>
%15 = stablehlo.get_tuple_element %11[3] : (tuple<tensor<8x8xf32>, tensor<8xf32>, tensor<i32>, tensor<177xf32>, tensor<43xi32>>) -> tensor<177xf32>
%16 = stablehlo.get_tuple_element %11[4] : (tuple<tensor<8x8xf32>, tensor<8xf32>, tensor<i32>, tensor<177xf32>, tensor<43xi32>>) -> tensor<43xi32>
%17 = stablehlo.constant dense<0> : tensor<i32>
%18 = stablehlo.broadcast_in_dim %17, dims = [] : (tensor<i32>) -> tensor<i32>
%19 = stablehlo.compare EQ, %14, %18, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1>
%20 = stablehlo.broadcast_in_dim %19, dims = [] : (tensor<i1>) -> tensor<1x1xi1>
%21 = stablehlo.constant dense<0x7FC00000> : tensor<f32>
%22 = stablehlo.broadcast_in_dim %21, dims = [] : (tensor<f32>) -> tensor<8x8xf32>
%23 = stablehlo.broadcast_in_dim %20, dims = [0, 1] : (tensor<1x1xi1>) -> tensor<8x8xi1>
%24 = stablehlo.select %23, %12, %22 : tensor<8x8xi1>, tensor<8x8xf32>
%25 = stablehlo.broadcast_in_dim %19, dims = [] : (tensor<i1>) -> tensor<1xi1>
%26 = stablehlo.constant dense<0x7FC00000> : tensor<f32>
%27 = stablehlo.broadcast_in_dim %26, dims = [] : (tensor<f32>) -> tensor<8xf32>
%28 = stablehlo.broadcast_in_dim %25, dims = [0] : (tensor<1xi1>) -> tensor<8xi1>
%29 = stablehlo.select %28, %13, %27 : tensor<8xi1>, tensor<8xf32>
return %24, %29 : tensor<8x8xf32>, tensor<8xf32>
}
func.func private @tril(%arg0: tensor<8x8xf32>) -> tensor<8x8xf32> {
%0 = stablehlo.iota dim = 0 : tensor<8x8xi32>
%1 = stablehlo.constant dense<0> : tensor<i32>
%2 = stablehlo.broadcast_in_dim %1, dims = [] : (tensor<i32>) -> tensor<8x8xi32>
%3 = stablehlo.add %0, %2 : tensor<8x8xi32>
%4 = stablehlo.iota dim = 1 : tensor<8x8xi32>
%5 = stablehlo.compare GE, %3, %4, SIGNED : (tensor<8x8xi32>, tensor<8x8xi32>) -> tensor<8x8xi1>
%6 = stablehlo.constant dense<0.000000e+00> : tensor<f32>
%7 = stablehlo.broadcast_in_dim %6, dims = [] : (tensor<f32>) -> tensor<8x8xf32>
%8 = stablehlo.select %5, %arg0, %7 : tensor<8x8xi1>, tensor<8x8xf32>
return %8 : tensor<8x8xf32>
}
}
""",
mlir_module_serialized=b"ML\xefR\x03MLIRxxx-trunk\x00\x01-\x05\x01\x05\x01\x03\x05\x03\x1d\x07\t\x0b\r\x0f\x11\x13\x15\x17\x19\x1b\x1d\x1f!\x03z\x02\xf77\x01\x9b\x0f\x17\x13\x0b\x07\x0f\x0b\x0b\x0b\x0b\x17\x0b\x0b\x0b\x0b\x13\x0b\x13\x0f\x0b\x0b\x17\x0f\x13\x13\x13\x0b33\x0b\x0f\x0b\x0b\x13\x0f\x0b\x1b\x0f\x0b\x13\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x13\x0b\x0f\x0b\x0f\x0b\x13\x0b\x13\x0b\x0b\x13K\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x13\x13\x13\x13\x13\x1b\x13\x13\x03]\x0f/\x0b\x0b\x0f\x0b\x0bO\x0b\x13\x13\x0b\x13\x0b\x0b\x0b\x0b\x0b\x0b\x0f\x1f\x0f\x0f\x0b\x1fO\x1f\x1f\x1f\x0b\x0b\x0b\x0b\x1b\x0f\x17\x1f\x0f\x0f\x0f\x0f\x0f\x0b\x1fO/\x037\x17\x0f\x07\x0f\x07\x13\x07\x07\x17\x07\x17\x13\x17\x13\x17\x17\x13\x17\x1f\x13\x13\x13\x0f\x17\x13\x13\x13\x02\n\t\x1du\x03\x17\x11\xf6\x04\x01\x03\x03\x13\xc5\x05#\x1f\x1d;\x03\x05%\x05'\x05)\x05+\x17\x11\xf2\x04\x01\x05-\x05/\x051\x053\x03\x03!\xc1\x055\x03\x03\x07\xc3\x1dA\x03\x057\x059\x17\x11\xea\x04\x01\x1do\x15\x03\x03\x07\xd1\x03\x03\x07\xf1\x03\x03\x0f5\x05;\x03\x0b\x17\x9f\x19\xab\x1b\xad\x0f\xb7\x1d\xb9\x03\x0b\x17\xa3\x19\xbd\x1b\xa3\x0f\xa5\x1d\xbf\x05=\x1d?\x03\x05?\x05A\x03\x03!\xc7\x1dG\x03\x05C\x03\x05'\xa7)\xc9\x1dM\x03\x05E\x03\x03\x07\xcb\x1dS\x03\x05G\x1dW\x03\x05I\x1d[+\x05K\x1d_+\x05M\x03\x03c\xcd\x05O\x1dg\x15\x05Q\x1dk\x15\x05S\x03\x03\x07\xcf\x05U\x03\x03s\xa5\x05W\x05Y\x03\x03\x07\xd3\x03\x11{\xd5}\xd7\x7f\xd9\x81\x9f\x83\xdb\x85\xdd\x87\xdf\x89\xe3\x05[\x05]\x05_\x05a\x05c\x05e\x05g\x05i\x03\x03\r\xe5\x03\x03\r\xe7\x03\x03\r\xe9\x03\x03\r\xeb\x03\x03\r\xed\x03\x05'\xa7)\xef\x03\x03\x13\xf3\x03\x03\x13\xf5\x1f'\x01\x1f+\x11\x00\x00\x00\x00\x00\x00\x00\x00\x03\x01\x1dk\x03\x03\xbb\x1dm\t\x07\x1f)!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00#\x1d\x03\x05\xaf\xb3\r\x03\xa1\xb1\x1do\r\x03\xa1\xb5\x1dq\x1ds\x1du\r\x01#\x1f\x1dw\x13\r\x01\x1f\x03\t\x00\x00\x00\x00\x1f!\x01\x13\r\x05\x07\x05\x1f\x07\t\x00\x00\x00\x00\x1f\x17!\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x07\t
\x00\x00\x00@\x1f\x03\t\x01\x00\x00\x00\x1f\x03\t\x08\x00\x00\x00\x0b\x05\x1dy\x1d{\x05\x01\x03\t\x9b\x9b\x9b\xa9\x03\x03\xe1\x15\x03\x01\r\x01\x03\x0b\xa9\x9d\x9b\x9d\x9d\x13\x05\x01\x13\x05\x05\x13\x05\t\x13\x05\r\x13\x05\x11\x07\x01\x1f\x07\t\x00\x00\xc0\x7f\x1f\x17!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1f5\x11\x00\x00\x00\x00\x00\x00\x00\x00)\x05!!\t)\x01\x05\x1b)\x01\t\t)\x03!\t\x1d\x01)\x05!!\x05\x13)\x05!!\x0f)\x03\t\r)\x03\x8a\x05\t)\x03\xad\x05\x11\x01\x05\x01\x0b\x11\x03\x01\x03\x01)\x03\x01\r)\x03\x02\x02\t/\x0b\x01\x0b\x03\x19\x1b)\x03\x01\x13)\x03\t\x13)\x03\x05\x13)\x01\x0f)\x05\x05\x05\x0f)\x03\x05\x0f)\x03!\x0f)\x03\x05\r\x04:\x05\x05\x01\x11\t3\x07\x03\x01\t\r\x11\t7\x05\x03=}\t\x03Y\x1f\x03#\x15\x06]\x03\x01\x03\x01\x17\x07ea\x03\x01\x03\x03\x0f\x06i\x03\x01\x05\x03\x05\x05\x03\tm\x03\x07\x03\x07-\x05\x03\x01\x03\t\x19\x06-\x03\x01\x05\x07\x0b\x1b\x07\x0bq\x03\x01\x03\r\x05\x03\x01/\x03\x03\x05\x03\x01/\x03\x03\x05\x03\x01w\x03\x03\x1d\x07\x01y\x03%\t\x11\x13\x15\x0f\x07\x07\x01\x8b\x03\x01\x03\x17\x07\x07\x01\x8d\x03\x0b\x03\x17\x07\x07\x01\x8f\x03\x03\x03\x17\x07\x07\x01\x91\x03\x19\x03\x17\x07\x07\x01\x93\x03\x1b\x03\x17\x05\x03\x01#\x03\x03\x03\x07\x01\x05\x03\x03\x03#\x11\x07\x01\x95\x03-\x05\x1d%\x03\x07\x01\x05\x03/\x03'\x05\x03\x011\x03\x07\x03\x07\x01\x05\x03\x01\x03+\x03\x07\x01\x97\x03\x15\x03)\x0b\x06\x01\x03\x01\x07/\x19-\x03\x07\x01\x05\x031\x03'\x05\x03\x011\x03\x07\x03\x07\x01\x05\x03\x0b\x035\x03\x07\x01\x99\x033\x033\x0b\x06\x01\x03\x0b\x079\x1b7\x13\x04\t\x051;\r\x11\x0b9\x05\x03\x15+\x03\x01\t\t\x03=\x1f\x03\x11\x05\x03\x0b#\x03\x03\x03\x07%\x05\x03\x11\x03\x05\x0f\x06%\x03\x11\x05\x03\x07\t\x03EC\x03\x11\x11\x07KI\x03\x15\x05\t\x0b\x05\x03\x0bO\x03\x07\x03\x07Q\x05\x03\x01\x03\x0f\x0b\x06U\x03\x01\x07\r\x01\x11\x13\x04\x0b\x03\x13\x06\x03\x01\x05\x01\x00\xb2\x19}\x1d\x03\x11\x0f\x0b\t\t\x0b!\x1f/!!)#\x1f\x19\x7f\x0f99m\x19\x85\x89W\xb3K\x9bM\x9b\x96\x04\x1b+\x1b\x1f\x1f\x15\x1d\x15+\x83\x13\r\r\x1f\x
11\x15\x1b\x17\x15\x17\x0f\x11\x15\x11+\x19)\x0f\x0b\x11builtin\x00vhlo\x00module\x00broadcast_in_dim_v1\x00constant_v1\x00get_tuple_element_v1\x00iota_v1\x00select_v1\x00func_v1\x00add_v1\x00compare_v1\x00return_v1\x00reshape_v1\x00transpose_v1\x00divide_v1\x00call_v1\x00custom_call_v1\x00value\x00index\x00sym_name\x00third_party/py/jax/experimental/jax2tf/tests/back_compat_test.py\x00broadcast_dimensions\x00arg_attrs\x00function_type\x00res_attrs\x00sym_visibility\x00iota_dimension\x00compare_type\x00comparison_direction\x00jit__lambda_\x00jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) resource_env=None donated_invars=(False,) name=tril in_positional_semantics=(<_PositionalSemantics.GLOBAL: 1>,) out_positional_semantics=_PositionalSemantics.GLOBAL keep_unused=False inline=False]\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]\x00jit(<lambda>)/jit(main)/jit(tril)/add\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]\x00jit(<lambda>)/jit(main)/jit(tril)/ge\x00jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]\x00jit(<lambda>)/jit(main)/jit(tril)/select_n\x00jit(<lambda>)/jit(main)/iota[dtype=float32 shape=(64,) dimension=0]\x00jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]\x00permutation\x00jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]\x00jit(<lambda>)/jit(main)/add\x00jit(<lambda>)/jit(main)/div\x00callee\x00jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True]\x00api_version\x00backend_config\x00call_target_name\x00called_computations\x00has_side_effect\x00operand_layouts\x00output_operand_aliases\x00result_layouts\x00jax.result_info\x00tril\x00[0]\x00[1]\x00main\x00public\x00private\x00\x00lapack_ssyevd\x00",
xla_call_module_version=4,
), # End paste
# Pasted from the test output (see back_compat_test.py module docstring)
f64=dict(
testdata_version=1,
platform='cpu',
custom_call_targets=['lapack_dsyevd'],
serialized_date=datetime.date(2023, 3, 17),
inputs=(),
expected_outputs=(array([[-6.1857700048412056e-01, 2.4081403770912022e-01,
3.5662489253627483e-01, -6.3034019033669797e-01,
1.0043483479985752e-16, -2.8842036081919542e-02,
7.7164692943283169e-25, -1.8446994643771725e-01],
[-4.7070881487314614e-01, 4.7473787464450845e-01,
-4.8036836210243367e-01, 4.3802686872516400e-01,
1.7961797619639258e-01, 8.3080980076741355e-03,
2.1415294457221756e-01, -2.2856669794666584e-01],
[-3.2284062926217072e-01, -5.4336490915553370e-01,
2.2181041859724990e-01, 2.9947877954402297e-01,
-3.6491813600134632e-01, 3.2867679819727436e-01,
3.8223299448843473e-01, -2.7266344945561438e-01],
[-1.7497244365119530e-01, -8.9251550609769414e-02,
-6.3518515114898394e-02, 1.9162997359209971e-01,
-2.2087281326110139e-01, 5.9957027043505064e-02,
-8.7632498908241274e-01, -3.1676020096456303e-01],
[-2.7104258040220038e-02, -3.3772873786627672e-01,
2.5901386593721748e-01, 1.7032650752287815e-01,
6.7521217612940332e-01, -4.5036136532965476e-01,
-1.2279030059078447e-02, -3.6085695247351163e-01],
[ 1.2076392757075530e-01, -3.3834734096469254e-01,
-6.5506827461665540e-01, -5.0472498521116749e-01,
6.9987430903492118e-02, 1.0595648906599275e-01,
8.3443844143082022e-02, -4.0495370398246017e-01],
[ 2.6863211318173097e-01, 2.2958613191407318e-01,
6.3952843755683941e-02, 1.8776775771084137e-02,
-5.3523731432241317e-01, -5.9199531677602002e-01,
1.7916671834524248e-01, -4.4905045549140887e-01],
[ 4.1650029879270661e-01, 3.6355449432857079e-01,
2.9755313100756142e-01, 1.6826270392615944e-02,
1.9621068035557282e-01, 5.6830030587314817e-01,
2.9607517592514246e-02, -4.9314720700035747e-01]]), array([-2.4598804776133626e+01, -4.6567755957874661e-14,
-1.9932120610662194e-14, -5.7323356091157378e-15,
-4.5459724251334835e-16, 4.0479851042511616e-14,
9.2325194924982089e-14, 2.7659880477613365e+02])),
mlir_module_text=r"""
module @jit__lambda_ {
func.func public @main() -> (tensor<8x8xf64> {jax.result_info = "[0]"}, tensor<8xf64> {jax.result_info = "[1]"}) {
%0 = stablehlo.iota dim = 0 : tensor<64xf64>
%1 = stablehlo.reshape %0 : (tensor<64xf64>) -> tensor<8x8xf64>
%2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<8x8xf64>) -> tensor<8x8xf64>
%3 = stablehlo.add %1, %2 : tensor<8x8xf64>
%4 = stablehlo.constant dense<2.000000e+00> : tensor<f64>
%5 = stablehlo.broadcast_in_dim %4, dims = [] : (tensor<f64>) -> tensor<8x8xf64>
%6 = stablehlo.divide %3, %5 : tensor<8x8xf64>
%7 = call @tril(%6) : (tensor<8x8xf64>) -> tensor<8x8xf64>
%8 = stablehlo.constant dense<1> : tensor<i32>
%9 = stablehlo.constant dense<1> : tensor<i32>
%10 = stablehlo.constant dense<8> : tensor<i32>
%11 = stablehlo.custom_call @lapack_dsyevd(%8, %9, %10, %7) {api_version = 2 : i32, operand_layouts = [dense<> : tensor<0xindex>, dense<> : tensor<0xindex>, dense<> : tensor<0xindex>, dense<[0, 1]> : tensor<2xindex>], output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 3, operand_tuple_indices = []>], result_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<0> : tensor<1xindex>, dense<> : tensor<0xindex>, dense<0> : tensor<1xindex>, dense<0> : tensor<1xindex>]} : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<8x8xf64>) -> tuple<tensor<8x8xf64>, tensor<8xf64>, tensor<i32>, tensor<177xf64>, tensor<43xi32>>
%12 = stablehlo.get_tuple_element %11[0] : (tuple<tensor<8x8xf64>, tensor<8xf64>, tensor<i32>, tensor<177xf64>, tensor<43xi32>>) -> tensor<8x8xf64>
%13 = stablehlo.get_tuple_element %11[1] : (tuple<tensor<8x8xf64>, tensor<8xf64>, tensor<i32>, tensor<177xf64>, tensor<43xi32>>) -> tensor<8xf64>
%14 = stablehlo.get_tuple_element %11[2] : (tuple<tensor<8x8xf64>, tensor<8xf64>, tensor<i32>, tensor<177xf64>, tensor<43xi32>>) -> tensor<i32>
%15 = stablehlo.get_tuple_element %11[3] : (tuple<tensor<8x8xf64>, tensor<8xf64>, tensor<i32>, tensor<177xf64>, tensor<43xi32>>) -> tensor<177xf64>
%16 = stablehlo.get_tuple_element %11[4] : (tuple<tensor<8x8xf64>, tensor<8xf64>, tensor<i32>, tensor<177xf64>, tensor<43xi32>>) -> tensor<43xi32>
%17 = stablehlo.constant dense<0> : tensor<i32>
%18 = stablehlo.broadcast_in_dim %17, dims = [] : (tensor<i32>) -> tensor<i32>
%19 = stablehlo.compare EQ, %14, %18, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1>
%20 = stablehlo.broadcast_in_dim %19, dims = [] : (tensor<i1>) -> tensor<1x1xi1>
%21 = stablehlo.constant dense<0x7FF8000000000000> : tensor<f64>
%22 = stablehlo.broadcast_in_dim %21, dims = [] : (tensor<f64>) -> tensor<8x8xf64>
%23 = stablehlo.broadcast_in_dim %20, dims = [0, 1] : (tensor<1x1xi1>) -> tensor<8x8xi1>
%24 = stablehlo.select %23, %12, %22 : tensor<8x8xi1>, tensor<8x8xf64>
%25 = stablehlo.broadcast_in_dim %19, dims = [] : (tensor<i1>) -> tensor<1xi1>
%26 = stablehlo.constant dense<0x7FF8000000000000> : tensor<f64>
%27 = stablehlo.broadcast_in_dim %26, dims = [] : (tensor<f64>) -> tensor<8xf64>
%28 = stablehlo.broadcast_in_dim %25, dims = [0] : (tensor<1xi1>) -> tensor<8xi1>
%29 = stablehlo.select %28, %13, %27 : tensor<8xi1>, tensor<8xf64>
return %24, %29 : tensor<8x8xf64>, tensor<8xf64>
}
func.func private @tril(%arg0: tensor<8x8xf64>) -> tensor<8x8xf64> {
%0 = stablehlo.iota dim = 0 : tensor<8x8xi32>
%1 = stablehlo.constant dense<0> : tensor<i32>
%2 = stablehlo.broadcast_in_dim %1, dims = [] : (tensor<i32>) -> tensor<8x8xi32>
%3 = stablehlo.add %0, %2 : tensor<8x8xi32>
%4 = stablehlo.iota dim = 1 : tensor<8x8xi32>
%5 = stablehlo.compare GE, %3, %4, SIGNED : (tensor<8x8xi32>, tensor<8x8xi32>) -> tensor<8x8xi1>
%6 = stablehlo.constant dense<0.000000e+00> : tensor<f64>
%7 = stablehlo.broadcast_in_dim %6, dims = [] : (tensor<f64>) -> tensor<8x8xf64>
%8 = stablehlo.select %5, %arg0, %7 : tensor<8x8xi1>, tensor<8x8xf64>
return %8 : tensor<8x8xf64>
}
}
""",
mlir_module_serialized=b"ML\xefR\x03MLIRxxx-trunk\x00\x01-\x05\x01\x05\x01\x03\x05\x03\x1d\x07\t\x0b\r\x0f\x11\x13\x15\x17\x19\x1b\x1d\x1f!\x03z\x02\xf77\x01\x9b\x0f\x17\x13\x0b\x07\x0f\x0b\x0b\x0b\x0b\x17\x0b\x0b\x0b\x0b\x13\x0b\x13\x0f\x0b\x0b\x17\x0f\x13\x13\x13\x0b33\x0b\x0f\x0b\x0b\x13\x0f\x0b\x1b\x0f\x0b\x13\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x13\x0b\x0f\x0b\x0f\x0b\x13\x0b\x13\x0b\x0b\x13K\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x13\x13\x13\x13\x13\x1b\x13\x13\x03]\x0f/\x0b\x0b\x0f\x0b\x0bO\x0b\x13\x13\x0b\x13\x0b\x0b\x0b\x0b\x0b\x0b\x0f\x1f\x0f\x0f\x0b/O/\x1f\x1f\x0b\x0b\x0b\x0b\x1b\x0f\x17\x1f\x0f\x0f\x0f\x0f\x0f\x0b/O/\x037\x17\x0f\x07\x0f\x07\x13\x07\x07\x17\x07\x17\x13\x17\x13\x17\x17\x13\x17\x1f\x13\x13\x13\x0f\x17\x13\x13\x13\x02:\t\x1du\x03\x17\x11\xf6\x04\x01\x03\x03\x13\xc5\x05#\x1f\x1d;\x03\x05%\x05'\x05)\x05+\x17\x11\xf2\x04\x01\x05-\x05/\x051\x053\x03\x03!\xc1\x055\x03\x03\x07\xc3\x1dA\x03\x057\x059\x17\x11\xea\x04\x01\x1do\x15\x03\x03\x07\xd1\x03\x03\x07\xf1\x03\x03\x0f5\x05;\x03\x0b\x17\x9f\x19\xab\x1b\xad\x0f\xb7\x1d\xb9\x03\x0b\x17\xa3\x19\xbd\x1b\xa3\x0f\xa5\x1d\xbf\x05=\x1d?\x03\x05?\x05A\x03\x03!\xc7\x1dG\x03\x05C\x03\x05'\xa7)\xc9\x1dM\x03\x05E\x03\x03\x07\xcb\x1dS\x03\x05G\x1dW\x03\x05I\x1d[+\x05K\x1d_+\x05M\x03\x03c\xcd\x05O\x1dg\x15\x05Q\x1dk\x15\x05S\x03\x03\x07\xcf\x05U\x03\x03s\xa5\x05W\x05Y\x03\x03\x07\xd3\x03\x11{\xd5}\xd7\x7f\xd9\x81\x9f\x83\xdb\x85\xdd\x87\xdf\x89\xe3\x05[\x05]\x05_\x05a\x05c\x05e\x05g\x05i\x03\x03\r\xe5\x03\x03\r\xe7\x03\x03\r\xe9\x03\x03\r\xeb\x03\x03\r\xed\x03\x05'\xa7)\xef\x03\x03\x13\xf3\x03\x03\x13\xf5\x1f'\x01\x1f+\x11\x00\x00\x00\x00\x00\x00\x00\x00\x03\x01\x1dk\x03\x03\xbb\x1dm\t\x07\x1f)!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00#\x1d\x03\x05\xaf\xb3\r\x03\xa1\xb1\x1do\r\x03\xa1\xb5\x1dq\x1ds\x1du\r\x01#\x1f\x1dw\x13\r\x01\x1f\x03\t\x00\x00\x00\x00\x1f!\x01\x13\r\x05\x07\x05\x1f\x07\x11\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x17!\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
1f\x07\x11\x00\x00\x00\x00\x00\x00\x00@\x1f\x03\t\x01\x00\x00\x00\x1f\x03\t\x08\x00\x00\x00\x0b\x05\x1dy\x1d{\x05\x01\x03\t\x9b\x9b\x9b\xa9\x03\x03\xe1\x15\x03\x01\r\x01\x03\x0b\xa9\x9d\x9b\x9d\x9d\x13\x05\x01\x13\x05\x05\x13\x05\t\x13\x05\r\x13\x05\x11\x07\x01\x1f\x07\x11\x00\x00\x00\x00\x00\x00\xf8\x7f\x1f\x17!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1f5\x11\x00\x00\x00\x00\x00\x00\x00\x00)\x05!!\t)\x01\x05\x1b)\x01\t\x0b)\x03!\t\x1d\x01)\x05!!\x05\x13)\x05!!\x0f)\x03\t\r)\x03\x8a\x05\t)\x03\xad\x05\x11\x01\x05\x01\x0b\x11\x03\x01\x03\x01)\x03\x01\r)\x03\x02\x02\t/\x0b\x01\x0b\x03\x19\x1b)\x03\x01\x13)\x03\t\x13)\x03\x05\x13)\x01\x0f)\x05\x05\x05\x0f)\x03\x05\x0f)\x03!\x0f)\x03\x05\r\x04:\x05\x05\x01\x11\t3\x07\x03\x01\t\r\x11\t7\x05\x03=}\t\x03Y\x1f\x03#\x15\x06]\x03\x01\x03\x01\x17\x07ea\x03\x01\x03\x03\x0f\x06i\x03\x01\x05\x03\x05\x05\x03\tm\x03\x07\x03\x07-\x05\x03\x01\x03\t\x19\x06-\x03\x01\x05\x07\x0b\x1b\x07\x0bq\x03\x01\x03\r\x05\x03\x01/\x03\x03\x05\x03\x01/\x03\x03\x05\x03\x01w\x03\x03\x1d\x07\x01y\x03%\t\x11\x13\x15\x0f\x07\x07\x01\x8b\x03\x01\x03\x17\x07\x07\x01\x8d\x03\x0b\x03\x17\x07\x07\x01\x8f\x03\x03\x03\x17\x07\x07\x01\x91\x03\x19\x03\x17\x07\x07\x01\x93\x03\x1b\x03\x17\x05\x03\x01#\x03\x03\x03\x07\x01\x05\x03\x03\x03#\x11\x07\x01\x95\x03-\x05\x1d%\x03\x07\x01\x05\x03/\x03'\x05\x03\x011\x03\x07\x03\x07\x01\x05\x03\x01\x03+\x03\x07\x01\x97\x03\x15\x03)\x0b\x06\x01\x03\x01\x07/\x19-\x03\x07\x01\x05\x031\x03'\x05\x03\x011\x03\x07\x03\x07\x01\x05\x03\x0b\x035\x03\x07\x01\x99\x033\x033\x0b\x06\x01\x03\x0b\x079\x1b7\x13\x04\t\x051;\r\x11\x0b9\x05\x03\x15+\x03\x01\t\t\x03=\x1f\x03\x11\x05\x03\x0b#\x03\x03\x03\x07%\x05\x03\x11\x03\x05\x0f\x06%\x03\x11\x05\x03\x07\t\x03EC\x03\x11\x11\x07KI\x03\x15\x05\t\x0b\x05\x03\x0bO\x03\x07\x03\x07Q\x05\x03\x01\x03\x0f\x0b\x06U\x03\x01\x07\r\x01\x11\x13\x04\x0b\x03\x13\x06\x03\x01\x05\x01\x00\xb2\x19}\x1d\x03\x11\x0f\x0b\t\t\x0b!\x1f/!!)#\x1f\x19\x7f\x0f99m\x19\x85\x89W\xb3K\x9bM\x9b\x96\x04\x
1b+\x1b\x1f\x1f\x15\x1d\x15+\x83\x13\r\r\x1f\x11\x15\x1b\x17\x15\x17\x0f\x11\x15\x11+\x19)\x0f\x0b\x11builtin\x00vhlo\x00module\x00broadcast_in_dim_v1\x00constant_v1\x00get_tuple_element_v1\x00iota_v1\x00select_v1\x00func_v1\x00add_v1\x00compare_v1\x00return_v1\x00reshape_v1\x00transpose_v1\x00divide_v1\x00call_v1\x00custom_call_v1\x00value\x00index\x00sym_name\x00third_party/py/jax/experimental/jax2tf/tests/back_compat_test.py\x00broadcast_dimensions\x00arg_attrs\x00function_type\x00res_attrs\x00sym_visibility\x00iota_dimension\x00compare_type\x00comparison_direction\x00jit__lambda_\x00jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) resource_env=None donated_invars=(False,) name=tril in_positional_semantics=(<_PositionalSemantics.GLOBAL: 1>,) out_positional_semantics=_PositionalSemantics.GLOBAL keep_unused=False inline=False]\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]\x00jit(<lambda>)/jit(main)/jit(tril)/add\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]\x00jit(<lambda>)/jit(main)/jit(tril)/ge\x00jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]\x00jit(<lambda>)/jit(main)/jit(tril)/select_n\x00jit(<lambda>)/jit(main)/iota[dtype=float64 shape=(64,) dimension=0]\x00jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]\x00permutation\x00jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]\x00jit(<lambda>)/jit(main)/add\x00jit(<lambda>)/jit(main)/div\x00callee\x00jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True]\x00api_version\x00backend_config\x00call_target_name\x00called_computations\x00has_side_effect\x00operand_layouts\x00output_operand_aliases\x00result_layouts\x00jax.result_info\x00tril\x00[0]\x00[1]\x00main\x00public\x00private\x00\x00lapack_dsyevd\x00",
xla_call_module_version=4,
), # End paste
# Pasted from the test output (see back_compat_test.py module docstring)
c64=dict(
testdata_version=1,
platform='cpu',
custom_call_targets=['lapack_cheevd'],
serialized_date=datetime.date(2023, 3, 17),
inputs=(),
expected_outputs=(array([[-0.6185769 +0.j, -0.20142993 +0.j, -0.09725195 +0.j,
0.62983674 +0.j, -0.07926044 +0.j, 0.3605001 -0.j,
-0.019093221 +0.j, -0.18446997 +0.j],
[-0.47070873 +0.j, 0.29325768 +0.j, -0.19454116 +0.j,
-0.6394365 +0.j, 0.06229549 +0.j, 0.33249345 +0.j,
0.28112718 +0.j, -0.22856665 +0.j],
[-0.32284075 +0.j, -0.12361939 +0.j, 0.20547704 +0.j,
-0.18307868 +0.j, 0.47294614 +0.j, -0.3170349 +0.j,
-0.6373532 +0.j, -0.27266347 +0.j],
[-0.17497246 +0.j, -0.079641335 +0.j, 0.15042792 +0.j,
-0.15416273 +0.j, -0.815209 +0.j, -0.38054234 +0.j,
-0.083263926 +0.j, -0.31676024 +0.j],
[-0.027104257 +0.j, -0.26490977 +0.j, 0.32271704 +0.j,
0.08653544 +0.j, 0.30305928 +0.j, -0.33998996 +0.j,
0.6926741 +0.j, -0.360857 +0.j],
[ 0.120763965 +0.j, 0.43288827 +0.j, -0.64385164 +0.j,
0.2652551 +0.j, 0.094823755 +0.j, -0.37435007 +0.j,
0.00091664493+0.j, -0.40495378 +0.j],
[ 0.26863196 +0.j, 0.51607686 +0.j, 0.53846526 +0.j,
0.16969058 +0.j, -0.0216703 +0.j, 0.35755336 +0.j,
-0.113144726 +0.j, -0.4490505 +0.j],
[ 0.4165004 +0.j, -0.57262254 +0.j, -0.28144246 +0.j,
-0.17463988 +0.j, -0.016984984 +0.j, 0.3613705 +0.j,
-0.12186296 +0.j, -0.49314725 +0.j]], dtype=complex64), array([-2.4598808e+01, -3.3105560e-05, -3.1002426e-05, -1.0103593e-05,
-1.0022322e-05, 4.0141886e-06, 9.5510331e-06, 2.7659882e+02],
dtype=float32)),
mlir_module_text=r"""
module @jit__lambda_ {
func.func public @main() -> (tensor<8x8xcomplex<f32>> {jax.result_info = "[0]"}, tensor<8xf32> {jax.result_info = "[1]"}) {
%0 = stablehlo.iota dim = 0 : tensor<64xcomplex<f32>>
%1 = stablehlo.reshape %0 : (tensor<64xcomplex<f32>>) -> tensor<8x8xcomplex<f32>>
%2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<8x8xcomplex<f32>>) -> tensor<8x8xcomplex<f32>>
%3 = stablehlo.real %2 : (tensor<8x8xcomplex<f32>>) -> tensor<8x8xf32>
%4 = stablehlo.imag %2 : (tensor<8x8xcomplex<f32>>) -> tensor<8x8xf32>
%5 = stablehlo.negate %4 : tensor<8x8xf32>
%6 = stablehlo.complex %3, %5 : tensor<8x8xcomplex<f32>>
%7 = stablehlo.add %1, %6 : tensor<8x8xcomplex<f32>>
%8 = stablehlo.constant dense<(2.000000e+00,0.000000e+00)> : tensor<complex<f32>>
%9 = stablehlo.broadcast_in_dim %8, dims = [] : (tensor<complex<f32>>) -> tensor<8x8xcomplex<f32>>
%10 = stablehlo.divide %7, %9 : tensor<8x8xcomplex<f32>>
%11 = call @tril(%10) : (tensor<8x8xcomplex<f32>>) -> tensor<8x8xcomplex<f32>>
%12 = stablehlo.constant dense<1> : tensor<i32>
%13 = stablehlo.constant dense<1> : tensor<i32>
%14 = stablehlo.constant dense<8> : tensor<i32>
%15 = stablehlo.custom_call @lapack_cheevd(%12, %13, %14, %11) {api_version = 2 : i32, operand_layouts = [dense<> : tensor<0xindex>, dense<> : tensor<0xindex>, dense<> : tensor<0xindex>, dense<[0, 1]> : tensor<2xindex>], output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 3, operand_tuple_indices = []>], result_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<0> : tensor<1xindex>, dense<> : tensor<0xindex>, dense<0> : tensor<1xindex>, dense<0> : tensor<1xindex>, dense<0> : tensor<1xindex>]} : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<8x8xcomplex<f32>>) -> tuple<tensor<8x8xcomplex<f32>>, tensor<8xf32>, tensor<i32>, tensor<81xcomplex<f32>>, tensor<169xf32>, tensor<43xi32>>
%16 = stablehlo.get_tuple_element %15[0] : (tuple<tensor<8x8xcomplex<f32>>, tensor<8xf32>, tensor<i32>, tensor<81xcomplex<f32>>, tensor<169xf32>, tensor<43xi32>>) -> tensor<8x8xcomplex<f32>>
%17 = stablehlo.get_tuple_element %15[1] : (tuple<tensor<8x8xcomplex<f32>>, tensor<8xf32>, tensor<i32>, tensor<81xcomplex<f32>>, tensor<169xf32>, tensor<43xi32>>) -> tensor<8xf32>
%18 = stablehlo.get_tuple_element %15[2] : (tuple<tensor<8x8xcomplex<f32>>, tensor<8xf32>, tensor<i32>, tensor<81xcomplex<f32>>, tensor<169xf32>, tensor<43xi32>>) -> tensor<i32>
%19 = stablehlo.get_tuple_element %15[3] : (tuple<tensor<8x8xcomplex<f32>>, tensor<8xf32>, tensor<i32>, tensor<81xcomplex<f32>>, tensor<169xf32>, tensor<43xi32>>) -> tensor<81xcomplex<f32>>
%20 = stablehlo.get_tuple_element %15[4] : (tuple<tensor<8x8xcomplex<f32>>, tensor<8xf32>, tensor<i32>, tensor<81xcomplex<f32>>, tensor<169xf32>, tensor<43xi32>>) -> tensor<169xf32>
%21 = stablehlo.get_tuple_element %15[5] : (tuple<tensor<8x8xcomplex<f32>>, tensor<8xf32>, tensor<i32>, tensor<81xcomplex<f32>>, tensor<169xf32>, tensor<43xi32>>) -> tensor<43xi32>
%22 = stablehlo.constant dense<0> : tensor<i32>
%23 = stablehlo.broadcast_in_dim %22, dims = [] : (tensor<i32>) -> tensor<i32>
%24 = stablehlo.compare EQ, %18, %23, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1>
%25 = stablehlo.broadcast_in_dim %24, dims = [] : (tensor<i1>) -> tensor<1x1xi1>
%26 = stablehlo.constant dense<(0x7FC00000,0x7FC00000)> : tensor<complex<f32>>
%27 = stablehlo.broadcast_in_dim %26, dims = [] : (tensor<complex<f32>>) -> tensor<8x8xcomplex<f32>>
%28 = stablehlo.broadcast_in_dim %25, dims = [0, 1] : (tensor<1x1xi1>) -> tensor<8x8xi1>
%29 = stablehlo.select %28, %16, %27 : tensor<8x8xi1>, tensor<8x8xcomplex<f32>>
%30 = stablehlo.broadcast_in_dim %24, dims = [] : (tensor<i1>) -> tensor<1xi1>
%31 = stablehlo.constant dense<0x7FC00000> : tensor<f32>
%32 = stablehlo.broadcast_in_dim %31, dims = [] : (tensor<f32>) -> tensor<8xf32>
%33 = stablehlo.broadcast_in_dim %30, dims = [0] : (tensor<1xi1>) -> tensor<8xi1>
%34 = stablehlo.select %33, %17, %32 : tensor<8xi1>, tensor<8xf32>
return %29, %34 : tensor<8x8xcomplex<f32>>, tensor<8xf32>
}
func.func private @tril(%arg0: tensor<8x8xcomplex<f32>>) -> tensor<8x8xcomplex<f32>> {
%0 = stablehlo.iota dim = 0 : tensor<8x8xi32>
%1 = stablehlo.constant dense<0> : tensor<i32>
%2 = stablehlo.broadcast_in_dim %1, dims = [] : (tensor<i32>) -> tensor<8x8xi32>
%3 = stablehlo.add %0, %2 : tensor<8x8xi32>
%4 = stablehlo.iota dim = 1 : tensor<8x8xi32>
%5 = stablehlo.compare GE, %3, %4, SIGNED : (tensor<8x8xi32>, tensor<8x8xi32>) -> tensor<8x8xi1>
%6 = stablehlo.constant dense<(0.000000e+00,0.000000e+00)> : tensor<complex<f32>>
%7 = stablehlo.broadcast_in_dim %6, dims = [] : (tensor<complex<f32>>) -> tensor<8x8xcomplex<f32>>
%8 = stablehlo.select %5, %arg0, %7 : tensor<8x8xi1>, tensor<8x8xcomplex<f32>>
return %8 : tensor<8x8xcomplex<f32>>
}
}
""",
mlir_module_serialized=b"ML\xefR\x03MLIRxxx-trunk\x00\x015\x05\x01\x05\x01\x03\x05\x03%\x07\t\x0b\r\x0f\x11\x13\x15\x17\x19\x1b\x1d\x1f!#%')\x03\xc6\x02\x1e\x02?\x01\xa9\x0f\x17\x13\x0b\x17\x0b\x07\x0f\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x13\x0b\x13\x0f\x0b\x0b\x17\x0f\x13\x13\x0b33\x0b\x0f\x0b\x0b\x13\x0f\x0b\x1b\x0f\x0b\x13\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x13\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x13\x0b\x13\x0b\x0b\x13K\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x13\x13\x13\x13\x13\x13\x1b\x17\x03a\x0f/\x0b\x0b\x0f\x0b\x0bO\x0b\x13\x13\x0b\x13\x0b\x0b\x0b\x0b\x0b\x0b\x0f\x1f\x0f\x0f\x0b/O/\x1f\x1f\x0b\x0b\x0b\x0b\x1b\x0f\x17#\x0f\x0f\x0f\x0f\x0f\x0f\x0b/O\x1f/\x01\x07\x17\x17\x17\x03?\x17\x0f\x07\x0f\x07\x13\x07\x07\x0b\x17\x17\x07\x17\x13\x17\x17\x13\x0f\x17\x17\x13\x17#\x13\x13\x13\x0f\x17\x13\x13\x13\x02&\n\x1d\x83\x03\x17\x13\xf6\x04\x01\x03\x03\x15\xd3\x05+\x17\x13\xf2\x04\x01\x05-\x1f\x1d9\x03\x05/\x051\x053\x055\x057\x059\x05;\x03\x03!\xcf\x05=\x03\x03\x07\xd1\x1d?\x03\x05?\x05A\x17\x13\xea\x04\x01\x1d}\t\x03\x03\x07\xdf\x03\x03\x113\x05C\x03\x0b\x17\xad\x19\xb9\x1b\xbb\x11\xc5\x1d\xc7\x03\x0b\x17\xb1\x19\xcb\x1b\xb1\x11\xb3\x1d\xcd\x05E\x1d=\x03\x05G\x05I\x03\x03!\xd5\x1dE\x03\x05K\x03\x05'\xb5)\xd7\x1dK\x03\x05M\x03\x03\x07\xd9\x1dQ\x03\x05O\x1dU\x03\x05Q\x1dY+\x05S\x1d]+\x05U\x03\x03a\xdb\x05W\x1de\t\x05Y\x1di\t\x05[\x1dm\t\x05]\x1dq\t\x05_\x1du\t\x05a\x1dy\t\x05c\x03\x03\x07\xdd\x05e\x03\x03\x81\xb3\x05g\x05i\x03\x03\x07\xe1\x03\x11\x89\xe3\x8b\xe5\x8d\xe7\x8f\xad\x91\xe9\x93\xeb\x95\xed\x97\xf1\x05k\x05m\x05o\x05q\x05s\x05u\x05w\x05y\x03\x03\x0b\xf3\x03\x03\x0b\xf5\x03\x03\x0b\xf7\x03\x03\x0b\xf9\x03\x03\x0b\xfb\x03\x03\x0b\xfd\x03\x05'\xb5)\xff\x03\x03\x07\x02\x02\x1f/\x01\x1f3\x11\x00\x00\x00\x00\x00\x00\x00\x00\x03\x01\x1d{\x03\x03\xc9\x1d}\t\x07\x1f1!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00#%\x03\x05\xbd\xc1\r\x03\xaf\xbf\x1d\x7f\r\x03\xaf\xc3\x1d\x81\x1d\x83\x1d\x85\r\x01#'\x1d\x87\x13\r\x01\x1f\x03\t\x00\x00\x00\x00\x1f)\x01\x
13\r\x05\x07\x05\x1f\x07\x11\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x1b!\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x07\x11\x00\x00\x00@\x00\x00\x00\x00\x1f\x03\t\x01\x00\x00\x00\x1f\x03\t\x08\x00\x00\x00\x0b\x05\x1d\x89\x1d\x8b\x05\x01\x03\t\xa9\xa9\xa9\xb7\x03\x03\xef\x15\x03\x01\r\x01\x03\r\xb7\xab\xa9\xab\xab\xab\x13\x05\x01\x13\x05\x05\x13\x05\t\x13\x05\r\x13\x05\x11\x13\x05\x15\x07\x01\x1f\x07\x11\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x1f\x1b!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1f#\t\x00\x00\xc0\x7f\x1f=\x11\x00\x00\x00\x00\x00\x00\x00\x00\x03\x03\x15\x06\x02\x03\x03\x07\n\x02\x03\x03\x15\x0e\x02)\x05!!\x11)\x01\x05\x1b)\x01\x11\t)\x03!\t\x1d\x01\x03\t)\x05!!\x05)\x05!!\t\x13)\x05!!\x0f)\x03\t\r)\x03\x8a\x02\x11)\x03J\x05\t)\x03\xad\x05)\x01\t\x11\x01\x05\x01\x0b\x11\x03\x01\x03\x01)\x03\x01\r)\x03\x02\x02\x11/\r\x01\x0b\x03\x1d\x1f!)\x03\x01\x17)\x03\t\x17)\x03\x05\x17)\x01\x0f)\x05\x05\x05\x0f)\x03\x05\x0f)\x03!\x0f)\x03\x05\r\x04\xda\x05\x05\x01\x11\r1\x07\x03\x01\t\r\x11\r5\x05\x03G\x91\t\x03W\x1f\x03+\x15\x06[\x03\x01\x03\x01\x17\x07c_\x03\x01\x03\x03\x19\x06g\x03\x15\x03\x05\x1b\x06k\x03\x15\x03\x05\x1d\x06o\x03\x15\x03\t\x1f\x06s\x03\x01\x05\x07\x0b\x0f\x06w\x03\x01\x05\x03\r\x05\x03\r{\x03\x07\x03\x07-\x05\x03\x01\x03\x11!\x06-\x03\x01\x05\x0f\x13#\x07\x0f\x7f\x03\x01\x03\x15\x05\x03\x01/\x03\x03\x05\x03\x01/\x03\x03\x05\x03\x01\x85\x03\x03%\x07\x01\x87\x03-\t\x19\x1b\x1d\x17\x07\x07\x01\x99\x03\x01\x03\x1f\x07\x07\x01\x9b\x03\x0b\x03\x1f\x07\x07\x01\x9d\x03\x03\x03\x1f\x07\x07\x01\x9f\x03\x1d\x03\x1f\x07\x07\x01\xa1\x03\x1f\x03\x1f\x07\x07\x01\xa3\x03!\x03\x1f\x05\x03\x01#\x03\x03\x03\x07\x01\x05\x03\x03\x03-\x11\x07\x01\xa5\x035\x05%/\x03\x07\x01\x05\x037\x031\x05\x03\x01\xa7\x03\x07\x03\x07\x01\x05\x03\x01\x035\x03\x07\x01\x12\x02\x03\x19\x033\x0b\x06\x01\x03\x01\x079!7\x03\x07\x01\x05\x039\x031\x05\x03\x01\x16\x02\x03#\x03\x07\x01\x05\x03\x0b\x03?\x03\x07\x01\x1a\x02\x03;\x03=\x0b\x06\x01\x03\x0b\x07C#A
\x13\x04\r\x05;E\r\x11\x0f7\x05\x03\x15+\x03\x01\r\t\x03;\x1f\x03\x13\x05\x03\x0f#\x03\x03\x03\x07%\x05\x03\x13\x03\x05\x0f\x06%\x03\x13\x05\x03\x07\t\x03CA\x03\x13\x11\x07IG\x03\x19\x05\t\x0b\x05\x03\x0fM\x03\x07\x03\x07O\x05\x03\x01\x03\x0f\x0b\x06S\x03\x01\x07\r\x01\x11\x13\x04\x0f\x03\x13\x06\x03\x01\x05\x01\x00F\x1c\x8d\x1d\x03\x11\x0f\x0b\t\t\x0b!\x1f/!!)#\x1f\x19\x7f\x0f99A9;;m\x19\x85\x8dW\xb3K\x9bM\x9b\x96\x04\x1b+\x1b\x1f\x1f\x15\x1d\x15+\x83\x13\r\r\x1f\x11\x15\x17\x15\x11\x11\x1b\x17\x15\x17\x0f\x11\x15\x11+\x19)\x0f\x0b\x11builtin\x00vhlo\x00module\x00broadcast_in_dim_v1\x00constant_v1\x00get_tuple_element_v1\x00iota_v1\x00select_v1\x00func_v1\x00add_v1\x00compare_v1\x00return_v1\x00reshape_v1\x00transpose_v1\x00real_v1\x00imag_v1\x00negate_v1\x00complex_v1\x00divide_v1\x00call_v1\x00custom_call_v1\x00value\x00index\x00sym_name\x00third_party/py/jax/experimental/jax2tf/tests/back_compat_test.py\x00broadcast_dimensions\x00arg_attrs\x00function_type\x00res_attrs\x00sym_visibility\x00iota_dimension\x00compare_type\x00comparison_direction\x00jit__lambda_\x00jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) resource_env=None donated_invars=(False,) name=tril in_positional_semantics=(<_PositionalSemantics.GLOBAL: 1>,) out_positional_semantics=_PositionalSemantics.GLOBAL keep_unused=False inline=False]\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]\x00jit(<lambda>)/jit(main)/jit(tril)/add\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]\x00jit(<lambda>)/jit(main)/jit(tril)/ge\x00jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]\x00jit(<lambda>)/jit(main)/jit(tril)/select_n\x00jit(<lambda>)/jit(main)/iota[dtype=complex64 shape=(64,) dimension=0]\x00jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]\x00permutation\x00jit(<lambda>)/jit(main)/transpose[permutation=(1, 
0)]\x00jit(<lambda>)/jit(main)/real\x00jit(<lambda>)/jit(main)/imag\x00jit(<lambda>)/jit(main)/neg\x00jit(<lambda>)/jit(main)/complex\x00jit(<lambda>)/jit(main)/add\x00jit(<lambda>)/jit(main)/div\x00callee\x00jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True]\x00api_version\x00backend_config\x00call_target_name\x00called_computations\x00has_side_effect\x00operand_layouts\x00output_operand_aliases\x00result_layouts\x00jax.result_info\x00tril\x00[0]\x00[1]\x00main\x00public\x00private\x00\x00lapack_cheevd\x00",
xla_call_module_version=4,
), # End paste
# Pasted from the test output (see back_compat_test.py module docstring)
c128=dict(
testdata_version=1,
platform='cpu',
custom_call_targets=['lapack_zheevd'],
serialized_date=datetime.date(2023, 3, 17),
inputs=(),
expected_outputs=(array([[-6.1857700048412056e-01+0.j, 2.4081403770912022e-01+0.j,
3.5662489253627483e-01+0.j, -6.3034019033669797e-01+0.j,
1.0043483479985752e-16+0.j, -2.8842036081919542e-02+0.j,
7.7164692943283169e-25+0.j, -1.8446994643771725e-01+0.j],
[-4.7070881487314609e-01+0.j, 4.7473787464450828e-01+0.j,
-4.8036836210243361e-01+0.j, 4.3802686872516400e-01+0.j,
1.7961797619639255e-01+0.j, 8.3080980076741355e-03+0.j,
2.1415294457221759e-01+0.j, -2.2856669794666584e-01+0.j],
[-3.2284062926217072e-01+0.j, -5.4336490915553370e-01+0.j,
2.2181041859724987e-01+0.j, 2.9947877954402286e-01+0.j,
-3.6491813600134637e-01+0.j, 3.2867679819727436e-01+0.j,
3.8223299448843473e-01+0.j, -2.7266344945561438e-01+0.j],
[-1.7497244365119527e-01+0.j, -8.9251550609769331e-02+0.j,
-6.3518515114898352e-02+0.j, 1.9162997359209963e-01+0.j,
-2.2087281326110142e-01+0.j, 5.9957027043505008e-02+0.j,
-8.7632498908241274e-01+0.j, -3.1676020096456303e-01+0.j],
[-2.7104258040220017e-02+0.j, -3.3772873786627688e-01+0.j,
2.5901386593721754e-01+0.j, 1.7032650752287815e-01+0.j,
6.7521217612940321e-01+0.j, -4.5036136532965476e-01+0.j,
-1.2279030059078447e-02+0.j, -3.6085695247351163e-01+0.j],
[ 1.2076392757075533e-01+0.j, -3.3834734096469249e-01+0.j,
-6.5506827461665529e-01+0.j, -5.0472498521116760e-01+0.j,
6.9987430903492132e-02+0.j, 1.0595648906599270e-01+0.j,
8.3443844143082035e-02+0.j, -4.0495370398246017e-01+0.j],
[ 2.6863211318173102e-01+0.j, 2.2958613191407312e-01+0.j,
6.3952843755683969e-02+0.j, 1.8776775771084192e-02+0.j,
-5.3523731432241317e-01+0.j, -5.9199531677602002e-01+0.j,
1.7916671834524250e-01+0.j, -4.4905045549140887e-01+0.j],
[ 4.1650029879270667e-01+0.j, 3.6355449432857068e-01+0.j,
2.9755313100756148e-01+0.j, 1.6826270392616000e-02+0.j,
1.9621068035557282e-01+0.j, 5.6830030587314817e-01+0.j,
2.9607517592514260e-02+0.j, -4.9314720700035747e-01+0.j]]), array([-2.4598804776133626e+01, -4.6567755957874661e-14,
-1.9932120610662194e-14, -5.7323356091157378e-15,
-4.5459724251334835e-16, 4.0479851042511616e-14,
9.2325194924982089e-14, 2.7659880477613365e+02])),
mlir_module_text=r"""
module @jit__lambda_ {
func.func public @main() -> (tensor<8x8xcomplex<f64>> {jax.result_info = "[0]"}, tensor<8xf64> {jax.result_info = "[1]"}) {
%0 = stablehlo.iota dim = 0 : tensor<64xcomplex<f64>>
%1 = stablehlo.reshape %0 : (tensor<64xcomplex<f64>>) -> tensor<8x8xcomplex<f64>>
%2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<8x8xcomplex<f64>>) -> tensor<8x8xcomplex<f64>>
%3 = stablehlo.real %2 : (tensor<8x8xcomplex<f64>>) -> tensor<8x8xf64>
%4 = stablehlo.imag %2 : (tensor<8x8xcomplex<f64>>) -> tensor<8x8xf64>
%5 = stablehlo.negate %4 : tensor<8x8xf64>
%6 = stablehlo.complex %3, %5 : tensor<8x8xcomplex<f64>>
%7 = stablehlo.add %1, %6 : tensor<8x8xcomplex<f64>>
%8 = stablehlo.constant dense<(2.000000e+00,0.000000e+00)> : tensor<complex<f64>>
%9 = stablehlo.broadcast_in_dim %8, dims = [] : (tensor<complex<f64>>) -> tensor<8x8xcomplex<f64>>
%10 = stablehlo.divide %7, %9 : tensor<8x8xcomplex<f64>>
%11 = call @tril(%10) : (tensor<8x8xcomplex<f64>>) -> tensor<8x8xcomplex<f64>>
%12 = stablehlo.constant dense<1> : tensor<i32>
%13 = stablehlo.constant dense<1> : tensor<i32>
%14 = stablehlo.constant dense<8> : tensor<i32>
%15 = stablehlo.custom_call @lapack_zheevd(%12, %13, %14, %11) {api_version = 2 : i32, operand_layouts = [dense<> : tensor<0xindex>, dense<> : tensor<0xindex>, dense<> : tensor<0xindex>, dense<[0, 1]> : tensor<2xindex>], output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 3, operand_tuple_indices = []>], result_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<0> : tensor<1xindex>, dense<> : tensor<0xindex>, dense<0> : tensor<1xindex>, dense<0> : tensor<1xindex>, dense<0> : tensor<1xindex>]} : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<8x8xcomplex<f64>>) -> tuple<tensor<8x8xcomplex<f64>>, tensor<8xf64>, tensor<i32>, tensor<81xcomplex<f64>>, tensor<169xf64>, tensor<43xi32>>
%16 = stablehlo.get_tuple_element %15[0] : (tuple<tensor<8x8xcomplex<f64>>, tensor<8xf64>, tensor<i32>, tensor<81xcomplex<f64>>, tensor<169xf64>, tensor<43xi32>>) -> tensor<8x8xcomplex<f64>>
%17 = stablehlo.get_tuple_element %15[1] : (tuple<tensor<8x8xcomplex<f64>>, tensor<8xf64>, tensor<i32>, tensor<81xcomplex<f64>>, tensor<169xf64>, tensor<43xi32>>) -> tensor<8xf64>
%18 = stablehlo.get_tuple_element %15[2] : (tuple<tensor<8x8xcomplex<f64>>, tensor<8xf64>, tensor<i32>, tensor<81xcomplex<f64>>, tensor<169xf64>, tensor<43xi32>>) -> tensor<i32>
%19 = stablehlo.get_tuple_element %15[3] : (tuple<tensor<8x8xcomplex<f64>>, tensor<8xf64>, tensor<i32>, tensor<81xcomplex<f64>>, tensor<169xf64>, tensor<43xi32>>) -> tensor<81xcomplex<f64>>
%20 = stablehlo.get_tuple_element %15[4] : (tuple<tensor<8x8xcomplex<f64>>, tensor<8xf64>, tensor<i32>, tensor<81xcomplex<f64>>, tensor<169xf64>, tensor<43xi32>>) -> tensor<169xf64>
%21 = stablehlo.get_tuple_element %15[5] : (tuple<tensor<8x8xcomplex<f64>>, tensor<8xf64>, tensor<i32>, tensor<81xcomplex<f64>>, tensor<169xf64>, tensor<43xi32>>) -> tensor<43xi32>
%22 = stablehlo.constant dense<0> : tensor<i32>
%23 = stablehlo.broadcast_in_dim %22, dims = [] : (tensor<i32>) -> tensor<i32>
%24 = stablehlo.compare EQ, %18, %23, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1>
%25 = stablehlo.broadcast_in_dim %24, dims = [] : (tensor<i1>) -> tensor<1x1xi1>
%26 = stablehlo.constant dense<(0x7FF8000000000000,0x7FF8000000000000)> : tensor<complex<f64>>
%27 = stablehlo.broadcast_in_dim %26, dims = [] : (tensor<complex<f64>>) -> tensor<8x8xcomplex<f64>>
%28 = stablehlo.broadcast_in_dim %25, dims = [0, 1] : (tensor<1x1xi1>) -> tensor<8x8xi1>
%29 = stablehlo.select %28, %16, %27 : tensor<8x8xi1>, tensor<8x8xcomplex<f64>>
%30 = stablehlo.broadcast_in_dim %24, dims = [] : (tensor<i1>) -> tensor<1xi1>
%31 = stablehlo.constant dense<0x7FF8000000000000> : tensor<f64>
%32 = stablehlo.broadcast_in_dim %31, dims = [] : (tensor<f64>) -> tensor<8xf64>
%33 = stablehlo.broadcast_in_dim %30, dims = [0] : (tensor<1xi1>) -> tensor<8xi1>
%34 = stablehlo.select %33, %17, %32 : tensor<8xi1>, tensor<8xf64>
return %29, %34 : tensor<8x8xcomplex<f64>>, tensor<8xf64>
}
func.func private @tril(%arg0: tensor<8x8xcomplex<f64>>) -> tensor<8x8xcomplex<f64>> {
%0 = stablehlo.iota dim = 0 : tensor<8x8xi32>
%1 = stablehlo.constant dense<0> : tensor<i32>
%2 = stablehlo.broadcast_in_dim %1, dims = [] : (tensor<i32>) -> tensor<8x8xi32>
%3 = stablehlo.add %0, %2 : tensor<8x8xi32>
%4 = stablehlo.iota dim = 1 : tensor<8x8xi32>
%5 = stablehlo.compare GE, %3, %4, SIGNED : (tensor<8x8xi32>, tensor<8x8xi32>) -> tensor<8x8xi1>
%6 = stablehlo.constant dense<(0.000000e+00,0.000000e+00)> : tensor<complex<f64>>
%7 = stablehlo.broadcast_in_dim %6, dims = [] : (tensor<complex<f64>>) -> tensor<8x8xcomplex<f64>>
%8 = stablehlo.select %5, %arg0, %7 : tensor<8x8xi1>, tensor<8x8xcomplex<f64>>
return %8 : tensor<8x8xcomplex<f64>>
}
}
""",
mlir_module_serialized=b"ML\xefR\x03MLIRxxx-trunk\x00\x015\x05\x01\x05\x01\x03\x05\x03%\x07\t\x0b\r\x0f\x11\x13\x15\x17\x19\x1b\x1d\x1f!#%')\x03\xc6\x02\x1e\x02?\x01\xa9\x0f\x17\x13\x0b\x17\x0b\x07\x0f\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x13\x0b\x13\x0f\x0b\x0b\x17\x0f\x13\x13\x0b33\x0b\x0f\x0b\x0b\x13\x0f\x0b\x1b\x0f\x0b\x13\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x13\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x13\x0b\x13\x0b\x0b\x13K\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x13\x13\x13\x13\x13\x13\x1b\x17\x03a\x0f/\x0b\x0b\x0f\x0b\x0bO\x0b\x13\x13\x0b\x13\x0b\x0b\x0b\x0b\x0b\x0b\x0f\x1f\x0f\x0f\x0bOOO\x1f\x1f\x0b\x0b\x0b\x0b\x1b\x0f\x17#\x0f\x0f\x0f\x0f\x0f\x0f\x0bOO//\x01\x07\x17\x17\x17\x03?\x17\x0f\x07\x0f\x07\x13\x07\x07\x0b\x17\x17\x07\x17\x13\x17\x17\x13\x0f\x17\x17\x13\x17#\x13\x13\x13\x0f\x17\x13\x13\x13\x02\x96\n\x1d\x83\x03\x17\x13\xf6\x04\x01\x03\x03\x15\xd3\x05+\x17\x13\xf2\x04\x01\x05-\x1f\x1d9\x03\x05/\x051\x053\x055\x057\x059\x05;\x03\x03!\xcf\x05=\x03\x03\x07\xd1\x1d?\x03\x05?\x05A\x17\x13\xea\x04\x01\x1d}\t\x03\x03\x07\xdf\x03\x03\x113\x05C\x03\x0b\x17\xad\x19\xb9\x1b\xbb\x11\xc5\x1d\xc7\x03\x0b\x17\xb1\x19\xcb\x1b\xb1\x11\xb3\x1d\xcd\x05E\x1d=\x03\x05G\x05I\x03\x03!\xd5\x1dE\x03\x05K\x03\x05'\xb5)\xd7\x1dK\x03\x05M\x03\x03\x07\xd9\x1dQ\x03\x05O\x1dU\x03\x05Q\x1dY+\x05S\x1d]+\x05U\x03\x03a\xdb\x05W\x1de\t\x05Y\x1di\t\x05[\x1dm\t\x05]\x1dq\t\x05_\x1du\t\x05a\x1dy\t\x05c\x03\x03\x07\xdd\x05e\x03\x03\x81\xb3\x05g\x05i\x03\x03\x07\xe1\x03\x11\x89\xe3\x8b\xe5\x8d\xe7\x8f\xad\x91\xe9\x93\xeb\x95\xed\x97\xf1\x05k\x05m\x05o\x05q\x05s\x05u\x05w\x05y\x03\x03\x0b\xf3\x03\x03\x0b\xf5\x03\x03\x0b\xf7\x03\x03\x0b\xf9\x03\x03\x0b\xfb\x03\x03\x0b\xfd\x03\x05'\xb5)\xff\x03\x03\x07\x02\x02\x1f/\x01\x1f3\x11\x00\x00\x00\x00\x00\x00\x00\x00\x03\x01\x1d{\x03\x03\xc9\x1d}\t\x07\x1f1!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00#%\x03\x05\xbd\xc1\r\x03\xaf\xbf\x1d\x7f\r\x03\xaf\xc3\x1d\x81\x1d\x83\x1d\x85\r\x01#'\x1d\x87\x13\r\x01\x1f\x03\t\x00\x00\x00\x00\x1f)\x01\x
13\r\x05\x07\x05\x1f\x07!\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x1b!\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x07!\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x03\t\x01\x00\x00\x00\x1f\x03\t\x08\x00\x00\x00\x0b\x05\x1d\x89\x1d\x8b\x05\x01\x03\t\xa9\xa9\xa9\xb7\x03\x03\xef\x15\x03\x01\r\x01\x03\r\xb7\xab\xa9\xab\xab\xab\x13\x05\x01\x13\x05\x05\x13\x05\t\x13\x05\r\x13\x05\x11\x13\x05\x15\x07\x01\x1f\x07!\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x1f\x1b!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1f#\x11\x00\x00\x00\x00\x00\x00\xf8\x7f\x1f=\x11\x00\x00\x00\x00\x00\x00\x00\x00\x03\x03\x15\x06\x02\x03\x03\x07\n\x02\x03\x03\x15\x0e\x02)\x05!!\x11)\x01\x05\x1b)\x01\x11\x0b)\x03!\t\x1d\x01\x03\t)\x05!!\x05)\x05!!\t\x13)\x05!!\x0f)\x03\t\r)\x03\x8a\x02\x11)\x03J\x05\t)\x03\xad\x05)\x01\t\x11\x01\x05\x01\x0b\x11\x03\x01\x03\x01)\x03\x01\r)\x03\x02\x02\x11/\r\x01\x0b\x03\x1d\x1f!)\x03\x01\x17)\x03\t\x17)\x03\x05\x17)\x01\x0f)\x05\x05\x05\x0f)\x03\x05\x0f)\x03!\x0f)\x03\x05\r\x04\xda\x05\x05\x01\x11\r1\x07\x03\x01\t\r\x11\r5\x05\x03G\x91\t\x03W\x1f\x03+\x15\x06[\x03\x01\x03\x01\x17\x07c_\x03\x01\x03\x03\x19\x06g\x03\x15\x03\x05\x1b\x06k\x03\x15\x03\x05\x1d\x06o\x03\x15\x03\t\x1f\x06s\x03\x01\x05\x07\x0b\x0f\x06w\x03\x01\x05\x03\r\x05\x03\r{\x03\x07\x03\x07-\x05\x03\x01\x03\x11!\x06-\x03\x01\x05\x0f\x13#\x07\x0f\x7f\x03\x01\x03\x15\x05\x03\x01/\x03\x03\x05\x03\x01/\x03\x03\x05\x03\x01\x85\x03\x03%\x07\x01\x87\x03-\t\x19\x1b\x1d\x17\x07\x07\x01\x99\x03\x01\x03\x1f\x07\x07\x01\x9b\x03\x0b\x03\x1f\x07\x07\x01\x9d\x03\x03\x03\x1f\x07\x07\x01\x9f\x03\x1d\x03\x1f\x07\x07\x01\xa1\x03\x1f\x03\x1f\x07\x07\x01\xa3\x03!\x03\x1f\x05\x03\x01#\x03\x03\x03\x07\x01\x05\x03\x03\x03-\x11\x07\x01\xa5\x035\x05%/\x03\x07\x01\x05\x037\x031\x05\x03\x01\xa7\x03\x07\x03\x07\x01\x05\x03\x01\x035\x03\x07\x01\x12\x02\x03\x19\x033\x0b\x06\x01\x03\x01\x079!7\x03\x07\x01\x05\x039\x031\x05
\x03\x01\x16\x02\x03#\x03\x07\x01\x05\x03\x0b\x03?\x03\x07\x01\x1a\x02\x03;\x03=\x0b\x06\x01\x03\x0b\x07C#A\x13\x04\r\x05;E\r\x11\x0f7\x05\x03\x15+\x03\x01\r\t\x03;\x1f\x03\x13\x05\x03\x0f#\x03\x03\x03\x07%\x05\x03\x13\x03\x05\x0f\x06%\x03\x13\x05\x03\x07\t\x03CA\x03\x13\x11\x07IG\x03\x19\x05\t\x0b\x05\x03\x0fM\x03\x07\x03\x07O\x05\x03\x01\x03\x0f\x0b\x06S\x03\x01\x07\r\x01\x11\x13\x04\x0f\x03\x13\x06\x03\x01\x05\x01\x00J\x1c\x8d\x1d\x03\x11\x0f\x0b\t\t\x0b!\x1f/!!)#\x1f\x19\x7f\x0f99A9;;m\x19\x85\x8fW\xb3K\x9bM\x9b\x96\x04\x1b+\x1b\x1f\x1f\x15\x1d\x15+\x83\x13\r\r\x1f\x11\x15\x17\x15\x11\x11\x1b\x17\x15\x17\x0f\x11\x15\x11+\x19)\x0f\x0b\x11builtin\x00vhlo\x00module\x00broadcast_in_dim_v1\x00constant_v1\x00get_tuple_element_v1\x00iota_v1\x00select_v1\x00func_v1\x00add_v1\x00compare_v1\x00return_v1\x00reshape_v1\x00transpose_v1\x00real_v1\x00imag_v1\x00negate_v1\x00complex_v1\x00divide_v1\x00call_v1\x00custom_call_v1\x00value\x00index\x00sym_name\x00third_party/py/jax/experimental/jax2tf/tests/back_compat_test.py\x00broadcast_dimensions\x00arg_attrs\x00function_type\x00res_attrs\x00sym_visibility\x00iota_dimension\x00compare_type\x00comparison_direction\x00jit__lambda_\x00jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) resource_env=None donated_invars=(False,) name=tril in_positional_semantics=(<_PositionalSemantics.GLOBAL: 1>,) out_positional_semantics=_PositionalSemantics.GLOBAL keep_unused=False inline=False]\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]\x00jit(<lambda>)/jit(main)/jit(tril)/add\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]\x00jit(<lambda>)/jit(main)/jit(tril)/ge\x00jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]\x00jit(<lambda>)/jit(main)/jit(tril)/select_n\x00jit(<lambda>)/jit(main)/iota[dtype=complex128 shape=(64,) dimension=0]\x00jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) 
dimensions=None]\x00permutation\x00jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]\x00jit(<lambda>)/jit(main)/real\x00jit(<lambda>)/jit(main)/imag\x00jit(<lambda>)/jit(main)/neg\x00jit(<lambda>)/jit(main)/complex\x00jit(<lambda>)/jit(main)/add\x00jit(<lambda>)/jit(main)/div\x00callee\x00jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True]\x00api_version\x00backend_config\x00call_target_name\x00called_computations\x00has_side_effect\x00operand_layouts\x00output_operand_aliases\x00result_layouts\x00jax.result_info\x00tril\x00[0]\x00[1]\x00main\x00public\x00private\x00\x00lapack_zheevd\x00",
xla_call_module_version=4,
), # End paste
)
data_2024_08_19 = {}
# Pasted from the test output (see export_back_compat_test_util.py module docstring)
data_2024_08_19["c128"] = dict(
testdata_version=1,
platform='cpu',
custom_call_targets=['lapack_zheevd_ffi'],
serialized_date=datetime.date(2024, 8, 19),
inputs=(),
expected_outputs=(array([[-6.1857700048412056e-01+0.j, 2.4081403770912022e-01+0.j,
3.5662489253627483e-01+0.j, -6.3034019033669797e-01+0.j,
1.0043483479985752e-16+0.j, -2.8842036081919542e-02+0.j,
7.7164692943283169e-25+0.j, -1.8446994643771725e-01+0.j],
[-4.7070881487314609e-01+0.j, 4.7473787464450828e-01+0.j,
-4.8036836210243361e-01+0.j, 4.3802686872516400e-01+0.j,
1.7961797619639255e-01+0.j, 8.3080980076741355e-03+0.j,
2.1415294457221759e-01+0.j, -2.2856669794666584e-01+0.j],
[-3.2284062926217072e-01+0.j, -5.4336490915553370e-01+0.j,
2.2181041859724987e-01+0.j, 2.9947877954402286e-01+0.j,
-3.6491813600134637e-01+0.j, 3.2867679819727436e-01+0.j,
3.8223299448843473e-01+0.j, -2.7266344945561438e-01+0.j],
[-1.7497244365119527e-01+0.j, -8.9251550609769331e-02+0.j,
-6.3518515114898352e-02+0.j, 1.9162997359209963e-01+0.j,
-2.2087281326110142e-01+0.j, 5.9957027043505008e-02+0.j,
-8.7632498908241274e-01+0.j, -3.1676020096456303e-01+0.j],
[-2.7104258040220017e-02+0.j, -3.3772873786627688e-01+0.j,
2.5901386593721754e-01+0.j, 1.7032650752287815e-01+0.j,
6.7521217612940321e-01+0.j, -4.5036136532965476e-01+0.j,
-1.2279030059078447e-02+0.j, -3.6085695247351163e-01+0.j],
[ 1.2076392757075533e-01+0.j, -3.3834734096469249e-01+0.j,
-6.5506827461665529e-01+0.j, -5.0472498521116760e-01+0.j,
6.9987430903492132e-02+0.j, 1.0595648906599270e-01+0.j,
8.3443844143082035e-02+0.j, -4.0495370398246017e-01+0.j],
[ 2.6863211318173102e-01+0.j, 2.2958613191407312e-01+0.j,
6.3952843755683969e-02+0.j, 1.8776775771084192e-02+0.j,
-5.3523731432241317e-01+0.j, -5.9199531677602002e-01+0.j,
1.7916671834524250e-01+0.j, -4.4905045549140887e-01+0.j],
[ 4.1650029879270667e-01+0.j, 3.6355449432857068e-01+0.j,
2.9755313100756148e-01+0.j, 1.6826270392616000e-02+0.j,
1.9621068035557282e-01+0.j, 5.6830030587314817e-01+0.j,
2.9607517592514260e-02+0.j, -4.9314720700035747e-01+0.j]]), array([-2.4598804776133626e+01, -4.6567755957874661e-14,
-1.9932120610662194e-14, -5.7323356091157378e-15,
-4.5459724251334835e-16, 4.0479851042511616e-14,
9.2325194924982089e-14, 2.7659880477613365e+02])),
mlir_module_text=r"""
#loc7 = loc("third_party/py/jax/tests/export_back_compat_test.py":277:27)
#loc18 = loc("jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]"(#loc7))
module @jit__lambda_ attributes {jax.uses_shape_polymorphism = false, mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} {
func.func public @main() -> (tensor<8x8xcomplex<f64>> {jax.result_info = "[0]", mhlo.layout_mode = "default"}, tensor<8xf64> {jax.result_info = "[1]", mhlo.layout_mode = "default"}) {
%0 = stablehlo.iota dim = 0 : tensor<64xcomplex<f64>> loc(#loc9)
%1 = stablehlo.reshape %0 : (tensor<64xcomplex<f64>>) -> tensor<8x8xcomplex<f64>> loc(#loc10)
%2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<8x8xcomplex<f64>>) -> tensor<8x8xcomplex<f64>> loc(#loc11)
%3 = stablehlo.real %2 : (tensor<8x8xcomplex<f64>>) -> tensor<8x8xf64> loc(#loc12)
%4 = stablehlo.imag %2 : (tensor<8x8xcomplex<f64>>) -> tensor<8x8xf64> loc(#loc13)
%5 = stablehlo.negate %4 : tensor<8x8xf64> loc(#loc14)
%6 = stablehlo.complex %3, %5 : tensor<8x8xcomplex<f64>> loc(#loc15)
%7 = stablehlo.add %1, %6 : tensor<8x8xcomplex<f64>> loc(#loc16)
%cst = stablehlo.constant dense<(2.000000e+00,0.000000e+00)> : tensor<complex<f64>> loc(#loc)
%8 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<complex<f64>>) -> tensor<8x8xcomplex<f64>> loc(#loc17)
%9 = stablehlo.divide %7, %8 : tensor<8x8xcomplex<f64>> loc(#loc17)
%10 = call @tril(%9) : (tensor<8x8xcomplex<f64>>) -> tensor<8x8xcomplex<f64>> loc(#loc18)
%c = stablehlo.constant dense<8> : tensor<i64> loc(#loc19)
%c_0 = stablehlo.constant dense<8> : tensor<i64> loc(#loc19)
%11:3 = stablehlo.custom_call @lapack_zheevd_ffi(%10) {mhlo.backend_config = {mode = 86 : ui8, uplo = 76 : ui8}, operand_layouts = [dense<[0, 1]> : tensor<2xindex>], output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 0, operand_tuple_indices = []>], result_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<0> : tensor<1xindex>, dense<> : tensor<0xindex>]} : (tensor<8x8xcomplex<f64>>) -> (tensor<8x8xcomplex<f64>>, tensor<8xf64>, tensor<i32>) loc(#loc19)
%c_1 = stablehlo.constant dense<0> : tensor<i32> loc(#loc19)
%12 = stablehlo.broadcast_in_dim %c_1, dims = [] : (tensor<i32>) -> tensor<i32> loc(#loc19)
%13 = stablehlo.compare EQ, %11#2, %12, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1> loc(#loc19)
%14 = stablehlo.broadcast_in_dim %13, dims = [] : (tensor<i1>) -> tensor<1x1xi1> loc(#loc19)
%cst_2 = stablehlo.constant dense<(0x7FF8000000000000,0x7FF8000000000000)> : tensor<complex<f64>> loc(#loc19)
%15 = stablehlo.broadcast_in_dim %cst_2, dims = [] : (tensor<complex<f64>>) -> tensor<8x8xcomplex<f64>> loc(#loc19)
%16 = stablehlo.broadcast_in_dim %14, dims = [0, 1] : (tensor<1x1xi1>) -> tensor<8x8xi1> loc(#loc19)
%17 = stablehlo.select %16, %11#0, %15 : tensor<8x8xi1>, tensor<8x8xcomplex<f64>> loc(#loc19)
%18 = stablehlo.broadcast_in_dim %13, dims = [] : (tensor<i1>) -> tensor<1xi1> loc(#loc19)
%cst_3 = stablehlo.constant dense<0x7FF8000000000000> : tensor<f64> loc(#loc19)
%19 = stablehlo.broadcast_in_dim %cst_3, dims = [] : (tensor<f64>) -> tensor<8xf64> loc(#loc19)
%20 = stablehlo.broadcast_in_dim %18, dims = [0] : (tensor<1xi1>) -> tensor<8xi1> loc(#loc19)
%21 = stablehlo.select %20, %11#1, %19 : tensor<8xi1>, tensor<8xf64> loc(#loc19)
return %17, %21 : tensor<8x8xcomplex<f64>>, tensor<8xf64> loc(#loc)
} loc(#loc)
func.func private @tril(%arg0: tensor<8x8xcomplex<f64>> {mhlo.layout_mode = "default"} loc("jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]"(#loc7))) -> (tensor<8x8xcomplex<f64>> {mhlo.layout_mode = "default"}) {
%0 = stablehlo.iota dim = 0 : tensor<8x8xi32> loc(#loc20)
%c = stablehlo.constant dense<0> : tensor<i32> loc(#loc18)
%1 = stablehlo.broadcast_in_dim %c, dims = [] : (tensor<i32>) -> tensor<8x8xi32> loc(#loc21)
%2 = stablehlo.add %0, %1 : tensor<8x8xi32> loc(#loc21)
%3 = stablehlo.iota dim = 1 : tensor<8x8xi32> loc(#loc22)
%4 = stablehlo.compare GE, %2, %3, SIGNED : (tensor<8x8xi32>, tensor<8x8xi32>) -> tensor<8x8xi1> loc(#loc23)
%cst = stablehlo.constant dense<(0.000000e+00,0.000000e+00)> : tensor<complex<f64>> loc(#loc18)
%5 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<complex<f64>>) -> tensor<8x8xcomplex<f64>> loc(#loc24)
%6 = stablehlo.select %4, %arg0, %5 : tensor<8x8xi1>, tensor<8x8xcomplex<f64>> loc(#loc25)
return %6 : tensor<8x8xcomplex<f64>> loc(#loc18)
} loc(#loc18)
} loc(#loc)
#loc = loc(unknown)
#loc1 = loc("third_party/py/jax/tests/export_back_compat_test.py":269:26)
#loc2 = loc("third_party/py/jax/tests/export_back_compat_test.py":269:14)
#loc3 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:34)
#loc4 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:25)
#loc5 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:15)
#loc6 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:14)
#loc8 = loc("third_party/py/jax/tests/export_back_compat_test.py":277:11)
#loc9 = loc("jit(<lambda>)/jit(main)/iota[dtype=complex128 shape=(64,) dimension=0]"(#loc1))
#loc10 = loc("jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]"(#loc2))
#loc11 = loc("jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]"(#loc3))
#loc12 = loc("jit(<lambda>)/jit(main)/real"(#loc4))
#loc13 = loc("jit(<lambda>)/jit(main)/imag"(#loc4))
#loc14 = loc("jit(<lambda>)/jit(main)/neg"(#loc4))
#loc15 = loc("jit(<lambda>)/jit(main)/complex"(#loc4))
#loc16 = loc("jit(<lambda>)/jit(main)/add"(#loc5))
#loc17 = loc("jit(<lambda>)/jit(main)/div"(#loc6))
#loc19 = loc("jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True subset_by_index=None]"(#loc8))
#loc20 = loc("jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]"(#loc7))
#loc21 = loc("jit(<lambda>)/jit(main)/jit(tril)/add"(#loc7))
#loc22 = loc("jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]"(#loc7))
#loc23 = loc("jit(<lambda>)/jit(main)/jit(tril)/ge"(#loc7))
#loc24 = loc("jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]"(#loc7))
#loc25 = loc("jit(<lambda>)/jit(main)/jit(tril)/select_n"(#loc7))
""",
mlir_module_serialized=b'ML\xefR\x01StableHLO_v0.9.0\x00\x013\x05\x01\x03\x01\x03\x05\x03#\x07\t\x0b\r\x0f\x11\x13\x15\x17\x19\x1b\x1d\x1f!#%\'\x03\xda\x02*\x02?\x01\xab\x0f\x0b\x13\x17\x0f\x0b\x07\x17\x0b\x0b\x0f\x0b\x0b\x0b\x0b\x13\x0b\x13\x0f\x0b\x0b\x0f\x13+\x0b\x0f\x0b\x0b\x0b33\x0b\x0f\x0b\x0b\x13\x0f\x0b\x1b\x0f\x0b\x13\x0f\x0b\x0f\x0b\x0f\x0b\x17\x0f\x0b\x17\x13\x0b\x0f\x0b\x17\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x17\x13\x0b\x17\x13\x0b\x0b\x17S\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x03a\x0b\x0b\x0b\x0b\x0f\x0b\x0bO\x0b\x13\x1b\x0b\x1b\x0b\x0b\x0b\x13\x0b\x0b\x0f\x1f\x0f\x0f\x0bOOO/\x0b\x0b\x0b\x0b\x1b\x0b\x0f\x0b\x0f\x0f\x0f\x17\x17/\x0f\x0bOO//\x01\x0b\x1f\x17\x17\x17\x17\x01\x05\x0b\x0f\x03;\x17\x07\x0f\x0f\x07\x07\x13\x17\x0b\x17\x0f\x07\x07\x17\x13\x07\x0f\x17\x17\x13\x17\x13\x13\x13\x0f\x17\x13\x13\x13\x02\xa6\n\x1d\x93\x95\x05)\x03\x03\x13\xd5\x17\x03V\x047\x1d?\x07\x05+\x1f\x17\x03>\x043\x05-\x05/\x11\x03\x05\x051\x053\x055\x057\x03\x03!\xd1\x059\x03\x03\x0b\xd3\x1dE\x07\x05;\x05=\x1d\x8b\x8d\x03\x03\x0b\xe1\x03\t135\x157\x15\x119\x05?\x11\x01\x00\x05A\x05C\x05E\x03\x0b\x17\xaf\x19\xbb\x1b\xbd\x11\xc7\x1d\xc9\x03\x0b\x17\xb3\x19\xcd\x1b\xb3\x11\xb5\x1d\xcf\x05G\x1dC\x07\x05I\x05K\x03\x03!\xd7\x1dK\x07\x05M\x03\x05\'\xb7)\xd9\x1dQ\x07\x05O\x03\x03\x0b\xdb\x1dW\x07\x05Q\x1d[\x07\x05S\x1d_a\x05U\x17\x036\x045\x1deg\x05W\x17\x036\x04\x1d\x03\x03k\xdd\x05Y\x1doq\x05[\x17\x03>\x04E\x1du\x0f\x05]\x1dy\x0f\x05_\x1d}\x0f\x05a\x1d\x81\x0f\x05c\x1d\x85\x87\x05e\x17\x03>\x04\x1f\x03\x03\x0b\xdf\x05g\x17\x03>\x04\x1d\x03\x03\x91\xb5\x05i\x05k\x17\x03V\x04\x17\x03\x13\x99\xe3\x9b\xe5\x9d\xe7\x9f\xaf\xa1\xe9\xa3\xeb\xa5\xf5\xa7\xf7\xa9\xfb\x05m\x05o\x05q\x05s\x05u\x05w\x05y\x05{\x05}\x1d\x7f\x1d\x81\x03\x01\x1d\x83\x03\x03\xcb\x1d\x85\t\x07\x1f/!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00#\'\x03\x05\xbf\xc3\r\x05\xb1\xc1\xab\xad\x1d\x87\r\x05\xb1\xc5\xab\xad\x1d\x89\x1d\x8b\x1d\x8d\r\x03\xab\xad#)\x1d\x8f\x13\x07\x01\x1f\x0b\t\x00\x00\x00\x00
\x1f+\x01\x13\x07\x05\x07\x05\x1f\t!\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f!!\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\t!\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x19\x11\x08\x00\x00\x00\x00\x00\x00\x00\x0b\x03\x1d\x91\x1d\x93\x05\x01\r\x05\xed\xef\xf1\xf3\x1d\x95\x13#V\x1d\x97\x13#L\x03\x03\xb9\x03\x03\xf9\x15\x03\x01\x01\x01\x03\x07\xb9\xfd\xff\x1f1\x11\x00\x00\x00\x00\x00\x00\x00\x00\x1f3\x01\x07\x01\x1f\t!\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x1f!!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1f%\x11\x00\x00\x00\x00\x00\x00\xf8\x7f\x1f=\x11\x00\x00\x00\x00\x00\x00\x00\x00\x03\x05\'\xb7)\x02\x02\x03\x03\x0b\x06\x02\x03\x03\x13\n\x02\x03\x03\x0b\x0e\x02\x03\x03\x13\x12\x02\x01\t\x01\x02\x02)\x05!!\x15\x1d)\x01\x15)\x01\x1d\x01\x0b)\x03!\x0f)\x05!!\x1d\x03\x0f)\x05!!\x0f)\x01\x07\x13\x1b)\x05!!\r)\x03\t\x07!)\x01\x0f\x11\x01\x05\x05\x11\x11\x03\x05\x03\x05)\x03\x01\x07)\x03\x02\x02\x15)\x03\t\x1b)\x03\x05\x1b)\x03\x01\x1b)\x01\r)\x05\x05\x05\r)\x03\x05\r)\x03!\r)\x03\x05\x07\x04\x06\x05\x05\x01\x11\r/\x07\x03\x01\t\x0b\x11\r;\x07\x03=u\x07\x03]\x1f\x03-\x13\x06c\x03\x05\x03\x01\x15\x07mi\x03\x05\x03\x03\x17\x06s\x03\x17\x03\x05\x19\x06w\x03\x17\x03\x05\x1b\x06{\x03\x17\x03\t\x1d\x06\x7f\x03\x05\x05\x07\x0b\r\x06\x83\x03\x05\x05\x03\r\x05\x03\r\x89\x03\t\x03\x07+\x05\x03\x05\x03\x11\x1f\x06+\x03\x05\x05\x0f\x13!\x07\t\x8f\x03\x05\x03\x15\x05\x03\x01-\x03\x19\x05\x03\x01-\x03\x19#\x07\x01\x97\x07\x05\x11\x0b\x03\x17\x05\x03\x01#\x03\x0b\x03\x07\x01\x05\x03\x0b\x03#\x0f\x07\x01\x16\x02\x035\x05!%\x03\x07\x01\x05\x037\x03\'\x05\x03\x01\x1a\x02\x03\t\x03\x07\x01\x05\x03\x05\x03+\x03\x07\x01\x1e\x02\x03\x1f\x03)\t\x06\x01\x03\x05\x07/\x1d-\x03\x07\x01\x05\x039\x03\'\x05\x03\x01"\x02\x03%\x03\x07\x01\x05\x03\x11\x035\x03\x07\x01&\x02\x03;\x033\t\x06\x01\x03\x11\x079\x1f7\x11\x04\r\x051;\x0b\x11\t=\x07\x03\x15+\x03\x05\t\x07\x03A\x1f\x03\x13\x05\x0
3\t#\x03\x0b\x03\x07%\x05\x03\x13\x03\x05\r\x06%\x03\x13\x05\x03\x07\x07\x03IG\x03\x13\x0f\x07OM\x03\x1f\x05\t\x0b\x05\x03\tS\x03\t\x03\x07U\x05\x03\x05\x03\x0f\t\x06Y\x03\x05\x07\r\x01\x11\x11\x04\t\x03\x13\x06\x03\x01\x05\x01\x00\xe2\x1c\x99\x0b\x0b%\x03\x11\x0f\x0b\t\t\x0b!\x11#\x1f/!)!)#\x1f\x19\xa9\x0f99A9;;m\x19\x85\x8fW\xb3K\x9bM\x9bn\x03\x1b%)9+\x1b\x1f\x1f\x15\x1d\x15+\x13\ri\x1f\x11\x15\x17\x15\x11\x11\x1b\x17\x15\x17\x0f\x11\x15\x11\x19)\x0f\x0b\x11builtin\x00vhlo\x00module\x00broadcast_in_dim_v1\x00constant_v1\x00iota_v1\x00select_v1\x00func_v1\x00add_v1\x00compare_v1\x00return_v1\x00reshape_v1\x00transpose_v1\x00real_v1\x00imag_v1\x00negate_v1\x00complex_v1\x00divide_v1\x00call_v1\x00custom_call_v1\x00third_party/py/jax/tests/export_back_compat_test.py\x00value\x00sym_name\x00broadcast_dimensions\x00arg_attrs\x00function_type\x00res_attrs\x00sym_visibility\x00iota_dimension\x00compare_type\x00comparison_direction\x00jax.uses_shape_polymorphism\x00mhlo.num_partitions\x00mhlo.num_replicas\x00jit__lambda_\x00jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]\x00jit(<lambda>)/jit(main)/jit(tril)/add\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]\x00jit(<lambda>)/jit(main)/jit(tril)/ge\x00jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]\x00jit(<lambda>)/jit(main)/jit(tril)/select_n\x00jit(<lambda>)/jit(main)/iota[dtype=complex128 shape=(64,) dimension=0]\x00jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]\x00permutation\x00jit(<lambda>)/jit(main)/transpose[permutation=(1, 
0)]\x00jit(<lambda>)/jit(main)/real\x00jit(<lambda>)/jit(main)/imag\x00jit(<lambda>)/jit(main)/neg\x00jit(<lambda>)/jit(main)/complex\x00jit(<lambda>)/jit(main)/add\x00jit(<lambda>)/jit(main)/div\x00callee\x00jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True subset_by_index=None]\x00api_version\x00backend_config\x00call_target_name\x00called_computations\x00has_side_effect\x00mhlo.backend_config\x00operand_layouts\x00output_operand_aliases\x00result_layouts\x00mhlo.layout_mode\x00default\x00jax.result_info\x00tril\x00[0]\x00[1]\x00main\x00public\x00private\x00\x00lapack_zheevd_ffi\x00mode\x00uplo\x00',
xla_call_module_version=9,
nr_devices=1,
) # End paste
# Pasted from the test output (see export_back_compat_test_util.py module docstring)
data_2024_08_19["c64"] = dict(
testdata_version=1,
platform='cpu',
custom_call_targets=['lapack_cheevd_ffi'],
serialized_date=datetime.date(2024, 8, 19),
inputs=(),
expected_outputs=(array([[-0.6185769 +0.j, -0.20142993 +0.j, -0.09725195 +0.j,
0.62983674 +0.j, -0.07926044 +0.j, 0.3605001 -0.j,
-0.019093221 +0.j, -0.18446997 +0.j],
[-0.47070873 +0.j, 0.29325768 +0.j, -0.19454116 +0.j,
-0.6394365 +0.j, 0.06229549 +0.j, 0.33249345 +0.j,
0.28112718 +0.j, -0.22856665 +0.j],
[-0.32284075 +0.j, -0.12361939 +0.j, 0.20547704 +0.j,
-0.18307868 +0.j, 0.47294614 +0.j, -0.3170349 +0.j,
-0.6373532 +0.j, -0.27266347 +0.j],
[-0.17497246 +0.j, -0.079641335 +0.j, 0.15042792 +0.j,
-0.15416273 +0.j, -0.815209 +0.j, -0.38054234 +0.j,
-0.083263926 +0.j, -0.31676024 +0.j],
[-0.027104257 +0.j, -0.26490977 +0.j, 0.32271704 +0.j,
0.08653544 +0.j, 0.30305928 +0.j, -0.33998996 +0.j,
0.6926741 +0.j, -0.360857 +0.j],
[ 0.120763965 +0.j, 0.43288827 +0.j, -0.64385164 +0.j,
0.2652551 +0.j, 0.094823755 +0.j, -0.37435007 +0.j,
0.00091664493+0.j, -0.40495378 +0.j],
[ 0.26863196 +0.j, 0.51607686 +0.j, 0.53846526 +0.j,
0.16969058 +0.j, -0.0216703 +0.j, 0.35755336 +0.j,
-0.113144726 +0.j, -0.4490505 +0.j],
[ 0.4165004 +0.j, -0.57262254 +0.j, -0.28144246 +0.j,
-0.17463988 +0.j, -0.016984984 +0.j, 0.3613705 +0.j,
-0.12186296 +0.j, -0.49314725 +0.j]], dtype=complex64), array([-2.4598808e+01, -3.3105560e-05, -3.1002426e-05, -1.0103593e-05,
-1.0022322e-05, 4.0141886e-06, 9.5510331e-06, 2.7659882e+02],
dtype=float32)),
mlir_module_text=r"""
#loc7 = loc("third_party/py/jax/tests/export_back_compat_test.py":277:27)
#loc18 = loc("jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]"(#loc7))
module @jit__lambda_ attributes {jax.uses_shape_polymorphism = false, mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} {
func.func public @main() -> (tensor<8x8xcomplex<f32>> {jax.result_info = "[0]", mhlo.layout_mode = "default"}, tensor<8xf32> {jax.result_info = "[1]", mhlo.layout_mode = "default"}) {
%0 = stablehlo.iota dim = 0 : tensor<64xcomplex<f32>> loc(#loc9)
%1 = stablehlo.reshape %0 : (tensor<64xcomplex<f32>>) -> tensor<8x8xcomplex<f32>> loc(#loc10)
%2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<8x8xcomplex<f32>>) -> tensor<8x8xcomplex<f32>> loc(#loc11)
%3 = stablehlo.real %2 : (tensor<8x8xcomplex<f32>>) -> tensor<8x8xf32> loc(#loc12)
%4 = stablehlo.imag %2 : (tensor<8x8xcomplex<f32>>) -> tensor<8x8xf32> loc(#loc13)
%5 = stablehlo.negate %4 : tensor<8x8xf32> loc(#loc14)
%6 = stablehlo.complex %3, %5 : tensor<8x8xcomplex<f32>> loc(#loc15)
%7 = stablehlo.add %1, %6 : tensor<8x8xcomplex<f32>> loc(#loc16)
%cst = stablehlo.constant dense<(2.000000e+00,0.000000e+00)> : tensor<complex<f32>> loc(#loc)
%8 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<complex<f32>>) -> tensor<8x8xcomplex<f32>> loc(#loc17)
%9 = stablehlo.divide %7, %8 : tensor<8x8xcomplex<f32>> loc(#loc17)
%10 = call @tril(%9) : (tensor<8x8xcomplex<f32>>) -> tensor<8x8xcomplex<f32>> loc(#loc18)
%c = stablehlo.constant dense<8> : tensor<i64> loc(#loc19)
%c_0 = stablehlo.constant dense<8> : tensor<i64> loc(#loc19)
%11:3 = stablehlo.custom_call @lapack_cheevd_ffi(%10) {mhlo.backend_config = {mode = 86 : ui8, uplo = 76 : ui8}, operand_layouts = [dense<[0, 1]> : tensor<2xindex>], output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 0, operand_tuple_indices = []>], result_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<0> : tensor<1xindex>, dense<> : tensor<0xindex>]} : (tensor<8x8xcomplex<f32>>) -> (tensor<8x8xcomplex<f32>>, tensor<8xf32>, tensor<i32>) loc(#loc19)
%c_1 = stablehlo.constant dense<0> : tensor<i32> loc(#loc19)
%12 = stablehlo.broadcast_in_dim %c_1, dims = [] : (tensor<i32>) -> tensor<i32> loc(#loc19)
%13 = stablehlo.compare EQ, %11#2, %12, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1> loc(#loc19)
%14 = stablehlo.broadcast_in_dim %13, dims = [] : (tensor<i1>) -> tensor<1x1xi1> loc(#loc19)
%cst_2 = stablehlo.constant dense<(0x7FC00000,0x7FC00000)> : tensor<complex<f32>> loc(#loc19)
%15 = stablehlo.broadcast_in_dim %cst_2, dims = [] : (tensor<complex<f32>>) -> tensor<8x8xcomplex<f32>> loc(#loc19)
%16 = stablehlo.broadcast_in_dim %14, dims = [0, 1] : (tensor<1x1xi1>) -> tensor<8x8xi1> loc(#loc19)
%17 = stablehlo.select %16, %11#0, %15 : tensor<8x8xi1>, tensor<8x8xcomplex<f32>> loc(#loc19)
%18 = stablehlo.broadcast_in_dim %13, dims = [] : (tensor<i1>) -> tensor<1xi1> loc(#loc19)
%cst_3 = stablehlo.constant dense<0x7FC00000> : tensor<f32> loc(#loc19)
%19 = stablehlo.broadcast_in_dim %cst_3, dims = [] : (tensor<f32>) -> tensor<8xf32> loc(#loc19)
%20 = stablehlo.broadcast_in_dim %18, dims = [0] : (tensor<1xi1>) -> tensor<8xi1> loc(#loc19)
%21 = stablehlo.select %20, %11#1, %19 : tensor<8xi1>, tensor<8xf32> loc(#loc19)
return %17, %21 : tensor<8x8xcomplex<f32>>, tensor<8xf32> loc(#loc)
} loc(#loc)
func.func private @tril(%arg0: tensor<8x8xcomplex<f32>> {mhlo.layout_mode = "default"} loc("jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]"(#loc7))) -> (tensor<8x8xcomplex<f32>> {mhlo.layout_mode = "default"}) {
%0 = stablehlo.iota dim = 0 : tensor<8x8xi32> loc(#loc20)
%c = stablehlo.constant dense<0> : tensor<i32> loc(#loc18)
%1 = stablehlo.broadcast_in_dim %c, dims = [] : (tensor<i32>) -> tensor<8x8xi32> loc(#loc21)
%2 = stablehlo.add %0, %1 : tensor<8x8xi32> loc(#loc21)
%3 = stablehlo.iota dim = 1 : tensor<8x8xi32> loc(#loc22)
%4 = stablehlo.compare GE, %2, %3, SIGNED : (tensor<8x8xi32>, tensor<8x8xi32>) -> tensor<8x8xi1> loc(#loc23)
%cst = stablehlo.constant dense<(0.000000e+00,0.000000e+00)> : tensor<complex<f32>> loc(#loc18)
%5 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<complex<f32>>) -> tensor<8x8xcomplex<f32>> loc(#loc24)
%6 = stablehlo.select %4, %arg0, %5 : tensor<8x8xi1>, tensor<8x8xcomplex<f32>> loc(#loc25)
return %6 : tensor<8x8xcomplex<f32>> loc(#loc18)
} loc(#loc18)
} loc(#loc)
#loc = loc(unknown)
#loc1 = loc("third_party/py/jax/tests/export_back_compat_test.py":269:26)
#loc2 = loc("third_party/py/jax/tests/export_back_compat_test.py":269:14)
#loc3 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:34)
#loc4 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:25)
#loc5 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:15)
#loc6 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:14)
#loc8 = loc("third_party/py/jax/tests/export_back_compat_test.py":277:11)
#loc9 = loc("jit(<lambda>)/jit(main)/iota[dtype=complex64 shape=(64,) dimension=0]"(#loc1))
#loc10 = loc("jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]"(#loc2))
#loc11 = loc("jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]"(#loc3))
#loc12 = loc("jit(<lambda>)/jit(main)/real"(#loc4))
#loc13 = loc("jit(<lambda>)/jit(main)/imag"(#loc4))
#loc14 = loc("jit(<lambda>)/jit(main)/neg"(#loc4))
#loc15 = loc("jit(<lambda>)/jit(main)/complex"(#loc4))
#loc16 = loc("jit(<lambda>)/jit(main)/add"(#loc5))
#loc17 = loc("jit(<lambda>)/jit(main)/div"(#loc6))
#loc19 = loc("jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True subset_by_index=None]"(#loc8))
#loc20 = loc("jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]"(#loc7))
#loc21 = loc("jit(<lambda>)/jit(main)/jit(tril)/add"(#loc7))
#loc22 = loc("jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]"(#loc7))
#loc23 = loc("jit(<lambda>)/jit(main)/jit(tril)/ge"(#loc7))
#loc24 = loc("jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]"(#loc7))
#loc25 = loc("jit(<lambda>)/jit(main)/jit(tril)/select_n"(#loc7))
""",
mlir_module_serialized=b'ML\xefR\x01StableHLO_v0.9.0\x00\x013\x05\x01\x03\x01\x03\x05\x03#\x07\t\x0b\r\x0f\x11\x13\x15\x17\x19\x1b\x1d\x1f!#%\'\x03\xda\x02*\x02?\x01\xab\x0f\x0b\x13\x17\x0f\x0b\x07\x17\x0b\x0b\x0f\x0b\x0b\x0b\x0b\x13\x0b\x13\x0f\x0b\x0b\x0f\x13+\x0b\x0f\x0b\x0b\x0b33\x0b\x0f\x0b\x0b\x13\x0f\x0b\x1b\x0f\x0b\x13\x0f\x0b\x0f\x0b\x0f\x0b\x17\x0f\x0b\x17\x13\x0b\x0f\x0b\x17\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x0f\x0b\x17\x13\x0b\x17\x13\x0b\x0b\x17S\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x03a\x0b\x0b\x0b\x0b\x0f\x0b\x0bO\x0b\x13\x1b\x0b\x1b\x0b\x0b\x0b\x13\x0b\x0b\x0f\x1f\x0f\x0f\x0b/O//\x0b\x0b\x0b\x0b\x1b\x0b\x0f\x0b\x0f\x0f\x0f\x17\x17/\x0f\x0b/O\x1f/\x01\x0b\x1f\x17\x17\x17\x17\x01\x05\x0b\x0f\x03;\x17\x07\x0f\x0f\x07\x07\x13\x17\x0b\x17\x0f\x07\x07\x17\x13\x07\x0f\x17\x17\x13\x17\x13\x13\x13\x0f\x17\x13\x13\x13\x026\n\x1d\x93\x95\x05)\x03\x03\x13\xd5\x17\x03V\x047\x1d?\x07\x05+\x1f\x17\x03>\x043\x05-\x05/\x11\x03\x05\x051\x053\x055\x057\x03\x03!\xd1\x059\x03\x03\x0b\xd3\x1dE\x07\x05;\x05=\x1d\x8b\x8d\x03\x03\x0b\xe1\x03\t135\x157\x15\x119\x05?\x11\x01\x00\x05A\x05C\x05E\x03\x0b\x17\xaf\x19\xbb\x1b\xbd\x11\xc7\x1d\xc9\x03\x0b\x17\xb3\x19\xcd\x1b\xb3\x11\xb5\x1d\xcf\x05G\x1dC\x07\x05I\x05K\x03\x03!\xd7\x1dK\x07\x05M\x03\x05\'\xb7)\xd9\x1dQ\x07\x05O\x03\x03\x0b\xdb\x1dW\x07\x05Q\x1d[\x07\x05S\x1d_a\x05U\x17\x036\x045\x1deg\x05W\x17\x036\x04\x1d\x03\x03k\xdd\x05Y\x1doq\x05[\x17\x03>\x04E\x1du\x0f\x05]\x1dy\x0f\x05_\x1d}\x0f\x05a\x1d\x81\x0f\x05c\x1d\x85\x87\x05e\x17\x03>\x04\x1f\x03\x03\x0b\xdf\x05g\x17\x03>\x04\x1d\x03\x03\x91\xb5\x05i\x05k\x17\x03V\x04\x17\x03\x13\x99\xe3\x9b\xe5\x9d\xe7\x9f\xaf\xa1\xe9\xa3\xeb\xa5\xf5\xa7\xf7\xa9\xfb\x05m\x05o\x05q\x05s\x05u\x05w\x05y\x05{\x05}\x1d\x7f\x1d\x81\x03\x01\x1d\x83\x03\x03\xcb\x1d\x85\t\x07\x1f/!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00#\'\x03\x05\xbf\xc3\r\x05\xb1\xc1\xab\xad\x1d\x87\r\x05\xb1\xc5\xab\xad\x1d\x89\x1d\x8b\x1d\x8d\r\x03\xab\xad#)\x1d\x8f\x13\x07\x01\x1f\x0b\t\x00\x00\x00\x00
\x1f+\x01\x13\x07\x05\x07\x05\x1f\t\x11\x00\x00\x00\x00\x00\x00\x00\x00\x1f!!\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\t\x11\x00\x00\x00@\x00\x00\x00\x00\x1f\x19\x11\x08\x00\x00\x00\x00\x00\x00\x00\x0b\x03\x1d\x91\x1d\x93\x05\x01\r\x05\xed\xef\xf1\xf3\x1d\x95\x13#V\x1d\x97\x13#L\x03\x03\xb9\x03\x03\xf9\x15\x03\x01\x01\x01\x03\x07\xb9\xfd\xff\x1f1\x11\x00\x00\x00\x00\x00\x00\x00\x00\x1f3\x01\x07\x01\x1f\t\x11\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x1f!!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1f%\t\x00\x00\xc0\x7f\x1f=\x11\x00\x00\x00\x00\x00\x00\x00\x00\x03\x05\'\xb7)\x02\x02\x03\x03\x0b\x06\x02\x03\x03\x13\n\x02\x03\x03\x0b\x0e\x02\x03\x03\x13\x12\x02\x01\t\x01\x02\x02)\x05!!\x15\x1d)\x01\x15)\x01\x1d\x01\t)\x03!\x0f)\x05!!\x1d\x03\x0f)\x05!!\x0f)\x01\x07\x13\x1b)\x05!!\r)\x03\t\x07!)\x01\x0f\x11\x01\x05\x05\x11\x11\x03\x05\x03\x05)\x03\x01\x07)\x03\x02\x02\x15)\x03\t\x1b)\x03\x05\x1b)\x03\x01\x1b)\x01\r)\x05\x05\x05\r)\x03\x05\r)\x03!\r)\x03\x05\x07\x04\x06\x05\x05\x01\x11\r/\x07\x03\x01\t\x0b\x11\r;\x07\x03=u\x07\x03]\x1f\x03-\x13\x06c\x03\x05\x03\x01\x15\x07mi\x03\x05\x03\x03\x17\x06s\x03\x17\x03\x05\x19\x06w\x03\x17\x03\x05\x1b\x06{\x03\x17\x03\t\x1d\x06\x7f\x03\x05\x05\x07\x0b\r\x06\x83\x03\x05\x05\x03\r\x05\x03\r\x89\x03\t\x03\x07+\x05\x03\x05\x03\x11\x1f\x06+\x03\x05\x05\x0f\x13!\x07\t\x8f\x03\x05\x03\x15\x05\x03\x01-\x03\x19\x05\x03\x01-\x03\x19#\x07\x01\x97\x07\x05\x11\x0b\x03\x17\x05\x03\x01#\x03\x0b\x03\x07\x01\x05\x03\x0b\x03#\x0f\x07\x01\x16\x02\x035\x05!%\x03\x07\x01\x05\x037\x03\'\x05\x03\x01\x1a\x02\x03\t\x03\x07\x01\x05\x03\x05\x03+\x03\x07\x01\x1e\x02\x03\x1f\x03)\t\x06\x01\x03\x05\x07/\x1d-\x03\x07\x01\x05\x039\x03\'\x05\x03\x01"\x02\x03%\x03\x07\x01\x05\x03\x11\x035\x03\x07\x01&\x02\x03;\x033\t\x06\x01\x03\x11\x079\x1f7\x11\x04\r\x051;\x0b\x11\t=\x07\x03\x15+\x03\x05\t\x07\x03A\x1f\x03\x13\x05\x03\t#\x03\x0b\x03\x07%\x05\x03\x13\x03\x05\r\x06%\x03\x13\x05\x03\x07\x07\x03IG\x03\x13\x0f\x07OM\x03\x1f\x0
5\t\x0b\x05\x03\tS\x03\t\x03\x07U\x05\x03\x05\x03\x0f\t\x06Y\x03\x05\x07\r\x01\x11\x11\x04\t\x03\x13\x06\x03\x01\x05\x01\x00\xde\x1c\x99\x0b\x0b%\x03\x11\x0f\x0b\t\t\x0b!\x11#\x1f/!)!)#\x1f\x19\xa9\x0f99A9;;m\x19\x85\x8dW\xb3K\x9bM\x9bn\x03\x1b%)9+\x1b\x1f\x1f\x15\x1d\x15+\x13\ri\x1f\x11\x15\x17\x15\x11\x11\x1b\x17\x15\x17\x0f\x11\x15\x11\x19)\x0f\x0b\x11builtin\x00vhlo\x00module\x00broadcast_in_dim_v1\x00constant_v1\x00iota_v1\x00select_v1\x00func_v1\x00add_v1\x00compare_v1\x00return_v1\x00reshape_v1\x00transpose_v1\x00real_v1\x00imag_v1\x00negate_v1\x00complex_v1\x00divide_v1\x00call_v1\x00custom_call_v1\x00third_party/py/jax/tests/export_back_compat_test.py\x00value\x00sym_name\x00broadcast_dimensions\x00arg_attrs\x00function_type\x00res_attrs\x00sym_visibility\x00iota_dimension\x00compare_type\x00comparison_direction\x00jax.uses_shape_polymorphism\x00mhlo.num_partitions\x00mhlo.num_replicas\x00jit__lambda_\x00jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]\x00jit(<lambda>)/jit(main)/jit(tril)/add\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]\x00jit(<lambda>)/jit(main)/jit(tril)/ge\x00jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]\x00jit(<lambda>)/jit(main)/jit(tril)/select_n\x00jit(<lambda>)/jit(main)/iota[dtype=complex64 shape=(64,) dimension=0]\x00jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]\x00permutation\x00jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]\x00jit(<lambda>)/jit(main)/real\x00jit(<lambda>)/jit(main)/imag\x00jit(<lambda>)/jit(main)/neg\x00jit(<lambda>)/jit(main)/complex\x00jit(<lambda>)/jit(main)/add\x00jit(<lambda>)/jit(main)/div\x00callee\x00jit(<lambda>)/jit(main)/eigh[lower=True 
sort_eigenvalues=True subset_by_index=None]\x00api_version\x00backend_config\x00call_target_name\x00called_computations\x00has_side_effect\x00mhlo.backend_config\x00operand_layouts\x00output_operand_aliases\x00result_layouts\x00mhlo.layout_mode\x00default\x00jax.result_info\x00tril\x00[0]\x00[1]\x00main\x00public\x00private\x00\x00lapack_cheevd_ffi\x00mode\x00uplo\x00',
xla_call_module_version=9,
nr_devices=1,
) # End paste
# Pasted from the test output (see export_back_compat_test_util.py module docstring)
data_2024_08_19["f32"] = dict(
testdata_version=1,
platform='cpu',
custom_call_targets=['lapack_ssyevd_ffi'],
serialized_date=datetime.date(2024, 8, 19),
inputs=(),
expected_outputs=(array([[-0.6185769 , -0.20142993 , -0.09725195 , 0.62983674 ,
-0.07926044 , 0.3605001 , -0.019093221 , -0.18446997 ],
[-0.47070873 , 0.29325768 , -0.19454119 , -0.6394365 ,
0.0622955 , 0.33249345 , 0.28112718 , -0.22856665 ],
[-0.32284075 , -0.12361939 , 0.20547704 , -0.18307868 ,
0.47294614 , -0.3170349 , -0.6373532 , -0.27266347 ],
[-0.17497246 , -0.079641335 , 0.15042791 , -0.15416273 ,
-0.815209 , -0.38054234 , -0.083263926 , -0.31676024 ],
[-0.027104253 , -0.26490977 , 0.32271704 , 0.08653544 ,
0.30305928 , -0.33998996 , 0.6926741 , -0.360857 ],
[ 0.12076397 , 0.43288827 , -0.64385164 , 0.2652551 ,
0.09482376 , -0.37435007 , 0.00091664493, -0.40495378 ],
[ 0.26863196 , 0.51607686 , 0.53846526 , 0.16969058 ,
-0.021670295 , 0.35755336 , -0.113144726 , -0.4490505 ],
[ 0.4165004 , -0.57262254 , -0.2814425 , -0.17463988 ,
-0.01698498 , 0.3613705 , -0.12186296 , -0.49314725 ]],
dtype=float32), array([-2.4598808e+01, -3.3105560e-05, -3.1002426e-05, -1.0103593e-05,
-1.0022322e-05, 4.0141886e-06, 9.5510331e-06, 2.7659882e+02],
dtype=float32)),
mlir_module_text=r"""
#loc6 = loc("third_party/py/jax/tests/export_back_compat_test.py":277:27)
#loc13 = loc("jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]"(#loc6))
module @jit__lambda_ attributes {jax.uses_shape_polymorphism = false, mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} {
func.func public @main() -> (tensor<8x8xf32> {jax.result_info = "[0]", mhlo.layout_mode = "default"}, tensor<8xf32> {jax.result_info = "[1]", mhlo.layout_mode = "default"}) {
%0 = stablehlo.iota dim = 0 : tensor<64xf32> loc(#loc8)
%1 = stablehlo.reshape %0 : (tensor<64xf32>) -> tensor<8x8xf32> loc(#loc9)
%2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<8x8xf32>) -> tensor<8x8xf32> loc(#loc10)
%3 = stablehlo.add %1, %2 : tensor<8x8xf32> loc(#loc11)
%cst = stablehlo.constant dense<2.000000e+00> : tensor<f32> loc(#loc)
%4 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<f32>) -> tensor<8x8xf32> loc(#loc12)
%5 = stablehlo.divide %3, %4 : tensor<8x8xf32> loc(#loc12)
%6 = call @tril(%5) : (tensor<8x8xf32>) -> tensor<8x8xf32> loc(#loc13)
%c = stablehlo.constant dense<8> : tensor<i64> loc(#loc14)
%c_0 = stablehlo.constant dense<8> : tensor<i64> loc(#loc14)
%7:3 = stablehlo.custom_call @lapack_ssyevd_ffi(%6) {mhlo.backend_config = {mode = 86 : ui8, uplo = 76 : ui8}, operand_layouts = [dense<[0, 1]> : tensor<2xindex>], output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 0, operand_tuple_indices = []>], result_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<0> : tensor<1xindex>, dense<> : tensor<0xindex>]} : (tensor<8x8xf32>) -> (tensor<8x8xf32>, tensor<8xf32>, tensor<i32>) loc(#loc14)
%c_1 = stablehlo.constant dense<0> : tensor<i32> loc(#loc14)
%8 = stablehlo.broadcast_in_dim %c_1, dims = [] : (tensor<i32>) -> tensor<i32> loc(#loc14)
%9 = stablehlo.compare EQ, %7#2, %8, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1> loc(#loc14)
%10 = stablehlo.broadcast_in_dim %9, dims = [] : (tensor<i1>) -> tensor<1x1xi1> loc(#loc14)
%cst_2 = stablehlo.constant dense<0x7FC00000> : tensor<f32> loc(#loc14)
%11 = stablehlo.broadcast_in_dim %cst_2, dims = [] : (tensor<f32>) -> tensor<8x8xf32> loc(#loc14)
%12 = stablehlo.broadcast_in_dim %10, dims = [0, 1] : (tensor<1x1xi1>) -> tensor<8x8xi1> loc(#loc14)
%13 = stablehlo.select %12, %7#0, %11 : tensor<8x8xi1>, tensor<8x8xf32> loc(#loc14)
%14 = stablehlo.broadcast_in_dim %9, dims = [] : (tensor<i1>) -> tensor<1xi1> loc(#loc14)
%cst_3 = stablehlo.constant dense<0x7FC00000> : tensor<f32> loc(#loc14)
%15 = stablehlo.broadcast_in_dim %cst_3, dims = [] : (tensor<f32>) -> tensor<8xf32> loc(#loc14)
%16 = stablehlo.broadcast_in_dim %14, dims = [0] : (tensor<1xi1>) -> tensor<8xi1> loc(#loc14)
%17 = stablehlo.select %16, %7#1, %15 : tensor<8xi1>, tensor<8xf32> loc(#loc14)
return %13, %17 : tensor<8x8xf32>, tensor<8xf32> loc(#loc)
} loc(#loc)
func.func private @tril(%arg0: tensor<8x8xf32> {mhlo.layout_mode = "default"} loc("jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]"(#loc6))) -> (tensor<8x8xf32> {mhlo.layout_mode = "default"}) {
%0 = stablehlo.iota dim = 0 : tensor<8x8xi32> loc(#loc15)
%c = stablehlo.constant dense<0> : tensor<i32> loc(#loc13)
%1 = stablehlo.broadcast_in_dim %c, dims = [] : (tensor<i32>) -> tensor<8x8xi32> loc(#loc16)
%2 = stablehlo.add %0, %1 : tensor<8x8xi32> loc(#loc16)
%3 = stablehlo.iota dim = 1 : tensor<8x8xi32> loc(#loc17)
%4 = stablehlo.compare GE, %2, %3, SIGNED : (tensor<8x8xi32>, tensor<8x8xi32>) -> tensor<8x8xi1> loc(#loc18)
%cst = stablehlo.constant dense<0.000000e+00> : tensor<f32> loc(#loc13)
%5 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<f32>) -> tensor<8x8xf32> loc(#loc19)
%6 = stablehlo.select %4, %arg0, %5 : tensor<8x8xi1>, tensor<8x8xf32> loc(#loc20)
return %6 : tensor<8x8xf32> loc(#loc13)
} loc(#loc13)
} loc(#loc)
#loc = loc(unknown)
#loc1 = loc("third_party/py/jax/tests/export_back_compat_test.py":269:26)
#loc2 = loc("third_party/py/jax/tests/export_back_compat_test.py":269:14)
#loc3 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:34)
#loc4 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:15)
#loc5 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:14)
#loc7 = loc("third_party/py/jax/tests/export_back_compat_test.py":277:11)
#loc8 = loc("jit(<lambda>)/jit(main)/iota[dtype=float32 shape=(64,) dimension=0]"(#loc1))
#loc9 = loc("jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]"(#loc2))
#loc10 = loc("jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]"(#loc3))
#loc11 = loc("jit(<lambda>)/jit(main)/add"(#loc4))
#loc12 = loc("jit(<lambda>)/jit(main)/div"(#loc5))
#loc14 = loc("jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True subset_by_index=None]"(#loc7))
#loc15 = loc("jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]"(#loc6))
#loc16 = loc("jit(<lambda>)/jit(main)/jit(tril)/add"(#loc6))
#loc17 = loc("jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]"(#loc6))
#loc18 = loc("jit(<lambda>)/jit(main)/jit(tril)/ge"(#loc6))
#loc19 = loc("jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]"(#loc6))
#loc20 = loc("jit(<lambda>)/jit(main)/jit(tril)/select_n"(#loc6))
""",
mlir_module_serialized=b"ML\xefR\x01StableHLO_v0.9.0\x00\x01+\x05\x01\x03\x01\x03\x05\x03\x1b\x07\t\x0b\r\x0f\x11\x13\x15\x17\x19\x1b\x1d\x1f\x03\x96\x02\xff9\x01\xa1\x0f\x13\x17\x0b\x0f\x0b\x07\x0b\x0b\x0f\x0b\x0b\x0b\x0b\x13\x0b\x13\x0f\x0b\x0b\x0f\x13\x13+\x0b\x0f\x0b\x0b\x0b33\x0b\x0f\x0b\x0b\x13\x0f\x0b\x1b\x0f\x0b\x13\x0f\x0b\x0f\x0b\x0f\x0b\x17\x0f\x0b\x17\x13\x0b\x0f\x0b\x17\x0f\x0b\x17\x13\x0b\x17\x13\x0b\x0b\x17S\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x1b\x13\x13\x03_\x0b\x0b\x0b\x0b\x0f\x0b\x0bO\x0b\x13\x1b\x0b\x1b\x0b\x0b\x0b\x13\x0b\x0b\x0f\x1f\x0f\x0f\x0b\x1fO\x1f/\x0b\x0b\x0b\x0b\x1b\x0b\x0f\x0b\x0f\x0f\x0f\x17\x17/\x0f\x0b\x1fO/\x01\x05\x0b\x0f\x035\x17\x0f\x07\x0f\x07\x07\x13\x17\x0f\x07\x07\x17\x13\x07\x17\x17\x13\x17\x13\x13\x13\x0f\x17\x13\x13\x13\x02:\t\x1d\x83\x85\x03\x03\x11\xcb\x17\x07V\x047\x05!\x1d?\x05\x05#\x1f\x05%\x05'\x11\x03\x05\x05)\x05+\x05-\x05/\x03\x03\x1f\xc7\x051\x03\x03\x0b\xc9\x1dE\x05\x053\x055\x1d{}\x03\x03\x0b\xd7\x03\x03\x0b\xf9\x03\t135\x137\x13\x0f9\x057\x11\x01\x00\x059\x05;\x05=\x03\x0b\x15\xa5\x17\xb1\x19\xb3\x0f\xbd\x1b\xbf\x03\x0b\x15\xa9\x17\xc3\x19\xa9\x0f\xab\x1b\xc5\x05?\x1dC\x05\x05A\x05C\x03\x03\x1f\xcd\x1dK\x05\x05E\x03\x05%\xad'\xcf\x1dQ\x05\x05G\x03\x03\x0b\xd1\x1dW\x05\x05I\x1d[\x05\x05K\x1d_a\x05M\x17\x076\x045\x1deg\x05O\x17\x076\x04\x1d\x03\x03k\xd3\x05Q\x1doq\x05S\x17\x07>\x04E\x1duw\x05U\x17\x07>\x04\x1f\x03\x03\x0b\xd5\x05W\x17\x07>\x04\x1d\x03\x03\x81\xab\x05Y\x05[\x17\x07V\x04\x17\x03\x13\x89\xd9\x8b\xdb\x8d\xdd\x8f\xa5\x91\xdf\x93\xe1\x95\xeb\x97\xed\x99\xf1\x05]\x05_\x05a\x05c\x05e\x05g\x05i\x05k\x05m\x03\x05%\xad'\xf7\x03\x03\x11\xfb\x03\x03\x11\xfd\x1do\x1dq\x03\x01\x1ds\x03\x03\xc1\x1du\t\x07\x1f)!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00#!\x03\x05\xb5\xb9\r\x05\xa7\xb7\xa1\xa3\x1dw\r\x05\xa7\xbb\xa1\xa3\x1dy\x1d{\x1d}\r\x03\xa1\xa3##\x1d\x7f\x13\t\x01\x1f\x0b\t\x00\x00\x00\x00\x1f%\x01\x13\t\x05\x07\x05\x1f\x07\t\x00\x00\x00\x00\x1f\x1d!\x01\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x1f\x07\t\x00\x00\x00@\x1f\x15\x11\x08\x00\x00\x00\x00\x00\x00\x00\x0b\x03\x1d\x81\x1d\x83\x05\x01\r\x05\xe3\xe5\xe7\xe9\x1d\x85\x13\x1fV\x1d\x87\x13\x1fL\x03\x03\xaf\x03\x03\xef\x15\x03\x01\x01\x01\x03\x07\xaf\xf3\xf5\x1f+\x11\x00\x00\x00\x00\x00\x00\x00\x00\x1f-\x01\x07\x01\x1f\x07\t\x00\x00\xc0\x7f\x1f\x1d!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1f7\x11\x00\x00\x00\x00\x00\x00\x00\x00\x01\t\x01\x02\x02)\x05!!\x0f)\x01\x0f\x1d)\x01\x19\x01\t)\x03!\x0f)\x05!!\x19)\x01\t\x13\x1b)\x05!!\r)\x03\t\t!\x11\x01\x05\x05\x11\x11\x03\x05\x03\x05)\x03\x01\t)\x03\x02\x02\x0f)\x03\t\x17)\x03\x05\x17)\x03\x01\x17)\x01\r)\x05\x05\x05\r)\x03\x05\r)\x03!\r)\x03\x05\t\x04~\x04\x05\x01\x11\r/\x07\x03\x01\t\x0b\x11\r;\x07\x035e\x07\x03]\x1d\x03'\x13\x06c\x03\x05\x03\x01\x15\x07mi\x03\x05\x03\x03\r\x06s\x03\x05\x05\x03\x05\x05\x03\ry\x03\x07\x03\x07)\x03\x03\x05\x03\t\x17\x06)\x03\x05\x05\x07\x0b\x19\x07\t\x7f\x03\x05\x03\r\x05\x03\x01+\x03\x15\x05\x03\x01+\x03\x15\x1b\x07\x01\x87\x07\x05\x11\x0b\x03\x0f\x05\x03\x01!\x03\x0b\x03\x07\x01\x03\x03\x0b\x03\x1b\x0f\x07\x01\x9b\x03/\x05\x19\x1d\x03\x07\x01\x03\x031\x03\x1f\x05\x03\x01-\x03\x07\x03\x07\x01\x03\x03\x05\x03#\x03\x07\x01\x9d\x03\x1b\x03!\t\x06\x01\x03\x05\x07'\x15%\x03\x07\x01\x03\x033\x03\x1f\x05\x03\x01-\x03\x07\x03\x07\x01\x03\x03\x11\x03-\x03\x07\x01\x9f\x035\x03+\t\x06\x01\x03\x11\x071\x17/\x11\x04\r\x05)3\x0b\x11\t=\x07\x03\x15+\x03\x05\t\x07\x03A\x1d\x03\x13\x05\x03\t!\x03\x0b\x03\x07#\x03\x03\x13\x03\x05\r\x06#\x03\x13\x05\x03\x07\x07\x03IG\x03\x13\x0f\x07OM\x03\x1b\x05\t\x0b\x05\x03\tS\x03\x07\x03\x07U\x03\x03\x05\x03\x0f\t\x06Y\x03\x05\x07\r\x01\x11\x11\x04\t\x03\x13\x06\x03\x01\x05\x01\x00J\x1a\x89\x0b\x0b%\x03\x11\x0f\x0b\t\t\x0b!\x11#\x1f/!)!)#\x1f\x19\xa9\x0f99m\x19\x85\x89W\xb3K\x9bM\x9bn\x03\x1b%)9+\x1b\x1f\x1f\x15\x1d\x15+\x13\ri\x1f\x11\x15\x1b\x17\x15\x17\x0f\x11\x15\x11\x19)\x0f\x0b\x11builtin\x00vhlo\x00module\x00broadcast_in_dim_v1\x00constant_v1\x00iota
_v1\x00select_v1\x00func_v1\x00add_v1\x00compare_v1\x00return_v1\x00reshape_v1\x00transpose_v1\x00divide_v1\x00call_v1\x00custom_call_v1\x00third_party/py/jax/tests/export_back_compat_test.py\x00value\x00sym_name\x00broadcast_dimensions\x00arg_attrs\x00function_type\x00res_attrs\x00sym_visibility\x00iota_dimension\x00compare_type\x00comparison_direction\x00jax.uses_shape_polymorphism\x00mhlo.num_partitions\x00mhlo.num_replicas\x00jit__lambda_\x00jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]\x00jit(<lambda>)/jit(main)/jit(tril)/add\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]\x00jit(<lambda>)/jit(main)/jit(tril)/ge\x00jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]\x00jit(<lambda>)/jit(main)/jit(tril)/select_n\x00jit(<lambda>)/jit(main)/iota[dtype=float32 shape=(64,) dimension=0]\x00jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]\x00permutation\x00jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]\x00jit(<lambda>)/jit(main)/add\x00jit(<lambda>)/jit(main)/div\x00callee\x00jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True subset_by_index=None]\x00api_version\x00backend_config\x00call_target_name\x00called_computations\x00has_side_effect\x00mhlo.backend_config\x00operand_layouts\x00output_operand_aliases\x00result_layouts\x00mhlo.layout_mode\x00default\x00jax.result_info\x00tril\x00[0]\x00[1]\x00main\x00public\x00private\x00\x00lapack_ssyevd_ffi\x00mode\x00uplo\x00",
xla_call_module_version=9,
nr_devices=1,
) # End paste
# Pasted from the test output (see export_back_compat_test_util.py module docstring)
data_2024_08_19["f64"] = dict(
testdata_version=1,
platform='cpu',
custom_call_targets=['lapack_dsyevd_ffi'],
serialized_date=datetime.date(2024, 8, 19),
inputs=(),
expected_outputs=(array([[-6.1857700048412056e-01, 2.4081403770912022e-01,
3.5662489253627483e-01, -6.3034019033669797e-01,
1.0043483479985752e-16, -2.8842036081919542e-02,
7.7164692943283169e-25, -1.8446994643771725e-01],
[-4.7070881487314614e-01, 4.7473787464450845e-01,
-4.8036836210243367e-01, 4.3802686872516400e-01,
1.7961797619639258e-01, 8.3080980076741355e-03,
2.1415294457221756e-01, -2.2856669794666584e-01],
[-3.2284062926217072e-01, -5.4336490915553370e-01,
2.2181041859724990e-01, 2.9947877954402297e-01,
-3.6491813600134632e-01, 3.2867679819727436e-01,
3.8223299448843473e-01, -2.7266344945561438e-01],
[-1.7497244365119530e-01, -8.9251550609769414e-02,
-6.3518515114898394e-02, 1.9162997359209971e-01,
-2.2087281326110139e-01, 5.9957027043505064e-02,
-8.7632498908241274e-01, -3.1676020096456303e-01],
[-2.7104258040220038e-02, -3.3772873786627672e-01,
2.5901386593721748e-01, 1.7032650752287815e-01,
6.7521217612940332e-01, -4.5036136532965476e-01,
-1.2279030059078447e-02, -3.6085695247351163e-01],
[ 1.2076392757075530e-01, -3.3834734096469254e-01,
-6.5506827461665540e-01, -5.0472498521116749e-01,
6.9987430903492118e-02, 1.0595648906599275e-01,
8.3443844143082022e-02, -4.0495370398246017e-01],
[ 2.6863211318173097e-01, 2.2958613191407318e-01,
6.3952843755683941e-02, 1.8776775771084137e-02,
-5.3523731432241317e-01, -5.9199531677602002e-01,
1.7916671834524248e-01, -4.4905045549140887e-01],
[ 4.1650029879270661e-01, 3.6355449432857079e-01,
2.9755313100756142e-01, 1.6826270392615944e-02,
1.9621068035557282e-01, 5.6830030587314817e-01,
2.9607517592514246e-02, -4.9314720700035747e-01]]), array([-2.4598804776133626e+01, -4.6567755957874661e-14,
-1.9932120610662194e-14, -5.7323356091157378e-15,
-4.5459724251334835e-16, 4.0479851042511616e-14,
9.2325194924982089e-14, 2.7659880477613365e+02])),
mlir_module_text=r"""
#loc6 = loc("third_party/py/jax/tests/export_back_compat_test.py":277:27)
#loc13 = loc("jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]"(#loc6))
module @jit__lambda_ attributes {jax.uses_shape_polymorphism = false, mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} {
func.func public @main() -> (tensor<8x8xf64> {jax.result_info = "[0]", mhlo.layout_mode = "default"}, tensor<8xf64> {jax.result_info = "[1]", mhlo.layout_mode = "default"}) {
%0 = stablehlo.iota dim = 0 : tensor<64xf64> loc(#loc8)
%1 = stablehlo.reshape %0 : (tensor<64xf64>) -> tensor<8x8xf64> loc(#loc9)
%2 = stablehlo.transpose %1, dims = [1, 0] : (tensor<8x8xf64>) -> tensor<8x8xf64> loc(#loc10)
%3 = stablehlo.add %1, %2 : tensor<8x8xf64> loc(#loc11)
%cst = stablehlo.constant dense<2.000000e+00> : tensor<f64> loc(#loc)
%4 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<f64>) -> tensor<8x8xf64> loc(#loc12)
%5 = stablehlo.divide %3, %4 : tensor<8x8xf64> loc(#loc12)
%6 = call @tril(%5) : (tensor<8x8xf64>) -> tensor<8x8xf64> loc(#loc13)
%c = stablehlo.constant dense<8> : tensor<i64> loc(#loc14)
%c_0 = stablehlo.constant dense<8> : tensor<i64> loc(#loc14)
%7:3 = stablehlo.custom_call @lapack_dsyevd_ffi(%6) {mhlo.backend_config = {mode = 86 : ui8, uplo = 76 : ui8}, operand_layouts = [dense<[0, 1]> : tensor<2xindex>], output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 0, operand_tuple_indices = []>], result_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<0> : tensor<1xindex>, dense<> : tensor<0xindex>]} : (tensor<8x8xf64>) -> (tensor<8x8xf64>, tensor<8xf64>, tensor<i32>) loc(#loc14)
%c_1 = stablehlo.constant dense<0> : tensor<i32> loc(#loc14)
%8 = stablehlo.broadcast_in_dim %c_1, dims = [] : (tensor<i32>) -> tensor<i32> loc(#loc14)
%9 = stablehlo.compare EQ, %7#2, %8, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1> loc(#loc14)
%10 = stablehlo.broadcast_in_dim %9, dims = [] : (tensor<i1>) -> tensor<1x1xi1> loc(#loc14)
%cst_2 = stablehlo.constant dense<0x7FF8000000000000> : tensor<f64> loc(#loc14)
%11 = stablehlo.broadcast_in_dim %cst_2, dims = [] : (tensor<f64>) -> tensor<8x8xf64> loc(#loc14)
%12 = stablehlo.broadcast_in_dim %10, dims = [0, 1] : (tensor<1x1xi1>) -> tensor<8x8xi1> loc(#loc14)
%13 = stablehlo.select %12, %7#0, %11 : tensor<8x8xi1>, tensor<8x8xf64> loc(#loc14)
%14 = stablehlo.broadcast_in_dim %9, dims = [] : (tensor<i1>) -> tensor<1xi1> loc(#loc14)
%cst_3 = stablehlo.constant dense<0x7FF8000000000000> : tensor<f64> loc(#loc14)
%15 = stablehlo.broadcast_in_dim %cst_3, dims = [] : (tensor<f64>) -> tensor<8xf64> loc(#loc14)
%16 = stablehlo.broadcast_in_dim %14, dims = [0] : (tensor<1xi1>) -> tensor<8xi1> loc(#loc14)
%17 = stablehlo.select %16, %7#1, %15 : tensor<8xi1>, tensor<8xf64> loc(#loc14)
return %13, %17 : tensor<8x8xf64>, tensor<8xf64> loc(#loc)
} loc(#loc)
func.func private @tril(%arg0: tensor<8x8xf64> {mhlo.layout_mode = "default"} loc("jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]"(#loc6))) -> (tensor<8x8xf64> {mhlo.layout_mode = "default"}) {
%0 = stablehlo.iota dim = 0 : tensor<8x8xi32> loc(#loc15)
%c = stablehlo.constant dense<0> : tensor<i32> loc(#loc13)
%1 = stablehlo.broadcast_in_dim %c, dims = [] : (tensor<i32>) -> tensor<8x8xi32> loc(#loc16)
%2 = stablehlo.add %0, %1 : tensor<8x8xi32> loc(#loc16)
%3 = stablehlo.iota dim = 1 : tensor<8x8xi32> loc(#loc17)
%4 = stablehlo.compare GE, %2, %3, SIGNED : (tensor<8x8xi32>, tensor<8x8xi32>) -> tensor<8x8xi1> loc(#loc18)
%cst = stablehlo.constant dense<0.000000e+00> : tensor<f64> loc(#loc13)
%5 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<f64>) -> tensor<8x8xf64> loc(#loc19)
%6 = stablehlo.select %4, %arg0, %5 : tensor<8x8xi1>, tensor<8x8xf64> loc(#loc20)
return %6 : tensor<8x8xf64> loc(#loc13)
} loc(#loc13)
} loc(#loc)
#loc = loc(unknown)
#loc1 = loc("third_party/py/jax/tests/export_back_compat_test.py":269:26)
#loc2 = loc("third_party/py/jax/tests/export_back_compat_test.py":269:14)
#loc3 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:34)
#loc4 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:15)
#loc5 = loc("third_party/py/jax/tests/export_back_compat_test.py":271:14)
#loc7 = loc("third_party/py/jax/tests/export_back_compat_test.py":277:11)
#loc8 = loc("jit(<lambda>)/jit(main)/iota[dtype=float64 shape=(64,) dimension=0]"(#loc1))
#loc9 = loc("jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]"(#loc2))
#loc10 = loc("jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]"(#loc3))
#loc11 = loc("jit(<lambda>)/jit(main)/add"(#loc4))
#loc12 = loc("jit(<lambda>)/jit(main)/div"(#loc5))
#loc14 = loc("jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True subset_by_index=None]"(#loc7))
#loc15 = loc("jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]"(#loc6))
#loc16 = loc("jit(<lambda>)/jit(main)/jit(tril)/add"(#loc6))
#loc17 = loc("jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]"(#loc6))
#loc18 = loc("jit(<lambda>)/jit(main)/jit(tril)/ge"(#loc6))
#loc19 = loc("jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]"(#loc6))
#loc20 = loc("jit(<lambda>)/jit(main)/jit(tril)/select_n"(#loc6))
""",
mlir_module_serialized=b"ML\xefR\x01StableHLO_v0.9.0\x00\x01+\x05\x01\x03\x01\x03\x05\x03\x1b\x07\t\x0b\r\x0f\x11\x13\x15\x17\x19\x1b\x1d\x1f\x03\x96\x02\xff9\x01\xa1\x0f\x13\x17\x0b\x0f\x0b\x07\x0b\x0b\x0f\x0b\x0b\x0b\x0b\x13\x0b\x13\x0f\x0b\x0b\x0f\x13\x13+\x0b\x0f\x0b\x0b\x0b33\x0b\x0f\x0b\x0b\x13\x0f\x0b\x1b\x0f\x0b\x13\x0f\x0b\x0f\x0b\x0f\x0b\x17\x0f\x0b\x17\x13\x0b\x0f\x0b\x17\x0f\x0b\x17\x13\x0b\x17\x13\x0b\x0b\x17S\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x1b\x13\x13\x03_\x0b\x0b\x0b\x0b\x0f\x0b\x0bO\x0b\x13\x1b\x0b\x1b\x0b\x0b\x0b\x13\x0b\x0b\x0f\x1f\x0f\x0f\x0b/O//\x0b\x0b\x0b\x0b\x1b\x0b\x0f\x0b\x0f\x0f\x0f\x17\x17/\x0f\x0b/O/\x01\x05\x0b\x0f\x035\x17\x0f\x07\x0f\x07\x07\x13\x17\x0f\x07\x07\x17\x13\x07\x17\x17\x13\x17\x13\x13\x13\x0f\x17\x13\x13\x13\x02j\t\x1d\x83\x85\x03\x03\x11\xcb\x17\x07V\x047\x05!\x1d?\x05\x05#\x1f\x05%\x05'\x11\x03\x05\x05)\x05+\x05-\x05/\x03\x03\x1f\xc7\x051\x03\x03\x0b\xc9\x1dE\x05\x053\x055\x1d{}\x03\x03\x0b\xd7\x03\x03\x0b\xf9\x03\t135\x137\x13\x0f9\x057\x11\x01\x00\x059\x05;\x05=\x03\x0b\x15\xa5\x17\xb1\x19\xb3\x0f\xbd\x1b\xbf\x03\x0b\x15\xa9\x17\xc3\x19\xa9\x0f\xab\x1b\xc5\x05?\x1dC\x05\x05A\x05C\x03\x03\x1f\xcd\x1dK\x05\x05E\x03\x05%\xad'\xcf\x1dQ\x05\x05G\x03\x03\x0b\xd1\x1dW\x05\x05I\x1d[\x05\x05K\x1d_a\x05M\x17\x076\x045\x1deg\x05O\x17\x076\x04\x1d\x03\x03k\xd3\x05Q\x1doq\x05S\x17\x07>\x04E\x1duw\x05U\x17\x07>\x04\x1f\x03\x03\x0b\xd5\x05W\x17\x07>\x04\x1d\x03\x03\x81\xab\x05Y\x05[\x17\x07V\x04\x17\x03\x13\x89\xd9\x8b\xdb\x8d\xdd\x8f\xa5\x91\xdf\x93\xe1\x95\xeb\x97\xed\x99\xf1\x05]\x05_\x05a\x05c\x05e\x05g\x05i\x05k\x05m\x03\x05%\xad'\xf7\x03\x03\x11\xfb\x03\x03\x11\xfd\x1do\x1dq\x03\x01\x1ds\x03\x03\xc1\x1du\t\x07\x1f)!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00#!\x03\x05\xb5\xb9\r\x05\xa7\xb7\xa1\xa3\x1dw\r\x05\xa7\xbb\xa1\xa3\x1dy\x1d{\x1d}\r\x03\xa1\xa3##\x1d\x7f\x13\t\x01\x1f\x0b\t\x00\x00\x00\x00\x1f%\x01\x13\t\x05\x07\x05\x1f\x07\x11\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x1d!\x01\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x07\x11\x00\x00\x00\x00\x00\x00\x00@\x1f\x15\x11\x08\x00\x00\x00\x00\x00\x00\x00\x0b\x03\x1d\x81\x1d\x83\x05\x01\r\x05\xe3\xe5\xe7\xe9\x1d\x85\x13\x1fV\x1d\x87\x13\x1fL\x03\x03\xaf\x03\x03\xef\x15\x03\x01\x01\x01\x03\x07\xaf\xf3\xf5\x1f+\x11\x00\x00\x00\x00\x00\x00\x00\x00\x1f-\x01\x07\x01\x1f\x07\x11\x00\x00\x00\x00\x00\x00\xf8\x7f\x1f\x1d!\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1f7\x11\x00\x00\x00\x00\x00\x00\x00\x00\x01\t\x01\x02\x02)\x05!!\x0f)\x01\x0f\x1d)\x01\x19\x01\x0b)\x03!\x0f)\x05!!\x19)\x01\t\x13\x1b)\x05!!\r)\x03\t\t!\x11\x01\x05\x05\x11\x11\x03\x05\x03\x05)\x03\x01\t)\x03\x02\x02\x0f)\x03\t\x17)\x03\x05\x17)\x03\x01\x17)\x01\r)\x05\x05\x05\r)\x03\x05\r)\x03!\r)\x03\x05\t\x04~\x04\x05\x01\x11\r/\x07\x03\x01\t\x0b\x11\r;\x07\x035e\x07\x03]\x1d\x03'\x13\x06c\x03\x05\x03\x01\x15\x07mi\x03\x05\x03\x03\r\x06s\x03\x05\x05\x03\x05\x05\x03\ry\x03\x07\x03\x07)\x03\x03\x05\x03\t\x17\x06)\x03\x05\x05\x07\x0b\x19\x07\t\x7f\x03\x05\x03\r\x05\x03\x01+\x03\x15\x05\x03\x01+\x03\x15\x1b\x07\x01\x87\x07\x05\x11\x0b\x03\x0f\x05\x03\x01!\x03\x0b\x03\x07\x01\x03\x03\x0b\x03\x1b\x0f\x07\x01\x9b\x03/\x05\x19\x1d\x03\x07\x01\x03\x031\x03\x1f\x05\x03\x01-\x03\x07\x03\x07\x01\x03\x03\x05\x03#\x03\x07\x01\x9d\x03\x1b\x03!\t\x06\x01\x03\x05\x07'\x15%\x03\x07\x01\x03\x033\x03\x1f\x05\x03\x01-\x03\x07\x03\x07\x01\x03\x03\x11\x03-\x03\x07\x01\x9f\x035\x03+\t\x06\x01\x03\x11\x071\x17/\x11\x04\r\x05)3\x0b\x11\t=\x07\x03\x15+\x03\x05\t\x07\x03A\x1d\x03\x13\x05\x03\t!\x03\x0b\x03\x07#\x03\x03\x13\x03\x05\r\x06#\x03\x13\x05\x03\x07\x07\x03IG\x03\x13\x0f\x07OM\x03\x1b\x05\t\x0b\x05\x03\tS\x03\x07\x03\x07U\x03\x03\x05\x03\x0f\t\x06Y\x03\x05\x07\r\x01\x11\x11\x04\t\x03\x13\x06\x03\x01\x05\x01\x00J\x1a\x89\x0b\x0b%\x03\x11\x0f\x0b\t\t\x0b!\x11#\x1f/!)!)#\x1f\x19\xa9\x0f99m\x19\x85\x89W\xb3K\x9bM\x9bn\x03\x1b%)9+\x1b\x1f\x1f\x15\x1d\x15+\x13\ri\x1f\x11\x15\x1b\x17\x15\x17\x0f\x11\x15\x11\x19)\x0f\x0b\x11builtin\x00vhlo\x00modul
e\x00broadcast_in_dim_v1\x00constant_v1\x00iota_v1\x00select_v1\x00func_v1\x00add_v1\x00compare_v1\x00return_v1\x00reshape_v1\x00transpose_v1\x00divide_v1\x00call_v1\x00custom_call_v1\x00third_party/py/jax/tests/export_back_compat_test.py\x00value\x00sym_name\x00broadcast_dimensions\x00arg_attrs\x00function_type\x00res_attrs\x00sym_visibility\x00iota_dimension\x00compare_type\x00comparison_direction\x00jax.uses_shape_polymorphism\x00mhlo.num_partitions\x00mhlo.num_replicas\x00jit__lambda_\x00jit(<lambda>)/jit(main)/pjit[in_shardings=(UnspecifiedValue,) out_shardings=(UnspecifiedValue,) in_layouts=(None,) out_layouts=(None,) resource_env=None donated_invars=(False,) name=tril keep_unused=False inline=False]\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=0]\x00jit(<lambda>)/jit(main)/jit(tril)/add\x00jit(<lambda>)/jit(main)/jit(tril)/iota[dtype=int32 shape=(8, 8) dimension=1]\x00jit(<lambda>)/jit(main)/jit(tril)/ge\x00jit(<lambda>)/jit(main)/jit(tril)/broadcast_in_dim[shape=(8, 8) broadcast_dimensions=()]\x00jit(<lambda>)/jit(main)/jit(tril)/select_n\x00jit(<lambda>)/jit(main)/iota[dtype=float64 shape=(64,) dimension=0]\x00jit(<lambda>)/jit(main)/reshape[new_sizes=(8, 8) dimensions=None]\x00permutation\x00jit(<lambda>)/jit(main)/transpose[permutation=(1, 0)]\x00jit(<lambda>)/jit(main)/add\x00jit(<lambda>)/jit(main)/div\x00callee\x00jit(<lambda>)/jit(main)/eigh[lower=True sort_eigenvalues=True subset_by_index=None]\x00api_version\x00backend_config\x00call_target_name\x00called_computations\x00has_side_effect\x00mhlo.backend_config\x00operand_layouts\x00output_operand_aliases\x00result_layouts\x00mhlo.layout_mode\x00default\x00jax.result_info\x00tril\x00[0]\x00[1]\x00main\x00public\x00private\x00\x00lapack_dsyevd_ffi\x00mode\x00uplo\x00",
xla_call_module_version=9,
nr_devices=1,
) # End paste
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@internal_test_util@export_back_compat_test_data@cpu_eigh_lapack_syev.py@.PATH_END.py
|
{
"filename": "_show.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/volume/slices/y/_show.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean ``volume.slices.y.show`` attribute."""

    def __init__(self, plotly_name="show", parent_name="volume.slices.y", **kwargs):
        # Pull overridable defaults out of kwargs before delegating upward.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        super(ShowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@volume@slices@y@_show.py@.PATH_END.py
|
{
"filename": "events.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py2/IPython/core/events.py",
"type": "Python"
}
|
"""Infrastructure for registering and firing callbacks on application events.
Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
be called at specific times, or a collection of alternative methods to try,
callbacks are designed to be used by extension authors. A number of callbacks
can be registered for the same event without needing to be aware of one another.
The functions defined in this module are no-ops indicating the names of available
events and the arguments which will be passed to them.
.. note::
This API is experimental in IPython 2.0, and may be revised in future versions.
"""
from __future__ import print_function
class EventManager(object):
    """Manage a collection of events and a sequence of callbacks for each.

    This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
    instances as an ``events`` attribute.

    .. note::
        This API is experimental in IPython 2.0, and may be revised in future versions.
    """

    def __init__(self, shell, available_events):
        """Initialise the :class:`CallbackManager`.

        Parameters
        ----------
        shell
            The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
        available_events
            An iterable of names for callback events.
        """
        self.shell = shell
        # One independent callback list per known event name.
        self.callbacks = dict((name, []) for name in available_events)

    def register(self, event, function):
        """Register a new event callback.

        Parameters
        ----------
        event : str
            The event for which to register this callback.
        function : callable
            A function to be called on the given event, taking the same
            parameters as the matching callback prototype.

        Raises
        ------
        TypeError
            If ``function`` is not callable.
        KeyError
            If ``event`` is not one of the known events.
        """
        if not callable(function):
            raise TypeError('Need a callable, got %r' % function)
        self.callbacks[event].append(function)

    def unregister(self, event, function):
        """Remove a callback from the given event."""
        self.callbacks[event].remove(function)

    def trigger(self, event, *args, **kwargs):
        """Call callbacks for ``event``.

        Any additional arguments are passed to all callbacks registered for
        this event. Exceptions raised by callbacks are caught, and a message
        printed.
        """
        # Iterate over a snapshot so callbacks may register/unregister
        # callbacks for this same event while it is firing.
        snapshot = list(self.callbacks[event])
        for func in snapshot:
            try:
                func(*args, **kwargs)
            except Exception:
                print("Error in callback {} (for {}):".format(func, event))
                self.shell.showtraceback()
# event_name -> prototype mapping
available_events = {}
def _define_event(callback_proto):
available_events[callback_proto.__name__] = callback_proto
return callback_proto
# ------------------------------------------------------------------------------
# Callback prototypes
#
# No-op functions which describe the names of available events and the
# signatures of callbacks for those events.
# ------------------------------------------------------------------------------
# Each prototype below is intentionally a no-op: only its name and signature
# matter, since registration stores them as documentation of the event API.
@_define_event
def pre_execute():
    """Fires before code is executed in response to user/frontend action.

    This includes comm and widget messages and silent execution, as well as user
    code cells."""
    pass


@_define_event
def pre_run_cell():
    """Fires before user-entered code runs."""
    pass


@_define_event
def post_execute():
    """Fires after code is executed in response to user/frontend action.

    This includes comm and widget messages and silent execution, as well as user
    code cells."""
    pass


@_define_event
def post_run_cell():
    """Fires after user-entered code runs."""
    pass


@_define_event
def shell_initialized(ip):
    """Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.

    This is before extensions and startup scripts are loaded, so it can only be
    set by subclassing.

    Parameters
    ----------
    ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
        The newly initialised shell.
    """
    pass
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py2@IPython@core@events.py@.PATH_END.py
|
{
"filename": "CustomBackground_wnorm.py",
"repo_name": "ThomasEdwardRiley/xpsi-pre-transfer",
"repo_path": "xpsi-pre-transfer_extracted/xpsi-pre-transfer-master/examples/true_background/CustomBackground_wnorm.py",
"type": "Python"
}
|
from __future__ import print_function, division
import numpy as np
import math
import xpsi
class CustomBackground(xpsi.Background):
    """ Currently tailored to the NICER light-curve SWG model specification.

    NICER parameter recovery from synthetic photon count data.

    The background must be set using the property method defined in the
    parent class, which will perform basic compatibility checks.
    """

    def __init__(self, num_params, bounds):
        super(CustomBackground, self).__init__(num_params, bounds)

    def __call__(self, p, energy_edges, phases):
        """ Evaluate the incident background field.

        Parameters
        ----------
        p : sequence of float
            Parameter vector: ``p[0]`` is the power-law index Gamma,
            ``p[1]`` is log10 of the normalisation.
        energy_edges : ndarray
            Energy-bin edges, shape ``(n_energy + 1,)``.
        phases : ndarray
            Phase-bin edges, shape ``(n_phase + 1,)``; the background is
            phase-independent, so only the number of bins matters.
        """
        Gamma = p[0]
        norm = 10.0**(p[1])

        lower = energy_edges[:-1]
        upper = energy_edges[1:]

        # Analytic integral of E^Gamma over each energy bin.  The Gamma == -1
        # case is the logarithmic limit; the original closed form would
        # evaluate it as 0/0.
        if Gamma == -1.0:
            integral = np.log(upper / lower)
        else:
            integral = (upper**(Gamma + 1.0) - lower**(Gamma + 1.0)) / (Gamma + 1.0)

        # Background is independent of phase: broadcast the per-energy
        # integral across every phase column (replaces the per-column loop).
        temp = np.tile(integral[:, np.newaxis], (1, phases.shape[0] - 1))

        # Parent-class property setter performs compatibility checks.
        self.background = norm * temp
|
ThomasEdwardRileyREPO_NAMExpsi-pre-transferPATH_START.@xpsi-pre-transfer_extracted@xpsi-pre-transfer-master@examples@true_background@CustomBackground_wnorm.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "CosmoStat/shapepipe",
"repo_path": "shapepipe_extracted/shapepipe-master/shapepipe/__init__.py",
"type": "Python"
}
|
"""SHAPEPIPE PACKAGE.
ShapePipe is a galaxy shape measurement pipeline.
:Author: Samuel Farrens <samuel.farrens@cea.fr>
"""
__all__ = ['modules', 'pipeline', 'utilities']
from . import *
from .info import __about__, __version__
|
CosmoStatREPO_NAMEshapepipePATH_START.@shapepipe_extracted@shapepipe-master@shapepipe@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "BEAST-Fitting/beast",
"repo_path": "beast_extracted/beast-master/beast/tools/tests/__init__.py",
"type": "Python"
}
|
BEAST-FittingREPO_NAMEbeastPATH_START.@beast_extracted@beast-master@beast@tools@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "tf_utils.py",
"repo_name": "SKA-INAF/sclassifier",
"repo_path": "sclassifier_extracted/sclassifier-master/sclassifier/tf_utils.py",
"type": "Python"
}
|
#!/usr/bin/env python
from __future__ import print_function
##################################################
### MODULE IMPORT
##################################################
## STANDARD MODULES
import os
import sys
import subprocess
import string
import time
import signal
from threading import Thread
import datetime
import numpy as np
import random
import math
import logging
import collections
import csv
import pickle
##############################
## GLOBAL VARS
##############################
from sclassifier import logger
## TENSORFLOW & KERAS MODULES
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import optimizers
try:
from tensorflow.keras.utils import plot_model
except:
from tensorflow.keras.utils.vis_utils import plot_model
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras.models import model_from_json
try:
from tensorflow.keras.layers import BatchNormalization
except Exception as e:
logger.warn("Failed to import BatchNormalization (err=%s), trying in another way ..." % str(e))
from tensorflow.keras.layers.normalization import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D, UpSampling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Lambda
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.losses import mse, binary_crossentropy
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.image import convert_image_dtype
from tensorflow.python.ops.image_ops_impl import _fspecial_gauss, _ssim_helper, _verify_compatible_image_shapes
from tensorflow.python.framework.ops import disable_eager_execution, enable_eager_execution
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.activations import softmax
###############################################
## ChanMinMaxNorm LAYER
###############################################
class ChanMinMaxNorm(layers.Layer):
    """Scale inputs in range.

    Per batch item and per channel, pixel values are linearly rescaled from
    their observed [min, max] into [norm_min, norm_max].  NaN/inf pixels and
    exact zeros are treated as masked: they are excluded from the min/max
    computation and set to ``norm_min`` in the output.
    The rescaling is applied both during training and inference.

    Input shape:
        Arbitrary; the spatial reduction is over axes 1 and 2
        (presumably (batch, y, x, channels) — TODO confirm with callers).
    Output shape:
        Same as input.
    Arguments:
        norm_min: Float, lower bound of the target range.
        norm_max: Float, upper bound of the target range.
        name: A string, the name of the layer.
    """

    def __init__(self, norm_min=0., norm_max=1., name=None, **kwargs):
        self.norm_min = norm_min
        self.norm_max = norm_max
        super(ChanMinMaxNorm, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        super(ChanMinMaxNorm, self).build(input_shape)

    def call(self, inputs, training=False):
        # - Init stuff
        input_shape = tf.shape( inputs )
        norm_min= self.norm_min
        norm_max= self.norm_max

        # - Compute input data min & max, excluding NANs & zeros.
        #   Masked pixels are replaced with +/-1e99 sentinels so they can
        #   never win the min/max reductions over the spatial axes.
        cond= tf.logical_and(tf.math.is_finite(inputs), tf.math.not_equal(inputs, 0.))
        data_min= tf.reduce_min(tf.where(~cond, tf.ones_like(inputs) * 1.e+99, inputs), axis=(1,2))
        data_max= tf.reduce_max(tf.where(~cond, tf.ones_like(inputs) * -1.e+99, inputs), axis=(1,2))

        ##### DEBUG ############
        #tf.print("data_min (before norm)", data_min, output_stream=sys.stdout)
        #tf.print("data_max (before norm)", data_max, output_stream=sys.stdout)
        #########################

        # - Normalize data in range (norm_min, norm_max); expand dims so the
        #   per-(batch, channel) extrema broadcast over the spatial axes.
        data_min= tf.expand_dims(tf.expand_dims(data_min, axis=1),axis=1)
        data_max= tf.expand_dims(tf.expand_dims(data_max, axis=1),axis=1)
        data_norm= (inputs-data_min)/(data_max-data_min) * (norm_max-norm_min) + norm_min

        # - Set masked values (NANs, zeros) to norm_min
        data_norm= tf.where(~cond, tf.ones_like(data_norm) * norm_min, data_norm)

        ####### DEBUG ###########
        # NOTE(review): the two reductions below are leftover debug code; their
        # results are never used.
        data_min= tf.reduce_min(data_norm, axis=(1,2))
        data_max= tf.reduce_max(data_norm, axis=(1,2))
        #data_min= tf.expand_dims(tf.expand_dims(data_min, axis=1), axis=1)
        #data_max= tf.expand_dims(tf.expand_dims(data_max, axis=1), axis=1)
        #tf.print("data_min (after norm)", data_min, output_stream=sys.stdout)
        #tf.print("data_max (after norm)", data_max, output_stream=sys.stdout)
        ###########################

        return tf.reshape(data_norm, self.compute_output_shape(input_shape))

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        # Serialise the normalisation bounds so the layer round-trips through
        # model save/load.
        config = {
            'norm_min': self.norm_min,
            'norm_max': self.norm_max,
        }
        base_config = super(ChanMinMaxNorm, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
###############################################
## ChanMaxScale LAYER
###############################################
class ChanMaxScale(layers.Layer):
    """Scale inputs to channel maximum.

    Per batch item and per channel, pixel values are divided by the maximum
    valid pixel value.  NaN/inf pixels and exact zeros are treated as masked:
    they are excluded from the maximum computation and set to 0 in the output.
    The rescaling is applied both during training and inference.

    Input shape:
        Arbitrary; the spatial reduction is over axes 1 and 2
        (presumably (batch, y, x, channels) — TODO confirm with callers).
    Output shape:
        Same as input.
    """

    def __init__(self, name=None, **kwargs):
        super(ChanMaxScale, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        super(ChanMaxScale, self).build(input_shape)

    def call(self, inputs, training=False):
        # - Init stuff
        input_shape = tf.shape(inputs)

        # - Valid pixels are finite and non-zero (zeros act as a mask value).
        cond= tf.logical_and(tf.math.is_finite(inputs), tf.math.not_equal(inputs, 0.))

        # - Per-(batch, channel) maximum over the spatial axes; invalid pixels
        #   are replaced by a huge negative sentinel so they never win.
        #   (The original also computed the per-channel minimum here, but it
        #   was only referenced by commented-out debug prints — removed.)
        data_max= tf.reduce_max(tf.where(~cond, tf.ones_like(inputs) * -1.e+99, inputs), axis=(1,2))
        data_max= tf.expand_dims(tf.expand_dims(data_max, axis=1),axis=1)

        # - Scale data to max
        inputs_scaled= inputs/data_max

        # - Set masked values (NANs, zeros) to norm_min
        norm_min= 0
        inputs_scaled= tf.where(~cond, tf.ones_like(inputs_scaled) * norm_min, inputs_scaled)

        return tf.reshape(inputs_scaled, self.compute_output_shape(input_shape))

    def compute_output_shape(self, input_shape):
        return input_shape
###############################################
## ChanPosDef LAYER
###############################################
class ChanPosDef(layers.Layer):
    """Make images positive, as subtract chan minimum.

    For each (batch item, channel) whose spatial minimum is negative, that
    minimum is subtracted so valid pixel values become non-negative.
    NaN/inf pixels and exact zeros are treated as masked and are forced to
    0 in the output.

    Input shape:
        Arbitrary; the spatial reduction is over axes 1 and 2.
    Output shape:
        Same as input.
    """

    def __init__(self, name=None, **kwargs):
        super(ChanPosDef, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        super(ChanPosDef, self).build(input_shape)

    def call(self, inputs, training=False):
        # - Init stuff
        input_shape = tf.shape(inputs)

        # - Compute input data min & max, excluding NANs & zeros.
        #   Masked pixels get a +1e99 sentinel so they never win the min.
        cond= tf.logical_and(tf.math.is_finite(inputs), tf.math.not_equal(inputs, 0.))
        data_min= tf.reduce_min(tf.where(~cond, tf.ones_like(inputs) * 1.e+99, inputs), axis=(1,2))
        #data_max= tf.reduce_max(tf.where(~cond, tf.ones_like(inputs) * -1.e+99, inputs), axis=(1,2))
        data_min= tf.expand_dims(tf.expand_dims(data_min, axis=1),axis=1)
        #data_max= tf.expand_dims(tf.expand_dims(data_max, axis=1),axis=1)

        ##### DEBUG ############
        #tf.print("data_min (before posdef)", data_min, output_stream=sys.stdout)
        #tf.print("data_max (before posdef)", data_max, output_stream=sys.stdout)
        #########################

        # - Subtract data_min on channels with negative data_min; channels
        #   already non-negative are passed through unchanged.
        cond2= tf.math.less(data_min, 0)
        inputs_scaled= tf.where(cond2, inputs - data_min, inputs)

        # - Set masked values (NANs, zeros) to norm_min
        norm_min= 0
        inputs_scaled= tf.where(~cond, tf.ones_like(inputs_scaled) * norm_min, inputs_scaled)

        ####### DEBUG ###########
        #data_min= tf.reduce_min(inputs_scaled, axis=(1,2))
        #data_max= tf.reduce_max(inputs_scaled, axis=(1,2))
        #data_min_nozeros= tf.reduce_min(tf.where(~cond, tf.ones_like(inputs_scaled) * 1.e+99, inputs_scaled), axis=(1,2))
        #data_max_nozeros= tf.reduce_max(tf.where(~cond, tf.ones_like(inputs_scaled) * -1.e+99, inputs_scaled), axis=(1,2))
        #tf.print("data_min (nozeros, after posdef)", data_min_nozeros, output_stream=sys.stdout)
        #tf.print("data_max (nozeros, after posdef)", data_max_nozeros, output_stream=sys.stdout)
        #tf.print("data_min (after posdef)", data_min, output_stream=sys.stdout)
        #tf.print("data_max (after posdef)", data_max, output_stream=sys.stdout)
        ###########################

        return tf.reshape(inputs_scaled, self.compute_output_shape(input_shape))

    def compute_output_shape(self, input_shape):
        return input_shape
###############################################
## ChanMaxRatio LAYER
###############################################
class ChanMaxRatio(layers.Layer):
    """Per batch item, compute the ratio of each channel's maximum pixel
    value to the maximum across all channels.

    NaN/inf pixels and exact zeros are treated as masked and excluded
    from the maxima.

    Input shape:
        Arbitrary; the spatial reduction is over axes 1 and 2.
    Output shape:
        [nbatches, nchans]
    """

    def __init__(self, name=None, **kwargs):
        super(ChanMaxRatio, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        super(ChanMaxRatio, self).build(input_shape)

    def call(self, inputs, training=False):
        # Valid pixels: finite and non-zero (zeros act as a mask value).
        valid = tf.logical_and(tf.math.is_finite(inputs), tf.math.not_equal(inputs, 0.))

        # Per-channel spatial maximum; invalid pixels get a huge negative
        # sentinel so they can never be selected.
        sentinel = tf.ones_like(inputs) * -1.e+99
        chan_max = tf.reduce_max(tf.where(~valid, sentinel, inputs), axis=(1, 2))

        # Absolute maximum across channels, kept as a column for broadcasting.
        abs_max = tf.expand_dims(tf.reduce_max(chan_max, axis=1), axis=1)

        # Ratio of each channel's max to the overall max.
        return chan_max / abs_max

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
###############################################
## ChanMeanRatio LAYER
###############################################
class ChanMeanRatio(layers.Layer):
    """Per batch item, compute the ratio of each channel's mean pixel value
    to the largest per-channel mean.

    NaN/inf pixels and exact zeros are treated as masked and excluded from
    the mean (both from the sum and the pixel count).

    Input shape:
        Arbitrary; the spatial reduction is over axes 1 and 2.
    Output shape:
        [nbatches, nchans]
    """

    def __init__(self, name=None, **kwargs):
        super(ChanMeanRatio, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        super(ChanMeanRatio, self).build(input_shape)

    def call(self, inputs, training=False):
        # Valid pixels: finite and non-zero (zeros act as a mask value).
        valid = tf.logical_and(tf.math.is_finite(inputs), tf.math.not_equal(inputs, 0.))

        # Mean over valid pixels only: masked pixels contribute 0 to the sum
        # and are excluded from the count; divide_no_nan guards empty channels.
        valid_count = tf.reduce_sum(tf.cast(valid, tf.float32), axis=(1, 2))
        valid_sum = tf.reduce_sum(tf.where(~valid, tf.zeros_like(inputs), inputs), axis=(1, 2))
        chan_mean = tf.math.divide_no_nan(valid_sum, valid_count)

        # Normalise by the largest per-channel mean within each batch item.
        peak = tf.expand_dims(tf.reduce_max(chan_mean, axis=1), axis=1)
        return chan_mean / peak

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
###############################################
## ChanSumRatio LAYER
###############################################
class ChanSumRatio(layers.Layer):
    """Per batch item, compute the ratio of each channel's pixel sum to the
    largest per-channel pixel sum.

    NaN/inf pixels and exact zeros are treated as masked and contribute 0
    to the sums.

    Input shape:
        Arbitrary; the spatial reduction is over axes 1 and 2.
    Output shape:
        [nbatches, nchans]
    """

    def __init__(self, name=None, **kwargs):
        super(ChanSumRatio, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        super(ChanSumRatio, self).build(input_shape)

    def call(self, inputs, training=False):
        # Valid pixels: finite and non-zero (zeros act as a mask value).
        valid = tf.logical_and(tf.math.is_finite(inputs), tf.math.not_equal(inputs, 0.))

        # Per-channel pixel sum over the spatial axes; masked pixels add 0.
        chan_sum = tf.reduce_sum(tf.where(~valid, tf.zeros_like(inputs), inputs), axis=(1, 2))

        # Normalise by the largest channel sum within each batch item.
        peak = tf.expand_dims(tf.reduce_max(chan_sum, axis=1), axis=1)
        return chan_sum / peak

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
###############################################
## SoftmaxCosineSim LAYER
###############################################
# Taken from https://github.com/mwdhont/SimCLRv1-keras-tensorflow/blob/master/SoftmaxCosineSim.py
# ==============================================================================
# Code modified from NT-XENT-loss:
# https://github.com/google-research/simclr/blob/master/objective.py
# ==============================================================================
# coding=utf-8
# Copyright 2020 The SimCLR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific simclr governing permissions and
# limitations under the License.
# ==============================================================================
class SoftmaxCosineSim(layers.Layer):
    """ Custom Keras layer: takes all z-projections as input and calculates
    output matrix which needs to match to [I|O|I|O], where
        I = Unity matrix of size (batch_size x batch_size)
        O = Zero matrix of size (batch_size x batch_size)

    This is the NT-Xent similarity head used for SimCLR-style training:
    cosine similarities between the two augmented views are softmaxed so
    that positives (diagonal of the cross-view block) approach 1.
    """

    def __init__(self, batch_size, feat_dim, **kwargs):
        super(SoftmaxCosineSim, self).__init__()
        self.batch_size = batch_size  # number of samples per view
        self.feat_dim = feat_dim  # dimensionality of each z-projection
        # Output shape: [softmax(ab|aa) | softmax(ba|bb)] -> 4 * feat_dim wide.
        self.units = (batch_size, 4 * feat_dim)
        # Expects 2 * batch_size projection inputs (two augmented views).
        self.input_dim = [(None, feat_dim)] * (batch_size * 2)
        self.temperature = 0.1  # softmax temperature for the similarities
        # Large constant used to suppress self-similarities before softmax.
        self.LARGE_NUM = 1e9

    def get_config(self):
        # Serialise all constructor state so the layer survives save/load.
        config = super().get_config().copy()
        config.update(
            {
                "batch_size": self.batch_size,
                "feat_dim": self.feat_dim,
                "units": self.units,
                "input_dim": self.input_dim,
                "temperature": self.temperature,
                "LARGE_NUM": self.LARGE_NUM,
            }
        )
        return config

    def call(self, inputs):
        # L2-normalise every projection so that matmul products below are
        # cosine similarities.
        z1 = []
        z2 = []
        for index in range(self.batch_size):
            # 0-index assumes that batch_size in generator is equal to 1
            z1.append(
                tf.math.l2_normalize(inputs[index][0], -1)
                #tf.math.l2_normalize(inputs[index], -1)
            )
            z2.append(
                tf.math.l2_normalize(inputs[self.batch_size + index][0], -1)
                #tf.math.l2_normalize(inputs[self.batch_size + index], -1)
            )

        # Gather hidden1/hidden2 across replicas and create local labels.
        z1_large = z1
        z2_large = z2
        masks = tf.one_hot(tf.range(self.batch_size), self.batch_size)

        # Products of vectors of same side of network (z_i), count as negative examples
        # Values on the diagonal are put equal to a very small value
        # -> exclude product between 2 identical values, no added value
        logits_aa = tf.matmul(z1, z1_large, transpose_b=True) / self.temperature
        logits_aa = logits_aa - masks * self.LARGE_NUM
        logits_bb = tf.matmul(z2, z2_large, transpose_b=True) / self.temperature
        logits_bb = logits_bb - masks * self.LARGE_NUM

        # Similarity between two transformation sides of the network (z_i and z_j)
        # -> diagonal should be as close as possible to 1
        logits_ab = tf.matmul(z1, z2_large, transpose_b=True) / self.temperature
        logits_ba = tf.matmul(z2, z1_large, transpose_b=True) / self.temperature

        # Row-wise softmax over [positives | negatives]; training target is
        # the block pattern [I|O|I|O].
        part1 = softmax(tf.concat([logits_ab, logits_aa], 1))
        part2 = softmax(tf.concat([logits_ba, logits_bb], 1))
        output = tf.concat([part1, part2], 1)

        return output
###############################################
## BYOL LOSS DEFINITION
###############################################
# - Taken from https://github.com/garder14/byol-tensorflow2/blob/main/losses.py
def byol_loss(p, z):
    """BYOL loss: 2 - 2 * mean cosine similarity between rows of p and z.

    Both arguments are (2*bs, dim) batches of projections; each row is
    L2-normalised before the per-row dot product.
    """
    p_unit = tf.math.l2_normalize(p, axis=1)  # (2*bs, 128)
    z_unit = tf.math.l2_normalize(z, axis=1)  # (2*bs, 128)
    cos_sim = tf.reduce_sum(tf.multiply(p_unit, z_unit), axis=1)
    return 2 - 2 * tf.reduce_mean(cos_sim)
###############################################
## SIMCLR LOSS DEFINITION
###############################################
# - Taken from https://github.com/garder14/simclr-tensorflow2/blob/main/losses.py
def nt_xent_loss(z, temperature):
    """ SimCLR NT-Xent loss definition.

    Parameters
    ----------
    z : tf.Tensor
        Stacked projections of shape (2*bs, dim); rows 2k and 2k+1 are
        assumed to be the two augmented views of the same sample
        (this pairing is implied by the index construction below).
    temperature : float
        Softmax temperature scaling the cosine similarities.

    Returns
    -------
    tf.Tensor
        Scalar mean loss over all 2*bs anchors.
    """
    z = tf.math.l2_normalize(z, axis=1)  # (2*bs, 128)

    # Pairwise cosine similarities, temperature-scaled and exponentiated.
    similarity_matrix = tf.matmul(z, z, transpose_b=True)  # compute pairwise cosine similarities
    similarity_matrix_edit = tf.exp(similarity_matrix / temperature)  # divide by temperature and apply exp

    # Indices of positive pairs: [[0, 1], [1, 0], [2, 3], [3, 2], ...]
    ij_indices = tf.reshape(tf.range(z.shape[0]), shape=[-1, 2])
    ji_indices = tf.reverse(ij_indices, axis=[1])
    positive_indices = tf.reshape(tf.concat([ij_indices, ji_indices], axis=1), shape=[-1, 2])
    numerators = tf.gather_nd(similarity_matrix_edit, positive_indices)

    # Denominator per anchor: sum over all similarities except the
    # self-similarity (masked out by zeroing the diagonal).
    negative_mask = 1 - tf.eye(z.shape[0])  # mask that discards self-similarities
    denominators = tf.reduce_sum(tf.multiply(negative_mask, similarity_matrix_edit), axis=1)

    losses = -tf.math.log(numerators/denominators)
    return tf.reduce_mean(losses)
|
SKA-INAFREPO_NAMEsclassifierPATH_START.@sclassifier_extracted@sclassifier-master@sclassifier@tf_utils.py@.PATH_END.py
|
{
"filename": "omegaxyz.py",
"repo_name": "spacetelescope/drizzlepac",
"repo_path": "drizzlepac_extracted/drizzlepac-main/drizzlepac/devutils/comparison_tools/read_hla/omegaxyz.py",
"type": "Python"
}
|
"""Functions to get and apply HSC infinitesimal rotations to correct small shifts and rotations in spherical coordinates
Uses formulae from `Budavari & Lubow (2012, ApJ, 761, 188) <http://adsabs.harvard.edu/abs/2012ApJ...761..188B>`_
vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 ai :
2019 June 12, Rick White
"""
import math
import os
import pdb
import sys
import numpy as np
from astropy.io import fits
import requests
# cache of omega vectors that have already been retrieved
getomegaxyz_cache = {}
def getomegaxyz(dataset, service="https://hla.stsci.edu/cgi-bin/getomega.cgi"):
    """Return tuple (omegax, omegay, omegaz) with rotation vector for an image.

    (0, 0, 0) will be returned if the image does not have a rotation
    correction.  Results are memoised in ``getomegaxyz_cache``.

    Parameters
    ----------
    dataset : str
        dataset name
    service : str, optional
        URL of web service used to get omega values. Default value =
        https://hla.stsci.edu/cgi-bin/getomega.cgi

    Returns
    -------
    omega : tuple
        3-element tuple containing omega X, Y, and Z components.
    """
    if not dataset:
        return (0.0, 0.0, 0.0)

    # Canonical (lower-cased) visit identifier doubles as the cache key.
    key = getdataset(dataset.lower())
    cached = getomegaxyz_cache.get(key)
    if cached is not None:
        return cached

    response = requests.get(service, params={"image": key})
    omega = tuple(response.json().get("omega", (0.0, 0.0, 0.0)))
    getomegaxyz_cache[key] = omega
    return omega
# Cache of WCS dictionaries keyed by (dataset, applyomega).
getwcs_cache = {}


def getwcs(dataset, applyomega=True, service="https://hla.stsci.edu/cgi-bin/fitscut.cgi"):
    """Return dictionary with WCS information for an image.

    Parameters
    ----------
    dataset : str
        dataset name
    applyomega : bool, optional
        Apply HSC correction? Default value = True
    service : str, optional
        web service URL. Default value = https://hla.stsci.edu/cgi-bin/fitscut.cgi

    Returns
    -------
    dict
        updated WCS values, as decoded from the service's JSON response

    Raises
    ------
    ValueError
        If ``dataset`` is empty/falsy.
    """
    if not dataset:
        raise ValueError("Undefined dataset '{}'".format(dataset))

    # Memoise on (dataset, applyomega) so repeated queries skip the service.
    key = (dataset, bool(applyomega))
    if key in getwcs_cache:
        return getwcs_cache[key]

    r = requests.get(service, params=dict(red=dataset, getwcs=1, applyomega=applyomega))
    result = r.json()
    getwcs_cache[key] = result
    return result
def crossproduct(a, b):
    """Return the cross product (a X b) of two 3-vectors.
    Parameters
    ----------
    a : 3-element list of floats
        left operand of the cross product
    b : 3-element list of floats
        right operand of the cross product
    Returns
    -------
    c : 3-element list of floats
        result of cross-product calculation
    """
    return [a[1]*b[2] - a[2]*b[1],
            a[2]*b[0] - a[0]*b[2],
            a[0]*b[1] - a[1]*b[0]]
def dotproduct(a, b):
    """Return the scalar (dot) product a . b of two 3-vectors.
    Parameters
    ----------
    a : 3-element list of floats
        left operand of the dot product
    b : 3-element list of floats
        right operand of the dot product
    Returns
    -------
    float
        result of dot-product calculation
    """
    total = a[0] * b[0]
    for idx in (1, 2):
        total += a[idx] * b[idx]
    return total
def getdataset(fitsfile):
    """Extract the HLA visit identifier from a FITS filename or getdata URL.
    Works on either a plain dataset name (hst_10188_10_acs_wfc_f814) or a
    getdata.cgi URL (http://hla.stsci.edu/getdata.cgi?dataset=hst_10888_10_acs_wfc_f814w).
    Parameters
    ----------
    fitsfile : str
        fits file name or getdata URL
    Returns
    -------
    vis_id : str or None
        visit identifier, or None if this does not look like an HLA dataset name
    """
    parts = os.path.split(fitsfile)[-1].split('_')
    if len(parts) < 5 or parts[0].lower() != 'hst':
        # not a plain dataset name -- perhaps this is a getdata URL?
        tail = fitsfile.split('dataset=')[-1]
        parts = os.path.split(tail)[-1].split('_')
        if len(parts) < 5 or parts[0].lower() != 'hst':
            return None
    if parts[3].lower() == 'wfpc2':
        # look for shift for either WFPC2 or PC using WFPC2 catalogs
        parts[4] = 'wfpc2'
    return '_'.join(parts[:5])
def applyomegawcs(filename, crval, cdmatrix, omega=None):
    """Get the omega values for this dataset and return the updated (crval, cdmatrix) values
    NOTE: the input lists ``crval`` and ``cdmatrix`` are modified in place when a
    non-zero omega is applied; the same (mutated) lists are also returned.
    Parameters
    ----------
    filename : str
        dataset filename (used to look up omega when it is not supplied)
    crval : list of floats
        Right Ascension and Declination position at the reference pixel
    cdmatrix : list of floats
        Description of linear distortions: plate scale, rotation, and skew of the image
    omega : tuple, optional. Default value = None
        3-element tuple containing omega X, Y, and Z components.
    Returns
    -------
    Updated versions of input variables crval and cdmatrix
    """
    # nothing to do without a WCS to update
    if (not crval) or (not cdmatrix):
        return (crval, cdmatrix)
    if not omega:
        # parse the filename and extract the visit identification
        dataset = getdataset(filename)
        if not dataset:
            return (crval, cdmatrix)
        omega = getomegaxyz(dataset)
    if omega[0] != 0 or omega[1] != 0 or omega[2] != 0:
        d2r = math.pi/180
        # apply rotation to the tangent point
        ra0 = crval[0]*d2r
        dec0 = crval[1]*d2r
        cdec0 = math.cos(dec0)
        # unit vector to the reference pixel on the celestial sphere
        p0 = [cdec0*math.cos(ra0), cdec0*math.sin(ra0), math.sin(dec0)]
        # infinitesimal rotation: dp = omega x p (Budavari & Lubow 2012)
        dp0 = crossproduct(omega, p0)
        decnew = math.asin(p0[2]+dp0[2])
        ranew = math.atan2(p0[1]+dp0[1], p0[0]+dp0[0])
        crval[0] = ranew/d2r
        crval[1] = decnew/d2r
        # compute angle of rotation
        # the 2 terms are the rotation from omega and from the shift of the north
        # vector at the new reference position
        rot = math.atan(dotproduct(p0, omega)) + math.asin(math.sin(decnew)*math.sin(ra0-ranew))
        cth = math.cos(rot)
        sth = math.sin(rot)
        # copy before overwriting so the 2x2 rotation uses the original values
        cd = cdmatrix[:]
        cdmatrix[0] = cth*cd[0] - sth*cd[2]
        cdmatrix[1] = cth*cd[1] - sth*cd[3]
        cdmatrix[2] = sth*cd[0] + cth*cd[2]
        cdmatrix[3] = sth*cd[1] + cth*cd[3]
    return (crval, cdmatrix)
def getdeltas(filename, crval, cdmatrix, omega=None):
    """Get the omega values for this dataset and return the shifts and rotation.
    Similar to applyomegawcs but returns deltas instead of the updated WCS values.
    Parameters
    ----------
    filename : str
        dataset filename (used to look up omega when it is not supplied)
    crval : list of floats
        Right Ascension and Declination position at the reference pixel
    cdmatrix : list of floats
        Description of linear distortions: plate scale, rotation, and skew of the image
    omega : tuple, optional. Default value = None
        3-element tuple containing omega X, Y, and Z components.
    Returns
    -------
    dra : float
        delta ra in arcseconds
    ddec : float
        delta dec in arcseconds
    rot : float
        delta rotation in degrees
    """
    dx = dy = rot = 0.0
    # without a WCS (or with a zero omega) the deltas are identically zero
    if (not crval) or (not cdmatrix):
        return (dx, dy, rot)
    if not omega:
        # parse the filename and extract the visit identification
        dataset = getdataset(filename)
        if not dataset:
            return (dx, dy, rot)
        omega = getomegaxyz(dataset)
    if omega[0] != 0 or omega[1] != 0 or omega[2] != 0:
        # apply rotation to the tangent point
        ra0 = math.radians(crval[0])
        dec0 = math.radians(crval[1])
        cdec0 = math.cos(dec0)
        # unit vector to the reference pixel; dp = omega x p gives the shift
        p0 = [cdec0*math.cos(ra0), cdec0*math.sin(ra0), math.sin(dec0)]
        dp0 = crossproduct(omega, p0)
        decnew = math.asin(p0[2]+dp0[2])
        ranew = math.atan2(p0[1]+dp0[1], p0[0]+dp0[0])
        dx = math.degrees((ranew - ra0))
        # wrap the RA difference into [-180, 180] before scaling
        if dx > 180:
            dx = dx-360
        elif dx < -180:
            dx = dx+360
        # cos(dec) projects the RA offset onto the sky; 3600 converts deg -> arcsec
        dx = dx*math.cos(dec0) * 3600
        # degrees() is linear, so the 3600 arcsec factor inside the call is
        # equivalent to degrees(decnew-dec0)*3600 -- result is in arcsec
        dy = math.degrees(((decnew-dec0)*3600))
        # compute angle of rotation
        # the 2 terms are the rotation from omega and from the shift of the north
        # vector at the new reference position
        rot = math.degrees((math.atan(dotproduct(p0, omega)) + math.asin(math.sin(decnew)*math.sin(ra0-ranew))))
    return (dx, dy, rot)
def updatefits(infile, outfile=None, dataset=None, omega=None, wcsname='HLA_HSC', verbose=False, overwrite=False):
    """Read input FITS file infile, update astrometry keyword, write outfile
    Default is to assume infile gives the dataset name.
    Parameters
    ----------
    infile : str
        fits image filename
    outfile : str, optional
        optional output filename. If outfile is omitted (or None), the data is not written but the function returns the
        updated astropy.io.fits object. Default value = None
    dataset : str, optional
        dataset filename. If it is specified then it is used as the dataset name for the wcs query. Default value = None
    omega : tuple, optional. Default value = None
        3-element tuple containing omega X, Y, and Z components.
    wcsname : str, optional
        WCS name. Default value = 'HLA_HSC'
    verbose : bool, optional
        display extra information? Default value = False
    overwrite : bool, optional
        if outfile is specified and a file with the same name already exists, overwrite? Default value = False
    Returns
    -------
    pout : astropy.io.fits object
        Updated fits data object
    """
    # resolve omega: explicit argument wins, then dataset, then parse infile
    if not omega:
        if not dataset:
            # parse the filename and extract the visit identification
            dataset = getdataset(infile)
            if not dataset:
                raise ValueError("Unable to determine dataset for file")
        omega = getomegaxyz(dataset)
    if verbose:
        print("omega=", omega)
    # a zero omega means no correction: HDUs are copied through unchanged
    nonzero = omega[0] != 0 or omega[1] != 0 or omega[2] != 0
    if verbose:
        print("reading", infile)
    pin = fits.open(infile)
    if outfile:
        if os.path.exists(outfile):
            if overwrite:
                if verbose:
                    print("Replacing existing file", outfile)
                os.remove(outfile)
            else:
                raise ValueError("Output file {} exists; specify overwrite=True to replace it".format(outfile))
        if verbose:
            print("creating", outfile)
        # append mode lets each HDU be flushed to disk as it is processed
        pout = fits.open(outfile, mode='append')
    else:
        if verbose:
            print("creating HDUlist for output")
        pout = fits.HDUList()
    for i, hdu in enumerate(pin):
        if nonzero:
            # update WCS keywords if present
            try:
                # skip update if the wcsname indicates correction has already been made
                oldwcsname = hdu.header.get("wcsname", "")
                if not oldwcsname.upper().find(wcsname.upper()) >= 0:
                    crval = [hdu.header['crval1'], hdu.header['crval2']]
                    try:
                        cdmatrix = [hdu.header['cd1_1'], hdu.header['cd1_2'], hdu.header['cd2_1'], hdu.header['cd2_2']]
                    except KeyError:
                        # try computing CD matrix from CROTA2 and CDELT
                        # delete those keywords if present and switch to CD matrix
                        cdelt1 = hdu.header['cdelt1']
                        cdelt2 = hdu.header['cdelt2']
                        try:
                            crota2 = hdu.header['crota2'] * math.pi / 180
                            del hdu.header['crota2']
                        except KeyError:
                            crota2 = 0.0
                        sinrot = math.sin(crota2)
                        cosrot = math.cos(crota2)
                        cdmatrix = [cdelt1*cosrot, -cdelt2*sinrot, cdelt1*sinrot, cdelt2*cosrot]
                        del hdu.header['cdelt1']
                        del hdu.header['cdelt2']
                    # filename arg is unused since omega is passed explicitly
                    crval, cdmatrix = applyomegawcs(None, crval, cdmatrix, omega=omega)
                    # keep RA in the conventional [0, 360) range
                    if crval[0] < 0:
                        crval[0] = crval[0] + 360
                    elif crval[0] > 360:
                        crval[0] = crval[0] - 360
                    hdu.header['crval1'] = crval[0]
                    hdu.header['crval2'] = crval[1]
                    hdu.header['cd1_1'] = cdmatrix[0]
                    hdu.header['cd1_2'] = cdmatrix[1]
                    hdu.header['cd2_1'] = cdmatrix[2]
                    hdu.header['cd2_2'] = cdmatrix[3]
                    # tag the header so a second pass will skip this HDU
                    hdu.header['wcsname'] = wcsname
                    if verbose:
                        print("updated WCS in extension {} ({})".format(i, hdu.header.get('extname', 'primary')))
                else:
                    if verbose:
                        print("wcsname in extension {} = {}, no update".format(i, hdu.header['wcsname']))
            except KeyError:
                # OK, no WCS
                if verbose:
                    print("no WCS found in extension {} ({})".format(i, hdu.header.get('extname', 'primary')))
        pout.append(hdu)
        if outfile:
            pout.flush()
            # control memory usage
            hdu.data = None
            pout[i].data = None
    pin.close()
    if outfile:
        pout.close()
        if verbose:
            print("wrote", outfile)
    else:
        # no output file: hand the updated HDUList back to the caller
        return pout
def applyomegacat(rain, decin, omega, radians=False):
    """Apply a 3-element infinitesimal rotation vector to RA/Dec positions.
    Usage: raout, decout = applyomegacat(rain, decin, omega)
    Parameters
    ----------
    rain : numpy.ndarray
        Input right ascension positions in degrees (or radians)
    decin : numpy.ndarray
        Input declination positions in degrees (or radians)
    omega : numpy.ndarray
        3-element infinitesimal rotation vector
    radians : bool, optional
        If True, ra/dec values are in radians instead of degrees. Default value = False
    Returns
    -------
    raout : float
        RA output position in degrees (or radians if input argument radians = True)
    decout : float
        Dec output position in degrees (or radians if input argument radians = True)
    """
    cart = radec2xyz(rain, decin, radians=radians)
    # first-order rotation: p -> p + omega x p
    cart = cart + np.cross(omega, cart)
    return xyz2radec(cart, radians=radians)
def radec2xyz(ra, dec, radians=False):
    """Convert RA, Dec to unit Cartesian (x, y, z) coordinates.
    Usage: xyz = radec2xyz(ra, dec)
    Important Notes:
    - inputs *ra* and *dec* must match in shape
    - inputs may be scalars or arrays of any dimensionality; the output adds a
      trailing axis of length 3 (the inverse of xyz2radec)
    Parameters
    ----------
    ra : numpy.ndarray
        Input right ascension positions in degrees
    dec : numpy.ndarray
        Input declination positions in degrees
    radians : bool, optional
        If True, ra/dec values are in radians instead of degrees. Default value = False
    Returns
    -------
    c : numpy.ndarray
        [\*,3] array with normalized cartesian coordinates
    """
    ra = np.asarray(ra)
    dec = np.asarray(dec)
    s = ra.shape
    if s != dec.shape:
        raise ValueError("ra, dec must be same-shape arrays")
    if not radians:
        dtor = np.pi/180
        ra = ra * dtor
        dec = dec * dtor
    c = np.empty(s + (3, ), dtype=float)
    cdec = np.cos(dec)
    # Bug fix: index the trailing axis with an ellipsis so scalar and
    # multi-dimensional inputs work (c[:, 0] assumed exactly 1-D input,
    # crashing for scalars and broadcasting wrongly for N-D arrays).
    c[..., 0] = np.cos(ra)*cdec
    c[..., 1] = np.sin(ra)*cdec
    c[..., 2] = np.sin(dec)
    return c
def xyz2radec(xyz, radians=False):
    """Convert Cartesian (x, y, z) coordinates to RA, Dec.
    Usage: ra, dec = xyz2radec(xyz)
    Parameters
    ----------
    xyz : numpy.ndarray
        [\*,3] array with normalized cartesian coordinates. May be multi-dimensional but last dimension must be 3,
        e.g., shape = (10,10,3).
    radians : bool, optional
        If True, ra/dec values are in radians instead of degrees. Default value = False
    Returns
    -------
    ra : numpy.ndarray
        right ascension values in degrees (or radians), shaped like xyz minus its last axis
    dec : numpy.ndarray
        declination values in degrees (or radians), shaped like xyz minus its last axis
    """
    xyz = np.asarray(xyz)
    shape = xyz.shape
    if shape[-1] != 3:
        raise ValueError('xyz last dimension must be 3')
    # flatten to [n, 3] so the math below is simple, restore shape at the end
    flat = np.reshape(xyz, (-1, 3))
    # normalize to unit vectors for safety
    flat = flat / np.sqrt((flat ** 2).sum(axis=-1))[:, None]
    dec = np.arcsin(flat[:, 2])
    ra = np.arctan2(flat[:, 1], flat[:, 0])
    # force RA into the range [0, 2*pi)
    ra = np.where(ra < 0, ra + 2 * np.pi, ra)
    if not radians:
        ra = ra * (180 / np.pi)
        dec = dec * (180 / np.pi)
    lead = shape[:-1]
    return (ra.reshape(lead), dec.reshape(lead))
if __name__ == "__main__":
    # Smoke-test demo: queries the live HLA web services, so it requires
    # network access and prints whatever omega/WCS values the services return.
    dataset = 'HST_05397_2V_WFPC2_WFPC2'
    v = getomegaxyz(dataset)
    print(dataset, v)
    dataset = 'hst_12109_17_wfc3_ir'
    v = getomegaxyz(dataset)
    print(dataset, v)
    dataset = 'hst_10188_10_acs_wfc_f814w'
    v = getomegaxyz(dataset)
    print(dataset, v)
    v = getwcs(dataset)
    print(dataset, v)
    v = getwcs(dataset, applyomega=False)
    print(dataset, v)
    # Local-file example kept for reference; needs access to the HLA archive disk.
    # filename = '/ifs/public/hst/hla/acs/V10.0/10188/10188_10/hst_10188_10_acs_wfc_f814w_drz.fits'
    # hdu = fits.open(filename)[1]
    # crval = [hdu.header['crval1'], hdu.header['crval2']]
    # cdmatrix = [hdu.header['cd1_1'], hdu.header['cd1_2'], hdu.header['cd2_1'], hdu.header['cd2_2']]
    # v = getdeltas(dataset, crval, cdmatrix)
    # print(dataset, v)
|
spacetelescopeREPO_NAMEdrizzlepacPATH_START.@drizzlepac_extracted@drizzlepac-main@drizzlepac@devutils@comparison_tools@read_hla@omegaxyz.py@.PATH_END.py
|
{
"filename": "probes.py",
"repo_name": "DifferentiableUniverseInitiative/jax_cosmo",
"repo_path": "jax_cosmo_extracted/jax_cosmo-master/jax_cosmo/probes.py",
"type": "Python"
}
|
# This module defines kernel functions for various tracers
import jax.numpy as np
from jax import jit
from jax import vmap
from jax.tree_util import register_pytree_node_class
import jax_cosmo.background as bkgrd
import jax_cosmo.constants as const
import jax_cosmo.redshift as rds
from jax_cosmo.jax_utils import container
from jax_cosmo.scipy.integrate import simps
from jax_cosmo.utils import a2z
from jax_cosmo.utils import z2a
__all__ = ["WeakLensing", "NumberCounts"]
@jit
def weak_lensing_kernel(cosmo, pzs, z, ell):
    """
    Returns a weak lensing kernel evaluated at redshifts z for multipole ell.
    Note: this function handles differently nzs that correspond to extended redshift
    distribution, and delta functions.
    Shape of the result is (n_bins, len(z)); rows are ordered like ``pzs``.
    NOTE(review): traced by @jit, so the isinstance checks on pzs happen at
    trace time -- the set of delta vs extended bins must be static.
    """
    z = np.atleast_1d(z)
    zmax = max([pz.zmax for pz in pzs])
    # Retrieve comoving distance corresponding to z
    chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))
    # Extract the indices of pzs that can be treated as extended distributions,
    # and the ones that need to be treated as delta functions.
    pzs_extended_idx = [
        i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)
    ]
    pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]
    # Here we define a permutation that would put all extended pzs at the begining of the list
    perm = pzs_extended_idx + pzs_delta_idx
    # Compute inverse permutation
    inv = np.argsort(np.array(perm, dtype=np.int32))
    # Process extended distributions, if any
    radial_kernels = []
    if len(pzs_extended_idx) > 0:
        @vmap
        def integrand(z_prime):
            chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))
            # Stack the dndz of all redshift bins
            dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)
            # lensing efficiency (chi' - chi)/chi'; clips guard chi' > chi and chi' >= 1
            return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)
        radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)
    # Process single plane redshifts if any
    if len(pzs_delta_idx) > 0:
        @vmap
        def integrand_single(z_prime):
            chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))
            return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)
        radial_kernels.append(
            integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))
            * (1.0 + z)
            * chi
        )
    # Fusing the results together
    radial_kernel = np.concatenate(radial_kernels, axis=0)
    # And perfoming inverse permutation to put all the indices where they should be
    radial_kernel = radial_kernel[inv]
    # Constant term
    constant_factor = 3.0 * const.H0**2 * cosmo.Omega_m / 2.0 / const.c
    # Ell dependent factor
    ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2
    return constant_factor * ell_factor * radial_kernel
@jit
def density_kernel(cosmo, pzs, bias, z, ell):
    """
    Computes the number counts density kernel.
    Returns an array of shape (n_bins, len(z)): dndz * bias * H(z) per bin.
    ``bias`` may be a single bias object or a list with one entry per bin.
    """
    # delta-function n(z) bins are not supported for clustering
    if any(isinstance(pz, rds.delta_nz) for pz in pzs):
        raise NotImplementedError(
            "Density kernel not properly implemented for delta redshift distributions"
        )
    # stack the dndz of all redshift bins
    dndz = np.stack([pz(z) for pz in pzs], axis=0)
    # Compute radial NLA kernel: same as clustering
    if isinstance(bias, list):
        # This is to handle the case where we get a bin-dependent bias
        b = np.stack([b(cosmo, z) for b in bias], axis=0)
    else:
        b = bias(cosmo, z)
    radial_kernel = dndz * b * bkgrd.H(cosmo, z2a(z))
    # Normalization,
    constant_factor = 1.0
    # Ell dependent factor (density has no spin prefactor)
    ell_factor = 1.0
    return constant_factor * ell_factor * radial_kernel
@jit
def nla_kernel(cosmo, pzs, bias, z, ell):
    """
    Computes the NLA intrinsic-alignment kernel.
    Returns an array of shape (n_bins, len(z)); structured like density_kernel
    but rescaled by the NLA amplitude / growth factor and given the same
    ell prefactor as the lensing kernel.
    """
    # delta-function n(z) bins are not supported for the NLA model
    if any(isinstance(pz, rds.delta_nz) for pz in pzs):
        raise NotImplementedError(
            "NLA kernel not properly implemented for delta redshift distributions"
        )
    # stack the dndz of all redshift bins
    dndz = np.stack([pz(z) for pz in pzs], axis=0)
    # Compute radial NLA kernel: same as clustering
    if isinstance(bias, list):
        # This is to handle the case where we get a bin-dependent bias
        b = np.stack([b(cosmo, z) for b in bias], axis=0)
    else:
        b = bias(cosmo, z)
    radial_kernel = dndz * b * bkgrd.H(cosmo, z2a(z))
    # Apply common A_IA normalization to the kernel
    # Joachimi et al. (2011), arXiv: 1008.3491, Eq. 6.
    radial_kernel *= (
        -(5e-14 * const.rhocrit) * cosmo.Omega_m / bkgrd.growth_factor(cosmo, z2a(z))
    )
    # Constant factor
    constant_factor = 1.0
    # Ell dependent factor (spin-2 prefactor, same as weak_lensing_kernel)
    ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2
    return constant_factor * ell_factor * radial_kernel
@register_pytree_node_class
class WeakLensing(container):
    """
    Class representing a weak lensing probe, with a bunch of bins
    Parameters:
    -----------
    redshift_bins: list of nzredshift distributions
    ia_bias: (optional) if provided, IA will be added with the NLA model,
        either a single bias object or a list of same size as nzs
    multiplicative_bias: (optional) adds an (1+m) multiplicative bias, either single
        value or list of same length as redshift bins
    Configuration:
    --------------
    sigma_e: intrinsic galaxy ellipticity
    Note: registered as a JAX pytree; the traced leaves are exactly the
    positional args packed in __init__ (params[0]=redshift_bins,
    params[1]=multiplicative_bias, params[2]=ia_bias when IA is enabled).
    """
    def __init__(
        self,
        redshift_bins,
        ia_bias=None,
        multiplicative_bias=0.0,
        sigma_e=0.26,
        **kwargs
    ):
        # Depending on the Configuration we will trace or not the ia_bias in the
        # container
        if ia_bias is None:
            ia_enabled = False
            args = (redshift_bins, multiplicative_bias)
        else:
            ia_enabled = True
            args = (redshift_bins, multiplicative_bias, ia_bias)
        # only set the flag if the caller did not force it explicitly
        if "ia_enabled" not in kwargs.keys():
            kwargs["ia_enabled"] = ia_enabled
        super(WeakLensing, self).__init__(*args, sigma_e=sigma_e, **kwargs)
    @property
    def n_tracers(self):
        """
        Returns the number of tracers for this probe, i.e. redshift bins
        """
        # Extract parameters
        pzs = self.params[0]
        return len(pzs)
    @property
    def zmax(self):
        """
        Returns the maximum redsfhit probed by this probe
        """
        # Extract parameters
        pzs = self.params[0]
        return max([pz.zmax for pz in pzs])
    def kernel(self, cosmo, z, ell):
        """
        Compute the radial kernel for all nz bins in this probe.
        Returns:
        --------
        radial_kernel: shape (nbins, nz)
        """
        z = np.atleast_1d(z)
        # Extract parameters
        pzs, m = self.params[:2]
        kernel = weak_lensing_kernel(cosmo, pzs, z, ell)
        # If IA is enabled, we add the IA kernel
        if self.config["ia_enabled"]:
            bias = self.params[2]
            kernel += nla_kernel(cosmo, pzs, bias, z, ell)
        # Applies measurement systematics
        if isinstance(m, list):
            # per-bin m: stack to a column so it broadcasts over the z axis
            m = np.expand_dims(np.stack([mi for mi in m], axis=0), 1)
        kernel *= 1.0 + m
        return kernel
    def noise(self):
        """
        Returns the shape-noise power for all redshift bins: sigma_e^2 / n_gal
        return: shape [nbins]
        """
        # Extract parameters
        pzs = self.params[0]
        # retrieve number of galaxies in each bins
        ngals = np.array([pz.gals_per_steradian for pz in pzs])
        if isinstance(self.config["sigma_e"], list):
            sigma_e = np.array([s for s in self.config["sigma_e"]])
        else:
            sigma_e = self.config["sigma_e"]
        return sigma_e**2 / ngals
@register_pytree_node_class
class NumberCounts(container):
    """Class representing a galaxy clustering probe, with a bunch of bins
    Parameters:
    -----------
    redshift_bins: nzredshift distributions
    bias: galaxy bias, a single bias object or a list with one entry per bin
    Configuration:
    --------------
    has_rsd: redshift-space distortions flag (stored in config; not used by
    the kernel below -- presumably consumed elsewhere, verify against callers)
    Note: registered as a JAX pytree; traced leaves are (redshift_bins, bias).
    """
    def __init__(self, redshift_bins, bias, has_rsd=False, **kwargs):
        super(NumberCounts, self).__init__(
            redshift_bins, bias, has_rsd=has_rsd, **kwargs
        )
    @property
    def zmax(self):
        """
        Returns the maximum redsfhit probed by this probe
        """
        # Extract parameters
        pzs = self.params[0]
        return max([pz.zmax for pz in pzs])
    @property
    def n_tracers(self):
        """Returns the number of tracers for this probe, i.e. redshift bins"""
        # Extract parameters
        pzs = self.params[0]
        return len(pzs)
    def kernel(self, cosmo, z, ell):
        """Compute the radial kernel for all nz bins in this probe.
        Returns:
        --------
        radial_kernel: shape (nbins, nz)
        """
        z = np.atleast_1d(z)
        # Extract parameters
        pzs, bias = self.params
        # Retrieve density kernel
        kernel = density_kernel(cosmo, pzs, bias, z, ell)
        return kernel
    def noise(self):
        """Returns the shot-noise power for all redshift bins: 1 / n_gal
        return: shape [nbins]
        """
        # Extract parameters
        pzs = self.params[0]
        ngals = np.array([pz.gals_per_steradian for pz in pzs])
        return 1.0 / ngals
|
DifferentiableUniverseInitiativeREPO_NAMEjax_cosmoPATH_START.@jax_cosmo_extracted@jax_cosmo-master@jax_cosmo@probes.py@.PATH_END.py
|
{
"filename": "util.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py2/pygments/util.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
# Splits a shebang interpreter path on '/', '\' or space (see shebang_matches).
split_path_re = re.compile(r'[/\\ ]')
# Matches an (optional XML decl +) <!DOCTYPE ...> prologue; group 2 captures
# the root element name plus an optional quoted public identifier.
doctype_lookup_re = re.compile(r'''
    (<\?.*?\?>)?\s*
    <!DOCTYPE\s+(
     [a-zA-Z_][a-zA-Z0-9]*
     (?: \s+ # optional in HTML5
     [a-zA-Z_][a-zA-Z0-9]*\s+
     "[^"]*")?
     )
     [^>]*>
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
# Matches a complete open/close element pair, used as a cheap "looks like
# markup" heuristic in looks_like_xml.
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
                    re.UNICODE | re.IGNORECASE | re.DOTALL | re.MULTILINE)
# Matches a leading <?xml ...?> declaration.
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
class ClassNotFound(ValueError):
    """Raised if one of the lookup functions didn't find a matching class."""
class OptionError(Exception):
    """Raised by the get_*_opt helpers below for invalid option values."""
    pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
    """Return options[optname] (or *default*) if it is one of *allowed*,
    optionally lower-casing it first; raise OptionError otherwise."""
    value = options.get(optname, default)
    if normcase:
        value = value.lower()
    if value in allowed:
        return value
    raise OptionError('Value for option %s must be one of %s' %
                      (optname, ', '.join(map(str, allowed))))
def get_bool_opt(options, optname, default=None):
    """Coerce options[optname] (or *default*) to a bool.
    Accepts real bools, ints, and the usual yes/no style strings;
    raises OptionError for anything else."""
    value = options.get(optname, default)
    # bool must be tested before int: bool is an int subclass
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return bool(value)
    if not isinstance(value, string_types):
        raise OptionError('Invalid type %r for option %s; use '
                          '1/0, yes/no, true/false, on/off' % (
                          value, optname))
    lowered = value.lower()
    if lowered in ('1', 'yes', 'true', 'on'):
        return True
    if lowered in ('0', 'no', 'false', 'off'):
        return False
    raise OptionError('Invalid value %r for option %s; use '
                      '1/0, yes/no, true/false, on/off' % (
                      value, optname))
def get_int_opt(options, optname, default=None):
    """Coerce options[optname] (or *default*) to an int, raising OptionError
    with a type- or value-specific message on failure."""
    value = options.get(optname, default)
    try:
        return int(value)
    except TypeError:
        raise OptionError('Invalid type %r for option %s; you '
                          'must give an integer value' % (
                          value, optname))
    except ValueError:
        raise OptionError('Invalid value %r for option %s; you '
                          'must give an integer value' % (
                          value, optname))
def get_list_opt(options, optname, default=None):
    """Coerce options[optname] (or *default*) to a list: lists/tuples are
    copied, strings are whitespace-split; anything else raises OptionError."""
    val = options.get(optname, default)
    # a value cannot be both a str and a list/tuple, so branch order is free
    if isinstance(val, (list, tuple)):
        return list(val)
    if isinstance(val, string_types):
        return val.split()
    raise OptionError('Invalid type %r for option %s; you '
                      'must give a list value' % (
                      val, optname))
def docstring_headline(obj):
    """Return the first paragraph of *obj*'s docstring joined into one line,
    or '' if there is no docstring."""
    doc = obj.__doc__
    if not doc:
        return ''
    pieces = []
    for raw in doc.strip().splitlines():
        stripped = raw.strip()
        if not stripped:
            # blank line ends the first paragraph
            break
        pieces.append(" " + stripped)
    return ''.join(pieces).lstrip()
def make_analysator(f):
    """Wrap *f* as a static text analyser that always returns a float
    clamped to [0.0, 1.0]; any failure or falsy result yields 0.0."""
    def text_analyse(text):
        try:
            result = f(text)
        except Exception:
            return 0.0
        if not result:
            return 0.0
        try:
            # clamp into [0, 1]
            return max(0.0, min(1.0, float(result)))
        except (ValueError, TypeError):
            return 0.0
    text_analyse.__doc__ = f.__doc__
    return staticmethod(text_analyse)
def shebang_matches(text, regex):
    r"""Check if the given regular expression matches the last path component
    of the shebang line, if one exists.
    Interpreter options (``'-f'`` / ``'--foo'``) are ignored, so ``'perl'``
    behaves like ``'perl -e'``; common Windows executable extensions
    (.exe/.cmd/.bat/.bin) are accepted; matching is anchored (``^...$``)
    and case-insensitive.
    """
    first_line = text.partition('\n')[0].lower()
    if not first_line.startswith('#!'):
        return False
    # split the interpreter path on '/', '\' or space and drop option words
    words = [w for w in re.split(r'[/\\ ]', first_line[2:].strip())
             if w and not w.startswith('-')]
    if not words:
        return False
    matcher = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
    return matcher.search(words[-1]) is not None
def doctype_matches(text, regex):
    """Check whether the document's DOCTYPE (if any) matches *regex*.
    Only the first part of the DOCTYPE is examined,
    eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'.
    """
    m = doctype_lookup_re.match(text)
    if m is None:
        return False
    # group 2 is the doctype name plus optional public identifier
    return re.match(regex, m.group(2).strip(), re.I) is not None
def html_doctype_matches(text):
    """Check if the file looks like it has a html doctype.
    Thin wrapper around doctype_matches with the 'html' pattern.
    """
    return doctype_matches(text, r'html')
# memoizes only the (expensive) tag-scan result, keyed by hash of the text
_looks_like_xml_cache = {}
def looks_like_xml(text):
    """Heuristic: True if *text* has an XML declaration, a DOCTYPE, or at
    least one complete element pair near the start."""
    if xml_decl_re.match(text):
        return True
    key = hash(text)
    if key in _looks_like_xml_cache:
        return _looks_like_xml_cache[key]
    if doctype_lookup_re.match(text) is not None:
        # doctype hits are cheap to recompute, so they are not cached
        return True
    found = tag_re.search(text[:1000]) is not None
    _looks_like_xml_cache[key] = found
    return found
# Python narrow build compatibility
def _surrogatepair(c):
# Given a unicode character code
# with length greater than 16 bits,
# return the two 16 bit surrogate pair.
# From example D28 of:
# http://www.unicode.org/book/ch03.pdf
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
def unirange(a, b):
    """Returns a regular expression string to match the given non-BMP range.
    On wide builds this is a simple character class; on narrow builds the
    range is expressed in terms of UTF-16 surrogate pairs.
    Raises ValueError if b < a or if either endpoint is below U+10000.
    """
    if b < a:
        raise ValueError("Bad character range")
    if a < 0x10000 or b < 0x10000:
        raise ValueError("unirange is only defined for non-BMP ranges")
    if sys.maxunicode > 0xffff:
        # wide build
        return u'[%s-%s]' % (unichr(a), unichr(b))
    else:
        # narrow build stores surrogates, and the 're' module handles them
        # (incorrectly) as characters. Since there is still ordering among
        # these characters, expand the range to one that it understands. Some
        # background in http://bugs.python.org/issue3665 and
        # http://bugs.python.org/issue12749
        #
        # Additionally, the lower constants are using unichr rather than
        # literals because jython [which uses the wide path] can't load this
        # file if they are literals.
        ah, al = _surrogatepair(a)
        bh, bl = _surrogatepair(b)
        if ah == bh:
            return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
        else:
            buf = []
            buf.append(u'%s[%s-%s]' %
                       (unichr(ah), unichr(al),
                        ah == bh and unichr(bl) or unichr(0xdfff)))
            # Bug fix: the condition was 'ah - bh > 1', which is never true
            # since bh >= ah, so full intermediate high-surrogate blocks were
            # silently dropped from the pattern.
            if bh - ah > 1:
                # Bug fix: the format arguments were missing their enclosing
                # parentheses, so buf.append received four positional
                # arguments and raised TypeError whenever this branch ran.
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1),
                            unichr(0xdc00), unichr(0xdfff)))
            if ah != bh:
                buf.append(u'%s[%s-%s]' %
                           (unichr(bh), unichr(0xdc00), unichr(bl)))
            return u'(?:' + u'|'.join(buf) + u')'
def format_lines(var_name, seq, raw=False, indent_level=0):
    """Format *seq* as the source text of a tuple assignment to *var_name*.
    With raw=True the items are emitted verbatim (preformatted reprs);
    otherwise each item is repr'd with single quotes forced."""
    base = ' ' * (indent_level * 4)
    inner = ' ' * ((indent_level + 1) * 4)
    out = [base + var_name + ' = (']
    for item in seq:
        if raw:
            out.append(inner + item + ',')
        else:
            # appending '"' before repr() makes Python pick single quotes,
            # then the extra character is sliced back out
            rep = repr(item + '"')
            out.append(inner + rep[:-2] + rep[-1] + ',')
    out.append(base + ')')
    return '\n'.join(out)
def duplicates_removed(it, already_seen=()):
    """
    Return a list of the items from *it* with duplicates removed,
    preserving first-seen order. Items in *already_seen* are skipped too.
    """
    result = []
    seen = set()
    for item in it:
        if item in seen or item in already_seen:
            continue
        seen.add(item)
        result.append(item)
    return result
class Future(object):
    """Generic class to defer some work.
    Handled specially in RegexLexerMeta, to support regex string construction at
    first use.
    """
    def get(self):
        # Subclasses must produce the deferred value here.
        raise NotImplementedError
def guess_decode(text):
    """Decode *text* with guessed encoding.
    First try UTF-8; this should fail for non-UTF-8 encodings.
    Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    Returns a (decoded_text, encoding_name) tuple.
    """
    try:
        text = text.decode('utf-8')
        return text, 'utf-8'
    except UnicodeDecodeError:
        try:
            import locale
            prefencoding = locale.getpreferredencoding()
            # Bug fix: decode with the locale's preferred encoding. The old
            # code called text.decode() with no argument, retrying the
            # default UTF-8 codec that had just failed, so this branch could
            # never succeed and everything fell through to latin-1.
            text = text.decode(prefencoding)
            return text, prefencoding
        except (UnicodeDecodeError, LookupError):
            text = text.decode('latin1')
            return text, 'latin1'
def guess_decode_from_terminal(text, term):
    """Decode *text* coming from terminal *term*.
    The terminal's own encoding is tried first (when it advertises one);
    on failure, fall back to guess_decode (UTF-8, locale, then latin-1).
    Returns a (decoded_text, encoding_name) tuple.
    """
    encoding = getattr(term, 'encoding', None)
    if encoding:
        try:
            decoded = text.decode(encoding)
        except UnicodeDecodeError:
            pass
        else:
            return decoded, encoding
    return guess_decode(text)
def terminal_encoding(term):
    """Return our best guess of encoding for the given *term*:
    the terminal's declared encoding if present, else the locale default."""
    encoding = getattr(term, 'encoding', None)
    if encoding:
        return encoding
    import locale
    return locale.getpreferredencoding()
# Python 2/3 compatibility
# Rebinds a small set of names so the rest of the module can use one
# spelling on both major versions (this predates dropping Python 2).
if sys.version_info < (3, 0):
    unichr = unichr
    xrange = xrange
    string_types = (str, unicode)
    text_type = unicode
    u_prefix = 'u'
    iteritems = dict.iteritems
    itervalues = dict.itervalues
    import StringIO
    import cStringIO
    # unfortunately, io.StringIO in Python 2 doesn't accept str at all
    StringIO = StringIO.StringIO
    BytesIO = cStringIO.StringIO
else:
    unichr = chr
    xrange = range
    string_types = (str,)
    text_type = str
    u_prefix = ''
    iteritems = dict.items
    itervalues = dict.values
    from io import StringIO, BytesIO, TextIOWrapper
    class UnclosingTextIOWrapper(TextIOWrapper):
        # Don't close underlying buffer on destruction.
        def close(self):
            self.flush()
def add_metaclass(metaclass):
    """Class decorator that rebuilds the decorated class with *metaclass*,
    portable across Python 2 and 3 metaclass syntax."""
    def wrapper(cls):
        body = cls.__dict__.copy()
        # drop the descriptors the type machinery recreates itself
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        # slot attributes live as descriptors in __dict__; remove them so
        # the rebuilt class recreates them from __slots__
        for name in body.get('__slots__', ()):
            body.pop(name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py2@pygments@util.py@.PATH_END.py
|
{
"filename": "field.py",
"repo_name": "DedalusProject/dedalus",
"repo_path": "dedalus_extracted/dedalus-master/dedalus/core/field.py",
"type": "Python"
}
|
"""
Class for data fields.
"""
import weakref
from functools import partial, reduce
from collections import defaultdict
import numpy as np
from mpi4py import MPI
from scipy import sparse
from scipy.sparse import linalg as splinalg
from numbers import Number
import h5py
from math import prod
from ..libraries.fftw import fftw_wrappers as fftw
from ..tools.array import copyto
from ..tools.config import config
from ..tools.cache import CachedMethod, CachedAttribute
from ..tools.exceptions import UndefinedParityError
from ..tools.exceptions import SymbolicParsingError
from ..tools.exceptions import NonlinearOperatorError
from ..tools.exceptions import DependentOperatorError
from ..tools.general import unify, unify_attributes, DeferredTuple, OrderedSet
from ..tools.random_arrays import ChunkedRandomArray
import logging
logger = logging.getLogger(__name__.split('.')[-1])
# Public interface
__all__ = ['Field',
'ScalarField',
'VectorField',
'TensorField',
'LockedField']
class Operand:
    """
    Base class for symbolic operands (fields and deferred operator results).

    Provides numpy-ufunc dispatch and Python operator overloads that build
    symbolic expression trees (via the arithmetic/operators modules) instead
    of evaluating eagerly, plus the abstract symbolic-manipulation interface
    (atoms/has/replace/split/etc.) that subclasses implement.
    """

    # Outrank numpy arrays so mixed arithmetic dispatches to our overloads.
    __array_priority__ = 100.

    def __array_ufunc__(self, ufunc, method, *inputs, **kw):
        """Dispatch numpy ufuncs applied to operands to symbolic operators."""
        from .operators import UnaryGridFunction
        if method != "__call__":
            return NotImplemented
        if kw:
            return NotImplemented
        # Dispatch unary ufuncs to ufunc operator
        if len(inputs) == 1:
            return UnaryGridFunction(ufunc, inputs[0])
        # Dispatch binary ufuncs to arithmetic operators, triggered by arithmetic with numpy scalars
        elif len(inputs) == 2:
            from . import arithmetic
            from . import operators
            if ufunc is np.add:
                return arithmetic.Add(*inputs)
            elif ufunc is np.subtract:
                return arithmetic.Add(inputs[0], (-1)*inputs[1])
            elif ufunc is np.multiply:
                return arithmetic.Multiply(*inputs)
            elif ufunc is np.divide:
                return arithmetic.Multiply(inputs[0], inputs[1]**(-1))
            elif ufunc is np.power:
                return operators.Power(*inputs)
            else:
                return NotImplemented
        else:
            return NotImplemented

    def __call__(self, *args, **kw):
        """Interpolate field."""
        from .operators import interpolate
        return interpolate(self, *args, **kw)

    def __abs__(self):
        # Call: abs(self)
        from .operators import UnaryGridFunction
        return UnaryGridFunction(np.absolute, self)

    def __neg__(self):
        # Call: -self
        return ((-1) * self)

    def __add__(self, other):
        # Call: self + other
        from .arithmetic import Add
        return Add(self, other)

    def __radd__(self, other):
        # Call: other + self
        from .arithmetic import Add
        return Add(other, self)

    def __sub__(self, other):
        # Call: self - other
        return (self + (-other))

    def __rsub__(self, other):
        # Call: other - self
        return (other + (-self))

    def __mul__(self, other):
        # Call: self * other
        from .arithmetic import Multiply
        return Multiply(self, other)

    def __rmul__(self, other):
        # Call: other * self
        from .arithmetic import Multiply
        return Multiply(other, self)

    def __matmul__(self, other):
        # Call: self @ other
        from .arithmetic import DotProduct
        return DotProduct(self, other)

    def __rmatmul__(self, other):
        # Call: other @ self
        # BUGFIX: this dunder was misspelled "__rmathmul__", so Python never
        # dispatched reflected matmul (other @ self) to it.
        from .arithmetic import DotProduct
        return DotProduct(other, self)

    def __truediv__(self, other):
        # Call: self / other
        return (self * other**(-1))

    def __rtruediv__(self, other):
        # Call: other / self
        return (other * self**(-1))

    def __pow__(self, other):
        # Call: self ** other
        # Short-circuit trivial exponents to plain Python values.
        if other == 0:
            return 1
        if other == 1:
            return self
        from .operators import Power
        return Power(self, other)

    def __rpow__(self, other):
        # Call: other ** self
        from .operators import Power
        return Power(other, self)

    @staticmethod
    def cast(arg, dist, tensorsig, dtype):
        """
        Cast *arg* to an operand compatible with the given distributor,
        tensor signature, and dtype.

        Operands are validated and returned unchanged; numbers are wrapped
        in constant fields. Raises ValueError on metadata mismatches and
        NotImplementedError for uncastable types.
        """
        # Check distributor for operands
        if isinstance(arg, Operand):
            if arg.domain.dist is not dist:
                raise ValueError("Mismatching distributor.")
            elif arg.tensorsig != tensorsig:
                raise ValueError("Mismatching tensorsig.")
            elif arg.dtype != dtype:
                raise ValueError("Mismatching dtype.")
            else:
                return arg
        # Cast numbers to constant fields
        elif isinstance(arg, Number):
            out = Field(dist=dist, tensorsig=tensorsig, dtype=dtype)
            out['g'] = arg  # Set in grid space arbitrarily
            out.name = str(arg)
            return out
        else:
            raise NotImplementedError("Cannot cast type %s" %type(arg))

    def atoms(self, *types):
        """Gather all leaf-operands by type."""
        raise NotImplementedError()

    def has(self, *vars):
        """Determine if tree contains any specified operands/operators."""
        raise NotImplementedError()

    def split(self, *vars):
        """Split into expressions containing and not containing specified operands/operators."""
        raise NotImplementedError()

    def replace(self, old, new):
        """Replace specified operand/operator."""
        raise NotImplementedError()

    def sym_diff(self, var):
        """Symbolically differentiate with respect to specified operand."""
        raise NotImplementedError()

    def expand(self, *vars):
        """Expand expression over specified variables."""
        raise NotImplementedError()

    def require_linearity(self, *vars, allow_affine=False, self_name=None, vars_name=None, error=AssertionError):
        """Require expression to be linear in specified operands/operators."""
        raise NotImplementedError("Subclasses must implement.")

    def require_first_order(self, *ops, self_name=None, ops_name=None, error=AssertionError):
        """Require expression to be maximally first order in specified operators."""
        raise NotImplementedError("Subclasses must implement.")

    def require_independent(self, *vars, self_name=None, vars_name=None, error=AssertionError):
        """Require expression to be independent of specified operands/operators."""
        if self.has(*vars):
            if self_name is None:
                self_name = str(self)
            if vars_name is None:
                vars_name = [str(var) for var in vars]
            raise error(f"{self_name} must be independent of {vars_name}.")

    def separability(self, *vars):
        """Determine separable dimensions of expression as a linear operator on specified variables."""
        raise NotImplementedError("%s has not implemented a separability method." %type(self))

    def build_ncc_matrices(self, separability, vars, **kw):
        """Precompute non-constant coefficients and build multiplication matrices."""
        raise NotImplementedError()

    def expression_matrices(self, subproblem, vars, **kw):
        """Build expression matrices for a specific subproblem and variables."""
        raise NotImplementedError()

    def frechet_differential(self, variables, perturbations, backgrounds=None):
        """
        Compute Frechet differential with respect to specified variables/perturbations.

        Parameters
        ----------
        variables : list of Field objects
            Variables to differentiate around.
        perturbations : list of Field objects
            Perturbation directions for each variable.
        backgrounds : list of Field objects, optional
            Backgrounds for each variable. Default: variables.

        Notes
        -----
        This method symbolically computes the functional directional derivative around the
        specified backgrounds in the direction of the specified perturbations:
            F'(X0).X1 = lim_{ε -> 0} d/dε F(X0 + ε*X1)
        The result is a linear operator acting on the perturbations with NCCs that
        depend on the backgrounds.
        """
        dist = self.dist
        tensorsig = self.tensorsig
        dtype = self.dtype
        # Compute differential via an auxiliary scalar ε field
        epsilon = Field(dist=dist, dtype=dtype)
        # d/dε F(X0 + ε*X1)
        diff = self
        for var, pert in zip(variables, perturbations):
            diff = diff.replace(var, var + epsilon*pert)
        diff = diff.sym_diff(epsilon)
        # ε -> 0
        if diff:
            diff = Operand.cast(diff, self.dist, tensorsig=tensorsig, dtype=dtype)
            diff = diff.replace(epsilon, 0)
        # Replace variables with backgrounds, if specified
        if diff:
            if backgrounds:
                for var, bg in zip(variables, backgrounds):
                    diff = diff.replace(var, bg)
        return diff

    @property
    def T(self):
        """Transpose of tensor components (symbolic)."""
        from .operators import TransposeComponents
        return TransposeComponents(self)

    @property
    def H(self):
        """Conjugate transpose of tensor components (symbolic)."""
        from .operators import TransposeComponents
        return TransposeComponents(np.conj(self))

    @CachedAttribute
    def is_complex(self):
        from ..tools.general import is_complex_dtype
        return is_complex_dtype(self.dtype)

    @CachedAttribute
    def is_real(self):
        from ..tools.general import is_real_dtype
        return is_real_dtype(self.dtype)

    @CachedAttribute
    def valid_modes(self):
        # Get general coeff valid modes
        valid_modes = self.dist.coeff_layout.valid_elements(self.tensorsig, self.domain, scales=1)
        # Return copy to avoid mangling cached result from coeff_layout
        return valid_modes.copy()
class Current(Operand):
    # Base class for already-realized operands (as opposed to deferred
    # "future" operator results). Implements the symbolic-manipulation
    # interface for leaf nodes and the data-buffer/layout machinery shared
    # by field-like subclasses.
    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, id(self))
    def __str__(self):
        # Prefer the user-assigned name; fall back to the repr.
        if self.name:
            return self.name
        else:
            return self.__repr__()
    def atoms(self, *types):
        """Gather all leaf-operands of specified types."""
        atoms = OrderedSet()
        # No type filter means "match everything".
        if (not types) or isinstance(self, types):
            atoms.add(self)
        return atoms
    def has(self, *vars):
        """Determine if tree contains any specified operands/operators."""
        # Check for empty set or matching operand
        return (not vars) or (self in vars)
    def split(self, *vars):
        """Split into expressions containing and not containing specified operands/operators."""
        if self in vars:
            return (self, 0)
        else:
            return (0, self)
    def replace(self, old, new):
        """Replace specified operand/operator."""
        if self == old:
            return new
        else:
            return self
    def sym_diff(self, var):
        """Symbolically differentiate with respect to specified operand."""
        # Leaf derivative: 1 w.r.t. itself, 0 otherwise.
        if self == var:
            return 1
        else:
            return 0
    def expand(self, *vars):
        """Expand expression over specified variables."""
        return self
    def prep_nccs(self, vars):
        # Leaf operands should always be listed as variables when NCCs are prepped.
        if self not in vars:
            raise ValueError("This should never happen.")
    def gather_ncc_coeffs(self):
        # Leaves have no NCC coefficients to gather.
        pass
    def attempt(self, id=None):
        """Recursively attempt to evaluate operation."""
        return self
    # def simplify(self, *vars):
    #     """Simplify expression, except subtrees containing specified variables."""
    #     return self
    def require_linearity(self, *vars, allow_affine=False, self_name=None, vars_name=None, error=AssertionError):
        """Require expression to be linear in specified variables."""
        if (not allow_affine) and (self not in vars):
            if self_name is None:
                self_name = str(self)
            if vars_name is None:
                vars_name = [str(var) for var in vars]
            raise error(f"{self_name} must be strictly linear in {vars_name}.")
    def require_first_order(self, *args, **kw):
        """Require expression to be maximally first order in specified operators."""
        pass
    # def separability(self, *vars):
    #     """Determine separable dimensions of expression as a linear operator on specified variables."""
    #     self.require_linearity(*vars)
    #     return np.array([True for basis in self.domain.bases])
    def matrix_dependence(self, *vars):
        # A bare operand contributes no matrix dependence along any axis.
        self.require_linearity(*vars)
        return np.array([False for axis in range(self.domain.dist.dim)])
    def matrix_coupling(self, *vars):
        # A bare operand couples no axes.
        self.require_linearity(*vars)
        return np.array([False for axis in range(self.domain.dist.dim)])
    # def operator_order(self, operator):
    #     """Determine maximum application order of an operator in the expression."""
    #     return 0
    def build_ncc_matrices(self, separability, vars, **kw):
        """Precompute non-constant coefficients and build multiplication matrices."""
        self.require_linearity(*vars)
    def expression_matrices(self, subproblem, vars, **kw):
        """Build expression matrices for a specific subproblem and variables."""
        self.require_linearity(*vars)
        # Build identity matrices over subproblem data
        # group_shape = subproblem.group_shape(self.domain)
        # factors = (sparse.identity(n, format='csr') for n in group_shape)
        # matrix = reduce(sparse.kron, factors, 1).tocsr()
        #size = self.domain.bases[0].field_radial_size(self, subproblem.ell)
        size = subproblem.field_size(self)
        matrix = sparse.identity(size, format='csr')
        return {self: matrix}
    # def setup_operator_matrix(self, separability, vars, **kw):
    #     """Setup operator matrix components."""
    #     self.require_linearity(*vars)
    #     # axmats = []
    #     # for seperable, basis in zip(separability, self.bases):
    #     #     # Size 1 for constant dimensions
    #     #     if basis is None:
    #     #         axmats.append(sparse.identity(1).tocsr())
    #     #     # Group size for separable dimensions
    #     #     elif separable:
    #     #         axmats.append(sparse.identity(basis.space.group_size).tocsr())
    #     #     # Coeff size for coupled dimensions
    #     #     else:
    #     #         axmats.append(sparse.identity(basis.space.coeff_size).tocsr())
    #     # # Store Kronecker product
    #     # self.operator_matrix = reduce(sparse.kron, axmats, 1).tocsr()
    def evaluate(self):
        # Already realized; evaluation is the identity.
        return self
    def reinitialize(self, **kw):
        return self
    @staticmethod
    def _create_buffer(buffer_size):
        """Create buffer for Field data.

        Parameters
        ----------
        buffer_size : int
            Requested buffer size in bytes — TODO confirm units against
            Distributor.buffer_size.
        """
        if buffer_size == 0:
            # FFTW doesn't like allocating size-0 arrays
            return np.zeros((0,), dtype=np.float64)
        else:
            # Use FFTW SIMD aligned allocation; allocation counted in
            # 8-byte doubles.
            alloc_doubles = buffer_size // 8
            return fftw.create_buffer(alloc_doubles)
    @CachedAttribute
    def _dealias_buffer_size(self):
        # Per-component buffer size at dealias scales.
        return self.dist.buffer_size(self.domain, self.domain.dealias, dtype=self.dtype)
    @CachedAttribute
    def _dealias_buffer(self):
        """Build and cache buffer large enough for dealias-scale data."""
        buffer_size = self._dealias_buffer_size
        # Total components from the tensor signature.
        ncomp = prod([vs.dim for vs in self.tensorsig])
        return self._create_buffer(ncomp * buffer_size)
    def preset_scales(self, scales):
        """Set new transform scales.

        Reallocates (or reuses) the data buffer for the new scales without
        transforming/copying existing data; use Field.change_scales to
        preserve data across a scale change.
        """
        new_scales = self.dist.remedy_scales(scales)
        old_scales = self.scales
        # Return if scales are unchanged
        if new_scales == old_scales:
            return
        # Get required buffer size
        buffer_size = self.dist.buffer_size(self.domain, new_scales, dtype=self.dtype)
        # Use dealias buffer if possible
        if buffer_size <= self._dealias_buffer_size:
            self.buffer = self._dealias_buffer
        else:
            ncomp = prod([vs.dim for vs in self.tensorsig])
            self.buffer = self._create_buffer(ncomp * buffer_size)
        # Reset layout to build new data view
        self.scales = new_scales
        self.preset_layout(self.layout)
    def preset_layout(self, layout):
        """Interpret buffer as data in specified layout.

        Rebuilds self.data as an ndarray view (tensor components first,
        then local spatial shape) over the existing buffer.
        """
        layout = self.dist.get_layout_object(layout)
        self.layout = layout
        tens_shape = [vs.dim for vs in self.tensorsig]
        local_shape = layout.local_shape(self.domain, self.scales)
        total_shape = tuple(tens_shape) + tuple(local_shape)
        self.data = np.ndarray(shape=total_shape,
                               dtype=self.dtype,
                               buffer=self.buffer)
        #self.global_start = layout.start(self.domain, self.scales)
class Field(Current):
    """
    Scalar field over a domain.

    Parameters
    ----------
    dist : distributor object
        Distributor over which the field is defined.
    bases : basis or tuple of bases, optional
        Field bases (default: constant over all dimensions).
    name : str, optional
        Field name (default: Python object id)
    tensorsig : tuple of coordinate systems, optional
        Tensor signature; one entry per tensor index (default: scalar).
    dtype : dtype, optional
        Data type (default: distributor dtype).

    Attributes
    ----------
    layout : layout object
        Current layout of field
    data : ndarray
        View of internal buffer in current layout
    """
    def __init__(self, dist, bases=None, name=None, tensorsig=None, dtype=None):
        if bases is None:
            bases = tuple()
        # Accept single basis in place of tuple/list
        if not isinstance(bases, (tuple, list)):
            bases = (bases,)
        if tensorsig is None:
            tensorsig = tuple()
        if dtype is None:
            if dist.dtype is None:
                raise ValueError("dtype must be specified for Distributor or Field.")
            dtype = dist.dtype
        from .domain import Domain
        self.dist = dist
        self.name = name
        self.tensorsig = tensorsig
        self.dtype = dtype
        # Build domain
        self.domain = Domain(dist, bases)
        # Set initial scales and layout
        self.scales = None
        self.buffer_size = -1
        self.layout = self.dist.get_layout_object('c')
        # Change scales to build buffer and data
        self.preset_scales((1,) * self.dist.dim)
    def __getitem__(self, layout):
        """Return data viewed in specified layout (transforming as needed)."""
        self.change_layout(layout)
        return self.data
    def __setitem__(self, layout, data):
        """Set data viewed in a specified layout (discarding current data)."""
        layout = self.dist.get_layout_object(layout)
        self.preset_layout(layout)
        copyto(self.data, data)
    def get_basis(self, coord):
        """Return the field's basis along the specified coordinate."""
        return self.domain.get_basis(coord)
        #from .basis import Basis
        #from .coords import Coordinate
        #return self.domain.full_spaces[space.axis]
        # if isinstance(space, Basis):
        #     if space in self.bases:
        #         return space
        # elif isinstance(space, Coordinate):
        #     for basis in self.bases:
        #         if space is basis.coord:
        #             return basis
        # return None
    @property
    def global_shape(self):
        # Global data shape in the current layout at the current scales.
        return self.layout.global_shape(self.domain, self.scales)
    def copy(self):
        """Return a new field with copied data (same layout and scales)."""
        copy = Field(self.dist, bases=self.domain.bases, tensorsig=self.tensorsig, dtype=self.dtype)
        copy.preset_scales(self.scales)
        copy[self.layout] = self.data
        return copy
    def set_global_data(self, global_data):
        """Set local data from array-like global data in the current layout."""
        elements = self.layout.local_elements(self.domain, self.scales)
        self.set_local_data(global_data[np.ix_(*elements)])
    def set_local_data(self, local_data):
        """Set local data in the current layout."""
        copyto(self.data, local_data)
    def change_scales(self, scales):
        """Change data to specified scales (transforming to preserve data)."""
        # Remedy scales
        new_scales = self.dist.remedy_scales(scales)
        old_scales = self.scales
        # Quit if new scales aren't new
        if new_scales == old_scales:
            return
        # Forward transform until remaining scales match
        # (scales only matter in grid space, so transform the outermost
        # grid-space axes with mismatched scales back to coeff space)
        for axis in reversed(range(self.dist.dim)):
            if not self.layout.grid_space[axis]:
                break
            if old_scales[axis] != new_scales[axis]:
                self.require_coeff_space(axis)
                break
        # Copy over scale change
        old_data = self.data
        self.preset_scales(scales)
        copyto(self.data, old_data)
    def change_layout(self, layout):
        """Change data to specified layout (applying transforms/transposes)."""
        layout = self.dist.get_layout_object(layout)
        # Transform to specified layout
        if self.layout.index < layout.index:
            while self.layout.index < layout.index:
                #self.domain.distributor.increment_layout(self)
                self.towards_grid_space()
        elif self.layout.index > layout.index:
            while self.layout.index > layout.index:
                #self.domain.distributor.decrement_layout(self)
                self.towards_coeff_space()
    def towards_grid_space(self):
        """Change to next layout towards grid space."""
        index = self.layout.index
        self.dist.paths[index].increment([self])
    def towards_coeff_space(self):
        """Change to next layout towards coefficient space."""
        index = self.layout.index
        self.dist.paths[index-1].decrement([self])
    def require_grid_space(self, axis=None):
        """Require one axis (default: all axes) to be in grid space."""
        if axis is None:
            while not all(self.layout.grid_space):
                self.towards_grid_space()
        else:
            while not self.layout.grid_space[axis]:
                self.towards_grid_space()
    def require_coeff_space(self, axis=None):
        """Require one axis (default: all axes) to be in coefficient space."""
        if axis is None:
            while any(self.layout.grid_space):
                self.towards_coeff_space()
        else:
            while self.layout.grid_space[axis]:
                self.towards_coeff_space()
    def require_local(self, axis):
        """Require an axis to be local."""
        # Move towards transform path, since the surrounding layouts are local
        if self.layout.grid_space[axis]:
            while not self.layout.local[axis]:
                self.towards_coeff_space()
        else:
            while not self.layout.local[axis]:
                self.towards_grid_space()
    # @classmethod
    # def cast_scalar(cls, scalar, domain):
    #     out = Field(bases=domain)
    #     out['c'] = scalar.value
    #     return out
    # @classmethod
    # def cast(cls, input, domain):
    #     from .operators import FieldCopy
    #     from .future import FutureField
    #     # Cast to operand and check domain
    #     input = Operand.cast(input)
    #     if isinstance(input, (Field, FutureField)):
    #         return input
    #     elif isinstance(input, Scalar):
    #         return cls.cast_scalar(input, domain)
    #     # Cast to FutureField
    #     #return FieldCopy(input, domain)
    #     else:
    #         raise ValueError()
    @property
    def is_scalar(self):
        # True when the field is constant over every dimension.
        return all(basis is None for basis in self.domain.bases)
    def local_elements(self):
        """Return local element indices in the current layout."""
        return self.layout.local_elements(self.domain, self.scales)
    # @CachedAttribute
    # def mode_mask(self):
    #     return reduce()
    def load_from_hdf5(self, file, index, task=None, func=None):
        """Load grid data from an hdf5 file. Task corresponds to field name by default."""
        if task is None:
            task = self.name
        dset = file['tasks'][task]
        if not np.all(dset.attrs['grid_space']):
            raise ValueError("Can only load data from grid space")
        self.load_from_global_grid_data(dset, pre_slices=(index,), func=func)
    def load_from_global_grid_data(self, global_data, pre_slices=tuple(), func=None):
        """Load local grid data from array-like global grid data.

        Parameters
        ----------
        global_data : array-like
            Global grid data; trailing dims must match the spatial grid.
        pre_slices : tuple, optional
            Leading slices applied before component/spatial slices
            (e.g. a time index for hdf5 task datasets).
        func : callable, optional
            Function applied to the extracted local data before assignment.
        """
        dim = self.dist.dim
        layout = self.dist.grid_layout
        # Set scales to match saved data
        scales = np.array(global_data.shape[-dim:]) / np.array(layout.global_shape(self.domain, scales=1))
        self.preset_scales(scales)
        # Extract local data from global data
        component_slices = tuple(slice(None) for cs in self.tensorsig)
        spatial_slices = layout.slices(self.domain, scales)
        local_slices = pre_slices + component_slices + spatial_slices
        if func is None:
            self[layout] = global_data[local_slices]
        else:
            self[layout] = func(global_data[local_slices])
        # Change scales back to dealias scales
        self.change_scales(self.domain.dealias)
    def allgather_data(self, layout=None):
        """Build global data on all processes."""
        # Change layout
        if layout is not None:
            self.change_layout(layout)
        # Shortcut for serial execution
        if self.dist.comm.size == 1:
            return self.data.copy()
        # Build global buffers
        tensor_shape = tuple(cs.dim for cs in self.tensorsig)
        global_shape = tensor_shape + self.layout.global_shape(self.domain, self.scales)
        local_slices = tuple(slice(None) for cs in self.tensorsig) + self.layout.slices(self.domain, self.scales)
        send_buff = np.zeros(shape=global_shape, dtype=self.dtype)
        recv_buff = np.empty_like(send_buff)
        # Combine data via allreduce -- easy but not communication-optimal
        # Should be optimized using Allgatherv if this is used past startup
        send_buff[local_slices] = self.data
        self.dist.comm.Allreduce(send_buff, recv_buff, op=MPI.SUM)
        return recv_buff
    def gather_data(self, root=0, layout=None):
        """Gather global data on the specified root process (None elsewhere)."""
        # Change layout
        if layout is not None:
            self.change_layout(layout)
        # Shortcut for serial execution
        if self.dist.comm.size == 1:
            return self.data.copy()
        # TODO: Shortcut this for constant fields
        # Gather data
        # Should be optimized via Gatherv eventually
        pieces = self.dist.comm.gather(self.data, root=root)
        # Assemble on root node
        if self.dist.comm.rank == root:
            # Reassemble the process-mesh blocks into one global array.
            ext_mesh = self.layout.ext_mesh
            combined = np.zeros(prod(ext_mesh), dtype=object)
            combined[:] = pieces
            return np.block(combined.reshape(ext_mesh).tolist())
    def allreduce_data_norm(self, layout=None, order=2):
        """Compute global p-norm of field data (order=np.inf gives max-abs)."""
        # Change layout
        if layout is not None:
            self.change_layout(layout)
        # Compute local data
        if self.data.size == 0:
            norm = 0
        elif order == np.inf:
            norm = np.max(np.abs(self.data))
        else:
            norm = np.sum(np.abs(self.data)**order)
        # Reduce
        if order == np.inf:
            if self.dist.comm.size > 1:
                norm = self.dist.comm.allreduce(norm, op=MPI.MAX)
        else:
            if self.dist.comm.size > 1:
                norm = self.dist.comm.allreduce(norm, op=MPI.SUM)
            # Take the p-th root only after the global sum.
            norm = norm ** (1 / order)
        return norm
    def allreduce_data_max(self, layout=None):
        """Compute global maximum absolute value of field data."""
        return self.allreduce_data_norm(layout=layout, order=np.inf)
    def allreduce_L2_norm(self, normalize_volume=True):
        """Compute global L2 norm via the (conjugate) self inner product."""
        from . import arithmetic
        from . import operators
        # Compute local self inner product
        rank = len(self.tensorsig)
        if rank == 0:
            self_inner_product = np.conj(self) * self
        elif rank == 1:
            self_inner_product = arithmetic.dot(np.conj(self), self)
        elif rank == 2:
            self_inner_product = arithmetic.Trace(arithmetic.Dot(operators.Transpose(np.conj(self)), self))
        else:
            raise ValueError("Norms only implemented up to rank-2 tensors.")
        # Compute L2 norm
        # NOTE: the evaluated average/integral is spatially constant, so
        # allreduce_data_max just extracts that constant globally.
        if normalize_volume:
            norm_sq = operators.Average(self_inner_product).evaluate().allreduce_data_max(layout='g')
        else:
            norm_sq = operators.Integrate(self_inner_product).evaluate().allreduce_data_max(layout='g')
        return norm_sq ** 0.5
    def normalize(self, normalize_volume=True):
        """
        Normalize field inplace using L2 norm.

        Parameters
        ----------
        normalize_volume : bool, optional
            Normalize inner product by domain volume. Default: True.
        """
        norm = self.allreduce_L2_norm(normalize_volume=normalize_volume)
        self.data /= norm
    def broadcast_ghosts(self, output_nonconst_dims):
        """Copy data over constant distributed dimensions for arithmetic broadcasting."""
        # Determine deployment dimensions
        self_const_dims = np.array(self.domain.constant)
        distributed = ~self.layout.local
        broadcast_dims = output_nonconst_dims & self_const_dims
        deploy_dims_ext = broadcast_dims & distributed
        deploy_dims = deploy_dims_ext[distributed]
        if not any(deploy_dims):
            return self.data
        # Broadcast on subgrid communicator
        comm_sub = self.domain.dist.comm_cart.Sub(remain_dims=deploy_dims)
        data = None
        if comm_sub.rank == 0:
            data = self.data
        else:
            # Non-root ranks hold size-0 views along constant dims; allocate
            # a size-1 receive buffer instead.
            shape = np.array(self.data.shape)
            shape[shape == 0] = 1
            data = np.empty(shape=shape, dtype=self.dtype)
        comm_sub.Bcast(data, root=0)
        return data
    def fill_random(self, layout=None, scales=None, seed=None, chunk_size=2**20, distribution='standard_normal', **kw):
        """
        Fill field with random data. If a seed is specified, the global data is
        reproducibly generated for any process mesh.

        Parameters
        ----------
        layout : Layout object, 'c', or 'g', optional
            Layout for setting field data. Default: current layout.
        scales : number or tuple of numbers, optional
            Scales for setting field data. Default: current scales.
        seed : int, optional
            RNG seed. Default: None.
        chunk_size : int, optional
            Chunk size for drawing from distribution. Should be less than locally
            available memory. Default: 2**20, corresponding to 8 MB of float64.
        distribution : str, optional
            Distribution name, corresponding to numpy random Generator method.
            Default: 'standard_normal'.
        **kw : dict
            Other keywords passed to the distribution method.
        """
        init_layout = self.layout
        # Set scales if requested
        if scales is not None:
            self.preset_scales(scales)
            if layout is None:
                # Restore the original layout after the scale change.
                self.preset_layout(init_layout)
        # Set layout if requested
        if layout is not None:
            self.preset_layout(layout)
        # Build global chunked random array (does not require global-sized memory)
        shape = tuple(cs.dim for cs in self.tensorsig) + self.global_shape
        if self.is_complex:
            # Extra trailing axis of size 2 for real/imag parts.
            shape = shape + (2,)
        global_data = ChunkedRandomArray(shape, seed, chunk_size, distribution, **kw)
        # Extract local data
        component_slices = tuple(slice(None) for cs in self.tensorsig)
        spatial_slices = self.layout.slices(self.domain, self.scales)
        local_slices = component_slices + spatial_slices
        local_data = global_data[local_slices]
        if self.is_real:
            self.data[:] = local_data
        else:
            self.data.real[:] = local_data[..., 0]
            self.data.imag[:] = local_data[..., 1]
    def low_pass_filter(self, shape=None, scales=None):
        """
        Apply a spectral low-pass filter by zeroing modes above specified relative scales.
        The scales can be specified directly or deduced from a specified global grid shape.

        Parameters
        ----------
        shape : tuple of ints, optional
            Global grid shape for inferring truncation scales.
        scales : float or tuple of floats, optional
            Scale factors for truncation.
        """
        original_scales = self.scales
        # Determine scales from shape
        if shape is not None:
            if scales is not None:
                raise ValueError("Specify either shape or scales.")
            global_shape = self.dist.grid_layout.global_shape(self.domain, scales=1)
            scales = np.array(shape) / global_shape
        # Low-pass filter by changing scales
        # (transforming to a smaller grid truncates high modes; the round
        # trip through grid space discards them)
        self.change_scales(scales)
        self.require_grid_space()
        self.change_scales(original_scales)
    def high_pass_filter(self, shape=None, scales=None):
        """
        Apply a spectral high-pass filter by zeroing modes below specified relative scales.
        The scales can be specified directly or deduced from a specified global grid shape.

        Parameters
        ----------
        shape : tuple of ints, optional
            Global grid shape for inferring truncation scales.
        scales : float or tuple of floats, optional
            Scale factors for truncation.
        """
        # High-pass = original minus low-passed coefficients.
        data_orig = self['c'].copy()
        self.low_pass_filter(shape=shape, scales=scales)
        data_filt = self['c'].copy()
        self['c'] = data_orig - data_filt
# A rank-0 (scalar) field is just a plain Field.
ScalarField = Field
def VectorField(dist, coordsys, *args, **kw):
    """Build a rank-1 Field with a single tensor index over *coordsys*."""
    return Field(dist, *args, tensorsig=(coordsys,), **kw)
def TensorField(dist, coordsys, *args, order=2, **kw):
    """Build a rank-`order` Field over *coordsys*.

    An explicit tensor signature may be passed as a tuple/list of coordinate
    systems, in which case *order* is ignored.
    """
    sig = coordsys if isinstance(coordsys, (tuple, list)) else (coordsys,) * order
    return Field(dist, *args, tensorsig=sig, **kw)
class LockedField(Field):
    """Field locked to particular layouts, disallowing changes to other layouts."""
    def change_scales(self, scales):
        # Scales are fixed; only accept a no-op request.
        scales = self.dist.remedy_scales(scales)
        if scales != self.scales:
            raise ValueError("Cannot change locked scales.")
    def towards_grid_space(self):
        """Change to next layout towards grid space."""
        index = self.layout.index
        new_index = index + 1
        new_layout = self.dist.layouts[new_index]
        # Only step if the destination layout is allowed.
        if new_layout in self.allowed_layouts:
            super().towards_grid_space()
        else:
            raise ValueError("Cannot change locked layout.")
    def towards_coeff_space(self):
        """Change to next layout towards coefficient space."""
        index = self.layout.index
        new_index = index - 1
        new_layout = self.dist.layouts[new_index]
        # Only step if the destination layout is allowed.
        if new_layout in self.allowed_layouts:
            super().towards_coeff_space()
        else:
            raise ValueError("Cannot change locked layout.")
    def lock_to_layouts(self, *layouts):
        """Restrict the field to an explicit set of layouts."""
        self.allowed_layouts = tuple(layouts)
    def lock_axis_to_grid(self, axis):
        """Restrict the field to layouts where *axis* is in grid space."""
        self.allowed_layouts = tuple(l for l in self.dist.layouts if l.grid_space[axis])
|
DedalusProjectREPO_NAMEdedalusPATH_START.@dedalus_extracted@dedalus-master@dedalus@core@field.py@.PATH_END.py
|
{
"filename": "plot_limb_darkening.py",
"repo_name": "DaneSpaeth/pyoscillot",
"repo_path": "pyoscillot_extracted/pyoscillot-main/pyoscillot/plot_scripts/plot_limb_darkening.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from three_dim_star import ThreeDimStar, TwoDimProjector
from utils import calc_mean_limb_dark, add_limb_darkening
from dataloader import phoenix_spectrum
from star import GridSpectrumSimulator
from cfg import parse_global_ini
conf_dict = parse_global_ini()
def plot_mean_limb_dark():
    """Plot the mu map of a model star next to its mean limb-darkening law.

    Saves the figure to "mean_limb_dark.png" in the working directory.
    """
    star = ThreeDimStar()
    N = 1000
    proj = TwoDimProjector(star, N=N, border=3, limb_darkening=False)
    mu = proj.mu()
    # Load a PHOENIX spectrum (Teff=4500 K, logg=2.0, [Fe/H]=0.0); only the
    # wavelength grid is used by calc_mean_limb_dark below.
    # (Removed unused wave_start/wave_stop locals.)
    wave, spec, header = phoenix_spectrum(4500, 2.0, 0.0, wavelength_range=(3500, 17500))
    mean_limb_dark = calc_mean_limb_dark(wave, mu, N=N)
    fig, ax = plt.subplots(1, 2, figsize=(7.16, 4.0275))
    ax[0].imshow(mu, vmin=0, vmax=1, cmap="viridis")
    ax[1].plot(wave, mean_limb_dark)
    plt.savefig("mean_limb_dark.png", dpi=600)
####### Probably all wrong from here! ########
def plot_stellar_disk_comparison():
    """Render limb-darkened stellar-disk images at four sample wavelengths.

    Saves a 1x4 panel figure to "limb_dark_3D_1x4.png".
    """
    star = ThreeDimStar()
    proj = TwoDimProjector(star, N=150, border=3, limb_darkening=False)
    mu = proj.mu()
    wave, spec, header = phoenix_spectrum(4500, 2.0, 0.0, wavelength_range=(3600, 7150))
    # Reduce the spectrum to the grid points closest to the sample wavelengths.
    wavelengths = [4500, 5500, 6500, 7500]
    wave_idxs = [np.argmin(np.abs(wave - wavelength)) for wavelength in wavelengths]
    mask = np.zeros_like(wave, dtype=bool)
    for idx in wave_idxs:
        mask[idx] = True
    wave = wave[mask]
    spec = spec[mask]
    # Evaluate the limb-darkened intensity for every disk pixel.
    intensities_at_waves = np.zeros((mu.shape[0], mu.shape[1], len(wave_idxs)))
    for (row, col), m in np.ndenumerate(mu):
        int_mu = add_limb_darkening(wave, spec, m)[0]
        print(row, col)  # progress output; this per-pixel loop is slow
        for idx in range(len(wave_idxs)):
            intensities_at_waves[row, col, idx] = int_mu[idx]
    colors = ["tab:blue", "tab:green", "yellow", "tab:red"]
    fig, ax = plt.subplots(1, 4, figsize=(7.16, 4.0275/2))
    for idx, (wavelength, color, a) in enumerate(zip(wavelengths, colors, ax.flatten())):
        # Replace NaNs (pixels off the stellar disk) with zero intensity.
        intensities_at_waves[:, :, idx][np.isnan(intensities_at_waves[:, :, idx])] = 0
        a.imshow(intensities_at_waves[:, :, idx], vmin=0.0, vmax=1.0, cmap="inferno")
        a.set_title(f"{int(wavelength/10)}nm")
        a.set_xticks([])
        a.set_yticks([])
    fig.set_tight_layout(True)
    plt.savefig("limb_dark_3D_1x4.png", dpi=600)
    plt.close()
def plot_mu_comparison():
    # Plot the limb-darkening law versus mu at four sample wavelengths and
    # save the figure to "limb_dark_comparison.png".
    wavelengths = np.array([4500, 5500, 6500, 7500], dtype=float)
    fig, ax = plt.subplots(1,1,figsize=(7.16, 4.0275))
    mu = np.linspace(0,1,1000)
    colors = ["tab:blue", "tab:green", "yellow", "tab:red"]
    for idx, (wavelength, color) in enumerate(zip(wavelengths, colors )):
        # Evaluate the limb-darkening factor at each mu; spec=None because
        # only the relative intensity (first return value) is used here.
        ax.plot(mu, [add_limb_darkening(wavelengths, None, m)[0][idx] for m in mu], color=color, label=f"{int(wavelength/10)}nm", lw=5)
    ax.set_xlabel("µ")
    ax.set_ylabel("Relative Intensity")
    ax.legend()
    ax.set_xlim(0,1)
    fig.set_tight_layout(True)
    plt.savefig("limb_dark_comparison.png", dpi=600)
def plot_spectral_change():
    """Compare a PHOENIX spectrum against its limb-darkening-adjusted version.

    Top panel: disk center (µ=1); bottom panel: near the limb (µ=0.2).
    """
    wave, spec, header = phoenix_spectrum(4500, 2.0, 0.0, wavelength_range=(4200, 10000))
    norm = np.nanmax(spec)
    fig, axes = plt.subplots(2, 1, sharex=True, figsize=(7.16, 4.0275))
    lw = 0.25
    for axis, mu in zip(axes, (1, 0.2)):
        _, spec_limb = add_limb_darkening(wave, spec, mu)
        axis.plot(wave, spec / norm, color="tab:blue", label="Original PHOENIX", lw=lw)
        axis.plot(wave, spec_limb / norm, color="tab:red", alpha=0.7,
                  label=f"Adjusted for Limb Darkening at µ={mu}", lw=lw)
        axis.set_ylabel("Normalized Flux")
        axis.legend()
        axis.set_ylim(0, 1.05)
    axes[1].set_xlabel(r"Wavelength [$\AA$]")
    fig.subplots_adjust(left=0.07, right=0.99, top=0.99, bottom=0.12, hspace=0)
    plt.savefig("limb_dark_spec.png", dpi=600)
def plot_summed_spectral_change():
    """Plot disk-integrated spectra with and without limb-darkening correction."""
    fig, axis = plt.subplots(1, figsize=(7.16, 4.0275))
    min_wave, max_wave = 4200, 10000
    norm = None
    runs = (
        (False, "tab:blue", "No Limb Darkening Correction", None),
        (True, "tab:red", "Limb Darkening Correction", 0.7),
    )
    for use_limb_darkening, color, label, alpha in runs:
        star = GridSpectrumSimulator(N_star=100, Teff=4500, logg=2,
                                     limb_darkening=use_limb_darkening)
        wave, spec, v = star.calc_spectrum(min_wave=min_wave, max_wave=max_wave)
        if norm is None:
            # Both curves are normalized to the uncorrected spectrum's maximum.
            norm = np.nanmax(spec)
        axis.plot(wave, spec / norm, color=color, label=label, lw=0.25, alpha=alpha)
    axis.set_xlabel(r"Wavelength [$\AA$]")
    axis.set_ylabel("Normalized Flux")
    fig.set_tight_layout(True)
    plt.savefig("limb_dark_summed_spec.png", dpi=600)
if __name__ == "__main__":
    # Only the mean limb-darkening figure is generated by default; the other
    # plotting routines are kept here commented out for convenience.
    # plot_spectral_change()
    # plot_summed_spectral_change()
    # plot_stellar_disk_comparison()
    plot_mean_limb_dark()
|
DaneSpaethREPO_NAMEpyoscillotPATH_START.@pyoscillot_extracted@pyoscillot-main@pyoscillot@plot_scripts@plot_limb_darkening.py@.PATH_END.py
|
{
"filename": "_position.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/indicator/delta/_position.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class PositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated `indicator.delta.position` property."""

    def __init__(self, plotly_name="position", parent_name="indicator.delta", **kwargs):
        # Fill in the standard defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("role", "info")
        kwargs.setdefault("values", ["top", "bottom", "left", "right"])
        super(PositionValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@indicator@delta@_position.py@.PATH_END.py
|
{
"filename": "mixed_models.md",
"repo_name": "huggingface/peft",
"repo_path": "peft_extracted/peft-main/docs/source/developer_guides/mixed_models.md",
"type": "Markdown"
}
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Mixed adapter types
Normally, it isn't possible to mix different adapter types in 🤗 PEFT. You can create a PEFT model with two different LoRA adapters (which can have different config options), but it is not possible to combine a LoRA and LoHa adapter. With [`PeftMixedModel`] however, this works as long as the adapter types are compatible. The main purpose of allowing mixed adapter types is to combine trained adapters for inference. While it is possible to train a mixed adapter model, this has not been tested and is not recommended.
To load different adapter types into a PEFT model, use [`PeftMixedModel`] instead of [`PeftModel`]:
```py
from peft import PeftMixedModel
base_model = ... # load the base model, e.g. from transformers
# load first adapter, which will be called "default"
peft_model = PeftMixedModel.from_pretrained(base_model, <path_to_adapter1>)
peft_model.load_adapter(<path_to_adapter2>, adapter_name="other")
peft_model.set_adapter(["default", "other"])
```
The [`~PeftMixedModel.set_adapter`] method is necessary to activate both adapters, otherwise only the first adapter would be active. You can keep adding more adapters by calling [`~PeftModel.add_adapter`] repeatedly.
[`PeftMixedModel`] does not support saving and loading mixed adapters. The adapters should already be trained, and loading the model requires a script to be run each time.
## Tips
- Not all adapter types can be combined. See [`peft.tuners.mixed.COMPATIBLE_TUNER_TYPES`](https://github.com/huggingface/peft/blob/1c1c7fdaa6e6abaa53939b865dee1eded82ad032/src/peft/tuners/mixed/model.py#L35) for a list of compatible types. An error will be raised if you try to combine incompatible adapter types.
- It is possible to mix multiple adapters of the same type which can be useful for combining adapters with very different configs.
- If you want to combine a lot of different adapters, the most performant way to do it is to consecutively add the same adapter types. For example, add LoRA1, LoRA2, LoHa1, LoHa2 in this order, instead of LoRA1, LoHa1, LoRA2, and LoHa2. While the order can affect the output, there is no inherently *best* order, so it is best to choose the fastest one.
|
huggingfaceREPO_NAMEpeftPATH_START.@peft_extracted@peft-main@docs@source@developer_guides@mixed_models.md@.PATH_END.py
|
{
"filename": "_showexponent.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choropleth/colorbar/_showexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated `choropleth.colorbar.showexponent` property."""

    def __init__(
        self, plotly_name="showexponent", parent_name="choropleth.colorbar", **kwargs
    ):
        # Fill in the standard defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("values", ["all", "first", "last", "none"])
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choropleth@colorbar@_showexponent.py@.PATH_END.py
|
{
"filename": "gen_qa_noshape_models.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/common/gen_qa_noshape_models.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright 2018-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
from builtins import range
import numpy as np
from gen_common import np_to_model_dtype, np_to_tf_dtype
# Populated from argparse in __main__; module-level so the helper
# functions below can read the command-line flags.
FLAGS = None
# NumPy dtype used to represent variable-length (string) tensors.
np_dtype_string = np.dtype(object)
def create_savedmodel_modelfile(
    models_dir,
    max_batch,
    model_version,
    input_shape,
    output0_shape,
    output1_shape,
    input_dtype,
    output0_dtype,
    output1_dtype,
    swap=False,
):
    """Write one TensorFlow SavedModel under models_dir/<name>/<version>.

    The model computes OUTPUT0 = INPUT0 + INPUT1 and OUTPUT1 = INPUT0 - INPUT1
    (outputs are swapped when ``swap=True``). INPUT0 is created without shape
    information (this generator produces the "noshape" QA models). Relies on
    ``tf`` and ``tu`` (test_util) being imported in ``__main__``.

    Silently returns if ``tu.validate_for_tf_model`` rejects the dtype/shape
    combination.
    """
    if not tu.validate_for_tf_model(
        input_dtype,
        output0_dtype,
        output1_dtype,
        input_shape,
        output0_shape,
        output1_shape,
    ):
        return
    tf_input_dtype = np_to_tf_dtype(input_dtype)
    tf_output0_dtype = np_to_tf_dtype(output0_dtype)
    tf_output1_dtype = np_to_tf_dtype(output1_dtype)
    # Create the model. If non-batching then don't include the batch
    # dimension.
    tf.compat.v1.reset_default_graph()
    if max_batch == 0:
        # INPUT0 gets an empty shape spec in both branches — only INPUT1
        # carries the declared shape (and the batch dim when batching).
        in0 = tf.compat.v1.placeholder(
            tf_input_dtype, tu.shape_to_tf_shape([]), "TENSOR_INPUT0"
        )
        in1 = tf.compat.v1.placeholder(
            tf_input_dtype, tu.shape_to_tf_shape(input_shape), "TENSOR_INPUT1"
        )
    else:
        in0 = tf.compat.v1.placeholder(
            tf_input_dtype, tu.shape_to_tf_shape([]), "TENSOR_INPUT0"
        )
        in1 = tf.compat.v1.placeholder(
            tf_input_dtype,
            [
                None,
            ]
            + tu.shape_to_tf_shape(input_shape),
            "TENSOR_INPUT1",
        )
    # If the input is a string, then convert each string to the
    # equivalent float value.
    if tf_input_dtype == tf.string:
        in0 = tf.strings.to_number(in0, tf.int32)
        in1 = tf.strings.to_number(in1, tf.int32)
    add = tf.add(in0, in1, "ADD")
    sub = tf.subtract(in0, in1, "SUB")
    # Cast or convert result to the output dtype.
    if tf_output0_dtype == tf.string:
        cast0 = tf.strings.as_string(add if not swap else sub, name="TOSTR0")
    else:
        cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
    if tf_output1_dtype == tf.string:
        cast1 = tf.strings.as_string(sub if not swap else add, name="TOSTR1")
    else:
        cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
    tf.identity(cast0, "TENSOR_OUTPUT0")
    tf.identity(cast1, "TENSOR_OUTPUT1")
    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "savedmodel_nobatch" if max_batch == 0 else "savedmodel",
        input_dtype,
        output0_dtype,
        output1_dtype,
    )
    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir
    with tf.compat.v1.Session() as sess:
        # Look the graph tensors up by name so the exported signature uses
        # the canonical TENSOR_* endpoints defined above.
        input0_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "TENSOR_INPUT0:0"
        )
        input1_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "TENSOR_INPUT1:0"
        )
        output0_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "TENSOR_OUTPUT0:0"
        )
        output1_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "TENSOR_OUTPUT1:0"
        )
        tf.compat.v1.saved_model.simple_save(
            sess,
            model_version_dir + "/model.savedmodel",
            inputs={"INPUT0": input0_tensor, "INPUT1": input1_tensor},
            outputs={"OUTPUT0": output0_tensor, "OUTPUT1": output1_tensor},
        )
def create_savedmodel_modelconfig(
    models_dir,
    max_batch,
    model_version,
    input_shape,
    output0_shape,
    output1_shape,
    input_dtype,
    output0_dtype,
    output1_dtype,
    output0_label_cnt,
    version_policy,
):
    """Write config.pbtxt and output0 labels for one savedmodel QA model.

    ``version_policy`` is either None (default "latest 1" policy) or a
    ``(kind, value)`` tuple where kind is "latest", "specific", or anything
    else (treated as "all"). Silently returns if ``tu.validate_for_tf_model``
    rejects the dtype/shape combination. Relies on ``tu`` (test_util) being
    imported in ``__main__``.
    """
    if not tu.validate_for_tf_model(
        input_dtype,
        output0_dtype,
        output1_dtype,
        input_shape,
        output0_shape,
        output1_shape,
    ):
        return
    # Unpack version policy. Renamed from `type`/`val` — `type` shadowed the
    # builtin.
    version_policy_str = "{ latest { num_versions: 1 }}"
    if version_policy is not None:
        policy_kind, policy_val = version_policy
        if policy_kind == "latest":
            version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(policy_val)
        elif policy_kind == "specific":
            version_policy_str = "{{ specific {{ versions: {} }}}}".format(policy_val)
        else:
            version_policy_str = "{ all { }}"
    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "savedmodel_nobatch" if max_batch == 0 else "savedmodel",
        input_dtype,
        output0_dtype,
        output1_dtype,
    )
    config_dir = models_dir + "/" + model_name
    config = """
name: "{}"
platform: "tensorflow_savedmodel"
max_batch_size: {}
version_policy: {}
input [
  {{
    name: "INPUT0"
    data_type: {}
    dims: [ {} ]
  }},
  {{
    name: "INPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
output [
  {{
    name: "OUTPUT0"
    data_type: {}
    dims: [ {} ]
    label_filename: "output0_labels.txt"
  }},
  {{
    name: "OUTPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
""".format(
        model_name,
        max_batch,
        version_policy_str,
        np_to_model_dtype(input_dtype),
        tu.shape_to_dims_str(input_shape),
        np_to_model_dtype(input_dtype),
        tu.shape_to_dims_str(input_shape),
        np_to_model_dtype(output0_dtype),
        tu.shape_to_dims_str(output0_shape),
        np_to_model_dtype(output1_dtype),
        tu.shape_to_dims_str(output1_shape),
    )
    # exist_ok covers the intended "ignore existing dir" case while still
    # surfacing real failures (the old `except OSError: pass` hid them).
    os.makedirs(config_dir, exist_ok=True)
    with open(config_dir + "/config.pbtxt", "w") as cfile:
        cfile.write(config)
    with open(config_dir + "/output0_labels.txt", "w") as lfile:
        for label_idx in range(output0_label_cnt):
            lfile.write("label" + str(label_idx) + "\n")
def create_models(
    models_dir,
    input_dtype,
    output0_dtype,
    output1_dtype,
    input_shape,
    output0_shape,
    output1_shape,
    output0_label_cnt,
    version_policy=None,
):
    """Create the SavedModel variants for one dtype/shape combination.

    Two models are produced per combination: one that supports batching
    (max_batch=8) and one that does not (max_batch=0).
    """
    model_version = 1
    if FLAGS.savedmodel:
        for max_batch in (8, 0):
            create_savedmodel_modelconfig(
                models_dir,
                max_batch,
                model_version,
                input_shape,
                output0_shape,
                output1_shape,
                input_dtype,
                output0_dtype,
                output1_dtype,
                output0_label_cnt,
                version_policy,
            )
            create_savedmodel_modelfile(
                models_dir,
                max_batch,
                model_version,
                input_shape,
                output0_shape,
                output1_shape,
                input_dtype,
                output0_dtype,
                output1_dtype,
            )
def create_fixed_models(
    models_dir, input_dtype, output0_dtype, output1_dtype, version_policy=None
):
    """Create fixed-shape models: 16-element 1-D tensors with 16 output labels."""
    size = 16
    shape = (size,)
    create_models(
        models_dir,
        input_dtype,
        output0_dtype,
        output1_dtype,
        shape,
        shape,
        shape,
        size,
        version_policy,
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--models_dir", type=str, required=True, help="Top-level model directory"
    )
    parser.add_argument(
        "--graphdef",
        required=False,
        action="store_true",
        help="Generate GraphDef models",
    )
    parser.add_argument(
        "--savedmodel",
        required=False,
        action="store_true",
        help="Generate SavedModel models",
    )
    parser.add_argument(
        "--tensorrt",
        required=False,
        action="store_true",
        help="Generate TensorRT PLAN models",
    )
    parser.add_argument(
        "--onnx",
        required=False,
        action="store_true",
        help="Generate Onnx Runtime Onnx models",
    )
    parser.add_argument(
        "--libtorch",
        required=False,
        action="store_true",
        help="Generate Pytorch LibTorch models",
    )
    parser.add_argument(
        "--variable",
        required=False,
        action="store_true",
        # Fixed help-text typo: "Used" -> "Use".
        help="Use variable-shape tensors for input/output",
    )
    parser.add_argument(
        "--ensemble",
        required=False,
        action="store_true",
        help="Generate ensemble models against the models"
        + " in all platforms. Note that the models generated"
        + " are not completed.",
    )
    FLAGS, unparsed = parser.parse_known_args()
    # Import TensorFlow lazily so the script works without it when only
    # other backends are requested.
    if FLAGS.savedmodel:
        import tensorflow as tf

        tf.compat.v1.disable_eager_execution()
    import test_util as tu

    # Tests with models that accept fixed-shape input/output tensors
    if not FLAGS.variable:
        create_fixed_models(FLAGS.models_dir, np.int8, np.int8, np.int8, ("latest", 1))
        create_fixed_models(
            FLAGS.models_dir, np.int16, np.int16, np.int16, ("latest", 2)
        )
        create_fixed_models(
            FLAGS.models_dir, np.int32, np.int32, np.int32, ("all", None)
        )
        create_fixed_models(FLAGS.models_dir, np.int64, np.int64, np.int64)
        create_fixed_models(
            FLAGS.models_dir,
            np.float16,
            np.float16,
            np.float16,
            (
                "specific",
                [
                    1,
                ],
            ),
        )
        create_fixed_models(
            FLAGS.models_dir, np.float32, np.float32, np.float32, ("specific", [1, 3])
        )
        create_fixed_models(FLAGS.models_dir, np.float16, np.float32, np.float32)
        create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int8)
        create_fixed_models(FLAGS.models_dir, np.int8, np.int32, np.int32)
        create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int16)
        create_fixed_models(FLAGS.models_dir, np.int32, np.float32, np.float32)
        create_fixed_models(FLAGS.models_dir, np.float32, np.int32, np.int32)
        create_fixed_models(FLAGS.models_dir, np.int32, np.float16, np.int16)
    if FLAGS.savedmodel:
        # Add extra versions (2 and 3) with swapped outputs for both the
        # batching (max_batch=8) and non-batching (max_batch=0) variants.
        for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]:
            create_savedmodel_modelfile(
                FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True
            )
            create_savedmodel_modelfile(
                FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True
            )
            create_savedmodel_modelfile(
                FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True
            )
            create_savedmodel_modelfile(
                FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True
            )
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@common@gen_qa_noshape_models.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/distutils/command/__init__.py",
"type": "Python"
}
|
"""distutils.command
Package containing implementation of all the standard Distutils
commands.
"""
def test_na_writable_attributes_deletion():
    # NOTE(review): this function references `np` and `assert_raises`, neither
    # of which is imported in this module, and `np.NA` does not exist in
    # released NumPy — this looks like misplaced test code; confirm whether it
    # belongs in this file at all.
    a = np.NA(2)
    attr = ['payload', 'dtype']
    for s in attr:
        assert_raises(AttributeError, delattr, a, s)
__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"

# Commands reused verbatim from plain distutils (not reimplemented here).
distutils_all = [ #'build_py',
                  'clean',
                  'install_clib',
                  'install_scripts',
                  'bdist',
                  'bdist_dumb',
                  'bdist_wininst',
                ]

# Pre-import the plain distutils command package so the names listed above
# resolve when re-exported through this package.
__import__('distutils.command', globals(), locals(), distutils_all)

# Public API: numpy.distutils' own command modules plus the borrowed ones.
__all__ = ['build',
           'config_compiler',
           'config',
           'build_src',
           'build_py',
           'build_ext',
           'build_clib',
           'build_scripts',
           'install',
           'install_data',
           'install_headers',
           'install_lib',
           'bdist_rpm',
           'sdist',
          ] + distutils_all
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@distutils@command@__init__.py@.PATH_END.py
|
{
"filename": "sod.py",
"repo_name": "EigenDev/simbi",
"repo_path": "simbi_extracted/simbi-main/simbi_configs/examples/sod.py",
"type": "Python"
}
|
from simbi import BaseConfig, DynamicArg, simbi_property
from simbi.key_types import *
class SodProblem(BaseConfig):
    """
    Sod's Shock Tube Problem in 1D Newtonian Fluid
    """

    # Command-line tunable parameters.
    nzones = DynamicArg("nzones", 1000, help="number of grid zones", var_type=int)
    ad_gamma = DynamicArg("ad-gamma", 5.0 / 3.0, help="Adiabatic gas index", var_type=float)

    @simbi_property
    def initial_state(self) -> Sequence[Sequence[float]]:
        # (density, velocity, pressure) on the left / right of the diaphragm.
        left_state = (1.0, 0.0, 1.0)
        right_state = (0.125, 0.0, 0.1)
        return (left_state, right_state)

    @simbi_property
    def geometry(self) -> Sequence[float]:
        # Domain [0, 1] with the diaphragm located at x = 0.5.
        return (0.0, 1.0, 0.5)

    @simbi_property
    def x1_cell_spacing(self) -> str:
        return "linear"

    @simbi_property
    def coord_system(self) -> str:
        return "cartesian"

    @simbi_property
    def resolution(self) -> DynamicArg:
        return self.nzones

    @simbi_property
    def gamma(self) -> DynamicArg:
        return self.ad_gamma

    @simbi_property
    def regime(self) -> str:
        # NOTE(review): the docstring says "Newtonian" but "srhd" selects the
        # special-relativistic solver — confirm which is intended.
        return "srhd"
|
EigenDevREPO_NAMEsimbiPATH_START.@simbi_extracted@simbi-main@simbi_configs@examples@sod.py@.PATH_END.py
|
{
"filename": "_shape.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattersmith/line/_shape.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShapeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated `scattersmith.line.shape` property."""

    def __init__(self, plotly_name="shape", parent_name="scattersmith.line", **kwargs):
        # Fill in the standard defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("values", ["linear", "spline"])
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattersmith@line@_shape.py@.PATH_END.py
|
{
"filename": "resunet_se.py",
"repo_name": "BiaPyX/BiaPy",
"repo_path": "BiaPy_extracted/BiaPy-master/biapy/models/resunet_se.py",
"type": "Python"
}
|
import torch
import torch.nn as nn
from typing import List
from biapy.models.blocks import (
ResConvBlock,
ResUpBlock,
ConvBlock,
)
class ResUNet_SE(nn.Module):
"""
Create Residual 2D/3D U-Net with squeeze-excite blocks.
Reference: `Squeeze and Excitation Networks <https://arxiv.org/abs/1709.01507>`_.
Parameters
----------
image_shape : 3D/4D tuple
Dimensions of the input image. E.g. ``(y, x, channels)`` or ``(z, y, x, channels)``.
activation : str, optional
Activation layer.
feature_maps : array of ints, optional
Feature maps to use on each level.
drop_values : float, optional
Dropout value to be fixed.
normalization : str, optional
Normalization layer (one of ``'bn'``, ``'sync_bn'`` ``'in'``, ``'gn'`` or ``'none'``).
k_size : int, optional
Kernel size.
upsample_layer : str, optional
Type of layer to use to make upsampling. Two options: "convtranspose" or "upsampling".
z_down : List of ints, optional
Downsampling used in z dimension. Set it to ``1`` if the dataset is not isotropic.
n_classes: int, optional
Number of classes.
output_channels : str, optional
Channels to operate with. Possible values: ``BC``, ``BCD``, ``BP``, ``BCDv2``,
``BDv2``, ``Dv2`` and ``BCM``.
upsampling_factor : tuple of ints, optional
Factor of upsampling for super resolution workflow for each dimension.
upsampling_position : str, optional
Whether the upsampling is going to be made previously (``pre`` option) to the model
or after the model (``post`` option).
isotropy : bool or list of bool, optional
Whether to use 3d or 2d convolutions at each U-Net level even if input is 3d.
larger_io : bool, optional
Whether to use extra and larger kernels in the input and output layers.
extra_conv : bool, optional
To add a convolutional layer before the residual blocks (as in Kisuk et al, 2017, https://arxiv.org/pdf/1706.00120)
Returns
-------
model : Torch model
U-Net model.
Calling this function with its default parameters returns the following network:
.. image:: ../../img/models/unet.png
:width: 100%
:align: center
Image created with `PlotNeuralNet <https://github.com/HarisIqbal88/PlotNeuralNet>`_.
"""
def __init__(
self,
image_shape=(256, 256, 1),
activation="ELU",
feature_maps=[32, 64, 128, 256],
drop_values=[0.1, 0.1, 0.1, 0.1],
normalization="none",
k_size=3,
upsample_layer="convtranspose",
z_down=[2, 2, 2, 2],
n_classes=1,
output_channels="BC",
upsampling_factor=(),
upsampling_position="pre",
isotropy=False,
larger_io=True,
extra_conv=True,
):
super(ResUNet_SE, self).__init__()
self.depth = len(feature_maps) - 1
self.ndim = 3 if len(image_shape) == 4 else 2
self.z_down = z_down
self.n_classes = 1 if n_classes <= 2 else n_classes
self.multiclass = True if n_classes > 2 and output_channels is not None else False
if type(isotropy) == bool:
isotropy = isotropy * len(feature_maps)
if self.ndim == 3:
conv = nn.Conv3d
convtranspose = nn.ConvTranspose3d
pooling = nn.MaxPool3d
else:
conv = nn.Conv2d
convtranspose = nn.ConvTranspose2d
pooling = nn.MaxPool2d
# Super-resolution
self.pre_upsampling = None
if len(upsampling_factor) > 1 and upsampling_position == "pre":
self.pre_upsampling = convtranspose(
image_shape[-1],
image_shape[-1],
kernel_size=upsampling_factor,
stride=upsampling_factor,
)
# ENCODER
self.down_path = nn.ModuleList()
self.mpooling_layers = nn.ModuleList()
in_channels = image_shape[-1]
# extra (larger) input layer
if larger_io:
kernel_size = (k_size + 2, k_size + 2) if self.ndim == 2 else (k_size + 2, k_size + 2, k_size + 2)
if isotropy[0] is False and self.ndim == 3:
kernel_size = (1, k_size + 2, k_size + 2)
self.conv_in = ConvBlock(
conv=conv,
in_size=in_channels,
out_size=feature_maps[0],
k_size=kernel_size,
act=activation,
norm=normalization,
)
in_channels = feature_maps[0]
else:
self.conv_in = None
for i in range(self.depth):
kernel_size = (k_size, k_size) if self.ndim == 2 else (k_size, k_size, k_size)
if isotropy[i] is False and self.ndim == 3:
kernel_size = (1, k_size, k_size)
self.down_path.append(
ResConvBlock(
conv,
in_channels,
feature_maps[i],
kernel_size,
activation,
norm=normalization,
dropout=drop_values[i],
se_block=True,
first_block=True if i == 0 else False,
extra_conv=extra_conv,
)
)
mpool = (z_down[i], 2, 2) if self.ndim == 3 else (2, 2)
self.mpooling_layers.append(pooling(mpool))
in_channels = feature_maps[i]
kernel_size = (k_size, k_size) if self.ndim == 2 else (k_size, k_size, k_size)
if isotropy[-1] is False and self.ndim == 3:
kernel_size = (1, k_size, k_size)
self.bottleneck = ResConvBlock(
conv,
in_channels,
feature_maps[-1],
kernel_size,
activation,
norm=normalization,
dropout=drop_values[-1],
se_block=True,
extra_conv=extra_conv,
)
# DECODER
self.up_path = nn.ModuleList()
in_channels = feature_maps[-1]
for i in range(self.depth - 1, -1, -1):
kernel_size = (k_size, k_size) if self.ndim == 2 else (k_size, k_size, k_size)
if isotropy[i] is False and self.ndim == 3:
kernel_size = (1, k_size, k_size)
self.up_path.append(
ResUpBlock(
ndim=self.ndim,
convtranspose=convtranspose,
in_size=in_channels,
out_size=feature_maps[i],
in_size_bridge=feature_maps[i],
z_down=z_down[i],
up_mode=upsample_layer,
conv=conv,
k_size=kernel_size,
act=activation,
norm=normalization,
dropout=drop_values[i],
se_block=True,
extra_conv=extra_conv,
)
)
in_channels = feature_maps[i]
# extra (larger) output layer
if larger_io:
kernel_size = (k_size + 2, k_size + 2) if self.ndim == 2 else (k_size + 2, k_size + 2, k_size + 2)
if isotropy[0] is False and self.ndim == 3:
kernel_size = (1, k_size + 2, k_size + 2)
self.conv_out = ConvBlock(
conv=conv,
in_size=feature_maps[0],
out_size=feature_maps[0],
k_size=kernel_size,
act=activation,
norm=normalization,
)
else:
self.conv_out = None
# Super-resolution
self.post_upsampling = None
if len(upsampling_factor) > 1 and upsampling_position == "post":
self.post_upsampling = convtranspose(
feature_maps[0],
feature_maps[0],
kernel_size=upsampling_factor,
stride=upsampling_factor,
)
# Instance segmentation
if output_channels is not None:
if output_channels in ["C", "Dv2"]:
self.last_block = conv(feature_maps[0], 1, kernel_size=1, padding="same")
elif output_channels in ["BC", "BP"]:
self.last_block = conv(feature_maps[0], 2, kernel_size=1, padding="same")
elif output_channels in ["BDv2", "BD"]:
self.last_block = conv(feature_maps[0], 2, kernel_size=1, padding="same")
elif output_channels in ["BCM", "BCD", "BCDv2"]:
self.last_block = conv(feature_maps[0], 3, kernel_size=1, padding="same")
elif output_channels in ["A"]:
self.last_block = conv(feature_maps[0], self.ndim, kernel_size=1, padding="same")
# Other
else:
self.last_block = conv(feature_maps[0], self.n_classes, kernel_size=1, padding="same")
# Multi-head: instances + classification
self.last_class_head = None
if self.multiclass:
self.last_class_head = conv(feature_maps[0], self.n_classes, kernel_size=1, padding="same")
self.apply(self._init_weights)
def forward(self, x) -> torch.Tensor | List[torch.Tensor]:
# Super-resolution
if self.pre_upsampling is not None:
x = self.pre_upsampling(x)
# extra large-kernel input layer
if self.conv_in is not None:
x = self.conv_in(x)
# Down
blocks = []
for i, layers in enumerate(zip(self.down_path, self.mpooling_layers)):
down, pool = layers
x = down(x)
if i != len(self.down_path):
blocks.append(x)
x = pool(x)
x = self.bottleneck(x)
# Up
for i, up in enumerate(self.up_path):
x = up(x, blocks[-i - 1])
# extra large-kernel output layer
if self.conv_out is not None:
x = self.conv_out(x)
# Super-resolution
if self.post_upsampling is not None:
x = self.post_upsampling(x)
class_head_out = torch.empty(())
if self.multiclass and self.last_class_head is not None:
class_head_out = self.last_class_head(x)
x = self.last_block(x)
if self.multiclass:
return [x, class_head_out]
else:
return x
def _init_weights(self, m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
|
BiaPyXREPO_NAMEBiaPyPATH_START.@BiaPy_extracted@BiaPy-master@biapy@models@resunet_se.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "keflavich/plfit",
"repo_path": "plfit_extracted/plfit-master/setup.py",
"type": "Python"
}
|
import sys

# Use setuptools for doc builds / develop installs, plain distutils otherwise.
if 'build_sphinx' in sys.argv or 'develop' in sys.argv:
    from setuptools import setup, Extension
else:
    from distutils.core import setup, Extension
import distutils.util
#from scipy_distutils.core import Extension as scipyExtension
#from scipy_distutils.core import setup as scipysetup
from numpy.distutils.core import Extension as numpyExtension
from numpy.distutils.core import setup as numpysetup
#from numpy.distutils.core import build_ext
from numpy.distutils.command import build_src
import Cython
import Cython.Compiler.Main

# Make numpy.distutils use Cython in place of Pyrex when building .pyx sources.
build_src.Pyrex = Cython
build_src.have_pyrex = True
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
import os
import shutil

# removed following lines as per http://www.mail-archive.com/numpy-discussion@scipy.org/msg19932.html
# OLD from numpy.distutils.core import setup
# OLD from numpy.distutils.core import Extension

# copied from pyspeckit's...
with open('README.rst') as file:
    long_description = file.read()
with open('CHANGES') as file:
    long_description += file.read()
with open('REQUIREMENTS') as file:
    requirements = file.readlines()

# print "To create cplfit.so (for importing), call command: "
# print "python setup.py build_ext --inplace"
# print "If this fails, make sure c_numpy.pxd is in the path somewhere (e.g. this directory)"
try:
    from numpy.distutils.misc_util import get_numpy_include_dirs
    numpy_include_dirs = get_numpy_include_dirs()
except (ImportError, AttributeError):
    # BUG FIX: numpy.get_include() returns a single path *string*; assigning
    # it directly made dirs.extend(...) below add the path one character at a
    # time. Wrap it in a list so both branches yield a list of directories.
    # (Also catch ImportError, which is what a failed `from ... import` raises.)
    numpy_include_dirs = [numpy.get_include()]

dirs = list(Cython.__path__)
dirs.extend(numpy_include_dirs)
dirs.append('.')

ext_cplfit = Extension("plfit.cplfit",
                       ["plfit/cplfit.pyx"],
                       include_dirs=dirs,
                       extra_compile_args=['-O3'])
#ext_cplfit = cythonize('plfit/cplfit.pyx', include_path=dirs, extra_compile_args=['-O3'])[0]
#ext_fplfit = numpyExtension(name="fplfit",
#                            sources=["fplfit.f"])
#ext_fplfit = numpyExtension(name="fplfit",
# sources=["fplfit.f"])
if __name__=="__main__":
    # can't specify fcompiler if numpysetup is included
    # therefore, run this command separately
    # gfortran = OK. g77, g95 NOT ok
    # also, this is kind of a ridiculous hack...
    if any([x in sys.argv for x in ['build','install','develop']]):
        # Build the Fortran extension out-of-band with f2py, since distutils
        # can't be told which fcompiler to use from here.
        fortran_compile_command = "cd plfit && f2py -c fplfit.f -m fplfit --fcompiler=gfortran && cd .."
        os.system(fortran_compile_command)
    # do this first so it gets copied (in principle...)
    # in practice, see hack cont'd
    if os.path.exists('plfit/fplfit.so'):
        # Copy the pre-built fplfit.so into the expected build/lib.* directory
        # so the later setup() step packages it.
        build_dir = 'build/lib.{0}-{1}.{2}/plfit/'.format(distutils.util.get_platform(),
                                                          sys.version_info[0],
                                                          sys.version_info[1])
        if not os.path.exists(build_dir):
            os.makedirs(build_dir)
        shutil.copy2('plfit/fplfit.so',build_dir+"/fplfit.so")
    S = setup(
        name="plfit",
        version="1.0.3",
        description="Python implementation of Aaron Clauset's power-law distribution fitter",
        long_description=long_description,
        author="Adam Ginsburg",
        author_email="adam.g.ginsburg@gmail.com",
        url="https://github.com/keflavich/plfit",
        download_url="https://github.com/keflavich/plfit/archive/master.zip",
        license="MIT",
        platforms=["Linux","MacOS X"],
        packages=['plfit','plfit.tests'],
        # obsolete package_dir={'plfit':'.'},
        install_requires=["numpy","cython"],
        ext_modules=cythonize([ext_cplfit]),
        cmdclass={'build_ext': build_ext}
    )
    #numpysetup(name = 'fplfit',
    #           ext_modules = [ext_fplfit]
    #           )
    #print "I can't get numpy.distutils to compile the fortran. To do it yourself, run some variant of:"
    #print 'f2py -c fplfit.f -m fplfit'
    # keep an eye on this: http://stackoverflow.com/questions/7932028/setup-py-for-packages-that-depend-on-both-cython-and-f2py
    # try:
    #     os.system('f2py -c fplfit.f -m fplfit')
    # except:
    #     print "Could not build fplfit"
|
keflavichREPO_NAMEplfitPATH_START.@plfit_extracted@plfit-master@setup.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "andreicuceu/vega",
"repo_path": "vega_extracted/vega-master/vega/plots/__init__.py",
"type": "Python"
}
|
andreicuceuREPO_NAMEvegaPATH_START.@vega_extracted@vega-master@vega@plots@__init__.py@.PATH_END.py
|
|
{
"filename": "test_plot_psw.py",
"repo_name": "STBadman/ParkerSolarWind",
"repo_path": "ParkerSolarWind_extracted/ParkerSolarWind-main/parkersolarwind/tests/test_plot_psw.py",
"type": "Python"
}
|
import parkersolarwind as psw
import plot_parkersolarwind as parkersolarwind
def test_plot_isothermal() :
    """Placeholder test for plotting an isothermal Parker-wind solution.

    Intentionally fails (``assert False``) until the checks sketched in
    the comments below are implemented.
    """
    # Make example isothermal solution
    # Check input error handling
    # Check fexts error handling
    # Check output is fig and axes object (3 columns)
    assert False
def test_plot_polytropic() :
    """Placeholder test for plotting a polytropic Parker-wind solution.

    Intentionally fails (``assert False``) until the checks sketched in
    the comments below are implemented.
    """
    # Make example polytropic solution
    # Check input error handling
    # Check add_iso handling
    # Check fexts error handling
    # Check output is fig and axes object (3 columns)
    assert False
def test_plot_isothermal_layer() :
    """Placeholder test for plotting an isothermal-layer solution.

    Intentionally fails (``assert False``) until the checks sketched in
    the comments below are implemented.
    """
    # Make example isothermal layer solution
    # Check input error handling
    # Check add_iso handling
    # Check fexts error handling
    # Check output is fig and axes object (3 columns)
    assert False
|
STBadmanREPO_NAMEParkerSolarWindPATH_START.@ParkerSolarWind_extracted@ParkerSolarWind-main@parkersolarwind@tests@test_plot_psw.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scattersmith/hoverlabel/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# On Python < 3.7 there is no module-level __getattr__ (PEP 562), and
# static type checkers need a real import to resolve, so load Font
# eagerly in those cases.  Otherwise register a lazy import: the _font
# submodule is only loaded on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._font import Font
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@scattersmith@hoverlabel@__init__.py@.PATH_END.py
|
{
"filename": "pol2noise.py",
"repo_name": "Starlink/starlink",
"repo_path": "starlink_extracted/starlink-master/applications/smurf/scripts/pol2noise.py",
"type": "Python"
}
|
#!/usr/bin/env python3
'''
*+
* Name:
* POL2NOISE
* Purpose:
* Analyse the noise in a POL2 vector catalogue
* Language:
* python (2.7 or 3.*)
* Description:
* This script manipulates the noise estimates contained within a POL2
* vector catalogue. Currently two options are available, selected by
* parameter MODE:
*
* - "Display" - this displays the noise values stored in a specified
* column of the catalogue (e.g. "DQ") and compares them to the noise
* values estimated from the variations of the corresponding value
* column (e.g. "Q"). This allows the noise values ("DQ" etc) to be
* verified.
*
* - "Remodel" - This replaces all the noise values stored in a specified
* catalogue with values based on smoother models of the background and
* source noise. This helps to remove outlying noise estimates that are
* anomalously low or high because of the random nature of the MAPVAR
* errors created by the pol2map script.
* Display Mode:
* The "display" mode compares the error estimates stored in the error
* columns of the catalogue (e.g. the values in the DI column) with
* estimates of the errors derived from the value columns (e.g. the
* I column). This comparison is limited to regions where the true
* astronomical signal can be assumed to be zero (i.e. the background
* regions). This mode can only be used with the intensity-like values
* in the catalogue (e.g. I, Q, U and PI).
*
* Two different methods are used for comparing the noise values.
* Ideally they should both show that the error columns stored in the
* catalogue accurately describe the noise in the data columns.
* However, in reality they will both give reports that depart from
* this ideal by differing amounts. Results for both methods are
* displayed on the graphics device specified by parameter DEVICE.
* The results from "Method 1" are displayed in the lower row of three
* pictures on the graphics device, and the results from "Method 2"
* are displayed in the upper row of three pictures. For convenience,
*     the scatter plot produced by method 2 (top right picture) is
*     overlaid in blue on top of the scatter plot produced by method 1
*     (lower right plot).
*
* Method 1:
*
* Firstly, a mask is created that is later used to identify background
* regions. This is based on the total intensity, I, regardless of the
* column being checked, since I is usually much brighter than Q, U or
* PI. The values from catalogue columns "I" and "DI" are extracted into
* a pair of 2-dimensional maps. A basic SNR map is then formed (I/DI)
* and the FellWalker algorithm within the Starlink CUPID package (see
* SUN/255) is used to identify contiguous clumps of pixels with SNR
* higher than 5 and to extend each such clump down to an SNR of 2.
* Next, the values from the requested catalogue columns, "<X>" and "D<X>",
* are extracted into a pair of 2-dimensional maps and masked to remove
* source regions (i.e. the clumps of high SNR identified by FellWalker).
*
* The full range of "D<X>" values in the remaining background is divided
* into a set of bins, and each "<X>" value is then placed into a bin on
* the basis of its corresponding "D<X>" value. The "<X>" values in each
* bin should in principle have a mean value of zero since they are
* all background pixels. The standard deviation of the "<X>" values
* in each bin is found and plotted against the "D<X>" value associated
* with the bin (which varies across the map, being larger nearer the
* edges). Ideally the resulting best fit line should have a slope of
* unity and an offset of zero, indicating that the noise estimate
* "D<X>" associated with each bin is a good measure of the standard
* deviation of the "<X>" values in the bin.
*
* The masked "<X>" and "D<X>" maps are displayed using a common scaling
* on the graphics device specified by parameter DEVICE. A scatter plot
* showing the standard deviation of the "<X>" values in each bin on
* the vertical axis and the RMS "D<X>" value in each bin on the
* horizontal axis is also displayed. The slope and offset of the best
* fitting straight line are displayed on standard output, together with
* the RMS residual of the fit. The upper data limit included in the
*     scatter plot is displayed as a red contour on the two maps.
*
* The "D<X>" map may optionally be smoothed using a Gaussian kernel
* before being used - see parameter PRESMOOTH.
*
* The size of each "D<X>" bin and the data included in the scatter
* plot can make a significant difference to the final slope and
* offset. The first bin (lowest D<X>) is centred on the peak of the
* D<X> histogram. This histogram is usually heavily skewed with a very
* rapid rise at low D<X> values followed by a long tail to higher D<X>
* values. The bin width is 1.5 times the FWHM of the histogram peak,
* as determined solely from the D<X> values below the peak. All bins
* have equal width, and the highest bin includes the D<X> value
* corresponding to the value of parameter PERC. Any points below the
* first bin or above the last bin are excluded from the scatter plot.
* This binning scheme aims to reduce statistical bias at the low D<X>
* end, which tends to cause the lowest D<X> points in the scatter plot
* to be slightly higher than they should be. This bias is caused by
* there being few points at lower D<X> to balance those with higher
* D<X> value.
*
* Method 2:
*
* Firstly, the values from catalogue columns "I" and "DI" are
* extracted into a pair of 2-dimensional maps. A basic SNR map is
* then formed (I/DI) and significant spatial structures are
* identified and blanked out using the KAPPA:FFCLEAN command on the
* SNR map. The SNR map is used here, instead of simply using "I", in
*     order to flatten the noise level across the map, which helps FFCLEAN.
* Each blanked out region in this mask (i.e. each source area) is then
* enlarged slightly to remove any remaining nearby low-level source
* pixels.
*
* Next, the values from catalogue columns "<X>" and "D<X>" are
* extracted into a pair of 2-dimensional maps and masked (using the
* mask described above) to remove source regions.
*
* The first noise estimate measures the spatial variation in pixel
* value in the neighbourhood of each pixel in the masked "<X>"
* map. It is formed by first squaring the masked "<X>" map, then
* smoothing the squared map using a Gaussian smoothing kernel, then
* taking the square root of the smoothed map. Thus each pixel value
* in the final map represents the RMS of the nearby pixels in masked
* "<X>" map. The FWHM of the Gaussian smoothing kernel is chosen in
* order to maximise the correlation between the first and second
* estimates of the noise.
*
* The "D<X>" map, which holds the second noise estimate, may optionally
* be smoothed using a Gaussian kernel before being used - see parameter
* PRESMOOTH.
*
* The maps holding the two masked noise estimates are displayed
* using a common scaling on the graphics device specified by
* parameter DEVICE. A scatter plot of the values in these two maps
* is also displayed. The slope and offset of the best fitting
* straight line, based on the visible points in the scatter plot, are
* displayed on standard output, together with the RMS residual of the
* fit. The upper data limits to be included in the scatter plot can
* be controlled by parameter PERC, and are displayed as red contours
* on the two maps.
* Remodel Mode:
* The "remodel" mode creates an output catalogue holding a copy of the
* input catalogue, and then calculates new values for all the error
* columns in the output catalogue. The new I, Q and U error values
* are first derived from a three component model of the noise in each
* quantity:
*
* The "background component" is derived from the exposure time map
* (obtained using parameter EXPTIME). The background component is equal
* to "A*(exptime^B)" where A and B are constants determined by doing a
* linear fit between the log of the noise estimate in the catalogue (DQ,
* DU or DI) and the log of the exposure time (in practice, B is usually
* close to -0.5). The fit is limited to background areas in the signal
* map, but also excludes a thin rim around the edge of the map where
* the original noise estimates are subject to large inaccuracies. Since
* the exposure time map is usually very much smoother than the original
* noise estimates, the background component is also much smoother.
*
* The "source component" represents the extra noise found in and around
* compact sources and caused by pointing errors, calibration errors,
* etc. The background component is first subtracted from the catalogue
* noise estimates and the residual noise values are then modelled using
* a collection of Gaussians. This modeling is done using the GaussClumps
* algorithm provided by the findclumps command in the Starlink CUPID
* package. The noise residuals are first divided into a number of
* "islands", each island being a collection of contiguous pixels with
* noise residual significantly higher than zero (this is done using
* the FellWalker algorithm in CUPID). The GaussClumps algorithm is
* then used to model the noise residuals in each island. The resulting
* model is smoothed lightly using a Gaussian kernel of FWHM 1.2 pixels.
*
* The "residual component" represents any noise not accounted for
* by the other two models. The noise residuals are first found by
* subtracting the other two components from the original catalogue
* noise estimates. Any strong outlier values are removed and the
* results are smoothed more heavily using a Gaussian kernel of FWHM
* 4 pixels.
*
* The final model is the sum of the above three components. The new
* DI, DQ and DU values are found independently using the above method.
* The errors for the derived quantities (DPI, DP and DANG) are then
*     found from DQ, DU and DI using the usual error propagation formulae.
* Finally new P and PI values are found using a specified form of
* de-biasing (see parameter DEBIASTYPE).
*
* The results of the re-modelling are displayed on the graphics
* device specified by parameter DEVICE. A row of four pictures is
*     created for each Stokes parameter (I, Q and U). From left to
* right, these are:
*
* - An image of the original error estimates in the supplied catalogue.
* - An image of the re-modelled error estimates in the output catalogue.
* - An image of the residuals between original and re-modelled error
* estimates.
* - A scatter plot of re-modelled against original error estimates.
*
* The images of the original and re-modelled error estimates use the
* same scaling. The image of the residuals is scaled between the 2nd
* and 98th percentiles.
* Usage:
* pol2noise cat column [mode] [perc] [presmooth] [style] [device]
* ADAM Parameters:
* CAT = LITERAL (Read)
* The input vector catalogue. This should have been created by
* POL2MAP.
* COLUMN = LITERAL (Read)
* The name of the catalogue column to be used if parameter MODE is
* "Display". Both the named column and the associated error column
* ("<X>" and "D<X>") must exist in the catalogue. The name must be
* one of "Q", "U", "I" or "PI".
* DEBIASTYPE = LOGICAL (Given)
*        Gives the type of bias estimator to use if parameter MODE is
*        "Remodel", using the nomenclature of Montier et al "Polarization
* measurements analysis II. Best estimators of polarization fraction
* and angle" (A&A, 2018):
* - "AS": The asymptotic estimator. See section 2.3 of Montier
* et al. This estimator produces bad P and PI values if the
* squared PI value is less than the variance in PI.
* - "MAS": The modified asymptotic estimator. See section 2.5 of
*        Montier et al. This estimator does not produce bad P and PI
* values, even if the squared PI value is less than the
* variance in PI.
* - "None": No de-biasing.
* DEVICE = DEVICE (Read)
* The graphics workstation to use. The default is the current
* graphics device as previously set using KAPPA:GDSET. If GDSET
* has not been used to establish a current graphics device, the
* user is prompted for a device. Use KAPPA:GDNAMES to see a list
* of available device names. []
* EXPTIME = NDF (Read)
* An NDF that contains an exposure time map for the data in the
* supplied catalogue. Only used if parameter MODE is "Remodel". For
* instance, the "iext", "qext" or "uext" map that was created by
* POL2MAP at the same time as the catalogue could be supplied.
* GLEVEL = LITERAL (Read)
* Controls the level of information to write to a text log file.
* Allowed values are as for "ILEVEL". The log file to create is
*        specified via parameter "LOGFILE". Note, the default value is
*        "NONE", which causes no log file to be created. Setting this
* parameter to another value (e.g. "ATASK") causes the log file to
* be produced. ["NONE"]
* ILEVEL = LITERAL (Read)
* Controls the level of information displayed on the screen by the
* script. It can take any of the following values (note, these values
* are purposefully different to the SUN/104 values to avoid confusion
* in their effects):
*
* - "NONE": No screen output is created
*
* - "CRITICAL": Only critical messages are displayed such as warnings.
*
* - "PROGRESS": Extra messages indicating script progress are also
* displayed.
*
* - "ATASK": Extra messages are also displayed describing each atask
* invocation. Lines starting with ">>>" indicate the command name
* and parameter values, and subsequent lines hold the screen output
* generated by the command.
*
* - "DEBUG": Extra messages are also displayed containing unspecified
* debugging information.
*
* ["PROGRESS"]
* LOGFILE = LITERAL (Read)
* The name of the log file to create if GLEVEL is not NONE (which
* it is by default). The default log file name is "pol2noise.log"
* and is created in the current directory. Any file with the same
* name is over-written. Any old log file will be closed before the
* new one is opened. []
* MODE = LITERAL (Read)
* The operation to be performed on the input catalogue specified
* by parameter CAT:
*
* - "DISPLAY": Verify the noise estimates on a single quantity
* by comparing them to the local variations of the quantity. See
*        parameters COLUMN, DEVICE, PERC, PRESMOOTH.
*
* - "REMODEL": Replace the noise estimates in the catalogue using
* a smoother model. See parameters OUT, EXPTIME, DEBIASTYPE.
*
* ["Display"]
* MSG_FILTER = LITERAL (Read)
* Controls the default level of information reported by Starlink
* atasks invoked within the executing script. The accepted values
* are the list defined in SUN/104 ("None", "Quiet", "Normal",
* "Verbose", etc). ["Normal"]
* OUT = LITERAL (Read)
* The output FITS vector catalogue. Only used if parameter MODE is
* "Remodel".
* PERC = _REAL (Read)
* The percentile corresponding to the highest "D<X>" value to include
* in the scatter plot. Only used if parameter MODE is "Display". In
* the range 20 to 100. A value below 100 causes the edge pixels, which
* usually have very high variances, to be excluded from the plot. A
* red contour is displayed over the "D<X>" map indicating the noise
* level corresponding to the value of PERC. [90]
* PRESMOOTH = _REAL (Read)
* Controls initial smoothing of the "D<X>" map in "Method 2". If a
* value is supplied for PRESMOOTH, then the "D<X>" map read from the
* catalogue is first smoothed using a Gaussian kernel before being
* used. The value of PRESMOOTH gives the FWHM of the Gaussian
* kernel, in pixels. If a null (!) value is supplied for PRESMOOTH
* (which is the default value), the "D<X>" values read from the
* catalogue are used directly as the second noise estimate, without
* any smoothing. [!]
* RETAIN = _LOGICAL (Read)
* Should the temporary directory containing the intermediate files
* created by this script be retained? If not, it will be deleted
* before the script exits. If retained, a message will be
* displayed at the end specifying the path to the directory. [FALSE]
* STYLE = LITERAL (Read)
* A group of attribute settings describing the plotting style to
* use for the annotated axes, etc.
*
* A comma-separated list of strings should be given in which each
* string is either an attribute setting, or the name of a text
* file preceded by an up-arrow character "^". Such text files
* should contain further comma-separated lists which will be read
* and interpreted in the same manner. Attribute settings are
* applied in the order in which they occur within the list, with
* later settings overriding any earlier settings given for the
* same attribute.
*
* Each individual attribute setting should be of the form:
*
* <name>=<value>
*
* where <name> is the name of a plotting attribute, and <value>
* is the value to assign to the attribute. Default values will
* be used for any unspecified attributes. All attributes will be
* defaulted if a null value (!)---the initial default---is
* supplied.
*
* See section "Plotting Attributes" in SUN/95 for a description
* of the available attributes. Any unrecognised attributes are
* ignored (no error is reported). The appearance of the markers in
* the scatter plot is controlled by the attributes "Colour(Markers)",
* "Width(Markers)", etc. Likewise the appearance of the best fit
* line (and the contour lines) is controlled using "Colour(Curves)",
* "Width(Curves)", etc. [current value]
* Copyright:
* Copyright (C) 2020 East Asian Observatory
* All Rights Reserved.
* Licence:
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either Version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
* Authors:
* DSB: David S. Berry (EAO)
* {enter_new_authors_here}
* History:
* 25-MAR-2020 (DSB):
* Original version.
* 3-APR-2020 (DSB):
* Add parameter STYLE.
* 1-MAY-2020 (DSB):
* Completely re-wrote the DISPLAY algorithm, based on the new
* KAPPA:PIXBIN command.
* 29-MAY-2020 (DSB):
* - Added "Re-modelling" mode.
* - Changed "Display" mode to use two different independent methods.
*-
'''
import numpy as np
import math
import starutil
from starutil import invoke
from starutil import NDG
from starutil import Parameter
from starutil import ParSys
from starutil import msg_out
from starutil import get_task_par
# Assume for the moment that we will not be retaining temporary files.
# This module-level flag is re-assigned later from the script's RETAIN
# parameter and is consulted by cleanup().
retain = 0

# Do not create a log file by default. Setting parameter GLEVEL=ATASK
# will cause a logfile to be produced.
starutil.glevel = starutil.NONE
# A function to clean up before exiting. Delete all temporary NDFs etc,
# unless the script's RETAIN parameter indicates that they are to be
# retained. Also delete the script's temporary ADAM directory.
def cleanup():
    """Tidy up before the script exits.

    Deletes all temporary NDFs created via starutil.NDG, unless the
    script's RETAIN parameter indicated that they should be kept (in
    which case their location is reported instead).  The script's
    temporary ADAM directory is always removed via ParSys.cleanup().
    """
    global retain
    if retain:
        msg_out( "Retaining temporary files in {0}".format(NDG.tempdir))
    else:
        NDG.cleanup()
    ParSys.cleanup()
# Catch any exception so that we can always clean up, even if control-C
# is pressed.
try:
# Declare the script parameters. Their positions in this list define
# their expected position on the script command line. They can also be
# specified by keyword on the command line. No validation of default
# values or values supplied on the command line is performed until the
# parameter value is first accessed within the script, at which time the
# user is prompted for a value if necessary. The parameters "MSG_FILTER",
# "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
# constructor.
params = []
params.append(starutil.Par0S("CAT", "Input vector catalogue" ))
params.append(starutil.Par0S("COLUMN", "Catalogue column" ))
params.append(starutil.ParChoice("MODE", ("DISPLAY","REMODEL"),
"Operation to perform", "DISPLAY",
noprompt=True ))
params.append(starutil.Par0F("PERC", "Upper percentile for scatter plot",
default=90.0, noprompt=True, maxval=100.0,
minval=20 ))
params.append(starutil.Par0F("PRESMOOTH", "FWHM (pixels) for pre-smoothing",
default=None, noprompt=True ))
params.append(starutil.ParGrp("STYLE", "Graphics style parameters",
"def", noprompt=True))
params.append(starutil.Par0S("DEVICE", "Input vector catalogue",
default=None, noprompt=True ))
params.append(starutil.Par0S("OUT", "The output FITS vector catalogue"))
params.append(starutil.ParNDG("EXPTIME", "An NDF containing an exposure "
"time map" ))
params.append(starutil.ParChoice("DEBIASTYPE", ("AS","MAS","NONE"),
"Bias estimator to be used" ))
params.append(starutil.Par0L("RETAIN", "Retain temporary files?", False,
noprompt=True))
# Initialise the parameters to hold any values supplied on the command
# line.
parsys = ParSys( params )
# Get the input catalogue
cat = parsys["CAT"].value
# Get the operation to perform.
mode = parsys["MODE"].value
# See if temp files are to be retained.
retain = parsys["RETAIN"].value
# Get the current graphics device.
device = get_task_par( "graphics_device", "GLOBAL", default=None )
# If it is defined, use it as the default for the DEVICE parameter, and
# indicate the user should not be prompted. Otherwise, set "xw" as the
# default and indicate the user should be prompted.
if device is not None:
parsys["DEVICE"].default = device
parsys["DEVICE"].noprompt = True
else:
parsys["DEVICE"].default = "xw"
parsys["DEVICE"].noprompt = False
# Get the graphics device to use.
device = parsys["DEVICE"].value
# The graphics style.
style = parsys["STYLE"].value
# First deal with "display" mode.
# -------------------------------
if mode == "DISPLAY":
# Get the column name.
col = parsys["COLUMN"].value.upper()
# if col not in ["Q","U","I","PI"]:
# raise starutil.InvalidParameterError("\nCannot use column '{0}' - "
# "must be one of Q, U, I or PI".format(col))
# Get the upper percentile for the "D<X>" values to include.
perc = parsys["PERC"].value
# Get the FWHM to use for any pre-smoothing to be applied to the "D<X>"
# values read from the catalogue.
presmooth = parsys["PRESMOOTH"].value
# Get maps of the requested column value and its error from the supplied
# catalogue.
msg_out( "\nExtracting columns {0} and D{0} from {1}...".format(col,cat) )
vcat = NDG( 1 )
invoke( "$POLPACK_DIR/polimage in={0} out={1} coldat={2} box=1".
format(cat,vcat,col) )
dvcat = NDG( 1 )
invoke( "$POLPACK_DIR/polimage in={0} out={1} coldat=D{2} box=1".
format(cat,dvcat,col) )
# Get maps of the I and DI column values from the supplied catalogue.
if col != "I":
msg_out( "Extracting columns I and DI from {0}...".format(cat) )
icat = NDG( 1 )
invoke( "$POLPACK_DIR/polimage in={0} out={1} coldat=I box=1".
format(cat,icat) )
dicat = NDG( 1 )
invoke( "$POLPACK_DIR/polimage in={0} out={1} coldat=DI box=1".
format(cat,dicat) )
else:
icat = vcat
dicat = dvcat
# Warn the user if it looks like this is an old catalogue with no
# negative I values.
invoke( "$KAPPA_DIR/stats ndf={0}".format(icat) )
mini = float( get_task_par( "minimum", "stats" ) )
if mini >= 0.0:
msg_out( "\nWARNING: it looks like the supplied catalogue may "
"have been created using an old (i.e. before 2nd April "
"2020) version of Starlink, causing there to be many "
"holes in the background regions of the total intensity "
"map. This may cause the results of this script to be "
"unreliable.\n" )
# Get the data units string and pixel size (in arc-sec).
invoke( "$KAPPA_DIR/ndftrace ndf={0}".format(vcat) )
units = get_task_par( "units", "ndftrace" )
pixsize = float( get_task_par( "fpixscale(1)", "ndftrace" ) )
# Form the basic total intensity SNR map to use when creating the mask.
msg_out( "Masking the {0} and D{0} maps...".format(col) )
snr = NDG( 1 )
invoke( "$KAPPA_DIR/div in1={0} in2={1} out={2}".format(icat,dicat,snr) )
# Omit everything brighter than +/- 3 sigma.
snr2 = NDG( 1 )
invoke( "$KAPPA_DIR/thresh in={0} out={1} thrlo=-3 newlo=bad thrhi=3 "
"newhi=bad".format(snr,snr2) )
# Get the clipped standard deviation of whatever is left.
invoke( "$KAPPA_DIR/stats ndf={0} clip=\[2,2,2\]".format(snr2) )
sigma = float( get_task_par( "sigma", "stats" ) )
# Use FellWalker to find islands of significant emission in the total
# intensity SNR map. Each island must contain one or more pixels with
# SNR above 5 and contains all neighbouring pixels down to an SNR of 2.
conf_fw = NDG.tempfile()
fd = open( conf_fw, "w" )
fd.write("FellWalker.FlatSlope=0\n")
fd.write("FellWalker.MinDip=1.0E30\n")
fd.write("FellWalker.Noise=2*RMS\n")
fd.write("FellWalker.Fwhmbeam={0}\n".format(15/pixsize))
fd.write("FellWalker.MinPix={0}\n".format(round(4*((12/pixsize)**2))))
fd.write("FellWalker.MaxJump=1\n")
fd.write("FellWalker.MaxBad=0.1\n")
fd.write("FellWalker.MinHeight=5*RMS\n")
fd.close()
islands = NDG( 1 )
invoke("$CUPID_DIR/findclumps in={0} out={1} method=fellwalker "
"rms={2} outcat=! config=^{3}".
format( snr, islands, sigma, conf_fw ))
# No masking if no clumps were found.
nclumps = int( get_task_par( "nclumps", "findclumps" ) )
if nclumps == 0:
back = vcat
temp2a = dvcat
# Otherwise use the inverted clump mask to mask out source regions in the two
# catalogue maps.
else:
back = NDG( 1 )
invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2} invert=yes".format(vcat,islands,back) )
temp2a = NDG( 1 )
invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2} invert=yes".format(dvcat,islands,temp2a) )
# If required, smooth the map holding the second error estimate (i.e.
# the "D<X>" values read from the catalogue).
if presmooth is not None:
msg_out( "Pre-smoothing the D{0} map using a Gaussian with FWHM={1} pixels".
format( col, presmooth ) )
temp2b = NDG( 1 )
invoke( "$KAPPA_DIR/mult in1={0} in2={0} out={1}".format(temp2a,temp2b) )
temp2c = NDG( 1 )
invoke( "$KAPPA_DIR/gausmooth in={0} out={1} fwhm={2}".
format(temp2b,temp2c,presmooth) )
temp2 = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'sqrt(ia)'\" ia={0} out={1}".
format(temp2c,temp2) )
else:
temp2 = temp2a
# The size and nature of D<X> bins we use are important. Since there
# is random noise on the D<X> values, the very lowest D<X> values
# will constitute the extreme wing of the distribution centred on the
# lowest "true" D<X> value. Therefore you would expect the corresponding
# estimate measured from the spread of <X> values to be greater than the
# corresponding D<X> value. So if the D<X> bin width is very small the
# very lowest bins will be biased (the <X>-based estimate being biased
# upwards from the D<X> value). To avoid this bias at small D<X>, the
# bin width should be at least equal to the uncertainty in the D<X> value.
# The only way we have to get an estimate of this is to look at the
# width of the peak of the D<X> distribution. So find the peak position
# (the mode of the D<X> histogram) and then get the 32nd percentile of
# the data values below the mode - the difference between them should be
# about 1 standard deviation (i.e. about 0.42 of the peak FWHM).
msg_out( "Binning the {0} and D{0} maps...".format(col) )
invoke( "$KAPPA_DIR/histat ndf={0} percentiles={1} numbin=1000 "
"method=moments".format(temp2,perc) )
mode = float( get_task_par( "mode", "histat" ) )
percval = float( get_task_par( "perval(1)", "histat" ) )
temp5 = NDG( 1 )
invoke( "$KAPPA_DIR/thresh in={0} out={1} thrlo=-1E10 newlo=bad "
"thrhi={2} newhi=bad".format(temp2,temp5,mode) )
invoke( "$KAPPA_DIR/histat ndf={0} percentiles=32".format(temp5) )
pval = float( get_task_par( "perval(1)", "histat" ) )
fwhm = 2.3548*( mode - pval )
binwidth = 0.25*fwhm
# All D<X> bins have the same width (given by binwidth). The lowest bin is centred
# on the mode. The number of bins is determined by the D<X> value corresponding
# to parameter PERC. First get the bin number corresponding to the "perc"
# percentile. Points that have D<X> value higher than this value are ignored.
# Note bin numbers/indices are zero-based.
topbin = round( (percval - mode)/binwidth )
# Get the number of bins
nbin = topbin + 1
# Create an NDF that holds the integer bin number assigned to each map
# pixel.
index0 = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'nint((ia-pa)/pb)'\" ia={0} "
"out={1} pa={2} pb={3}".format(temp2,index0,mode,binwidth) )
invoke( "$KAPPA_DIR/settype ndf={0} type=_integer".format(index0) )
# Threshold this to assign bad bin numbers to bins below index 0 (we use
# this image later as a mask for the displayed maps).
index1 = NDG( 1 )
invoke( "$KAPPA_DIR/thresh in={0} out={1} thrlo=0 newlo=bad "
"thrhi=1E8 newhi=bad".format( index0, index1 ) )
# Threshold it again to assign bad bin numbers to bins above index nbin.
index = NDG( 1 )
invoke( "$KAPPA_DIR/thresh in={0} out={1} thrlo=0 newlo=bad "
"thrhi={2} newhi=bad".format( index1, index, topbin ) )
# Collect the "<X>" values in each bin. The "<X>" values in each bin are
# stored in a single column of the 2D output NDF. The first pixel axis
# in the output NDF corresponds to bin number and goes from 1 to "nbin".
xbin = NDG( 1 )
invoke( "$KAPPA_DIR/pixbin in={0} out={1} index={2}".format(back,xbin,index))
# Collapse the columns to get the sigma-clipped standard deviation of the
# "<X>" values in each bin, and mask it.
xsigma = NDG( 1 )
invoke( "$KAPPA_DIR/collapse in={0} out={1} axis=2 estimator=csigma "
"wlim=0".format(xbin,xsigma) )
# Likewise collect the "D<X>" values in each bin.
dxbin = NDG( 1 )
invoke( "$KAPPA_DIR/pixbin in={0} out={1} index={2}".format(temp2,dxbin,index))
# Collapse the columns to get the RMS of the "D<X>" values in each bin,
# and mask it.
dxrms = NDG( 1 )
invoke( "$KAPPA_DIR/collapse in={0} out={1} axis=2 estimator=rms "
"wlim=0".format(dxbin,dxrms) )
# Mask the images to be displayed to remove pixels below the lowest bin.
temp2_disp = NDG( 1 )
invoke( "$KAPPA_DIR/copybad in={0} out={1} ref={2}".
format(temp2,temp2_disp,index1))
back_disp = NDG( 1 )
invoke( "$KAPPA_DIR/copybad in={0} out={1} ref={2}".
format(back,back_disp,index1))
# Create a basic graphics style file to use when displaying images with
# the kappa display command. We do not need to see the RA and Dec values
# round the edges.
msg_out( "Plotting...".format(col) )
stylefile = NDG.tempfile()
with open( stylefile, "w" ) as fd:
fd.write( "NumLab=0\n" )
fd.write( "TextLab=0\n" )
fd.write( "MajTickLen=0\n" )
fd.write( "MinTickLen=0\n" )
# Include any user-supplied style first.
if style and style != "def":
fd.write( "{0}\n".format(style) )
# Set the graphics device, clear it and divide its area into three pictures.
invoke( "$KAPPA_DIR/gdset device={0}".format(device), buffer=True )
invoke( "$KAPPA_DIR/gdclear", buffer=True )
invoke( "$KAPPA_DIR/picdef mode=a xpic=3 ypic=2 prefix=a outline=no",
buffer=True )
# Display the "D<X>" map in the second picture.
invoke( "$KAPPA_DIR/picsel label=a2", buffer=True )
with open( stylefile, "a" ) as fd:
fd.write( "title=Background D{0} values\n".format(col) )
invoke( "$KAPPA_DIR/display in={0} mode=perc percentiles=\[1,85\] "
"style=^{1} badcol=blue4 margin=0.1".format(temp2_disp,stylefile), buffer=True )
# The perc value is the upper limit of D<X>. Draw a contour at that level.
stylefile2 = NDG.tempfile()
with open( stylefile2, "w" ) as fd:
fd.write("colour(curve)=red\n" )
if style and style != "def":
fd.write( "{0}\n".format(style) )
invoke( "$KAPPA_DIR/contour ndf={0} clear=no key=no mode=free heights={1} "
"style=^{2}".format(temp2,percval,stylefile2))
# Display the <X> map in the first picture.
invoke( "$KAPPA_DIR/picsel label=a1", buffer=True )
with open( stylefile, "a" ) as fd:
fd.write( "title=Background {0} values\n".format(col) )
invoke( "$KAPPA_DIR/display in={0} mode=cur style=^{1} margin=0.1 "
"badcol=blue4".format(back_disp,stylefile), buffer=True )
# Draw the perc value contour.
invoke( "$KAPPA_DIR/contour ndf={0} clear=no key=no mode=free heights={1} "
"style=^{2}".format(temp2,percval,stylefile2))
# Display the scatter plot in the third picture. To avoid a tall thin
# plot, create a square picture inside the third picture.
invoke( "$KAPPA_DIR/picsel label=a3", buffer=True )
invoke( "$KAPPA_DIR/picdef mode=cc current=yes aspect=1 outline=no", buffer=True )
stylefile = NDG.tempfile()
with open( stylefile, "w" ) as fd:
fd.write("colour(curve)=red\n" )
if style and style != "def":
fd.write( "{0}\n".format(style) )
fd.write("Label(1)=Std. devn. of {0} values in each bin ({1})\n".format(col,units))
fd.write("Label(2)=RMS D{0} value in each bin ({1})\n".format(col,units))
fd.write("Title=Errors in {0}\n".format(col) )
invoke( "$KAPPA_DIR/scatter in1={0} in2={1} fit=yes xleft=0 ybot=0 "
"perc1=\[0,100\] perc2=\[0,100\] style=^{2}".
format( xsigma, dxrms, stylefile ), buffer=True )
slope = float( get_task_par( "slope", "scatter" ) )
offset = float( get_task_par( "offset", "scatter" ) )
rms = float( get_task_par( "rms", "scatter" ) )
msg_out("\nLinear fit to scatter plot:")
if offset >= 0:
msg_out(" Y = {0:.2f}*X + {1:.2f}".format(slope,offset) )
else:
msg_out(" Y = {0:.2f}*X - {1:.2f}".format(slope,-offset) )
msg_out(" RMS residual: {0:.2f} {1}\n".format(rms,units))
# ------------------------
# Now do it the other way....
# Get the clipped stats of whatever is left.
invoke( "$KAPPA_DIR/stats ndf={0} clip=\[3,3,3\]".format(snr2) )
# Threshold again, this time at three times the clipped sigma value.
sigma = float( get_task_par( "sigma", "stats" ) )
mask = NDG( 1 )
invoke( "$KAPPA_DIR/thresh in={0} out={1} thrlo={2} newlo=bad thrhi={3} "
"newhi=bad".format(snr,mask,-sigma,sigma) )
# Use this mask to mask out source regions in the two catalogue maps.
back = NDG( 1 )
invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(vcat,mask,back) )
temp2a = NDG( 1 )
invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(dvcat,mask,temp2a) )
# If required, smooth the map holding the second error estimate (i.e.
# the "D<X>" values read from the catalogue).
if presmooth is not None:
msg_out( "Pre-smoothing the D{0} map using a Gaussian with FWHM={1} pixels".
format( col, presmooth ) )
temp2b = NDG( 1 )
invoke( "$KAPPA_DIR/mult in1={0} in2={0} out={1}".format(temp2a,temp2b) )
temp2c = NDG( 1 )
invoke( "$KAPPA_DIR/gausmooth in={0} out={1} fwhm={2}".
format(temp2b,temp2c,presmooth) )
temp2 = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'sqrt(ia)'\" ia={0} out={1}".
format(temp2c,temp2) )
else:
temp2 = temp2a
# Determine the smoothing width to use when forming the first error
# estimate from the local data variations. We use the smoothing width
# that gives the lowest RMS residual to the fit between the two noise
# estimates.
msg_out( "Determining smoothing width (FWHM) that gives best fit...\n" )
msg_out( "# FWHM (pixel) RMS ({0})".format(units) )
# First square the masked data values (needed for each fwhm).
temp3 = NDG( 1 )
invoke( "$KAPPA_DIR/mult in1={0} in2={0} out={1}".format(back,temp3) )
# Produce an image that is zero everywhere except for one central pixel,
# which has value 1.0
temp33 = NDG( 1 )
invoke( "$KAPPA_DIR/creframe like={0} mode=fl mean=0 out={1}".format(back,temp33) )
temp33a = NDG( 1 )
invoke( "$KAPPA_DIR/chpix in={0} out={1} section=\"'~1,~1'\" newval=1".format(temp33,temp33a) )
# Find the RMS using a set of different FWHM values. Remember the FWHM
# that gives the minimum RMS.
rms_list = []
fwhm_list= []
minrms = 1E30
iter = 0
nup = 0
for fwhm in range( 0, 21 ):
iter += 1
# Smooth the squared data values with the current fwhm. Then take the
# square root to get an RMS map. A correction factor is applied because
# of the bias caused by finding the RMS of a very low number of values
# ("v2" is the sum of the squared wieghts used to find the weighted mean of
# the squared values).
temp4 = NDG( 1 )
if fwhm > 0:
invoke( "$KAPPA_DIR/gausmooth in={0} out={1} fwhm={2}".
format(temp3,temp4,fwhm) )
temp4a = NDG( 1 )
invoke( "$KAPPA_DIR/gausmooth in={0} out={1} fwhm={2}".
format(temp33a,temp4a,fwhm) )
temp4b = NDG( 1 )
invoke( "$KAPPA_DIR/mult in1={0} in2={0} out={1}".
format(temp4a,temp4b) )
invoke( "$KAPPA_DIR/stats ndf={0}".format(temp4b))
v2 = float( get_task_par( "total", "stats" ) )
corfac = 1/math.sqrt( 1.0 - v2 )
else:
invoke( "$KAPPA_DIR/ndfcopy in={0} out={1}".format(temp3,temp4) )
corfac = 1.0
temp5 = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'pa*sqrt(ia)'\" ia={0} out={1} pa={2}".
format(temp4,temp5,corfac) )
# Find the best fit line for the scatter plot of temp5 (the RMS of the local
# variations) against temp2 (the errors read from the catalogue).
invoke( "$KAPPA_DIR/scatter in1={0} in2={1} perc1=\[0,{2}\] "
"perc2=\[0,{2}\] fit=yes device=!".format( temp5, temp2, perc ),
buffer=True )
rms = float( get_task_par( "rms", "scatter" ) )
slope = float( get_task_par( "slope", "scatter" ) )
offset = float( get_task_par( "offset", "scatter" ) )
msg_out(" {0} {1:.2f}".format(fwhm,rms) )
# Form lists of the fwhm and rms values.
rms_list.append( rms )
fwhm_list.append( fwhm )
# Find the fwhm with the smallest RMS. Leave the loop when the RMS has
# increased 4 times following a minimum.
if fwhm == 0.0:
pass
elif rms < minrms:
minrms = rms
minfwhm = fwhm
nup = 0
elif nup > 3:
break
else:
nup += 1
# Fit a quadratic to three points centred on the lowest RMS, and find
# the FWHM at the maximum.
fwhm = None
if minfwhm == 0:
fwhm = 0.0
msg_out( "\nNo smoothing produces best fit (FWHM=0)" )
elif minfwhm < 20:
a = np.polyfit( fwhm_list[ minfwhm - 1 : minfwhm + 2 ],
rms_list[ minfwhm - 1 : minfwhm + 2 ], 2 )
b = np.poly1d( a )
if a[0] > 0.0:
fwhm = -0.5*a[1]/a[0]
else:
fwhm = minfwhm
msg_out( "\nBest FWHM is {0:.2f} pixels".format( fwhm ) )
if fwhm is None:
raise starutil.InvalidParameterError("\nCannot determine the best FWHM")
# Form the map of the first error estimate using the optimum fwhm.
temp4 = NDG( 1 )
if fwhm > 0:
invoke( "$KAPPA_DIR/gausmooth in={0} out={1} fwhm={2}".
format(temp3,temp4,fwhm) )
else:
invoke( "$KAPPA_DIR/ndfcopy in={0} out={1}".format(temp3,temp4) )
temp5 = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'sqrt(ia)'\" ia={0} out={1}".
format(temp4,temp5) )
# Create a basic graphics style file to use when displaying images with
# the kappa display command. We do not need to see the RA and Dec values
# round the edges.
stylefile = NDG.tempfile()
with open( stylefile, "w" ) as fd:
fd.write( "NumLab=0\n" )
fd.write( "TextLab=0\n" )
fd.write( "MajTickLen=0\n" )
fd.write( "MinTickLen=0\n" )
# Include any user-supplied style first.
if style and style != "def":
fd.write( "{0}\n".format(style) )
# Display the first error estimate map in the first picture.
invoke( "$KAPPA_DIR/picsel label=a4", buffer=True )
with open( stylefile, "a" ) as fd:
fd.write( "title=Errors from local variations of {0}\n".format(col) )
invoke( "$KAPPA_DIR/display in={0} mode=perc percentiles=\[2,80\] "
"style=^{1} margin=0.1 badcol=blue4".format(temp5,stylefile), buffer=True )
# Calculate the largest temp5 (X) value that will be used in the scatter
# plot (controlled by PERC), and draw a contour at that height over the map.
invoke( "$KAPPA_DIR/histat ndf={0} percentiles={1}".format(temp5,perc) )
xright = get_task_par( "perval(1)", "histat" )
invoke( "$KAPPA_DIR/contour ndf={0} clear=no key=no mode=free heights={1} "
"style=\"'colour=red'\"".format(temp5,xright))
# Display the second error estimate map in the second picture.
invoke( "$KAPPA_DIR/picsel label=a5", buffer=True )
with open( stylefile, "a" ) as fd:
fd.write( "title=Errors from D{0} column\n".format(col) )
invoke( "$KAPPA_DIR/display in={0} mode=cur style=^{1} margin=0.1".
format(temp2,stylefile), buffer=True )
# Calculate the largest temp2 (Y) value that will be used in the scatter
# plot (controlled by PERC), and draw a contour at that height over the map.
invoke( "$KAPPA_DIR/histat ndf={0} percentiles={1}".format(temp2,perc) )
ytop = get_task_par( "perval(1)", "histat" )
stylefile = NDG.tempfile()
with open( stylefile, "w" ) as fd:
fd.write("colour(curve)=red\n" )
if style and style != "def":
fd.write( "{0}\n".format(style) )
invoke( "$KAPPA_DIR/contour ndf={0} clear=no key=no mode=free heights={1} "
"style=^{2}".format(temp2,ytop,stylefile))
# Display the scatter plot in the third picture. To avoid a tall thin
# plot, create a square picture inside the third picture.
invoke( "$KAPPA_DIR/picsel label=a6", buffer=True )
invoke( "$KAPPA_DIR/picdef mode=cc current=yes aspect=1 outline=no", buffer=True )
stylefile = NDG.tempfile()
with open( stylefile, "w" ) as fd:
fd.write("colour(curve)=red\n" )
if style and style != "def":
fd.write( "{0}\n".format(style) )
fd.write("Label(1)=Errors from local variations in {0} ({1})\n".format(col,units))
fd.write("Label(2)=Errors from D{0} column ({1})\n".format(col,units))
fd.write("Title=Errors in {0}\n".format(col) )
invoke( "$KAPPA_DIR/scatter in1={0} in2={1} fit=yes xleft=0 ybot=0 "
"xright={2} ytop={3} style=^{4}".
format( temp5, temp2, xright, ytop, stylefile ), buffer=True )
slope = float( get_task_par( "slope", "scatter" ) )
offset = float( get_task_par( "offset", "scatter" ) )
rms = float( get_task_par( "rms", "scatter" ) )
msg_out("\nLinear fit to scatter plot:")
if offset >= 0:
msg_out(" Y = {0:.2f}*X + {1:.2f}".format(slope,offset) )
else:
msg_out(" Y = {0:.2f}*X - {1:.2f}".format(slope,-offset) )
msg_out(" RMS residual: {0:.2f} {1}\n".format(rms,units))
# Overlay the second result on the first result.
invoke( "$KAPPA_DIR/picsel label=a3", buffer=True )
invoke( "$KAPPA_DIR/scatter in1={0} in2={1} fit=yes xleft=0 ybot=0 "
"xright={2} ytop={3} style=\"'^{4},colour(markers)=blue,colour(curves)=green'\" clear=no".
format( temp5, temp2, xright, ytop, stylefile ), buffer=True )
# Now deal with "remodel" mode.
# -----------------------------
else:
# Get the NDF containing the exposure time map. Report an error if the
# exp_time map is not present in the SMURF extension of the NDF.
exptime0 = parsys["EXPTIME"].value
try:
invoke( "$KAPPA_DIR/ndfecho ndf={0}.more.smurf.exp_time".
format(exptime0) )
exptime = None
except starutil.AtaskError:
raise starutil.InvalidParameterError( "\nNo exp_time map found in "
"{0}".format(exptime0))
# Get the output catalogue
outcat = parsys["OUT"].value
# Get the type of de-biasing
debiastype = parsys["DEBIASTYPE"].value
# Set the graphics device, clear it and divide its area into 12 pictures.
invoke( "$KAPPA_DIR/gdset device={0}".format(device), buffer=True )
invoke( "$KAPPA_DIR/gdclear", buffer=True )
invoke( "$KAPPA_DIR/picdef mode=a xpic=4 ypic=3 prefix=a outline=no",
buffer=True )
apic = 0
# Each pass through the following loop generates a new catalogue based
# on the contents of a previous catalogue. Initially, the "previous
# catalogue" is the supplied catalogue.
prevcat = cat
# Remodel the I, Q and U noise estimates in turn.
for iqu in ("I", "Q", "U"):
msg_out( "\nForming new D{0} values...".format(iqu) )
# First form the background component of the remodeled errors
# -----------------------------------------------------------
msg_out( " Forming background component..." )
# Extract the current Stokes parameter and the error on the current Stokes
# parameter from the input catalogue, creating a pair of 2D NDFs.
vcat = NDG( 1 )
invoke( "$POLPACK_DIR/polimage in={0} out={1} coldat={2} box=1".
format( cat, vcat, iqu ) )
dvcat = NDG( 1 )
invoke( "$POLPACK_DIR/polimage in={0} out={1} coldat=D{2} box=1".
format( cat, dvcat, iqu ) )
# Get the data units string and pixel size (in arc-sec).
invoke( "$KAPPA_DIR/ndftrace ndf={0}".format(dvcat) )
units = get_task_par( "units", "ndftrace" )
pixsize = float( get_task_par( "fpixscale(1)", "ndftrace" ) )
# If this is the first Stokes parameter, align the exposure time map
# with the catalogue map (since it may use a different pixel size).
if exptime is None:
exptime = NDG( 1 )
invoke( "$KAPPA_DIR/wcsalign in={0}.more.smurf.exp_time out={1} "
"ref={2} rebin=yes method=bilin accept".
format( exptime0, exptime, vcat ) )
try:
junk = NDG( exptime, "*" )
except starutil.NoNdfError:
raise starutil.InvalidParameterError( "\nCannot align {0} "
"with the supplied catalogue {1}. "
"Are they for the same field?".
format(exptime0,cat))
# Set pixels bad in the exp time map if they are less than 5% of the mean
# exposure time. This clips off a thin rim round the edge of the map.
invoke( "$KAPPA_DIR/stats ndf={0}".format(exptime) )
mean = float( get_task_par( "mean", "stats" ) )
expmask = NDG( 1 )
invoke( "$KAPPA_DIR/thresh in={0} out={1} thrlo={2} newlo=bad "
"thrhi=1E30 newhi=bad".format( exptime, expmask,
0.05*mean ) )
# Form the Stokes parameter SNR map.
snr = NDG( 1 )
invoke( "$KAPPA_DIR/div in1={0} in2={1} out={2}".format( vcat, dvcat,
snr ) )
# We want a reasonable estimate of the standard deviation in the background
# regions of the SNR map. In principle the standard deviation should be
# 1.0, but the noise estimate may be wrong so we need to calculate it from
# the spread of SNR values. Bright sources will bias the answer so first
# remove values about snr=60.
snr_cut = NDG( 1 )
invoke( "$KAPPA_DIR/thresh in={0} out={1} thrlo=-1E10 newlo=bad "
"thrhi=60 newhi=bad".format( snr, snr_cut ) )
# Now get the stats of the remaining values, doing a single 3-sigma clip to
# remove outliers.
invoke( "$KAPPA_DIR/stats ndf={0} clip=3".format(snr_cut) )
sigma = float( get_task_par( "sigma", "stats" ) )
# Now fill holes in the original SNR map (mainly the blanks map corners)
# with the value zero, including believable noise. This helps ffclean to
# work properly round the map edges.
snr_filled = NDG( 1 )
invoke( "$KAPPA_DIR/nomagic in={0} out={1} repval=0 sigma={2}".
format( snr, snr_filled, sigma ) )
# Identify compact features that are significantly higher than the noise
# in the filled SNR map and set them bad.
snr_cleaned = NDG( 1 )
invoke( "$KAPPA_DIR/ffclean in={0} out={1} box=7 clip=\[2,2,2\]".
format( snr_filled, snr_cleaned ) )
# Expand the blank areas a bit (the degree of expansion depends on the
# box and wlim values). This creates a mask that blanks out source
# regions.
mask0 = NDG( 1 )
invoke( "$KAPPA_DIR/block in={0} out={1} box=3 wlim=0.8".
format( snr_cleaned, mask0 ) )
# Combine this mask with the mask that blanks out the bad rim of the map.
mask = NDG( 1 )
invoke( "$KAPPA_DIR/copybad in={0} out={1} ref={2}".
format( mask0, mask, expmask ) )
# Use this mask to remove unusable areas from the Stokes error map.
dvcat_masked = NDG( 1 )
invoke( "$KAPPA_DIR/copybad in={0} out={1} ref={2}".
format( dvcat, dvcat_masked, mask ) )
# Take the log (base 10) of the above masked Stokes error map and the
# exposure time map.
logdvcat = NDG( 1 )
invoke( "$KAPPA_DIR/logar base=10D0 in={0} out={1}".
format( dvcat_masked, logdvcat ) )
logexptime = NDG( 1 )
invoke( "$KAPPA_DIR/logar base=10D0 in={0} out={1}".
format( exptime, logexptime ) )
# Do a least squares linear fit between the log maps created above, and
# get the slope and offset of the fit.
invoke( "$KAPPA_DIR/normalize in1={0} in2={1} out=! "
"pcrange=\[10,90\] device=!".format(logdvcat,logexptime))
slope = float( get_task_par( "slope", "normalize" ) )
offset = float( get_task_par( "offset", "normalize" ) )
# Create the basic background component as a function of the exposure
# time map.
dvcat_model = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'10**(pa*ia+pb)'\" ia={0} "
"pa={1} pb={2} out={3}".
format(logexptime,slope,offset,dvcat_model))
# There often seems to be a remaining systematic offset between the model
# and the original Stokes error values. Possibly caused by us using a model
# of the form "A*(exptime^B)", which assumes zero offset. So now remove
# any offset by fitting a quadratic surface to the model residuals then
# subtracting the fit from the model.
resid = NDG( 1 )
invoke( "$KAPPA_DIR/sub in1={0} in2={1} out={2}".
format( dvcat_model, dvcat_masked, resid ) )
quad = NDG( 1 )
invoke( "$KAPPA_DIR/surfit in={0} out={1} fittype=poly "
"estimator=median order=2 fitclip=\[1,2,3\]".
format( resid, quad ) )
background_comp = NDG( 1 )
invoke( "$KAPPA_DIR/sub in1={0} in2={1} out={2}".
format( dvcat_model, quad, background_comp ) )
# Now form the source component of the remodeled errors
# -----------------------------------------------------
msg_out( " Forming source component..." )
# Get an NDF section describing the bounds of the whole NDF.
invoke( "$KAPPA_DIR/ndftrace ndf={0}".format(dvcat) )
xlo = get_task_par( "lbound(1)", "ndftrace" )
xhi = get_task_par( "ubound(1)", "ndftrace" )
ylo = get_task_par( "lbound(2)", "ndftrace" )
yhi = get_task_par( "ubound(2)", "ndftrace" )
sec = "{0}:{1},{2}:{3}".format(xlo,xhi,ylo,yhi)
# Get the residual noise values left after subtracting the background
# component from the original noise values, and convert them into
# relative values (i.e. fractions of the background component value).
# Mask using the exposure time mask. This removes a thin rim round the
# edges of the map.
resids = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'(ia-ib)/ib+0*ic'\" ia={0} "
"ib={1} ic={2} out={3}". format( dvcat, background_comp,
expmask, resids ) )
# Get an estimate of the background noise in the resids map, first
# masking it to remove the source regions.
resids2 = NDG(1)
invoke( "$KAPPA_DIR/copybad in={0} out={1} ref={2}".
format(resids,resids2,dvcat_masked) )
invoke( "$KAPPA_DIR/stats ndf={0} clip=\[2,2,2\]".format(resids2) )
sigma = float( get_task_par( "sigma", "stats" ) )
# Remove all background pixels from the original resids map produced by
# maths above (i.e. before the sources were masked out).
resids3 = NDG(1)
invoke( "$KAPPA_DIR/copybad in={0} out={1} invert=yes ref={2}".
format(resids,resids3,mask0) )
# Use FellWalker to find islands of significant emission in the resids
# map.
conf_fw = NDG.tempfile()
fd = open( conf_fw, "w" )
fd.write("FellWalker.FlatSlope=0\n")
fd.write("FellWalker.MinDip=1.0E30\n")
fd.write("FellWalker.Noise=2*RMS\n")
fd.write("FellWalker.MaxJump=1\n")
fd.write("FellWalker.MaxBad=0.1\n")
fd.write("FellWalker.MinHeight=4*RMS\n")
fd.write("FellWalker.Fwhmbeam={0}\n".format(12/pixsize))
fd.write("FellWalker.MinPix={0}\n".format(round(4*((12/pixsize)**2))))
fd.close()
islands = NDG( 1 )
invoke("$CUPID_DIR/findclumps in={0} out={1} method=fellwalker "
"rms={2} outcat=! config=^{3}".
format( resids3, islands, sigma, conf_fw ))
# See how many islands were found.
nisland = int( get_task_par( "nclumps", "findclumps" ) )
# Create a config file to control the GaussClumps algorithm.
conf_gc = NDG.tempfile()
fd = open( conf_gc, "w" )
fd.write( "GaussClumps.allowedge=1\n" )
fd.write( "GaussClumps.maxbad=1\n" )
fd.write( "GaussClumps.rfctol=0.001\n" )
fd.write( "GaussClumps.sa=2\n" )
fd.write( "GaussClumps.maxfn=200\n" )
fd.write( "GaussClumps.fwhmbeam=0.4\n")
fd.write( "GaussClumps.thresh=0.5\n" )
fd.write( "GaussClumps.modellim=0.1\n" )
fd.close()
# Loop round each island found by fellwalker
totgauss = 0
models = []
for iisland in range(nisland):
# First expand the boundary of the island a little.
island_mask = NDG( 1 )
invoke("$KAPPA_DIR/gausmooth in={0}.more.cupid.clumps\({1}\).model\({2}\) "
"out={3} fwhm=3 wlim=0.1".format( islands, iisland+1, sec,
island_mask ) )
island = NDG( 1 )
invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".format( resids,
island_mask, island ) )
# Now use GaussClumps to produce a model of the resids value in the
# expanded island, as the sum of a set of Gaussian blobs. This may fail
# so do it in a try block.
try:
gcmodel = NDG( 1 )
invoke("$CUPID_DIR/findclumps in={0} method=gauss rms={1} "
"outcat=! out={2} config=^{3}".format( island, sigma,
gcmodel, conf_gc ))
# If the model contains at least one Gaussian, ensure the model does not go
# outside the island, and add the model to the list of island models.
ngauss = int( get_task_par( "nclumps", "findclumps" ) )
if ngauss > 0:
totgauss += ngauss
model = NDG( 1 )
invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".
format( gcmodel, island, model ))
models.append( model )
# If GaussClumps fails, ignore the island.
except starutil.AtaskError:
pass
# Paste the Gaussian models for all islands into a single NDF.
if len(models) > 0:
msg_out( " ({0} Gaussian(s) used to model {1} source(s))".format(totgauss,nisland) )
if len(models) == 1:
allmodels = models[ 0 ]
else:
allmodels = NDG( 1 )
invoke("$KAPPA_DIR/paste in={0} out={1} transp=yes".
format( NDG( models ), allmodels ))
# Fill missing pixels with zero
allmodels_filled = NDG( 1 )
invoke("$KAPPA_DIR/nomagic in={0} out={1} repval=0".
format( allmodels, allmodels_filled ))
# Apply some light smoothing.
allmodels_smoothed = NDG( 1 )
invoke("$KAPPA_DIR/gausmooth in={0} out={1} fwhm=1.2".
format( allmodels_filled, allmodels_smoothed ))
# Remove the normalisation to get the final source component of the model.
source_comp = NDG( 1 )
invoke( "$KAPPA_DIR/mult in1={0} in2={1} out={2}".
format( allmodels_smoothed, background_comp, source_comp ))
# Fill the source model with zeros if no sources were found.
else:
msg_out( " (no sources found)" )
source_comp = NDG( 1 )
invoke( "$KAPPA_DIR/cmult in={0} scalar=0 out={1}".
format( dvcat, source_comp ))
# Now form the residual component of the remodeled errors
# -----------------------------------------------------
msg_out( " Forming residual component..." )
# Subtract the background and source components from the original Stokes
# error estimatres to get the residuals.
resids = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'ia-ib-ic'\" ia={0} "
"ib={1} ic={2} out={3}". format( dvcat, background_comp,
source_comp, resids ) )
# Blank out the bad rim round the edge of the map.
resids_masked = NDG( 1 )
invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".
format( resids, expmask, resids_masked ))
# Remove outliers.
resids_cleaned = NDG( 1 )
invoke( "$KAPPA_DIR/ffclean in={0} out={1} box=7 clip=\[2,2,2\]".
format( resids_masked, resids_cleaned ) )
# Smooth. This is a slightly heavier smoothing, and wlim is set low so that
# holes are filled in.
residual_comp = NDG( 1 )
invoke("$KAPPA_DIR/gausmooth in={0} out={1} fwhm=4 wlim=1E-6".
format( resids_cleaned, residual_comp ) )
# Now form the total model and store in the catalogue column
# ----------------------------------------------------------
msg_out( " Updating catalogue D{0} column...".format(iqu) )
total_model = NDG( 1 )
invoke( "$KAPPA_DIR/maths exp=\"'max(0,ia+ib+ic)'\" ia={0} "
"ib={1} ic={2} out={3}". format( background_comp, source_comp,
residual_comp, total_model ) )
newcat = NDG.tempfile( ".FIT" )
invoke( "$POLPACK_DIR/poledit in={0} out={1} mode=ChangeColVals "
"ndf={2} col=D{3}".format(prevcat,newcat,total_model,iqu))
# The next pass through the I/Q/U loop should add its new values into the
# catalogue created on this pass.
prevcat = newcat
# Create a basic graphics style file to use when displaying images with
# the kappa display command. We do not need to see the RA and Dec values
# round the edges.
stylefile = NDG.tempfile()
with open( stylefile, "w" ) as fd:
fd.write( "NumLab=0\n" )
fd.write( "TextLab=0\n" )
fd.write( "MajTickLen=0\n" )
fd.write( "MinTickLen=0\n" )
# Include any user-supplied style first.
if style and style != "def":
fd.write( "{0}\n".format(style) )
# Display the original D<X> map in the first picture.
apic += 1
invoke( "$KAPPA_DIR/picsel label=a{0}".format(apic), buffer=True )
with open( stylefile, "a" ) as fd:
fd.write( "title=Original D{0} values\n".format(iqu) )
invoke( "$KAPPA_DIR/display in={0} mode=perc percentiles=\[1,85\] "
"style=^{1} badcol=blue4 margin=0.05".format(dvcat,stylefile), buffer=True )
# Display the re-modelled D<X> map in the second picture.
apic += 1
invoke( "$KAPPA_DIR/picsel label=a{0}".format(apic), buffer=True )
with open( stylefile, "a" ) as fd:
fd.write( "title=Re-modelled D{0} values\n".format(iqu) )
invoke( "$KAPPA_DIR/display in={0} mode=cur style=^{1} badcol=blue4 "
"margin=0.05".format(total_model,stylefile), buffer=True )
# Display the D<X> residual map in the third picture.
diff = NDG(1)
invoke( "$KAPPA_DIR/sub in1={0} in2={1} out={2}".format(dvcat,total_model,diff) )
apic += 1
invoke( "$KAPPA_DIR/picsel label=a{0}".format(apic), buffer=True )
with open( stylefile, "a" ) as fd:
fd.write( "title=D{0} residuals\n".format(iqu) )
invoke( "$KAPPA_DIR/display in={0} mode=cur style=^{1} badcol=blue4 "
"margin=0.05 mode=perc percentiles=\[2,98\]".format(diff,stylefile), buffer=True )
# Display a scatter plot of the before and after D<X> map in the fourth picture.
apic += 1
invoke( "$KAPPA_DIR/picsel label=a{0}".format(apic), buffer=True )
invoke( "$KAPPA_DIR/picdef mode=cc current=yes aspect=1 outline=no", buffer=True )
stylefile = NDG.tempfile()
with open( stylefile, "w" ) as fd:
fd.write("colour(curve)=red\n" )
if style and style != "def":
fd.write( "{0}\n".format(style) )
fd.write("Label(1)=Original D{0} values\n".format(iqu))
fd.write("Label(2)=Re-modelled D{0} values\n".format(iqu))
fd.write("Title={0} scatter plot ({1})".format(iqu,units) )
invoke( "$KAPPA_DIR/scatter in1={0} in2={1} fit=yes xleft=0 ybot=0 "
"style=^{2} axes=yes".format( dvcat, total_model, stylefile ), buffer=True )
slope = float( get_task_par( "slope", "scatter" ) )
offset = float( get_task_par( "offset", "scatter" ) )
rms = float( get_task_par( "rms", "scatter" ) )
msg_out("\nLinear fit to scatter plot:")
if offset >= 0:
msg_out(" Y = {0:.2f}*X + {1:.2f}".format(slope,offset) )
else:
msg_out(" Y = {0:.2f}*X - {1:.2f}".format(slope,-offset) )
msg_out(" RMS residual: {0:.2f} {1}\n".format(rms,units))
# Now remodeled errors have beens stored in the catalogue (newcat) for
# all three Stokes parameters. Update the other columns in the catalogue
# and create the output catalogue.
msg_out( "\nUpdating other catalogue columns..." )
invoke( "$POLPACK_DIR/poledit in={0} out={1} mode=recalc "
"debiastype={2}".format(newcat,outcat,debiastype))
# Remove temporary files.
cleanup()
# If an StarUtilError of any kind occurred, display the message but hide the
# python traceback. To see the trace back, uncomment "raise" instead.
except starutil.StarUtilError as err:
# raise
print( err )
cleanup()
# This is to trap control-C etc, so that we can clean up temp files.
except:
cleanup()
raise
|
StarlinkREPO_NAMEstarlinkPATH_START.@starlink_extracted@starlink-master@applications@smurf@scripts@pol2noise.py@.PATH_END.py
|
{
"filename": "event_file_writer_v2.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/summary/writer/event_file_writer_v2.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes events to disk in a logdir."""
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import gfile
class EventFileWriterV2(object):
"""Writes `Event` protocol buffers to an event file via the graph.
The `EventFileWriterV2` class is backed by the summary file writer in the v2
summary API (currently in tf.contrib.summary), so it uses a shared summary
writer resource and graph ops to write events.
As with the original EventFileWriter, this class will asynchronously write
Event protocol buffers to the backing file. The Event file is encoded using
the tfrecord format, which is similar to RecordIO.
"""
def __init__(self, session, logdir, max_queue=10, flush_secs=120,
filename_suffix=''):
"""Creates an `EventFileWriterV2` and an event file to write to.
On construction, this calls `tf.contrib.summary.create_file_writer` within
the graph from `session.graph` to look up a shared summary writer resource
for `logdir` if one exists, and create one if not. Creating the summary
writer resource in turn creates a new event file in `logdir` to be filled
with `Event` protocol buffers passed to `add_event`. Graph ops to control
this writer resource are added to `session.graph` during this init call;
stateful methods on this class will call `session.run()` on these ops.
Note that because the underlying resource is shared, it is possible that
other parts of the code using the same session may interact independently
with the resource, e.g. by flushing or even closing it. It is the caller's
responsibility to avoid any undesirable sharing in this regard.
The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
`filename_suffix`) control the construction of the shared writer resource
if one is created. If an existing resource is reused, these arguments have
no effect. See `tf.contrib.summary.create_file_writer` for details.
Args:
session: A `tf.compat.v1.Session`. Session that will hold shared writer
resource. The writer ops will be added to session.graph during this
init call.
logdir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
filename_suffix: A string. Every event file's name is suffixed with
`filename_suffix`.
"""
self._session = session
self._logdir = logdir
self._closed = False
gfile.MakeDirs(self._logdir)
with self._session.graph.as_default():
with ops.name_scope('filewriter'):
file_writer = summary_ops_v2.create_file_writer(
logdir=self._logdir,
max_queue=max_queue,
flush_millis=flush_secs * 1000,
filename_suffix=filename_suffix)
with summary_ops_v2.always_record_summaries(), file_writer.as_default():
self._event_placeholder = array_ops.placeholder_with_default(
constant_op.constant('unused', dtypes.string),
shape=[])
self._add_event_op = summary_ops_v2.import_event(
self._event_placeholder)
self._init_op = file_writer.init() # pylint: disable=assignment-from-no-return
self._flush_op = file_writer.flush() # pylint: disable=assignment-from-no-return
self._close_op = file_writer.close() # pylint: disable=assignment-from-no-return
self._session.run(self._init_op)
def get_logdir(self):
    """Return the directory into which event files are written."""
    return self._logdir
def reopen(self):
    """Reopen the EventFileWriter after a `close()`.

    Subsequent events go into a new events file in the same directory.
    This is a no-op while the writer is still open.
    """
    # Guard clause: nothing to do unless we were previously closed.
    if not self._closed:
        return
    self._closed = False
    self._session.run(self._init_op)
def add_event(self, event):
    """Write an `Event` protocol buffer to the event file.

    Args:
      event: An `Event` protocol buffer. Silently dropped if the writer
        has already been closed.
    """
    if self._closed:
        return
    serialized = event.SerializeToString()
    feed = {self._event_placeholder: serialized}
    self._session.run(self._add_event_op, feed_dict=feed)
def flush(self):
    """Flush all pending events and summaries through to disk.

    Call this to guarantee that everything queued so far has been written.
    """
    self._session.run(self._flush_op)
def close(self):
    """Flush pending events, close the events file, and mark the writer closed.

    Safe to call more than once; repeated calls are no-ops.
    """
    if self._closed:
        return
    self.flush()
    self._session.run(self._close_op)
    self._closed = True
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@summary@writer@event_file_writer_v2.py@.PATH_END.py
|
{
"filename": "index.md",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/providers/pebblo/index.md",
"type": "Markdown"
}
|
# Pebblo
[Pebblo](https://www.daxa.ai/pebblo) enables developers to safely load and retrieve data to promote their Gen AI app to deployment without
worrying about the organization’s compliance and security requirements. The Pebblo SafeLoader identifies semantic topics and entities found in the
loaded data and the Pebblo SafeRetriever enforces identity and semantic controls on the retrieved context. The results are
summarized in the UI or in a PDF report.
## Pebblo Overview:
`Pebblo` provides a safe way to load and retrieve data for Gen AI applications.
It includes:
1. **Identity-aware Safe Loader** that loads data and identifies semantic topics and entities.
2. **SafeRetrieval** that enforces identity and semantic controls on the retrieved context.
3. **User Data Report** that summarizes the data loaded and retrieved.
## Example Notebooks
For more detailed examples of using Pebblo, see the following notebooks:
* [PebbloSafeLoader](/docs/integrations/document_loaders/pebblo) shows how to use Pebblo loader to safely load data.
* [PebbloRetrievalQA](/docs/integrations/providers/pebblo/pebblo_retrieval_qa) shows how to use Pebblo retrieval QA chain to safely retrieve data.
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@providers@pebblo@index.md@.PATH_END.py
|
{
"filename": "00aInstallPackages.py",
"repo_name": "MehrnooshTahani/MC-BLOS",
"repo_path": "MC-BLOS_extracted/MC-BLOS-main/MolecularClouds/00aInstallPackages.py",
"type": "Python"
}
|
'''
Downloads and installs all the packages needed to run these scripts.
'''
import sys
import subprocess

# Install packages:
# Known-good pinned versions, kept for reference:
'''recommended packages = ['numpy<=1.24.2',
                        'scipy<=1.10.1',
                        'astropy<=5.2.1',
                        'pandas<=1.5.3',
                        'matplotlib<=3.7.1',
                        'adjusttext==0.7.3',
                        'requests<=2.28.2',
                        'sklearn==0.0']'''
packages = ['numpy',
            'scipy',
            'astropy',
            'matplotlib',
            'requests',
            # FIX: the PyPI alias package 'sklearn==0.0' has been deprecated and
            # now fails to install; the real distribution name is 'scikit-learn'.
            'scikit-learn',
            'pandas<=1.1.5',
            'adjusttext==0.7.3',
            ]
# One "pip install --force-reinstall <package>" command per package,
# run through the current interpreter so the right environment is targeted.
commands = [[sys.executable, '-m', 'pip', 'install', '--force-reinstall', package] for package in packages]

# Report python version
print("Current Python Version:", sys.version)

# Update PIP first so the installs below use a current resolver.
subprocess.run([sys.executable, '-m', 'pip', 'install', '--upgrade', 'pip'], shell=False)

# Install all the packages (shell=False: args passed as a list, no shell parsing).
for command in commands:
    subprocess.run(command, shell=False)
MehrnooshTahaniREPO_NAMEMC-BLOSPATH_START.@MC-BLOS_extracted@MC-BLOS-main@MolecularClouds@00aInstallPackages.py@.PATH_END.py
|
{
"filename": "test_psf_fitting.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_Workflow/test_psf_fitting.py",
"type": "Python"
}
|
__author__ = "sibirrer"
import pytest
import numpy as np
import copy
import lenstronomy.Util.util as util
import lenstronomy.Util.simulation_util as sim_util
from lenstronomy.ImSim.image_model import ImageModel
from lenstronomy.ImSim.image_linear_solve import ImageLinearFit
import lenstronomy.Util.param_util as param_util
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.Workflow.psf_fitting import PsfFitting
from lenstronomy.Data.imaging_data import ImageData
from lenstronomy.Data.psf import PSF
class TestPSFIteration(object):
    """Tests the iterative PSF-fitting routines (new procedure) of
    lenstronomy's ``PsfFitting`` class against a simulated lensed image."""

    def setup_method(self):
        """Simulate a lensed quasar image with a known Gaussian PSF and build
        the ``PsfFitting`` instance shared by all tests in this class."""
        # data specifics
        sigma_bkg = 0.01  # background noise per pixel
        exp_time = 100  # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
        numPix = 100  # cutout pixel size
        deltaPix = 0.05  # pixel size in arcsec (area per pixel = deltaPix**2)
        fwhm = 0.3  # full width half max of PSF

        # PSF specification
        kwargs_data = sim_util.data_configure_simple(
            numPix, deltaPix, exp_time, sigma_bkg
        )
        data_class = ImageData(**kwargs_data)
        sigma = util.fwhm2sigma(fwhm)
        x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05)
        from lenstronomy.LightModel.Profiles.gaussian import Gaussian

        gaussian = Gaussian()
        # True PSF kernel: a normalized pixelized Gaussian on a 31x31 grid.
        kernel_point_source = gaussian.function(
            x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0
        )
        kernel_point_source /= np.sum(kernel_point_source)
        kernel_point_source = util.array2image(kernel_point_source)
        psf_error_map = np.zeros_like(kernel_point_source)
        self.kwargs_psf = {
            "psf_type": "PIXEL",
            "kernel_point_source": kernel_point_source,
            "psf_error_map": psf_error_map,
        }
        psf_class = PSF(**self.kwargs_psf)
        # 'EXERNAL_SHEAR': external shear
        kwargs_shear = {
            "gamma1": 0.01,
            "gamma2": 0.01,
        }  # gamma_ext: shear strength, psi_ext: shear angel (in radian)
        phi, q = 0.2, 0.8
        e1, e2 = param_util.phi_q2_ellipticity(phi, q)
        kwargs_spemd = {
            "theta_E": 1.0,
            "gamma": 1.8,
            "center_x": 0,
            "center_y": 0,
            "e1": e1,
            "e2": e2,
        }
        lens_model_list = ["SPEP", "SHEAR"]
        self.kwargs_lens = [kwargs_spemd, kwargs_shear]
        lens_model_class = LensModel(lens_model_list=lens_model_list)
        # list of light profiles (for lens and source)
        # 'SERSIC': spherical Sersic profile
        kwargs_sersic = {
            "amp": 1.0,
            "R_sersic": 0.1,
            "n_sersic": 2,
            "center_x": 0,
            "center_y": 0,
        }
        # 'SERSIC_ELLIPSE': elliptical Sersic profile
        phi, q = 0.2, 0.9
        e1, e2 = param_util.phi_q2_ellipticity(phi, q)
        kwargs_sersic_ellipse = {
            "amp": 1.0,
            "R_sersic": 0.6,
            "n_sersic": 7,
            "center_x": 0,
            "center_y": 0,
            "e1": e1,
            "e2": e2,
        }
        lens_light_model_list = ["SERSIC"]
        self.kwargs_lens_light = [kwargs_sersic]
        lens_light_model_class = LightModel(light_model_list=lens_light_model_list)
        source_model_list = ["SERSIC_ELLIPSE"]
        self.kwargs_source = [kwargs_sersic_ellipse]
        source_model_class = LightModel(light_model_list=source_model_list)
        self.kwargs_ps = [
            {"ra_source": 0.0, "dec_source": 0.0, "source_amp": 10.0}
        ]  # quasar point source position in the source plane and intrinsic brightness
        point_source_class = PointSource(
            point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True]
        )
        kwargs_numerics = {
            "supersampling_factor": 3,
            "supersampling_convolution": False,
            "compute_mode": "regular",
            "point_source_supersampling_factor": 3,
        }
        # Forward-model the mock observation with the true PSF ...
        imageModel = ImageModel(
            data_class,
            psf_class,
            lens_model_class,
            source_model_class,
            lens_light_model_class,
            point_source_class,
            kwargs_numerics=kwargs_numerics,
        )
        image_sim = sim_util.simulate_simple(
            imageModel,
            self.kwargs_lens,
            self.kwargs_source,
            self.kwargs_lens_light,
            self.kwargs_ps,
        )
        data_class.update_data(image_sim)
        # ... and build the linear-fit model the PSF iteration operates on.
        self.imageModel = ImageLinearFit(
            data_class,
            psf_class,
            lens_model_class,
            source_model_class,
            lens_light_model_class,
            point_source_class,
            kwargs_numerics=kwargs_numerics,
        )
        self.psf_fitting = PsfFitting(self.imageModel)
        self.kwargs_params = {
            "kwargs_lens": self.kwargs_lens,
            "kwargs_source": self.kwargs_source,
            "kwargs_lens_light": self.kwargs_lens_light,
            "kwargs_ps": self.kwargs_ps,
        }

    def test_update_psf(self):
        """A single PSF update starting from a too-broad Gaussian (fwhm=0.5
        instead of the true 0.3) must move the kernel closer to the truth."""
        fwhm = 0.5
        sigma = util.fwhm2sigma(fwhm)
        x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05)
        from lenstronomy.LightModel.Profiles.gaussian import Gaussian

        gaussian = Gaussian()
        kernel_point_source = gaussian.function(
            x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0
        )
        kernel_point_source /= np.sum(kernel_point_source)
        kernel_point_source = util.array2image(kernel_point_source)
        kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source}
        kwargs_psf_iter = {
            "stacking_method": "median",
            "error_map_radius": 0.5,
            "new_procedure": True,
        }
        kwargs_psf_return, improved_bool, error_map = self.psf_fitting.update_psf(
            kwargs_psf, self.kwargs_params, **kwargs_psf_iter
        )
        assert improved_bool
        kernel_new = kwargs_psf_return["kernel_point_source"]
        kernel_true = self.kwargs_psf["kernel_point_source"]
        kernel_old = kwargs_psf["kernel_point_source"]
        # Squared distance to the true kernel must shrink after the update.
        diff_old = np.sum((kernel_old - kernel_true) ** 2)
        diff_new = np.sum((kernel_new - kernel_true) ** 2)
        assert diff_old > diff_new

        # test STARRED
        # Only exercised when the optional `starred` package is installed.
        try:
            import starred

            run_starred_test = True
        except:
            run_starred_test = False
        if run_starred_test:
            self.psf_fitting._image_model_class.Data._C_D[42, 70] = (
                -1
            )  # introducing one negative value in one of the noise maps cutouts to test warning message
            kwargs_psf_iter_starred = {
                "stacking_method": "median",
                "error_map_radius": 0.5,
                "psf_iter_factor": 1.0,
                "psf_symmetry": 2,  # to test warning message
                "corner_symmetry": 2,  # to test warning message
                "new_procedure": False,
                "use_starred": True,
                "kwargs_starred": {
                    "verbose": False,
                    "lambda_scales": 3,
                    "lambda_hf": 3,
                },
            }
            kwargs_psf_return_starred, improved_bool_starred, error_map_starred = (
                self.psf_fitting.update_psf(
                    kwargs_psf, self.kwargs_params, **kwargs_psf_iter_starred
                )
            )
            assert improved_bool_starred
            kernel_new_starred = kwargs_psf_return_starred["kernel_point_source"]
            diff_new_starred = np.sum((kernel_new_starred - kernel_true) ** 2)
            # print(diff_new_starred, diff_new, diff_old)
            assert diff_old > diff_new_starred

    def test_calc_corner_mask(self):
        """The corner mask is empty for 4-fold symmetry and non-trivial but
        smaller than the full kernel for 6-fold symmetry."""
        kernel_old = np.ones((101, 101))
        nsymmetry = 4
        corner_mask = self.psf_fitting.calc_cornermask(len(kernel_old), nsymmetry)
        assert corner_mask[corner_mask].size == 0
        nsymmetry = 6
        corner_mask = self.psf_fitting.calc_cornermask(len(kernel_old), nsymmetry)
        assert corner_mask[corner_mask].size < kernel_old.size
        assert corner_mask[corner_mask].size > 0

    def test_combine_psf_corner(self):
        """Combining flat kernels with different normalizations must yield a
        flat result (max == min) regardless of the corner-symmetry setting."""
        ## start kernel
        kernel_old = np.ones((101, 101))
        test_updated_kernel = copy.deepcopy(kernel_old)
        ##allow the residuals to have different normaliztions
        kernel_list_new = [
            test_updated_kernel * 2,
            test_updated_kernel,
            test_updated_kernel * 4,
            test_updated_kernel,
        ]
        nsymmetry = 6
        corner_mask = self.psf_fitting.calc_cornermask(len(kernel_old), nsymmetry)
        updated_psf = self.psf_fitting.combine_psf(
            kernel_list_new,
            kernel_old,
            factor=1.0,
            stacking_option="median",
            symmetry=nsymmetry,
            corner_symmetry=1,
            corner_mask=corner_mask,
        )
        ##maybe a better criteria here for floats?
        assert abs(updated_psf.max() - updated_psf.min()) < 1e-10
        updated_psf = self.psf_fitting.combine_psf(
            kernel_list_new,
            kernel_old,
            factor=1.0,
            stacking_option="median",
            symmetry=nsymmetry,
            corner_symmetry=2,
            corner_mask=corner_mask,
        )
        assert abs(updated_psf.max() - updated_psf.min()) < 1e-10

    def test_update_iterative(self):
        """The full iterative loop must converge close to the true kernel
        (squared residual < 0.01), both with and without early breaking."""
        fwhm = 0.5
        sigma = util.fwhm2sigma(fwhm)
        x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05)
        from lenstronomy.LightModel.Profiles.gaussian import Gaussian

        gaussian = Gaussian()
        kernel_point_source = gaussian.function(
            x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0
        )
        kernel_point_source /= np.sum(kernel_point_source)
        kernel_point_source = util.array2image(kernel_point_source)
        kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source}
        kwargs_psf_iter = {
            "stacking_method": "median",
            "psf_symmetry": 2,
            "psf_iter_factor": 0.2,
            "block_center_neighbour": 0.1,
            "error_map_radius": 0.5,
            "new_procedure": True,
        }
        kwargs_params = copy.deepcopy(self.kwargs_params)
        kwargs_ps = kwargs_params["kwargs_ps"]
        # Drop the fixed amplitude so the point-source flux is solved linearly.
        del kwargs_ps[0]["source_amp"]
        print(kwargs_params["kwargs_ps"])
        kwargs_psf_new = self.psf_fitting.update_iterative(
            kwargs_psf, kwargs_params, **kwargs_psf_iter
        )
        kernel_new = kwargs_psf_new["kernel_point_source"]
        kernel_true = self.kwargs_psf["kernel_point_source"]
        kernel_old = kwargs_psf["kernel_point_source"]
        diff_old = np.sum((kernel_old - kernel_true) ** 2)
        diff_new = np.sum((kernel_new - kernel_true) ** 2)
        assert diff_old > diff_new
        assert diff_new < 0.01
        assert "psf_error_map" in kwargs_psf_new

        # Second run: fixed number of iterations, no early break.
        kwargs_psf_new = self.psf_fitting.update_iterative(
            kwargs_psf,
            kwargs_params,
            num_iter=3,
            no_break=True,
            keep_psf_error_map=True,
        )
        kernel_new = kwargs_psf_new["kernel_point_source"]
        kernel_true = self.kwargs_psf["kernel_point_source"]
        kernel_old = kwargs_psf["kernel_point_source"]
        diff_old = np.sum((kernel_old - kernel_true) ** 2)
        diff_new = np.sum((kernel_new - kernel_true) ** 2)
        assert diff_old > diff_new
        assert diff_new < 0.01

    def test_mask_point_source(self):
        """The mask around the (central) point source must leave far-away
        pixels, e.g. (10, 10), unmasked (value 1)."""
        ra_image, dec_image, amp = self.imageModel.PointSource.point_source_list(
            self.kwargs_ps, self.kwargs_lens
        )
        print(ra_image, dec_image, amp)
        x_grid, y_grid = self.imageModel.Data.pixel_coordinates
        x_grid = util.image2array(x_grid)
        y_grid = util.image2array(y_grid)
        radius = 0.5
        mask_point_source = self.psf_fitting.mask_point_source(
            ra_image, dec_image, x_grid, y_grid, radius, i=0
        )
        assert mask_point_source[10, 10] == 1
class TestPSFIterationOld(object):
    """Same PSF-iteration tests as ``TestPSFIteration`` but exercising the
    legacy code path (``new_procedure=False``)."""

    def setup_method(self):
        """Simulate a lensed quasar image with a known Gaussian PSF and build
        the ``PsfFitting`` instance shared by all tests in this class."""
        # data specifics
        sigma_bkg = 0.01  # background noise per pixel
        exp_time = 100  # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
        numPix = 100  # cutout pixel size
        deltaPix = 0.05  # pixel size in arcsec (area per pixel = deltaPix**2)
        fwhm = 0.3  # full width half max of PSF

        # PSF specification
        kwargs_data = sim_util.data_configure_simple(
            numPix, deltaPix, exp_time, sigma_bkg
        )
        data_class = ImageData(**kwargs_data)
        sigma = util.fwhm2sigma(fwhm)
        x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05)
        from lenstronomy.LightModel.Profiles.gaussian import Gaussian

        gaussian = Gaussian()
        # True PSF kernel: a normalized pixelized Gaussian on a 31x31 grid.
        kernel_point_source = gaussian.function(
            x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0
        )
        kernel_point_source /= np.sum(kernel_point_source)
        kernel_point_source = util.array2image(kernel_point_source)
        psf_error_map = np.zeros_like(kernel_point_source)
        self.kwargs_psf = {
            "psf_type": "PIXEL",
            "kernel_point_source": kernel_point_source,
            "psf_error_map": psf_error_map,
        }
        psf_class = PSF(**self.kwargs_psf)
        # 'EXERNAL_SHEAR': external shear
        kwargs_shear = {
            "gamma1": 0.01,
            "gamma2": 0.01,
        }  # gamma_ext: shear strength, psi_ext: shear angel (in radian)
        phi, q = 0.2, 0.8
        e1, e2 = param_util.phi_q2_ellipticity(phi, q)
        kwargs_spemd = {
            "theta_E": 1.0,
            "gamma": 1.8,
            "center_x": 0,
            "center_y": 0,
            "e1": e1,
            "e2": e2,
        }
        lens_model_list = ["SPEP", "SHEAR"]
        self.kwargs_lens = [kwargs_spemd, kwargs_shear]
        lens_model_class = LensModel(lens_model_list=lens_model_list)
        # list of light profiles (for lens and source)
        # 'SERSIC': spherical Sersic profile
        kwargs_sersic = {
            "amp": 1.0,
            "R_sersic": 0.1,
            "n_sersic": 2,
            "center_x": 0,
            "center_y": 0,
        }
        # 'SERSIC_ELLIPSE': elliptical Sersic profile
        phi, q = 0.2, 0.9
        e1, e2 = param_util.phi_q2_ellipticity(phi, q)
        kwargs_sersic_ellipse = {
            "amp": 1.0,
            "R_sersic": 0.6,
            "n_sersic": 7,
            "center_x": 0,
            "center_y": 0,
            "e1": e1,
            "e2": e2,
        }
        lens_light_model_list = ["SERSIC"]
        self.kwargs_lens_light = [kwargs_sersic]
        lens_light_model_class = LightModel(light_model_list=lens_light_model_list)
        source_model_list = ["SERSIC_ELLIPSE"]
        self.kwargs_source = [kwargs_sersic_ellipse]
        source_model_class = LightModel(light_model_list=source_model_list)
        self.kwargs_ps = [
            {"ra_source": 0.0, "dec_source": 0.0, "source_amp": 10.0}
        ]  # quasar point source position in the source plane and intrinsic brightness
        point_source_class = PointSource(
            point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True]
        )
        kwargs_numerics = {
            "supersampling_factor": 3,
            "supersampling_convolution": False,
            "compute_mode": "regular",
            "point_source_supersampling_factor": 3,
        }
        # Forward-model the mock observation with the true PSF ...
        imageModel = ImageModel(
            data_class,
            psf_class,
            lens_model_class,
            source_model_class,
            lens_light_model_class,
            point_source_class,
            kwargs_numerics=kwargs_numerics,
        )
        image_sim = sim_util.simulate_simple(
            imageModel,
            self.kwargs_lens,
            self.kwargs_source,
            self.kwargs_lens_light,
            self.kwargs_ps,
        )
        data_class.update_data(image_sim)
        # ... and build the linear-fit model the PSF iteration operates on.
        self.imageModel = ImageLinearFit(
            data_class,
            psf_class,
            lens_model_class,
            source_model_class,
            lens_light_model_class,
            point_source_class,
            kwargs_numerics=kwargs_numerics,
        )
        self.psf_fitting = PsfFitting(self.imageModel)
        self.kwargs_params = {
            "kwargs_lens": self.kwargs_lens,
            "kwargs_source": self.kwargs_source,
            "kwargs_lens_light": self.kwargs_lens_light,
            "kwargs_ps": self.kwargs_ps,
        }

    def test_update_psf(self):
        """A single legacy-procedure PSF update starting from a too-broad
        Gaussian must move the kernel closer to the truth."""
        fwhm = 0.5
        sigma = util.fwhm2sigma(fwhm)
        x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05)
        from lenstronomy.LightModel.Profiles.gaussian import Gaussian

        gaussian = Gaussian()
        kernel_point_source = gaussian.function(
            x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0
        )
        kernel_point_source /= np.sum(kernel_point_source)
        kernel_point_source = util.array2image(kernel_point_source)
        kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source}
        kwargs_psf_iter = {
            "stacking_method": "median",
            "error_map_radius": 0.5,
            "new_procedure": False,
        }
        kwargs_psf_return, improved_bool, error_map = self.psf_fitting.update_psf(
            kwargs_psf, self.kwargs_params, **kwargs_psf_iter
        )
        assert improved_bool
        kernel_new = kwargs_psf_return["kernel_point_source"]
        kernel_true = self.kwargs_psf["kernel_point_source"]
        kernel_old = kwargs_psf["kernel_point_source"]
        diff_old = np.sum((kernel_old - kernel_true) ** 2)
        diff_new = np.sum((kernel_new - kernel_true) ** 2)
        assert diff_old > diff_new

    def test_update_iterative(self):
        """The legacy iterative loop must converge close to the true kernel
        (squared residual < 0.01), both with and without early breaking."""
        fwhm = 0.5
        sigma = util.fwhm2sigma(fwhm)
        x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05)
        from lenstronomy.LightModel.Profiles.gaussian import Gaussian

        gaussian = Gaussian()
        kernel_point_source = gaussian.function(
            x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0
        )
        kernel_point_source /= np.sum(kernel_point_source)
        kernel_point_source = util.array2image(kernel_point_source)
        kwargs_psf = {
            "psf_type": "PIXEL",
            "kernel_point_source": kernel_point_source,
            "kernel_point_source_init": kernel_point_source,
        }
        kwargs_psf_iter = {
            "stacking_method": "median",
            "psf_symmetry": 2,
            "psf_iter_factor": 0.2,
            "block_center_neighbour": 0.1,
            "error_map_radius": 0.5,
            "new_procedure": False,
            "no_break": False,
            "verbose": True,
            "keep_psf_error_map": False,
        }
        kwargs_params = copy.deepcopy(self.kwargs_params)
        kwargs_ps = kwargs_params["kwargs_ps"]
        # Drop the fixed amplitude so the point-source flux is solved linearly.
        del kwargs_ps[0]["source_amp"]
        print(kwargs_params["kwargs_ps"])
        kwargs_psf_new = self.psf_fitting.update_iterative(
            kwargs_psf, kwargs_params, **kwargs_psf_iter
        )
        kernel_new = kwargs_psf_new["kernel_point_source"]
        kernel_true = self.kwargs_psf["kernel_point_source"]
        kernel_old = kwargs_psf["kernel_point_source"]
        diff_old = np.sum((kernel_old - kernel_true) ** 2)
        diff_new = np.sum((kernel_new - kernel_true) ** 2)
        assert diff_old > diff_new
        assert diff_new < 0.01
        assert "psf_error_map" in kwargs_psf_new

        # Second run: fixed number of iterations, no early break.
        kwargs_psf_new = self.psf_fitting.update_iterative(
            kwargs_psf,
            kwargs_params,
            num_iter=3,
            no_break=True,
            keep_psf_error_map=True,
        )
        kernel_new = kwargs_psf_new["kernel_point_source"]
        kernel_true = self.kwargs_psf["kernel_point_source"]
        kernel_old = kwargs_psf["kernel_point_source"]
        diff_old = np.sum((kernel_old - kernel_true) ** 2)
        diff_new = np.sum((kernel_new - kernel_true) ** 2)
        assert diff_old > diff_new
        assert diff_new < 0.01
# Allow running this test module directly (delegates to pytest's collector).
if __name__ == "__main__":
    pytest.main()
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_Workflow@test_psf_fitting.py@.PATH_END.py
|
{
"filename": "AtmosClass.py",
"repo_name": "wmpg/Supracenter",
"repo_path": "Supracenter_extracted/Supracenter-master/supra/Atmosphere/AtmosClass.py",
"type": "Python"
}
|
import random
import numpy as np
from netCDF4 import Dataset
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from scipy.interpolate import CubicSpline
from supra.Utils.AngleConv import roundToNearest
from supra.Utils.Classes import Constants
from supra.Supracenter.cyzInteg import zInteg
from supra.GUI.Tools.GUITools import *
from supra.Atmosphere.Pressure import pressureConv, estPressure
from supra.Atmosphere.NRLMSISE import getAtmDensity
from wmpl.Utils.TrajConversions import date2JD
from supra.Atmosphere.pyHWM93 import getHWM
consts = Constants()
class AtmosType:
    """Base class for atmosphere sources.

    Provides the shared helpers for interpolating along a lat/lon track,
    converting raw meteorological variables to sounding quantities, and
    spline-smoothing the resulting profiles.
    """

    def __init__(self):
        pass

    def interp(self, lat, lon, div=0.25):
        """
        Approximately interpolates grid points of a division between point A and point B

        lat: [lat_initial, lat_final]
        lon: [lon_initial, lon_final]

        Returns a list of [lat, lon] pairs snapped to the nearest `div` grid.
        """
        # Collect interpolated points
        pts = []
        x0 = lat[0]
        x1 = lat[1]
        y0 = lon[0]
        y1 = lon[1]
        # Track extent measured in grid divisions.
        dx = np.abs(x1 - x0)/div
        dy = np.abs(y1 - y0)/div
        # Approximate upper limit on number of steps
        try:
            # int() raises ValueError for non-finite input (e.g. NaN extents);
            # fall back to a fixed 10 steps in that case.
            steps = np.abs(int((dx - 2)*(dy - 2) + 2)//3 + 1)
        except ValueError:
            steps = 10
        x = np.linspace(x0, x1, steps)
        y = np.linspace(y0, y1, steps)
        # Take all steps and lock them to the nearest grid point
        for i in range(steps):
            pts.append([roundToNearest(x[i], div), roundToNearest(y[i], div)])
        # Defensive fallback: return the snapped endpoints if no steps were taken.
        if len(pts) == 0:
            return [[roundToNearest(x0, div), roundToNearest(y0, div)], [roundToNearest(x1, div), roundToNearest(y1, div)]]
        return pts

    def convert(self, t, u, v, z):
        """
        Converts temp, u-wind, v-wind, geopotential to
        height, sp of sound, wind magnitude and wind direction
        """
        # Ideal-gas speed of sound: sqrt(gamma * R * T / M).
        speed = np.sqrt(consts.GAMMA*consts.R/consts.M_0*t)
        mags = np.sqrt(u**2 + v**2)
        dirs = np.arctan2(u, v)
        # Geopotential -> height via g_0, referenced to the last (lowest) level.
        # NOTE(review): assumes z is ordered top-down so z[-1] is the surface
        # value -- confirm against the data source.
        level = (z - z[-1])/consts.g_0
        return level, speed, mags, dirs

    def spline(self, level, speed, mags, dirs, pressure, interp=100):
        """
        Cubic spline the data for smooth profiles
        """
        # Each quantity is resampled onto the same `interp`-point height grid;
        # the shared new grid is taken from the final call.
        speed, _ = self.splineSec(speed, level, interp=interp)
        mags, _ = self.splineSec(mags, level, interp=interp)
        pressure, _ = self.splineSec(pressure, level, interp=interp)
        dirs, level = self.splineSec(dirs, level, interp=interp)
        return level, speed, mags, dirs, pressure

    def splineSec(self, y, x, interp=100):
        # Fit a cubic spline to y(x) and resample onto interp-1 evenly spaced
        # points. interp=None disables smoothing and returns the data as-is.
        if interp is None:
            return y, x
        # CubicSpline requires strictly increasing x; the input is flipped,
        # which assumes x arrives in descending order.
        x, y = np.flip(x), np.flip(y)
        f = CubicSpline(x, y)
        new_x = np.linspace(x[0], x[-1], interp-1)
        new_y = f(new_x)
        return new_y, new_x
class DefaultW(AtmosType):
    """Fallback atmosphere built from empirical models when no reanalysis
    weather file is available: temperatures from NRLMSISE (via getAtmDensity)
    and winds from HWM93 (via getHWM)."""

    def __init__(self):
        pass

    def genProfile(self, lat, lon, heights, prefs, spline, ref_time):
        """Build a sounding between heights[0] (top) and heights[1] (bottom)
        along the lat/lon track, sampled every `dh` metres and then splined.

        Returns an array of rows [height, speed_of_sound, wind_mag, wind_dir,
        pressure] trimmed to the requested height interval by zInteg.
        """
        dh = 1000  # meters
        # heights[0] > heights[1] is assumed, hence the negative step.
        missing_heights = np.arange(heights[0], heights[1], -dh)
        missing_lats = np.linspace(lat[0], lat[1], len(missing_heights))
        missing_lons = np.linspace(lon[0], lon[1], len(missing_heights))
        # Degenerate interval (span < dh): fall back to just the two endpoints,
        # ordered top-down.
        if len(missing_heights) == 0:
            if heights[0] > heights[1]:
                missing_heights = np.array(heights)
                missing_lats = np.array(lat)
                missing_lons = np.array(lon)
            else:
                missing_heights = np.array([heights[1], heights[0]])
                missing_lats = np.array([lat[1], lat[0]])
                missing_lons = np.array([lon[1], lon[0]])
        # Julian date needed by the NRLMSISE atmosphere model.
        jd = date2JD(ref_time.year, ref_time.month, ref_time.day, ref_time.hour, ref_time.minute, ref_time.second)
        level = []
        speed = []
        mags = []
        dirs = []
        pressure = []
        for hh, la, lo in zip(missing_heights, missing_lats, missing_lons):
            # Temperature from NRLMSISE -> ideal-gas speed of sound.
            t = getAtmDensity(la, lo, hh, jd)
            speed.append(np.sqrt(consts.GAMMA*consts.R/consts.M_0*t))
            # HWM93 winds (expects altitude in km, hence hh/1000).
            u, v = getHWM(ref_time, la, lo, hh/1000)
            mags.append(np.sqrt(u**2 + v**2))
            dirs.append(np.arctan2(u, v))
            pressure.append(estPressure(hh))
            level.append(hh)
        try:
            level, speed, mags, dirs, pressure = self.spline(level, speed, mags, dirs, pressure, interp=spline)
            sounding = []
            for i in range(len(level)):
                sounding.append([level[i], speed[i], mags[i], dirs[i], pressure[i]])
        except ValueError:
            # Spline failed (e.g. too few points): fall back to an isothermal
            # still atmosphere with a 310 m/s sound speed.
            sounding = np.array([[ 0.0, 310, 0.0, 0.0, 0.0],
                                [ 0.0, 310, 0.0, 0.0, 0.0],
                                [99999.0, 310, 0.0, 0.0, 0.0]])
        # Clip/integrate the profile to the exact requested height bounds.
        sounding = zInteg(heights[0], heights[1], np.array(sounding))
        return sounding
class ECMWF(AtmosType):
    """Atmosphere source backed by an ECMWF reanalysis NetCDF file.

    Loads a gridded subsection of temperature, u/v winds and geopotential
    around a point, optionally a coarser "spread" file for perturbations,
    and produces splined sounding profiles along a lat/lon track.
    """

    def __init__(self, lat, lon, rng, time, file_name):
        # the grided subsection of the world
        self.generateECMWF(lat, lon, rng, time, file_name)

    def readNCDF(self, lat, lon, rng, time, file_name, div):
        """Read t/u/v/z cubes from the NetCDF file on a (2*rng/div + 1)-wide
        lat/lon window centred on the grid point nearest (lat, lon).

        div is the file's grid spacing in degrees (0.25 main, 0.5 spread).
        """
        dataset = Dataset(file_name, "r+", format="NETCDF4")
        # NOTE(review): `time.time().hour()` treats time as an object whose
        # .time() returns something with callable .hour()/.minute() (looks like
        # a Qt QDateTime); a datetime.datetime would need the .hour attribute
        # instead -- confirm the caller's type.
        hour = time.time().hour()
        if time.time().minute() > 30:
            hour += 1
        # `time` is rebound here from the argument to the file's time axis.
        time = np.array(dataset.variables['time'])
        longitude = np.array(dataset.variables['longitude'])
        latitude = np.array(dataset.variables['latitude'])
        # Snap the requested point to the file grid; longitudes kept in [0, 360).
        lat = roundToNearest(lat, div)
        lon = roundToNearest(lon, div)%360
        try:
            lon_index = int(np.where(longitude==lon)[0])
            lat_index = int(np.where(latitude==lat)[0])
        except TypeError as e:
            # For when we can't find the right longitude and latitude in the data (Thanks Jouse for finding this!)
            errorMessage('Unable to find exact lat/lon in given weather profile, using closest values!', 1, detail='{:}'.format(e))
            lon_index = int(np.argmin(np.abs(longitude-lon)))
            lat_index = int(np.argmin(np.abs(latitude-lat)))
        # Assumes the file's time axis is hourly starting at hour 1.
        time_index = int(hour - 1)
        idx_rng = int(np.ceil(rng/div))
        try:
            temperature = np.array(dataset.variables['t'][time_index, :, lat_index-idx_rng:lat_index+idx_rng+1, lon_index-idx_rng:lon_index+idx_rng+1])
            x_wind = np.array(dataset.variables['u'][time_index, :, lat_index-idx_rng:lat_index+idx_rng+1, lon_index-idx_rng:lon_index+idx_rng+1])
            y_wind = np.array(dataset.variables['v'][time_index, :, lat_index-idx_rng:lat_index+idx_rng+1, lon_index-idx_rng:lon_index+idx_rng+1])
            geo = np.array(dataset.variables['z'][time_index, :, lat_index-idx_rng:lat_index+idx_rng+1, lon_index-idx_rng:lon_index+idx_rng+1])
        except IndexError:
            # if only the needed time was downloaded
            temperature = np.array(dataset.variables['t'][0, :, lat_index-idx_rng:lat_index+idx_rng+1, lon_index-idx_rng:lon_index+idx_rng+1])
            x_wind = np.array(dataset.variables['u'][0, :, lat_index-idx_rng:lat_index+idx_rng+1, lon_index-idx_rng:lon_index+idx_rng+1])
            y_wind = np.array(dataset.variables['v'][0, :, lat_index-idx_rng:lat_index+idx_rng+1, lon_index-idx_rng:lon_index+idx_rng+1])
            geo = np.array(dataset.variables['z'][0, :, lat_index-idx_rng:lat_index+idx_rng+1, lon_index-idx_rng:lon_index+idx_rng+1])
        lats = latitude[lat_index-idx_rng:lat_index+idx_rng+1]
        lons = longitude[lon_index-idx_rng:lon_index+idx_rng+1]
        return temperature, x_wind, y_wind, geo, lats, lons

    def generateECMWF(self, lat, lon, rng, time, file_name):
        # Cache the main (0.25-degree) analysis cubes on the instance.
        temperature, x_wind, y_wind, geo, lats, lons = self.readNCDF(lat, lon, rng, time, file_name, 0.25)
        self.temperature = temperature
        self.x_wind = x_wind
        self.y_wind = y_wind
        self.geo = geo
        self.lats = lats
        self.lons = lons

    def spread(self, lat, lon, rng, time, file_name):
        # Cache the ensemble-spread (0.5-degree) cubes used for perturbations.
        temperature, x_wind, y_wind, geo, lats, lons = self.readNCDF(lat, lon, rng, time, file_name, 0.5)
        self.temperature_spr = temperature
        self.x_wind_spr = x_wind
        self.y_wind_spr = y_wind
        self.geo_spr = geo
        self.lats_spr = lats
        self.lons_spr = lons

    def perturb(self, t, u, v, z, t_spr, u_spr, v_spr):
        """Draw one Gaussian perturbation of the profile, treating the spread
        values as per-level standard deviations. z is passed through."""
        t_all = []
        u_all = []
        v_all = []
        # Using a different random number for each variable and height
        for level in range(len(t)):
            t_all.append(random.gauss(t[level], t_spr[level]))
            u_all.append(random.gauss(u[level], u_spr[level]))
            v_all.append(random.gauss(v[level], v_spr[level]))
        return np.array(t_all), np.array(u_all), np.array(v_all), z

    def generateProfile(self, t, u, v, z, h, lats, lons, spline=100, ref_time=None):
        """Convert raw (t, u, v, z) columns to a splined sounding between
        h[0] (top) and h[1] (bottom), topping up heights above the ECMWF data
        with NRLMSISE temperatures and HWM93 winds."""
        level, speed, mags, dirs = self.convert(t, u, v, z)
        # NOTE(review): pressureConv is called with no arguments here --
        # confirm its signature/return length matches `level`.
        pressure = pressureConv()
        # # Temp Hack to get NRL data
        # speed = []
        # lat = lats[0]
        # lon = lons[0]
        # jd = date2JD(ref_time.year, ref_time.month, ref_time.day, ref_time.hour, ref_time.minute, ref_time.second)
        # for hh in level:
        #     t = getAtmDensity(lat, lon, hh, jd)
        #     speed.append(np.sqrt(consts.GAMMA*consts.R/consts.M_0*t))
        # speed = np.array(speed)
        # #######################
        # If the region extends above available data
        dh = 2000  # meters
        if h[0] > level[0]:
            missing_heights = np.arange(level[0] + dh, h[0], dh)
            lat = lats[0]
            lon = lons[0]
            jd = date2JD(ref_time.year, ref_time.month, ref_time.day, ref_time.hour, ref_time.minute, ref_time.second)
            for hh in missing_heights:
                ### SLOW PART
                t = getAtmDensity(lat, lon, hh, jd)
                u, v = getHWM(ref_time, lat, lon, hh/1000)
                mag = np.sqrt(u**2 + v**2)
                d = np.arctan2(u, v)
                p = estPressure(hh)
                ###############
                # Prepend so the profile stays ordered top-down.
                speed = np.insert(speed, 0, np.sqrt(consts.GAMMA*consts.R/consts.M_0*t))
                mags = np.insert(mags, 0, mag)
                dirs = np.insert(dirs, 0, d)
                level = np.insert(level, 0, hh)
                pressure = np.insert(pressure, 0, p)
        level, speed, mags, dirs, pressure = self.spline(level, speed, mags, dirs, pressure, interp=spline)
        sounding = []
        for i in range(len(level)):
            sounding.append([level[i], speed[i], mags[i], dirs[i], pressure[i]])
        sounding = zInteg(h[0], h[1], np.array(sounding))
        return sounding

    def getProfile(self, lat, lon, heights, prefs, spline=100, ref_time=None, perturbations=None):
        ''' Interpolates between starting and ending locations, converts, and splines
            resulting curve into a smooth profile

            lat = [start lat, end lat]

            Returns (sounding, perturbations): the nominal profile plus an
            array of perturbed profiles (empty unless spread data was loaded
            and perturbations were requested).
        '''
        spread = True
        if perturbations is None:
            perturb = prefs.pert_num
        else:
            perturb = perturbations
        # Grid points along the track, snapped to the 0.25-degree grid.
        pts = self.interp(lat, lon, div=0.25)
        t = []
        u = []
        v = []
        z = []
        t_spr = []
        u_spr = []
        v_spr = []
        z_spr = []
        num_pts = len(pts)
        num_lvl = len(self.temperature)
        last_frac = 0
        range_lats = lat
        range_lons = lon
        for ii, pt in enumerate(pts):
            lat = roundToNearest(pt[0], 0.25)
            lon = roundToNearest(pt[1], 0.25)%360
            # Clamp to the nearest window edge when the point falls outside
            # the cached lat/lon window (np.where then raises TypeError).
            try:
                lon_index = int(np.where(self.lons==lon)[0])
            except TypeError:
                if np.abs(lon - self.lons[-1]) < np.abs(lon - self.lons[0]):
                    lon_index = 0
                else:
                    lon_index = len(self.lons) - 1
            try:
                lat_index = int(np.where(self.lats==lat)[0])
            except TypeError:
                if np.abs(lat - self.lats[-1]) < np.abs(lat - self.lats[0]):
                    lat_index = 0
                else:
                    lat_index = len(self.lats) - 1
            # Each track point contributes its share of the vertical levels,
            # so the profile sweeps from one endpoint's column to the other's.
            frac = int(np.around((ii+1)/num_pts*num_lvl) + 1)
            t.append(self.temperature[last_frac:frac, lat_index, lon_index])
            u.append(self.x_wind[last_frac:frac, lat_index, lon_index])
            v.append(self.y_wind[last_frac:frac, lat_index, lon_index])
            z.append(self.geo[last_frac:frac, lat_index, lon_index])
            if perturb > 0 and prefs.pert_en:
                # Spread file is on a coarser 0.5-degree grid.
                lat = roundToNearest(pt[0], 0.5)
                lon = roundToNearest(pt[1], 0.5)%360
                # I'm so sorry
                try:
                    try:
                        lon_index = int(np.where(self.lons_spr==lon)[0])
                    except TypeError:
                        if np.abs(lon - self.lons_spr[-1]) < np.abs(lon - self.lons_spr[0]):
                            lon_index = 0
                        else:
                            lon_index = len(self.lons_spr) - 1
                    try:
                        lat_index = int(np.where(self.lats_spr==lat)[0])
                    except TypeError:
                        if np.abs(lat - self.lats_spr[-1]) < np.abs(lat - self.lats_spr[0]):
                            lat_index = 0
                        else:
                            lat_index = len(self.lats_spr) - 1
                    t_spr.append(self.temperature_spr[last_frac:frac, lat_index, lon_index])
                    u_spr.append(self.x_wind_spr[last_frac:frac, lat_index, lon_index])
                    v_spr.append(self.y_wind_spr[last_frac:frac, lat_index, lon_index])
                    z_spr.append(self.geo_spr[last_frac:frac, lat_index, lon_index])
                except AttributeError:
                    spread = False
                    # no spread has been added
                    pass
            last_frac = frac
        # Flatten the per-point slices into single columns.
        t = np.array([item for sublist in t for item in sublist])
        u = np.array([item for sublist in u for item in sublist])
        v = np.array([item for sublist in v for item in sublist])
        z = np.array([item for sublist in z for item in sublist])
        t_spr = np.array([item for sublist in t_spr for item in sublist])
        u_spr = np.array([item for sublist in u_spr for item in sublist])
        v_spr = np.array([item for sublist in v_spr for item in sublist])
        z_spr = np.array([item for sublist in z_spr for item in sublist])
        sounding = self.generateProfile(t, u, v, z, heights, range_lats, range_lons, spline=spline, ref_time=ref_time)
        perturbations = []
        if perturb > 0 and prefs.pert_en and spread:
            for i in range(perturb):
                per_sounding = self.generateProfile(*self.perturb(t, u, v, z, t_spr, u_spr, v_spr), heights, range_lats, range_lons, spline=spline, ref_time=ref_time)
                perturbations.append(np.array(per_sounding))
        perturbations = np.array(perturbations)
        return sounding, perturbations
class Radio:
    """Placeholder for radiosonde-based atmosphere handling.

    Currently a stub: constructing it performs no work and stores no state.
    """

    def __init__(self):
        # Intentionally empty — reserved for future radiosonde support.
        pass
# Library module: running it directly is a no-op.
if __name__ == '__main__':
    pass
|
wmpgREPO_NAMESupracenterPATH_START.@Supracenter_extracted@Supracenter-master@supra@Atmosphere@AtmosClass.py@.PATH_END.py
|
{
"filename": "fitsfunc.py",
"repo_name": "healpy/healpy",
"repo_path": "healpy_extracted/healpy-main/lib/healpy/fitsfunc.py",
"type": "Python"
}
|
#
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
"""Provides input and output functions for Healpix maps, alm, and cl.
"""
from __future__ import division
import logging
log = logging.getLogger("healpy")
import pathlib
import astropy.io.fits as pf
import numpy as np
from .utils.deprecation import deprecated_renamed_argument
from . import pixelfunc
from .sphtfunc import Alm
from .pixelfunc import UNSEEN
from . import cookbook as cb
from .utils.deprecation import deprecated
# Default FITS column names keyed by the number of map columns being written;
# consulted by write_map when the caller does not supply column_names.
standard_column_names = {
    1: "TEMPERATURE",
    2: ["Q_POLARISATION", "U_POLARISATION"],
    3: ["TEMPERATURE", "Q_POLARISATION", "U_POLARISATION"],
    6: ["II", "IQ", "IU", "QQ", "QU", "UU"],
}

# Types accepted wherever a filesystem path is expected (open-by-name branch).
allowed_paths = (str, pathlib.Path)
@deprecated(since="1.15.0")
class HealpixFitsWarning(Warning):
    """Deprecated warning category, kept only for backward compatibility."""

    pass
def read_cl(filename):
    """Reads Cl from a healpix file, as IDL fits2cl.

    Parameters
    ----------
    filename : str or HDUList or HDU or pathlib.Path instance
        the fits file name

    Returns
    -------
    cl : array
        the cl array; a single spectrum is returned as a 1-D array,
        several spectra as a 2-D array (one row per column in the file)
    """
    # Only close the file if we opened it ourselves.
    must_close = isinstance(filename, allowed_paths)
    if must_close:
        filename = pf.open(filename)
    fits_hdu = _get_hdu(filename, hdu=1)
    ncols = len(fits_hdu.columns)
    cl = np.array([fits_hdu.data.field(col) for col in range(ncols)])
    if must_close:
        filename.close()
    return cl[0] if len(cl) == 1 else cl
def write_cl(filename, cl, dtype=None, overwrite=False):
    """Writes Cl into a healpix file, as IDL cl2fits.

    Parameters
    ----------
    filename : str
        the fits file name
    cl : array
        the cl array to write to file; either a single 1-D spectrum or a
        sequence of spectra of equal length
    dtype : np.dtype (optional)
        The datatype in which the columns will be stored. If not supplied,
        the dtype of the input cl will be used. This changed in `healpy` 1.15.0,
        in previous versions, cl by default were saved in `float64`.
    overwrite : bool, optional
        If True, existing file is silently overwritten. Otherwise trying to write
        an existing file raises an OSError.
    """
    if dtype is None:
        # A bare ndarray carries its own dtype; a sequence of arrays is
        # represented by the dtype of its first element.
        if isinstance(cl, np.ndarray):
            dtype = cl.dtype
        else:
            dtype = cl[0].dtype
    fitsformat = getformat(dtype)
    column_names = ["TEMPERATURE", "GRADIENT", "CURL", "G-T", "C-T", "C-G"]
    ndim = len(np.shape(cl))
    if ndim == 2:
        cols = []
        for column_name, column_cl in zip(column_names[: len(cl)], cl):
            cols.append(
                pf.Column(name=column_name, format="%s" % fitsformat, array=column_cl)
            )
    elif ndim == 1:
        # Single spectrum: write only the temperature column.
        cols = [pf.Column(name="TEMPERATURE", format="%s" % fitsformat, array=cl)]
    else:
        raise RuntimeError("write_cl: Expected one or more vectors of equal length")
    tbhdu = pf.BinTableHDU.from_columns(cols)
    tbhdu.header["CREATOR"] = "healpy"
    # str() works around https://github.com/astropy/astropy/issues/10594
    tbhdu.writeto(str(filename), overwrite=overwrite)
def write_map(
    filename,
    m,
    nest=False,
    dtype=None,
    fits_IDL=True,
    coord=None,
    partial=False,
    column_names=None,
    column_units=None,
    extra_header=(),
    overwrite=False,
):
    """Writes a healpix map into a healpix FITS file.

    .. warning::
        Starting from healpy 1.15.0, if you do not specify `dtype`,
        the map will be written to disk with the same precision it is stored in memory.
        Previously, by default `healpy` wrote maps in `float32`.
        To reproduce the same behaviour of `healpy` 1.14.0 and below, set `dtype=np.float32`.

    Parameters
    ----------
    filename : str
        the fits file name
    m : array or sequence of 3 arrays
        the map to write. Possibly a sequence of 3 maps of same size.
        They will be considered as I, Q, U maps.
        Supports masked maps, see the `ma` function.
    nest : bool, optional
        If True, ordering scheme is assumed to be NESTED, otherwise, RING. Default: RING.
        The map ordering is not modified by this function, the input map array
        should already be in the desired ordering (run `ud_grade` beforehand).
    fits_IDL : bool, optional
        If True, reshapes columns in rows of 1024, otherwise all the data will
        go in one column. Default: True
    coord : str
        The coordinate system, typically 'E' for Ecliptic, 'G' for Galactic or 'C' for
        Celestial (equatorial)
    partial : bool, optional
        If True, fits file is written as a partial-sky file with explicit indexing.
        Otherwise, implicit indexing is used. Default: False.
    column_names : str or list
        Column name or list of column names, if None here the default column names based on
        the number of columns:
        1 : "TEMPERATURE",
        2 : ["Q_POLARISATION", "U_POLARISATION"],
        3 : ["TEMPERATURE", "Q_POLARISATION", "U_POLARISATION"],
        6 : ["II", "IQ", "IU", "QQ", "QU", "UU"]
        COLUMN_1, COLUMN_2... otherwise (FITS is 1-based)
    column_units : str or list
        Units for each column, or same units for all columns.
    extra_header : list
        Extra records to add to FITS header.
    dtype: np.dtype or list of np.dtypes, optional
        The datatype in which the columns will be stored. Will be converted
        internally from the numpy datatype to the fits convention. If a list,
        the length must correspond to the number of map arrays.
        Default: use the data type of the input array(s)

        .. note::
            this changed in 1.15.0, previous versions saved in float32
            by default
    overwrite : bool, optional
        If True, existing file is silently overwritten. Otherwise trying to write
        an existing file raises an OSError (IOError for Python 2).
    """
    if not hasattr(m, "__len__"):
        raise TypeError("The map must be a sequence")
    # Masked arrays are converted so UNSEEN pixels survive the write.
    m = pixelfunc.ma_to_array(m)
    if pixelfunc.maptype(m) == 0:  # a single map is converted to a list
        m = [m]
    # check the dtype and convert it
    if dtype is None:
        dtype = [x.dtype for x in m]
        log.warning("setting the output map dtype to %s" % str(dtype))
    try:
        fitsformat = []
        for curr_dtype in dtype:
            fitsformat.append(getformat(curr_dtype))
    except TypeError:
        # dtype is not iterable: one scalar dtype applied to every column
        fitsformat = [getformat(dtype)] * len(m)
    if column_names is None:
        column_names = standard_column_names.get(
            len(m), ["COLUMN_%d" % n for n in range(1, len(m) + 1)]
        )
    else:
        # NOTE(review): assert-based validation disappears under `python -O`
        assert len(column_names) == len(m), "Length column_names != number of maps"
    if column_units is None or isinstance(column_units, str):
        # broadcast a single unit (or None) to every column
        column_units = [column_units] * len(m)
    # maps must have same length
    assert len(set(map(len, m))) == 1, "Maps must have same length"
    nside = pixelfunc.npix2nside(len(m[0]))
    if nside < 0:
        raise ValueError("Invalid healpix map : wrong number of pixel")
    cols = []
    if partial:
        # Explicit indexing: keep only good pixels and prepend a PIXEL column.
        fits_IDL = False
        mask = pixelfunc.mask_good(m[0])
        pix = np.where(mask)[0]
        if len(pix) == 0:
            raise ValueError("Invalid healpix map : empty partial map")
        m = [mm[mask] for mm in m]
        # Negating the max pixel index forces a *signed* minimal scalar type,
        # which maps onto a valid FITS integer code.
        ff = getformat(np.min_scalar_type(-pix.max()))
        if ff is None:
            ff = "I"
        cols.append(pf.Column(name="PIXEL", format=ff, array=pix, unit=None))
    for cn, cu, mm, curr_fitsformat in zip(column_names, column_units, m, fitsformat):
        if len(mm) > 1024 and fits_IDL:
            # I need an ndarray, for reshape:
            mm2 = np.asarray(mm)
            cols.append(
                pf.Column(
                    name=cn,
                    format="1024%s" % curr_fitsformat,
                    array=mm2.reshape(mm2.size // 1024, 1024),
                    unit=cu,
                )
            )
        else:
            cols.append(
                pf.Column(name=cn, format="%s" % curr_fitsformat, array=mm, unit=cu)
            )
    tbhdu = pf.BinTableHDU.from_columns(cols)
    # add needed keywords
    tbhdu.header["PIXTYPE"] = ("HEALPIX", "HEALPIX pixelisation")
    if nest:
        ordering = "NESTED"
    else:
        ordering = "RING"
    tbhdu.header["ORDERING"] = (
        ordering,
        "Pixel ordering scheme, either RING or NESTED",
    )
    if coord:
        tbhdu.header["COORDSYS"] = (
            coord,
            "Ecliptic, Galactic or Celestial (equatorial)",
        )
    tbhdu.header["EXTNAME"] = ("xtension", "name of this binary table extension")
    tbhdu.header["NSIDE"] = (nside, "Resolution parameter of HEALPIX")
    if not partial:
        # FIRSTPIX/LASTPIX only make sense for full-sky (implicit) indexing.
        tbhdu.header["FIRSTPIX"] = (0, "First pixel # (0 based)")
        tbhdu.header["LASTPIX"] = (
            pixelfunc.nside2npix(nside) - 1,
            "Last pixel # (0 based)",
        )
    tbhdu.header["INDXSCHM"] = (
        "EXPLICIT" if partial else "IMPLICIT",
        "Indexing: IMPLICIT or EXPLICIT",
    )
    tbhdu.header["OBJECT"] = (
        "PARTIAL" if partial else "FULLSKY",
        "Sky coverage, either FULLSKY or PARTIAL",
    )
    # FIXME: In modern versions of Pyfits, header.update() understands a
    # header as an argument, and headers can be concatenated with the `+'
    # operator.
    for args in extra_header:
        tbhdu.header[args[0]] = args[1:]
    # Add str to convert pathlib.Path into str
    # Due to https://github.com/astropy/astropy/issues/10594
    tbhdu.writeto(str(filename), overwrite=overwrite)
@deprecated_renamed_argument("verbose", None, "1.15.0")
def read_map(
    filename,
    field=0,
    dtype=None,
    nest=False,
    partial=False,
    hdu=1,
    h=False,
    verbose=True,
    memmap=False,
):
    """Read a healpix map from a fits file.  Partial-sky files,
    if properly identified, are expanded to full size and filled with UNSEEN.

    .. warning::
        Starting from healpy 1.15.0, if you do not specify `dtype`,
        the map will be read in memory with the same precision it is stored on disk.
        Previously, by default `healpy` wrote maps in `float32` and then upcast to
        `float64` when reading to memory. To reproduce the same behaviour of `healpy`
        1.14.0 and below, set `dtype=np.float64` in `read_map`.

    Parameters
    ----------
    filename : str or HDU or HDUList or pathlib.Path instance
        the fits file name
    field : int or tuple of int, or None, optional
        The column to read. Default: 0.
        By convention 0 is temperature, 1 is Q, 2 is U.
        Field can be a tuple to read multiple columns (0,1,2)
        If the fits file is a partial-sky file, field=0 corresponds to the
        first column after the pixel index column.
        If None, all columns are read in.
    dtype : data type or list of data types, optional
        Force the conversion to some type. Passing a list allows different
        types for each field. In that case, the length of the list must
        correspond to the length of the field parameter.
        If None, keep the dtype of the input FITS file
        Default: Preserve the data types in the file
    nest : bool, optional
        If True return the map in NEST ordering, otherwise in RING ordering;
        use fits keyword ORDERING to decide whether conversion is needed or not
        If None, no conversion is performed.
    partial : bool, optional
        If True, fits file is assumed to be a partial-sky file with explicit indexing,
        if the indexing scheme cannot be determined from the header.
        If False, implicit indexing is assumed. Default: False.
        A partial sky file is one in which OBJECT=PARTIAL and INDXSCHM=EXPLICIT,
        and the first column is then assumed to contain pixel indices.
        A full sky file is one in which OBJECT=FULLSKY and INDXSCHM=IMPLICIT.
        At least one of these keywords must be set for the indexing
        scheme to be properly identified.
    hdu : int, optional
        the header number to look at (start at 0)
    h : bool, optional
        If True, return also the header. Default: False.
    verbose : bool, deprecated
        It has no effect
    memmap : bool, optional
        Argument passed to astropy.io.fits.open, if True, the map is not read into memory,
        but only the required pixels are read when needed. Default: False.

    Returns
    -------
    m | (m0, m1, ...) [, header] : array or a tuple of arrays, optionally with header appended
        The map(s) read from the file, and the header if *h* is True.
    """
    opened_file = False
    if isinstance(filename, allowed_paths):
        filename = pf.open(filename, memmap=memmap)
        opened_file = True
    fits_hdu = _get_hdu(filename, hdu=hdu, memmap=memmap)

    # Resolution: trust NSIDE if present, otherwise infer from the data size.
    nside = fits_hdu.header.get("NSIDE")
    if nside is None:
        log.info("No NSIDE in the header file : will use length of array")
        pix = fits_hdu.data.field(0)
        nside = pixelfunc.npix2nside(pix.size)
    else:
        nside = int(nside)
    log.info("NSIDE = %d", nside)
    if not pixelfunc.isnsideok(nside):
        raise ValueError("Wrong nside parameter.")
    ordering = fits_hdu.header.get("ORDERING", "UNDEF").strip()
    if ordering == "UNDEF":
        ordering = nest and "NESTED" or "RING"
        log.info("No ORDERING keyword in header file : assume %s", ordering)
    log.info("ORDERING = %s in fits file", ordering)
    sz = pixelfunc.nside2npix(nside)
    ret = []

    # partial sky: check OBJECT, then INDXSCHM; header keywords override
    # the `partial` argument, which is only a fallback.
    obj = str(fits_hdu.header.get("OBJECT", "UNDEF")).strip()
    if obj != "UNDEF":
        if obj == "PARTIAL":
            partial = True
        elif obj == "FULLSKY":
            partial = False
    schm = fits_hdu.header.get("INDXSCHM", "UNDEF").strip()
    if schm != "UNDEF":
        if schm == "EXPLICIT":
            if obj == "FULLSKY":
                raise ValueError("Incompatible INDXSCHM keyword")
            partial = True
        elif schm == "IMPLICIT":
            if obj == "PARTIAL":
                raise ValueError("Incompatible INDXSCHM keyword")
            partial = False
    if schm == "UNDEF":
        schm = partial and "EXPLICIT" or "IMPLICIT"
        log.info("No INDXSCHM keyword in header file: assume %s", schm)
    log.info("INDXSCHM = %s", schm)

    if field is None:
        # read every data column; skip the leading PIXEL column if partial
        field = range(len(fits_hdu.data.columns) - 1 * partial)
    if not (hasattr(field, "__len__") or isinstance(field, str)):
        field = (field,)
    if partial:
        # increment field counters
        field = tuple(f if isinstance(f, str) else f + 1 for f in field)
        try:
            pix = fits_hdu.data.field(0).astype(int, copy=False).ravel()
        except pf.VerifyError as e:
            log.warning(e)
            log.warning("Trying to fix a badly formatted header")
            fits_hdu.verify("fix")
            pix = fits_hdu.data.field(0).astype(int, copy=False).ravel()

    try:
        assert len(dtype) == len(
            field
        ), "The number of dtypes are not equal to the number of fields"
    except TypeError:
        # dtype is scalar (or None): broadcast it to every requested field
        dtype = [dtype] * len(field)

    for ff, curr_dtype in zip(field, dtype):
        try:
            if curr_dtype is None:
                m = fits_hdu.data.field(ff).ravel()
            else:
                m = fits_hdu.data.field(ff).astype(curr_dtype, copy=False).ravel()
        except pf.VerifyError as e:
            log.warning(e)
            log.warning("Trying to fix a badly formatted header")
            m = fits_hdu.verify("fix")
            if curr_dtype is None:
                m = fits_hdu.data.field(ff).ravel()
            else:
                m = fits_hdu.data.field(ff).astype(curr_dtype, copy=False).ravel()
        if partial:
            # expand to full sky, unobserved pixels flagged UNSEEN
            mnew = UNSEEN * np.ones(sz, dtype=m.dtype)
            mnew[pix] = m
            m = mnew
        if not pixelfunc.isnpixok(m.size) or (sz > 0 and sz != m.size):
            log.warning("nside={0:d}, sz={1:d}, m.size={2:d}".format(nside, sz, m.size))
            raise ValueError("Wrong nside parameter.")
        if nest is not None:  # no conversion with None (was `not nest is None`)
            # BUG FIX: index arrays were built with dtype=np.int32, which
            # overflows for maps with more than 2**31 pixels (nside >= 16384);
            # int64 is safe for every valid nside.
            if nest and ordering == "RING":
                idx = pixelfunc.nest2ring(nside, np.arange(m.size, dtype=np.int64))
                m = m[idx]
                log.info("Ordering converted to NEST")
            elif (not nest) and ordering == "NESTED":
                idx = pixelfunc.ring2nest(nside, np.arange(m.size, dtype=np.int64))
                m = m[idx]
                log.info("Ordering converted to RING")
        try:
            # normalise every bad-pixel sentinel to UNSEEN
            m[pixelfunc.mask_bad(m)] = UNSEEN
        except OverflowError:
            # integer maps cannot hold UNSEEN; leave them untouched
            pass
        ret.append(m)

    if h:
        header = []
        for (key, value) in fits_hdu.header.items():
            header.append((key, value))
    if opened_file:
        filename.close()

    if len(ret) == 1:
        if h:
            return ret[0], header
        else:
            return ret[0]
    else:
        # only stack into a 2-D array when all fields share one dtype
        if all(dt == dtype[0] for dt in dtype):
            ret = np.array(ret)
        if h:
            return ret, header
        else:
            return ret
def write_alm(
    filename, alms, out_dtype=None, lmax=-1, mmax=-1, mmax_in=-1, overwrite=False
):
    """Write alms to a fits file.

    In the fits file the alms are written with explicit index scheme,
    index = l*l + l + m + 1, possibly out of order.
    By default write_alm makes a table with the same precision as the alms.
    If specified, the lmax and mmax parameters truncate the input data to
    include only alms for which l <= lmax and m <= mmax.

    Parameters
    ----------
    filename : str
        The filename of the output fits file
    alms : array, complex or list of arrays
        A complex ndarray holding the alms, index = m*(2*lmax+1-m)/2+l, see Alm.getidx
    lmax : int, optional
        The maximum l in the output file
    mmax : int, optional
        The maximum m in the output file
    out_dtype : data type, optional
        data type in the output file (must be a numpy dtype). Default: *alms*.real.dtype
    mmax_in : int, optional
        maximum m in the input array
    overwrite : bool, optional
        If True, an existing file is silently overwritten.
    """
    if not cb.is_seq_of_seq(alms):
        alms = [alms]

    # Maximum l implied by the length of the input arrays.
    l2max = Alm.getlmax(len(alms[0]), mmax=mmax_in)
    if lmax != -1 and lmax > l2max:
        raise ValueError("Too big lmax in parameter")
    elif lmax == -1:
        lmax = l2max
    if mmax_in == -1:
        mmax_in = l2max
    if mmax == -1:
        mmax = lmax
    if mmax > mmax_in:
        mmax = mmax_in
    if out_dtype is None:
        out_dtype = alms[0].real.dtype

    # Keep only coefficients inside the requested (lmax, mmax) triangle.
    ell, emm = Alm.getlm(lmax)
    keep = np.where((ell <= lmax) & (emm <= mmax))
    ell = ell[keep]
    emm = emm[keep]
    src_idx = Alm.getidx(l2max, l=ell, m=emm)
    explicit_index = ell ** 2 + ell + emm + 1

    hdulist = pf.HDUList()
    for alm in alms:
        rec = np.empty(
            len(explicit_index),
            dtype=[("index", "i"), ("real", out_dtype), ("imag", out_dtype)],
        )
        rec["index"] = explicit_index
        rec["real"] = alm.real[src_idx]
        rec["imag"] = alm.imag[src_idx]
        columns = [
            pf.Column(
                name="index",
                format=getformat(np.int32),
                unit="l*l+l+m+1",
                array=rec["index"],
            ),
            pf.Column(
                name="real",
                format=getformat(out_dtype),
                unit="unknown",
                array=rec["real"],
            ),
            pf.Column(
                name="imag",
                format=getformat(out_dtype),
                unit="unknown",
                array=rec["imag"],
            ),
        ]
        hdulist.append(pf.BinTableHDU.from_columns(columns))
    # Add str to convert pathlib.Path into str
    # Due to https://github.com/astropy/astropy/issues/10594
    hdulist.writeto(str(filename), overwrite=overwrite)
def read_alm(filename, hdu=1, return_mmax=False):
    """Read alm from a fits file.

    The file format is assumed to be the format created by `write_alm`,
    with 3 columns (index, real and imaginary) for each HDU.
    In the fits file, the alm are assumed to be written
    with explicit index scheme, index = l**2+l+m+1, while healpix cxx
    uses index = m*(2*lmax+1-m)/2+l. The conversion is done in this
    function.

    Parameters
    ----------
    filename : str or HDUList or HDU or pathlib.Path instance
        The name of the fits file to read
    hdu : int, or tuple of int, optional
        The HDU to read. Default: hdu=1
        For example, when reading a FITS file containing polarized alm,
        hdu=1 (or not specifying hdu) will return the alm_T
        hdu=(1,2,3) will return alm_T, alm_E, alm_B
    return_mmax : bool, optional
        If true, both the alms and mmax is returned in a tuple. Default: return_mmax=False

    Returns
    -------
    alms[, mmax] : complex array or tuple of a complex array and an int
        The alms read from the file and optionally mmax read from the file

    Raises
    ------
    ValueError
        If a negative m is decoded from the explicit index column.
    RuntimeError
        If the HDUs being read have inconsistent (lmax, mmax).
    """
    alms = []
    lmaxtot = None
    mmaxtot = None
    opened_file = False
    if isinstance(filename, allowed_paths):
        filename = pf.open(filename)
        opened_file = True
    for unit in np.atleast_1d(hdu):
        idx, almr, almi = [_get_hdu(filename, hdu=unit).data.field(i) for i in range(3)]
        # Invert index = l*l + l + m + 1 to recover (l, m).
        l = np.floor(np.sqrt(idx - 1)).astype(int)
        m = idx - l ** 2 - l - 1
        if (m < 0).any():
            raise ValueError("Negative m value encountered !")
        lmax = l.max()
        mmax = m.max()
        if lmaxtot is None:
            lmaxtot = lmax
            mmaxtot = mmax
        else:
            if lmaxtot != lmax or mmaxtot != mmax:
                # BUG FIX: the format string had only two placeholders for
                # three arguments, so `hdu` was silently dropped and the
                # message read incoherently ("in <file> HDUs <unit>").
                raise RuntimeError(
                    "read_alm: harmonic expansion order in {} HDU {} does not "
                    "match the other HDUs {}".format(filename, unit, hdu)
                )
        # Zero complex array of the same length/precision as the input column.
        alm = almr * (0 + 0j)
        # Map (l, m) back to the healpix cxx packed index.
        i = Alm.getidx(lmax, l, m)
        alm.real[i] = almr
        alm.imag[i] = almi
        alms.append(alm)
    if opened_file:
        filename.close()
    if len(alms) == 1:
        alm = alms[0]
    else:
        alm = np.array(alms)
    if return_mmax:
        return alm, mmax
    else:
        return alm
## Generic functions to read and write column of data in fits file
def _get_hdu(input_data, hdu=None, memmap=None):
    """
    Return an HDU from a FITS file

    Parameters
    ----------
    input_data : str or HDUList or HDU instance
        The input FITS file, either as a filename, HDU list, or HDU instance.
    hdu : int or str, optional
        Which HDU to pick when *input_data* is a filename or HDUList.
    memmap : bool, optional
        Passed to astropy.io.fits.open when a file has to be opened.

    Returns
    -------
    fits_hdu : HDU
        The extracted HDU
    """
    # Path-like input: open it and recurse on the resulting HDUList.
    if isinstance(input_data, allowed_paths):
        return _get_hdu(pf.open(input_data, memmap=memmap), hdu=hdu)
    if isinstance(input_data, pf.HDUList):
        if isinstance(hdu, int) and hdu >= len(input_data):
            raise ValueError("Available hdu in [0-%d]" % len(input_data))
        return input_data[hdu]
    hdu_types = (pf.PrimaryHDU, pf.ImageHDU, pf.BinTableHDU, pf.TableHDU, pf.GroupsHDU)
    if isinstance(input_data, hdu_types):
        # Already an HDU: hand it back unchanged.
        return input_data
    raise TypeError(
        "First argument should be a input_data (str or pathlib.Path), HDUList instance, or HDU instance"
    )
def getformat(t):
    """Get the FITS convention format string of data type t.

    Several interpretations of *t* are tried in turn: a dtype already in the
    lookup table, something convertible to a dtype, a scalar instance, a
    sequence of scalars, the ``str`` type, a string instance, a sequence of
    strings, and finally dtype-like objects exposing ``.type``.

    Parameters
    ----------
    t : data type
        The data type for which the FITS type is requested

    Returns
    -------
    fits_type : str
        The FITS string code describing the data type

    Raises
    ------
    ValueError
        If no FITS code can be derived from *t*.
    """
    conv = {
        np.dtype(np.bool_): "L",
        np.dtype(np.uint8): "B",
        np.dtype(np.int16): "I",
        np.dtype(np.int32): "J",
        np.dtype(np.int64): "K",
        np.dtype(np.float32): "E",
        np.dtype(np.float64): "D",
        np.dtype(np.complex64): "C",
        np.dtype(np.complex128): "M",
    }
    # BUG FIX: the original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt/SystemExit; `except Exception` keeps the intended
    # fall-through behaviour without masking interpreter-level signals.

    # 1. t is already a dtype present in the table (may raise if unhashable).
    try:
        if t in conv:
            return conv[t]
    except Exception:
        pass
    # 2. t is convertible to a dtype (e.g. np.float64, "f8").
    try:
        if np.dtype(t) in conv:
            return conv[np.dtype(t)]
    except Exception:
        pass
    # 3. t is a scalar instance (e.g. 1.5) — classify via its Python type.
    try:
        if np.dtype(type(t)) in conv:
            return conv[np.dtype(type(t))]
    except Exception:
        pass
    # 4. t is a sequence of scalars — classify via its first element's type.
    try:
        if np.dtype(type(t[0])) in conv:
            return conv[np.dtype(type(t[0]))]
    except Exception:
        pass
    # 5. The str *type* itself maps to a bare character code.
    if t is str:
        return "A"
    # 6. A string instance maps to a fixed-width code.
    if type(t) is str:
        return "A%d" % (len(t))
    # 7. A sequence of strings: width of the longest element.
    try:
        if type(t[0]) is str:
            l = max(len(s) for s in t)
            return "A%d" % (l)
    except Exception:
        pass
    # 8./9. dtype-like objects (or sequences of them) exposing `.type`.
    try:
        if np.dtype(t.type) in conv:
            return conv[np.dtype(t.type)]
    except Exception:
        pass
    try:
        if np.dtype(t[0].type) in conv:
            return conv[np.dtype(t[0].type)]
    except Exception:
        pass
    raise ValueError(
        "healpy could not understand the equivalent FITS datatype of {}, please open an issue on the healpy Github repository".format(
            str(t)
        )
    )
|
healpyREPO_NAMEhealpyPATH_START.@healpy_extracted@healpy-main@lib@healpy@fitsfunc.py@.PATH_END.py
|
{
"filename": "align_data.py",
"repo_name": "amanchokshi/EMBERS",
"repo_path": "EMBERS_extracted/EMBERS-master/src/embers/rf_tools/align_data.py",
"type": "Python"
}
|
"""
Align Data
----------
Tools to temporally align pairs of rf data files,
enabling comparisons between data sets
"""
import concurrent.futures
import logging
import math
import re
from itertools import repeat
from pathlib import Path
import numpy as np
from embers.rf_tools.rf_data import (read_data, tile_names, tile_pairs,
time_tree)
from matplotlib import pyplot as plt
from scipy import interpolate
from scipy.signal import savgol_filter
def savgol_interp(
    ref,
    tile,
    savgol_window_1=None,
    savgol_window_2=None,
    polyorder=None,
    interp_type=None,
    interp_freq=None,
):
    """Interpolate a pair of power arrays onto a common clock, then smooth.

    The reference and tile time series are interpolated to the same sampling
    frequency so their samples line up one-to-one, then smoothed with two
    successive Savitzky-Golay passes: a narrow window that preserves deep
    nulls and fine structure, followed by a wider window that suppresses
    noise.

    :param ref: path to reference data file :class:`~str`
    :param tile: path to tile data file :class:`~str`
    :param savgol_window_1: window size of first savgol pass, must be odd :class:`~int`
    :param savgol_window_2: window size of second savgol pass, must be odd :class:`~int`
    :param polyorder: polynomial order fitted inside each savgol window :class:`~int`
    :param interp_type: interpolation kind, e.g. 'cubic', 'linear' :class:`~str`
    :param interp_freq: frequency to which the power arrays are interpolated, in Hertz :class:`~int`

    :returns:
        A :class:`~tuple` (ref_ali, tile_ali, time_array, ref_power, tile_power, ref_time, tile_time)

        - ref_ali - aligned reference power array
        - tile_ali - aligned tile power array
        - time_array - corresponding to power arrays
        - ref_power - raw reference power array
        - tile_power - raw tile power array
        - ref_time - raw reference time array
        - tile_time - raw tile time array
    """
    ref_power, ref_time = read_data(ref)
    tile_power, tile_time = read_data(tile)

    # Integer-second window covered by BOTH recordings.
    start_time = math.ceil(max(ref_time[0], tile_time[0]))
    stop_time = math.floor(min(ref_time[-1], tile_time[-1]))

    # Common clock at the requested sampling frequency.
    time_array = np.arange(start_time, stop_time, (1 / interp_freq))

    # Interpolating models evaluated on the common clock.
    ref_model = interpolate.interp1d(ref_time, ref_power, axis=0, kind=interp_type)
    tile_model = interpolate.interp1d(tile_time, tile_power, axis=0, kind=interp_type)
    ref_ali = ref_model(time_array)
    tile_ali = tile_model(time_array)

    # Two-stage smoothing: narrow window first, wide window second.
    for window in (savgol_window_1, savgol_window_2):
        ref_ali = savgol_filter(ref_ali, window, polyorder, axis=0)
        tile_ali = savgol_filter(tile_ali, window, polyorder, axis=0)

    return (ref_ali, tile_ali, time_array, ref_power, tile_power, ref_time, tile_time)
def plot_savgol_interp(
    ref=None,
    tile=None,
    savgol_window_1=None,
    savgol_window_2=None,
    polyorder=None,
    interp_type=None,
    interp_freq=None,
    channel=None,
    out_dir=None,
):
    """Plot single channel of power arrays to visualise :func:`~embers.rf_tools.align_data.savgol_interp`.

    Create a plot of a single channel of raw :samp:`rf_data` from reference and tile power arrays, along
    with the outputs of :func:`~embers.rf_tools.align_data.savgol_interp` to visualise the effects of
    interpolation and savgol smoothing.

    :param ref: path to reference data file :class:`~str`
    :param tile: path to tile data file :class:`~str`
    :param savgol_window_1: window size of savgol filer, must be odd :class:`~int`
    :param savgol_window_2: window size of savgol filer, must be odd :class:`~int`
    :param polyorder: polynomial order to fit to savgol_window :class:`~int`
    :param interp_type: type of interpolation. Ex: 'cubic', 'linear' :class:`~str`
    :param interp_freq: freqency to which power array is interpolated in Hertz :class:`~int`
    :param channel: index of single frequency channel :class:`~int`
    :param out_dir: path to output directory :class:`~str`

    :returns:
        single freqency savgol_interp plot saved to :samp:`out_dir`
    """
    # Run the full align pipeline; raw and aligned arrays are both plotted.
    (
        ref_ali,
        tile_ali,
        time_array,
        ref_power,
        tile_power,
        ref_time,
        tile_time,
    ) = savgol_interp(
        ref=ref,
        tile=tile,
        savgol_window_1=savgol_window_1,
        savgol_window_2=savgol_window_2,
        polyorder=polyorder,
        interp_type=interp_type,
        interp_freq=interp_freq,
    )

    # Sample align plot
    # NOTE(review): the "seaborn" style name was removed in Matplotlib >= 3.6
    # (renamed to "seaborn-v0_8"); confirm the pinned matplotlib version.
    plt.style.use("seaborn")
    plt.rcParams["figure.figsize"] = (9, 6)

    # convert times to minutes from first datapoint
    time_array = (time_array - time_array[0]) / 60
    ref_time = (ref_time - ref_time[0]) / 60
    tile_time = (tile_time - tile_time[0]) / 60

    # Smoothed curves on top of the raw scatter points, one channel only.
    plt.plot(
        time_array,
        tile_ali[::, channel],
        color="#e23a4e",
        alpha=0.9,
        label="tile savgol",
    )
    plt.scatter(
        tile_time,
        tile_power[::, channel],
        color="#f78b51",
        marker=".",
        alpha=0.6,
        label="tile raw",
    )
    plt.plot(
        time_array,
        ref_ali[::, channel],
        color="#252b40",
        alpha=0.9,
        label="ref savgol",
    )
    plt.scatter(
        ref_time,
        ref_power[::, channel],
        color="#6a82bb",
        marker=".",
        alpha=0.6,
        label="ref raw",
    )

    # Opaque legend so it stays readable over the scatter.
    leg = plt.legend(loc="upper left", frameon=True)
    leg.get_frame().set_facecolor("white")
    for leg in leg.legendHandles:
        leg.set_alpha(1)

    plt.ylim(-110, -20)
    plt.ylabel("Raw Power [dBm]")
    plt.xlabel("Time [min]")
    plt.tight_layout()
    # Create the output directory if needed, then save the figure.
    Path(f"{out_dir}").mkdir(parents=True, exist_ok=True)
    plt.savefig(f"{out_dir}/savgol_interp_sample.png")
def save_aligned(
    tile_pair,
    time_stamp,
    savgol_window_1,
    savgol_window_2,
    polyorder,
    interp_type,
    interp_freq,
    data_dir,
    out_dir,
):
    """Align one ref/tile pair of rf files and save the result to an :samp:`npz` file.

    The two raw files are smoothed, interpolated and aligned with
    :func:`~embers.rf_tools.align_data.savgol_interp`; the aligned power
    arrays and common time axis are written with
    :func:`~numpy.savez_compressed` into a date/timestamp directory tree.

    :param tile_pair: pair of ref and tile antenna names from :func:`~embers.rf_tools.rf_data.tile_pairs` :class:`list`
    :param time_stamp: time when rf observation began. In YYYY-MM-DD-HH-MM format :class:`~str`
    :param savgol_window_1: window size of savgol filer, must be odd :class:`~int`
    :param savgol_window_2: window size of savgol filer, must be odd :class:`~int`
    :param polyorder: polynomial order to fit to savgol_window :class:`~int`
    :param interp_type: type of interpolation. Ex: 'cubic', 'linear' :class:`~str`
    :param interp_freq: freqency to which power array is interpolated :class:`~int`
    :param data_dir: root of data dir where rf data is located :class:`~str`
    :param out_dir: relative path to output directory :class:`~str`

    :return:
        a success message string, or the exception raised while processing
        (returned, not raised, so a multiprocess batch can log failures)
    """
    # YYYY-MM-DD prefix of the timestamp selects the date subdirectory.
    date = re.search(r"\d{4}.\d{2}.\d{2}", time_stamp)[0]
    ref = tile_pair[0]
    tile = tile_pair[1]
    ref_file = f"{data_dir}/{ref}/{date}/{ref}_{time_stamp}.txt"
    tile_file = f"{data_dir}/{tile}/{date}/{tile}_{time_stamp}.txt"

    try:
        aligned = savgol_interp(
            ref_file,
            tile_file,
            savgol_window_1=savgol_window_1,
            savgol_window_2=savgol_window_2,
            polyorder=polyorder,
            interp_type=interp_type,
            interp_freq=interp_freq,
        )
        ref_ali, tile_ali, time_array = aligned[0], aligned[1], aligned[2]

        # creates output directory if it doesn't exist
        save_dir = Path(f"{out_dir}/{date}/{time_stamp}")
        save_dir.mkdir(parents=True, exist_ok=True)

        # Power arrays stored as float32, times as float64 (double);
        # compression drastically reduces the file size.
        np.savez_compressed(
            f"{save_dir}/{ref}_{tile}_{time_stamp}_aligned.npz",
            ref_ali=np.single(ref_ali),
            tile_ali=np.single(tile_ali),
            time_array=np.double(time_array),
        )
        return f"Saved aligned file to {save_dir}/{ref}_{tile}_{time_stamp}_aligned.npz"
    except Exception as e:
        # Deliberate: hand the exception back to the caller for logging.
        return e
def align_batch(
    start_date=None,
    stop_date=None,
    savgol_window_1=None,
    savgol_window_2=None,
    polyorder=None,
    interp_type=None,
    interp_freq=None,
    data_dir=None,
    out_dir=None,
    max_cores=None,
):
    """Temporally align all RF files within a date interval using :func:`~embers.rf_tools.align_data.save_aligned`.

    :param start_date: In YYYY-MM-DD format :class:`~str`
    :param stop_date: In YYYY-MM-DD format :class:`~str`
    :param savgol_window_1: window size of savgol filer, must be odd :class:`~int`
    :param savgol_window_2: window size of savgol filer, must be odd :class:`~int`
    :param polyorder: polynomial order to fit to savgol_window :class:`~int`
    :param interp_type: type of interpolation. Ex: 'cubic', 'linear' :class:`~str`
    :param interp_freq: freqency to which power array is interpolated :class:`~int`
    :param data_dir: root of data dir where rf data is located :class:`~str`
    :param out_dir: relative path to output directory :class:`~str`
    :param max_cores: Maximum number of cores to be used by this script. Default=None, which means that all available cores are used

    :return:
        - aligned rf data saved to :samp:`npz` file by :func:`~numpy.savez_compressed` in :samp:`out_dir`
    """
    # Enumerate every date and half-hourly timestamp in the interval.
    dates, time_stamps = time_tree(start_date, stop_date)

    # Logging config — all results (successes and returned exceptions from
    # save_aligned) are appended to a single batch log in out_dir.
    log_dir = Path(f"{out_dir}")
    log_dir.mkdir(parents=True, exist_ok=True)
    logging.basicConfig(
        filename=f"{out_dir}/align_batch.log",
        level=logging.INFO,
        format="%(levelname)s: %(funcName)s: %(message)s",
    )

    # One process pool per (tile pair, day): every timestamp of that day is
    # aligned in parallel; save_aligned returns a message or an exception,
    # both of which are logged below rather than raised.
    for pair in tile_pairs(tile_names()):
        for day in range(len(dates)):
            with concurrent.futures.ProcessPoolExecutor(
                max_workers=max_cores
            ) as executor:
                results = executor.map(
                    save_aligned,
                    repeat(pair),
                    time_stamps[day],
                    repeat(savgol_window_1),
                    repeat(savgol_window_2),
                    repeat(polyorder),
                    repeat(interp_type),
                    repeat(interp_freq),
                    repeat(data_dir),
                    repeat(out_dir),
                )
                for result in results:
                    logging.info(result)
|
amanchokshiREPO_NAMEEMBERSPATH_START.@EMBERS_extracted@EMBERS-master@src@embers@rf_tools@align_data.py@.PATH_END.py
|
{
"filename": "TeeResMaxEvolutionStudy.py",
"repo_name": "mmicromegas/ransX",
"repo_path": "ransX_extracted/ransX-master/EVOLUTION/TeeResMaxEvolutionStudy.py",
"type": "Python"
}
|
import numpy as np
import sys
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.EVOL.ALIMITevol import ALIMITevol
from UTILS.Tools import Tools
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class TeeResMaxEvolutionResolutionStudy(Calculus, ALIMITevol, Tools, object):
    """Evolution of the maximum residual of the total-energy equation
    (resTeeMax) for a set of runs at different grid resolutions.

    Parameters
    ----------
    filename : list[str]
        One RANS-averaged data file per resolution.
    ig : int
        Geometry flag forwarded to :class:`Calculus`.
    data_prefix : str
        Prefix used when naming the saved plot in ``RESULTS/``.
    """

    def __init__(self, filename, ig, data_prefix):
        super(TeeResMaxEvolutionResolutionStudy, self).__init__(ig)
        # load data to a list of structured arrays, one entry per resolution
        eht = [self.customLoad(ffile) for ffile in filename]
        # declare data lists
        t_timec, t_resTeeMax, t_tc = [], [], []
        nx, ny, nz, tavg = [], [], [], []
        for i in range(len(filename)):
            # load temporal evolution
            t_timec.append(self.getRAdata(eht[i], 't_timec'))
            t_resTeeMax.append(self.getRAdata(eht[i], 't_resTeeMax'))
            t_tc.append(self.getRAdata(eht[i], 't_tc'))
            nx.append(self.getRAdata(eht[i], 'nx'))
            ny.append(self.getRAdata(eht[i], 'ny'))
            nz.append(self.getRAdata(eht[i], 'nz'))
            tavg.append(self.getRAdata(eht[i], 'tavg'))
        # share data across the whole class
        self.t_timec = t_timec
        self.t_resTeeMax = t_resTeeMax
        self.t_tc = t_tc
        self.tavg = tavg
        self.data_prefix = data_prefix
        self.nx = nx
        self.ny = ny
        self.nz = nz

    def plot_resTeeMax_evolution(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
        """Plot t_resTeeMax vs time for every resolution and save the figure.

        Only LAXIS == 2 is supported; any other value aborts via sys.exit().
        """
        grd = self.t_timec
        plt1 = self.t_resTeeMax
        t_tc = self.t_tc
        # load resolution
        nx = self.nx
        ny = self.ny
        nz = self.nz
        tavg = self.tavg
        # create FIGURE
        plt.figure(figsize=(7, 6))
        # format AXIS, make sure it is exponential
        plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
        if LAXIS != 2:
            print("ERROR(TeeResMaxEvolutionStudy.py): Only LAXIS=2 is supported.")
            sys.exit()
        # Pick the series with the globally largest value to drive the
        # y-axis limits.  Bug fix: the running maximum is now updated inside
        # the loop, and the first series is a valid fallback instead of an
        # empty list (which previously reached set_plt_axis when the first
        # dataset held the maximum).
        plt1_foraxislimit = plt1[0]
        plt1max = np.max(plt1[0])
        for plt1i in plt1:
            if np.max(plt1i) > plt1max:
                plt1max = np.max(plt1i)
                plt1_foraxislimit = plt1i
        # set plot boundaries
        to_plot = [plt1_foraxislimit]
        self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
        # plot DATA
        plt.title('total energy eq res Max evolution')
        for i in range(len(grd)):
            plt.plot(grd[i], plt1[i], label=str(nx[i]) + ' x ' + str(ny[i]) + ' x ' + str(nz[i]) + ' '
                                            + '(tavg = ' + str(np.round(tavg[i], 1)) + ' s = '
                                            + str(np.round(tavg[i] / np.mean(t_tc[i]), 1)) + ' TOs)')
        # define and show x/y LABELS
        setxlabel = r"t (s)"
        setylabel = r"res (erg/s)"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
        # show LEGEND
        plt.legend(loc=ilg, prop={'size': 12})
        # display PLOT
        plt.show(block=False)
        # save PLOT
        plt.savefig('RESULTS/' + self.data_prefix + 'tresTeeMax_evol.png')

    # find data with maximum resolution
    def maxresdata(self, data):
        """Return the element of *data* with the largest first-axis length.

        Bug fix: the original never updated the running maximum, so it
        effectively returned the last element rather than the longest one.
        """
        best_len = 0
        data_maxres = data[0]
        for idata in data:
            if idata.shape[0] > best_len:
                best_len = idata.shape[0]
                data_maxres = idata
        return data_maxres
|
mmicromegasREPO_NAMEransXPATH_START.@ransX_extracted@ransX-master@EVOLUTION@TeeResMaxEvolutionStudy.py@.PATH_END.py
|
{
"filename": "ultratb.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py3/IPython/core/ultratb.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Verbose and colourful traceback formatting.
**ColorTB**
I've always found it a bit hard to visually parse tracebacks in Python. The
ColorTB class is a solution to that problem. It colors the different parts of a
traceback in a manner similar to what you would expect from a syntax-highlighting
text editor.
Installation instructions for ColorTB::
import sys,ultratb
sys.excepthook = ultratb.ColorTB()
**VerboseTB**
I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds
of useful info when a traceback occurs. Ping originally had it spit out HTML
and intended it for CGI programmers, but why should they have all the fun? I
altered it to spit out colored text to the terminal. It's a bit overwhelming,
but kind of neat, and maybe useful for long-running programs that you believe
are bug-free. If a crash *does* occur in that type of program you want details.
Give it a shot--you'll love it or you'll hate it.
.. note::
The Verbose mode prints the variables currently visible where the exception
happened (shortening their strings if too long). This can potentially be
very slow, if you happen to have a huge data structure whose string
representation is complex to compute. Your computer may appear to freeze for
a while with cpu usage at 100%. If this occurs, you can cancel the traceback
with Ctrl-C (maybe hitting it more than once).
If you encounter this kind of situation often, you may want to use the
Verbose_novars mode instead of the regular Verbose, which avoids formatting
variables (but otherwise includes the information and context given by
Verbose).
.. note::
The verbose mode prints all variables in the stack, which means it can
potentially leak sensitive information like access keys, or unencrypted
passwords.
Installation instructions for VerboseTB::
import sys,ultratb
sys.excepthook = ultratb.VerboseTB()
Note: Much of the code in this module was lifted verbatim from the standard
library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'.
Color schemes
-------------
The colors are defined in the class TBTools through the use of the
ColorSchemeTable class. Currently the following exist:
- NoColor: allows all of this module to be used in any terminal (the color
escapes are just dummy blank strings).
- Linux: is meant to look good in a terminal like the Linux console (black
or very dark background).
- LightBG: similar to Linux but swaps dark/light colors to be more readable
in light background terminals.
- Neutral: a neutral color scheme that should be readable on both light and
dark background
You can implement other color schemes easily, the syntax is fairly
self-explanatory. Please send back new schemes you develop to the author for
possible inclusion in future releases.
Inheritance diagram:
.. inheritance-diagram:: IPython.core.ultratb
:parts: 3
"""
#*****************************************************************************
# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
# Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from collections.abc import Sequence
import functools
import inspect
import linecache
import pydoc
import sys
import time
import traceback
import types
from types import TracebackType
from typing import Any, List, Optional, Tuple
import stack_data
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.styles import get_style_by_name
import IPython.utils.colorable as colorable
# IPython's own modules
from IPython import get_ipython
from IPython.core import debugger
from IPython.core.display_trap import DisplayTrap
from IPython.core.excolors import exception_colors
from IPython.utils import PyColorize
from IPython.utils import path as util_path
from IPython.utils import py3compat
from IPython.utils.terminal import get_terminal_size
# Globals
# amount of space to put line numbers before verbose tracebacks
INDENT_SIZE = 8
# Default color scheme. This is used, for example, by the traceback
# formatter. When running in an actual IPython instance, the user's rc.colors
# value is used, but having a module global makes this functionality available
# to users of ultratb who are NOT running inside ipython.
DEFAULT_SCHEME = 'NoColor'
# NOTE(review): presumably the file-length cutoff above which the slower,
# fully highlighted formatting path is skipped; its use is not visible in
# this part of the file — confirm against the rest of the module.
FAST_THRESHOLD = 10_000
# ---------------------------------------------------------------------------
# Code begins
# Helper function -- largely belongs to VerboseTB, but we need the same
# functionality to produce a pseudo verbose TB for SyntaxErrors, so that they
# can be recognized properly by ipython.el's py-traceback-line-re
# (SyntaxErrors have to be treated specially because they have no traceback)
@functools.lru_cache()
def count_lines_in_py_file(filename: str) -> int:
    """
    Return the number of lines in ``filename`` if it ends with the
    extension ".py"; otherwise return 0.

    Results are memoized with ``functools.lru_cache`` because the same
    file is typically queried once per traceback frame.
    """
    # Fix: a stale module-level string (a duplicate of
    # get_line_number_of_frame's docstring) used to dangle after this
    # function as dead code; it has been removed.
    if not filename.endswith(".py"):
        return 0
    try:
        # Stream the file instead of reading it whole; tracebacks can
        # point at very large source files.
        with open(filename, "r") as file:
            return sum(1 for _ in file)
    except UnicodeError:
        # Undecodable source file: report an unknown (zero) length.
        return 0
def get_line_number_of_frame(frame: types.FrameType) -> int:
    """
    Given a frame object, returns the total number of lines in the file
    containing the frame's code object, or the number of lines in the
    frame's source code if the file is not available.

    Parameters
    ----------
    frame : FrameType
        The frame object whose line number is to be determined.

    Returns
    -------
    int
        The total number of lines in the file containing the frame's
        code object, or the number of lines in the frame's source code
        if the file is not available.
    """
    filename = frame.f_code.co_filename
    if filename is None:
        # No backing file (e.g. dynamically compiled code): fall back to
        # the length of the source as seen by ``inspect``.
        # Fix: removed a leftover debug print("No file....") that wrote
        # to stdout from library code.
        lines, first = inspect.getsourcelines(frame)
        return first + len(lines)
    return count_lines_in_py_file(filename)
def _safe_string(value, what, func=str):
# Copied from cpython/Lib/traceback.py
try:
return func(value)
except:
return f"<{what} {func.__name__}() failed>"
def _format_traceback_lines(lines, Colors, has_colors: bool, lvals):
    """
    Format tracebacks lines with pointing arrow, leading numbers...
    Parameters
    ----------
    lines : list[Line]
    Colors
        ColorScheme used.
    has_colors : bool
        Whether each line should be rendered with syntax highlighting.
    lvals : str
        Values of local variables, already colored, to inject just after the error line.
    """
    # Width reserved for the line-number gutter (arrow + digits).
    numbers_width = INDENT_SIZE - 1
    res = []
    for stack_line in lines:
        if stack_line is stack_data.LINE_GAP:
            # stack_data marker separating non-contiguous source pieces.
            res.append('%s (...)%s\n' % (Colors.linenoEm, Colors.Normal))
            continue
        line = stack_line.render(pygmented=has_colors).rstrip('\n') + '\n'
        lineno = stack_line.lineno
        if stack_line.is_current:
            # This is the line with the error: prefix it with an arrow.
            pad = numbers_width - len(str(lineno))
            num = '%s%s' % (debugger.make_arrow(pad), str(lineno))
            start_color = Colors.linenoEm
        else:
            num = '%*s' % (numbers_width, lineno)
            start_color = Colors.lineno
        line = '%s%s%s %s' % (start_color, num, Colors.Normal, line)
        res.append(line)
        if lvals and stack_line.is_current:
            # Show the pre-formatted local variables right below the
            # failing line.
            res.append(lvals + '\n')
    return res
def _simple_format_traceback_lines(lnum, index, lines, Colors, lvals, _line_format):
    """
    Format tracebacks lines with pointing arrow, leading numbers...
    This is the fast, non-stack_data path used for very long files.
    Parameters
    ==========
    lnum: int
        number of the target line of code.
    index: int
        which line in the list should be highlighted.
    lines: list[string]
    Colors:
        ColorScheme used.
    lvals: bytes
        Values of local variables, already colored, to inject just after the error line.
    _line_format: f (str) -> (str, bool)
        return (colorized version of str, failure to do so)
    """
    numbers_width = INDENT_SIZE - 1
    res = []
    # ``lnum - index`` is the absolute line number of the first entry in
    # ``lines``, so ``i`` tracks real source line numbers.
    for i, line in enumerate(lines, lnum - index):
        # assert isinstance(line, str)
        line = py3compat.cast_unicode(line)
        # Try to colorize; keep the raw line if the formatter failed.
        new_line, err = _line_format(line, "str")
        if not err:
            line = new_line
        if i == lnum:
            # This is the line with the error: arrow-prefixed gutter.
            pad = numbers_width - len(str(i))
            num = "%s%s" % (debugger.make_arrow(pad), str(lnum))
            line = "%s%s%s %s%s" % (
                Colors.linenoEm,
                num,
                Colors.line,
                line,
                Colors.Normal,
            )
        else:
            num = "%*s" % (numbers_width, i)
            line = "%s%s%s %s" % (Colors.lineno, num, Colors.Normal, line)
        res.append(line)
        if lvals and i == lnum:
            # Inject the formatted local variables after the failing line.
            res.append(lvals + "\n")
    return res
def _format_filename(file, ColorFilename, ColorNormal, *, lineno=None):
    """
    Format filename lines with custom formatting from caching compiler or `File *.py` by default
    Parameters
    ----------
    file : str
    ColorFilename
        ColorScheme's filename coloring to be used.
    ColorNormal
        ColorScheme's normal coloring to be used.
    lineno : int, optional
        Line number to show after the filename, when known.
    """
    ipinst = get_ipython()
    if (
        ipinst is not None
        and (data := ipinst.compile.format_code_name(file)) is not None
    ):
        # The caching compiler knows a friendlier label/name pair for this
        # code (e.g. an interactive cell rather than a temp-file path).
        label, name = data
        if lineno is None:
            tpl_link = f"{{label}} {ColorFilename}{{name}}{ColorNormal}"
        else:
            tpl_link = (
                f"{{label}} {ColorFilename}{{name}}, line {{lineno}}{ColorNormal}"
            )
    else:
        label = "File"
        # Abbreviate $HOME to ~ and decode using the filesystem encoding.
        name = util_path.compress_user(
            py3compat.cast_unicode(file, util_path.fs_encoding)
        )
        if lineno is None:
            tpl_link = f"{{label}} {ColorFilename}{{name}}{ColorNormal}"
        else:
            # can we make this the more friendly ", line {{lineno}}", or do we need to preserve the formatting with the colon?
            tpl_link = f"{{label}} {ColorFilename}{{name}}:{{lineno}}{ColorNormal}"
    return tpl_link.format(label=label, name=name, lineno=lineno)
#---------------------------------------------------------------------------
# Module classes
class TBTools(colorable.Colorable):
    """Basic tools used by all traceback printer classes."""
    # Number of frames to skip when reporting tracebacks
    tb_offset = 0
    def __init__(
        self,
        color_scheme="NoColor",
        call_pdb=False,
        ostream=None,
        parent=None,
        config=None,
        *,
        debugger_cls=None,
    ):
        # Whether to call the interactive pdb debugger after printing
        # tracebacks or not
        super(TBTools, self).__init__(parent=parent, config=config)
        self.call_pdb = call_pdb
        # Output stream to write to. Note that we store the original value in
        # a private attribute and then make the public ostream a property, so
        # that we can delay accessing sys.stdout until runtime. The way
        # things are written now, the sys.stdout object is dynamically managed
        # so a reference to it should NEVER be stored statically. This
        # property approach confines this detail to a single location, and all
        # subclasses can simply access self.ostream for writing.
        self._ostream = ostream
        # Create color table
        self.color_scheme_table = exception_colors()
        self.set_colors(color_scheme)
        self.old_scheme = color_scheme  # save initial value for toggles
        self.debugger_cls = debugger_cls or debugger.Pdb
        # Instantiate the debugger eagerly only when it will be used.
        if call_pdb:
            self.pdb = self.debugger_cls()
        else:
            self.pdb = None
    def _get_ostream(self):
        """Output stream that exceptions are written to.
        Valid values are:
        - None: the default, which means that IPython will dynamically resolve
        to sys.stdout. This ensures compatibility with most tools, including
        Windows (where plain stdout doesn't recognize ANSI escapes).
        - Any object with 'write' and 'flush' attributes.
        """
        return sys.stdout if self._ostream is None else self._ostream
    def _set_ostream(self, val):
        # Accept None (dynamic sys.stdout) or any file-like object.
        assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
        self._ostream = val
    ostream = property(_get_ostream, _set_ostream)
    @staticmethod
    def _get_chained_exception(exception_value):
        # PEP 3134 chaining: an explicit ``raise ... from e`` (__cause__)
        # wins; otherwise the implicit __context__ is used unless the
        # exception suppressed it.
        cause = getattr(exception_value, "__cause__", None)
        if cause:
            return cause
        if getattr(exception_value, "__suppress_context__", False):
            return None
        return getattr(exception_value, "__context__", None)
    def get_parts_of_chained_exception(
        self, evalue
    ) -> Optional[Tuple[type, BaseException, TracebackType]]:
        """Return (type, value, traceback) of the chained exception, if any."""
        chained_evalue = self._get_chained_exception(evalue)
        if chained_evalue:
            return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__
        return None
    def prepare_chained_exception_message(self, cause) -> List[Any]:
        """Return the standard banner separating two chained tracebacks."""
        direct_cause = "\nThe above exception was the direct cause of the following exception:\n"
        exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n"
        if cause:
            message = [[direct_cause]]
        else:
            message = [[exception_during_handling]]
        return message
    @property
    def has_colors(self) -> bool:
        # True for any active scheme other than "NoColor".
        return self.color_scheme_table.active_scheme_name.lower() != "nocolor"
    def set_colors(self, *args, **kw):
        """Shorthand access to the color table scheme selector method."""
        # Set own color table
        self.color_scheme_table.set_active_scheme(*args, **kw)
        # for convenience, set Colors to the active scheme
        self.Colors = self.color_scheme_table.active_colors
        # Also set colors of debugger
        if hasattr(self, 'pdb') and self.pdb is not None:
            self.pdb.set_colors(*args, **kw)
    def color_toggle(self):
        """Toggle between the currently active color scheme and NoColor."""
        if self.color_scheme_table.active_scheme_name == 'NoColor':
            # Restore the scheme that was active before the last toggle.
            self.color_scheme_table.set_active_scheme(self.old_scheme)
            self.Colors = self.color_scheme_table.active_colors
        else:
            self.old_scheme = self.color_scheme_table.active_scheme_name
            self.color_scheme_table.set_active_scheme('NoColor')
            self.Colors = self.color_scheme_table.active_colors
    def stb2text(self, stb):
        """Convert a structured traceback (a list) to a string."""
        return '\n'.join(stb)
    def text(self, etype, value, tb, tb_offset: Optional[int] = None, context=5):
        """Return formatted traceback.
        Subclasses may override this if they add extra arguments.
        """
        tb_list = self.structured_traceback(etype, value, tb,
                                            tb_offset, context)
        return self.stb2text(tb_list)
    def structured_traceback(
        self,
        etype: type,
        evalue: Optional[BaseException],
        etb: Optional[TracebackType] = None,
        tb_offset: Optional[int] = None,
        number_of_lines_of_context: int = 5,
    ):
        """Return a list of traceback frames.
        Must be implemented by each class.
        """
        raise NotImplementedError()
#---------------------------------------------------------------------------
class ListTB(TBTools):
    """Print traceback information from a traceback list, with optional color.
    Calling requires 3 arguments: (etype, evalue, elist)
    as would be obtained by::
        etype, evalue, tb = sys.exc_info()
        if tb:
            elist = traceback.extract_tb(tb)
        else:
            elist = None
    It can thus be used by programs which need to process the traceback before
    printing (such as console replacements based on the code module from the
    standard library).
    Because they are meant to be called without a full traceback (only a
    list), instances of this class can't call the interactive pdb debugger."""
    def __call__(self, etype, value, elist):
        # Flush first so our output is ordered after anything pending.
        self.ostream.flush()
        self.ostream.write(self.text(etype, value, elist))
        self.ostream.write('\n')
    def _extract_tb(self, tb):
        # Normalize a traceback object to extract_tb() list form; keep
        # None as-is.
        if tb:
            return traceback.extract_tb(tb)
        else:
            return None
    def structured_traceback(
        self,
        etype: type,
        evalue: Optional[BaseException],
        etb: Optional[TracebackType] = None,
        tb_offset: Optional[int] = None,
        context=5,
    ):
        """Return a color formatted string with the traceback info.
        Parameters
        ----------
        etype : exception type
            Type of the exception raised.
        evalue : object
            Data stored in the exception
        etb : list | TracebackType | None
            If list: List of frames, see class docstring for details.
            If Traceback: Traceback of the exception.
        tb_offset : int, optional
            Number of frames in the traceback to skip. If not given, the
            instance evalue is used (set in constructor).
        context : int, optional
            Number of lines of context information to print.
        Returns
        -------
        String with formatted exception.
        """
        # This is a workaround to get chained_exc_ids in recursive calls
        # etb should not be a tuple if structured_traceback is not recursive
        if isinstance(etb, tuple):
            etb, chained_exc_ids = etb
        else:
            chained_exc_ids = set()
        if isinstance(etb, list):
            elist = etb
        elif etb is not None:
            elist = self._extract_tb(etb)
        else:
            elist = []
        tb_offset = self.tb_offset if tb_offset is None else tb_offset
        assert isinstance(tb_offset, int)
        Colors = self.Colors
        out_list = []
        if elist:
            if tb_offset and len(elist) > tb_offset:
                # Drop the caller-specified number of leading frames.
                elist = elist[tb_offset:]
            out_list.append('Traceback %s(most recent call last)%s:' %
                            (Colors.normalEm, Colors.Normal) + '\n')
            out_list.extend(self._format_list(elist))
        # The exception info should be a single entry in the list.
        lines = ''.join(self._format_exception_only(etype, evalue))
        out_list.append(lines)
        # Find chained exceptions if we have a traceback (not for exception-only mode)
        if etb is not None:
            exception = self.get_parts_of_chained_exception(evalue)
            if exception and (id(exception[1]) not in chained_exc_ids):
                chained_exception_message = (
                    self.prepare_chained_exception_message(evalue.__cause__)[0]
                    if evalue is not None
                    else ""
                )
                etype, evalue, etb = exception
                # Trace exception to avoid infinite 'cause' loop
                chained_exc_ids.add(id(exception[1]))
                chained_exceptions_tb_offset = 0
                # Recurse for the chained exception and prepend it so the
                # oldest exception in the chain prints first.
                out_list = (
                    self.structured_traceback(
                        etype,
                        evalue,
                        (etb, chained_exc_ids),  # type: ignore
                        chained_exceptions_tb_offset,
                        context,
                    )
                    + chained_exception_message
                    + out_list
                )
        return out_list
    def _format_list(self, extracted_list):
        """Format a list of traceback entry tuples for printing.
        Given a list of tuples as returned by extract_tb() or
        extract_stack(), return a list of strings ready for printing.
        Each string in the resulting list corresponds to the item with the
        same index in the argument list. Each string ends in a newline;
        the strings may contain internal newlines as well, for those items
        whose source text line is not None.
        Lifted almost verbatim from traceback.py
        """
        Colors = self.Colors
        output_list = []
        for ind, (filename, lineno, name, line) in enumerate(extracted_list):
            normalCol, nameCol, fileCol, lineCol = (
                # Emphasize the last entry
                (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line)
                if ind == len(extracted_list) - 1
                else (Colors.Normal, Colors.name, Colors.filename, "")
            )
            fns = _format_filename(filename, fileCol, normalCol, lineno=lineno)
            item = f"{normalCol} {fns}"
            if name != "<module>":
                item += f" in {nameCol}{name}{normalCol}\n"
            else:
                item += "\n"
            if line:
                item += f"{lineCol} {line.strip()}{normalCol}\n"
            output_list.append(item)
        return output_list
    def _format_exception_only(self, etype, value):
        """Format the exception part of a traceback.
        The arguments are the exception type and value such as given by
        sys.exc_info()[:2]. The return value is a list of strings, each ending
        in a newline. Normally, the list contains a single string; however,
        for SyntaxError exceptions, it contains several lines that (when
        printed) display detailed information about where the syntax error
        occurred. The message indicating which exception occurred is the
        always last string in the list.
        Also lifted nearly verbatim from traceback.py
        """
        have_filedata = False
        Colors = self.Colors
        output_list = []
        stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal)
        if value is None:
            # Not sure if this can still happen in Python 2.6 and above
            output_list.append(stype + "\n")
        else:
            if issubclass(etype, SyntaxError):
                have_filedata = True
                if not value.filename: value.filename = "<string>"
                if value.lineno:
                    lineno = value.lineno
                    textline = linecache.getline(value.filename, value.lineno)
                else:
                    lineno = "unknown"
                    textline = ""
                output_list.append(
                    "%s %s%s\n"
                    % (
                        Colors.normalEm,
                        _format_filename(
                            value.filename,
                            Colors.filenameEm,
                            Colors.normalEm,
                            lineno=(None if lineno == "unknown" else lineno),
                        ),
                        Colors.Normal,
                    )
                )
                if textline == "":
                    # linecache came up empty: fall back to the source text
                    # carried by the SyntaxError itself.
                    textline = py3compat.cast_unicode(value.text, "utf-8")
                if textline is not None:
                    i = 0
                    while i < len(textline) and textline[i].isspace():
                        i += 1
                    output_list.append(
                        "%s %s%s\n" % (Colors.line, textline.strip(), Colors.Normal)
                    )
                    if value.offset is not None:
                        # Build the caret line, preserving tabs so the '^'
                        # aligns under the offending column.
                        s = ' '
                        for c in textline[i:value.offset - 1]:
                            if c.isspace():
                                s += c
                            else:
                                s += " "
                        output_list.append(
                            "%s%s^%s\n" % (Colors.caret, s, Colors.Normal)
                        )
            try:
                s = value.msg
            except Exception:
                s = self._some_str(value)
            if s:
                output_list.append(
                    "%s%s:%s %s\n" % (stype, Colors.excName, Colors.Normal, s)
                )
            else:
                output_list.append("%s\n" % stype)
        # PEP-678 notes
        output_list.extend(f"{x}\n" for x in getattr(value, "__notes__", []))
        # sync with user hooks
        if have_filedata:
            ipinst = get_ipython()
            if ipinst is not None:
                ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)
        return output_list
    def get_exception_only(self, etype, value):
        """Only print the exception type and message, without a traceback.
        Parameters
        ----------
        etype : exception type
        value : exception value
        """
        return ListTB.structured_traceback(self, etype, value)
    def show_exception_only(self, etype, evalue):
        """Only print the exception type and message, without a traceback.
        Parameters
        ----------
        etype : exception type
        evalue : exception value
        """
        # This method needs to use __call__ from *this* class, not the one from
        # a subclass whose signature or behavior may be different
        ostream = self.ostream
        ostream.flush()
        ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
        ostream.flush()
    def _some_str(self, value):
        # Lifted from traceback.py
        try:
            return py3compat.cast_unicode(str(value))
        except:
            return u'<unprintable %s object>' % type(value).__name__
class FrameInfo:
    """
    Mirror of stack data's FrameInfo, but so that we can bypass highlighting on
    really long frames.
    """
    description: Optional[str]
    filename: Optional[str]
    # NOTE(review): annotated Tuple[int], but it is assigned whatever the
    # wrapped stack_data FrameInfo exposes — confirm the actual shape.
    lineno: Tuple[int]
    # number of context lines to use
    context: Optional[int]
    raw_lines: List[str]
    @classmethod
    def _from_stack_data_FrameInfo(cls, frame_info):
        # Wrap a stack_data FrameInfo (or RepeatedFrames); attribute access
        # is defensive because RepeatedFrames lacks some of these fields.
        return cls(
            getattr(frame_info, "description", None),
            getattr(frame_info, "filename", None),  # type: ignore[arg-type]
            getattr(frame_info, "lineno", None),  # type: ignore[arg-type]
            getattr(frame_info, "frame", None),
            getattr(frame_info, "code", None),
            sd=frame_info,
            context=None,
        )
    def __init__(
        self,
        description: Optional[str],
        filename: str,
        lineno: Tuple[int],
        frame,
        code,
        *,
        sd=None,
        context=None,
    ):
        self.description = description
        self.filename = filename
        self.lineno = lineno
        self.frame = frame
        self.code = code
        # Underlying stack_data object; None means the "fast" path where
        # raw source lines are fetched directly via inspect.
        self._sd = sd
        self.context = context
        # self.lines = []
        if sd is None:
            try:
                # return a list of source lines and a starting line number
                self.raw_lines = inspect.getsourcelines(frame)[0]
            except OSError:
                self.raw_lines = [
                    "'Could not get source, probably due dynamically evaluated source code.'"
                ]
    @property
    def variables_in_executing_piece(self):
        # Delegate to stack_data when available; no locals otherwise.
        if self._sd:
            return self._sd.variables_in_executing_piece
        else:
            return []
    @property
    def lines(self):
        from executing.executing import NotOneValueFound
        try:
            return self._sd.lines
        except NotOneValueFound:
            # stack_data could not pin down the executing node; return a
            # single placeholder line instead of crashing (see
            # ipython/ipython#13598).
            class Dummy:
                lineno = 0
                is_current = False
                def render(self, *, pygmented):
                    return "<Error retrieving source code with stack_data see ipython/ipython#13598>"
            return [Dummy()]
    @property
    def executing(self):
        # stack_data's "executing" node, or None on the fast path.
        if self._sd:
            return self._sd.executing
        else:
            return None
# ----------------------------------------------------------------------------
class VerboseTB(TBTools):
"""A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
of HTML. Requires inspect and pydoc. Crazy, man.
Modified version which optionally strips the topmost entries from the
traceback, to be used with alternate interpreters (because their own code
would appear in the traceback)."""
tb_highlight = ""
tb_highlight_style = "default"
    def __init__(
        self,
        color_scheme: str = "Linux",
        call_pdb: bool = False,
        ostream=None,
        tb_offset: int = 0,
        long_header: bool = False,
        include_vars: bool = True,
        check_cache=None,
        debugger_cls=None,
        parent=None,
        config=None,
    ):
        """Specify traceback offset, headers and color scheme.
        Define how many frames to drop from the tracebacks. Calling it with
        tb_offset=1 allows use of this handler in interpreters which will have
        their own code at the top of the traceback (VerboseTB will first
        remove that frame before printing the traceback info)."""
        TBTools.__init__(
            self,
            color_scheme=color_scheme,
            call_pdb=call_pdb,
            ostream=ostream,
            parent=parent,
            config=config,
            debugger_cls=debugger_cls,
        )
        self.tb_offset = tb_offset
        self.long_header = long_header
        # Whether to render local-variable values in each frame.
        self.include_vars = include_vars
        # By default we use linecache.checkcache, but the user can provide a
        # different check_cache implementation. This was formerly used by the
        # IPython kernel for interactive code, but is no longer necessary.
        if check_cache is None:
            check_cache = linecache.checkcache
        self.check_cache = check_cache
        # When True, frames whose f_locals set __tracebackhide__ are
        # collapsed into a "[... skipping hidden N frame]" marker.
        self.skip_hidden = True
    def format_record(self, frame_info: FrameInfo):
        """Format a single stack frame"""
        assert isinstance(frame_info, FrameInfo)
        Colors = self.Colors  # just a shorthand + quicker name lookup
        ColorsNormal = Colors.Normal  # used a lot
        if isinstance(frame_info._sd, stack_data.RepeatedFrames):
            # Collapsed run of similar frames (e.g. deep recursion).
            return ' %s[... skipping similar frames: %s]%s\n' % (
                Colors.excName, frame_info.description, ColorsNormal)
        indent = " " * INDENT_SIZE
        em_normal = "%s\n%s%s" % (Colors.valEm, indent, ColorsNormal)
        tpl_call = f"in {Colors.vName}{{file}}{Colors.valEm}{{scope}}{ColorsNormal}"
        tpl_call_fail = "in %s%%s%s(***failed resolving arguments***)%s" % (
            Colors.vName,
            Colors.valEm,
            ColorsNormal,
        )
        tpl_name_val = "%%s %s= %%s%s" % (Colors.valEm, ColorsNormal)
        link = _format_filename(
            frame_info.filename,
            Colors.filenameEm,
            ColorsNormal,
            lineno=frame_info.lineno,
        )
        args, varargs, varkw, locals_ = inspect.getargvalues(frame_info.frame)
        if frame_info.executing is not None:
            func = frame_info.executing.code_qualname()
        else:
            func = "?"
        if func == "<module>":
            # Module-level code has no call signature to show.
            call = ""
        else:
            # Decide whether to include variable details or not
            var_repr = eqrepr if self.include_vars else nullrepr
            try:
                scope = inspect.formatargvalues(
                    args, varargs, varkw, locals_, formatvalue=var_repr
                )
                call = tpl_call.format(file=func, scope=scope)
            except KeyError:
                # This happens in situations like errors inside generator
                # expressions, where local variables are listed in the
                # line, but can't be extracted from the frame. I'm not
                # 100% sure this isn't actually a bug in inspect itself,
                # but since there's no info for us to compute with, the
                # best we can do is report the failure and move on. Here
                # we must *not* call any traceback construction again,
                # because that would mess up use of %debug later on. So we
                # simply report the failure and move on. The only
                # limitation will be that this frame won't have locals
                # listed in the call signature. Quite subtle problem...
                # I can't think of a good way to validate this in a unit
                # test, but running a script consisting of:
                # dict( (k,v.strip()) for (k,v) in range(10) )
                # will illustrate the error, if this exception catch is
                # disabled.
                call = tpl_call_fail % func
        lvals = ''
        lvals_list = []
        if self.include_vars:
            try:
                # we likely want to fix stackdata at some point, but
                # still need a workaround.
                fibp = frame_info.variables_in_executing_piece
                for var in fibp:
                    lvals_list.append(tpl_name_val % (var.name, repr(var.value)))
            except Exception:
                # Inspecting locals may itself raise (broken __repr__ etc.).
                lvals_list.append(
                    "Exception trying to inspect frame. No more locals available."
                )
        if lvals_list:
            lvals = '%s%s' % (indent, em_normal.join(lvals_list))
        result = f'{link}{", " if call else ""}{call}\n'
        if frame_info._sd is None:
            # fast fallback if file is too long
            tpl_link = "%s%%s%s" % (Colors.filenameEm, ColorsNormal)
            link = tpl_link % util_path.compress_user(frame_info.filename)
            level = "%s %s\n" % (link, call)
            _line_format = PyColorize.Parser(
                style=self.color_scheme_table.active_scheme_name, parent=self
            ).format2
            first_line = frame_info.code.co_firstlineno
            current_line = frame_info.lineno[0]
            raw_lines = frame_info.raw_lines
            index = current_line - first_line
            # NOTE(review): this arithmetic assumes frame_info.context is an
            # int on this path — confirm how FrameInfo is constructed for the
            # fast fallback, since the attribute's default is None.
            if index >= frame_info.context:
                start = max(index - frame_info.context, 0)
                stop = index + frame_info.context
                index = frame_info.context
            else:
                start = 0
                stop = index + frame_info.context
            raw_lines = raw_lines[start:stop]
            return "%s%s" % (
                level,
                "".join(
                    _simple_format_traceback_lines(
                        current_line,
                        index,
                        raw_lines,
                        Colors,
                        lvals,
                        _line_format,
                    )
                ),
            )
            # result += "\n".join(frame_info.raw_lines)
        else:
            # Normal path: let stack_data render (possibly highlighted) lines.
            result += "".join(
                _format_traceback_lines(
                    frame_info.lines, Colors, self.has_colors, lvals
                )
            )
        return result
def prepare_header(self, etype: str, long_version: bool = False):
colors = self.Colors # just a shorthand + quicker name lookup
colorsnormal = colors.Normal # used a lot
exc = '%s%s%s' % (colors.excName, etype, colorsnormal)
width = min(75, get_terminal_size()[0])
if long_version:
# Header with the exception type, python version, and date
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = "%s%s%s\n%s%s%s\n%s" % (
colors.topline,
"-" * width,
colorsnormal,
exc,
" " * (width - len(etype) - len(pyver)),
pyver,
date.rjust(width),
)
head += (
"\nA problem occurred executing Python code. Here is the sequence of function"
"\ncalls leading up to the error, with the most recent (innermost) call last."
)
else:
# Simplified header
head = "%s%s" % (
exc,
"Traceback (most recent call last)".rjust(width - len(etype)),
)
return head
    def format_exception(self, etype, evalue):
        """Return a list of colored lines for the final exception message.

        The first line is ``ExcName: message``; any PEP-678 ``__notes__``
        attached to the exception follow, one line each.
        """
        colors = self.Colors  # just a shorthand + quicker name lookup
        colorsnormal = colors.Normal  # used a lot
        # Get (safely) a string form of the exception info
        try:
            etype_str, evalue_str = map(str, (etype, evalue))
        except:
            # User exception is improperly defined.
            # NOTE(review): this fallback deliberately replaces etype with the
            # builtin `str` type and evalue with the (type, value) tuple from
            # sys.exc_info() before stringifying — odd-looking but long-standing;
            # confirm against upstream before "fixing".
            etype, evalue = str, sys.exc_info()[:2]
            etype_str, evalue_str = map(str, (etype, evalue))
        # PEP-678 notes
        notes = getattr(evalue, "__notes__", [])
        # A malformed __notes__ (non-sequence, or a bare str/bytes) is rendered
        # as a single repr'd line instead of being iterated character by character.
        if not isinstance(notes, Sequence) or isinstance(notes, (str, bytes)):
            notes = [_safe_string(notes, "__notes__", func=repr)]
        # ... and format it
        return [
            "{}{}{}: {}".format(
                colors.excName,
                etype_str,
                colorsnormal,
                py3compat.cast_unicode(evalue_str),
            ),
            *(
                "{}{}".format(
                    colorsnormal, _safe_string(py3compat.cast_unicode(n), "note")
                )
                for n in notes
            ),
        ]
def format_exception_as_a_whole(
self,
etype: type,
evalue: Optional[BaseException],
etb: Optional[TracebackType],
number_of_lines_of_context,
tb_offset: Optional[int],
):
"""Formats the header, traceback and exception message for a single exception.
This may be called multiple times by Python 3 exception chaining
(PEP 3134).
"""
# some locals
orig_etype = etype
try:
etype = etype.__name__ # type: ignore
except AttributeError:
pass
tb_offset = self.tb_offset if tb_offset is None else tb_offset
assert isinstance(tb_offset, int)
head = self.prepare_header(str(etype), self.long_header)
records = (
self.get_records(etb, number_of_lines_of_context, tb_offset) if etb else []
)
frames = []
skipped = 0
lastrecord = len(records) - 1
for i, record in enumerate(records):
if (
not isinstance(record._sd, stack_data.RepeatedFrames)
and self.skip_hidden
):
if (
record.frame.f_locals.get("__tracebackhide__", 0)
and i != lastrecord
):
skipped += 1
continue
if skipped:
Colors = self.Colors # just a shorthand + quicker name lookup
ColorsNormal = Colors.Normal # used a lot
frames.append(
" %s[... skipping hidden %s frame]%s\n"
% (Colors.excName, skipped, ColorsNormal)
)
skipped = 0
frames.append(self.format_record(record))
if skipped:
Colors = self.Colors # just a shorthand + quicker name lookup
ColorsNormal = Colors.Normal # used a lot
frames.append(
" %s[... skipping hidden %s frame]%s\n"
% (Colors.excName, skipped, ColorsNormal)
)
formatted_exception = self.format_exception(etype, evalue)
if records:
frame_info = records[-1]
ipinst = get_ipython()
if ipinst is not None:
ipinst.hooks.synchronize_with_editor(frame_info.filename, frame_info.lineno, 0)
return [[head] + frames + formatted_exception]
def get_records(
self, etb: TracebackType, number_of_lines_of_context: int, tb_offset: int
):
assert etb is not None
context = number_of_lines_of_context - 1
after = context // 2
before = context - after
if self.has_colors:
style = get_style_by_name(self.tb_highlight_style)
style = stack_data.style_with_executing_node(style, self.tb_highlight)
formatter = Terminal256Formatter(style=style)
else:
formatter = None
options = stack_data.Options(
before=before,
after=after,
pygments_formatter=formatter,
)
# Let's estimate the amount of code we will have to parse/highlight.
cf: Optional[TracebackType] = etb
max_len = 0
tbs = []
while cf is not None:
try:
mod = inspect.getmodule(cf.tb_frame)
if mod is not None:
mod_name = mod.__name__
root_name, *_ = mod_name.split(".")
if root_name == "IPython":
cf = cf.tb_next
continue
max_len = get_line_number_of_frame(cf.tb_frame)
except OSError:
max_len = 0
max_len = max(max_len, max_len)
tbs.append(cf)
cf = getattr(cf, "tb_next", None)
if max_len > FAST_THRESHOLD:
FIs = []
for tb in tbs:
frame = tb.tb_frame # type: ignore
lineno = (frame.f_lineno,)
code = frame.f_code
filename = code.co_filename
# TODO: Here we need to use before/after/
FIs.append(
FrameInfo(
"Raw frame", filename, lineno, frame, code, context=context
)
)
return FIs
res = list(stack_data.FrameInfo.stack_data(etb, options=options))[tb_offset:]
res = [FrameInfo._from_stack_data_FrameInfo(r) for r in res]
return res
    def structured_traceback(
        self,
        etype: type,
        evalue: Optional[BaseException],
        etb: Optional[TracebackType] = None,
        tb_offset: Optional[int] = None,
        number_of_lines_of_context: int = 5,
    ):
        """Return a nice text document describing the traceback.

        Walks the ``__cause__``/``__context__`` chain and emits the chained
        exceptions in reversed order so the first-raised exception is on top.
        """
        formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context,
                                                               tb_offset)

        colors = self.Colors  # just a shorthand + quicker name lookup
        colorsnormal = colors.Normal  # used a lot
        head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal)
        structured_traceback_parts = [head]
        # Chained exceptions never get a per-call offset applied.
        chained_exceptions_tb_offset = 0
        # Chained exceptions are shown with less context than the primary one.
        lines_of_context = 3
        formatted_exceptions = formatted_exception
        exception = self.get_parts_of_chained_exception(evalue)
        if exception:
            assert evalue is not None
            formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
            etype, evalue, etb = exception
        else:
            evalue = None
        # Guard against cycles in the cause chain.
        chained_exc_ids = set()
        while evalue:
            formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context,
                                                                     chained_exceptions_tb_offset)
            exception = self.get_parts_of_chained_exception(evalue)

            if exception and not id(exception[1]) in chained_exc_ids:
                chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop
                formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
                etype, evalue, etb = exception
            else:
                evalue = None

        # we want to see exceptions in a reversed order:
        # the first exception should be on top
        for formatted_exception in reversed(formatted_exceptions):
            structured_traceback_parts += formatted_exception

        return structured_traceback_parts
    def debugger(self, force: bool = False):
        """Call up the pdb debugger if desired, always clean up the tb
        reference.

        Keywords:

          - force(False): by default, this routine checks the instance call_pdb
            flag and does not actually invoke the debugger if the flag is false.
            The 'force' option forces the debugger to activate even if the flag
            is false.

        If the call_pdb flag is set, the pdb interactive debugger is
        invoked. In all cases, the self.tb reference to the current traceback
        is deleted to prevent lingering references which hamper memory
        management.

        Note that each call to pdb() does an 'import readline', so if your app
        requires a special setup for the readline completers, you'll have to
        fix that by hand after invoking the exception handler."""

        if force or self.call_pdb:
            # Lazily construct the debugger on first use.
            if self.pdb is None:
                self.pdb = self.debugger_cls()
            # the system displayhook may have changed, restore the original
            # for pdb
            display_trap = DisplayTrap(hook=sys.__displayhook__)
            with display_trap:
                self.pdb.reset()
                # Find the right frame so we don't pop up inside ipython itself
                if hasattr(self, "tb") and self.tb is not None:  # type: ignore[has-type]
                    etb = self.tb  # type: ignore[has-type]
                else:
                    etb = self.tb = sys.last_traceback
                # Walk to the innermost frame of the stored traceback.
                while self.tb is not None and self.tb.tb_next is not None:
                    assert self.tb.tb_next is not None
                    self.tb = self.tb.tb_next
                if etb and etb.tb_next:
                    etb = etb.tb_next
                self.pdb.botframe = etb.tb_frame
                # last_value should be deprecated, but last_exc is sometimes not set
                # please check why later and remove the getattr.
                exc = sys.last_value if sys.version_info < (3, 12) else getattr(sys, "last_exc", sys.last_value)  # type: ignore[attr-defined]
                if exc:
                    self.pdb.interaction(None, exc)
                else:
                    self.pdb.interaction(None, etb)

        # Always drop the traceback reference to avoid keeping frames alive.
        if hasattr(self, 'tb'):
            del self.tb
def handler(self, info=None):
(etype, evalue, etb) = info or sys.exc_info()
self.tb = etb
ostream = self.ostream
ostream.flush()
ostream.write(self.text(etype, evalue, etb))
ostream.write('\n')
ostream.flush()
# Changed so an instance can just be called as VerboseTB_inst() and print
# out the right info on its own.
def __call__(self, etype=None, evalue=None, etb=None):
"""This hook can replace sys.excepthook (for Python 2.1 or higher)."""
if etb is None:
self.handler()
else:
self.handler((etype, evalue, etb))
try:
self.debugger()
except KeyboardInterrupt:
print("\nKeyboardInterrupt")
#----------------------------------------------------------------------------
class FormattedTB(VerboseTB, ListTB):
    """Subclass ListTB but allow calling with a traceback.

    It can thus be used as a sys.excepthook for Python > 2.1.

    Also adds 'Context' and 'Verbose' modes, not available in ListTB.

    Allows a tb_offset to be specified. This is useful for situations where
    one needs to remove a number of topmost frames from the traceback (such as
    occurs with python programs that themselves execute other python code,
    like Python shells). """

    # Active display mode; always one of self.valid_modes.
    mode: str

    def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
                 ostream=None,
                 tb_offset=0, long_header=False, include_vars=False,
                 check_cache=None, debugger_cls=None,
                 parent=None, config=None):
        # NEVER change the order of this list. Put new modes at the end:
        self.valid_modes = ['Plain', 'Context', 'Verbose', 'Minimal']
        self.verbose_modes = self.valid_modes[1:3]

        VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
                           ostream=ostream, tb_offset=tb_offset,
                           long_header=long_header, include_vars=include_vars,
                           check_cache=check_cache, debugger_cls=debugger_cls,
                           parent=parent, config=config)

        # Different types of tracebacks are joined with different separators to
        # form a single string. They are taken from this dict
        self._join_chars = dict(Plain='', Context='\n', Verbose='\n',
                                Minimal='')
        # set_mode also sets the tb_join_char attribute
        self.set_mode(mode)

    def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5):
        """Dispatch to the formatter matching the current mode."""
        tb_offset = self.tb_offset if tb_offset is None else tb_offset
        mode = self.mode
        if mode in self.verbose_modes:
            # Verbose modes need a full traceback
            return VerboseTB.structured_traceback(
                self, etype, value, tb, tb_offset, number_of_lines_of_context
            )
        elif mode == 'Minimal':
            return ListTB.get_exception_only(self, etype, value)
        else:
            # We must check the source cache because otherwise we can print
            # out-of-date source code.
            self.check_cache()
            # Now we can extract and format the exception
            return ListTB.structured_traceback(
                self, etype, value, tb, tb_offset, number_of_lines_of_context
            )

    def stb2text(self, stb):
        """Convert a structured traceback (a list) to a string."""
        return self.tb_join_char.join(stb)

    def set_mode(self, mode: Optional[str] = None):
        """Switch to the desired mode.

        If mode is not specified, cycles through the available modes."""

        if not mode:
            new_idx = (self.valid_modes.index(self.mode) + 1 ) % \
                      len(self.valid_modes)
            self.mode = self.valid_modes[new_idx]
        elif mode not in self.valid_modes:
            raise ValueError(
                "Unrecognized mode in FormattedTB: <" + mode + ">\n"
                "Valid modes: " + str(self.valid_modes)
            )
        else:
            assert isinstance(mode, str)
            self.mode = mode
        # include variable details only in 'Verbose' mode
        self.include_vars = (self.mode == self.valid_modes[2])
        # Set the join character for generating text tracebacks
        self.tb_join_char = self._join_chars[self.mode]

    # some convenient shortcuts
    def plain(self):
        self.set_mode(self.valid_modes[0])

    def context(self):
        self.set_mode(self.valid_modes[1])

    def verbose(self):
        self.set_mode(self.valid_modes[2])

    def minimal(self):
        self.set_mode(self.valid_modes[3])
#----------------------------------------------------------------------------
class AutoFormattedTB(FormattedTB):
    """A traceback printer which can be called on the fly.

    It will find out about exceptions by itself.

    A brief example::

        AutoTB = AutoFormattedTB(mode = 'Verbose',color_scheme='Linux')
        try:
          ...
        except:
          AutoTB()  # or AutoTB(out=logfile) where logfile is an open file object
    """

    def __call__(self, etype=None, evalue=None, etb=None,
                 out=None, tb_offset=None):
        """Print out a formatted exception traceback.

        Optional arguments:

          - out: an open file-like object to direct output to.

          - tb_offset: the number of frames to skip over in the stack, on a
            per-call basis (this overrides temporarily the instance's tb_offset
            given at initialization time."""

        if out is None:
            out = self.ostream
        out.flush()
        out.write(self.text(etype, evalue, etb, tb_offset))
        out.write('\n')
        out.flush()
        # FIXME: we should remove the auto pdb behavior from here and leave
        # that to the clients.
        try:
            self.debugger()
        except KeyboardInterrupt:
            print("\nKeyboardInterrupt")

    def structured_traceback(
        self,
        etype: type,
        evalue: Optional[BaseException],
        etb: Optional[TracebackType] = None,
        tb_offset: Optional[int] = None,
        number_of_lines_of_context: int = 5,
    ):
        """Like FormattedTB.structured_traceback, but self-discovers the
        current exception when *etype* is None."""
        # tb: TracebackType or tuple of tb types ?
        if etype is None:
            etype, evalue, etb = sys.exc_info()
        if isinstance(etb, tuple):
            # tb is a tuple if this is a chained exception.
            self.tb = etb[0]
        else:
            self.tb = etb
        return FormattedTB.structured_traceback(
            self, etype, evalue, etb, tb_offset, number_of_lines_of_context
        )
#---------------------------------------------------------------------------
# A simple class to preserve Nathan's original functionality.
class ColorTB(FormattedTB):
    """Shorthand to initialize a FormattedTB in Linux colors mode."""

    def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs):
        """Delegate straight to FormattedTB, defaulting to the Linux scheme."""
        super().__init__(color_scheme=color_scheme, call_pdb=call_pdb, **kwargs)
class SyntaxTB(ListTB):
    """Extension which holds some state: the last exception value"""

    def __init__(self, color_scheme='NoColor', parent=None, config=None):
        ListTB.__init__(self, color_scheme, parent=parent, config=config)
        # Most recent SyntaxError seen; cleared by clear_err_state().
        self.last_syntax_error = None

    def __call__(self, etype, value, elist):
        self.last_syntax_error = value
        ListTB.__call__(self, etype, value, elist)

    def structured_traceback(self, etype, value, elist, tb_offset=None,
                             context=5):
        # If the source file has been edited, the line in the syntax error can
        # be wrong (retrieved from an outdated cache). This replaces it with
        # the current value.
        if isinstance(value, SyntaxError) \
                and isinstance(value.filename, str) \
                and isinstance(value.lineno, int):
            linecache.checkcache(value.filename)
            newtext = linecache.getline(value.filename, value.lineno)
            if newtext:
                value.text = newtext
        self.last_syntax_error = value
        return super(SyntaxTB, self).structured_traceback(etype, value, elist,
                                                          tb_offset=tb_offset, context=context)

    def clear_err_state(self):
        """Return the current error state and clear it"""
        e = self.last_syntax_error
        self.last_syntax_error = None
        return e

    def stb2text(self, stb):
        """Convert a structured traceback (a list) to a string."""
        return ''.join(stb)
# some internal-use functions
def text_repr(value):
    """Hopefully pretty robust repr equivalent.

    Falls back through progressively cruder strategies so that *some* string
    is always returned, even for objects whose __repr__ raises.
    """
    # this is pretty horrible but should always return *something*
    try:
        return pydoc.text.repr(value)  # type: ignore[call-arg]
    except KeyboardInterrupt:
        raise
    except:
        try:
            return repr(value)
        except KeyboardInterrupt:
            raise
        except:
            try:
                # all still in an except block so we catch
                # getattr raising
                name = getattr(value, '__name__', None)
                if name:
                    # ick, recursion
                    return text_repr(name)
                klass = getattr(value, '__class__', None)
                if klass:
                    return '%s instance' % text_repr(klass)
            except KeyboardInterrupt:
                raise
            except:
                pass
            # BUG FIX: the original fell off this branch (no __name__ and a
            # falsy __class__) and implicitly returned None, breaking the
            # "always return *something*" contract.  Return the sentinel
            # string in every remaining case.
            return 'UNRECOVERABLE REPR FAILURE'
def eqrepr(value, repr=text_repr):
    """Return *value*'s repr prefixed with '=' (used when printing locals)."""
    return '={}'.format(repr(value))
def nullrepr(value, repr=text_repr):
    """Return an empty string regardless of *value* (suppresses value display)."""
    return ""
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py3@IPython@core@ultratb.py@.PATH_END.py
|
{
"filename": "tfsa-2022-164.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2022-164.md",
"type": "Markdown"
}
|
## TFSA-2022-164: `CHECK_EQ` fail via input in `SparseMatrixNNZ`
### CVE Number
CVE-2022-41901
### Impact
An input `sparse_matrix` that is not a matrix with a shape with rank 0 will trigger a `CHECK` fail in [`tf.raw_ops.SparseMatrixNNZ`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/sparse/sparse_matrix.h).
```python
import tensorflow as tf
tf.raw_ops.SparseMatrixNNZ(sparse_matrix=[])
```
### Patches
We have patched the issue in GitHub commit [f856d02e5322821aad155dad9b3acab1e9f5d693](https://github.com/tensorflow/tensorflow/commit/f856d02e5322821aad155dad9b3acab1e9f5d693).
The fix will be included in TensorFlow 2.11. We will also cherrypick this commit on TensorFlow 2.10.1, 2.9.3, and TensorFlow 2.8.4, as these are also affected and still in supported range.
### For more information
Please consult [our security guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for more information regarding the security model and how to contact us with issues and questions.
### Attribution
This vulnerability has been reported by Kang Hong Jin.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2022-164.md@.PATH_END.py
|
{
"filename": "plot_atomic_data.md",
"repo_name": "Morisset/PyNeb_devel",
"repo_path": "PyNeb_devel_extracted/PyNeb_devel-master/docs/api_reference/plot/plot_atomic_data.md",
"type": "Markdown"
}
|
::: pyneb.plot.plotAtomicData
handler: python
rendering:
show_root_heading: false
selection:
docstring_style: google
docstring_options:
replace_admonitions: no
|
MorissetREPO_NAMEPyNeb_develPATH_START.@PyNeb_devel_extracted@PyNeb_devel-master@docs@api_reference@plot@plot_atomic_data.md@.PATH_END.py
|
{
"filename": "flask_theme_support.py",
"repo_name": "dfm/python-fsps",
"repo_path": "python-fsps_extracted/python-fsps-main/docs/_themes/flask_theme_support.py",
"type": "Python"
}
|
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import (
Comment,
Error,
Generic,
Keyword,
Literal,
Name,
Number,
Operator,
Other,
Punctuation,
String,
Whitespace,
)
class FlaskyStyle(Style):
    """Pygments style based on Tango, used by the Flask-derived Sphinx theme.

    Each entry maps a Pygments token type to a CSS-like style string; the
    short CSS class emitted by Pygments is noted in the trailing comments.
    """
    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        # Text: "", # class: ''
        Whitespace: "underline #f8f8f8", # class: 'w'
        Error: "#a40000 border:#ef2929", # class: 'err'
        Other: "#000000", # class 'x'
        Comment: "italic #8f5902", # class: 'c'
        Comment.Preproc: "noitalic", # class: 'cp'
        Keyword: "bold #004461", # class: 'k'
        Keyword.Constant: "bold #004461", # class: 'kc'
        Keyword.Declaration: "bold #004461", # class: 'kd'
        Keyword.Namespace: "bold #004461", # class: 'kn'
        Keyword.Pseudo: "bold #004461", # class: 'kp'
        Keyword.Reserved: "bold #004461", # class: 'kr'
        Keyword.Type: "bold #004461", # class: 'kt'
        Operator: "#582800", # class: 'o'
        Operator.Word: "bold #004461", # class: 'ow' - like keywords
        Punctuation: "bold #000000", # class: 'p'
        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name: "#000000", # class: 'n'
        Name.Attribute: "#c4a000", # class: 'na' - to be revised
        Name.Builtin: "#004461", # class: 'nb'
        Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
        Name.Class: "#000000", # class: 'nc' - to be revised
        Name.Constant: "#000000", # class: 'no' - to be revised
        Name.Decorator: "#888", # class: 'nd' - to be revised
        Name.Entity: "#ce5c00", # class: 'ni'
        Name.Exception: "bold #cc0000", # class: 'ne'
        Name.Function: "#000000", # class: 'nf'
        Name.Property: "#000000", # class: 'py'
        Name.Label: "#f57900", # class: 'nl'
        Name.Namespace: "#000000", # class: 'nn' - to be revised
        Name.Other: "#000000", # class: 'nx'
        Name.Tag: "bold #004461", # class: 'nt' - like a keyword
        Name.Variable: "#000000", # class: 'nv' - to be revised
        Name.Variable.Class: "#000000", # class: 'vc' - to be revised
        Name.Variable.Global: "#000000", # class: 'vg' - to be revised
        Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
        Number: "#990000", # class: 'm'
        Literal: "#000000", # class: 'l'
        Literal.Date: "#000000", # class: 'ld'
        String: "#4e9a06", # class: 's'
        String.Backtick: "#4e9a06", # class: 'sb'
        String.Char: "#4e9a06", # class: 'sc'
        String.Doc: "italic #8f5902", # class: 'sd' - like a comment
        String.Double: "#4e9a06", # class: 's2'
        String.Escape: "#4e9a06", # class: 'se'
        String.Heredoc: "#4e9a06", # class: 'sh'
        String.Interpol: "#4e9a06", # class: 'si'
        String.Other: "#4e9a06", # class: 'sx'
        String.Regex: "#4e9a06", # class: 'sr'
        String.Single: "#4e9a06", # class: 's1'
        String.Symbol: "#4e9a06", # class: 'ss'
        Generic: "#000000", # class: 'g'
        Generic.Deleted: "#a40000", # class: 'gd'
        Generic.Emph: "italic #000000", # class: 'ge'
        Generic.Error: "#ef2929", # class: 'gr'
        Generic.Heading: "bold #000080", # class: 'gh'
        Generic.Inserted: "#00A000", # class: 'gi'
        Generic.Output: "#888", # class: 'go'
        Generic.Prompt: "#745334", # class: 'gp'
        Generic.Strong: "bold #000000", # class: 'gs'
        Generic.Subheading: "bold #800080", # class: 'gu'
        Generic.Traceback: "bold #a40000", # class: 'gt'
    }
|
dfmREPO_NAMEpython-fspsPATH_START.@python-fsps_extracted@python-fsps-main@docs@_themes@flask_theme_support.py@.PATH_END.py
|
{
"filename": "ra_dec.py",
"repo_name": "HeRTA/FRBSTATS",
"repo_path": "FRBSTATS_extracted/FRBSTATS-main/figs/ra_dec.py",
"type": "Python"
}
|
import numpy as np
from astropy import units as u
import astropy.coordinates as apycoords
from astropy.coordinates import SkyCoord
from shutil import copyfile
from csv import reader
import urllib.request
import matplotlib
import matplotlib.pyplot as plt
# Use TeX
plt.rcParams['text.usetex'] = True
# Adjust figsize
plt.rcParams["figure.figsize"] = (35,35)
# Load data
# Initiate empty parameter lists
l = []   # Galactic longitude [deg] (as strings until pre-processing)
b = []   # Galactic latitude [deg]
dm = []  # Dispersion measure [pc cm^-3]
# Read FRBSTATS CSV catalogue
# NOTE: columns 6/7/9 are assumed to be gl, gb and DM — confirm against the
# catalogue header if the schema changes.
with open('../catalogue.csv', 'r') as read_obj:
    csv_reader = reader(read_obj)
    header = next(csv_reader)
    # Skip header (idiom fix: identity comparison with None, not `!=`)
    if header is not None:
        for row in csv_reader:
            l.append(row[6])
            b.append(row[7])
            dm.append(row[9])
# Pre-process data (pick out incompatible rows)
def _coerce_floats(column, bad_rows):
    """Convert column entries to float in place; record unparsable indices."""
    for idx, val in enumerate(column):
        try:
            column[idx] = float(val)
        except ValueError:
            bad_rows.add(idx)

# The three per-column loops were identical; run them through one helper.
idx_mask = set()
for _column in (l, b, dm):
    _coerce_floats(_column, idx_mask)
# Dump rows with missing data (delete from the end so indices stay valid)
for idx in sorted(idx_mask, reverse=True):
    del l[idx]
    del b[idx]
    del dm[idx]
# Convert Galactic (l, b) to equatorial RA/Dec.
ras = []   # right ascension [decimal hours]
decs = []  # declination [deg]
# Idiom fix: iterate the paired columns directly instead of range(len(...)).
for gal_l, gal_b in zip(l, b):
    c = SkyCoord(gal_l, gal_b, frame='galactic', unit='deg').icrs
    ras.append(c.ra.hour)
    decs.append(c.dec.deg)
# Load HI survey
survey = np.loadtxt('map.txt')
# Flip array to match RA and Dec axes
#survey_corrected = survey
survey_corrected = np.flip(survey, 0)
# Plot map (transpose so Dec is on x and RA on y; extent maps pixels to
# Dec in [-90, 90] deg and RA in [0, 24] h)
plt.imshow(survey_corrected.T, extent=[-90,90,24,0], aspect=4, interpolation='gaussian')
# Plot properties
plt.title(r'$\mathrm{FRB \ } \delta \mathrm{-} \alpha \mathrm{ \ Distribution}$', fontsize=70, y=1.01)
plt.xlabel(r'$\mathrm{Declination \ [deg]}$', fontsize=50)
plt.ylabel(r'$\mathrm{Right \ Ascension \ [h]}$', fontsize=50)
plt.yticks(np.arange(0, 24.01, 2))
plt.xticks(fontsize=36)
plt.yticks(fontsize=36)
# Plot given source position, colored by dispersion measure
plt.scatter(decs, ras, c=dm, s=400, alpha=0.6, edgecolor='white', linewidth=2, cmap='plasma')
# Set colorbar
cbar = plt.colorbar(ticks=list(np.arange(0, max(dm), 300)), orientation="horizontal", aspect=30, pad=0.08)
cbar.set_label(r'$\mathrm{Dispersion \ Measure \ }\Bigg[\mathrm{pc \ cm}^{-3}\Bigg]$', fontsize=52)
cbar.ax.tick_params(labelsize=42)
# Remove alpha colorbar component
cbar.set_alpha(1)
# NOTE(review): Colorbar.draw_all() is deprecated since matplotlib 3.6 and
# removed in 3.8 — confirm the pinned matplotlib version or switch to
# fig.canvas.draw().
cbar.draw_all()
# Add survey citation
plt.text(40.5, -0.31, r'$\mathrm{LAB \ HI \ Survey \ (Kalberla \ et \ al., \ 2005)}$', fontsize=34, bbox={'facecolor': 'white', 'pad': 5})
# Save plot to file
plt.savefig('ra_dec.svg', bbox_inches='tight')
plt.savefig('ra_dec.pdf', bbox_inches='tight')
plt.savefig('ra_dec.png', bbox_inches='tight')
|
HeRTAREPO_NAMEFRBSTATSPATH_START.@FRBSTATS_extracted@FRBSTATS-main@figs@ra_dec.py@.PATH_END.py
|
{
"filename": "testme.py",
"repo_name": "idefix-code/idefix",
"repo_path": "idefix_extracted/idefix-master/test/HD/sod/testme.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""
@author: glesur
"""
import os
import sys
sys.path.append(os.getenv("IDEFIX_DIR"))
import pytools.idfx_test as tst
# Dump file produced by each run; compared against the stored reference.
name="dump.0001.dmp"
def testMe(test):
    """Configure, compile and run the sod test for every relevant ini file,
    regenerating the reference dump when test.init is set."""
    test.configure()
    test.compile()
    inifiles=["idefix.ini","idefix-hll.ini","idefix-hllc.ini","idefix-tvdlf.ini"]
    # 4th-order reconstruction needs the RK3 time integrator variants.
    if test.reconstruction==4:
        inifiles=["idefix-rk3.ini","idefix-hllc-rk3.ini"]
    # loop on all the ini files for this test
    for ini in inifiles:
        test.run(inputFile=ini)
        if test.init:
            test.makeReference(filename=name)
        test.standardTest()
        test.nonRegressionTest(filename=name)
test=tst.idfxTest()

# Driver: either run the single configuration given on the command line,
# or (with --all) sweep reconstruction orders 2..4 plus a single-precision run.
if not test.all:
    if(test.check):
        test.checkOnly(filename=name)
    else:
        testMe(test)
else:
    test.noplot = True
    for rec in range(2,5):
        test.vectPot=False
        test.single=False
        test.reconstruction=rec
        test.mpi=False
        testMe(test)

    # test in single precision
    test.reconstruction=2
    test.single=True
    testMe(test)
|
idefix-codeREPO_NAMEidefixPATH_START.@idefix_extracted@idefix-master@test@HD@sod@testme.py@.PATH_END.py
|
{
"filename": "sim.py",
"repo_name": "thomasorb/orb",
"repo_path": "orb_extracted/orb-master/orb/sim.py",
"type": "Python"
}
|
#!/usr/bin/python
# *-* coding: utf-8 *-*
# Author: Thomas Martin <thomas.martin.1@ulaval.ca>
# File: sim.py
## Copyright (c) 2010-2020 Thomas Martin <thomas.martin.1@ulaval.ca>
##
## This file is part of ORB
##
## ORB is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## ORB is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ORB. If not, see <http://www.gnu.org/licenses/>.
import logging
import numpy as np
import warnings
import orb.utils.validate
import orb.utils.filters
import orb.utils.sim
import orb.utils.photometry
import orb.utils.spectrum
import orb.core
import orb.fft
import orb.constants
import scipy.interpolate
import scipy.stats
import pandas as pd
class SkyModel(object):
    """Very basic sky model which generates a spectrum of the sky.

    It includes continuum brightness, sky lines and moon brightness.

    most data comes from https://www.gemini.edu/observing/telescopes-and-sites/sites
    """
    def __init__(self, airmass=1, instrument='sitelle'):
        # :param airmass: airmass used to scale atmospheric extinction
        # :param instrument: instrument configuration name passed to orb.core.Tools
        self.airmass = float(airmass)

        # www.cfht.hawaii.edu/Instruments/ObservatoryManual/CFHT_ObservatoryManual_(Sec_2).html
        # self.sky_brightness = pd.DataFrame(
        #     {'band': ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K'],
        #      'nm': [365, 445, 551, 658, 806, 1220, 1630, 2190], # nm
        #      'mag': [21.6, 22.3, 21.1, 20.3, 19.2, 14.8, 13.4, 12.6]}) # mag/arsec^2

        # self.sky_brightness['flam'] = orb.utils.photometry.ABmag2flambda(
        #     self.sky_brightness.mag, self.sky_brightness.nm*10.) # erg/cm2/s/A/arcsec^2

        # Parse the Gemini sky background table: first non-comment line is the
        # header, remaining lines are numeric rows (rows with zero flux skipped).
        self.skybg = dict()
        self.tools = orb.core.Tools(instrument=instrument)
        filepath = self.tools._get_orb_data_file_path('skybg_50_10.dat')
        with open(filepath) as f:
            for line in f:
                if '#' in line: continue
                if len(self.skybg) == 0:
                    keys = line.strip().split()
                    [self.skybg.__setitem__(ikey, list()) for ikey in keys]
                    continue
                vals = line.strip().split()
                if float(vals[1]) == 0: continue
                for ikey, ival in zip(keys, vals):
                    self.skybg[ikey].append(float(ival))

        # Convert photon counts to flux density: erg/s/cm2/A/arcsec^2.
        self.skybg = pd.DataFrame(self.skybg)
        self.skybg['flam'] = self.skybg['phot/s/nm/arcsec^2/m^2'].values
        self.skybg['flam'] *= orb.utils.photometry.compute_photon_energy(self.skybg.nm.values) # erg/s/nm/arcsec2/m2
        self.skybg['flam'] /= 10 # erg/s/m2/A/arcsec2
        self.skybg['flam'] /= 1e4 # erg/s/cm2/A/arcsec2

        # Estimate the diffuse continuum as a smooth spline through per-bin
        # minima (minima suppress the emission-line contribution).
        y = self.skybg['flam'].values
        x = self.skybg.nm.values
        BIN = 31
        diffuse = np.array([((x[min(i+BIN, x.size-1)]+x[i])/2, np.min(y[i:i+BIN]))
                            for i in range(0, x.size, BIN)]).T
        self.diffusef = scipy.interpolate.UnivariateSpline(diffuse[0], diffuse[1], k=2, s=np.mean(diffuse[1])**2 * 0.05)
        self.skybg['diffuse'] = self.diffusef(x)

        BIN = 200
        #lines = np.array([((x[min(i+BIN, x.size-1)]+x[i])/2, np.mean(y[i:i+BIN]) - np.min(y[i:i+BIN]))
        #                  for i in range(0, x.size, BIN)]).T
        #self.linesf = scipy.interpolate.UnivariateSpline(lines[0], lines[1], k=2, s=0)
        # Line component = total background minus the diffuse continuum.
        self.linesf = scipy.interpolate.UnivariateSpline(x, y - self.diffusef(x), k=2, s=0)

        # Atmospheric transmission spline over wavelength (nm), scaled by airmass.
        atm_ext_cm1 = orb.core.Vector1d(
            self.tools._get_atmospheric_extinction_file_path(), instrument=instrument)
        atm_ext_cm1.data = orb.utils.photometry.ext2trans(
            atm_ext_cm1.data, self.airmass)
        self.atm_transf = scipy.interpolate.UnivariateSpline(
            1e7/atm_ext_cm1.axis.data[::-1], atm_ext_cm1.data[::-1], k=3, s=0, ext=3)

    def get_spectrum(self, cm1_axis):
        # :param cm1_axis: evenly spaced wavenumber axis (cm-1)
        # :return: orb.core.Vector1d sky spectrum in erg/cm2/s/A per pixel
        assert np.all(np.isclose(np.diff(cm1_axis), np.diff(cm1_axis)[0]), 0), 'cm1_axis must be evenly spaced'
        sky_spectrum = np.zeros_like(cm1_axis)
        sky_lines = orb.core.Lines().air_sky_lines_nm
        axis_step = cm1_axis[1] - cm1_axis[0]
        # Fixed Gaussian FWHM of 4 axis steps for every sky line.
        fwhm = axis_step*4
        for iline in sky_lines:
            iline_cm1 = 1e7/sky_lines[iline][0]
            iline_amp = sky_lines[iline][1]
            if ((iline_cm1 > cm1_axis[0])
                and(iline_cm1 < cm1_axis[-1])):
                iiline = orb.utils.spectrum.gaussian1d(
                    cm1_axis, 0, iline_amp, iline_cm1, fwhm)
                sky_spectrum += iiline

        # lines are scaled to the brightness of the reference lines
        # NOTE(review): orb.utils.vector is not among this module's visible
        # imports — verify it is pulled in transitively.
        SMOOTH = 2
        sky_spectrum /= orb.utils.vector.smooth(sky_spectrum, deg=sky_spectrum.size/SMOOTH)
        sky_spectrum *= orb.utils.vector.smooth(self.linesf(1e7/cm1_axis), deg=sky_spectrum.size/SMOOTH)

        # adding diffuse brightness
        sky_spectrum += self.diffusef(1e7/cm1_axis)

        # convert to erg/cm2/s/A (scale per-arcsec^2 surface brightness by the
        # pixel solid angle)
        pixel_surf = (self.tools.config.FIELD_OF_VIEW_1 * 60
                      / self.tools.config.CAM1_DETECTOR_SIZE_X)**2
        sky_spectrum *= pixel_surf

        return orb.core.Vector1d(sky_spectrum, axis=cm1_axis)
class Base(object):
    """Common initialization for simulated Spectrum/Interferogram objects:
    resolves observation parameters from a filter name or an explicit dict."""

    def __init__(self, step_nb, params, instrument='sitelle', **kwargs):
        """
        :param step_nb: Number of interferogram steps (must be > 0 when params
          is a filter name).
        :param params: Can be a parameters dict or the name of a filter.
        :param instrument: Instrument configuration name.
        :param kwargs: Extra parameters merged into self.params (override).
        """
        self.tools = orb.core.Tools(instrument=instrument)
        if isinstance(params, str):
            # Filter-name mode: derive step/order/zpd/calibration from the
            # filter file and instrument configuration.
            filter_name = params
            self.params = orb.core.ROParams()
            if step_nb <= 0: raise ValueError('step_nb must be > 0')
            self.params['step_nb'] = int(step_nb)
            self.params['filter_name'] = str(filter_name)
            self.filterfile = orb.core.FilterFile(self.params.filter_name)
            self.params['filter_file_path'] = self.filterfile.basic_path
            self.params['step'] = self.filterfile.params.step
            self.params['order'] = self.filterfile.params.order
            # ZPD placed at one fifth of the scan.
            self.params['zpd_index'] = self.params.step_nb // 5
            self.params['calib_coeff'] = orb.utils.spectrum.theta2corr(
                self.tools.config['OFF_AXIS_ANGLE_CENTER'])
            self.params['nm_laser'] = self.tools.config['CALIB_NM_LASER']
        elif isinstance(params, dict):
            # Dict mode: parameters supplied by the caller.
            # NOTE(review): the dict is stored by reference and mutated below;
            # also, if neither calib_coeff nor axis_corr is present the
            # calib_coeff_orig line will raise KeyError — confirm intent.
            self.params = params
            if 'calib_coeff' not in self.params:
                if 'axis_corr' in self.params:
                    self.params['calib_coeff'] = self.params['axis_corr']
        else:
            raise TypeError('params must be a filter name (str) or a parameter dictionary')

        self.params.update(kwargs)
        self.params['calib_coeff_orig'] = self.params['calib_coeff']
        self.params['apodization'] = 1
        self.params['wavenumber'] = True

        # Populated by subclasses (spectrum/interferogram samples and axis).
        self.data = None
        self.axis = None
class Spectrum(Base):
    """Simulated spectrum built by summing modelled emission-line components."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def add_component(self, lines, amp, noise_std=0, vel=0, sigma=0):
        """Add a set of emission lines (sincgauss model) to the spectrum.

        :param lines: Line name(s) or rest wavenumber(s) resolved via orb.core.Lines.
        :param amp: Amplitude(s), one per line.
        :param noise_std: Currently unused — NOTE(review): accepted but never
          applied; confirm whether noise injection was intended here.
        :param vel: Velocity shift in km/s.
        :param sigma: Broadening parameter passed to the sincgauss model.
        """
        lines = orb.core.Lines().get_line_cm1(lines)
        amp = np.array(amp).reshape((np.size(amp)))
        # NOTE(review): orb.fit is not among this module's visible imports —
        # verify it is imported transitively.
        ax, sp = orb.fit.create_cm1_lines_model_raw(
            np.array(lines).reshape((np.size(lines))), amp, self.params.step, self.params.order, self.params.step_nb,
            self.params.calib_coeff, self.params.zpd_index,
            vel=vel, sigma=sigma, fmodel='sincgauss')
        if self.data is None:
            self.data = np.copy(sp)
        else:
            self.data += sp
        # All components must share the same spectral axis.
        if self.axis is not None:
            assert np.all(self.axis == ax)
        else:
            self.axis = np.copy(ax)

    def get_spectrum(self):
        """Return the accumulated components as an orb.fft.Spectrum."""
        if self.data is None:
            raise Exception('add at least one component with add_component()')
        return orb.fft.Spectrum(self.data, axis=self.axis, params=self.params)
class Interferogram(Base):
    def __init__(self, *args, **kwargs):
        # Build the wavenumber axis for the configured scan and start from a
        # zero interferogram; add_line()/add_background() accumulate into it.
        super().__init__(*args, **kwargs)
        cm1_axis = orb.utils.spectrum.create_cm1_axis(
            self.params.step_nb, self.params.step, self.params.order,
            corr=self.params.calib_coeff)
        self.spectrum_axis = orb.core.Axis(cm1_axis, params=self.params)
        self.data = np.zeros_like(cm1_axis)
    def get_interferogram(self):
        """Return a copy of the accumulated data as an orb.fft.Interferogram
        (exposure time fixed at 1 s)."""
        return orb.fft.Interferogram(np.copy(self.data), params=self.params, exposure_time=1)
def add_line(self, wave, vel=0, flux=1, sigma=0, jitter=0):
"""
:param wave: The name of the line or the line wavenumber in cm-1
:param vel: Velocity in km/s
:param jitter: Std of an OPD jitter. Must be given in nm.
"""
if isinstance(wave, str):
wave = orb.core.Lines().get_line_cm1(wave)
if vel != 0:
wave += orb.utils.spectrum.line_shift(
vel, wave, wavenumber=True)
RESOLV_COEFF = self.params.order * 10
opd_axis = ((np.arange(self.params.step_nb) - self.params.zpd_index) * self.params.step) * 1e-7 / self.params.calib_coeff
ratio = self.params.step_nb / float(self.params.step_nb - self.params.zpd_index)
if jitter == 0:
interf = np.cos(2 * np.pi * wave * opd_axis)
else:
jitter_range = np.linspace(-jitter * 3, jitter * 3, RESOLV_COEFF) * 1e-7
highres_opd_axis = np.concatenate([iopd + jitter_range for iopd in opd_axis])
highres_interf = np.cos(2 * np.pi * wave * highres_opd_axis)
kernel = np.array(orb.utils.spectrum.gaussian1d(
jitter_range / jitter * 1e7, 0., 1., 0,
orb.constants.FWHM_COEFF))
kernel /= np.sum(kernel)
interf = np.array([np.sum(sub * kernel)
for sub in np.split(highres_interf, self.params.step_nb)])
if sigma != 0:
sigma_pix = orb.utils.fit.vel2sigma(sigma, wave, orb.cutils.get_cm1_axis_step(
self.params.step_nb, self.params.step, self.params.calib_coeff))
fwhm_pix = orb.utils.spectrum.compute_line_fwhm_pix(oversampling_ratio=ratio)
window = orb.utils.fft.gaussian_window(
orb.utils.fft.sigma2apod(sigma_pix, fwhm_pix),
opd_axis/np.max(opd_axis))
interf *= window
# compute line flux for normalization
fwhm_cm1 = orb.utils.spectrum.compute_line_fwhm(
self.params.step_nb - self.params.zpd_index,
self.params.step, self.params.order,
self.params.calib_coeff, wavenumber=True)
fwhm_nm = orb.utils.spectrum.fwhm_cm12nm(fwhm_cm1, wave) * 10.
line_flux = orb.utils.spectrum.sinc1d_flux(
self.params.step_nb / ratio, fwhm_nm)
interf /= line_flux / flux
self.data += interf
def add_background(self):
QUALITY_COEFF = 100
a = self.filterfile.get_transmission(
self.params.step_nb * QUALITY_COEFF,
corr=self.params.calib_coeff)
return self.add_spectrum(a)
def add_spectrum(self, spectrum):
"""
:param spectrum: Spectrum instance which must be defined on the filter range.
"""
a = spectrum.project(self.spectrum_axis)
if np.any(np.isnan(a.data)):
raise ValueError('spectrum must be defined at least on the whole filter bandpass')
a = np.concatenate((a.data, np.zeros(a.dimx))).astype(float)
a_ifft = np.fft.ifft(a)
a_interf = np.concatenate(
(a_ifft[-self.params.zpd_index:],
a_ifft[:self.params.step_nb - self.params.zpd_index]))
# compensate for energy lost in the imaginary part !
a_interf = a_interf.real.astype(float) * 2.
self.data += a_interf
class SourceSpectrum(orb.core.Vector1d, orb.core.Tools):
    """High-resolution source spectrum that can be converted into a simulated
    interferogram for a SITELLE-like instrument (see `get_interferogram`)."""

    def __init__(self, spectrum, axis, instrument='sitelle', data_prefix="./", **kwargs):
        """Init

        :param spectrum: Spectrum of the source, may be vector of
          zeros. Must have the same size as axis. Must be calibrated
          in erg/cm2/s/A.

        :param axis: Axis in cm-1. Resolution must be much higher than
          the simulated spectrum (e.g. np.linspace(10000, 25000,
          30000))
        """
        assert np.size(axis) > 0, 'axis size must be > 0'
        assert np.size(spectrum) == np.size(axis), 'axis must have same size as spectrum'
        orb.core.Tools.__init__(
            self, instrument=instrument,
            data_prefix=data_prefix,
            config=None)
        orb.core.Vector1d.__init__(
            self, spectrum, axis=axis,
            params={'instrument':instrument}, **kwargs)

    def add_line(self, wave, flux, vel=None, sigma=None):
        """add a line to the spectrum

        :param wave: Name of the line or its wavenumber in cm-1 (can be a tuple)
        :param flux: Flux in erg/cm2/s
        :param vel: Velocity in km/s
        :param sigma: Broadening in km/s
        """
        # normalize `wave` to an array of wavenumbers
        if np.size(wave) == 1:
            wave = [wave,]
        wave = np.array(wave)
        if not np.issubdtype(wave.dtype, np.number):
            wave = orb.core.Lines().get_line_cm1(wave)

        if np.size(flux) == 1:
            flux = [flux,]
        flux = np.array(flux)
        assert flux.size == wave.size, 'flux must have same size as wave'

        if vel is not None:
            vel = np.array(vel)
            assert vel.size == wave.size, 'vel must have same size as wave'
            # doppler-shift the line positions
            wave += orb.utils.spectrum.line_shift(
                vel, wave, wavenumber=True)

        if sigma is not None:
            sigma = np.array(sigma)
            assert sigma.size == wave.size, 'sigma must have same size as wave'
        else:
            sigma = np.zeros_like(wave)

        axis_step = self.axis.data[1] - self.axis.data[0]
        for iwave, isigma, iflux in zip(wave, sigma, flux):
            # velocity broadening -> axis units; enforce a minimum width so
            # the gaussian is resolved on the high-resolution axis
            isigma = orb.utils.fit.vel2sigma(isigma, iwave, axis_step) * axis_step
            isigma = max(4 * axis_step, isigma)
            iline = orb.utils.spectrum.gaussian1d(
                self.axis.data, 0, 1, iwave, isigma * orb.constants.FWHM_COEFF)
            iline /= orb.utils.spectrum.gaussian1d_flux(1, isigma)  # flux is normalized to 1
            iline *= iflux
            self.data += iline

    def add_spectrum(self, spectrum):
        """Add a spectrum sampled on the same axis as this one.

        :param spectrum: spectrum vector, must be in erg/cm2/s/A
        """
        spectrum = np.array(spectrum)
        assert spectrum.size == self.axis.data.size, 'spectrum must have same size has the axis provided at init'
        self.data += spectrum

    def get_interferogram(self, params, camera=0, theta=None, binning=1, me_factor=1., x=None,
                          bypass_flux2counts=False, noiseless=True):
        """Simulate the interferogram observed for this source.

        :param params: dict of observation parameters; must contain
          'instrument', 'filter_name', 'exposure_time', 'step_nb', 'airmass'.
        :param x: scanning vector (to simulate a non-uniform
          scan). Must be given in step fraction. e.g. x =
          (np.arange(params['step_nb']) - params['zpd_index']) will
          simulate the default scanning sequence.
        """
        needed_params = ('instrument', 'filter_name', 'exposure_time', 'step_nb', 'airmass')
        for ipar in needed_params:
            if ipar not in params:
                raise Exception('parameter {} needed'.format(ipar))

        # fill remaining parameters from the filter file and the config
        ff = orb.core.FilterFile(params['filter_name'])
        params.update(ff.params)
        params['zpd_index'] = int(0.25 * params['step_nb'])
        params['nm_laser'] = self.config.CALIB_NM_LASER
        params['apodization'] = 1.
        params['wavenumber'] = True
        if theta is None:
            theta = self.config.OFF_AXIS_ANGLE_CENTER
        corr = orb.utils.spectrum.theta2corr(theta)
        params['calib_coeff_orig'] = corr
        params['calib_coeff'] = corr
        params = orb.core.Params(params)

        spectrum = orb.fft.Spectrum(np.copy(self.data), axis=np.copy(self.axis.data), params=params)
        photom = orb.photometry.Photometry(params.filter_name,
                                           camera, instrument=params.instrument,
                                           airmass=params.airmass)

        # spectrum.data is in erg/cm2/s/A
        # it should be transformed to counts
        if not bypass_flux2counts:
            spectrum = orb.fft.Spectrum(photom.flux2counts(spectrum, modulated=True))
            spectrum.data *= params.step_nb * params.exposure_time * binning**2.
        spectrum.data[np.nonzero(np.isnan(spectrum.data))] = 0.

        # decalibrate spectrum
        decal_cm1_axis = orb.core.Axis(orb.utils.spectrum.create_cm1_axis(
            self.dimx, params.step, params.order, corr=corr))
        spectrum = spectrum.project(decal_cm1_axis)
        spectrum.data[np.nonzero(np.isnan(spectrum.data))] = 0.

        # NOTE(review): original comment said "spectrum is flipped if order is
        # even" but `order & 1` is true for ODD orders -- confirm intended parity
        if int(params.order)&1:
            spectrum.reverse()

        # zero-pad and inverse-transform into interferogram samples
        a = np.concatenate((spectrum.data, np.zeros(spectrum.dimx)))
        if x is None:
            a_ifft = scipy.fft.ifft(a)
            a_interf = np.concatenate(
                (a_ifft[-params.zpd_index:],
                 a_ifft[:params.step_nb - params.zpd_index]))
        else:
            # non-uniform scan: inverse discrete fourier transform at positions x
            a_interf = orb.utils.fft.indft(spectrum.data, x/2)/2

        # compensate energy lost in the imaginary part !
        a_interf = a_interf.real.astype(float) * 2.

        interf = orb.fft.Interferogram(a_interf, params=params)

        # add the unmodulated part of the light as a constant offset
        unmod_spectrum = spectrum.math('divide', photom.get_modulation_efficiency())
        unmod_spectrum.data[np.nonzero(np.isnan(unmod_spectrum.data))] = 0.
        interf.data += np.mean(unmod_spectrum.data).astype(np.longdouble)
        interf.data = interf.data.real

        # modulation efficiency loss with OPD
        if me_factor > 1:
            mean_ = np.mean(interf.data)
            interf.data -= mean_
            interf.data *= orb.utils.fft.gaussian_window(
                me_factor, interf.axis.data/np.max(interf.axis.data))
            interf.data += mean_

        # poisson noise
        if not noiseless:
            interf.data = np.random.poisson(interf.data).astype(int).astype(float)

        return interf
|
thomasorbREPO_NAMEorbPATH_START.@orb_extracted@orb-master@orb@sim.py@.PATH_END.py
|
{
"filename": "queue_timeout_test.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/L0_batcher/queue_timeout_test.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import concurrent.futures
import time
import unittest
import numpy as np
import tritonclient.grpc as grpcclient
from tritonclient.utils import InferenceServerException
class TestMaxQueueDelayTimeout(unittest.TestCase):
    """Check that a request queued behind a saturated model times out promptly."""

    def setUp(self):
        # gRPC client pointed at the locally running server
        self._triton = grpcclient.InferenceServerClient("localhost:8001")

    def _get_inputs(self, batch_size):
        """Build a single FP32 input tensor of ones with the given batch size."""
        self.assertIsInstance(batch_size, int)
        self.assertGreater(batch_size, 0)
        tensor_shape = [batch_size, 8]
        infer_input = grpcclient.InferInput("INPUT0", tensor_shape, "FP32")
        infer_input.set_data_from_numpy(np.ones(tensor_shape, dtype=np.float32))
        return [infer_input]

    def _generate_callback_and_response_pair(self):
        """Return a completion callback plus the dict it records into."""
        state = {"responded": False, "result": None, "error": None}

        def on_complete(result, error):
            state["responded"] = True
            state["result"] = result
            state["error"] = error

        return on_complete, state

    # Test queued requests on dynamic batch scheduler can be cancelled
    def test_default_queue_policy_timeout_prompt_response(self):
        model_name = "dynamic_batch"
        with concurrent.futures.ThreadPoolExecutor() as pool:
            # Saturate the slots on the model
            saturate_thread = pool.submit(
                self._triton.infer, model_name, self._get_inputs(batch_size=1)
            )
            time.sleep(2)  # ensure the slots are filled
            # The next request should be queued
            callback, response = self._generate_callback_and_response_pair()
            self._triton.async_infer(
                model_name, self._get_inputs(batch_size=1), callback
            )
            time.sleep(2)  # ensure the request is queued
            # Check if the request has timed-out
            time.sleep(2)  # ensure the timeout period has expired
            self.assertTrue(response["responded"])
            self.assertEqual(response["result"], None)
            self.assertIsInstance(response["error"], InferenceServerException)
            self.assertEqual(response["error"].status(), "StatusCode.UNAVAILABLE")
            self.assertEqual(response["error"].message(), "Request timeout expired")
            # Join saturating thread
            saturate_thread.result()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@L0_batcher@queue_timeout_test.py@.PATH_END.py
|
{
"filename": "_linepositionsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/outsidetextfont/_linepositionsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for the ``funnel.outsidetextfont.linepositionsrc`` property."""

    def __init__(
        self,
        plotly_name="linepositionsrc",
        parent_name="funnel.outsidetextfont",
        **kwargs,
    ):
        # pop the edit-type override before forwarding remaining kwargs
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnel@outsidetextfont@_linepositionsrc.py@.PATH_END.py
|
{
"filename": "_cone.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/template/data/_cone.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ConeValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Compound-array validator for ``layout.template.data.cone``."""

    def __init__(
        self, plotly_name="cone", parent_name="layout.template.data", **kwargs
    ):
        # pop overridable defaults before forwarding remaining kwargs
        data_class_str = kwargs.pop("data_class_str", "Cone")
        data_docs = kwargs.pop(
            "data_docs",
            """
""",
        )
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@template@data@_cone.py@.PATH_END.py
|
{
"filename": "plot.py",
"repo_name": "nanograv/holodeck",
"repo_path": "holodeck_extracted/holodeck-main/holodeck/plot.py",
"type": "Python"
}
|
"""Plotting module.
Provides convenience methods for generating standard plots and components using `matplotlib`.
"""
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import kalepy as kale
import holodeck as holo
from holodeck import utils, log
from holodeck.constants import MSOL, YR
# ---- default figure geometry & fonts ----
FIGSIZE = 6
FONTSIZE = 13
GOLDEN_RATIO = (np.sqrt(5) - 1) / 2

# ---- module-wide matplotlib style (applied at import time) ----
mpl.style.use('default')   # avoid dark backgrounds from dark theme vscode
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.alpha'] = 0.15
plt.rcParams["mathtext.fontset"] = "cm"
plt.rcParams["font.family"] = "serif"
plt.rcParams["legend.handlelength"] = 1.5
plt.rcParams["lines.solid_capstyle"] = 'round'
# plt.rcParams["font.size"] = FONTSIZE
# plt.rcParams["legend.fontsize"] = FONTSIZE*0.8
# mpl.rcParams['xtick.labelsize'] = FONTSIZE*0.8
# mpl.rcParams['ytick.labelsize'] = FONTSIZE*0.8

# ---- standard (latex) axis labels ----
LABEL_GW_FREQUENCY_YR = r"GW Frequency $[\mathrm{yr}^{-1}]$"
LABEL_GW_FREQUENCY_HZ = r"GW Frequency $[\mathrm{Hz}]$"
LABEL_GW_FREQUENCY_NHZ = r"GW Frequency $[\mathrm{nHz}]$"
LABEL_SEPARATION_PC = r"Binary Separation $[\mathrm{pc}]$"
LABEL_CHARACTERISTIC_STRAIN = r"GW Characteristic Strain"
LABEL_HARDENING_TIME = r"Hardening Time $[\mathrm{Gyr}]$"
LABEL_CLC0 = r"$C_\ell / C_0$"

# parameter-key -> latex label mapping for population parameters
PARAM_KEYS = {
    'hard_time': r"phenom $\tau_f$",
    'hard_gamma_inner': r"phenom $\nu_\mathrm{inner}$",
    'hard_gamma_outer': r"phenom $\nu_\mathrm{outer}$",
    'hard_gamma_rot' : r"phenom $\nu_{\mathrm{rot}}$",
    'gsmf_phi0': r"GSMF $\psi_0$",
    'gsmf_mchar0_log10': r"GSMF $m_{\psi,0}$",
    'gsmf_alpha0': r"GSMF $\alpha_{\psi,0}$",
    'gpf_zbeta': r"GPF $\beta_{p,z}$",
    'gpf_qgamma': r"GPF $\gamma_{p,0}$",
    'gmt_norm': r"GMT $T_0$",
    'gmt_zbeta': r"GMT $\beta_{t,z}$",
    'mmb_mamp_log10': r"MMB $\mu$",
    'mmb_plaw': r"MMB $\alpha_{\mu}$",
    'mmb_scatter_dex': r"MMB $\epsilon_{\mu}$",
}

# labels for single-source expectation values / background detection probability
LABEL_DPRATIO = r"$\langle N_\mathrm{SS} \rangle / \mathrm{DP}_\mathrm{BG}$"
LABEL_EVSS = r"$\langle N_\mathrm{SS} \rangle$"
LABEL_DPBG = r"$\mathrm{DP}_\mathrm{BG}$"

# default matplotlib color cycle, as a list of hex strings
COLORS_MPL = plt.rcParams['axes.prop_cycle'].by_key()['color']
class MidpointNormalize(mpl.colors.Normalize):
    """Linear normalization with a prescribed midpoint mapped to 0.5.

    Useful for diverging colormaps, e.g.
    ``ax.imshow(arr, norm=MidpointNormalize(midpoint=0.0, vmin=-100, vmax=100))``.
    """

    def __init__(self, vmin=None, vmax=None, midpoint=0.0, clip=False):
        self.midpoint = midpoint
        super().__init__(vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1
        knots = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, knots, targets), np.isnan(value))

    def inverse(self, value):
        # invert the piecewise-linear map back to data values
        knots = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, targets, knots), np.isnan(value))
class MidpointLogNormalize(mpl.colors.LogNorm):
    """Log-scaled normalization mapping `midpoint` to the middle of the colormap."""

    def __init__(self, vmin=None, vmax=None, midpoint=0.0, clip=False):
        self.midpoint = midpoint
        super().__init__(vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # log-x / linear-y interpolation: vmin -> 0, midpoint -> 0.5, vmax -> 1
        knots = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        return utils.interp(value, knots, targets, xlog=True, ylog=False)

    def inverse(self, value):
        # linear-x / log-y interpolation back to data values
        knots = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        return utils.interp(value, targets, knots, xlog=False, ylog=True)
def figax_single(height=None, **kwargs):
    """Create a figure/axes sized for a single-column plot via `figax`.

    Re-applies the module's default plotting style (mutates global rcParams).

    :param height: figure height in inches; defaults to FIGSIZE * GOLDEN_RATIO
    :param kwargs: forwarded to `figax`
    """
    mpl.style.use('default')   # avoid dark backgrounds from dark theme vscode
    plt.rcParams.update({
        'axes.grid': True,
        'grid.alpha': 0.15,
        "mathtext.fontset": "cm",
        "font.family": "serif",
        "legend.handlelength": 1.5,
        "lines.solid_capstyle": 'round',
        "font.size": FONTSIZE,
        "legend.fontsize": FONTSIZE * 0.8,
        'xtick.labelsize': FONTSIZE * 0.8,
        'ytick.labelsize': FONTSIZE * 0.8,
    })

    if height is None:
        height = FIGSIZE * GOLDEN_RATIO

    kwargs.setdefault('figsize', [FIGSIZE, height])
    for key, val in dict(left=0.15, bottom=0.15, right=0.95, top=0.95).items():
        kwargs.setdefault(key, val)

    return figax(**kwargs)
def figax_double(height=None, **kwargs):
    """Create a figure/axes sized for a double-column (full-width) plot via `figax`.

    Re-applies the module's default plotting style (mutates global rcParams).

    :param height: figure height in inches; defaults to 2 * FIGSIZE * GOLDEN_RATIO
    :param kwargs: forwarded to `figax`
    """
    mpl.style.use('default')   # avoid dark backgrounds from dark theme vscode
    plt.rcParams.update({
        'axes.grid': True,
        'grid.alpha': 0.15,
        "mathtext.fontset": "cm",
        "font.family": "serif",
        "legend.handlelength": 1.5,
        "lines.solid_capstyle": 'round',
        "font.size": FONTSIZE,
        "legend.fontsize": FONTSIZE * 0.8,
        'xtick.labelsize': FONTSIZE * 0.8,
        'ytick.labelsize': FONTSIZE * 0.8,
    })

    if height is None:
        height = 2 * FIGSIZE * GOLDEN_RATIO

    kwargs.setdefault('figsize', [2 * FIGSIZE, height])
    for key, val in dict(left=0.10, bottom=0.10, right=0.98, top=0.95).items():
        kwargs.setdefault(key, val)

    return figax(**kwargs)
def figax(figsize=[7, 5], ncols=1, nrows=1, sharex=False, sharey=False, squeeze=True,
          scale=None, xscale='log', xlabel='', xlim=None, yscale='log', ylabel='', ylim=None,
          left=None, bottom=None, right=None, top=None, hspace=None, wspace=None,
          widths=None, heights=None, grid=True, **kwargs):
    """Create matplotlib figure and axes instances.

    Convenience function to create fig/axes using `plt.subplots`, and quickly modify standard
    parameters.

    Parameters
    ----------
    figsize : (2,) list, optional
        Figure size in inches.
    ncols : int, optional
        Number of columns of axes.
    nrows : int, optional
        Number of rows of axes.
    sharex : bool, optional
        Share xaxes configuration between axes.
    sharey : bool, optional
        Share yaxes configuration between axes.
    squeeze : bool, optional
        Remove dimensions of length (1,) in the `axes` object.
    scale : [type], optional
        Axes scaling to be applied to all x/y axes.  One of ['log', 'lin'].
    xscale : str, optional
        Axes scaling for xaxes ['log', 'lin'].
    xlabel : str, optional
        Label for xaxes.
    xlim : [type], optional
        Limits for xaxes.
    yscale : str, optional
        Axes scaling for yaxes ['log', 'lin'].
    ylabel : str, optional
        Label for yaxes.
    ylim : [type], optional
        Limits for yaxes.
    left : [type], optional
        Left edge of axes space, set using `plt.subplots_adjust()`, as a fraction of figure.
    bottom : [type], optional
        Bottom edge of axes space, set using `plt.subplots_adjust()`, as a fraction of figure.
    right : [type], optional
        Right edge of axes space, set using `plt.subplots_adjust()`, as a fraction of figure.
    top : [type], optional
        Top edge of axes space, set using `plt.subplots_adjust()`, as a fraction of figure.
    hspace : [type], optional
        Height space between axes if multiple rows are being used.
    wspace : [type], optional
        Width space between axes if multiple columns are being used.
    widths : [type], optional
        Relative width ratios of the axes columns.
    heights : [type], optional
        Relative height ratios of the axes rows.
    grid : bool, optional
        Add grid lines to axes.

    Returns
    -------
    fig : `matplotlib.figure.Figure`
        New matplotlib figure instance containing axes.
    axes : [ndarray] `matplotlib.axes.Axes`
        New matplotlib axes, either a single instance or an ndarray of axes.

    """
    # `scale` overrides both axis scales at once
    if scale is not None:
        xscale = scale
        yscale = scale

    # normalize 'lin'/'linear' shorthands to matplotlib's 'linear'
    scales = [xscale, yscale]
    for ii in range(2):
        if scales[ii].startswith('lin'):
            scales[ii] = 'linear'

    xscale, yscale = scales

    if (widths is not None) or (heights is not None):
        gridspec_kw = dict()
        if widths is not None:
            gridspec_kw['width_ratios'] = widths
        if heights is not None:
            gridspec_kw['height_ratios'] = heights
        kwargs['gridspec_kw'] = gridspec_kw

    # always create with squeeze=False so `axes` is (nrows, ncols); squeeze at the end
    fig, axes = plt.subplots(figsize=figsize, squeeze=False, ncols=ncols, nrows=nrows,
                             sharex=sharex, sharey=sharey, **kwargs)

    plt.subplots_adjust(
        left=left, bottom=bottom, right=right, top=top, hspace=hspace, wspace=wspace)

    # broadcast y-limits to one (2,) pair per axes entry
    if ylim is not None:
        shape = (nrows, ncols, 2)
        if np.shape(ylim) == (2,):
            ylim = np.array(ylim)[np.newaxis, np.newaxis, :]
    else:
        shape = (nrows, ncols,)

    ylim = np.broadcast_to(ylim, shape)

    # broadcast x-limits to one (2,) pair per axes entry
    if xlim is not None:
        shape = (nrows, ncols, 2)
        if np.shape(xlim) == (2,):
            xlim = np.array(xlim)[np.newaxis, np.newaxis, :]
    else:
        shape = (nrows, ncols)

    xlim = np.broadcast_to(xlim, shape)

    # broadcast per-axes scales and labels against the axes grid
    _, xscale, xlabel = np.broadcast_arrays(axes, xscale, xlabel)
    _, yscale, ylabel = np.broadcast_arrays(axes, yscale, ylabel)

    for idx, ax in np.ndenumerate(axes):
        ax.set(xscale=xscale[idx], xlabel=xlabel[idx], yscale=yscale[idx], ylabel=ylabel[idx])
        if xlim[idx] is not None:
            ax.set_xlim(xlim[idx])
        if ylim[idx] is not None:
            ax.set_ylim(ylim[idx])

        if grid is True:
            # draw grid lines below plotted artists
            ax.set_axisbelow(True)

    if squeeze:
        axes = np.squeeze(axes)
        if np.ndim(axes) == 0:
            # single axes: unwrap the 0-d array to the Axes instance itself
            axes = axes[()]
    return fig, axes
def smap(args=[0.0, 1.0], cmap=None, log=False, norm=None, midpoint=None,
         under='0.8', over='0.8', left=None, right=None):
    """Create a colormap from a scalar range to a set of colors.

    Parameters
    ----------
    args : scalar or array_like of scalar
        Range of valid scalar values to normalize with
    cmap : None, str, or ``matplotlib.colors.Colormap`` object
        Colormap to use.
    log : bool
        Logarithmic scaling
    norm : None or `matplotlib.colors.Normalize`
        Normalization to use.
    midpoint : None or scalar
        If given, the scalar value mapped to the center of the colormap;
        a diverging 'bwr' colormap is then used by default.
    under : str or `None`
        Color specification for values below range.
    over : str or `None`
        Color specification for values above range.
    left : float {0.0, 1.0} or `None`
        Truncate the left edge of the colormap to this value.
        If `None`, 0.0 used (if `right` is provided).
    right : float {0.0, 1.0} or `None`
        Truncate the right edge of the colormap to this value
        If `None`, 1.0 used (if `left` is provided).

    Returns
    -------
    smap : ``matplotlib.cm.ScalarMappable``
        Scalar mappable object which contains the members:
        `norm`, `cmap`, and the function `to_rgba`.

    """
    _DEF_CMAP = 'Spectral'

    # choose a default colormap: diverging when a midpoint is requested
    if cmap is None:
        if midpoint is not None:
            cmap = 'bwr'
        else:
            cmap = _DEF_CMAP

    cmap = _get_cmap(cmap)

    # Select a truncated subsection of the colormap
    if (left is not None) or (right is not None):
        if left is None:
            left = 0.0
        if right is None:
            right = 1.0
        cmap = _cut_cmap(cmap, left, right)

    # out-of-range colors (light grey by default)
    if under is not None:
        cmap.set_under(under)
    if over is not None:
        cmap.set_over(over)

    if norm is None:
        norm = _get_norm(args, midpoint=midpoint, log=log)
    else:
        # infer log-ness from an explicitly provided normalization
        log = isinstance(norm, mpl.colors.LogNorm)

    # Create scalar-mappable
    smap = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    # Bug-Fix something something
    smap._A = []
    # Allow `smap` to be used to construct colorbars
    smap.set_array([])
    # Store type of mapping
    smap.log = log

    return smap
def _get_norm(data, midpoint=None, log=False):
"""
"""
# Determine minimum and maximum
if np.size(data) == 1:
min = 0
max = np.int(data) - 1
elif np.size(data) == 2:
min, max = data
else:
try:
min, max = utils.minmax(data, filter=log)
except Exception:
err = f"Input `data` ({type(data)}) must be an integer, (2,) of scalar, or ndarray of scalar!"
log.exception(err)
raise ValueError(err)
# print(f"{min=}, {max=}")
# Create normalization
if log:
if midpoint is None:
norm = mpl.colors.LogNorm(vmin=min, vmax=max)
else:
norm = MidpointLogNormalize(vmin=min, vmax=max, midpoint=midpoint)
else:
if midpoint is None:
norm = mpl.colors.Normalize(vmin=min, vmax=max)
else:
# norm = MidpointNormalize(vmin=min, vmax=max, midpoint=midpoint)
norm = MidpointNormalize(vmin=min, vmax=max, midpoint=midpoint)
# norm = mpl.colors.TwoSlopeNorm(vmin=min, vcenter=midpoint, vmax=max)
return norm
def _cut_cmap(cmap, min=0.0, max=1.0, n=100):
"""Select a truncated subset of the given colormap.
Code from: http://stackoverflow.com/a/18926541/230468
"""
name = f"trunc({cmap.name},{min:.2f},{max:.2f})"
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(name, cmap(np.linspace(min, max, n)))
return new_cmap
def _get_cmap(cmap):
"""Retrieve a colormap with the given name if it is not already a colormap.
"""
if isinstance(cmap, mpl.colors.Colormap):
return cmap
try:
return mpl.cm.get_cmap(cmap).copy()
except Exception as err:
log.error(f"Could not load colormap from `{cmap}` : {err}")
raise
def _get_hist_steps(xx, yy, yfilter=None):
"""Convert from bin-edges and histogram heights, to specifications for step lines.
Parameters
----------
xx : array_like
Independence variable representing bin-edges. Size (N,)
yy : array_like
Dependence variable representing histogram amplitudes. Size (N-1,)
yfilter : None, bool, callable
Returns
-------
xnew : array (N,)
x-values
ynew : array (N,)
y-values
"""
size = len(xx) - 1
if size != len(yy):
err = f"Length of `xx` ({len(xx)}) should be length of `yy` ({len(yy)}) + 1!"
log.exception(err)
raise ValueError(err)
xnew = [[xx[ii], xx[ii+1]] for ii in range(xx.size-1)]
ynew = [[yy[ii], yy[ii]] for ii in range(xx.size-1)]
xnew = np.array(xnew).flatten()
ynew = np.array(ynew).flatten()
if yfilter not in [None, False]:
if yfilter is True:
idx = (ynew > 0.0)
elif callable(yfilter):
idx = yfilter(ynew)
else:
raise ValueError()
xnew = xnew[idx]
ynew = ynew[idx]
return xnew, ynew
def draw_hist_steps(ax, xx, yy, yfilter=None, **kwargs):
    """Plot histogram bin-edges/heights as step lines on the given axes."""
    steps_x, steps_y = _get_hist_steps(xx, yy, yfilter=yfilter)
    return ax.plot(steps_x, steps_y, **kwargs)
def draw_gwb(ax, xx, gwb, nsamp=10, color=None, label=None, **kwargs):
    """Draw the GWB median/confidence band plus a few sampled realizations.

    :param gwb: background strains, indexed [freq, realization]
    :param nsamp: number of individual realizations to overplot (None/0 for none)
    """
    if color is None:
        color = ax._get_lines.get_next_color()

    kw_plot = kwargs.pop('plot', {})
    kw_plot.setdefault('color', color)
    handle = draw_med_conf(ax, xx, gwb, plot=kw_plot, **kwargs)

    if (nsamp is not None) and (nsamp > 0):
        nreals = gwb.shape[1]
        # draw up to `nsamp` distinct realizations as faint lines
        draws = np.random.choice(nreals, np.min([nsamp, nreals]), replace=False)
        for rr in draws:
            ax.plot(xx, gwb[:, rr], color=color, alpha=0.25, lw=1.0, ls='-')

    return handle
def draw_ss_and_gwb(ax, xx, hc_ss, gwb, nsamp=10,
                    color=None, cmap = cm.rainbow,
                    sslabel=None, bglabel=None, **kwargs):
    """Draw sampled GWB realizations as lines and single sources as scatter.

    :param ax: axes to draw on
    :param xx: x-axis (frequency) values
    :param hc_ss: single-source strains, indexed [freq, realization, source]
    :param gwb: background strains, indexed [freq, realization]
    :param nsamp: number of realizations to draw (None/0 draws nothing)
    """
    if color is None:
        color = ax._get_lines.get_next_color()
    kw_plot = kwargs.get('plot', {})
    kw_plot.setdefault('color', color)
    # hh = draw_med_conf(ax, xx, gwb, plot=kw_plot, **kwargs)
    if (nsamp is not None) and (nsamp > 0):
        nsamp_max = gwb.shape[1]
        nsize = np.min([nsamp, nsamp_max])
        colors = cmap(np.linspace(0,1,nsize))
        ci = 0
        idx = np.random.choice(nsamp_max, nsize, replace=False)
        for ii in idx:
            # NOTE(review): labels are attached only when realization index 0
            # happens to be among the random draws -- confirm this is intended
            if(ii==0):
                label=bglabel
            else: label=None
            # NOTE(review): `color` is never None here (assigned above), so the
            # `colors[ci]`/cmap branch is currently unreachable -- confirm
            cc = colors[ci] if color is None else color
            ax.plot(xx, gwb[:, ii], color=cc, alpha=0.25, lw=1.0, ls='-')
            for ll in range(len(hc_ss[0,0])):
                if(ll==0):
                    # outline the first (ll==0) source of each realization
                    edgecolor='k'
                    if(ii==0): label=sslabel  # first source of first realization
                    else: label=None
                else:
                    edgecolor=None
                    label=None
                ax.scatter(xx, hc_ss[:, ii, ll], color=cc, alpha=0.25,
                           edgecolor=edgecolor, label=label)
            ci+=1
    # return hh
def plot_gwb(fobs, gwb, hc_ss=None, bglabel=None, sslabel=None, **kwargs):
    """Plot GWB characteristic strain vs frequency, optionally with single sources.

    :param fobs: observed GW frequencies [Hz]
    :param gwb: background strains, indexed [freq, realization]
    :param hc_ss: optional single-source strains, indexed [freq, realization, source]
    :returns: the new matplotlib Figure
    """
    xvals = fobs * YR   # convert frequencies from Hz to 1/yr
    fig, ax = figax(
        xlabel=LABEL_GW_FREQUENCY_YR,
        ylabel=LABEL_CHARACTERISTIC_STRAIN
    )
    if hc_ss is None:
        draw_gwb(ax, xvals, gwb, **kwargs)
    else:
        draw_ss_and_gwb(ax, xvals, hc_ss, gwb, sslabel=sslabel,
                        bglabel=bglabel, **kwargs)
    _twin_hz(ax)
    return fig
def plot_bg_ss(fobs, bg, ss=None, bglabel=None, sslabel=None,
               xlabel=LABEL_GW_FREQUENCY_YR, ylabel=LABEL_CHARACTERISTIC_STRAIN, **kwargs):
    """Plot a background spectrum, optionally with single sources.

    Can plot strain or power spectral density, just need to set ylabel accordingly.

    :param fobs: observed GW frequencies [Hz]
    :param bg: background values, indexed [freq, realization]
    :param ss: optional single-source values, indexed [freq, realization, source]
    :returns: the new matplotlib Figure
    """
    xvals = fobs * YR   # convert frequencies from Hz to 1/yr
    fig, ax = figax(
        xlabel=xlabel,
        ylabel=ylabel
    )
    if ss is None:
        draw_gwb(ax, xvals, bg, **kwargs)
    else:
        draw_ss_and_gwb(ax, xvals, ss, bg, sslabel=sslabel,
                        bglabel=bglabel, **kwargs)
    _twin_hz(ax)
    return fig
def draw_sspars_and_bgpars(axs, xx, sspar, bgpar, nsamp=10, cmap=cm.rainbow_r, color = None, label=None, **kwargs):
    """Draw binary-parameter tracks (background) and points (single sources).

    Panel layout: axs[0,0]=total mass, axs[0,1]=mass ratio,
    axs[1,0]=initial comoving distance, axs[1,1]=final comoving distance.

    :param axs: (2,2) array of axes
    :param xx: x-axis (frequency) values
    :param sspar: single-source parameters, indexed [param, freq, realization, source]
    :param bgpar: background-averaged parameters, indexed [param, freq, realization]
    :param nsamp: number of realizations to draw (None/0 draws nothing)
    """
    m_bg = bgpar[0,:,:]/MSOL  # bg avg masses in solar masses
    m_ss = sspar[0,:,:,:]/MSOL  # ss masses in solar masses
    q_bg = bgpar[1,:,:]  # bg avg ratios
    q_ss = sspar[1,:,:,:]  # ss ratios
    # convert redshifts to comoving distances [Mpc]
    di_bg = holo.cosmo.comoving_distance(bgpar[2,:,:]).value  # bg avg distances in Mpc
    di_ss = holo.cosmo.comoving_distance(sspar[2,:,:,:]).value  # ss distances in Mpc
    df_bg = holo.cosmo.comoving_distance(bgpar[3,:,:]).value  # bg avg distances in Mpc
    df_ss = holo.cosmo.comoving_distance(sspar[3,:,:,:]).value  # ss distances in Mpc
    if (nsamp is not None) and (nsamp > 0):
        nsamp_max = bgpar.shape[2]
        nsize = np.min([nsamp, nsamp_max])
        colors = cmap(np.linspace(0,1,nsize))
        ci = 0
        idx = np.random.choice(nsamp_max, nsize, replace=False)
        for ii in idx:
            # background: faint lines, one color per sampled realization
            axs[0,0].plot(xx, m_bg[:,ii], color=colors[ci], alpha=0.25, lw=1.0, ls='-')  # masses (upper left)
            axs[0,1].plot(xx, q_bg[:,ii], color=colors[ci], alpha=0.25, lw=1.0, ls='-')  # ratios (upper right)
            axs[1,0].plot(xx, di_bg[:,ii], color=colors[ci], alpha=0.25, lw=1.0, ls='-')  # initial distances (lower left)
            axs[1,1].plot(xx, df_bg[:, ii], color=colors[ci], alpha=0.25, lw=1.0, ls='-')  # final distances (lower right)
            # single sources: scatter, first (ll==0) source outlined in black
            for ll in range(sspar.shape[-1]):
                if(ll==0): edgecolor='k'
                else: edgecolor=None
                axs[0,0].scatter(xx, m_ss[:, ii, ll], color=colors[ci], alpha=0.25,
                                 edgecolor=edgecolor)  # ss masses (upper left)
                axs[0,1].scatter(xx, q_ss[:, ii, ll], color=colors[ci], alpha=0.25,
                                 edgecolor=edgecolor)  # ss ratios (upper right)
                axs[1,0].scatter(xx, di_ss[:, ii, ll], color=colors[ci], alpha=0.25,
                                 edgecolor=edgecolor)  # ss intial distances (lower left)
                axs[1,1].scatter(xx, df_ss[:, ii, ll], color=colors[ci], alpha=0.25,
                                 edgecolor=edgecolor)  # ss final distances (lower left)
            ci +=1
def plot_pars(fobs, sspar, bgpar, **kwargs):
    """Plot binary parameters (total mass, mass ratio, distances) vs. GW frequency.

    Parameters
    ----------
    fobs : array
        Observed GW frequencies in Hz.
    sspar : array
        Single-source parameters, indexed [param, freq, realization, source]
        (param order per `draw_sspars_and_bgpars`).
    bgpar : array
        Background-averaged parameters, indexed [param, freq, realization].
    kwargs : dict
        Accepted for API compatibility; currently unused.

    Returns
    -------
    fig : `matplotlib.figure.Figure`
    """
    xx = fobs * YR   # convert frequencies from Hz to 1/yr
    fig, axs = figax(figsize = (11,6), ncols=2, nrows=2, sharex = True)
    # raw strings: '\odot' etc. are latex commands, not Python escapes
    # (non-raw '\o'/'\d' raise SyntaxWarning on Python >= 3.12)
    axs[0,0].set_ylabel(r'Total Mass $M/M_\odot$')
    axs[0,1].set_ylabel(r'Mass Ratio $q$')
    axs[1,0].set_ylabel(r'Initial Comoving Distance $d_c$ (Mpc)')
    axs[1,1].set_ylabel(r'Final Comoving Distance $d_c$ (Mpc)')
    axs[1,0].set_xlabel(LABEL_GW_FREQUENCY_YR)
    axs[1,1].set_xlabel(LABEL_GW_FREQUENCY_YR)
    draw_sspars_and_bgpars(axs, xx, sspar, bgpar, color='pink')
    fig.tight_layout()
    return fig
def scientific_notation(val, man=1, exp=0, dollar=True):
    """Convert a scalar into a string with scientific notation (latex formatted).

    Arguments
    ---------
    val : scalar
        Numerical value to convert.
    man : int or `None`
        Precision of the mantissa (decimal points); or `None` to omit the mantissa
        (only a leading '-' is kept for negative values).
    exp : int or `None`
        `None` to omit the exponent; otherwise the exponent is shown as an integer.
    dollar : bool
        Include dollar-signs ('$') around returned expression.

    Returns
    -------
    rv_str : str
        Scientific notation string using latex formatting.

    """
    if val == 0.0:
        rv_str = "$"*dollar + "0.0" + "$"*dollar
        return rv_str

    # get log10 exponent
    val_exp = np.floor(np.log10(np.fabs(val)))
    # get mantissa (positive/negative is still included here)
    val_man = val / np.power(10.0, val_exp)
    if man is not None:
        val_man = np.around(val_man, man)
        # rounding can push the mantissa magnitude to 10 (e.g. 9.99 -> 10.0); renormalize
        # (use the absolute value so negative mantissas are also handled)
        if np.fabs(val_man) >= 10.0:
            val_man /= 10.0
            val_exp += 1

    # Construct Mantissa String
    # --------------------------------
    if man is None:
        # mantissa omitted entirely; preserve the sign of the value
        str_man = "-" if (val < 0.0) else ""
    else:
        str_man = "{0:.{1:d}f}".format(val_man, man)
        # If the mantissa is '1' (or '1.0' or '1.00' etc), dont write it
        if str_man == "{0:.{1:d}f}".format(1.0, man):
            str_man = ""

    # Construct Exponent String
    # --------------------------------
    str_exp = "" if (exp is None) else "10^{{ {:d} }}".format(int(val_exp))

    # Put them together
    # --------------------------------
    rv_str = "$"*dollar + str_man
    # only join with '\times' when actual mantissa digits are present
    if len(str_man) and (str_man != "-") and len(str_exp):
        rv_str += " \\times"
    rv_str += str_exp + "$"*dollar
    return rv_str
def _draw_plaw(ax, freqs, amp=1e-15, f0=1/YR, **kwargs):
    """Draw a reference power-law strain spectrum, h_c = amp * (f/f0)^(-2/3), onto `ax`."""
    defaults = dict(alpha=0.25, color='k', ls='--')
    for key, val in defaults.items():
        kwargs.setdefault(key, val)
    ff = np.asarray(freqs)
    strain = amp * np.power(ff / f0, -2.0 / 3.0)
    return ax.plot(freqs, strain, **kwargs)
def _twin_hz(ax, nano=True, fs=10, **kw):
    """Add a twin x-axis in [Hz] (or [nHz]) to an axis whose x-units are [1/yr]."""
    twin = ax.twiny()
    twin.grid(False)
    # convert the primary axis limits from [1/yr] to [Hz]
    lims = np.array(ax.get_xlim()) / YR
    if nano:
        # rescale to nano-hertz
        lims = lims * 1e9
        label = LABEL_GW_FREQUENCY_NHZ
    else:
        label = LABEL_GW_FREQUENCY_HZ
    twin.set(xlim=lims, xscale=ax.get_xscale())
    twin.set_xlabel(label, fontsize=fs, **kw)
    return twin
def _twin_yr(ax, nano=True, fs=10, label=True, **kw):
    """Add a twin x-axis in [1/yr] to an axis whose x-units are [Hz] (or [nHz])."""
    twin = ax.twiny()
    twin.grid(False)
    # convert the primary axis limits from [Hz] to [1/yr]
    lims = np.array(ax.get_xlim()) * YR
    if nano:
        # primary axis was in nano-hertz; undo the 1e9 rescaling
        lims = lims / 1e9
    twin.set(xlim=lims, xscale=ax.get_xscale())
    if label:
        twin.set_xlabel(LABEL_GW_FREQUENCY_YR, fontsize=fs, **kw)
    return twin
def draw_med_conf(ax, xx, vals, fracs=(0.50, 0.90), weights=None, plot=None, fill=None, filter=False):
    """Draw the median line and filled confidence intervals of `vals` versus `xx`.

    Parameters
    ----------
    ax : `mpl.axes.Axes`
        Axes on which to draw.
    xx : (X,) array_like
        x-axis values.
    vals : (X, S) array_like
        Distribution of values at each x; quantiles are taken along the last axis.
    fracs : iterable of float
        Confidence fractions in [0, 1], each drawn as an interval centered on the median.
    weights : array_like or None
        Optional weights passed to the quantile calculation.
    plot : dict or None
        Keyword arguments for the median line.
    fill : dict or None
        Keyword arguments for the `fill_between` confidence bands.
    filter : bool
        If True, only positive values are included in the quantiles (per x-bin).

    Returns
    -------
    (hh, gg)
        Handles of the median line and the last filled confidence band.

    """
    # BUGFIX: avoid mutable default arguments (`plot={}`, `fill={}`), which were
    # mutated via `setdefault` and shared across calls; copy so caller dicts are untouched.
    plot = dict(plot) if (plot is not None) else {}
    fill = dict(fill) if (fill is not None) else {}
    plot.setdefault('alpha', 0.75)
    fill.setdefault('alpha', 0.2)

    percs = np.atleast_1d(fracs)
    assert np.all((0.0 <= percs) & (percs <= 1.0))

    # center the target percentages into pairs around 50%, e.g. 68 ==> [16,84]
    inter_percs = [[0.5-pp/2, 0.5+pp/2] for pp in percs]
    # Add the median value (50%)
    inter_percs = [0.5, ] + np.concatenate(inter_percs).tolist()

    # Get percentiles; they go along the last axis
    if filter:
        rv = [
            kale.utils.quantiles(vv[vv > 0.0], percs=inter_percs, weights=weights)
            for vv in vals
        ]
        rv = np.asarray(rv)
    else:
        rv = kale.utils.quantiles(vals, percs=inter_percs, weights=weights, axis=-1)
    med, *conf = rv.T

    # plot median
    hh, = ax.plot(xx, med, **plot)

    # Reshape confidence intervals to nice plotting shape
    # 2*P, X ==> (P, 2, X)
    conf = np.array(conf).reshape(len(percs), 2, xx.size)

    # default the band color to match the median line
    kw = dict(color=hh.get_color())
    kw.update(fill)
    fill = kw

    # plot each confidence interval
    for lo, hi in conf:
        gg = ax.fill_between(xx, lo, hi, **fill)

    return (hh, gg)
def draw_med_conf_color(ax, xx, vals, fracs=(0.50, 0.90), weights=None, plot=None, fill=None,
                        filter=False, color=None, linestyle='-'):
    """Draw median and confidence intervals of `vals` vs. `xx` with explicit styling.

    Identical to `draw_med_conf` except the median line's `color` and `linestyle`
    can be given directly.

    Parameters
    ----------
    ax : `mpl.axes.Axes`
        Axes on which to draw.
    xx : (X,) array_like
        x-axis values.
    vals : (X, S) array_like
        Distribution of values at each x; quantiles are taken along the last axis.
    fracs : iterable of float
        Confidence fractions in [0, 1].
    weights : array_like or None
        Optional weights passed to the quantile calculation.
    plot : dict or None
        Keyword arguments for the median line.
    fill : dict or None
        Keyword arguments for the confidence bands.
    filter : bool
        If True, only positive values are included in the quantiles (per x-bin).
    color : color-spec or None
        Explicit color for the median line (bands inherit it).
    linestyle : str
        Linestyle for the median line (used only when `color` is given).

    Returns
    -------
    (hh, gg)
        Handles of the median line and the last filled confidence band.

    """
    # BUGFIX: avoid mutable default arguments (`plot={}`, `fill={}`), which were
    # mutated via `setdefault` and shared across calls; copy so caller dicts are untouched.
    plot = dict(plot) if (plot is not None) else {}
    fill = dict(fill) if (fill is not None) else {}
    plot.setdefault('alpha', 0.75)
    fill.setdefault('alpha', 0.2)

    percs = np.atleast_1d(fracs)
    assert np.all((0.0 <= percs) & (percs <= 1.0))

    # center the target percentages into pairs around 50%, e.g. 68 ==> [16,84]
    inter_percs = [[0.5-pp/2, 0.5+pp/2] for pp in percs]
    # Add the median value (50%)
    inter_percs = [0.5, ] + np.concatenate(inter_percs).tolist()

    # Get percentiles; they go along the last axis
    if filter:
        rv = [
            kale.utils.quantiles(vv[vv > 0.0], percs=inter_percs, weights=weights)
            for vv in vals
        ]
        rv = np.asarray(rv)
    else:
        rv = kale.utils.quantiles(vals, percs=inter_percs, weights=weights, axis=-1)
    med, *conf = rv.T

    # plot median
    if color is not None:
        hh, = ax.plot(xx, med, color=color, linestyle=linestyle, **plot)
    else:
        hh, = ax.plot(xx, med, **plot)

    # Reshape confidence intervals to nice plotting shape
    # 2*P, X ==> (P, 2, X)
    conf = np.array(conf).reshape(len(percs), 2, xx.size)

    # default the band color to match the median line
    kw = dict(color=hh.get_color())
    kw.update(fill)
    fill = kw

    # plot each confidence interval
    for lo, hi in conf:
        gg = ax.fill_between(xx, lo, hi, **fill)

    return (hh, gg)
def smooth_spectra(xx, gwb, smooth=(20, 4), interp=100):
    """Interpolate spectra onto a log-spaced grid and Savitzky-Golay smooth them.

    The columns of `gwb` are (median, lo_1, hi_1, lo_2, hi_2, ...); returns the new
    grid, the smoothed median, and the confidence intervals reshaped to (P, 2, X).
    """
    assert np.shape(xx) == (np.shape(gwb)[0],)
    if len(smooth) != 2:
        err = f"{smooth=} must be a (2,) of float specifying the filter-window size and polynomial-order!!"
        raise ValueError(err)

    # interpolate each quantile-track of `gwb` onto a log-spaced grid
    xnew = kale.utils.spacing(xx, 'log', num=int(interp))
    tracks = [utils.interp(xnew, xx, track) for track in gwb.T]
    # smooth along the frequency axis
    smoothed = sp.signal.savgol_filter(tracks, *smooth, axis=-1)
    med, *conf = smoothed

    # Reshape confidence intervals to nice plotting shape
    # 2*P, X ==> (P, 2, X)
    npercs = np.shape(conf)[0] // 2
    conf = np.array(conf).reshape(npercs, 2, xnew.size)
    return xnew, med, conf
def get_med_conf(vals, fracs, weights=None, axis=-1):
    """Return median and centered confidence-interval quantiles of `vals`.

    Output quantiles are ordered [0.5, lo_1, hi_1, lo_2, hi_2, ...] along `axis`.
    """
    percs = np.atleast_1d(fracs)
    assert np.all((0.0 <= percs) & (percs <= 1.0))
    # center each target fraction into a pair around 50%, e.g. 0.68 ==> [0.16, 0.84]
    pairs = [[0.5 - pp/2, 0.5 + pp/2] for pp in percs]
    # prepend the median (50%)
    quants = [0.5, ] + np.concatenate(pairs).tolist()
    # quantiles are taken along the requested axis
    return kale.utils.quantiles(vals, percs=quants, weights=weights, axis=axis)
def draw_smooth_med_conf(ax, xx, vals, smooth=(10, 4), interp=100, fracs=(0.50, 0.90), weights=None, plot=None, fill=None):
    """Draw smoothed median line and confidence intervals of `vals` versus `xx`.

    Quantiles are computed with `get_med_conf`, then interpolated and smoothed with
    `smooth_spectra` before drawing.

    Parameters
    ----------
    ax : `mpl.axes.Axes`
        Axes on which to draw.
    xx : (X,) array_like
        x-axis values.
    vals : (X, S) array_like
        Distribution of values at each x.
    smooth : (2,) of int
        Savitzky-Golay (window-size, polynomial-order), passed to `smooth_spectra`.
    interp : int
        Number of log-spaced interpolation points.
    fracs : iterable of float
        Confidence fractions in [0, 1].
    weights : array_like or None
        Optional weights for the quantile calculation.
    plot : dict or None
        Keyword arguments for the median line.
    fill : dict or None
        Keyword arguments for the confidence bands.

    Returns
    -------
    (hh, gg)
        Handles of the median line and the last filled confidence band.

    """
    # BUGFIX: avoid mutable default arguments (`plot={}`, `fill={}`), which were
    # mutated via `setdefault` and shared across calls; copy so caller dicts are untouched.
    plot = dict(plot) if (plot is not None) else {}
    fill = dict(fill) if (fill is not None) else {}
    plot.setdefault('alpha', 0.5)
    fill.setdefault('alpha', 0.2)

    rv = get_med_conf(vals, fracs, weights, axis=-1)
    xnew, med, conf = smooth_spectra(xx, rv, smooth=smooth, interp=interp)

    # plot median
    hh, = ax.plot(xnew, med, **plot)

    # plot each confidence interval, matching the median line's color
    for lo, hi in conf:
        gg = ax.fill_between(xnew, lo, hi, color=hh.get_color(), **fill)

    return (hh, gg)
def violins(ax, xx, yy, zz, width, **kwargs):
    """Draw a violin at each x-location in `xx`.

    Parameters
    ----------
    ax : `mpl.axes.Axes`
        Axes on which to draw.
    xx : (X,) array_like
        Horizontal locations of the violins.
    yy : (Y,) or (X, Y) array_like
        Vertical sample points; a single 1D array is reused for every violin.
    zz : (X, Y) array_like
        Density values (violin half-widths) corresponding to `yy`.
    width : scalar
        Horizontal scaling of each violin, passed to `violin`.
    **kwargs
        Additional arguments passed to `violin`.

    Returns
    -------
    handle
        Plot handle(s) from the last drawn violin.

    """
    assert np.ndim(xx) == 1
    # broadcast a single 1D set of sample points across all violins
    if np.ndim(yy) == 1:
        yy = [yy] * len(xx)
    assert np.ndim(yy) == 2
    assert np.shape(yy) == np.shape(zz)
    # allow transposed input, i.e. shaped (Y, X) instead of (X, Y)
    if np.shape(yy)[0] != xx.size:
        if np.shape(yy)[1] == xx.size:
            yy = yy.T
            zz = zz.T
    # BUGFIX: check the sample array against `xx` — the original compared
    # `np.shape(xx)[0] == xx.size`, which is trivially true for 1D `xx`.
    assert np.shape(yy)[0] == xx.size
    assert np.shape(zz)[0] == xx.size

    for ii in range(xx.size):
        usey = yy[ii]
        usez = zz[ii]
        handle = violin(ax, xx[ii], usey, usez, width, **kwargs)

    return handle
def violin(ax, xx, yy, zz, width, median_log10=False, side='both', clip_pdf=None,
           median={}, line={}, fill={}, **kwargs):
    """Draw a single violin (mirrored density profile) centered at `xx`.

    Parameters
    ----------
    ax : `mpl.axes.Axes`
        Axes on which to draw.
    xx : scalar
        Horizontal location of the violin center.
    yy : (Y,) array_like
        Vertical sample points.
    zz : (Y,) array_like
        Density values at each `yy`; normalized internally to a peak of unity.
    width : scalar
        Horizontal half-extent of the violin at peak density.
    median_log10 : bool
        If True, integrate the density in log10(`yy`) when locating the median.
    side : str
        Which side(s) to draw: 'left', 'right', or 'both' (only the first letter is used).
    clip_pdf : scalar or None
        If given (must be < 1.0), discard points whose normalized density is below this.
    median : dict, True, False or None
        Style overrides for the median tick; `True`/`{}` draws it, `False`/`None` skips it.
        A 'width' entry gives the tick half-width as a fraction of `width` (default 0.5).
    line : dict or None
        Style overrides for the outline; `None` skips the outline.
    fill : dict or None
        Style overrides for the filled region; `None` skips the fill.
    **kwargs
        Shared style defaults applied to outline, fill, and median tick.

    Returns
    -------
    handle
        A single artist handle, or a tuple of handles when multiple elements are drawn.

    """
    # NOTE: the mutable default dicts (`median`, `line`, `fill`) are never modified
    # here — updated *copies* are built below — so the shared defaults stay safe.
    assert np.ndim(xx) == 0
    assert np.shape(xx) == np.shape(width)
    assert np.ndim(yy) == 1
    assert yy.shape == zz.shape
    valid_sides = ['l', 'r', 'b']
    if side[0] not in valid_sides:
        raise ValueError(f"{side=} must begin with one of {valid_sides}!")

    # build outline style: hard defaults, then shared kwargs, then explicit `line` overrides
    if line is not None:
        line_def = dict(alpha=0.5, lw=0.5, color='k')
        line_def.update(kwargs)
        line_def.update(line)
        line = line_def

    # build fill style: hard defaults, then shared kwargs, then explicit `fill` overrides
    if fill is not None:
        fill_def = dict(alpha=0.25, lw=0.0)
        fill_def.update(kwargs)
        fill_def.update(fill)
        fill = fill_def

    if clip_pdf is not None:
        assert np.ndim(clip_pdf) == 0
        assert clip_pdf < 1.0

    # normalize density to a peak value of unity
    zz = zz / zz.max()

    if median is True:
        median = {}
    if median is False:
        median = None

    # locate the median *before* any clipping, by trapezoid-integrating the density
    if median is not None:
        if median_log10:
            dy = np.diff(np.log10(yy))
        else:
            dy = np.diff(yy)

        cdf = 0.5 * (zz[1:] + zz[:-1]) * dy
        cdf = np.concatenate([[0.0, ], cdf])
        cdf = np.cumsum(cdf)
        med = np.interp([0.5], cdf/cdf.max(), yy)

    # discard low-density points (only after the median has been determined)
    if clip_pdf is not None:
        idx = zz > clip_pdf
        yy = yy[idx]
        zz = zz[idx]

    # left/right profile edges, offset from the center by the scaled density
    xl = xx * np.ones_like(yy)
    xr = xx * np.ones_like(yy)
    left_flag = side.startswith('l') or side.startswith('b')
    right_flag = side.startswith('r') or side.startswith('b')
    if left_flag:
        xl = xl - zz * width
    if right_flag:
        xr = xr + zz * width

    handle = []
    if line is not None:
        h1, = ax.plot(xl, yy, **line)
        ax.plot(xr, yy, **line)
        handle.append(h1)

    if fill is not None:
        h2 = ax.fill_betweenx(yy, xl, xr, **fill)
        handle.append(h2)

    if median is not None:
        # median-tick style: outline style with a thicker line, plus `median` overrides
        # NOTE(review): `dict(line)` assumes `line` was not None — drawing a median
        # with `line=None` would raise TypeError here; confirm intended.
        kwargs = dict(line)
        kwargs['lw'] = 1.0
        kwargs.update(median)
        # the 'width' entry (fraction of violin width) is consumed here, not passed to plot
        mwid = kwargs.pop('width', 0.5)
        ll = xx
        rr = xx
        if left_flag:
            ll = ll - width * mwid
        if right_flag:
            rr = rr + width * mwid
        ax.plot([ll, rr], [med, med], **kwargs)

    # unwrap a single handle; otherwise return a tuple of all drawn handles
    handle = handle[0] if len(handle) == 1 else tuple(handle)
    return handle
class Corner:
    """Corner ("triangle") plot of pairwise parameter distributions.

    Manages an (ndim, ndim) grid of axes: 1D marginal distributions along the
    diagonal and 2D joint distributions below it (upper-right axes are hidden).
    Drawing is delegated to `kalepy` (`kale.plot`, `kale.utils`) helpers.
    """

    # fractional padding added to data extrema when auto-setting axes limits
    _LIMITS_STRETCH = 0.1

    def __init__(self, ndim, origin='tl', rotate=True, axes=None, labels=None, limits=None, **kwargs):
        """Construct the grid of axes (or adopt an existing one).

        Parameters
        ----------
        ndim : int
            Number of parameters (grid is ndim x ndim).
        origin : str
            Corner where the origin panel sits (e.g. 'tl' = top-left); parsed by kalepy.
        rotate : bool
            If True, the last diagonal panel is drawn rotated (values on the y-axis).
        axes : (ndim, ndim) array of `mpl.axes.Axes` or None
            Existing axes grid to use; a new figure is created when None.
        labels : (ndim,) of str or None
            Per-parameter axis labels.
        limits : (ndim,) of (2,) or None
            Fixed axes limits; when None, limits are accumulated during plotting.
        **kwargs
            Passed to the kalepy figure/axes constructor (only when `axes` is None).

        """
        origin = kale.plot._parse_origin(origin)

        # -- Construct figure and axes
        if axes is None:
            fig, axes = kale.plot._figax(ndim, **kwargs)
            self.fig = fig
            # flip the grid to match the requested origin corner
            if origin[0] == 1:
                axes = axes[::-1]
            if origin[1] == 1:
                axes = axes.T[::-1].T
        else:
            try:
                self.fig = axes[0, 0].figure
            except Exception as err:
                raise err

        self.origin = origin
        self.axes = axes
        last = ndim - 1
        if labels is None:
            labels = [''] * ndim

        # -- Configure visibility, labels and tick positions of every panel
        for (ii, jj), ax in np.ndenumerate(axes):
            # Set upper-right plots to invisible
            if jj > ii:
                ax.set_visible(False)
                continue

            ax.grid(True)

            # Bottom row
            if ii == last:
                if rotate and (jj == last):
                    ax.set_ylabel(labels[jj])   # currently this is being reset to empty later, that's okay
                else:
                    ax.set_xlabel(labels[jj])

                # If vertical origin is the top
                if origin[0] == 1:
                    ax.xaxis.set_label_position('top')
                    ax.xaxis.set_ticks_position('top')

            # Non-bottom row
            else:
                ax.set_xlabel('')
                for tlab in ax.xaxis.get_ticklabels():
                    tlab.set_visible(False)

            # First column
            if jj == 0:
                # Not-first rows
                if ii != 0:
                    ax.set_ylabel(labels[ii])

                    # If horizontal origin is the right
                    if origin[1] == 1:
                        ax.yaxis.set_label_position('right')
                        ax.yaxis.set_ticks_position('right')

            # Not-first columns
            else:
                # if (jj != last) or (not rotate):
                ax.set_ylabel('')
                for tlab in ax.yaxis.get_ticklabels():
                    tlab.set_visible(False)

            # Diagonals
            if ii == jj:
                # not top-left
                if (ii != 0) and (origin[1] == 0):
                    ax.yaxis.set_label_position('right')
                    ax.yaxis.set_ticks_position('right')
                else:
                    ax.yaxis.set_label_position('left')
                    ax.yaxis.set_ticks_position('left')

        # If axes limits are given, set axes to them
        if limits is not None:
            limit_flag = False
            kale.plot._set_corner_axes_extrema(self.axes, limits, rotate)
        # Otherwise, prepare to calculate limits during plotting
        else:
            limits = [None] * ndim
            limit_flag = True

        # --- Store key parameters
        self.ndim = ndim
        self._rotate = rotate
        self._labels = labels
        self._limits = limits
        self._limit_flag = limit_flag

        return

    def plot(self, data, edges=None, weights=None, ratio=None, quantiles=None, sigmas=None, reflect=None,
             color=None, cmap=None, limit=None, dist1d={}, dist2d={}):
        """Draw the full corner plot for a (parameters, data-points) dataset.

        1D marginals go on the diagonal (via `dist1d`), 2D joint panels below it
        (via `dist2d`).  Returns a handle from the last 2D panel for legends.
        """
        if limit is None:
            limit = self._limit_flag

        # ---- Sanitize
        if np.ndim(data) != 2:
            err = "`data` (shape: {}) must be 2D with shape (parameters, data-points)!".format(
                np.shape(data))
            raise ValueError(err)

        axes = self.axes
        size = np.shape(data)[0]
        shp = np.shape(axes)
        if (np.ndim(axes) != 2) or (shp[0] != shp[1]) or (shp[0] != size):
            raise ValueError("`axes` (shape: {}) does not match data dimension {}!".format(shp, size))

        if ratio is not None:
            if np.ndim(ratio) != 2 or np.shape(ratio)[0] != size:
                err = f"`ratio` (shape: {np.shape(ratio)}) must be 2D with shape (parameters, data-points)!"
                raise ValueError(err)

        # ---- Set parameters
        last = size - 1
        rotate = self._rotate

        # Set default color or cmap as needed
        color, cmap = kale.plot._parse_color_cmap(ax=axes[0][0], color=color, cmap=cmap)

        edges = kale.utils.parse_edges(data, edges=edges)
        quantiles, _ = kale.plot._default_quantiles(quantiles=quantiles, sigmas=sigmas)

        # ---- Draw 1D Histograms & Carpets
        limits = [None] * size      # variable to store the data extrema
        for jj, ax in enumerate(axes.diagonal()):
            rot = (rotate and (jj == last))
            refl = reflect[jj] if reflect is not None else None
            rat = ratio[jj] if ratio is not None else None
            self.dist1d(
                ax, edges[jj], data[jj], weights=weights, ratio=rat, quantiles=quantiles, rotate=rot,
                color=color, reflect=refl, **dist1d
            )
            limits[jj] = kale.utils.minmax(data[jj], stretch=self._LIMITS_STRETCH)

        # ---- Draw 2D Histograms and Contours
        for (ii, jj), ax in np.ndenumerate(axes):
            if jj >= ii:
                continue
            rat = [ratio[jj], ratio[ii]] if ratio is not None else None
            handle = self.dist2d(
                ax, [edges[jj], edges[ii]], [data[jj], data[ii]], weights=weights, ratio=rat,
                color=color, cmap=cmap, quantiles=quantiles, **dist2d
            )

        # ---- calculate and set axes limits
        if limit:
            # Update any stored values
            for ii in range(self.ndim):
                self._limits[ii] = kale.utils.minmax(limits[ii], prev=self._limits[ii])
            # Set axes to limits
            kale.plot._set_corner_axes_extrema(self.axes, self._limits, self._rotate)

        # NOTE(review): `handle` is only bound inside the off-diagonal loop, so a
        # 1x1 corner would raise NameError here — confirm ndim >= 2 is required.
        return handle

    def dist1d(self, ax, edges, data, color=None, weights=None, ratio=None, probability=True, rotate=False,
               density=None, confidence=False, hist=None, carpet=True, quantiles=None,
               ls=None, alpha=None, reflect=None, **kwargs):
        """Draw a single 1D marginal panel: KDE curve, histogram, confidence, carpet.

        Returns a handle from the first element drawn (for legend entries).
        """
        if np.ndim(data) != 1:
            err = "Input `data` (shape: {}) is not 1D!".format(np.shape(data))
            raise ValueError(err)
        if ratio is not None and np.ndim(ratio) != 1:
            err = "`ratio` (shape: {}) is not 1D!".format(np.shape(ratio))
            raise ValueError(err)

        # Use `scatter` as the limiting-number of scatter-points
        # To disable scatter, `scatter` will be set to `None`
        carpet = kale.plot._scatter_limit(carpet, "carpet")

        # set default color to next from axes' color-cycle
        if color is None:
            color = kale.plot._get_next_color(ax)

        # ---- Draw Components

        # Draw PDF from KDE
        handle = None     # variable to store a plotting 'handle' from one of the plotted objects
        if density is not False:
            kde = kale.KDE(data, weights=weights)
            # If histogram is also being plotted (as a solid line) use a dashed line
            if ls is None:
                # NOTE(review): both branches yield 0.8 — the ternary has no effect; confirm intent.
                _ls = '--' if hist else '-'
                _alpha = 0.8 if hist else 0.8
            else:
                _ls = ls
                _alpha = alpha

            # Calculate KDE density distribution for the given parameter
            kde_kwargs = dict(probability=probability, params=0, reflect=reflect)
            xx, yy = kde.density(**kde_kwargs)
            if ratio is not None:
                kde_ratio = kale.KDE(ratio, weights=weights)
                _, kde_ratio = kde_ratio.density(points=xx, **kde_kwargs)
                yy /= kde_ratio

            # rescale by value of density
            # NOTE(review): with the default `density=None` this multiplies by None and
            # would raise — callers appear to pass a numeric/boolean `density`; confirm.
            yy = yy * density
            # Plot
            if rotate:
                temp = xx
                xx = yy
                yy = temp
            handle, = ax.plot(xx, yy, color=color, ls=_ls, alpha=_alpha, **kwargs)

        # Draw Histogram
        if hist:
            if alpha is None:
                _alpha = 0.5 if density else 0.8
            else:
                _alpha = alpha
            _, _, hh = self.hist1d(
                ax, data, edges=edges, weights=weights, ratio=ratio, color=color,
                density=True, probability=probability, joints=True, rotate=rotate,
                ls=ls, alpha=_alpha, **kwargs
            )
            if handle is None:
                handle = hh

        # Draw Contours and Median Line
        if confidence:
            if ratio is not None:
                raise NotImplementedError("`confidence` with `ratio` is not implemented!")
            hh = kale.plot._confidence(data, ax=ax, color=color, quantiles=quantiles, rotate=rotate)
            if handle is None:
                handle = hh

        # Draw Carpet Plot
        if carpet is not None:
            if ratio is not None:
                raise NotImplementedError("`confidence` with `carpet` is not implemented!")
            hh = kale.plot._carpet(data, weights=weights, ax=ax, color=color, rotate=rotate, limit=carpet)
            if handle is None:
                handle = hh

        return handle

    def hist1d(self, ax, data, edges=None, weights=None, ratio=None, density=False, probability=False,
               renormalize=False, joints=True, positive=True, rotate=False, **kwargs):
        """Calculate and draw a 1D histogram of `data`; returns (hist, edges, artist)."""
        hist_kwargs = dict(density=density, probability=probability)
        # Calculate histogram
        hist, edges = kale.utils.histogram(data, bins=edges, weights=weights, **hist_kwargs)
        if ratio is not None:
            # NOTE(review): this histograms `data` (unweighted), not `ratio` — so the
            # division normalizes by the unweighted counts; confirm `ratio` itself
            # was not intended here.
            hist_ratio, _ = kale.utils.histogram(data, bins=edges, **hist_kwargs)
            hist /= hist_ratio

        # Draw
        rv = kale.plot.draw_hist1d(
            ax, edges, hist,
            renormalize=renormalize, joints=joints, positive=positive, rotate=rotate,
            **kwargs
        )
        return hist, edges, rv

    def dist2d(
        self, ax, edges, data, weights=None, ratio=None, quantiles=None, sigmas=None,
        color=None, cmap=None, smooth=None, upsample=None, pad=True, outline=True,
        median=False, scatter=True, contour=True, hist=True, mask_dense=None, mask_below=True, mask_alpha=0.9
    ):
        """Draw a single 2D joint panel: scatter, cross-hair medians, histogram, contours.

        Returns a handle suitable for legend entries (a dummy line when contours drawn).
        """
        if np.ndim(data) != 2 or np.shape(data)[0] != 2:
            err = f"`data` (shape: {np.shape(data)}) must be 2D with shape (parameters, data-points)!"
            raise ValueError(err)
        if ratio is not None:
            if np.ndim(ratio) != 2 or np.shape(ratio)[0] != 2:
                err = f"`ratio` (shape: {np.shape(ratio)}) must be 2D with shape (parameters, data-points)!"
                raise ValueError(err)

        # Set default color or cmap as needed
        color, cmap = kale.plot._parse_color_cmap(ax=ax, color=color, cmap=cmap)

        # Use `scatter` as the limiting-number of scatter-points
        # To disable scatter, `scatter` will be set to `None`
        scatter = kale.plot._scatter_limit(scatter, "scatter")

        # Default: if either hist or contour is being plotted, mask over high-density scatter points
        if mask_dense is None:
            mask_dense = (scatter is not None) and (hist or contour)

        # Calculate histogram
        edges = kale.utils.parse_edges(data, edges=edges)
        hist_kwargs = dict(bins=edges, density=True)
        hh, *_ = np.histogram2d(*data, weights=weights, **hist_kwargs)
        if ratio is not None:
            hh_ratio, *_ = np.histogram2d(*ratio, **hist_kwargs)
            hh /= hh_ratio
            hh = np.nan_to_num(hh)

        _, levels, quantiles = kale.plot._dfm_levels(hh, quantiles=quantiles, sigmas=sigmas)
        if mask_below is True:
            mask_below = levels.min()

        handle = None

        # ---- Draw Scatter Points
        if (scatter is not None):
            handle = kale.plot.draw_scatter(ax, *data, color=color, zorder=5, limit=scatter)

        # ---- Draw Median Lines (cross-hairs style)
        if median:
            if ratio:
                raise NotImplementedError("`median` is not impemented with `ratio`!")
            for dd, func in zip(data, [ax.axvline, ax.axhline]):
                # Calculate value
                if weights is None:
                    med = np.median(dd)
                else:
                    med = kale.utils.quantiles(dd, percs=0.5, weights=weights)

                # Load path_effects
                out_pe = kale.plot._get_outline_effects() if outline else None
                # Draw
                func(med, color=color, alpha=0.25, lw=1.0, zorder=40, path_effects=out_pe)

        # bin centers and (possibly smoothed/upsampled/padded) histogram for contouring
        cents, hh_prep = kale.plot._prep_hist(edges, hh, smooth, upsample, pad)

        # ---- Draw 2D Histogram
        if hist:
            _ee, _hh, handle = kale.plot.draw_hist2d(
                ax, edges, hh, mask_below=mask_below, cmap=cmap, zorder=10, shading='auto',
            )

        # ---- Draw Contours
        if contour:
            contour_cmap = cmap.reversed()
            # Narrow the range of contour colors relative to full `cmap`
            dd = 0.7 / 2
            nq = len(quantiles)
            if nq < 4:
                dd = nq*0.08
            contour_cmap = kale.plot._cut_colormap(contour_cmap, 0.5 - dd, 0.5 + dd)
            _ee, _hh, _handle = _contour2d(
                ax, cents, hh_prep, levels=levels, cmap=contour_cmap, zorder=20, outline=outline,
            )
            # hi = 1 if len(_handle.collections) > 0 else 0
            hi = -1
            handle = _handle.collections[hi]
            # for some reason the above handle is not showing up on legends... create a dummy line
            # to get a better handle
            col = handle.get_edgecolor()
            handle, = ax.plot([], [], color=col)

        # Mask dense scatter-points
        if mask_dense:
            # NOTE: levels need to be recalculated here!
            _, levels, quantiles = kale.plot._dfm_levels(hh_prep, quantiles=quantiles)
            span = [levels.min(), hh_prep.max()]
            mask_cmap = mpl.colors.ListedColormap('white')
            # Draw
            ax.contourf(*cents, hh_prep, span, cmap=mask_cmap, antialiased=True, zorder=9, alpha=mask_alpha)

        return handle

    def legend(self, handles, labels, index=None,
               loc=None, fancybox=False, borderaxespad=0, **kwargs):
        """Add a figure-level legend anchored in the (unused) upper-right region.

        `index` selects the (row, col) axes whose position anchors the legend;
        when not given, a sensible default is chosen from the corner size.
        """
        fig = self.fig

        # Set Bounding Box Location
        # ------------------------------------
        bbox = kwargs.pop('bbox', None)
        bbox = kwargs.pop('bbox_to_anchor', bbox)
        if bbox is None:
            if index is None:
                size = self.ndim
                if size in [2, 3, 4]:
                    index = (0, -1)
                    loc = 'lower left'
                elif size == 1:
                    index = (0, 0)
                    loc = 'upper right'
                elif size % 2 == 0:
                    index = size // 2
                    index = (size - index - 2, index + 1)
                    loc = 'lower left'
                else:
                    index = (size // 2) + 1
                    loc = 'lower left'
                    index = (size-index-1, index)
            bbox = self.axes[index].get_position()
            bbox = (bbox.x0, bbox.y0)
            kwargs['bbox_to_anchor'] = bbox
            kwargs.setdefault('bbox_transform', fig.transFigure)

        # Set other defaults
        leg = fig.legend(handles, labels, fancybox=fancybox,
                         borderaxespad=borderaxespad, loc=loc, **kwargs)
        return leg

    def target(self, targets, upper_limits=None, lower_limits=None, lw=1.0, fill_alpha=0.1, **kwargs):
        """Mark target values and/or upper/lower limits across all panels.

        Parameters
        ----------
        targets : (ndim,) of scalar or None
            Value to mark with a line in each parameter; `None` entries are skipped.
        upper_limits, lower_limits : (ndim,) of scalar or None, or None
            Per-parameter limits; the excluded region (out to the stored axes
            extrema) is shaded.
        lw : float
            Line-width of the target lines.
        fill_alpha : float
            Alpha of the shaded (excluded) regions.
        **kwargs
            Shared style (default: red, alpha=0.5, zorder=20) for lines and spans.

        """
        size = self.ndim
        axes = self.axes
        last = size - 1
        # labs = self._labels
        extr = self._limits

        # ---- check / sanitize arguments
        if len(targets) != size:
            err = "`targets` (shape: {}) must be shaped ({},)!".format(np.shape(targets), size)
            raise ValueError(err)

        if lower_limits is None:
            lower_limits = [None] * size
        if len(lower_limits) != size:
            err = "`lower_limits` (shape: {}) must be shaped ({},)!".format(np.shape(lower_limits), size)
            raise ValueError(err)

        if upper_limits is None:
            upper_limits = [None] * size
        if len(upper_limits) != size:
            err = "`upper_limits` (shape: {}) must be shaped ({},)!".format(np.shape(upper_limits), size)
            raise ValueError(err)

        # ---- configure settings
        kwargs.setdefault('color', 'red')
        kwargs.setdefault('alpha', 0.50)
        kwargs.setdefault('zorder', 20)
        line_kw = dict()
        line_kw.update(kwargs)
        line_kw['lw'] = lw
        span_kw = dict()
        span_kw.update(kwargs)
        span_kw['alpha'] = fill_alpha

        # ---- draw 1D targets and limits
        for jj, ax in enumerate(axes.diagonal()):
            # the rotated last diagonal has values on the y-axis, so use horizontal markers
            if (self._rotate and (jj == last)):
                func = ax.axhline
                func_up = lambda xx: ax.axhspan(extr[jj][0], xx, **span_kw)
                func_lo = lambda xx: ax.axhspan(xx, extr[jj][1], **span_kw)
            else:
                func = ax.axvline
                func_up = lambda xx: ax.axvspan(extr[jj][0], xx, **span_kw)
                func_lo = lambda xx: ax.axvspan(xx, extr[jj][1], **span_kw)

            if targets[jj] is not None:
                func(targets[jj], **line_kw)
            if upper_limits[jj] is not None:
                func_up(upper_limits[jj])
            if lower_limits[jj] is not None:
                func_lo(lower_limits[jj])

        # ---- draw 2D targets and limits
        for (ii, jj), ax in np.ndenumerate(axes):
            if jj >= ii:
                continue
            # parameter `ii` maps to the y-axis, `jj` to the x-axis in panel (ii, jj)
            for kk, func, func_lim in zip([ii, jj], [ax.axhline, ax.axvline], [ax.axhspan, ax.axvspan]):
                if targets[kk] is not None:
                    func(targets[kk], **line_kw)
                if upper_limits[kk] is not None:
                    func_lim(extr[kk][0], upper_limits[kk], **span_kw)
                if lower_limits[kk] is not None:
                    func_lim(lower_limits[kk], extr[kk][0], **span_kw)

        return
def _contour2d(ax, edges, hist, levels, outline=True, **kwargs):
    """Draw 2D contour lines of `hist` at the given `levels`, optionally with an outline effect."""
    default_lw = 1.5
    alpha = kwargs.setdefault('alpha', 0.8)
    # accept either 'linewidths' or the 'lw' shorthand (and 'linestyles' / 'ls')
    lw = kwargs.pop('linewidths', kwargs.pop('lw', default_lw))
    kwargs.setdefault('linestyles', kwargs.pop('ls', '-'))
    kwargs.setdefault('zorder', 10)

    # ---- Draw contours
    cont = ax.contour(*edges, hist, levels=levels, linewidths=lw, **kwargs)

    # ---- Add Outline path effect to contours
    if outline is True:
        effects = kale.plot._get_outline_effects(2*lw, alpha=1 - np.sqrt(1 - alpha))
        plt.setp(cont.collections, path_effects=effects)

    return edges, hist, cont
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Return a new colormap restricted to the fraction [minval, maxval] of `cmap`.

    Based on: https://stackoverflow.com/a/18926541
    """
    if isinstance(cmap, str):
        cmap = plt.get_cmap(cmap)
    # sample `n` colors from the requested sub-range of the original map
    samples = cmap(np.linspace(minval, maxval, n))
    name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return mpl.colors.LinearSegmentedColormap.from_list(name, samples)
# =================================================================================================
# ==== Below Needs Review / Cleaning ====
# =================================================================================================
'''
def plot_bin_pop(pop):
mt, mr = utils.mtmr_from_m1m2(pop.mass)
redz = cosmo.a_to_z(pop.scafa)
data = [mt/MSOL, mr, pop.sepa/PC, 1+redz]
data = [np.log10(dd) for dd in data]
reflect = [None, [None, 0], None, [0, None]]
labels = [r'M/M_\odot', 'q', r'a/\mathrm{{pc}}', '1+z']
labels = [r'${{\log_{{10}}}} \left({}\right)$'.format(ll) for ll in labels]
if pop.eccen is not None:
data.append(pop.eccen)
reflect.append([0.0, 1.0])
labels.append('e')
kde = kale.KDE(data, reflect=reflect)
corner = kale.Corner(kde, labels=labels, figsize=[8, 8])
corner.plot_data(kde)
return corner
def plot_mbh_scaling_relations(pop, fname=None, color='r'):
units = r"$[\log_{10}(M/M_\odot)]$"
fig, ax = plt.subplots(figsize=[8, 5])
ax.set(xlabel=f'Stellar Mass {units}', ylabel=f'BH Mass {units}')
# ==== Plot McConnell+Ma-2013 Data ====
handles = []
names = []
if fname is not None:
hh = _draw_MM2013_data(ax, fname)
handles.append(hh)
names.append('McConnell+Ma')
# ==== Plot MBH Merger Data ====
hh, nn = _draw_pop_masses(ax, pop, color)
handles = handles + hh
names = names + nn
ax.legend(handles, names)
return fig
def _draw_MM2013_data(ax):
data = observations.load_mcconnell_ma_2013()
data = {kk: data[kk] if kk == 'name' else np.log10(data[kk]) for kk in data.keys()}
key = 'mbulge'
mass = data['mass']
yy = mass[:, 1]
yerr = np.array([yy - mass[:, 0], mass[:, 2] - yy])
vals = data[key]
if np.ndim(vals) == 1:
xx = vals
xerr = None
elif vals.shape[1] == 2:
xx = vals[:, 0]
xerr = vals[:, 1]
elif vals.shape[1] == 3:
xx = vals[:, 1]
xerr = np.array([xx-vals[:, 0], vals[:, 2]-xx])
else:
raise ValueError()
idx = (xx > 0.0) & (yy > 0.0)
if xerr is not None:
xerr = xerr[:, idx]
ax.errorbar(xx[idx], yy[idx], xerr=xerr, yerr=yerr[:, idx], fmt='none', zorder=10)
handle = ax.scatter(xx[idx], yy[idx], zorder=10)
ax.set(ylabel='MBH Mass', xlabel=key)
return handle
def _draw_pop_masses(ax, pop, color='r', nplot=3e3):
xx = pop.mbulge.flatten() / MSOL
yy_list = [pop.mass]
names = ['new']
if hasattr(pop, '_mass'):
yy_list.append(pop._mass)
names.append('old')
colors = [color, '0.5']
handles = []
if xx.size > nplot:
cut = np.random.choice(xx.size, int(nplot), replace=False)
print("Plotting {:.1e}/{:.1e} data-points".format(nplot, xx.size))
else:
cut = slice(None)
for ii, yy in enumerate(yy_list):
yy = yy.flatten() / MSOL
data = np.log10([xx[cut], yy[cut]])
kale.plot.dist2d(
data, ax=ax, color=colors[ii], hist=False, contour=True,
median=True, mask_dense=True,
)
hh, = plt.plot([], [], color=colors[ii])
handles.append(hh)
return handles, names
def plot_gwb(gwb, color=None, uniform=False, nreals=5):
"""Plot a GW background from the given `Grav_Waves` instance.
Plots samples, confidence intervals, power-law, and adds twin-Hz axis (x2).
Parameters
----------
gwb : `gravwaves.Grav_Waves` (subclass) instance
Returns
-------
fig : `mpl.figure.Figure`
New matplotlib figure instance.
"""
fig, ax = figax(
scale='log',
xlabel=r'frequency $[\mathrm{yr}^{-1}]$',
ylabel=r'characteristic strain $[\mathrm{h}_c]$'
)
if uniform:
color = ax._get_lines.get_next_color()
_draw_gwb_sample(ax, gwb, color=color, num=nreals)
_draw_gwb_conf(ax, gwb, color=color)
_draw_plaw(ax, gwb.freqs*YR, f0=1, color='0.5', lw=2.0, ls='--')
_twin_hz(ax, nano=True, fs=12)
return fig
def _draw_gwb_sample(ax, gwb, num=10, back=True, fore=True, color=None):
back_flag = back
fore_flag = fore
back = gwb.back
fore = gwb.fore
freqs = gwb.freqs * YR
pl = dict(alpha=0.5, color=color, lw=0.8)
plsel = dict(alpha=0.85, color=color, lw=1.6)
sc = dict(alpha=0.25, s=20, fc=color, lw=0.0, ec='none')
scsel = dict(alpha=0.50, s=40, ec='k', fc=color, lw=1.0)
cut = np.random.choice(back.shape[1], num, replace=False)
sel = cut[0]
cut = cut[1:]
color_gen = None
color_sel = None
if back_flag:
hands_gen = ax.plot(freqs, back[:, cut], **pl)
hands_sel, = ax.plot(freqs, back[:, sel], **plsel)
color_gen = [hh.get_color() for hh in hands_gen]
color_sel = hands_sel.get_color()
if color is None:
sc['fc'] = color_gen
scsel['fc'] = color_sel
if fore_flag:
yy = fore[:, cut]
xx = freqs[:, np.newaxis] * np.ones_like(yy)
dx = np.diff(freqs)
dx = np.concatenate([[dx[0]], dx])[:, np.newaxis]
dx *= 0.2
xx += np.random.normal(0, dx, np.shape(xx))
# xx += np.random.uniform(-dx, dx, np.shape(xx))
xx = np.clip(xx, freqs[0]*0.75, None)
ax.scatter(xx, yy, **sc)
yy = fore[:, sel]
xx = freqs
ax.scatter(xx, yy, **scsel)
return
def _draw_gwb_conf(ax, gwb, **kwargs):
conf = [0.25, 0.50, 0.75]
freqs = gwb.freqs * YR
back = gwb.back
kwargs.setdefault('alpha', 0.5)
kwargs.setdefault('lw', 0.5)
conf = np.percentile(back, 100*np.array(conf), axis=-1)
ax.fill_between(freqs, conf[0], conf[-1], **kwargs)
kwargs['alpha'] = 1.0 - 0.5*(1.0 - kwargs['alpha'])
ax.plot(freqs, conf[1], **kwargs)
return
'''
|
nanogravREPO_NAMEholodeckPATH_START.@holodeck_extracted@holodeck-main@holodeck@plot.py@.PATH_END.py
|
{
"filename": "_dtickrange.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter3d/marker/colorbar/tickformatstop/_dtickrange.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the `dtickrange` info-array of a scatter3d colorbar tickformatstop."""

    def __init__(
        self,
        plotly_name="dtickrange",
        parent_name="scatter3d.marker.colorbar.tickformatstop",
        **kwargs
    ):
        # default two-element array spec: [min, max], each accepting any value type
        default_items = [
            {"valType": "any", "editType": "calc"},
            {"valType": "any", "editType": "calc"},
        ]
        super(DtickrangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            items=kwargs.pop("items", default_items),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter3d@marker@colorbar@tickformatstop@_dtickrange.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.