metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "gamma_SIII.py",
"repo_name": "bwvdnbro/CMacIonize",
"repo_path": "CMacIonize_extracted/CMacIonize-master/data/linecooling/gamma_SIII.py",
"type": "Python"
}
|
#! /usr/bin/python
################################################################################
# This file is part of CMacIonize
# Copyright (C) 2017 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
#
# CMacIonize is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CMacIonize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with CMacIonize. If not, see <http://www.gnu.org/licenses/>.
################################################################################
##
# @file gamma_SIII.py
#
# @brief Script that fits curves to the velocity-averaged collision strength
# data from Hudson, C. E., Ramsbottom, C. A. & Scott, M. P. 2012, ApJ, 750, 65
# (http://adsabs.harvard.edu/abs/2012ApJ...750...65H).
#
# @author Bert Vandenbroucke (bv7@st-andrews.ac.uk)
##
## load modules
import numpy as np
# for curve_fit
import scipy.optimize as opt
# for plotting (using a backend that does not require a graphics environment)
import matplotlib
matplotlib.use("Agg")
import pylab as pl
# for the fitting curve
from fitting_curve import (
fitting_curve,
print_fit_variables,
initialize_data_values,
append_data_values,
print_data_values,
get_code,
jacobian_fitting_curve,
round_parameters,
)
# dictionary that links abbreviated transition names to the full names used in
# LineCoolingData
# (keys follow the "G<lower>t<upper>" convention used for the data arrays
#  below; values are the enum-style identifiers emitted into the C++ code)
transitions = {
    "G0t1": "TRANSITION_0_to_1",
    "G0t2": "TRANSITION_0_to_2",
    "G0t3": "TRANSITION_0_to_3",
    "G0t4": "TRANSITION_0_to_4",
    "G1t2": "TRANSITION_1_to_2",
    "G1t3": "TRANSITION_1_to_3",
    "G1t4": "TRANSITION_1_to_4",
    "G2t3": "TRANSITION_2_to_3",
    "G2t4": "TRANSITION_2_to_4",
    "G3t4": "TRANSITION_3_to_4",
}
# main function: computes fits to the data and plots the data and fits for
# visual comparison
# the fitted curve coefficients are printed to the stdout
if __name__ == "__main__":
    data = {}

    # data from Hudson, Ramsbottom & Scott (2012), table 5
    # log10 of the temperatures (in K) at which the collision strengths are
    # tabulated
    logT = np.array(
        [3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4,
         4.6, 4.8, 5.0, 5.2, 5.4, 5.6, 5.8, 6.0,]
    )
    T = 10.0 ** logT

    # 3P0 to 3P1
    data["G0t1"] = np.array(
        [2.08, 2.10, 2.13, 2.20, 2.27, 2.26, 2.17, 2.07,
         1.97, 1.84, 1.63, 1.36, 1.07, 0.81, 0.59, 0.41,]
    )
    # 3P0 to 3P2
    data["G0t2"] = np.array(
        [9.74e-1, 9.49e-1, 9.36e-1, 9.50e-1, 9.73e-1, 1.02, 1.11, 1.23,
         1.31, 1.31, 1.22, 1.06, 8.74e-1, 6.96e-1, 5.45e-1, 4.23e-1,]
    )
    # 3P1 to 3P2
    data["G1t2"] = np.array(
        [4.82, 4.80, 4.79, 4.90, 5.03, 5.10, 5.19, 5.31,
         5.36, 5.19, 4.73, 4.03, 3.25, 2.52, 1.92, 1.44,]
    )
    # 1D2 to 1S0
    data["G3t4"] = np.array(
        [8.63e-1, 8.66e-1, 8.72e-1, 9.50e-1, 1.14, 1.38, 1.60, 1.79,
         1.94, 1.98, 1.90, 1.73, 1.54, 1.35, 1.18, 1.02,]
    )
    # 3P0 to 1D2
    data["G0t3"] = np.array(
        [6.98e-1, 7.33e-1, 7.38e-1, 7.20e-1, 7.10e-1, 7.29e-1, 7.65e-1, 7.91e-1,
         7.86e-1, 7.44e-1, 6.60e-1, 5.48e-1, 4.30e-1, 3.23e-1, 2.34e-1, 1.64e-1,]
    )
    # 3P0 to 1S0
    data["G0t4"] = np.array(
        [8.27e-2, 8.89e-2, 9.58e-2, 1.04e-1, 1.14e-1, 1.25e-1, 1.38e-1, 1.54e-1,
         1.71e-1, 1.78e-1, 1.72e-1, 1.55e-1, 1.32e-1, 1.09e-1, 8.72e-2, 6.84e-2,]
    )
    # 3P1 to 1D2
    data["G1t3"] = np.array(
        [2.09, 2.19, 2.19, 2.14, 2.11, 2.17, 2.28, 2.36,
         2.35, 2.23, 1.98, 1.64, 1.29, 9.65e-1, 6.98e-1, 4.90e-1,]
    )
    # 3P1 to 1S0
    data["G1t4"] = np.array(
        [2.17e-1, 2.36e-1, 2.57e-1, 2.79e-1, 3.05e-1, 3.30e-1, 3.54e-1, 3.84e-1,
         4.08e-1, 4.05e-1, 3.65e-1, 3.01e-1, 2.31e-1, 1.69e-1, 1.18e-1, 8.04e-2,]
    )
    # 3P2 to 1D2
    data["G2t3"] = np.array(
        [3.90, 4.07, 4.08, 3.99, 3.94, 4.02, 4.19, 4.30,
         4.26, 4.01, 3.55, 2.95, 2.32, 1.75, 1.27, 8.99e-1,]
    )
    # 3P2 to 1S0
    data["G2t4"] = np.array(
        [3.54e-1, 3.87e-1, 4.21e-1, 4.60e-1, 5.04e-1, 5.45e-1, 5.85e-1, 6.34e-1,
         6.73e-1, 6.66e-1, 5.99e-1, 4.94e-1, 3.80e-1, 2.77e-1, 1.95e-1, 1.33e-1,]
    )

    # initialize the strings for code and value output
    code = ""
    data_values = initialize_data_values()

    # do the curve fitting
    for key in sorted(data):
        # only fit the first 11 temperature points (1.e3 K to 1.e5 K);
        # the fit is not meant to be valid above that range
        imin = 0
        imax = 11
        # fit the curve
        A, _ = opt.curve_fit(
            fitting_curve,
            T[imin:imax],
            data[key][imin:imax],
            maxfev=1000000,
            p0=(0.0, 100.0, 1.0, 1.0, 0.0, 0.0, 0.0),
            jac=jacobian_fitting_curve,
        )
        A = round_parameters(*A)
        # compute the xi2 difference between the data values (in the fitting
        # interval) and the curve
        xi2 = sum((data[key][imin:imax] - fitting_curve(T[imin:imax], *A)) ** 2)
        # output some info
        print("Transition:", key)
        print_fit_variables(*A)
        print("convergence:", xi2)
        print("validity: [", T[imin], ",", T[imax - 1], "]")
        # write the fitting code for this transition
        code += get_code("SIII", transitions[key], *A)
        # add the values to the list strings
        append_data_values(data_values, *A)

        # plot the data and fit for visual comparison
        Trange = np.logspace(3.0, 5.0, 100)
        pl.plot(T, data[key], "k.")
        pl.plot(Trange, fitting_curve(Trange, *A), "r-")
        pl.xlim(0.0, 1.0e5)
        pl.savefig("tmp/SIII_{key}.png".format(key=key))
        pl.close()

        # save the plot values in separate files
        dfile = open("tmp/SIII_{key}_data.txt".format(key=key), "w")
        for i in range(len(T)):
            dfile.write("{T}\t{data}\n".format(T=T[i], data=data[key][i]))
        dfile.close()
        ffile = open("tmp/SIII_{key}_fit.txt".format(key=key), "w")
        for i in range(len(Trange)):
            ffile.write(
                "{T}\t{fit}\n".format(
                    T=Trange[i], fit=fitting_curve(Trange[i], *A)
                )
            )
        ffile.close()

    # output the code to put into the LineCoolingData constructor
    print("code:")
    print(code)
    # output the values to put in atom4.dat in Kenny's code (to update the
    # reference values for the unit tests)
    print_data_values(data_values)
|
bwvdnbroREPO_NAMECMacIonizePATH_START.@CMacIonize_extracted@CMacIonize-master@data@linecooling@gamma_SIII.py@.PATH_END.py
|
{
"filename": "lib2def.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py2/numpy/distutils/lib2def.py",
"type": "Python"
}
|
from __future__ import division, absolute_import, print_function
import re
import sys
import subprocess
__doc__ = """This module generates a DEF file from the symbols in
an MSVC-compiled DLL import library. It correctly discriminates between
data and functions. The data is collected from the output of the program
nm(1).
Usage:
python lib2def.py [libname.lib] [output.def]
or
python lib2def.py [libname.lib] > output.def
libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout
Author: Robert Kern <kernr@mail.ncifcrf.gov>
Last Update: April 30, 1999
"""
__version__ = '0.1a'

# interpreter version digits, e.g. "27" for Python 2.7; used to build the
# default python<py_ver>.lib / python<py_ver>.dll names throughout this module
py_ver = "%d%d" % tuple(sys.version_info[:2])

# default symbol-listing command (-C demangles C++ names, -s prints the
# archive index)
DEFAULT_NM = 'nm -Cs'

DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
;DATA PRELOAD SINGLE
EXPORTS
""" % py_ver
# the header of the DEF file

# regexes applied to the nm output: FUNC_RE matches any symbol reported as
# living in the Python DLL, DATA_RE only those seen through an import stub
# (_imp__ prefix), i.e. data symbols
FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
def parse_cmd():
    """Parse the command-line arguments.

    Returns
    -------
    libfile : str
        Name of the import library to read; defaults to ``python<py_ver>.lib``.
    deffile : str or None
        Name of the DEF file to write; ``None`` means "write to stdout".

    libfile, deffile = parse_cmd()"""
    if len(sys.argv) == 3:
        if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
            libfile, deffile = sys.argv[1:]
        elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
            deffile, libfile = sys.argv[1:]
        else:
            # Ambiguous extensions: fall back to the positional interpretation
            # announced below.  (Previously this branch only printed the
            # message and left both names unassigned, so the function raised
            # UnboundLocalError on return.)
            print("I'm assuming that your first argument is the library")
            print("and the second is the DEF file.")
            libfile, deffile = sys.argv[1:]
    elif len(sys.argv) == 2:
        if sys.argv[1][-4:] == '.def':
            deffile = sys.argv[1]
            libfile = 'python%s.lib' % py_ver
        elif sys.argv[1][-4:] == '.lib':
            deffile = None
            libfile = sys.argv[1]
        else:
            # Unrecognised extension: treat the lone argument as the library
            # and write the DEF file to stdout (previously unassigned, which
            # raised UnboundLocalError on return).
            libfile = sys.argv[1]
            deffile = None
    else:
        libfile = 'python%s.lib' % py_ver
        deffile = None
    return libfile, deffile
def getnm(nm_cmd=None):
    """Return the output of ``nm_cmd`` run through a shell.

    Parameters
    ----------
    nm_cmd : list or str, optional
        Command handed to :class:`subprocess.Popen` with ``shell=True``.
        Defaults to running ``nm -Cs`` on the default Python import library.

    Raises
    ------
    RuntimeError
        If the command exits with a non-zero status (previously such
        failures were silently swallowed and empty output returned).

    nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
    if nm_cmd is None:
        # Build the default lazily: avoids a mutable default argument and
        # only evaluates `py_ver` when the default is actually needed.
        nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]
    p = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, universal_newlines=True)
    # communicate() reads stdout/stderr fully and reaps the child process,
    # avoiding zombies and pipe deadlocks.
    nm_output, nm_err = p.communicate()
    if p.returncode != 0:
        raise RuntimeError('failed to run %r: %s' % (nm_cmd, nm_err))
    return nm_output
def parse_nm(nm_output):
    """Return a tuple of lists: dlist for the list of data
    symbols and flist for the list of function symbols.

    dlist, flist = parse_nm(nm_output)"""
    data_syms = DATA_RE.findall(nm_output)
    func_syms = FUNC_RE.findall(nm_output)
    # A symbol counts as a function when nm reports it both as an import
    # stub and a plain symbol, and it carries a Python-API-style prefix
    # (or is a module "init" entry point).
    flist = sorted(
        sym
        for sym in data_syms
        if sym in func_syms
        and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init')
    )
    # Every remaining Python-API symbol is exported as data.
    dlist = sorted(
        sym
        for sym in data_syms
        if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py')
    )
    return dlist, flist
def output_def(dlist, flist, header, file = sys.stdout):
    """Outputs the final DEF file to a file defaulting to stdout.

    output_def(dlist, flist, header, file = sys.stdout)"""
    # Assemble every line first and emit a single write, exactly like the
    # original string-concatenation version did.
    pieces = [header]
    pieces.extend('\t%s DATA\n' % sym for sym in dlist)
    pieces.append('\n')  # blank line separating data from function exports
    pieces.extend('\t%s\n' % sym for sym in flist)
    file.write(''.join(pieces))
if __name__ == '__main__':
    libfile, deffile = parse_cmd()
    if deffile is None:
        # No DEF file requested: write to stdout.
        deffile = sys.stdout
    else:
        deffile = open(deffile, 'w')
    # Pass a single command string.  With shell=True, POSIX shells receive
    # only the first element of a list as the command (the rest become shell
    # positional parameters), so the old [DEFAULT_NM, libfile] list silently
    # dropped the library name and nm never saw the .lib file.
    nm_cmd = '%s %s' % (DEFAULT_NM, libfile)
    nm_output = getnm(nm_cmd)
    dlist, flist = parse_nm(nm_output)
    output_def(dlist, flist, DEF_HEADER, deffile)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py2@numpy@distutils@lib2def.py@.PATH_END.py
|
{
"filename": "test_einasto.py",
"repo_name": "halomod/halomod",
"repo_path": "halomod_extracted/halomod-main/devel/test_einasto.py",
"type": "Python"
}
|
import numpy as np
import halomod

# Build a reference tracer halo model (NFW profile by default) with a
# Zehavi05 HOD at z = 0.2, using the Eisenstein-Hu transfer function.
model = halomod.TracerHaloModel(
    z=0.2,
    transfer_model="EH",
    rnum=30,
    rmin=0.1,
    rmax=30,
    hod_model="Zehavi05",
    hod_params={"M_min": 12.0, "M_1": 12.8, "alpha": 1.05},
    dr_table=0.1,
    dlnk=0.1,
    dlog10m=0.05,
)

# Clone the model and switch only the halo profile to Einasto, so any
# difference in the results below comes from the profile choice alone.
model_ein = model.clone()
model_ein.halo_profile_model = "Einasto"

# Scan the Einasto shape parameter alpha and record, for each value, the
# profile density and the tracer auto-correlation function.
rhos = []
corrs = []
for a in np.linspace(0.08, 0.4, 20):
    model_ein.update(halo_profile_params={"alpha": a})
    # column 200 presumably selects a fixed halo-mass bin of the
    # (r, m) density table -- TODO confirm against halomod's docs
    rhos.append(model_ein.halo_profile_rho[:, 200])
    corrs.append(model_ein.corr_auto_tracer)
|
halomodREPO_NAMEhalomodPATH_START.@halomod_extracted@halomod-main@devel@test_einasto.py@.PATH_END.py
|
{
"filename": "test_classifier_metrics.py",
"repo_name": "daniel-muthukrishna/astrorapid",
"repo_path": "astrorapid_extracted/astrorapid-master/tests/test_classifier_metrics.py",
"type": "Python"
}
|
daniel-muthukrishnaREPO_NAMEastrorapidPATH_START.@astrorapid_extracted@astrorapid-master@tests@test_classifier_metrics.py@.PATH_END.py
|
|
{
"filename": "geometry.py",
"repo_name": "skyfielders/python-skyfield",
"repo_path": "python-skyfield_extracted/python-skyfield-master/skyfield/geometry.py",
"type": "Python"
}
|
"""Routines solving basic geometric problems in astronomy."""
from .functions import dots, length_of, nan, sqrt_nan, where
def intersect_line_and_sphere(endpoint, center, radius):
    """Compute distance to intersections of a line and a sphere.

    Given a line through the origin (0,0,0) and an |xyz| ``endpoint``,
    and a sphere with the |xyz| ``center`` and scalar ``radius``,
    return the distance from the origin to their two intersections.

    If the line is tangent to the sphere, the two intersections will be
    at the same distance.  If the line does not intersect the sphere,
    two ``nan`` values will be returned.
    """
    # See http://paulbourke.net/geometry/circlesphere/index.html#linesphere
    # Names "b" and "c" designate the familiar values from the quadratic
    # formula; happily, a = 1 because we use a unit vector for the line.
    # The (u * v).sum(axis=0) expressions are dot products that also work
    # when the trailing dimensions carry whole arrays of vectors.
    minus_b = 2.0 * (endpoint / length_of(endpoint) * center).sum(axis=0)
    c = (center * center).sum(axis=0) - radius * radius
    discriminant = minus_b * minus_b - 4 * c
    # sqrt_nan() yields nan for a negative discriminant: no intersection.
    dsqrt = sqrt_nan(discriminant)
    return (minus_b - dsqrt) / 2.0, (minus_b + dsqrt) / 2.0
def line_and_ellipsoid_intersection(line_start, line_direction, radii):
    """Return the |xyz| position where a line intersects an ellipsoid.

    All three arguments are |xyz| arrays.  The line is specified by a
    ``line_start`` endpoint and a ``line_direction`` vector.  The
    ellipsoid is centered at the origin and is specified by its three
    ``radii`` that point along the three coordinate axes.

    Returns the |xyz| point of intersection, or ``[nan nan nan]`` if the
    line does not intersect the sphere.
    """
    # Based on `surfpt.f` from the SPICE Toolkit.

    # When the inputs carry arrays of vectors, reshape `radii` so it
    # broadcasts across them.
    if len(getattr(line_start, 'shape', ())) > 1:
        radii = radii.reshape((3, 1))

    # Scale coordinates so the ellipsoid becomes the unit sphere.
    start = line_start / radii
    direction = line_direction / radii

    # Where does the line come closest to the sphere's center?
    closest_point = start - _vector_projection(start, direction)

    startmag = length_of(start)
    pmag = length_of(closest_point)
    is_inside_sphere = startmag < 1.0
    is_behind_us = dots(closest_point - start, direction) < 0.0
    # Pick which of the two chord endpoints to return: the far one when we
    # start inside the sphere, the near one otherwise; nan when the sphere
    # lies behind the start point.
    sign = where(is_inside_sphere, +1.0, where(is_behind_us, nan, -1.0))
    # sqrt_nan() yields nan when the closest approach misses the sphere.
    half_chord_length = sqrt_nan(1.0 - pmag*pmag)
    unit_direction = direction / length_of(direction)
    intersection = closest_point + sign * half_chord_length * unit_direction
    # A start point exactly on the surface is its own intersection.
    # Undo the scaling before returning.
    return where(startmag == 1.0, line_start, intersection * radii)
def _vector_projection(a, b):
    """Return the projection of vector ``a`` onto vector ``b``."""
    scale = dots(a, b) / dots(b, b)
    return scale * b
|
skyfieldersREPO_NAMEpython-skyfieldPATH_START.@python-skyfield_extracted@python-skyfield-master@skyfield@geometry.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolar/marker/colorbar/tickfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``color`` property of
    ``scatterpolar.marker.colorbar.tickfont``."""

    def __init__(
        self,
        plotly_name="color",
        parent_name="scatterpolar.marker.colorbar.tickfont",
        **kwargs,
    ):
        # Allow callers to override edit_type; default to "colorbars".
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolar@marker@colorbar@tickfont@_color.py@.PATH_END.py
|
{
"filename": "_row.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/smith/domain/_row.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class RowValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the ``row`` property of ``layout.smith.domain``."""

    def __init__(self, plotly_name="row", parent_name="layout.smith.domain", **kwargs):
        # Allow callers to override edit_type and the lower bound.
        edit_type = kwargs.pop("edit_type", "plot")
        minimum = kwargs.pop("min", 0)
        super(RowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@smith@domain@_row.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "kammerje/spaceKLIP",
"repo_path": "spaceKLIP_extracted/spaceKLIP-main/spaceKLIP/__init__.py",
"type": "Python"
}
|
from . import analysistools
from . import classpsfsubpipeline
from . import coron1pipeline
from . import coron2pipeline
from . import coron3pipeline
from . import database
from . import imagetools
from . import mast
from . import plotting
from . import psf
from . import pyklippipeline
from . import utils
from ._version import __version__, __version_tuple__
|
kammerjeREPO_NAMEspaceKLIPPATH_START.@spaceKLIP_extracted@spaceKLIP-main@spaceKLIP@__init__.py@.PATH_END.py
|
{
"filename": "fastembed.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/embeddings/fastembed.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.embeddings import FastEmbedEmbeddings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each deprecated attribute name to the module that now provides it.
DEPRECATED_LOOKUP = {"FastEmbedEmbeddings": "langchain_community.embeddings"}

# Importer that handles the deprecation warning and the optional import of
# langchain_community on first attribute access.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): called only when ``name`` is not
    found in this module, delegating to the deprecation-aware importer.
    """
    return _import_attribute(name)


__all__ = [
    "FastEmbedEmbeddings",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@embeddings@fastembed.py@.PATH_END.py
|
{
"filename": "spingaussian.py",
"repo_name": "javicarron/pynkowski",
"repo_path": "pynkowski_extracted/pynkowski-main/pynkowski/theory/spingaussian.py",
"type": "Python"
}
|
"""This submodule contains the definition for spin field with isotropic Gaussian Q and U.
"""
import numpy as np
from .utils_th import get_μ, get_σ, lkc_ambient_dict, get_C2, flag, rho
from .gaussian import Gaussian
from .base_th import _prepare_lkc
def LKC_spin(j, us, mu, dim=3, lkc_ambient=lkc_ambient_dict["SO3"], Ks=[1., 0.31912, 0.7088, 1.]):
    """Compute the expected value of the Lipschitz–Killing Curvatures (LKC) of the excursion set for Gaussian Isotropic fields.

    Parameters
    ----------
    j : int
        The index of the LKC.
    us : np.array
        The thresholds where the LKC is evaluated.
    mu : float
        The value of the derivative of the covariance function at the origin for the U and Q fields.
    dim : int, optional
        The dimension of the ambient manifold, SO(3). Must be 3.
    lkc_ambient : np.array, optional
        An array of the Lipschitz–Killing Curvatures of the ambient manifold. Its length must be `dim+1` if `dim` is also given.
    Ks : np.array, optional
        The normalisation factors for the Minkowski Functionals.

    Returns
    ----------
    LKC : np.array
        The expected value of the Lipschitz–Killing Curvatures at the thresholds.
    """
    dim, lkc_ambient = _prepare_lkc(dim, lkc_ambient)
    assert dim==3, "The LKC for spin fields is only defined on SO(3), which has `dim=3`"
    # Use a float accumulator regardless of the dtype of `us` (zeros_like
    # would inherit an integer dtype from integer thresholds).
    result = np.zeros_like(us, dtype=float)
    # Reverse into an explicit float copy so the in-place updates below can
    # never write through to the caller's `Ks` (Ks[::-1] on an ndarray is a
    # view, and the old code mutated it).  Reversed to get the order as the
    # LKC index, not the MF index.
    KLs = np.array(Ks[::-1], dtype=float)
    KLs[-1] *= 2.**1.5 / 5.  # This is the determinant of the covariance matrix of the derivatives of the field
    KLs /= np.array([1., 1., mu**0.5, mu])
    for k in range(dim - j + 1):
        result += flag(k+j, k) * rho(k, us) * lkc_ambient[k+j] / KLs[k+j]
    return result * KLs[j] / lkc_ambient[-1]
class SpinGaussian(Gaussian):
    """Class for Spin Isotropic Gaussian fields in the SO(3) formalism.

    Parameters
    ----------
    cls : np.array
        Angular power spectrum of the field (cls_E + cls_B).
    normalise : bool, optional
        If `True`, normalise the field to unit variance.
        Default : True
    fsky : float, optional
        Fraction of the sky covered by the field, `0<fsky<=1`.
        Default : 1.
    Ks : np.array, optional
        The normalisation constants for the MFs of the field: [K_0, K_1, K_2, K_3].
        Default : [1., 0.31912, 0.7088, 1.] as found in Carrón Duque et al (2023)
    leading_order : bool, optional
        Whether to use only the leading order in μ for the computation of the MFs or the exact expression (with two terms).
        Default : True (leading order only)

    Attributes
    ----------
    cls : np.array
        Angular Power Spectrum of the field (cls_E + cls_B).
    fsky : float
        Fraction of the sky covered by the field.
    dim : int
        Dimension of the space where the field is defined, in this case this is 3.
    name : str
        Name of the field, `"Spin Isotropic Gaussian"` by default.
    sigma : float
        The standard deviation of the field.
    mu : float
        The derivative of the covariance function at the origin, times $-2$ (in the spatial coordinates θϕ). Equal to the variance of the first derivatives of the field.
    nu : float
        The second derivative of the covariance function at the origin (in the spatial coordinates θϕ).
    C2 : float
        The second derivative of the angular covariance function at 1 (in the spatial coordinates θϕ).
    lkc_ambient : np.array or None
        The values for the Lipschitz–Killing Curvatures of the ambient space.
    Ks : np.array
        The normalisation constants for the MFs of the field: [K_0, K_1, K_2, K_3].
    leading_order : bool
        Whether to use only the leading order in μ for the computation of the MFs or the exact expression (with two terms).
    """
    def __init__(self, cls, normalise=True, fsky=1., Ks=[1., 0.31912, 0.7088, 1.], leading_order=True):
        if normalise:
            # Rescale out of place: the previous in-place `cls /= …`
            # silently modified the caller's power-spectrum array.
            cls = cls / get_σ(cls)
            self.sigma = 1.
        else:
            self.sigma = np.sqrt(get_σ(cls))
        self.cls = cls
        self.fsky = fsky
        self.mu = get_μ(cls)
        self.C2 = get_C2(cls)
        self.nu = self.C2/4. - self.mu/24.
        super().__init__(3, sigma=self.sigma, mu=self.mu, nu=self.nu, lkc_ambient=lkc_ambient_dict["SO3"]*self.fsky)
        self.leading_order = leading_order
        if leading_order:
            # Drop the subleading L_1 contribution of the ambient manifold.
            self.lkc_ambient[1] = 0.
        self.name = 'Spin Isotropic Gaussian'
        self.Ks = Ks

    def LKC(self, j, us):
        """Compute the expected values of the Lipschitz–Killing Curvatures of the excursion sets at thresholds `us`, $\mathbb{L}_j(A_u(f))$.

        Parameters
        ----------
        j : int
            Index of the LKC to compute, `0 < j < dim`.
        us : np.array
            The thresholds considered for the computation.

        Returns
        -------
        lkc : np.array
            Expected value of LKC evaluated at the thresholds.
        """
        # Normalise thresholds out of place: the previous in-place
        # `us /= self.sigma` mutated the caller's array.
        us = np.asarray(us) / self.sigma
        return LKC_spin(j, us, self.mu, dim=self.dim, lkc_ambient=self.lkc_ambient, Ks=self.Ks)

__all__ = ["SpinGaussian"]
|
javicarronREPO_NAMEpynkowskiPATH_START.@pynkowski_extracted@pynkowski-main@pynkowski@theory@spingaussian.py@.PATH_END.py
|
{
"filename": "analyze.py",
"repo_name": "jmd-dk/concept",
"repo_path": "concept_extracted/concept-master/test/pure_python_pp/analyze.py",
"type": "Python"
}
|
# This file has to be run in pure Python mode!

# Imports from the CO𝘕CEPT code
from commons import *
from snapshot import load
import species
plt = get_matplotlib().pyplot

# Absolute path and name of this test
this_dir = os.path.dirname(os.path.realpath(__file__))
this_test = os.path.basename(os.path.dirname(this_dir))

# Read in data from the CO𝘕CEPT snapshots
species.allow_similarly_named_components = True
a = []
# Process counts are inferred from the output_python_<n> directory names
nprocs_list = sorted(
    int(dname[(dname.index('python_') + 7):])
    for dname in [
        os.path.basename(dname)
        for dname in glob(f'{this_dir}/output_python_*')
    ]
)
components = {
    'cython': {n: [] for n in nprocs_list},
    'python': {n: [] for n in nprocs_list},
}
for cp in components.keys():
    for n in nprocs_list:
        # Snapshots are ordered by the scale factor embedded in the filename
        for fname in sorted(
            glob(f'{this_dir}/output_{cp}_{n}/snapshot_a=*'),
            key=(lambda s: s[(s.index('=') + 1):]),
        ):
            snapshot = load(fname, compare_params=False)
            # Record the scale factors once, from the single-process cython run
            if cp == 'cython' and n == 1:
                a.append(snapshot.params['a'])
            components[cp][n].append(snapshot.components[0])
N_snapshots = len(a)

# Begin analysis
masterprint(f'Analysing {this_test} data ...')

# Using the particle order of the cython snapshot as the standard, find the corresponding
# ID's in the python snapshots and order these particles accordingly.
N = components['cython'][1][0].N
D2 = zeros(N, dtype=float)
ID = zeros(N, dtype=int)
for i in range(N_snapshots):
    for n in nprocs_list:
        x_cython = components['cython'][n][i].posx
        y_cython = components['cython'][n][i].posy
        z_cython = components['cython'][n][i].posz
        x_python = components['python'][n][i].posx
        y_python = components['python'][n][i].posy
        z_python = components['python'][n][i].posz
        # For each cython particle j, find the nearest python particle k
        # under periodic (minimum-image) distances
        for j in range(N):
            for k in range(N):
                dx = x_cython[j] - x_python[k]
                if dx > 0.5*boxsize:
                    dx -= boxsize
                elif dx < -0.5*boxsize:
                    dx += boxsize
                dy = y_cython[j] - y_python[k]
                if dy > 0.5*boxsize:
                    dy -= boxsize
                elif dy < -0.5*boxsize:
                    dy += boxsize
                dz = z_cython[j] - z_python[k]
                if dz > 0.5*boxsize:
                    dz -= boxsize
                elif dz < -0.5*boxsize:
                    dz += boxsize
                D2[k] = dx**2 + dy**2 + dz**2
            ID[j] = np.argmin(D2)
        # Reorder the python particle data in place to match the cython order
        components['python'][n][i].posx[:] = components['python'][n][i].posx[ID]
        components['python'][n][i].posy[:] = components['python'][n][i].posy[ID]
        components['python'][n][i].posz[:] = components['python'][n][i].posz[ID]
        components['python'][n][i].momx[:] = components['python'][n][i].momx[ID]
        components['python'][n][i].momy[:] = components['python'][n][i].momy[ID]
        components['python'][n][i].momz[:] = components['python'][n][i].momz[ID]

# Compute distance between particles in the two snapshots
dist = collections.OrderedDict((n, []) for n in nprocs_list)
for i in range(N_snapshots):
    x = {(cp, n): components[cp][n][i].posx for cp in ('cython', 'python') for n in nprocs_list}
    y = {(cp, n): components[cp][n][i].posy for cp in ('cython', 'python') for n in nprocs_list}
    z = {(cp, n): components[cp][n][i].posz for cp in ('cython', 'python') for n in nprocs_list}
    for n in nprocs_list:
        # Minimum-image distance: take the smallest over all 27 box offsets
        dist[n].append(sqrt(asarray([
            min([
                + (x['cython', n][j] - x['python', n][j] + xsgn*boxsize)**2
                + (y['cython', n][j] - y['python', n][j] + ysgn*boxsize)**2
                + (z['cython', n][j] - z['python', n][j] + zsgn*boxsize)**2
                for xsgn in (-1, 0, +1)
                for ysgn in (-1, 0, +1)
                for zsgn in (-1, 0, +1)
            ])
            for j in range(N)
        ])))

# Plot
fig_file = f'{this_dir}/result.png'
fig, axes = plt.subplots(len(nprocs_list), sharex=True, sharey=True)
for n, d, ax in zip(dist.keys(), dist.values(), axes):
    for i in range(N_snapshots):
        # machine_ϵ keeps exactly-zero distances visible on the log axis
        ax.semilogy(
            machine_ϵ + asarray(d[i])/boxsize,
            '.',
            alpha=0.7,
            label=f'$a={a[i]}$',
            zorder=-i,
        )
    ax.set_ylabel(
        rf'$|\mathbf{{x}}_{{\mathrm{{pp}}{n}}} - \mathbf{{x}}_{{\mathrm{{c}}{n}}}|'
        rf'/\mathrm{{boxsize}}$'
    )
axes[ 0].set_xlim(0, N - 1)
axes[-1].set_xlabel('Particle number')
fig.subplots_adjust(hspace=0)
plt.setp([ax.get_xticklabels() for ax in axes[:-1]], visible=False)
axes[0].legend()
fig.tight_layout()
fig.savefig(fig_file, dpi=150)

# Printout error message for unsuccessful test
tol = 1e-10
if any(np.mean(asarray(d)/boxsize) > tol for d in dist.values()):
    abort(
        f'Some or all pure Python runs with nprocs = {nprocs_list} yielded results '
        f'different from their compiled counterparts!\n'
        f'See "{fig_file}" for a visualization.'
    )

# Compare the two tabulated grids
ewald_grid = {}
for cp in ('cython', 'python'):
    with open_hdf5(f'{this_dir}/ewald_{cp}.hdf5', mode='r') as hdf5_file:
        ewald_grid[cp] = hdf5_file['data'][...]
# Absolute and relative tolerances for the element-wise comparison
δ, ϵ = 1e-10, 1e-10
if not all(np.isclose(ewald_grid['cython'], ewald_grid['python'], ϵ, δ)):
    abort(
        'The two tabulated Ewald grids "{}" and "{}" are far from being numerically identical!'
        .format(*[f'{this_dir}/ewald_{cp}.hdf5' for cp in ('cython', 'python')])
    )

# Done analysing
masterprint('done')
|
jmd-dkREPO_NAMEconceptPATH_START.@concept_extracted@concept-master@test@pure_python_pp@analyze.py@.PATH_END.py
|
{
"filename": "_target_encoder.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/preprocessing/_target_encoder.py",
"type": "Python"
}
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral, Real
import numpy as np
from ..base import OneToOneFeatureMixin, _fit_context
from ..utils._param_validation import Interval, StrOptions
from ..utils.multiclass import type_of_target
from ..utils.validation import (
_check_feature_names_in,
_check_y,
check_consistent_length,
check_is_fitted,
)
from ._encoders import _BaseEncoder
from ._target_encoder_fast import _fit_encoding_fast, _fit_encoding_fast_auto_smooth
class TargetEncoder(OneToOneFeatureMixin, _BaseEncoder):
"""Target Encoder for regression and classification targets.
Each category is encoded based on a shrunk estimate of the average target
values for observations belonging to the category. The encoding scheme mixes
the global target mean with the target mean conditioned on the value of the
category (see [MIC]_).
When the target type is "multiclass", encodings are based
on the conditional probability estimate for each class. The target is first
binarized using the "one-vs-all" scheme via
:class:`~sklearn.preprocessing.LabelBinarizer`, then the average target
value for each class and each category is used for encoding, resulting in
`n_features` * `n_classes` encoded output features.
:class:`TargetEncoder` considers missing values, such as `np.nan` or `None`,
as another category and encodes them like any other category. Categories
that are not seen during :meth:`fit` are encoded with the target mean, i.e.
`target_mean_`.
For a demo on the importance of the `TargetEncoder` internal cross-fitting,
see
:ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder_cross_val.py`.
For a comparison of different encoders, refer to
:ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. Read
more in the :ref:`User Guide <target_encoder>`.
.. note::
`fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a
:term:`cross fitting` scheme is used in `fit_transform` for encoding.
See the :ref:`User Guide <target_encoder>` for details.
.. versionadded:: 1.3
Parameters
----------
categories : "auto" or list of shape (n_features,) of array-like, default="auto"
Categories (unique values) per feature:
- `"auto"` : Determine categories automatically from the training data.
- list : `categories[i]` holds the categories expected in the i-th column. The
passed categories should not mix strings and numeric values within a single
feature, and should be sorted in case of numeric values.
The used categories are stored in the `categories_` fitted attribute.
target_type : {"auto", "continuous", "binary", "multiclass"}, default="auto"
Type of target.
- `"auto"` : Type of target is inferred with
:func:`~sklearn.utils.multiclass.type_of_target`.
- `"continuous"` : Continuous target
- `"binary"` : Binary target
- `"multiclass"` : Multiclass target
.. note::
The type of target inferred with `"auto"` may not be the desired target
type used for modeling. For example, if the target consisted of integers
between 0 and 100, then :func:`~sklearn.utils.multiclass.type_of_target`
will infer the target as `"multiclass"`. In this case, setting
`target_type="continuous"` will specify the target as a regression
problem. The `target_type_` attribute gives the target type used by the
encoder.
.. versionchanged:: 1.4
Added the option 'multiclass'.
smooth : "auto" or float, default="auto"
The amount of mixing of the target mean conditioned on the value of the
category with the global target mean. A larger `smooth` value will put
more weight on the global target mean.
If `"auto"`, then `smooth` is set to an empirical Bayes estimate.
cv : int, default=5
Determines the number of folds in the :term:`cross fitting` strategy used in
:meth:`fit_transform`. For classification targets, `StratifiedKFold` is used
and for continuous targets, `KFold` is used.
shuffle : bool, default=True
Whether to shuffle the data in :meth:`fit_transform` before splitting into
folds. Note that the samples within each split will not be shuffled.
random_state : int, RandomState instance or None, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold. Otherwise, this
parameter has no effect.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
encodings_ : list of shape (n_features,) or (n_features * n_classes) of \
ndarray
Encodings learnt on all of `X`.
For feature `i`, `encodings_[i]` are the encodings matching the
categories listed in `categories_[i]`. When `target_type_` is
"multiclass", the encoding for feature `i` and class `j` is stored in
`encodings_[j + (i * len(classes_))]`. E.g., for 2 features (f) and
3 classes (c), encodings are ordered:
f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2,
categories_ : list of shape (n_features,) of ndarray
The categories of each input feature determined during fitting or
specified in `categories`
(in order of the features in `X` and corresponding with the output
of :meth:`transform`).
target_type_ : str
Type of target.
target_mean_ : float
The overall mean of the target. This value is only used in :meth:`transform`
to encode categories.
n_features_in_ : int
Number of features seen during :term:`fit`.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
classes_ : ndarray or None
If `target_type_` is 'binary' or 'multiclass', holds the label for each class,
otherwise `None`.
See Also
--------
OrdinalEncoder : Performs an ordinal (integer) encoding of the categorical features.
Contrary to TargetEncoder, this encoding is not supervised. Treating the
resulting encoding as numerical features therefore leads to arbitrarily
ordered values and typically leads to lower predictive performance
when used as preprocessing for a classifier or regressor.
OneHotEncoder : Performs a one-hot encoding of categorical features. This
unsupervised encoding is better suited for low cardinality categorical
variables as it generates one new feature per unique category.
References
----------
.. [MIC] :doi:`Micci-Barreca, Daniele. "A preprocessing scheme for high-cardinality
categorical attributes in classification and prediction problems"
SIGKDD Explor. Newsl. 3, 1 (July 2001), 27–32. <10.1145/507533.507538>`
Examples
--------
With `smooth="auto"`, the smoothing parameter is set to an empirical Bayes estimate:
>>> import numpy as np
>>> from sklearn.preprocessing import TargetEncoder
>>> X = np.array([["dog"] * 20 + ["cat"] * 30 + ["snake"] * 38], dtype=object).T
>>> y = [90.3] * 5 + [80.1] * 15 + [20.4] * 5 + [20.1] * 25 + [21.2] * 8 + [49] * 30
>>> enc_auto = TargetEncoder(smooth="auto")
>>> X_trans = enc_auto.fit_transform(X, y)
>>> # A high `smooth` parameter puts more weight on global mean on the categorical
>>> # encodings:
>>> enc_high_smooth = TargetEncoder(smooth=5000.0).fit(X, y)
>>> enc_high_smooth.target_mean_
np.float64(44...)
>>> enc_high_smooth.encodings_
[array([44..., 44..., 44...])]
>>> # On the other hand, a low `smooth` parameter puts more weight on target
>>> # conditioned on the value of the categorical:
>>> enc_low_smooth = TargetEncoder(smooth=1.0).fit(X, y)
>>> enc_low_smooth.encodings_
[array([20..., 80..., 43...])]
"""
_parameter_constraints: dict = {
"categories": [StrOptions({"auto"}), list],
"target_type": [StrOptions({"auto", "continuous", "binary", "multiclass"})],
"smooth": [StrOptions({"auto"}), Interval(Real, 0, None, closed="left")],
"cv": [Interval(Integral, 2, None, closed="left")],
"shuffle": ["boolean"],
"random_state": ["random_state"],
}
    def __init__(
        self,
        categories="auto",
        target_type="auto",
        smooth="auto",
        cv=5,
        shuffle=True,
        random_state=None,
    ):
        # Per scikit-learn convention, __init__ only stores the parameters
        # verbatim; all validation and fitting happens at fit time.
        self.categories = categories
        self.smooth = smooth
        self.target_type = target_type
        self.cv = cv
        self.shuffle = shuffle
        self.random_state = random_state
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit the :class:`TargetEncoder` to X and y.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to determine the categories of each feature.
        y : array-like of shape (n_samples,)
            The target data used to encode the categories.

        Returns
        -------
        self : object
            Fitted encoder.
        """
        # Learn encodings on the full dataset; the cross-fitting scheme is
        # only used by `fit_transform`.
        self._fit_encodings_all(X, y)
        return self
    @_fit_context(prefer_skip_nested_validation=True)
    def fit_transform(self, X, y):
        """Fit :class:`TargetEncoder` and transform X with the target encoding.

        .. note::
            `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a
            :term:`cross fitting` scheme is used in `fit_transform` for encoding.
            See the :ref:`User Guide <target_encoder>`. for details.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to determine the categories of each feature.
        y : array-like of shape (n_samples,)
            The target data used to encode the categories.

        Returns
        -------
        X_trans : ndarray of shape (n_samples, n_features) or \
                (n_samples, (n_features * n_classes))
            Transformed input.
        """
        from ..model_selection import KFold, StratifiedKFold  # avoid circular import

        X_ordinal, X_known_mask, y_encoded, n_categories = self._fit_encodings_all(X, y)

        # The cv splitter is voluntarily restricted to *KFold to enforce non
        # overlapping validation folds, otherwise the fit_transform output will
        # not be well-specified.
        if self.target_type_ == "continuous":
            cv = KFold(self.cv, shuffle=self.shuffle, random_state=self.random_state)
        else:
            # Classification targets: stratify folds so each class appears
            # in every training split.
            cv = StratifiedKFold(
                self.cv, shuffle=self.shuffle, random_state=self.random_state
            )

        # If 'multiclass' multiply axis=1 by num classes else keep shape the same
        if self.target_type_ == "multiclass":
            X_out = np.empty(
                (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)),
                dtype=np.float64,
            )
        else:
            X_out = np.empty_like(X_ordinal, dtype=np.float64)

        # Cross fitting: each test fold is encoded with statistics learned
        # from the other folds only, limiting target leakage.
        for train_idx, test_idx in cv.split(X, y):
            X_train, y_train = X_ordinal[train_idx, :], y_encoded[train_idx]
            y_train_mean = np.mean(y_train, axis=0)

            if self.target_type_ == "multiclass":
                encodings = self._fit_encoding_multiclass(
                    X_train,
                    y_train,
                    n_categories,
                    y_train_mean,
                )
            else:
                encodings = self._fit_encoding_binary_or_continuous(
                    X_train,
                    y_train,
                    n_categories,
                    y_train_mean,
                )
            self._transform_X_ordinal(
                X_out,
                X_ordinal,
                ~X_known_mask,
                test_idx,
                encodings,
                y_train_mean,
            )
        return X_out
def transform(self, X):
"""Transform X with the target encoding.
.. note::
`fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a
:term:`cross fitting` scheme is used in `fit_transform` for encoding.
See the :ref:`User Guide <target_encoder>`. for details.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to determine the categories of each feature.
Returns
-------
X_trans : ndarray of shape (n_samples, n_features) or \
(n_samples, (n_features * n_classes))
Transformed input.
"""
X_ordinal, X_known_mask = self._transform(
X, handle_unknown="ignore", ensure_all_finite="allow-nan"
)
# If 'multiclass' multiply axis=1 by num of classes else keep shape the same
if self.target_type_ == "multiclass":
X_out = np.empty(
(X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)),
dtype=np.float64,
)
else:
X_out = np.empty_like(X_ordinal, dtype=np.float64)
self._transform_X_ordinal(
X_out,
X_ordinal,
~X_known_mask,
slice(None),
self.encodings_,
self.target_mean_,
)
return X_out
    def _fit_encodings_all(self, X, y):
        """Fit a target encoding with all the data.

        Returns the ordinal-encoded ``X``, the known-category mask, the
        (label-encoded) ``y`` and the per-feature category counts so that
        ``fit_transform`` can reuse them for cross fitting.
        """
        # avoid circular import
        from ..preprocessing import (
            LabelBinarizer,
            LabelEncoder,
        )

        check_consistent_length(X, y)
        # Learn the per-feature category lists (sets self.categories_ etc.).
        self._fit(X, handle_unknown="ignore", ensure_all_finite="allow-nan")

        if self.target_type == "auto":
            accepted_target_types = ("binary", "multiclass", "continuous")
            inferred_type_of_target = type_of_target(y, input_name="y")
            if inferred_type_of_target not in accepted_target_types:
                raise ValueError(
                    "Unknown label type: Target type was inferred to be "
                    f"{inferred_type_of_target!r}. Only {accepted_target_types} are "
                    "supported."
                )
            self.target_type_ = inferred_type_of_target
        else:
            self.target_type_ = self.target_type

        self.classes_ = None
        if self.target_type_ == "binary":
            # Binary targets become {0, 1} so their mean is a probability.
            label_encoder = LabelEncoder()
            y = label_encoder.fit_transform(y)
            self.classes_ = label_encoder.classes_
        elif self.target_type_ == "multiclass":
            # One-vs-rest indicator matrix: one binary column per class.
            label_binarizer = LabelBinarizer()
            y = label_binarizer.fit_transform(y)
            self.classes_ = label_binarizer.classes_
        else:  # continuous
            y = _check_y(y, y_numeric=True, estimator=self)

        # Global target mean (per class in the multiclass case); used as the
        # fallback encoding for unknown categories.
        self.target_mean_ = np.mean(y, axis=0)

        X_ordinal, X_known_mask = self._transform(
            X, handle_unknown="ignore", ensure_all_finite="allow-nan"
        )
        n_categories = np.fromiter(
            (len(category_for_feature) for category_for_feature in self.categories_),
            dtype=np.int64,
            count=len(self.categories_),
        )
        if self.target_type_ == "multiclass":
            encodings = self._fit_encoding_multiclass(
                X_ordinal,
                y,
                n_categories,
                self.target_mean_,
            )
        else:
            encodings = self._fit_encoding_binary_or_continuous(
                X_ordinal,
                y,
                n_categories,
                self.target_mean_,
            )
        self.encodings_ = encodings

        return X_ordinal, X_known_mask, y, n_categories
def _fit_encoding_binary_or_continuous(
self, X_ordinal, y, n_categories, target_mean
):
"""Learn target encodings."""
if self.smooth == "auto":
y_variance = np.var(y)
encodings = _fit_encoding_fast_auto_smooth(
X_ordinal,
y,
n_categories,
target_mean,
y_variance,
)
else:
encodings = _fit_encoding_fast(
X_ordinal,
y,
n_categories,
self.smooth,
target_mean,
)
return encodings
def _fit_encoding_multiclass(self, X_ordinal, y, n_categories, target_mean):
"""Learn multiclass encodings.
Learn encodings for each class (c) then reorder encodings such that
the same features (f) are grouped together. `reorder_index` enables
converting from:
f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2
to:
f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2
"""
n_features = self.n_features_in_
n_classes = len(self.classes_)
encodings = []
for i in range(n_classes):
y_class = y[:, i]
encoding = self._fit_encoding_binary_or_continuous(
X_ordinal,
y_class,
n_categories,
target_mean[i],
)
encodings.extend(encoding)
reorder_index = (
idx
for start in range(n_features)
for idx in range(start, (n_classes * n_features), n_features)
)
return [encodings[idx] for idx in reorder_index]
def _transform_X_ordinal(
self,
X_out,
X_ordinal,
X_unknown_mask,
row_indices,
encodings,
target_mean,
):
"""Transform X_ordinal using encodings.
In the multiclass case, `X_ordinal` and `X_unknown_mask` have column
(axis=1) size `n_features`, while `encodings` has length of size
`n_features * n_classes`. `feat_idx` deals with this by repeating
feature indices by `n_classes` E.g., for 3 features, 2 classes:
0,0,1,1,2,2
Additionally, `target_mean` is of shape (`n_classes`,) so `mean_idx`
cycles through 0 to `n_classes` - 1, `n_features` times.
"""
if self.target_type_ == "multiclass":
n_classes = len(self.classes_)
for e_idx, encoding in enumerate(encodings):
# Repeat feature indices by n_classes
feat_idx = e_idx // n_classes
# Cycle through each class
mean_idx = e_idx % n_classes
X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, feat_idx]]
X_out[X_unknown_mask[:, feat_idx], e_idx] = target_mean[mean_idx]
else:
for e_idx, encoding in enumerate(encodings):
X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, e_idx]]
X_out[X_unknown_mask[:, e_idx], e_idx] = target_mean
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names. `feature_names_in_` is used unless it is
not defined, in which case the following input feature names are
generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
When `type_of_target_` is "multiclass" the names are of the format
'<feature_name>_<class_name>'.
"""
check_is_fitted(self, "n_features_in_")
feature_names = _check_feature_names_in(self, input_features)
if self.target_type_ == "multiclass":
feature_names = [
f"{feature_name}_{class_name}"
for feature_name in feature_names
for class_name in self.classes_
]
return np.asarray(feature_names, dtype=object)
else:
return feature_names
    def __sklearn_tags__(self):
        # Extend the base estimator tags: this transformer requires y at fit
        # time (it is a supervised encoding).
        tags = super().__sklearn_tags__()
        tags.target_tags.required = True
        return tags
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@preprocessing@_target_encoder.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/increasing/line/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated plotly validator package.
# On Python < 3.7 (no module-level __getattr__, PEP 562) or while type
# checking, import the validator classes eagerly; otherwise install lazy
# loaders so each submodule is only imported on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._width import WidthValidator
    from ._dash import DashValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._width.WidthValidator", "._dash.DashValidator", "._color.ColorValidator"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@increasing@line@__init__.py@.PATH_END.py
|
{
"filename": "singlesource_image.py",
"repo_name": "dingswin/psrvlbireduce",
"repo_path": "psrvlbireduce_extracted/psrvlbireduce-master/datareduction/singlesource_image.py",
"type": "Python"
}
|
#!/usr/bin/env ParselTongue
## written by Adam Deller
################################################################################
# AIPS imports
################################################################################
from AIPS import AIPS, AIPSDisk
from AIPSTask import AIPSTask, AIPSList
from AIPSData import AIPSUVData, AIPSImage, AIPSCat
from AIPSTV import AIPSTV
################################################################################
# General python imports
################################################################################
import sys, os, math
import vlbatasks
from optparse import OptionParser
################################################################################
# Option parsing and defaulted global variables
################################################################################
# Command-line options. Note: this script runs under ParselTongue (Python 2).
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-f", "--fitsfile", dest="fitsfile", default="",
                  help="Fits file to bisect")
parser.add_option("-s", "--sourcename", dest="sourcename", default="",
                  help="Name of the source (blank will try to guess from filename)")
parser.add_option("-e", "--experiment", dest="experiment", default="",
                  help="Name of the experiment (blank will try to guess from filename)")
parser.add_option("--endif", dest="endif", default=-1,
                  help="End IF number (default -1 does only combined)")
parser.add_option("--pixelmas", dest="pixelmas", default="0.75",
                  help="Pixel size in milliarcseconds")
parser.add_option("--pixelwindow", dest="pixelwindow", default="20",
                  help="Number of pixels to fit")
parser.add_option("--imagesize", dest="imagesize", default="2048",
                  help="Size of the initial difmap image in pixels")
parser.add_option("--finalimagesize", dest="finalimagesize", default="256",
                  help="Size of the final difmap image in pixels")
# FIX: the help text used to claim the default was "0,-2" while the actual
# default below is "0,-1"; the help now matches the code.
parser.add_option("--weightstring", dest="weightstring", default="0,-1",
                  help="Difmap weight string to use (default 0,-1)")
(options, junk) = parser.parse_args()

# Convert the (string) option values to their working types.
aipsver = '31DEC20'
fitsfile = options.fitsfile
sourcename = options.sourcename
experiment = options.experiment
endif = int(options.endif)
pixelmas = float(options.pixelmas)
pixelwindow = int(options.pixelwindow)
imagesize = int(options.imagesize)
finalimagesize = int(options.finalimagesize)
weightstring = options.weightstring
AIPS.userno = 2
beginif = 1
prefix = os.getcwd() + '/'

# Sanity checks / guesses: the fits file name is expected to look like
# <experiment>_<sourcename>_... so missing values are parsed from it.
if fitsfile == "":
    parser.error("You must supply a filename with -f or --fitsfile")
if sourcename == "":
    sourcename = fitsfile.split('/')[-1].split('_')[1]
if experiment == "":
    experiment = fitsfile.split('/')[-1].split('_')[0]
if fitsfile.rfind('/') >= 0:
    prefix = fitsfile[:fitsfile.rfind('/')] + '/'
else:
    fitsfile = prefix + fitsfile

# Load via a fresh templink.fits symlink (presumably to give AIPS a short,
# fixed filename -- TODO confirm).
os.system("rm -f " + os.getcwd() + "/templink.fits")
os.system("ln -s %s templink.fits" % fitsfile)

# Load the file into AIPS, grab the observation midpoint MJD, then delete
# the AIPS copy again -- only the MJD is needed downstream.
uvdata = AIPSUVData("JUNK", "JUNK", 1, 1)
if uvdata.exists():
    uvdata.zap()
vlbatasks.fitld_uvfits(os.getcwd() + "/templink.fits", uvdata, [])
try:
    fullmjd = vlbatasks.get_dataset_mjd_midpoint(uvdata)
except ValueError:
    print "Couldn't get MJD"
    fullmjd = -1
uvdata.zap()

# Imaging configuration passed through to difmap and JMFIT.
fullauto = True
stokesi = True
npixels = imagesize
gaussiantarget = False
beginif = 1
uvaverstr = '20,False'
fullimagefile = fitsfile[:fitsfile.rfind('.')] + ".clean"
fulljmfitfile = fitsfile[:fitsfile.rfind('.')] + ".clean"
#vlbatasks.difmap_maptarget(fitsfile, fullimagefile, fullauto, stokesi,
#                           pixelmas, npixels, weightstring, uvaverstr, gaussiantarget,
#                           beginif, endif, "", finalimagesize)
#vlbatasks.jmfit(fullimagefile, fulljmfitfile, sourcename, stokesi, endif, pixelwindow, fullmjd)
vlbatasks.difmap_maptarget(fitsfile, fullimagefile, fullauto, stokesi,
                           pixelmas, npixels, weightstring, uvaverstr, gaussiantarget,
                           beginif, endif)
vlbatasks.jmfit(fullimagefile, fulljmfitfile, sourcename, stokesi, endif)
|
dingswinREPO_NAMEpsrvlbireducePATH_START.@psrvlbireduce_extracted@psrvlbireduce-master@datareduction@singlesource_image.py@.PATH_END.py
|
{
"filename": "Osiris_old.py",
"repo_name": "JaidenCook/OSIRIS",
"repo_path": "OSIRIS_extracted/OSIRIS-main/old/Osiris_old.py",
"type": "Python"
}
|
#!/usr/bin/python
__author__ = "Jaiden Cook, Jack Line"
__credits__ = ["Jaiden Cook","Jack Line"]
__version__ = "0.0.0"
__maintainer__ = "Jaiden Cook"
__email__ = "Jaiden.Cook@student.curtin.edu"
# Generic stuff:
#%matplotlib notebook
import os,sys
import time
from datetime import datetime
import glob
import shutil
import re
from math import pi
import warnings
import subprocess
warnings.filterwarnings("ignore")
# Array stuff:
import numpy as np
warnings.simplefilter('ignore', np.RankWarning)
# Plotting stuff:
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
#from matplotlib.gridspec import GridSpec
#from matplotlib.ticker import LinearLocator, FormatStrFormatter
plt.style.use('seaborn-white')
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams.update({'font.size': 12})
plt.rc('xtick', color='k', labelsize='medium', direction='out')
plt.rc('xtick.major', size=6, pad=4)
plt.rc('xtick.minor', size=4, pad=4)
plt.rc('ytick', color='k', labelsize='medium', direction='out')
plt.rc('ytick.major', size=6, pad=4)
plt.rc('ytick.minor', size=4, pad=4)
# Parser options:
from optparse import OptionParser
# Multiprocessing stuff:
#from joblib import Parallel, delayed
#import multiprocessing
#from tqdm import tqdm
# Scipy stuff:
import scipy
from scipy.fft import fftn,fftfreq,fftshift,ifftshift
from scipy import stats
import scipy.optimize as opt
# casa-core stuff:
#from casacore.tables import table,tablecolumn
# Astropy stuff:
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
from astropy.coordinates import Angle, Latitude, Longitude # Angles
from astropy import wcs
from astropy.io import fits
from astropy.io import ascii
from astropy.io.votable import parse_single_table
from astropy.table import Table,Column,vstack
from astropy.io.votable import writeto as writetoVO
# MWA beam stuff
from mwa_pb import primary_beam as pb
def mwa_alt_az_za(obsid, ra=None, dec=None, degrees=False):
    """
    Compute the altitude, azimuth and zenith angle of a sky position as seen
    from the MWA at the time of a given observation.

    Args:
        obsid : The MWA observation id (GPS time)
        ra : The right ascension in HH:MM:SS (or degrees, see below)
        dec : The declination in DD:MM:SS (or degrees, see below)
        degrees : If True, ra and dec are given in degrees (Default: False)

    Returns:
        (Alt, Az, Za) in degrees, with Za = 90 - Alt.
    """
    from astropy.time import Time
    from astropy.coordinates import SkyCoord, AltAz, EarthLocation
    from astropy import units as u

    obs_time = Time(float(obsid), format='gps')
    coord_units = (u.deg, u.deg) if degrees else (u.hourangle, u.deg)
    sky_posn = SkyCoord(ra, dec, unit=coord_units)
    # Site lookup by name (requires astropy's site registry).
    mwa_location = EarthLocation.of_site('Murchison Widefield Array')
    horizontal = sky_posn.transform_to(
        AltAz(obstime=obs_time, location=mwa_location))
    alt_deg = horizontal.alt.deg
    az_deg = horizontal.az.deg
    return alt_deg, az_deg, 90. - alt_deg
def Gauss2D(X,Y,A,x0,y0,theta,amaj,bmin,polar=False):
    """
    Evaluate an elliptical 2D Gaussian at the points (X, Y).

    Parameters
    ----------
    X, Y : coordinate arrays; Cartesian (l, m) by default, or
        (azimuth, zenith angle) in radians when ``polar=True``.
    A : peak amplitude.
    x0, y0 : centre coordinates (Az0, Zen0 in the polar case).
    theta : position angle in radians.
    amaj, bmin : major- and minor-axis widths; swapped if given out of order.
    polar : select the projected spherical evaluation branch.

    NOTE(review): the width conversion uses amaj/sqrt(2 ln 2) here, while
    Vis_Gauss2D divides by 2*sqrt(2 ln 2) -- confirm which FWHM convention
    is intended.
    """
    # By definition the semi-major axis is larger than the semi-minor axis:
    if amaj < bmin:
        # Swapping amaj and bmin:
        t = bmin
        bmin = amaj
        amaj = t
    else:
        pass
    # Defining the width of the Gaussians
    sigx = amaj/np.sqrt(2.0*np.log(2.0))
    sigy = bmin/np.sqrt(2.0*np.log(2.0))
    # Standard rotated-Gaussian quadratic-form coefficients.
    a = (np.cos(theta)**2)/(2.0*sigx**2) + (np.sin(theta)**2)/(2.0*sigy**2)
    b = -np.sin(2.0*theta)/(4.0*sigx**2) + np.sin(2.0*theta)/(4.0*sigy**2)
    c = (np.sin(theta)**2)/(2.0*sigx**2) + (np.cos(theta)**2)/(2.0*sigy**2)
    if polar == False:
        # Cartesian.
        return A*np.exp(-(a*(X-x0)**2 + 2*b*(X-x0)*(Y-y0) + c*(Y-y0)**2))
    elif polar == True:
        # Arguments
        # Az,Zen,A,Az0,Zen0,theta,amaj,bmin
        # General 2D Gaussian function.
        # Stereographic projection.
        #
        # https://www.aanda.org/articles/aa/full/2002/45/aah3860/node5.html
        #
        # Gaussians that exist in Spherical space are plotted onto a 2D surface.
        # A*exp(-(a*(x-x0)^2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)^2))
        #
        # r = 2*sin(Zen)/(1 + cos(Zen))
        #
        # x = 2*cos(Az)*sin(Zen)/(1 + cos(Zen))
        # y = 2*sin(Az)*sin(Zen)/(1 + cos(Zen))
        #
        # Zen in [0,pi]
        # Az in [0,2pi]
        l0 = np.sin(x0)*np.cos(y0)
        m0 = -np.sin(x0)*np.sin(y0)
        #print('l0 = %2.3f, m0 = %2.3f' %(l0,m0))
        #print(l0,m0,np.sin(np.arccos(np.sqrt(l0**2 + m0**2))),np.arccos(np.sqrt(l0**2 + m0**2)))
        #sigy = 2*np.sin(sigy/2)*np.sin(np.arccos(np.sqrt(l0**2 + m0**2)))#*np.cos(np.pi/2 - np.arctan2(l0,m0)+np.pi)#*np.cos(theta)
        #sigx = 2*np.sin(sigx/2)
        Az = X
        Zen = Y
        Az0 = x0
        Zen0 = y0
        theta_pa = theta
        #print('theta = %2.3f' % (theta))
        # Project the widths for a source away from zenith.
        sigx = sigx*np.sqrt((np.sin(theta_pa))**2 + (np.cos(theta_pa)*np.cos(Zen0))**2)
        sigy = sigy*np.sqrt((np.cos(theta_pa))**2 + (np.sin(theta_pa)*np.cos(Zen0))**2)
        #theta = theta + np.arctan2(l0,m0) + np.pi
        theta = theta + Az0
        # Recompute the quadratic form with the projected widths and the
        # azimuth-rotated position angle.
        a = (np.cos(theta)**2)/(2.0*sigx**2) + (np.sin(theta)**2)/(2.0*sigy**2)
        b = -np.sin(2.0*theta)/(4.0*sigx**2) + np.sin(2.0*theta)/(4.0*sigy**2)
        c = (np.sin(theta)**2)/(2.0*sigx**2) + (np.cos(theta)**2)/(2.0*sigy**2)
        #print('theta = %2.3f' % (theta))
        #print('arctan(l0,m0) = %2.3f' % (np.arctan2(m0,l0)))
        # Defining x-x0 and y-y0. Defining them in spherical coordinates.
        ##x_shft = 2*np.sin(Zen)*np.cos(Az)/(1+np.cos(Zen)) - 2*np.sin(Zen0)*np.cos(Az0)/(1+np.cos(Zen0))
        ##y_shft = 2*np.sin(Zen)*np.sin(Az)/(1+np.cos(Zen)) - 2*np.sin(Zen0)*np.sin(Az0)/(1+np.cos(Zen0))
        x_shft = np.sin(Zen)*np.cos(Az) - np.sin(Zen0)*np.cos(Az0)
        y_shft = -np.sin(Zen)*np.sin(Az) + np.sin(Zen0)*np.sin(Az0)
        return A*np.exp(-(a*(x_shft)**2 + 2*b*(x_shft)*(y_shft) + c*(y_shft)**2))
def Poly_func2D_old(xx,yy,*a):
    """
    Evaluate a general 2D polynomial surface on the grids ``xx`` and ``yy``.

    The model is p(x, y) = sum_{j=0}^{p} sum_{i=0}^{p} a_k x^(p-i) y^j with
    the coefficients ``a`` given in row-major order, so the degree p
    satisfies len(a) == (p + 1)**2.

    Returns the evaluated surface flattened to 1D (curve_fit convention).
    """
    coeffs = np.asarray(a).flatten()
    degree = int(np.sqrt(len(coeffs)) - 1)
    surface = np.zeros(np.shape(xx))
    term = 0
    for j_pow in range(degree + 1):
        for i_pow in range(degree + 1):
            surface = surface + coeffs[term] * (xx ** (degree - i_pow)) * (yy ** j_pow)
            term += 1
    return surface.ravel()
def Poly_func2D_nu(data_tuple,*a):
    """
    Evaluate a general 2D polynomial surface; curve_fit-friendly variant of
    Poly_func2D_old taking the coordinate grids as a single tuple.

    The model is p(x, y) = sum_{j=0}^{p} sum_{i=0}^{p} a_k x^(p-i) y^j with
    row-major coefficients ``a`` and degree p = sqrt(len(a)) - 1.

    Returns the evaluated surface flattened to 1D.
    """
    grid_x, grid_y = data_tuple
    coeffs = np.asarray(a).flatten()
    degree = int(np.sqrt(len(coeffs)) - 1)
    surface = np.zeros(np.shape(grid_x))
    for k, coeff in enumerate(coeffs):
        # Row-major coefficient ordering: outer loop over y powers.
        j_pow, i_pow = divmod(k, degree + 1)
        surface = surface + coeff * (grid_x ** (degree - i_pow)) * (grid_y ** j_pow)
    return surface.ravel()
def Window_blackman(x,x0,X,a):
    """
    Evaluate a Blackman window of total width ``X`` centred on ``x0`` at the
    sample points ``x`` (a numpy array), with shape parameter ``a`` (alpha).
    Samples at or beyond the window edges are forced to zero.
    """
    phase = 2.0 * np.pi * (x - x0 - 0.5 * X) / X
    window = (1 - a) / 2.0 - 0.5 * np.cos(phase) + 0.5 * a * np.cos(2.0 * phase)
    # Zero everything on or outside the support [x0 - X/2, x0 + X/2].
    outside = (x <= x0 - 0.5 * X) | (x >= x0 + 0.5 * X)
    window[outside] = 0.0
    return window
def realign_polar_xticks(ax):
    """
    Nudge the horizontal alignment of a polar axes' x tick labels: labels on
    the sin > 0 half are right-aligned, labels on the sin < 0 half are
    left-aligned; labels near sin == 0 are left untouched.
    """
    for angle, tick_label in zip(ax.get_xticks(), ax.get_xticklabels()):
        sine = np.sin(angle)
        if sine > 0.1:
            tick_label.set_horizontalalignment('right')
        if sine < -0.1:
            tick_label.set_horizontalalignment('left')
def Plot_img(Img,X_vec=None,Y_vec=None,projection=None,cmap='jet',figsize = (14,12),**kwargs):
    """
    Display a 2D intensity image, either on Cartesian (l, m) axes (default)
    or on a polar grid when ``projection="polar"``.

    Parameters
    ----------
    Img : 2D array to display.
    X_vec, Y_vec : optional axis coordinate arrays; when given (Cartesian
        case) they set the imshow extent, and in the polar case they are the
        pcolormesh coordinate grids.
    projection : None for Cartesian, "polar" for a polar plot.
    cmap, figsize : usual matplotlib styling knobs.
    **kwargs : forwarded to set_clim (e.g. vmin/vmax).

    Shows the figure; returns nothing.
    """
    if projection is None:
        fig, axs = plt.subplots(1, figsize = figsize, dpi=75)
        # Creating the image objects:
        # BUGFIX: the original test `np.any(X_vec) != None` is always True
        # (a bool never equals None), so calling without axis vectors
        # crashed on np.min(None). Test the arguments themselves instead.
        if X_vec is not None and Y_vec is not None:
            im = axs.imshow(Img,cmap=cmap,origin='upper',\
                extent=[np.min(X_vec),np.max(X_vec),np.min(Y_vec),np.max(Y_vec)])
        else:
            im = axs.imshow(Img,cmap=cmap,origin='upper')
        # Setting the colour bars:
        cb = fig.colorbar(im, ax=axs, fraction=0.046, pad=0.04)
        cb.set_label(label='Intensity')
        axs.set_xlabel(r'$l$')
        axs.set_ylabel(r'$m$')
        im.set_clim(**kwargs)
        plt.show()
    elif projection == "polar":
        fig = plt.figure(figsize = (14,12), dpi = 75)
        label_size = 24
        font_size = 22
        thetaticks = np.arange(0,360,45)
        ax1 = fig.add_subplot(111,projection='polar')
        pcm1 = ax1.pcolormesh(X_vec,Y_vec,Img, cmap = cmap)
        ax1.set_yticks([])
        # Rotate so zero angle sits at the top of the plot.
        ax1.set_theta_offset(np.pi/2.0)
        cb = fig.colorbar(pcm1, ax = ax1, fraction = 0.046, pad = 0.065)
        cb.set_label(label = 'Intensity', fontsize = font_size)
        cb.ax.tick_params(axis = 'x', labelsize = font_size - 2)
        realign_polar_xticks(ax1)
        plt.subplots_adjust(left=-0.5)
        pcm1.set_clim(**kwargs)
        plt.show()
def Plot_3D(X_arr,Y_arr,Z_arr,cmap='jet'):
    """
    Render Z_arr as a 3D surface over the (X_arr, Y_arr) grid with a
    colour bar. Shows the figure; returns nothing.
    """
    fontsize=24
    fig = plt.figure(figsize = (12,10), dpi=75)
    # NOTE(review): fig.gca(projection=...) is removed in newer matplotlib;
    # would need fig.add_subplot(projection='3d') there.
    ax = fig.gca(projection='3d')
    # Plot the surface.
    surf = ax.plot_surface(X_arr, Y_arr, Z_arr, cmap=cmap,
                           linewidth=0, antialiased=False)
    cb = fig.colorbar(surf, ax=ax, fraction=0.046, pad=0.04)
    cb.set_label(label='Intensity',fontsize=fontsize)
    ax.set_xlabel(r'$l$')
    ax.set_ylabel(r'$m$')
    plt.show()
def Plot_visibilites(Vis,N,u_vec,v_vec,cmap='jet'):
    """
    Plot a 2x2 summary of a complex visibility array: amplitude (power),
    real part, imaginary part and phase, each over the (u, v) extent.

    NOTE(review): the function name is misspelled ("visibilites") but is
    kept as-is since callers use it. The ``N`` argument is unused here.
    Shows the figure; returns nothing.
    """
    # Creating the plots of the real, im, phase and amplitude:
    # Visibilities must be normalised before plotting.
    Vis_power = np.abs(Vis)
    fig, axs = plt.subplots(2,2, figsize = (14,12), dpi=75)
    # Computed but only used by the commented-out clim below.
    Vis_power_std = np.std(Vis_power)
    Vis_power_mean = np.mean(Vis_power)
    # Creating the image objects:
    im_Vis = axs[0,0].imshow(Vis_power,cmap=cmap,\
        extent=[np.min(u_vec),np.max(u_vec),np.min(v_vec),np.max(v_vec)])#,\
        #vmin=Vis_power_mean-5*Vis_power_std,vmax=Vis_power_mean+5*Vis_power_std)
    im_Real = axs[0,1].imshow(np.real(Vis),cmap=cmap,\
        extent=[np.min(u_vec),np.max(u_vec),np.min(v_vec),np.max(v_vec)])
    im_Im = axs[1,0].imshow(np.imag(Vis),cmap=cmap,\
        extent=[np.min(u_vec),np.max(u_vec),np.min(v_vec),np.max(v_vec)])
    im_Phase = axs[1,1].imshow(np.angle(Vis,deg=True),cmap=cmap,\
        extent=[np.min(u_vec),np.max(u_vec),np.min(v_vec),np.max(v_vec)])
    # Setting the colour bars:
    cb_Vis = fig.colorbar(im_Vis, ax=axs[0,0], fraction=0.046, pad=0.04)
    cb_Vis.set_label(label='Intensity')
    cb_Real = fig.colorbar(im_Real, ax=axs[0,1], fraction=0.046, pad=0.04)
    cb_Real.set_label(label='Intensity')
    cb_Im = fig.colorbar(im_Im, ax=axs[1,0], fraction=0.046, pad=0.04)
    cb_Im.set_label(label='Intensity')
    cb_Phase = fig.colorbar(im_Phase, ax=axs[1,1], fraction=0.046, pad=0.04)
    cb_Phase.set_label(label='Phase [Degrees]')
    # Setting the axis labels:
    axs[0,0].set_xlabel(r'$u\lambda$')
    axs[0,0].set_ylabel(r'$v\lambda$')
    axs[0,0].set_title('Power')
    axs[0,1].set_xlabel(r'$u\lambda$')
    axs[0,1].set_ylabel(r'$v\lambda$')
    axs[0,1].set_title('Real Amplitude')
    axs[1,0].set_xlabel(r'$u\lambda$')
    axs[1,0].set_ylabel(r'$v\lambda$')
    axs[1,0].set_title('Imaginary Amplitude')
    axs[1,1].set_xlabel(r'$u\lambda$')
    axs[1,1].set_ylabel(r'$v\lambda$')
    axs[1,1].set_title('Phase')
    plt.show()
def Visibilities_2D(img,X,Y,N):
    """
    FFT an image-plane array into the visibility (u, v) plane.

    Parameters
    ----------
    img : 2D (already padded) image array.
    X, Y : physical extents of the image axes, so the sample spacing along
        each axis is X/N (resp. Y/N).
    N : number of samples per axis.

    Returns
    -------
    u_arr, v_arr : 2D meshgrids of spatial frequencies (from fftfreq).
    Vis : 2D complex visibility array.

    Notes
    -----
    The roll-by-one before the FFT and the roll-back afterwards compensate
    an off-by-one artefact the original author observed on small grids.
    """
    pre_shifted = np.roll(np.roll(fftshift(img), 1, axis=0), 1, axis=1)
    Vis = np.roll(np.roll(ifftshift(fftn(pre_shifted)), -1, axis=0), -1, axis=1)
    # Frequency axes: N samples spaced X/N (resp. Y/N) apart.
    u_axis = fftfreq(N, X / N)
    v_axis = fftfreq(N, Y / N)
    u_arr, v_arr = np.meshgrid(u_axis, v_axis)
    return u_arr, v_arr, Vis
def Vis_Beam_Poly2D(U,V,dL,dM,l0,m0,*a):
    """
    Analytic Fourier transform of a 2D polynomial beam of support dL x dM
    centred on (l0, m0), evaluated on the (U, V) grid.

    The coefficients ``a`` are ordered as in Poly_func2D_nu (row-major,
    degree p with len(a) == (p + 1)**2). Each separable term uses a closed
    form for the FT of x^(p-r) (resp. y^s) over a finite window; the
    special cases below patch the removable singularities at U == 0 or
    V == 0.

    NOTE(review): those r==0/1/2 and s==0/1/2 patch values are hard-coded
    for a quadratic beam (p == 2) -- confirm before using another degree.
    """
    a = np.array(a).ravel() # Setting the beam parameters.
    vis = np.zeros(np.shape(U),dtype=complex) # Initialising the vis array.
    p = int(np.sqrt(len(a)) - 1) # Forcing type.
    # Shifting the U and V arrays.
    U = fftshift(U)
    V = fftshift(V)
    index = 0
    for r in range(p+1):
        for s in range(p+1):
            # u-component:
            FT_b_u = 0
            for n in range(p-r+1):
                temp_frac_u = ((-1)**n) * ( ((dL/2.0)**(p-r-n)) / ((2*np.pi*(U))**(n+1)) )
                temp_cos_u = np.cos((np.pi/2.0)*(3*(p-r-n) + 1) - np.pi*(U)*dL)
                temp_u = temp_frac_u*temp_cos_u
                FT_b_u = FT_b_u + temp_u
            # Taking care of the discontinuities.
            if r==2:
                # Sinc function condition.
                FT_b_u[np.isinf(FT_b_u)] = dL/2
            if r==1:
                FT_b_u[np.isnan(FT_b_u)] = 0.0
            if r==0:
                FT_b_u[np.isnan(FT_b_u)] = -(dL**3)/12.0
            # Debug plotting branch, disabled via the cond_u flag.
            cond_u = False
            if r == 2 and cond_u == True:
                print(np.max(U[0,:]),np.min(U[0,:]),FT_b_u[0,:][U[0,:]==0.0])
                print(dL)
                plt.clf()
                #plt.semilogy(U[0,:],FT_b_u[0,:])
                plt.plot(U[0,:],FT_b_u[0,:])
                plt.xlabel(r'$u$',fontsize=24)
                plt.ylabel(r'$\frac{\hat{b}^2_2(u)}{2i^{2}e^{-2\pi i u l_0}}$',fontsize=24)
                plt.xlim([-25,25])
                #plt.plot(U[0,:],temp_cos_u[0,:])
                #plt.savefig('{0}.png'.format(n))
                plt.show()
            # v-component:
            FT_b_v = 0
            for q in range(s+1):
                temp_frac_v = ((-1)**q)*(((dM/2.0)**(s-q))/(2*np.pi*(V))**(q+1))
                temp_cos_v = np.cos((np.pi/2.0)*(3*(s-q) + 1) - np.pi*(V)*dM)
                temp_v = temp_frac_v*temp_cos_v
                FT_b_v = FT_b_v + temp_v
            if s==0:
                # Sinc function condition.
                FT_b_v[np.isinf(FT_b_v)] = dM/2
            if s==1:
                FT_b_v[np.isnan(FT_b_v)] = 0.0
            if s==2:
                FT_b_v[np.isnan(FT_b_v)] = -(dM**3)/12.0
            # Debug plotting branch, disabled via the cond_v flag.
            cond_v = False
            if s == 2 and cond_v == True:
                print(np.max(V[:,0]),np.min(V[:,0]),FT_b_v[:,0][V[:,0]==0.0])
                print(dM)
                plt.clf()
                plt.plot(V[:,0],FT_b_v[:,0])
                plt.show()
            # Accumulate the separable (u, v) term with its i^(p-r-s) factor.
            vis = vis + 4*(complex(0,1)**(p-r-s))*a[index]*FT_b_u*FT_b_v
            index = index + 1
    # Exponential phase offset term.
    phase_l0m0 = np.zeros(np.shape(U),dtype=complex)
    phase_l0m0.real = np.cos(-2*np.pi*(l0*U + m0*V))
    phase_l0m0.imag = np.sin(-2*np.pi*(l0*U + m0*V))
    # shifting the phase for the off centre window.
    vis = vis*phase_l0m0
    return vis
def Vis_Gauss2D(U,V,I0,l0,m0,PA,amaj,bmin,Az0,Zen0):
    """
    Analytic visibility-plane model of an elliptical 2D Gaussian source.

    Parameters
    ----------
    U, V : 2D spatial-frequency grids.
    I0 : peak intensity of the image-plane Gaussian.
    l0, m0 : source offset from the phase centre (applied as a phase ramp).
    PA : position angle in radians.
    amaj, bmin : FWHM major/minor axes (converted with 2*sqrt(2 ln 2)).
    Az0, Zen0 : source azimuth and zenith angle; Zen0 foreshortens the
        widths and Az0 rotates the effective position angle.

    Returns
    -------
    2D complex visibility array.
    """
    Vis = np.zeros(np.shape(U),dtype=complex) # Initialising the vis array.
    N = len(Vis)
    #Normalisation = ((N**2)/(4*np.pi**2))
    Normalisation = (N/(2*np.pi))**2#N/(2*np.pi)
    # Widths from the FWHM axes. (FIX: a first sigx/sigy pair computed
    # without the factor of 2 was dead code -- immediately overwritten --
    # and has been removed.)
    sigx = amaj/(2.0*np.sqrt(2.0*np.log(2.0)))
    sigy = bmin/(2.0*np.sqrt(2.0*np.log(2.0)))
    # Foreshorten the widths for a source away from zenith.
    sigx = sigx*np.sqrt((np.sin(PA))**2 + (np.cos(PA)*np.cos(Zen0))**2)
    sigy = sigy*np.sqrt((np.cos(PA))**2 + (np.sin(PA)*np.cos(Zen0))**2)
    #PA = PA - np.arctan2(l0,m0)+np.pi
    PA = PA + Az0
    # Amplitude and quadratic-form coefficients of the FT'd Gaussian.
    I0_h = 2*np.pi*sigx*sigy*I0
    a_p = 2*(np.pi**2)*((sigx**2)*(np.cos(PA)**2) + (sigy**2)*(np.sin(PA)**2))
    b_p = 0.5*(np.sin(2*PA))*(np.pi**2)*(-1*(-sigx**2) + (-sigy**2))
    c_p = 2*(np.pi**2)*((sigy**2)*(np.cos(PA)**2) + (sigx**2)*(np.sin(PA)**2))
    # Exponential phase offset term for the (l0, m0) source position.
    phase_l0m0 = np.zeros(np.shape(U),dtype=complex)
    phase_l0m0.real = np.cos(-2*np.pi*(l0*U + m0*V))
    phase_l0m0.imag = np.sin(-2*np.pi*(l0*U + m0*V))
    # Normalisation issue
    # TODO(review): the normalisation convention is still to be confirmed
    # (see the commented alternative above).
    Vis = Vis + I0_h*np.exp(-a_p*U**2 - 2*b_p*U*V - c_p*V**2)
    Vis = phase_l0m0*Vis*Normalisation #Normalisation = (N^2/4pi^2)
    return Vis
def Power_spec1D(Vis_power,u_arr,v_arr,r_vec=None):
    """
    Radially average a 2D visibility power array into a 1D power spectrum.

    Parameters
    ----------
    Vis_power : 2D array of |V|^2 (or any scalar) samples on the (u, v) grid.
    u_arr, v_arr : 2D meshgrids of the grid coordinates.
    r_vec : optional 1D array of radial bin edges; by default they are built
        from the non-negative part of the grid axes (assumes a square,
        symmetric grid -- TODO confirm for non-square use).

    Returns
    -------
    Power_spec_1D : mean power per radial bin (last entry unused, left 0).
    Radius : bin-centre radii, rolled so Radius[i] pairs with its bin.
    """
    # BUGFIX: the original guard was `np.any(r_vec) == None`, which is False
    # even when r_vec is None (np.any(None) evaluates to False), so the
    # default-sampling path raised a TypeError below. `is None` is the
    # intended test.
    if r_vec is None:
        u_vec = u_arr[0,:]
        v_vec = v_arr[:,0]
        # This is for binning the radii.
        r_vec = np.sqrt(u_vec[u_vec >= 0.0]**2 + v_vec[v_vec >= 0.0]**2)
    # The u_arr and v_arr should be shifted. The small offset keeps the
    # zero-radius sample strictly inside the first bin.
    r_uv = np.sqrt(u_arr**2 + v_arr**2) + 0.00001
    # Initialising Power vector and Radius vector.
    Power_spec_1D = np.zeros(len(r_vec))
    Radius = np.zeros(len(r_vec))
    for i in range(len(r_vec)-1):
        Radius[i] = ((r_vec[i+1] + r_vec[i])/2.0)
        # Mean power in the annulus r_vec[i] <= r <= r_vec[i+1].
        Power_spec_1D[i] = np.mean(Vis_power[np.logical_and(r_uv >= r_vec[i], r_uv <= r_vec[i+1])])
    # Align bin-centre radii with their power samples.
    Radius = np.roll(Radius,1)
    return Power_spec_1D, Radius
def Plot_Power_spec1D(Vis_power1D_list,radius,label_list=None,xlim=None,ylim=None,**kwargs):
    """
    Plot one or several 1D power spectra on a log-y axis against radius.

    Vis_power1D can be a list of multiple 1D power spectrums; in that case
    label_list should contain the same number of elements as
    Vis_power1D_list.

    NOTE(review): when multiple spectra are passed without a label_list,
    neither branch below matches and nothing is plotted; also
    `label_list != None` would be more idiomatic as `is not None`.
    """
    #print(np.shape(Vis_power1D_list))
    #print(label_list)
    # Initialising the figure object.
    # Need fig object, code breaks otherwise, fix this in the future.
    fig, axs = plt.subplots(1, figsize = (14,12), dpi=75)
    plt.semilogy()
    # Single spectrum: 1D input array.
    if len(np.shape(Vis_power1D_list)) < 2:
        axs.plot(radius,Vis_power1D_list,**kwargs)
    # Plotting multiple 1D power spectra if required.
    elif label_list != None and len(np.shape(Vis_power1D_list)) > 1:
        for i in range(len(Vis_power1D_list)):
            axs.plot(radius,Vis_power1D_list[i],label = label_list[i],**kwargs)
    if xlim != None:
        axs.set_xlim(xlim)
    if ylim != None:
        axs.set_ylim(ylim)
    axs.set_xlabel(r'$\sqrt{u^2 + v^2}$',fontsize=24)
    axs.set_ylabel(r'$\rm{Power}$',fontsize=24)
    plt.legend(fontsize=24)
def Vis_degrid(u_arr,v_arr,u_ker_arr,v_ker_arr,u,v,vis_ker,vis_sky,verb_cond=False):
    """
    Degrid (interpolate) gridded sky visibilities at arbitrary baseline
    coordinates (u, v) by weighting a kernel-sized patch of the grid.

    Parameters
    ----------
    u_arr, v_arr : 2D meshgrids of the gridded visibility coordinates.
    u_ker_arr, v_ker_arr : 2D meshgrids of the kernel coordinates; their
        pixel size must match the (u_arr, v_arr) grid.
    u, v : 1D arrays of baseline coordinates to sample at.
    vis_ker : 2D degridding kernel (renormalised to unit sum below).
    vis_sky : 2D gridded sky visibilities.
    verb_cond : print per-baseline diagnostics when True.

    Returns
    -------
    (vis_sky_deg, u_err_vec, v_err_vec) -- the degridded visibilities and
    the offsets between each requested (u, v) and the grid point actually
    used -- or None when the kernel/grid pixel sizes do not match.
    """
    # Initialising the new deridded visibility array:
    vis_sky_deg = np.zeros(len(u),dtype=complex)
    u_err_vec = np.zeros(len(u))
    v_err_vec = np.zeros(len(v))
    # might have to specify different cases. One for odd and even arrays.
    u_vec = u_arr[0,:]
    v_vec = v_arr[:,0]
    # Creating an index vector.
    ind_vec = np.arange(len(u_arr))
    u_pixel_size = np.abs(u_vec[0] - u_vec[1])# These should be in units of wavelengths.
    v_pixel_size = np.abs(v_vec[0] - v_vec[1])
    u_ker_pixel_size = np.abs(u_ker_arr[0,0] - u_ker_arr[0,1])# These should be in untis of wavelengths.
    v_ker_pixel_size = np.abs(v_ker_arr[0,0] - v_ker_arr[1,0])
    # Catch condition for degridding. Make sure pixel sizes for the kernel and the sky_vis are the same.
    if u_ker_pixel_size != u_pixel_size or v_ker_pixel_size != v_pixel_size:
        print("Kernel pixel size and visibilty pixel size don't match.")
        print('du_pix = %5.2f, du_ker_pix = %5.2f' % (u_pixel_size,u_ker_pixel_size))
        print('dv_pix = %5.2f, dv_ker_pix = %5.2f' % (v_pixel_size,v_ker_pixel_size))
        return None
    # The kernel sum should equal 1.
    vis_ker = vis_ker/(np.sum(vis_ker))
    # Integer size of the kernel.
    ker_len = int(len(vis_ker)/2)
    for i in range(len(u)):
        # These should be the indices of the coordinates closest to the baseline. These coordinates
        # should line up with the kernel.
        temp_u_ind = ind_vec[np.isclose(u_vec,u[i],atol=u_pixel_size/2)][0]
        temp_v_ind = ind_vec[np.isclose(v_vec,v[i],atol=v_pixel_size/2)][0]
        # We also want to look at the error between the pixel position and the guess.
        u_err_vec[i] = np.abs(u[i] - u_vec[temp_u_ind])
        v_err_vec[i] = np.abs(v[i] - v_vec[temp_v_ind])
        # Might have to define a visibility subset that is larger.
        # Defining the visibility subset:
        # NOTE(review): rows are sliced with the u index and columns with
        # the v index here -- confirm the intended orientation of vis_sky.
        vis_sub = vis_sky[temp_u_ind - ker_len - 1:temp_u_ind + ker_len,\
            temp_v_ind - ker_len - 1:temp_v_ind + ker_len]
        # Verbose output condition, for diagnostic purposes. Default condition is False.
        if verb_cond == True:
            print('#{0}'.format(i))
            print('u[i] = %5.2f, v[i] = %5.2f' % (u[i],v[i]))
            print('upixel scale = %5.2f, vpixelscale = %5.2f' % (u_vec[1]-u_vec[0],v_vec[1]-v_vec[0]))
            print('min_u = %7.3f, min_v = %7.3f' % (np.min(u_vec),np.min(v_vec)))
            print('u_diff = %5.2f, v_diff = %5.2f'% (np.min(np.abs(u_vec - u[i])),np.min(np.abs(v_vec - v[i]))))
            print('u_ind = %4i, v_ind = %4i' % (temp_u_ind,temp_v_ind))
            print('Kernel half width = %3i' % ker_len)
        else:
            pass
        # Weighted sum of the grid patch under the kernel.
        vis_sky_deg[i] = np.sum(vis_sub*vis_ker)
    return vis_sky_deg, u_err_vec, v_err_vec
## Interferometry functions from J.Line.
def add_kernel(uv_array, u_ind, v_ind, kernel):
    '''Takes a (v by u) sized kernel and adds it in place into ``uv_array``
    centred at the u,v point (u_ind, v_ind).

    Kernel MUST have odd dimensions for symmetry purposes. The addition is
    performed in place; nothing is returned.
    Author: J.Line
    '''
    ker_v, ker_u = kernel.shape
    # Half-widths of the (odd-sized) kernel.
    width_u = int((ker_u - 1) / 2)
    width_v = int((ker_v - 1) / 2)
    N = len(uv_array)
    # Bounding box of the kernel footprint inside uv_array.
    min_u_ind = u_ind - width_u
    max_u_ind = u_ind + width_u + 1
    min_v_ind = v_ind - width_v
    max_v_ind = v_ind + width_v + 1
    ## Jack suggests changing this, I will have to discuss this with him.
    # Clip the footprint (and the kernel) where it overhangs the array edge.
    # NOTE(review): the two "< 0" branches slice the kernel with the clamped
    # *array* indices, which looks inconsistent with the "> N-1" branches --
    # confirm the intended edge behaviour.
    if max_u_ind > N - 1:
        max_u_ind = N - 1
        kernel = kernel[:, 0:max_u_ind - min_u_ind]
    if max_v_ind > N - 1:
        max_v_ind = N - 1
        kernel = kernel[0:max_v_ind - min_v_ind, :]
    if min_u_ind < 0:
        min_u_ind = 0
        kernel = kernel[:, min_u_ind:max_u_ind]
    if min_v_ind < 0:
        min_v_ind = 0
        kernel = kernel[min_v_ind:max_v_ind, :]
    # View into the target region; in-place add writes through to uv_array.
    array_subsec = uv_array[min_v_ind:max_v_ind, min_u_ind:max_u_ind]
    try:
        array_subsec += kernel
    except ValueError:
        # Shape mismatch between clipped kernel and array section: report the
        # geometry for diagnosis instead of crashing.
        print('Value Error')
        print('kernel shape {0}'.format(kernel.shape))
        print('kernel width u = %4i, kernel width v = %4i' % (width_u, width_v))
        print('Kernel shape (%4i,%4i)' % (max_v_ind - min_v_ind, max_u_ind - min_u_ind))
        print('Array size = %4i, u indexing size = %4i' % (len(uv_array), u_ind + width_u + 1))
        # Bug fix: this line previously repeated the *u* extent; it now
        # reports the v extent as its label claims.
        print('Array size = %4i, v indexing size = %4i' % (len(uv_array), v_ind + width_v + 1))
def grid(container=None, u_coords=None, v_coords=None, u_range=None, v_range=None, complexes=None, kernel='gaussian', kernel_params=[2.0, 2.0]):
    '''A simple(ish) gridder - defaults to gridding with a gaussian.

    Accumulates each complex visibility into ``container`` convolved with the
    chosen kernel, then normalises populated cells by the accumulated kernel
    weights. Returns (container, weights_arr).
    Author: J.Line
    '''
    # Weight array, we will divide the entire container array by this.
    weights_arr = np.zeros(np.shape(container), dtype='complex')
    KERNEL_SIZE = 31
    for i in np.arange(len(u_coords)):
        u, v, comp = u_coords[i], v_coords[i], complexes[i]
        ##Get the u and v indexes in the uv gridding container
        u_ind, v_ind, u_off, v_off = find_closet_uv(u=u, v=v, u_range=u_range, v_range=v_range)
        if kernel == 'gaussian':
            kernel_array = gaussian(sig_x=kernel_params[0], sig_y=kernel_params[1], gridsize=KERNEL_SIZE, x_offset=0, y_offset=0)
        else:
            # Fall back to nearest-cell gridding (a 1x1 unit kernel).
            kernel_array = np.array([[complex(1, 0)]])
        ##Multiply the kernel by the complex visibility value
        data_kernel = kernel_array * comp
        ##Add the multiplied kernel-uvdata values to the grid, and track the
        ##pure kernel contributions for weighting.
        add_kernel(container, u_ind, v_ind, data_kernel)
        add_kernel(weights_arr, u_ind, v_ind, kernel_array)
    # Dividing the container by the weights (per-cell normalisation by the
    # number/weight of visibilities summed into each bin).
    # Bug fix: the original computed this division but discarded the result.
    populated = weights_arr.real > 0
    container[populated] = container[populated] / weights_arr[populated]
    return container, weights_arr
def gaussian(sig_x=None, sig_y=None, gridsize=31, x_offset=0, y_offset=0):
    '''Creates a gaussian array of a specified gridsize, with the
    gaussian peak centred at an offset from the centre of the grid.
    Author: J.Line
    '''
    # Peak position, offset from the grid centre.
    x_cent = int(gridsize / 2.0) + x_offset
    y_cent = int(gridsize / 2.0) + y_offset
    # Pixel coordinate grids.
    xx, yy = np.meshgrid(np.arange(gridsize), np.arange(gridsize))
    # Quadratic form of an axis-aligned 2D Gaussian.
    quad_x = (xx - x_cent) * (xx - x_cent) / (2 * sig_x * sig_x)
    quad_y = (yy - y_cent) * (yy - y_cent) / (2 * sig_y * sig_y)
    # Normalised so the kernel integrates to ~1 in the continuum limit.
    peak = 1 / (2 * pi * sig_x * sig_y)
    return peak * np.exp(-(quad_x + quad_y))
def enh2xyz(east, north, height, latitiude):
    '''Calculates local X,Y,Z using east,north,height coords,
    and the latitude of the array. Latitude must be in radians.
    Author: J.Line
    '''
    sin_lat = np.sin(latitiude)
    cos_lat = np.cos(latitiude)
    # Standard local east/north/up -> X,Y,Z rotation about the east axis.
    local_x = height * cos_lat - north * sin_lat
    local_y = east
    local_z = north * cos_lat + height * sin_lat
    return local_x, local_y, local_z
def get_lm(ra=None, ra0=None, dec=None, dec0=None):
    '''Calculate l,m,n for a given phase centre ra0,dec0 and sky point ra,dec.
    Enter angles in radians.
    Author: J.Line
    '''
    ##RTS way of doing it
    delta_ra = ra - ra0
    cos_d, sin_d = np.cos(dec), np.sin(dec)
    cos_d0, sin_d0 = np.cos(dec0), np.sin(dec0)
    cos_dra = np.cos(delta_ra)
    # Direction cosines relative to the phase centre.
    l = cos_d * np.sin(delta_ra)
    m = sin_d * cos_d0 - cos_d * sin_d0 * cos_dra
    n = sin_d * sin_d0 + cos_d * cos_d0 * cos_dra
    return l, m, n
def get_uvw(x_lamb, y_lamb, z_lamb, dec, HA):
    '''Rotate local baseline coordinates (in wavelengths) into (u,v,w) for a
    given hour angle HA and declination dec (radians).
    Author: J.Line
    '''
    sin_HA, cos_HA = np.sin(HA), np.cos(HA)
    sin_dec, cos_dec = np.sin(dec), np.cos(dec)
    u = sin_HA * x_lamb + cos_HA * y_lamb
    v = -sin_dec * cos_HA * x_lamb + sin_dec * sin_HA * y_lamb + cos_dec * z_lamb
    w = cos_dec * cos_HA * x_lamb - cos_dec * sin_HA * y_lamb + sin_dec * z_lamb
    return u, v, w
def find_closet_uv(u=None, v=None, u_range=None, v_range=None):
    '''Finds the closest values to u,v in the ranges u_range,v_range.
    Returns the index of the closest values, and the offsets from
    the closest values (in units of the grid resolution).
    Author: J.Line
    '''
    du = u_range[1] - u_range[0]
    dv = v_range[1] - v_range[0]
    # Distances from every grid coordinate to the requested point.
    abs_u_offs = np.abs(u_range - u)
    abs_v_offs = np.abs(v_range - v)
    # Grid cells whose centre lies strictly within half a resolution element.
    u_hits = np.where(abs_u_offs < du / 2.0)[0]
    v_hits = np.where(abs_v_offs < dv / 2.0)[0]
    # If the coordinate sits exactly between two grid points, the strict test
    # finds nothing; relax to <= and take the first match.
    if len(u_hits) == 0:
        u_hits = np.where(abs_u_offs <= du / 2)[0]
    if len(v_hits) == 0:
        v_hits = np.where(abs_v_offs <= dv / 2)[0]
    u_ind = u_hits[0]
    v_ind = v_hits[0]
    # Signed sub-cell offsets, normalised by the resolution.
    u_off = -((u_range - u)[u_ind] / du)
    v_off = -((v_range - v)[v_ind] / dv)
    return u_ind, v_ind, u_off, v_off
### Defining classes. Split this up into different module files.
class Skymodel:
    """A class for defining sky-models. In this class sky-models are stored as data cubes,
    the x and y axis correspond to the (l,m) plane, and the z axis corresponds to frequency."""

    def __init__(self, shape, l_vec, m_vec):
        # shape: 3D shape of the model cube; l_vec/m_vec are the
        # direction-cosine axes of the image plane.
        self.model = np.zeros(shape)
        self.l_vec = l_vec
        self.m_vec = m_vec
        # Creating the (l,m) plane grid:
        self.l_grid, self.m_grid = np.meshgrid(l_vec, m_vec)
        # Creating a radius array for masking purposes:
        self.r_grid = np.sqrt(self.l_grid**2 + self.m_grid**2)
        # Creating an index array, we want all pixels less than or equal to r = 1:
        ind_arr = self.r_grid <= 1.0
        # Here we want to create a new alt and az array that is the same size as l_arr and m_arr:
        Alt_arr = np.zeros(np.shape(self.l_grid))
        Az_arr = np.zeros(np.shape(self.l_grid))
        # Now we want to determine the Altitude and Azimuth, but only in the region where r <= 1.
        # Outside this region is beyond the boundary of the horizon.
        Alt_arr[ind_arr] = np.arccos(self.r_grid[ind_arr]) # Alt = arccos([l^2 + m^2]^(1/2))
        Az_arr[ind_arr] = np.arctan2(self.l_grid[ind_arr],self.m_grid[ind_arr]) + np.pi #arctan2() returns [-pi,pi] we want [0,2pi].
        # Defining the Altitude and Azimuthal grids.
        self.Alt_grid = Alt_arr
        self.Az_grid = Az_arr

    def Gauss2D(self,Az,Zen,A,Az0,Zen0,theta_pa,amaj,bmin):
        # Evaluate a 2D Gaussian of integrated amplitude A centred at
        # (Az0, Zen0) with position angle theta_pa and FWHM axes amaj/bmin,
        # at the sample points (Az, Zen).
        #
        # Arguments
        # Az,Zen,A,Az0,Zen0,theta,amaj,bmin
        # General 2D Gaussian function.
        # Stereographic projection.
        #
        # https://www.aanda.org/articles/aa/full/2002/45/aah3860/node5.html
        #
        # Gaussians that exist in Spherical space are plotted onto a 2D surface.
        # A*exp(-(a*(x-x0)^2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)^2))
        #
        # r = 2*sin(Zen)/(1 + cos(Zen))
        #
        # x = 2*cos(Az)*sin(Zen)/(1 + cos(Zen))
        # y = 2*sin(Az)*sin(Zen)/(1 + cos(Zen))
        #
        # Zen in [0,pi]
        # Az in [0,2pi]
        # By definition the semi-major axis is larger than the semi-minor axis:
        #
        # FWHM = amaj = 2 sqrt(2 ln(2)) sigma
        if amaj < bmin:
            # Swapping amaj and bmin:
            t = bmin
            bmin = amaj
            amaj = t
        else:
            pass
        # Defining the width of the Gaussians
        sigx = amaj/np.sqrt(2.0*np.log(2.0))
        sigy = bmin/np.sqrt(2.0*np.log(2.0))
        # NOTE(review): the two assignments above are immediately overwritten
        # below (they differ by the factor of 2 in the FWHM relation); kept as
        # in the original, but they are dead code.
        # Defining the width of the Gaussians
        sigx = amaj/(2.0*np.sqrt(2.0*np.log(2.0)))
        sigy = bmin/(2.0*np.sqrt(2.0*np.log(2.0)))
        # Projection stretch of the widths for an off-zenith centre.
        sigx = sigx*np.sqrt((np.sin(theta_pa))**2 + (np.cos(theta_pa)*np.cos(Zen0))**2)
        sigy = sigy*np.sqrt((np.cos(theta_pa))**2 + (np.sin(theta_pa)*np.cos(Zen0))**2)
        # Deriving the peak amplitude from the integrated amplitude.
        Amplitude = A/(sigx*sigy*2*np.pi)
        # Total rotation angle: position angle plus the azimuth of the centre.
        theta = theta_pa + Az0
        # Standard rotated-Gaussian quadratic-form coefficients.
        a = (np.cos(theta)**2)/(2.0*sigx**2) + (np.sin(theta)**2)/(2.0*sigy**2)
        b = -np.sin(2.0*theta)/(4.0*sigx**2) + np.sin(2.0*theta)/(4.0*sigy**2)
        c = (np.sin(theta)**2)/(2.0*sigx**2) + (np.cos(theta)**2)/(2.0*sigy**2)
        # Offsets of the sample points from the Gaussian centre in the
        # projected (x, y) plane.
        x_shft = np.sin(Zen)*np.cos(Az) - np.sin(Zen0)*np.cos(Az0)
        y_shft = -np.sin(Zen)*np.sin(Az) + np.sin(Zen0)*np.sin(Az0)
        #return A*np.exp(-(a*(x_shft)**2 + 2*b*(x_shft)*(y_shft) + c*(y_shft)**2))
        return Amplitude*np.exp(-(a*(x_shft)**2 + 2*b*(x_shft)*(y_shft) + c*(y_shft)**2))

    def add_Gaussian_sources(self, Az_mod, Alt_mod, Maj, Min, PA, S, window_size):
        # Add Gaussian sources (positions in degrees of Az/Alt, axes in
        # degrees, flux table S) into the model cube, each evaluated only
        # within a window of half-width window_size around its centre.
        # Converting the the Alt and Az into l and m coordinates:
        l_mod = np.cos(np.radians(Alt_mod))*np.sin(np.radians(Az_mod))# Slant Orthographic Project
        m_mod = -np.cos(np.radians(Alt_mod))*np.cos(np.radians(Az_mod))# Slant Orthographic Project
        for i in range(len(l_mod)):
            # Creating temporary close l and m mask arrays:
            temp_l_ind = np.isclose(self.l_vec,l_mod[i],atol=window_size)
            temp_m_ind = np.isclose(self.m_vec,m_mod[i],atol=window_size)
            # Creating temporary index vectors:
            # Use the mask array to determine the index values.
            l_ind_vec = np.arange(len(self.l_vec))[temp_l_ind]
            m_ind_vec = np.arange(len(self.m_vec))[temp_m_ind]
            # Creating index arrays:
            # Use the index vectors to create arrays
            l_ind_arr, m_ind_arr = np.meshgrid(l_ind_vec, m_ind_vec)
            # Creating temporary l and m arrays:
            l_temp_arr = self.l_grid[l_ind_arr,m_ind_arr]
            m_temp_arr = self.m_grid[l_ind_arr,m_ind_arr]
            # Creating temporary Azimuth and Altitude arrays:
            ## This is the way it is described in Thompson. Section 3.1 Pg 71 Second Edition.
            Alt_temp_arr = np.arccos(np.sqrt(l_temp_arr**2 + m_temp_arr**2)) # Alt = arccos([l^2 + m^2]^(1/2))
            Az_temp_arr = np.arctan2(m_temp_arr,l_temp_arr) + np.pi #arctan2() returns [-pi,pi] we want [0,2pi].
            # converting the major and minor axes into (l,m) coords.
            temp_maj = np.sin(np.radians(Maj[i]))
            temp_min = np.sin(np.radians(Min[i]))
            # Evaluate the source Gaussian in zenith-angle coordinates.
            Gauss_temp = self.Gauss2D(Az_temp_arr, np.pi/2 - Alt_temp_arr, 1.0, 2*np.pi - np.radians(Az_mod[i]),\
                np.pi/2 - np.radians(Alt_mod[i]),np.radians(PA[i]),\
                temp_maj, temp_min)
            # Accumulate the source into every frequency plane of the window.
            self.model[l_ind_arr,m_ind_arr,:] = self.model[l_ind_arr,m_ind_arr,:] +\
                np.ones(np.shape(self.model[l_ind_arr,m_ind_arr,:]))*Gauss_temp[:,:,None]
            ## Set all NaNs and values below the horizon to zero:
            #self.model[self.r_arr > 1.0,:] = 0.0
            self.model[np.isnan(self.model)] = 0.0
            # NOTE(review): multiplying the entire cube by S[i,:] on every
            # loop iteration compounds across sources -- confirm intent.
            self.model = self.model*S[i,:]
|
JaidenCookREPO_NAMEOSIRISPATH_START.@OSIRIS_extracted@OSIRIS-main@old@Osiris_old.py@.PATH_END.py
|
{
"filename": "test.py",
"repo_name": "bolverk/huji-rich",
"repo_path": "huji-rich_extracted/huji-rich-master/convergence/discontinuous_advection_1d/base/test.py",
"type": "Python"
}
|
#! /usr/bin/python
import math
def chi2_test(a1, a2):
    """Root-mean-square difference between the two sequences."""
    import math
    total = sum((p - q) ** 2 for p, q in zip(a1, a2))
    return math.sqrt(total / len(a1))
def L1_test(a1, a2):
    """Mean absolute difference between the two sequences.

    (The unused local ``import math`` from the original was removed.)
    """
    abs_diff = [abs(x - y) for x, y in zip(a1, a2)]
    return sum(abs_diff) / len(a1)
def goodness_of_fit(a1, a2, test_name='L1'):
    """Dispatch to the named error norm: 'chi2' (RMS) or 'L1' (mean abs).

    Raises NameError for an unrecognised ``test_name``.
    (The unused local ``import math`` from the original was removed.)
    """
    if 'chi2' == test_name:
        return chi2_test(a1, a2)
    elif 'L1' == test_name:
        return L1_test(a1, a2)
    else:
        raise NameError("I don't know test "+
                        test_name)
def density_init_prof(x):
    """Initial density: 2 inside the interval (0.3, 0.7), 1 outside."""
    return 1 if (x <= 0.3 or x >= 0.7) else 2
def density_prof(x, t):
    """Density at position x, time t, for unit-speed advection on a periodic
    unit-length domain.

    Uses a modulo wrap: identical to the original single "+ 1" correction for
    offsets within one period, but also correct for arbitrary times.
    """
    return density_init_prof((x - t) % 1.0)
def pressure_prof(x):
    """Uniform unit pressure, independent of position."""
    return 1
def velocity_prof(x):
    """Uniform unit advection velocity, independent of position."""
    return 1
def main():
    """Compare the simulation snapshot against the analytic advection
    solution and write the error norms to gradesheet.txt.

    Returns True when all three error norms are within tolerance.
    """
    import os
    import numpy
    # Load the snapshot time and the cell-centred fields.
    t_final = numpy.loadtxt('time.txt')
    centres = numpy.loadtxt('cell_centres.txt')
    densities = numpy.loadtxt('densities.txt')
    pressures = numpy.loadtxt('pressures.txt')
    velocities = numpy.loadtxt('velocities.txt')
    # Analytic reference profiles evaluated at the cell centres.
    density_ref = [density_prof(x, t_final) for x in centres]
    pressure_ref = [pressure_prof(x) for x in centres]
    velocity_ref = [velocity_prof(x) for x in centres]
    if False:
        # Debug plotting, disabled by default.
        import pylab
        pylab.subplot(311)
        pylab.plot(centres, densities, centres, density_ref)
        pylab.ylabel('Density')
        pylab.subplot(312)
        pylab.plot(centres, pressures, centres, pressure_ref)
        pylab.ylabel('Pressure')
        pylab.subplot(313)
        pylab.plot(centres, velocities, centres, velocity_ref)
        pylab.xlabel('Distance')
        pylab.ylabel('Velocity')
        pylab.show()
    test_name = 'L1'
    d_err = goodness_of_fit(densities, density_ref, test_name)
    p_err = goodness_of_fit(pressures, pressure_ref, test_name)
    v_err = goodness_of_fit(velocities, velocity_ref, test_name)
    # Record the three error norms, one per line.
    with open('gradesheet.txt', 'w') as f:
        f.write(str(d_err) + '\n')
        f.write(str(p_err) + '\n')
        f.write(str(v_err) + '\n')
    return d_err < 0.06 and p_err < 0.02 and v_err < 0.04
import sys

if __name__ == '__main__':
    # Bug fix: "print main()" is Python-2-only syntax; the parenthesised
    # call form is valid under both Python 2 and Python 3.
    print(main())
|
bolverkREPO_NAMEhuji-richPATH_START.@huji-rich_extracted@huji-rich-master@convergence@discontinuous_advection_1d@base@test.py@.PATH_END.py
|
{
"filename": "mnu_compare.py",
"repo_name": "cmbant/CosmoMC",
"repo_path": "CosmoMC_extracted/CosmoMC-master/batch1/outputs/mnu_compare.py",
"type": "Python"
}
|
#import planckStyle as s
#g=s.plotter
import GetDistPlots
# Plotter that reads the pre-computed chain plot data under 'main/plot_data'.
g=GetDistPlots.GetDistPlotter('main/plot_data')
# No explicit legend labels: the plotter uses its defaults.
labels=None
# Chain roots to compare; all are base_mnu variants with different likelihood
# combinations (lowl/lowLike, lensing, Alens, tau prior).
roots=['base_mnu_planck_lowl_lowLike','base_mnu_planck_lowl_lowLike_lensing','base_mnu_Alens_planck_lowl_lowLike','base_mnu_Alens_planck_lowl_lowLike_post_lensing','base_mnu_planck_tauprior','base_mnu_planck_tauprior_post_lensing']
# Draw 1D marginalised posteriors for all roots and save the figure.
g.plots_1d(roots, legend_labels=labels)
g.export('plots/mnu_compare.pdf')
|
cmbantREPO_NAMECosmoMCPATH_START.@CosmoMC_extracted@CosmoMC-master@batch1@outputs@mnu_compare.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "gomesdasilva/pyrhk",
"repo_path": "pyrhk_extracted/pyrhk-master/README.md",
"type": "Markdown"
}
|
# pyrhk
Python functions to calculate logR'HK for HARPS and ESPRESSO and estimate rotation periods and ages.
What pyrhk can do:
- Calibrate S-index values to the Mt. Wilson scale (SMW) for HARPS (using Gomes da Silva et al. 2021 or Lovis et al. 2011 calibrations) or ESPRESSO (using the calibration shown below) spectrographs.
- Calculate logR'HK via Noyes et al. (1984) using two bolometric corrections:
- Middelkoop (1982): for 0.44 < B-V < 1.20 (MS stars)
- Rutten (1984): for 0.3 < B-V < 1.6 (MS stars) and 0.3 < B-V < 1.7 (evolved stars)
- Calculate chromospheric rotation period and gyrochronology age using activity relations from Noyes et al. (1984) and Mamajek & Hillenbrand (2008).
- Obtain B-V colour from Simbad (requires `astroquery` module installed).
### Calibration of SMW for ESPRESSO:
Using 27 stars with data from HARPS and ESPRESSO.

|
gomesdasilvaREPO_NAMEpyrhkPATH_START.@pyrhk_extracted@pyrhk-master@README.md@.PATH_END.py
|
{
"filename": "docker-integration.md",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/test/docker-integration.md",
"type": "Markdown"
}
|
# Docker integration
## Show Docker details
```
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 show docker node1
{'Architecture': 'x86_64',
'BridgeNfIp6tables': True,
'BridgeNfIptables': True,
'CPUSet': True,
'CPUShares': True,
'CgroupDriver': 'cgroupfs',
...
'SystemTime': '2019-05-13T09:25:34.280230589+02:00',
'Warnings': ['WARNING: No swap limit support']}
```
## Listing images
```
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 list nodes
['4752c752-a128-4ae4-a041-84208eabe49d']
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
ubuntu latest d131e0fa2585 2 weeks ago 102MB
crossbario/crossbarfx latest 4bbb66b3e0c6 2 months ago 502MB
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 list docker-images node1
['sha256:d131e0fa2585a7efbfb187f70d648aa50e251d9d3b7031edf4730ca6154e221e',
'sha256:4bbb66b3e0c6f7fb5f1e254f8976c35a7fcf7ea82cd7e3885d1ab7702eedece1']
```
## Show image details
```
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 show docker-image node1 4bbb66
{'Architecture': 'amd64',
'Author': 'The Crossbar.io Project <support@crossbario.com>',
'Comment': '',
'Config': {'ArgsEscaped': True,
'AttachStderr': False,
'AttachStdin': False,
'AttachStdout': False,
'Cmd': ['edge',
'start',
'--cbdir',
'/node/.crossbar',
'--loglevel',
'info'],
...
'Size': 501562522,
'VirtualSize': 501562522}
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$
```
## Listing containers
```
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 list docker-containers node1
['02137443e9568c2c5e2b5a0c994da4c4009f797207f8123dcbe77464cfead1f5',
'a4550e94128736ce767c60c85c420382895e7d3a2aee398f6e623a8f6e50982c',
'ca456feb41b5e995f493bb36180e13a8425b17d61201b2d7c1c38ac4f869017d',
'f254963a5bee60e66ed314cb0cce0df29223e6e48497578d086f43d0de983f3f']
```
## Show container details
```
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 show docker-container node1 02137443e9
{'AppArmorProfile': 'docker-default',
'Args': [],
'Config': {'AttachStderr': True,
'AttachStdin': True,
'AttachStdout': True,
'Cmd': ['/bin/bash'],
'Domainname': '',
'Entrypoint': None,
'Env': ['PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
'Hostname': '02137443e956',
'Image': 'ubuntu',
...
'Status': 'exited'}}
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$
```
## Creating containers
```
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 create docker-container node1 d131e0fa258 --config {}
{'id': '050c89a9ca0b7b930ad68d3cd7d93911510f426beddbea97881f907e974314ef'}
```
Eg:
```
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 create docker-container node1 d131e0fa258 --config {}
{'id': '050c89a9ca0b7b930ad68d3cd7d93911510f426beddbea97881f907e974314ef'}
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 list docker-images node1
['sha256:d131e0fa2585a7efbfb187f70d648aa50e251d9d3b7031edf4730ca6154e221e',
'sha256:4bbb66b3e0c6f7fb5f1e254f8976c35a7fcf7ea82cd7e3885d1ab7702eedece1']
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ crossbarfx shell --realm mrealm1 list docker-containers node1
['050c89a9ca0b7b930ad68d3cd7d93911510f426beddbea97881f907e974314ef',
'02137443e9568c2c5e2b5a0c994da4c4009f797207f8123dcbe77464cfead1f5',
'a4550e94128736ce767c60c85c420382895e7d3a2aee398f6e623a8f6e50982c',
'ca456feb41b5e995f493bb36180e13a8425b17d61201b2d7c1c38ac4f869017d',
'f254963a5bee60e66ed314cb0cce0df29223e6e48497578d086f43d0de983f3f']
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
(cpy373_1) oberstet@intel-nuci7:~/scm/crossbario/crossbarfx$ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
050c89a9ca0b d131e0fa258 "/bin/bash" 33 seconds ago Created naughty_brown
02137443e956 ubuntu "/bin/bash" 41 minutes ago Exited (0) 41 minutes ago test1
a4550e941287 ubuntu "--name test1 sh" 41 minutes ago Created vigorous_snyder
ca456feb41b5 ubuntu "--name test1 bash" 42 minutes ago Created boring_engelbart
f254963a5bee ubuntu "--name test1 /bin/b…" 42 minutes ago Created condescending_lehmann
```
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@test@docker-integration.md@.PATH_END.py
|
{
"filename": "generate_Pk_example.py",
"repo_name": "federicomarulli/CosmoBolognaLib",
"repo_path": "CosmoBolognaLib_extracted/CosmoBolognaLib-master/External/CLASS/external/external_Pk/generate_Pk_example.py",
"type": "Python"
}
|
#!/usr/bin/python
from __future__ import print_function
import sys
from math import exp
# README:
#
# This is an example python script for the external_Pk mode of Class.
# It generates the primordial spectrum of LambdaCDM.
# It can be edited and used directly, though keeping a copy of it is recommended.
#
# Two (maybe three) things need to be edited:
#
# 1. The name of the parameters needed for the calculation of Pk.
# "sys.argv[1]" corresponds to "custom1" in Class, an so on
try :
    # Parse the "custom" parameters Class passes on the command line.
    k_0 = float(sys.argv[1])  # pivot scale of the power law
    A = float(sys.argv[2])    # amplitude at the pivot scale
    n_s = float(sys.argv[3])  # spectral index
# Error control, no need to touch
except IndexError :
    raise IndexError("It seems you are calling this script with too few arguments.")
except ValueError :
    raise ValueError("It seems some of the arguments are not correctly formatted. "+
                     "Remember that they must be floating point numbers.")
# 2. The function giving P(k), including the necessary import statements.
# Inside this function, you can use the parameters named in the previous step.
def P(k):
    """Primordial power-law spectrum A * (k/k_0)^(n_s - 1)."""
    tilt = n_s - 1.
    return A * (k / k_0) ** tilt
# 3. Limits for k and precision:
#    Check that the boundaries are correct for your case.
#    It is safer to set k_per_decade_primordial slightly bigger than that of Class.
k_min = 1.e-6
k_max = 10.
k_per_decade_primordial = 200.
#
# And nothing should need to be edited from here on.
#
# Build the logarithmic k grid: each step multiplies by a constant ratio so
# that k_per_decade_primordial points span one decade. The loop appends one
# point beyond k_max so the requested range is fully covered.
ks = [float(k_min)]
growth = 10. ** (1. / float(k_per_decade_primordial))
while ks[-1] <= float(k_max):
    ks.append(ks[-1] * growth)
# Emit "k P(k)" pairs, one per line, at full double precision.
for k in ks:
    print("%.18g %.18g" % (k, P(k)))
|
federicomarulliREPO_NAMECosmoBolognaLibPATH_START.@CosmoBolognaLib_extracted@CosmoBolognaLib-master@External@CLASS@external@external_Pk@generate_Pk_example.py@.PATH_END.py
|
{
"filename": "summary_iterator.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/summary/summary_iterator.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides a method for reading events from an event file via an iterator."""
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
from tensorflow.python.util.tf_export import tf_export
class _SummaryIterator(object):
  """Yields `Event` protocol buffers from a given path."""

  def __init__(self, path):
    # Underlying iterator over the TFRecord-formatted event file.
    self._tf_record_iterator = tf_record.tf_record_iterator(path)

  def __iter__(self):
    return self

  def __next__(self):
    # Parse the next serialized record into an Event proto.
    serialized = next(self._tf_record_iterator)
    return event_pb2.Event.FromString(serialized)

  next = __next__  # Python 2 iterator-protocol alias.
@tf_export(v1=['train.summary_iterator'])
def summary_iterator(path):
  # pylint: disable=line-too-long
  """Returns a iterator for reading `Event` protocol buffers from an event file.

  You can use this function to read events written to an event file. It returns
  a Python iterator that yields `Event` protocol buffers.

  Example: Print the contents of an events file.

  ```python
  for e in tf.compat.v1.train.summary_iterator(path to events file):
      print(e)
  ```

  Example: Print selected summary values.

  ```python
  # This example supposes that the events file contains summaries with a
  # summary value tag 'loss'. These could have been added by calling
  # `add_summary()`, passing the output of a scalar summary op created with
  # with: `tf.compat.v1.summary.scalar('loss', loss_tensor)`.
  for e in tf.compat.v1.train.summary_iterator(path to events file):
      for v in e.summary.value:
          if v.tag == 'loss':
              print(tf.make_ndarray(v.tensor))
  ```

  Example: Continuously check for new summary values.

  ```python
  summaries = tf.compat.v1.train.summary_iterator(path to events file)
  while True:
    for e in summaries:
        for v in e.summary.value:
            if v.tag == 'loss':
                print(tf.make_ndarray(v.tensor))
    # Wait for a bit before checking the file for any new events
    time.sleep(wait time)
  ```

  See the protocol buffer definitions of
  [Event](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
  and
  [Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  for more information about their attributes.

  Args:
    path: The path to an event file created by a `SummaryWriter`.

  Returns:
    A iterator that yields `Event` protocol buffers
  """
  # Records are read from disk as the returned object is iterated, via the
  # underlying tf_record iterator wrapped by _SummaryIterator.
  return _SummaryIterator(path)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@summary@summary_iterator.py@.PATH_END.py
|
{
"filename": "losses.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/legacy/losses.py",
"type": "Python"
}
|
from keras.src.api_export import keras_export
@keras_export("keras._legacy.losses.Reduction")
class Reduction:
AUTO = "auto"
NONE = "none"
SUM = "sum"
SUM_OVER_BATCH_SIZE = "sum_over_batch_size"
@classmethod
def all(cls):
return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)
@classmethod
def validate(cls, key):
if key not in cls.all():
raise ValueError(
f'Invalid Reduction Key: {key}. Expected keys are "{cls.all()}"'
)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@legacy@losses.py@.PATH_END.py
|
{
"filename": "_borderwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/marker/colorbar/_borderwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    # Validates the colorbar `borderwidth` property of scattergl markers.
    def __init__(
        self,
        plotly_name="borderwidth",
        parent_name="scattergl.marker.colorbar",
        **kwargs,
    ):
        # Defaults may be overridden through kwargs by generated callers.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super(BorderwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@marker@colorbar@_borderwidth.py@.PATH_END.py
|
{
"filename": "pointnet2_modules.py",
"repo_name": "ma-xu/pointMLP-pytorch",
"repo_path": "pointMLP-pytorch_extracted/pointMLP-pytorch-main/pointnet2_ops_lib/pointnet2_ops/pointnet2_modules.py",
"type": "Python"
}
|
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointnet2_ops import pointnet2_utils
def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
    """Build a shared MLP as 1x1 Conv2d (+ optional BatchNorm) + ReLU stacks.

    ``mlp_spec`` lists the channel sizes; one conv layer is created per
    consecutive pair. Bias is disabled when batch-norm follows the conv.
    """
    modules = []
    for in_ch, out_ch in zip(mlp_spec[:-1], mlp_spec[1:]):
        modules.append(nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=not bn))
        if bn:
            modules.append(nn.BatchNorm2d(out_ch))
        modules.append(nn.ReLU(True))
    return nn.Sequential(*modules)
class _PointnetSAModuleBase(nn.Module):
    # Shared scaffolding for set-abstraction modules: subclasses populate
    # self.npoint, self.groupers and self.mlps in their __init__.

    def __init__(self):
        super(_PointnetSAModuleBase, self).__init__()
        self.npoint = None    # number of sampled centroids (None => group all)
        self.groupers = None  # nn.ModuleList of grouping operators
        self.mlps = None      # nn.ModuleList of per-scale shared MLPs

    def forward(
        self, xyz: torch.Tensor, features: Optional[torch.Tensor]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """
        new_features_list = []
        # (B, N, 3) -> (B, 3, N), the layout gather_operation consumes.
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        # Furthest-point-sample npoint centroids; when npoint is None the
        # module operates in "group all" mode and no centroids are needed.
        new_xyz = (
            pointnet2_utils.gather_operation(
                xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features
            )  # (B, C, npoint, nsample)
            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            # Max-pool over the nsample neighbours of each centroid.
            new_features = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)]
            )  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)
        # Concatenate per-scale descriptors along the channel axis.
        return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    r"""Pointnet set abstraction layer with multiscale grouping

    Parameters
    ----------
    npoint : int
        Number of features
    radii : list of float32
        list of radii to group with
    nsamples : list of int32
        Number of samples in each ball query
    mlps : list of list of int32
        Spec of the pointnet before the global max_pool for each scale
    bn : bool
        Use batchnorm
    """

    def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
        # type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
        super(PointnetSAModuleMSG, self).__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None
                else pointnet2_utils.GroupAll(use_xyz)
            )
            # Bug fix: copy before adjusting the channel count so the
            # caller's spec list is not mutated (the original wrote through
            # the shared reference with `mlps[i][0] += 3`).
            mlp_spec = list(mlps[i])
            if use_xyz:
                # Grouped features are concatenated with the 3 xyz channels.
                mlp_spec[0] += 3

            self.mlps.append(build_shared_mlp(mlp_spec, bn))
class PointnetSAModule(PointnetSAModuleMSG):
    r"""Pointnet set abstraction layer (single-scale).

    Parameters
    ----------
    npoint : int
        Number of features
    radius : float
        Radius of ball
    nsample : int
        Number of samples in the ball query
    mlp : list
        Spec of the pointnet before the global max_pool
    bn : bool
        Use batchnorm
    """

    def __init__(
        self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
    ):
        # type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
        # Single-scale abstraction is multiscale grouping with one scale.
        super(PointnetSAModule, self).__init__(
            npoint=npoint,
            radii=[radius],
            nsamples=[nsample],
            mlps=[mlp],
            bn=bn,
            use_xyz=use_xyz,
        )
class PointnetFPModule(nn.Module):
    r"""Propagates the features of one set to another

    Parameters
    ----------
    mlp : list
        Pointnet module parameters
    bn : bool
        Use batchnorm
    """

    def __init__(self, mlp, bn=True):
        # type: (PointnetFPModule, List[int], bool) -> None
        super(PointnetFPModule, self).__init__()
        self.mlp = build_shared_mlp(mlp, bn=bn)

    def forward(self, unknown, known, unknow_feats, known_feats):
        # type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Parameters
        ----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the xyz positions of the unknown features
        known : torch.Tensor
            (B, m, 3) tensor of the xyz positions of the known features
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated

        Returns
        -------
        new_features : torch.Tensor
            (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if known is not None:
            # Inverse-distance-weighted interpolation from the 3 nearest
            # known points to each unknown point.
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)  # epsilon guards zero distances
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm

            interpolated_feats = pointnet2_utils.three_interpolate(
                known_feats, idx, weight
            )
        else:
            # Bug fix: torch.Size (a tuple) cannot be concatenated with a
            # list, so the original `size()[0:2] + [unknown.size(1)]` raised
            # TypeError; unpack the leading dims and append the target
            # length instead.
            interpolated_feats = known_feats.expand(
                *known_feats.size()[0:2], unknown.size(1)
            )

        if unknow_feats is not None:
            new_features = torch.cat(
                [interpolated_feats, unknow_feats], dim=1
            )  # (B, C2 + C1, n)
        else:
            new_features = interpolated_feats

        # The shared MLP expects a 4D tensor; add and strip a trailing axis.
        new_features = new_features.unsqueeze(-1)
        new_features = self.mlp(new_features)

        return new_features.squeeze(-1)
|
ma-xuREPO_NAMEpointMLP-pytorchPATH_START.@pointMLP-pytorch_extracted@pointMLP-pytorch-main@pointnet2_ops_lib@pointnet2_ops@pointnet2_modules.py@.PATH_END.py
|
{
"filename": "Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/Deprecated/Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb",
"type": "Jupyter Notebook"
}
|
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# Converting *Numerical* ADM Initial Data in the Spherical or Cartesian Basis to BSSN Initial Data in the Desired Curvilinear Basis
## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark
[comment]: <> (Abstract: TODO)
### This module is meant for use only with initial data that can be represented numerically in ADM form, either in the Spherical or Cartesian basis. I.e., the ADM variables are given $\left\{\gamma_{ij}, K_{ij}, \alpha, \beta^i\right\}$ *numerically* as functions of $(r,\theta,\phi)$ or $(x,y,z)$; e.g., through an initial data solver. If instead the ADM initial data are provided as exact (algebraic) functions of $(r,\theta,\phi)$ or $(x,y,z)$, then it is better to use [the Exact-ADM-Spherical/Cartesian-to-BSSNCurvilinear module](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb) instead.
**Notebook Status:** <font color='orange'><b> Self-Validated </b></font>
**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**
### NRPy+ Source Code for this module: [BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinearID.py](../edit/BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinearID.py)
## Introduction:
Given the ADM variables:
$$\left\{\gamma_{ij}, K_{ij}, \alpha, \beta^i, B^i\right\}$$
in the Spherical or Cartesian basis, and as functions of $(r,\theta,\phi)$ or $(x,y,z)$, respectively, this module documents their conversion to the BSSN variables
$$\left\{\bar{\gamma}_{i j},\bar{A}_{i j},\phi, K, \bar{\Lambda}^{i}, \alpha, \beta^i, B^i\right\},$$
in the desired curvilinear basis (given by `reference_metric::CoordSystem`). Then it rescales the resulting BSSNCurvilinear variables (as defined in [the BSSN Curvilinear tutorial](Tutorial-BSSNCurvilinear.ipynb)) into the form needed for BSSNCurvilinear evolutions:
$$\left\{h_{i j},a_{i j},\phi, K, \lambda^{i}, \alpha, \mathcal{V}^i, \mathcal{B}^i\right\}.$$
We will use as our core example in this module UIUC initial data, which are ([as documented in their NRPy+ initial data module](Tutorial-ADM_Initial_Data-UIUC_BlackHole.ipynb)) given in terms of ADM variables in Spherical coordinates.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules
1. [Step 2](#cylindrical): Desired output BSSN Curvilinear coordinate system set to Cylindrical, as a proof-of-principle
1. [Step 3](#admxx0xx1xx2): Make ADM variables functions of ${\rm xx0},{\rm xx1},{\rm xx2}$ instead of functions of Cartesian or Spherical coordinates
1. [Step 4](#adm_jacobian): Applying Jacobian transformations to get in the correct `xx0,xx1,xx2` basis
1. [Step 5](#adm2bssn): Call functions within [`BSSN.BSSN_in_terms_of_ADM`](../edit/BSSN/BSSN_in_terms_of_ADM.py) ([**tutorial**](Tutorial-BSSN_in_terms_of_ADM.ipynb)) to perform the ADM-to-BSSN conversion for all BSSN quantities *except* $\lambda^i$
1. [Step 6](#adm2bssn_c): Output all ADM-to-BSSN expressions to a C function
1. [Step 6.a](#driver): Output the driver function for the above C function
1. [Step 7](#lambda): Compute $\bar{\Lambda}^i$ from finite-difference derivatives of rescaled metric quantities
1. [Step 8](#code_validation): Code Validation against `BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear` NRPy+ module
1. [Step 9](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Initialize core Python/NRPy+ modules \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
```python
# Step 1: Initialize core Python/NRPy+ modules
from outputC import outCfunction,outputC,lhrh # NRPy+: Core C code output module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import finite_difference as fin # NRPy+: Finite difference C code generation module
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import BSSN.BSSN_quantities as Bq # NRPy+: Computes useful BSSN quantities; e.g., gammabarUU & GammabarUDD needed below
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
import os, shutil, sys # Standard Python modules for multiplatform OS-level functions
# Step 1.a: Create output directory for C codes generated by this tutorial:
Ccodesdir = os.path.join("numerical_ADM_to_BSSN_Ccodes/")
# First remove C code output directory if it exists
# Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty
# !rm -r ScalarWaveCurvilinear_Playground_Ccodes
shutil.rmtree(Ccodesdir, ignore_errors=True)
# Then create a fresh directory
cmd.mkdir(Ccodesdir)
# Step 1.b: Create output directory for C codes generated by the corresponding NRPy+ Python module,
# BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear
PyModCcodesdir = os.path.join("numerical_ADM_to_BSSN_Ccodes/PyMod/")
# First remove C code output directory if it exists
# Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty
# !rm -r ScalarWaveCurvilinear_Playground_Ccodes
shutil.rmtree(PyModCcodesdir, ignore_errors=True)
# Then create a fresh directory
cmd.mkdir(PyModCcodesdir)
```
<a id='cylindrical'></a>
# Step 2: Desired output BSSN Curvilinear coordinate system set to Cylindrical, as a proof-of-principle \[Back to [top](#toc)\]
$$\label{cylindrical}$$
```python
# Step 2: Desired output BSSN Curvilinear coordinate system set to Cylindrical, as a proof-of-principle
# The ADM & BSSN formalisms only work in 3D; they are 3+1 decompositions of Einstein's equations.
# To implement axisymmetry or spherical symmetry, simply set all spatial derivatives in
# the relevant angular directions to zero; DO NOT SET DIM TO ANYTHING BUT 3.
# Set spatial dimension (must be 3 for BSSN)
DIM = 3
# Set the desired *output* coordinate system to Cylindrical:
par.set_parval_from_str("reference_metric::CoordSystem","Cylindrical")
rfm.reference_metric()
# Set function input parameters to consistent defaults.
ADM_input_function_name = "ID_ADM_SphorCart"
pointer_to_ID_inputs = False
```
<a id='admxx0xx1xx2'></a>
# Step 3: Make ADM variables functions of ${\rm xx0},{\rm xx1},{\rm xx2}$ instead of functions of Cartesian or Spherical coordinates \[Back to [top](#toc)\]
$$\label{admxx0xx1xx2}$$
ADM variables are given as functions of $(r,\theta,\phi)$ or $(x,y,z)$. We convert them to functions of `(xx0,xx1,xx2)` using SymPy's `subs()` function.
```python
# Step 1: All input quantities are in terms of r,th,ph or x,y,z. We want them in terms
# of xx0,xx1,xx2, so here we call sympify_integers__replace_rthph() to replace
# r,th,ph or x,y,z, respectively, with the appropriate functions of xx0,xx1,xx2
# as defined for this particular reference metric in reference_metric.py's
# xxSph[] or xx_to_Cart[], respectively:
# Define the input variables:
gammaSphorCartDD = ixp.declarerank2("gammaSphorCartDD","sym01")
KSphorCartDD = ixp.declarerank2("KSphorCartDD","sym01")
alphaSphorCart = sp.symbols("alphaSphorCart")
betaSphorCartU = ixp.declarerank1("betaSphorCartU")
BSphorCartU = ixp.declarerank1("BSphorCartU")
# UIUC Black Hole initial data are given in Spherical coordinates.
CoordType_in = "Spherical"
# Make sure that rfm.reference_metric() has been called.
# We'll need the variables it defines throughout this module.
if rfm.have_already_called_reference_metric_function == False:
print("Error. Called Convert_Spherical_ADM_to_BSSN_curvilinear() without")
print(" first setting up reference metric, by calling rfm.reference_metric().")
sys.exit(1)
r_th_ph_or_Cart_xyz_oID_xx = []
if CoordType_in == "Spherical":
r_th_ph_or_Cart_xyz_oID_xx = rfm.xxSph
elif CoordType_in == "Cartesian":
r_th_ph_or_Cart_xyz_oID_xx = rfm.xx_to_Cart
else:
print("Error: Can only convert ADM Cartesian or Spherical initial data to BSSN Curvilinear coords.")
sys.exit(1)
```
<a id='adm_jacobian'></a>
# Step 4: Applying Jacobian transformations to get in the correct `xx0,xx1,xx2` basis \[Back to [top](#toc)\]
$$\label{adm_jacobian}$$
The following discussion holds for either Spherical or Cartesian input data, so for simplicity let's just assume the data are given in Spherical coordinates.
All ADM tensors and vectors are in the Spherical coordinate basis $x^i_{\rm Sph} = (r,\theta,\phi)$, but we need them in the curvilinear coordinate basis $x^i_{\rm rfm}=$`(xx0,xx1,xx2)` set by the `"reference_metric::CoordSystem"` variable. Empirically speaking, it is far easier to write `(x(xx0,xx1,xx2),y(xx0,xx1,xx2),z(xx0,xx1,xx2))` than the inverse, so we will compute the Jacobian matrix
$$
{\rm Jac\_dUSph\_dDrfmUD[i][j]} = \frac{\partial x^i_{\rm Sph}}{\partial x^j_{\rm rfm}},
$$
via exact differentiation (courtesy SymPy), and the inverse Jacobian
$$
{\rm Jac\_dUrfm\_dDSphUD[i][j]} = \frac{\partial x^i_{\rm rfm}}{\partial x^j_{\rm Sph}},
$$
using NRPy+'s `generic_matrix_inverter3x3()` function. In terms of these, the transformation of BSSN tensors from Spherical to `"reference_metric::CoordSystem"` coordinates may be written:
\begin{align}
\beta^i_{\rm rfm} &= \frac{\partial x^i_{\rm rfm}}{\partial x^\ell_{\rm Sph}} \beta^\ell_{\rm Sph}\\
B^i_{\rm rfm} &= \frac{\partial x^i_{\rm rfm}}{\partial x^\ell_{\rm Sph}} B^\ell_{\rm Sph}\\
\gamma^{\rm rfm}_{ij} &=
\frac{\partial x^\ell_{\rm Sph}}{\partial x^i_{\rm rfm}}
\frac{\partial x^m_{\rm Sph}}{\partial x^j_{\rm rfm}} \gamma^{\rm Sph}_{\ell m}\\
K^{\rm rfm}_{ij} &=
\frac{\partial x^\ell_{\rm Sph}}{\partial x^i_{\rm rfm}}
\frac{\partial x^m_{\rm Sph}}{\partial x^j_{\rm rfm}} K^{\rm Sph}_{\ell m}
\end{align}
```python
# Step 4: All ADM initial data quantities are now functions of xx0,xx1,xx2, but
# they are still in the Spherical or Cartesian basis. We can now directly apply
# Jacobian transformations to get them in the correct xx0,xx1,xx2 basis:
# Next apply Jacobian transformations to convert into the (xx0,xx1,xx2) basis
# alpha is a scalar, so no Jacobian transformation is necessary.
alpha = alphaSphorCart
Jac_dUSphorCart_dDrfmUD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
Jac_dUSphorCart_dDrfmUD[i][j] = sp.diff(r_th_ph_or_Cart_xyz_oID_xx[i],rfm.xx[j])
Jac_dUrfm_dDSphorCartUD, dummyDET = ixp.generic_matrix_inverter3x3(Jac_dUSphorCart_dDrfmUD)
betaU = ixp.zerorank1()
BU = ixp.zerorank1()
gammaDD = ixp.zerorank2()
KDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
betaU[i] += Jac_dUrfm_dDSphorCartUD[i][j] * betaSphorCartU[j]
BU[i] += Jac_dUrfm_dDSphorCartUD[i][j] * BSphorCartU[j]
for k in range(DIM):
for l in range(DIM):
gammaDD[i][j] += Jac_dUSphorCart_dDrfmUD[k][i]*Jac_dUSphorCart_dDrfmUD[l][j] * gammaSphorCartDD[k][l]
KDD[i][j] += Jac_dUSphorCart_dDrfmUD[k][i]*Jac_dUSphorCart_dDrfmUD[l][j] * KSphorCartDD[k][l]
```
<a id='adm2bssn'></a>
# Step 5: Call functions within [`BSSN.BSSN_in_terms_of_ADM`](../edit/BSSN/BSSN_in_terms_of_ADM.py) ([**tutorial**](Tutorial-BSSN_in_terms_of_ADM.ipynb)) to perform the ADM-to-BSSN conversion for all BSSN quantities *except* $\lambda^i$ \[Back to [top](#toc)\]
$$\label{adm2bssn}$$
All ADM quantities were input into this function in the Spherical or Cartesian basis, as functions of $r,\theta,\phi$ or $x,y,z$, respectively. In [Step 3](#admxx0xx1xx2) and [Step 4](#adm_jacobian) above, we converted them to the `xx0,xx1,xx2` basis, and as functions of `xx0,xx1,xx2`. Here we convert ADM quantities in the `xx0,xx1,xx2` (a.k.a. "rfm") basis to their BSSN Curvilinear counterparts, in the same basis. **Note that we withhold computation of the BSSN $\lambda^i$ quantities until [a later section of this notebook](#lambda), as they must be evaluated using finite differences.**
```python
# Step 5: Now that we have all ADM quantities in the desired
# basis, we next perform ADM-to-BSSN conversion:
import BSSN.BSSN_in_terms_of_ADM as BitoA
BitoA.gammabarDD_hDD( gammaDD)
BitoA.trK_AbarDD_aDD( gammaDD,KDD)
BitoA.cf_from_gammaDD(gammaDD)
BitoA.betU_vetU( betaU,BU)
hDD = BitoA.hDD
trK = BitoA.trK
aDD = BitoA.aDD
cf = BitoA.cf
vetU = BitoA.vetU
betU = BitoA.betU
```
<a id='adm2bssn_c'></a>
# Step 6: Output all ADM-to-BSSN expressions to a C function \[Back to [top](#toc)\]
$$\label{adm2bssn_c}$$
This function must first call the `ID_ADM_SphorCart()` defined above. Using these Spherical or Cartesian data, it sets up all quantities needed for BSSNCurvilinear initial data, *except* $\lambda^i$, which must be computed from numerical data using finite-difference derivatives.
```python
# Step 6: Output all ADM-to-BSSN expressions to a C function
ID_inputs_param = "ID_inputs other_inputs,"
if pointer_to_ID_inputs == True:
ID_inputs_param = "ID_inputs *other_inputs,"
desc="Write BSSN variables in terms of ADM variables at a given point xx0,xx1,xx2"
name="ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs"
params = "const REAL xx0xx1xx2[3],"+ID_inputs_param+"""
REAL *hDD00,REAL *hDD01,REAL *hDD02,REAL *hDD11,REAL *hDD12,REAL *hDD22,
REAL *aDD00,REAL *aDD01,REAL *aDD02,REAL *aDD11,REAL *aDD12,REAL *aDD22,
REAL *trK,
REAL *vetU0,REAL *vetU1,REAL *vetU2,
REAL *betU0,REAL *betU1,REAL *betU2,
REAL *alpha, REAL *cf"""
outCparams = "preindent=1,outCverbose=False,includebraces=False"
outCfunction(
outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params=params,
body ="""
REAL gammaSphorCartDD00,gammaSphorCartDD01,gammaSphorCartDD02,
gammaSphorCartDD11,gammaSphorCartDD12,gammaSphorCartDD22;
REAL KSphorCartDD00,KSphorCartDD01,KSphorCartDD02,
KSphorCartDD11,KSphorCartDD12,KSphorCartDD22;
REAL alphaSphorCart,betaSphorCartU0,betaSphorCartU1,betaSphorCartU2;
REAL BSphorCartU0,BSphorCartU1,BSphorCartU2;
const REAL xx0 = xx0xx1xx2[0];
const REAL xx1 = xx0xx1xx2[1];
const REAL xx2 = xx0xx1xx2[2];
REAL xyz_or_rthph[3];\n"""+
outputC(r_th_ph_or_Cart_xyz_oID_xx[0:3],["xyz_or_rthph[0]","xyz_or_rthph[1]","xyz_or_rthph[2]"],"returnstring",
outCparams+",CSE_enable=False")+" "+ADM_input_function_name+"""(xyz_or_rthph, other_inputs,
&gammaSphorCartDD00,&gammaSphorCartDD01,&gammaSphorCartDD02,
&gammaSphorCartDD11,&gammaSphorCartDD12,&gammaSphorCartDD22,
&KSphorCartDD00,&KSphorCartDD01,&KSphorCartDD02,
&KSphorCartDD11,&KSphorCartDD12,&KSphorCartDD22,
&alphaSphorCart,&betaSphorCartU0,&betaSphorCartU1,&betaSphorCartU2,
&BSphorCartU0,&BSphorCartU1,&BSphorCartU2);
// Next compute all rescaled BSSN curvilinear quantities:\n"""+
outputC([hDD[0][0],hDD[0][1],hDD[0][2],hDD[1][1],hDD[1][2],hDD[2][2],
aDD[0][0],aDD[0][1],aDD[0][2],aDD[1][1],aDD[1][2],aDD[2][2],
trK, vetU[0],vetU[1],vetU[2], betU[0],betU[1],betU[2],
alpha, cf],
["*hDD00","*hDD01","*hDD02","*hDD11","*hDD12","*hDD22",
"*aDD00","*aDD01","*aDD02","*aDD11","*aDD12","*aDD22",
"*trK", "*vetU0","*vetU1","*vetU2", "*betU0","*betU1","*betU2",
"*alpha","*cf"],"returnstring",params=outCparams),
enableCparameters=False)
```
Output C function ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs() to file numerical_ADM_to_BSSN_Ccodes/ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h
<a id='driver'></a>
## Step 6.a: Output the driver function for the above C function \[Back to [top](#toc)\]
$$\label{driver}$$
We output the driver function for the above C function:
`ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs()`
```python
# Step 6.a: Output the driver function for the above C function
# Next write ID_BSSN__ALL_BUT_LAMBDAs(), the driver
# function for ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs():
desc="""Driver function for ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs(),
which writes BSSN variables in terms of ADM variables at a given point xx0,xx1,xx2"""
name="ID_BSSN__ALL_BUT_LAMBDAs"
params = "const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],"+ID_inputs_param+"REAL *in_gfs"
outCfunction(
outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params=params,
body ="""
const int idx = IDX3(i0,i1,i2);
const REAL xx0xx1xx2[3] = {xx0,xx1,xx2};
ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs(xx0xx1xx2,other_inputs,
&in_gfs[IDX4pt(HDD00GF,idx)],&in_gfs[IDX4pt(HDD01GF,idx)],&in_gfs[IDX4pt(HDD02GF,idx)],
&in_gfs[IDX4pt(HDD11GF,idx)],&in_gfs[IDX4pt(HDD12GF,idx)],&in_gfs[IDX4pt(HDD22GF,idx)],
&in_gfs[IDX4pt(ADD00GF,idx)],&in_gfs[IDX4pt(ADD01GF,idx)],&in_gfs[IDX4pt(ADD02GF,idx)],
&in_gfs[IDX4pt(ADD11GF,idx)],&in_gfs[IDX4pt(ADD12GF,idx)],&in_gfs[IDX4pt(ADD22GF,idx)],
&in_gfs[IDX4pt(TRKGF,idx)],
&in_gfs[IDX4pt(VETU0GF,idx)],&in_gfs[IDX4pt(VETU1GF,idx)],&in_gfs[IDX4pt(VETU2GF,idx)],
&in_gfs[IDX4pt(BETU0GF,idx)],&in_gfs[IDX4pt(BETU1GF,idx)],&in_gfs[IDX4pt(BETU2GF,idx)],
&in_gfs[IDX4pt(ALPHAGF,idx)],&in_gfs[IDX4pt(CFGF,idx)]);
""",
loopopts = "AllPoints,Read_xxs,oldloops",enableCparameters=False)
```
Output C function ID_BSSN__ALL_BUT_LAMBDAs() to file numerical_ADM_to_BSSN_Ccodes/ID_BSSN__ALL_BUT_LAMBDAs.h
<a id='lambda'></a>
# Step 7: Compute $\bar{\Lambda}^i$ from finite-difference derivatives of rescaled metric quantities \[Back to [top](#toc)\]
$$\label{lambda}$$
We compute $\bar{\Lambda}^i$ (Eqs. 4 and 5 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)), from finite-difference derivatives of rescaled metric quantities $h_{ij}$:
$$
\bar{\Lambda}^i = \bar{\gamma}^{jk}\left(\bar{\Gamma}^i_{jk} - \hat{\Gamma}^i_{jk}\right).
$$
The [reference_metric.py](../edit/reference_metric.py) module provides us with analytic expressions for $\hat{\Gamma}^i_{jk}$, so here we need only compute finite-difference expressions for $\bar{\Gamma}^i_{jk}$, based on the values for $h_{ij}$ provided in the initial data. Once $\bar{\Lambda}^i$ has been computed, we apply the usual rescaling procedure:
$$
\lambda^i = \bar{\Lambda}^i/\text{ReU[i]},
$$
and then output the result to a C file using the NRPy+ finite-difference C output routine.
```python
# Step 7: Compute $\bar{\Lambda}^i$ from finite-difference derivatives of rescaled metric quantities
# We will need all BSSN gridfunctions to be defined, as well as
# expressions for gammabarDD_dD in terms of exact derivatives of
# the rescaling matrix and finite-difference derivatives of
# hDD's. This functionality is provided by BSSN.BSSN_unrescaled_and_barred_vars,
# which we call here to overwrite above definitions of gammabarDD,gammabarUU, etc.
Bq.gammabar__inverse_and_derivs() # Provides gammabarUU and GammabarUDD
gammabarUU = Bq.gammabarUU
GammabarUDD = Bq.GammabarUDD
# Next evaluate \bar{\Lambda}^i, based on GammabarUDD above and GammahatUDD
# (from the reference metric):
LambdabarU = ixp.zerorank1()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
LambdabarU[i] += gammabarUU[j][k] * (GammabarUDD[i][j][k] - rfm.GammahatUDD[i][j][k])
# Finally apply rescaling:
# lambda^i = Lambdabar^i/\text{ReU[i]}
lambdaU = ixp.zerorank1()
for i in range(DIM):
lambdaU[i] = LambdabarU[i] / rfm.ReU[i]
outCparams = "preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False"
lambdaU_expressions = [lhrh(lhs=gri.gfaccess("in_gfs","lambdaU0"),rhs=lambdaU[0]),
lhrh(lhs=gri.gfaccess("in_gfs","lambdaU1"),rhs=lambdaU[1]),
lhrh(lhs=gri.gfaccess("in_gfs","lambdaU2"),rhs=lambdaU[2])]
desc="Output lambdaU[i] for BSSN, built using finite-difference derivatives."
name="ID_BSSN_lambdas"
params = "const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],const REAL dxx[3],REAL *in_gfs"
outCfunction(
outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params=params,
preloop = """
const REAL invdx0 = 1.0/dxx[0];
const REAL invdx1 = 1.0/dxx[1];
const REAL invdx2 = 1.0/dxx[2];
""",
body = fin.FD_outputC("returnstring",lambdaU_expressions, outCparams),
loopopts = "InteriorPoints,Read_xxs,oldloops",enableCparameters=False)
```
Output C function ID_BSSN_lambdas() to file numerical_ADM_to_BSSN_Ccodes/ID_BSSN_lambdas.h
<a id='code_validation'></a>
# Step 8: Code Validation against `BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation}$$
Here, as a code validation check, we verify agreement in the C codes for converting "numerical" UIUCBlackHole initial data (in Spherical coordinates/basis) to BSSN Curvilinear data in Cylindrical coordinates/basis between
1. this tutorial and
2. the NRPy+ [BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear](../edit/BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear.py) module.
By default, we analyze these expressions in Cylindrical coordinates, though other coordinate systems may be chosen.
```python
# Reset the gridfunctions list;
# in Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear()
# below, BSSN_RHSs is called
# tutorial. This line of code enables us to run
# Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear()
# without resetting the running Python kernel.
gri.glb_gridfcs_list = []
import BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear as AtoBnum
AtoBnum.Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear("Spherical",ADM_input_function_name,
Ccodesdir=PyModCcodesdir)
print("\n\n ### BEGIN VALIDATION TESTS")
import filecmp
for file in ["ID_BSSN_lambdas.h","ID_BSSN__ALL_BUT_LAMBDAs.h",
"ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h"]:
if filecmp.cmp(os.path.join(Ccodesdir,file),
os.path.join(PyModCcodesdir,file)) == False:
print("VALIDATION TEST FAILED ON file: "+file+".")
sys.exit(1)
else:
print("Validation test PASSED on file: "+file)
```
Output C function ID_BSSN_lambdas() to file numerical_ADM_to_BSSN_Ccodes/PyMod/ID_BSSN_lambdas.h
Output C function ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs() to file numerical_ADM_to_BSSN_Ccodes/PyMod/ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h
Output C function ID_BSSN__ALL_BUT_LAMBDAs() to file numerical_ADM_to_BSSN_Ccodes/PyMod/ID_BSSN__ALL_BUT_LAMBDAs.h
### BEGIN VALIDATION TESTS
Validation test PASSED on file: ID_BSSN_lambdas.h
Validation test PASSED on file: ID_BSSN__ALL_BUT_LAMBDAs.h
Validation test PASSED on file: ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h
<a id='latex_pdf_output'></a>
# Step 9: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
Once the following code finishes running, the generated PDF may be found at the following location within the directory you have the NRPy+ tutorial saved:
[Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.pdf](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.pdf)
```python
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear")
```
Created Tutorial-ADM_Initial_Data-
Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.tex,
and compiled LaTeX file to PDF file Tutorial-ADM_Initial_Data-
Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.pdf
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@Deprecated@Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/jupyter-core/py3/README.md",
"type": "Markdown"
}
|
# Jupyter Core
[](https://github.com/jupyter/jupyter_core/actions/workflows/test.yml/badge.svg?query=branch%3Amain++)
[](http://jupyter-core.readthedocs.io/en/latest/?badge=latest)
Core common functionality of Jupyter projects.
This package contains base application classes and configuration inherited by other projects.
It doesn't do much on its own.
# Development Setup
The [Jupyter Contributor Guides](https://docs.jupyter.org/en/latest/contributing/content-contributor.html) provide extensive information on contributing code or documentation to Jupyter projects. The limited instructions below for setting up a development environment are for your convenience.
## Coding
You'll need Python and `pip` on the search path. Clone the Jupyter Core git repository to your computer, for example in `/my/projects/jupyter_core`.
Now create an [editable install](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs)
and download the dependencies of code and test suite by executing:
```
cd /my/projects/jupyter_core/
pip install -e ".[test]"
py.test
```
The last command runs the test suite to verify the setup. During development, you can pass filenames to `py.test`, and it will execute only those tests.
## Code Styling
`jupyter_core` has adopted automatic code formatting so you shouldn't
need to worry too much about your code style.
As long as your code is valid,
the pre-commit hook should take care of how it should look.
`pre-commit` and its associated hooks will automatically be installed when
you run `pip install -e ".[test]"`
To install `pre-commit` manually, run the following:
```bash
pip install pre-commit
pre-commit install
```
You can invoke the pre-commit hook by hand at any time with:
```bash
pre-commit run
```
which should run any autoformatting on your code
and tell you about any errors it couldn't fix automatically.
You may also install [black integration](https://github.com/psf/black#editor-integration)
into your text editor to format code automatically.
If you have already committed files before setting up the pre-commit
hook with `pre-commit install`, you can fix everything up using
`pre-commit run --all-files`. You need to make the fixing commit
yourself after that.
## Documentation
The documentation of Jupyter Core is generated from the files in `docs/` using Sphinx. Instructions for setting up Sphinx with a selection of optional modules are in the [Documentation Guide](https://docs.jupyter.org/en/latest/contributing/content-contributor.html). You'll also need the `make` command.
For a minimal Sphinx installation to process the Jupyter Core docs, execute:
```
pip install sphinx
```
The following commands build the documentation in HTML format and check for broken links:
```
cd /my/projects/jupyter_core/docs/
make html linkcheck
```
Point your browser to the following URL to access the generated documentation:
_file:///my/projects/jupyter_core/docs/\_build/html/index.html_
## About the Jupyter Development Team
The Jupyter Development Team is the set of all contributors to the Jupyter
project. This includes all of the Jupyter subprojects. A full list with
details is kept in the documentation directory, in the file
`about/credits.txt`.
The core team that coordinates development on GitHub can be found here:
https://github.com/ipython/.
## Our Copyright Policy
Jupyter uses a shared copyright model. Each contributor maintains copyright
over their contributions to Jupyter. It is important to note that these
contributions are typically only changes to the repositories. Thus, the Jupyter
source code in its entirety is not the copyright of any single person or
institution. Instead, it is the collective copyright of the entire Jupyter
Development Team. If individual contributors want to maintain a record of what
changes/contributions they have specific copyright on, they should indicate
their copyright in the commit message of the change, when they commit the
change to one of the Jupyter repositories.
With this in mind, the following banner should be used in any source code file
to indicate the copyright and license terms:
```
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
```
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@jupyter-core@py3@README.md@.PATH_END.py
|
{
"filename": "test_multiproc.py",
"repo_name": "LSSTDESC/Imsim",
"repo_path": "Imsim_extracted/Imsim-main/tests/test_multiproc.py",
"type": "Python"
}
|
import os
import sys
from pathlib import Path
import logging
import unittest
import galsim
class MultiprocTestCase(unittest.TestCase):
"""TestCase class to test multiprocessing with imSim"""
def setUp(self):
self.output_dir = 'fits_multiproc_test'
self.only_dets = ['R22_S11', 'R22_S12']
self.expected_files = []
for det_num, det_name in enumerate(self.only_dets):
self.expected_files.extend(
[os.path.join(self.output_dir,
f'{prefix}_00161899-0-r-{det_name}'
f'-det{det_num:03d}.{suffix}')
for prefix, suffix in [('amp', 'fits.fz'),
('eimage', 'fits')]])
def tearDown(self):
"""Clean up test output files, if they exist."""
for item in self.expected_files:
if os.path.isfile(item):
os.remove(item)
if os.path.isdir(self.output_dir):
os.removedirs(self.output_dir)
def run_imsim(self):
imsim_dir = os.path.dirname(os.path.abspath(str(Path(__file__).parent)))
os.environ['SIMS_SED_LIBRARY_DIR'] \
= os.path.join(imsim_dir, 'tests', 'data', 'test_sed_library')
template = os.path.join(imsim_dir, 'config',
'imsim-config-instcat.yaml')
instcat_file = os.path.join(imsim_dir, 'tests', 'data',
'test_multiproc_instcat.txt')
logger = logging.getLogger('test_multiproc')
if len(logger.handlers) == 0:
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.CRITICAL) # silence the log messages
config = {'modules': ['imsim'],
'template': template,
'input.instance_catalog.file_name': instcat_file,
'input.opsim_data.file_name': instcat_file,
'input.tree_rings.only_dets': self.only_dets,
'input.checkpoint': '',
'input.atm_psf': '',
'psf': {'type': 'Convolve',
'items': [{'type': 'Gaussian',
'fwhm': 0.8},
{'type': 'Gaussian',
'fwhm': 0.3}]
},
'image.random_seed': 42,
'stamp.fft_sb_thresh': '1e5',
'output.only_dets': self.only_dets,
'output.det_num.first': 0,
'output.nfiles': 2,
'output.dir': self.output_dir,
'output.truth': '',
'output.nproc': 2,
}
galsim.config.Process(config, logger=logger, except_abort=True)
def test_multiproc(self):
"""Run the 2-process test"""
self.run_imsim()
# Check that expected files exist.
for item in self.expected_files:
self.assertTrue(os.path.isfile(item))
# Allow running this test module directly: `python test_multiproc.py`.
if __name__ == "__main__":
    unittest.main()
|
LSSTDESCREPO_NAMEImsimPATH_START.@Imsim_extracted@Imsim-main@tests@test_multiproc.py@.PATH_END.py
|
{
"filename": "_scaleanchor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/yaxis/_scaleanchor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ScaleanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``layout.yaxis.scaleanchor`` property."""

    def __init__(self, plotly_name="scaleanchor", parent_name="layout.yaxis", **kwargs):
        # Accepted values: an x/y axis reference pattern (optionally with a
        # " domain" suffix), or False to disable scale anchoring.  Callers may
        # override both the value set and the edit type via **kwargs.
        allowed = kwargs.pop(
            "values",
            [
                "/^x([2-9]|[1-9][0-9]+)?( domain)?$/",
                "/^y([2-9]|[1-9][0-9]+)?( domain)?$/",
                False,
            ],
        )
        edit_type = kwargs.pop("edit_type", "plot")
        super(ScaleanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@yaxis@_scaleanchor.py@.PATH_END.py
|
{
"filename": "justplotit.py",
"repo_name": "natashabatalha/PandExo",
"repo_path": "PandExo_extracted/PandExo-master/pandexo/engine/justplotit.py",
"type": "Python"
}
|
from bokeh.plotting import show
from bokeh.plotting import figure as Figure
from bokeh.io import output_file as outputfile
from bokeh.io import output_notebook as outnotebook
import pickle as pk
import numpy as np
from bokeh.layouts import row
import pandas as pd
def jwst_1d_spec(result_dict, model=True, title='Model + Data + Error Bars', output_file = 'data.html',legend = False,
                 R=False, num_tran = False, plot_width=800, plot_height=400,x_range=[1,10],y_range=None, plot=True,
                 output_notebook=True):
    """Plots 1d simulated spectrum and rebin or rescale for more transits

    Plots 1d data points with model in the background (if wanted). Designed to read in exact
    output of run_pandexo.

    Parameters
    ----------
    result_dict : dict or list of dict
        Dictionary from pandexo output. If parameter space was run in run_pandexo
        make sure to restructure the input as a list of dictionaries without they key words
        that run_pandexo assigns.
    model : bool
        (Optional) True is default. True plots model, False does not plot model
    title : str
        (Optional) Title of plot. Default is "Model + Data + Error Bars".
    output_file : str
        (Optional) name of html file for you bokeh plot. After bokeh plot is rendered you will
        have the option to save as png.
    legend : bool
        (Optional) Default is False. True, plots legend.
    R : float
        (Optional) Rebin data from native instrument resolution to specified resolution. Dafult is False,
        no binning. Here I adopt R as w[1]/(w[2] - w[0]) to maintain consistency with `pandeia.engine`
    num_tran : float
        (Optional) Scales data by number of transits to improve error by sqrt(`num_trans`)
    plot_width : int
        (Optional) Sets the width of the plot. Default = 800
    plot_height : int
        (Optional) Sets the height of the plot. Default = 400
    y_range : list of int
        (Optional) sets y range of plot. Defaut is +- 10% of max and min
    x_range : list of int
        (Optional) Sets x range of plot. Default = [1,10]
    plot : bool
        (Optional) Supresses the plot if not wanted (Default = True)
    out_notebook : bool
        (Optional) Output notebook. Default is false, if true, outputs in the notebook

    Returns
    -------
    x,y,e : list of arrays
        Returns wave axis, spectrum and associated error in list format. x[0] will be correspond
        to the first dictionary input, x[1] to the second, etc.

    Examples
    --------
    >>> jwst_1d_spec(result_dict, num_tran = 3, R = 35) #for a single plot

    If you wanted to save each of the axis that were being plotted:

    >>> x,y,e = jwst_1d_data([result_dict1, result_dict2], model=False, num_tran = 5, R = 100) #for multiple

    See Also
    --------
    jwst_noise, jwst_1d_bkg, jwst_1d_flux, jwst_1d_snr, jwst_2d_det, jwst_2d_sat
    """
    # Accumulators for the wave / spectrum / error arrays of every input dict.
    outx=[]
    outy=[]
    oute=[]
    TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
    # Route the bokeh output: notebook vs. standalone html file (only if plotting).
    if output_notebook & plot:
        outnotebook()
    elif plot:
        outputfile(output_file)
    colors = ['black','blue','red','orange','yellow','purple','pink','cyan','grey','brown']
    #make sure its iterable
    if type(result_dict) != list:
        result_dict = [result_dict]
    # A non-bool `legend` is interpreted as the legend label(s) themselves.
    if type(legend)!=bool:
        legend_keys = legend
        legend = True
        if type(legend_keys) != list:
            legend_keys = [legend_keys]
    i = 0
    for dictt in result_dict:
        ntran_old = dictt['timing']['Number of Transits']
        to = dictt['timing']["Num Integrations Out of Transit"]
        ti = dictt['timing']["Num Integrations In Transit"]
        #remove any nans
        y = dictt['FinalSpectrum']['spectrum_w_rand']
        x = dictt['FinalSpectrum']['wave'][~np.isnan(y)]
        err = dictt['FinalSpectrum']['error_w_floor'][~np.isnan(y)]
        y = y[~np.isnan(y)]
        # Four cases: keep as-is, rebin AND rescale, rescale only, rebin only.
        if (R == False) & (num_tran == False):
            x=x
            y=y
        elif (R != False) & (num_tran != False):
            # Rebin raw electron counts to resolution R, rescale to num_tran
            # transits, then regenerate the spectrum with fresh random noise.
            new_wave = bin_wave_to_R(x, R)
            out = uniform_tophat_sum(new_wave,x, dictt['RawData']['electrons_out']*num_tran/ntran_old)
            inn = uniform_tophat_sum(new_wave,x, dictt['RawData']['electrons_in']*num_tran/ntran_old)
            vout = uniform_tophat_sum(new_wave,x, dictt['RawData']['var_out']*num_tran/ntran_old)
            vin = uniform_tophat_sum(new_wave,x, dictt['RawData']['var_in']*num_tran/ntran_old)
            var_tot = (to/ti/out)**2.0 * vin + (inn*to/ti/out**2.0)**2.0 * vout
            # Emission (fp/f*) spectra get their sign flipped relative to transit depth.
            if dictt['input']['Primary/Secondary']=='fp/f*':
                fac = -1.0
            else:
                fac = 1.0
            rand_noise = np.sqrt((var_tot))*(np.random.randn(len(new_wave)))
            raw_spec = (out/to-inn/ti)/(out/to)
            sim_spec = fac*(raw_spec + rand_noise )
            x = new_wave
            y = sim_spec
            err = np.sqrt(var_tot)
        elif (R == False) & (num_tran != False):
            # Rescale only: same wavelength grid, counts scaled to num_tran transits.
            out = dictt['RawData']['electrons_out']*num_tran/ntran_old
            inn = dictt['RawData']['electrons_in']*num_tran/ntran_old
            vout = dictt['RawData']['var_out']*num_tran/ntran_old
            vin = dictt['RawData']['var_in']*num_tran/ntran_old
            var_tot = (to/ti/out)**2.0 * vin + (inn*to/ti/out**2.0)**2.0 * vout
            if dictt['input']['Primary/Secondary']=='fp/f*':
                fac = -1.0
            else:
                fac = 1.0
            rand_noise = np.sqrt((var_tot))*(np.random.randn(len(x)))
            raw_spec = (out/to-inn/ti)/(out/to)
            sim_spec = fac*(raw_spec + rand_noise )
            x = x
            y = sim_spec
            err = np.sqrt(var_tot)
        elif (R != False) & (num_tran == False):
            # Rebin only: counts summed onto the new wavelength grid.
            new_wave = bin_wave_to_R(x, R)
            out = uniform_tophat_sum(new_wave,x, dictt['RawData']['electrons_out'])
            inn = uniform_tophat_sum(new_wave,x, dictt['RawData']['electrons_in'])
            vout = uniform_tophat_sum(new_wave,x, dictt['RawData']['var_out'])
            vin = uniform_tophat_sum(new_wave,x, dictt['RawData']['var_in'])
            var_tot = (to/ti/out)**2.0 * vin + (inn*to/ti/out**2.0)**2.0 * vout
            if dictt['input']['Primary/Secondary']=='fp/f*':
                fac = -1.0
            else:
                fac = 1.0
            rand_noise = np.sqrt((var_tot))*(np.random.randn(len(new_wave)))
            raw_spec = (out/to-inn/ti)/(out/to)
            sim_spec = fac*(raw_spec + rand_noise )
            x = new_wave
            y = sim_spec
            err = np.sqrt(var_tot)
        else:
            print("Something went wrong. Cannot enter both resolution and ask to bin to new wave")
            return
        x = np.array(x,dtype=float)
        y = np.array(y,dtype=float)
        err= np.array(err,dtype=float)
        #create error bars for Bokeh's multi_line and drop nans
        data = pd.DataFrame({'x':x, 'y':y,'err':err}).dropna()
        y_err = []
        x_err = []
        # Each error bar is a vertical 2-point segment for bokeh's multi_line.
        # NOTE(review): np.array(...) around list.append is a no-op wrapper —
        # append returns None; the wrapper can be dropped without effect.
        for px, py, yerr in zip(data['x'], data['y'], data['err']):
            np.array(x_err.append((px, px)))
            np.array(y_err.append((py - yerr, py + yerr)))
        #initialize Figure
        # Build the figure only for the first dictionary; later ones overplot.
        if i == 0:
            #Define units for x and y axis
            y_axis_label = dictt['input']['Primary/Secondary']
            # NOTE(review): `p` is assigned here but never used — dead branch.
            if y_axis_label == 'fp/f*': p = -1.0
            else: y_axis_label = y_axis_label
            if dictt['input']['Calculation Type'] =='phase_spec':
                x_axis_label='Time (secs)'
                x_range = [min(x), max(x)]
            else:
                x_axis_label='Wavelength [microns]'
            # Default y limits: model min/max padded by 10% unless user-supplied.
            if y_range!=None:
                ylims = y_range
            else:
                ylims = [min(dictt['OriginalInput']['model_spec'])- 0.1*min(dictt['OriginalInput']['model_spec']),
                         0.1*max(dictt['OriginalInput']['model_spec'])+max(dictt['OriginalInput']['model_spec'])]
            fig1d = Figure(x_range=x_range, y_range = ylims,
                           width = plot_width, height =plot_height,title=title,x_axis_label=x_axis_label,
                           y_axis_label = y_axis_label, tools=TOOLS, background_fill_color = 'white')
        #plot model, data, and errors
        if model:
            mxx = dictt['OriginalInput']['model_wave']
            myy = dictt['OriginalInput']['model_spec']
            # Average the model onto the data's wavelength grid before plotting.
            my = uniform_tophat_mean(x, mxx,myy)
            model_line = pd.DataFrame({'x':x, 'my':my}).dropna()
            fig1d.line(model_line['x'],model_line['my'], color=colors[i],alpha=0.2, line_width = 4)
        if legend:
            fig1d.circle(data['x'], data['y'], color=colors[i], legend = legend_keys[i])
        else:
            fig1d.circle(data['x'], data['y'], color=colors[i])
        outx += [data['x'].values]
        outy += [data['y'].values]
        oute += [data['err'].values]
        fig1d.multi_line(x_err, y_err,color=colors[i])
        i += 1
    if plot:
        show(fig1d)
    return outx,outy,oute
def bin_wave_to_R(w, R):
"""Creates new wavelength axis at specified resolution
Parameters
----------
w : list of float or numpy array of float
Wavelength axis to be rebinned
R : float or int
Resolution to bin axis to
Returns
-------
list of float
New wavelength axis at specified resolution
Examples
--------
>>> newwave = bin_wave_to_R(np.linspace(1,2,1000), 10)
>>> print(len(newwave))
11
"""
wave = []
tracker = min(w)
i = 1
ind= 0
firsttime = True
while(tracker<max(w)):
if i <len(w)-1:
dlambda = w[i]-w[ind]
newR = w[i]/dlambda
if (newR < R) & (firsttime):
tracker = w[ind]
wave += [tracker]
ind += 1
i += 1
firsttime = True
elif newR < R:
tracker = w[ind]+dlambda/2.0
wave +=[tracker]
ind = (np.abs(w-tracker)).argmin()
i = ind+1
firsttime = True
else:
firsttime = False
i+=1
else:
tracker = max(w)
wave += [tracker]
return wave
def uniform_tophat_sum(xnew, x, y):
    """Adapted from Mike R. Line to rebin spectra

    Sums all old-grid samples that fall inside each new-grid top-hat bin.

    Parameters
    ----------
    xnew : list of float or numpy array of float
        New wavelength grid to rebin to
    x : list of float or numpy array of float
        Old wavelength grid to get rid of
    y : list of float or numpy array of float
        Values sampled on the old grid `x`; each new bin gets their sum

    Returns
    -------
    array of floats
        new y axis

    Examples
    --------
    >>> oldgrid = np.linspace(1,3,100)
    >>> y = np.zeros(100)+10.0
    >>> newy = uniform_tophat_sum(np.linspace(2,3,3), oldgrid, y)
    >>> newy
    array([ 240.,  250.,  130.])
    """
    xnew = np.array(xnew)
    # Accept plain Python lists as well as arrays (boolean masking below
    # requires ndarray semantics).
    x = np.asarray(x)
    y = np.asarray(y)
    szmod = xnew.shape[0]
    # Bin widths from forward differences; last width copied from its neighbor.
    delta = np.zeros(szmod)
    ynew = np.zeros(szmod)
    delta[0:-1] = xnew[1:] - xnew[:-1]
    delta[szmod-1] = delta[szmod-2]
    for i in range(1, szmod):
        # Sum everything between half the previous bin width below the center
        # and half this bin width above it.
        loc = np.where((x >= xnew[i]-0.5*delta[i-1]) & (x < xnew[i]+0.5*delta[i]))
        ynew[i] = np.sum(y[loc])
    # First bin handled separately (strict > on the lower edge).
    loc = np.where((x > xnew[0]-0.5*delta[0]) & (x < xnew[0]+0.5*delta[0]))
    ynew[0] = np.sum(y[loc])
    return ynew
def uniform_tophat_mean(xnew, x, y):
    """Adapted from Mike R. Line to rebin spectra

    Averages all old-grid samples that fall inside each new-grid top-hat bin.

    Parameters
    ----------
    xnew : list of float or numpy array of float
        New wavelength grid to rebin to
    x : list of float or numpy array of float
        Old wavelength grid to get rid of
    y : list of float or numpy array of float
        Values sampled on the old grid `x`; each new bin gets their mean

    Returns
    -------
    array of floats
        new y axis

    Examples
    --------
    >>> oldgrid = np.linspace(1,3,100)
    >>> y = np.zeros(100)+10.0
    >>> newy = uniform_tophat_mean(np.linspace(2,3,3), oldgrid, y)
    >>> newy
    array([ 10.,  10.,  10.])
    """
    xnew = np.array(xnew)
    # Accept plain Python lists as well as arrays (boolean masking below
    # requires ndarray semantics).
    x = np.asarray(x)
    y = np.asarray(y)
    szmod = xnew.shape[0]
    # Bin widths from forward differences; last width copied from its neighbor.
    delta = np.zeros(szmod)
    ynew = np.zeros(szmod)
    delta[0:-1] = xnew[1:] - xnew[:-1]
    delta[szmod-1] = delta[szmod-2]
    for i in range(1, szmod):
        # Average everything between half the previous bin width below the
        # center and half this bin width above it.
        loc = np.where((x >= xnew[i]-0.5*delta[i-1]) & (x < xnew[i]+0.5*delta[i]))
        ynew[i] = np.mean(y[loc])
    # First bin handled separately (strict > on the lower edge).
    loc = np.where((x > xnew[0]-0.5*delta[0]) & (x < xnew[0]+0.5*delta[0]))
    ynew[0] = np.mean(y[loc])
    return ynew
def jwst_1d_flux(result_dict, plot=True, output_file= 'flux.html'):
    """Plot the out-of-transit extracted flux rate in electrons/s.

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output. If parameter space was run in run_pandexo
        make sure to restructure the input as a list of dictionaries without they key words
        that run_pandexo assigns.
    plot : bool
        (Optional) True renders plot, Flase does not. Default=True
    output_file : str
        (Optional) Default = 'flux.html'

    Return
    ------
    x : numpy array
        micron
    y : numpy array
        1D flux rate in electrons/s

    See Also
    --------
    jwst_1d_spec, jwst_1d_bkg, jwst_noise, jwst_1d_snr, jwst_2d_det, jwst_2d_sat
    """
    pandeia_out = result_dict['PandeiaOutTrans']
    # 1d extracted flux; drop wavelengths where the flux is NaN.
    wave, flux = pandeia_out['1d']['extracted_flux']
    good = ~np.isnan(flux)
    wave = wave[good]
    flux = flux[good]
    flux_fig = Figure(tools="pan,wheel_zoom,box_zoom,reset,save",
                      x_axis_label='Wavelength [microns]',
                      y_axis_label='Flux (e/s)', title="Out of Transit Flux Rate",
                      width=800, height=300)
    flux_fig.line(wave, flux, line_width = 4, alpha = .7)
    if plot:
        outputfile(output_file)
        show(flux_fig)
    return wave, flux
def jwst_1d_snr(result_dict, plot=True, output_file='snr.html'):
    """Plot the out-of-transit signal-to-noise ratio versus wavelength.

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output. If parameter space was run in run_pandexo
        make sure to restructure the input as a list of dictionaries without they key words
        that run_pandexo assigns.
    plot : bool
        (Optional) True renders plot, Flase does not. Default=True
    output_file : str
        (Optional) Default = 'snr.html'

    Return
    ------
    x : numpy array
        micron
    y : numpy array
        1D SNR

    See Also
    --------
    jwst_1d_bkg, jwst_noise, jwst_1d_flux, jwst_1d_spec, jwst_2d_det, jwst_2d_sat
    """
    raw = result_dict['RawData']
    wave = raw['wave']
    # SNR = out-of-transit signal over the square root of its variance.
    snr = raw['electrons_out']/np.sqrt(raw['var_out'])
    good = ~np.isnan(snr)
    wave = wave[good]
    snr = snr[good]
    snr_fig = Figure(tools="pan,wheel_zoom,box_zoom,reset,save",
                     x_axis_label='Wavelength (micron)',
                     y_axis_label='SNR', title="SNR Out of Trans",
                     width=800, height=300)
    snr_fig.line(wave, snr, line_width = 4, alpha = .7)
    if plot:
        outputfile(output_file)
        show(snr_fig)
    return wave, snr
def jwst_1d_bkg(result_dict, plot=True, output_file='bkg.html'):
    """Plot background

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output. If parameter space was run in run_pandexo
        make sure to restructure the input as a list of dictionaries without they key words
        that run_pandexo assigns.
    plot : bool
        (Optional) True renders plot, Flase does not. Default=True
    output_file : str
        (Optional) Default = bkt.html

    Return
    ------
    x : numpy array
        micron
    y : numpy array
        1D bakground e/s

    See Also
    --------
    jwst_1d_spec, jwst_noise, jwst_1d_flux, jwst_1d_snr, jwst_2d_det, jwst_2d_sat
    """
    TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
    # BG 1d
    out = result_dict['PandeiaOutTrans']
    x, y = out['1d']['extracted_bg_only']
    # BUG FIX: mask x with the NaN pattern of the *original* y before shrinking
    # y. The previous code filtered y first, so x was then masked with an
    # already NaN-free array of a different length (cf. jwst_1d_flux /
    # jwst_1d_snr, which do this in the correct order).
    x = x[~np.isnan(y)]
    y = y[~np.isnan(y)]
    plot_bg_1d1 = Figure(tools=TOOLS,
                         x_axis_label='Wavelength [microns]',
                         y_axis_label='Flux (e/s)', title="Background",
                         width=800, height=300)
    plot_bg_1d1.line(x, y, line_width = 4, alpha = .7)
    if plot:
        outputfile(output_file)
        show(plot_bg_1d1)
    return x,y
def jwst_noise(result_dict, plot=True, output_file= 'noise.html'):
    """Plot 1d noise curve: the error on the final spectrum, in ppm

    (Docstring summary corrected: this function plots the noise/error curve,
    not the background.)

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output. If parameter space was run in run_pandexo
        make sure to restructure the input as a list of dictionaries without they key words
        that run_pandexo assigns.
    plot : bool
        (Optional) True renders plot, Flase does not. Default=True
    output_file : str
        (Optional) Default = 'noise.html'

    Return
    ------
    x : numpy array
        micron
    y : numpy array
        1D noise (ppm)

    See Also
    --------
    jwst_1d_spec, jwst_1d_bkg, jwst_1d_flux, jwst_1d_snr, jwst_2d_det, jwst_2d_sat
    """
    TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
    # Error on the final spectrum, converted to parts per million.
    x = result_dict['FinalSpectrum']['wave']
    y = result_dict['FinalSpectrum']['error_w_floor']*1e6
    x = x[~np.isnan(y)]
    y = y[~np.isnan(y)]
    # Median noise sets the y-axis scale (0 to twice the median); computed once
    # (the original recomputed it a second time for no effect).
    ymed = np.median(y)
    plot_noise_1d1 = Figure(tools=TOOLS,#responsive=True,
                            x_axis_label='Wavelength (micron)',
                            y_axis_label='Error on Spectrum (PPM)', title="Error Curve",
                            width=800, height=300, y_range = [0,2.0*ymed])
    plot_noise_1d1.circle(x, y, line_width = 4, alpha = .7)
    if plot:
        outputfile(output_file)
        show(plot_noise_1d1)
    return x,y
def jwst_2d_det(result_dict, plot=True, output_file='det2d.html'):
    """Render the 2D out-of-transit detector image.

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output. If parameter space was run in run_pandexo
        make sure to restructure the input as a list of dictionaries without they key words
        that run_pandexo assigns.
    plot : bool
        (Optional) True renders plot, Flase does not. Default=True
    output_file : str
        (Optional) Default = 'det2d.html'

    Return
    ------
    numpy array
        2D array of out of transit detector simulation

    See Also
    --------
    jwst_1d_spec, jwst_1d_bkg, jwst_1d_flux, jwst_1d_snr, jwst_noise, jwst_2d_sat
    """
    detector_image = result_dict['PandeiaOutTrans']['2d']['detector']
    # MIRI spectra are dispersed vertically, so those get a tall canvas.
    if 'miri' in result_dict['input']['Instrument']:
        width, height = 300, 800
    else:
        width, height = 800, 300
    n_spatial, n_pix = detector_image.shape
    det_fig = Figure(tools="pan,wheel_zoom,box_zoom,reset,hover,save",
                     x_range=[0, n_pix], y_range=[0, n_spatial],
                     x_axis_label='Pixel', y_axis_label='Spatial',
                     title="2D Detector Image",
                     width=width, height=height)
    det_fig.image(image=[detector_image], x=[0], y=[0], dh=[n_spatial], dw=[n_pix],
                  palette="Spectral11")
    if plot:
        outputfile(output_file)
        show(det_fig)
    return detector_image
def jwst_2d_sat(result_dict, plot=True, output_file='sat2d.html'):
    """Render the 2D saturation profile.

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output. If parameter space was run in run_pandexo
        make sure to restructure the input as a list of dictionaries without they key words
        that run_pandexo assigns.
    plot : bool
        (Optional) True renders plot, Flase does not. Default=True
    output_file : str
        (Optional) Default = 'sat2d.html'

    Return
    ------
    numpy array
        2D array of out of transit detector simulation

    See Also
    --------
    jwst_1d_spec, jwst_1d_bkg, jwst_1d_flux, jwst_1d_snr, jwst_2d_det, jwst_noise
    """
    sat_image = result_dict['PandeiaOutTrans']['2d']['saturation']
    n_spatial, n_pix = sat_image.shape
    # MIRI spectra are dispersed vertically, so those get a tall canvas.
    width, height = (300, 800) if 'miri' in result_dict['input']['Instrument'] else (800, 300)
    sat_fig = Figure(tools="pan,wheel_zoom,box_zoom,reset,save",
                     x_range=[0, n_pix], y_range=[0, n_spatial],
                     x_axis_label='Pixel', y_axis_label='Spatial',
                     title="Saturation",
                     width=width, height=height)
    sat_fig.image(image=[sat_image], x=[0], y=[0], dh=[n_spatial], dw=[n_pix],
                  palette="Spectral11")
    if plot:
        outputfile(output_file)
        show(sat_fig)
    return sat_image
def hst_spec(result_dict, plot=True, output_file ='hstspec.html', model = True, output_notebook=True):
    """Plot 1d spec with error bars for hst

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output.
    plot : bool
        (Optional) True renders plot, False does not. Default=True
    model : bool
        (Optional) Plot model under data. Default=True
    output_file : str
        (Optional) Default = 'hstspec.html'
    output_notebook : bool
        (Optional) Default true, plots in notebook

    Return
    ------
    x : numpy array
        micron
    y : numpy array
        1D spec fp/f* or rp^2/r*^2
    e : numpy array
        1D rms noise
    modelx : numpy array
        micron
    modely : numpy array
        1D spec fp/f* or rp^2/r*^2

    See Also
    --------
    hst_time
    """
    TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
    #plot planet spectrum
    mwave = result_dict['planet_spec']['model_wave']
    mspec = result_dict['planet_spec']['model_spec']
    binwave = result_dict['planet_spec']['binwave']
    binspec = result_dict['planet_spec']['binspec']
    error = result_dict['planet_spec']['error']
    # Broadcast the (scalar) error to one value per binned data point.
    error = np.zeros(len(binspec))+ error
    # x limits from the stored wavelength window; y limits padded by 2 sigma.
    xlims = [result_dict['planet_spec']['wmin'], result_dict['planet_spec']['wmax']]
    ylims = [np.min(binspec)-2.0*error[0], np.max(binspec)+2.0*error[0]]
    plot_spectrum = Figure(width=800, height=300, x_range=xlims,
                           y_range=ylims, tools=TOOLS,#responsive=True,
                           x_axis_label='Wavelength [microns]',
                           y_axis_label='Ratio',
                           title="Original Model with Observation")
    # Each error bar is a vertical 2-point segment for bokeh's multi_line.
    # NOTE(review): np.array(...) around list.append is a no-op wrapper —
    # append returns None; the wrapper can be dropped without effect.
    y_err = []
    x_err = []
    for px, py, yerr in zip(binwave, binspec, error):
        np.array(x_err.append((px, px)))
        np.array(y_err.append((py - yerr, py + yerr)))
    if model:
        plot_spectrum.line(mwave,mspec, color= "black", alpha = 0.5, line_width = 4)
    plot_spectrum.circle(binwave,binspec, line_width=3, line_alpha=0.6)
    plot_spectrum.multi_line(x_err, y_err)
    # Route the bokeh output: notebook vs. standalone html file (only if plotting).
    if output_notebook & plot:
        outnotebook()
        show(plot_spectrum)
    elif plot:
        outputfile(output_file)
        show(plot_spectrum)
    return binwave, binspec, error, mwave, mspec
def hst_time(result_dict, plot=True, output_file ='hsttime.html', model = True, output_notebook=True):
    """Plot earliest and latest start times for hst observation

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output.
    plot : bool
        (Optional) True renders plot, False does not. Default=True
    model : bool
        (Optional) Plot model under data. Default=True
    output_file : str
        (Optional) Default = 'hsttime.html'
    output_notebook : bool
        (Optional) Default true, plots in notebook

    Return
    ------
    obsphase1 : numpy array
        earliest start time
    obstr1 : numpy array
        white light curve
    obsphase2 : numpy array
        latest start time
    obstr2 : numpy array
        white light curve
    rms : numpy array
        1D rms noise

    See Also
    --------
    hst_spec
    """
    TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
    #earliest and latest start times
    obsphase1 = result_dict['calc_start_window']['obsphase1']
    obstr1 = result_dict['calc_start_window']['obstr1']
    rms = result_dict['calc_start_window']['light_curve_rms']
    obsphase2 = result_dict['calc_start_window']['obsphase2']
    obstr2 = result_dict['calc_start_window']['obstr2']
    phase1 = result_dict['calc_start_window']['phase1']
    phase2 = result_dict['calc_start_window']['phase2']
    trmodel1 = result_dict['calc_start_window']['trmodel1']
    trmodel2 = result_dict['calc_start_window']['trmodel2']
    # A scalar rms is broadcast to one value per observed phase point.
    if isinstance(rms, float):
        rms = np.zeros(len(obsphase1))+rms
    # Vertical 2-point error-bar segments for bokeh's multi_line.
    # NOTE(review): np.array(...) around list.append is a no-op wrapper —
    # append returns None; the wrapper can be dropped without effect.
    y_err1 = []
    x_err1 = []
    for px, py, yerr in zip(obsphase1, obstr1, rms):
        np.array(x_err1.append((px, px)))
        np.array(y_err1.append((py - yerr, py + yerr)))
    y_err2 = []
    x_err2 = []
    for px, py, yerr in zip(obsphase2, obstr2, rms):
        np.array(x_err2.append((px, px)))
        np.array(y_err2.append((py - yerr, py + yerr)))
    early = Figure(width=400, height=300,
                   tools=TOOLS,#responsive=True,
                   x_axis_label='Orbital Phase',
                   y_axis_label='Flux',
                   title="Earliest Start Time")
    if model: early.line(phase1, trmodel1, color='black',alpha=0.5, line_width = 4)
    early.circle(obsphase1, obstr1, line_width=3, line_alpha=0.6)
    early.multi_line(x_err1, y_err1)
    late = Figure(width=400, height=300,
                  tools=TOOLS,#responsive=True,
                  x_axis_label='Orbital Phase',
                  y_axis_label='Flux',
                  title="Latest Start Time")
    if model: late.line(phase2, trmodel2, color='black',alpha=0.5, line_width = 3)
    late.circle(obsphase2, obstr2, line_width=3, line_alpha=0.6)
    late.multi_line(x_err2, y_err2)
    # Show the two light curves side by side.
    start_time = row(early, late)
    # Route the bokeh output: notebook vs. standalone html file (only if plotting).
    if output_notebook & plot:
        outnotebook()
        show(start_time)
    elif plot:
        outputfile(output_file)
        show(start_time)
    return obsphase1, obstr1, obsphase2, obstr2,rms
def hst_simulated_lightcurve(result_dict, plot=True, output_file ='hsttime.html', model = True, output_notebook=True):
    """Plot simulated HST light curves (in fluece) for earliest and latest start times

    Parameters
    ----------
    result_dict : dict
        Dictionary from pandexo output.
    plot : bool
        (Optional) True renders plot, False does not. Default=True
    model : bool
        (Optional) Plot model under data. Default=True
    output_file : str
        (Optional) Default = 'hsttime.html'
    output_notebook : bool
        (Optional) Default true, plots in notebook

    Return
    ------
    obsphase1 : numpy array
        earliest start time
    counts1 : numpy array
        white light curve in fluence (e/pixel)
    obsphase2 : numpy array
        latest start time
    counts2 : numpy array
        white light curve in fluence (e/pixel)
    rms : numpy array
        1D rms noise

    See Also
    --------
    hst_spec
    """
    TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
    # earliest and latest start times
    obsphase1 = result_dict['light_curve']['obsphase1']
    rms = result_dict['light_curve']['light_curve_rms']
    obsphase2 = result_dict['light_curve']['obsphase2']
    phase1 = result_dict['light_curve']['phase1']
    phase2 = result_dict['light_curve']['phase2']
    counts1 = result_dict['light_curve']['counts1']
    counts2 = result_dict['light_curve']['counts2']
    count_noise = result_dict['light_curve']['count_noise']
    ramp_included = result_dict['light_curve']['ramp_included']
    model_counts1 = result_dict['light_curve']['model_counts1']
    model_counts2 = result_dict['light_curve']['model_counts2']
    # A scalar count noise replaces rms, broadcast to one value per point.
    if isinstance(count_noise, float):
        rms = np.zeros(len(counts1)) + count_noise
    # Vertical 2-point error-bar segments for bokeh's multi_line.
    # NOTE(review): np.array(...) around list.append is a no-op wrapper —
    # append returns None; the wrapper can be dropped without effect.
    y_err1 = []
    x_err1 = []
    for px, py, yerr in zip(obsphase1, counts1, rms):
        np.array(x_err1.append((px, px)))
        np.array(y_err1.append((py - yerr, py + yerr)))
    y_err2 = []
    x_err2 = []
    for px, py, yerr in zip(obsphase2, counts2, rms):
        np.array(x_err2.append((px, px)))
        np.array(y_err2.append((py - yerr, py + yerr)))
    # Flag in the titles whether the instrument ramp systematic is present.
    if ramp_included:
        title_description = " (Ramp Included)"
    else:
        title_description =" (Ramp Removed)"
    early = Figure(width=400, height=300,
                   tools=TOOLS,#responsive=True,
                   x_axis_label='Orbital Phase',
                   y_axis_label='Flux [electrons/pixel]',
                   title="Earliest Start Time" + title_description)
    if model:
        early.line(phase1, model_counts1, color='black', alpha=0.5, line_width=4)
    early.circle(obsphase1, counts1, line_width=3, line_alpha=0.6)
    early.multi_line(x_err1, y_err1)
    late = Figure(width=400, height=300,
                  tools=TOOLS, # responsive=True,
                  x_axis_label='Orbital Phase',
                  y_axis_label='Flux [electrons/pixel]',
                  title="Latest Start Time" + title_description)
    if model:
        late.line(phase2, model_counts2, color='black', alpha=0.5, line_width=3)
    late.circle(obsphase2, counts2, line_width=3, line_alpha=0.6)
    late.multi_line(x_err2, y_err2)
    # Show the two light curves side by side.
    start_time = row(early, late)
    # Route the bokeh output: notebook vs. standalone html file (only if plotting).
    if plot:
        if output_notebook:
            outnotebook()
        else:
            outputfile(output_file)
        show(start_time)
    return obsphase1, counts1, obsphase2, counts2, rms
|
natashabatalhaREPO_NAMEPandExoPATH_START.@PandExo_extracted@PandExo-master@pandexo@engine@justplotit.py@.PATH_END.py
|
{
"filename": "_funnel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/_funnel.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FunnelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="funnel", parent_name="", **kwargs):
super(FunnelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Funnel"),
data_docs=kwargs.pop(
"data_docs",
"""
alignmentgroup
Set several traces linked to the same position
axis or matching axes to the same
alignmentgroup. This controls whether bars
compute their positional range dependently or
independently.
cliponaxis
Determines whether the text nodes are clipped
about the subplot axes. To show the text nodes
above axis lines and tick labels, make sure to
set `xaxis.layer` and `yaxis.layer` to *below
traces*.
connector
:class:`plotly.graph_objects.funnel.Connector`
instance or dict with compatible properties
constraintext
Constrain the size of text inside or outside a
bar to be no larger than the bar itself.
customdata
Assigns extra data each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
dx
Sets the x coordinate step. See `x0` for more
info.
dy
Sets the y coordinate step. See `y0` for more
info.
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`plotly.graph_objects.funnel.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event
data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
variables `percentInitial`, `percentPrevious`
and `percentTotal`. Anything contained in tag
`<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>".
To hide the secondary box completely, use an
empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for hovertemplate .
hovertext
Sets hover text elements associated with each
(x,y) pair. If a single string, the same string
appears over all the data points. If an array
of string, the items are mapped in order to the
this trace's (x,y) coordinates. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
insidetextanchor
Determines if texts are kept at center or
start/end points in `textposition` "inside"
mode.
insidetextfont
Sets the font used for `text` lying inside the
bar.
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
marker
:class:`plotly.graph_objects.funnel.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
name
Sets the trace name. The trace name appear as
the legend item and on hover.
offset
Shifts the position where the bar is drawn (in
position axis units). In "group" barmode,
traces that set "offset" will be excluded and
drawn in "overlay" mode instead.
offsetgroup
Set several traces linked to the same position
axis or matching axes to the same offsetgroup
where bars of the same position coordinate will
line up.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the funnels. With "v"
("h"), the value of the each bar spans along
the vertical (horizontal). By default funnels
are tend to be oriented horizontally; unless
only "y" array is presented or orientation is
set to "v". Also regarding graphs including
only 'horizontal' funnels, "autorange" on the
"y-axis" are set to "reversed".
outsidetextfont
Sets the font used for `text` lying outside the
bar.
selectedpoints
Array containing integer indices of selected
points. Has an effect only for traces that
support selections. Note that an empty array
means an empty selection where the `unselected`
are turned on for all points, whereas, any
other non-array values means no selection all
where the `selected` and `unselected` styles
have no effect.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnel.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each (x,y)
pair. If a single string, the same string
appears over all the data points. If an array
of string, the items are mapped in order to the
this trace's (x,y) coordinates. If trace
`hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be
seen in the hover labels.
textangle
Sets the angle of the tick labels with respect
to the bar. For example, a `tickangle` of -90
draws the tick labels vertically. With "auto"
the texts may automatically be rotated to fit
with the maximum size in bars.
textfont
Sets the font used for `text`.
textinfo
Determines which trace information appear on
the graph. In the case of having multiple
funnels, percentages & totals are computed
separately (per trace).
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end
(rotated and scaled if needed). "outside"
positions `text` outside, next to the bar end
(scaled if needed), unless there is another bar
stacked on this one, then the text gets pushed
inside. "auto" tries to position `text` inside
the bar, but if the bar is too small and no bar
is stacked on this one the text is moved
outside.
textpositionsrc
Sets the source reference on Chart Studio Cloud
for textposition .
textsrc
Sets the source reference on Chart Studio Cloud
for text .
texttemplate
Template string used for rendering the
information text that appear on points. Note
that this will override `textinfo`. Variables
are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. Every attribute that can be
specified per-point (the ones that are
`arrayOk: true`) is available. Also available:
variables `percentInitial`, `percentPrevious`,
`percentTotal`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud
for texttemplate .
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
width
Sets the bar width (in position axis units).
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the
starting coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x
coordinates and a 2D cartesian x axis. If "x"
(the default value), the x coordinates refer to
`layout.xaxis`. If "x2", the x coordinates
refer to `layout.xaxis2`, and so on.
xperiod
Only relevant when the axis `type` is "date".
Sets the period positioning in milliseconds or
"M<n>" on the x axis. Special values in the
form of "M<n>" could be used to declare the
number of months. In this case `n` must be a
positive integer.
xperiod0
Only relevant when the axis `type` is "date".
Sets the base for period positioning in
milliseconds or date string on the x0 axis.
When `x0period` is round number of weeks, the
`x0period0` by default would be on a Sunday
i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date".
Sets the alignment of data points on the x
axis.
xsrc
Sets the source reference on Chart Studio Cloud
for x .
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the
starting coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y
coordinates and a 2D cartesian y axis. If "y"
(the default value), the y coordinates refer to
`layout.yaxis`. If "y2", the y coordinates
refer to `layout.yaxis2`, and so on.
yperiod
Only relevant when the axis `type` is "date".
Sets the period positioning in milliseconds or
"M<n>" on the y axis. Special values in the
form of "M<n>" could be used to declare the
number of months. In this case `n` must be a
positive integer.
yperiod0
Only relevant when the axis `type` is "date".
Sets the base for period positioning in
milliseconds or date string on the y0 axis.
When `y0period` is round number of weeks, the
`y0period0` by default would be on a Sunday
i.e. 2000-01-02, otherwise it would be at
2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date".
Sets the alignment of data points on the y
axis.
ysrc
Sets the source reference on Chart Studio Cloud
for y .
""",
),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@_funnel.py@.PATH_END.py
|
{
"filename": "_cmid.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergl/marker/_cmid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `scattergl.marker.cmid` number property."""

    def __init__(self, plotly_name="cmid", parent_name="scattergl.marker", **kwargs):
        # Pop each default out of kwargs so an explicit caller value wins.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        role = kwargs.pop("role", "info")
        super(CmidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergl@marker@_cmid.py@.PATH_END.py
|
{
"filename": "_xsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/_xsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `bar.xsrc` source-reference property."""

    def __init__(self, plotly_name="xsrc", parent_name="bar", **kwargs):
        # Default edit_type is "none"; a caller-supplied value takes precedence.
        edit_type = kwargs.pop("edit_type", "none")
        super(XsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@_xsrc.py@.PATH_END.py
|
{
"filename": "regtest.py",
"repo_name": "cmillion/gPhoton",
"repo_path": "gPhoton_extracted/gPhoton-master/gPhoton/tests/regtest/regtest.py",
"type": "Python"
}
|
"""
This is a full system regression test for gPhoton. It takes a while to run,
perhaps many hours. It will print progress as it goes and then a final verdict
of PASS/FAIL. You should run it from the terminal command line as a script like
> python regtest.py
"""
from __future__ import absolute_import, division, print_function
# Core and Third Party imports.
import os
import pandas as pd
import sys
sys.path=sys.path+['../..'] # Probably an awful hack that you should never do.
# gPhoton imports.
from gPhoton.gCalrun import calrun
print('GENERATING TEST CSV DATA (may take a while)')
for band in ['NUV','FUV']:
print('BAND: {b}'.format(b=band))
calrun('DB10_calrun_{b}_test.csv'.format(b=band),band,nsamples=1,seed=323,
rarange=[0,360],decrange=[53,90],verbose=1,calpath='../../../cal/')
print('BEGINNING REGRESSION TEST')
import more_test
print('DELETING TEST CSV DATA')
for band in ['NUV','FUV']:
os.system('rm DB10_calrun_{b}_test.csv'.format(b=band))
|
cmillionREPO_NAMEgPhotonPATH_START.@gPhoton_extracted@gPhoton-master@gPhoton@tests@regtest@regtest.py@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/polar/angularaxis/tickfont/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for `layout.polar.angularaxis.tickfont.weight` (font weight)."""

    def __init__(
        self,
        plotly_name="weight",
        parent_name="layout.polar.angularaxis.tickfont",
        **kwargs,
    ):
        # Resolve defaults first so explicit caller kwargs always win.
        edit_type = kwargs.pop("edit_type", "plot")
        # "normal"/"bold" are accepted alongside the integer range below.
        extras = kwargs.pop("extras", ["normal", "bold"])
        weight_max = kwargs.pop("max", 1000)
        weight_min = kwargs.pop("min", 1)
        super(WeightValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            extras=extras,
            max=weight_max,
            min=weight_min,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@polar@angularaxis@tickfont@_weight.py@.PATH_END.py
|
{
"filename": "_metasrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterternary/_metasrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `scatterternary.metasrc` source-reference property."""

    def __init__(self, plotly_name="metasrc", parent_name="scatterternary", **kwargs):
        # Pop the default so a caller-supplied edit_type takes precedence.
        edit_type = kwargs.pop("edit_type", "none")
        super(MetasrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterternary@_metasrc.py@.PATH_END.py
|
{
"filename": "singlecopy.py",
"repo_name": "desihub/fastspecfit",
"repo_path": "fastspecfit_extracted/fastspecfit-main/py/fastspecfit/singlecopy.py",
"type": "Python"
}
|
"""
fastspecfit.singlecopy
======================
Single-copy (per process) data structures read from files.
"""
from fastspecfit.cosmo import TabulatedDESI
from fastspecfit.igm import Inoue14
from fastspecfit.photometry import Photometry
from fastspecfit.linetable import LineTable
from fastspecfit.templates import Templates
from fastspecfit.logger import log, DEBUG
class Singletons(object):
    """Per-process container for single-copy, file-backed data structures.

    The object starts empty; :meth:`initialize` loads and caches the
    templates, emission-line table, photometry setup, cosmology table and
    IGM model as instance attributes so each worker process reads the
    files only once.
    """

    def __init__(self):
        # Intentionally empty: attributes are attached later by
        # initialize(), so construction itself does no file I/O.
        pass

    def initialize(self,
                   emlines_file=None,
                   fphotofile=None,
                   fastphot=False,
                   fitstack=False,
                   ignore_photometry=False,
                   template_file=None,
                   template_version=None,
                   template_imf=None,
                   log_verbose=False,
                   ):
        """Load and cache all single-copy data on this instance.

        Parameters
        ----------
        emlines_file : optional path to the emission-line table.
        fphotofile : optional path to the photometric filter/parameter file.
        fastphot : if True, configure templates for photometry-only fitting.
        fitstack : passed through to Photometry (stacked-spectra mode --
            TODO confirm exact semantics against Photometry).
        ignore_photometry : passed through to Photometry.
        template_file, template_version, template_imf : stellar template
            selection; presumably mutually exclusive with each other's
            defaults -- verify against Templates.
        log_verbose : if True, raise the module logger to DEBUG level.
        """
        # adjust logging level if requested
        if log_verbose:
            log.setLevel(DEBUG)
        # templates for continuum fitting
        self.templates = Templates(template_file=template_file,
                                   template_version=template_version,
                                   imf=template_imf,
                                   mintemplatewave=None,
                                   maxtemplatewave=40e4,
                                   fastphot=fastphot)
        log.debug(f'Cached stellar templates {self.templates.file}')
        # emission line table
        self.emlines = LineTable(emlines_file)
        log.debug(f'Cached emission-line table {self.emlines.file}')
        # photometry
        self.photometry = Photometry(fphotofile, fitstack,
                                     ignore_photometry)
        log.debug(f'Cached photometric filters and parameters {self.photometry.fphotofile}')
        # fiducial cosmology
        self.cosmology = TabulatedDESI()
        log.debug(f'Cached cosmology table {self.cosmology.file}')
        # IGM model
        self.igm = Inoue14()
        log.debug(f'Cached {self.igm.reference} IGM attenuation parameters.')
# Module-level singleton holding the per-process single-copy data; it starts
# empty and is populated by calling sc_data.initialize() once per process.
sc_data = Singletons()
|
desihubREPO_NAMEfastspecfitPATH_START.@fastspecfit_extracted@fastspecfit-main@py@fastspecfit@singlecopy.py@.PATH_END.py
|
{
"filename": "superbol-py2.py",
"repo_name": "mnicholl/superbol",
"repo_path": "superbol_extracted/superbol-master/superbol-py2.py",
"type": "Python"
}
|
#!/usr/bin/env python
version = '1.8 '
'''
SUPERBOL: Supernova Bolometric Light Curves
Written by Matt Nicholl, 2015-2020
IMPORTANT NOTE: PYTHON 2 VERSION WILL NO LONGER BE UPDATED AFTER V1.8
Version 1.8 : Fix bug in suppression integral - thanks Sebastian Gomez (MN)
Version 1.7 : Fix bug introduced in 1.6 where extinction/Swift corrections not always applied (MN)
Version 1.6 : Save interpolations before applying other corrections (MN)
Version 1.5 : Add prompt to convert Swift AB to Vega (MN)
Version 1.4 : Narrow date range for finding max of polynomial fit to peak (MN)
Version 1.3 : Minor tweaks to output plots (MN)
Version 1.2 : Add extinction correction as an option (MN)
Version 1.1 : Add bibliographic reference, output file now includes K-correction info (MN)
Version 1.0 : Release version, Nicholl 2018 RNAAS (MN)
Version 0.17: Fix bug to write nans instead of blanks when BB fit fails (MN)
Version 0.16: Correct inconsistency in x axis labels, automatically exit if <2 filters used (MN)
Version 0.15: Plot temperature and radius, other small figure adjustments (MN)
Version 0.14: Fixed bug where having two reference epochs the same broke manual interpolation (MN)
Version 0.13: Give user control over whether to fit UV separately, improve commenting and output files, change min integration wavelength to 100A (MN)
Version 0.12: Change UV suppression to power law (lambda/lambda_max)^x following Nicholl, Guillochon & Berger 2017 (MN)
Version 0.11: Added ATLAS c and o filters (MN)
Version 0.10: Added Gaia G filter. Automatically sort wavelength array when calculating Luminosity. Updated constants in bbody. Added option to change cosmologies with astropy. (SG)
Version 0.9 : Only do separate UV fit if > 2 UV filters (MN)
Version 0.8 : New feature! Can choose to shift SED to rest-frame for data with no K-correction (MN)
Version 0.7 : Improved handling of errors (MN)
Version 0.6 : Tidied up separate blackbody integrations in UV/NIR (MN)
Version 0.5 : Specifying files on command line now COMMA separated to handle 2-digit indices (MN)
Version 0.4 : Fixed bug in blackbody formula - missing factor of pi led to overestimate of radius (MN)
Version 0.3 : Added GALEX NUV and FUV bands in AB system (MN)
Version 0.2 : Swift effective wavelengths now match Poole et al 2008 (update by MN)
Version 0.1 : Origin and veracity of all zeropoints checked by SJS. Comments added, with more details in superbol.man file. Archived this version in /home/sne/soft
Version 0 : Written by Matt Nicholl (QUB), 2015
Computes pseudobolometric light curves and estimates full bolometric with blackbody corrections
See superbol.man for the manual file and more details.
Requirements and usage:
Needs numpy, scipy and matplotlib
To-do:
- set error floor for interpolation to ref band error
- make compatible with other inputs (Open Supernova Catalog, output from psf.py)
- include extinction correction
Input files should be called SNname_filters.EXT, eg PTF12dam_ugriz.txt, LSQ14bdq_JHK.dat, etc
Can have multiple files per SN with different filters in each
Format of files must be:
MJD filter1 err1 filter2 err2...
MJD can be replaced by phase or some other time parameter, but must be consistent between files.
Important: Bands must be in their common systems -- AB mag for ugrizy and GALEX, Vega mag for UBVRIJHK and Swift (S=UVW2 D=UVM2 A=UVW1)
Important : Order of filter magnitudes in file must match order of filters in filename.
Output of each run of the code will contain all the filters used in the integration in the filenames
Steps:
- Find files associated with SN and determine available filters and data
- Correct for time dilation, distance, and approximate K-correction if desired
- Map light curves in each filter to a common set of times (typically defined by the filter with most observations)
- Interpolation options: linear between neighbouring epochs or polynomial fits (user determines order of polynomial interactively)
- Extrapolation: using polynomials or assuming constant colour with respect to reference filter. Large extrapolations = large uncertainties!
- Save interpolated light curves for reproducibility!
- Fit blackbodies to SED at each epoch (most SNe can be reasonably approximated by blackbody above ~3000 A). In UV, user can choose to:
- fit SED over all wavelengths with single blackbody
- fit separate blackbodies to optical and UV (if UV data exist). Optical fit gives better temperature estimate than single BB. UV fit used only to extrapolate flux for bolometric luminosity.
- use a simple prescription for line blanketing at UV wavelengths, defined as L_uv(lambda < cutoff) = L_bb(lambda)*(lambda/cutoff)^x, where x is chosen by user. Cutoff is either set to bluest available band, or if bluest band is >3000A, cutoff = 3000A
- Numerically integrate observed SEDs, and account for missing UV and NIR flux using blackbody extrapolations. NIR is easy, UV used options described above
- Save outputs:
- interpolated_lcs_<SN>_<filters>.txt = multicolour light curves mapped to common times. Footer gives methods of interpolation and extrapolation. If file exists, can be read in future to skip interpolating next time.
- bol_<SN>_<filters>.txt = main output. Contains pseudobolometric light curve, integrated trapezoidally, and bolometric light curve including the additional BB corrections, and errors on each. Footer gives filters and method of UV fitting.
- logL_obs_<SN>_<filters>.txt = same pseudobolometric (observed) light curve, in convenient log form
- logL_bb_<SN>_<filters>.txt = light curve with the BB corrections, in convenient log form
- BB_params_<SN>_<filters>.txt = fit parameters for blackbodies: T, R and inferred L from Stefan-Boltzmann law (can compare with direct integration method). If separate optical/UV fit, gives both T_bb (fit to all data) and T_opt (fit only to data >3000 A)
Recommended practice: run once with ALL available filters, and fit missing data as best you can using light curve interpolations. Then re-run choosing only the well-observed filters for the integration. You can compare results and decide for yourself whether you have more belief in the "integrate all filters with light curve extrapolations" method or the "integrate only the well-sampled filters and account for missing flux with blackbodies" method.
'''
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as itg
from scipy.optimize import curve_fit
from scipy.interpolate import interpolate as interp
import glob
import sys
import os
# If you don't have astropy, can comment this out, and uncomment cosmocalc routine
from astropy.coordinates import Distance
# print 'cool' logo
# (ASCII-art startup banner with code name, citation and version; this is the
# legacy Python 2 file, hence the statement-form prints.)
print '\n * * * * * * * * * * * * * * * * * * * * *'
print ' * *'
print ' * Welcome to `SUPER BOL`! *'
print ' * SUPernova BOLometric light curves *'
print ' * *'
print ' * ______ *'
print ' * {\ */} *'
print ' * \__/ *'
print ' * || *'
print ' * ==== *'
print ' * *'
print ' * Matt Nicholl (2018, RNAAS, 2, 230) *'
print ' * V'+version+' *'
print ' * *'
print ' * * * * * * * * * * * * * * * * * * * * *\n\n'
# interactive plotting
plt.ion()
# Define some functions:
def bbody(lam, T, R):
    '''
    Blackbody spectral radiance scaled by surface area.

    Parameters
    ---------------
    lam: Reference wavelengths in Angstroms
    T: Temperature in Kelvin
    R: Radius in cm

    Output
    ---------------
    Spectral radiance in units of erg/s/Angstrom

    (calculation and constants checked by Sebastian Gomez)
    '''
    # Physical constants in cgs units
    planck = 6.62607E-27        # Planck constant, cm^2 g / s
    lightspeed = 2.99792458E10  # speed of light, cm / s
    boltzmann = 1.38064852E-16  # Boltzmann constant, cm^2 g / s^2 / K
    # Work internally in cm
    wav_cm = lam * 1E-8
    # Planck function B_lam in (erg / s) / cm^2 / cm
    arg = (planck * lightspeed) / (wav_cm * boltzmann * T)
    b_lam = (2 * np.pi * planck * lightspeed ** 2) / (wav_cm ** 5 * (np.exp(arg) - 1))
    # Scale by the sphere's surface area and convert per-cm to per-Angstrom
    surface_area = 4 * np.pi * R ** 2
    return b_lam * surface_area / 1E8
def easyint(x,y,err,xref,yref):
    '''
    Adapt scipy interpolation to include extrapolation for filters missing early/late data

    Originally based on `bolom.py` by Enrico Cappellaro (2008)

    Parameters: x, y, err are the observed band's times, magnitudes and
    errors; xref, yref are the reference band's times and magnitudes.
    Outside the observed time range, the observed band is extrapolated
    assuming constant colour with respect to the reference band.

    Returns light curve mapped to reference epochs and errors on each point
    '''
    # boolean mask: reference epochs that fall inside the observed time range
    ir = (xref>=min(x))&(xref<=max(x))
    # for times where observed and reference band overlap, do simple interpolation
    # (x and y are sorted by time first, as interp1d requires increasing x)
    yint = interp.interp1d(x[np.argsort(x)],y[np.argsort(x)])(xref[ir])
    yout = np.zeros(len(xref),dtype=float)
    # For times before or after observed filter has observations, use constant colour with reference band
    # np.argmin/argmax of xref[ir] give the earliest/latest overlapping epoch,
    # whose (observed - reference) colour is then applied to the reference mags
    ylow = yint[np.argmin(xref[ir])]-yref[ir][np.argmin(xref[ir])]+yref[xref<min(x)]
    yup = yint[np.argmax(xref[ir])]-yref[ir][np.argmax(xref[ir])]+yref[xref>max(x)]
    yout[ir] = yint
    yout[xref<min(x)] = ylow
    yout[xref>max(x)] = yup
    errout = np.zeros(len(xref),dtype=float)
    # put error floor of 0.1 mag on any interpolated data
    errout[ir] = max(np.mean(err),0.1)
    # for extrapolations, apply mean error for interpolated data, plus 0.01 mag per day of extrapolation (added in quadrature)
    errout[xref<min(x)] = np.sqrt((min(x) - xref[xref<min(x)])**2/1.e4 + np.mean(err)**2)
    errout[xref>max(x)] = np.sqrt((xref[xref>max(x)] - max(x))**2/1.e4 + np.mean(err)**2)
    return yout,errout
def cosmocalc(z):
    ################# cosmocalc by N. Wright ##################
    '''
    Luminosity distance in Mpc for redshift z.

    Port of Ned Wright's cosmology calculator with H0 = 70,
    Omega_matter = 0.27 and a flat-geometry vacuum term.  This was used
    in an older version of superbol, but can still be used in place of
    astropy if desired - just uncomment cosmocalc in step 3.
    '''
    # fixed cosmological parameters
    hubble = 70                                    # Hubble constant, km/s/Mpc
    omega_m = 0.27                                 # Omega(matter)
    omega_v = 1.0 - omega_m - 0.4165/(hubble*hubble)  # Omega(vacuum) or lambda
    clight = 299792.458                            # speed of light, km/sec
    h = hubble/100.
    # radiation term includes 3 massless neutrino species, T0 = 2.72528
    omega_r = 4.165E-5/(h*h)
    omega_k = 1-omega_m-omega_r-omega_v            # curvature = 1 - Omega(total)
    # scale factor 1/(1+z) at the object
    a_z = 1.0/(1+1.0*z)

    # midpoint-rule integration of 1/adot and 1/(a*adot) from a_z to 1
    nsteps = 1000
    time_sum = 0.0
    dcmr_sum = 0.0
    for i in range(nsteps):
        a = a_z+(1-a_z)*(i+0.5)/nsteps
        adot = np.sqrt(omega_k+(omega_m/a)+(omega_r/(a*a))+(omega_v*a*a))
        time_sum = time_sum + 1./adot
        dcmr_sum = dcmr_sum + 1./(a*adot)
    # comoving radial distance in units of c/H0
    dcmr = (1.-a_z)*dcmr_sum/nsteps

    # curvature correction: sinh(x)/x (open), sin(x)/x (closed),
    # or a Taylor series for small x
    x = np.sqrt(abs(omega_k))*dcmr
    if x > 0.1:
        if omega_k > 0:
            ratio = 0.5*(np.exp(x)-np.exp(-x))/x
        else:
            ratio = np.sin(x)/x
    else:
        y = x*x
        if omega_k < 0:
            y = -y
        ratio = 1. + y/6. + y*y/120.

    # angular-size distance, then luminosity distance DL = DA/a^2
    angular_dist = a_z*ratio*dcmr
    lum_dist = angular_dist/(a_z*a_z)
    return (clight/hubble)*lum_dist
# Filter information
# (All per-filter lookup tables below are keyed by single-letter filter codes;
# note 'Y' is kept as an alias of 'y' with identical values throughout.)

#SDSS filters and AB mags:
#These effective wavelengths for SDSS filters are from Fukugita et al. (1996, AJ, 111, 1748) and are
#the wavelength weighted averages (effective wavelengths in their Table 2a, first row)

#Effective wavelengths (in Angs)
wle = {'u': 3560, 'g': 4830, 'r': 6260, 'i': 7670, 'z': 8890, 'y': 9600, 'Y': 9600,
'U': 3600, 'B': 4380, 'V': 5450, 'R': 6410, 'G': 6730, 'I': 7980, 'J': 12200, 'H': 16300,
'K': 21900, 'S': 2030, 'D': 2231, 'A': 2634, 'F': 1516, 'N': 2267, 'o': 6790, 'c': 5330}

# For Swift UVOT: S=UVW2, D=UVM2, A=UVW1
# For GALEX: F=FUV, N=NUV

# The below zeropoints are needed to convert magnitudes to fluxes
#For AB mags,
# m(AB) = -2.5 log(f_nu) - 48.60.
# f_nu is in units of ergs/s/cm2/Hz such that
# m(AB) = 0 has a flux of f_nu = 3.63E-20 erg/s/cm2/Hz = 3631 Jy
# Therefore, AB magnitudes are directly related to a physical flux.
# Working through the conversion to ergs/s/cm2/Angs, gives
# f_lam = 0.1089/(lambda_eff^2) where lambda_eff is the effective wavelength of the filter in angstroms
# Note then that the AB flux zeropoint is defined ONLY by the choice of effective wavelength of the bandpass

# However, not all bands here are AB mag, so for consistency across all filters the zeropoints are stored in the following dictionary

# Matt originally listed the following from Paul Martini's page : http://www.astronomy.ohio-state.edu/~martini/usefuldata.html
# That is not an original source, for AB mags it simply uses the f_lam =0.1089/(lambda_eff^2) relation, and the effective wavelengths from Fukugita et al.

# ugriz and GALEX NUV/FUV are in AB mag system, UBVRI are Johnson-Cousins in Vega mag, JHK are Glass system Vega mags, and Swift UVOT SDA are in Vega mag system

#
#The values for UBVRIJHK are for the Johnson-Cousins-Glass system and are taken directly from Bessell et al. 1998, A&A, 333, 231 (Paul Martini's page lists these verbatim)
#Note that these Bessell et al. (1998) values were calculated not from the spectrum of Vega itself, but from a Kurucz model atmosphere of an AOV star.
#GALEX effective wavelengths from here: http://galex.stsci.edu/gr6/?page=faq
# ATLAS values taken from Tonry et al 2018

#All values in 1e-11 erg/s/cm2/Angs
zp = {'u': 859.5, 'g': 466.9, 'r': 278.0, 'i': 185.2, 'z': 137.8, 'y': 118.2, 'Y': 118.2,
'U': 417.5, 'B': 632.0, 'V': 363.1, 'R': 217.7, 'G': 240.0, 'I': 112.6, 'J': 31.47, 'H': 11.38,
'K': 3.961, 'S': 536.2, 'D': 463.7, 'A': 412.3, 'F': 4801., 'N': 2119., 'o': 236.2, 'c': 383.3}

#Filter widths (in Angs)
width = {'u': 458, 'g': 928, 'r': 812, 'i': 894, 'z': 1183, 'y': 628, 'Y': 628,
'U': 485, 'B': 831, 'V': 827, 'R': 1389, 'G': 4203, 'I': 899, 'J': 1759, 'H': 2041,
'K': 2800, 'S': 671, 'D': 446, 'A': 821, 'F': 268, 'N': 732, 'o': 2580, 'c': 2280}

#Extinction coefficients in A_lam / E(B-V). Uses York Extinction Solver (http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/community/YorkExtinctionSolver/coefficients.cgi)
extco = {'u': 4.786, 'g': 3.587, 'r': 2.471, 'i': 1.798, 'z': 1.403, 'y': 1.228, 'Y': 1.228,
'U': 4.744, 'B': 4.016, 'V': 3.011, 'R': 2.386, 'G': 2.216, 'I': 1.684, 'J': 0.813, 'H': 0.516,
'K': 0.337, 'S': 8.795, 'D': 9.270, 'A': 6.432, 'F': 8.054, 'N': 8.969, 'o': 2.185, 'c': 3.111}

# Colours for plots (matplotlib colour names per filter)
cols = {'u': 'dodgerblue', 'g': 'g', 'r': 'r', 'i': 'goldenrod', 'z': 'k', 'y': '0.5',
'Y': '0.5', 'U': 'slateblue', 'B': 'b', 'V': 'yellowgreen', 'R': 'crimson', 'G': 'salmon',
'I': 'chocolate', 'J': 'darkred', 'H': 'orangered', 'K': 'saddlebrown',
'S': 'mediumorchid', 'D': 'purple', 'A': 'midnightblue',
'F': 'hotpink', 'N': 'magenta', 'o': 'darkorange', 'c': 'cyan'}

# Maintains order from blue to red effective wavelength
bandlist = 'FSDNAuUBgcVrRoGiIzyYJHK'
# ----------------------------------------------------------------------
# STEP 1: locate input photometry.
# Populates `files` (candidate file names) and `use1` (indices into
# `files` of the ones to read). Input files must end in _<filters>.<ext>,
# where <filters> is a string of single-letter band codes, so the bands
# can be parsed from the file name later. `useInt` records whether we are
# re-using light curves already interpolated by a previous superbol run
# (in which case Step 4 is skipped).
# ----------------------------------------------------------------------
# First step is to search directory for existing superbol files, or photometry files matching our naming conventions
print '\n######### Step 1: input files and filters ##########'
# keep tabs on whether interpolated LCs exist
useInt = 'n'
# SN name defines names of input and output files
sn = raw_input('\n> Enter SN name: ')
if not sn:
    print '\n* No name given; lets just call it `SN`...'
    sn = 'SN'
# Keep outputs in this directory
outdir = 'superbol_output_'+sn
if not os.path.exists(outdir): os.makedirs(outdir)
# Get photometry files
do1 = raw_input('\n> Find input files automatically?[y] ')
if not do1: do1='y'
# User will almost always want to do this automatically, if files follow naming convention!
use1 = []
if do1 == 'y':
    # first check for previous superbol interpolations
    files = glob.glob(outdir+'/interpolated_lcs_'+sn+'*.txt')
    if len(files)>0:
        print '\n* Interpolated LC(s) already available:'
        # If multiple interpolations exist, ask user which they want
        for i in range(len(files)):
            print '  ', i, ':', files[i]
        use = raw_input('\n> Use interpolated LC? (e.g. 0,2 for files 0 and 2, or n for no) [0]\n (Warning: using multiple interpolation files can cause problems unless times match!) ')
        # Default is to read in the first interpolation file
        # Multiple interpolations can be read using commas, BUT if time axes don't match then the phases can end up incorrectly defined for some bands!!!
        if not use: use1.append(0)
        if use!='n':
            # if previous interpolations are used, need to keep tabs so we don't interpolate again later!
            useInt = 'y'
            if len(use)>0:
                # Comma-separated indices select multiple interpolation files
                for i in use.split(','):
                    use1.append(i)
        else: print '\n* Not using interpolated data'
    # NOTE: if len(files)==0 the `or` short-circuits, so `use` (only defined
    # in the branch above) is never evaluated here when undefined
    if len(files)==0 or use=='n':
        # And here is if we don't have (or want) previously interpolated data
        # search for any files matching with SN name
        files = glob.glob(sn+'_*')
        if len(files)>0:
            # If files are found, print them and let the user choose which ones to read in
            print '\n* Available files:'
            for i in range(len(files)):
                print '  ', i, ':', files[i]
            use = raw_input('\n> Specify files to use (e.g. 0,2 for files 0 and 2) [all] ')
            if len(use)>0:
                # Include only specified files
                for i in use.split(','):
                    use1.append(i)
            else:
                # Or include all files
                for i in range(len(files)):
                    use1.append(i)
        else:
            # If no files found, keep track and print message
            do1 = 'n'
            print '* No files found for '+sn
if do1 != 'y':
    # If we did not find any input data, you can specify files manually - BUT should still follow filter conventions and end in _<filters>.EXT
    files1 = raw_input('\n> Enter all file names separated by commas:\n')
    if not files1:
        # But if no files specified by this point, we give up prompting!
        print 'No files given - exiting!'
        sys.exit(0)
    files = []
    for i in files1.split(','):
        # If manually specified files given, add them to input list
        files.append(i)
    for i in range(len(files)):
        # Also need to keep an integer index for each file, so we can treat them the same as we would the automatically-detected files
        use1.append(i)
# ----------------------------------------------------------------------
# Read the selected files into the `lc` dictionary: one entry per filter,
# each an Nx3 array of (time, magnitude, mag_error). Expected column
# layout per file: column 0 = time, then (mag, err) pairs, one pair per
# single-letter band code parsed from the file name.
# ----------------------------------------------------------------------
# This dictionary is vital, will hold all light curve data!
lc = {}
# This keeps track of filters used (don't remember why I used strings in place of lists...)
filts2 = str()
for i in use1:
    # These integers map to the list of input files
    i = int(i)
    # get filter from file name and add to list
    # filts1 keeps track of filters IN THAT FILE ONLY, filts2 is ALL filters across ALL files.
    filts1 = files[i].split('.')[0]
    filts1 = filts1.split('_')[-1]
    filts2 += filts1
    # Here we read in the files using genfromtxt. Uses try statements to catch a few common variants of the input, e.g. with csv or header rows
    # The four attempts are: plain whitespace-delimited; whitespace with a
    # one-line header; comma-delimited; comma-delimited with a header.
    # NOTE(review): a file containing a single data row gives a 1-D array
    # from genfromtxt, so d[:,0] would fail - assumes >=2 rows per file.
    try:
        d = np.genfromtxt(files[i])
        # x points at the magnitude column for the current band; errors are at x+1
        x = 1
        for j in filts1:
            # loop over filters (j) in file and add each light curve to dictionary
            # column 0 is time, odd columns (x) are magnitudes, even columns (x+2) are errors
            # rows with NaN magnitude are dropped
            lc[j] = np.array(list(zip(d[:,0][~np.isnan(d[:,x])],d[:,x][~np.isnan(d[:,x])],d[:,x+1][~np.isnan(d[:,x])])))
            x+=2
    except:
        try:
            d = np.genfromtxt(files[i],skip_header=1)
            x = 1
            for j in filts1:
                lc[j] = np.array(list(zip(d[:,0][~np.isnan(d[:,x])],d[:,x][~np.isnan(d[:,x])],d[:,x+1][~np.isnan(d[:,x])])))
                x+=2
        except:
            try:
                d= np.genfromtxt(files[i],delimiter=',')
                x = 1
                for j in filts1:
                    lc[j] = np.array(list(zip(d[:,0][~np.isnan(d[:,x])],d[:,x][~np.isnan(d[:,x])],d[:,x+1][~np.isnan(d[:,x])])))
                    x+=2
            except:
                try:
                    d= np.genfromtxt(files[i],delimiter=',',skip_header=1)
                    x = 1
                    for j in filts1:
                        lc[j] = np.array(list(zip(d[:,0][~np.isnan(d[:,x])],d[:,x][~np.isnan(d[:,x])],d[:,x+1][~np.isnan(d[:,x])])))
                        x+=2
                except:
                    # All four parse attempts failed - give up on this file
                    raise ValueError('Could not read file')
# sort list of recognised filters from filts2 into wavelength order:
filters = str()
for i in bandlist:
if i in filts2:
filters += i
# If a filter name is not recognised, prompt user to add its properties manually
for i in filts2:
if not i in wle:
print '\n* Unknown filter '+i+'!'
print '* Please enter details for filter',i
wle[i] = float(raw_input(' >Lambda_eff (angstroms): '))
zp[i] = float(raw_input(' >Flux zero point (1e11 erg/cm2/s/ang): '))
width[i] = float(raw_input(' >Filter width (angstroms): '))
ftmp = str()
cols[i] = 'grey'
for j in filters:
if wle[j]<wle[i]:
ftmp += j
ftmp += i
for j in filters:
if wle[j]>wle[i]:
ftmp += j
filters = ftmp
# This ends the data import
# ----------------------------------------------------------------------
# STEP 2: display all light curves, pick the filter subset to use, and
# choose a reference band whose epochs define the sampling grid for
# interpolation. Optionally shift the time axis to the (approximate)
# epoch of maximum light in the reference band.
# ----------------------------------------------------------------------
print '\n######### Step 2: reference band for phase info ##########'
plt.figure(1,(8,6))
plt.clf()
# Default time axis label
xlab = 'Time'
# Plot all light curves on same axes
for i in filters:
    plt.errorbar(lc[i][:,0],lc[i][:,1],lc[i][:,2],fmt='o',color=cols[i],label=i)
plt.gca().invert_yaxis()
plt.xlabel(xlab)
plt.ylabel('Magnitude')
plt.legend(numpoints=1,fontsize=16,ncol=2,frameon=True)
plt.tight_layout(pad=0.5)
plt.draw()
# Loop through dictionary and determine which filter has the most data
# ref3 ends up as the band with the most light-curve points, used as the
# suggested default reference band below
ref1 = 0
for i in filters:
    ref2 = len(lc[i])
    if ref2>ref1:
        ref1 = ref2
        ref3 = i
print '\n* Displaying all available photometry...'
# User can choose to include only a subset of filters, e.g. if they see that some don't have very useful data
t3 = raw_input('\n> Enter bands to use (blue to red) ['+filters+'] ')
if not t3: t3 = filters
filters = t3
if len(filters) < 2:
    # If only one filter, no need to interpolate, and can't apply BB fits, so makes no sense to use superbol!
    print 'At least two filters required - exiting!'
    sys.exit(0)
# If using light curves that have not yet been interpolated by a previous superbol run, we need a reference filter
if useInt!='y':
    ref = raw_input('\n> Choose reference filter for sampling epochs\n   Suggested (most LC points): ['+ref3+'] ')
    # Defaults to the band with the most data
    if not ref: ref = ref3
# If light curves are already interpolated, reference is mainly for plotting so just pick first band
else: ref = filters[0]
print '\n* Using '+ref+'-band for reference'
# User may want to have output in terms of days from maximum, so here we find max light in reference band
# Two options: fit light curve interactively, or just use brightest point. User specifies what they want to do
t1 = raw_input('\n> Interactively find '+ref+'-band maximum?[n] ')
if not t1:
    # Default to not doing interactive fit
    t1 = 'n'
    # in this case check if user wants quick approximation
    doSh = raw_input('\n> Shift to approx maximum?[n] ')
    # Default to not doing this either - i.e. leave light curve as it is
    if not doSh: doSh = 'n'
    if doSh=='y':
        # If approx shift wanted, find time of brightest point in ref band to set as t=0
        d = lc[ref]
        # brightest point = minimum magnitude
        shift = d[:,0][np.argmin(d[:,1])]
        # Loop over all bands and shift them
        for j in lc:
            lc[j][:,0]-=shift
        # update x-axis label
        xlab += ' from approx '+ref+'-band maximum'
        print '\n* Approx shift done'
# ----------------------------------------------------------------------
# Interactive determination of the reference-band maximum: the reference
# light curve is first roughly shifted to its brightest point, then a
# polynomial is fitted to the peak region until the user is satisfied.
# All bands are then shifted so t=0 corresponds to the chosen maximum.
# ----------------------------------------------------------------------
if t1!='n':
    # Here's where date of maximum is fit interactively, if user wanted it
    # Start with approx shift of reference band
    d = lc[ref]
    shift = d[:,0][np.argmin(d[:,1])]
    # NOTE: this shifts the reference band IN PLACE (d aliases lc[ref]);
    # the double-shift is undone further down
    d[:,0]-=shift
    plt.clf()
    # Plot reference band centred roughly on brightest point
    plt.errorbar(d[:,0],d[:,1],d[:,2],fmt='o',color=cols[ref])
    plt.ylim(max(d[:,1])+0.2,min(d[:,1])-0.2)
    plt.xlabel(xlab + ' from approx maximum')
    plt.ylabel('Magnitude')
    plt.tight_layout(pad=0.5)
    plt.draw()
    # As long as happy ='n', user can keep refitting til they get a good result
    happy = 'n'
    print '\n### Begin polynomial fit to peak... ###'
    # Default polynomial order =4
    order1 = 4
    # Only fit data at times < Xup from maximum light. Default is 50 days
    Xup1 = 50
    while happy == 'n':
        print '\n### Select data range ###'
        # Interactively set upper limit on times to fit
        Xup = raw_input('>> Cut-off phase for polynomial fit?['+str(Xup1)+'] ')
        if not Xup: Xup = Xup1
        Xup = float(Xup)
        # Remember choice as default for the next iteration
        Xup1 = Xup
        d1 = d[d[:,0]<Xup]
        plt.clf()
        # Plot only times < Xup
        plt.errorbar(d1[:,0],d1[:,1],d1[:,2],fmt='o',color=cols[ref])
        plt.ylim(max(d1[:,1])+0.4,min(d1[:,1])-0.2)
        plt.tight_layout(pad=0.5)
        plt.draw()
        # Interactively set polynomial order
        order = raw_input('\n>> Order of polynomial to fit?['+str(order1)+'] ')
        if not order: order = order1
        order = int(order)
        order1 = order
        # Fit light curve with polynomial
        fit = np.polyfit(d1[:,0],d1[:,1],deg=order)
        # Plot the polynomial
        days = np.arange(min(-40,min(d[:,0]))-10,Xup)
        eq = 0
        for i in range(len(fit)):
            # Loop allows calculation for arbitrary polynomial order
            # (np.polyfit returns coefficients highest power first)
            eq += fit[i]*days**(order-i)
        plt.plot(days,eq,label='Fit order = %d' %order)
        plt.ylabel('Magnitude')
        plt.xlabel(xlab + ' from approx maximum')
        plt.legend(numpoints=1,fontsize=16,ncol=2,frameon=True)
        plt.xlim(min(d[:,0])-5,Xup)
        plt.tight_layout(pad=0.5)
        plt.draw()
        # Check if user likes fit
        happy = raw_input('\n> Happy with fit?(y/[n]) ')
        # Default is to try again!
        if not happy: happy = 'n'
    # After user tired/satisfied with fit, check if they want to use the peak of their most recent polynomial as t=0, or default to the brightest point
    new_peak = raw_input('> Use [p-olynomial] or o-bserved peak date? ')
    # Default is to use polynomial for peak date
    if not new_peak: new_peak = 'p'
    xlab += ' from '+ref+'-band maximum'
    # Plot reference band shifted to match polynomial peak
    if new_peak=='p':
        # Evaluate polynomial on a fine grid +/-10 days around the observed
        # brightest point and take its minimum as the peak date
        days = np.arange(d[:,0][np.argmin(d[:,1])]-10,d[:,0][np.argmin(d[:,1])]+10)
        eq = 0
        for i in range(len(fit)):
            # Loop allows calculation for arbitrary polynomial order
            eq += fit[i]*days**(order-i)
        peak = days[np.argmin(eq)]
        d[:,0] -= peak
        plt.clf()
        plt.errorbar(d[:,0],d[:,1],d[:,2],fmt='o',color=cols[ref])
        plt.ylabel('Magnitude')
        plt.xlabel(xlab)
        plt.ylim(max(d[:,1])+0.2,min(d[:,1])-0.2)
        plt.tight_layout(pad=0.5)
        plt.draw()
    # If user instead wants observed peak, that shift was already done!
    if new_peak == 'o':
        peak = 0
    # Shift all light curves by same amount as reference band
    for j in lc:
        lc[j][:,0]-=(shift+peak)
    # Need to un-shift the reference band, since it's now been shifted twice!
    lc[ref][:,0]+=(shift+peak)
    plt.figure(1)
    plt.clf()
    # Re-plot the light curves after shifting
    for i in filters:
        plt.errorbar(lc[i][:,0],lc[i][:,1],lc[i][:,2],fmt='o',color=cols[i],label=i)
    plt.gca().invert_yaxis()
    plt.xlabel(xlab)
    plt.ylabel('Magnitude')
    plt.legend(numpoints=1,fontsize=16,ncol=2,frameon=True)
    plt.tight_layout(pad=0.5)
    plt.draw()
# ----------------------------------------------------------------------
# STEP 3: determine the luminosity distance `dist` (in cm) needed to
# convert fluxes to luminosities. The user may enter either a redshift
# or a distance modulus in the same prompt; values < 10 are treated as
# redshifts (a DM < 10 would mean a distance < 1 Mpc, assumed never to
# occur for a SN). Optionally corrects phases for time dilation.
# ----------------------------------------------------------------------
# Needed for K-correction step a bit later
skipK = 'n'
# Input redshift or distance modulus, needed for flux -> luminosity
z = raw_input('\n> Please enter SN redshift or distance modulus:[0] ')
# Default to zero
if not z: z=0
z = float(z)
if z<10:
    # Redshift always less than 10, distance modulus always greater, so easy to distinguish
    print 'Redshift entered (or DM=0)'
    t2 = ''
    # Check if user wants to correct time axis for cosmological time dilation
    if lc[ref][0,0]>25000 or useInt=='y':
        # If time is in MJD or input light curves were already interpolated, default to no
        t2 = raw_input('\n> Correct for time-dilation?[n] ')
        if not t2: t2 = 'n'
    else:
        # Otherwise default to yes
        t2 = raw_input('\n> Correct for time-dilation?[y] ')
        if not t2: t2 = 'y'
    if t2=='y':
        # Apply correction for time dilation
        for j in lc:
            lc[j][:,0]/=(1+z)
        print '\n* Displaying corrected phases'
        xlab += ' (rest-frame)'
        plt.xlabel(xlab)
    plt.figure(1)
    plt.clf()
    # Re-plot light curves in rest-frame times
    for i in filters:
        plt.errorbar(lc[i][:,0],lc[i][:,1],lc[i][:,2],fmt='o',color=cols[i],label=i)
    plt.gca().invert_yaxis()
    plt.xlabel(xlab)
    plt.ylabel('Magnitude')
    plt.legend(numpoints=1,fontsize=16,ncol=2,frameon=True)
    plt.tight_layout(pad=0.5)
    plt.draw()
    print '\n######### Step 3: Flux scale ##########'
    # New version uses astropy coordinates.Distance
    # Old version used cosmocalc (thanks to Sebastian Gomez for change)
    # Options for cosmologies
    # WMAP9, H0 = 69.3, Om0 = 0.286, Tcmb0 = 2.725, Neff = 3.04, m_nu = 0, Ob0 = 0.0463
    # And many others...
    # from astropy.cosmology import WMAP9
    # cosmology.set_current(WMAP9)
    # Luminosity distance in Mpc from redshift (astropy default cosmology)
    DL_Mpc = Distance(z = z).Mpc
    # To use cosmocalc instead, uncomment below:
    # DL_Mpc = cosmocalc(z)
    #############################################
    # Check value of first light curve point to see if likely absolute or apparent mag
    print '\n* First '+ref+'-band mag = %.2f' %lc[ref][0,1]
    absol='n'
    if lc[ref][0,1] < 0:
        # If negative mag, must be absolute (but check!)
        absol = raw_input('> Magnitudes are in Absolute mags, correct?[y] ')
        if not absol: absol='y'
    else:
        # If positive mag, must be apparent (but check!)
        absol = raw_input('> Magnitudes are in Apparent mags, correct?[y] ')
        if not absol: absol ='n'
    if absol=='y':
        # If absolute mag, set distance to 10 parsecs
        DL_Mpc = 1e-5
        print '\n* Absolute mags; Luminosity distance = 10 pc'
    else:
        # Otherwise use luminosity distance from redshift
        print '\n* Luminosity distance = %.2e Mpc' %DL_Mpc
    # convert Mpc to cm, since flux in erg/s/cm2/A
    dist = DL_Mpc*3.086e24
else:
    # If distance modulus entered, different approach needed!
    print 'Distance modulus entered'
    # No k correction if no redshift!
    skipK = 'y'
    for i in lc:
        # Subtract distance modulus to convert to absolute mags (assuming no one would ever supply absolute mags and still enter a DM...)
        lc[i][:,1]-=z
    # Once absolute, distance = 10 pc
    dist = 1e-5*3.086e24
# ----------------------------------------------------------------------
# STEP 4: interpolate/extrapolate every band onto the reference band's
# epochs, producing the `lc_int` dictionary (same Nx3 layout as `lc`).
# Two modes:
#   interactive - per-band polynomial fit, with a choice of polynomial,
#                 constant-colour, or averaged extrapolation outside the
#                 observed time range;
#   automatic   - linear interpolation + constant-colour extrapolation
#                 via easyint() for every band.
# The results are written to a file so future runs can skip this step.
# ----------------------------------------------------------------------
print '\n######### Step 4: Interpolate LCs to ref epochs ##########'
# If light curves are not already interpolated, now we need to do some work
if useInt!='y':
    # Sort light curves by phase (sometimes this isn't done already...)
    for i in lc:
        lc[i] = lc[i][lc[i][:,0].argsort()]
    # New dictionary for interpolated light curves
    lc_int = {}
    # Reference light curve is already 'interpolated' by definition
    lc_int[ref] = lc[ref]
    # User decides whether to fit each light curve
    t4 = raw_input('\n> Interpolate light curves interactively?[y] ')
    # Default is yes
    if not t4: t4 = 'y'
    if t4=='y':
        print '\n### Begin polynomial fit... ###'
        # Interpolate / extrapolate other bands to same epochs - polynomial fits
        # - what if there are only one or two points??? Use colour?
        # Use this to keep tabs on method used, and append to output file
        intKey = '\n# Reference band was '+ref
        for i in filters:
            # Need to loop through and interpolate every band except reference
            if i!=ref:
                print '\n### '+i+'-band ###'
                # Default polynomial order to fit light curves
                order1 = 4
                # Keep looping until happy
                happy = 'n'
                while happy == 'n':
                    # Plot current band and reference band
                    plt.clf()
                    plt.errorbar(lc[i][:,0],lc[i][:,1],lc[i][:,2],fmt='o',color=cols[i],label=i)
                    plt.errorbar(lc[ref][:,0],lc[ref][:,1],lc[ref][:,2],fmt='o',color=cols[ref],label=ref)
                    plt.gca().invert_yaxis()
                    plt.legend(numpoints=1,fontsize=16,ncol=2,frameon=True)
                    plt.xlabel(xlab)
                    plt.ylabel('Magnitude')
                    plt.ylim(max(max(lc[ref][:,1]),max(lc[i][:,1]))+0.5,min(min(lc[ref][:,1]),min(lc[i][:,1]))-0.5)
                    plt.tight_layout(pad=0.5)
                    plt.draw()
                    # Choose order of polynomial fit to use
                    order = raw_input('\n>> Order of polynomial to fit?(q to quit and use constant colour)['+str(order1)+'] ')
                    # If user decides they can't get a good fit, enter q to use simple linear interpolation and constant-colour extrapolation
                    if order == 'q':
                        break
                    # Or use default order
                    if not order: order = order1
                    order = int(order)
                    # Set new default to current order
                    order1 = order
                    # Fit light curve with polynomial
                    fit = np.polyfit(lc[i][:,0],lc[i][:,1],deg=order)
                    # Plot fit
                    days = np.arange(np.min(lc[ref][:,0]),np.max(lc[ref][:,0]))
                    eq = 0
                    for j in range(len(fit)):
                        # Loop for arbitrary polynomial order
                        eq += fit[j]*days**(order-j)
                    plt.plot(days,eq,label='Fit order = %d' %order)
                    plt.ylabel('Magnitude')
                    plt.xlabel(xlab)
                    plt.legend(numpoints=1,fontsize=16,ncol=2,frameon=True)
                    plt.tight_layout(pad=0.5)
                    plt.draw()
                    # Check if happy with fit
                    happy = raw_input('\n> Happy with fit?(y/[n]) ')
                    # Default to no
                    if not happy: happy = 'n'
                # If user quit polyfit, use easyint
                if order == 'q':
                    # easyint: linear interpolation onto reference epochs,
                    # constant-colour extrapolation outside observed range
                    tmp1,tmp2 = easyint(lc[i][:,0],lc[i][:,1],lc[i][:,2],lc[ref][:,0],lc[ref][:,1])
                    tmp = list(zip(lc[ref][:,0],tmp1,tmp2))
                    lc_int[i] = np.array(tmp)
                    print '\n* Interpolating linearly; extrapolating assuming constant colour...'
                    # Add method to output
                    intKey += '\n# '+i+': Linear interp; extrap=c'
                else:
                    # If user was happy with fit, add different interpolation string to output
                    intKey += '\n# '+i+': fit order='+str(order)+'; extrap method '
                    # Construct polynomial interpolation
                    # Goal: if band has point at same epoch as ref band, use point; otherwise, use polynomial prediction
                    mag_int = []
                    for k in lc[ref]:
                        # Check each light curve point against each reference time
                        # If match, add that point to interpolated light curve
                        k1 = np.where(lc[i][:,0]==k[0])
                        if len(k1[0])>0:
                            mag_int.append(lc[i][k1][0])
                    # Convert matches to numpy array (just to compare with reference array)
                    tmp_arr = np.array(mag_int)
                    if tmp_arr.size:
                        # Do this loop if there were some temporal matches between current and reference band
                        for k in lc[ref]:
                            # Iterate over each reference time
                            if k[0] not in tmp_arr[:,0]:
                                # If no match in current band, calculate magnitude from polynomial
                                mag = 0
                                for j in range(len(fit)):
                                    # Sum works for arbitrary polynomial order
                                    mag += fit[j]*k[0]**(order-j)
                                # Append polynomial magnitude to light curve, with an error floor of 0.1 mags
                                out = np.array([k[0],mag,max(np.mean(lc[i][:,2]),0.1)])
                                mag_int.append(out)
                    else:
                        # Do this loop if there were zero matches between current band and reference times
                        for l in lc[ref][:,0]:
                            # Construct polynomial mag as above for each reference time
                            mag = 0
                            for j in range(len(fit)):
                                mag += fit[j]*l**(order-j)
                            out = np.array([l,mag,max(np.mean(lc[i][:,2]),0.1)])
                            mag_int.append(out)
                    # Convert full interpolated light curve to np array
                    mag_int = np.array(mag_int)
                    # Sort chronologically
                    tmp = mag_int[np.argsort(mag_int[:,0])]
                    # Now need to check extrapolation to times outside observed range for current band
                    # Polynomial method already did an extrapolation, but polynomial can be bad where there is no data to constrain it!
                    # Here we apply the constant colour method too, and user can check what they prefer
                    # Earliest time in band
                    low = min(lc[i][:,0])
                    # Latest time in band
                    up = max(lc[i][:,0])
                    # Colour wrt reference band at earliest and latest interpolated epochs
                    # (tmp and lc[ref] share the same epoch grid, so a boolean
                    # mask on tmp[:,0] can index lc[ref] directly)
                    col1 = tmp[tmp[:,0]>low][0,1] - lc[ref][tmp[:,0]>low][0,1]
                    col2 = tmp[tmp[:,0]<up][-1,1] - lc[ref][tmp[:,0]<up][-1,1]
                    # Get extrapolated points in current band by adding colour to reference band
                    early = lc[ref][tmp[:,0]<low][:,1]+col1
                    late = lc[ref][tmp[:,0]>up][:,1]+col2
                    # Compute error as random sum of average error in band plus 0.1 mag for every 10 days extrapolated
                    # (dt**2/1e4 == (0.1*dt/10)**2, added in quadrature)
                    tmp[:,2][tmp[:,0]<low] = np.sqrt((low - tmp[:,0][tmp[:,0]<low])**2/1.e4 + np.mean(lc[i][:,2])**2)
                    tmp[:,2][tmp[:,0]>up] = np.sqrt((tmp[:,0][tmp[:,0]>up] - up)**2/1.e4 + np.mean(lc[i][:,2])**2)
                    # Plot light curve from polynomial fit
                    plt.errorbar(tmp[:,0],tmp[:,1],fmt='s',markersize=12,mfc='none',markeredgewidth=3,markeredgecolor=cols[i],label='Polynomial')
                    # Plot constant colour extrapolation
                    plt.errorbar(tmp[tmp[:,0]<low][:,0],early,fmt='o',markersize=12,mfc='none',markeredgewidth=3,markeredgecolor=cols[i],label='Constant colour')
                    plt.errorbar(tmp[tmp[:,0]>up][:,0],late,fmt='o',markersize=12,mfc='none',markeredgewidth=3,markeredgecolor=cols[i])
                    plt.legend(numpoints=1,fontsize=16,ncol=2,frameon=True)
                    plt.tight_layout(pad=0.5)
                    plt.draw()
                    if len(tmp[tmp[:,0]<low])>0:
                        # If there are early extrapolated points, ask user whether they prefer polynomial, constant colour, or want to hedge their bets
                        extraptype = raw_input('\n> Early-time extrapolation:\n  [P-olynomial], c-onstant colour, or a-verage of two methods?\n')
                        # Default to polynomial
                        if not extraptype: extraptype = 'p'
                        if extraptype == 'c':
                            # constant colour
                            tmp[:,1][tmp[:,0]<low]=early
                        if extraptype == 'a':
                            # average
                            tmp[:,1][tmp[:,0]<low]=0.5*(tmp[:,1][tmp[:,0]<low]+early)
                    # If no need to extrapolate:
                    else: extraptype = 'n'
                    # Keep tabs on which extrapolation method was used!
                    intKey += 'early='+extraptype+';'
                    # Now do same for late times
                    if len(tmp[tmp[:,0]>up])>0:
                        extraptype = raw_input('\n> Late-time extrapolation:\n  [P-olynomial], c-onstant colour, or a-verage of two methods?\n')
                        if not extraptype: extraptype = 'p'
                        if extraptype == 'c':
                            tmp[:,1][tmp[:,0]>up]=late
                        if extraptype == 'a':
                            tmp[:,1][tmp[:,0]>up]=0.5*(tmp[:,1][tmp[:,0]>up]+late)
                    else: extraptype = 'n'
                    intKey += 'late='+extraptype
                    # Add the final interpolated and extrapolated light curve to the dictionary
                    lc_int[i] = tmp
        # Key for output file
        intKey += '\n# p = polynomial, c = constant colour, a = average'
    # If user does not want to do interpolation interactively:
    else:
        for i in filters:
            # For every band except reference, use easyint for linear interpolation between points, and constant colour extrapolation
            if i!=ref:
                tmp1,tmp2 = easyint(lc[i][:,0],lc[i][:,1],lc[i][:,2],lc[ref][:,0],lc[ref][:,1])
                tmp = list(zip(lc[ref][:,0],tmp1,tmp2))
                lc_int[i] = np.array(tmp)
        print '\n* Interpolating linearly; extrapolating assuming constant colour...'
        intKey = '\n# All light curves linearly interpolated\n# Extrapolation done by assuming constant colour with reference band ('+ref+')'
    # Need to save interpolated light curves for future re-runs
    # Output layout: col 0 = phase, then (mag, err) pairs blue -> red,
    # matching the input-file convention so the file can be re-read later
    int_out = np.empty([len(lc[ref][:,0]),1+2*len(filters)])
    # Start with reference times
    int_out[:,0] = lc[ref][:,0]
    for i in range(len(filters)):
        # Append magnitudes and errors, in order from bluest to reddest bands
        int_out[:,2*i+1] = lc_int[filters[i]][:,1]
        int_out[:,2*i+2] = lc_int[filters[i]][:,2]
    # Open file in superbol output directory to write light curves
    int_file = open(outdir+'/interpolated_lcs_'+sn+'_'+filters+'.txt','wb')
    # Construct header
    cap = '#phase\t'
    for i in filters:
        # Add a column heading for each filter
        cap = cap+i+'\terr\t'
    cap +='\n'
    # Save to file, including header and footer containing log of interpolation methods
    np.savetxt(int_file,int_out,fmt='%.2f',delimiter='\t',header=cap,footer=intKey,comments='#')
    # Close output file
    int_file.close()
    # Plot interpolated lcs
    print '\n* Displaying all interpolated/extrapolated LCs'
    plt.figure(1)
    plt.clf()
    for i in filters:
        plt.errorbar(lc_int[i][:,0],lc_int[i][:,1],lc_int[i][:,2],fmt='o',color=cols[i],label=i)
    plt.gca().invert_yaxis()
    plt.xlabel(xlab)
    plt.ylabel('Magnitude')
    plt.legend(numpoints=1,fontsize=16,ncol=2,frameon=True)
    # plt.ylim(max(max(lc_int[ref][:,1]),max(lc_int[i][:,1]))+0.5,min(min(lc_int[ref][:,1]),min(lc_int[i][:,1]))-0.5)
    plt.tight_layout(pad=0.5)
    plt.draw()
# Or if light curves were already interpolated, no need for the last 250 lines!
else:
    print '\n* Interpolation already done, skipping step 4!'
    # Put pre-interpolated lcs into dictionary
    lc_int = {}
    for i in filters:
        lc_int[i] = lc[i]
# ----------------------------------------------------------------------
# STEP 5: deredden the interpolated magnitudes using the user-supplied
# E(B-V) and the per-band coefficients in `extco` (extco must contain an
# entry for every band in use), optionally convert Swift/UVOT bands from
# AB to Vega, and ask whether to apply the approximate K-correction.
# ----------------------------------------------------------------------
print '\n######### Step 5: Extinction and K-corrections #########'
# Extinction correction
ebv = raw_input('\n> Please enter Galactic E(B-V): \n'
                '  (0 if data are already extinction-corrected) [0] ')
if not ebv: ebv=0
ebv = float(ebv)
for i in lc_int:
    # Subtract foreground extinction using input E(B-V) and coefficients from YES
    lc_int[i][:,1]-=extco[i]*ebv
# If UVOT bands are in AB, need to convert to Vega
# ('S','D','A' = Swift/UVOT uvw2, uvm2, uvw1 band codes)
if 'S' in lc_int or 'D' in lc_int or 'A' in lc_int:
    shiftSwift = raw_input('\n> UVOT bands detected. These must be in Vega mags.\n'
                           '  Apply AB->Vega correction for these bands? [n] ')
    if not shiftSwift: shiftSwift = 'n'
    if shiftSwift == 'y':
        # Fixed AB->Vega offsets per UVOT band
        if 'S' in lc_int:
            lc_int['S'][:,1] -= 1.51
        if 'D' in lc_int:
            lc_int['D'][:,1] -= 1.69
        if 'A' in lc_int:
            lc_int['A'][:,1] -= 1.73
# Whether to apply approximate K correction
doKcorr = 'n'
# i.e. if we have a redshift:
if skipK == 'n':
    # converting to rest-frame means wavelength /= 1+z and flux *= 1+z. But if input magnitudes were K-corrected, this has already been done implicitly!
    doKcorr = raw_input('\n> Do you want to covert flux and wavelength to rest-frame?\n'
                        '  (skip this step if data are already K-corrected) [n] ')
# ----------------------------------------------------------------------
# Build the wavelength grids used for SED construction:
#   wlref  - band effective wavelengths only (used for blackbody fits)
#   wlref1 - wlref plus the outer half-width edges of the bluest and
#            reddest bands (used for trapezoidal integration, with flux
#            pinned to zero at those edges)
#   fref   - flux zero points per band; bandwidths - band widths.
# Optionally shifts everything to the rest frame.
# ----------------------------------------------------------------------
######### Now comes the main course - time to build SEDs and integrate luminosity
# Build list of wavelengths
wlref = []
# First wavelength is roughly blue edge of bluest band (effective wavelength + half the width)
wlref1 = [wle[filters[0]]-width[filters[0]]/2]
# wlref contains band centres only (for BB fit), whereas wlref1 also has outer band edges (for numerical integration)
# List of flux zeropoints matching wavelengths
fref = []
# List of widths for each band (needed for error estimates)
bandwidths = []
# Loop over used filters and populate lists from dictionaries of band properties
for i in filters:
    wlref.append(float(wle[i]))
    # zero points stored in units of 1e-11 erg/cm2/s/ang
    fref.append(zp[i]*1e-11)
    wlref1.append(float(wle[i]))
    bandwidths.append(float(width[i]))
# Final reference wavelength is red edge of reddest band
wlref1.append(wle[filters[-1]]+width[filters[-1]]/2)
# Flux will be set to zero at red and blue extrema of SED when integrating pseudobolometric light curve
# Make everything a numpy array
wlref1 = np.array(wlref1)
wlref = np.array(wlref)
fref = np.array(fref)
bandwidths = np.array(bandwidths)
# Get phases with photometry to loop over
phase = lc_int[ref][:,0]
# Correct flux and wavelength to rest-frame, if user chose that option earlier
if doKcorr == 'y':
    wlref /= (1+z)
    wlref1 /= (1+z)
    fref *= (1+z)
    bandwidths /= (1+z)
# construct some notes for output file
method = '\n# Methodology:'
method += '\n# filters used:'+filters
method += '\n# redshift used:'+str(z)
method += '\n# extinction used:'+str(ebv)
if doKcorr == 'y':
    method += '\n# Flux and wavelength converted to rest-frame'
else:
    method += '\n# Wavelengths used in observer frame (data already K-corrected?)'
# ----------------------------------------------------------------------
# STEP 6 setup: choose the blackbody-fitting strategy (single BB vs
# separate UV/optical fits; optional power-law suppression of BB flux
# bluewards of `bluecut`), open the output files, and initialise the
# luminosity accumulators for the per-epoch loop that follows.
# ----------------------------------------------------------------------
print '\n######### Step 6: Fit blackbodies and integrate flux #########'
# these are needed to scale and offset SEDs when plotting, to help visibility
# k counts epochs (each SED is offset downwards by k*fscale on the plot)
k = 1
fscale = 4*np.pi*dist**2*zp[ref]*1e-11*10**(-0.4*min(lc[ref][:,1]))
# These lists will be populated with luminosities as we loop through the data and integrate SEDs
# L1*: pseudobolometric (observed bands only); L2*: full bolometric (with BB extrapolations)
L1arr = []
L2arr = []
L1err_arr = []
L2err_arr = []
Lbb_full_arr = []
Lbb_full_err_arr = []
Lbb_opt_arr = []
Lbb_opt_err_arr = []
# Set up some parameters for the BB fits and integrations:
# First, if there are sufficient UV data, best to fit UV and optical separately
# Optical fit gives better colour temperature by excluding line-blanketed region
# UV fit used only for extrapolating bluewards of bluest band
sep = 'n'
# If multiple UV filters
if len(wlref[wlref<3000])>2:
    # Prompt for separate fits
    sep = raw_input('\n> Multiple UV filters detected! Fitting optical and UV separately can\n  give better estimates of continuum temperature and UV flux\n  Fit separately? [y] ')
    # Default is yes
    if not sep: sep = 'y'
else:
    # Cannot do separate UV fit if no UV data!
    sep = 'n'
# If no UV data or user chooses not to do separate fit, allow for suppression in blue relative to BB
# - If UV data, suppress to the blue of the bluest band
# - If no UV data, start suppression at 3000A
# Functional form comes from Nicholl, Guillochon & Berger 2017 / Yan et al 2018:
# - power law in (lambda / lambda_cutoff) joins smoothly to BB at lambda_cutoff
bluecut = 1
# These default parameters give an unattenuated blackbody
sup = 0
if sep == 'n':
    # cutoff wavelength is either the bluest band (if data constrain SED below 3000A), or else fixed at 3000A (where deviation from BB usually starts becoming clear)
    bluecut = float(min(wlref[0],3000))
    # User specifies degree of suppression - higher polynomial order takes flux to zero faster. Value of x~1 is recommended for most cases
    sup = raw_input('\n> Suppression index for BB flux bluewards of '+str(bluecut)+'A?\n  i.e. L_uv(lam) = L_bb(lam)*(lam/'+str(bluecut)+')^x\n  [x=0 (i.e. no suppression)] ')
    # Default is no suppression
    if not sup: sup = 0
    sup = float(sup)
# Open output files for bolometric light curve and blackbody parameters
out1 = open(outdir+'/bol_'+sn+'_'+filters+'.txt','w')
out2 = open(outdir+'/BB_params_'+sn+'_'+filters+'.txt','w')
# Write header for bol file
out1.write('# ph\tLobs\terr\tL+BB\terr\t\n\n')
# Write header for BB params file - if separate UV/optical fits, need another set of columns for the optical-only filts
# T_bb etc are fits to all data, T_opt are fits to data at lambda>3000A (i.e. not affected by line blanketing)
if sep=='y':
    out2.write('# ph\tT_bb\terr\tR_bb\terr\tL_bb\terr\tT_opt\terr\tR_opt\terr\tL_opt\terr\n\n')
else:
    out2.write('# ph\tT_bb\terr\tR_bb\terr\tL_bb\terr\n\n')
# Display various lines for different fitting assumptions, tell user here rather than cluttering figure legend
print '\n*** Fitting Blackbodies to SED ***'
print '\n* Solid line = blackbody fit for flux extrapolation'
if sep=='y':
    # show separate fits to UV and optical, if they exist, and tell output file
    print '* Dashed lines = separate fit to optical and UV for T and R estimates'
    method += '\n# Separate BB fits above/below 3000A'
if sup!=0:
    # plot suppression if used, and tell output file where suppression began and what was the index
    print '* Dotted lines = UV flux with assumed blanketing'
    method += '\n# BB fit below '+str(bluecut)+'A suppressed by factor (lamda/'+str(bluecut)+')^'+str(sup)
if sep!='y' and sup==0:
    # if a single un-suppressed BB was used, add this to output file
    method += '\n# Single BB fit to all wavelengths, with no UV suppression'
# Loop through reference epochs
for i in range(len(phase)):
# get date
ph = phase[i]
# Get list of mags and errors in all filters at each epoch - start with blank arrays to add all filters
mags = np.zeros(len(filters))
errs = np.zeros(len(filters))
for j in range(len(filters)):
# Loop through filters and populate SED tables with interpolated light curves
mags[j] = lc_int[filters[j]][i,1]
errs[j] = lc_int[filters[j]][i,2]
# convert magnitudes to physical fluxes using zeropoints and distance
flux = 4*np.pi*dist**2*fref*10**(-0.4*mags)
# convert mag errors to flux errors
ferr = 2.5/np.log(10) * flux * errs
# Set flux to zero at red and blue extrema matching wlref1
flux1 = np.insert(flux,0,0)
flux1 = np.append(flux1,0)
# Fit blackbody to SED (the one that is not padded with zeros)
BBparams, covar = curve_fit(bbody,wlref,flux,p0=(10000,1e15),sigma=ferr)
# Get temperature and radius, with errors, from fit
T1 = BBparams[0]
T1_err = np.sqrt(np.diag(covar))[0]
R1 = np.abs(BBparams[1])
R1_err = np.sqrt(np.diag(covar))[1]
# Plot SEDs, offset for clarity
plt.figure(2)
plt.errorbar(wlref,flux-fscale*k,ferr,fmt='o',color=cols[filters[k%len(filters)]],label='%.1f' %ph)
plt.plot(np.arange(100,25000),bbody(np.arange(100,25000),T1,R1)-fscale*k,color=cols[filters[k%len(filters)]],linestyle='-')
# Plot UV SED with suppression (matches blackbody if suppression set to zero)
plt.plot(np.arange(100,bluecut),bbody(np.arange(100,bluecut),T1,R1)*(np.arange(100,bluecut)/bluecut)**sup-fscale*k,color=cols[filters[k%len(filters)]],linestyle=':')
# Get pseudobolometric luminosity by trapezoidal integration, with flux set to zero outside of observed bands
L1 = itg.trapz(flux1[np.argsort(wlref1)],wlref1[np.argsort(wlref1)])
# Use flux errors and bandwidths to get luminosity error
L1_err = np.sqrt(np.sum((bandwidths*ferr)**2))
# Add luminosity to array (i.e. pseudobolometric light curve)
L1arr.append(L1)
L1err_arr.append(L1_err)
# Calculate luminosity using alternative method of Stefan-Boltzmann, and T and R from fit
L1bb = 4*np.pi*R1**2*5.67e-5*T1**4
L1bb_err = L1bb*np.sqrt((2*R1_err/R1)**2+(4*T1_err/T1)**2)
# Get UV luminosity (i.e. bluewards of bluest band)
Luv = itg.trapz(bbody(np.arange(100,bluecut),T1,R1)*(np.arange(100,bluecut)/bluecut)**sup,np.arange(100,bluecut))
if bluecut < wlref[0]:
# If no UV data and cutoff defaults to 3000A, need to further integrate (unabsorbed) BB from cutoff up to the bluest band
Luv += itg.trapz(bbody(np.arange(bluecut,wlref[0]),T1,R1),np.arange(bluecut,wlref[0]))
# Use uncertainty in BB fit T and R to estimate error in UV flux
Luv_err = Luv*np.sqrt((2*R1_err/R1)**2+(4*T1_err/T1)**2)
# NIR luminosity from integrating blackbody above reddest band
Lnir = itg.trapz(bbody(np.arange(wlref[-1],25000),T1,R1),np.arange(wlref[-1],25000))
Lnir_err = Lnir*np.sqrt((2*R1_err/R1)**2+(4*T1_err/T1)**2)
# Treating UV and optical separately if user so decided:
if sep=='y':
# Used to occasionally crash, wrap in try statement
try:
# Fit BB only to data above 3000A
BBparams, covar = curve_fit(bbody,wlref[wlref>3000],flux[wlref>3000],p0=(10000,1e15),sigma=ferr[wlref>3000])
# This gives better estimate of optical colour temperature
Topt = BBparams[0]
Topt_err = np.sqrt(np.diag(covar))[0]
Ropt = np.abs(BBparams[1])
Ropt_err = np.sqrt(np.diag(covar))[1]
# Calculate luminosity predicted by Stefan-Boltzmann law for optical T and R
L2bb = 4*np.pi*Ropt**2*5.67e-5*Topt**4
L2bb_err = L2bb*np.sqrt((2*Ropt_err/Ropt)**2+(4*Topt_err/Topt)**2)
# Use this BB fit to get NIR extrapolation, rather than the fit that included UV
Lnir = itg.trapz(bbody(np.arange(wlref[-1],25000),Topt,Ropt),np.arange(wlref[-1],25000))
Lnir_err = Lnir*np.sqrt((2*Ropt_err/Ropt)**2+(4*Topt_err/Topt)**2)
# Now do the separate fit to the UV
# Because of line blanketing, this temperature and radius are not very meaningful physically, but shape of function useful for extrapolating flux bluewards of bluest band
BBparams, covar = curve_fit(bbody,wlref[wlref<4000],flux[wlref<4000],p0=(10000,1e15),sigma=ferr[wlref<4000])
Tuv = BBparams[0]
Tuv_err = np.sqrt(np.diag(covar))[0]
Ruv = np.abs(BBparams[1])
Ruv_err = np.sqrt(np.diag(covar))[1]
Luv = itg.trapz(bbody(np.arange(100,wlref[0]),Tuv,Ruv),np.arange(100,wlref[0]))
Luv_err = Luv*np.sqrt((2*Ruv_err/Ruv)**2+(4*Tuv_err/Tuv)**2)
# Plot UV- and optical-only BBs for comparison to single BB
plt.figure(2)
plt.plot(np.arange(3000,25000),bbody(np.arange(3000,25000),Topt,Ropt)-fscale*k,color=cols[filters[k%len(filters)]],linestyle='--',linewidth=1.5)
plt.plot(np.arange(100,3600),bbody(np.arange(100,3600),Tuv,Ruv)-fscale*k,color=cols[filters[k%len(filters)]],linestyle='-.',linewidth=1.5)
except:
# If UV fits failed, just write out the single BB fits
Topt,Topt_err,Ropt,Ropt_err,L2bb,L2bb_err = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
# Write out BB params, and optical-only BB params, to file
out2.write('%.2f\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\n' %(ph,T1,T1_err,R1,R1_err,L1bb,L1bb_err,Topt,Topt_err,Ropt,Ropt_err,L2bb,L2bb_err))
else:
# If separate fits were not used, just write out the single BB fits
out2.write('%.2f\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\t%.2e\n' %(ph,T1,T1_err,R1,R1_err,L1bb,L1bb_err))
# Estimate total bolometric luminosity as integration over observed flux, plus corrections in UV and NIR from the blackbody extrapolations
# If separate UV fit was used, Luv comes from this fit and Lnir comes from optical-only fit
# If no separate fits, Luv and Lnir come from the same BB (inferior fit and therefore less accurate extrapolation)
L2 = Luv + itg.trapz(flux,wlref) + Lnir
# Add errors on each part of the luminosity in quadrature
L2_err = np.sqrt(L1_err**2 + (Luv_err)**2 + (Lnir_err)**2)
# Append to light curve
L2arr.append(L2)
L2err_arr.append(L2_err)
# Write light curve to file: L1 is pseudobolometric, L2 is full bolometric
out1.write('%.2f\t%.2e\t%.2e\t%.2e\t%.2e\n' %(ph,L1,L1_err,L2,L2_err))
plt.draw()
plt.xlabel('Wavelength (Ang)')
plt.ylabel(r'$\mathit{L}_\lambda$ + constant')
plt.legend(numpoints=1,ncol=2,fontsize=16,frameon=True)
# Counter shifts down next SED on plot for visibility
k += 1
plt.figure(2)
plt.yticks([])
plt.xlim(min(wlref)-2000,max(wlref)+3000)
plt.tight_layout(pad=0.5)
# Add methodologies and keys to output files so user knows which approximations were made in this run
out1.write('\n#KEY\n# Lobs = integrate observed fluxes with no BB fit\n# L+BB = observed flux + BB fit extrapolation')
out1.write('\n# See logL_obs_'+sn+'_'+filters+'.txt and logL_bb_'+sn+'_'+filters+'.txt for simple LC files')
out1.write(method)
out2.write('\n#KEY\n# _bb = blackbody fit to all wavelengths, _opt = fit only data redwards of 3000A\n# L_bb = luminosity from Stefan-Boltzman; L_opt = same but using T_opt and R_opt')
out2.write('\n# (in contrast, bol_'+sn+'_'+filters+'.txt file contains trapezoidal integration over observed wavelengths)')
# Close output files
out1.close()
out2.close()
# Make final light curves into numpy arrays
L1arr = np.array(L1arr)
L1err_arr = np.array(L1err_arr)
L2arr = np.array(L2arr)
L2err_arr = np.array(L2err_arr)
print '\n\n*** Done! Displaying bolometric light curve ***'
# Save convenient log versions of light curves
logout = np.array(list(zip(phase,np.log10(L1arr),0.434*L1err_arr/L1arr)))
logoutBB = np.array(list(zip(phase,np.log10(L2arr),0.434*L2err_arr/L2arr)))
np.savetxt(outdir+'/logL_obs_'+sn+'_'+filters+'.txt',logout,fmt='%.3f',delimiter='\t')
np.savetxt(outdir+'/logL_bb_'+sn+'_'+filters+'.txt',logoutBB,fmt='%.3f',delimiter='\t')
# Plot final outputs
plt.figure(3,(8,8))
plt.clf()
plt.subplot(311)
# Plot pseudobolometric and bolometric (including BB) light curves (logarithmic versions)
plt.errorbar(logout[:,0],logout[:,1],logout[:,2],fmt='o',color='k',markersize=12,label='Observed flux only')
plt.errorbar(logoutBB[:,0],logoutBB[:,1],logoutBB[:,2],fmt='d',color='r',markersize=9,label='Plus BB correction')
plt.ylabel(r'$log_{10} \mathit{L}_{bol}\,(erg\,s^{-1})$')
plt.legend(numpoints=1,fontsize=16)
plt.xticks(visible=False)
# Get blackbody temperature and radius
bbresults = np.genfromtxt(outdir+'/BB_params_'+sn+'_'+filters+'.txt')
# Plot temperature in units of 10^3 K
plt.subplot(312)
plt.errorbar(bbresults[:,0],bbresults[:,1]/1e3,bbresults[:,2]/1e3,fmt='o',color='k',markersize=12,label='Fit all bands')
plt.ylabel(r'$\mathit{T}_{BB}\,(10^3K)$')
plt.xticks(visible=False)
if len(bbresults[0])==13:
# If separate fit to optical-only, plot this too
plt.errorbar(bbresults[:,0],bbresults[:,7]/1e3,bbresults[:,8]/1e3,fmt='s',color='c',markersize=8,label=r'Fit >3000$\AA$')
plt.legend(numpoints=1,fontsize=16)
# Plot radius in units of 10^15 cm
plt.subplot(313)
plt.errorbar(bbresults[:,0],bbresults[:,3]/1e15,bbresults[:,4]/1e15,fmt='o',color='k',markersize=12,label='Fit all bands')
plt.ylabel(r'$\mathit{R}_{BB}\,(10^{15}cm)$')
if len(bbresults[0])==13:
plt.errorbar(bbresults[:,0],bbresults[:,9]/1e15,bbresults[:,10]/1e15,fmt='s',color='c',markersize=8,label='Exclude UV')
# X-label for all subplots
plt.xlabel(xlab)
plt.subplots_adjust(hspace=0)
plt.tight_layout(pad=0.5)
plt.draw()
plt.show()
plt.figure(1)
plt.savefig(outdir+'/interpolated_lcs_'+sn+'_'+filters+'.pdf')
plt.figure(2)
plt.savefig(outdir+'/bb_fits_'+sn+'_'+filters+'.pdf')
plt.figure(3)
plt.savefig(outdir+'/results_'+sn+'_'+filters+'.pdf')
# Wait for key press before closing plots!
fin = raw_input('\n\n> PRESS RETURN TO EXIT...\n')
|
mnichollREPO_NAMEsuperbolPATH_START.@superbol_extracted@superbol-master@superbol-py2.py@.PATH_END.py
|
{
"filename": "testTreeNeighbor.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/unit/Neighbor/testTreeNeighbor.py",
"type": "Python"
}
|
#ATS:test(SELF, np=1, level=100, label="TreeNeighbor unit tests")
from math import *

import unittest
import random

from Spheral import *
from NeighborTestBase import *

#===============================================================================
# Point each generic neighbor test case at the TreeNeighbor implementation of
# the matching dimensionality (random distributions in 1/2/3-D, plus the
# cylindrical 2-D distribution).
#===============================================================================
for _case, _neighbor in ((NeighborRandom1d,      TreeNeighbor1d),
                         (NeighborRandom2d,      TreeNeighbor2d),
                         (NeighborRandom3d,      TreeNeighbor3d),
                         (NeighborCylindrical2d, TreeNeighbor2d)):
    _case._NeighborType = _neighbor

#===============================================================================
# Run the tests.
#===============================================================================
if __name__ == "__main__":
    unittest.main()
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@unit@Neighbor@testTreeNeighbor.py@.PATH_END.py
|
{
"filename": "check_target_acquisition_time.py",
"repo_name": "igrins/plp",
"repo_path": "plp_extracted/plp-master/utils/check_target_acquisition_time.py",
"type": "Python"
}
|
import numpy as np
import re

# UT date of the night to inspect; used to locate the IGRINS DT log file.
utdate = "20140710"
fn = "indata/%s/IGRINS_DT_Log_%s-1_H.txt" % (utdate, utdate)

# Some log rows end with a trailing comma; strip it so that every row has the
# same number of comma-separated fields before handing the text to genfromtxt.
p_end_comma = re.compile(r",$")
s = "".join(p_end_comma.sub("", l) for l in open(fn))

# Column layout of the DT log (Python 2: strings read as raw byte strings).
dtype=[('FILENAME', 'S128'), ('OBSTIME', 'S128'), ('GROUP1', 'i'), ('GROUP2', 'i'), ('OBJNAME', 'S128'), ('OBJTYPE', 'S128'), ('FRAMETYPE', 'S128'), ('EXPTIME', 'd'), ('ROTPA', 'd'), ('RA', 'S128'), ('DEC', 'S128'), ('AM', 'd')]
from StringIO import StringIO
l = np.genfromtxt(StringIO(s),
                  names=True, skip_header=1, delimiter=",", dtype=dtype)

from itertools import groupby

# Exposures sharing these keys belong to the same observing block (e.g. one
# ABBA sequence).  itertools.groupby only merges *consecutive* rows, which is
# the desired behaviour for a time-ordered log.
groupby_keys = ["OBJNAME", "OBJTYPE", "GROUP1", "GROUP2", "EXPTIME"]
def keyfunc(l1):
    # Key used to group consecutive log rows into observing blocks.
    return tuple(l1[k] for k in groupby_keys)

#s_list = []
import datetime

# OBSTIME carries no date component, so anchor all times on an arbitrary day.
# NOTE(review): this assumes no observing block straddles midnight -- confirm.
today = datetime.date.today()

# End time (including the final exposure) of the previous target/standard block.
last_end_time = None
for lll in groupby(l, keyfunc):
    grouper = list(lll[1])
    # Running exposure numbers parsed from the FITS file names.
    obsids = [int(lll1[0].split(".")[0].split("_")[-1]) for lll1 in grouper]
    frametypes = [lll1["FRAMETYPE"] for lll1 in grouper]
    objtype = lll[0][1]
    # OBSTIME (column 1, "HH:MM:SS") marks the *start* of each exposure.
    abba_start_time_ = datetime.time(*map(int, grouper[0][1].split(":")))
    abba_end_time_ = datetime.time(*map(int, grouper[-1][1].split(":")))
    abba_start_time = datetime.datetime.combine(today, abba_start_time_)
    abba_end_time = datetime.datetime.combine(today, abba_end_time_)
    if len(grouper) > 1:
        # Estimate the exposure length from the gap between the last two
        # exposure start times in the block.
        abba_end2_time_ = datetime.time(*map(int, grouper[-2][1].split(":")))
        abba_end2_time = datetime.datetime.combine(today, abba_end2_time_)
        exptime = abba_end_time - abba_end2_time
    else:
        # Single exposure: fall back on the logged EXPTIME (seconds).
        exptime = datetime.timedelta(seconds=float(grouper[0][7]))
    # The block really ends one exposure length after the last start time.
    abba_end_time_real = abba_end_time + exptime
    #print grouper[0][4], abba_start_time, abba_end_time
    if objtype.lower() in ["std", "tar"]:
        #print grouper[0][4], abba_start_time, abba_end_time
        if last_end_time:
            # Acquisition overhead: gap between the end of the previous
            # on-sky (target/standard) block and the start of this one.
            print grouper[0][4], abba_start_time - last_end_time
        # NOTE(review): indentation reconstructed from a flattened source --
        # last_end_time is taken to update only for target/standard blocks.
        last_end_time = abba_end_time_real
|
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@utils@check_target_acquisition_time.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "atomec-project/atoMEC",
"repo_path": "atoMEC_extracted/atoMEC-master/tests/dev_tests/README.md",
"type": "Markdown"
}
|
# Development test suite
## Overview
This directory contains the necessary code and data to enable the generation and execution of development tests for the atoMEC project. These tests are designed to evaluate the _performance_ of the code, with a focus on the `CalcEnergy` function, its related components, and behavior under extreme edge cases. They are distinct from the CI tests, which are designed to check the _correctness_ of the code across the full codebase. They are not mandatory but are recommended for developers making significant changes to performance-critical parts of the code, especially when modifications impact the execution time observed in CI tests.
## Development testing tools
The development tests themselves are not directly included. Instead, the repository provides the necessary tools to generate and run these tests:
- `benchmarking.py`: The core module containing functions to set up the benchmarking environment
- `pressure_benchmarks.csv`: The dataset containing parameters for generating test cases
- `test.py`: The template for creating individual test scripts
- `submit.slurm`: A sample SLURM submission script for use on HPC systems
- `run_benchmark_tests.py`: A script that demonstrates how to run the entire testing workflow using the provided tools
- `comp_benchmark_tests.py`: A script that compares the results from two csv files generated from `run_benchmark_tests.py`
## Environment assumption
The testing workflow currently assumes that atoMEC is operated within a Conda virtual environment.
## Execution Instructions
The full testing workflow can be run on a slurm-based HPC system with the `run_benchmark_tests.py` script. The script needs to be first run in "setup_and_run" mode, which sets up the calculations and submits them to the slurm system (these steps can also be run separately if preferred). Then it should be run in "evaluate" mode, to collect and summarize the results.
## Evaluation and benchmarking protocol
Benchmarking should be conducted against the results from the most recent iteration of the development branch. This means that *two* testing workflows should be set up, one for the branch being submitted as a PR, and one for atoMEC's development branch. After generating the results, performance can be compared by running the `comp_benchmark_tests.py` script. The most important benchmark is considered to be the "Average time % difference", an average of the row-by-row percentage difference between the times taken.
|
atomec-projectREPO_NAMEatoMECPATH_START.@atoMEC_extracted@atoMEC-master@tests@dev_tests@README.md@.PATH_END.py
|
{
"filename": "generate_DESI_mean.py",
"repo_name": "dkirkby/gphist",
"repo_path": "gphist_extracted/gphist-master/generate_DESI_mean.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""Plot expansion history inferences.
"""
import argparse
import numpy as np
# matplotlib is imported inside main()
from scipy.optimize import minimize
import gphist
from scipy import interpolate
def main():
    """
    Drive the script: parse command-line arguments, load a gphist .npz
    results file, compute DH/DA confidence limits for one hard-coded
    posterior permutation, and write DESI-binned mean and LCDM curves
    to 'DESI_means.txt' and 'DESI_lcdm.txt'.
    """
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input',type = str, default = None,
        help = 'name of input file to read (extension .npz will be added)')
    parser.add_argument('--posterior', type = str, action='append', metavar = 'NAME',
        help = 'posteriors to plot (can be repeated, plot all if omitted)')
    parser.add_argument('--nlp', action = 'store_true',
        help = 'show plots of posterior -log(P) marginalized over hyperparameters')
    parser.add_argument('--full', action = 'store_true',
        help = 'show plots of DH/DH0,DA/DA0 evolution over the full redshift range')
    parser.add_argument('--zoom', action = 'store_true',
        help = 'show plots of DH,DA on a linear scale up to redshift zmax')
    parser.add_argument('--dark-energy', action = 'store_true',
        help = 'show plots of H(z)/(1+z) and Omega_DE(z)/Omega_DE(0) up to redshift zmax')
    parser.add_argument('--growth', action = 'store_true',
        help = 'show plots of phi(lna)')
    parser.add_argument('--zmax', type = float, default = 3.0,
        help = 'maximum redshift to display on H(z)/(1+z) plot')
    parser.add_argument('--level', type = float, default = 0.9,
        help = 'confidence level to plot')
    parser.add_argument('--examples', action = 'store_true',
        help = 'include examples of random realizations in each plot')
    parser.add_argument('--output', type = str, default = None,
        help = 'base name for saving plots (no plots are saved if not set)')
    parser.add_argument('--show', action = 'store_true',
        help = 'show each plot (in addition to saving it if output is set)')
    parser.add_argument('--plot-format', type = str, default = 'png', metavar = 'FMT',
        help = 'format for saving plots (png,pdf,...)')
    args = parser.parse_args()
    # Do we have any inputs to read?
    if args.input is None:
        print 'Missing required input arg.'
        return -1
    # Do we have anything to plot?
    num_plot_rows = args.full + args.zoom + args.dark_energy
    #if num_plot_rows == 0 and not args.nlp and not args.growth:
    #print 'No plots selected.'
    #return 0
    if not args.output and not args.show:
        print 'No output requested.'
    if args.examples:
        print 'Option --examples not implemented yet.'
        return -1
    # Initialize matplotlib.
    import matplotlib as mpl
    if not args.show:
        # Use the non-interactive Agg backend, which does not require X11 on
        # unix systems.
        mpl.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches
    from matplotlib import gridspec
    # Load the input file.
    loaded = np.load(args.input + '.npz')
    #f_loaded = np.load('npz_files/phi_test/test2.0.npz')
    DH_hist = loaded['DH_hist']
    DA_hist = loaded['DA_hist']
    de_hist = loaded['de_hist']
    #f_hist = f_loaded['phi_hist']
    #print 'the shape of the combined phi histogram is '+str(phi_hist.shape)
    #print 'the shape of the combined DH histogram is '+str(DH_hist.shape)
    #phi_realizations = loaded['phi_realizations']
    DH0 = loaded['DH0']
    DA0 = loaded['DA0']
    de0 = loaded['de0']
    #f0 = f_loaded['phi0']
    zvalues = loaded['zvalues']
    # Log of the scale factor corresponding to zvalues (in reversed order).
    lna=-np.log(1 + zvalues[::-1])
    fixed_options = loaded['fixed_options']
    bin_range = loaded['bin_range']
    hyper_range = loaded['hyper_range']
    posterior_names = loaded['posterior_names']
    # The -log(P) array is only present if this file was written by combine.py
    npost = len(posterior_names)
    perms = gphist.analysis.get_permutations(npost)
    # NOTE(review): the permutation index is hard-coded; 31 selects one
    # specific combination of posteriors -- confirm it matches the intended
    # dataset before reuse.
    iperm =31
    name = '-'.join(posterior_names[perms[iperm]]) or 'Prior'
    print '%d : %s' % (iperm,name)
    # Calculate the confidence bands of DH/DH0 and DA/DA0.
    DH_ratio_limits = gphist.analysis.calculate_confidence_limits(
        DH_hist[iperm],[args.level],bin_range)
    DA_ratio_limits = gphist.analysis.calculate_confidence_limits(
        DA_hist[iperm],[args.level],bin_range)
    # Convert to limits on DH, DA, with DA limits extended to z=0.
    DH_limits = DH_ratio_limits*DH0
    DA_limits = np.empty_like(DH_limits)
    DA_limits[:,1:] = DA_ratio_limits*DA0[1:]
    DA_limits[:,0] = 0.
    # DESI-like redshift bins at which the curves are tabulated below.
    new_z_values = np.array([0.65,0.75,0.85,0.95,1.05,1.15,1.25,1.35,1.45,1.55,1.65,1.75,1.85,
        1.96,2.12,2.28,2.43,2.59,2.75,2.91,3.07,3.23,3.39,3.55])
    # Interpolate row 1 of the limits onto the DESI bins.
    # NOTE(review): index 1 is presumably the central (median) curve of the
    # confidence-limit array -- confirm against calculate_confidence_limits.
    DA_interp = interpolate.interp1d(zvalues,DA_limits[1])
    DH_interp = interpolate.interp1d(zvalues,DH_limits[1])
    new_DA_val = DA_interp(new_z_values)
    new_DH_val = DH_interp(new_z_values)
    # The fiducial (LCDM) curves, interpolated onto the same bins.
    DA0_interp = interpolate.interp1d(zvalues,DA0)
    DH0_interp = interpolate.interp1d(zvalues,DH0)
    new_DA_lcdm = DA0_interp(new_z_values)
    new_DH_lcdm = DH0_interp(new_z_values)
    # Each output file holds three rows: z, DH(z), DA(z).
    np.savetxt('DESI_means.txt',(new_z_values,new_DH_val,new_DA_val))
    np.savetxt('DESI_lcdm.txt',(new_z_values,new_DH_lcdm,new_DA_lcdm))
# Script entry point.
if __name__ == '__main__':
    main()
|
dkirkbyREPO_NAMEgphistPATH_START.@gphist_extracted@gphist-master@generate_DESI_mean.py@.PATH_END.py
|
{
"filename": "PGIR_DR1_light_curves.ipynb",
"repo_name": "astro-datalab/notebooks-latest",
"repo_path": "notebooks-latest_extracted/notebooks-latest-master/03_ScienceExamples/PGIRLightCurves/PGIR_DR1_light_curves.ipynb",
"type": "Jupyter Notebook"
}
|
```python
__nbid__ = '0069'
__author__ = 'Ryan M. Lau <ryan.lau@noirlab.edu>, Kishalay De <kde1@mit.edu>, Alice Jacques <alice.jacques@noirlab.edu>, Astro Data Lab Team <datalab@noirlab.edu>'
__version__ = '20240927' # yyyymmdd
__datasets__ = ['pgir_dr1']
__keywords__ = ['lightcurve', 'photometry', 'time domain', 'Lomb-Scargle']
```
## J-band light curves from Palomar Gattini-IR DR1 catalog
*Ryan M. Lau (NSF NOIRLab), Kishalay De (MIT), Shion Murakawa (MIT), Alice Jacques (NSF NOIRLab/CSDC), & Astro Data Lab Team*
### Table of contents
* [Goals & notebook summary](#goals)
* [Disclaimer & attribution](#attribution)
* [Imports & setup](#import)
* [Input search coordinates and generate light curve](#lightcurve)
* [Lomb-Scargle periodogram analysis](#lombscargle)
* [Resources and references](#resources)
<a class="anchor" id="goals"></a>
# Goals & notebook summary
The goal of this notebook is to demonstrate how to generate light curves from the **Palomar Gattini-IR (PGIR) Data Release 1 (DR1)** catalog of infrared J-band light curves and also perform a basic Lomb-Scargle periodogram analysis to identify a period.
**Description of Palomar Gattini-IR:** PGIR is a wide-field, robotic, near-infrared time domain survey covering the entire visible night sky north of declination -28.5 at a median cadence of 2 nights. The survey operates in a single filter (J-band, calibrated to the 2MASS system), with a single exposure field of view of 25 square degrees and a native pixel scale of 8.7 arcsec/pixel. Further details about the instrument and data reduction system can be found in [De et al. (2020)](https://ui.adsabs.harvard.edu/abs/2020PASP..132b5001D/abstract). Light curves are extracted by performing Point Spread Function (PSF) photometry on the stacked images from each field visit, with the entire observing footprint divided into 1,329 fields. The first data release contains J-band light curves of approximately 286 million sources from the 2MASS catalog, with a total of approximately 50 billion photometric measurements [(Murakawa et al. 2024)](https://ui.adsabs.harvard.edu/abs/2024arXiv240601720M/abstract).
**Science Example:** The science target in this notebook is the enigmatic NaSt1 system (also known as LS IV +005 and WR 122), which is thought to be an early-type Wolf-Rayet system enshrouded by a dense nebula. We will demonstrate how the PGIR light curves can be used to identify the periodic variability from this system, which was presented by [(Lau et al. 2021)](https://ui.adsabs.harvard.edu/abs/2021ApJ...922....5L/abstract).
<a class="anchor" id="attribution"></a>
# Disclaimer & attribution
Disclaimers
-----------
Note that using the Astro Data Lab constitutes your agreement with our minimal [Disclaimers](https://datalab.noirlab.edu/disclaimers.php).
Acknowledgments
---------------
If you use **Astro Data Lab** in your published research, please include the text in your paper's Acknowledgments section:
_This research uses services or data provided by the Astro Data Lab, which is part of the Community Science and Data Center (CSDC) Program of NSF NOIRLab. NOIRLab is operated by the Association of Universities for Research in Astronomy (AURA), Inc. under a cooperative agreement with the U.S. National Science Foundation._
If you use **SPARCL jointly with the Astro Data Lab platform** (via JupyterLab, command-line, or web interface) in your published research, please include this text below in your paper's Acknowledgments section:
_This research uses services or data provided by the SPectra Analysis and Retrievable Catalog Lab (SPARCL) and the Astro Data Lab, which are both part of the Community Science and Data Center (CSDC) Program of NSF NOIRLab. NOIRLab is operated by the Association of Universities for Research in Astronomy (AURA), Inc. under a cooperative agreement with the U.S. National Science Foundation._
In either case **please cite the following papers**:
* Data Lab concept paper: Fitzpatrick et al., "The NOAO Data Laboratory: a conceptual overview", SPIE, 9149, 2014, https://doi.org/10.1117/12.2057445
* Astro Data Lab overview: Nikutta et al., "Data Lab - A Community Science Platform", Astronomy and Computing, 33, 2020, https://doi.org/10.1016/j.ascom.2020.100411
If you are referring to the Data Lab JupyterLab / Jupyter Notebooks, cite:
* Juneau et al., "Jupyter-Enabled Astrophysical Analysis Using Data-Proximate Computing Platforms", CiSE, 23, 15, 2021, https://doi.org/10.1109/MCSE.2021.3057097
If publishing in a AAS journal, also add the keyword: `\facility{Astro Data Lab}`
And if you are using SPARCL, please also add `\software{SPARCL}` and cite:
* Juneau et al., "SPARCL: SPectra Analysis and Retrievable Catalog Lab", Conference Proceedings for ADASS XXXIII, 2024
https://doi.org/10.48550/arXiv.2401.05576
The NOIRLab Library maintains [lists of proper acknowledgments](https://noirlab.edu/science/about/scientific-acknowledgments) to use when publishing papers using the Lab's facilities, data, or services.
<a class="anchor" id="import"></a>
# Imports and setup
Importing the basic python modules, the `LombScargle` function from astropy, and the `queryClient` from Astro Data Lab.
```python
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
from astropy.timeseries import LombScargle
from dl import queryClient as qc
```
<a class="anchor" id="lightcurve"></a>
# Input search coordinates and generate light curve
### Enter RA and Dec (in degrees) of an object and cone-search radius
As an example to demonstrate the utility of PGIR DR1, we will investigate the IR variability from the heavily enshrouded massive star system NaSt1 / WR122 [(Lau et al. 2021)](https://ui.adsabs.harvard.edu/abs/2021ApJ...922....5L/abstract). We will use a 0.01 degree cone-search radius to demonstrate how searches are handled when multiple 2MASS sources are identified.
```python
#Coordinates of NaSt1
ra = 283.07312
dec = 0.99564
search_rad = 0.01 # search radius in degrees
```
### PGIR DR1 is composed of the three following tables:
`exposures` - Metadata of each exposure included in the catalog
`photometry` - Individual photometric measurements for each source in the catalog
`sources` - Metadata and statistical moments of photometry for each 2MASS source
The columns and descriptions of each table can be found in Table 1 of [Murakawa et al. (2024)](https://ui.adsabs.harvard.edu/abs/2024arXiv240601720M/abstract) and the [PGIR DR1 table browser](https://datalab.noirlab.edu/query.php?name=pgir_dr1.sources) at Astro Data Lab.
We will conduct our coordinate search on the `sources` table to identify the 2MASS sources that fall within our coordinates. Using a table join on the unique 2MASS ID (`pts_key`) from the `sources` table, we can obtain the PGIR photometry of each 2MASS source from the `photometry` table. Our result table will provide the following:
`tmcra` - 2MASS right ascension of source
`tmcdec` - 2MASS declination of source
`obsjd` - Julian date of exposure start
`pts_key` - 2MASS ID number of source
`magpsf` - J-band magnitude of PGIR exposure of source
`magpsferr` - Image noise magnitude error of PGIR exposure of source
`magpsfstaterr` - Statistical magnitude error of PGIR exposure of source
`flags` - Bit-value of the flags of exposure of source
```python
q = f"""SELECT S.pts_key, S.tmcra, S.tmcdec, P.obsjd, P.magpsf, P.magpsferr, P.magpsfstaterr, P.flags
FROM pgir_dr1.sources as S
JOIN pgir_dr1.photometry as P on S.pts_key = P.pts_key
WHERE q3c_radial_query(S.tmcra, S.tmcdec, {ra}, {dec}, {search_rad})
"""
re = qc.query(sql=q, fmt='pandas')
print('%i unique 2MASS sources found' % len(np.unique(re['pts_key'])))
```
17 unique 2MASS sources found
The search has identified 17 unique 2MASS sources from the search:
```python
np.unique(re['pts_key'])
```
array([673172927, 673172931, 673172956, 673172967, 673173015, 673173034,
673173036, 673173040, 673173065, 673173067, 673173083, 673173090,
673173119, 673173147, 673173151, 673173181, 673173185])
The following cell identifies the closest source based on its proximity to the search coordinates and defines a new light curve table of that closest source.
```python
re['dist'] = np.sqrt((ra-re['tmcra'])**2+(dec-re['tmcdec'])**2)
re = re.sort_values('dist')
re = re.reset_index(drop=True)
re_best = re[re['pts_key']==re['pts_key'][0]]
re_best
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>pts_key</th>
<th>tmcra</th>
<th>tmcdec</th>
<th>obsjd</th>
<th>magpsf</th>
<th>magpsferr</th>
<th>magpsfstaterr</th>
<th>flags</th>
<th>dist</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2459867.8</td>
<td>9.885060</td>
<td>5.506587e-02</td>
<td>0.003874</td>
<td>32</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>1</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2459722.8</td>
<td>9.639231</td>
<td>5.945242e-02</td>
<td>0.003415</td>
<td>33</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>2</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2459721.8</td>
<td>9.656757</td>
<td>6.217199e-02</td>
<td>0.003503</td>
<td>33</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>3</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2459719.8</td>
<td>9.629660</td>
<td>4.645084e-02</td>
<td>0.003359</td>
<td>33</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>4</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2459717.8</td>
<td>9.679220</td>
<td>5.606063e-02</td>
<td>0.003443</td>
<td>33</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>1605</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2459375.8</td>
<td>9.688159</td>
<td>6.854054e-02</td>
<td>0.003479</td>
<td>33</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>1606</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2459374.8</td>
<td>9.918835</td>
<td>7.887600e-02</td>
<td>0.004528</td>
<td>33</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>1607</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2459386.8</td>
<td>10.392316</td>
<td>1.221390e-01</td>
<td>0.004804</td>
<td>33</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>1608</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2458425.5</td>
<td>29.796688</td>
<td>9.745351e+06</td>
<td>NaN</td>
<td>36</td>
<td>1.000000e-06</td>
</tr>
<tr>
<th>1609</th>
<td>673173034</td>
<td>283.07312</td>
<td>0.995639</td>
<td>2458420.5</td>
<td>inf</td>
<td>inf</td>
<td>NaN</td>
<td>36</td>
<td>1.000000e-06</td>
</tr>
</tbody>
</table>
<p>1610 rows × 9 columns</p>
</div>
In order to filter out spurious detections, we apply a signal-to-noise ratio (SNR) threshold using both the statistical magnitude error (`magpsfstaterr`) and the sampled magnitude error (`magpsferr`). Negative errors are also filtered out.
```python
snr_thresh = 10
positive_err = ((re_best['magpsferr'] > 0) &
(re_best['magpsfstaterr'] > 0) &
(1 / re_best['magpsfstaterr'] > snr_thresh) &
(1 / re_best['magpsferr'] > snr_thresh))
re_best = re_best[positive_err]
```
The following cell reads the quality flags for each epoch. Uncomment flags you wish to apply to your data. In this notebook, we will not utilize the flags.
Note that NaSt1 has nearby sources that lead to aperture contamination, which will lead to many of the epochs having the aperture contamination (F6) flag. However, since we are only interested in the variability, the contamination is not a major issue.
```python
flags = re_best['flags']
procflags = []
for flagval in flags:
totflag = True
if flagval - 32 >= 0:
#totflag = False # remove measurements with aperture contamination (F6).
flagval -= 32
if flagval - 16 >= 0:
#totflag = False # remove measurements with zero point deviation (F5).
flagval -= 16
if flagval - 8 >= 0:
#totflag = False # remove measurements with bad airmass (F4).
flagval -= 8
if flagval - 4 >= 0:
#totflag = False # remove measurements with magnitudes outside of that is recommended (F3).
flagval -= 4
if flagval - 2 >= 0:
#totflag = False # remove measurements with pixel saturation (F2).
flagval -= 2
if flagval - 1 >= 0:
#totflag = False # remove measurements on the west meridian side (F1).
flagval -= 1
procflags.append(totflag)
```
### Plotting PGIR J-band Light Curve
We can now plot the PGIR J-band light curve of our target (NaSt1; `pts_key = 673173034`) with photometry that has passed our SNR threshold cut and the flags that we set in the previous cell.
```python
plt.figure(figsize=(10,6))
plt.errorbar(re_best[procflags]['obsjd'], re_best[procflags]['magpsf'],
fmt='o', color='k', ms=6, ls='', zorder=10, label='PGIR photometry')
plt.errorbar(re_best[procflags]['obsjd'], re_best[procflags]['magpsf'],
yerr = re_best[procflags]['magpsfstaterr'],
fmt=',', color='none', ms=6, zorder=20, elinewidth=0.9, ecolor='c',
label='Statistical magnitude error')
plt.errorbar(re_best[procflags]['obsjd'], re_best[procflags]['magpsf'],
yerr = re_best[procflags]['magpsferr'],
fmt=',', ms=6, zorder=0, elinewidth=0.9, ecolor='orange',
label='Sampled magnitude error')
plt.xlabel('JD')
plt.ylabel('J magnitude')
plt.title(f"PGIR light curve for {re_best['pts_key'][0]} (RA = {re_best['tmcra'][0]}, Dec = {re_best['tmcdec'][0]})")
plt.legend()
plt.show()
```

<a class="anchor" id="lombscargle"></a>
# Lomb-Scargle periodogram analysis
The light curve of NaSt1 appears to exhibit periodic variability. Here, we conduct a Lomb-Scargle periodogram analysis to verify the period and compare to the results from [(Lau et al. 2021)](https://ui.adsabs.harvard.edu/abs/2021ApJ...922....5L/abstract).
For the periodogram analysis, we run the `LombScargle` function to search for periods between 3 - 500 days from the PGIR J-band light curve of NaSt1.
```python
min_period = 3.0 * u.day
max_period = 500 * u.day
min_freq_search = 1.0 / max_period
max_freq_search = 1.0 / min_period
frequency, power = LombScargle(np.array(re_best[procflags]['obsjd']) * u.day,
re_best[procflags]['magpsf']).autopower(minimum_frequency=min_freq_search,
maximum_frequency=max_freq_search)
freq_peak = frequency[np.argmax(power)].value
print('The period corresponding to the peak frequency is %.1f d' % (1 / freq_peak))
```
The period corresponding to the peak frequency is 308.3 d
Below, we plot the Lomb-Scargle periodograms as a function of frequency and period.
```python
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
plt.sca(ax[0])
plt.plot(frequency, power)
plt.vlines(freq_peak, 0, 1, linestyle='--', color='tab:red')
plt.minorticks_on()
plt.xlabel('frequency (1/d)')
plt.xscale('log')
plt.ylabel('power')
plt.sca(ax[1])
plt.plot(1 / frequency, power)
plt.vlines(1 / freq_peak, 0, 1, linestyle='--', color='tab:red')
plt.minorticks_on()
plt.xlabel('period (d)')
plt.ylabel('power')
plt.tight_layout()
plt.show()
```

The strongest peak in the periodogram indeed corresponds to a period of 308 days, which is consistent with the variability period derived for NaSt1 by [(Lau et al. 2021)](https://ui.adsabs.harvard.edu/abs/2021ApJ...922....5L/abstract).
<a class="anchor" id="resources"></a>
# Resources and references
De et al. (2020): *Palomar Gattini-IR: Survey Overview, Data Processing System, On-sky Performance and First Results*. Publications of the Astronomical Society of the Pacific, 132, 025001: https://ui.adsabs.harvard.edu/abs/2020PASP..132b5001D/abstract
Lau et al. (2021): *Discovery of a 310 Day Period from the Enshrouded Massive System NaSt1 (WR 122)*. The Astrophysical Journal, 922, 5: https://ui.adsabs.harvard.edu/abs/2021ApJ...922....5L/abstract
Murakawa et al. (2024): *The first Palomar Gattini-IR catalog of J-band light curves: construction and public data release*.
arXiv:2406.01720: https://ui.adsabs.harvard.edu/abs/2024arXiv240601720M/abstract
|
astro-datalabREPO_NAMEnotebooks-latestPATH_START.@notebooks-latest_extracted@notebooks-latest-master@03_ScienceExamples@PGIRLightCurves@PGIR_DR1_light_curves.ipynb@.PATH_END.py
|
{
"filename": "GraduatedCosmology.py",
"repo_name": "ja-vazquez/SimpleMC",
"repo_path": "SimpleMC_extracted/SimpleMC-master/simplemc/models/GraduatedCosmology.py",
"type": "Python"
}
|
import numpy as np
from simplemc.models.LCDMCosmology import LCDMCosmology
from simplemc.cosmo.paramDefs import Ok_par, ggama_par, glambda_par
#Graduated dark energy: Observational hints of a spontaneous sign
# switch in the cosmological constant
#https://arxiv.org/abs/1912.08751
class GraduatedCosmology(LCDMCosmology):
    """
    Graduated dark energy (gDE): a minimal dynamical deviation from the null
    inertial mass density of the cosmological constant, allowing a
    spontaneous sign switch in Lambda.

    Reference: https://arxiv.org/abs/1912.08751
    """

    def __init__(self, varyggama=True, varyglambda=False, varyOk=False):
        """
        Parameters
        ----------
        varyggama : bool
            Vary the gDE gamma parameter?
        varyglambda : bool
            Vary the gDE lambda parameter?
        varyOk : bool
            Vary the curvature density Ok?
        """
        self.varyggama = varyggama
        self.varyglambda = varyglambda
        self.varyOk = varyOk

        # Seed values come from the shared parameter definitions.
        self.Ok = Ok_par.value
        self.ggama = ggama_par.value
        self.glambda = glambda_par.value

        LCDMCosmology.__init__(self)

    ## my free parameters. We add Ok on top of LCDM ones (we inherit LCDM)
    def freeParameters(self):
        # Start from the inherited LCDM parameter list, then append whichever
        # gDE parameters were requested at construction time.
        params = LCDMCosmology.freeParameters(self)
        if self.varyggama:
            params.append(ggama_par)
        if self.varyglambda:
            params.append(glambda_par)
        if self.varyOk:
            params.append(Ok_par)
        return params

    def updateParams(self, pars):
        # Let the base class consume its own parameters first; abort if it
        # rejects the proposed point.
        if not LCDMCosmology.updateParams(self, pars):
            return False

        for p in pars:
            if p.name == "ggama":
                self.ggama = p.value
            elif p.name == "glambda":
                self.glambda = p.value
            elif p.name == "Ok":
                self.Ok = p.value
                self.setCurvature(self.Ok)
                # Reject unphysical curvature values.
                if abs(self.Ok) > 1.0:
                    return False
        return True

    ## this is relative hsquared as a function of a
    ## i.e. H(z)^2/H(z=0)^2
    def RHSquared_a(self, a):
        #For glambda=0, it becomes inertial mass density
        z = 1./a - 1.
        #NuContrib= self.NuDensity.rho(a)/self.h**2

        # Dark-energy density evolution; the glambda == 1 and glambda == 0
        # cases are handled separately because the general power-law
        # expression is singular / reduces analytically there.
        if self.glambda == 1:
            rhow = 1.
        elif self.glambda == 0:
            rhow = (1. + 3.*(1 + self.ggama)*np.log(1+z))
        else:
            term = (1. - 3.*self.ggama*(self.glambda-1)*np.log(1+z))
            rhow = np.sign(term)*np.abs(term)**(1./(1-self.glambda))

        return abs(self.Ocb/a**3 + self.Ok/a**2 + self.Omrad/a**4
                   + (1.0-self.Om-self.Ok)*rhow)
|
ja-vazquezREPO_NAMESimpleMCPATH_START.@SimpleMC_extracted@SimpleMC-master@simplemc@models@GraduatedCosmology.py@.PATH_END.py
|
{
"filename": "ModelSet.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/ares/analysis/ModelSet.py",
"type": "Python"
}
|
"""
ModelFit.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Mon Apr 28 11:19:03 MDT 2014
Description: For analysis of MCMC fitting.
"""
import pickle
import shutil
import numpy as np
import matplotlib as mpl
from ..util.Math import smooth
import matplotlib.pyplot as pl
from ..util import ProgressBar
from ..physics import Cosmology
import re, os, string, time, glob
from .BlobFactory import BlobFactory
from matplotlib.colors import Normalize
from matplotlib.patches import Rectangle
from .MultiPhaseMedium import MultiPhaseMedium as aG21
from ..physics.Constants import nu_0_mhz, erg_per_ev, h_p
from ..util import labels as default_labels
from ..util.Pickling import read_pickle_file, write_pickle_file
import matplotlib.patches as patches
from ..util.Aesthetics import Labeler
from ..util.PrintInfo import print_model_set
from .DerivedQuantities import DerivedQuantities as DQ
from ..util.ParameterFile import count_populations, par_info
from matplotlib.collections import PatchCollection, LineCollection
from ..util.SetDefaultParameterValues import SetAllDefaults, TanhParameters
from ..util.Stats import Gauss1D, error_2D, _error_2D_crude, \
bin_e2c, correlation_matrix
from ..util.ReadData import concatenate, read_pickled_chain,\
read_pickled_logL
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
try:
from scipy.spatial import Delaunay
except ImportError:
pass
try:
import shapely.geometry as geometry
from shapely.ops import cascaded_union, polygonize, unary_union
have_shapely = True
except (ImportError, OSError):
have_shapely = False
try:
from descartes import PolygonPatch
have_descartes = True
except ImportError:
have_descartes = False
try:
import h5py
have_h5py = True
except ImportError:
have_h5py = False
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
# Default keyword arguments for MultiPlot-style panel grids.
default_mp_kwargs = \
{
 'diagonal': 'lower',
 'keep_diagonal': True,
 'panel_size': (0.5,0.5),
 'padding': (0,0)
}

# Scalar types recognized as numerical when parsing parameter/blob values.
numerical_types = [float, np.float64, np.float32, int, np.int32, np.int64]

# Machine precision
MP = np.finfo(float).eps
def err_str(label, mu, err, log, labels=None):
    """Return a TeX string of the form $label=mu^{+err}_{-err}$."""
    core = undo_mathify(make_label(label, log, labels))
    stats = '={0:.3g}^{{+{1:.2g}}}_{{-{2:.2g}}}'.format(mu, err[1], err[0])
    return r'${!s}$'.format(core + stats)
class ModelSubSet(object):
    """Empty container used to stash a subset of a ModelSet's data."""

    def __init__(self):
        pass
class ModelSet(BlobFactory):
    def __init__(self, data, subset=None, verbose=True):
        """
        Parameters
        ----------
        data : instance, str
            prefix for a bunch of files ending in .chain.pkl, .pinfo.pkl, etc.,
            or a ModelSubSet instance. A list/tuple of prefixes stitches
            together several runs that share a common file prefix.
        subset : list, str
            List of parameters / blobs to recover from individual files. Can
            also set subset='all', and we'll try to automatically track down
            all that are available.
        verbose : bool
            If True, attempt to print a summary of the model set on load.
        """
        self.subset = subset
        self.is_single_output = True

        # Read in data from file (assumed to be pickled)
        if isinstance(data, basestring):
            # Check to see if perhaps this is just the chain
            if re.search('pkl', data):
                self._prefix_is_chain = True
                pre_pkl = data[0:data.rfind('.pkl')]
                self.prefix = prefix = pre_pkl
            elif re.search('hdf5', data):
                self._prefix_is_chain = True
                pre_pkl = data[0:data.rfind('.hdf5')]
                self.prefix = prefix = pre_pkl
            else:
                self._prefix_is_chain = False
                self.prefix = prefix = data

            i = prefix.rfind('/') # forward slash index

            # This means we're sitting in the right directory already
            if i == - 1:
                self.path = '.'
                self.fn = prefix
            else:
                self.path = prefix[0:i+1]
                self.fn = prefix[i+1:]

            if verbose:
                try:
                    print_model_set(self)
                except:
                    # Summary printing is best-effort; failures are not fatal.
                    pass

        elif isinstance(data, ModelSet):
            # Copy the already-loaded pieces rather than re-reading from disk.
            self.prefix = data.prefix
            self._chain = data.chain
            self._is_log = data.is_log
            self._base_kwargs = data.base_kwargs
        elif type(data) in [list, tuple]:
            # Multiple outputs: same file prefix, different directories.
            self.is_single_output = False

            fn = []
            self.paths = []
            self.prefix = data
            for h, prefix in enumerate(data):
                i = prefix.rfind('/') # forward slash index

                # This means we're sitting in the right directory already
                if i == - 1:
                    path = '.'
                    fn.append(prefix)
                else:
                    path = prefix[0:i+1]
                    fn.append(prefix[i+1:])

                self.paths.append(path[0:-1] if path[-1] == '/' else path)

                # All stitched runs must share the same file prefix.
                if h > 0:
                    assert fn[h] == fn[h-1], \
                        "File prefix different between {} and {}".format(
                            fn[h], fn[h-1])

            self.fn = fn[0]

            print("# Will load MCMC outputs from {} directories:".format(len(self.paths)))
            for path in self.paths:
                print("# {}".format(path))
            print("# Each with file prefix `{}`".format(self.fn))

        else:
            raise TypeError('Argument must be ModelSubSet instance or filename prefix')

        self.derived_blobs = DQ(self)
    @property
    def mask(self):
        # Array flagging chain elements to exclude from analysis; lazily
        # initialized to all-unmasked with the same shape as the chain.
        if not hasattr(self, '_mask'):
            self._mask = np.zeros_like(self.chain) # chain.shape[0]?
        return self._mask

    @mask.setter
    def mask(self, value):
        if self.is_mcmc:
            assert len(value) == len(self.logL)
            # Must be re-initialized to reflect new mask
            # NOTE(review): raises AttributeError if `_chain`/`_logL` were
            # never read -- confirm callers always touch chain/logL first.
            del self._chain, self._logL

        self._mask = value
    @property
    def skip(self):
        # Number of initial chain elements masked out as burn-in; 0 = none.
        if not hasattr(self, '_skip'):
            self._skip = 0
        return self._skip

    @skip.setter
    def skip(self, value):
        # Mask out the first `value` elements of the flattened chain.
        if hasattr(self, '_skip'):
            pass
            #print("WARNING: Running `skip` for (at least) the second time!")
        else:
            # On first time, stash away a copy of the original mask
            if not hasattr(self, '_original_mask'):
                self._original_mask = self.mask.copy()

        # If `stop` was applied already, build on the current mask so both
        # cuts remain in effect; otherwise start from the pristine mask.
        if hasattr(self, '_stop'):
            mask = self.mask.copy()
            assert value < self._stop
        else:
            mask = self._original_mask.copy()

        self._skip = int(value)

        x = np.arange(0, self.logL.size)
        # Flag everything before the requested step.
        mask[x < self._skip] = True

        print("Masked out {} elements using `skip`.".format(self._skip))

        self.mask = mask
@property
def stop(self):
if not hasattr(self._stop):
self._stop = 0
return self._stop
@stop.setter
def stop(self, value):
if hasattr(self, '_stop'):
pass
#print("WARNING: Running `stop` for (at least) the second time!")
else:
# On first time, stash away a copy of the original mask
if not hasattr(self, '_original_mask'):
self._original_mask = self.mask.copy()
# If skip has already been called, operate on pre-existing mask.
# Otherwise, start from scratch
if hasattr(self, '_skip'):
mask = self.mask.copy()
assert value > self._skip
else:
mask = self._original_mask.copy()
self._stop = int(value)
x = np.arange(0, self.logL.size)
print("Masked out {} elements using `stop`.".format(max(x) - self._stop))
self.mask = mask
    @property
    def load(self):
        # Per-processor 'load' diagnostics concatenated from
        # <prefix>.load.pkl if present; None otherwise.
        if not hasattr(self, '_load'):

            print("WARNING: if this run was restarted, the `load` values " +\
                "are probably wrong.")

            if os.path.exists('{!s}.load.pkl'.format(self.prefix)):
                self._load = concatenate(read_pickle_file(\
                    '{!s}.load.pkl'.format(self.prefix), nloads=None,\
                    verbose=False))
            else:
                self._load = None

        return self._load

    @property
    def pf(self):
        # Alias for `base_kwargs` (the run's parameter file).
        return self.base_kwargs

    @property
    def base_kwargs(self):
        # Base parameter dictionary for the run, read from <prefix>.binfo.pkl
        # (newer runs) or <prefix>.setup.pkl (older runs). None if neither
        # file exists; {} if the pickle cannot be read.
        if not hasattr(self, '_base_kwargs'):

            pre, post = self._get_pre_post()

            if os.path.exists('{!s}/{!s}.binfo.pkl'.format(pre, post)):
                fn = '{!s}/{!s}.binfo.pkl'.format(pre, post)
            elif os.path.exists('{!s}/{!s}.setup.pkl'.format(pre, post)):
                fn = '{!s}/{!s}.setup.pkl'.format(pre, post)
            else:
                print("WARNING: No files with prefix={} were found.".format(pre))
                self._base_kwargs = None
                return self._base_kwargs

            try:
                self._base_kwargs =\
                    read_pickle_file(fn, nloads=1, verbose=False)
            except ImportError as err:
                # A missing module needed to unpickle is genuinely fatal.
                raise err
            except:
                # Corrupt/unreadable pickle: fall back on an empty dict.
                self._base_kwargs = {}

        return self._base_kwargs

    def _get_pre_post(self):
        # Return the (directory, file-prefix) pair used to locate ancillary
        # files, stripping any '.burn' suffix so burn-in runs share the main
        # run's metadata files.
        if self.is_single_output:
            pre = self.path
            burn = self.prefix.endswith('.burn')
        else:
            pre = self.paths[0]
            burn = self.fn[0].endswith('.burn')

        if burn:
            post = self.fn.replace('.burn', '')
        else:
            post = self.fn

        return pre, post
    @property
    def parameters(self):
        # Read parameter names and info
        if not hasattr(self, '_parameters'):
            pre, post = self._get_pre_post()
            if os.path.exists('{!s}/{!s}.pinfo.pkl'.format(pre, post)):
                (self._parameters, self._is_log) =\
                    read_pickle_file('{!s}/{!s}.pinfo.pkl'.format(pre, post),
                    nloads=1, verbose=False)
            elif os.path.exists('{!s}/{!s}.hdf5'.format(pre, post)):
                # HDF5 output: names are stored as attributes of 'chain'.
                f = h5py.File('{!s}/{!s}.hdf5'.format(pre, post))
                self._parameters = list(f['chain'].attrs.get('names'))
                #self._is_log = list(f['chain'].attrs.get('is_log'))
                self._is_log = [False] * len(self._parameters)
                f.close()
            else:
                # Fall back on generic names p0, p1, ... sized by the chain.
                print("WARNING: No files following naming convention {}/{} were found.".format(pre, post))
                self._is_log = [False] * self.chain.shape[-1]
                self._parameters = ['p{}'.format(i) \
                    for i in range(self.chain.shape[-1])]

            # Freeze: both are immutable from here on.
            self._is_log = tuple(self._is_log)
            self._parameters = tuple(self._parameters)

        return self._parameters

    @property
    def nwalkers(self):
        # Read parameter names and info
        if not hasattr(self, '_nwalkers'):
            pre, post = self._get_pre_post()
            if os.path.exists('{!s}/{!s}.rinfo.pkl'.format(pre, post)):
                loaded =\
                    read_pickle_file('{!s}/{!s}.rinfo.pkl'.format(pre, post),\
                    nloads=1, verbose=False)
                # rinfo holds (nwalkers, save_freq, steps); cache all three.
                self._nwalkers, self._save_freq, self._steps = \
                    list(map(int, loaded))
            else:
                print("WARNING: No files following naming convention {}/{} were found.".format(pre, post))
                self._nwalkers = self._save_freq = self._steps = None

        return self._nwalkers

    @property
    def save_freq(self):
        # Populated as a side effect of reading `nwalkers` (same rinfo file).
        if not hasattr(self, '_save_freq'):
            nwalkers = self.nwalkers
        return self._save_freq

    @property
    def steps(self):
        # Populated as a side effect of reading `nwalkers` (same rinfo file).
        if not hasattr(self, '_steps'):
            nwalkers = self.nwalkers
        return self._steps

    @property
    def priors(self):
        # Prior definitions read from <prefix>.priors.pkl; {} if absent.
        if not hasattr(self, '_priors'):
            pre, post = self._get_pre_post()
            if os.path.exists('{!s}/{!s}.priors.pkl'.format(pre, post)):
                self._priors = \
                    read_pickle_file('{!s}/{!s}.priors.pkl'.format(pre, post),
                        nloads=1, verbose=False)
            else:
                self._priors = {}

        return self._priors
    @property
    def is_log(self):
        # Per-parameter log-sampling flags; populated as a side effect of
        # reading `parameters`.
        if not hasattr(self, '_is_log'):
            pars = self.parameters
        return self._is_log

    @property
    def polygon(self):
        # Bounding polygon used by SliceByPolygon; None unless set.
        if not hasattr(self, '_polygon'):
            return None
        return self._polygon

    @polygon.setter
    def polygon(self, value):
        self._polygon = value

    @property
    def is_mcmc(self):
        # True if likelihood values were saved alongside the chain, i.e., if
        # any .logL.pkl output (single or per-checkpoint) exists.
        if not hasattr(self, '_is_mcmc'):
            if os.path.exists('{!s}.logL.pkl'.format(self.prefix)):
                self._is_mcmc = True
            elif glob.glob('{!s}.dd*.logL.pkl'.format(self.prefix)):
                self._is_mcmc = True
            else:
                self._is_mcmc = False

        return self._is_mcmc

    @property
    def facc(self):
        # Walker acceptance fractions from <prefix>.facc.pkl; None if absent.
        if not hasattr(self, '_facc'):
            if os.path.exists('{!s}.facc.pkl'.format(self.prefix)):
                self._facc =\
                    read_pickle_file('{!s}.facc.pkl'.format(self.prefix),\
                    nloads=None, verbose=False)
                self._facc = np.array(self._facc)
            else:
                self._facc = None

        return self._facc

    def get_ax(self, ax=None, fig=1):
        # Return (axis, gotax): reuse the supplied axis, or make a new
        # single-panel figure. `gotax` tells the caller whether the axis
        # pre-existed (so decorations can be skipped).
        if ax is None:
            gotax = False
            fig = pl.figure(fig)
            ax = fig.add_subplot(111)
        else:
            gotax = True

        return ax, gotax
    @property
    def timing(self):
        # Per-step timing data, concatenated from <prefix>.timing_XXXX.pkl
        # files (1-indexed, zero-padded to 4 digits).
        if not hasattr(self, '_timing'):
            self._timing = []

            i = 1
            fn = '{0!s}.timing_{1!s}.pkl'.format(self.prefix, str(i).zfill(4))
            while os.path.exists(fn):
                self._timing.extend(\
                    read_pickle_file(fn, nloads=None, verbose=False))
                i += 1
                fn = '{0!s}.timing_{1!s}.pkl'.format(self.prefix,\
                    str(i).zfill(4))

        return self._timing

    @property
    def Nd(self):
        # Dimensionality of parameter space; None if the chain is absent.
        if not hasattr(self, '_Nd'):
            try:
                self._Nd = int(self.chain.shape[-1])
            except TypeError:
                self._Nd = None

        return self._Nd

    @property
    def unique_samples(self):
        # For each parameter, the unique values actually sampled -- most
        # useful for gridded (non-MCMC) model sets.
        if not hasattr(self, '_unique_samples'):
            self._unique_samples = \
                [np.unique(self.chain[:,i].data) for i in range(self.Nd)]
        return self._unique_samples

    @property
    def include_checkpoints(self):
        # Optional list of checkpoint ('dd') numbers to restrict reads to.
        if not hasattr(self, '_include_checkpoints'):
            self._include_checkpoints = None
        return self._include_checkpoints

    @include_checkpoints.setter
    def include_checkpoints(self, value):
        assert type(value) in [int, list, tuple, np.ndarray], \
            "Supplied checkpoint(s) must be integer or iterable of integers!"

        if type(value) is int:
            self._include_checkpoints = [value]
        else:
            self._include_checkpoints = value

        # Setting this after the chain was read has no effect on the cache.
        if hasattr(self, '_chain'):
            print("WARNING: the chain has already been read. Be sure to " +\
                "delete `_chain` attribute before continuing.")
    @property
    def chain(self):
        # Read MCMC chain
        # Lazily load (and cache) the flattened chain as a masked array,
        # trying several on-disk layouts in turn for each directory.
        if not hasattr(self, '_chain'):
            pre, post = self._get_pre_post()

            if self.is_single_output:
                paths = [self.path]
            else:
                paths = self.paths

            ##
            # Loop below just in case we're stitching together many MCMCs
            chains = []
            for h, path in enumerate(paths):

                have_chain_f = os.path.exists('{!s}/{!s}.chain.pkl'.format(path,
                    self.fn))
                have_f = os.path.exists('{!s}/{!s}.pkl'.format(path,
                    self.fn))

                # Layout 1: a single pickled chain file.
                if have_chain_f or have_f:
                    if have_chain_f:
                        fn = '{!s}/{!s}.chain.pkl'.format(path, self.fn)
                    else:
                        fn = '{!s}/{!s}.pkl'.format(path, self.fn)

                    if rank == 0:
                        print("# Loading {!s}...".format(fn))

                    t1 = time.time()
                    _chain = read_pickled_chain(fn)
                    t2 = time.time()

                    if rank == 0:
                        print("# Loaded {0!s} in {1:.2g} seconds.\n".format(fn,\
                            t2-t1))

                    # Broadcast a pre-existing 1-D mask across parameters.
                    if hasattr(self, '_mask'):
                        if self.mask.ndim == 1:
                            mask2d = np.array([self.mask] * _chain.shape[1]).T
                        elif self.mask.ndim == 2:
                            mask2d = self.mask
                            #mask2d = np.zeros_like(self._chain)
                    else:
                        mask2d = 0

                    _chain = np.ma.array(_chain, mask=mask2d)

                # We might have data stored by processor
                elif os.path.exists('{!s}.000.chain.pkl'.format(self.prefix)):
                    i = 0
                    full_chain = []
                    full_mask = []
                    fn = '{!s}.000.chain.pkl'.format(self.prefix)
                    while True:
                        if not os.path.exists(fn):
                            break

                        try:
                            this_chain = read_pickled_chain(fn)
                            full_chain.extend(this_chain.copy())
                        except ValueError:
                            #import pickle
                            #f = open(fn, 'rb')
                            #data = pickle.load(f)
                            #f.close()
                            #print data
                            print("# Error loading {!s}.".format(fn))

                        i += 1
                        fn = '{0!s}.{1!s}.chain.pkl'.format(self.prefix,\
                            str(i).zfill(3))

                    _chain = np.ma.array(full_chain,
                        mask=np.zeros_like(full_chain))

                    # So we don't have to stitch them together again.
                    # THIS CAN BE REALLY CONFUSING IF YOU, E.G., RUN A NEW
                    # CALCULATION AND FORGET TO CLEAR OUT OLD FILES.
                    # Hence, it is commented out (for now).
                    #if rank == 0:
                    #    write_pickle_file(self._chain,\
                    #        '{!s}.chain.pkl'.format(self.prefix), ndumps=1,\
                    #        open_mode='w', safe_mode=False, verbose=False)

                # Layout 3: a single HDF5 file holding the 'chain' dataset.
                elif os.path.exists('{!s}.hdf5'.format(self.prefix)):
                    f = h5py.File('{!s}.hdf5'.format(self.prefix))

                    chain = np.array(f[('chain')])
                    if hasattr(self, '_mask'):
                        if self.mask.ndim == 1:
                            # NOTE(review): hard-codes 2 parameters here --
                            # presumably only valid for 2-D chains; confirm.
                            mask2d = np.repeat(self.mask, 2).reshape(len(self.mask), 2)
                        else:
                            mask2d = self.mask#np.zeros_like(self._chain)
                    else:
                        mask2d = np.zeros(chain.shape)
                        self.mask = mask2d

                    _chain = np.ma.array(chain, mask=mask2d)
                    f.close()

                # If each "chunk" gets its own file.
                elif glob.glob('{!s}.dd*.chain.pkl'.format(self.prefix)):

                    if self.include_checkpoints is not None:
                        outputs_to_read = []
                        for output_num in self.include_checkpoints:
                            dd = str(output_num).zfill(4)
                            fn = '{0!s}.dd{1!s}.chain.pkl'.format(self.prefix, dd)
                            outputs_to_read.append(fn)
                    else:
                        # Only need to use "sorted" on the second time around
                        outputs_to_read = sorted(glob.glob(\
                            '{!s}.dd*.chain.pkl'.format(self.prefix)))

                    full_chain = []
                    if rank == 0:
                        print("# Loading {!s}.dd*.chain.pkl...".format(self.prefix))

                    t1 = time.time()
                    for fn in outputs_to_read:
                        if not os.path.exists(fn):
                            print("# Found no output: {!s}".format(fn))
                            continue

                        this_chain = read_pickled_chain(fn)
                        full_chain.extend(this_chain)

                    _chain = np.ma.array(full_chain, mask=0)

                    if rank == 0:
                        t2 = time.time()
                        print("# Loaded {0!s}.dd*.chain.pkl in {1:.2g} s.".format(\
                            self.prefix, t2 - t1))

                else:
                    # NOTE(review): if nothing is found for this path,
                    # `_chain` is stale or undefined when appended below --
                    # likely a latent NameError. Confirm intended behavior.
                    self._chain = None

                chains.append(_chain)

            self._chain = np.concatenate(chains, axis=0)

        return self._chain
    def identify_bad_walkers(self, tol=1e-2, skip=0, limits=False):
        """
        Find trajectories that are flat. They are probably walkers stuck
        in some "no man's land" region of parameter space. Poor guys.

        Parameters
        ----------
        tol : float
            Fraction of each parameter's global 1-D error used as the
            flatness threshold.
        skip : int
            Number of steps (per walker) to ignore at the start of each
            trajectory.
        limits : bool
            If True, flag a walker when its total excursion is below
            tol * error; otherwise flag it when all consecutive steps are
            numerically identical to within tol * error.

        Returns
        -------
        Lists of walker ID numbers. First, the good walkers, then the bad, as
        well as the mask itself.
        """
        Ns = self.chain.shape[0]
        steps_per_walker = Ns // self.nwalkers

        if skip > steps_per_walker:
            raise ValueError("`skip` must be < steps_per_walker={}".format(steps_per_walker))

        # Global 1-D confidence intervals: the yardstick for "flat".
        errs = [tuple(self.get_1d_error(par, skip=skip*self.nwalkers)[1]) \
            for par in self.parameters]

        bad_walkers = []
        good_walkers = []
        mask = np.zeros_like(self.chain, dtype=int)
        for i in range(self.nwalkers):
            chain, logL, elements = self.get_walker(i)

            good_walker = True
            for j, par in enumerate(self.parameters):
                err = np.abs(np.diff(errs[j]))[0]
                diff = np.diff(chain[skip:,j])
                dp = chain[skip:,j].max() - chain[skip:,j].min()

                #print(par, err, dp, tol * err, dp < tol * err,
                #    np.allclose(diff, 0.0, atol=tol * err, rtol=0))

                if limits:
                    if (dp < tol * err):
                        good_walker = False
                        break
                elif np.allclose(diff, 0.0, atol=tol * err, rtol=0):
                    good_walker = False
                    break
                else:
                    continue

            if good_walker:
                good_walkers.append(i)
            else:
                bad_walkers.append(i)
                # Accumulate this bad walker's chain elements into the mask.
                mask += elements

        return good_walkers, bad_walkers, np.minimum(mask, 1)
    @property
    def checkpoints(self):
        # Read MCMC chain
        # Per-processor checkpoint info, keyed by processor number. The
        # `fail` counter tolerates up to 10 missing files before giving up.
        if not hasattr(self, '_checkpoints'):
            i = 0
            fail = 0
            self._checkpoints = {}
            fn = '{!s}.000.checkpt.pkl'.format(self.prefix)
            while True:
                if not os.path.exists(fn):
                    fail += 1

                    if fail > 10:
                        break
                else:
                    self._checkpoints[i] =\
                        read_pickle_file(fn, nloads=1, verbose=False)

                # NOTE(review): `fail` never resets on success -- presumably
                # intended as a cumulative miss budget; confirm.
                i += 1
                fn = '{0!s}.{1!s}.checkpt.pkl'.format(self.prefix,\
                    str(i).zfill(3))

        return self._checkpoints
    @property
    def logL(self):
        # Log-likelihood values, masked consistently with `mask`. Read from a
        # single .logL.pkl, per-processor .NNN.logL.pkl files, or
        # per-checkpoint .dd*.logL.pkl files; None if nothing is found.
        if not hasattr(self, '_logL'):
            if os.path.exists('{!s}.logL.pkl'.format(self.prefix)):
                self._logL = \
                    read_pickled_logL('{!s}.logL.pkl'.format(self.prefix))

                # Collapse a 2-D (element, parameter) mask down to 1-D.
                if self.mask.ndim == 2:
                    N = self.chain.shape[0]
                    mask1d = np.array([np.max(self.mask[i,:]) for i in range(N)])
                else:
                    mask1d = self.mask

                self._logL = np.ma.array(self._logL, mask=mask1d)

            elif os.path.exists('{!s}.000.logL.pkl'.format(self.prefix)):
                # Outputs saved by processor: stitch them together in order.
                i = 0
                full_logL = []
                full_mask = []
                fn = '{!s}.000.logL.pkl'.format(self.prefix)
                while True:
                    if not os.path.exists(fn):
                        break

                    try:
                        this_logL = read_pickled_logL(fn)
                        full_logL.extend(this_logL.copy())
                    except ValueError:
                        print("Error loading {!s}.".format(fn))

                    i += 1
                    fn = '{0!s}.{1!s}.logL.pkl'.format(self.prefix,\
                        str(i).zfill(3))

                self._logL = np.ma.array(full_logL,
                    mask=np.zeros_like(full_logL))
            elif glob.glob('{!s}.dd*.logL.pkl'.format(self.prefix)):
                # One file per checkpoint; optionally restrict to
                # `include_checkpoints`.
                if self.include_checkpoints is not None:
                    outputs_to_read = []
                    for output_num in self.include_checkpoints:
                        dd = str(output_num).zfill(4)
                        fn = '{0!s}.dd{1!s}.logL.pkl'.format(self.prefix, dd)
                        outputs_to_read.append(fn)
                else:
                    outputs_to_read = sorted(glob.glob(\
                        '{!s}.dd*.logL.pkl'.format(self.prefix)))

                full_chain = []
                for fn in outputs_to_read:
                    if not os.path.exists(fn):
                        print("Found no output: {!s}".format(fn))
                        continue

                    full_chain.extend(read_pickled_logL(fn))

                # Collapse a 2-D mask to 1-D here too.
                if self.mask.ndim == 2:
                    N = self.chain.shape[0]
                    mask1d = np.array([np.max(self.mask[i,:]) for i in range(N)])
                    self._logL = np.ma.array(full_chain, mask=mask1d)
                else:
                    self._logL = np.ma.array(full_chain, mask=self.mask)
            else:
                self._logL = None

        return self._logL

    @logL.setter
    def logL(self, value):
        self._logL = value

    @property
    def L(self):
        # Linear likelihood, exp(logL).
        if not hasattr(self, '_L'):
            self._L = np.exp(self.logL)
        return self._L
    @property
    def fails(self):
        # Failed model evaluations, read from <prefix>.fails.pkl or stitched
        # from per-processor <prefix>.NNN.fail.pkl files; None if absent.
        if not hasattr(self, '_fails'):
            if os.path.exists('{!s}.fails.pkl'.format(self.prefix)):
                self._fails =\
                    read_pickle_file('{!s}.fails.pkl'.format(self.prefix),\
                    nloads=1, verbose=False)
            elif os.path.exists('{!s}.000.fail.pkl'.format(self.prefix)):
                i = 0
                fails = []
                fn =\
                    '{0!s}.{1!s}.fail.pkl'.format(self.prefix, str(i).zfill(3))
                while True:
                    if not os.path.exists(fn):
                        break

                    data = read_pickle_file(fn, nloads=None, verbose=False)
                    fails.extend(data)

                    i += 1
                    fn = '{0!s}.{1!s}.fail.pkl'.format(self.prefix,\
                        str(i).zfill(3))

                # So we don't have to stitch them together again.
                # AVOIDING CONFUSION
                #if rank == 0:
                #    write_pickle_file(fails,\
                #        '{!s}.fails.pkl'.format(self.prefix), ndumps=1,\
                #        open_mode='w', safe_mode=False, verbose=False)

                self._fails = fails
            else:
                self._fails = None

        return self._fails
@property
def timeouts(self):
if not hasattr(self, '_timeouts'):
if os.path.exists('{!s}.timeout.pkl'.format(self.prefix)):
self._fails =\
read_pickle_file('{!s}.timeout.pkl'.format(self.prefix),\
nloads=1, verbose=False)
elif os.path.exists('{!s}.000.timeout.pkl'.format(self.prefix)):
i = 0
timeout = []
fn = '{0!s}.{1!s}.timeout.pkl'.format(self.prefix,\
str(i).zfill(3))
while True:
if not os.path.exists(fn):
break
data = read_pickle_file(fn, nloads=None, verbose=False)
timeout.extend(data)
i += 1
fn = '{0!s}.{1!s}.timeout.pkl'.format(self.prefix,\
str(i).zfill(3))
self._timeout = timeout
else:
self._timeout = None
return self._timeout
    def get_walker(self, num):
        """
        Return chain elements corresponding to specific walker.

        Parameters
        ----------
        num : int
            ID # for walker of interest.

        Returns
        -------
        1. 2-D array with shape (nsteps, nparameters).
        2. The corresponding log-likelihood values (1-D array).
        3. A mask, with the same shape as the chain, with elements == 1
           corresponding to those specific to the given walker.
        """
        sf = self.save_freq
        nw = self.nwalkers

        assert num < nw, "Only {} walkers were used!".format(nw)

        steps_per_walker = self.chain.shape[0] // nw
        nchunks = steps_per_walker // sf

        # "size" of each chunk in # of MCMC steps
        schunk = nw * sf

        data = []
        logL = []
        elements = np.zeros_like(self.chain, dtype=int).data
        for i in range(nchunks):
            # Within each 'chunk', which is the size of a data outputs,
            # the walker of interest's data is in a block of size 'save_freq`
            _logL = self.logL[i*schunk + sf*num:i*schunk + sf*(num+1)]
            chunk = self.chain[i*schunk + sf*num:i*schunk + sf*(num+1)]

            elements[i*schunk + sf*num:i*schunk + sf*(num+1)] = 1

            data.extend(chunk)
            logL.extend(_logL)

        return np.array(data), np.array(logL), elements
@property
def Npops(self):
if not hasattr(self, '_Npops') and self.base_kwargs is not None:
self._Npops = count_populations(**self.base_kwargs)
elif self.base_kwargs is None:
self._Npops = 1
return self._Npops
@property
def blob_redshifts_float(self):
if not hasattr(self, '_blob_redshifts_float'):
self._blob_redshifts_float = []
for i, redshift in enumerate(self.blob_redshifts):
if isinstance(redshift, basestring):
self._blob_redshifts_float.append(None)
else:
self._blob_redshifts_float.append(round(redshift, 3))
return self._blob_redshifts_float
@property
def blob_redshifts_float(self):
if not hasattr(self, '_blob_redshifts_float'):
self._blob_redshifts_float = []
for i, redshift in enumerate(self.blob_redshifts):
if isinstance(redshift, basestring):
z = None
else:
z = redshift
self._blob_redshifts_float.append(z)
return self._blob_redshifts_float
    def SelectModels(self): # pragma: no cover
        """
        Draw a rectangle on supplied matplotlib.axes.Axes instance, return
        information about those models.

        Connects press/release callbacks to the canvas of the stashed axis
        (`self._ax`); the actual slicing happens in `_on_release`.
        """
        if not hasattr(self, '_ax'):
            raise AttributeError('No axis found.')

        self._op = self._ax.figure.canvas.mpl_connect('button_press_event',
            self._on_press)
        self._or = self._ax.figure.canvas.mpl_connect('button_release_event',
            self._on_release)

    def _on_press(self, event): # pragma: no cover
        # Record the anchor corner of the selection (data coordinates).
        self.x0 = event.xdata
        self.y0 = event.ydata

    def _on_release(self, event): # pragma: no cover
        # Record opposite corner, disconnect handlers, draw the selection
        # rectangle, and slice the model set down to its contents.
        self.x1 = event.xdata
        self.y1 = event.ydata
        self._ax.figure.canvas.mpl_disconnect(self._op)
        self._ax.figure.canvas.mpl_disconnect(self._or)

        # Width and height of rectangle
        dx = abs(self.x1 - self.x0)
        dy = abs(self.y1 - self.y0)

        # Find lower left corner of rectangle
        lx = self.x0 if self.x0 < self.x1 else self.x1
        ly = self.y0 if self.y0 < self.y1 else self.y1

        # Lower-left
        ll = (lx, ly)

        # Upper right
        ur = (lx + dx, ly + dy)

        origin = (self.x0, self.y0)
        rect = Rectangle(ll, dx, dy, fc='none', ec='k')

        self._ax.add_patch(rect)
        self._ax.figure.canvas.draw()

        print('{0:f} {1:f} {2:f} {3:f}'.format(lx, lx+dx, ly, ly+dy))

        # NOTE(review): relies on `plot_info` having been populated by the
        # plotting routine that stashed `_ax` -- confirm.
        self.Slice((lx, lx+dx, ly, ly+dy), **self.plot_info)
def SliceIteratively(self, pars):
#assert self.Nd == 3 # for now
if type(pars) != list:
par = pars
k = list(self.parameters).index(par)
vals = self.unique_samples[k]
slices = []
for i, val in enumerate(vals):
if i == 0:
lo = 0
hi = np.mean([val, vals[i+1]])
elif i == len(vals) - 1:
lo = np.mean([val, vals[i-1]])
hi = max(vals) * 1.1
else:
lo = np.mean([vals[i-1], val])
hi = np.mean([vals[i+1], val])
slices.append(self.Slice([lo, hi], [par]))
return vals, slices
else:
vals
for par in pars:
k = list(self.parameters).index(par)
vals.append(np.sort(np.unique(self.chain[:,k])))
    def Slice(self, constraints, pars, ivar=None, take_log=False,
        un_log=False, multiplier=1.):
        """
        Return revised ("sliced") dataset given set of criteria.

        Parameters
        ----------
        constraints : list, tuple
            A rectangle (or line segment) bounding the region of interest.
            For 2-D plane, supply (left, right, bottom, top), and then to
            `pars` supply list of datasets defining the plane. For 1-D, just
            supply (min, max).
        pars : list
            Parameter / blob name(s) defining the plane being sliced.

        Returns
        -------
        Object to be used to initialize a new ModelSet instance.
        """
        if len(constraints) == 4:
            Nd = 2
            x1, x2, y1, y2 = constraints
        else:
            Nd = 1
            x1, x2 = constraints

        # Figure out what these values translate to.
        data = self.ExtractData(pars, ivar, take_log, un_log,
            multiplier)

        # Figure out elements we want
        xok_ = np.logical_and(data[pars[0]] >= x1, data[pars[0]] <= x2)
        # Also accept points within machine precision of the boundaries.
        xok_MP = np.logical_or(np.abs(data[pars[0]] - x1) <= MP,
            np.abs(data[pars[0]].data - x2) <= MP)
        xok_pre = np.logical_or(xok_, xok_MP)
        # Never resurrect elements already masked out.
        unmasked = np.logical_not(data[pars[0]].mask == 1)
        xok = np.logical_and(xok_pre, unmasked)

        if Nd == 2:
            yok_ = np.logical_and(data[pars[1]] >= y1, data[pars[1]] <= y2)
            yok_MP = np.logical_or(np.abs(data[pars[1]] - y1) <= MP,
                np.abs(data[pars[1]] - y2) <= MP)
            yok = np.logical_or(yok_, yok_MP)
            to_keep = np.logical_and(xok, yok)
        else:
            to_keep = np.array(xok)

        # Mask everything NOT selected.
        mask = np.logical_not(to_keep)

        ##
        # CREATE NEW MODELSET INSTANCE
        ##
        model_set = ModelSet(self.prefix)

        # Set the mask.
        # Must this be 2-D?
        mask2d = np.array([mask] * self.chain.shape[1]).T
        model_set.mask = np.logical_or(mask2d, self.mask)

        # Stash result on self as slice_0, slice_1, ... for convenience.
        i = 0
        while hasattr(self, 'slice_{}'.format(i)):
            i += 1

        setattr(self, 'slice_{}'.format(i), model_set)

        print("Saved result to slice_{} attribute.".format(i))

        return model_set
    def SliceByElement(self, to_keep):
        # Return a new ModelSet keeping only the chain elements whose indices
        # appear in `to_keep` AND were not already masked here.
        ##
        # CREATE NEW MODELSET INSTANCE
        ##
        model_set = ModelSet(self.prefix)

        # Set the mask!
        keep = np.zeros(self.chain.shape[0])
        for i in to_keep:
            keep[i] = 1

        # Current keep-state, collapsed to 1-D via the first parameter column.
        old_keep = np.logical_not(self.mask)[:,0]

        model_set.mask = np.logical_not(np.logical_and(keep, old_keep))

        return model_set
def SliceByParameters(self, to_keep):
elements = []
for kw in to_keep:
tmp = []
for i, par in enumerate(self.parameters):
if self.is_log[i]:
tmp.append(np.log10(kw[par]))
else:
tmp.append(kw[par])
tmp = np.array(tmp)
loc = np.argwhere(self.chain == tmp)[:,0]
if not loc:
continue
assert np.all(np.diff(loc) == 0)
elements.append(loc[0])
return self.SliceByElement(elements)
def difference(self, set2):
"""
Create a new ModelSet out of the elements unique to current ModelSet.
"""
assert self.chain.shape == set2.chain.shape
assert self.parameters == set2.parameters
mask = np.ones(self.chain.shape[0])
for i, element in enumerate(self.chain):
if self.mask[i] == 0 and (set2.mask[i] == 1):
mask[i] = 0
model_set = ModelSet(self.prefix)
# Set the mask!
model_set.mask = mask
return model_set
def union(self, set2):
"""
Create a new ModelSet out of the elements unique to input sets.
"""
assert self.chain.shape == set2.chain.shape
assert self.parameters == set2.parameters
mask = self.mask * set2.mask
model_set = ModelSet(self.prefix)
# Set the mask!
model_set.mask = mask
return model_set
    def SliceByPolygon(self, parameters, polygon):
        """
        Convert a bounding polygon to a new ModelSet instance.

        Parameters
        ----------
        parameters : list
            List of parameters names / blob names defining the (x, y) plane
            of the input polygon.
        polygon : shapely.geometry.Polygon instance
            Yep.

        Returns
        -------
        New instance of THIS VERY CLASS.
        """
        data = self.ExtractData(parameters)

        xdata = data[parameters[0]]
        ydata = data[parameters[1]]

        assert len(xdata) == len(ydata)
        assert len(xdata) == self.chain.shape[0]

        # Mask every sample whose (x, y) point falls outside the polygon;
        # boundary points count as inside.
        mask = np.zeros(self.chain.shape[0])
        for i in range(len(xdata)):
            pt = geometry.Point(xdata[i], ydata[i])

            pt_in_poly = polygon.contains(pt) or polygon.touches(pt) \
                or polygon.intersects(pt)

            if not pt_in_poly:
                mask[i] = 1

        ##
        # CREATE NEW MODELSET INSTANCE
        ##
        model_set = ModelSet(self.prefix)

        # Set the mask!
        model_set.mask = np.logical_or(mask, self.mask)

        # Save the polygon we used
        model_set.polygon = polygon

        return model_set
def Vennify(self, polygon1, polygon2):
"""
Return a new ModelSet instance containing only models that lie
within (or outside, if union==False) intersection of two polygons.
"""
overlap = polygon1.intersection(polygon2)
p1_w_overlap = polygon1.union(overlap)
p2_w_overlap = polygon2.union(overlap)
p1_unique = polygon1.difference(p2_w_overlap)
p2_unique = polygon2.difference(p1_w_overlap)
return p1_unique, overlap, p2_unique
@property
def plot_info(self):
if not hasattr(self, '_plot_info'):
self._plot_info = None
return self._plot_info
@plot_info.setter
def plot_info(self, value):
self._plot_info = value
    def WalkerTrajectoriesMultiPlot(self, pars=None, N='all', walkers='first',
        axes=None, fig=1, best_fit='mode', ncols=1,
        use_top=1, skip=0, stop=None, offset=0, **kwargs):
        """
        Plot trajectories of `N` walkers for multiple parameters at once.
        """
        if pars is None:
            pars = self.parameters

        if N == 'all':
            N = self.nwalkers

        Npars = len(pars)

        # Pad the panel count up to a multiple of ncols.
        while (Npars / float(ncols)) % 1 != 0:
            Npars += 1

        had_axes = True
        if axes is None:
            had_axes = False
            nrows = Npars//ncols
            if nrows * ncols < Npars:
                nrows += 1
            fig, axes = pl.subplots(nrows, ncols, num=fig)

        w = self._get_walker_subset(N, walkers)

        if not best_fit:
            loc = None
        elif best_fit == 'median':
            N = len(self.logL)
            # NOTE(review): this is a logL *value*, not an index, yet `loc`
            # is used below as `self.chain[loc,k]` -- confirm intent.
            loc = np.sort(self.logL)[N // 2]
        elif best_fit == 'mode':
            loc = np.argmax(self.logL)

            #psorted = np.argsort(self.logL)
            #
            #cut = int(0.9 * len(self.logL))
            #
            #loc = psorted[cut:]

        # Find precise point of max likelihood
        ibest = np.argsort(self.logL)[-1::-1]
        best = []
        for i in range(use_top):
            walker, step = self.index_to_walker_step(ibest[i])
            best.append((walker, step))

        for i, par in enumerate(pars):
            self.WalkerTrajectories(par, walkers=w, ax=axes[i],
                skip=skip, stop=stop, offset=offset, **kwargs)

            if loc is None:
                continue

            # Plot current maximum likelihood value
            if par in self.parameters:
                k = self.parameters.index(par)
                axes[i].plot([0, offset+self.chain[:,k].size / float(self.nwalkers)],
                    [self.chain[loc,k]]*2, color='k', ls='--', lw=3)

                # Mark the top `use_top` likelihood points ('+' for the best,
                # then numbered markers).
                for j, (walk, step) in enumerate(best):
                    axes[i].scatter(offset+step-1, self.chain[ibest[j],k],
                        marker=r'$ {} $'.format(j+1) if j > 0 else '+',
                        s=150, color='k', lw=1)
            else:
                pass

            # NOTE(review): `mp` is not defined in this method; this branch
            # raises NameError if reached -- looks like leftover from a
            # MultiPanel-based version. Confirm before relying on it.
            if i not in mp.bottom:
                axes.set_xlabel('')
                axes.set_xticklabels([])

        return axes
def index_to_walker_step(self, loc):
    """
    Map a flat index into the concatenated chain onto a (walker, step) pair.

    The chain on disk is laid out in "chunks": each chunk holds `save_freq`
    consecutive steps for walker 0, then `save_freq` steps for walker 1,
    and so on for all walkers. This routine inverts that layout.

    Parameters
    ----------
    loc : int
        Flat (row) index into `self.chain`.

    Returns
    -------
    Tuple of (walker ID, step number within that walker's trajectory).

    Raises
    ------
    ValueError if `save_freq` exceeds the number of steps per walker.
    """
    sf = self.save_freq
    nw = self.nwalkers
    steps_per_walker = self.chain.shape[0] // nw
    nchunks = steps_per_walker // sf

    if nchunks == 0:
        raise ValueError("Looks like save_freq > steps per walker. For some reason this causes problems.")

    # "size" of each chunk in # of MCMC steps
    schunk = nw * sf

    # Scan each walker's slice of each chunk for the one containing `loc`.
    # NOTE(review): the upper bound uses `<= ma` (inclusive), which makes
    # chunk boundaries overlap by one element — confirm this is intended.
    broken = False
    for num in range(self.nwalkers):
        for i in range(nchunks):
            mi, ma = i*schunk + sf*num, i*schunk + sf*(num+1)
            if mi <= loc <= ma:
                broken = True
                break
        if broken:
            break

    # Must do correction if last chunk different size, e.g., if
    # steps_per_walker % save_freq != 0
    if not broken:
        # Go again, with modified 'schunk' for the short trailing chunk.
        schunk_last = self.chain.shape[0] % schunk
        sf_last = schunk_last // nw
        for num in range(self.nwalkers):
            mi = self.chain.shape[0] - schunk_last \
                + num * sf_last
            ma = self.chain.shape[0] - schunk_last \
                + (num + 1) * sf_last
            if mi <= loc <= ma:
                broken = True
                break
            # NOTE(review): this second break is unreachable (same loop as
            # the break above); kept byte-for-byte.
            if broken:
                break

    # NOTE(review): if `loc` fell in the trailing short chunk, `i` still
    # holds its value from the first scan (nchunks-1) — verify the step
    # arithmetic for that case.
    step = i * sf + (loc - mi)

    return num, step
def WalkerTrajectories(self, par, N=50, walkers='first', ax=None, fig=1,
    skip=0, stop=None, ivar=None, multiplier=1., offset=0, **kwargs):
    """
    Plot 1-D trajectories of N walkers (i.e., vs. step number).

    Parameters
    ----------
    par : str
        Name of parameter (or blob) to show results for.
    N : int
        Number of walkers to draw.
    walkers : str, iterable
        Which walkers to grab? By default, select `N` first walkers;
        can also be 'random'/'last', or an explicit iterable of walker IDs.
    skip, stop : int
        Trim this many steps from the beginning / end of each trajectory.
    offset : int
        Shift the step axis by this amount.

    Returns
    -------
    matplotlib axis object.
    """
    if ax is None:
        gotax = False
        fig = pl.figure(fig)
        ax = fig.add_subplot(111)
    else:
        gotax = True

    if stop is not None:
        stop = -int(stop)

    if isinstance(walkers, basestring):
        # BUG FIX: was a strict `<`, which rejected N == nwalkers even
        # though that is valid (and allowed in WalkerTrajectory2D).
        assert N <= self.nwalkers, \
            "Only {} walkers available!".format(self.nwalkers)
        to_plot = self._get_walker_subset(N, walkers)
    else:
        to_plot = walkers

    for i in to_plot:
        data, logL, elements = self.get_walker(i)

        if par in self.parameters:
            y = data[:,self.parameters.index(par)]
        else:
            # Blob: pull the full dataset and keep only this walker's rows.
            keep = elements[:,0]
            tmp = self.ExtractData(par, ivar=ivar)[par]
            y = tmp[keep == 1] * multiplier

        x = np.arange(offset, len(y)+offset)
        ax.plot(x[skip:stop], y[skip:stop], **kwargs)

    # Empty plot call retained from original: advances the axis' color
    # cycle by one (the unused argmax(logL) computation was removed).
    ax.plot([])

    self.set_axis_labels(ax, ['step', par], take_log=False, un_log=False,
        labels={})

    return ax
def WalkerTrajectory2D(self, pars, N=50, walkers='first', ax=None, fig=1,
    scale_by_step=True, scatter=False, **kwargs):
    """
    Plot the path of walkers in the 2-D plane defined by `pars`.

    With scatter=True and scale_by_step=True, points are colored by step
    number; a color-scaled line plot is not implemented.
    """
    gotax = ax is not None
    if not gotax:
        fig = pl.figure(fig)
        ax = fig.add_subplot(111)

    assert type(pars) in [list, tuple]
    par1, par2 = pars

    if isinstance(walkers, basestring):
        assert N <= self.nwalkers, \
            "Only {} walkers available!".format(self.nwalkers)
        to_plot = self._get_walker_subset(N, walkers)
    else:
        to_plot = walkers

    j1 = self.parameters.index(par1)
    j2 = self.parameters.index(par2)

    for wid in to_plot:
        data, logL, mask = self.get_walker(wid)

        if scale_by_step:
            if not scatter:
                raise NotImplementedError('dunno how to do this correctly')
            # Color each point by its step number.
            c = np.arange(0, data[:,0].size, 1)
        else:
            c = None

        if scatter:
            ax.scatter(data[:,j1], data[:,j2], c=c, **kwargs)
        else:
            ax.plot(data[:,j1], data[:,j2], color=c, **kwargs)

    return ax
def _get_walker_subset(self, N=50, walkers='random'):
to_plot = np.arange(self.nwalkers)
if walkers == 'random':
np.random.shuffle(to_plot)
slc = slice(0, N)
elif walkers == 'first':
slc = slice(0, N)
elif walkers == 'last':
slc = slice(-N, None)
else:
raise NotImplementedError('help!')
return to_plot[slc]
@property
def cosm(self):
    """Lazily-built Cosmology instance constructed from self.pf."""
    try:
        return self._cosm
    except AttributeError:
        self._cosm = Cosmology(**self.pf)
        return self._cosm
@property
def derived_blob_ivars(self):
    """Independent-variable grids for derived blobs, keyed by blob name."""
    if not hasattr(self, '_derived_blob_ivars'):
        # Accessing derived_blob_names populates _derived_blob_ivars
        # as a side effect.
        _ = self.derived_blob_names
    return self._derived_blob_ivars
@property
def derived_blob_names(self):
    """
    Names of derived blobs read from '<prefix>.dbinfo.pkl'.

    Rebuilt on every access (caching is deliberately disabled in the
    original); also refreshes `_derived_blob_ivars`.
    """
    self._derived_blob_ivars = {}
    self._derived_blob_names = []

    fn = '{}.dbinfo.pkl'.format(self.prefix)

    if os.path.exists(fn):
        with open(fn, 'rb') as f:
            ivars = pickle.load(f)

        self._derived_blob_ivars.update(ivars)
        # Dict iteration order matches the original key-by-key append.
        self._derived_blob_names.extend(ivars)

    return self._derived_blob_names
def Scatter(self, pars, ivar=None, ax=None, fig=1, c=None, aux=None,
    take_log=False, un_log=False, multiplier=1., use_colorbar=True,
    line_plot=False, sort_by='z', filter_z=None, rungs=False,
    rung_label=None, rung_label_top=True, return_cb=False, cax=None,
    skip=0, skim=1, stop=None,
    cb_kwargs={}, operation=None, **kwargs):
    """
    Plot samples as points in 2-d plane.

    Parameters
    ----------
    pars : list
        2-element list of parameter names.
    ivar : float, list
        Independent variable(s) to be used for non-scalar blobs.
    z : str, float
        Redshift at which to plot x vs. y, if applicable.
    c : str
        Field for (optional) color axis.
    operation : str
        If given (and `c` is set), reduce the color data within each
        unique (x, y) cell: 'mean', 'stdev', 'diff', 'max', 'min',
        'first', or 'last' ('first'/'last' require `aux`).
    filter_z : float
        Keep only samples whose color value is (approximately) this.

    Returns
    -------
    matplotlib.axes._subplots.AxesSubplot instance.
    """
    if ax is None:
        gotax = False
        fig = pl.figure(fig)
        ax = fig.add_subplot(111)
    else:
        gotax = True

    # Make a new variable since pars might be self.parameters
    # (don't want to modify that)
    if c is not None:
        p = list(pars) + [c]

        # Pad ivar with None for the color axis if not already supplied.
        if ivar is not None:
            if len(ivar) != 3:
                iv = list(ivar) + [None]
            else:
                iv = ivar
        else:
            iv = None
    else:
        p = pars
        iv = ivar

    data = self.ExtractData(p, iv, take_log, un_log, multiplier)

    xdata = data[p[0]]
    ydata = data[p[1]]

    if aux is not None:
        adata = self.ExtractData(aux)[aux]

    if c is not None:
        _cdata = data[p[2]].squeeze()

        if operation is None:
            cdata = _cdata
        elif isinstance(operation, basestring):
            # Reduce color data within each unique (x, y) grid cell.
            # Requires >2 model dimensions.
            assert self.Nd > 2

            # There's gotta be a faster way to do this...
            xu = np.unique(xdata[np.isfinite(xdata)])
            yu = np.unique(ydata[np.isfinite(ydata)])

            # Assign each sample a scalar cell ID from its nearest
            # (x, y) grid point. NOTE: `i` is deliberately reassigned
            # inside the loop body; the enumerate index is not needed
            # after that point.
            ids = []
            for i, val in enumerate(_cdata):
                x = xdata[i]
                y = ydata[i]

                i = np.argmin(np.abs(x - xu))
                j = np.argmin(np.abs(y - yu))

                ids.append(i * len(yu) + j)

            ids = np.array(ids)

            cdata = np.zeros_like(_cdata)
            for i, idnum in enumerate(np.unique(ids)):
                #if isinstance(operation, basestring):
                tmp = _cdata[ids == idnum]
                if operation == 'mean':
                    cdata[ids == idnum] = np.mean(tmp)
                elif operation == 'stdev':
                    cdata[ids == idnum] = np.std(tmp)
                elif operation == 'diff':
                    cdata[ids == idnum] = np.max(tmp) - np.min(tmp)
                elif operation == 'max':
                    cdata[ids == idnum] = np.max(tmp)
                elif operation == 'min':
                    cdata[ids == idnum] = np.min(tmp)
                # The next two could be accomplished by slicing
                # along third dimension
                elif operation == 'first':
                    # Value of the color field where `aux` is smallest.
                    val = min(adata[adata.mask == 0])
                    cond = np.logical_and(ids == idnum, adata == val)
                    cdata[ids == idnum] = _cdata[cond]
                elif operation == 'last':
                    val = max(adata[adata.mask == 0])
                    cond = np.logical_and(ids == idnum, adata == val)
                    cdata[ids == idnum] = _cdata[cond]
                else:
                    raise NotImplementedError('help')

                #else:
                #cond = np.ma.logical_and(ids == idnum, adata == operation)
                #print np.any(adata == operation), np.unique(adata), operation, np.ma.sum(cond)
                #cdata[ids == idnum] = _cdata[cond]
        else:
            cdata = _cdata
    else:
        cdata = None

    # Seems unecessary...a method inherited from days past?
    func = ax.__getattribute__('scatter')

    if filter_z is not None:
        # Keep only samples whose color value matches filter_z.
        _condition = np.isclose(cdata, filter_z)

        if not np.any(_condition):
            print("No instances of {0!s}={1:.4g}".format(p[2], filter_z))
            return

        xd = xdata[_condition]
        yd = ydata[_condition]
        cd = cdata[_condition]
    else:
        _condition = None
        # Drop samples masked in either (or any) dataset.
        mask = np.logical_or(xdata.mask == True, ydata.mask == True)
        if cdata is not None:
            mask = np.logical_or(mask == True, cdata.mask == True)

        #print("Masking {} elements in ({}, {}) plane.".format(mask.sum(), p[0], p[1]))

        xd = xdata[mask == 0]
        yd = ydata[mask == 0]

        if cdata is not None:
            cd = cdata[mask == 0]
        else:
            cd = cdata

    # Boolean filter implementing skip/stop trimming.
    keep = np.ones_like(xd)
    if skip is not None:
        keep[0:skip] *= 0
    if stop is not None:
        stop = -int(stop)
        keep[stop:] *= 0

    # Pass through only a whitelist of styling kwargs.
    kw = {}
    for _kw in kwargs:
        if _kw not in ['color', 'mec', 'mfc', 'alpha', 'ms', 'm']:
            continue
        kw[_kw] = kwargs[_kw]

    if rungs:
        scat = self._add_rungs(xdata, ydata, cdata, ax, _condition,
            label=rung_label, label_on_top=rung_label_top, **kw)
    elif line_plot:
        scat = func(xd[keep==1], yd[keep==1], **kw)
    elif (cdata is not None) and (filter_z is None):
        scat = func(xd[keep==1], yd[keep==1], c=cd[keep==1], **kw)
    else:
        scat = func(xd[keep==1], yd[keep==1], **kw)

    if (cdata is not None) and use_colorbar and (not line_plot) and \
        (filter_z is None):
        # NOTE(review): both branches of the facecolors check yield
        # cb = None — presumably a colorbar is never wanted when
        # facecolors is user-specified; confirm intent.
        if 'facecolors' in kwargs:
            if kwargs['facecolors'] in ['none', None]:
                cb = None
            else:
                cb = None
        else:
            cb = self._cb = pl.colorbar(scat, cax=cax, **cb_kwargs)
    else:
        cb = None

    self._scat = scat

    # Might use this for slicing
    self.plot_info = {'pars': pars, 'ivar': ivar,
        'take_log': take_log, 'un_log':un_log, 'multiplier':multiplier}

    # Make labels
    self.set_axis_labels(ax, p, take_log, un_log, cb)

    pl.draw()

    self._ax = ax

    if return_cb:
        return ax, cb
    else:
        return ax
def _add_rungs(self, _x, _y, c, ax, cond, tick_size=1, label=None,
    label_on_top=True, **kwargs): # pragma: no cover
    """
    Draw a tick mark ("rung") across a trajectory at the single sample
    selected by the boolean array `cond`, optionally with a rotated label.

    The rung is drawn perpendicular to the local direction of the
    (x, y) path. Returns the axis object.
    """
    # Exactly one sample may be selected.
    assert cond.sum() == 1

    # Grab rung locations
    _xr = _x[cond][0]
    _yr = _y[cond][0]

    # We need to transform into display coordinates to get a uniform
    # length scale for the tick.
    xr, yr = ax.transData.transform((_xr, _yr))

    # Just determine a fixed length scale: 1 data unit in display units.
    _xx1, _yy1 = ax.transData.transform((_xr, _yr))
    _xx2, _yy2 = ax.transData.transform((_xr+1, _yr))

    one_in_display_units = abs(_xx2 - _xx1)

    # Transform the whole path so direction is computed in display space.
    data = []
    for i in range(len(_x)):
        data.append(ax.transData.transform((_x[i], _y[i])))

    x, y = np.array(data).T

    # Local tangent direction of the path; rung is rotated 90 degrees.
    dy = np.roll(y, -1) - y
    dx = np.roll(x, -1) - x
    angle = np.arctan2(dy, dx) + np.pi / 2.

    # Set to 1 in data units * some amplification factor
    tick_len = one_in_display_units * tick_size

    # Endpoints of the rung, centered on (xr, yr).
    x2 = xr + tick_len * np.cos(angle[cond])[0]
    x1 = xr - tick_len * np.cos(angle[cond])[0]
    y1 = yr - tick_len * np.sin(angle[cond])[0]
    y2 = yr + tick_len * np.sin(angle[cond])[0]

    # Label anchor: offset twice the tick length, above or below the path.
    if label_on_top:
        _xl = xr + 2 * tick_len * np.cos(angle[cond])[0]
        _yl = yr + 2 * tick_len * np.sin(angle[cond])[0]
    else:
        _xl = xr - 2 * tick_len * np.cos(angle[cond])[0]
        _yl = yr - 2 * tick_len * np.sin(angle[cond])[0]

    # Transform back into data coordinates!
    inv = ax.transData.inverted()

    rungs = []
    for pt in ([x1, y1], [xr, yr], [x2, y2]):
        rungs.append(inv.transform(pt))

    tick_lines = LineCollection([rungs], **kwargs)
    ax.add_collection(tick_lines)

    if label is not None:
        xl, yl = inv.transform((_xl, _yl))
        # Rotate the label to lie along the rung.
        rot = (angle[cond][0] + np.pi / 2.) * 180 / np.pi
        pl.text(xl, yl, label, va="center", ha="center", rotation=rot,
            fontsize=12)

    return ax
def BoundingPolygon(self, pars, ivar=None, ax=None, fig=1,
    take_log=False, un_log=False, multiplier=1., add_patch=True,
    skip=0, skim=1, stop=None,
    boundary_type='convex', alpha=0.3, return_polygon=False, **kwargs): # pragma: no cover
    """
    Basically a scatterplot but instead of plotting individual points,
    we draw lines bounding the locations of all those points.

    Parameters
    ----------
    pars : list, tuple
        List of parameters that defines 2-D plane.
    boundary_type : str
        Options: 'convex' or 'concave' or 'envelope'
    alpha : float
        Only used if boundary_type == 'concave'. Making alpha smaller
        makes the contouring more crude, but also less noisy as a result.

    Returns
    -------
    Axis object, polygon, or (axis, polygon) depending on `add_patch`
    and `return_polygon`.
    """
    assert have_shapely, "Need shapely installed for this to work."
    assert have_descartes, "Need descartes installed for this to work."

    if (ax is None) and add_patch:
        gotax = False
        fig = pl.figure(fig)
        ax = fig.add_subplot(111)
    else:
        gotax = True

    data = self.ExtractData(pars, ivar, take_log, un_log, multiplier)

    xdata = self.xdata = data[pars[0]].compressed()
    ydata = self.ydata = data[pars[1]].compressed()

    # Organize into (x, y) pairs
    points = list(zip(xdata, ydata))

    # Create polygon object
    point_collection = geometry.MultiPoint(list(points))

    if boundary_type == 'convex':
        polygon = point_collection.convex_hull
    elif boundary_type == 'concave':
        polygon, edge_points = self._alpha_shape(points, alpha)
    elif boundary_type == 'envelope':
        polygon = point_collection.envelope
    else:
        raise ValueError('Unrecognized boundary_type={!s}!'.format(\
            boundary_type))

    # Plot a Polygon using descartes
    if add_patch and (polygon is not None):
        # This basically just gets the axis object in order without
        # actually plotting anything
        self.Scatter(pars, ivar=ivar, take_log=take_log, un_log=un_log,
            multiplier=multiplier, ax=ax, edgecolors='none',
            facecolors='none')

        try:
            patch = PolygonPatch(polygon, **kwargs)
            ax.add_patch(patch)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Fall back to treating
            # `polygon` as a collection of polygons (MultiPolygon).
            patches = []
            for pgon in polygon:
                patches.append(PolygonPatch(pgon, **kwargs))

            try:
                ax.add_collection(PatchCollection(patches, match_original=True))
            except TypeError:
                print('Patches: {!s}'.format(patches))

        pl.draw()

    if return_polygon and add_patch:
        return ax, polygon
    elif return_polygon:
        return polygon
    else:
        return ax
def get_par_prefix(self, par):
    """
    Strip a population-ID tag like '{0}' from a parameter name.

    Returns `par` unchanged if no such tag is present.
    """
    match = re.search(r"\{([0-9])\}", par)

    if match is None:
        return par

    # Everything preceding the '{N}' tag (curly braces included) is
    # the prefix.
    return par.split(match.group(0))[0]
@property
def weights(self):
    """
    Per-sample weights for non-MCMC (e.g., model grid) calculations.

    Defaults to uniform weights shaped like the chain. Raises
    NotImplementedError for non-MCMC runs that carry a likelihood.
    """
    if (not self.is_mcmc) and hasattr(self, 'logL'):
        if self.logL is not None:
            # BUG FIX: `raise NotImplemented(...)` raised a TypeError
            # because NotImplemented is a singleton, not an exception.
            raise NotImplementedError('need to do something here')

    if (not self.is_mcmc) and (not hasattr(self, '_weights')):
        self._weights = np.ones_like(self.chain)

    return self._weights
def get_levels(self, L, nu=(0.95, 0.68)):
    """
    Return levels corresponding to input nu-values, and assign
    colors to each element of the likelihood.

    Note: the default is a tuple (was a mutable list default, a classic
    Python pitfall); callers may still pass a list.
    """
    nu, levels = _error_2D_crude(L, nu=nu)

    return nu, levels
def PruneSet(self, pars, bin_edges, N, ivar=None, take_log=False,
    un_log=False, multiplier=1.):
    """
    Take `N` models from each 2-D bin in space `pars`.

    Parameters
    ----------
    pars : list
        Two parameter names defining the plane.
    bin_edges : list
        Two arrays of bin edges, one per parameter.
    N : int
        Maximum number of models retained per bin.

    Returns
    -------
    New object containing only the selected chain elements
    (via SliceByElement).
    """
    data = self.ExtractData(pars, ivar=ivar,
        take_log=take_log, un_log=un_log, multiplier=multiplier)

    be = bin_edges

    # BUG FIX: counters/indices must be integers — `ct` and `out` were
    # float arrays, and float indices raise IndexError in modern numpy.
    ct = np.zeros([len(be[0]) - 1, len(be[1]) - 1], dtype=int)
    out = np.zeros([len(be[0]) - 1, len(be[1]) - 1, N], dtype=int)
    for h in range(self.chain.shape[0]):
        x = data[pars[0]][h]
        y = data[pars[1]][h]

        if (x < be[0][0]) or (x > be[0][-1]):
            continue
        if (y < be[1][0]) or (y > be[1][-1]):
            continue

        # Find bin where this model lives.
        i = np.argmin(np.abs(x - be[0]))
        j = np.argmin(np.abs(y - be[1]))

        if i == len(be[0]) - 1:
            i -= 1
        if j == len(be[1]) - 1:
            j -= 1

        # This bin is already full
        if ct[i,j] == N:
            continue

        k = ct[i,j]
        out[i,j,k] = h
        ct[i,j] += 1

    # NOTE(review): slots of bins that never filled remain 0, so chain
    # element 0 may be selected spuriously — behavior preserved from the
    # original; confirm whether unfilled slots should be dropped.
    to_keep = out.ravel()

    # Create a new object
    return self.SliceByElement(to_keep)
def get_1d_error(self, par, ivar=None, nu=0.68, take_log=False,
    limit=None, un_log=False, multiplier=1., peak='mode', skip=0,
    stop=None):
    """
    Compute 1-D error bar for input parameter.

    Parameters
    ----------
    par : str
        Name of parameter.
    nu : float
        Percent likelihood enclosed by this 1-D error
    peak : str
        Determines whether the 'best' value is the median, mode, or
        maximum likelihood point.
    limit : str
        None (two-sided), 'upper', or 'lower'.
    skip, stop : int
        Exclude this many samples from the start / end of the chain.

    Returns
    -------
    if peak is None:
        Returns x-values corresponding to desired quartile range, i.e.,
        not really an error-bar.
    else:
        tuple: (maximum likelihood value, positive error, negative error).
    """
    to_hist = self.ExtractData(par, ivar=ivar, take_log=take_log,
        multiplier=multiplier, un_log=un_log)

    # Need to weight results of non-MCMC runs explicitly.
    # NOTE: `weights` is currently unused below (np.percentile is
    # unweighted); the old re-alignment code referenced an undefined
    # `mask` (NameError) and has been removed.
    if not hasattr(self, '_weights'):
        weights = None
    else:
        weights = self.weights

    if stop is not None:
        stop = -int(stop)

    if hasattr(to_hist[par], 'compressed'):
        _mask = to_hist[par].mask
        indices = np.arange(self.logL.size)

        # BUG FIX: `stop` is negative (slice-style) at this point, so the
        # old test `indices <= stop` selected nothing. Mirror slicing
        # semantics instead: drop the last |stop| elements.
        lo_cut = 0 if skip is None else skip
        hi_cut = indices.size if stop is None else indices.size + stop

        _cond = np.logical_and(indices >= lo_cut, indices < hi_cut)
        keep = np.logical_and(_cond, _mask == 0)

        logL = self.logL[keep]
        tohist = to_hist[par][keep]
    else:
        logL = self.logL[skip:stop]
        tohist = to_hist[par][skip:stop]

    if logL.size != tohist.size:
        raise ValueError('logL and chain have different number of elements!')

    if peak == 'median':
        N = len(logL)
        psorted = np.sort(tohist)
        mu = psorted[int(N / 2.)]
    elif peak == 'mode':
        mu = tohist[np.argmax(logL)]
    else:
        mu = None

    if limit is None:
        # Symmetric band enclosing nu of the samples.
        q1 = 0.5 * 100 * (1. - nu)
        q2 = 100 * nu + q1
    elif limit == 'upper':
        q1 = 0.0
        q2 = 100 * nu
    elif limit == 'lower':
        q1 = 100 * (1. - nu)
        q2 = 100
    else:
        raise ValueError('Unrecognized option for \'limit\': {!s}'.format(\
            limit))

    # Do it already
    lo, hi = np.percentile(tohist, (q1, q2))

    if (mu is not None) and (limit is None):
        sigma = (hi - mu, mu - lo)
    else:
        sigma = (hi, lo)

    return mu, np.array(sigma)
def _get_1d_kwargs(self, **kw):
for key in ['labels', 'colors', 'linestyles', 'cmap']:
if key in kw:
kw.pop(key)
return kw
def Limits(self, pars, ivar=None, take_log=False, un_log=False,
    multiplier=1., remove_nas=False):
    """Return a dict mapping each parameter to its (min, max) range."""
    data = self.ExtractData(pars, ivar=ivar, take_log=take_log,
        un_log=un_log, multiplier=multiplier, remove_nas=remove_nas)

    return {par: (min(data[par]), max(data[par])) for par in pars}
def ExtractData(self, pars, ivar=None, take_log=False, un_log=False,
    multiplier=1., remove_nas=False):
    """
    Extract data for subsequent analysis.

    This means a few things:
    (1) Go retrieve data from native format without having to worry about
    all the indexing yourself.
    (2) [optionally] take the logarithm.
    (3) [optionally] apply multiplicative factors.
    (4) Create a mask that excludes all nans / infs.

    Parameters
    ----------
    pars : list
        List of quantities to return. These can be parameters or the names
        of meta-data blobs.
    ivars : list
        List of independent variables at which to compute values of pars.
    take_log single bool or list of bools determining whether data should
        be presented after its log is taken
    un_log single bool or list of bools determining whether data should be
        presented after its log is untaken (i.e. it is exponentiated)
    multiplier list of numbers to multiply the parameters by before they
        are presented
    remove_nas bool determining whether rows with nan's or inf's should be
        removed or not. This must be set to True when the user
        is using numpy newer than version 1.9.x if the user wants
        to histogram the data because numpy gave up support for
        masked arrays in histograms.

    Returns
    -------
    Dictionary containing masked arrays of samples for each quantity.
    """
    # Normalize all per-parameter options to lists of len(pars).
    pars, take_log, multiplier, un_log, ivar = \
        self._listify_common_inputs(pars, take_log, multiplier, un_log,
            ivar)

    # A multiplier of unity is a no-op; flag with None so it can be skipped.
    if np.all(np.array(multiplier) == 1):
        multiplier = [None] * len(pars)

    data = {}
    for k, par in enumerate(pars):

        # If one of our free parameters, things are easy.
        if par in self.parameters:

            j = self.parameters.index(par)

            if self.is_log[j] and un_log[k]:
                val = 10**self.chain[:,j].copy()
            else:
                val = self.chain[:,j].copy()

            if multiplier[k] is not None:
                # In log10 space, a multiplicative factor is additive.
                if self.is_log[j] and (not un_log[k]):
                    val += np.log10(multiplier[k])
                else:
                    val *= multiplier[k]

            # Take log, unless the parameter is already in log10
            if take_log[k] and (not self.is_log[j]):
                val = np.log10(val)

        elif par == 'logL':
            val = self.logL
        elif par == 'load':
            val = self.load

        # Blobs are a little harder, might need new mask later.
        elif par in self.all_blob_names:

            i, j, nd, dims = self.blob_info(par)

            if nd == 0:
                val = self.get_blob(par, ivar=None).copy()
            else:
                val = self.get_blob(par, ivar=ivar[k]).copy()

            # Blobs are never stored as log10 of their true values
            if multiplier[k] is not None:
                val *= multiplier[k]

        # Only derived blobs in this else block, yes?
        else:

            # Brackets in the prefix break glob's pattern matching, so
            # replace them with single-character wildcards.
            if re.search("\[", self.prefix):
                print("WARNING: filenames with brackets can cause problems for glob.")
                print("       : replacing each occurence with '?'")
                _pre = self.prefix.replace('[', '?').replace(']', '?')
            else:
                _pre = self.prefix

            cand = sorted(glob.glob('{0!s}.*.{1!s}.pkl'.format(_pre, par)))

            if len(cand) == 0:
                cand =\
                    sorted(glob.glob('{0!s}*.{1!s}.pkl'.format(_pre, par)))

            if len(cand) == 0:
                raise IOError('No results for {0!s}*.{1!s}.pkl'.format(\
                    self.prefix, par))
            # Only one option: go for it.
            elif len(cand) == 1:
                fn = cand[0]
            elif len(cand) == 2:
                # This, for example, could happen for files named after
                # a parameter, like pop_fesc and pop_fesc_LW may get
                # confused, or pop_yield and pop_yield_index.
                pre1 = cand[0].partition('.')[0]
                pre2 = cand[1].partition('.')[0]

                if pre1 in pre2:
                    fn = cand[0]
                else:
                    fn = cand[1]
            else:
                print('{!s}'.format(cand))
                raise IOError(('More than 2 options for ' +\
                    '{0!s}*{1!s}.pkl').format(self.prefix, par))

            dat = read_pickle_file(fn, nloads=1, verbose=False)

            # What follows is real cludgey...sorry, future Jordan
            nd = len(dat.shape) - 1
            dims = dat[0].shape

            #assert nd == 1, "Help!"

            # Need to figure out dimensions of derived blob,
            # which requires some care as that info will not simply
            # be stored in a binfo.pkl file.

            # Right now this may only work with 1-D blobs...

            if (nd == 2) and (ivar[k] is not None):

                # NOTE(review): fn_md is assigned but only used by the
                # commented-out code below.
                fn_md = '{!s}.dbinfo.pkl'.format(self.prefix)
                #dbinfo = {}
                #dbinfos =\
                #    read_pickle_file(fn_md, nloads=None, verbose=False)
                #for info in dbinfos:
                #    dbinfo.update(info)
                #del dbinfos

                # Look up the independent variables for this DB
                #ivars = dbinfo[par]
                ivars = self.derived_blob_ivars[par]

                # Nearest-neighbor lookup along each independent variable;
                # a None second ivar selects the whole axis.
                i1 = np.argmin(np.abs(ivars[0] - ivar[k][0]))
                if ivar[k][1] is None:
                    i2 = Ellipsis
                else:
                    i2 = np.argmin(np.abs(ivars[1] - ivar[k][1]))

                #for iv in ivars:
                #    arr = np.array(iv).squeeze()
                #    if arr.shape == dat[0].shape:
                #        break
                #
                #loc = np.argmin(np.abs(arr - ivar[k]))

                val = dat[:,i1,i2]
            elif nd > 2:
                raise NotImplementedError('help')
            else:
                val = dat

        # must handle log-ifying blobs separately
        if par not in self.parameters:
            if take_log[k]:
                val = np.log10(val)

        ##
        # OK, at this stage, 'val' is just an array. If it corresponds to
        # a parameter, it's 1-D, if a blob, it's dimensionality could
        # be different. So, we have to be a little careful with the mask.
        ##

        if par in self.parameters:
            j = self.parameters.index(par)

            if self.mask.ndim == 2:
                mask = self.mask[:,j]
            else:
                mask = self.mask

        elif not np.array_equal(val.shape,self.mask.shape):
            # If no masked elements, don't worry any more. Just set -> 0.
            if not np.any(self.mask == 1):
                mask = 0
            # Otherwise, we might need to reshape the mask.
            # If, for example, certain links in the MCMC chain are masked,
            # we need to make sure that every blob element corresponding
            # to those links are masked.
            else:
                #print("hello, {}".format(self.mask[:,0].sum()))
                if self.mask.shape == val.shape:
                    mask = self.mask
                else:
                    # Repeat the per-link mask across the blob's trailing
                    # dimensions.
                    N = np.product(val.shape[1:])
                    try:
                        mask = np.reshape(np.repeat(self.mask[:,0], N),
                            val.shape)
                    except ValueError:
                        # NOTE(review): format arguments appear misordered
                        # (par fills the first shape slot), and `mask` is
                        # left unassigned on this path, so the np.ma.array
                        # call below will raise NameError — confirm.
                        print("Problem reshaping mask (shape {}) to match blob={} w/ shape {}".format(par,
                            self.mask.shape, val.shape))
        else:
            mask = self.mask

        if self.is_mcmc:
            data[par] = np.ma.array(val, mask=mask)
        else:
            try:
                data[par] = np.ma.array(val, mask=mask)
            except np.ma.MaskError:
                print("MaskError encountered. Assuming mask=0.")
                data[par] = np.ma.array(val, mask=0)

    if remove_nas:
        # Collect indices of chain links containing any masked, inf, or
        # nan element in any requested quantity, then delete those rows.
        to_remove = []
        length = len(data[list(data.keys())[0]])
        for ilink in range(length):
            for par in data:
                elem = data[par][ilink]
                if type(elem) is np.ma.core.MaskedConstant:
                    to_remove.append(ilink)
                    break
                elif type(elem) in numerical_types:
                    if np.isinf(elem) or np.isnan(elem):
                        to_remove.append(ilink)
                        break
                else: # elem is array (because par is a non-0d blob)
                    is_inf_or_nan = (np.isinf(elem) | np.isnan(elem))
                    if hasattr(elem, 'mask'): # ignore rows affected by mask
                        is_inf_or_nan = (is_inf_or_nan | elem.mask)
                    if not np.all(~is_inf_or_nan):
                        to_remove.append(ilink)
                        break
        for par in data:
            data[par] = np.delete(data[par], to_remove, axis=0)
        print(("{0} of {1} chain elements ignored because of chain " +\
            "links with inf's/nan's.").format(len(to_remove), length))

    return data
def _set_bins(self, pars, to_hist, take_log=False, bins=20):
"""
Create a vector of bins to be used when plotting PDFs.
"""
if type(to_hist) is dict:
binvec = {}
else:
binvec = []
for k, par in enumerate(pars):
if type(to_hist) is dict:
tohist = to_hist[par]
else:
tohist = to_hist[k]
if self.is_mcmc or (par not in self.parameters) or \
not hasattr(self, 'axes'):
if type(bins) == int:
valc = tohist
bvp = np.linspace(valc.min(), valc.max(), bins)
elif type(bins) == dict:
bvp = bins[par]
elif type(bins[k]) == int:
valc = tohist
bvp = np.linspace(valc.min(), valc.max(), bins[k])
else:
bvp = bins[k]
#if take_log[k]:
# binvec.append(np.log10(bins[k]))
#else:
# binvec.append(bins[k])
else:
if take_log[k]:
bvp = np.log10(self.axes[par])
else:
bvp = self.axes[par]
if type(to_hist) is dict:
binvec[par] = bvp
else:
binvec.append(bvp)
return binvec
def _set_inputs(self, pars, inputs, take_log, un_log, multiplier):
"""
Figure out input values for x and y parameters for each panel.
Returns
-------
Dictionary, elements sorted by
"""
if inputs is None:
return None
if type(inputs) is list:
if inputs == []:
return None
if type(inputs) is dict:
if not inputs:
return None
else:
inputs = list(inputs)
is_log = []
for par in pars:
if par in self.parameters:
k = self.parameters.index(par)
is_log.append(self.is_log[k])
else:
# Blobs are never log10-ified before storing to disk
is_log.append(False)
if type(multiplier) in [int, float]:
multiplier = [multiplier] * len(pars)
if len(np.unique(pars)) < len(pars):
input_output = []
else:
input_output = {}
Nd = len(pars)
for i, par in enumerate(pars):
if type(inputs) is list:
val = inputs[i]
elif par in inputs:
val = inputs[par]
else:
dq = DQ(data=inputs)
try:
val = dq[par]
except:
val = None
# Take log [optional]
if val is None:
vin = None
elif (is_log[i] or take_log[i]) and (not un_log[i]):
vin = np.log10(10**val * multiplier[i])
else:
vin = val * multiplier[i]
if type(input_output) is dict:
input_output[par] = vin
else:
input_output.append(vin)
return input_output
def _listify_common_inputs(self, pars, take_log, multiplier, un_log,
ivar=None):
"""
Make everything lists.
"""
if type(pars) not in [list, tuple]:
pars = [pars]
if type(take_log) == bool:
take_log = [take_log] * len(pars)
if type(un_log) == bool:
un_log = [un_log] * len(pars)
if type(multiplier) in [int, float]:
multiplier = [multiplier] * len(pars)
if ivar is not None:
if type(ivar) is list:
if len(pars) == 1:
i, j, nd, dims = self.blob_info(pars[0])
if nd == 2:
ivar = list(np.atleast_2d(ivar))
assert len(ivar) == len(pars)
else:
if len(pars) == 1:
ivar = [ivar]
else:
raise ValueError('ivar must be same length as pars')
else:
ivar = [None] * len(pars)
return pars, take_log, multiplier, un_log, ivar
def PlotPosteriorCDF(self, pars, bins=500, **kwargs):
    """Plot the cumulative (CDF) version of the posterior PDF."""
    return self.PosteriorPDF(pars, bins=bins, cdf=True, **kwargs)
def PosteriorPDF(self, **kwargs):
    """Alias for PlotPosteriorPDF; forwards all keyword arguments."""
    return self.PlotPosteriorPDF(**kwargs)
def PlotPosteriorPDF(self, pars, to_hist=None, ivar=None,
    ax=None, fig=1,
    multiplier=1., like=[0.95, 0.68], cdf=False,
    color_by_like=False, fill=True, take_log=False, un_log=False,
    bins=20, skip=0, skim=1,
    contour_method='raw', excluded=False, stop=None, **kwargs): # pragma: no cover
    """
    Compute posterior PDF for supplied parameters.

    If len(pars) == 2, plot 2-D posterior PDFs. If len(pars) == 1, plot
    1-D marginalized PDF.

    Parameters
    ----------
    pars : str, list
        Name of parameter or list of parameters to analyze.
    ivar : float
        Redshift, if any element of pars is a "blob" quantity.
    plot : bool
        Plot PDF?
    like : float, list
        If plot == False, return the nu-sigma error-bar.
        If color_by_like == True, list of confidence contours to plot.
    color_by_like : bool
        If True, color points based on what confidence contour they lie
        within.
    multiplier : list
        Two-element list of multiplicative factors to apply to elements of
        pars.
    take_log : list
        Two-element list saying whether to histogram the base-10 log of
        each parameter or not.
    skip : int
        Number of steps at beginning of chain to exclude. This is a nice
        way of doing a burn-in after the fact.
    skim : int
        Only take every skim'th step from the chain.
    excluded : bool
        If True, and fill == True, fill the area *beyond* the given contour with
        cross-hatching, rather than the area interior to it.

    Returns
    -------
    Either a matplotlib.Axes.axis object or a nu-sigma error-bar,
    depending on whether we're doing a 2-D posterior PDF (former) or
    1-D marginalized posterior PDF (latter).
    """
    cs = None
    kw = kwargs

    if 'labels' in kw:
        labels = kwargs['labels']
    else:
        labels = self.custom_labels

    # Only make a new plot window if there isn't already one
    if ax is None:
        gotax = False
        fig = pl.figure(fig)
        ax = fig.add_subplot(111)
    else:
        gotax = True

    # Grab all the data we need
    if (to_hist is None):
        to_hist = self.ExtractData(pars, ivar=ivar,
            take_log=take_log, un_log=un_log, multiplier=multiplier)

    pars, take_log, multiplier, un_log, ivar = \
        self._listify_common_inputs(pars, take_log, multiplier, un_log,
            ivar)

    # Modify bins to account for log-taking, multipliers, etc.
    binvec = self._set_bins(pars, to_hist, take_log, bins)

    # We might supply weights by-hand for ModelGrid calculations
    if not hasattr(self, '_weights'):
        weights = None
    else:
        weights = self.weights

    ##
    ### Histogramming and plotting starts here
    ##

    if stop is not None:
        stop = -int(stop)

    # Marginalized 1-D PDFs
    if len(pars) == 1:

        if type(to_hist) is dict:
            tohist = to_hist[pars[0]][skip:stop]
            b = binvec[pars[0]]
        elif type(to_hist) is list:
            tohist = to_hist[0][skip:stop]
            b = binvec[0]
        else:
            tohist = to_hist[skip:stop]
            b = bins

        if hasattr(tohist, 'compressed'):
            tohist = tohist.compressed()

        hist, bin_edges = \
            np.histogram(tohist, density=True, bins=b, weights=weights)

        bc = bin_e2c(bin_edges)

        # Take CDF
        if cdf:
            hist = np.cumsum(hist)

        tmp = self._get_1d_kwargs(**kw)

        ax.plot(bc, hist / hist.max(), drawstyle='steps-mid', **tmp)
        ax.set_ylim(0, 1.05)

    # Marginalized 2-D PDFs
    else:

        if type(to_hist) is dict:
            tohist1 = to_hist[pars[0]][skip:stop]
            tohist2 = to_hist[pars[1]][skip:stop]
            b = [binvec[pars[0]], binvec[pars[1]]]
        else:
            tohist1 = to_hist[0][skip:stop]
            tohist2 = to_hist[1][skip:stop]
            b = [binvec[0], binvec[1]]

        # If each quantity has a different set of masked elements,
        # we'll get an error at plot-time.
        if hasattr(tohist1, 'compressed'):
            tohist1 = tohist1.compressed()
        if hasattr(tohist2, 'compressed'):
            tohist2 = tohist2.compressed()

        # Compute 2-D histogram
        hist, xedges, yedges = \
            np.histogram2d(tohist1, tohist2, bins=b, weights=weights)

        hist = hist.T

        # Recover bin centers
        bc = []
        for i, edges in enumerate([xedges, yedges]):
            bc.append(bin_e2c(edges))

        # Determine mapping between likelihood and confidence contours
        if color_by_like:

            # Get likelihood contours (relative to peak) that enclose
            # nu-% of the area
            if contour_method == 'raw':
                nu, levels = error_2D(None, None, hist, None, nu=like,
                    method='raw')
            else:
                # BUG FIX: `nu` was passed before being defined
                # (NameError); the requested confidence levels live in
                # `like`.
                nu, levels = error_2D(to_hist[0], to_hist[1], self.L / self.L.max(),
                    bins=[binvec[0], binvec[1]], nu=like, method=contour_method)

            if fill:
                if excluded and len(nu) == 1:
                    # Fill the entire window with cross-hatching
                    x1, x2 = ax.get_xlim()
                    y1, y2 = ax.get_ylim()

                    x_polygon = [x1, x2, x2, x1]
                    y_polygon = [y1, y1, y2, y2]

                    ax.fill(x_polygon, y_polygon, color="none", hatch='X',
                        edgecolor=kwargs['color'])

                    # Now, fill the enclosed area with white
                    ax.contourf(bc[0], bc[1], hist / hist.max(),
                        levels, color='w', colors='w', zorder=2)
                    # Draw an outline too
                    ax.contour(bc[0], bc[1], hist / hist.max(),
                        levels, colors=kwargs['color'], linewidths=1,
                        zorder=2)
                else:
                    ax.contourf(bc[0], bc[1], hist / hist.max(),
                        levels, zorder=3, **kwargs)
            else:
                ax.contour(bc[0], bc[1], hist / hist.max(),
                    levels, zorder=4, **kwargs)
        else:
            if fill:
                cs = ax.contourf(bc[0], bc[1], hist / hist.max(),
                    zorder=3, **kw)
            else:
                cs = ax.contour(bc[0], bc[1], hist / hist.max(),
                    zorder=4, **kw)

    # Force linear
    if not gotax:
        ax.set_xscale('linear')
        ax.set_yscale('linear')

    # Add nice labels (or try to)
    self.set_axis_labels(ax, pars, take_log, un_log, None, labels)

    # Rotate ticks?
    for tick in ax.get_xticklabels():
        tick.set_rotation(45.)
    for tick in ax.get_yticklabels():
        tick.set_rotation(45.)

    pl.draw()

    return ax
def PlotContour(self, pars, c, levels=None, leveltol=1e-6, ivar=None,
    take_log=False,
    un_log=False, multiplier=1., ax=None, fig=1, fill=True,
    inline_labels=False, manual=None, cax=None, use_colorbar=True,
    cb_kwargs={}, **kwargs):
    """
    Draw contours that are NOT associated with confidence levels.

    ..note:: To draw many contours in same plane, just call this
        function repeatedly.

    Should use pl.contour if we're plotting on a regular grid, i.e.,
    the parameter space of a 2-D model grid with the color axis
    some derived quantity.

    Parameters
    ----------
    pars : list
        List of parameters defining the plane on which to draw contours.
    c : str
        Name of parameter or blob that we're to draw contours of.
    levels : list
        [Optional] list of levels to contour; required when `pars` are
        not both free parameters (the scatter-based fallback path).

    Returns
    -------
    Tuple of (axis object, colorbar or None).
    """
    # Only make a new plot window if there isn't already one
    if ax is None:
        gotax = False
        fig = pl.figure(fig)
        ax = fig.add_subplot(111)
    else:
        gotax = True

    cb = None
    if (pars[0] in self.parameters) and (pars[1] in self.parameters):
        # Regular grid: use matplotlib's contouring machinery directly.
        xdata, ydata, zdata = self._reshape_data(pars, c, ivar=ivar,
            take_log=take_log, un_log=un_log, multiplier=multiplier)

        if fill:
            kw = kwargs.copy()
            kw.update(cb_kwargs)
            if levels is not None:
                CS = ax.contourf(xdata, ydata, zdata.T, levels, **kw)
            else:
                CS = ax.contourf(xdata, ydata, zdata.T, **kw)

            if use_colorbar:
                cb = pl.colorbar(CS, cax=cax, **cb_kwargs)
        else:
            if levels is not None:
                CS = ax.contour(xdata, ydata, zdata.T, levels, **kwargs)
            else:
                CS = ax.contour(xdata, ydata, zdata.T, **kwargs)

            if inline_labels:
                # BUG FIX: keyword was misspelled 'ineline', which
                # matplotlib rejects; the clabel keyword is 'inline'.
                pl.clabel(CS, inline=1, fontsize=10, manual=manual)
    else:
        # Irregular sampling: approximate each contour by connecting
        # samples whose z-value is within `leveltol` of the level.
        p = list(pars) + [c]

        # Grab all the data we need
        data = self.ExtractData(p, ivar=ivar,
            take_log=take_log, un_log=un_log, multiplier=multiplier)

        xdata = data[p[0]]
        ydata = data[p[1]]
        zdata = data[p[2]]

        for i, level in enumerate(levels):
            # Find indices of appropriate elements
            cond = np.abs(zdata - level) < leveltol
            elements = np.argwhere(cond).squeeze()
            order = np.argsort(xdata[elements])

            # Per-level styling: tuple-valued kwargs are indexed by level.
            kw = {}
            for kwarg in kwargs.keys():
                if type(kwargs[kwarg]) == tuple:
                    kw[kwarg] = kwargs[kwarg][i]
                else:
                    kw[kwarg] = kwargs[kwarg]

            ax.plot(xdata[elements][order], ydata[elements][order], **kw)

    pl.draw()

    return ax, cb
def PlotContourScatter(self, x, y, c, z=None, ax=None, fig=1, Nscat=1e4,
    take_log=False, cmap='jet', alpha=1.0, bins=20, vmin=None, vmax=None,
    color_by_like=False, like=[0.95, 0.68], zbins=None, labels=None,
    **kwargs):
    """
    Show contour plot in 2-D plane, and add colored points for third axis.

    Parameters
    ----------
    x : str
        Fields for the x-axis.
    y : str
        Fields for the y-axis.
    c : str
        Name of parameter to represent with colored points.
    z : int, float, str
        Redshift (if investigating blobs)
    Nscat : int
        Number of samples plot.
    zbins : list
        If supplied, discretize the color axis into these bins.

    Returns
    -------
    Three objects: the main Axis instance, the scatter plot instance,
    and the colorbar object.
    """
    # Broadcast scalar options to one per axis (x, y, color).
    if type(take_log) == bool:
        take_log = [take_log] * 3
    if labels is None:
        labels = default_labels
    else:
        # User labels override defaults without mutating the module dict.
        labels_tmp = default_labels.copy()
        labels_tmp.update(labels)
        labels = labels_tmp
    if type(z) is not list:
        z = [z] * 3
    pars = [x, y]
    axes = []
    # Pull raw samples for the two plane axes: from the chain itself, or
    # from (derived) blobs at the requested redshift.
    for i, par in enumerate(pars):
        if par in self.parameters:
            axes.append(self.chain[:,self.parameters.index(par)])
        elif par in self.blob_names:
            axes.append(self.blobs[:,self.blob_redshifts.index(z[i]),
                self.blob_names.index(par)])
        elif par in self.derived_blob_names:
            axes.append(self.derived_blobs[:,self.blob_redshifts.index(z[i]),
                self.derived_blob_names.index(par)])
    for i in range(2):
        if take_log[i]:
            axes[i] = np.log10(axes[i])
    xax, yax = axes
    # Color-axis samples.
    if c in self.parameters:
        zax = self.chain[:,self.parameters.index(c)].ravel()
    elif c in self.all_blob_names:
        zax = self.ExtractData(c)[c]
    elif c in self.derived_blob_names:
        zax = self.derived_blobs[:,self.blob_redshifts.index(z[-1]),
            self.derived_blob_names.index(c)]
    # Guard against runs killed between chain and blob writes: truncate
    # the chain axes to the blob length so shapes agree.
    if zax.shape[0] != self.chain.shape[0]:
        if self.chain.shape[0] > zax.shape[0]:
            xax = xax[0:self.blobs.shape[0]]
            yax = yax[0:self.blobs.shape[0]]
            print("Looks like calculation was terminated after chain " +\
                "was written to disk but before blobs. How unlucky!")
            print("Applying cludge to ensure shape match...")
        else:
            raise ValueError('Shape mismatch between blobs and chain!')
    if take_log[2]:
        zax = np.log10(zax)
    # PosteriorPDF only needs redshifts for the two plane axes.
    z.pop(-1)
    # Underlying 2-D posterior contours.
    ax = self.PosteriorPDF(pars, z=z, take_log=take_log, fill=False,
        bins=bins, ax=ax, fig=fig, color_by_like=color_by_like, like=like,
        **kwargs)
    # Pick out Nscat random points to plot
    mask = np.zeros_like(xax, dtype=bool)
    rand = np.arange(len(xax))
    np.random.shuffle(rand)
    mask[rand < Nscat] = True
    if zbins is not None:
        # NOTE(review): string-built attribute lookup via eval; assumes
        # `mpl.colorbar.cm.<cmap>` resolves — verify against the
        # installed matplotlib version.
        cmap_obj = eval('mpl.colorbar.cm.{!s}'.format(cmap))
        #if take_log[2]:
        #    norm = mpl.colors.LogNorm(zbins, cmap_obj.N)
        #else:
        if take_log[2]:
            norm = mpl.colors.BoundaryNorm(np.log10(zbins), cmap_obj.N)
        else:
            norm = mpl.colors.BoundaryNorm(zbins, cmap_obj.N)
    else:
        norm = None
    scat = ax.scatter(xax[mask], yax[mask], c=zax[mask], cmap=cmap,
        zorder=1, edgecolors='none', alpha=alpha, vmin=vmin, vmax=vmax,
        norm=norm)
    cb = pl.colorbar(scat)
    cb.set_alpha(1)
    # NOTE(review): Colorbar.draw_all is deprecated in newer matplotlib —
    # confirm target version.
    cb.draw_all()
    # Colorbar label: exact match, prefix match (e.g. 'par{0}'), or raw name.
    if c in labels:
        cblab = labels[c]
    elif '{' in c:
        cblab = labels[c[0:c.find('{')]]
    else:
        cblab = c
    if take_log[2]:
        cb.set_label(logify_str(cblab))
    else:
        cb.set_label(cblab)
    cb.update_ticks()
    pl.draw()
    return ax, scat, cb
def get_samples(self, par, burn=0):
    """
    Return samples of a single parameter from the flattened chain.

    Parameters
    ----------
    par : str
        Parameter name; must be an element of self.parameters.
    burn : int
        Number of initial (flattened) chain links to discard.

    Returns
    -------
    1-D array of samples of `par` with the first `burn` links removed.
    """
    # Note: a per-walker burn (`burn // nlinks`) used to be computed here
    # but was never used; the burn applies to the flattened chain.
    assert par in self.parameters
    i = list(self.parameters).index(par)
    return self.chain[burn:,i]
def PlotTriangle(self, fig=1, axes=None, pars=None, redshifts=None,
    complement=False, bins=20, burn=0, fig_kwargs=None, contours=True,
    fill=False, nu=None, take_log=False, is_log=False,
    skip=None, smooth=None, skip_pars=None, **kwargs): # pragma: no cover
    """
    Make a triangle (corner) plot: 1-D marginalized posteriors on the
    diagonal, 2-D posteriors below it. Stolen from micro21cm...

    Parameters
    ----------
    pars : list
        Parameters to include (defaults to all free parameters).
    bins, complement, is_log, take_log : scalar or list
        Scalars are broadcast to one entry per parameter.
    burn : int
        Number of initial chain links to discard.
    skip : list
        Row/column indices to leave empty.
    skip_pars : list
        Parameter names to leave empty.
    smooth : float
        If supplied, gaussian-filter the 2-D histograms with this width.

    Returns
    -------
    (figure, nested list of axes by row).
    """
    # Avoid shared-mutable-default pitfalls (were `fig_kwargs={}`,
    # `nu=[0.95, 0.68]`).
    if fig_kwargs is None:
        fig_kwargs = {}
    if nu is None:
        nu = [0.95, 0.68]
    has_ax = axes is not None
    if not has_ax:
        fig = pl.figure(constrained_layout=True, num=fig, **fig_kwargs)
        fig.subplots_adjust(hspace=0.05, wspace=0.05)
    else:
        axes_by_row = axes
    all_pars = self.parameters
    if pars is None:
        pars = all_pars
    else:
        pass
    labels = [''] * len(pars)
    Np = len(pars)
    # Broadcast scalar options to one entry per parameter.
    if type(bins) not in [list, tuple, np.ndarray]:
        bins = [bins] * Np
    if type(complement) not in [list, tuple, np.ndarray]:
        complement = [complement] * Np
    if type(is_log) not in [list, tuple, np.ndarray]:
        is_log = [is_log] * Np
    if type(take_log) not in [list, tuple, np.ndarray]:
        take_log = [take_log] * Np
    # Remember, for gridspec, rows are numbered frop top-down.
    if not has_ax:
        gs = fig.add_gridspec(Np, Np)
        axes_by_row = [[] for i in range(Np)]
    for i, row in enumerate(range(Np)):
        for j, col in enumerate(range(Np)):
            # Skip elements in upper triangle
            if j > i:
                continue
            if skip is not None:
                if i in skip:
                    continue
                if j in skip:
                    continue
            # Create axis
            if not has_ax:
                _ax = fig.add_subplot(gs[i,j])
                axes_by_row[i].append(_ax)
            else:
                _ax = axes_by_row[i][j]
            if skip_pars is not None:
                if pars[i] in skip_pars:
                    continue
                if pars[j] in skip_pars:
                    continue
            if pars[i] not in all_pars:
                continue
            if pars[j] not in all_pars:
                continue
            idata = self.get_samples(pars[i], burn)
            jdata = self.get_samples(pars[j], burn)
            # Retrieve data to be used in plot
            if not is_log[i]:
                p1 = 1. - idata if complement[i] else idata
            else:
                p1 = 10**idata if is_log[i] else idata
            if take_log[i]:
                p1 = np.log10(p1)
            # 2-D PDFs from here on
            if not is_log[j]:
                p2 = 1. - jdata if complement[j] else jdata
            else:
                p2 = 10**jdata if is_log[j] else jdata
            if take_log[j]:
                p2 = np.log10(p2)
            # 1-D PDFs
            if i == j:
                kw = kwargs.copy()
                # hist() doesn't accept contour-style kwargs.
                if 'colors' in kw:
                    del kw['colors']
                if 'linestyles' in kw:
                    del kw['linestyles']
                _ax.hist(p2, density=True, bins=bins[j], histtype='step', **kw)
                if j > 0:
                    _ax.set_yticklabels([])
                    if j == Np - 1:
                        _ax.set_xlabel(labels[j])
                    else:
                        _ax.set_xticklabels([])
                else:
                    _ax.set_ylabel(r'PDF')
                ok = np.isfinite(p2)
                _ax.set_xlim(p2[ok==1].min(), p2[ok==1].max())
                continue
            if contours:
                hist, be2, be1 = np.histogram2d(p2, p1, [bins[j], bins[i]])
                if smooth is not None:
                    hist = gaussian_filter(hist, smooth)
                bc1 = bin_e2c(be1)
                bc2 = bin_e2c(be2)
                # NOTE(review): `nu` is rebound to get_levels' return
                # value, so later panels see the converted levels —
                # verify this round-trips through get_levels.
                nu, levels = self.get_levels(hist, nu)
                # (columns, rows, histogram)
                if fill:
                    _ax.contourf(bc2, bc1, hist.T / hist.max(),
                        levels, zorder=4, **kwargs)
                else:
                    _ax.contour(bc2, bc1, hist.T / hist.max(),
                        levels, zorder=4, **kwargs)
            else:
                h, x, y, img = _ax.hist2d(p2, p1, bins=[bins[j], bins[i]],
                    cmap='viridis', norm=LogNorm())
            # Get rid of labels/ticks on interior panels.
            if i < Np - 1:
                _ax.set_xticklabels([])
            else:
                _ax.set_xlabel(labels[j])
            if j > 0:
                _ax.set_yticklabels([])
            else:
                _ax.set_ylabel(labels[i])
            ok1 = np.isfinite(p1)
            ok2 = np.isfinite(p2)
            _ax.set_ylim(p1[ok1==1].min(), p1[ok1==1].max())
            _ax.set_xlim(p2[ok2==1].min(), p2[ok2==1].max())
    # Done
    return fig, axes_by_row
def _label_panels(self, mp, label_panels):
    """
    Annotate each panel of a multi-panel plot with '(a)', '(b)', ...

    Parameters
    ----------
    mp : object
        Multi-panel object with a `grid` attribute (iterable of axes,
        possibly containing None entries which are skipped).
    label_panels : str
        One of 'upper left', 'upper right', 'upper center', 'lower right'.

    Returns
    -------
    The input `mp`, with annotations added.
    """
    letters = list(string.ascii_lowercase)
    # After 'z', continue with 'aa', 'bb', ...
    letters.extend([let*2 for let in list(string.ascii_lowercase)])
    ct = 0
    for ax in mp.grid:
        if ax is None:
            continue
        if label_panels == 'upper left':
            ax.annotate('({!s})'.format(letters[ct]), (0.05, 0.95),
                xycoords='axes fraction', ha='left', va='top')
        elif label_panels == 'upper right':
            ax.annotate('({!s})'.format(letters[ct]), (0.95, 0.95),
                xycoords='axes fraction', ha='right', va='top')
        elif label_panels == 'upper center':
            ax.annotate('({!s})'.format(letters[ct]), (0.5, 0.95),
                xycoords='axes fraction', ha='center', va='top')
        elif label_panels == 'lower right':
            # BUG FIX: previously used the upper-right coordinates and
            # alignment, so labels never appeared in the lower right.
            ax.annotate('({!s})'.format(letters[ct]), (0.95, 0.05),
                xycoords='axes fraction', ha='right', va='bottom')
        else:
            print("WARNING: Unrecognized label_panels option.")
            break
        ct += 1
    pl.draw()
    return mp
def _reshape_data(self, pars, c, ivar=None, take_log=False,
    un_log=False, multiplier=1.):
    """
    Prepare datasets to make a contour plot.

    Maps the flat chain onto a regular (x, y) grid defined by the unique
    values of the two grid parameters, filling a 2-D array with the
    corresponding values of field `c`. Grid points with no matching
    sample are left at +inf.

    Parameters
    ----------
    pars : list
        Two parameter names; both must be free parameters.
    c : str
        Name of the field (parameter or blob) supplying the z values.

    Returns
    -------
    Tuple (x, y, zarr) with zarr.shape == (len(x), len(y)).
    """
    assert len(pars) == 2
    assert pars[0] in self.parameters and pars[1] in self.parameters
    p = list(pars) + [c]
    # Grab all the data we need
    data = self.ExtractData(p, ivar=ivar,
        take_log=take_log, un_log=un_log, multiplier=multiplier)
    x = np.unique(data[pars[0]])
    y = np.unique(data[pars[1]])
    # Don't do this: grid may be incomplete!
    #assert x * y == data[c].size
    flat = data[c]
    # +inf marks grid points with no matching sample.
    zarr = np.inf * np.ones([len(x), len(y)])
    for i, xx in enumerate(x):
        for j, yy in enumerate(y):
            # Samples matching this (x, y) grid point.
            xok = xx == data[pars[0]]
            yok = yy == data[pars[1]]
            gotit = np.logical_and(xok, yok)
            if gotit.sum() == 0:
                continue
            # Fully-masked comparisons can produce a masked constant.
            if type(gotit.sum()) == np.ma.core.MaskedConstant:
                continue
            k = np.argwhere(gotit == True)
            # If multiple elements, means this grid had redundant
            # elements. Shouldn't happen in the future!
            if len(k.shape) == 2:
                # Just pick one
                zarr[i,j] = flat[k].min()
            else:
                zarr[i,j] = flat[k]
    return x, y, zarr
def RetrieveModels(self, skip=0, stop=None, Nmods=1, seed=None,
    limit_to=None, limit_all=False, tol=None, force_positive=False,
    percentile=None, **kwargs):
    """
    Return a set of model parameters close to those requested.

    Do this by searching the posterior distribution for nearby points,
    potentially within some tolerance of the value requested and/or within
    the bulk of the distribution, quantified by `limit_to`.

    Parameters
    ----------
    kwargs :
        Exactly one (parameter name, target value) pair.
    Nmods : int
        Maximum number of matching models to return.
    tol : float
        If supplied, matches must lie within `tol` of the target value.
    limit_to : float
        If supplied, matches must lie within this confidence interval
        of the target parameter's 1-D posterior.
    limit_all : bool
        If True, apply `limit_to` to every parameter.
    percentile : float
        If supplied, matches must have logL above the upper edge of this
        central percentile of the (finite) logL distribution.

    Returns
    -------
    List of parameter dictionaries (at most Nmods elements).
    """
    if len(kwargs.keys()) > 1:
        # BUG FIX: NotImplemented is not an exception class; raising it
        # produces a TypeError.
        raise NotImplementedError('help')
    # Grab element closest to requested
    for i, par in enumerate(self.parameters):
        if par not in kwargs:
            continue
        nearby = np.abs(self.chain[skip:stop,i] - kwargs[par])
        # Sort samples in order of closeness to our request.
        nsorted = np.argsort(nearby)
        break
    logL_sorted = self.logL[skip:stop][nsorted]
    good_sorted = self.chain[skip:stop,i][nsorted]
    # Compute likelihood percentiles, pick a cutoff.
    if percentile is not None:
        q1 = 0.5 * 100 * (1. - percentile)
        q2 = 100 * percentile + q1
        lo, hi = np.percentile(logL_sorted[np.isfinite(logL_sorted)],
            (q1, q2))
        logL_cut = hi
    else:
        logL_cut = -np.inf
    ct = 0
    models = []
    for n, item in enumerate(nsorted):
        if ct >= Nmods:
            break
        val = good_sorted[n]
        if np.ma.is_masked(val):
            continue
        logL = logL_sorted[n]
        if logL < logL_cut:
            continue
        if tol is not None:
            if abs(val - kwargs[par]) > tol:
                continue
        if limit_to is not None:
            mu, (hi, lo) = self.get_1d_error(par, nu=limit_to)
            if not lo <= val <= hi:
                continue
        if limit_all:
            # BUG FIX: a failed range check previously `continue`d the
            # *inner* loop, so candidates were never actually rejected.
            all_ok = True
            for _i, _par in enumerate(self.parameters):
                if _i == i:
                    # Already did this one!
                    continue
                mu, (hi, lo) = self.get_1d_error(_par, nu=limit_to)
                if not lo <= self.chain[skip:stop,:][item,_i] <= hi:
                    all_ok = False
                    break
            if not all_ok:
                continue
        print("Matched val={} (actual={}) at index={}".format(kwargs[par],
            val, item))
    # Convert this chain link to a parameter dictionary.
        p = {}
        # BUG FIX: loop variable renamed so it no longer clobbers `par`,
        # which is still needed by `kwargs[par]` on later iterations.
        for m, pname in enumerate(self.parameters):
            if self.is_log[m]:
                p[pname] = 10**self.chain[skip:stop,:][item,m]
            else:
                p[pname] = self.chain[skip:stop,:][item,m]
        models.append(p)
        ct += 1
    return models
def ReconstructedFunction(self, name, ivar=None, fig=1, ax=None,
    use_best=False, percentile=0.68, take_log=False, un_logy=False,
    expr=None, new_x=None, is_logx=False, smooth_boundary=False,
    multiplier=1, skip=0, stop=None, return_data=False, z_to_freq=False,
    best='mode', fill=True, samples=None, ivars=None, E_to_freq=False,
    **kwargs):
    """
    Reconstructed evolution in whatever the independent variable is.

    Parameters
    ----------
    name : str
        Name of quantity you're interested in.
    ivar : list, np.ndarray
        List of values (or nested list) of independent variables. If
        blob is 2-D, only need to provide the independent variable for
        one of the dimensions, e.g.,

            # If LF data, plot LF at z=3.8
            ivar = [3.8, None]

        or

            # If LF data, plot z evolution of phi(MUV=-20)
            ivar = [None, -20]

    ivars : np.ndarray
        If this is a derived blob, supply ivars by hand. Need to write
        automated way of figuring this out.
    percentile : bool, float
        If not False, should be the confidence interval to plot, e.g, 0.68.
    use_best : bool
        If True, will plot the maximum likelihood reconstructed
        function. Otherwise, will use `percentile` and plot shaded region.
    samples : int, str
        If 'all', will plot all realizations individually. If an integer,
        will plot only the last `samples` realizations.

    Returns
    -------
    Axis instance, or (ax, xarr, yblob) if return_data is True.
    """
    # Only make a new plot window if there isn't already one.
    if ax is None:
        gotax = False
        fig = pl.figure(fig)
        ax = fig.add_subplot(111)
    else:
        gotax = True
    # e.g. percentile=0.68 -> (q1, q2) = (16, 84).
    if percentile:
        q1 = 0.5 * 100 * (1. - percentile)
        q2 = 100 * percentile + q1
    # Can't plot more realizations than there are unmasked chain links.
    max_samples = min(self.chain.shape[0], self.mask.size - self.mask.sum())
    if samples is not None:
        if type(samples) == int:
            samples = min(max_samples, samples)
    # Step 1: figure out ivars
    try:
        info = self.blob_info(name)
        nd = info[2]
    except KeyError:
        print("WARNING: blob {} not found by `blob_info`.".format(name))
        print(" : Making some assumptions...")
        if ivars is None:
            ivars = self.get_ivars(name)
        else:
            if type(ivars) is str:
                ivars = np.array(self.get_ivars(ivars))
            else:
                ivars = np.atleast_2d(ivars)
        nd = len(ivars)
    if ivars is None:
        if nd == 1:
            # This first case happens when reading from hdf5 since the
            # blobs there aren't nested.
            if info[0] is None:
                ivars = np.atleast_2d(self.blob_ivars[0])
            else:
                ivars = np.atleast_2d(self.blob_ivars[info[0]])
        else:
            if name in self.derived_blob_names:
                ivars = self.derived_blob_ivars[name]
            else:
                ivars = self.blob_ivars[info[0]]
    if nd != 1 and (ivar is None):
        # BUG FIX: NotImplemented is not an exception class.
        raise NotImplementedError('If not 1-D blob, must supply one ivar!')
    ##
    # Real work starts here.
    ##
    # First, read-in data from disk. Slice it up depending on if
    # skip or stop were provided. Squeeze arrays to remove NaNs etc.
    # 1-D case. Don't need to specify ivar by hand.
    if nd == 1:
        # Read in the independent variable(s) and data itself
        xarr = ivars[0]
        # Convert redshifts to frequencies
        if z_to_freq:
            xarr = nu_0_mhz / (1. + xarr)
        if E_to_freq:
            xarr = xarr * erg_per_ev / h_p
        if is_logx:
            xarr = 10**xarr
        # User-supplied x-array overrides everything above. (A duplicate
        # assignment that preceded the transforms has been removed; the
        # final value of xarr is unchanged.)
        if new_x is not None:
            xarr = new_x
            print("You better know what you're doing!")
        tmp = self.ExtractData(name,
            take_log=take_log, un_log=un_logy, multiplier=multiplier)
        yblob = tmp[name].squeeze()
        if expr is not None:
            # NOTE(review): eval of user-supplied expression — trusted
            # input only.
            yblob = eval(expr).squeeze()
        # Only keep runs where ALL elements are OK.
        # NOTE(review): assumes yblob is a masked array — verify for
        # hdf5-backed blobs.
        mask = np.all(yblob.mask == True, axis=1)
        keep = np.array(np.logical_not(mask), dtype=int)
        nans = np.any(np.isnan(yblob.data), axis=1)
        infs = np.any(np.isinf(yblob.data), axis=1)
        if skip is not None:
            keep[0:skip] *= 0
        if stop is not None:
            keep[stop:] *= 0
        if (samples is not None) and (type(samples) != str):
            # BUG FIX: keep only the *last* `samples` realizations, as
            # documented and as done in the 2-D branch below (this
            # previously zeroed out the last `samples` instead).
            keep[0:-samples] = 0
        # Grab the maximum likelihood point
        if use_best and self.is_mcmc:
            if best == 'median':
                N = len(self.logL[keep == 1])
                psorted = np.argsort(self.logL[keep == 1])
                loc = psorted[int(N / 2.)]
            else:
                loc = np.argmax(self.logL[keep == 1])
        # A few NaNs ruin everything
        if np.any(nans):
            print("WARNING: {} elements with NaNs detected in field={}. Will be discarded.".format(nans.sum(), name))
            keep[nans == 1] = 0
        if np.any(infs):
            print("WARNING: {} elements with infs detected in field={}. Will be discarded.".format(infs.sum(), name))
            keep[infs == 1] = 0
        #
        if not np.any(keep==1):
            print("WARNING: no finite elements for field={}.".format(name))
            return
        # Plot time
        if samples == 'all':
            ax.plot(xarr, yblob.T, **kwargs)
        elif type(samples) is int:
            ax.plot(xarr, yblob[keep==1].T, **kwargs)
        elif use_best and self.is_mcmc:
            y = yblob[keep==1][loc]
            if smooth_boundary:
                y = smooth(y, smooth_boundary)
            ax.plot(xarr, y, **kwargs)
        elif percentile:
            lo, hi = np.percentile(yblob[keep==1], (q1, q2), axis=0)
            if smooth_boundary:
                lo = smooth(lo, smooth_boundary)
                hi = smooth(hi, smooth_boundary)
            if fill:
                ax.fill_between(xarr, lo, hi, **kwargs)
            else:
                # Draw upper/lower bounds as lines; 'ls' may be a 2-tuple
                # giving the style for each.
                kw_lo = kwargs.copy()
                kw_hi = kwargs.copy()
                if 'ls' in kwargs:
                    if type(kwargs['ls']) in [list, tuple]:
                        kw_lo['ls'] = kwargs['ls'][0]
                        kw_hi['ls'] = kwargs['ls'][1]
                ax.plot(xarr, lo, **kw_lo)
                # Only label one of the two boundary curves.
                if 'label' in kwargs:
                    del kwargs['label'], kw_hi['label']
                ax.plot(xarr, hi, **kw_hi)
        else:
            # BUG FIX: NotImplemented is not an exception class. The
            # unreachable plotting code that followed has been removed.
            raise NotImplementedError('help')
    elif nd == 2:
        # Which axis is held fixed (scalar) and which do we plot along?
        if ivar[0] is None:
            scalar = ivar[1]
            vector = xarr = ivars[0]
            slc = slice(-1, None, -1)
        else:
            scalar = ivar[0]
            vector = xarr = ivars[1]
            slc = slice(0, None, 1)
        # Convert redshifts to frequencies
        if z_to_freq:
            xarr = nu_0_mhz / (1. + xarr)
        if E_to_freq:
            xarr = xarr * erg_per_ev / h_p
        if is_logx:
            xarr = 10**xarr
        if new_x is not None:
            xarr = new_x
            print("You better know what you're doing!")
        if type(multiplier) not in [list, np.ndarray, tuple]:
            multiplier = [multiplier] * len(vector)
        tmp = self.ExtractData(name, ivar=ivar,
            take_log=take_log, un_log=un_logy)
        _yblob = tmp[name]
        if expr is not None:
            _yblob = eval(expr)
        yblob = np.nan_to_num(_yblob)
        mask = np.all(yblob.mask == True, axis=1)
        keep = np.array(np.logical_not(mask), dtype=int)
        nans = np.any(np.isnan(yblob.data), axis=1)
        if skip is not None:
            keep[0:skip] *= 0
        if stop is not None:
            keep[stop:] *= 0
        if (samples is not None) and (type(samples) != str):
            keep[0:-samples] = 0
        # Plot individual samples
        if samples == 'all':
            # Slicing in x dimension
            if ivar[0] is not None:
                ax.plot(xarr, yblob[keep==1].T, **kwargs)
            # Slicing in y dimension
            else:
                pass
        elif type(samples) is int:
            ax.plot(xarr, yblob[keep==1].T, **kwargs)
        # Plot only the best-fitting model
        elif use_best and self.is_mcmc:
            if best == 'median':
                N = len(self.logL[keep == 1])
                psorted = np.argsort(self.logL[keep == 1])
                loc = psorted[int(N / 2.)]
            else:
                loc = np.argmax(self.logL[keep == 1])
            y = yblob[keep==1][loc]
            if smooth_boundary:
                y = smooth(y, smooth_boundary)
            ax.plot(xarr, y, **kwargs)
        # Plot contours enclosing some amount of likelihood
        elif percentile:
            lo, hi = np.nanpercentile(yblob[keep == 1], (q1, q2), axis=0)
            if smooth_boundary:
                lo = smooth(lo, smooth_boundary)
                hi = smooth(hi, smooth_boundary)
            if fill:
                ax.fill_between(xarr, lo, hi, **kwargs)
            else:
                kw_lo = kwargs.copy()
                kw_hi = kwargs.copy()
                if 'ls' in kwargs:
                    if type(kwargs['ls']) in [list, tuple]:
                        kw_lo['ls'] = kwargs['ls'][0]
                        kw_hi['ls'] = kwargs['ls'][1]
                ax.plot(xarr, lo, **kw_lo)
                if 'label' in kwargs:
                    del kwargs['label'], kw_hi['label']
                ax.plot(xarr, hi, **kw_hi)
        else:
            # BUG FIX: NotImplemented is not an exception class.
            raise NotImplementedError('help')
    ##
    # Do the actual plotting
    ##
    ax.set_ylabel(self.labeler.label(name))
    pl.draw()
    if return_data:
        return ax, xarr, yblob
    else:
        return ax
def CovarianceMatrix(self, pars, ivar=None):
    """
    Compute covariance matrix for input parameters.

    Parameters
    ----------
    pars : list
        List of parameter names to include in covariance estimate.

    Returns
    -------
    Tuple of (mean vector, covariance matrix), both masked-aware.
    """
    data = self.ExtractData(pars, ivar=ivar)
    # Stack the samples row-wise: one row per parameter.
    blob_vec = [data[par] for par in pars]
    mu = np.ma.mean(blob_vec, axis=1)
    cov = np.ma.cov(blob_vec)
    return mu, cov
def PlotCovarianceMatrix(self, pars, ivar=None, fig=1, ax=None,
    cmap='RdBu_r'):
    """
    Display the parameter covariance matrix as a color-coded image.

    Parameters
    ----------
    pars : list
        Parameter names passed through to CovarianceMatrix.

    Returns
    -------
    Tuple of (axis instance, colorbar instance).
    """
    _, cov = self.CovarianceMatrix(pars, ivar=ivar)
    # Only create a new window when no axis was supplied.
    if ax is None:
        fig = pl.figure(fig)
        ax = fig.add_subplot(111)
    img = ax.imshow(cov, interpolation='none', cmap=cmap)
    cbar = pl.colorbar(img)
    return ax, cbar
def AssembleParametersList(self, N=None, ids=None, include_bkw=False,
    **update_kwargs):
    """
    Return dictionaries of parameters corresponding to elements of the
    chain. Really just a convenience thing -- converting 1-D arrays
    (i.e, links of the chain) into dictionaries -- so that the parameters
    can be passed into ares.simulations objects.

    .. note :: Masked chain elements are excluded.

    N : int
        Maximum number of models to return, starting from beginning of
        chain. If None, return all available.
    ids : int, list
        If supplied, only the chain element(s) with these indices are
        returned.
    include_bkw : bool
        Include base_kwargs? If so, then each element within the returned
        list can be supplied to an ares.simulations instance and recreate
        that model exactly.
    update_kwargs : dict
        New kwargs that you want added to each set of parameters. Will
        override pre-existing keys.

    Returns
    -------
    List of dictionaries. Maximum length: `N`.
    """
    ct = 0
    all_kwargs = []
    chain_is_masked = type(self.chain) == np.ma.core.MaskedArray
    for i, link in enumerate(self.chain):
        # Exclude (partially) masked links.
        if sum(self.mask[i]):
            continue
        # Filter by explicit element index/indices, if requested;
        # otherwise honor the N-element cap.
        if ids is not None:
            if type(ids) in [int, np.int64]:
                if (i != ids):
                    continue
            else:
                if (i not in ids):
                    continue
        elif N is not None:
            if i >= N:
                break
        if include_bkw:
            if ct == 0:
                # Only print first time...could be thousands of iterations
                print(("WARNING: Any un-pickleable kwargs will not " +\
                    "have been saved in {!s}.binfo.pkl!").format(\
                    self.prefix))
            kwargs = self.base_kwargs.copy()
        else:
            kwargs = {}
        # Un-log logarithmic parameters as we convert this link to a dict.
        row = self.chain.data[i] if chain_is_masked else self.chain[i]
        for j, parameter in enumerate(self.parameters):
            value = row[j]
            kwargs[parameter] = 10**value if self.is_log[j] else value
        kwargs.update(update_kwargs)
        all_kwargs.append(kwargs.copy())
        ct += 1
    return all_kwargs
def CorrelationMatrix(self, pars, ivar=None, fig=1, ax=None):
    """
    Plot correlation matrix.

    Parameters
    ----------
    pars : list
        Parameter/blob names to correlate.

    Returns
    -------
    Axis instance holding the color-coded correlation matrix.
    """
    mu, cov = self.CovarianceMatrix(pars, ivar=ivar)
    corr = correlation_matrix(cov)
    if ax is None:
        fig = pl.figure(fig); ax = fig.add_subplot(111)
    # BUG FIX: imshow's `origin` must be 'upper' or 'lower';
    # 'lower left' raises a ValueError in matplotlib.
    cax = ax.imshow(corr.T, interpolation='none', cmap='RdBu_r',
        vmin=-1, vmax=1, origin='lower')
    cb = pl.colorbar(cax)
    return ax
def get_blob(self, name, ivar=None):
    """
    Extract an array of values for a given quantity.

    ..note:: If ivar is not supplied, this is equivalent to just reading
        all data from disk.

    Parameters
    ----------
    name : str
        Name of quantity
    ivar : list, tuple, array
        Independent variables a given blob may depend on.
    """
    i, j, nd, dims = self.blob_info(name)
    if (i is None) and (j is None):
        # hdf5-backed blob: read the whole dataset directly.
        f = h5py.File('{!s}.hdf5'.format(self.prefix), 'r')
        arr = np.array(f[('blobs')][name])
        f.close()
        return arr
    blob = self.get_blob_from_disk(name)
    if nd == 0:
        return blob
    elif nd == 1:
        if ivar is None:
            return blob
        else:
            # Cludgey...
            biv = np.array(self.blob_ivars[i]).squeeze()
            # Nearest-neighbor lookup along the single ivar axis.
            k = np.argmin(np.abs(biv - ivar))
            if not np.allclose(biv[k], ivar):
                print("WARNING: Looking for `{}` at ivar={}, closest found is {}.".format(name, ivar, biv[k]))
            return blob[:,k]
    elif nd == 2:
        if ivar is None:
            return blob
        assert len(ivar) == 2, "Must supply 2-D coordinate for blob!"
        k1 = np.argmin(np.abs(np.array(self.blob_ivars[i][0]) - ivar[0]))
        if not np.allclose(self.blob_ivars[i][0][k1], ivar[0]):
            print("WARNING: Looking for `{}` at ivar={}, closest found is {}.".format(name,
                ivar[0], self.blob_ivars[i][0][k1]))
        if ivar[1] is None:
            return blob[:,k1,:]
        else:
            k2 = np.argmin(np.abs(np.array(self.blob_ivars[i][1]) - ivar[1]))
            # CONSISTENCY FIX: use np.allclose like the other nearest-ivar
            # checks above (exact != comparison is fragile for floats).
            if not np.allclose(self.blob_ivars[i][1][k2], ivar[1]):
                print("WARNING: Looking for `{}` at ivar={}, closest found is {}.".format(name,
                    ivar[1], self.blob_ivars[i][1][k2]))
            return blob[:,k1,k2]
def max_likelihood_parameters(self, method='mode', min_or_max='max',
    skip=0, stop=None, limit_to_dist=False, nu=0.68):
    """
    Return parameter values at maximum likelihood point.

    Parameters
    ----------
    method : str
        median or mode
    min_or_max : str
        For method='mode': use the maximum (default) or minimum of logL.
    limit_to_dist : bool
        If True, require the chosen point to lie within the `nu`
        confidence range of every parameter's 1-D posterior.

    Returns
    -------
    Dictionary mapping parameter name to its value at the chosen point
    (un-logged where applicable). Also cached as self._max_like_pars.
    """
    if method == 'median':
        # Chain element whose logL is closest to the median logL.
        N = len(self.logL[skip:stop])
        psorted = np.sort(self.logL[skip:stop])
        logL_med = psorted[int(N / 2.)]
        iML = np.argmin(np.abs(self.logL[skip:stop] - logL_med))
    else:
        if min_or_max == 'max':
            iML = np.argmax(self.logL[skip:stop])
        else:
            iML = np.argmin(self.logL[skip:stop])
    # Require that the best-fit model be in the bulk of the distribution?
    if limit_to_dist:
        # Candidates in descending order of likelihood.
        iML_all = np.argsort(self.logL[skip:stop])[-1::-1]
        ranges = {}
        for par in self.parameters:
            mu, (hi, lo) = self.get_1d_error(par, peak=method, skip=skip,
                stop=stop, nu=nu)
            ranges[par] = (mu - lo, mu + hi)
        for h, _iML in enumerate(iML_all):
            all_ok = True
            for i, par in enumerate(self.parameters):
                pval = self.chain[skip:stop][_iML,i]
                if not ranges[par][0] <= pval <= ranges[par][1]:
                    all_ok = False
                    break
            if all_ok:
                break
        if h != 0:
            print("WARNING: Using {}th highest-likelihood point.".format(h))
        # BUG FIX: the result of the search above was previously
        # discarded -- `iML` kept its unconstrained value.
        iML = _iML
    self._max_like_pars = {}
    for i, par in enumerate(self.parameters):
        if self.is_log[i]:
            self._max_like_pars[par] = 10**self.chain[skip:stop][iML,i]
        else:
            self._max_like_pars[par] = self.chain[skip:stop][iML,i]
    return self._max_like_pars
def ExpensiveBlob(self, func, ivar, name, skip=0, clobber=False):
    """
    Generate a new blob from parameters only, i.e., we need to re-run
    some ARES calculation, which is wrapped by `func`.

    No restart option yet. Should add that.

    Parameters
    ----------
    func : callable
        Called as func(ivar, **kwargs) for each parameter set in the
        chain; must return an array matching the shape implied by `ivar`.
    ivar : list
        Independent variables; the result shape along dimension k is
        len(ivar[k][1]).
    name : str
        Name for the new blob (used in the output filename).
    skip : int
        Number of initial chain elements to skip.
    clobber : bool
        Overwrite an existing output file?
    """
    kwargs = self.AssembleParametersList(include_bkw=True)
    print("Generating new field={} for all {} samples...".format(name,
        len(kwargs)))
    nd = len(ivar)
    # Output shape: (number of samples, dims implied by ivar).
    shape = [len(kwargs)]
    for k, dim in enumerate(range(nd)):
        shape.append(len(ivar[k][1]))
    fn = '{0!s}.blob_{1}d.{2!s}.pkl'.format(self.prefix, nd, name)
    if os.path.exists(fn) and (not clobber):
        print(('{!s} exists! Set clobber=True or remove by ' +\
            'hand.').format(fn))
        return
    pb = ProgressBar(len(kwargs), name=name)
    pb.start()
    # -99999 marks entries not computed by this rank.
    all_results = -99999 * np.ones(shape)
    for k, kw in enumerate(kwargs):
        if k < skip:
            continue
        # Round-robin distribution of work across MPI ranks.
        if k % size != rank:
            continue
        result = func(ivar, **kw)
        all_results[k] = result
        pb.update(k)
    pb.finish()
    # Combine contributions from all ranks.
    # NOTE(review): Allreduce defaults to SUM, so the -99999 placeholders
    # held by the other (size-1) ranks appear to leak into the reduced
    # values — verify intended (zero-init or op=MAX would avoid this).
    if size > 1:
        tmp = np.zeros_like(all_results)
        nothing = MPI.COMM_WORLD.Allreduce(all_results, tmp)
        all_results = tmp
    # Only the root rank writes to disk.
    if rank > 0:
        return
    write_pickle_file(all_results, fn, open_mode='w', ndumps=1,\
        safe_mode=False, verbose=False)
def DeriveBlob(self, func=None, fields=None, expr=None, varmap=None,
    save=True, ivar=None, name=None, clobber=False):
    """
    Derive new blob from pre-existing ones.

    Parameters
    ----------
    Either supply the first two arguments:
    func : function!
        A function of two variables: ``data`` (a dictionary containing the
        data) and ``ivars``, which contain the independent variables for
        each field in ``data``.
    fields : list, tuple
        List of quantities required by ``func``.
    OR the second two:
    expr : str
        For example, 'x - y'
    varmap : dict
        Relates variables in `expr` to blobs. For example,
        varmap = {'x': 'nu_D', 'y': 'nu_C'}
    The remaining parameters are:
    save : bool
        Save to disk? If not, just returns array.
    name : str
        If save==True, this is a name for this new blob that we can use
        to call it up later.
    clobber : bool
        If file with same ``name`` exists, overwrite it?
    """
    if func is not None:
        data = self.ExtractData(fields)
        # Grab ivars
        ivars_for_func = {}
        ivars = {}
        for key in data:
            # Don't need ivars if we're manipulating parameters!
            if key in self.parameters:
                continue
            # Might be a derived blob of derived blobs!
            # Just err on the side of no ivars for now.
            try:
                i, j, nd, size = self.blob_info(key)
                n = self.blob_ivarn[i]
                ivars[key] = self.blob_ivars[i]
                for k, _name in enumerate(n):
                    ivars_for_func[_name] = self.blob_ivars[i][k]
            except KeyError:
                ivars_for_func[key] = None
                ivars[key] = None
        result = func(data, ivars_for_func)
    else:
        blobs = list(varmap.values())
        if ivar is not None:
            iv = [ivar[blob] for blob in blobs]
        else:
            iv = None
        data = self.ExtractData(blobs, ivar=iv)
        # NOTE(review): eval of a user-supplied expression — only use
        # with trusted input.
        result = eval(expr, {var: data[varmap[var]] for var in varmap.keys()})
    if save:
        assert name is not None, "Must supply name for new blob!"
        # First dimension is # of samples
        nd = len(result.shape) - 1
        fn = '{0!s}.blob_{1}d.{2!s}.pkl'.format(self.prefix, nd, name)
        if os.path.exists(fn) and (not clobber):
            print(('{!s} exists! Set clobber=True or remove by ' +\
                'hand.').format(fn))
            # Return the previously-derived version instead.
            data = self.ExtractData(name)
            return data[name]
        write_pickle_file(result, fn, open_mode='w', ndumps=1,\
            safe_mode=False, verbose=False)
        # 'data' contains all field used to derive this blob.
        # Shape of new blob must be the same
        ivars = {}
        for key in data:
            # Don't need ivars if we're manipulating parameters!
            if key in self.parameters:
                continue
            try:
                i, j, nd, size = self.blob_info(key)
                ivars[key] = self.blob_ivars[i]
            except KeyError:
                ivars[key] = None
        ##
        # Need to save ivars under new blob name.
        # Require ivars of component fields to be the same?
        ##
        # I think keys() no longer returns a list in Python 3.?
        keys = list(ivars.keys())
        ivars_f = {}
        if len(ivars.keys()) == 1:
            ivars_f[name] = ivars[list(ivars.keys())[0]]
        else:
            ivars = dict(ivars)
            for k in range(1, len(keys)):
                assert ivars[keys[k]] == ivars[keys[k-1]]
            ivars_f[name] = ivars[keys[0]]
        # Save metadata about this derived blob
        fn_md = '{!s}.dbinfo.pkl'.format(self.prefix)
        if (not os.path.exists(fn_md)) or clobber:
            write_pickle_file(ivars_f, fn_md, open_mode='w',\
                ndumps=1, safe_mode=False, verbose=False)
        else:
            pdats = read_pickle_file(fn_md, nloads=None, verbose=False)
            # NOTE(review): this appears intended to append the metadata
            # only when `name` is absent or differs, but after a
            # non-empty read `pdat` is never None, so the append always
            # happens unless the loop breaks — verify the condition.
            for pdat in pdats:
                if name in pdat:
                    if pdat[name] == ivars_f[name]:
                        break
            if pdat is not None:
                write_pickle_file(ivars_f, fn_md, open_mode='a',\
                    ndumps=1, safe_mode=False, verbose=False)
    return result
def z_to_freq(self, clobber=False):
    """
    Derive frequency blobs nu_B/C/D (and primed variants) from the
    corresponding redshift blobs via nu = nu_0 / (1 + z).
    """
    expr = '{:.5g} / (1. + x)'.format(nu_0_mhz)
    for tp in 'BCD':
        for suffix in ('', 'p'):
            tag = tp + suffix
            self.DeriveBlob(expr=expr,
                varmap={'x': 'z_{!s}'.format(tag)},
                name='nu_{!s}'.format(tag), clobber=clobber)
def RankModels(self, **kwargs):
    """
    Rank all models in the ModelSet by closeness to the parameter
    values supplied in ``kwargs``.

    The score of a model is the sum over shared parameters of the
    relative difference |p_model - p_ref| / p_ref. Models sharing no
    usable parameter with ``kwargs`` keep a score of infinity.

    Returns
    -------
    tuple of (sort indices, sorted kwarg dicts, raw scores).
    """
    # One dict of parameter:value pairs per point in the chain.
    all_kwargs = self.AssembleParametersList()
    scores = np.inf * np.ones(len(all_kwargs))
    for i, element in enumerate(all_kwargs):
        for parameter in self.parameters:
            # Skip parameters missing (or None) on either side.
            if (parameter not in element) or (parameter not in kwargs):
                continue
            if (element[parameter] is None) or (kwargs[parameter] is None):
                continue
            # First usable parameter: switch from "no info" to zero.
            if not np.isfinite(scores[i]):
                scores[i] = 0
            scores[i] += abs(element[parameter] - kwargs[parameter]) \
                / kwargs[parameter]
    sorter = np.argsort(scores)
    return sorter, [all_kwargs[i] for i in sorter], scores
def export(self, pars, prefix=None, fn=None, ivar=None, path='.',
        fmt='hdf5', clobber=False, skip=0, stop=None):
    """Alias for :meth:`save`; forwards all arguments unchanged."""
    self.save(pars, prefix=prefix, fn=fn, ivar=ivar, path=path,
        fmt=fmt, clobber=clobber, skip=skip, stop=stop)
def save(self, pars, prefix=None, fn=None, ivar=None, path='.', fmt='hdf5',
        clobber=False, include_chain=True, restructure_grid=False,
        skip=0, stop=None):
    """
    Extract data from chain or blobs and output to separate file(s).

    This can be a convenient way to re-package data, for instance
    consolidating data outputs from lots of processors into a single file,
    or simply reducing the size of a file for easy transport when we
    don't need absolutely everything.

    Parameters
    ----------
    pars : str, list, tuple
        Name of parameter (or list of parameters) or blob(s) to extract.
    ivar : int, float, str, list, tuple
        [optional] independent variables, if None will extract all.
    fmt : str
        Options: 'hdf5' or 'pkl'
    path : str
        By default, will save files to CWD. Can modify this if you'd like.
    include_chain : bool
        By default, include the chain, which in the case of a ModelGrid,
        is just the axes of the grid.
    restructure_grid : bool
        Not implemented yet, but would be nice to restructure model grid
        data into an ordered mesh to be nice.
    """
    if type(pars) not in [list, tuple]:
        pars = [pars]
    for par in pars:
        if par in self.parameters:
            # Free parameters already live in the chain dataset.
            print(("FYI: {!s} is a free parameter, so there's no " +\
                "need to include it explicitly.").format(par))
    data = self.ExtractData(pars, ivar=ivar)
    if fn is None:
        assert prefix is not None
        fn =\
            '{0!s}/{1!s}.{2!s}.{3!s}'.format(path,self.prefix, prefix, fmt)
    if os.path.exists(fn) and (not clobber):
        raise IOError('File exists! Set clobber=True to wipe it.')
    # Output to HDF5. In this case, save each field as a new dataset
    if fmt == 'hdf5':
        assert have_h5py, "h5py import failed."
        f = h5py.File(fn, 'w')
        if include_chain:
            # Chain + bookkeeping: parameter names, log flags, mask.
            ds = f.create_dataset('chain', data=self.chain[skip:stop])
            ds.attrs.create('names', data=self.parameters)
            ds.attrs.create('is_log', data=self.is_log)
            f.create_dataset('mask', data=self.mask[skip:stop])
        else:
            # raise a warning? eh.
            pass
        # Loop over parameters and save to disk
        for par in pars:
            if par in self.parameters:
                continue
            # Tag ivars on as attribute if blob
            if 'blobs' not in f:
                grp = f.create_group('blobs')
            else:
                grp = f['blobs']
            # Only unmasked samples are written.
            dat = data[par][skip:stop]#[skip:stop:skim,Ellipsis]
            ds = grp.create_dataset(par, data=dat[self.mask[skip:stop] == 0])
            try:
                i, j, nd, dims = self.blob_info(par)
                if self.blob_ivars[i] is not None:
                    # This might cause problems if the ivars are real big.
                    ds.attrs.create('ivar', self.blob_ivars[i])
            except KeyError:
                print("Missing ivar info for {!s}!".format(par))
        f.close()
        print("Wrote {!s}.".format(fn))
    else:
        raise NotImplementedError('Only support for hdf5 so far. Sorry!')
    # Also make a copy of the info files with same prefix
    # since that's generally nice to have available.
    # Well, it gives you a false sense of what data is available,
    # so sorry! Not doing that anymore.
    #out = '{0!s}/{1!s}.{2!s}.binfo.pkl'.format(path, self.prefix, prefix)
    #shutil.copy('{!s}.binfo.pkl'.format(self.prefix), out)
    #print "Wrote {!s}.".format(out)
    #
    #out = '{0!s}/{1!s}.{2!s}.pinfo.pkl'.format(path, self.prefix, prefix)
    #shutil.copy('{!s}.pinfo.pkl'.format(self.prefix), out)
    #print "Wrote {!s}.".format(out)
@property
def custom_labels(self):
    """Dict mapping parameter/blob names to user-supplied plot labels."""
    try:
        return self._custom_labels
    except AttributeError:
        self._custom_labels = {}
        return self._custom_labels

@custom_labels.setter
def custom_labels(self, value):
    """Merge ``value`` (a dict) into the stored custom labels."""
    assert type(value) is dict
    try:
        store = self._custom_labels
    except AttributeError:
        store = self._custom_labels = {}
    #if key not in self.parameters:
    #    print("WARNING: custom_label for par `{}` no in parameters list.".format(key))
    store.update(value)
    # Invalidate any cached labeler so it is rebuilt with the new labels.
    if hasattr(self, '_labeler'):
        del self._labeler
@property
def labeler(self):
    # Lazily build (and cache) a Labeler from the free parameters, their
    # log flags, any user-supplied custom labels, and the base kwargs.
    if not hasattr(self, '_labeler'):
        kw = self.base_kwargs if self.base_kwargs is not None else {}
        self._labeler = Labeler(self.parameters, self.is_log,
            extra_labels=self.custom_labels, **kw)
    return self._labeler
def set_axis_labels(self, ax, pars, take_log=False, un_log=False,
        cb=None, labels={}):
    """
    Make nice axis labels.

    Sets the x label (and, for 2+ parameters, the y label and optionally
    a colorbar label) on ``ax`` using this ModelSet's Labeler. For a
    single parameter the y axis is labeled 'PDF' and the Axes is
    returned early (as None).
    """
    pars, take_log, multiplier, un_log, ivar = \
        self._listify_common_inputs(pars, take_log, 1.0, un_log, None)
    is_log = {}
    for par in pars:
        if par in self.parameters:
            k = self.parameters.index(par)
            is_log[par] = self.is_log[k]
        else:
            # Blobs are never log10-ified before storing to disk
            is_log[par] = False
    # Normalize take_log to a dict keyed by parameter name.
    if type(take_log) != dict:
        tmp = {par:take_log[i] for i, par in enumerate(pars)}
        take_log = tmp
    # Prep for label making
    labeler = self.labeler #= Labeler(pars, is_log, extra_labels=labels,
    #**self.base_kwargs)
    # x-axis first
    ax.set_xlabel(labeler.label(pars[0], take_log=take_log[pars[0]],
        un_log=un_log[0]))
    if len(pars) == 1:
        ax.set_ylabel('PDF')
        pl.draw()
        return
    ax.set_ylabel(labeler.label(pars[1], take_log=take_log[pars[1]],
        un_log=un_log[1]))
    # Rotate ticks?
    for tick in ax.get_xticklabels():
        tick.set_rotation(45.)
    for tick in ax.get_yticklabels():
        tick.set_rotation(45.)
    # colorbar
    if cb is not None and len(pars) > 2:
        cb.set_label(labeler.label(pars[2], take_log=take_log[pars[2]],
            un_log=un_log[2]))
    pl.draw()
    return ax
def _alpha_shape(self, points, alpha):
    """
    Stolen from here:
    http://blog.thehumangeo.com/2014/05/12/drawing-boundaries-in-python/
    Thanks, stranger!

    Compute the alpha shape (concave hull) of a set
    of points.

    @param points: Iterable container of points.
    @param alpha: alpha value to influence the
        gooeyness of the border. Smaller numbers
        don't fall inward as much as larger numbers.
        Too large, and you lose everything!
    """
    if 1 <= len(points) < 4:
        # When you have a triangle, there is no sense
        # in computing an alpha shape.
        return geometry.MultiPoint(list(points)).convex_hull
    #else:
    #    return None, None
    def add_edge(edges, edge_points, coords, i, j):
        """
        Add a line between the i-th and j-th points,
        if not in the list already
        """
        if (i, j) in edges or (j, i) in edges:
            # already added
            return
        edges.add( (i, j) )
        edge_points.append(coords[ [i, j] ])
    coords = np.array(points)#np.array([point.coords[0] for point in points])
    # Delaunay triangulation of the point cloud; keep only triangles
    # whose circumradius passes the 1/alpha filter below.
    tri = Delaunay(coords)
    edges = set()
    edge_points = []
    # loop over triangles:
    # ia, ib, ic = indices of corner points of the
    # triangle
    for ia, ib, ic in tri.vertices:
        pa = coords[ia]
        pb = coords[ib]
        pc = coords[ic]
        # Lengths of sides of triangle
        a = np.sqrt((pa[0]-pb[0])**2 + (pa[1]-pb[1])**2)
        b = np.sqrt((pb[0]-pc[0])**2 + (pb[1]-pc[1])**2)
        c = np.sqrt((pc[0]-pa[0])**2 + (pc[1]-pa[1])**2)
        # Semiperimeter of triangle
        s = (a + b + c)/2.0
        # Area of triangle by Heron's formula
        area = np.sqrt(s*(s-a)*(s-b)*(s-c))
        # Circumradius R = abc / (4*Area).
        circum_r = a*b*c/(4.0*area)
        # Here's the radius filter.
        #print circum_r
        if circum_r < 1.0/alpha:
            add_edge(edges, edge_points, coords, ia, ib)
            add_edge(edges, edge_points, coords, ib, ic)
            add_edge(edges, edge_points, coords, ic, ia)
    # Polygonize the surviving edges and merge into one geometry.
    # NOTE(review): tri.vertices and cascaded_union are deprecated in
    # newer scipy/shapely (tri.simplices / unary_union) — confirm the
    # pinned versions before upgrading.
    m = geometry.MultiLineString(edge_points)
    triangles = list(polygonize(m))
    return cascaded_union(triangles), edge_points
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@ares@analysis@ModelSet.py@.PATH_END.py
|
{
"filename": "_valuesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/_valuesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ValuesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    # Auto-generated validator for the `valuesrc` property of
    # `isosurface` traces; defers all validation to SrcValidator.
    def __init__(self, plotly_name="valuesrc", parent_name="isosurface", **kwargs):
        super(ValuesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@_valuesrc.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/bar/error_x/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    # Auto-generated validator for the boolean `visible` property of
    # `bar.error_x`; defers all validation to BooleanValidator.
    def __init__(self, plotly_name="visible", parent_name="bar.error_x", **kwargs):
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@bar@error_x@_visible.py@.PATH_END.py
|
{
"filename": "download_file.py",
"repo_name": "revoltek/LiLF",
"repo_path": "LiLF_extracted/LiLF-master/scripts/download_file.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Generic file download with retry and check for length
import os
from time import sleep
import requests
def download_file(url, filename, login=None, password=None, max_retries=None):
    """
    Download ``url`` to ``filename``, retrying until the transfer succeeds.

    The expected size is taken from the Content-Length header (when the
    server provides one); the file is re-downloaded from scratch if the
    byte count on disk does not match.

    Parameters
    ----------
    url : str
        Remote URL to fetch.
    filename : str
        Local path to write to (overwritten on every attempt).
    login, password : str, optional
        If both are given, used as HTTP basic-auth credentials.
    max_retries : int, optional
        Maximum number of download attempts. None (the default) retries
        forever, matching the historical behaviour. Note: connection
        attempts within a single download attempt are still unbounded.

    Returns
    -------
    bool
        True once a download completed (and matched Content-Length when
        known); False only if ``max_retries`` was exhausted.
    """
    downloaded = False
    attempts = 0
    while not downloaded:
        if max_retries is not None and attempts >= max_retries:
            break
        attempts += 1
        # Connect (with unbounded retry on transient errors).
        connected = False
        while not connected:
            try:
                #print('Opening connection')
                if (login is not None) and (password is not None):
                    response = requests.get(url, stream=True, verify=True, timeout=60,
                        auth=(login, password))
                else:
                    response = requests.get(url, stream=True, verify=True, timeout=60)
                if response.status_code != 200:
                    print(response.headers)
                    raise RuntimeError('Code was %i' % response.status_code)
                if 'Content-Length' in response.headers.keys():
                    esize = int(response.headers['Content-Length'])
                else:
                    esize = None
            except requests.exceptions.ConnectionError:
                print('Downloader -- Connection error! sleeping 30 seconds before retry...')
                sleep(30)
            except (requests.exceptions.Timeout, requests.exceptions.ReadTimeout):
                print('Downloader -- Timeout! sleeping 30 seconds before retry...')
                sleep(30)
            except RuntimeError:
                # Non-200 status; headers already printed above.
                sleep(30)
            else:
                connected = True
        # Stream the body to disk and verify its size.
        try:
            #print('Downloading %i bytes' % esize)
            with open(filename, 'wb') as fd:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        fd.write(chunk)
            fsize = os.path.getsize(filename)
            if esize is not None and esize != fsize:
                print('Downloader -- Download incomplete (expected %i, got %i)! Retrying' % (esize, fsize))
            elif esize is not None:
                print('Downloader -- Download successful, %i of %i bytes received' % (fsize, esize))
                downloaded = True
            else:
                print('Downloader -- Download successful, %i bytes (unknown size)' % (fsize))
                downloaded = True
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout,
                requests.exceptions.ChunkedEncodingError):
            print('Downloader -- Connection error! sleeping 30 seconds before retry...')
            sleep(30)  # back to the connection
        finally:
            # Bug fix: the streamed connection was never closed (the old
            # `del response` does not release the socket), leaking
            # connections when many files are fetched in one process.
            response.close()
    return downloaded
if __name__=='__main__':
    # CLI usage: download_file.py <url> <output filename>
    import sys
    download_file(sys.argv[1],sys.argv[2])
|
revoltekREPO_NAMELiLFPATH_START.@LiLF_extracted@LiLF-master@scripts@download_file.py@.PATH_END.py
|
{
"filename": "test_fits.py",
"repo_name": "halomod/hmf",
"repo_path": "hmf_extracted/hmf-main/tests/test_fits.py",
"type": "Python"
}
|
import pytest
from pytest import raises
import inspect
import itertools
import numpy as np
from hmf import MassFunction
from hmf.mass_function import fitting_functions as ff
# All concrete FittingFunction subclasses exported by fitting_functions,
# excluding the abstract base and PS (which is the reference maximum).
allfits = [
    o
    for n, o in inspect.getmembers(
        ff,
        lambda member: inspect.isclass(member)
        and issubclass(member, ff.FittingFunction)
        and member is not ff.FittingFunction
        and member is not ff.PS,
    )
]
@pytest.fixture(scope="module")
def hmf():
    # Module-scoped MassFunction with a fixed cosmology and the cheap
    # EH transfer model, shared (and mutated via .update) by all tests.
    return MassFunction(
        Mmin=10,
        Mmax=15,
        dlog10m=0.1,
        lnk_min=-16,
        lnk_max=10,
        dlnk=0.01,
        hmf_model="PS",
        z=0.0,
        sigma_8=0.8,
        n=1,
        cosmo_params={"Om0": 0.3, "H0": 70.0, "Ob0": 0.05},
        transfer_model="EH",
    )

@pytest.fixture(scope="module")
def ps_max(hmf):
    # Peak of the Press-Schechter fsigma — the known over-estimate that
    # every other fit is compared against.
    hmf.update(hmf_model="PS")
    return hmf.fsigma.max()
@pytest.mark.parametrize("redshift, fit", itertools.product([0.0, 2.0], allfits))
def test_allfits(hmf, ps_max, redshift, fit):
    """
    This basically tests all implemented fits to check the form for three things:
    1) whether the maximum fsigma is less than in the PS formula (which is known to overestimate)
    2) whether the slope is positive below this maximum
    3) whether the slope is negative above this maximum
    Since it calls each class, any blatant errors should also pop up.
    """
    hmf.update(z=redshift, hmf_model=fit)
    maxarg = np.argmax(hmf.fsigma)
    # (1) PS is an upper envelope; (2)/(3) fsigma is unimodal.
    assert ps_max >= hmf.fsigma[maxarg]
    assert np.all(np.diff(hmf.fsigma[:maxarg]) >= 0)
    assert np.all(np.diff(hmf.fsigma[maxarg:]) <= 0)
def test_tinker08_dh():
    # Tinker08 should be insensitive to a tiny perturbation of the
    # overdensity (200 vs 200.1): interpolation of its parameters.
    h = MassFunction(
        hmf_model="Tinker08",
        mdef_model="SOMean",
        mdef_params={"overdensity": 200},
        transfer_model="EH",
    )
    h1 = MassFunction(
        hmf_model="Tinker08",
        mdef_model="SOMean",
        mdef_params={"overdensity": 200.1},
        transfer_model="EH",
    )
    assert np.allclose(h.fsigma, h1.fsigma, rtol=1e-2)

def test_tinker10_dh():
    # Same insensitivity check for Tinker10; the first instance uses the
    # default mass definition, which should match SOMean 200.1 closely.
    h = MassFunction(hmf_model="Tinker10", transfer_model="EH")
    h1 = MassFunction(
        hmf_model="Tinker10",
        mdef_model="SOMean",
        mdef_params={"overdensity": 200.1},
        transfer_model="EH",
    )
    assert np.allclose(h.fsigma, h1.fsigma, rtol=1e-2)

def test_tinker10_neg_gam():
    # Invalid (negative) gamma_200 must raise on evaluating fsigma.
    with raises(ValueError):
        h = MassFunction(
            hmf_model="Tinker10", hmf_params={"gamma_200": -1}, transfer_model="EH"
        )
        h.fsigma

def test_tinker10_neg_eta():
    # Invalid (negative) eta_200 must raise on evaluating fsigma.
    with raises(ValueError):
        h = MassFunction(
            hmf_model="Tinker10", hmf_params={"eta_200": -1}, transfer_model="EH"
        )
        h.fsigma

def test_tinker10_neg_etaphi():
    # Negative eta with zero phi is also out of the allowed domain.
    with raises(ValueError):
        h = MassFunction(
            hmf_model="Tinker10",
            hmf_params={"eta_200": -1, "phi_200": 0},
            transfer_model="EH",
        )
        h.fsigma

def test_tinker10_neg_beta():
    # Invalid (negative) beta_200 must raise on evaluating fsigma.
    with raises(ValueError):
        h = MassFunction(
            hmf_model="Tinker10", hmf_params={"beta_200": -1}, transfer_model="EH"
        )
        h.fsigma
|
halomodREPO_NAMEhmfPATH_START.@hmf_extracted@hmf-main@tests@test_fits.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/freezegun/py2/freezegun/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
freezegun
~~~~~~~~
:copyright: (c) 2012 by Steve Pulec.
"""
from .api import freeze_time
# Package metadata; freeze_time is the sole public export.
__title__ = 'freezegun'
__version__ = '0.3.15'
__author__ = 'Steve Pulec'
__license__ = 'Apache License 2.0'
__copyright__ = 'Copyright 2012 Steve Pulec'
__all__ = ["freeze_time"]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@freezegun@py2@freezegun@__init__.py@.PATH_END.py
|
{
"filename": "document_loader_feat_table.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/scripts/document_loader_feat_table.py",
"type": "Python"
}
|
import sys
from pathlib import Path
from langchain_community import document_loaders
from langchain_core.document_loaders.base import BaseLoader
# Markdown page skeleton for the docs site; {table} is filled in by
# get_document_loader_table(). Content must stay byte-exact.
DOCUMENT_LOADER_TEMPLATE = """\
---
sidebar_position: 0
sidebar_class_name: hidden
keywords: [compatibility]
custom_edit_url:
hide_table_of_contents: true
---
# Document loaders
:::info
If you'd like to write your own document loader, see [this how-to](/docs/how_to/document_loader_custom/).
If you'd like to contribute an integration, see [Contributing integrations](/docs/contributing/how_to/integrations/).
:::
## Features
The following table shows the feature support for all document loaders.
{table}
"""
# Loaders excluded from the feature table because they are deprecated.
DEPRECATED = [
    "AirbyteCDKLoader",
    "AirbyteGongLoader",
    "AirbyteHubspotLoader",
    "AirbyteJSONLoader",
    "AirbyteSalesforceLoader",
    "AirbyteShopifyLoader",
    "AirbyteStripeLoader",
    "AirbyteTypeformLoader",
    "AirbyteZendeskSupportLoader",
]
def get_document_loader_table() -> str:
    """Build the markdown feature table for all document loaders.

    A loader "supports" a feature when it overrides the corresponding
    BaseLoader method; native async support means either ``aload`` or
    ``alazy_load`` is overridden.
    """
    features: dict = {}
    for cm in document_loaders.__all__:
        features[cm] = {}
        cls = getattr(document_loaders, cm)
        if not issubclass(cls, BaseLoader):
            continue
        for feat in ("aload", "alazy_load", ("lazy_load", "lazy_loading")):
            attr, col = feat if isinstance(feat, tuple) else (feat, feat)
            # Overridden iff the class attribute differs from BaseLoader's.
            features[cm][col] = getattr(cls, attr) != getattr(BaseLoader, attr)
        has_aload = features[cm].pop("aload")
        has_alazy = features[cm].pop("alazy_load")
        features[cm]["native_async"] = has_aload or has_alazy
        features[cm]["description"] = (cls.__doc__ or "").split("\n")[0]
    header = ["loader", "description", "lazy_loading", "native_async"]
    title = ["Document Loader", "Description", "Lazy loading", "Native async support"]
    # Title row + markdown alignment row, then one row per loader.
    rows = [title, [":-"] * 2 + [":-:"] * (len(title) - 2)]
    for loader, feats in sorted(features.items()):
        if not feats or loader in DEPRECATED:
            continue
        rows.append(
            [loader, feats["description"]]
            + ["✅" if feats.get(h) else "❌" for h in header[2:]]
        )
    return "\n".join("|".join(row) for row in rows)
if __name__ == "__main__":
    # Usage: document_loader_feat_table.py <docs output dir>
    output_dir = Path(sys.argv[1])
    output_integrations_dir = output_dir / "integrations"
    output_integrations_dir_doc_loaders = output_integrations_dir / "document_loaders"
    output_integrations_dir_doc_loaders.mkdir(parents=True, exist_ok=True)
    document_loader_page = DOCUMENT_LOADER_TEMPLATE.format(
        table=get_document_loader_table()
    )
    with open(output_integrations_dir / "document_loaders" / "index.mdx", "w") as f:
        f.write(document_loader_page)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@scripts@document_loader_feat_table.py@.PATH_END.py
|
{
"filename": "field_aliases.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/fields/field_aliases.py",
"type": "Python"
}
|
# (old_name, new_name) pairs mapping legacy yt-2.x CamelCase field names
# to their yt-3+ snake_case equivalents. Data must stay byte-exact.
_field_name_aliases = [
    ("GridLevel", "grid_level"),
    ("GridIndices", "grid_indices"),
    ("OnesOverDx", "ones_over_dx"),
    ("Ones", "ones"),
    # ("CellsPerBin", "cells_per_bin"),
    ("SoundSpeed", "sound_speed"),
    ("RadialMachNumber", "radial_mach_number"),
    ("MachNumber", "mach_number"),
    ("CourantTimeStep", "courant_time_step"),
    # ("ParticleVelocityMagnitude", "particle_velocity_magnitude"),
    ("VelocityMagnitude", "velocity_magnitude"),
    ("TangentialOverVelocityMagnitude", "tangential_over_velocity_magnitude"),
    ("Pressure", "pressure"),
    ("Entropy", "entropy"),
    ("sph_r", "spherical_r"),
    ("sph_theta", "spherical_theta"),
    ("sph_phi", "spherical_phi"),
    ("cyl_R", "cylindrical_radius"),
    ("cyl_z", "cylindrical_z"),
    ("cyl_theta", "cylindrical_theta"),
    ("cyl_RadialVelocity", "cylindrical_radial_velocity"),
    ("cyl_RadialVelocityABS", "cylindrical_radial_velocity_absolute"),
    ("cyl_TangentialVelocity", "velocity_cylindrical_theta"),
    ("cyl_TangentialVelocityABS", "velocity_cylindrical_theta"),
    ("DynamicalTime", "dynamical_time"),
    ("JeansMassMsun", "jeans_mass"),
    ("CellMass", "cell_mass"),
    ("TotalMass", "total_mass"),
    ("StarMassMsun", "star_mass"),
    ("Matter_Density", "matter_density"),
    ("ComovingDensity", "comoving_density"),
    ("Overdensity", "overdensity"),
    ("DensityPerturbation", "density_perturbation"),
    ("Baryon_Overdensity", "baryon_overdensity"),
    ("WeakLensingConvergence", "weak_lensing_convergence"),
    ("CellVolume", "cell_volume"),
    ("ChandraEmissivity", "chandra_emissivity"),
    ("XRayEmissivity", "xray_emissivity"),
    ("SZKinetic", "sz_kinetic"),
    ("SZY", "szy"),
    ("AveragedDensity", "averaged_density"),
    ("DivV", "div_v"),
    ("AbsDivV", "div_v_absolute"),
    ("Contours", "contours"),
    ("tempContours", "temp_contours"),
    ("SpecificAngularMomentumX", "specific_angular_momentum_x"),
    ("SpecificAngularMomentumY", "specific_angular_momentum_y"),
    ("SpecificAngularMomentumZ", "specific_angular_momentum_z"),
    ("AngularMomentumX", "angular_momentum_x"),
    ("AngularMomentumY", "angular_momentum_y"),
    ("AngularMomentumZ", "angular_momentum_z"),
    # ("ParticleSpecificAngularMomentumX", "particle_specific_angular_momentum_x"),
    # ("ParticleSpecificAngularMomentumY", "particle_specific_angular_momentum_y"),
    # ("ParticleSpecificAngularMomentumZ", "particle_specific_angular_momentum_z"),
    # ("ParticleAngularMomentumX", "particle_angular_momentum_x"),
    # ("ParticleAngularMomentumY", "particle_angular_momentum_y"),
    # ("ParticleAngularMomentumZ", "particle_angular_momentum_z"),
    # ("ParticleRadius", "particle_radius"),
    ("Radius", "radius"),
    ("RadialVelocity", "radial_velocity"),
    ("RadialVelocityABS", "radial_velocity_absolute"),
    ("TangentialVelocity", "tangential_velocity"),
    ("CuttingPlaneVelocityX", "cutting_plane_velocity_x"),
    ("CuttingPlaneVelocityY", "cutting_plane_velocity_y"),
    ("CuttingPlaneBX", "cutting_plane_magnetic_field_x"),
    ("CuttingPlaneBy", "cutting_plane_magnetic_field_y"),
    ("MeanMolecularWeight", "mean_molecular_weight"),
    ("particle_density", "particle_density"),
    ("ThermalEnergy", "specific_thermal_energy"),
    ("TotalEnergy", "specific_total_energy"),
    ("MagneticEnergy", "magnetic_energy_density"),
    ("GasEnergy", "specific_thermal_energy"),
    ("Gas_Energy", "specific_thermal_energy"),
    ("BMagnitude", "b_magnitude"),
    ("PlasmaBeta", "plasma_beta"),
    ("MagneticPressure", "magnetic_pressure"),
    ("BPoloidal", "b_poloidal"),
    ("BToroidal", "b_toroidal"),
    ("BRadial", "b_radial"),
    ("VorticitySquared", "vorticity_squared"),
    ("gradPressureX", "grad_pressure_x"),
    ("gradPressureY", "grad_pressure_y"),
    ("gradPressureZ", "grad_pressure_z"),
    ("gradPressureMagnitude", "grad_pressure_magnitude"),
    ("gradDensityX", "grad_density_x"),
    ("gradDensityY", "grad_density_y"),
    ("gradDensityZ", "grad_density_z"),
    ("gradDensityMagnitude", "grad_density_magnitude"),
    ("BaroclinicVorticityX", "baroclinic_vorticity_x"),
    ("BaroclinicVorticityY", "baroclinic_vorticity_y"),
    ("BaroclinicVorticityZ", "baroclinic_vorticity_z"),
    ("BaroclinicVorticityMagnitude", "baroclinic_vorticity_magnitude"),
    ("VorticityX", "vorticity_x"),
    ("VorticityY", "vorticity_y"),
    ("VorticityZ", "vorticity_z"),
    ("VorticityMagnitude", "vorticity_magnitude"),
    ("VorticityStretchingX", "vorticity_stretching_x"),
    ("VorticityStretchingY", "vorticity_stretching_y"),
    ("VorticityStretchingZ", "vorticity_stretching_z"),
    ("VorticityStretchingMagnitude", "vorticity_stretching_magnitude"),
    ("VorticityGrowthX", "vorticity_growth_x"),
    ("VorticityGrowthY", "vorticity_growth_y"),
    ("VorticityGrowthZ", "vorticity_growth_z"),
    ("VorticityGrowthMagnitude", "vorticity_growth_magnitude"),
    ("VorticityGrowthMagnitudeABS", "vorticity_growth_magnitude_absolute"),
    ("VorticityGrowthTimescale", "vorticity_growth_timescale"),
    ("VorticityRadPressureX", "vorticity_radiation_pressure_x"),
    ("VorticityRadPressureY", "vorticity_radiation_pressure_y"),
    ("VorticityRadPressureZ", "vorticity_radiation_pressure_z"),
    ("VorticityRadPressureMagnitude", "vorticity_radiation_pressure_magnitude"),
    ("VorticityRPGrowthX", "vorticity_radiation_pressure_growth_x"),
    ("VorticityRPGrowthY", "vorticity_radiation_pressure_growth_y"),
    ("VorticityRPGrowthZ", "vorticity_radiation_pressure_growth_z"),
    ("VorticityRPGrowthMagnitude", "vorticity_radiation_pressure_growth_magnitude"),
    ("VorticityRPGrowthTimescale", "vorticity_radiation_pressure_growth_timescale"),
    ("DiskAngle", "theta"),
    ("Height", "height"),
    ("HI density", "H_density"),
    ("HII density", "H_p1_density"),
    ("HeI density", "He_density"),
    ("HeII density", "He_p1_density"),
    ("HeIII density", "He_p2_density"),
]
# (old_name, unit) pairs for legacy fields whose names baked in a unit;
# the unit string is what the modern field should be converted to.
_field_units_aliases = [
    ("cyl_RCode", "code_length"),
    ("HeightAU", "au"),
    ("cyl_RadialVelocityKMS", "km/s"),
    ("cyl_RadialVelocityKMSABS", "km/s"),
    ("cyl_TangentialVelocityKMS", "km/s"),
    ("cyl_TangentialVelocityKMSABS", "km/s"),
    ("CellMassMsun", "msun"),
    ("CellMassCode", "code_mass"),
    ("TotalMassMsun", "msun"),
    ("CellVolumeCode", "code_length"),
    ("CellVolumeMpc", "Mpc**3"),
    ("ParticleSpecificAngularMomentumXKMSMPC", "km/s/Mpc"),
    ("ParticleSpecificAngularMomentumYKMSMPC", "km/s/Mpc"),
    ("ParticleSpecificAngularMomentumZKMSMPC", "km/s/Mpc"),
    ("RadiusMpc", "Mpc"),
    ("ParticleRadiusMpc", "Mpc"),
    ("ParticleRadiuskpc", "kpc"),
    ("Radiuskpc", "kpc"),
    ("ParticleRadiuskpch", "kpc"),
    ("Radiuskpch", "kpc"),
    ("ParticleRadiuspc", "pc"),
    ("Radiuspc", "pc"),
    ("ParticleRadiusAU", "au"),
    ("RadiusAU", "au"),
    ("ParticleRadiusCode", "code_length"),
    ("RadiusCode", "code_length"),
    ("RadialVelocityKMS", "km/s"),
    ("RadialVelocityKMSABS", "km/s"),
    ("JeansMassMsun", "msun"),
]
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@fields@field_aliases.py@.PATH_END.py
|
{
"filename": "digraphs.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/prompt-toolkit/py3/prompt_toolkit/key_binding/digraphs.py",
"type": "Python"
}
|
"""
Vi Digraphs.
This is a list of special characters that can be inserted in Vi insert mode by
pressing Control-K followed by two normal characters.
Taken from Neovim and translated to Python:
https://raw.githubusercontent.com/neovim/neovim/master/src/nvim/digraph.c
"""
from __future__ import annotations
__all__ = [
    "DIGRAPHS",
]
# digraphs for Unicode from RFC1345
# (also work for ISO-8859-1 aka latin1)
DIGRAPHS: dict[tuple[str, str], int] = {
("N", "U"): 0x00,
("S", "H"): 0x01,
("S", "X"): 0x02,
("E", "X"): 0x03,
("E", "T"): 0x04,
("E", "Q"): 0x05,
("A", "K"): 0x06,
("B", "L"): 0x07,
("B", "S"): 0x08,
("H", "T"): 0x09,
("L", "F"): 0x0A,
("V", "T"): 0x0B,
("F", "F"): 0x0C,
("C", "R"): 0x0D,
("S", "O"): 0x0E,
("S", "I"): 0x0F,
("D", "L"): 0x10,
("D", "1"): 0x11,
("D", "2"): 0x12,
("D", "3"): 0x13,
("D", "4"): 0x14,
("N", "K"): 0x15,
("S", "Y"): 0x16,
("E", "B"): 0x17,
("C", "N"): 0x18,
("E", "M"): 0x19,
("S", "B"): 0x1A,
("E", "C"): 0x1B,
("F", "S"): 0x1C,
("G", "S"): 0x1D,
("R", "S"): 0x1E,
("U", "S"): 0x1F,
("S", "P"): 0x20,
("N", "b"): 0x23,
("D", "O"): 0x24,
("A", "t"): 0x40,
("<", "("): 0x5B,
("/", "/"): 0x5C,
(")", ">"): 0x5D,
("'", ">"): 0x5E,
("'", "!"): 0x60,
("(", "!"): 0x7B,
("!", "!"): 0x7C,
("!", ")"): 0x7D,
("'", "?"): 0x7E,
("D", "T"): 0x7F,
("P", "A"): 0x80,
("H", "O"): 0x81,
("B", "H"): 0x82,
("N", "H"): 0x83,
("I", "N"): 0x84,
("N", "L"): 0x85,
("S", "A"): 0x86,
("E", "S"): 0x87,
("H", "S"): 0x88,
("H", "J"): 0x89,
("V", "S"): 0x8A,
("P", "D"): 0x8B,
("P", "U"): 0x8C,
("R", "I"): 0x8D,
("S", "2"): 0x8E,
("S", "3"): 0x8F,
("D", "C"): 0x90,
("P", "1"): 0x91,
("P", "2"): 0x92,
("T", "S"): 0x93,
("C", "C"): 0x94,
("M", "W"): 0x95,
("S", "G"): 0x96,
("E", "G"): 0x97,
("S", "S"): 0x98,
("G", "C"): 0x99,
("S", "C"): 0x9A,
("C", "I"): 0x9B,
("S", "T"): 0x9C,
("O", "C"): 0x9D,
("P", "M"): 0x9E,
("A", "C"): 0x9F,
("N", "S"): 0xA0,
("!", "I"): 0xA1,
("C", "t"): 0xA2,
("P", "d"): 0xA3,
("C", "u"): 0xA4,
("Y", "e"): 0xA5,
("B", "B"): 0xA6,
("S", "E"): 0xA7,
("'", ":"): 0xA8,
("C", "o"): 0xA9,
("-", "a"): 0xAA,
("<", "<"): 0xAB,
("N", "O"): 0xAC,
("-", "-"): 0xAD,
("R", "g"): 0xAE,
("'", "m"): 0xAF,
("D", "G"): 0xB0,
("+", "-"): 0xB1,
("2", "S"): 0xB2,
("3", "S"): 0xB3,
("'", "'"): 0xB4,
("M", "y"): 0xB5,
("P", "I"): 0xB6,
(".", "M"): 0xB7,
("'", ","): 0xB8,
("1", "S"): 0xB9,
("-", "o"): 0xBA,
(">", ">"): 0xBB,
("1", "4"): 0xBC,
("1", "2"): 0xBD,
("3", "4"): 0xBE,
("?", "I"): 0xBF,
("A", "!"): 0xC0,
("A", "'"): 0xC1,
("A", ">"): 0xC2,
("A", "?"): 0xC3,
("A", ":"): 0xC4,
("A", "A"): 0xC5,
("A", "E"): 0xC6,
("C", ","): 0xC7,
("E", "!"): 0xC8,
("E", "'"): 0xC9,
("E", ">"): 0xCA,
("E", ":"): 0xCB,
("I", "!"): 0xCC,
("I", "'"): 0xCD,
("I", ">"): 0xCE,
("I", ":"): 0xCF,
("D", "-"): 0xD0,
("N", "?"): 0xD1,
("O", "!"): 0xD2,
("O", "'"): 0xD3,
("O", ">"): 0xD4,
("O", "?"): 0xD5,
("O", ":"): 0xD6,
("*", "X"): 0xD7,
("O", "/"): 0xD8,
("U", "!"): 0xD9,
("U", "'"): 0xDA,
("U", ">"): 0xDB,
("U", ":"): 0xDC,
("Y", "'"): 0xDD,
("T", "H"): 0xDE,
("s", "s"): 0xDF,
("a", "!"): 0xE0,
("a", "'"): 0xE1,
("a", ">"): 0xE2,
("a", "?"): 0xE3,
("a", ":"): 0xE4,
("a", "a"): 0xE5,
("a", "e"): 0xE6,
("c", ","): 0xE7,
("e", "!"): 0xE8,
("e", "'"): 0xE9,
("e", ">"): 0xEA,
("e", ":"): 0xEB,
("i", "!"): 0xEC,
("i", "'"): 0xED,
("i", ">"): 0xEE,
("i", ":"): 0xEF,
("d", "-"): 0xF0,
("n", "?"): 0xF1,
("o", "!"): 0xF2,
("o", "'"): 0xF3,
("o", ">"): 0xF4,
("o", "?"): 0xF5,
("o", ":"): 0xF6,
("-", ":"): 0xF7,
("o", "/"): 0xF8,
("u", "!"): 0xF9,
("u", "'"): 0xFA,
("u", ">"): 0xFB,
("u", ":"): 0xFC,
("y", "'"): 0xFD,
("t", "h"): 0xFE,
("y", ":"): 0xFF,
("A", "-"): 0x0100,
("a", "-"): 0x0101,
("A", "("): 0x0102,
("a", "("): 0x0103,
("A", ";"): 0x0104,
("a", ";"): 0x0105,
("C", "'"): 0x0106,
("c", "'"): 0x0107,
("C", ">"): 0x0108,
("c", ">"): 0x0109,
("C", "."): 0x010A,
("c", "."): 0x010B,
("C", "<"): 0x010C,
("c", "<"): 0x010D,
("D", "<"): 0x010E,
("d", "<"): 0x010F,
("D", "/"): 0x0110,
("d", "/"): 0x0111,
("E", "-"): 0x0112,
("e", "-"): 0x0113,
("E", "("): 0x0114,
("e", "("): 0x0115,
("E", "."): 0x0116,
("e", "."): 0x0117,
("E", ";"): 0x0118,
("e", ";"): 0x0119,
("E", "<"): 0x011A,
("e", "<"): 0x011B,
("G", ">"): 0x011C,
("g", ">"): 0x011D,
("G", "("): 0x011E,
("g", "("): 0x011F,
("G", "."): 0x0120,
("g", "."): 0x0121,
("G", ","): 0x0122,
("g", ","): 0x0123,
("H", ">"): 0x0124,
("h", ">"): 0x0125,
("H", "/"): 0x0126,
("h", "/"): 0x0127,
("I", "?"): 0x0128,
("i", "?"): 0x0129,
("I", "-"): 0x012A,
("i", "-"): 0x012B,
("I", "("): 0x012C,
("i", "("): 0x012D,
("I", ";"): 0x012E,
("i", ";"): 0x012F,
("I", "."): 0x0130,
("i", "."): 0x0131,
("I", "J"): 0x0132,
("i", "j"): 0x0133,
("J", ">"): 0x0134,
("j", ">"): 0x0135,
("K", ","): 0x0136,
("k", ","): 0x0137,
("k", "k"): 0x0138,
("L", "'"): 0x0139,
("l", "'"): 0x013A,
("L", ","): 0x013B,
("l", ","): 0x013C,
("L", "<"): 0x013D,
("l", "<"): 0x013E,
("L", "."): 0x013F,
("l", "."): 0x0140,
("L", "/"): 0x0141,
("l", "/"): 0x0142,
("N", "'"): 0x0143,
("n", "'"): 0x0144,
("N", ","): 0x0145,
("n", ","): 0x0146,
("N", "<"): 0x0147,
("n", "<"): 0x0148,
("'", "n"): 0x0149,
("N", "G"): 0x014A,
("n", "g"): 0x014B,
("O", "-"): 0x014C,
("o", "-"): 0x014D,
("O", "("): 0x014E,
("o", "("): 0x014F,
("O", '"'): 0x0150,
("o", '"'): 0x0151,
("O", "E"): 0x0152,
("o", "e"): 0x0153,
("R", "'"): 0x0154,
("r", "'"): 0x0155,
("R", ","): 0x0156,
("r", ","): 0x0157,
("R", "<"): 0x0158,
("r", "<"): 0x0159,
("S", "'"): 0x015A,
("s", "'"): 0x015B,
("S", ">"): 0x015C,
("s", ">"): 0x015D,
("S", ","): 0x015E,
("s", ","): 0x015F,
("S", "<"): 0x0160,
("s", "<"): 0x0161,
("T", ","): 0x0162,
("t", ","): 0x0163,
("T", "<"): 0x0164,
("t", "<"): 0x0165,
("T", "/"): 0x0166,
("t", "/"): 0x0167,
("U", "?"): 0x0168,
("u", "?"): 0x0169,
("U", "-"): 0x016A,
("u", "-"): 0x016B,
("U", "("): 0x016C,
("u", "("): 0x016D,
("U", "0"): 0x016E,
("u", "0"): 0x016F,
("U", '"'): 0x0170,
("u", '"'): 0x0171,
("U", ";"): 0x0172,
("u", ";"): 0x0173,
("W", ">"): 0x0174,
("w", ">"): 0x0175,
("Y", ">"): 0x0176,
("y", ">"): 0x0177,
("Y", ":"): 0x0178,
("Z", "'"): 0x0179,
("z", "'"): 0x017A,
("Z", "."): 0x017B,
("z", "."): 0x017C,
("Z", "<"): 0x017D,
("z", "<"): 0x017E,
("O", "9"): 0x01A0,
("o", "9"): 0x01A1,
("O", "I"): 0x01A2,
("o", "i"): 0x01A3,
("y", "r"): 0x01A6,
("U", "9"): 0x01AF,
("u", "9"): 0x01B0,
("Z", "/"): 0x01B5,
("z", "/"): 0x01B6,
("E", "D"): 0x01B7,
("A", "<"): 0x01CD,
("a", "<"): 0x01CE,
("I", "<"): 0x01CF,
("i", "<"): 0x01D0,
("O", "<"): 0x01D1,
("o", "<"): 0x01D2,
("U", "<"): 0x01D3,
("u", "<"): 0x01D4,
("A", "1"): 0x01DE,
("a", "1"): 0x01DF,
("A", "7"): 0x01E0,
("a", "7"): 0x01E1,
("A", "3"): 0x01E2,
("a", "3"): 0x01E3,
("G", "/"): 0x01E4,
("g", "/"): 0x01E5,
("G", "<"): 0x01E6,
("g", "<"): 0x01E7,
("K", "<"): 0x01E8,
("k", "<"): 0x01E9,
("O", ";"): 0x01EA,
("o", ";"): 0x01EB,
("O", "1"): 0x01EC,
("o", "1"): 0x01ED,
("E", "Z"): 0x01EE,
("e", "z"): 0x01EF,
("j", "<"): 0x01F0,
("G", "'"): 0x01F4,
("g", "'"): 0x01F5,
(";", "S"): 0x02BF,
("'", "<"): 0x02C7,
("'", "("): 0x02D8,
("'", "."): 0x02D9,
("'", "0"): 0x02DA,
("'", ";"): 0x02DB,
("'", '"'): 0x02DD,
("A", "%"): 0x0386,
("E", "%"): 0x0388,
("Y", "%"): 0x0389,
("I", "%"): 0x038A,
("O", "%"): 0x038C,
("U", "%"): 0x038E,
("W", "%"): 0x038F,
("i", "3"): 0x0390,
("A", "*"): 0x0391,
("B", "*"): 0x0392,
("G", "*"): 0x0393,
("D", "*"): 0x0394,
("E", "*"): 0x0395,
("Z", "*"): 0x0396,
("Y", "*"): 0x0397,
("H", "*"): 0x0398,
("I", "*"): 0x0399,
("K", "*"): 0x039A,
("L", "*"): 0x039B,
("M", "*"): 0x039C,
("N", "*"): 0x039D,
("C", "*"): 0x039E,
("O", "*"): 0x039F,
("P", "*"): 0x03A0,
("R", "*"): 0x03A1,
("S", "*"): 0x03A3,
("T", "*"): 0x03A4,
("U", "*"): 0x03A5,
("F", "*"): 0x03A6,
("X", "*"): 0x03A7,
("Q", "*"): 0x03A8,
("W", "*"): 0x03A9,
("J", "*"): 0x03AA,
("V", "*"): 0x03AB,
("a", "%"): 0x03AC,
("e", "%"): 0x03AD,
("y", "%"): 0x03AE,
("i", "%"): 0x03AF,
("u", "3"): 0x03B0,
("a", "*"): 0x03B1,
("b", "*"): 0x03B2,
("g", "*"): 0x03B3,
("d", "*"): 0x03B4,
("e", "*"): 0x03B5,
("z", "*"): 0x03B6,
("y", "*"): 0x03B7,
("h", "*"): 0x03B8,
("i", "*"): 0x03B9,
("k", "*"): 0x03BA,
("l", "*"): 0x03BB,
("m", "*"): 0x03BC,
("n", "*"): 0x03BD,
("c", "*"): 0x03BE,
("o", "*"): 0x03BF,
("p", "*"): 0x03C0,
("r", "*"): 0x03C1,
("*", "s"): 0x03C2,
("s", "*"): 0x03C3,
("t", "*"): 0x03C4,
("u", "*"): 0x03C5,
("f", "*"): 0x03C6,
("x", "*"): 0x03C7,
("q", "*"): 0x03C8,
("w", "*"): 0x03C9,
("j", "*"): 0x03CA,
("v", "*"): 0x03CB,
("o", "%"): 0x03CC,
("u", "%"): 0x03CD,
("w", "%"): 0x03CE,
("'", "G"): 0x03D8,
(",", "G"): 0x03D9,
("T", "3"): 0x03DA,
("t", "3"): 0x03DB,
("M", "3"): 0x03DC,
("m", "3"): 0x03DD,
("K", "3"): 0x03DE,
("k", "3"): 0x03DF,
("P", "3"): 0x03E0,
("p", "3"): 0x03E1,
("'", "%"): 0x03F4,
("j", "3"): 0x03F5,
("I", "O"): 0x0401,
("D", "%"): 0x0402,
("G", "%"): 0x0403,
("I", "E"): 0x0404,
("D", "S"): 0x0405,
("I", "I"): 0x0406,
("Y", "I"): 0x0407,
("J", "%"): 0x0408,
("L", "J"): 0x0409,
("N", "J"): 0x040A,
("T", "s"): 0x040B,
("K", "J"): 0x040C,
("V", "%"): 0x040E,
("D", "Z"): 0x040F,
("A", "="): 0x0410,
("B", "="): 0x0411,
("V", "="): 0x0412,
("G", "="): 0x0413,
("D", "="): 0x0414,
("E", "="): 0x0415,
("Z", "%"): 0x0416,
("Z", "="): 0x0417,
("I", "="): 0x0418,
("J", "="): 0x0419,
("K", "="): 0x041A,
("L", "="): 0x041B,
("M", "="): 0x041C,
("N", "="): 0x041D,
("O", "="): 0x041E,
("P", "="): 0x041F,
("R", "="): 0x0420,
("S", "="): 0x0421,
("T", "="): 0x0422,
("U", "="): 0x0423,
("F", "="): 0x0424,
("H", "="): 0x0425,
("C", "="): 0x0426,
("C", "%"): 0x0427,
("S", "%"): 0x0428,
("S", "c"): 0x0429,
("=", '"'): 0x042A,
("Y", "="): 0x042B,
("%", '"'): 0x042C,
("J", "E"): 0x042D,
("J", "U"): 0x042E,
("J", "A"): 0x042F,
("a", "="): 0x0430,
("b", "="): 0x0431,
("v", "="): 0x0432,
("g", "="): 0x0433,
("d", "="): 0x0434,
("e", "="): 0x0435,
("z", "%"): 0x0436,
("z", "="): 0x0437,
("i", "="): 0x0438,
("j", "="): 0x0439,
("k", "="): 0x043A,
("l", "="): 0x043B,
("m", "="): 0x043C,
("n", "="): 0x043D,
("o", "="): 0x043E,
("p", "="): 0x043F,
("r", "="): 0x0440,
("s", "="): 0x0441,
("t", "="): 0x0442,
("u", "="): 0x0443,
("f", "="): 0x0444,
("h", "="): 0x0445,
("c", "="): 0x0446,
("c", "%"): 0x0447,
("s", "%"): 0x0448,
("s", "c"): 0x0449,
("=", "'"): 0x044A,
("y", "="): 0x044B,
("%", "'"): 0x044C,
("j", "e"): 0x044D,
("j", "u"): 0x044E,
("j", "a"): 0x044F,
("i", "o"): 0x0451,
("d", "%"): 0x0452,
("g", "%"): 0x0453,
("i", "e"): 0x0454,
("d", "s"): 0x0455,
("i", "i"): 0x0456,
("y", "i"): 0x0457,
("j", "%"): 0x0458,
("l", "j"): 0x0459,
("n", "j"): 0x045A,
("t", "s"): 0x045B,
("k", "j"): 0x045C,
("v", "%"): 0x045E,
("d", "z"): 0x045F,
("Y", "3"): 0x0462,
("y", "3"): 0x0463,
("O", "3"): 0x046A,
("o", "3"): 0x046B,
("F", "3"): 0x0472,
("f", "3"): 0x0473,
("V", "3"): 0x0474,
("v", "3"): 0x0475,
("C", "3"): 0x0480,
("c", "3"): 0x0481,
("G", "3"): 0x0490,
("g", "3"): 0x0491,
("A", "+"): 0x05D0,
("B", "+"): 0x05D1,
("G", "+"): 0x05D2,
("D", "+"): 0x05D3,
("H", "+"): 0x05D4,
("W", "+"): 0x05D5,
("Z", "+"): 0x05D6,
("X", "+"): 0x05D7,
("T", "j"): 0x05D8,
("J", "+"): 0x05D9,
("K", "%"): 0x05DA,
("K", "+"): 0x05DB,
("L", "+"): 0x05DC,
("M", "%"): 0x05DD,
("M", "+"): 0x05DE,
("N", "%"): 0x05DF,
("N", "+"): 0x05E0,
("S", "+"): 0x05E1,
("E", "+"): 0x05E2,
("P", "%"): 0x05E3,
("P", "+"): 0x05E4,
("Z", "j"): 0x05E5,
("Z", "J"): 0x05E6,
("Q", "+"): 0x05E7,
("R", "+"): 0x05E8,
("S", "h"): 0x05E9,
("T", "+"): 0x05EA,
(",", "+"): 0x060C,
(";", "+"): 0x061B,
("?", "+"): 0x061F,
("H", "'"): 0x0621,
("a", "M"): 0x0622,
("a", "H"): 0x0623,
("w", "H"): 0x0624,
("a", "h"): 0x0625,
("y", "H"): 0x0626,
("a", "+"): 0x0627,
("b", "+"): 0x0628,
("t", "m"): 0x0629,
("t", "+"): 0x062A,
("t", "k"): 0x062B,
("g", "+"): 0x062C,
("h", "k"): 0x062D,
("x", "+"): 0x062E,
("d", "+"): 0x062F,
("d", "k"): 0x0630,
("r", "+"): 0x0631,
("z", "+"): 0x0632,
("s", "+"): 0x0633,
("s", "n"): 0x0634,
("c", "+"): 0x0635,
("d", "d"): 0x0636,
("t", "j"): 0x0637,
("z", "H"): 0x0638,
("e", "+"): 0x0639,
("i", "+"): 0x063A,
("+", "+"): 0x0640,
("f", "+"): 0x0641,
("q", "+"): 0x0642,
("k", "+"): 0x0643,
("l", "+"): 0x0644,
("m", "+"): 0x0645,
("n", "+"): 0x0646,
("h", "+"): 0x0647,
("w", "+"): 0x0648,
("j", "+"): 0x0649,
("y", "+"): 0x064A,
(":", "+"): 0x064B,
('"', "+"): 0x064C,
("=", "+"): 0x064D,
("/", "+"): 0x064E,
("'", "+"): 0x064F,
("1", "+"): 0x0650,
("3", "+"): 0x0651,
("0", "+"): 0x0652,
("a", "S"): 0x0670,
("p", "+"): 0x067E,
("v", "+"): 0x06A4,
("g", "f"): 0x06AF,
("0", "a"): 0x06F0,
("1", "a"): 0x06F1,
("2", "a"): 0x06F2,
("3", "a"): 0x06F3,
("4", "a"): 0x06F4,
("5", "a"): 0x06F5,
("6", "a"): 0x06F6,
("7", "a"): 0x06F7,
("8", "a"): 0x06F8,
("9", "a"): 0x06F9,
("B", "."): 0x1E02,
("b", "."): 0x1E03,
("B", "_"): 0x1E06,
("b", "_"): 0x1E07,
("D", "."): 0x1E0A,
("d", "."): 0x1E0B,
("D", "_"): 0x1E0E,
("d", "_"): 0x1E0F,
("D", ","): 0x1E10,
("d", ","): 0x1E11,
("F", "."): 0x1E1E,
("f", "."): 0x1E1F,
("G", "-"): 0x1E20,
("g", "-"): 0x1E21,
("H", "."): 0x1E22,
("h", "."): 0x1E23,
("H", ":"): 0x1E26,
("h", ":"): 0x1E27,
("H", ","): 0x1E28,
("h", ","): 0x1E29,
("K", "'"): 0x1E30,
("k", "'"): 0x1E31,
("K", "_"): 0x1E34,
("k", "_"): 0x1E35,
("L", "_"): 0x1E3A,
("l", "_"): 0x1E3B,
("M", "'"): 0x1E3E,
("m", "'"): 0x1E3F,
("M", "."): 0x1E40,
("m", "."): 0x1E41,
("N", "."): 0x1E44,
("n", "."): 0x1E45,
("N", "_"): 0x1E48,
("n", "_"): 0x1E49,
("P", "'"): 0x1E54,
("p", "'"): 0x1E55,
("P", "."): 0x1E56,
("p", "."): 0x1E57,
("R", "."): 0x1E58,
("r", "."): 0x1E59,
("R", "_"): 0x1E5E,
("r", "_"): 0x1E5F,
("S", "."): 0x1E60,
("s", "."): 0x1E61,
("T", "."): 0x1E6A,
("t", "."): 0x1E6B,
("T", "_"): 0x1E6E,
("t", "_"): 0x1E6F,
("V", "?"): 0x1E7C,
("v", "?"): 0x1E7D,
("W", "!"): 0x1E80,
("w", "!"): 0x1E81,
("W", "'"): 0x1E82,
("w", "'"): 0x1E83,
("W", ":"): 0x1E84,
("w", ":"): 0x1E85,
("W", "."): 0x1E86,
("w", "."): 0x1E87,
("X", "."): 0x1E8A,
("x", "."): 0x1E8B,
("X", ":"): 0x1E8C,
("x", ":"): 0x1E8D,
("Y", "."): 0x1E8E,
("y", "."): 0x1E8F,
("Z", ">"): 0x1E90,
("z", ">"): 0x1E91,
("Z", "_"): 0x1E94,
("z", "_"): 0x1E95,
("h", "_"): 0x1E96,
("t", ":"): 0x1E97,
("w", "0"): 0x1E98,
("y", "0"): 0x1E99,
("A", "2"): 0x1EA2,
("a", "2"): 0x1EA3,
("E", "2"): 0x1EBA,
("e", "2"): 0x1EBB,
("E", "?"): 0x1EBC,
("e", "?"): 0x1EBD,
("I", "2"): 0x1EC8,
("i", "2"): 0x1EC9,
("O", "2"): 0x1ECE,
("o", "2"): 0x1ECF,
("U", "2"): 0x1EE6,
("u", "2"): 0x1EE7,
("Y", "!"): 0x1EF2,
("y", "!"): 0x1EF3,
("Y", "2"): 0x1EF6,
("y", "2"): 0x1EF7,
("Y", "?"): 0x1EF8,
("y", "?"): 0x1EF9,
(";", "'"): 0x1F00,
(",", "'"): 0x1F01,
(";", "!"): 0x1F02,
(",", "!"): 0x1F03,
("?", ";"): 0x1F04,
("?", ","): 0x1F05,
("!", ":"): 0x1F06,
("?", ":"): 0x1F07,
("1", "N"): 0x2002,
("1", "M"): 0x2003,
("3", "M"): 0x2004,
("4", "M"): 0x2005,
("6", "M"): 0x2006,
("1", "T"): 0x2009,
("1", "H"): 0x200A,
("-", "1"): 0x2010,
("-", "N"): 0x2013,
("-", "M"): 0x2014,
("-", "3"): 0x2015,
("!", "2"): 0x2016,
("=", "2"): 0x2017,
("'", "6"): 0x2018,
("'", "9"): 0x2019,
(".", "9"): 0x201A,
("9", "'"): 0x201B,
('"', "6"): 0x201C,
('"', "9"): 0x201D,
(":", "9"): 0x201E,
("9", '"'): 0x201F,
("/", "-"): 0x2020,
("/", "="): 0x2021,
(".", "."): 0x2025,
("%", "0"): 0x2030,
("1", "'"): 0x2032,
("2", "'"): 0x2033,
("3", "'"): 0x2034,
("1", '"'): 0x2035,
("2", '"'): 0x2036,
("3", '"'): 0x2037,
("C", "a"): 0x2038,
("<", "1"): 0x2039,
(">", "1"): 0x203A,
(":", "X"): 0x203B,
("'", "-"): 0x203E,
("/", "f"): 0x2044,
("0", "S"): 0x2070,
("4", "S"): 0x2074,
("5", "S"): 0x2075,
("6", "S"): 0x2076,
("7", "S"): 0x2077,
("8", "S"): 0x2078,
("9", "S"): 0x2079,
("+", "S"): 0x207A,
("-", "S"): 0x207B,
("=", "S"): 0x207C,
("(", "S"): 0x207D,
(")", "S"): 0x207E,
("n", "S"): 0x207F,
("0", "s"): 0x2080,
("1", "s"): 0x2081,
("2", "s"): 0x2082,
("3", "s"): 0x2083,
("4", "s"): 0x2084,
("5", "s"): 0x2085,
("6", "s"): 0x2086,
("7", "s"): 0x2087,
("8", "s"): 0x2088,
("9", "s"): 0x2089,
("+", "s"): 0x208A,
("-", "s"): 0x208B,
("=", "s"): 0x208C,
("(", "s"): 0x208D,
(")", "s"): 0x208E,
("L", "i"): 0x20A4,
("P", "t"): 0x20A7,
("W", "="): 0x20A9,
("=", "e"): 0x20AC, # euro
("E", "u"): 0x20AC, # euro
("=", "R"): 0x20BD, # rouble
("=", "P"): 0x20BD, # rouble
("o", "C"): 0x2103,
("c", "o"): 0x2105,
("o", "F"): 0x2109,
("N", "0"): 0x2116,
("P", "O"): 0x2117,
("R", "x"): 0x211E,
("S", "M"): 0x2120,
("T", "M"): 0x2122,
("O", "m"): 0x2126,
("A", "O"): 0x212B,
("1", "3"): 0x2153,
("2", "3"): 0x2154,
("1", "5"): 0x2155,
("2", "5"): 0x2156,
("3", "5"): 0x2157,
("4", "5"): 0x2158,
("1", "6"): 0x2159,
("5", "6"): 0x215A,
("1", "8"): 0x215B,
("3", "8"): 0x215C,
("5", "8"): 0x215D,
("7", "8"): 0x215E,
("1", "R"): 0x2160,
("2", "R"): 0x2161,
("3", "R"): 0x2162,
("4", "R"): 0x2163,
("5", "R"): 0x2164,
("6", "R"): 0x2165,
("7", "R"): 0x2166,
("8", "R"): 0x2167,
("9", "R"): 0x2168,
("a", "R"): 0x2169,
("b", "R"): 0x216A,
("c", "R"): 0x216B,
("1", "r"): 0x2170,
("2", "r"): 0x2171,
("3", "r"): 0x2172,
("4", "r"): 0x2173,
("5", "r"): 0x2174,
("6", "r"): 0x2175,
("7", "r"): 0x2176,
("8", "r"): 0x2177,
("9", "r"): 0x2178,
("a", "r"): 0x2179,
("b", "r"): 0x217A,
("c", "r"): 0x217B,
("<", "-"): 0x2190,
("-", "!"): 0x2191,
("-", ">"): 0x2192,
("-", "v"): 0x2193,
("<", ">"): 0x2194,
("U", "D"): 0x2195,
("<", "="): 0x21D0,
("=", ">"): 0x21D2,
("=", "="): 0x21D4,
("F", "A"): 0x2200,
("d", "P"): 0x2202,
("T", "E"): 0x2203,
("/", "0"): 0x2205,
("D", "E"): 0x2206,
("N", "B"): 0x2207,
("(", "-"): 0x2208,
("-", ")"): 0x220B,
("*", "P"): 0x220F,
("+", "Z"): 0x2211,
("-", "2"): 0x2212,
("-", "+"): 0x2213,
("*", "-"): 0x2217,
("O", "b"): 0x2218,
("S", "b"): 0x2219,
("R", "T"): 0x221A,
("0", "("): 0x221D,
("0", "0"): 0x221E,
("-", "L"): 0x221F,
("-", "V"): 0x2220,
("P", "P"): 0x2225,
("A", "N"): 0x2227,
("O", "R"): 0x2228,
("(", "U"): 0x2229,
(")", "U"): 0x222A,
("I", "n"): 0x222B,
("D", "I"): 0x222C,
("I", "o"): 0x222E,
(".", ":"): 0x2234,
(":", "."): 0x2235,
(":", "R"): 0x2236,
(":", ":"): 0x2237,
("?", "1"): 0x223C,
("C", "G"): 0x223E,
("?", "-"): 0x2243,
("?", "="): 0x2245,
("?", "2"): 0x2248,
("=", "?"): 0x224C,
("H", "I"): 0x2253,
("!", "="): 0x2260,
("=", "3"): 0x2261,
("=", "<"): 0x2264,
(">", "="): 0x2265,
("<", "*"): 0x226A,
("*", ">"): 0x226B,
("!", "<"): 0x226E,
("!", ">"): 0x226F,
("(", "C"): 0x2282,
(")", "C"): 0x2283,
("(", "_"): 0x2286,
(")", "_"): 0x2287,
("0", "."): 0x2299,
("0", "2"): 0x229A,
("-", "T"): 0x22A5,
(".", "P"): 0x22C5,
(":", "3"): 0x22EE,
(".", "3"): 0x22EF,
("E", "h"): 0x2302,
("<", "7"): 0x2308,
(">", "7"): 0x2309,
("7", "<"): 0x230A,
("7", ">"): 0x230B,
("N", "I"): 0x2310,
("(", "A"): 0x2312,
("T", "R"): 0x2315,
("I", "u"): 0x2320,
("I", "l"): 0x2321,
("<", "/"): 0x2329,
("/", ">"): 0x232A,
("V", "s"): 0x2423,
("1", "h"): 0x2440,
("3", "h"): 0x2441,
("2", "h"): 0x2442,
("4", "h"): 0x2443,
("1", "j"): 0x2446,
("2", "j"): 0x2447,
("3", "j"): 0x2448,
("4", "j"): 0x2449,
("1", "."): 0x2488,
("2", "."): 0x2489,
("3", "."): 0x248A,
("4", "."): 0x248B,
("5", "."): 0x248C,
("6", "."): 0x248D,
("7", "."): 0x248E,
("8", "."): 0x248F,
("9", "."): 0x2490,
("h", "h"): 0x2500,
("H", "H"): 0x2501,
("v", "v"): 0x2502,
("V", "V"): 0x2503,
("3", "-"): 0x2504,
("3", "_"): 0x2505,
("3", "!"): 0x2506,
("3", "/"): 0x2507,
("4", "-"): 0x2508,
("4", "_"): 0x2509,
("4", "!"): 0x250A,
("4", "/"): 0x250B,
("d", "r"): 0x250C,
("d", "R"): 0x250D,
("D", "r"): 0x250E,
("D", "R"): 0x250F,
("d", "l"): 0x2510,
("d", "L"): 0x2511,
("D", "l"): 0x2512,
("L", "D"): 0x2513,
("u", "r"): 0x2514,
("u", "R"): 0x2515,
("U", "r"): 0x2516,
("U", "R"): 0x2517,
("u", "l"): 0x2518,
("u", "L"): 0x2519,
("U", "l"): 0x251A,
("U", "L"): 0x251B,
("v", "r"): 0x251C,
("v", "R"): 0x251D,
("V", "r"): 0x2520,
("V", "R"): 0x2523,
("v", "l"): 0x2524,
("v", "L"): 0x2525,
("V", "l"): 0x2528,
("V", "L"): 0x252B,
("d", "h"): 0x252C,
("d", "H"): 0x252F,
("D", "h"): 0x2530,
("D", "H"): 0x2533,
("u", "h"): 0x2534,
("u", "H"): 0x2537,
("U", "h"): 0x2538,
("U", "H"): 0x253B,
("v", "h"): 0x253C,
("v", "H"): 0x253F,
("V", "h"): 0x2542,
("V", "H"): 0x254B,
("F", "D"): 0x2571,
("B", "D"): 0x2572,
("T", "B"): 0x2580,
("L", "B"): 0x2584,
("F", "B"): 0x2588,
("l", "B"): 0x258C,
("R", "B"): 0x2590,
(".", "S"): 0x2591,
(":", "S"): 0x2592,
("?", "S"): 0x2593,
("f", "S"): 0x25A0,
("O", "S"): 0x25A1,
("R", "O"): 0x25A2,
("R", "r"): 0x25A3,
("R", "F"): 0x25A4,
("R", "Y"): 0x25A5,
("R", "H"): 0x25A6,
("R", "Z"): 0x25A7,
("R", "K"): 0x25A8,
("R", "X"): 0x25A9,
("s", "B"): 0x25AA,
("S", "R"): 0x25AC,
("O", "r"): 0x25AD,
("U", "T"): 0x25B2,
("u", "T"): 0x25B3,
("P", "R"): 0x25B6,
("T", "r"): 0x25B7,
("D", "t"): 0x25BC,
("d", "T"): 0x25BD,
("P", "L"): 0x25C0,
("T", "l"): 0x25C1,
("D", "b"): 0x25C6,
("D", "w"): 0x25C7,
("L", "Z"): 0x25CA,
("0", "m"): 0x25CB,
("0", "o"): 0x25CE,
("0", "M"): 0x25CF,
("0", "L"): 0x25D0,
("0", "R"): 0x25D1,
("S", "n"): 0x25D8,
("I", "c"): 0x25D9,
("F", "d"): 0x25E2,
("B", "d"): 0x25E3,
("*", "2"): 0x2605,
("*", "1"): 0x2606,
("<", "H"): 0x261C,
(">", "H"): 0x261E,
("0", "u"): 0x263A,
("0", "U"): 0x263B,
("S", "U"): 0x263C,
("F", "m"): 0x2640,
("M", "l"): 0x2642,
("c", "S"): 0x2660,
("c", "H"): 0x2661,
("c", "D"): 0x2662,
("c", "C"): 0x2663,
("M", "d"): 0x2669,
("M", "8"): 0x266A,
("M", "2"): 0x266B,
("M", "b"): 0x266D,
("M", "x"): 0x266E,
("M", "X"): 0x266F,
("O", "K"): 0x2713,
("X", "X"): 0x2717,
("-", "X"): 0x2720,
("I", "S"): 0x3000,
(",", "_"): 0x3001,
(".", "_"): 0x3002,
("+", '"'): 0x3003,
("+", "_"): 0x3004,
("*", "_"): 0x3005,
(";", "_"): 0x3006,
("0", "_"): 0x3007,
("<", "+"): 0x300A,
(">", "+"): 0x300B,
("<", "'"): 0x300C,
(">", "'"): 0x300D,
("<", '"'): 0x300E,
(">", '"'): 0x300F,
("(", '"'): 0x3010,
(")", '"'): 0x3011,
("=", "T"): 0x3012,
("=", "_"): 0x3013,
("(", "'"): 0x3014,
(")", "'"): 0x3015,
("(", "I"): 0x3016,
(")", "I"): 0x3017,
("-", "?"): 0x301C,
("A", "5"): 0x3041,
("a", "5"): 0x3042,
("I", "5"): 0x3043,
("i", "5"): 0x3044,
("U", "5"): 0x3045,
("u", "5"): 0x3046,
("E", "5"): 0x3047,
("e", "5"): 0x3048,
("O", "5"): 0x3049,
("o", "5"): 0x304A,
("k", "a"): 0x304B,
("g", "a"): 0x304C,
("k", "i"): 0x304D,
("g", "i"): 0x304E,
("k", "u"): 0x304F,
("g", "u"): 0x3050,
("k", "e"): 0x3051,
("g", "e"): 0x3052,
("k", "o"): 0x3053,
("g", "o"): 0x3054,
("s", "a"): 0x3055,
("z", "a"): 0x3056,
("s", "i"): 0x3057,
("z", "i"): 0x3058,
("s", "u"): 0x3059,
("z", "u"): 0x305A,
("s", "e"): 0x305B,
("z", "e"): 0x305C,
("s", "o"): 0x305D,
("z", "o"): 0x305E,
("t", "a"): 0x305F,
("d", "a"): 0x3060,
("t", "i"): 0x3061,
("d", "i"): 0x3062,
("t", "U"): 0x3063,
("t", "u"): 0x3064,
("d", "u"): 0x3065,
("t", "e"): 0x3066,
("d", "e"): 0x3067,
("t", "o"): 0x3068,
("d", "o"): 0x3069,
("n", "a"): 0x306A,
("n", "i"): 0x306B,
("n", "u"): 0x306C,
("n", "e"): 0x306D,
("n", "o"): 0x306E,
("h", "a"): 0x306F,
("b", "a"): 0x3070,
("p", "a"): 0x3071,
("h", "i"): 0x3072,
("b", "i"): 0x3073,
("p", "i"): 0x3074,
("h", "u"): 0x3075,
("b", "u"): 0x3076,
("p", "u"): 0x3077,
("h", "e"): 0x3078,
("b", "e"): 0x3079,
("p", "e"): 0x307A,
("h", "o"): 0x307B,
("b", "o"): 0x307C,
("p", "o"): 0x307D,
("m", "a"): 0x307E,
("m", "i"): 0x307F,
("m", "u"): 0x3080,
("m", "e"): 0x3081,
("m", "o"): 0x3082,
("y", "A"): 0x3083,
("y", "a"): 0x3084,
("y", "U"): 0x3085,
("y", "u"): 0x3086,
("y", "O"): 0x3087,
("y", "o"): 0x3088,
("r", "a"): 0x3089,
("r", "i"): 0x308A,
("r", "u"): 0x308B,
("r", "e"): 0x308C,
("r", "o"): 0x308D,
("w", "A"): 0x308E,
("w", "a"): 0x308F,
("w", "i"): 0x3090,
("w", "e"): 0x3091,
("w", "o"): 0x3092,
("n", "5"): 0x3093,
("v", "u"): 0x3094,
('"', "5"): 0x309B,
("0", "5"): 0x309C,
("*", "5"): 0x309D,
("+", "5"): 0x309E,
("a", "6"): 0x30A1,
("A", "6"): 0x30A2,
("i", "6"): 0x30A3,
("I", "6"): 0x30A4,
("u", "6"): 0x30A5,
("U", "6"): 0x30A6,
("e", "6"): 0x30A7,
("E", "6"): 0x30A8,
("o", "6"): 0x30A9,
("O", "6"): 0x30AA,
("K", "a"): 0x30AB,
("G", "a"): 0x30AC,
("K", "i"): 0x30AD,
("G", "i"): 0x30AE,
("K", "u"): 0x30AF,
("G", "u"): 0x30B0,
("K", "e"): 0x30B1,
("G", "e"): 0x30B2,
("K", "o"): 0x30B3,
("G", "o"): 0x30B4,
("S", "a"): 0x30B5,
("Z", "a"): 0x30B6,
("S", "i"): 0x30B7,
("Z", "i"): 0x30B8,
("S", "u"): 0x30B9,
("Z", "u"): 0x30BA,
("S", "e"): 0x30BB,
("Z", "e"): 0x30BC,
("S", "o"): 0x30BD,
("Z", "o"): 0x30BE,
("T", "a"): 0x30BF,
("D", "a"): 0x30C0,
("T", "i"): 0x30C1,
("D", "i"): 0x30C2,
("T", "U"): 0x30C3,
("T", "u"): 0x30C4,
("D", "u"): 0x30C5,
("T", "e"): 0x30C6,
("D", "e"): 0x30C7,
("T", "o"): 0x30C8,
("D", "o"): 0x30C9,
("N", "a"): 0x30CA,
("N", "i"): 0x30CB,
("N", "u"): 0x30CC,
("N", "e"): 0x30CD,
("N", "o"): 0x30CE,
("H", "a"): 0x30CF,
("B", "a"): 0x30D0,
("P", "a"): 0x30D1,
("H", "i"): 0x30D2,
("B", "i"): 0x30D3,
("P", "i"): 0x30D4,
("H", "u"): 0x30D5,
("B", "u"): 0x30D6,
("P", "u"): 0x30D7,
("H", "e"): 0x30D8,
("B", "e"): 0x30D9,
("P", "e"): 0x30DA,
("H", "o"): 0x30DB,
("B", "o"): 0x30DC,
("P", "o"): 0x30DD,
("M", "a"): 0x30DE,
("M", "i"): 0x30DF,
("M", "u"): 0x30E0,
("M", "e"): 0x30E1,
("M", "o"): 0x30E2,
("Y", "A"): 0x30E3,
("Y", "a"): 0x30E4,
("Y", "U"): 0x30E5,
("Y", "u"): 0x30E6,
("Y", "O"): 0x30E7,
("Y", "o"): 0x30E8,
("R", "a"): 0x30E9,
("R", "i"): 0x30EA,
("R", "u"): 0x30EB,
("R", "e"): 0x30EC,
("R", "o"): 0x30ED,
("W", "A"): 0x30EE,
("W", "a"): 0x30EF,
("W", "i"): 0x30F0,
("W", "e"): 0x30F1,
("W", "o"): 0x30F2,
("N", "6"): 0x30F3,
("V", "u"): 0x30F4,
("K", "A"): 0x30F5,
("K", "E"): 0x30F6,
("V", "a"): 0x30F7,
("V", "i"): 0x30F8,
("V", "e"): 0x30F9,
("V", "o"): 0x30FA,
(".", "6"): 0x30FB,
("-", "6"): 0x30FC,
("*", "6"): 0x30FD,
("+", "6"): 0x30FE,
("b", "4"): 0x3105,
("p", "4"): 0x3106,
("m", "4"): 0x3107,
("f", "4"): 0x3108,
("d", "4"): 0x3109,
("t", "4"): 0x310A,
("n", "4"): 0x310B,
("l", "4"): 0x310C,
("g", "4"): 0x310D,
("k", "4"): 0x310E,
("h", "4"): 0x310F,
("j", "4"): 0x3110,
("q", "4"): 0x3111,
("x", "4"): 0x3112,
("z", "h"): 0x3113,
("c", "h"): 0x3114,
("s", "h"): 0x3115,
("r", "4"): 0x3116,
("z", "4"): 0x3117,
("c", "4"): 0x3118,
("s", "4"): 0x3119,
("a", "4"): 0x311A,
("o", "4"): 0x311B,
("e", "4"): 0x311C,
("a", "i"): 0x311E,
("e", "i"): 0x311F,
("a", "u"): 0x3120,
("o", "u"): 0x3121,
("a", "n"): 0x3122,
("e", "n"): 0x3123,
("a", "N"): 0x3124,
("e", "N"): 0x3125,
("e", "r"): 0x3126,
("i", "4"): 0x3127,
("u", "4"): 0x3128,
("i", "u"): 0x3129,
("v", "4"): 0x312A,
("n", "G"): 0x312B,
("g", "n"): 0x312C,
("1", "c"): 0x3220,
("2", "c"): 0x3221,
("3", "c"): 0x3222,
("4", "c"): 0x3223,
("5", "c"): 0x3224,
("6", "c"): 0x3225,
("7", "c"): 0x3226,
("8", "c"): 0x3227,
("9", "c"): 0x3228,
# code points 0xe000 - 0xefff excluded, they have no assigned
# characters, only used in proposals.
("f", "f"): 0xFB00,
("f", "i"): 0xFB01,
("f", "l"): 0xFB02,
("f", "t"): 0xFB05,
("s", "t"): 0xFB06,
# Vim 5.x compatible digraphs that don't conflict with the above
("~", "!"): 161,
("c", "|"): 162,
("$", "$"): 163,
("o", "x"): 164, # currency symbol in ISO 8859-1
("Y", "-"): 165,
("|", "|"): 166,
("c", "O"): 169,
("-", ","): 172,
("-", "="): 175,
("~", "o"): 176,
("2", "2"): 178,
("3", "3"): 179,
("p", "p"): 182,
("~", "."): 183,
("1", "1"): 185,
("~", "?"): 191,
("A", "`"): 192,
("A", "^"): 194,
("A", "~"): 195,
("A", '"'): 196,
("A", "@"): 197,
("E", "`"): 200,
("E", "^"): 202,
("E", '"'): 203,
("I", "`"): 204,
("I", "^"): 206,
("I", '"'): 207,
("N", "~"): 209,
("O", "`"): 210,
("O", "^"): 212,
("O", "~"): 213,
("/", "\\"): 215, # multiplication symbol in ISO 8859-1
("U", "`"): 217,
("U", "^"): 219,
("I", "p"): 222,
("a", "`"): 224,
("a", "^"): 226,
("a", "~"): 227,
("a", '"'): 228,
("a", "@"): 229,
("e", "`"): 232,
("e", "^"): 234,
("e", '"'): 235,
("i", "`"): 236,
("i", "^"): 238,
("n", "~"): 241,
("o", "`"): 242,
("o", "^"): 244,
("o", "~"): 245,
("u", "`"): 249,
("u", "^"): 251,
("y", '"'): 255,
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@prompt-toolkit@py3@prompt_toolkit@key_binding@digraphs.py@.PATH_END.py
|
{
"filename": "glossary.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py2/numpy/doc/glossary.py",
"type": "Python"
}
|
"""
========
Glossary
========
.. glossary::
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
# an integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called a :term:`ufunc`, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
big-endian
When storing a multi-byte value in memory as a sequence of bytes, the
sequence addresses/sends/stores the most significant byte first (lowest
address) and the least significant byte last (highest address). Common in
micro-processors and used for transmission of data over network protocols.
BLAS
`Basic Linear Algebra Subprograms <https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
[4]])
>>> x + y
array([[4, 5],
[5, 6]])
See `numpy.doc.broadcasting` for more information.
C order
See `row-major`
column-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
... def new_logging_func(*args, **kwargs):
... print("Logging call with parameters:", args, kwargs)
... return f(*args, **kwargs)
...
... return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
... return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <https://docs.python.org/tutorial/>`_.
field
In a :term:`structured data type`, each sub-type is called a `field`.
The `field` has a name (a string), a type (any valid :term:`dtype`, and
an optional `title`. See :ref:`arrays.dtypes`
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
for details.
homogenous
Describes a block of memory comprised of blocks, each block comprised of
items and of the same size, and blocks are interpreted in exactly the
same way. In the simplest case each block contains a single item, for
instance int32 or float64.
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
instance
A class definition gives the blueprint for constructing an object::
>>> class House(object):
... wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
... print("Key %d: %s" % (n, k))
...
Key 0: a
Key 1: b
Key 2: c
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
The list `x` contains 4 items, each which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
For more information, read the section on lists in the `Python
tutorial <https://docs.python.org/tutorial/>`_. For a mapping
type (key-value), see *dictionary*.
little-endian
When storing a multi-byte value in memory as a sequence of bytes, the
sequence addresses/sends/stores the least significant byte first (lowest
address) and the most significant byte last (highest address). Common in
x86 processors.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True])
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
Array that suppresses values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
[3, 4]])
>>> x**2
matrix([[ 7, 10],
[15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
record array
An :term:`ndarray` with :term:`structured data type` which has been
subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
making the fields of its data type to be accessible by attribute.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
row-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New NumPy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush(object):
... color = 'blue'
...
... def paint(self):
... print("Painting the city %s!" % self.color)
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence::
>>> x = range(5)
>>> x
[0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
Arrays may have more than one dimension, each which can be sliced
individually::
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
[3, 4]])
>>> x[:, 1]
array([2, 4])
structure
See :term:`structured data type`
structured data type
A data type composed of other datatypes
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
... return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise array operation. Examples include
``add``, ``sin`` and ``logical_or``.
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
high and the low level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
"""
from __future__ import division, absolute_import, print_function
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py2@numpy@doc@glossary.py@.PATH_END.py
|
{
"filename": "filter.py",
"repo_name": "hpparvi/PyTransit",
"repo_path": "PyTransit_extracted/PyTransit-master/pytransit/contamination/filter.py",
"type": "Python"
}
|
# PyTransit: fast and easy exoplanet transit modelling in Python.
# Copyright (C) 2010-2019 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from typing import Optional
from numpy import array, ones_like, zeros_like, diff, arange, linspace, ndarray, where, ones
from scipy.interpolate import interp1d
class Filter:
    """Abstract base class for passband transmission filters.

    Holds the passband name and a wavelength bounding box; concrete
    subclasses supply the transmission function and its sampling.
    """

    def __init__(self, name: str):
        """
        Parameters
        ----------
        :param name: passband name
        """
        self.name: str = name
        # Default wavelength bounding box; subclasses override as needed.
        self.bbox: ndarray = array([250, 1000], dtype='d')

    def __call__(self, wl):
        """Evaluate the transmission at wavelength(s) ``wl``."""
        raise NotImplementedError

    def sample(self, n: Optional[int] = 100):
        """Return ``n`` (wavelength, transmission) samples of the filter."""
        raise NotImplementedError
class DeltaFilter(Filter):
    """Dirac-delta-like filter: unit transmission at a single wavelength,
    zero everywhere else (within a 1e-5 tolerance).
    """

    def __init__(self, name: str, wl: float):
        """
        Parameters
        ----------
        :param name: passband name
        :param wl: the single wavelength with non-zero transmission
        """
        super().__init__(name)
        self.wl: float = wl
        # Vanishingly narrow bounding box around the delta wavelength.
        self.bbox = array([wl-1e-5, wl+1e-5])

    def __call__(self, wl):
        distance = abs(wl - self.wl)
        return where(distance < 1e-5, 1.0, 0.0)

    def sample(self, n: Optional[int] = 100):
        # A delta filter is fully described by one sample; ``n`` is ignored.
        return array(self.wl), array(1.0)
class ClearFilter(Filter):
    """Constant unity transmission.

    Removed: constructing this class always raises; use a wide
    :class:`BoxcarFilter` instead.
    """
    def __init__(self, name: str):
        # Fixed the error message: it previously said "CleanFilter",
        # which is not the name of this class and misled users.
        raise NotImplementedError("ClearFilter has been removed, please use a wide BoxcarFilter instead.")
class BoxcarFilter(Filter):
    """Top-hat filter: transmission is 1 between the minimum and maximum
    wavelengths (inclusive) and 0 outside that range.
    """

    def __init__(self, name, wl_min, wl_max):
        """
        Parameters
        ----------
        :param name: passband name
        :param wl_min: minimum wavelength
        :param wl_max: maximum wavelength
        """
        super().__init__(name)
        self.bbox = array([wl_min, wl_max], dtype='d')

    def __call__(self, wl):
        inside = (wl >= self.bbox[0]) & (wl <= self.bbox[1])
        transmission = zeros_like(wl)
        transmission[inside] = 1.
        return transmission

    def sample(self, n: Optional[int] = 100):
        wavelengths = linspace(self.bbox[0], self.bbox[1], num=n)
        return wavelengths, ones(n)
class TabulatedFilter(Filter):
    """Filter defined by a tabulated transmission curve, evaluated with
    cubic interpolation between the tabulated points.
    """

    def __init__(self, name, wl, tm):
        """
        Parameters
        ----------
        name : string passband name
        wl : array_like a list of wavelengths
        tm : array_like a list of transmission values
        """
        super().__init__(name)
        self.wl = array(wl)
        self.tm = array(tm)
        self.bbox = array([self.wl.min(), self.wl.max()])
        # Validate the table before building the interpolator.
        assert self.wl.size == self.tm.size, "The wavelength and transmission arrays must be of same size"
        assert all(diff(self.wl) > 0.), "Wavelength array must be monotonously increasing"
        assert all((self.tm >= 0.0) & (self.tm <= 1.0)), "Transmission must always be between 0.0 and 1.0"
        self._ip = interp1d(self.wl, self.tm, kind='cubic')

    def __call__(self, wl):
        return self._ip(wl)

    def sample(self, n: Optional[int] = 100):
        # Return the original table; ``n`` is ignored for tabulated filters.
        return self.wl, self.tm
# Predefined broadband passbands (wavelength units presumably nm -- confirm
# against the callers; the Filter default bbox of [250, 1000] suggests nm).
sdss_g = BoxcarFilter("g'", 400, 550) #: SDSS G filter
sdss_r = BoxcarFilter("r'", 570, 690) #: SDSS R filter
sdss_i = BoxcarFilter("i'", 710, 790) #: SDSS I filter
sdss_z = BoxcarFilter("z'", 810, 900) #: SDSS Z filter
# Kepler transmission curve tabulated every 25 units from 350 to 935.
kepler = TabulatedFilter('kepler',
                         arange(350, 960, 25),
                         array([0.000, 0.001, 0.000, 0.056, 0.465, 0.536, 0.624, 0.663,
                                0.681, 0.715, 0.713, 0.696, 0.670, 0.649, 0.616, 0.574,
                                0.541, 0.490, 0.468, 0.400, 0.332, 0.279, 0.020, 0.000,
                                0.000]))
# Explicit public API of this module.
__all__ = 'Filter TabulatedFilter BoxcarFilter sdss_g sdss_r sdss_i sdss_z kepler'.split()
|
hpparviREPO_NAMEPyTransitPATH_START.@PyTransit_extracted@PyTransit-master@pytransit@contamination@filter.py@.PATH_END.py
|
{
"filename": "Zha_2008.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/burnman/calibrants/Zha_2008.py",
"type": "Python"
}
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2024 by the BurnMan team, released under the GNU
# GPL v2 or later.
from burnman.eos.birch_murnaghan import BirchMurnaghanBase as BM3
from burnman.eos.mie_grueneisen_debye import MGDBase
from burnman.classes.calibrant import Calibrant
import numpy as np
"""
Zha_2008
^^^^^^^^
"""
class Pt(Calibrant):
    """
    The Pt pressure standard reported by
    Zha (2008; https://doi.org/10.1063/1.2844358).
    """

    def __init__(self):
        def _pressure_Zha_Pt(volume, temperature, params):
            """Return the total pressure at the given volume and temperature
            as the sum of isothermal, thermal, and electronic contributions.
            """
            # Isothermal pressure (GPa)
            pressure_model = BM3()
            P0 = pressure_model.pressure(params["T_0"], volume, params)

            # Thermal pressure: difference of MGD thermal pressures between
            # the target temperature and the reference temperature T_0.
            thermal_model = MGDBase()
            Pth0 = thermal_model._thermal_pressure(params["T_0"], volume, params)
            Pth = thermal_model._thermal_pressure(temperature, volume, params)

            # Electronic pressure: quartic polynomial in temperature,
            # scaled by 1e9 (GPa -> Pa).
            Pel = (
                1.1916e-15 * temperature**4.0
                - 1.4551e-11 * temperature**3.0
                + 1.6209e-07 * temperature**2.0
                + 1.8269e-4 * temperature
                - 0.069
            ) * 1.0e09

            # Total pressure
            P = P0 + Pth - Pth0 + Pel
            return P

        # Calibrated BM3 + Mie-Grueneisen-Debye parameters for platinum.
        _params_Zha_Pt = {
            "V_0": 9.0904e-06,
            "K_0": 273.5e9,
            "Kprime_0": 4.7,
            "Debye_0": 230.0,  # 370-405
            "grueneisen_0": 2.75,
            "q_0": 0.25,
            "n": 1.0,
            "T_0": 300.0,
            "P_0": 0.0,
            "Z": 4.0,
        }

        Calibrant.__init__(self, _pressure_Zha_Pt, "pressure", _params_Zha_Pt)
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@burnman@calibrants@Zha_2008.py@.PATH_END.py
|
{
"filename": "fastfdresponse.py",
"repo_name": "mikekatz04/BBHx",
"repo_path": "BBHx_extracted/BBHx-master/bbhx/response/fastfdresponse.py",
"type": "Python"
}
|
# LISA Response Functions
# Copyright (C) 2021 Michael L. Katz, Sylvain Marsat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
try:
import cupy as cp
from bbhx.pyFDResponse import LISA_response_wrap as LISA_response_wrap_gpu
except (ImportError, ModuleNotFoundError) as e:
print("No CuPy or GPU response available.")
from bbhx.pyFDResponse_cpu import LISA_response_wrap as LISA_response_wrap_cpu
from bbhx.utils.constants import *
from lisatools.detector import EqualArmlengthOrbits, Orbits
class LISATDIResponse:
    """Evaluate the fast frequency domain response function

    The response function is the fast frequency domain response function
    from `arXiv:1806.10734 <https://arxiv.org/abs/1806.10734>`_ and
    `arXiv:2003.00357 <https://arxiv.org/abs/2003.00357>`_. Please cite
    these papers if this class is used. This response assumes a fixed,
    non-breathing armlength for the LISA constellation.

    This class has GPU capability.

    Args:
        TDItag (str, optional): TDI channels to generate. Options are ``"XYZ"`` and
            ``"AET"``. If ``"XYZ"`` is not given, it will default to ``"AET"``.
            (Default: ``"AET"``)
        orbits (Orbits, optional): Orbit class. If ``None``, orbits is set to
            :class:`EqualArmlengthOrbits`. (Default: ``None``)
        rescaled (bool, optional): If ``True``, rescale TDI functions to avoid
            infinities at high frequency. (Default: ``False``)
        tdi2 (bool, optional): If ``True``, apply a factor of :math:`-2i \\sin{(4x)}e^{i4x})`
            to tdi1 output. This is a conversion from TDI 1 to TDI 2 under the
            assumption of equal armlength orbits. (Default: ``False``)
        order_fresnel_stencil (int, optional): Order of the Fresnel stencil in the
            response. Currently, anything above 0 is not implemented. This is left
            in for future compatibility. (Default: ``0``)
        use_gpu (bool, optional): If ``True``, use a GPU. (Default: ``False``)

    Attributes:
        allowable_modes (list): Allowed list of mode tuple pairs ``(l,m)`` for
            the chosen waveform model.
        ells_default (np.ndarray): Default values for the ``l`` index of the harmonic.
        mms_default (np.ndarray): Default values for the ``m`` index of the harmonic.
        includes_amps (bool): If ``True``, the ``out_buffer`` contains the first
            entry for amplitudes.
        order_fresnel_stencil (int): Order of the Fresnel stencil in the
            response. Currently, anything above 0 is not implemented. This is left
            in for future compatibility.
        TDItag (str): TDI channels to generate. Either ``"XYZ"`` or ``"AET"``.

    """

    def __init__(
        self,
        TDItag="AET",
        orbits: Orbits | None = None,
        rescaled: bool = False,
        tdi2: bool = False,
        order_fresnel_stencil=0,
        use_gpu=False,
    ):
        self.rescaled = rescaled
        self.tdi2 = tdi2

        # gpu setup
        self.use_gpu = use_gpu

        if order_fresnel_stencil > 0:
            raise NotImplementedError

        self.order_fresnel_stencil = order_fresnel_stencil

        # TDI setup
        self.TDItag = TDItag
        if TDItag == "XYZ":
            self.TDItag_int = 1
        else:
            self.TDItag_int = 2

        # PhenomHM modes
        self.allowable_modes = [(2, 2), (3, 3), (4, 4), (2, 1), (3, 2), (4, 3)]
        self.ells_default = self.xp.array([2, 3, 4, 2, 3, 4], dtype=self.xp.int32)
        self.mms_default = self.xp.array([2, 3, 4, 1, 2, 3], dtype=self.xp.int32)

        self.orbits = orbits

    @property
    def use_gpu(self) -> bool:
        """Whether to use a GPU."""
        return self._use_gpu

    @use_gpu.setter
    def use_gpu(self, use_gpu: bool) -> None:
        """Set ``use_gpu``."""
        assert isinstance(use_gpu, bool)
        self._use_gpu = use_gpu

    @property
    def response_gen(self):
        """C function on GPU/CPU"""
        response_gen = (
            LISA_response_wrap_gpu if self.use_gpu else LISA_response_wrap_cpu
        )
        return response_gen

    @property
    def xp(self):
        """Numpy or Cupy"""
        xp = cp if self.use_gpu else np
        return xp

    @property
    def orbits(self) -> Orbits:
        """Orbit class used to locate the constellation at each tf."""
        return self._orbits

    @orbits.setter
    def orbits(self, orbits: Orbits) -> None:
        if orbits is None:
            self._orbits = EqualArmlengthOrbits()
        elif not isinstance(orbits, Orbits):
            raise ValueError(
                "Input orbits must be of type Orbits (from LISA Analysis Tools)."
            )
        else:
            self._orbits = orbits
        self._orbits.configure(linear_interp_setup=True)

    @property
    def citation(self):
        """Return citations for this class"""
        return katz_citations + marsat_1 + marsat_2

    def _sanity_check_modes(self, ells, mms):
        """Make sure modes are allowed"""
        for ell, mm in zip(ells, mms):
            if (ell, mm) not in self.allowable_modes:
                raise ValueError(
                    "Requested mode [(l,m) = ({},{})] is not available. Allowable modes include {}".format(
                        ell, mm, self.allowable_modes
                    )
                )

    def _initialize_response_container(self):
        """setup response container if needed"""
        self.response_carrier = self.xp.zeros(
            (self.nparams * self.length * self.num_modes * self.num_bin_all),
            dtype=self.xp.float64,
        )

    def _param_slice(self, offset):
        """Return the flat buffer slice for parameter slot ``offset``.

        ``offset`` is counted after the optional leading amplitude slot,
        so the same offsets work whether or not amplitudes are stored.
        """
        start = (self.includes_amps + offset) * self.num_per_param
        return self.response_carrier[start : start + self.num_per_param]

    def _complex_transfer(self, real_offset):
        """Assemble a complex transfer function from two adjacent real slots.

        Slot ``real_offset`` holds the real part and ``real_offset + 1`` the
        imaginary part; both are reshaped to ``(num_bin_all, num_modes, length)``.
        """
        shape = (self.num_bin_all, self.num_modes, self.length)
        return (
            self._param_slice(real_offset).reshape(shape)
            + 1j * self._param_slice(real_offset + 1).reshape(shape)
        )

    @property
    def transferL1(self):
        """TransferL1 term in response. Shape: ``(num_bin_all, num_modes, length)``"""
        return self._complex_transfer(2)

    @property
    def transferL2(self):
        """TransferL2 term in response. Shape: ``(num_bin_all, num_modes, length)``"""
        return self._complex_transfer(4)

    @property
    def transferL3(self):
        """TransferL3 term in response. Shape: ``(num_bin_all, num_modes, length)``"""
        return self._complex_transfer(6)

    @property
    def phase(self):
        """Get updated phase info. Shape: ``(num_bin_all, num_modes, length)``"""
        return self._param_slice(0).reshape(
            self.num_bin_all, self.num_modes, self.length
        )

    @property
    def tf(self):
        """Get tf info. Shape: ``(num_bin_all, num_modes, length)``"""
        return self._param_slice(1).reshape(
            self.num_bin_all, self.num_modes, self.length
        )

    def __call__(
        self,
        freqs,
        inc,
        lam,
        beta,
        psi,
        phi_ref,
        length,
        modes=None,
        phase=None,
        tf=None,
        out_buffer=None,
        adjust_phase=True,
        direct=False,
    ):
        """Evaluate response function

        Args:
            freqs (1D or 2D xp.ndarray): Frequency at which the response is evaluated.
                2D shape is ``(num_bin_all, length)``. If given as a 1D array,
                it should be of length ``num_bin_all * length``.
            inc (scalar or 1D xp.ndarray): Inclination of BBH system in radians.
            lam (scalar or 1D xp.ndarray): Ecliptic longitude in SSB frame in radians.
            beta (scalar or 1D xp.ndarray): Ecliptic latitude in SSB frame in radians.
            psi (scalar or 1D xp.ndarray): Polarization angle of the system in radians.
            phi_ref (scalar or 1D xp.ndarray): Reference phase. **Note**:
                The response function rotates the source by ``phi_ref``. For this reason,
                the main waveform functions (e.g. :class:`bbhx.waveform.BBHWaveformFD`)
                provide ``phi_ref = 0.0`` into the source-frame scaled waveform generators
                (e.g. :class:`bbhx.waveforms.phenomhm.PhenomHMAmpPhase`). This allows
                the reference phase to be applied here in the response.
            length (int): The length of the individual frequency arrays. This is required
                because of the options for putting in 1D arrays into this function.
                The length tells the chunk size in a 1D array.
            modes (list, optional): Harmonic modes to use. If not given, they will
                default to those available in the waveform model PhenomHM:
                ``[(2,2), (3,3), (4,4), (2,1), (3,2), (4,3)]``. (Default: ``None``)
            phase (xp.ndarray, optional): Waveform phase. This is adjusted by the ``phaseRdelay``
                quantity in the code. If more than 1D, the shape should be
                ``(num_bin_all, num_modes, length)``. If 1D, its total length
                should be equivalent to ``num_bin_all * num_modes * length``.
                If ``out_buffer`` is not provided, ``phase`` and ``tf`` are required.
            tf (xp.ndarray, optional): Waveform time-frequency correspondence. This tells the
                response where the LISA constellation is at each frequency.
                If more than 1D, the shape should be
                ``(num_bin_all, num_modes, length)``. If 1D, its total length
                should be equivalent to ``num_bin_all * num_modes * length``.
                If ``out_buffer`` is not provided, ``phase`` and ``tf`` are required.
            out_buffer (xp.ndarray, optional): 1D array initialized to contain all computations
                from the initial waveform and response function. If providing ``out_buffer``,
                the response fills it directly. To make this happen easily in GPU/CPU
                agnostic manner, out_buffer needs to be a 1D array with length
                equivalent to ``nparams * num_bin_all * num_modes * length``.
                ``nparams`` can be 8 if the buffer does not include the amplitudes
                (which are not needed at all for the response computation) or 9
                if it includes the amplitudes. (Default: ``None``)
            adjust_phase (bool, optional): If ``True`` adjust the phase array in-place
                inside the response code. **Note**: This only applies when
                inputing ``phase`` and ``tf``. (Default: ``True``)

        Raises:
            ValueError: Incorrect dimensions for the arrays.

        """

        # to cupy if needed
        inc = self.xp.asarray(self.xp.atleast_1d(inc)).copy()
        lam = self.xp.asarray(self.xp.atleast_1d(lam)).copy()
        beta = self.xp.asarray(self.xp.atleast_1d(beta)).copy()
        psi = self.xp.asarray(self.xp.atleast_1d(psi)).copy()
        phi_ref = self.xp.asarray(self.xp.atleast_1d(phi_ref)).copy()

        # mode setup
        if modes is not None:
            ells = self.xp.asarray([ell for ell, mm in modes], dtype=self.xp.int32)
            mms = self.xp.asarray([mm for ell, mm in modes], dtype=self.xp.int32)
            self._sanity_check_modes(ells, mms)
        else:
            ells = self.ells_default
            mms = self.mms_default

        self.modes = [(ell, mm) for ell, mm in zip(ells, mms)]

        num_modes = len(ells)
        num_bin_all = len(inc)

        # store all info
        self.length = length
        self.num_modes = num_modes
        self.num_bin_all = num_bin_all
        self.num_per_param = length * num_modes * num_bin_all
        self.num_per_bin = length * num_modes

        # number of response-specific parameters
        self.nresponse_params = 6

        if out_buffer is None and phase is None and tf is None:
            raise ValueError("Must provide either out_buffer or both phase and tf.")

        # setup out_buffer based on inputs
        if out_buffer is None:
            includes_amps = 0
            self.nparams = includes_amps + 2 + self.nresponse_params
            self._initialize_response_container()

        else:
            # use other shape information already known to
            # make sure the given out_buffer is the right length
            # and has the right number of parameters (8 or 9 (including amps))
            nparams_empirical = len(out_buffer) / (
                self.num_bin_all * self.num_modes * self.length
            )

            # indicate if there is an integer number of params in the out_buffer
            if np.allclose(nparams_empirical, np.round(nparams_empirical)) and int(
                nparams_empirical
            ) in [8, 9]:
                self.nparams = int(nparams_empirical)

            else:
                raise ValueError(
                    f"out_buffer incorrect length. The length should be equivalent to (8 or 9) * {self.num_bin_all * self.num_modes * self.length}. Given length is {len(out_buffer)}."
                )

            includes_amps = 1 if self.nparams == 9 else 0
            self.response_carrier = out_buffer

        # if amps are included they are in the first slot in the array
        self.includes_amps = includes_amps

        # setup and check frequency dimensions
        self.freqs = freqs
        if self.freqs.ndim > 1:
            if self.freqs.shape != (self.num_bin_all, self.num_modes, self.length):
                raise ValueError(
                    f"freqs have incorrect shape. Shape should be {(self.num_bin_all, self.num_modes, self.length)}. Current shape is {freqs.shape}."
                )
            self.freqs = self.freqs.flatten()

        else:
            if len(freqs) != self.num_bin_all * self.num_modes * self.length:
                raise ValueError(
                    f"freqs incorrect length. The length should be equivalent to {self.num_bin_all * self.num_modes * self.length}. Given length is {len(freqs)}."
                )

        # if using phase/tf
        if phase is not None and tf is not None:
            use_phase_tf = True

            # the orbit data must cover the full tf range unless direct=True
            if not direct and (
                tf.min() < self.orbits.t_base.min()
                or tf.max() > self.orbits.t_base.max()
            ):
                raise ValueError(
                    f"Orbital information does not cover minimum ({tf.min()}) and maximum ({tf.max()}) tf. Orbital information begins at {self.orbits.t_base.min()} and ends at {self.orbits.t_base.max()}."
                )

            if phase.shape != tf.shape:
                raise ValueError(
                    "Shape of phase array and tf array need to be the same shape."
                )

            if phase.ndim > 1:
                if phase.shape != (self.num_bin_all, self.num_modes, self.length):
                    raise ValueError(
                        f"phase have incorrect shape. Shape should be {(self.num_bin_all, self.num_modes, self.length)}. Current shape is {phase.shape}."
                    )

                # will need to write the phase to original array later if adjust_phase == True
                first = phase.copy().flatten()
                second = tf.copy().flatten()

            else:
                if len(phase) != self.num_bin_all * self.num_modes * self.length:
                    raise ValueError(
                        f"phase incorrect length. The length should be equivalent to {self.num_bin_all * self.num_modes * self.length}. Given length is {len(phase)}."
                    )

                # will need to write the phase to original array later if adjust_phase == True
                first = phase
                second = tf

            # fill the phase into the buffer (which is flat)
            self.response_carrier[
                (includes_amps + 0)
                * length
                * num_modes
                * num_bin_all : (includes_amps + 1)
                * length
                * num_modes
                * num_bin_all
            ] = first

            # fill tf in the buffer (which is flat)
            self.response_carrier[
                (includes_amps + 1)
                * length
                * num_modes
                * num_bin_all : (includes_amps + 2)
                * length
                * num_modes
                * num_bin_all
            ] = second

        elif phase is not None or tf is not None:
            raise ValueError("If provided phase or tf, need to provide both.")

        else:
            use_phase_tf = False

            # tf already lives in the provided out_buffer; validate its range.
            # (A leftover debugging breakpoint() call was removed here so the
            # ValueError is raised as intended.)
            if not direct and (
                self.tf.min() < self.orbits.t_base.min()
                or self.tf.max() > self.orbits.t_base.max()
            ):
                raise ValueError(
                    f"Orbital information does not cover minimum ({self.tf.min()}) and maximum ({self.tf.max()}) tf. Orbital information begins at {self.orbits.t_base.min()} and ends at {self.orbits.t_base.max()}."
                )

        # run response code in C/CUDA
        self.response_gen(
            self.response_carrier,
            ells,
            mms,
            self.freqs,
            phi_ref,
            inc,
            lam,
            beta,
            psi,
            self.TDItag_int,
            self.rescaled,
            self.tdi2,
            self.order_fresnel_stencil,
            num_modes,
            length,
            num_bin_all,
            includes_amps,
            self.orbits,
        )

        # adjust input phase arrays in-place
        if use_phase_tf and adjust_phase:
            output = self.response_carrier[
                (includes_amps + 0)
                * length
                * num_modes
                * num_bin_all : (includes_amps + 1)
                * length
                * num_modes
                * num_bin_all
            ]
            if phase.ndim > 1:
                phase[:] = output.reshape(phase.shape)
            else:
                phase[:] = output
|
mikekatz04REPO_NAMEBBHxPATH_START.@BBHx_extracted@BBHx-master@bbhx@response@fastfdresponse.py@.PATH_END.py
|
{
"filename": "plots.py",
"repo_name": "jpcoles/glass",
"repo_path": "glass_extracted/glass-master/glass/plots.py",
"type": "Python"
}
|
from __future__ import division
import pylab as pl
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.lines as mpll
from matplotlib import rc
from matplotlib.ticker import LogLocator
from matplotlib.patches import Circle, Ellipse
from matplotlib.lines import Line2D
from collections import defaultdict
from itertools import count, izip, product
from glass.environment import env
from glass.command import command
from glass.log import log as Log
from glass.scales import convert
from glass.shear import Shear
from glass.utils import dist_range
from scipy.ndimage.filters import correlate1d
from scipy.misc import central_diff_weights
# Render all figure text with LaTeX using Computer Modern Roman.
rc('text', usetex=True)
#rc('text', dvipnghack=True)
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
# Plot styles keyed by model acceptance state; 'z' is the draw order (zorder)
# and 'line' is a proxy artist used for legends.
_styles = [{'label':r'rejected', 'c':'r', 'ls':'-', 'z':-1, 'line':Line2D([],[],c='r',ls='-')},
           {'label':r'accepted', 'c':'b', 'ls':'-', 'z': 0, 'line':Line2D([],[],c='b',ls='-')},
           {'label':r'unknown', 'c':'k', 'ls':'-', 'z':+1, 'line':Line2D([],[],c='k',ls='-')}]
# Repeating color cycles for lens systems and sources.
_system_colors = 'gbmykw'
_source_colors = 'c'
# Map an index to a color, wrapping around the cycle.
def system_color(i): return _system_colors[i%len(_system_colors)]
def source_color(i): return _source_colors[i%len(_source_colors)]
def _style_iterator(colors='gbrcm'):
    """Yield an endless sequence of (linewidth, linestyle, color) triples.

    Cycles through every drawable matplotlib line style crossed with the
    given colors, bumping the line width once all combinations are used.
    """
    # Keep only styles that actually draw something (exclude 'draw nothing').
    _linestyles = [k for k,v, in mpll.lineStyles.iteritems() if not v.endswith('nothing')]
    # Sort for a deterministic ordering across runs.
    _linestyles.sort()
    for lw in count(1):
        for ls in _linestyles:
            for clr in colors:
                yield lw,ls,clr
def style_iterator():
    """Return a style iterator, restricted to black when B/W output is requested."""
    if env().bw_styles:
        return _style_iterator('k')
    else:
        return _style_iterator()
def default_kw(R, kwargs=None):
    """Build imshow/matshow keyword defaults for a square map of half-extent R.

    Entries already present in *kwargs* take precedence over the defaults.
    """
    kw = dict(kwargs) if kwargs else {}
    defaults = [('extent', [-R,R,-R,R]),
                ('interpolation', 'nearest'),
                ('aspect', 'equal'),
                ('origin', 'upper'),
                ('fignum', False),
                ('cmap', cm.bone)]
    for key, value in defaults:
        kw.setdefault(key, value)
    #if vmin is not None: kw['vmin'] = vmin
    #if vmax is not None: kw['vmax'] = vmax
    return kw
def index_to_slice(i):
    """Convert an optional index into a slice: everything for None, else row *i*."""
    if i is None:
        return slice(None)
    return slice(i, i+1)
def glscolorbar():
    """Add a colorbar shrunk to match the aspect of the current subplot."""
    rows,cols,_ = pl.gca().get_geometry()
    x,y = pl.gcf().get_size_inches()
    pars = pl.gcf().subplotpars
    left = pars.left
    right = pars.right
    bottom = pars.bottom
    top = pars.top
    wspace = x*pars.wspace
    hspace = y*pars.hspace
    totWidth = x*(right-left)
    totHeight = y*(top-bottom)
    # Per-subplot height/width in inches; inter-subplot spacing only counts
    # when there is more than one row/column.
    figH = (totHeight-(hspace*(rows>1))) / rows
    figW = (totWidth-(wspace*(cols>1))) / cols
    pl.colorbar(shrink=figW/figH)
@command
def show_plots(env):
    """Display all open matplotlib figures."""
    pl.show()
@command
def img_plot(env, **kwargs): #src_index=None, with_maximum=True, color=None, with_guide=False, tight=False):
    """Scatter-plot the observed image positions of each source of an object.

    Keyword args: obj_index, src_index, tight, with_guide, color, with_maximum.
    """
    obj_index = kwargs.pop('obj_index', 0)
    src_index = kwargs.pop('src_index', None)
    tight = kwargs.pop('tight', False)
    with_guide = kwargs.pop('with_guide', False)
    color = kwargs.pop('color', None)
    with_maximum = kwargs.pop('with_maximum', True)
    #src_index = np.atleast_1d(src_index)
    obj = env.objects[obj_index]
    # Preserve the current axis limits; restore them after plotting.
    oxlim, oylim = pl.xlim(), pl.ylim()
    rmax = 0
    si = style_iterator()
    for i,src in enumerate(obj.sources):
        lw,ls,c = si.next()
        if src_index:
            if i not in np.atleast_1d(src_index): continue
        xs,ys,cs = [], [], []
        for img in src.images:
            #print img.pos
            # Optionally skip images at maxima of the arrival-time surface.
            if not with_maximum and img.parity_name == 'max': continue
            xs.append(img.pos.real)
            ys.append(img.pos.imag)
            if not color:
                # Unknown-parity images are highlighted in red.
                if img.parity_name == 'unk':
                    cs.append('red')
                else:
                    cs.append(c)
            else:
                cs.append(color)
        if xs and ys:
            pl.over(pl.scatter,xs, ys, s=80, c=cs, zorder=1000, alpha=1.0)
            if with_guide or tight:
                a = pl.gca()
                for x,y in zip(xs,ys):
                    r = np.sqrt(x**2 + y**2)
                    rmax = np.amax([r,rmax])
                    if with_guide:
                        # Guide circle through each image radius.
                        a.add_artist(Circle((0,0),r, fill=False,color='lightgrey'))
    pl.xlim(oxlim); pl.ylim(oylim)
    if tight and rmax > 0:
        #rmax *= 1.01
        pl.gca().set_xlim(-rmax, rmax)
        pl.gca().set_ylim(-rmax, rmax)
@command
def external_mass_plot(env, obj_index=0, with_maximum=True, color=None, with_guide=False, tight=False):
    """Scatter-plot the positions of an object's external masses.

    Shear potentials are skipped; only point-like external masses are drawn.
    """
    # Advance the style iterator so the color matches the object's index.
    si = style_iterator()
    for i in xrange(obj_index+1):
        lw,ls,c = si.next()
    obj = env.objects[obj_index]
    # Preserve the current axis limits; restore them after plotting.
    oxlim, oylim = pl.xlim(), pl.ylim()
    rmax = 0
    xs,ys,cs = [], [], []
    for i,m in enumerate(obj.extra_potentials):
        if isinstance(m, Shear): continue
        xs.append(m.r.real)
        ys.append(m.r.imag)
        if not color:
            cs.append(c)
        else:
            cs.append(color)
        rmax = np.amax([np.abs(m.r),rmax])
    if xs and ys:
        pl.over(pl.scatter,xs, ys, s=160, c=cs, zorder=1000, alpha=1.0, marker='s')
    pl.xlim(oxlim); pl.ylim(oylim)
    if tight and rmax > 0:
        # BUGFIX: was `pl.pl.gca().set_pl.xlim(...)` / `set_pl.ylim(...)`,
        # which raised AttributeError; use the same calls as img_plot.
        pl.gca().set_xlim(-rmax, rmax)
        pl.gca().set_ylim(-rmax, rmax)
@command
def Re_plot(env, *args, **kwargs):
    """Draw the effective Einstein-radius ellipse for each model."""
    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', 0)
    models = np.atleast_1d(models)
    # Remaining kwargs are forwarded to the Ellipse artist.
    kwargs.setdefault('color', 'k')
    kwargs.setdefault('fill', False)
    kwargs.setdefault('lw', 2)
    for m in models:
        obj,data = m['obj,data'][obj_index]
        # NOTE(review): debug output left in; consider removing.
        print data.keys()
        if not data['Re']: continue
        # data['Re'] holds (effective radius, semi-axes a and b, orientation).
        Re, a,b, theta = data['Re']
        pl.gca().add_artist(Ellipse((0,0), 2*a,2*b, theta, **kwargs))
@command
def src_plot(env, *args, **kwargs):
    """Scatter-plot the model source positions, optionally highlighting one model."""
    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', 0)
    src_index = kwargs.pop('src_index', None)
    hilite_model = kwargs.pop('hilite_model', None)
    hilite_color = kwargs.pop('hilite_color', 'g')
    # Preserve the current axis limits; restore them after plotting.
    oxlim, oylim = pl.xlim(), pl.ylim()
    def plot(model, si, hilite=False):
        # Plot the source positions of one model.
        obj, data = model
        xs = []
        ys = []
        cs = []
        for i,sys in enumerate(obj.sources):
            if src_index is not None and i != src_index: continue
            xs.append(data['src'][i].real)
            ys.append(data['src'][i].imag)
            lw,ls,c = si.next()
            cs.append(c) #system_color(i))
        if hilite:
            # BUGFIX: was `pl.over(scatter, ...)` with an undefined bare
            # `scatter`; pass pl.scatter as done elsewhere in this module.
            pl.over(pl.scatter,xs, ys, s=80, c=hilite_color, zorder=2000, marker='x', alpha=1.0, **kwargs)
        else:
            pl.scatter(xs, ys, s=80, c=cs, zorder=1000, marker='d', alpha=0.5, facecolor='none', linewidths=1, **kwargs)
    if isinstance(models, dict):
        # A single model was passed in directly.
        si = style_iterator()
        plot(models['obj,data'][obj_index], si)
    else:
        for mi,model in enumerate(models):
            m = model['obj,data'][obj_index]
            si = style_iterator()
            plot(m, si, mi==hilite_model)
    pl.xlim(oxlim); pl.ylim(oylim)
def src_hist(**kwargs):
    """Histogram the radial distances of all model source positions."""
    xlabel = kwargs.get('xlabel', r'$r$ $(\mathrm{arcsec})$')
    ylabel = kwargs.get('ylabel', r'$\mathrm{Count}$')
    models = kwargs.get('models', None)
    hilite_model = kwargs.get('hilite_model', None)
    # NOTE(review): here `env` is the imported callable, so `env.models`
    # is an attribute lookup on a function -- likely meant `env().models`;
    # confirm against the glass.environment API.
    if models is None: models = env.models
    d = []
    hilite=[]
    for mi,model in enumerate(models):
        for [_,data] in model['obj,data']:
            r = list(np.abs(data['src']))
            d += r
            # Remember radii of the highlighted model for vertical markers.
            if mi == hilite_model: hilite += r
    pl.hist(d, histtype='step', log=False)
    for i,r in enumerate(hilite):
        print r
        pl.axvline(r, c=system_color(i), ls='-', zorder=-2, alpha=0.5)
    pl.xlabel(xlabel)
    pl.ylabel(ylabel)
@command
def image_plot(env, im, radius, **kwargs):
    """Display image file *im*, centered and scaled in lens-plane coordinates.

    Remaining keyword args are forwarded to ``pl.imshow``.
    """
    center = kwargs.pop('center', 0)
    format = kwargs.pop('format', None)
    # Broadcast scalar center/radius to (x, y) pairs.
    if np.isscalar(center):
        dx, dy = center, center
    else:
        dx, dy = center
    if np.isscalar(radius):
        Rx, Ry = radius, radius
    else:
        Rx, Ry = radius
    kwargs.setdefault('extent', [-Rx-dx, Rx-dx, -Ry-dy, Ry-dy])
    if format: kwargs.setdefault('format', format)
    pl.imshow(pl.imread(im), **kwargs)
#def kappa_avg_plot(models):
# objs = {}
# for m in models:
# for [obj, data] in m['obj,data']:
# a =
#
# grid
def mass_plot(model, obj_index, with_contours=True, only_contours=False, clevels=30):
    """Deprecated alias for kappa_plot: warn and forward the call.

    NOTE(review): kappa_plot is a @command taking env as its first argument;
    confirm this forwarding still matches its signature.
    """
    Log( "WARNING: use of mass_plot is deprecated. Use kappa_plot instead." )
    return kappa_plot(model, obj_index, with_contours, only_contours, clevels)
@command
def kappa_plot(env, model, obj_index, **kwargs):
    """Render a model's convergence (kappa) map on a log10 scale,
    optionally with contours and a colorbar.
    """
    obj, data = model['obj,data'][obj_index]
    if not data: return
    with_contours = kwargs.pop('with_contours', False)
    only_contours = kwargs.pop('only_contours', False)
    label_contours = kwargs.pop('label_contours', False)
    clevels = kwargs.pop('clevels', 30)
    with_colorbar = kwargs.pop('with_colorbar', True)
    vmin = kwargs.pop('vmin', None)
    vmax = kwargs.pop('vmax', None)
    subtract = kwargs.pop('subtract', 0)
    xlabel = kwargs.pop('xlabel', r'arcsec')
    ylabel = kwargs.pop('ylabel', r'arcsec')
    R = obj.basis.mapextent
    # Remaining kwargs become matshow/contour keyword defaults.
    kw = default_kw(R, kwargs)
    #grid = obj.basis.kappa_grid(data)
    grid = obj.basis._to_grid(data['kappa']-subtract,1)
    if vmin is None:
        # Derive vmin from the smallest non-zero kappa value; if the map is
        # entirely zero, floor it so log10 below stays finite.
        w = data['kappa'] != 0
        if not np.any(w):
            vmin = -15
            grid += 10**vmin
        else:
            vmin = np.log10(np.amin(data['kappa'][w]))
    kw.setdefault('vmin', vmin)
    if vmax is not None:
        kw.setdefault('vmax', vmax)
    # Display in log10 space.
    grid = np.log10(grid.copy()) # + 1e-15)
    if not only_contours:
        pl.matshow(grid, **kw)
        if with_colorbar:
            glscolorbar()
    if only_contours or with_contours:
        kw.setdefault('colors', 'w')
        kw.setdefault('extend', 'both')
        kw.setdefault('alpha', 0.7)
        # contour rejects a simultaneous cmap when colors is given.
        kw.pop('cmap')
        C = pl.contour(grid, clevels, **kw)
        if label_contours:
            pl.clabel(C, inline=1, fontsize=10)
    pl.gca().set_aspect('equal')
    pl.xlabel(xlabel)
    pl.ylabel(ylabel)
@command
def grad_kappa_plot(env, model, obj_index, which='x', with_contours=False, only_contours=False, clevels=30, with_colorbar=True):
    """Plot the finite-difference gradient of the kappa (convergence) grid.

    `which` selects the axis: 'x' differences along columns (axis=1),
    'y' along rows (axis=0).  Color scale is fixed to [-1, 2].
    """
    obj, data = model['obj,data'][obj_index]
    R = obj.basis.mapextent
    grid = obj.basis.kappa_grid(data)
    grid = grid.copy()
    # NOTE(review): other plot commands call default_kw(R, kwargs); the
    # missing second argument here may be an oversight -- confirm.
    kw = default_kw(R)
    kw['vmin'] = -1
    kw['vmax'] = 2
    if not only_contours:
        print '!!!!!!', grid.shape
        if which == 'x': grid = np.diff(grid, axis=1)
        if which == 'y': grid = np.diff(grid, axis=0)
        print '!!!!!!', grid.shape
        pl.matshow(grid, **kw)
        if with_colorbar:
            # NOTE(review): an earlier plot uses glscolorbar(); confirm
            # glspl.colorbar is the intended helper here.
            glspl.colorbar()
    if with_contours:
        # matshow set a cmap; contour with explicit colors rejects it
        kw.pop('cmap')
        pl.over(contour, grid, clevels, extend='both', colors='k', alpha=0.7, **kw)
    pl.xlabel('arcsec')
    pl.ylabel('arcsec')
@command
def potential_plot(env, model, obj_index, src_index, with_colorbar=True, with_contours=False):
    """Show the lensing potential grid of one object, optionally overlaying
    the per-system potential contour levels (one color per system).

    Note: `src_index` is accepted but not used by the visible body.
    """
    obj, data = model['obj,data'][obj_index]
    R = obj.basis.mapextent
    grid = obj.basis.potential_grid(data)
    levs = obj.basis.potential_contour_levels(data)
    pl.matshow(grid, fignum=False, cmap=cm.bone, extent=[-R,R,-R,R], interpolation='nearest')
    if with_colorbar: glspl.colorbar()
    if with_contours:
        # one contour set per image system, colored consistently
        for i,lev in enumerate(levs):
            pl.over(contour, grid, lev, colors = system_color(i),
                extent=[-R,R,-R,R], origin='upper', extend='both')
    pl.xlabel('arcsec')
    pl.ylabel('arcsec')
@command
def critical_curve_plot(env, model, obj_index, src_index):
    """Show the inverse-magnification grid for one source and overlay its
    zero contour (the critical curve) in green."""
    obj, data = model['obj,data'][obj_index]
    R = obj.basis.mapextent
    g = obj.basis.maginv_grid(data)[src_index]
    pl.matshow(g, fignum=False, cmap=cm.bone, extent=[-R,R,-R,R], interpolation='nearest')
    # the [0] level set of 1/mu is the critical curve
    pl.over(contour, g, [0], colors='g', linewidths=1, extent=[-R,R,-R,R], origin='upper')
@command
def arrival_plot(env, model, **kwargs):
    """Contour the arrival-time surface for the selected object(s)/source(s).

    Keyword arguments: obj_index, src_index (None means all), only_contours,
    clevels (number/levels of background contours), with_colorbar,
    xlabel/ylabel.  Remaining kwargs are forwarded to the plot calls.
    """
    obj_index = kwargs.pop('obj_index', None)
    src_index = kwargs.pop('src_index', None)
    only_contours = kwargs.pop('only_contours', None)
    clevels = kwargs.pop('clevels', None)
    with_colorbar = kwargs.pop('with_colorbar', False)
    xlabel = kwargs.pop('xlabel', r'arcsec')
    ylabel = kwargs.pop('ylabel', r'arcsec')
    # NOTE(review): this assignment is immediately overwritten by the
    # index_to_slice() call below -- dead code, kept for fidelity.
    obj_slice = slice(None) if obj_index is None else obj_index
    obj_slice = index_to_slice(obj_index)
    src_slice = index_to_slice(src_index)
    def plot_one(obj,data,src_index,g,lev,kw):
        # Helper: draw one source's arrival surface (image + contours).
        # Use solid lines for negative contour levels.
        matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
        if not only_contours:
            kw.update({'zorder':-100})
            pl.matshow(np.log10(g), **kw)
            if with_colorbar: glspl.colorbar()
            if 'cmap' in kw: kw.pop('cmap')
        if clevels:
            loglev=clevels
            kw.update({'zorder':-1000})
            pl.contour(g, loglev, **kw)
        if lev:
            # saddle-point levels drawn on top in thick black
            kw.update({'zorder':1000})
            kw.update({'colors': 'k', 'linewidths':2, 'cmap':None})
            pl.contour(g, lev, **kw)
    for i,[obj,data] in enumerate(model['obj,data'][obj_slice]):
        if not data: continue
        print len(obj.sources[src_slice])
        lev = obj.basis.arrival_contour_levels(data)
        print len(lev)
        arrival_grid = obj.basis.arrival_grid(data)
        for i,src in enumerate(obj.sources[src_slice]):
            # NOTE(review): `levels` is only assigned when `lev` is truthy;
            # when it is empty/falsy this passes a stale or unbound value
            # to plot_one -- confirm intended.
            if lev: levels = lev[src.index]
            g = arrival_grid[src.index]
            S = obj.basis.subdivision
            R = obj.basis.mapextent
            kw = default_kw(R, kwargs)
            kw.update(kwargs)
            kw.setdefault('colors', 'grey')
            kw.setdefault('linewidths', 1)
            kw.setdefault('cmap', None)
            plot_one(obj,data,src.index,g,levels,kw)
    pl.xlim(-obj.basis.mapextent, obj.basis.mapextent)
    pl.ylim(-obj.basis.mapextent, obj.basis.mapextent)
    pl.gca().set_aspect('equal')
    pl.xlabel(xlabel)
    pl.ylabel(ylabel)
@command
def srcdiff_plot(env, model, **kwargs):
    """Show log10 of the source-plane difference grid for one source."""
    obj_index = kwargs.pop('obj_index', 0)
    src_index = kwargs.pop('src_index', 0)
    with_colorbar = kwargs.pop('with_colorbar', False)
    xlabel = kwargs.pop('xlabel', r'arcsec')
    ylabel = kwargs.pop('ylabel', r'arcsec')
    obj, data = model['obj,data'][obj_index]
    S = obj.basis.subdivision
    R = obj.basis.mapextent
    g = obj.basis.srcdiff_grid(data)[src_index]
    # NOTE(review): vmin is computed but no longer passed to default_kw
    # (see the commented call) -- currently unused.
    vmin = np.log10(np.amin(g[g>0]))
    g = g.copy() + 1e-10  # offset so log10 of zero cells is finite
    kw = default_kw(R, kwargs) #, vmin=vmin, vmax=vmin+2)
    pl.matshow(np.log10(g), **kw)
    matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
    if with_colorbar: glspl.colorbar()
    pl.xlabel(xlabel)
    pl.ylabel(ylabel)
@command
def deflect_plot(env, model, obj_index, which, src_index):
    """Show one component (`which`) of the deflection-angle grid for a
    source."""
    obj, data = model['obj,data'][obj_index]
    S = obj.basis.subdivision
    R = obj.basis.mapextent
    g = obj.basis.deflect_grid(data, which, src_index)
    g = g.copy() + 1e-10  # small offset; see srcdiff_plot
    # NOTE(review): default_kw is called with keyword vmin/vmax here rather
    # than a kwargs dict as elsewhere -- confirm the helper supports both.
    kw = default_kw(R, vmin=None, vmax=None)
    pl.matshow(g, **kw)
@command
def grad_tau(env, model, obj_index, which, src_index):
    """Plot the sign of the x- or y-derivative of the arrival-time surface.

    Values with |d| < 1e-3 are zeroed; the rest are mapped to +/-1.
    """
    assert which in ['x','y'], "grad_tau: 'which' must be one of 'x' or 'y'"
    obj,ps = model['obj,data'][obj_index]
    R = obj.basis.mapextent
    #---------------------------------------------------------------------------
    # Find the derivative of the arrival time surface.
    #---------------------------------------------------------------------------
    arrival = obj.basis.arrival_grid(ps)[src_index]
    w = central_diff_weights(3)
    # axis 1 varies x (columns), axis 0 varies y (rows)
    which = 1 if which == 'x' else 0
    d = correlate1d(arrival, w, axis=which, mode='constant')
    d = d[1:-1,1:-1]           # drop the border, invalid for the 3-pt stencil
    d[np.abs(d) < 1e-3] = 0    # threshold small derivatives ...
    d[d>0] = 1                 # ... then keep only the sign
    d[d<0] = -1
    pl.matshow(d, fignum=False, extent=[-R,R,-R,R], alpha=0.5)
@command
def deriv(env, model, obj_index, src_index, m, axis, R):
    """Plot a clipped second-difference map of grid `m` with arrival-time
    contours overlaid.

    Note: the `axis` parameter is currently unused -- both axes are always
    combined (see the commented-out single-axis variants).
    """
    w = central_diff_weights(5)
    # combined first differences along both axes ...
    d = (correlate1d(m, -w, axis=0, mode='constant')) \
      + (correlate1d(m, w, axis=1, mode='constant'))
    # ... applied twice, giving a second-derivative-like quantity
    d = (correlate1d(d, -w, axis=0, mode='constant')) \
      + (correlate1d(d, w, axis=1, mode='constant'))
    d = d[2:-2,2:-2]   # drop the border, invalid for the 5-pt stencil
    d[d>.8] = .8       # clip for display
    d[d<-.8] = -.8
    # shrink the extent to match the trimmed grid
    R -= model[0].basis.top_level_cell_size * 2
    pl.matshow(d, extent=[-R,R,-R,R])
    glspl.colorbar()
    # NOTE(review): arrival_plot is defined as (env, model, **kwargs); this
    # positional call only works if the @command decorator rewrites the
    # calling convention -- confirm.
    arrival_plot(model, obj_index, src_index, only_contours=True, clevels=200)
@command
def inout_plot(env, model, obj_index, src_index):
    """Plot arrival-surface derivative maps along both axes (see deriv)."""
    print "inout"
    obj,ps = model['obj,data'][obj_index]
    R = obj.basis.mapextent
    arrival = obj.basis.arrival_grid(ps)[src_index]
    # NOTE(review): deriv() declares a leading `env` parameter that is not
    # supplied here -- confirm the @command decorator compensates.
    deriv(model, obj_index, src_index, arrival, 0, R)
    deriv(model, obj_index, src_index, arrival, 1, R)
def _find_key(xs, key):
if hasattr(key, '__iter__'):
for k in key[:-1]:
xs = xs[k]
key = key[-1]
return xs[key]
def _data_plot(models, X,Y, **kwargs):
    """Plot property Y against property X for every model.

    X and Y are either bare property names or [name, units] pairs.
    Models are partitioned by their 'accepted' flag into rejected /
    accepted / untagged groups and drawn per group; image positions are
    marked with vertical lines when `mark_images` is set.

    Returns a list of [xs, ys] array pairs, one per (object, tag) drawn.
    """
    with_legend = False
    use = [0,0,0]
    if isinstance(X, basestring): X = [X,None]
    if isinstance(Y, basestring): Y = [Y,None]
    x_prop, x_units = X
    y_prop, y_units = Y
    ret_list = []
    every = kwargs.pop('every', 1)
    upto = kwargs.pop('upto', len(models))
    mark_images = kwargs.pop('mark_images', True)
    hilite_model = kwargs.pop('hilite_model', None)
    hilite_color = kwargs.pop('hilite_color', 'm')
    yscale = kwargs.pop('yscale', 'log')
    xscale = kwargs.pop('xscale', 'linear')
    xlabel = kwargs.pop('xlabel', None)
    ylabel = kwargs.pop('ylabel', None)
    kwargs.setdefault('color', 'k')
    kwargs.setdefault('marker', '.')
    kwargs.setdefault('ls', '-')
    normal_kw = {'zorder':0, 'drawstyle':'steps', 'alpha':1.0}
    hilite_kw = {'zorder':1000, 'drawstyle':'steps', 'alpha':1.0, 'lw':4, 'ls':'--'}
    accepted_kw = {'zorder':500, 'drawstyle':'steps', 'alpha':0.5}
    normal = []
    hilite = []
    accepted = []
    imgs = defaultdict(set)
    xmin, xmax = np.inf, -np.inf
    ymin, ymax = np.inf, -np.inf
    objplot = defaultdict(dict)
    for mi in xrange(0,upto,every):
        m = models[mi]
        si = m.get('accepted', 2)
        tag = ''
        if si==False: tag = 'rejected'
        if si==True: tag = 'accepted'
        for [obj, data] in m['obj,data']:
            try:
                xs = data[x_prop][x_units]
                ys = data[y_prop][y_units]
                # BUG FIX: these previously read `... if not xlabel else
                # None`, which cleared any label that was already set
                # (user-supplied or from a previous iteration).  Now
                # consistent with _data_error_plot.
                xlabel = _axis_label(xs, x_units) if not xlabel else xlabel
                ylabel = _axis_label(ys, y_units) if not ylabel else ylabel
                objplot[obj].setdefault(tag, {'ys':[], 'xs':None})
                objplot[obj][tag]['ys'].append(ys)
                objplot[obj][tag]['xs'] = xs
                if mark_images:
                    for i,src in enumerate(obj.sources):
                        for img in src.images:
                            imgs[i].add(convert('arcsec to %s' % x_units, np.abs(img.pos), obj.dL, data['nu']))
            except KeyError as bad_key:
                Log( "Missing information for object %s with key %s. Skipping plot." % (obj.name,bad_key) )
                continue
            use[si] = 1
            s = _styles[si]
    # Draw per-tag curves; rejected models get the dedicated style.
    for i,tag in enumerate(['rejected', 'accepted', '']):
        for k,v in objplot.iteritems():
            if tag not in v: break
            ys = np.array(v[tag]['ys'])
            xs = np.repeat(np.atleast_2d(v[tag]['xs']), len(ys), axis=0)
            ret_list.append([xs,ys])
            if tag == 'rejected':
                pl.plot(xs, ys, c=_styles[0]['c'], zorder=_styles[0]['z'])
            else:
                pl.plot(xs.T, ys.T, **kwargs)
    pl.yscale(yscale)
    pl.xscale(xscale)
    # Mark image positions with per-system styled vertical lines.
    si = style_iterator()
    for k,v in imgs.iteritems():
        lw,ls,c = si.next()
        for img_pos in v:
            pl.axvline(img_pos, c=c, ls=ls, lw=lw, zorder=-2, alpha=0.5)
    if use[0]:
        lines = [ _styles[0]['line'] ]
        labels = [ _styles[0]['label'] ]
        pl.legend(lines, labels)
    if xlabel: pl.xlabel(xlabel)
    if ylabel: pl.ylabel(ylabel)
    # leave a 1% margin on the left
    pl.xlim(xmin=pl.xlim()[0] - 0.01*(pl.xlim()[1] - pl.xlim()[0]))
    return ret_list
def _axis_label(data, units):
label = '%s' % data.symbol
if units is not None: label += ' (%s)' % data.label(units)
return label
def _data_error_plot(models, X,Y, **kwargs):
    """Like _data_plot(), but draws the per-object distribution of Y as a
    central value with asymmetric error bars (via dist_range) instead of
    one curve per model.

    Returns a list of [xs, avg, errm, errp] entries, one per
    (object, tag) drawn.
    """
    with_legend = False
    use = [0,0,0]
    # Allow bare property names as well as [name, units] pairs.
    if isinstance(X, basestring): X = [X,None]
    if isinstance(Y, basestring): Y = [Y,None]
    x_prop, x_units = X
    y_prop, y_units = Y
    ret_list = []
    every = kwargs.pop('every', 1)
    upto = kwargs.pop('upto', len(models))
    mark_images = kwargs.pop('mark_images', True)
    hilite_model = kwargs.pop('hilite_model', None)
    hilite_color = kwargs.pop('hilite_color', 'm')
    yscale = kwargs.pop('yscale', 'log')
    xscale = kwargs.pop('xscale', 'linear')
    xlabel = kwargs.pop('xlabel', None)
    ylabel = kwargs.pop('ylabel', None)
    sigma = kwargs.pop('sigma', '1sigma')
    kwargs.setdefault('color', 'k')
    kwargs.setdefault('marker', '.')
    kwargs.setdefault('ls', '-')
    normal_kw = {'zorder':0, 'drawstyle':'steps', 'alpha':1.0}
    hilite_kw = {'zorder':1000, 'drawstyle':'steps', 'alpha':1.0, 'lw':4, 'ls':'--'}
    accepted_kw = {'zorder':500, 'drawstyle':'steps', 'alpha':0.5}
    normal = []
    hilite = []
    accepted = []
    imgs = defaultdict(set)
    xmin, xmax = np.inf, -np.inf
    ymin, ymax = np.inf, -np.inf
    objplot = defaultdict(dict)
    # Gather per-object, per-tag (rejected/accepted/untagged) samples.
    for mi in xrange(0,upto,every):
        m = models[mi]
        si = m.get('accepted', 2)
        tag = ''
        if si==False: tag = 'rejected'
        if si==True: tag = 'accepted'
        for [obj, data] in m['obj,data']:
            try:
                xs = data[x_prop][x_units]
                ys = data[y_prop][y_units]
                xlabel = _axis_label(xs, x_units) if not xlabel else xlabel
                ylabel = _axis_label(ys, y_units) if not ylabel else ylabel
                objplot[obj].setdefault(tag, {'ys':[], 'xs':None})
                objplot[obj][tag]['ys'].append(ys)
                objplot[obj][tag]['xs'] = xs
                if mark_images:
                    for i,src in enumerate(obj.sources):
                        for img in src.images:
                            imgs[i].add(convert('arcsec to %s' % x_units, np.abs(img.pos), obj.dL, data['nu']))
            except KeyError as bad_key:
                Log( "Missing information for object %s with key %s. Skipping plot." % (obj.name,bad_key) )
                continue
            use[si] = 1
            s = _styles[si]
    for i,tag in enumerate(['rejected', 'accepted', '']):
        for k,v in objplot.iteritems():
            if tag not in v: break
            # Central value and sigma interval; convert the interval ends
            # into +/- offsets for errorbar().
            avg, errp, errm = dist_range(v[tag]['ys'], sigma=sigma)
            errp = errp - avg
            errm = avg - errm
            xs = v[tag]['xs']
            ret_list.append([xs,avg,errm,errp])
            # Omit bars entirely when the interval collapses.
            yerr = (errm,errp) if not np.all(errm == errp) else None
            if tag == 'rejected':
                pl.errorbar(xs, avg, yerr=yerr, c=_styles[0]['c'], zorder=_styles[0]['z'])
            else:
                pl.errorbar(xs, avg, yerr=yerr, **kwargs)
    pl.xscale(xscale)
    pl.yscale(yscale)
    # Mark image positions with per-system styled vertical lines.
    si = style_iterator()
    for k,v in imgs.iteritems():
        lw,ls,c = si.next()
        for img_pos in v:
            pl.axvline(img_pos, c=c, ls=ls, lw=lw, zorder=-2, alpha=0.5)
    if use[0]:
        lines = [ _styles[0]['line'] ]
        labels = [ _styles[0]['label'] ]
        pl.legend(lines, labels)
    if xlabel: pl.xlabel(xlabel)
    if ylabel: pl.ylabel(ylabel)
    # leave a 1% margin on the left
    pl.xlim(xmin=pl.xlim()[0] - 0.01*(pl.xlim()[1] - pl.xlim()[0]))
    return ret_list
@command
def glplot(env, ptype, xkeys, ykeys=[], **kwargs):
    """Plot `ykeys` against `xkeys` over all models (see _data_plot).

    Returns the list of plotted [xs, ys] pairs.
    """
    if not ykeys: ykeys = ptype
    models = kwargs.pop('models', env.models)
    # FIX: previously the result of _data_plot() was discarded while the
    # parallel glerrorplot() returned its result; return it for consistency.
    return _data_plot(models, xkeys, ykeys, **kwargs)
@command
def glerrorplot(env, ptype, xkeys, ykeys=[], **kwargs):
    """Plot `ykeys` against `xkeys` with error bars (see _data_error_plot).

    Returns the list of [xs, avg, errm, errp] entries.
    """
    if not ykeys: ykeys = ptype
    models = kwargs.pop('models', env.models)
    return _data_error_plot(models, xkeys, ykeys, **kwargs)
@command
def H0inv_plot(env, **kwargs):
    """Histogram the distribution of 1/H0 (in Gyr) over all models.

    Thin wrapper around _hist(); accepts the same keyword arguments
    (models, obj_index, key, label, color, sigma, ...).
    """
    # NOTE(review): the large body that used to follow an unconditional
    # `return` here was unreachable (and contained float-indexing bugs);
    # it has been removed.
    _hist(env, '1/H0', xlabel=r'$H_0^{-1}$ (Gyr)', **kwargs)
# Shared x-axis label for H0 histograms.
_H0_xlabel = r'$H_0$ (km/s/Mpc)'

@command
def H0_plot(env, **kwargs):
    """Histogram the distribution of H0 (km/s/Mpc) over all models.

    Thin wrapper around _hist(); accepts the same keyword arguments
    (models, obj_index, key, label, color, sigma, ...).
    """
    # NOTE(review): the large body that used to follow an unconditional
    # `return` here was unreachable and has been removed.
    _hist(env, 'H0', xlabel=_H0_xlabel, **kwargs)
# x-axis label for time-delay histograms
_time_delays_xlabel = r'Time delay (days)'
@command
def time_delays_plot(env, **kwargs):
    """Histogram the time delays between consecutive images of one source.

    Delays are converted from arcsec^2 to days and binned per image pair;
    each pair gets a distinct line width/style.
    """
    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', 0)
    src_index = kwargs.pop('src_index', 0)
    key = kwargs.pop('key', 'accepted')
    d = defaultdict(list)
    for m in models:
        obj,data = m['obj,data'][obj_index]
        t0 = data['arrival times'][src_index][0]
        # consecutive differences: delay of image i+1 relative to image i
        for i,t in enumerate(data['arrival times'][src_index][1:]):
            d[i].append( float('%0.6f'%convert('arcsec^2 to days', t-t0, obj.dL, obj.z, data['nu'])) )
            t0 = t
    # cycle line width x style so each image pair is distinguishable
    s = product(range(1,1+len(d)), ['solid', 'dashed', 'dashdot', 'dotted'])
    for k,v in d.iteritems():
        lw,ls = s.next()
        pl.hist(v, bins=25, histtype='step', color='k', ls=ls, lw=lw, label='%s - %s' % (str(k+1),str(k+2)), **kwargs)
    pl.ylim(ymin=0)
    pl.xlim(xmin=pl.xlim()[0] - 0.01*(pl.xlim()[1] - pl.xlim()[0]))
    pl.legend()
    pl.xlabel(_time_delays_xlabel)
    pl.ylabel(r'Count')
# x-axis label for scale-factor histograms.
_scale_factor_xlabel = r'Scale factor'

@command
def scale_factor_plot(env, **kwargs):
    """Histogram the sigp scale factor over all models (see _hist)."""
    # NOTE(review): the body that used to follow an unconditional `return`
    # here was unreachable and has been removed.  Keyword arguments, which
    # were previously accepted but silently dropped, are now forwarded.
    _hist(env, 'sigp:scale-factor', xlabel=_scale_factor_xlabel, **kwargs)
# x-axis label for chi-squared histograms.
_chisq_xlabel = r'$\chi^2$'

@command
def chisq_plot(env, **kwargs):
    """Histogram the sigp chi-squared over all models (see _hist)."""
    # NOTE(review): the body that used to follow an unconditional `return`
    # here was unreachable and has been removed.  Keyword arguments, which
    # were previously accepted but silently dropped, are now forwarded.
    _hist(env, 'sigp:chisq', xlabel=_chisq_xlabel, **kwargs)
@command
def shear_plot(env, **kwargs):
    """Histogram the two external-shear components over all models.

    Shear values are collected per object, but only the first object's
    components are actually plotted.
    """
    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', None)
    src_index = kwargs.pop('src_index', None)
    key = kwargs.pop('key', 'accepted')
    obj_slice = index_to_slice(obj_index)
    src_slice = index_to_slice(src_index)
    s0 = [ [] for o in env.objects ]  # first shear component, per object
    s1 = [ [] for o in env.objects ]  # second shear component, per object
    for mi,m in enumerate(models):
        for oi, [obj,data] in enumerate(m['obj,data'][obj_slice]):
            # models without a shear entry are skipped
            if not data.has_key('shear'): continue
            s0[oi].append(data['shear'][0])
            s1[oi].append(data['shear'][1])
    # NOTE(review): only object 0 is plotted even when obj_slice selects
    # several objects -- confirm intended.
    if s0[0]: pl.hist(s0[0], histtype='step', **kwargs)
    if s1[0]: pl.hist(s1[0], histtype='step', **kwargs)
@command
def shear_plot2d(env, **kwargs):
    """2-D histogram of the two external-shear components over all models.

    Like shear_plot(), but draws a joint (eps1, eps2) histogram for the
    first object.
    """
    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', None)
    src_index = kwargs.pop('src_index', None)
    key = kwargs.pop('key', 'accepted')
    obj_slice = index_to_slice(obj_index)
    src_slice = index_to_slice(src_index)
    s0 = [ [] for o in env.objects ]  # first shear component, per object
    s1 = [ [] for o in env.objects ]  # second shear component, per object
    for mi,m in enumerate(models):
        for oi, [obj,data] in enumerate(m['obj,data'][obj_slice]):
            # models without a shear entry are skipped
            if not data.has_key('shear'): continue
            s0[oi].append(data['shear'][0])
            s1[oi].append(data['shear'][1])
    kw = kwargs.copy()
    # odd bin count, between 11 and 15, scaled with sample size
    kw.setdefault('bins', max(11, min(15,(int(np.sqrt(max(len(s0[0]),len(s1[0])))//2)) * 2 + 1)))
    if s0[0] and s1[0]:
        pl.hist2d(s0[0],s1[0], **kw)
        pl.title(r'Shear')
        pl.xlabel(r'$\varepsilon_1$')
        pl.ylabel(r'$\varepsilon_2$')
# x-axis label for log chi-squared histograms
_chi2_xlabel = r'$\ln \chi^2$'
@command
def chi2_plot(env, models, model0, **kwargs):
    """Histogram ln(chi^2) of each model's surface-mass density against a
    reference model `model0`."""
    v = []
    n_chi2 = 0
    d_chi2 = 0
    for m in models:
        # NOTE(review): `total_chi2` is never used, and n_chi2/d_chi2 are
        # not reset per model, so each entry of `v` is a *cumulative* ratio
        # over all models seen so far -- confirm this is intended.
        total_chi2 = 0
        for m1,m2 in izip(m['obj,data'], model0['obj,data']):
            obj,data = m1
            obj0,data0 = m2
            # convert kappa to physical surface density before comparing
            mass0 = data0['kappa'] * convert('kappa to Msun/arcsec^2', 1, obj0.dL, data0['nu'])
            mass1 = data['kappa'] * convert('kappa to Msun/arcsec^2', 1, obj.dL, data['nu'])
            n_chi2 += np.sum((mass1 - mass0)**2)
            d_chi2 += np.sum(mass0**2)
        v.append(np.log(n_chi2/d_chi2))
    pl.hist(v, histtype='step', log=False, **kwargs)
    pl.xlabel(_chi2_xlabel)
    pl.ylabel(r'Count')
@command
def glhist(env, data_key, **kwargs):
    """Histogram the distribution of `data_key` over all models (see _hist)."""
    _hist(env, data_key, **kwargs)
def _hist(env, data_key, **kwargs):
    """Histogram `data_key` over all models, split by acceptance status.

    Models are partitioned by their `key` (default 'accepted') flag into
    rejected / accepted / untagged groups, each drawn with its own style.
    When `mark_sigma` is set, the central value and sigma interval (from
    dist_range) of the accepted (or untagged) sample are marked with
    vertical lines and logged.
    """
    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', 0)
    key = kwargs.pop('key', 'accepted')
    label = kwargs.pop('label', None)
    color = kwargs.pop('color', None)
    xlabel = kwargs.pop('xlabel', data_key)
    ylabel = kwargs.pop('ylabel', r'Count')
    sigma = kwargs.pop('sigma', '1sigma')
    mark_sigma = kwargs.pop('mark_sigma', True)
    # l[0]: rejected, l[1]: accepted, l[2]: untagged
    l = [[], [], []]
    for m in models:
        obj, data = m['obj,data'][obj_index]
        if data.has_key(data_key):
            l[m.get(key,2)].append(data[data_key])
    not_accepted, accepted, notag = l
    for d,s in zip(l, _styles):
        kw = kwargs.copy()
        if d:
            # default: one bin per unit of data range
            kw.setdefault('bins', int(np.ptp(d)//1)+1)
            kw.setdefault('histtype', 'step')
            pl.hist(d,
                edgecolor=s['c'] if color is None else color,
                zorder=s['z'],
                label=s['label'] if label is None else label,
                **kw)
    if not_accepted or label:
        pl.legend()
    if mark_sigma:
        if accepted or notag:
            # prefer explicitly-accepted models over untagged ones
            if accepted:
                h = np.array(accepted)
            else:
                h = np.array(notag)
            m,u,l = dist_range(h, sigma=sigma)
            pl.axvline(m, c='r', ls='-', zorder = 2)  # central value
            pl.axvline(u, c='g', ls='-', zorder = 2)  # upper bound
            pl.axvline(l, c='g', ls='-', zorder = 2)  # lower bound
            Log( '%s: %f %f %f' % (data_key, m, u, l) )
            Log( '%s: %f +/- %f %f' % (data_key, m, (u-m), (m-l)) )
        else:
            Log( "%s: No H0 values accepted" % data_key )
    pl.xlabel(xlabel)
    pl.ylabel(ylabel)
    # leave a 1% margin on the top and right
    pl.xlim(xmax=pl.xlim()[1] + 0.01*(pl.xlim()[1] - pl.xlim()[0]))
    pl.ylim(ymax=pl.ylim()[1] + 0.01*(pl.ylim()[1] - pl.ylim()[0]))
|
jpcolesREPO_NAMEglassPATH_START.@glass_extracted@glass-master@glass@plots.py@.PATH_END.py
|
{
"filename": "test_indexing.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.py",
"type": "Python"
}
|
from __future__ import division, absolute_import, print_function
import sys
import warnings
import functools
import operator
import numpy as np
from numpy.core.multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_warns, dec, HAS_REFCOUNT, suppress_warnings,
)
# Load the compiled multiarray extension through ctypes so low-level
# indexing entry points can be exercised directly.  On debug builds
# (sys.gettotalrefcount exists) prefer the '_d'-suffixed library, falling
# back to the regular one.  Tests that need it check _HAS_CTYPE.
try:
    cdll = None
    if hasattr(sys, 'gettotalrefcount'):
        try:
            cdll = np.ctypeslib.load_library('multiarray_d', np.core.multiarray.__file__)
        except OSError:
            pass
    if cdll is None:
        cdll = np.ctypeslib.load_library('multiarray', np.core.multiarray.__file__)
    _HAS_CTYPE = True
except ImportError:
    _HAS_CTYPE = False
class TestIndexing(object):
def test_index_no_floats(self):
a = np.array([[[5]]])
assert_raises(IndexError, lambda: a[0.0])
assert_raises(IndexError, lambda: a[0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0])
assert_raises(IndexError, lambda: a[0.0,:])
assert_raises(IndexError, lambda: a[:, 0.0])
assert_raises(IndexError, lambda: a[:, 0.0,:])
assert_raises(IndexError, lambda: a[0.0,:,:])
assert_raises(IndexError, lambda: a[0, 0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0, 0])
assert_raises(IndexError, lambda: a[0, 0.0, 0])
assert_raises(IndexError, lambda: a[-1.4])
assert_raises(IndexError, lambda: a[0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0])
assert_raises(IndexError, lambda: a[-1.4,:])
assert_raises(IndexError, lambda: a[:, -1.4])
assert_raises(IndexError, lambda: a[:, -1.4,:])
assert_raises(IndexError, lambda: a[-1.4,:,:])
assert_raises(IndexError, lambda: a[0, 0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0, 0])
assert_raises(IndexError, lambda: a[0, -1.4, 0])
assert_raises(IndexError, lambda: a[0.0:, 0.0])
assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
def test_slicing_no_floats(self):
a = np.array([[5]])
# start as float.
assert_raises(TypeError, lambda: a[0.0:])
assert_raises(TypeError, lambda: a[0:, 0.0:2])
assert_raises(TypeError, lambda: a[0.0::2, :0])
assert_raises(TypeError, lambda: a[0.0:1:2,:])
assert_raises(TypeError, lambda: a[:, 0.0:])
# stop as float.
assert_raises(TypeError, lambda: a[:0.0])
assert_raises(TypeError, lambda: a[:0, 1:2.0])
assert_raises(TypeError, lambda: a[:0.0:2, :0])
assert_raises(TypeError, lambda: a[:0.0,:])
assert_raises(TypeError, lambda: a[:, 0:4.0:2])
# step as float.
assert_raises(TypeError, lambda: a[::1.0])
assert_raises(TypeError, lambda: a[0:, :2:2.0])
assert_raises(TypeError, lambda: a[1::4.0, :0])
assert_raises(TypeError, lambda: a[::5.0,:])
assert_raises(TypeError, lambda: a[:, 0:4:2.0])
# mixed.
assert_raises(TypeError, lambda: a[1.0:2:2.0])
assert_raises(TypeError, lambda: a[1.0::2.0])
assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
# should still get the DeprecationWarning if step = 0.
assert_raises(TypeError, lambda: a[::0.0])
def test_index_no_array_to_index(self):
# No non-scalar arrays.
a = np.array([[[1]]])
assert_raises(TypeError, lambda: a[a:a:a])
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].base is a)
a = np.array(0)
assert_(isinstance(a[()], np.int_))
def test_void_scalar_empty_tuple(self):
s = np.zeros((), dtype='V4')
assert_equal(s[()].dtype, s.dtype)
assert_equal(s[()], s)
assert_equal(type(s[...]), np.ndarray)
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uintp)
arr = np.arange(10)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)
assert_array_equal(arr, np.arange(10))
arr = np.arange(10).reshape(5, 2)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)[:,None]
assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
arr = np.arange(25).reshape(5, 5)
assert_array_equal(arr[u_index, u_index], arr[index, index])
def test_empty_fancy_index(self):
# Empty list index creates an empty array
# with the same dtype (but with weird shape)
a = np.array([1, 2, 3])
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([], dtype=np.intp)
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([])
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].base is a)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
assert_equal(a[0, ...], a[0])
assert_equal(a[0, ...], a[0,:])
assert_equal(a[..., 0], a[:, 0])
# Slicing with ellipsis always results
# in an array, not a scalar
assert_equal(a[0, ..., 1], np.array(2))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = np.array(1)
b[(Ellipsis,)] = 2
assert_equal(b, 2)
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[0], [1, 2, 3])
assert_equal(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 64)
def test_single_bool_index(self):
# Single boolean index
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[np.array(True)], a[None])
assert_equal(a[np.array(False)], a[None][0:0])
def test_boolean_shape_mismatch(self):
arr = np.ones((5, 4, 3))
index = np.array([True])
assert_raises(IndexError, arr.__getitem__, index)
index = np.array([False] * 6)
assert_raises(IndexError, arr.__getitem__, index)
index = np.zeros((4, 4), dtype=bool)
assert_raises(IndexError, arr.__getitem__, index)
assert_raises(IndexError, arr.__getitem__, (slice(None), index))
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
# boolean array of length one
a = np.array([[ 0., 0., 0.]])
b = np.array([ True], dtype=bool)
assert_equal(a[b], a)
# boolean assignment
a[b] = 1.
assert_equal(a, [[1., 1., 1.]])
def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
a = np.arange(4)
def f(a, v):
a[a > -1] = v
assert_raises(ValueError, f, a, [])
assert_raises(ValueError, f, a, [1, 2, 3])
assert_raises(ValueError, f, a[:1], [1, 2, 3])
def test_boolean_assignment_needs_api(self):
# See also gh-7666
# This caused a segfault on Python 2 due to the GIL not being
# held when the iterator does not need it, but the transfer function
# does
arr = np.zeros(1000)
indx = np.zeros(1000, dtype=bool)
indx[:100] = True
arr[indx] = np.ones(100, dtype=object)
expected = np.zeros(1000)
expected[:100] = 1
assert_array_equal(arr, expected)
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = np.array([[ True, False, True],
[False, True, False],
[ True, False, True]])
assert_equal(a[b], [1, 3, 5, 7, 9])
assert_equal(a[b[1]], [[4, 5, 6]])
assert_equal(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
assert_equal(a, [[0, 2, 0],
[4, 0, 6],
[0, 8, 0]])
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
a = np.ones(5)
b = np.zeros(5, dtype=np.intp)[::-1]
c = np.arange(5)[::-1]
a[b] = c
# If the strides are not reversed, the 0 in the arange comes last.
assert_equal(a[0], 0)
# This also tests that the subspace buffer is initialized:
a = np.ones((5, 2))
c = np.arange(10).reshape(5, 2)[::-1]
a[b, :] = c
assert_equal(a[0], [0, 1])
def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
a = np.arange(10)[:, None]
i = np.arange(10)[::-1]
assert_array_equal(a[i], a[i.copy('C')])
a = np.arange(20).reshape(-1, 2)
def test_uncontiguous_subspace_assignment(self):
# During development there was a bug activating a skip logic
# based on ndim instead of size.
a = np.full((3, 4, 2), -1)
b = np.full((3, 4, 2), -1)
a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
assert_equal(a, b)
def test_too_many_fancy_indices_special_case(self):
# Just documents behaviour, this is a small limitation.
a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
# NumPy bools can be used as boolean index (python ones as of yet not)
a = np.array(1)
assert_equal(a[np.bool_(True)], a[np.array(True)])
assert_equal(a[np.bool_(False)], a[np.array(False)])
# After deprecating bools as integers:
#a = np.array([0,1,2])
#assert_equal(a[True, :], a[None, :])
#assert_equal(a[:, True], a[:, None])
#
#assert_(not np.may_share_memory(a, a[True, :]))
def test_everything_returns_views(self):
# Before `...` would return a itself.
a = np.arange(5)
assert_(a is not a[()])
assert_(a is not a[...])
assert_(a is not a[:])
def test_broaderrors_indexing(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self):
a = np.zeros(5)
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
# Have array finalize do funny things
self.fill(99)
a = np.zeros((5, 5))
s = a.copy().view(type=SubClass)
s.fill(1)
a[[0, 1, 2, 3, 4], :] = s
assert_((a == 1).all())
# Subspace is last, so transposing might want to finalize
a[:, [0, 1, 2, 3, 4]] = s
assert_((a == 1).all())
a.fill(0)
a[...] = s
assert_((a == 1).all())
def test_subclass_writeable(self):
d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
dtype=[('target', 'S20'), ('V_mag', '>f4')])
ind = np.array([False, True, True], dtype=bool)
assert_(d[ind].flags.writeable)
ind = np.array([0, 1])
assert_(d[ind].flags.writeable)
assert_(d[...].flags.writeable)
assert_(d[0].flags.writeable)
def test_memory_order(self):
# This is not necessary to preserve. Memory layouts for
# more complex indices are not as simple.
a = np.arange(10)
b = np.arange(10).reshape(5,2).T
assert_(a[b].flags.f_contiguous)
# Takes a different implementation branch:
a = a.reshape(-1, 1)
assert_(a[b, 0].flags.f_contiguous)
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
class Zero(object):
# The most basic valid indexing
def __index__(self):
return 0
z = Zero()
class ArrayLike(object):
# Simple array, should behave like the array
def __array__(self):
return np.array(0)
a = np.zeros(())
assert_(isinstance(a[()], np.float_))
a = np.zeros(1)
assert_(isinstance(a[z], np.float_))
a = np.zeros((1, 1))
assert_(isinstance(a[z, np.array(0)], np.float_))
assert_(isinstance(a[z, ArrayLike()], np.float_))
# And object arrays do not call it too often:
b = np.array(0)
a = np.array(0, dtype=object)
a[()] = b
assert_(isinstance(a[()], np.ndarray))
a = np.array([b, None])
assert_(isinstance(a[z], np.ndarray))
a = np.array([[b, None]])
assert_(isinstance(a[z, np.array(0)], np.ndarray))
assert_(isinstance(a[z, ArrayLike()], np.ndarray))
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
if HAS_REFCOUNT:
refcount = sys.getrefcount(np.dtype(np.intp))
# item setting always checks indices in separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.intp), 1)
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.uint8), 1)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
def test_unaligned(self):
v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
d = v.view(np.dtype("S8"))
# unaligned source
x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
x = x.view(np.dtype("S8"))
x[...] = np.array("b" * 8, dtype="S")
b = np.arange(d.size)
#trivial
assert_equal(d[b], d)
d[b] = x
# nontrivial
# unaligned index array
b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
b = b.view(np.intp)[:d.size]
b[...] = np.arange(d.size)
assert_equal(d[b.astype(np.int16)], d)
d[b.astype(np.int16)] = x
# boolean
d[b % 2 == 0]
d[b % 2 == 0] = x[::2]
def test_tuple_subclass(self):
arr = np.ones((5, 5))
# A tuple subclass should also be an nd-index
class TupleSubclass(tuple):
pass
index = ([1], [1])
index = TupleSubclass(index)
assert_(arr[index].shape == (1,))
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
def test_broken_sequence_not_nd_index(self):
# See gh-5063:
# If we have an object which claims to be a sequence, but fails
# on item getting, this should not be converted to an nd-index (tuple)
# If this object happens to be a valid index otherwise, it should work
# This object here is very dubious and probably bad though:
class SequenceLike(object):
def __index__(self):
return 0
def __len__(self):
return 1
def __getitem__(self, item):
raise IndexError('Not possible')
arr = np.arange(10)
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
# also test that field indexing does not segfault
# for a similar reason, by indexing a structured array
arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
def test_indexing_array_weird_strides(self):
# See also gh-6221
# the shapes used here come from the issue and create the correct
# size for the iterator buffering size.
x = np.ones(10)
x2 = np.ones((10, 2))
ind = np.arange(10)[:, None, None, None]
ind = np.broadcast_to(ind, (10, 55, 4, 4))
# single advanced index case
assert_array_equal(x[ind], x[ind.copy()])
# higher dimensional advanced index
zind = np.zeros(4, dtype=np.intp)
assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
def test_indexing_array_negative_strides(self):
# From gh-8264,
# core dumps if negative strides are used in iteration
arro = np.zeros((4, 4))
arr = arro[::-1, ::-1]
slices = [slice(None), [0, 1, 2, 3]]
arr[slices] = 10
assert_array_equal(arr, 10.)
class TestFieldIndexing(object):
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
a = np.zeros((), [('a','f8')])
assert_(isinstance(a['a'], np.ndarray))
assert_(isinstance(a[['a']], np.ndarray))
class TestBroadcastedAssignments(object):
def assign(self, a, ind, val):
a[ind] = val
return a
def test_prepending_ones(self):
a = np.zeros((3, 2))
a[...] = np.ones((1, 3, 2))
# Fancy with subspace with and without transpose
a[[0, 1, 2], :] = np.ones((1, 3, 2))
a[:, [0, 1]] = np.ones((1, 3, 2))
# Fancy without subspace (with broadcasting)
a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
def test_prepend_not_one(self):
assign = self.assign
s_ = np.s_
a = np.zeros(5)
# Too large and not only ones.
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
a = np.zeros((5, 1))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.
a = np.zeros((5, 5))
a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
assert_((a[:3, :3] == [2, 3, 4]).all())
def test_broadcast_subspace(self):
a = np.zeros((100, 100))
v = np.arange(100)[:,None]
b = np.arange(100)[::-1]
a[b] = v
assert_((a[::-1] == v).all())
class TestSubclasses(object):
def test_basic(self):
class SubClass(np.ndarray):
pass
s = np.arange(5).view(SubClass)
assert_(isinstance(s[:3], SubClass))
assert_(s[:3].base is s)
assert_(isinstance(s[[0, 1, 2]], SubClass))
assert_(isinstance(s[s > 0], SubClass))
def test_matrix_fancy(self):
# The matrix class messes with the shape. While this is always
# weird (getitem is not used, it does not have setitem nor knows
# about fancy indexing), this tests gh-3110
m = np.matrix([[1, 2], [3, 4]])
assert_(isinstance(m[[0,1,0], :], np.matrix))
# gh-3110. Note the transpose currently because matrices do *not*
# support dimension fixing for fancy indexing correctly.
x = np.asmatrix(np.arange(50).reshape(5,10))
assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
def __array_finalize__(self, old):
self.finalize_status = np.array(self)
self.old = old
s = np.arange(10).view(SubClass)
new_s = s[:3]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[[0,1,2,3]]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[s > 0]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
@dec.skipif(not HAS_REFCOUNT)
def test_slice_decref_getsetslice(self):
# See gh-10066, a temporary slice object should be discarted.
# This test is only really interesting on Python 2 since
# it goes through `__set/getslice__` here and can probably be
# removed. Use 0:7 to make sure it is never None:7.
class KeepIndexObject(np.ndarray):
def __getitem__(self, indx):
self.indx = indx
if indx == slice(0, 7):
raise ValueError
def __setitem__(self, indx, val):
self.indx = indx
if indx == slice(0, 4):
raise ValueError
k = np.array([1]).view(KeepIndexObject)
k[0:5]
assert_equal(k.indx, slice(0, 5))
assert_equal(sys.getrefcount(k.indx), 2)
try:
k[0:7]
raise AssertionError
except ValueError:
# The exception holds a reference to the slice so clear on Py2
if hasattr(sys, 'exc_clear'):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
sys.exc_clear()
assert_equal(k.indx, slice(0, 7))
assert_equal(sys.getrefcount(k.indx), 2)
k[0:3] = 6
assert_equal(k.indx, slice(0, 3))
assert_equal(sys.getrefcount(k.indx), 2)
try:
k[0:4] = 2
raise AssertionError
except ValueError:
# The exception holds a reference to the slice so clear on Py2
if hasattr(sys, 'exc_clear'):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
sys.exc_clear()
assert_equal(k.indx, slice(0, 4))
assert_equal(sys.getrefcount(k.indx), 2)
class TestFancyIndexingCast(object):
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
bool_index = np.zeros(shape).astype(bool)
bool_index[0, 1] = True
zero_array = np.zeros(shape)
# Assigning float is fine.
zero_array[bool_index] = np.array([1])
assert_equal(zero_array[0, 1], 1)
# Fancy indexing works, although we get a cast warning.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
assert_equal(zero_array[0, 1], 2) # No complex part
# Cast complex to float, throwing away the imaginary portion.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
class TestFancyIndexingEquivalence(object):
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
a = np.arange(5, dtype=object)
b = a.copy()
a[:3] = [1, (1,2), 3]
b[[0, 1, 2]] = [1, (1,2), 3]
assert_array_equal(a, b)
# test same for subspace fancy indexing
b = np.arange(5, dtype=object)[None, :]
b[[0], :3] = [[1, (1,2), 3]]
assert_array_equal(a, b[0])
# Check that swapping of axes works.
# There was a bug that made the later assignment throw a ValueError
# do to an incorrectly transposed temporary right hand side (gh-5714)
b = b.T
b[:3, [0]] = [[1], [(1,2)], [3]]
assert_array_equal(a, b[:, 0])
# Another test for the memory order of the subspace
arr = np.ones((3, 4, 5), dtype=object)
# Equivalent slicing assignment for comparison
cmp_arr = arr.copy()
cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
arr = arr.copy('F')
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
def test_cast_equivalence(self):
# Yes, normal slicing uses unsafe casting.
a = np.arange(5)
b = a.copy()
a[:3] = np.array(['2', '-3', '-1'])
b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
assert_array_equal(a, b)
# test the same for subspace fancy indexing
b = np.arange(5)[None, :]
b[[0], :3] = np.array([['2', '-3', '-1']])
assert_array_equal(a, b[0])
class TestMultiIndexingAutomated(object):
"""
These tests use code to mimic the C-Code indexing for selection.
NOTE:
* This still lacks tests for complex item setting.
* If you change behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
* Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
will usually not be the same one. They are *not* tested.
Update 2016-11-30: It is probably not worth maintaining this test
indefinitely and it can be dropped if maintenance becomes a burden.
"""
def setup(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
# dimensions, also need to test all False
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
# Some slices:
slice(-5, 5, 2),
slice(1, 1, 100),
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
np.array([2, -1], dtype=np.int8),
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
# Some simpler indices that still cover a bit more
self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
'skip']
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
def _get_multi_index(self, arr, indices):
"""Mimic multi dimensional indexing.
Parameters
----------
arr : ndarray
Array to be indexed.
indices : tuple of index objects
Returns
-------
out : ndarray
An array equivalent to the indexing operation (but always a copy).
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation requires a copy. If this is `True`,
`np.may_share_memory(arr, arr[indicies])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
-----
While the function may mostly match the errors of normal indexing this
is generally not the case.
"""
in_indices = list(indices)
indices = []
# if False, this is a fancy or boolean index
no_copy = True
# number of fancy/scalar indexes that are not consecutive
num_fancy = 0
# number of dimensions indexed by a "fancy" index
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
# allowed if the broadcast fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False
# We need to handle Ellipsis and make arrays from indices, also
# check if this is fancy indexing (set no_copy).
ndim = 0
ellipsis_pos = None # define here mostly to replace all but first.
for i, indx in enumerate(in_indices):
if indx is None:
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
no_copy = False
if indx.ndim == 0:
raise IndexError
# boolean indices can have higher dimensions
ndim += indx.ndim
fancy_dim += indx.ndim
continue
if indx is Ellipsis:
if ellipsis_pos is None:
ellipsis_pos = i
continue # do not increment ndim counter
raise IndexError
if isinstance(indx, slice):
ndim += 1
continue
if not isinstance(indx, np.ndarray):
# This could be open for changes in numpy.
# numpy should maybe raise an error if casting to intp
# is not safe. It rejects np.array([1., 2.]) but not
# [1., 2.] as index (same for ie. np.take).
# (Note the importance of empty lists if changing this here)
indx = np.array(indx, dtype=np.intp)
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
raise IndexError('arrays used as indices must be of '
'integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
# we can't take more dimensions then we have, not even for 0-d
# arrays. since a[()] makes sense, but not a[(),]. We will
# raise an error later on, unless a broadcasting error occurs
# first.
raise IndexError
if ndim == 0 and None not in in_indices:
# Well we have no indexes or one Ellipsis. This is legal.
return arr.copy(), no_copy
if ellipsis_pos is not None:
in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
(arr.ndim - ndim))
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
# convert to an index array
indx = np.arange(*indx.indices(arr.shape[ax]))
indices.append(['s', indx])
continue
elif indx is None:
# this is like taking a slice with one element from a new axis:
indices.append(['n', np.array([0], dtype=np.intp)])
arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
if indx.shape != arr.shape[ax:ax+indx.ndim]:
raise IndexError
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
# concatenate axis into a single one:
if indx.ndim != 0:
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ arr.shape[ax+indx.ndim:]))
indx = flat_indx
else:
# This could be changed, a 0-d boolean index can
# make sense (even outside the 0-d indexed array case)
# Note that originally this is could be interpreted as
# integer in the full integer special case.
raise IndexError
else:
# If the index is a singleton, the bounds check is done
# before the broadcasting. This used to be different in <1.9
if indx.ndim == 0:
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
raise IndexError
if indx.ndim == 0:
# The index is a scalar. This used to be two fold, but if
# fancy indexing was active, the check was done later,
# possibly after broadcasting it away (1.7. or earlier).
# Now it is always done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
if (len(indices) > 0 and
indices[-1][0] == 'f' and
ax != ellipsis_pos):
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
# NOTE: A 0-d array triggers this as well, while one may
# expect it to not trigger it, since a scalar would not be
# considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
if num_fancy > 1 and not no_copy:
# We have to flush the fancy indexes left
new_indices = indices[:]
axes = list(range(arr.ndim))
fancy_axes = []
new_indices.insert(0, ['f'])
ni = 0
ai = 0
for indx in indices:
ni += 1
if indx[0] == 'f':
new_indices[0].extend(indx[1:])
del new_indices[ni]
ni -= 1
for ax in range(ai, ai + len(indx[1:])):
fancy_axes.append(ax)
axes.remove(ax)
ai += len(indx) - 1 # axis we are at
indices = new_indices
# and now we need to transpose arr:
arr = arr.transpose(*(fancy_axes + axes))
# We only have one 'f' index now and arr is transposed accordingly.
# Now handle newaxis by reshaping...
ax = 0
for indx in indices:
if indx[0] == 'f':
if len(indx) == 1:
continue
# First of all, reshape arr to combine fancy axes into one:
orig_shape = arr.shape
orig_slice = orig_shape[ax:ax + len(indx[1:])]
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(orig_slice).astype(int),)
+ arr.shape[ax + len(indx[1:]):]))
# Check if broadcasting works
res = np.broadcast(*indx[1:])
# unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
if res.size != 0:
if error_unless_broadcast_to_empty:
raise IndexError
for _indx, _size in zip(indx[1:], orig_slice):
if _indx.size == 0:
continue
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice):
if np.product(orig_slice) == 0:
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
except Exception:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
else:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='wrap')
else:
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
arr = arr.reshape((arr.shape[:ax]
+ mi.shape
+ arr.shape[ax+1:]))
ax += mi.ndim
continue
# If we are here, we have a 1D array for take:
arr = arr.take(indx[1], axis=ax)
ax += 1
return arr, no_copy
def _check_multi_index(self, arr, index):
"""Check a multi index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be a reshaped arange.
index : tuple of indexing objects
Index being tested.
"""
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(Exception, arr.__getitem__, index)
assert_raises(Exception, arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _check_single_index(self, arr, index):
"""Check a single index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be an arange.
index : indexing object
Index being tested. Must be a single index and not a tuple
of indexing objects (see also `_check_multi_index`).
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(Exception, arr.__getitem__, index)
assert_raises(Exception, arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result.
"""
arr = arr.copy()
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
# Check if we got a view, unless its a 0-sized or 0-d array.
# (then its not a view, and that does not matter)
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
if HAS_REFCOUNT:
if no_copy:
# refcount increases by one:
assert_equal(sys.getrefcount(arr), 3)
else:
assert_equal(sys.getrefcount(arr), 2)
# Test non-broadcast setitem:
b = arr.copy()
b[index] = mimic_get + 1000
if b.size == 0:
return # nothing to compare here...
if no_copy and indexed_arr.ndim != 0:
# change indexed_arr in-place to manipulate original:
indexed_arr += 1000
assert_array_equal(arr, b)
return
# Use the fact that the array is originally an arange:
arr.flat[indexed_arr.ravel()] += 1000
assert_array_equal(arr, b)
def test_boolean(self):
a = np.array(5)
assert_equal(a[np.array(True)], 5)
a[np.array(True)] = 1
assert_equal(a, 1)
# NOTE: This is different from normal broadcasting, as
# arr[boolean_array] works like in a multi index. Which means
# it is aligned to the left. This is probably correct for
# consistency with arr[boolean_array,] also no broadcasting
# is done at all
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool),))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
# spot and the simple ones in one other spot.
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
# index, when running the file separately.
warnings.filterwarnings('error', '', DeprecationWarning)
warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
def isskip(idx):
return isinstance(idx, str) and idx == "skip"
for simple_pos in [0, 2, 3]:
tocheck = [self.fill_indices, self.complex_indices,
self.fill_indices, self.fill_indices]
tocheck[simple_pos] = self.simple_indices
for index in product(*tocheck):
index = tuple(i for i in index if not isskip(i))
self._check_multi_index(self.a, index)
self._check_multi_index(self.b, index)
# Check very simple item getting:
self._check_multi_index(self.a, (0, 0, 0, 0))
self._check_multi_index(self.b, (0, 0, 0, 0))
# Also check (simple cases of) too many indices:
assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
def test_1d(self):
a = np.arange(10)
with warnings.catch_warnings():
warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
for index in self.complex_indices:
self._check_single_index(a, index)
class TestFloatNonIntegerArgument(object):
"""
These test that ``TypeError`` is raised when you try to use
non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
"""
def test_valid_indexing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[np.array([0])]
a[[0, 0]]
a[:, [0, 0]]
a[:, 0,:]
a[:,:,:]
def test_valid_slicing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[::]
a[0:]
a[:2]
a[0:2]
a[::2]
a[1::2]
a[:2:2]
a[1:2:2]
def test_non_integer_argument_errors(self):
a = np.array([[5]])
assert_raises(TypeError, np.reshape, a, (1., 1., -1))
assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
assert_raises(TypeError, np.take, a, [0], 1.)
assert_raises(TypeError, np.take, a, [0], np.float64(1.))
def test_non_integer_sequence_multiplication(self):
# NumPy scalar sequence multiply should not work with non-integers
def mult(a, b):
return a * b
assert_raises(TypeError, mult, [1], np.float_(3))
# following should be OK
mult([1], np.int_(3))
def test_reduce_axis_float_index(self):
d = np.zeros((3,3,3))
assert_raises(TypeError, np.min, d, 0.5)
assert_raises(TypeError, np.min, d, (0.5, 1))
assert_raises(TypeError, np.min, d, (1, 2.2))
assert_raises(TypeError, np.min, d, (.2, 1.2))
class TestBooleanIndexing(object):
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
assert_raises(TypeError, np.reshape, a, (True, -1))
assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
# Note that operator.index(np.array(True)) does not work, a boolean
# array is thus also deprecated, but not with the same message:
assert_raises(TypeError, operator.index, np.array(True))
assert_warns(DeprecationWarning, operator.index, np.True_)
assert_raises(TypeError, np.take, args=(a, [0], False))
def test_boolean_indexing_weirdness(self):
# Weird boolean indexing things
a = np.ones((2, 3, 4))
a[False, True, ...].shape == (0, 2, 3, 4)
a[True, [0, 1], True, True, [1], [[2]]] == (1, 2)
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
class TestArrayToIndexDeprecation(object):
"""Creating an an index from array not 0-D is an error.
"""
def test_array_to_index_error(self):
# so no exception is expected. The raising is effectively tested above.
a = np.array([[[1]]])
assert_raises(TypeError, operator.index, np.array([1]))
assert_raises(TypeError, np.reshape, a, (a, -1))
assert_raises(TypeError, np.take, a, [0], a)
class TestNonIntegerArrayLike(object):
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
an integer.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
assert_raises(IndexError, a.__getitem__, (['1', '2'],))
# The following is valid
a.__getitem__([])
class TestMultipleEllipsisError(object):
"""An index can only have a single ellipsis.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, lambda: a[..., ...])
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
class TestCApiAccess(object):
    """Exercise item access through the C-API helper ``array_indexing``."""

    def test_getitem(self):
        get = functools.partial(array_indexing, 0)
        # 0-d arrays cannot be indexed with an integer.
        assert_raises(IndexError, get, np.ones(()), 0)
        # Out-of-bounds indices on 1-d and 2-d arrays.
        for shape in ((10,), (10, 10)):
            arr = np.ones(shape)
            assert_raises(IndexError, get, arr, 11)
            assert_raises(IndexError, get, arr, -11)
        vec = np.arange(10)
        assert_array_equal(vec[4], get(vec, 4))
        mat = vec.reshape(5, 2)
        assert_array_equal(mat[-4], get(mat, -4))

    def test_setitem(self):
        put = functools.partial(array_indexing, 1)
        # Omitting the value argument would mean deletion, which is invalid.
        assert_raises(ValueError, put, np.ones(10), 0)
        # 0-d arrays cannot be indexed with an integer.
        assert_raises(IndexError, put, np.ones(()), 0, 0)
        # Out-of-bounds indices on 1-d and 2-d arrays.
        for shape in ((10,), (10, 10)):
            arr = np.ones(shape)
            assert_raises(IndexError, put, arr, 11, 0)
            assert_raises(IndexError, put, arr, -11, 0)
        vec = np.arange(10)
        put(vec, 4, 10)
        assert_(vec[4] == 10)
        # The reshaped array is a view, so assignment broadcasts a row.
        mat = vec.reshape(5, 2)
        put(mat, 4, 10)
        assert_array_equal(mat[-1], [10, 10])
# Legacy entry point: run this module's tests with numpy's old test runner.
if __name__ == "__main__":
    run_module_suite()
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@core@tests@test_indexing.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergeo/line/__init__.py",
"type": "Python"
}
|
import sys

# Auto-generated validator package for ``scattergeo.line``.  On Python < 3.7
# the per-property validator submodules are imported eagerly; on 3.7+ PEP 562
# module ``__getattr__`` is used so each validator is imported lazily on
# first attribute access.
if sys.version_info < (3, 7):
    from ._width import WidthValidator
    from ._dash import DashValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._width.WidthValidator", "._dash.DashValidator", "._color.ColorValidator"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergeo@line@__init__.py@.PATH_END.py
|
{
"filename": "_lambertw.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/special/_lambertw.py",
"type": "Python"
}
|
from ._ufuncs import _lambertw
import numpy as np
def lambertw(z, k=0, tol=1e-8):
    r"""
    lambertw(z, k=0, tol=1e-8)

    Lambert W function.

    ``W(z)`` is defined as the inverse of ``w * exp(w)``: for any complex
    number ``z``, ``z = W(z) * exp(W(z))``.  The function is multivalued
    with infinitely many branches; the integer `k` selects which branch's
    solution of ``z = w exp(w)`` is returned.

    Parameters
    ----------
    z : array_like
        Input argument.
    k : int, optional
        Branch index (default 0, the principal branch).
    tol : float, optional
        Evaluation tolerance.

    Returns
    -------
    w : array
        `w` will have the same shape as `z`.

    See Also
    --------
    wrightomega : the Wright Omega function

    Notes
    -----
    All branches are supported.  The function has two partially real
    branches: the principal branch (``k = 0``) is real for real
    ``z > -1/e``, and the ``k = -1`` branch is real for ``-1/e < z < 0``.
    All branches except ``k = 0`` have a logarithmic singularity at
    ``z = 0``.

    The value is computed by Halley's iteration applied to
    ``w * exp(w)``, started from a first-order asymptotic approximation
    (O(log(w)) or `O(w)`).  The definition, implementation and choice of
    branches follow Corless et al. [2]_.  Evaluation can become
    inaccurate very close to the branch point at ``-1/e``; in some corner
    cases the iteration may fail to converge or end up on the wrong
    branch.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Lambert_W_function
    .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
       (1996) 329-359.
       https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf

    Examples
    --------
    The Lambert W function is the inverse of ``w exp(w)``:

    >>> import numpy as np
    >>> from scipy.special import lambertw
    >>> w = lambertw(1)
    >>> w
    (0.56714329040978384+0j)
    >>> w * np.exp(w)
    (1.0+0j)

    Any branch gives a valid inverse:

    >>> w = lambertw(1, k=3)
    >>> w * np.exp(w)
    (1.0000000000000002+1.609823385706477e-15j)
    """
    # The underlying ufunc expects the branch index as a C ``long``;
    # normalize ``k`` here before dispatching.
    branch = np.asarray(k, dtype=np.dtype("long"))
    return _lambertw(z, branch, tol)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@special@_lambertw.py@.PATH_END.py
|
{
"filename": "plot_regional_maxima.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/doc/examples/color_exposure/plot_regional_maxima.py",
"type": "Python"
}
|
"""
=========================
Filtering regional maxima
=========================
Here, we use morphological reconstruction to create a background image, which
we can subtract from the original image to isolate bright features (regional
maxima).
First we try reconstruction by dilation starting at the edges of the image. We
initialize a seed image to the minimum intensity of the image, and set its
border to be the pixel values in the original image. These maximal pixels will
get dilated in order to reconstruct the background image.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from skimage import data
from skimage import img_as_float
from skimage.morphology import reconstruction
# Convert to float: Important for subtraction later which won't work with uint8
image = img_as_float(data.coins())
image = gaussian_filter(image, 1)
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
mask = image
dilated = reconstruction(seed, mask, method='dilation')
######################################################################
# Subtracting the dilated image leaves an image with just the coins and a
# flat, black background, as shown below.
fig, (ax0, ax1, ax2) = plt.subplots(
nrows=1, ncols=3, figsize=(8, 2.5), sharex=True, sharey=True
)
ax0.imshow(image, cmap='gray')
ax0.set_title('original image')
ax0.axis('off')
ax1.imshow(dilated, vmin=image.min(), vmax=image.max(), cmap='gray')
ax1.set_title('dilated')
ax1.axis('off')
ax2.imshow(image - dilated, cmap='gray')
ax2.set_title('image - dilated')
ax2.axis('off')
fig.tight_layout()
######################################################################
# Although the features (i.e. the coins) are clearly isolated, the coins
# surrounded by a bright background in the original image are dimmer in the
# subtracted image. We can attempt to correct this using a different seed
# image.
#
# Instead of creating a seed image with maxima along the image border, we can
# use the features of the image itself to seed the reconstruction process.
# Here, the seed image is the original image minus a fixed value, ``h``.
h = 0.4
seed = image - h
dilated = reconstruction(seed, mask, method='dilation')
hdome = image - dilated
######################################################################
# To get a feel for the reconstruction process, we plot the intensity of the
# mask, seed, and dilated images along a slice of the image (indicated by red
# line).
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(8, 2.5))
yslice = 197
ax0.plot(mask[yslice], '0.5', label='mask')
ax0.plot(seed[yslice], 'k', label='seed')
ax0.plot(dilated[yslice], 'r', label='dilated')
ax0.set_ylim(-0.2, 2)
ax0.set_title('image slice')
ax0.set_xticks([])
ax0.legend()
ax1.imshow(dilated, vmin=image.min(), vmax=image.max(), cmap='gray')
ax1.axhline(yslice, color='r', alpha=0.4)
ax1.set_title('dilated')
ax1.axis('off')
ax2.imshow(hdome, cmap='gray')
ax2.axhline(yslice, color='r', alpha=0.4)
ax2.set_title('image - dilated')
ax2.axis('off')
fig.tight_layout()
plt.show()
######################################################################
# As you can see in the image slice, each coin is given a different baseline
# intensity in the reconstructed image; this is because we used the local
# intensity (shifted by ``h``) as a seed value. As a result, the coins in the
# subtracted image have similar pixel intensities. The final result is known
# as the h-dome of an image since this tends to isolate regional maxima of
# height ``h``. This operation is particularly useful when your images are
# unevenly illuminated.
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@doc@examples@color_exposure@plot_regional_maxima.py@.PATH_END.py
|
{
"filename": "plot_save_mesa_pos_eta_res.py",
"repo_name": "NikolayBritavskiyAstro/fast_rotating_binaries",
"repo_path": "fast_rotating_binaries_extracted/fast_rotating_binaries-main/src/scripts/plot_save_mesa_pos_eta_res.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import os
import mesaPlot as mp
from showyourwork.paths import user as Paths

paths = Paths()
plt.style.use(paths.scripts / "matplotlibrc")

# Unpack the zipped MESA output on first use; one representative history
# file is used as the "already unpacked" sentinel.
# NOTE(review): os.system with an f-string-built path breaks if the path
# contains spaces; subprocess.run with an argument list would be safer.
if os.path.exists(os.path.join(paths.data, 'mass_transfer_efficiency/p3_pos/LOGS3/history.data')):
    pass
else:
    os.system(f'python {os.path.join(paths.scripts / "unzip_MESA_output.py")}')
def find_nearest(array, value):
    """Return the index of the entry of `array` closest to `value`."""
    distances = np.abs(np.asarray(array) - value)
    return distances.argmin()
# One mesaPlot reader per MESA run and history file.  Naming scheme:
# m3_* read LOGS3 and m2_* read LOGS2 of each run; pN encodes the initial
# orbital period in days; *_new are the 'pos' runs and *_res the 'res'
# (resolution-test) runs.  (Presumably LOGS3 holds the binary history and
# LOGS2 the accretor's stellar history — TODO confirm against the MESA setup.)
m3_p3_g1_new = mp.MESA()
m3_p5_g1_new = mp.MESA()
m3_p10_g1_new = mp.MESA()
m3_p50_g1_new = mp.MESA()
m3_p70_g1_new = mp.MESA()
m2_p3_g1_new = mp.MESA()
m2_p5_g1_new = mp.MESA()
m2_p10_g1_new = mp.MESA()
m2_p50_g1_new = mp.MESA()
m2_p70_g1_new = mp.MESA()
m3_p3_g1_res = mp.MESA()
m3_p5_g1_res = mp.MESA()
m3_p10_g1_res = mp.MESA()
m3_p50_g1_res = mp.MESA()
m3_p70_g1_res = mp.MESA()
m2_p3_g1_res = mp.MESA()
m2_p5_g1_res = mp.MESA()
m2_p10_g1_res = mp.MESA()
m2_p50_g1_res = mp.MESA()
m2_p70_g1_res = mp.MESA()

# name=sys.argv[1]
# print(name+'/LOGS1')

# Point each reader at its run directory and load its history file.
m3_p3_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p3_pos/LOGS3')
m3_p3_g1_new.loadHistory()
m3_p5_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p5_pos/LOGS3')
m3_p5_g1_new.loadHistory()
m3_p10_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p10_pos/LOGS3')
m3_p10_g1_new.loadHistory()
m3_p50_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p50_pos/LOGS3')
m3_p50_g1_new.loadHistory()
m3_p70_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p70_pos/LOGS3')
m3_p70_g1_new.loadHistory()
m2_p3_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p3_pos/LOGS2')
m2_p3_g1_new.loadHistory()
m2_p5_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p5_pos/LOGS2')
m2_p5_g1_new.loadHistory()
m2_p10_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p10_pos/LOGS2')
m2_p10_g1_new.loadHistory()
m2_p50_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p50_pos/LOGS2')
m2_p50_g1_new.loadHistory()
m2_p70_g1_new.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p70_pos/LOGS2')
m2_p70_g1_new.loadHistory()
m3_p3_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p3_res/LOGS3')
m3_p3_g1_res.loadHistory()
m3_p5_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p5_res/LOGS3')
m3_p5_g1_res.loadHistory()
m3_p10_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p10_res/LOGS3')
m3_p10_g1_res.loadHistory()
m3_p50_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p50_res/LOGS3')
m3_p50_g1_res.loadHistory()
m3_p70_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p70_res/LOGS3')
m3_p70_g1_res.loadHistory()
m2_p3_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p3_res/LOGS2')
m2_p3_g1_res.loadHistory()
m2_p5_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p5_res/LOGS2')
m2_p5_g1_res.loadHistory()
m2_p10_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p10_res/LOGS2')
m2_p10_g1_res.loadHistory()
m2_p50_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p50_res/LOGS2')
m2_p50_g1_res.loadHistory()
m2_p70_g1_res.log_fold = os.path.join(paths.data, 'mass_transfer_efficiency/p70_res/LOGS2')
m2_p70_g1_res.loadHistory()
# Index lists matching each binary-history ('m3') age to the nearest entry
# of the corresponding stellar history ('m2'), so columns from the two
# files can be plotted against each other.
indx_p3 = []
indx_p5 = []
indx_p10 = []
indx_p50 = []
indx_p70 = []
indx_p3_res = []
indx_p5_res = []
indx_p10_res = []
indx_p50_res = []
indx_p70_res = []

# --- column extraction for the high-resolution ('res') runs ---
# NOTE(review): several of the extracted columns (spin2_*, omega_sync_*,
# surf_avg_omega_crit_*) are never used later in this script — presumably
# kept for other plots; verify before removing.
age_p3_g1_res = m3_p3_g1_res.hist.age
delta_p3_g1_res = m3_p3_g1_res.hist.delta_updated
spin2_p3_g1_res = m3_p3_g1_res.hist.J_spin_2
period_days_p3_res = m3_p3_g1_res.hist.period_days
# NOTE(review): omega_sync_p3 is reassigned further down from the 'pos'
# run's period, silently overwriting this value — one of the two
# assignments looks unintended (likely this one should be omega_sync_p3_res).
omega_sync_p3 = 2 * 3.1415926 / (period_days_p3_res)  # rad/days
star_age_p3_g1_res = m2_p3_g1_res.hist.star_age
surf_avg_omega_div_omega_crit_p3_g1_res = m2_p3_g1_res.hist.surf_avg_omega_div_omega_crit
surf_avg_omega_crit_p3_g1_res = m2_p3_g1_res.hist.surf_avg_omega_crit
age_p5_g1_res = m3_p5_g1_res.hist.age
delta_p5_g1_res = m3_p5_g1_res.hist.delta_updated
spin2_p5_g1_res = m3_p5_g1_res.hist.J_spin_2
period_days_p5_res = m3_p5_g1_res.hist.period_days
omega_sync_p5_res = 2 * 3.1415926 / (period_days_p5_res)  # rad/days
star_age_p5_g1_res = m2_p5_g1_res.hist.star_age
surf_avg_omega_div_omega_crit_p5_g1_res = m2_p5_g1_res.hist.surf_avg_omega_div_omega_crit
surf_avg_omega_crit_p5_g1_res = m2_p5_g1_res.hist.surf_avg_omega_crit
age_p10_g1_res = m3_p10_g1_res.hist.age
delta_p10_g1_res = m3_p10_g1_res.hist.delta_updated
spin2_p10_g1_res = m3_p10_g1_res.hist.J_spin_2
star_age_p10_g1_res = m2_p10_g1_res.hist.star_age
surf_avg_omega_div_omega_crit_p10_g1_res = m2_p10_g1_res.hist.surf_avg_omega_div_omega_crit
age_p50_g1_res = m3_p50_g1_res.hist.age
delta_p50_g1_res = m3_p50_g1_res.hist.delta_updated
spin2_p50_g1_res = m3_p50_g1_res.hist.J_spin_2
star_age_p50_g1_res = m2_p50_g1_res.hist.star_age
surf_avg_omega_div_omega_crit_p50_g1_res = m2_p50_g1_res.hist.surf_avg_omega_div_omega_crit
age_p70_g1_res = m3_p70_g1_res.hist.age
delta_p70_g1_res = m3_p70_g1_res.hist.delta_updated
spin2_p70_g1_res = m3_p70_g1_res.hist.J_spin_2
period_days_p70_res = m3_p70_g1_res.hist.period_days
star_age_p70_g1_res = m2_p70_g1_res.hist.star_age
surf_avg_omega_div_omega_crit_p70_g1_res = m2_p70_g1_res.hist.surf_avg_omega_div_omega_crit

# Build the nearest-age index maps for the 'res' runs.
for i in range(len(age_p3_g1_res)):
    indx_p3_res.append(find_nearest(star_age_p3_g1_res, age_p3_g1_res[i]))
for i in range(len(age_p5_g1_res)):
    indx_p5_res.append(find_nearest(star_age_p5_g1_res, age_p5_g1_res[i]))
for i in range(len(age_p10_g1_res)):
    indx_p10_res.append(find_nearest(star_age_p10_g1_res, age_p10_g1_res[i]))
for i in range(len(age_p50_g1_res)):
    indx_p50_res.append(find_nearest(star_age_p50_g1_res, age_p50_g1_res[i]))
for i in range(len(age_p70_g1_res)):
    indx_p70_res.append(find_nearest(star_age_p70_g1_res, age_p70_g1_res[i]))
# --- column extraction for the 'pos' runs (the *_new readers) ---
age_p3_g1_new = m3_p3_g1_new.hist.age
delta_p3_g1_new = m3_p3_g1_new.hist.delta_updated
spin2_p3_g1_new = m3_p3_g1_new.hist.J_spin_2
period_days_p3 = m3_p3_g1_new.hist.period_days
# NOTE(review): this overwrites the omega_sync_p3 computed earlier in the
# script from the 'res' run's period — check which value is intended.
omega_sync_p3 = 2 * 3.1415926 / (period_days_p3)  # rad/days
star_age_p3_g1_new = m2_p3_g1_new.hist.star_age
surf_avg_omega_div_omega_crit_p3_g1_new = m2_p3_g1_new.hist.surf_avg_omega_div_omega_crit
surf_avg_omega_crit_p3_g1_new = m2_p3_g1_new.hist.surf_avg_omega_crit
age_p5_g1_new = m3_p5_g1_new.hist.age
delta_p5_g1_new = m3_p5_g1_new.hist.delta_updated
spin2_p5_g1_new = m3_p5_g1_new.hist.J_spin_2
period_days_p5 = m3_p5_g1_new.hist.period_days
omega_sync_p5 = 2 * 3.1415926 / (period_days_p5)  # rad/days
star_age_p5_g1_new = m2_p5_g1_new.hist.star_age
surf_avg_omega_div_omega_crit_p5_g1_new = m2_p5_g1_new.hist.surf_avg_omega_div_omega_crit
surf_avg_omega_crit_p5_g1_new = m2_p5_g1_new.hist.surf_avg_omega_crit
age_p10_g1_new = m3_p10_g1_new.hist.age
delta_p10_g1_new = m3_p10_g1_new.hist.delta_updated
spin2_p10_g1_new = m3_p10_g1_new.hist.J_spin_2
star_age_p10_g1_new = m2_p10_g1_new.hist.star_age
surf_avg_omega_div_omega_crit_p10_g1_new = m2_p10_g1_new.hist.surf_avg_omega_div_omega_crit
age_p50_g1_new = m3_p50_g1_new.hist.age
delta_p50_g1_new = m3_p50_g1_new.hist.delta_updated
spin2_p50_g1_new = m3_p50_g1_new.hist.J_spin_2
star_age_p50_g1_new = m2_p50_g1_new.hist.star_age
surf_avg_omega_div_omega_crit_p50_g1_new = m2_p50_g1_new.hist.surf_avg_omega_div_omega_crit
age_p70_g1_new = m3_p70_g1_new.hist.age
delta_p70_g1_new = m3_p70_g1_new.hist.delta_updated
spin2_p70_g1_new = m3_p70_g1_new.hist.J_spin_2
period_days_p70 = m3_p70_g1_new.hist.period_days
surf_avg_omega_crit_p70_g1_new = m2_p70_g1_new.hist.surf_avg_omega_crit
omega_sync_p70 = 2 * 3.1415926 / (period_days_p70)  # rad/days
star_age_p70_g1_new = m2_p70_g1_new.hist.star_age
surf_avg_omega_div_omega_crit_p70_g1_new = m2_p70_g1_new.hist.surf_avg_omega_div_omega_crit

# Build the nearest-age index maps for the 'pos' runs.
for i in range(len(age_p3_g1_new)):
    indx_p3.append(find_nearest(star_age_p3_g1_new, age_p3_g1_new[i]))
for i in range(len(age_p5_g1_new)):
    indx_p5.append(find_nearest(star_age_p5_g1_new, age_p5_g1_new[i]))
for i in range(len(age_p10_g1_new)):
    indx_p10.append(find_nearest(star_age_p10_g1_new, age_p10_g1_new[i]))
for i in range(len(age_p50_g1_new)):
    indx_p50.append(find_nearest(star_age_p50_g1_new, age_p50_g1_new[i]))
for i in range(len(age_p70_g1_new)):
    indx_p70.append(find_nearest(star_age_p70_g1_new, age_p70_g1_new[i]))
# Plot mass-transfer efficiency eta = 1 - delta against the accretor's
# omega/omega_crit: coloured curves are the resolution-test ('res') runs,
# grey curves the baseline ('pos') runs.  Output goes to the paper figures.
pp1_period = PdfPages(paths.figures / 'eta_omega_res.pdf')
plt.figure(figsize=(10, 10))
# NOTE(review): the LaTeX labels below are non-raw strings containing
# backslash escapes (\i, \g, \m, \D, \e, \o); raw strings r'...' would
# avoid invalid-escape warnings without changing the rendered text.
plt.title('$\it{M}_\mathrm{don,ini}$ = 30, $\it{M}_\mathrm{acc,ini}$ = 20, $\it{\gamma}$ = 3', fontsize=30)
# plt.tick_params(labelsize=18)
# NOTE(review): 'param' (seconds per day) is never used in this script.
param = 24 * 3600
plt.plot(surf_avg_omega_div_omega_crit_p3_g1_res[indx_p3_res][:-100], 1 - delta_p3_g1_res[:-100],
         label='$\it{P}_\mathrm{ini}$ = 3 d', lw=3)
plt.plot(surf_avg_omega_div_omega_crit_p5_g1_res[indx_p5_res][:-100], 1 - delta_p5_g1_res[:-100], linestyle='-',
         label='$\it{P}_\mathrm{ini}$ = 5 d', lw=3)
plt.plot(surf_avg_omega_div_omega_crit_p10_g1_res[indx_p10_res], 1 - delta_p10_g1_res, linestyle='-',
         label='$\it{P}_\mathrm{ini}$ = 10 d', lw=3)
plt.plot(surf_avg_omega_div_omega_crit_p50_g1_res[indx_p50_res], 1 - delta_p50_g1_res, linestyle='-',
         label='$\it{P}_\mathrm{ini}$ = 50 d', lw=3)
plt.plot(surf_avg_omega_div_omega_crit_p70_g1_res[indx_p70_res], 1 - delta_p70_g1_res, linestyle='-',
         label='$\it{P}_\mathrm{ini}$ = 70 d', lw=3)
plt.text(0.01, 0.23, 'mesh delta = 0.5, time delta = 0.75', fontsize=18)
# Baseline 'pos' runs in grey for comparison (last points trimmed to avoid
# the end-of-run artefacts — trim lengths differ per run).
plt.plot(surf_avg_omega_div_omega_crit_p3_g1_new[indx_p3][:-150], 1 - delta_p3_g1_new[:-150], lw=2, color='gray')
plt.plot(surf_avg_omega_div_omega_crit_p5_g1_new[indx_p5][:-150], 1 - delta_p5_g1_new[:-150], linestyle='-',
         color='gray', lw=2)
plt.plot(surf_avg_omega_div_omega_crit_p10_g1_new[indx_p10], 1 - delta_p10_g1_new, linestyle='-', color='gray', lw=2)
plt.plot(surf_avg_omega_div_omega_crit_p50_g1_new[indx_p50][:-30], 1 - delta_p50_g1_new[:-30], linestyle='-',
         color='gray', lw=2)
plt.plot(surf_avg_omega_div_omega_crit_p70_g1_new[indx_p70][:-30], 1 - delta_p70_g1_new[:-30], color='gray', lw=2)
plt.xlabel('$\it{\omega}/\it{\omega}_\mathrm{crit}$', fontsize=30)
plt.ylabel('$\it{\eta} = \Delta \it{M}_\mathrm{acc}/\Delta \it{M}_\mathrm{don}$', fontsize=30)
# plt.xlabel('Star age [Myrs]',fontsize=30)
plt.legend(loc=1, fontsize=23, frameon=True)
# plt.xlim([5e6,6.5e6])
plt.ylim([-0.02, 0.25])
plt.savefig(pp1_period, format='pdf')
pp1_period.close()
|
NikolayBritavskiyAstroREPO_NAMEfast_rotating_binariesPATH_START.@fast_rotating_binaries_extracted@fast_rotating_binaries-main@src@scripts@plot_save_mesa_pos_eta_res.py@.PATH_END.py
|
{
"filename": "alpha_dropout_test.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/regularization/alpha_dropout_test.py",
"type": "Python"
}
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class AlphaDropoutTest(testing.TestCase):
    """Unit tests for ``layers.AlphaDropout``."""

    @pytest.mark.requires_trainable_backend
    def test_alpha_dropout_basics(self):
        # Generic layer contract: output shape, weight counts, masking.
        self.run_layer_test(
            layers.AlphaDropout,
            init_kwargs={"rate": 0.2},
            input_shape=(2, 3),
            call_kwargs={"training": True},
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def test_alpha_dropout_correctness(self):
        # Alpha dropout should keep the output standard deviation near 1.
        x = np.ones((20, 500)).astype("float32")
        out = layers.AlphaDropout(0.3, seed=1337)(x, training=True)
        self.assertAllClose(
            np.std(backend.convert_to_numpy(out)), 1.0, atol=1e-1
        )

    def test_alpha_dropout_partial_noise_shape_dynamic(self):
        x = np.ones((20, 5, 10))
        out = layers.AlphaDropout(0.5, noise_shape=(None, 1, None))(
            x, training=True
        )
        # Axis 1 shares one mask, so its slices must be identical.
        self.assertAllClose(out[:, 0, :], out[:, 1, :])

    def test_alpha_dropout_partial_noise_shape_static(self):
        x = np.ones((20, 5, 10))
        out = layers.AlphaDropout(0.5, noise_shape=(20, 1, 10))(
            x, training=True
        )
        self.assertAllClose(out[:, 0, :], out[:, 1, :])

    def test_alpha_dropout_negative_rate(self):
        msg = (
            "Invalid value received for argument `rate`. "
            "Expected a float value between 0 and 1."
        )
        with self.assertRaisesRegex(ValueError, msg):
            _ = layers.AlphaDropout(rate=-0.5)

    def test_alpha_dropout_rate_greater_than_one(self):
        msg = (
            "Invalid value received for argument `rate`. "
            "Expected a float value between 0 and 1."
        )
        with self.assertRaisesRegex(ValueError, msg):
            _ = layers.AlphaDropout(rate=1.5)
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@regularization@alpha_dropout_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "lofar-astron/RMextract",
"repo_path": "RMextract_extracted/RMextract-master/RMextract/pyiri/__init__.py",
"type": "Python"
}
|
lofar-astronREPO_NAMERMextractPATH_START.@RMextract_extracted@RMextract-master@RMextract@pyiri@__init__.py@.PATH_END.py
|
|
{
"filename": "test_metacal_bootstrap.py",
"repo_name": "esheldon/ngmix",
"repo_path": "ngmix_extracted/ngmix-master/ngmix/tests/test_metacal_bootstrap.py",
"type": "Python"
}
|
"""
just test moment errors
"""
import pytest
import numpy as np
import ngmix
from ngmix.runners import Runner, PSFRunner
from ngmix.guessers import SimplePSFGuesser, TFluxAndPriorGuesser
from ngmix.fitting import Fitter
from ngmix.gaussmom import GaussMom
from ngmix.metacal import metacal_bootstrap, MetacalBootstrapper
from ._sims import get_model_obs
from ._priors import get_prior
from ._galsim_sims import _get_obs
# Fractional tolerance for numeric comparisons.
# NOTE(review): not referenced by the tests visible in this file section;
# confirm it is still needed before removing.
FRAC_TOL = 5.0e-4
@pytest.mark.parametrize('noise', [1.0e-8, 0.01])
@pytest.mark.parametrize('use_bootstrapper', [False, True])
@pytest.mark.parametrize('nband', [None, 2])
@pytest.mark.parametrize('nepoch', [None, 2])
def test_metacal_bootstrap_max_smoke(
    noise, use_bootstrapper, nband, nepoch, metacal_caching
):
    """
    Smoke-test a metacal bootstrap with maximum-likelihood fitting:
    both the MetacalBootstrapper class and the metacal_bootstrap
    function must produce results and sheared observations for every
    metacal shear type, with zero flags.
    """
    rng = np.random.RandomState(2830)

    model = 'gauss'
    fit_model = 'gauss'

    # Simulated observation(s); 'gmix' holds the true mixture used below
    # to seed the flux/T guesses.
    data = get_model_obs(
        rng=rng,
        model=model,
        noise=noise,
        nepoch=nepoch,
        nband=nband,
    )
    obs = data['obs']

    prior = get_prior(
        fit_model=fit_model,
        rng=rng,
        scale=0.2,
        T_range=[-1.0, 1.e3],
        F_range=[0.01, 1000.0],
        nband=nband,
    )

    # Start the object fit from the true flux and size.
    flux_guess = data['gmix'].get_flux()
    Tguess = data['gmix'].get_T()
    guesser = TFluxAndPriorGuesser(
        rng=rng, T=Tguess, flux=flux_guess, prior=prior,
    )
    psf_guesser = SimplePSFGuesser(rng=rng)

    fitter = Fitter(model=fit_model, prior=prior)
    psf_fitter = Fitter(model='gauss')

    psf_runner = PSFRunner(
        fitter=psf_fitter,
        guesser=psf_guesser,
        ntry=2,
    )
    runner = Runner(
        fitter=fitter,
        guesser=guesser,
        ntry=2,
    )

    # Exercise both the class-based and function-based entry points.
    if use_bootstrapper:
        boot = MetacalBootstrapper(
            runner=runner, psf_runner=psf_runner,
            rng=rng,
        )
        resdict, obsdict = boot.go(obs)
        _ = boot.fitter  # for coverage
    else:
        resdict, obsdict = metacal_bootstrap(
            obs=obs, runner=runner, psf_runner=psf_runner,
            rng=rng,
        )

    # Every metacal shear type must be present, fitted without flags,
    # and carry a fitted PSF on its (single-epoch) observations.
    for key in ['noshear', '1p', '1m', '2p', '2m']:
        assert key in resdict
        assert key in obsdict
        assert resdict[key]['flags'] == 0
        if isinstance(obsdict[key], ngmix.Observation):
            assert obsdict[key].has_psf()
            assert 'result' in obsdict[key].psf.meta
@pytest.mark.parametrize('noise', [1.0e-8, 0.01])
@pytest.mark.parametrize('use_bootstrapper', [False, True])
def test_metacal_bootstrap_gaussmom_smoke(
    noise, use_bootstrapper, metacal_caching,
):
    """
    Smoke-test a metacal bootstrap using Gaussian weighted moments for
    both the object and the PSF.
    """
    rng = np.random.RandomState(2830)

    sim = get_model_obs(
        rng=rng,
        model='gauss',
        noise=noise,
    )
    observation = sim['obs']

    weight_fwhm = 1.2
    psf_runner = PSFRunner(fitter=GaussMom(fwhm=weight_fwhm))
    runner = Runner(fitter=GaussMom(fwhm=weight_fwhm))

    # Exercise both the class-based and function-based entry points.
    if use_bootstrapper:
        bootstrapper = MetacalBootstrapper(
            runner=runner, psf_runner=psf_runner,
            rng=rng,
        )
        resdict, obsdict = bootstrapper.go(observation)
    else:
        resdict, obsdict = metacal_bootstrap(
            obs=observation, runner=runner, psf_runner=psf_runner,
            rng=rng,
        )

    for shear_type in ('noshear', '1p', '1m', '2p', '2m'):
        assert shear_type in resdict
        assert shear_type in obsdict
        assert resdict[shear_type]['flags'] == 0
        if isinstance(obsdict[shear_type], ngmix.Observation):
            assert obsdict[shear_type].has_psf()
            assert 'result' in obsdict[shear_type].psf.meta
def test_metacal_bootstrap_gaussmom_response(metacal_caching):
    """
    Check the mean metacal shear response measured with Gaussian
    moments against its known reference value.
    """
    rng = np.random.RandomState(2830)

    n_trials = 50
    weight_fwhm = 1.2
    psf_runner = PSFRunner(fitter=GaussMom(fwhm=weight_fwhm))
    runner = Runner(fitter=GaussMom(fwhm=weight_fwhm))
    boot = MetacalBootstrapper(
        runner=runner, psf_runner=psf_runner,
        rng=rng,
        types=['1p', '1m'],
    )

    responses = np.zeros(n_trials)
    for trial in range(n_trials):
        obs = _get_obs(
            rng=rng,
            set_noise_image=False,
        )
        resdict, _ = boot.go(obs)
        # Finite-difference response from the +/-0.01 sheared images.
        responses[trial] = (
            resdict['1p']['e'][0] - resdict['1m']['e'][0]
        ) / 0.02

    assert abs(responses.mean() - 0.28159) < 1.0e-4
|
esheldonREPO_NAMEngmixPATH_START.@ngmix_extracted@ngmix-master@ngmix@tests@test_metacal_bootstrap.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/pie/textfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``pie.textfont.family`` string property."""

    def __init__(self, plotly_name="family", parent_name="pie.textfont", **kwargs):
        # Generated defaults; an explicit caller-supplied value always wins.
        defaults = {
            "array_ok": True,
            "edit_type": "plot",
            "no_blank": True,
            "strict": True,
        }
        for option, value in defaults.items():
            kwargs.setdefault(option, value)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@pie@textfont@_family.py@.PATH_END.py
|
{
"filename": "_stream.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scattermap/_stream.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
    # NOTE: this class is auto-generated by the plotly codegen; structural
    # changes belong in the generator, not here.

    # class properties
    # --------------------
    _parent_path_str = "scattermap"
    _path_str = "scattermap.stream"
    _valid_props = {"maxpoints", "token"}

    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.

        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]

        Returns
        -------
        int|float
        """
        return self["maxpoints"]

    @maxpoints.setter
    def maxpoints(self, val):
        self["maxpoints"] = val

    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://chart-studio.plotly.com/settings for more
        details.

        The 'token' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["token"]

    @token.setter
    def token(self, val):
        self["token"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        """

    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scattermap.Stream`
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__("stream")

        # Fast path used internally when a parent object builds the child.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattermap.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermap.Stream`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("maxpoints", None)
        _v = maxpoints if maxpoints is not None else _v
        if _v is not None:
            self["maxpoints"] = _v
        _v = arg.pop("token", None)
        _v = token if token is not None else _v
        if _v is not None:
            self["token"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@scattermap@_stream.py@.PATH_END.py
|
{
"filename": "test_covariances.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/benchmarks/test_covariances.py",
"type": "Python"
}
|
import os
import numpy as np
import pyccl as ccl
def test_ssc_WL():
    """Benchmark the weak-lensing super-sample covariance (SSC) matrix.

    Compare against Benjamin Joachimi's code. An overview of the methodology
    is given in appendix E.2 of 2007.01844.
    """
    data_dir = os.path.join(os.path.dirname(__file__), "data/covariances/")
    h = 0.7
    cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=h, n_s=0.97,
                          sigma8=0.8, m_nu=0.0)

    # Halo-model ingredients: Tinker10 mass function and bias, Duffy08
    # concentration and an analytic-Fourier NFW profile, all using the
    # 200-mean mass definition.
    mass_def = ccl.halos.MassDef200m
    hmf = ccl.halos.MassFuncTinker10(mass_def=mass_def)
    hbf = ccl.halos.HaloBiasTinker10(mass_def=mass_def)
    con = ccl.halos.ConcentrationDuffy08(mass_def=mass_def)
    nfw = ccl.halos.HaloProfileNFW(mass_def=mass_def, concentration=con,
                                   fourier_analytic=True)
    hmc = ccl.halos.HMCalculator(mass_function=hmf, halo_bias=hbf,
                                 mass_def=mass_def)

    # Sampling grids: scale factors from z=6 to z=0 (linear in a) and
    # log-spaced wavenumbers between 1e-4 and 1e2.
    n_z = 100
    n_k = 200
    k_min = 1e-4
    k_max = 1e2
    a = np.linspace(1/(1+6), 1, n_z)
    k = np.geomspace(k_min, k_max, n_k)

    # Halo-model SSC trispectrum for the shear auto-correlation.
    tk3D = ccl.halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc,
                                      prof=nfw, prof2=nfw, prof12_2pt=None,
                                      lk_arr=np.log(k), a_arr=a,
                                      use_log=True)

    z, nofz = np.loadtxt(os.path.join(data_dir, "ssc_WL_nofz.txt"),
                         unpack=True)
    WL_tracer = ccl.WeakLensingTracer(cosmo, dndz=(z, nofz))
    ell = np.loadtxt(os.path.join(data_dir, "ssc_WL_ell.txt"))

    fsky = 0.05
    sigma2_B = ccl.sigma2_B_disc(cosmo, a_arr=a, fsky=fsky)

    # fsky=None here because the survey geometry already enters through the
    # precomputed sigma2_B(a) array passed above.
    cov_ssc = ccl.covariances.angular_cl_cov_SSC(cosmo,
                                                 tracer1=WL_tracer,
                                                 tracer2=WL_tracer,
                                                 ell=ell, t_of_kk_a=tk3D,
                                                 sigma2_B=(a, sigma2_B),
                                                 fsky=None)
    var_ssc_ccl = np.diag(cov_ssc)
    off_diag_1_ccl = np.diag(cov_ssc, k=1)

    cov_ssc_bj = np.loadtxt(os.path.join(data_dir, "ssc_WL_cov_matrix.txt"))
    # At large scales, CCL uses a different convention for the Limber
    # approximation. This factor accounts for this difference
    ccl_limber_shear_fac = np.sqrt((ell-1)*ell*(ell+1)*(ell+2))/(ell+1/2)**2
    cov_ssc_bj_corrected = cov_ssc_bj * np.outer(ccl_limber_shear_fac**2,
                                                 ccl_limber_shear_fac**2)
    var_bj = np.diag(cov_ssc_bj_corrected)
    off_diag_1_bj = np.diag(cov_ssc_bj_corrected, k=1)

    # Diagonal, first off-diagonal and the full matrix must each agree with
    # the benchmark to better than 3%.
    assert np.all(np.fabs(var_ssc_ccl/var_bj - 1) < 3e-2)
    assert np.all(np.fabs(off_diag_1_ccl/off_diag_1_bj - 1) < 3e-2)
    assert np.all(np.fabs(cov_ssc/cov_ssc_bj_corrected - 1) < 3e-2)
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@benchmarks@test_covariances.py@.PATH_END.py
|
{
"filename": "test_plot.py",
"repo_name": "smirik/resonances",
"repo_path": "resonances_extracted/resonances-main/tests/resonances/resonance/test_plot.py",
"type": "Python"
}
|
from pathlib import Path
import shutil
import pytest
import resonances
import os
import tests.tools as tools
@pytest.fixture(autouse=True)
def run_around_tests():
    # Setup: clear the plot config so each test controls plotting explicitly,
    # and make sure the cache directory used by the tests exists.
    resonances.config.set('plot', None)
    resonances.config.set('plot.type', None)
    Path('cache/tests').mkdir(parents=True, exist_ok=True)
    yield
    # Teardown: restore the default plot config and remove the test cache.
    resonances.config.set('plot', 'resonant')
    resonances.config.set('plot.type', 'save')
    shutil.rmtree('cache/tests')
def test_simple_run():
    """Smoke test: a mixed two-/three-body simulation completes without raising."""
    simulation = tools.create_test_simulation_for_solar_system()
    configured_bodies = [
        (tools.get_3body_elements_sample(), resonances.ThreeBody('4J-2S-1')),
        (tools.get_2body_elements_sample(), resonances.TwoBody('1J-1')),
    ]
    for elements, mmr in configured_bodies:
        simulation.add_body(elements, mmr)
    simulation.run()
    assert 1 == 1  # reaching this line means the run finished cleanly
def test_body():
    sim = tools.create_test_simulation_for_solar_system()
    sim.add_body(tools.get_3body_elements_sample(), resonances.ThreeBody('4J-2S-1'))
    sim.add_body(tools.get_2body_elements_sample(), resonances.TwoBody('1J-1'))
    sim.run()
    body = sim.bodies[0]
    mmr = body.mmrs[0]

    # Plotting must tolerate missing filtered angles and then missing
    # filtered axis data without raising.
    body.angles_filtered[mmr.to_s()] = None
    resonances.resonance.plot.body(sim, body, mmr)
    body.axis_filtered = None
    resonances.resonance.plot.body(sim, body, mmr)

    # With plot_type='save' an image file must be written to the cache...
    sim.plot_type = 'save'
    resonances.resonance.plot.body(sim, body, mmr)
    file_path = 'cache/tests/asteroid_4J-2S-1+0+0-1.png'
    assert Path(file_path).is_file() is True
    os.remove(file_path)
    assert Path(file_path).is_file() is False

    # ...and with plot_type=None nothing may be saved.
    sim.plot_type = None
    resonances.resonance.plot.body(sim, body, mmr)
    file_path = 'cache/tests/asteroid_4J-2S-1+0+0-1.png'
    assert Path(file_path).is_file() is False
|
smirikREPO_NAMEresonancesPATH_START.@resonances_extracted@resonances-main@tests@resonances@resonance@test_plot.py@.PATH_END.py
|
{
"filename": "_cells.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/table/_cells.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CellsValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated compound validator for the `cells` property of `table`
    # traces. `data_docs` is the help text attached to the generated
    # `Cells` data class; it is a runtime string, not a docstring.
    def __init__(self, plotly_name="cells", parent_name="table", **kwargs):
        super(CellsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Cells"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the `text`
                within the box. Has an effect only if `text`
                spans two or more lines (i.e. `text` contains
                one or more <br> HTML tags) or if an explicit
                width is set to override the text width.
            alignsrc
                Sets the source reference on Chart Studio Cloud
                for `align`.
            fill
                :class:`plotly.graph_objects.table.cells.Fill`
                instance or dict with compatible properties
            font
                :class:`plotly.graph_objects.table.cells.Font`
                instance or dict with compatible properties
            format
                Sets the cell value formatting rule using d3
                formatting mini-languages which are very
                similar to those in Python. For numbers, see: h
                ttps://github.com/d3/d3-format/tree/v1.4.5#d3-
                format.
            formatsrc
                Sets the source reference on Chart Studio Cloud
                for `format`.
            height
                The height of cells.
            line
                :class:`plotly.graph_objects.table.cells.Line`
                instance or dict with compatible properties
            prefix
                Prefix for cell values.
            prefixsrc
                Sets the source reference on Chart Studio Cloud
                for `prefix`.
            suffix
                Suffix for cell values.
            suffixsrc
                Sets the source reference on Chart Studio Cloud
                for `suffix`.
            values
                Cell values. `values[m][n]` represents the
                value of the `n`th point in column `m`,
                therefore the `values[m]` vector length for all
                columns must be the same (longer vectors will
                be truncated). Each value must be a finite
                number or a string.
            valuessrc
                Sets the source reference on Chart Studio Cloud
                for `values`.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@table@_cells.py@.PATH_END.py
|
{
"filename": "plot.py",
"repo_name": "TRASAL/frbpoppy",
"repo_path": "frbpoppy_extracted/frbpoppy-master/tests/monte_carlo/plot.py",
"type": "Python"
}
|
from frbpoppy import hist
from goodness_of_fit import GoodnessOfFit
from matplotlib.lines import Line2D
from scipy.optimize import curve_fit
from tests.convenience import plot_aa_style, rel_path
import matplotlib.pyplot as plt
import numpy as np
class Plot():
    """Plot Monte Carlo goodness-of-fit (GoF) runs in a 4x3 grid of panels.

    Constructing the object immediately builds the full figure and saves it
    to ./plots/mc/mc.pdf. Each parameter panel shows one GoF curve per run
    plus a dashed marker at the run's global maximum.

    The ten per-parameter methods previously duplicated the same plotting
    loop; they now all delegate to the private _plot_parameter() helper.
    """

    def __init__(self):
        plot_aa_style()
        plt.rcParams['figure.figsize'] = (5.75373, (5.75373/3)*4)
        plt.rcParams['font.size'] = 9
        self.fig, self.axes = plt.subplots(4, 3, sharey='row')
        self.colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
        self.gf = GoodnessOfFit()
        self.df = self.gf.so.df

        # Global maximum (best parameter values and their GoF) per run.
        self.gm = {}
        for run in self.df.run.unique():
            self.gm[run] = self.gf.calc_global_max(run)
        print(self.gm)

        # Plot the subplots; (3, 2) stays empty, (2, 2) hosts the legend.
        self.alpha()
        self.si()
        self.li()
        self.li_2()
        self.lum_min()
        self.lum_max()
        self.w_int_mean()
        self.w_int_std()
        self.legend()
        self.dm_igm_slope()
        self.dm_host()
        self.axes[3, 2].set_axis_off()

        # Save plot
        plt.tight_layout()
        plt.subplots_adjust(wspace=0.1)
        plt.savefig(rel_path('./plots/mc/mc.pdf'))

    def _plot_parameter(self, ax, parm, runs, xlabel, title_label=None,
                        title_spec='.1f', set_label=None, logx=False,
                        show_ylabel=False):
        """Draw the GoF curves for one parameter on one panel.

        Parameters
        ----------
        ax : matplotlib Axes to draw on.
        parm : str, column name of the parameter in self.df.
        runs : list of run numbers; one colored step curve per run.
        xlabel : str, x-axis label (may include units).
        title_label : str or None, label used in the panel title showing the
            best-fit value of the second run; None suppresses the title.
        title_spec : str, format spec for the best-fit value in the title.
        set_label : str or None, 'Set N' label drawn on a twin right axis.
        logx : bool, use a logarithmic x-axis.
        show_ylabel : bool, label the y-axis 'GoF' (leftmost column only).
        """
        if logx:
            ax.set_xscale('log')
        if show_ylabel:
            ax.set_ylabel(r'GoF')
        # NOTE(review): `nonposy` was replaced by `nonpositive` in newer
        # matplotlib releases; kept as-is for the matplotlib version this
        # repo was written against — confirm before upgrading.
        ax.set_yscale('log', nonposy='clip')
        ax.set_xlabel(xlabel)
        if set_label is not None:
            # A twin axis is used purely as a right-hand-side row label.
            ax_right = ax.twinx()
            ax_right.set_ylabel(set_label, labelpad=10)
            ax_right.tick_params(axis='y', which='both', right=False,
                                 labelright=False)

        best_value = np.nan
        # Plot runs
        for i, run in enumerate(runs):
            bins, gofs = self.get_data(run, parm)
            ax.step(bins, gofs, where='mid')
            # Plot global maximum; runs without one are skipped silently.
            try:
                best_value, best_gof = self.gm[run][parm]
            except KeyError:
                continue
            ax.plot([best_value]*2, [1e-1, best_gof], color=self.colors[i],
                    linestyle='--')
            ax.scatter([best_value], [best_gof], marker='x',
                       color=self.colors[i])
            # The middle run's best-fit value becomes the panel title.
            if i == 1 and title_label is not None and not np.isnan(best_value):
                title = f'{title_label}=${best_value:{title_spec}}$'
                ax.set_title(title, fontsize=10, color=self.colors[i])

    def alpha(self):
        """Source count slope alpha, runs 1/5/8, panel (0, 0)."""
        self._plot_parameter(self.axes[0, 0], 'alpha', runs=[1, 5, 8],
                             xlabel=r'$\alpha$', title_label=r'$\alpha$',
                             show_ylabel=True)

    def si(self):
        """Spectral index, runs 1/5/8, panel (0, 1)."""
        self._plot_parameter(self.axes[0, 1], 'si', runs=[1, 5, 8],
                             xlabel=r'\text{si}', title_label=r'\text{si}')

    def li(self):
        """Luminosity index, runs 1/5/8, panel (0, 2), right label 'Set 1'."""
        self._plot_parameter(self.axes[0, 2], 'li', runs=[1, 5, 8],
                             xlabel='li', title_label='li',
                             set_label='Set 1')

    def li_2(self):
        """Luminosity index for run 2 only, panel (1, 0); no title."""
        self._plot_parameter(self.axes[1, 0], 'li', runs=[2],
                             xlabel='li', show_ylabel=True)

    def lum_min(self):
        """Minimum luminosity, run 2, panel (1, 1); log x-axis, no title."""
        label = r'\text{lum$_{\text{min}}$}'
        self._plot_parameter(self.axes[1, 1], 'lum_min', runs=[2],
                             xlabel=label + r' (erg s$^{-1}$)', logx=True)

    def lum_max(self):
        """Maximum luminosity, run 2, panel (1, 2), right label 'Set 2'."""
        label = r'\text{lum$_{\text{max}}$}'
        self._plot_parameter(self.axes[1, 2], 'lum_max', runs=[2],
                             xlabel=label + r' (erg s$^{-1}$)', logx=True,
                             set_label='Set 2')

    def w_int_mean(self):
        """Mean intrinsic width, runs 3/6/9, panel (2, 0); log x-axis."""
        label = r'\text{w$_{\text{int, mean}}$}'
        self._plot_parameter(self.axes[2, 0], 'w_mean', runs=[3, 6, 9],
                             xlabel=fr'{label} (ms)', title_label=label,
                             title_spec='.1e', logx=True, show_ylabel=True)

    def w_int_std(self):
        """Intrinsic width std, runs 3/6/9, panel (2, 1), label 'Set 3'."""
        label = r'\text{w$_{\text{int, std}}$}'
        self._plot_parameter(self.axes[2, 1], 'w_std', runs=[3, 6, 9],
                             xlabel=fr'{label} (ms)', title_label=label,
                             set_label='Set 3')

    def dm_igm_slope(self):
        """DM_IGM slope, runs 4/7/10, panel (3, 0)."""
        label = r'\text{DM$_{\text{IGM, slope}}$}'
        self._plot_parameter(self.axes[3, 0], 'dm_igm_slope',
                             runs=[4, 7, 10],
                             xlabel=label + r' ($\textrm{pc}\ \textrm{cm}^{-3}$)',
                             title_label=label, title_spec='.0f',
                             show_ylabel=True)

    def dm_host(self):
        """Host DM, runs 4/7/10, panel (3, 1), right label 'Set 4'."""
        label = r'\text{DM$_{\text{Host}}$}'
        self._plot_parameter(self.axes[3, 1], 'dm_host', runs=[4, 7, 10],
                             xlabel=label + r' ($\textrm{pc}\ \textrm{cm}^{-3}$)',
                             title_label=label, title_spec='.0f',
                             set_label='Set 4')

    def legend(self):
        """Use the otherwise-empty panel (2, 2) to host the figure legend."""
        ax = self.axes[2, 2]
        # Add legend elements
        elements = []
        line = Line2D([0], [0], color=self.colors[0])
        elements.append((line, r'Cycle 1'))
        line = Line2D([0], [0], color=self.colors[1])
        elements.append((line, r'Cycle 2'))
        line = Line2D([0], [0], color=self.colors[2])
        elements.append((line, r'Cycle 3'))
        line = Line2D([0], [0], color='grey', linestyle='--')
        label = r'Max GoF'
        elements.append((line, label))
        lines, labels = zip(*elements)
        self.fig.legend(lines, labels, bbox_to_anchor=(0.84, 0.4),
                        loc='center')
        ax.set_axis_off()

    def get_data(self, run_number, par):
        """Return (bins, gofs) arrays for parameter `par` of `run_number`.

        GoF per bin is the weighted median over the grouped rows. The bin
        type (lin/log) is inferred from the spacing of the first two bins,
        edges are added for step plotting, and NaN GoF values are replaced
        by 1e-1 so they remain visible on the log axis.
        """
        df = self.df[self.df.run == run_number]
        if df.empty:
            return [np.nan], [np.nan]
        gofs = []
        bins = []
        for bin_val, group in df.groupby(par):
            gof = self.gf.weighted_median(group)
            gofs.append(gof)
            bins.append(bin_val)
        bins = np.array(bins)
        gofs = np.array(gofs)

        # Infer binning type from the spacing of the first two bin widths.
        diff = np.diff(bins)
        bin_type = 'lin'
        if not np.isclose(diff[0], diff[1]):
            bin_type = 'log'
        bins, gofs = self.gf.add_edges_to_hist(bins, gofs, bin_type=bin_type)
        gofs[np.isnan(gofs)] = 1e-1
        return bins, gofs
if __name__ == '__main__':
    # Constructing Plot runs the full pipeline and saves the figure.
    Plot()
|
TRASALREPO_NAMEfrbpoppyPATH_START.@frbpoppy_extracted@frbpoppy-master@tests@monte_carlo@plot.py@.PATH_END.py
|
{
"filename": "MR_plotly.py",
"repo_name": "mplotnyko/SuperEarth.py",
"repo_path": "SuperEarth.py_extracted/SuperEarth.py-master/examples/MR_plotly.py",
"type": "Python"
}
|
import superearth as se

# Build the exoplanet sample (rocky planets excluded) and draw the
# mass-radius diagram with plotly.
df = se.exoplanets(0.25,0.08,rocky=False)
fig = se.plotly_pl(df,color='black', marker='circle')  # plot M-R data using plotly
# Display the figure
fig.show()
|
mplotnykoREPO_NAMESuperEarth.pyPATH_START.@SuperEarth.py_extracted@SuperEarth.py-master@examples@MR_plotly.py@.PATH_END.py
|
{
"filename": "test_ogle_remote.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/ogle/tests/test_ogle_remote.py",
"type": "Python"
}
|
import pytest
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.utils.exceptions import AstropyDeprecationWarning
from .. import Ogle
@pytest.mark.remote_data
def test_ogle_single():
    """Querying a single galactic coordinate returns exactly one row."""
    target = SkyCoord(0, 3, unit=(u.degree, u.degree), frame='galactic')
    result = Ogle.query_region(coord=target)
    assert len(result) == 1
@pytest.mark.remote_data
def test_ogle_list():
    """A list of identical coordinates yields one row each, with equal RAs."""
    target = SkyCoord(0, 3, unit=(u.degree, u.degree), frame='galactic')
    result = Ogle.query_region(coord=[target, target, target])
    assert len(result) == 3
    ra = result['RA[hr]']
    assert ra[0] == ra[1] == ra[2]
@pytest.mark.remote_data
def test_ogle_list_values():
    # Bare nested lists ([[l...], [b...]]) are a deprecated coordinate
    # format: the query must still succeed but has to emit an astropy
    # deprecation warning.
    co_list = [[0, 0, 0], [3, 3, 3]]
    with pytest.warns(AstropyDeprecationWarning):
        response = Ogle.query_region(coord=co_list)
    assert len(response) == 3
    assert response['RA[hr]'][0] == response['RA[hr]'][1] == response['RA[hr]'][2]
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@ogle@tests@test_ogle_remote.py@.PATH_END.py
|
{
"filename": "custom_nest_protocol.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/util/custom_nest_protocol.py",
"type": "Python"
}
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Protocol class for custom tf.nest support."""
import typing
from typing import Protocol
@typing.runtime_checkable
class CustomNestProtocol(Protocol):
  """Protocol for adding custom tf.nest support in user-defined classes.

  User classes should implement the two methods defined in this protocol in
  order to be supported by nest functions.

  - `__tf_flatten__` for generating the flattened components and the metadata
    of the current object.
  - `__tf_unflatten__` for creating a new object based on the input metadata
    and the components.

  See the method doc for details.

  In terms of support level, classes implementing this protocol
  - are supported by tf.nest and tf.data functions.
  - have limited support from tf.function, which requires writing a custom
    TraceType subclass to be used as the input or output of a tf.function.
  - are NOT supported by SavedModel.

  Code Examples:

  >>> import dataclasses
  >>> @dataclasses.dataclass
  ... class MaskedTensor:
  ...   mask: bool
  ...   value: tf.Tensor
  ...
  ...   def __tf_flatten__(self):
  ...     metadata = (self.mask,)  # static config.
  ...     components = (self.value,)  # dynamic values.
  ...     return metadata, components
  ...
  ...   @classmethod
  ...   def __tf_unflatten__(cls, metadata, components):
  ...     mask = metadata[0]
  ...     value = components[0]
  ...     return MaskedTensor(mask=mask, value=value)
  ...
  >>> mt = MaskedTensor(mask=True, value=tf.constant([1]))
  >>> mt
  MaskedTensor(mask=True, value=<tf.Tensor: ... numpy=array([1], dtype=int32)>)
  >>> tf.nest.is_nested(mt)
  True
  >>> mt2 = MaskedTensor(mask=False, value=tf.constant([2]))
  >>> tf.nest.assert_same_structure(mt, mt2)
  >>> leaves = tf.nest.flatten(mt)
  >>> leaves
  [<tf.Tensor: shape=(1,), dtype=int32, numpy=array([1], dtype=int32)>]
  >>> mt3 = tf.nest.pack_sequence_as(mt, leaves)
  >>> mt3
  MaskedTensor(mask=True, value=<tf.Tensor: ... numpy=array([1], dtype=int32)>)
  >>> bool(mt == mt3)
  True
  >>> tf.nest.map_structure(lambda x: x * 2, mt)
  MaskedTensor(mask=True, value=<tf.Tensor: ... numpy=array([2], dtype=int32)>)

  More examples are available in the unit tests (nest_test.py).
  """

  def __tf_flatten__(self):
    """Flatten current object into (metadata, components).

    Returns:
      A `tuple` of (metadata, components), where
      - metadata is a custom Python object that stands for the static config
        of the current object, which is supposed to be fixed and not affected
        by data transformation.
      - components is a `tuple` that contains the modifiable fields of the
        current object.

    Implementation Note:
    - This method should not invoke any TensorFlow ops.
    - This method only needs to flatten the current level. If current object
      has an attribute that also needs custom flattening, nest functions
      (such as `nest.flatten`) will utilize this method to do recursive
      flattening.
    - Components must be a `tuple`, not a `list`.
    """

  @classmethod
  def __tf_unflatten__(cls, metadata, components):
    """Create a user-defined object from (metadata, components).

    Args:
      metadata: a custom Python object that stands for the static config for
        reconstructing a new object of the current class.
      components: a `tuple` that contains the dynamic data fields of the
        current class, for object reconstruction.

    Returns:
      The user-defined object, with the same class of the current object.

    Implementation Note:
    - This method should not invoke any TensorFlow ops.
    - This method only needs to unflatten the current level. If the object
      has an attribute that also needs custom unflattening, nest functions
      will utilize this method to do recursive unflattening.
    """
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@util@custom_nest_protocol.py@.PATH_END.py
|
{
"filename": "_highlightcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/contours/z/_highlightcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HighlightcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    # Auto-generated color validator for the `highlightcolor` property of
    # `surface.contours.z`; edits to it trigger a full recalc by default.
    def __init__(
        self, plotly_name="highlightcolor", parent_name="surface.contours.z", **kwargs
    ):
        super(HighlightcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@contours@z@_highlightcolor.py@.PATH_END.py
|
{
"filename": "keys.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/util/keys.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" Define constants for keys.
Each key constant is defined as a Key object, which allows comparison with
strings (e.g. 'A', 'Escape', 'Shift'). This enables handling of key events
without using the key constants explicitly (e.g. ``if ev.key == 'Left':``).
In addition, key objects that represent characters can be matched to
the integer ordinal (e.g. 32 for space, 65 for A). This behavior is mainly
intended as a compatibility measure.
"""
from ..ext.six import string_types
class Key(object):
    """ Represent the identity of a certain key.

    This represents one or more names that the key in question is known by.
    A Key object can be compared to one of its string names (case
    insensitive), to the integer ordinal of the key (only for keys that
    represent characters), and to another Key instance.
    """

    def __init__(self, *names):
        self._names = names
        # Precompute uppercased aliases once so every comparison is
        # case-insensitive without repeated .upper() calls.
        self._names_upper = tuple([v.upper() for v in names])

    @property
    def name(self):
        """ The primary name of the key.
        """
        return self._names[0]

    def __hash__(self):
        # Hash only on the primary name, consistent with the Key-vs-Key
        # branch of __eq__ which compares primary names.
        return self._names[0].__hash__()

    def __repr__(self):
        return "<Key %s>" % ', '.join([repr(v) for v in self._names])

    def __eq__(self, other):
        # The py2 `six.string_types` shim was replaced by the builtin `str`;
        # on Python 3 the two are equivalent and the dead compat layer is
        # no longer needed.
        if isinstance(other, str):
            return other.upper() in self._names_upper
        elif isinstance(other, Key):
            # Delegates to the reflected comparison: primary name vs. other.
            return self._names[0] == other
        elif isinstance(other, int):
            # Match the ordinal of any single-character alias (e.g. 32 for
            # Space).
            return other in [ord(v) for v in self._names_upper if len(v) == 1]
        elif other is None:
            return False
        else:
            # NOTE(review): raising from __eq__ is unconventional (returning
            # NotImplemented would be idiomatic), but it is the documented,
            # long-standing behavior of this class and is kept.
            raise ValueError('Key can only be compared to str, int and Key.')
# Modifier keys.
SHIFT = Key('Shift')
CONTROL = Key('Control')
ALT = Key('Alt')
META = Key('Meta')  # That Mac thingy

# Navigation and editing keys.
UP = Key('Up')
DOWN = Key('Down')
LEFT = Key('Left')
RIGHT = Key('Right')
PAGEUP = Key('PageUp')
PAGEDOWN = Key('PageDown')
INSERT = Key('Insert')
DELETE = Key('Delete')
HOME = Key('Home')
END = Key('End')
ESCAPE = Key('Escape')
BACKSPACE = Key('Backspace')

# Function keys.
F1 = Key('F1')
F2 = Key('F2')
F3 = Key('F3')
F4 = Key('F4')
F5 = Key('F5')
F6 = Key('F6')
F7 = Key('F7')
F8 = Key('F8')
F9 = Key('F9')
F10 = Key('F10')
F11 = Key('F11')
F12 = Key('F12')

# Keys with printable-character aliases; these also compare equal to the
# character's ordinal (e.g. SPACE == 32).
SPACE = Key('Space', ' ')
ENTER = Key('Enter', 'Return', '\n')
TAB = Key('Tab', '\t')
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@util@keys.py@.PATH_END.py
|
{
"filename": "dot-plots.md",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/doc/python/dot-plots.md",
"type": "Markdown"
}
|
---
jupyter:
jupytext:
notebook_metadata_filter: all
text_representation:
extension: .md
format_name: markdown
format_version: '1.2'
jupytext_version: 1.4.2
kernelspec:
display_name: Python 3
language: python
name: python3
language_info:
codemirror_mode:
name: ipython
version: 3
file_extension: .py
mimetype: text/x-python
name: python
nbconvert_exporter: python
pygments_lexer: ipython3
version: 3.7.7
plotly:
description: How to make dot plots in Python with Plotly.
display_as: basic
language: python
layout: base
name: Dot Plots
order: 6
page_type: u-guide
permalink: python/dot-plots/
thumbnail: thumbnail/dot-plot.jpg
---
#### Basic Dot Plot
Dot plots (also known as [Cleveland dot plots](<https://en.wikipedia.org/wiki/Dot_plot_(statistics)>)) are [scatter plots](https://plotly.com/python/line-and-scatter/) with one categorical axis and one continuous axis. They can be used to show changes between two (or more) points in time or between two (or more) conditions. Compared to a [bar chart](/python/bar-charts/), dot plots can be less cluttered and allow for an easier comparison between conditions.
For the same data, we show below how to create a dot plot using either `px.scatter` or `go.Scatter`.
[Plotly Express](/python/plotly-express/) is the easy-to-use, high-level interface to Plotly, which [operates on a variety of types of data](/python/px-arguments/) and produces [easy-to-style figures](/python/styling-plotly-express/).
```python
import plotly.express as px
df = px.data.medals_long()
fig = px.scatter(df, y="nation", x="count", color="medal", symbol="medal")
fig.update_traces(marker_size=10)
fig.show()
```
```python
import plotly.express as px
import pandas as pd
schools = ["Brown", "NYU", "Notre Dame", "Cornell", "Tufts", "Yale",
"Dartmouth", "Chicago", "Columbia", "Duke", "Georgetown",
"Princeton", "U.Penn", "Stanford", "MIT", "Harvard"]
n_schools = len(schools)
women_salary = [72, 67, 73, 80, 76, 79, 84, 78, 86, 93, 94, 90, 92, 96, 94, 112]
men_salary = [92, 94, 100, 107, 112, 114, 114, 118, 119, 124, 131, 137, 141, 151, 152, 165]
df = pd.DataFrame(dict(school=schools*2, salary=men_salary + women_salary,
gender=["Men"]*n_schools + ["Women"]*n_schools))
# Use column names of df for the different parameters x, y, color, ...
fig = px.scatter(df, x="salary", y="school", color="gender",
title="Gender Earnings Disparity",
labels={"salary":"Annual Salary (in thousands)"} # customize axis label
)
fig.show()
```
```python
import plotly.graph_objects as go
schools = ["Brown", "NYU", "Notre Dame", "Cornell", "Tufts", "Yale",
"Dartmouth", "Chicago", "Columbia", "Duke", "Georgetown",
"Princeton", "U.Penn", "Stanford", "MIT", "Harvard"]
fig = go.Figure()
fig.add_trace(go.Scatter(
x=[72, 67, 73, 80, 76, 79, 84, 78, 86, 93, 94, 90, 92, 96, 94, 112],
y=schools,
marker=dict(color="crimson", size=12),
mode="markers",
name="Women",
))
fig.add_trace(go.Scatter(
x=[92, 94, 100, 107, 112, 114, 114, 118, 119, 124, 131, 137, 141, 151, 152, 165],
y=schools,
marker=dict(color="gold", size=12),
mode="markers",
name="Men",
))
fig.update_layout(title="Gender Earnings Disparity",
xaxis_title="Annual Salary (in thousands)",
yaxis_title="School")
fig.show()
```
#### Styled Categorical Dot Plot
```python
import plotly.graph_objects as go
country = ['Switzerland (2011)', 'Chile (2013)', 'Japan (2014)',
'United States (2012)', 'Slovenia (2014)', 'Canada (2011)',
'Poland (2010)', 'Estonia (2015)', 'Luxembourg (2013)', 'Portugal (2011)']
voting_pop = [40, 45.7, 52, 53.6, 54.1, 54.2, 54.5, 54.7, 55.1, 56.6]
reg_voters = [49.1, 42, 52.7, 84.3, 51.7, 61.1, 55.3, 64.2, 91.1, 58.9]
fig = go.Figure()
fig.add_trace(go.Scatter(
x=voting_pop,
y=country,
name='Percent of estimated voting age population',
marker=dict(
color='rgba(156, 165, 196, 0.95)',
line_color='rgba(156, 165, 196, 1.0)',
)
))
fig.add_trace(go.Scatter(
x=reg_voters, y=country,
name='Percent of estimated registered voters',
marker=dict(
color='rgba(204, 204, 204, 0.95)',
line_color='rgba(217, 217, 217, 1.0)'
)
))
fig.update_traces(mode='markers', marker=dict(line_width=1, symbol='circle', size=16))
fig.update_layout(
title="Votes cast for ten lowest voting age population in OECD countries",
xaxis=dict(
showgrid=False,
showline=True,
linecolor='rgb(102, 102, 102)',
tickfont_color='rgb(102, 102, 102)',
showticklabels=True,
dtick=10,
ticks='outside',
tickcolor='rgb(102, 102, 102)',
),
margin=dict(l=140, r=40, b=50, t=80),
legend=dict(
font_size=10,
yanchor='middle',
xanchor='right',
),
width=800,
height=600,
paper_bgcolor='white',
plot_bgcolor='white',
hovermode='closest',
)
fig.show()
```
### Reference
See https://plotly.com/python/reference/scatter/ for more information and chart attribute options!
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@doc@python@dot-plots.md@.PATH_END.py
|
{
"filename": "transit_adjust_setup_example.py",
"repo_name": "franpoz/SHERLOCK",
"repo_path": "SHERLOCK_extracted/SHERLOCK-master/examples/programmatical/transit_adjust_setup_example.py",
"type": "Python"
}
|
from contextlib import contextmanager
from timeit import default_timer
from sherlockpipe.search.sherlock import Sherlock
from lcbuilder.objectinfo.MissionObjectInfo import MissionObjectInfo
from sherlockpipe.search.sherlock_target import SherlockTarget
@contextmanager
def elapsed_timer():
    """Yield a zero-argument callable returning the elapsed time as a string.

    While the with-block is running, the callable reports seconds since
    entry; once the block exits, it keeps returning the final, frozen
    duration.
    """
    start = default_timer()

    def running():
        return str(default_timer() - start)

    reader = running
    yield lambda: reader()
    # Freeze the reading at exit time; the yielded callable picks up the
    # rebinding through its closure.
    stop = default_timer()
    reader = lambda: str(stop - start)
with elapsed_timer() as elapsed:
    # We will use only one object id so we can explain better the detrend configs that the coder can select
    # We will:
    # 1 Set the maximum number of runs to be executed.
    # 2 Select the period protect value, which limits the minimum detrend window length
    # 3 Select the min period for a transit to be fit.
    # 4 Select the max period for a transit to be fit.
    # 5 Select the binning for RMS calculation
    # 6 Select the number of CPU cores to be used for the transit search.
    # 7 Select the min SNR, the min SDE and the max FAP to stop the runs execution for each object.
    # 8 Select the found transits masking method. We use subtract here as example, but it is discouraged.
    # 9 Select the best signal algorithm, which provides a different implementation to decide which of the detrend
    # signals is the stronger one to be selected.
    # 10 Set the strength of the quorum algorithm votes, which makes every vote that is found to increase the SNR by
    # a factor of 1.2 for our selection.
    sherlock = Sherlock([SherlockTarget(MissionObjectInfo(mission_id="TIC 181804752", sectors='all'),
                                        max_runs=10, period_protect=12, period_min=1, period_max=10, bin_minutes=20,
                                        cpu_cores=3, snr_min=6, sde_min=6, mask_mode="subtract",
                                        best_signal_algorithm='quorum', quorum_strength=1.2)])\
        .run()
    # elapsed() reports seconds since the context manager was entered (string).
    print("Analysis took " + elapsed() + "s")
|
franpozREPO_NAMESHERLOCKPATH_START.@SHERLOCK_extracted@SHERLOCK-master@examples@programmatical@transit_adjust_setup_example.py@.PATH_END.py
|
{
"filename": "11283_pils_totlen.py",
"repo_name": "shreeyesh-biswal/Rvalue_3D",
"repo_path": "Rvalue_3D_extracted/Rvalue_3D-main/Codes/X-class/AR_11283/11283_pils_totlen.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 10 11:55:39 2022
@author: shreeyeshbiswal
"""
import os
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
# Active-region identifier and data directory layout.
AR = "11283"
core_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/"
base_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR
dir_list = sorted(os.listdir(base_dir))  # one sub-directory per time step
n = len(dir_list)  # number of time steps
m = 10 # values per file
# Per-timestep, per-height matrices filled from the PIL data files below.
tot_len_matrix = np.zeros(shape=(n,m))
max_len_matrix = np.zeros(shape=(n,m))
abs_flx_matrix = np.zeros(shape=(n,m))
index = np.arange(0,n)
height = np.arange(0,m)*0.36  # altitudes in 0.36 Mm steps (matches panel labels below)
P1 = 'Total Length of PILs (Mm); AR '+ AR  # figure title
# Colorbar configuration shared by all panels.
colorbarticks = [0, 200, 400, 600, 800, 1000]
cbar_min = 0
cbar_max = 1000
# Flare markers, in x-axis units -- presumably hours after the first frame; verify.
flare_time1 = 142.2
flare_time2 = 166.53
# Read the PIL measurements for every time step; missing data becomes NaN.
for i in range(0,n):
    Time_tag = dir_list[i]
    Time = Time_tag[0:19]  # timestamp portion of the directory name
    Hour = Time[11:13]
    print(Time)
    dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR + "/" + Time_tag
    os.chdir(dir)
    # the if-else statement takes care of missing data
    if len(os.listdir(dir)) != 0:
        # columns 0/1/2 feed total length, max length and absolute flux
        mpils = np.loadtxt("PF_ext_mpils_" + Time + ".dat")
        print(np.shape(mpils))
        tot_len_matrix[i,:] = mpils[:,0]
        max_len_matrix[i,:] = mpils[:,1]
        abs_flx_matrix[i,:] = mpils[:,2]
        print(Hour)
    else:
        tot_len_matrix[i,:] = np.nan
        max_len_matrix[i,:] = np.nan
        abs_flx_matrix[i,:] = np.nan
        print("Empty directory")
os.chdir(core_dir)
# Build a 10-panel vertical stack (one panel per altitude), colored by total PIL length.
x = np.arange(0,n)
figure(figsize=(10,10), dpi=100000)  # NOTE(review): dpi=100000 looks like a typo; this figure is replaced below -- confirm
figure, axs = plt.subplots(10)  # rebinds `figure` from the pyplot function to the Figure object
figure.set_figheight(15)
figure.set_figwidth(9)
cm = plt.cm.get_cmap('afmhot')
mpl.rc('xtick', labelsize=13)
# Plot
# Keep a handle to one scatter so the shared colorbar can be attached to it later.
sc = axs[0].scatter(x, tot_len_matrix[:,9], c = tot_len_matrix[:,9], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
# Panels top-to-bottom correspond to columns 9 down to 0 (highest altitude first).
for i in range(0,m):
    axs[i].scatter(x, tot_len_matrix[:,9-i], c = tot_len_matrix[:,9-i], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
for i in range(0,m):
    axs[i].set_ylim([cbar_min, cbar_max])
# Only the bottom panel shows x tick labels (one tick per 24 frames).
axs[9].tick_params(axis='x', labelsize=16)
axs[9].set_xticks(np.arange(0,n,24))
# Hide the y ticks of individual boxes
for i in range(0,m):
    axs[i].set_yticks([])
# Label each panel with its altitude
heightfont = 16
for i in range(0,m):
    max_alt = (m-1)*0.36
    altitude = max_alt-(i*0.36)  # top panel is the highest altitude
    alt_str = "{:.2f}".format(altitude)
    axs[i].set_ylabel(alt_str + ' ', fontsize = heightfont, rotation = 0)
# Show flare occurrence in dotted lines
for i in range(0,m):
    axs[i].axvline(x = flare_time1, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.40)
    axs[i].axvline(x = flare_time2, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.40)
    # invisible guide line at x=204 (alpha=0) -- presumably to pin the x range; verify
    axs[i].axvline(x = 204, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.00)
# Orient the text
st = dir_list[0]
start_time = st[0:4] + '/' + st[5:7] + '/' + st[8:10] + '/' + st[11:13] + ':' + st[14:16]
axs[0].text(-10, (cbar_max + (0.35*(cbar_max - cbar_min))), P1, fontsize=23)
axs[5].text(-54, cbar_min + 0.5*(cbar_max - cbar_min), 'Height (Mm)', rotation = 90, fontsize=18)
axs[9].text(19, (cbar_min - (0.65*(cbar_max - cbar_min))), 'Time after ' + start_time + ' (hrs)', rotation = 0, fontsize=18)
# Shared colorbar on the right-hand side of the stack.
figure.subplots_adjust(right=0.8)
cbar_ax = figure.add_axes([0.85, 0.15, 0.05, 0.7])
cbar_ax.tick_params(labelsize=16)
figure.colorbar(sc, cax=cbar_ax, ticks=colorbarticks)
plt.subplots_adjust(wspace=0.5, hspace=0)
plt.show()
mpl.rcParams.update(mpl.rcParamsDefault)  # undo the global rc changes made above
|
shreeyesh-biswalREPO_NAMERvalue_3DPATH_START.@Rvalue_3D_extracted@Rvalue_3D-main@Codes@X-class@AR_11283@11283_pils_totlen.py@.PATH_END.py
|
{
"filename": "_autocolorscale.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choroplethmap/_autocolorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `autocolorscale` property of `choroplethmap` traces."""

    def __init__(
        self, plotly_name="autocolorscale", parent_name="choroplethmap", **kwargs
    ):
        # Install defaults only where the caller did not supply an override.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("implied_edits", {})
        super(AutocolorscaleValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choroplethmap@_autocolorscale.py@.PATH_END.py
|
{
"filename": "adam_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/optimizers/adam_test.py",
"type": "Python"
}
|
import numpy as np
import pytest
import keras
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adam import Adam
class AdamTest(testing.TestCase):
    """Unit tests for the Adam optimizer: config round-trip, update math,
    weight decay exclusion, gradient clipping, EMA and sparse gradients."""

    def test_config(self):
        # Round-trip the optimizer through (de)serialization.
        optimizer = Adam(
            learning_rate=0.5,
            beta_1=0.5,
            beta_2=0.67,
            epsilon=1e-5,
            amsgrad=True,
        )
        self.run_class_serialization_test(optimizer)

    def test_single_step(self):
        # On the very first step every weight moves by ~learning_rate.
        optimizer = Adam(learning_rate=0.5)
        grads = ops.array([1.0, 6.0, 7.0, 2.0])
        vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
        optimizer.apply_gradients(zip([grads], [vars]))
        self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)

    def test_weight_decay(self):
        # Checks decoupled weight decay plus both exclusion mechanisms
        # (exclude by variable name, exclude by variable list).
        grads, var1, var2, var3 = (
            ops.zeros(()),
            backend.Variable(2.0),
            backend.Variable(2.0, name="exclude"),
            backend.Variable(2.0),
        )
        optimizer_1 = Adam(learning_rate=1.0, weight_decay=0.004)
        optimizer_1.apply_gradients(zip([grads], [var1]))

        optimizer_2 = Adam(learning_rate=1.0, weight_decay=0.004)
        optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
        optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))

        optimizer_3 = Adam(learning_rate=1.0, weight_decay=0.004)
        optimizer_3.exclude_from_weight_decay(var_list=[var3])
        optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))

        # var1 was decayed three times (once per optimizer): 2 * 0.996**3;
        # the excluded variables are untouched.
        self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
        self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
        self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)

    def test_correctness_with_golden(self):
        # Five update steps compared against precomputed golden values.
        optimizer = Adam(amsgrad=True)

        x = backend.Variable(np.ones([10]))
        grads = ops.arange(0.1, 1.1, 0.1)
        first_grads = ops.full((10,), 0.01)

        golden = np.tile(
            [[0.999], [0.9982], [0.9974], [0.9965], [0.9955]], (1, 10)
        )

        optimizer.apply_gradients(zip([first_grads], [x]))
        for i in range(5):
            self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
            optimizer.apply_gradients(zip([grads], [x]))

    def test_clip_norm(self):
        # A gradient of norm 100*sqrt(2) is rescaled to unit norm.
        optimizer = Adam(clipnorm=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])

    def test_clip_value(self):
        # Element-wise clipping to [-1, 1].
        optimizer = Adam(clipvalue=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [1.0, 1.0])

    @pytest.mark.requires_trainable_backend
    def test_ema(self):
        # TODO: test correctness
        # Currently only checks that fitting with use_ema=True runs.
        model = keras.Sequential([keras.layers.Dense(10)])
        model.compile(optimizer=Adam(use_ema=True), loss="mse")
        x = keras.ops.zeros((1, 5))
        y = keras.ops.zeros((1, 10))
        model.fit(x, y)

    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="The IndexedSlices test can only run with TF backend.",
    )
    def test_clipnorm_indexed_slices(self):
        # Regression test for clipnorm with sparse (IndexedSlices) gradients:
        # https://github.com/keras-team/keras/issues/18985
        model = keras.Sequential(
            [
                keras.layers.Embedding(10, 4),
                keras.layers.Flatten(),
                keras.layers.Dense(2),
            ]
        )
        model.compile(optimizer=Adam(clipnorm=100), loss="mse")
        x = keras.ops.ones((8, 5))
        y = keras.ops.zeros((8, 2))
        model.fit(x, y, verbose=0)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@optimizers@adam_test.py@.PATH_END.py
|
{
"filename": "_yref.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/heatmap/colorbar/_yref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YrefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for `heatmap.colorbar.yref` ("container" or "paper")."""

    def __init__(self, plotly_name="yref", parent_name="heatmap.colorbar", **kwargs):
        # Defaults are only applied when the caller did not override them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("values", ["container", "paper"])
        super(YrefValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@heatmap@colorbar@_yref.py@.PATH_END.py
|
{
"filename": "grid_finder.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_finder.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.transforms import Bbox
from . import clip_path
clip_line_to_rect = clip_path.clip_line_to_rect
import matplotlib.ticker as mticker
from matplotlib.transforms import Transform
# extremes finder
class ExtremeFinderSimple(object):
    """Find world-coordinate extremes of a rectangle by sampling an nx-by-ny grid."""

    def __init__(self, nx, ny):
        # number of sample points along each image axis
        self.nx, self.ny = nx, ny

    def __call__(self, transform_xy, x1, y1, x2, y2):
        """
        get extreme values.

        x1, y1, x2, y2 in image coordinates (0-based)
        nx, ny : number of division in each axis
        """
        xs = np.linspace(x1, x2, self.nx)
        ys = np.linspace(y1, y2, self.ny)
        gx, gy = np.meshgrid(xs, ys)
        lon, lat = transform_xy(np.ravel(gx), np.ravel(gy))
        return self._add_pad(lon.min(), lon.max(), lat.min(), lat.max())

    def _add_pad(self, lon_min, lon_max, lat_min, lat_max):
        """Pad the extremes by one grid cell on every side.

        A small amount of padding is added because the current clipping
        algorithms seem to fail when a gridline ends exactly at the bbox
        boundary.
        """
        dlon = (lon_max - lon_min) / self.nx
        dlat = (lat_max - lat_min) / self.ny
        return lon_min - dlon, lon_max + dlon, lat_min - dlat, lat_max + dlat
class GridFinderBase(object):
    """Compute curvilinear grid lines, per-side ticks and labels for a frame.

    Subclasses must provide ``transform_xy`` / ``inv_transform_xy``
    (typically installed via :meth:`update_transform`).
    """

    def __init__(self,
                 extreme_finder,
                 grid_locator1,
                 grid_locator2,
                 tick_formatter1=None,
                 tick_formatter2=None):
        """
        the transData of the axes to the world coordinate.
        locator1, locator2 : grid locator for 1st and 2nd axis.
        Derived must define "transform_xy, inv_transform_xy"
        (may use update_transform)
        """
        super(GridFinderBase, self).__init__()
        self.extreme_finder = extreme_finder
        self.grid_locator1 = grid_locator1
        self.grid_locator2 = grid_locator2
        self.tick_formatter1 = tick_formatter1
        self.tick_formatter2 = tick_formatter2

    def get_grid_info(self,
                      x1, y1, x2, y2):
        """Return a dict of grid lines, ticks and labels for the given rectangle.

        lon_values, lat_values : list of grid values. if integer is given,
                           rough number of grids in each direction.
        """
        # World-coordinate extent covered by the (x1, y1)-(x2, y2) rectangle.
        extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2)

        # min & max range of lat (or lon) for each grid line will be drawn.
        # i.e., gridline of lon=0 will be drawn from lat_min to lat_max.

        lon_min, lon_max, lat_min, lat_max = extremes
        # Locators return (levels, count, factor); a factor of None means unscaled.
        lon_levs, lon_n, lon_factor = \
            self.grid_locator1(lon_min, lon_max)
        lat_levs, lat_n, lat_factor = \
            self.grid_locator2(lat_min, lat_max)

        # Undo the locator's scale factor to get true data-space values.
        if lon_factor is None:
            lon_values = np.asarray(lon_levs[:lon_n])
        else:
            lon_values = np.asarray(lon_levs[:lon_n]/lon_factor)
        if lat_factor is None:
            lat_values = np.asarray(lat_levs[:lat_n])
        else:
            lat_values = np.asarray(lat_levs[:lat_n]/lat_factor)

        lon_lines, lat_lines = self._get_raw_grid_lines(lon_values,
                                                        lat_values,
                                                        lon_min, lon_max,
                                                        lat_min, lat_max)

        # Clip against a very slightly inflated bbox so lines that end exactly
        # on the frame are not lost by the clipping routine.
        ddx = (x2-x1)*1.e-10
        ddy = (y2-y1)*1.e-10
        bb = Bbox.from_extents(x1-ddx, y1-ddy, x2+ddx, y2+ddy)

        grid_info = {}
        grid_info["extremes"] = extremes
        grid_info["lon_lines"] = lon_lines
        grid_info["lat_lines"] = lat_lines

        grid_info["lon"] = self._clip_grid_lines_and_find_ticks(lon_lines,
                                                                lon_values,
                                                                lon_levs,
                                                                bb)
        grid_info["lat"] = self._clip_grid_lines_and_find_ticks(lat_lines,
                                                                lat_values,
                                                                lat_levs,
                                                                bb)

        # Format tick labels for every side of the frame, per axis.
        tck_labels = grid_info["lon"]["tick_labels"] = dict()
        for direction in ["left", "bottom", "right", "top"]:
            levs = grid_info["lon"]["tick_levels"][direction]
            tck_labels[direction] = self.tick_formatter1(direction,
                                                         lon_factor, levs)

        tck_labels = grid_info["lat"]["tick_labels"] = dict()
        for direction in ["left", "bottom", "right", "top"]:
            levs = grid_info["lat"]["tick_levels"][direction]
            tck_labels[direction] = self.tick_formatter2(direction,
                                                         lat_factor, levs)

        return grid_info

    def _get_raw_grid_lines(self,
                            lon_values, lat_values,
                            lon_min, lon_max, lat_min, lat_max):
        """Sample each grid line with 100 points, transformed to image space."""
        lons_i = np.linspace(lon_min, lon_max, 100)  # for interpolation
        lats_i = np.linspace(lat_min, lat_max, 100)

        # A lon-line holds lon fixed and sweeps lat, and vice versa.
        lon_lines = [self.transform_xy(np.zeros_like(lats_i) + lon, lats_i)
                     for lon in lon_values]
        lat_lines = [self.transform_xy(lons_i, np.zeros_like(lons_i) + lat)
                     for lat in lat_values]

        return lon_lines, lat_lines

    def _clip_grid_lines_and_find_ticks(self, lines, values, levs, bb):
        """Clip *lines* to *bb*; collect tick levels/locations per frame side."""
        gi = dict()
        gi["values"] = []
        gi["levels"] = []
        gi["tick_levels"] = dict(left=[], bottom=[], right=[], top=[])
        gi["tick_locs"] = dict(left=[], bottom=[], right=[], top=[])
        gi["lines"] = []

        tck_levels = gi["tick_levels"]
        tck_locs = gi["tick_locs"]
        for (lx, ly), v, lev in zip(lines, values, levs):
            xy, tcks = clip_line_to_rect(lx, ly, bb)
            if not xy:
                # The line lies entirely outside the bbox.
                continue
            gi["levels"].append(v)
            gi["lines"].append(xy)

            # clip_line_to_rect reports boundary crossings per side, in order.
            for tck, direction in zip(tcks,
                                      ["left", "bottom", "right", "top"]):
                for t in tck:
                    tck_levels[direction].append(lev)
                    tck_locs[direction].append(t)

        return gi

    def update_transform(self, aux_trans):
        # Accept either a matplotlib Transform or a (forward, inverse) pair
        # of callables mapping (x, y) arrays to (lon, lat).
        if isinstance(aux_trans, Transform):
            def transform_xy(x, y):
                x, y = np.asarray(x), np.asarray(y)
                ll1 = np.concatenate((x[:,np.newaxis], y[:,np.newaxis]), 1)
                ll2 = aux_trans.transform(ll1)
                lon, lat = ll2[:,0], ll2[:,1]
                return lon, lat

            def inv_transform_xy(x, y):
                x, y = np.asarray(x), np.asarray(y)
                ll1 = np.concatenate((x[:,np.newaxis], y[:,np.newaxis]), 1)
                ll2 = aux_trans.inverted().transform(ll1)
                lon, lat = ll2[:,0], ll2[:,1]
                return lon, lat
        else:
            transform_xy, inv_transform_xy = aux_trans

        self.transform_xy = transform_xy
        self.inv_transform_xy = inv_transform_xy

    def update(self, **kw):
        """Update any of the five helper components by keyword."""
        for k in kw:
            if k in ["extreme_finder",
                     "grid_locator1",
                     "grid_locator2",
                     "tick_formatter1",
                     "tick_formatter2"]:
                setattr(self, k, kw[k])
            else:
                raise ValueError("unknown update property '%s'" % k)
class GridFinder(GridFinderBase):
    """GridFinderBase pre-wired with sensible default helper components."""

    def __init__(self,
                 transform,
                 extreme_finder=None,
                 grid_locator1=None,
                 grid_locator2=None,
                 tick_formatter1=None,
                 tick_formatter2=None):
        """
        transform : transform from the image coordinate (which will be
        the transData of the axes) to the world coordinate, or a pair
        (transform_xy, inv_transform_xy) of callables.

        locator1, locator2 : grid locator for 1st and 2nd axis.
        """
        # Substitute a default component wherever the caller passed None.
        extreme_finder = (ExtremeFinderSimple(20, 20)
                          if extreme_finder is None else extreme_finder)
        grid_locator1 = MaxNLocator() if grid_locator1 is None else grid_locator1
        grid_locator2 = MaxNLocator() if grid_locator2 is None else grid_locator2
        tick_formatter1 = (FormatterPrettyPrint()
                           if tick_formatter1 is None else tick_formatter1)
        tick_formatter2 = (FormatterPrettyPrint()
                           if tick_formatter2 is None else tick_formatter2)
        super(GridFinder, self).__init__(extreme_finder,
                                         grid_locator1,
                                         grid_locator2,
                                         tick_formatter1,
                                         tick_formatter2)
        self.update_transform(transform)
class MaxNLocator(mticker.MaxNLocator):
    """mticker.MaxNLocator adapted to the (locs, n_levels, factor) protocol."""

    def __init__(self, nbins=10, steps=None,
                 trim=True,
                 integer=False,
                 symmetric=False,
                 prune=None):
        # trim argument has no effect. It has been left for API compatibility
        mticker.MaxNLocator.__init__(self, nbins, steps=steps,
                                     integer=integer,
                                     symmetric=symmetric, prune=prune)
        # The dummy axis lets set_bounds/__call__ work without a real Axis.
        self.create_dummy_axis()
        self._factor = None  # optional scale applied to the view interval

    def __call__(self, v1, v2):
        # Locate ticks inside the (optionally scaled) interval and report the
        # factor that was applied, so callers can undo the scaling.
        if self._factor is not None:
            self.set_bounds(v1*self._factor, v2*self._factor)
            locs = mticker.MaxNLocator.__call__(self)
            return np.array(locs), len(locs), self._factor
        else:
            self.set_bounds(v1, v2)
            locs = mticker.MaxNLocator.__call__(self)
            return np.array(locs), len(locs), None

    def set_factor(self, f):
        self._factor = f
class FixedLocator(object):
    """Grid locator returning a fixed, user-supplied set of tick values."""

    def __init__(self, locs):
        self._locs = locs
        self._factor = None  # optional scale applied to the query bounds

    def __call__(self, v1, v2):
        # Scale the query interval if a factor is set, then normalize its order.
        factor = self._factor
        if factor is not None:
            v1, v2 = v1 * factor, v2 * factor
        lo, hi = min(v1, v2), max(v1, v2)
        # Keep only the stored locations that fall inside [lo, hi].
        locs = np.array([loc for loc in self._locs if lo <= loc <= hi])
        return locs, len(locs), factor

    def set_factor(self, f):
        self._factor = f
# Tick Formatter
class FormatterPrettyPrint(object):
    """Format tick values using a ScalarFormatter with offset notation disabled."""

    def __init__(self, useMathText=True):
        self._fmt = mticker.ScalarFormatter(
            useMathText=useMathText, useOffset=False)
        # Dummy axis lets the formatter run without being attached to an Axis.
        self._fmt.create_dummy_axis()
        self._ignore_factor = True  # always True here, so `factor` is ignored

    def __call__(self, direction, factor, values):
        # `direction` is unused; present to satisfy the tick-formatter protocol.
        if not self._ignore_factor:
            if factor is None:
                factor = 1.
            values = [v/factor for v in values]
        #values = [v for v in values]
        self._fmt.set_locs(values)
        return [self._fmt(v) for v in values]
class DictFormatter(object):
    """Tick formatter that looks labels up in a dict, with an optional fallback.

    format_dict : dictionary for format strings to be used.
    formatter : fall-back formatter
    """

    def __init__(self, format_dict, formatter=None):
        super(DictFormatter, self).__init__()
        self._format_dict = format_dict
        self._fallback_formatter = formatter

    def __call__(self, direction, factor, values):
        """Return one label per value.

        factor is ignored if value is found in the dictionary
        """
        fallback = self._fallback_formatter
        fallback_strings = (fallback(direction, factor, values)
                            if fallback else [""] * len(values))
        return [self._format_dict.get(value, alt)
                for value, alt in zip(values, fallback_strings)]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py2@mpl_toolkits@axisartist@grid_finder.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "j-faria/kima",
"repo_path": "kima_extracted/kima-master/examples/CoRoT7/README.md",
"type": "Markdown"
}
|
This example uses HARPS radial velocity measurements of the active star CoRoT-7.
It reproduces the analysis done by [Faria et al.
(2016)](https://www.aanda.org/articles/aa/abs/2016/04/aa27899-15/aa27899-15.html),
where we successfully recovered both the orbits of CoRoT-7b and CoRot-7c, and
the activity-induced signal.
In the `kima_setup.ccp` file, we set a GP model with hyperpriors and no linear
trend. The number of planets is free, with a uniform prior between 0 and 5.
> **note:** in the paper we considered a prior for Np between 0 and 10.
To compile and run, type
```
kima-run
```
This example takes a considerable time to run because of the GP model. The
analysis in the paper took around 4 days on a standard desktop computer.
|
j-fariaREPO_NAMEkimaPATH_START.@kima_extracted@kima-master@examples@CoRoT7@README.md@.PATH_END.py
|
{
"filename": "_hovertemplate.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/surface/_hovertemplate.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `hovertemplate` property of `surface` traces."""

    def __init__(self, plotly_name="hovertemplate", parent_name="surface", **kwargs):
        # Apply defaults only where the caller has not overridden them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        super(HovertemplateValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@surface@_hovertemplate.py@.PATH_END.py
|
{
"filename": "test_openai.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/adapters/test_openai.py",
"type": "Python"
}
|
from typing import Any
from langchain_community.adapters import openai as lcopenai
def _test_no_stream(**kwargs: Any) -> None:
    """Compare a non-streaming completion from openai with the langchain adapter."""
    import openai

    native = openai.ChatCompletion.create(**kwargs)  # type: ignore[attr-defined]
    adapted = lcopenai.ChatCompletion.create(**kwargs)
    if isinstance(adapted, dict) and isinstance(native, dict):
        # The adapter yields plain dicts; the native message must be converted first.
        native_message = native["choices"][0]["message"].to_dict_recursive()
        assert native_message == adapted["choices"][0]["message"]
def _test_stream(**kwargs: Any) -> None:
    """Compare streaming deltas from openai with the langchain adapter."""
    import openai

    native_deltas = [
        chunk["choices"][0]["delta"].to_dict_recursive()
        for chunk in openai.ChatCompletion.create(**kwargs)  # type: ignore[attr-defined]
    ]
    adapted_deltas = [
        chunk["choices"][0]["delta"]
        for chunk in lcopenai.ChatCompletion.create(**kwargs)
    ]
    assert native_deltas == adapted_deltas
async def _test_async(**kwargs: Any) -> None:
    """Compare an async non-streaming completion from openai with the adapter."""
    import openai

    native = await openai.ChatCompletion.acreate(**kwargs)  # type: ignore[attr-defined]
    adapted = await lcopenai.ChatCompletion.acreate(**kwargs)
    if isinstance(adapted, dict) and isinstance(native, dict):
        # The adapter yields plain dicts; the native message must be converted first.
        native_message = native["choices"][0]["message"].to_dict_recursive()
        assert native_message == adapted["choices"][0]["message"]
async def _test_astream(**kwargs: Any) -> None:
    """Compare async streaming deltas from openai with the langchain adapter."""
    import openai

    native_deltas = [
        chunk["choices"][0]["delta"].to_dict_recursive()
        async for chunk in await openai.ChatCompletion.acreate(**kwargs)  # type: ignore[attr-defined]
    ]
    adapted_deltas = [
        chunk["choices"][0]["delta"]
        async for chunk in await lcopenai.ChatCompletion.acreate(**kwargs)
    ]
    assert native_deltas == adapted_deltas
# OpenAI function-calling schema used by the function-calling tests below.
FUNCTIONS = [
    {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["location"],
        },
    }
]
async def _test_module(**kwargs: Any) -> None:
    """Run sync/async and streaming/non-streaming comparisons for one payload."""
    _test_no_stream(**kwargs)
    await _test_async(**kwargs)
    _test_stream(stream=True, **kwargs)
    await _test_astream(stream=True, **kwargs)
async def test_normal_call() -> None:
    """Plain chat completion round-trip through the adapter."""
    await _test_module(
        messages=[{"role": "user", "content": "hi"}],
        model="gpt-3.5-turbo",
        temperature=0,
    )
async def test_function_calling() -> None:
    """Prompt that should trigger a function call through the adapter."""
    await _test_module(
        messages=[{"role": "user", "content": "whats the weather in boston"}],
        model="gpt-3.5-turbo",
        functions=FUNCTIONS,
        temperature=0,
    )
async def test_answer_with_function_calling() -> None:
    """Prompt mixing a text answer with a function call through the adapter."""
    await _test_module(
        messages=[
            {"role": "user", "content": "say hi, then whats the weather in boston"}
        ],
        model="gpt-3.5-turbo",
        functions=FUNCTIONS,
        temperature=0,
    )
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@adapters@test_openai.py@.PATH_END.py
|
{
"filename": "sst2.ipynb",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/examples/sst2/sst2.ipynb",
"type": "Jupyter Notebook"
}
|
# Flax SST-2 Example
<a href="https://colab.research.google.com/github/google/flax/blob/main/examples/sst2/sst2.ipynb" ><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Demonstration notebook for
https://github.com/google/flax/tree/main/examples/sst2
**Before you start:** Select Runtime -> Change runtime type -> GPU.
The **Flax Notebook Workflow**:
1. Run the entire notebook end-to-end and check out the outputs.
- This will open Python files in the right-hand editor!
- You'll be able to interactively explore metrics in TensorBoard.
2. Change `config` and train for different hyperparameters. Check out the
updated TensorBoard plots.
3. Update the code in `train.py`. Thanks to `%autoreload`, any changes you
make in the file will automatically appear in the notebook. Some ideas to
get you started:
- Change the model.
- Log some per-batch metrics during training.
- Add new hyperparameters to `configs/default.py` and use them in
`train.py`.
4. At any time, feel free to paste code from `train.py` into the notebook
and modify it directly there!
## Setup
```
example_directory = 'examples/sst2'
editor_relpaths = ('configs/default.py', 'train.py', 'models.py')
```
```
# (If you run this code in Jupyter[lab], then you're already in the
# example directory and nothing needs to be done.)
#@markdown **Fetch newest Flax, copy example code**
#@markdown
#@markdown **If you select no** below, then the files will be stored on the
#@markdown *ephemeral* Colab VM. **After some time of inactivity, this VM will
#@markdown be restarted and any changes are lost**.
#@markdown
#@markdown **If you select yes** below, then you will be asked for your
#@markdown credentials to mount your personal Google Drive. In this case, all
#@markdown changes you make will be *persisted*, and even if you re-run the
#@markdown Colab later on, the files will still be the same (you can of course
#@markdown remove directories inside your Drive's `flax/` root if you want to
#@markdown manually revert these files).
if 'google.colab' in str(get_ipython()):
import os
os.chdir('/content')
# Download Flax repo from Github.
if not os.path.isdir('flaxrepo'):
!git clone --depth=1 https://github.com/google/flax flaxrepo
# Copy example files & change directory.
mount_gdrive = 'no' #@param ['yes', 'no']
if mount_gdrive == 'yes':
DISCLAIMER = 'Note: Editing in your Google Drive, changes will persist.'
from google.colab import drive
drive.mount('/content/gdrive')
example_root_path = f'/content/gdrive/My Drive/flax/{example_directory}'
else:
DISCLAIMER = 'WARNING: Editing in VM - changes lost after reboot!!'
example_root_path = f'/content/{example_directory}'
from IPython import display
display.display(display.HTML(
f'<h1 style="color:red;" class="blink">{DISCLAIMER}</h1>'))
if not os.path.isdir(example_root_path):
os.makedirs(example_root_path)
!cp -r flaxrepo/$example_directory/* "$example_root_path"
os.chdir(example_root_path)
from google.colab import files
for relpath in editor_relpaths:
s = open(f'{example_root_path}/{relpath}').read()
open(f'{example_root_path}/{relpath}', 'w').write(
f'## {DISCLAIMER}\n' + '#' * (len(DISCLAIMER) + 3) + '\n\n' + s)
files.view(f'{example_root_path}/{relpath}')
```
```
# Note: In Colab, above cell changed the working directory.
!pwd
```
```
# Install SST-2 dependencies.
!pip install -q -r requirements.txt
```
## Imports / Helpers
```
# If you want to use TPU instead of GPU, you need to run this to make it work.
try:
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
except KeyError:
print('\n### NO TPU CONNECTED - USING CPU or GPU ###\n')
import os
os.environ['XLA_FLAGS'] = '--xla_force_host_platform_device_count=8'
jax.devices()
```
```
from absl import logging
import flax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import time
logging.set_verbosity(logging.INFO)
# Make sure the GPU is for JAX, not for TF.
tf.config.experimental.set_visible_devices([], 'GPU')
```
```
# Local imports from current directory - auto reload.
# Any changes you make to train.py will appear automatically.
%load_ext autoreload
%autoreload 2
import train
import models
import vocabulary
import input_pipeline
from configs import default as config_lib
config = config_lib.get_config()
```
## Dataset
```
# Get datasets.
# If you get an error you need to install tensorflow_datasets from Github.
train_dataset = input_pipeline.TextDataset(split='train')
eval_dataset = input_pipeline.TextDataset(split='validation')
```
## Training
```
# Get a live update during training - use the "refresh" button!
# (In Jupyter[lab] start "tensorboard" in the local directory instead.)
if 'google.colab' in str(get_ipython()):
%load_ext tensorboard
%tensorboard --logdir=.
```
```
config.num_epochs = 10
model_name = 'bilstm'
start_time = time.time()
optimizer = train.train_and_evaluate(config, workdir=f'./models/{model_name}')
logging.info('Walltime: %f s', time.time() - start_time)
```
```
if 'google.colab' in str(get_ipython()):
#@markdown You can upload the training results directly to https://tensorboard.dev
#@markdown
  #@markdown Note that everybody with the link will be able to see the data.
upload_data = 'yes' #@param ['yes', 'no']
if upload_data == 'yes':
!tensorboard dev upload --one_shot --logdir ./models --name 'Flax examples/mnist'
```
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@examples@sst2@sst2.ipynb@.PATH_END.py
|
{
"filename": "light2mass.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/Analysis/light2mass.py",
"type": "Python"
}
|
import numpy as np
from lenstronomy.Util import util
from lenstronomy.LightModel.light_model import LightModel
__all__ = ["light2mass_interpol"]
def light2mass_interpol(
    lens_light_model_list,
    kwargs_lens_light,
    numPix=100,
    deltaPix=0.05,
    subgrid_res=5,
    center_x=0,
    center_y=0,
):
    """Takes a lens light model and turns it numerically in a lens model (with all
    lensmodel quantities computed on a grid). Then provides an interpolated grid for the
    quantities.

    :param lens_light_model_list: list of lens light model names understood by LightModel
    :param kwargs_lens_light: lens light keyword argument list
    :param numPix: number of pixels per axis for the return interpolation
    :param deltaPix: interpolation/pixel size
    :param subgrid_res: subgrid resolution used for the numerical integrals
    :param center_x: center of the grid
    :param center_y: center of the grid
    :return: keyword arguments for 'INTERPOL' lens model
    """
    # make super-sampled grid
    # NOTE(review): the super-sampled grid spans numPix * 5 pixels regardless of
    # subgrid_res -- confirm whether the hard-coded 5 should be subgrid_res.
    x_grid_sub, y_grid_sub = util.make_grid(
        numPix=numPix * 5, deltapix=deltaPix, subgrid_res=subgrid_res
    )
    import lenstronomy.Util.mask_util as mask_util

    # Azimuthal mask of radius 1 (angular units, presumably arcsec -- verify)
    # used below to normalize the surface brightness.
    mask = mask_util.mask_azimuthal(x_grid_sub, y_grid_sub, center_x, center_y, r=1)
    x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
    # compute light on the subgrid
    lightModel = LightModel(light_model_list=lens_light_model_list)
    flux = lightModel.surface_brightness(x_grid_sub, y_grid_sub, kwargs_lens_light)
    # normalize so the mean flux inside the mask equals one
    flux_norm = np.sum(flux[mask == 1]) / np.sum(mask)
    flux /= flux_norm
    from lenstronomy.LensModel import convergence_integrals as integral

    # compute lensing quantities with subgrid
    convergence_sub = util.array2image(flux)
    f_x_sub, f_y_sub = integral.deflection_from_kappa_grid(
        convergence_sub, grid_spacing=deltaPix / float(subgrid_res)
    )
    f_sub = integral.potential_from_kappa_grid(
        convergence_sub, grid_spacing=deltaPix / float(subgrid_res)
    )
    # interpolation function on lensing quantities
    x_axes_sub, y_axes_sub = util.get_axes(x_grid_sub, y_grid_sub)
    from lenstronomy.LensModel.Profiles.interpol import Interpol

    interp_func = Interpol()
    interp_func.do_interp(x_axes_sub, y_axes_sub, f_sub, f_x_sub, f_y_sub)
    # compute lensing quantities on sparser grid
    x_axes, y_axes = util.get_axes(x_grid, y_grid)
    f_ = interp_func.function(x_grid, y_grid)
    f_x, f_y = interp_func.derivatives(x_grid, y_grid)
    # numerical differentials for second order differentials
    from lenstronomy.LensModel.lens_model import LensModel

    lens_model = LensModel(lens_model_list=["INTERPOL"])
    kwargs = [
        {
            "grid_interp_x": x_axes_sub,
            "grid_interp_y": y_axes_sub,
            "f_": f_sub,
            "f_x": f_x_sub,
            "f_y": f_y_sub,
        }
    ]
    # f_yx is discarded; only the symmetric components are returned below
    f_xx, f_xy, f_yx, f_yy = lens_model.hessian(x_grid, y_grid, kwargs, diff=0.00001)
    kwargs_interpol = {
        "grid_interp_x": x_axes,
        "grid_interp_y": y_axes,
        "f_": util.array2image(f_),
        "f_x": util.array2image(f_x),
        "f_y": util.array2image(f_y),
        "f_xx": util.array2image(f_xx),
        "f_xy": util.array2image(f_xy),
        "f_yy": util.array2image(f_yy),
    }
    return kwargs_interpol
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@Analysis@light2mass.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tseries/__init__.py",
"type": "Python"
}
|
# ruff: noqa: TC004
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# import modules that have public classes/functions:
from pandas.tseries import (
frequencies,
offsets,
)
# and mark only those modules as public
__all__ = ["frequencies", "offsets"]
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tseries@__init__.py@.PATH_END.py
|
{
"filename": "_t.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/area/_t.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``t`` data array of ``area`` traces."""

    def __init__(self, plotly_name="t", parent_name="area", **kwargs):
        # Pull the overridable defaults out of kwargs before forwarding
        # the remaining keyword arguments to the base validator.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(TValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@area@_t.py@.PATH_END.py
|
{
"filename": "_mesh3d.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/layout/template/data/_mesh3d.py",
"type": "Python"
}
|
from plotly.graph_objs import Mesh3d
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@layout@template@data@_mesh3d.py@.PATH_END.py
|
{
"filename": "CombineFrames_2.py",
"repo_name": "sPaMFouR/RedPipe",
"repo_path": "RedPipe_extracted/RedPipe-master/photometry/CombineFrames_2.py",
"type": "Python"
}
|
# /usr/bin/env python
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx #
# xxxxxxxxxxxxxxxxxxxxxx----------------------------COMBINE IMAGES----------------------------xxxxxxxxxxxxxxxxxxxxxxx #
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx #
# ------------------------------------------------------------------------------------------------------------------- #
# Import Required Libraries
# ------------------------------------------------------------------------------------------------------------------- #
import os
import re
import glob
from pyraf import iraf
from astropy.io import fits
import dateutil.parser as dparser
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Telescope CCD Specifications & Image Header Keywords
# ------------------------------------------------------------------------------------------------------------------- #
read_noise = 4.87
ccd_gain = 1.22
EXPTIME_keyword = 'EXPTIME'
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Name Of The Object
# ------------------------------------------------------------------------------------------------------------------- #
OBJECT_name = '2018hna'
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Load Required IRAF Packages
# ------------------------------------------------------------------------------------------------------------------- #
iraf.noao(_doprint=0)
iraf.images(_doprint=0)
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Functions For File Handling
# ------------------------------------------------------------------------------------------------------------------- #
def remove_file(file_name):
    """
    Deletes the file 'file_name' from the current directory, ignoring the
    case where the file does not exist.
    Args:
        file_name : Name of the file to be removed from the current directory
    Returns:
        None
    """
    # Best-effort delete: a missing (or already removed) file is not an error.
    try:
        os.remove(file_name)
    except OSError:
        pass
def group_similar_files(text_list, common_text, exceptions=''):
    """
    Groups files matching the glob pattern 'common_text'.

    Writes the grouped file names to the text file 'text_list' (only if
    'text_list' is a non-empty string) and returns them as a sorted list.

    Args:
        text_list   : Name of the output text file ('' to skip writing)
        common_text : Glob pattern with the partial name of the files to group
        exceptions  : Comma-separated fragments; files whose names match any
                      fragment (treated as a regular expression) are excluded
    Returns:
        list_files  : Sorted list of the names of the grouped files
    """
    # Glob once and filter the same list; the original globbed a second time
    # inside the exception loop, doing redundant filesystem work.
    list_files = glob.glob(common_text)
    if exceptions != '':
        for text in exceptions.split(','):
            # Iterate over a copy so removal does not disturb the iteration.
            for file_name in list(list_files):
                # NOTE: the exception fragment is used as a regex pattern.
                if re.search(text, file_name):
                    list_files.remove(file_name)
    list_files.sort()
    if len(text_list) != 0:
        with open(text_list, 'w') as f:
            for file_name in list_files:
                f.write(file_name + '\n')
    return list_files
def text_list_to_python_list(text_list):
    """
    Returns the whitespace-separated contents of the file 'text_list' as a
    python list.
    Args:
        text_list : Input file containing filenames
    Returns:
        python_list : List of all the elements in the file 'text_list',
                      or None when the file does not exist
    """
    # Guard clause: report and bail out when the file is missing.
    if not os.path.isfile(text_list):
        print ("Error : File '{0}' Not Found".format(text_list))
        return None
    with open(text_list, "r+") as f:
        return f.read().split()
def calculate_exptime(textlist_images):
    """
    Calculates the total exposure time of the images in 'textlist_images'.
    Args:
        textlist_images : Text list of subject images which needs to be combined
    Returns:
        total_exptime : Total exposure time of the combined image (each
                        EXPTIME value is truncated to int before summing)
    """
    list_images = text_list_to_python_list(textlist_images)
    if len(list_images) == 0:
        print ("No Images In The Text List {0}".format(textlist_images))
        return 0
    total_exptime = 0
    for image in list_images:
        # Exposure time is read from the primary header of each frame.
        total_exptime += int(fits.getheader(image)[EXPTIME_keyword])
    return total_exptime
def edit_exptime(comb_image, total_exptime):
    """
    Writes the total exposure time into the header of the combined image.
    Args:
        comb_image    : Image whose header has to be edited
        total_exptime : Total exposure time of the combined image
    Returns:
        None
    """
    # mode='update' flushes the header change back to disk when the
    # HDU list is closed at the end of the with-block.
    with fits.open(comb_image, mode='update') as hdulist:
        hdulist[0].header[EXPTIME_keyword] = int(total_exptime)
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Functions For Tasks In IRAF
# ------------------------------------------------------------------------------------------------------------------- #
def imcombine(textlist_images, type_combine='sum'):
    """
    Combines the images listed in the text file 'textlist_images' with
    IRAF's imcombine, using the pixel-combining algorithm 'type_combine',
    and writes the summed exposure time into the output header.
    Args:
        textlist_images : Text list of subject images which needs to be combined
        type_combine : Type of combining operation to be performed on pixels
    Returns:
        None
    """
    task = iraf.images.immatch.imcombine
    task.unlearn()
    task.combine = type_combine # Type Of Combining Operation Performed On Pixels
    task.reject = 'none' # Type Of Rejection Operation Performed On Pixels
    task.project = 'no' # Combine Across The Highest Dimension Of The Image?
    task.rdnoise = float(read_noise) # CCD Readout Noise (In e-)
    task.gain = float(ccd_gain) # CCD Gain (In e-/ADU)
    # NOTE(review): the output name strips the first 5 characters of the
    # list name — assumes the list is named 'list_<pattern>'; confirm
    # before reusing with a different naming scheme.
    task(input='@' + textlist_images, output=textlist_images[5:] + ".fits")
    # Propagate the summed exposure time into the combined image's header.
    total_exptime = calculate_exptime(textlist_images)
    edit_exptime(textlist_images[5:] + '.fits', total_exptime)
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Groups Images To Be Combined - [ca_jul30_fbs_object-v1.fits]
# ------------------------------------------------------------------------------------------------------------------- #
# Collect the set of observation dates by fuzzily parsing a date out of
# the part of each filename before the object name.
list_dates = []
for file_name in group_similar_files('', common_text='*.fits'):
    temp_name = file_name.split(OBJECT_name)[0]
    date = dparser.parse(temp_name, fuzzy=True)
    date = date.strftime('%Y-%m-%d')
    list_dates.append(date)
list_dates = set(list_dates)
list_filters = ['U', 'B', 'V', 'R', 'I']
# One candidate pattern per (date, filter) combination.
list_patterns = ['ca_' + date + '_cfbs_' + OBJECT_name + '-' + band.lower() for date in list_dates for band in list_filters]
# Keep only patterns that match at least two frames (nothing to combine
# otherwise); each kept pattern gets a 'list_<pattern>' text file.
list_list_comb = []
for pattern in list_patterns:
    if len(group_similar_files('', pattern + '*')) != 0:
        pattern_files = group_similar_files('list_' + pattern, common_text=pattern + '*')
        if len(pattern_files) > 1:
            list_list_comb.append('list_' + pattern)
# NOTE: Python 2 print statement — this script is not Python 3 compatible.
print list_list_comb
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
# Combines Images In The List 'list_comb'
# ------------------------------------------------------------------------------------------------------------------- #
for list_comb in list_list_comb:
    imcombine(textlist_images=list_comb, type_combine='sum')
# Cleanup of the generated 'list_ca*' files is intentionally left disabled:
# for file_name in group_similar_files('', 'list_ca*'):
#     remove_file(file_name)
# ------------------------------------------------------------------------------------------------------------------- #
|
sPaMFouRREPO_NAMERedPipePATH_START.@RedPipe_extracted@RedPipe-master@photometry@CombineFrames_2.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/cython/Cython/Runtime/__init__.py",
"type": "Python"
}
|
# empty file
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@cython@Cython@Runtime@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PlasmaPy/PlasmaPy",
"repo_path": "PlasmaPy_extracted/PlasmaPy-main/src/plasmapy/tests/_helpers/__init__.py",
"type": "Python"
}
|
"""Test helper functionality for PlasmaPy and affiliated packages."""
__all__ = [
"ExceptionMismatchFail",
"InvalidTestError",
"MissingExceptionFail",
"MissingWarningFail",
"TestFailed",
"TypeMismatchFail",
"UnexpectedExceptionFail",
"UnexpectedResultFail",
"UnexpectedWarningFail",
"WarningMismatchFail",
]
# This file contains several commented out import statements. These
# statements will be uncommented out over the course of several pull
# requests that were each originally part of #728. The blank lines
# between the import statements will hopefully simplify automatic merging.
# from plasmapy.tests._helpers.actual import ActualTestOutcome
# from plasmapy.tests._helpers.cases import AttrTestCase, FunctionTestCase, MethodTestCase
from plasmapy.tests._helpers.exceptions import (
ExceptionMismatchFail,
InvalidTestError,
MissingExceptionFail,
MissingWarningFail,
TestFailed,
TypeMismatchFail,
UnexpectedExceptionFail,
UnexpectedResultFail,
UnexpectedWarningFail,
WarningMismatchFail,
)
# from plasmapy.tests._helpers.expected import ExpectedTestOutcome
# from plasmapy.tests._helpers.inputs import (
# AbstractTestInputs,
# ClassAttributeTestInputs,
# ClassMethodTestInputs,
# FunctionTestInputs,
# GenericClassTestInputs,
# )
# from plasmapy.tests._helpers.runner import test_runner
|
PlasmaPyREPO_NAMEPlasmaPyPATH_START.@PlasmaPy_extracted@PlasmaPy-main@src@plasmapy@tests@_helpers@__init__.py@.PATH_END.py
|
{
"filename": "apero_database_kill.py",
"repo_name": "njcuk9999/apero-drs",
"repo_path": "apero-drs_extracted/apero-drs-main/apero/tools/recipes/bin/apero_database_kill.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2021-05-18
@author: cook
"""
from apero.base import base
from apero.core import constants
from apero.tools.module.database import manage_databases
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'apero_database_kill.py'
__INSTRUMENT__ = 'None'
__PACKAGE__ = base.__PACKAGE__
__version__ = base.__version__
__author__ = base.__author__
__date__ = base.__date__
__release__ = base.__release__
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
    # load params
    params = constants.load()
    # kill all user processes in the database that have been running for
    # longer than the timeout (60 s) — presumably; confirm against the
    # definition of 'timeout' in manage_databases.kill
    manage_databases.kill(params, timeout=60)
# =============================================================================
# End of code
# =============================================================================
|
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@apero@tools@recipes@bin@apero_database_kill.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "Caltech-IPAC/Montage",
"repo_path": "Montage_extracted/Montage-main/python/MontagePy/setup.py",
"type": "Python"
}
|
import os
import platform

from setuptools import setup
from distutils.extension import Extension
from Cython.Build import cythonize

# Build configuration for the MontagePy bindings: two Cython extensions
# linked against the pre-built Montage object files shipped in ./lib.

machine = platform.machine()

# Repository root is two levels up from python/MontagePy.
TOP = os.path.abspath(os.path.join(os.getcwd(), '../..'))
LIB = os.path.join(TOP, 'lib')
MONTAGELIB = os.path.join(TOP, 'MontageLib')

# Every pre-compiled object in ./lib is linked into the _wrappers extension.
objs = ['lib/' + obj for obj in os.listdir('lib')]

# Force gcc targeting the host architecture; CFLAGS is cleared so the
# build is controlled entirely by the settings below.
os.environ['CC'] = 'gcc'
os.environ['CFLAGS'] = ''
os.environ['ARCHFLAGS'] = '-arch ' + machine

extensions = [
    Extension('MontagePy._wrappers', ['src/MontagePy/_wrappers.pyx'],
              include_dirs=[os.path.join(LIB, 'include'), MONTAGELIB],
              extra_objects=objs),
    Extension('MontagePy.main', ['src/MontagePy/main.pyx'])
]

setup(
    packages=['src/MontagePy'],
    package_data={'MontagePy': ['FreeSans.ttf']},
    ext_modules=cythonize(extensions,
                          compiler_directives={'language_level': '3str'})
)
|
Caltech-IPACREPO_NAMEMontagePATH_START.@Montage_extracted@Montage-main@python@MontagePy@setup.py@.PATH_END.py
|
{
"filename": "_functional_video.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/torchvision/transforms/_functional_video.py",
"type": "Python"
}
|
import warnings
import torch
warnings.warn(
"The 'torchvision.transforms._functional_video' module is deprecated since 0.12 and will be removed in the future. "
"Please use the 'torchvision.transforms.functional' module instead."
)
def _is_tensor_video_clip(clip):
if not torch.is_tensor(clip):
raise TypeError("clip should be Tensor. Got %s" % type(clip))
if not clip.ndimension() == 4:
raise ValueError("clip should be 4D. Got %dD" % clip.dim())
return True
def crop(clip, i, j, h, w):
    """Crop a video clip.

    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i, j (int): top-left corner of the crop window.
        h, w (int): height and width of the crop window.
    """
    if len(clip.size()) != 4:
        raise ValueError("clip should be a 4D tensor")
    rows = slice(i, i + h)
    cols = slice(j, j + w)
    return clip[..., rows, cols]
def resize(clip, target_size, interpolation_mode):
    """Spatially resize ``clip`` to ``target_size`` (height, width)."""
    if len(target_size) != 2:
        raise ValueError(f"target size should be tuple (height, width), instead got {target_size}")
    resized = torch.nn.functional.interpolate(
        clip, size=target_size, mode=interpolation_mode, align_corners=False
    )
    return resized
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    """Spatially crop, then resize, a video clip.

    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the cropped region.
        w (int): Width of the cropped region.
        size (tuple(int, int)): height and width of resized clip
    Returns:
        clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    return resize(crop(clip, i, j, h, w), size, interpolation_mode)
def center_crop(clip, crop_size):
    """Crop a (C, T, H, W) clip to ``crop_size`` about its spatial center."""
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    th, tw = crop_size
    h, w = clip.size(-2), clip.size(-1)
    if h < th or w < tw:
        raise ValueError("height and width must be no smaller than crop_size")
    top = int(round((h - th) / 2.0))
    left = int(round((w - tw) / 2.0))
    return crop(clip, top, left, th, tw)
def to_tensor(clip):
    """
    Convert a uint8 (T, H, W, C) clip into a float (C, T, H, W) clip with
    values scaled into [0, 1].
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
    """
    _is_tensor_video_clip(clip)
    if clip.dtype != torch.uint8:
        raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype))
    # Scale to [0, 1] and move the channel axis to the front.
    return clip.float().permute(3, 0, 1, 2) / 255.0
def normalize(clip, mean, std, inplace=False):
    """Channel-wise normalize a video clip: ``(clip - mean) / std``.

    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
        mean (tuple): pixel RGB mean. Size is (3)
        std (tuple): pixel standard deviation. Size is (3)
        inplace (bool): mutate ``clip`` instead of cloning it.
    Returns:
        normalized clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    if not inplace:
        clip = clip.clone()
    mean_t = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)
    std_t = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)
    # Broadcast the per-channel statistics over the T, H, W axes.
    clip.sub_(mean_t[:, None, None, None]).div_(std_t[:, None, None, None])
    return clip
def hflip(clip):
    """Horizontally flip a video clip.

    Args:
        clip (torch.tensor): Video clip to be flipped. Size is (C, T, H, W)
    Returns:
        flipped clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    # Reversing the last (width) axis mirrors each frame left-to-right.
    return torch.flip(clip, dims=[-1])
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@torchvision@transforms@_functional_video.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "PuDu-Astro/DASpec",
"repo_path": "DASpec_extracted/DASpec-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Build script for DASpec: compiles two SWIG-wrapped C++ extensions
# (_swigDASpec and _carray). Requires SWIG plus the cmpfit (-lmpfit)
# and GSL (-lgsl, -lgslcblas) libraries.
from setuptools import setup, Extension
# NOTE(review): the cmpfit paths below are hard-coded to one user's home
# directory; adjust them for your machine before building.
include_dirs = [
    'src/DASpec',
    '/home/dupu/Softwares/cmpfit/cmpfit-1.3a',  # add or remove path if necessary
]
library_dirs = [
    '/home/dupu/Softwares/cmpfit/cmpfit-1.3a',  # add or remove path if necessary
]
# Main extension: C++ sources wrapped via SWIG (-c++ mode).
ext_swigDASpec = Extension(
    name = '_swigDASpec',
    swig_opts = ['-c++'],
    sources = [
        'compcontainer.cpp',
        'component.cpp',
        'curvefit.cpp',
        'function.cpp',
        'swigDASpec.i',
    ],
    include_dirs = include_dirs,
    library_dirs = library_dirs,
    extra_compile_args = [
        '-fPIC',
    ],
    extra_link_args = [
        '-lmpfit',
        '-lgsl',
        '-lgslcblas',
    ]
)
# Helper extension providing C-array access from Python.
ext_carray = Extension(
    name = '_carray',
    swig_opts = ['-c++'],
    sources = [
        'carray.cpp',
        'carray.i',
    ],
    include_dirs = include_dirs
)
setup(
    name = 'DASpec',
    version = '0.8',
    author = 'Pu Du',
    description = """DASpec""",
    ext_modules = [ext_swigDASpec, ext_carray],
    py_modules = ["DASpec", "carray"],
)
|
PuDu-AstroREPO_NAMEDASpecPATH_START.@DASpec_extracted@DASpec-master@setup.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "pyDANDIA/pyDANDIA",
"repo_path": "pyDANDIA_extracted/pyDANDIA-main/README.md",
"type": "Markdown"
}
|
[](https://travis-ci.org/pyDANDIA/pyDANDIA)
# pyDANDIA
# Open reduction pipeline of the RoboNet team
|
pyDANDIAREPO_NAMEpyDANDIAPATH_START.@pyDANDIA_extracted@pyDANDIA-main@README.md@.PATH_END.py
|
{
"filename": "ioloop.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/tornado/tornado-4/tornado/ioloop.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function
import collections
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
if PY3:
import _thread as thread
else:
import thread
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
    """Raised when an operation (e.g. `IOLoop.run_sync`) exceeds its time limit."""
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import tornado.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
    @staticmethod
    def instance():
        """Returns a global `IOLoop` instance.
        Most applications have a single, global `IOLoop` running on the
        main thread. Use this method to get this instance from
        another thread. In most other cases, it is better to use `current()`
        to get the current thread's `IOLoop`.
        """
        # Double-checked locking: a cheap unlocked test first, then a
        # re-check under the lock so only one thread creates the singleton.
        if not hasattr(IOLoop, "_instance"):
            with IOLoop._instance_lock:
                if not hasattr(IOLoop, "_instance"):
                    # New instance after double check
                    IOLoop._instance = IOLoop()
        return IOLoop._instance
    @staticmethod
    def initialized():
        """Returns true if the singleton instance has been created."""
        # True once instance() (or install()) has set IOLoop._instance.
        return hasattr(IOLoop, "_instance")
    def install(self):
        """Installs this `IOLoop` object as the singleton instance.
        This is normally not necessary as `instance()` will create
        an `IOLoop` on demand, but you may want to call `install` to use
        a custom subclass of `IOLoop`.
        When using an `IOLoop` subclass, `install` must be called prior
        to creating any objects that implicitly create their own
        `IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`).
        """
        # NOTE(review): ``assert`` is stripped under ``python -O``, in which
        # case install() would silently overwrite an existing singleton.
        assert not IOLoop.initialized()
        IOLoop._instance = self
    @staticmethod
    def clear_instance():
        """Clear the global `IOLoop` instance.
        .. versionadded:: 4.0
        """
        # Deleting the attribute makes initialized() return False again.
        if hasattr(IOLoop, "_instance"):
            del IOLoop._instance
    @staticmethod
    def current(instance=True):
        """Returns the current thread's `IOLoop`.
        If an `IOLoop` is currently running or has been marked as
        current by `make_current`, returns that instance.  If there is
        no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
        main thread's `IOLoop`, creating one if necessary) if ``instance``
        is true.
        In general you should use `IOLoop.current` as the default when
        constructing an asynchronous object, and use `IOLoop.instance`
        when you mean to communicate to the main thread from a different
        one.
        .. versionchanged:: 4.1
           Added ``instance`` argument to control the fallback to
           `IOLoop.instance()`.
        """
        # _current is a threading.local, so each thread sees its own value.
        current = getattr(IOLoop._current, "instance", None)
        if current is None and instance:
            # No thread-local loop: fall back to the global singleton.
            return IOLoop.instance()
        return current
    def make_current(self):
        """Makes this the `IOLoop` for the current thread.
        An `IOLoop` automatically becomes current for its thread
        when it is started, but it is sometimes useful to call
        `make_current` explicitly before starting the `IOLoop`,
        so that code run at startup time can find the right
        instance.
        .. versionchanged:: 4.1
           An `IOLoop` created while there is no current `IOLoop`
           will automatically become current.
        """
        # Store on the thread-local so current() finds this loop.
        IOLoop._current.instance = self
    @staticmethod
    def clear_current():
        """Resets the calling thread's current `IOLoop` to None."""
        IOLoop._current.instance = None
    @classmethod
    def configurable_base(cls):
        # IOLoop itself is the root of the configurable hierarchy; the
        # concrete implementation is selected in configurable_default().
        return IOLoop
    @classmethod
    def configurable_default(cls):
        # Pick the best available I/O multiplexer: epoll (Linux) over
        # kqueue (BSD/Mac OS X) over the portable select() fallback.
        if hasattr(select, "epoll"):
            from tornado.platform.epoll import EPollIOLoop
            return EPollIOLoop
        if hasattr(select, "kqueue"):
            # Python 2.6+ on BSD or Mac
            from tornado.platform.kqueue import KQueueIOLoop
            return KQueueIOLoop
        from tornado.platform.select import SelectIOLoop
        return SelectIOLoop
    def initialize(self, make_current=None):
        """Optionally register this loop as the thread's current one.

        ``make_current`` is tri-state: None makes the new loop current only
        when no current loop exists; True requires becoming current (raises
        `RuntimeError` if another loop already is); False leaves the
        thread-local current loop untouched.
        """
        if make_current is None:
            if IOLoop.current(instance=False) is None:
                self.make_current()
        elif make_current:
            if IOLoop.current(instance=False) is not None:
                raise RuntimeError("current IOLoop already exists")
            self.make_current()
    def close(self, all_fds=False):
        """Closes the `IOLoop`, freeing any resources used.
        If ``all_fds`` is true, all file descriptors registered on the
        IOLoop will be closed (not just the ones created by the
        `IOLoop` itself).
        Many applications will only use a single `IOLoop` that runs for the
        entire lifetime of the process. In that case closing the `IOLoop`
        is not necessary since everything will be cleaned up when the
        process exits. `IOLoop.close` is provided mainly for scenarios
        such as unit tests, which create and destroy a large number of
        ``IOLoops``.
        An `IOLoop` must be completely stopped before it can be closed. This
        means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
        be allowed to return before attempting to call `IOLoop.close()`.
        Therefore the call to `close` will usually appear just after
        the call to `start` rather than near the call to `stop`.
        .. versionchanged:: 3.1
           If the `IOLoop` implementation supports non-integer objects
           for "file descriptors", those objects will have their
           ``close`` method when ``all_fds`` is true.
        """
        # Abstract: implemented by the platform-specific subclass chosen
        # in configurable_default().
        raise NotImplementedError()
    def add_handler(self, fd, handler, events):
        """Registers the given handler to receive the given events for ``fd``.
        The ``fd`` argument may either be an integer file descriptor or
        a file-like object with a ``fileno()`` method (and optionally a
        ``close()`` method, which may be called when the `IOLoop` is shut
        down).
        The ``events`` argument is a bitwise or of the constants
        ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
        When an event occurs, ``handler(fd, events)`` will be run.
        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        # Abstract: implemented by the platform-specific subclass.
        raise NotImplementedError()
    def update_handler(self, fd, events):
        """Changes the events we listen for ``fd``.
        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        # Abstract: implemented by the platform-specific subclass.
        raise NotImplementedError()
    def remove_handler(self, fd):
        """Stop listening for events on ``fd``.
        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        # Abstract: implemented by the platform-specific subclass.
        raise NotImplementedError()
    def set_blocking_signal_threshold(self, seconds, action):
        """Sends a signal if the `IOLoop` is blocked for more than
        ``s`` seconds.
        Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
        platform.
        The action parameter is a Python signal handler. Read the
        documentation for the `signal` module for more information.
        If ``action`` is None, the process will be killed if it is
        blocked for too long.
        """
        # Abstract: implemented by the platform-specific subclass.
        raise NotImplementedError()
    def set_blocking_log_threshold(self, seconds):
        """Logs a stack trace if the `IOLoop` is blocked for more than
        ``s`` seconds.
        Equivalent to ``set_blocking_signal_threshold(seconds,
        self.log_stack)``
        """
        # Convenience wrapper: reuse the signal mechanism with log_stack
        # as the handler.
        self.set_blocking_signal_threshold(seconds, self.log_stack)
    def log_stack(self, signal, frame):
        """Signal handler to log the stack trace of the current thread.
        For use with `set_blocking_signal_threshold`.
        """
        # ``frame`` is the interrupted stack frame delivered to the signal
        # handler; format it so the blocking call site is visible in the log.
        gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                        self._blocking_signal_threshold,
                        ''.join(traceback.format_stack(frame)))
    def start(self):
        """Starts the I/O loop.
        The loop will run until one of the callbacks calls `stop()`, which
        will make the loop stop after the current event iteration completes.
        """
        # Abstract: implemented by the platform-specific subclass.
        raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
    """Stop the I/O loop.

    If the event loop is not currently running, the next call to
    `start()` will return immediately.

    To use asynchronous methods from otherwise-synchronous code (such
    as unit tests), you can start and stop the event loop like this::

        ioloop = IOLoop()
        async_method(ioloop=ioloop, callback=ioloop.stop)
        ioloop.start()

    ``ioloop.start()`` returns after ``async_method`` has run its
    callback, whether that callback was invoked before or after
    ``ioloop.start``.

    Note that even after `stop` has been called, the `IOLoop` is not
    completely stopped until `IOLoop.start` has also returned; some
    work scheduled before the call to `stop` may still run first.
    Abstract; concrete IOLoop subclasses must override this.
    """
    raise NotImplementedError()
def run_sync(self, func, timeout=None):
    """Start the `IOLoop`, run ``func``, and stop the loop when done.

    ``func`` must return either ``None`` or a yieldable object.  If it
    returns a yieldable, the loop runs until the yieldable resolves and
    `run_sync()` returns its result.  Exceptions raised by ``func`` (or
    stored in the yieldable) are re-raised to the caller.

    The keyword-only argument ``timeout`` bounds the total run time; a
    `TimeoutError` is raised if it expires.

    Useful with `tornado.gen.coroutine` for a ``main()`` function::

        @gen.coroutine
        def main():
            # do stuff...

        if __name__ == '__main__':
            IOLoop.current().run_sync(main)

    .. versionchanged:: 4.3
       Returning a non-``None``, non-yieldable value is now an error.
    """
    # One-element list so the nested closure can rebind the future.
    result_cell = [None]

    def runner():
        try:
            produced = func()
            if produced is not None:
                from tornado.gen import convert_yielded
                produced = convert_yielded(produced)
        except Exception:
            result_cell[0] = TracebackFuture()
            result_cell[0].set_exc_info(sys.exc_info())
        else:
            if is_future(produced):
                result_cell[0] = produced
            else:
                result_cell[0] = TracebackFuture()
                result_cell[0].set_result(produced)
        # Whatever the outcome, stop the loop once the future resolves.
        self.add_future(result_cell[0], lambda fut: self.stop())

    self.add_callback(runner)
    if timeout is not None:
        timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
    self.start()
    if timeout is not None:
        self.remove_timeout(timeout_handle)
    if not result_cell[0].done():
        raise TimeoutError('Operation timed out after %s seconds' % timeout)
    return result_cell[0].result()
def time(self):
    """Return the current time according to the `IOLoop`'s clock.

    The return value is a floating-point number relative to an
    unspecified time in the past.

    By default this is `time.time`, but the loop may be configured to
    use e.g. `time.monotonic` instead.  Calls to `add_timeout` that
    pass a number instead of a `datetime.timedelta` should use this
    function to compute the appropriate time, so they work regardless
    of which time function is chosen.
    """
    return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
    """Run ``callback`` at the time ``deadline`` from the I/O loop.

    Returns an opaque handle that may be passed to `remove_timeout`
    to cancel.

    ``deadline`` may be a number denoting a time (on the same scale as
    `IOLoop.time`, normally `time.time`) or a `datetime.timedelta` for
    a deadline relative to the current time.  Since Tornado 4.0,
    `call_later` is a more convenient alternative for the relative
    case since it does not require a timedelta object.

    Not safe to call from other threads; use `add_callback` to
    transfer control to the `IOLoop`'s thread first.

    Subclasses of IOLoop must implement either `add_timeout` or
    `call_at`; the default implementations of each call the other.
    `call_at` is usually easier to implement, but subclasses that wish
    to maintain compatibility with Tornado versions prior to 4.0 must
    use `add_timeout` instead.

    .. versionchanged:: 4.0
       Now passes through ``*args`` and ``**kwargs`` to the callback.
    """
    if isinstance(deadline, numbers.Real):
        # Absolute deadline: forward as-is.
        return self.call_at(deadline, callback, *args, **kwargs)
    elif isinstance(deadline, datetime.timedelta):
        # Relative deadline: convert to an absolute time first.
        return self.call_at(self.time() + timedelta_to_seconds(deadline),
                            callback, *args, **kwargs)
    else:
        raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
    """Run ``callback`` after ``delay`` seconds have passed.

    Returns an opaque handle that may be passed to `remove_timeout` to
    cancel.  Unlike the `asyncio` method of the same name, the
    returned object does not have a ``cancel()`` method.

    See `add_timeout` for comments on thread-safety and subclassing.

    .. versionadded:: 4.0
    """
    return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
    """Run ``callback`` at the absolute time designated by ``when``.

    ``when`` must be a number using the same reference point as
    `IOLoop.time`.

    Returns an opaque handle that may be passed to `remove_timeout` to
    cancel.  Unlike the `asyncio` method of the same name, the
    returned object does not have a ``cancel()`` method.

    See `add_timeout` for comments on thread-safety and subclassing.

    .. versionadded:: 4.0
    """
    return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
    """Cancel a pending timeout.

    ``timeout`` is a handle as returned by `add_timeout`.  It is safe
    to call `remove_timeout` even if the callback has already run.
    Abstract; concrete IOLoop subclasses must override this.
    """
    raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
    """Call the given callback on the next I/O loop iteration.

    Safe to call from any thread at any time, except from a signal
    handler.  Note that this is the **only** method in `IOLoop` that
    makes this thread-safety guarantee; all other interaction with the
    `IOLoop` must be done from that `IOLoop`'s thread.
    `add_callback()` may be used to transfer control from other
    threads to the `IOLoop`'s thread.

    To add a callback from a signal handler, see
    `add_callback_from_signal`.  Abstract; concrete IOLoop subclasses
    must override this.
    """
    raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
    """Call the given callback on the next I/O loop iteration.

    Safe for use from a Python signal handler; should not be used
    otherwise.

    Callbacks added with this method run without any `.stack_context`,
    to avoid picking up the context of the function that was
    interrupted by the signal.  Abstract; concrete IOLoop subclasses
    must override this.
    """
    raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
    """Call the given callback on the next IOLoop iteration.

    Unlike all other callback-related methods on IOLoop,
    ``spawn_callback`` does not associate the callback with its
    caller's ``stack_context``, so it is suitable for fire-and-forget
    callbacks that should not interfere with the caller.

    .. versionadded:: 4.0
    """
    # NullContext detaches the callback from the caller's context.
    with stack_context.NullContext():
        self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
    """Schedule ``callback`` on the ``IOLoop`` when ``future`` finishes.

    The callback is invoked with one argument, the `.Future` itself.
    """
    assert is_future(future)
    # Capture the caller's stack context now; the done-callback may
    # fire from a different context later.
    wrapped = stack_context.wrap(callback)
    future.add_done_callback(
        lambda fut: self.add_callback(wrapped, fut))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
    """Called whenever a callback run by the `IOLoop` raises.

    By default simply logs the exception as an error.  Subclasses may
    override to customize reporting.  The exception itself is not
    passed explicitly; it is available via `sys.exc_info`.
    """
    app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
    """Normalize an ``fd`` parameter into an ``(fd, obj)`` pair.

    `add_handler` and related methods accept both raw file descriptors
    and file-like objects.  When a file-like object is passed we must
    retain the object itself so it can be closed correctly when the
    `IOLoop` shuts down, while the poller interfaces favor raw
    descriptors (they accept file-like objects and call ``fileno()``
    for you, but they always return the descriptor itself).

    For use by `IOLoop` subclasses; not intended for application code.

    .. versionadded:: 4.0
    """
    # EAFP: a raw descriptor has no fileno() and falls through.
    try:
        return fd.fileno(), fd
    except AttributeError:
        return fd, fd
def close_fd(self, fd):
    """Utility method to close an ``fd``.

    File-like objects are closed directly; raw descriptors go through
    `os.close`.  Errors from already-closed descriptors are ignored.

    For use by `IOLoop` subclasses (in implementations of
    ``IOLoop.close(all_fds=True)``); not intended for application
    code.

    .. versionadded:: 4.0
    """
    try:
        try:
            fd.close()
        except AttributeError:
            # No close() method: must be a raw descriptor.
            os.close(fd)
    except OSError:
        pass
class PollIOLoop(IOLoop):
    """Base class for IOLoops built around a select-like function.

    For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
    (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
    `tornado.platform.select.SelectIOLoop` (all platforms).
    """

    def initialize(self, impl, time_func=None, **kwargs):
        super(PollIOLoop, self).initialize(**kwargs)
        self._impl = impl
        if hasattr(self._impl, 'fileno'):
            set_close_exec(self._impl.fileno())
        self.time_func = time_func or time.time
        self._handlers = {}       # fd -> (obj, wrapped handler)
        self._events = {}         # fds with pending events
        self._callbacks = collections.deque()
        self._timeouts = []       # heap of _Timeout objects
        self._cancellations = 0   # count of dead entries in the heap
        self._running = False
        self._stopped = False
        self._closing = False
        self._thread_ident = None
        self._blocking_signal_threshold = None
        self._timeout_counter = itertools.count()

        # Pipe written to with bogus data to wake the I/O loop when
        # it is idle.
        self._waker = Waker()
        self.add_handler(self._waker.fileno(),
                         lambda fd, events: self._waker.consume(),
                         self.READ)

    def close(self, all_fds=False):
        self._closing = True
        self.remove_handler(self._waker.fileno())
        if all_fds:
            for fd, handler in list(self._handlers.values()):
                self.close_fd(fd)
        self._waker.close()
        self._impl.close()
        self._callbacks = None
        self._timeouts = None

    def add_handler(self, fd, handler, events):
        fd, obj = self.split_fd(fd)
        self._handlers[fd] = (obj, stack_context.wrap(handler))
        self._impl.register(fd, events | self.ERROR)

    def update_handler(self, fd, events):
        fd, obj = self.split_fd(fd)
        self._impl.modify(fd, events | self.ERROR)

    def remove_handler(self, fd):
        fd, obj = self.split_fd(fd)
        self._handlers.pop(fd, None)
        self._events.pop(fd, None)
        try:
            self._impl.unregister(fd)
        except Exception:
            gen_log.debug("Error deleting fd from IOLoop", exc_info=True)

    def set_blocking_signal_threshold(self, seconds, action):
        if not hasattr(signal, "setitimer"):
            gen_log.error("set_blocking_signal_threshold requires a signal module "
                          "with the setitimer method")
            return
        self._blocking_signal_threshold = seconds
        if seconds is not None:
            signal.signal(signal.SIGALRM,
                          action if action is not None else signal.SIG_DFL)

    def start(self):
        if self._running:
            raise RuntimeError("IOLoop is already running")
        self._setup_logging()
        if self._stopped:
            # stop() was called before start(); consume the flag and
            # return immediately.
            self._stopped = False
            return
        old_current = getattr(IOLoop._current, "instance", None)
        IOLoop._current.instance = self
        self._thread_ident = thread.get_ident()
        self._running = True

        # signal.set_wakeup_fd closes a race condition in event loops:
        # a signal may arrive between the deadline check and the
        # interruptible sleep of select/poll/etc, in which case it
        # would be consumed without waking the select.  The fix is for
        # the (C, synchronous) signal handler to write to a pipe that
        # the select watches.
        #
        # In Python's signal-handling semantics this only matters on
        # the main thread (conveniently, set_wakeup_fd only works on
        # the main thread and raises ValueError elsewhere).
        #
        # If someone has already set a wakeup fd, leave it alone.
        # Twisted, for example, does its SIGCHLD processing via its
        # own wakeup fd; as long as that fd is registered on this
        # IOLoop the loop still wakes up and everything works.
        old_wakeup_fd = None
        if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
            # requires python 2.6+, unix; set_wakeup_fd exists but
            # crashes the python process on windows.
            try:
                old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
                if old_wakeup_fd != -1:
                    # Already set elsewhere; restore the previous
                    # value.  Slightly racy, but there is no clean
                    # get_wakeup_fd and in practice the IOLoop is
                    # started once at program start.
                    signal.set_wakeup_fd(old_wakeup_fd)
                    old_wakeup_fd = None
            except ValueError:
                # Non-main thread, or the previous wakeup_fd value is
                # no longer valid.
                old_wakeup_fd = None

        try:
            while True:
                # Freeze the callback count up front so callbacks
                # added during this iteration run on the next one,
                # preventing I/O event starvation.
                ncallbacks = len(self._callbacks)

                # Collect the timeouts that are due before running
                # anything, so a timeout that calls add_timeout cannot
                # schedule work into this same iteration.
                due_timeouts = []
                if self._timeouts:
                    now = self.time()
                    while self._timeouts:
                        if self._timeouts[0].callback is None:
                            # Cancelled timeout at the heap top; note
                            # that the cancellation check is repeated
                            # below for timeouts cancelled by another
                            # timeout or callback.
                            heapq.heappop(self._timeouts)
                            self._cancellations -= 1
                        elif self._timeouts[0].deadline <= now:
                            due_timeouts.append(heapq.heappop(self._timeouts))
                        else:
                            break
                    if (self._cancellations > 512 and
                            self._cancellations > (len(self._timeouts) >> 1)):
                        # Compact the heap once it is large and more
                        # than half cancellations.
                        self._cancellations = 0
                        self._timeouts = [x for x in self._timeouts
                                          if x.callback is not None]
                        heapq.heapify(self._timeouts)

                for i in range(ncallbacks):
                    self._run_callback(self._callbacks.popleft())
                for timeout in due_timeouts:
                    if timeout.callback is not None:
                        self._run_callback(timeout.callback)
                # Closures may be holding on to a lot of memory;
                # release them before going into the poll wait.
                due_timeouts = timeout = None

                if self._callbacks:
                    # Callbacks or timeouts called add_callback: do
                    # not block in poll() before running them.
                    poll_timeout = 0.0
                elif self._timeouts:
                    # Block at most until the earliest timeout.  Use
                    # self.time() rather than 'now' to account for
                    # time spent running callbacks.
                    poll_timeout = self._timeouts[0].deadline - self.time()
                    poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
                else:
                    # Nothing pending; use the default.
                    poll_timeout = _POLL_TIMEOUT

                if not self._running:
                    break

                if self._blocking_signal_threshold is not None:
                    # Clear the alarm so it cannot fire while poll is
                    # waiting for events.
                    signal.setitimer(signal.ITIMER_REAL, 0, 0)

                try:
                    event_pairs = self._impl.poll(poll_timeout)
                except Exception as e:
                    # Depending on python version and IOLoop
                    # implementation, different exception types may be
                    # thrown, and EINTR may be signaled two ways:
                    #   * e.errno == errno.EINTR
                    #   * e.args like (errno.EINTR, 'Interrupted system call')
                    if errno_from_exception(e) == errno.EINTR:
                        continue
                    else:
                        raise

                if self._blocking_signal_threshold is not None:
                    signal.setitimer(signal.ITIMER_REAL,
                                     self._blocking_signal_threshold, 0)

                # Pop one fd at a time from the pending set and run
                # its handler.  Handlers may act on other file
                # descriptors, producing reentrant calls into this
                # IOLoop that modify self._events.
                self._events.update(event_pairs)
                while self._events:
                    fd, events = self._events.popitem()
                    try:
                        fd_obj, handler_func = self._handlers[fd]
                        handler_func(fd_obj, events)
                    except (OSError, IOError) as e:
                        if errno_from_exception(e) == errno.EPIPE:
                            # The client closed the connection.
                            pass
                        else:
                            self.handle_callback_exception(self._handlers.get(fd))
                    except Exception:
                        self.handle_callback_exception(self._handlers.get(fd))
                fd_obj = handler_func = None

        finally:
            # Reset the stopped flag so another start/stop pair can be
            # issued, and restore global state.
            self._stopped = False
            if self._blocking_signal_threshold is not None:
                signal.setitimer(signal.ITIMER_REAL, 0, 0)
            IOLoop._current.instance = old_current
            if old_wakeup_fd is not None:
                signal.set_wakeup_fd(old_wakeup_fd)

    def stop(self):
        self._running = False
        self._stopped = True
        self._waker.wake()

    def time(self):
        return self.time_func()

    def call_at(self, deadline, callback, *args, **kwargs):
        timeout = _Timeout(
            deadline,
            functools.partial(stack_context.wrap(callback), *args, **kwargs),
            self)
        heapq.heappush(self._timeouts, timeout)
        return timeout

    def remove_timeout(self, timeout):
        # Removing from a heap is complicated, so just leave the
        # defunct timeout object in the queue (see discussion in
        # http://docs.python.org/library/heapq.html).  If this becomes
        # a problem we could add a garbage collection pass whenever
        # there are too many dead timeouts (see start()).
        timeout.callback = None
        self._cancellations += 1

    def add_callback(self, callback, *args, **kwargs):
        if self._closing:
            return
        # Blindly insert into self._callbacks.  Safe even from signal
        # handlers because deque.append is atomic.
        self._callbacks.append(functools.partial(
            stack_context.wrap(callback), *args, **kwargs))
        if thread.get_ident() != self._thread_ident:
            # Writes one byte; Waker.consume() reads many at once, so
            # it is fine to write even when not strictly necessary.
            self._waker.wake()
        else:
            # Already on the IOLoop's thread; no wake needed.
            pass

    def add_callback_from_signal(self, callback, *args, **kwargs):
        with stack_context.NullContext():
            self.add_callback(callback, *args, **kwargs)
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
class PeriodicCallback(object):
    """Schedules the given callback to be called periodically.

    The callback is called every ``callback_time`` milliseconds.  Note
    that the timeout is given in milliseconds, while most other
    time-related functions in Tornado use seconds.

    If the callback runs for longer than ``callback_time``
    milliseconds, subsequent invocations are skipped to get back on
    schedule.

    `start` must be called after the `PeriodicCallback` is created.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """

    def __init__(self, callback, callback_time, io_loop=None):
        self.callback = callback
        if callback_time <= 0:
            raise ValueError("Periodic callback must have a positive callback_time")
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.current()
        self._running = False
        self._timeout = None

    def start(self):
        """Starts the timer."""
        self._running = True
        self._next_timeout = self.io_loop.time()
        self._schedule_next()

    def stop(self):
        """Stops the timer."""
        self._running = False
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def is_running(self):
        """Return True if this `.PeriodicCallback` has been started.

        .. versionadded:: 4.1
        """
        return self._running

    def _run(self):
        if not self._running:
            return
        try:
            return self.callback()
        except Exception:
            self.io_loop.handle_callback_exception(self.callback)
        finally:
            # Reschedule even if the callback raised.
            self._schedule_next()

    def _schedule_next(self):
        if self._running:
            now = self.io_loop.time()
            if self._next_timeout <= now:
                # We are at or past the next slot: skip forward past
                # any missed periods to stay on the original cadence.
                period = self.callback_time / 1000.0
                missed = math.floor((now - self._next_timeout) / period) + 1
                self._next_timeout += missed * period
            self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@tornado@tornado-4@tornado@ioloop.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.