metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "equations.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/GRHD/equations.py",
"type": "Python"
}
|
# As documented in the NRPy+ tutorial module
# Tutorial-GRHD_Equations-Cartesian.ipynb
# this module will construct useful quantities
# for the IllinoisGRMHD implementation of
# general relativistic hydrodynamics (GRHD)
#
# Authors: Zachariah B. Etienne
# zachetie **at** gmail **dot** com
# Patrick Nelson
# Step 1: import all needed modules from NRPy+/Python:
from outputC import nrpyAbs # NRPy+: Core C code output module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
# Step 2: Define the stress-energy tensor
# Step 2.a: First define h, the enthalpy:
def compute_enthalpy(rho_b,P,epsilon):
    """Compute the specific enthalpy h = 1 + epsilon + P/rho_b.

    Parameters
    ----------
    rho_b : baryonic rest-mass density (symbol or number; must be nonzero)
    P : pressure
    epsilon : specific internal energy

    Sets the module-level global `h` (the convention used throughout this
    module) and additionally returns it, so callers may use the value
    directly; previous callers ignored the None return, so this is
    backward compatible.
    """
    global h
    h = 1 + epsilon + P/rho_b
    return h
# Step 2.b: Define T^{mu nu} (a 4-dimensional tensor)
def compute_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U):
    """Set the module-level global T4UU to the perfect-fluid stress-energy
    tensor T^{mu nu} = rho_b h u^mu u^nu + P g^{mu nu}, given the ADM metric
    quantities (gammaDD, betaU, alpha), hydrodynamic primitives
    (rho_b, P, epsilon), and 4-velocity u4U.
    """
    global T4UU
    compute_enthalpy(rho_b,P,epsilon)  # sets the module-level global h
    # Then define g^{mu nu} in terms of the ADM quantities:
    import BSSN.ADMBSSN_tofrom_4metric as AB4m
    AB4m.g4UU_ito_BSSN_or_ADM("ADM",gammaDD,betaU,alpha)  # sets AB4m.g4UU
    # Finally compute T^{mu nu}
    T4UU = ixp.zerorank2(DIM=4)
    for mu in range(4):
        for nu in range(4):
            T4UU[mu][nu] = rho_b * h * u4U[mu]*u4U[nu] + P*AB4m.g4UU[mu][nu]
# Step 2.c: Define T^{mu}_{nu} (a 4-dimensional tensor)
def compute_T4UD(gammaDD,betaU,alpha, T4UU):
    """Set the module-level global T4UD to the mixed stress-energy tensor
    T^mu_nu = T^{mu delta} g_{delta nu} (needed for the S_tilde flux),
    contracting the input T^{mu nu} against the 4-metric built from the
    ADM quantities (gammaDD, betaU, alpha).
    """
    global T4UD
    # We need g_{alpha nu} in terms of ADM quantities:
    import BSSN.ADMBSSN_tofrom_4metric as AB4m
    AB4m.g4DD_ito_BSSN_or_ADM("ADM",gammaDD,betaU,alpha)
    # Contract: T^mu_nu = sum_delta T^{mu delta} g_{delta nu}
    T4UD = ixp.zerorank2(DIM=4)
    for mu in range(4):
        for nu in range(4):
            T4UD[mu][nu] += sum(T4UU[mu][dd]*AB4m.g4DD[dd][nu]
                                for dd in range(4))
# Step 3: Writing the conservative variables in terms of the primitive variables
def compute_sqrtgammaDET(gammaDD):
    """Set the module-level global sqrtgammaDET = sqrt(det(gamma_{ij}))
    for the input symmetric 3-metric gammaDD."""
    global sqrtgammaDET
    _gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD) # _gammaUU unused.
    sqrtgammaDET = sp.sqrt(gammaDET)
def compute_rho_star(alpha, sqrtgammaDET, rho_b,u4U):
    """Set the module-level global rho_star, the conservative rest-mass
    density: rho_star = alpha * sqrt(gamma) * rho_b * u^0."""
    global rho_star
    # Compute rho_star:
    rho_star = alpha*sqrtgammaDET*rho_b*u4U[0]
def compute_tau_tilde(alpha, sqrtgammaDET, T4UU,rho_star):
    """Set the module-level global tau_tilde, the conservative energy
    variable: tau_tilde = alpha^2 * sqrt(gamma) * T^{00} - rho_star."""
    global tau_tilde
    tau_tilde = alpha**2*sqrtgammaDET*T4UU[0][0] - rho_star
def compute_S_tildeD(alpha, sqrtgammaDET, T4UD):
    """Set the module-level global S_tildeD, the conservative momentum
    density: S_tilde_i = alpha * sqrt(gamma) * T^0_i."""
    global S_tildeD
    prefactor = alpha*sqrtgammaDET
    S_tildeD = [prefactor*T4UD[0][i+1] for i in range(3)]
# Step 4: Define the fluxes for the GRHD equations
# Step 4.a: vU from u4U may be needed for computing rho_star_flux from u4U
def compute_vU_from_u4U__no_speed_limit(u4U):
    """Set the module-level global vU, the fluid 3-velocity
    v^i = u^i/u^0, with NO speed limiter applied."""
    global vU
    u0 = u4U[0]
    vU = [u4U[j+1]/u0 for j in range(3)]
# Step 4.b: rho_star flux
def compute_rho_star_fluxU(vU, rho_star):
    """Set the module-level global rho_star_fluxU, the flux of rho_star:
    (rho_star flux)^j = rho_star * v^j."""
    global rho_star_fluxU
    rho_star_fluxU = [rho_star*vU[j] for j in range(3)]
# Step 4.c: tau_tilde flux
def compute_tau_tilde_fluxU(alpha, sqrtgammaDET, vU,T4UU,rho_star):
    """Set the module-level global tau_tilde_fluxU, the flux of tau_tilde:
    (tau_tilde flux)^j = alpha^2 * sqrt(gamma) * T^{0j} - rho_star * v^j."""
    global tau_tilde_fluxU
    prefactor = alpha**2*sqrtgammaDET
    tau_tilde_fluxU = [prefactor*T4UU[0][j+1] - rho_star*vU[j]
                       for j in range(3)]
# Step 4.d: S_tilde flux
def compute_S_tilde_fluxUD(alpha, sqrtgammaDET, T4UD):
    """Set the module-level global S_tilde_fluxUD, the flux of S_tilde_i
    along direction j: (S_tilde flux)^j_i = alpha * sqrt(gamma) * T^j_i."""
    global S_tilde_fluxUD
    prefactor = alpha*sqrtgammaDET
    S_tilde_fluxUD = [[prefactor*T4UD[j+1][i+1] for i in range(3)]
                      for j in range(3)]
# Step 5: Define source terms on RHSs of GRHD equations
# Step 5.a: tau_tilde RHS source term s
def compute_s_source_term(KDD,betaU,alpha, sqrtgammaDET,alpha_dD, T4UU):
    """Set the module-level global s_source_term, the source term s on the
    RHS of the tau_tilde evolution equation:
      s = alpha*sqrt(gamma) * [ (T^{00} b^i b^j + 2 T^{0i} b^j + T^{ij}) K_ij
                                - (T^{00} b^i + T^{0i}) partial_i alpha ],
    where b^i = beta^i (shift), K_ij = extrinsic curvature, and alpha_dD
    holds the spatial derivatives of the lapse.
    """
    global s_source_term
    s_source_term = sp.sympify(0)
    # Term 1: contraction of the T-combination with the extrinsic curvature K_ij
    for i in range(3):
        for j in range(3):
            s_source_term += (T4UU[0][0]*betaU[i]*betaU[j] + 2*T4UU[0][i+1]*betaU[j] + T4UU[i+1][j+1])*KDD[i][j]
    # Term 2: contraction with the lapse derivative partial_i alpha
    for i in range(3):
        s_source_term += -(T4UU[0][0]*betaU[i] + T4UU[0][i+1])*alpha_dD[i]
    # Overall prefactor alpha*sqrt(gamma)
    s_source_term *= alpha*sqrtgammaDET
# Step 5.b: Define source term on RHS of $\tilde{S}_i$ equation
# Step 5.b.i: Compute g_{mu nu, i}, needed for the S tilde source term
def compute_g4DD_zerotimederiv_dD(gammaDD,betaU,alpha, gammaDD_dD,betaU_dD,alpha_dD):
    """Set the module-level global g4DD_zerotimederiv_dD: the spatial
    derivatives g_{mu nu, k} of the 4-metric, assuming zero time derivative
    (only the [mu][nu][k+1] slots are filled; the [mu][nu][0] slots remain
    zero). Built from the ADM quantities and their spatial derivatives.
    """
    global g4DD_zerotimederiv_dD
    # Eq. 2.121 in B&S (Baumgarte & Shapiro): beta_i = gamma_{ij} beta^j
    betaD = ixp.zerorank1()
    for i in range(3):
        for j in range(3):
            betaD[i] += gammaDD[i][j]*betaU[j]
    # (derivative declarations used when calling this function standalone:)
    # gammaDD_dD = ixp.declarerank3("gammaDD_dDD","sym12",DIM=3)
    # betaU_dD   = ixp.declarerank2("betaU_dD"  ,"nosym",DIM=3)
    # beta_{i,k} by the product rule on beta_i = gamma_{ij} beta^j:
    betaDdD = ixp.zerorank2()
    for i in range(3):
        for j in range(3):
            for k in range(3):
                # Recall that betaD[i] = gammaDD[i][j]*betaU[j] (Eq. 2.121 in B&S)
                betaDdD[i][k] += gammaDD_dD[i][j][k]*betaU[j] + gammaDD[i][j]*betaU_dD[j][k]
    # Eq. 2.122 in B&S: assemble g_{mu nu, k} component by component
    g4DD_zerotimederiv_dD = ixp.zerorank3(DIM=4)
    for k in range(3):
        # Recall that g4DD[0][0] = -alpha^2 + betaU[j]*betaD[j]
        g4DD_zerotimederiv_dD[0][0][k+1] += -2*alpha*alpha_dD[k]
        for j in range(3):
            g4DD_zerotimederiv_dD[0][0][k+1] += betaU_dD[j][k]*betaD[j] + betaU[j]*betaDdD[j][k]
    for i in range(3):
        for k in range(3):
            # Recall that g4DD[i][0] = g4DD[0][i] = betaD[i]
            g4DD_zerotimederiv_dD[i+1][0][k+1] = g4DD_zerotimederiv_dD[0][i+1][k+1] = betaDdD[i][k]
    for i in range(3):
        for j in range(3):
            for k in range(3):
                # Recall that g4DD[i][j] = gammaDD[i][j]
                g4DD_zerotimederiv_dD[i+1][j+1][k+1] = gammaDD_dD[i][j][k]
# Step 5.b.ii: S_tilde source terms
def compute_S_tilde_source_termD(alpha, sqrtgammaDET,g4DD_zerotimederiv_dD, T4UU):
    """Set the module-level global S_tilde_source_termD, the source term of
    the S_tilde_i equation: (1/2) * alpha * sqrt(gamma) * T^{mu nu} * g_{mu nu, i}."""
    global S_tilde_source_termD
    # Hoist the i/mu/nu-independent prefactor out of the contraction.
    half_prefac = sp.Rational(1,2)*alpha*sqrtgammaDET
    S_tilde_source_termD = ixp.zerorank1(DIM=3)
    for i in range(3):
        for mu in range(4):
            for nu in range(4):
                S_tilde_source_termD[i] += half_prefac*T4UU[mu][nu]*g4DD_zerotimederiv_dD[mu][nu][i+1]
# Step 6.a: Convert Valencia 3-velocity v_{(n)}^i into u^\mu, and apply a speed limiter
# Speed-limited ValenciavU is output to rescaledValenciavU global.
def u4U_in_terms_of_ValenciavU__rescale_ValenciavU_by_applying_speed_limit(alpha, betaU, gammaDD, ValenciavU):
    """Convert the Valencia 3-velocity v_{(n)}^i into the 4-velocity u^mu,
    applying a Lorentz-factor speed limit set by GAMMA_SPEED_LIMIT.

    Inputs: Metric lapse alpha, shift betaU, 3-metric gammaDD,
            Valencia 3-velocity ValenciavU.
    Outputs (as module globals): u4U_ito_ValenciavU, rescaledValenciavU.
    """
    # R = gamma_{ij} v^i v^j  (squared speed as measured by normal observers)
    R = sp.sympify(0)
    for i in range(3):
        for j in range(3):
            R += gammaDD[i][j] * ValenciavU[i] * ValenciavU[j]
    thismodule = __name__
    # The default value isn't terribly important here, since we can overwrite
    # in the main C code
    GAMMA_SPEED_LIMIT = par.Cparameters("REAL", thismodule, "GAMMA_SPEED_LIMIT", 10.0)  # Default value based on
    # IllinoisGRMHD.
    # GiRaFFE default = 2000.0
    # Rmax corresponds to the maximum allowed Lorentz factor:
    Rmax = 1 - 1 / (GAMMA_SPEED_LIMIT * GAMMA_SPEED_LIMIT)
    # Now, we set Rstar = min(Rmax,R), written branch-free via abs():
    # If R <  Rmax, then Rstar = 0.5*(Rmax+R-Rmax+R) = R
    # If R >= Rmax, then Rstar = 0.5*(Rmax+R+Rmax-R) = Rmax
    Rstar = sp.Rational(1, 2) * (Rmax + R - nrpyAbs(Rmax - R))
    # We add TINYDOUBLE to R below to avoid a 0/0, which occurs when
    # ValenciavU == 0 for all Valencia 3-velocity components.
    # "Those tiny *doubles* make me warm all over
    #  with a feeling that I'm gonna love you till the end of time."
    #    - Adapted from Connie Francis' "Tiny Bubbles"
    TINYDOUBLE = par.Cparameters("#define", thismodule, "TINYDOUBLE", 1e-100)
    # The rescaled (speed-limited) Valencia 3-velocity
    # is given by, v_{(n)}^i = sqrt{Rstar/R} v^i
    global rescaledValenciavU
    rescaledValenciavU = ixp.zerorank1()
    for i in range(3):
        # If R == 0, then Rstar == 0, so sqrt( Rstar/(R+TINYDOUBLE) )=sqrt(0/1e-100) = 0
        # If your velocities are of order 1e-100 and this is physically
        # meaningful, there must be something wrong with your unit conversion.
        rescaledValenciavU[i] = ValenciavU[i] * sp.sqrt(Rstar / (R + TINYDOUBLE))
    # Finally compute u^mu in terms of Valenciav^i
    # u^0 = 1/(alpha*sqrt(1-R^*))   [note: multiplication in the denominator]
    global u4U_ito_ValenciavU
    u4U_ito_ValenciavU = ixp.zerorank1(DIM=4)
    u4U_ito_ValenciavU[0] = 1 / (alpha * sp.sqrt(1 - Rstar))
    # u^i = u^0 ( alpha v^i_{(n)} - beta^i ), where v^i_{(n)} is the Valencia 3-velocity
    for i in range(3):
        u4U_ito_ValenciavU[i + 1] = u4U_ito_ValenciavU[0] * (alpha * rescaledValenciavU[i] - betaU[i])
# Step 6.b: Convert v^i into u^\mu, and apply a speed limiter.
# Speed-limited vU is output to rescaledvU global.
def u4U_in_terms_of_vU__rescale_vU_by_applying_speed_limit(alpha, betaU, gammaDD, vU):
    """Convert the fiducial-observer 3-velocity v^i = u^i/u^0 into u^mu,
    applying the speed limiter via the Valencia-velocity routine above.
    Outputs (as module globals): u4U_ito_vU, rescaledvU.
    """
    # Convert v^i to the Valencia 3-velocity: v_{(n)}^i = (v^i + beta^i)/alpha
    ValenciavU = ixp.zerorank1()
    for i in range(3):
        ValenciavU[i] = (vU[i] + betaU[i]) / alpha
    u4U_in_terms_of_ValenciavU__rescale_ValenciavU_by_applying_speed_limit(alpha, betaU, gammaDD, ValenciavU)
    # Since ValenciavU is written in terms of vU,
    # u4U_ito_ValenciavU is actually u4U_ito_vU
    global u4U_ito_vU
    u4U_ito_vU = ixp.zerorank1(DIM=4)
    for mu in range(4):
        u4U_ito_vU[mu] = u4U_ito_ValenciavU[mu]
    # Finally compute the rescaled (speed-limited) vU by inverting the
    # Valencia relation: v^i = alpha v_{(n)}^i - beta^i
    global rescaledvU
    rescaledvU = ixp.zerorank1(DIM=3)
    for i in range(3):
        rescaledvU[i] = alpha * rescaledValenciavU[i] - betaU[i]
def generate_everything_for_UnitTesting():
    """Construct every GRHD quantity defined in this module from generic
    symbolic inputs, populating the module-level globals; used by the
    NRPy+ unit tests to validate all expressions at once."""
    # First define hydrodynamical quantities
    u4U = ixp.declarerank1("u4U", DIM=4)
    rho_b, P, epsilon = sp.symbols('rho_b P epsilon', real=True)
    # Then ADM quantities
    gammaDD = ixp.declarerank2("gammaDD", "sym01", DIM=3)
    KDD = ixp.declarerank2("KDD", "sym01", DIM=3)
    betaU = ixp.declarerank1("betaU", DIM=3)
    alpha = sp.symbols('alpha', real=True)
    # First compute stress-energy tensor T4UU and T4UD:
    compute_T4UU(gammaDD, betaU, alpha, rho_b, P, epsilon, u4U)
    compute_T4UD(gammaDD, betaU, alpha, T4UU)
    # Next sqrt(gamma)
    compute_sqrtgammaDET(gammaDD)
    # Compute conservative variables in terms of primitive variables
    compute_rho_star(alpha, sqrtgammaDET, rho_b, u4U)
    compute_tau_tilde(alpha, sqrtgammaDET, T4UU, rho_star)
    compute_S_tildeD(alpha, sqrtgammaDET, T4UD)
    # Then compute v^i from u^mu
    compute_vU_from_u4U__no_speed_limit(u4U)
    # Next compute fluxes of conservative variables
    compute_rho_star_fluxU( vU, rho_star)
    compute_tau_tilde_fluxU(alpha, sqrtgammaDET, vU, T4UU, rho_star)
    compute_S_tilde_fluxUD( alpha, sqrtgammaDET, T4UD)
    # Then declare derivatives & compute g4DD_zerotimederiv_dD
    gammaDD_dD = ixp.declarerank3("gammaDD_dD", "sym01", DIM=3)
    betaU_dD = ixp.declarerank2("betaU_dD", "nosym", DIM=3)
    alpha_dD = ixp.declarerank1("alpha_dD", DIM=3)
    compute_g4DD_zerotimederiv_dD(gammaDD, betaU, alpha, gammaDD_dD, betaU_dD, alpha_dD)
    # Then compute source terms on tau_tilde and S_tilde equations
    compute_s_source_term(KDD, betaU, alpha, sqrtgammaDET, alpha_dD, T4UU)
    compute_S_tilde_source_termD(alpha, sqrtgammaDET, g4DD_zerotimederiv_dD, T4UU)
    # Then compute the 4-velocities in terms of an input Valencia 3-velocity testValenciavU[i]
    testValenciavU = ixp.declarerank1("testValenciavU", DIM=3)
    u4U_in_terms_of_ValenciavU__rescale_ValenciavU_by_applying_speed_limit(alpha, betaU, gammaDD, testValenciavU)
    # Finally compute the 4-velocities in terms of an input 3-velocity testvU[i] = u^i/u^0
    testvU = ixp.declarerank1("testvU", DIM=3)
    u4U_in_terms_of_vU__rescale_vU_by_applying_speed_limit(alpha, betaU, gammaDD, testvU)
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@GRHD@equations.py@.PATH_END.py
|
{
"filename": "thin_slice_projection.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/doc/source/cookbook/thin_slice_projection.py",
"type": "Python"
}
|
import yt

# Load the dataset.
ds = yt.load("Enzo_64/DD0030/data0030")

# Make a projection that is the full width of the domain,
# but only 5 Mpc in depth. This is done by creating a
# region object with this exact geometry and providing it
# as a data_source for the projection.

# Center on the domain center
center = ds.domain_center.copy()

# First make the left and right corner of the region based
# on the full domain.
left_corner = ds.domain_left_edge.copy()
right_corner = ds.domain_right_edge.copy()

# Now adjust the size of the region along the line of sight (x axis),
# keeping it centered on the domain: +/- half the depth about the center.
depth = ds.quan(5.0, "Mpc")
left_corner[0] = center[0] - 0.5 * depth
right_corner[0] = center[0] + 0.5 * depth

# Create the region
region = ds.box(left_corner, right_corner)

# Create a density projection and supply the region we have just created.
# Only cells within the region will be included in the projection.
# Try with another data container, like a sphere or disk.
plot = yt.ProjectionPlot(
    ds, "x", ("gas", "density"), weight_field=("gas", "density"), data_source=region
)

# Save the image with the keyword.
plot.save("Thin_Slice")
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@doc@source@cookbook@thin_slice_projection.py@.PATH_END.py
|
{
"filename": "lcmodel.py",
"repo_name": "nye17/javelin",
"repo_path": "javelin_extracted/javelin-master/javelin/lcmodel.py",
"type": "Python"
}
|
# Last-modified: 28 Apr 2014 05:06:35
# generic packages
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
# np.seterr(all='raise')
from scipy.optimize import fmin
import matplotlib.pyplot as plt
# internal packages
from .cholesky_utils import cholesky, chosolve_from_tri, chodet_from_tri
from .zylc import LightCurve
from .cov import get_covfunc_dict
from .spear import spear, spear_threading
from .predict import (PredictSignal, PredictRmap, PredictPmap, PredictSPmap,
PredictSCmap, PredictDPmap)
from .gp import FullRankCovariance, NearlyFullRankCovariance
from .err import InputError, UsageError
from six.moves import range
# try:
# try to use locally-installed emcee
# from emcee import EnsembleSampler
# except ImportError:
# try to use the internal emcee
# from .emcee_internal import EnsembleSampler
# use the internal by default because the latest emcee is not downstream compatible with the older versions anymore.
from .emcee_internal import EnsembleSampler
from .graphic import figure_handler
from copy import copy
# Finite stand-ins for +/- infinity, so downstream arithmetic on failed
# likelihood evaluations never produces true inf/nan.
my_neg_inf = float(-1.0e+300)
my_pos_inf = float(+1.0e+300)
# Hard floors/ceilings for the DRW parameters tau and sigma, in linear space...
tau_floor = 1.e-6
tau_ceiling = 1.e+5
sigma_floor = 1.e-6
sigma_ceiling = 1.e+2
# ...and in log space (used when sampling log-parameters).
logtau_floor = np.log(tau_floor)
logtau_ceiling = np.log(tau_ceiling)
logsigma_floor = np.log(sigma_floor)
logsigma_ceiling = np.log(sigma_ceiling)
# Bounds for the extra covariance parameter nu (covariance-dependent meaning).
nu_floor = 1.e-6
lognu_floor = np.log(nu_floor)
nu_ceiling = 1.e+3
lognu_ceiling = np.log(nu_ceiling)
# Public API of this module.
__all__ = ['Cont_Model', 'Rmap_Model', 'Pmap_Model', 'SPmap_Model',
           'SCmap_Model', 'Disk_Model', 'DPmap_Model']
def _lnlike_from_U(U, zydata, set_retq=False, set_verbose=False):
    """ Calculate the log-likelihoods from the upper triangle of cholesky
    decomposition.

    U is the Cholesky factor of the covariance C = S + N (U^T U = C).
    Computes the likelihood with the linear (mean) parameters q
    marginalized out analytically; when set_retq is True the individual
    components and q are returned as well.
    """
    # log determinant of C^-1
    detC_log = chodet_from_tri(U, retlog=True)
    # solve for C a = y so that a = C^-1 y
    a = chosolve_from_tri(U, zydata.marr)
    # solve for C b = L so that b = C^-1 L
    b = chosolve_from_tri(U, zydata.larr)
    # multiply L^T and b so that C_p = L^T C^-1 L = C_q^-1
    C_p = np.dot(zydata.larrTr, b)
    # for 'issingle is True' case, C_p is a scalar.
    # C_p is a nested list for some reason [[C_p]], so isscalar is bad
    # if np.isscalar(C_p):
    if zydata.issingle:
        # for single-mode, cholesky of C_p is simply square-root of C_p
        W = np.sqrt(C_p)
        detCp_log = np.log(C_p.squeeze())
        # for single-mode, simply divide L^T by C_p
        d = zydata.larrTr/C_p
    else:
        # cholesky decompose C_p so that W^T W = C_p
        W, info = cholesky(C_p, raiseinfo=False)
        if info > 0:
            return(_exit_with_retval(
                zydata.nlc, set_retq,
                errmsg="Warning: non positive-definite covariance W",
                set_verbose=set_verbose))
        detCp_log = chodet_from_tri(W, retlog=True)
        # solve for C_p d = L^T so that d = C_p^-1 L^T = C_q L^T
        d = chosolve_from_tri(W, zydata.larrTr)
    # multiply b d and a so that e = C^-1 L C_p^-1 L^T C^-1 y
    e = np.dot(b, np.dot(d, a))
    # a minus e so that f = a - e = C^-1 y - C^-1 L C_p^-1 L^T C^-1 y
    # thus f = C_v^-1 y
    f = a - e
    # multiply y^T and f so that h = y^T C_v^-1 y
    h = np.dot(zydata.marr, f)
    # chi2_PRH = -0.5*h
    _chi2 = -0.5*h
    # following Carl Rasmussen's term, a penalty on the complexity of
    # the model
    _compl_pen = -0.5*detC_log
    # penalty on blatant linear drift
    _wmean_pen = -0.5*detCp_log
    # final log_likelihood
    _log_like = _chi2 + _compl_pen + _wmean_pen
    # XXX guarding against nan
    if np.isnan(_log_like):
        print("warning: NaN in loglike, convert to my_neg_inf")
        _log_like = my_neg_inf
    if set_retq:
        # q = C_q L^T C^-1 y: the marginalized linear-parameter estimate
        q = np.dot(d, a)
        return(_log_like, _chi2, _compl_pen, _wmean_pen, q)
    else:
        return(_log_like)
def _exit_with_retval(nlc, set_retq, errmsg=None, set_verbose=False):
    """ Return failure elegantly.

    When you are desperate and just want to leave the calculation with
    appropriate return values that quietly speak out your angst.
    Returns my_neg_inf (or, when set_retq is True, a full 5-tuple of
    my_neg_inf placeholders matching the retq layout).
    """
    if errmsg is not None and set_verbose:
        print(("Exit: %s" % errmsg))
    if not set_retq:
        return(my_neg_inf)
    return(my_neg_inf, my_neg_inf, my_neg_inf, my_neg_inf,
           [my_neg_inf]*nlc)
def _get_hpd(ndim, flatchain):
""" Get the 68% percentile range of each parameter.
"""
hpd = np.zeros((3, ndim))
chain_len = flatchain.shape[0]
pct1sig = chain_len*np.array([0.16, 0.50, 0.84])
medlowhig = pct1sig.astype(np.int32)
for i in range(ndim):
vsort = np.sort(flatchain[:,i])
hpd[:,i] = vsort[medlowhig]
return(hpd)
def _get_bfp(flatchain, logp):
j = np.argmax(logp)
bfp = flatchain[j, :]
return(bfp)
# ---------------------------------
# Cont_Model: Continuum Variability
def unpacksinglepar(p, covfunc="drw", uselognu=False):
    """ Internal Function: Unpack the physical parameters from input 1-d
    array for single mode.

    p[0] = log(sigma) and p[1] = log(tau); each is exponentiated and
    clamped to the module-level floor/ceiling constants. For non-"drw"
    covariances p[2] carries nu, interpreted as log(nu) when `uselognu`
    is True (then also clamped); otherwise used as-is.
    Returns (sigma, tau, nu), with nu = None for the "drw" covariance.
    """
    # clamp sigma to [sigma_floor, sigma_ceiling]
    if p[0] > logsigma_ceiling:
        sigma = sigma_ceiling
    elif p[0] < logsigma_floor:
        sigma = sigma_floor
    else:
        sigma = np.exp(p[0])
    # clamp tau to [tau_floor, tau_ceiling]
    if p[1] > logtau_ceiling:
        tau = tau_ceiling
    elif p[1] < logtau_floor:
        tau = tau_floor
    else:
        tau = np.exp(p[1])
    if covfunc == "drw":
        nu = None
    elif uselognu:
        # clamp nu to [nu_floor, nu_ceiling]
        if p[2] < lognu_floor:
            nu = nu_floor
        elif p[2] > lognu_ceiling:
            nu = nu_ceiling
        else:
            nu = np.exp(p[2])
    else:
        nu = p[2]
    return(sigma, tau, nu)
def lnpostfn_single_p(p, zydata, covfunc, taulimit=None, set_prior=True,
                      conthpd=None, uselognu=False, rank="Full",
                      set_retq=False, set_verbose=False,
                      fixed=None, p_fix=None):
    """ Calculate the log posterior for parameter set `p`.

    Parameters
    ----------
    p: list
        Parameter list.
    zydata: LightCurve
        Input LightCurve data.
    covfunc: str
        name of the covariance function.
    taulimit: tuple
        lower and upper boundary for tau.
    set_prior: bool, optional
        Turn on/off priors that are pre-defined in `lnpostfn_single_p`
        (default: True).
    conthpd: ndarray, optional
        Priors on sigma and tau as an ndarray with shape (3, 2),
        np.array([[log(sigma_low), log(tau_low)],
                  [log(sigma_med), log(tau_med)],
                  [log(sigma_hig), log(tau_hig)]])
        where 'low', 'med', and 'hig' are defined as the 68% confidence
        limits around the median. Here it is only used if the `covfunc` is
        '(w)kepler2_exp'.
    uselognu: bool, optional
        Whether to use lognu instead of nu (default: False).
    rank: str, optional
        Type of covariance matrix rank, "Full" or "NearlyFull" (
        default: "Full").
    set_retq: bool, optional
        Whether to return all the components of the posterior (default: False).
    set_verbose: bool, optional
        Turn on/off verbose mode (default: False).
    fixed: list
        Bit list indicating which parameters are to be fixed during
        minimization, `1` means varying, while `0` means fixed,
        so [1, 1, 0] means fixing only the third parameter, and `len(fixed)`
        equals the number of parameters (default: None, i.e., varying all
        the parameters simultaneously).
    p_fix: list
        parameter list, with p_fix[fixed==0] being fixed.

    Returns
    -------
    retval: float (set_retq is False) or list (set_retq is True)
        if `retval` returns a list, then it contains the full posterior info
        as a list of [log_posterior, chi2_component, det_component, DC_penalty,
        correction_to_the_mean].
    """
    if fixed is not None:
        # fix parameters during inference: blend the varying entries of `p`
        # with the frozen entries of `p_fix` (fixed[i]==0 means frozen).
        fixed = np.asarray(fixed)
        p_fix = np.asarray(p_fix)
        p = np.asarray(p)
        p = p * fixed + p_fix * (1. - fixed)
    sigma, tau, nu = unpacksinglepar(p, covfunc, uselognu=uselognu)
    # log-likelihood
    if set_retq:
        vals = list(lnlikefn_single(zydata, covfunc=covfunc, rank=rank,
                                    sigma=sigma, tau=tau, nu=nu, set_retq=True,
                                    set_verbose=set_verbose))
    else:
        logl = lnlikefn_single(zydata, covfunc=covfunc, rank=rank, sigma=sigma,
                               tau=tau, nu=nu, set_retq=False,
                               set_verbose=set_verbose)
    # prior
    prior = 0.0
    if set_prior:
        if covfunc == "kepler2_exp" or covfunc == "wkepler2_exp":
            if conthpd is None:
                # raise RuntimeError("kepler2_exp prior requires conthpd")
                print("Warning: (w)kepler2_exp prior requires conthpd")
            else:
                # Gaussian-like prior centered on the continuum-fit median,
                # with asymmetric widths taken from the 68% interval.
                # for sigma
                if p[0] < conthpd[1,0]:
                    prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
                else:
                    prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
                # for tau
                if p[1] < conthpd[1,1]:
                    prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
                else:
                    prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
                # final
                prior += -0.5*(prior0*prior0+prior1*prior1)
        else:
            # scale-free prior on sigma, plus a prior on tau that penalizes
            # values away from the continuum cadence
            prior += - np.log(sigma)
            if tau > zydata.cont_cad:
                prior += - np.log(tau/zydata.cont_cad)
            elif tau < 0.001:
                # 86.4 seconds if input is in days
                prior = my_neg_inf
            else:
                prior += - np.log(zydata.cont_cad/tau)
    if taulimit is not None:
        # hard cutoff: reject tau outside the requested window
        if tau < taulimit[0] or tau > taulimit[1]:
            prior = my_neg_inf
    # combine prior and log-likelihood
    if set_retq:
        vals[0] = vals[0] + prior
        vals.append(prior)
        return(vals)
    else:
        logp = logl + prior
        return(logp)
def lnlikefn_single(zydata, covfunc="drw", rank="Full", set_retq=False,
                    set_verbose=False, **covparams):
    """ internal function to calculate the log likelihood,
    see `lnpostfn_single_p` for doc.

    `covparams` must contain `sigma` and `tau`, and `nu` for non-DRW
    covariances. Returns the log-likelihood (or the full retq list when
    set_retq is True); parameter violations and a failed Cholesky
    decomposition exit via `_exit_with_retval`.
    """
    covfunc_dict = get_covfunc_dict(covfunc, **covparams)
    sigma = covparams.pop("sigma")
    tau = covparams.pop("tau")
    nu = covparams.pop("nu", None)
    # sanity checks on the parameters before building the covariance
    if (sigma <= 0.0 or tau <= 0.0):
        return(_exit_with_retval(
            zydata.nlc, set_retq,
            errmsg="Warning: illegal input of parameters",
            set_verbose=set_verbose))
    if covfunc == "pow_exp":
        # pow_exp requires 0 < nu < 2
        if nu <= 0.0 or nu >= 2.0:
            return(_exit_with_retval(zydata.nlc, set_retq,
                errmsg="Warning: illegal input of parameters in nu",
                set_verbose=set_verbose))
    elif covfunc == "matern":
        # matern requires 0 < nu < 1 (the two redundant original checks,
        # nu <= 0 and nu < 0 or nu >= 1, merged into one equivalent test)
        if nu <= 0.0 or nu >= 1.0:
            return(_exit_with_retval(zydata.nlc, set_retq,
                errmsg="Warning: illegal input of parameters in nu",
                set_verbose=set_verbose))
    elif covfunc == "kepler2_exp" or covfunc == "wkepler2_exp":
        # here nu is the cutoff time scale, bounded above by tau
        if nu < 0.0 or nu >= tau:
            return(_exit_with_retval(zydata.nlc, set_retq,
                errmsg="Warning: illegal input of parameters in nu",
                set_verbose=set_verbose))
    # choice of ranks
    if rank == "Full":
        # using full-rank
        C = FullRankCovariance(**covfunc_dict)
    elif rank == "NearlyFull":
        # using nearly full-rank
        C = NearlyFullRankCovariance(**covfunc_dict)
    else:
        raise InputError("No such option for rank "+rank)
    # cholesky decompose S+N so that U^T U = S+N = C
    # using intrinsic method of C without explicitly writing out cmatrix
    try:
        U = C.cholesky(zydata.jarr, observed=False, nugget=zydata.varr)
    except Exception:
        # was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; Exception keeps the original
        # best-effort behavior without trapping interpreter exits.
        print('cholesky failed')
        return(_exit_with_retval(zydata.nlc, set_retq,
            errmsg="Warning: non positive-definite covariance C #5",
            set_verbose=set_verbose))
    # calculate the PRH likelihood
    retval = _lnlike_from_U(U, zydata, set_retq=set_retq,
                            set_verbose=set_verbose)
    return(retval)
class Cont_Model(object):
    def __init__(self, zydata=None, covfunc="drw"):
        """ Cont Model object.

        Parameters
        ----------
        zydata: LightCurve object, optional
            Input LightCurve data, a null input means that `Cont_Model` will
            be loading existing chains (default: None).
        covfunc: str, optional
            Name of the covariance function for the continuum (default: drw)
        """
        self.zydata = zydata
        self.covfunc = covfunc
        if zydata is None:
            # chain-loading mode: no data-derived attributes are set
            pass
        else:
            # cache frequently-used light-curve properties
            self.nlc = zydata.nlc
            self.npt = zydata.npt
            self.cont_npt = zydata.nptlist[0]
            self.cont_cad = zydata.cont_cad
            self.cont_cad_min = zydata.cont_cad_min
            self.cont_cad_max = zydata.cont_cad_max
            self.cont_std = zydata.cont_std
            self.rj = zydata.rj
            self.jstart = zydata.jstart
            self.jend = zydata.jend
            self.names = zydata.names
        # parameter bookkeeping: names and LaTeX labels for plotting
        self.vars = ["sigma", "tau"]
        self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
        if covfunc == "drw":
            self.uselognu = False
            self.ndim = 2
        elif covfunc == "matern" or covfunc == "kepler2_exp" or covfunc == "wkepler2_exp":
            # these covariances sample nu in log space
            self.uselognu = True
            self.ndim = 3
            self.vars.append("nu")
            self.texs.append(r"$\log\,\nu$")
        else:
            # all remaining covariances sample nu linearly
            self.uselognu = False
            self.ndim = 3
            self.vars.append("nu")
            self.texs.append(r"$\nu$")
    def __call__(self, p, **lnpostparams):
        """ Calculate the posterior value given one parameter set `p`.
        See `lnpostfn_single_p` for doc (lnpostparams are forwarded there).
        """
        return(lnpostfn_single_p(p, self.zydata, covfunc=self.covfunc,
                                 uselognu=self.uselognu, **lnpostparams))
def do_map(self, p_ini, fixed=None, **lnpostparams):
"""
Maximum A Posterior minimization. See `lnpostfn_single_p` for doc.
Parameters
----------
p_ini: list
Initial guess for the parameters.
fixed: list
Bit list indicating which parameters are to be fixed during
minimization, `1` means varying, while `0` means fixed,
so [1, 1, 0] means fixing only the third parameter, and `len(fixed)`
equals the number of parameters (default: None, i.e., varying all
the parameters simultaneously).
lnpostparams: kwargs
kwargs for `lnpostfn_single_p`.
"""
set_verbose = lnpostparams.pop("set_verbose", True)
set_retq = lnpostparams.pop("set_retq", False)
taulimit = lnpostparams.pop("taulimit", None)
set_prior = lnpostparams.pop("set_prior", True)
rank = lnpostparams.pop("rank", "Full")
conthpd = lnpostparams.pop("conthpd", None)
if set_retq is True:
raise InputError("set_retq has to be False")
p_ini = np.asarray(p_ini)
# FIXME the new lnpostfn_single_p could take fixed and p_ini directly as input...
if fixed is not None:
fixed = np.asarray(fixed)
func = lambda _p: -lnpostfn_single_p(
_p*fixed+p_ini*(1.-fixed), self.zydata, self.covfunc,
taulimit=taulimit,
set_prior=set_prior,
conthpd=conthpd,
uselognu=self.uselognu,
rank=rank,
set_retq=False,
set_verbose=set_verbose
)
else:
func = lambda _p: -lnpostfn_single_p(
_p, self.zydata, self.covfunc,
taulimit=taulimit,
set_prior=set_prior,
conthpd=conthpd,
uselognu=self.uselognu,
rank=rank,
set_retq=False,
set_verbose=set_verbose
)
p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
if fixed is not None:
p_bst = p_bst*fixed+p_ini*(1.-fixed)
sigma, tau, nu = unpacksinglepar(p_bst, covfunc=self.covfunc,
uselognu=self.uselognu)
if set_verbose:
print("Best-fit parameters are:")
print(("sigma %8.3f tau %8.3f" % (sigma, tau)))
if nu is not None:
print(("nu %8.3f" % nu))
print(("with logp %10.5g " % -v_bst))
return(p_bst, -v_bst)
def do_grid1d(self, p_ini, fixed, rangex, dx, fgrid1d, **lnpostparams):
""" Minimization over a 1D grid. See `lnpostfn_single_p` for doc.
Parameters
----------
p_ini: list
Initial guess for the parameters.
fixed: list
Bit list indicating which parameters are to be fixed during
minimization, `1` means varying, while `0` means fixed, so [1, 1, 0]
means fixing only the third parameter, and `len(fixed)` equals the
number of parameters (default: None, i.e., varying all the
parameters simultaneously).
rangex: tuple
range of `x`, i.e., (xmin, xmax)
dx: float
bin size in `x`.
fgrid1d: str
filename for the output.
lnpostparams: kwargs
kwargs for `lnpostfn_single_p`.
"""
set_verbose = lnpostparams.pop("set_verbose", True)
xs = np.arange(rangex[0], rangex[-1]+dx, dx)
fixed = np.asarray(fixed)
nfixed = np.sum(fixed == 0)
if nfixed != 1:
raise InputError("wrong number of fixed pars ")
f = open(fgrid1d, "w")
for x in xs:
_p_ini = p_ini*fixed + x*(1.-fixed)
_p, _l = self.do_map(_p_ini, fixed=fixed, **lnpostparams)
_line = "".join([format(_l, "20.10g"),
" ".join([format(r, "10.5f") for r in _p]), "\n"])
f.write(_line)
f.flush()
f.close()
if set_verbose:
print(("saved grid1d result to %s" % fgrid1d))
def do_grid2d(self, p_ini, fixed, rangex, dx, rangey, dy, fgrid2d,
**lnpostparams):
""" Minimization over a 2D grid. See `lnpostfn_single_p` for doc.
Parameters
----------
p_ini: list
Initial guess for the parameters.
fixed: list
Bit list indicating which parameters are to be fixed during
minimization, `1` means varying, while `0` means fixed,
so [1, 1, 0] means fixing only the third parameter,
and `len(fixed)` equals the number of parameters (default:
None, i.e., varying all the parameters simultaneously).
rangex: tuple
range of `x`, i.e., (xmin, xmax)
dx: float
bin size in `x`.
rangey: tuple
range of `y`, i.e., (ymin, ymax)
dy: float
bin size in `y`.
fgrid2d: str
filename for the output.
lnpostparams: kwargs
kwargs for `lnpostfn_single_p`.
"""
fixed = np.asarray(fixed)
set_verbose = lnpostparams.pop("set_verbose", True)
xs = np.arange(rangex[0], rangex[-1]+dx, dx)
ys = np.arange(rangey[0], rangey[-1]+dy, dy)
nfixed = np.sum(fixed == 0)
if nfixed != 2:
raise InputError("wrong number of fixed pars ")
posx, posy = np.nonzero(1-fixed)[0]
dimx, dimy = len(xs),len(ys)
header = " ".join(["#", str(posx), str(posy), str(dimx),
str(dimy), "\n"])
print(header)
f = open(fgrid2d, "w")
f.write(header)
for x in xs:
for y in ys:
_p_ini = p_ini*fixed
_p_ini[posx] = x
_p_ini[posy] = y
_p, _l = self.do_map(_p_ini, fixed=fixed, **lnpostparams)
_line = "".join([format(_l, "20.10g"),
" ".join([format(r, "10.5f") for r in _p]),
"\n"])
f.write(_line)
f.flush()
f.close()
if set_verbose:
print(("saved grid2d result to %s" % fgrid2d))
def read_logp_map(self, fgrid2d, set_verbose=True):
""" Read the output from `do_grid2d`.
Parameters
----------
fgrid2d: str
filename.
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
Returns
-------
retdict: dict
Grid returned as a dict.
"""
f = open(fgrid2d, "r")
posx, posy, dimx, dimy = [
int(r) for r in f.readline().lstrip("#").split()]
if set_verbose:
print(("grid file %s is registered for" % fgrid2d))
print(("var_x = %10s var_y = %10s" % (self.vars[posx],
self.vars[posy])))
print(("dim_x = %10d dim_y = %10d" % (dimx, dimy)))
if self.covfunc != "drw":
logp, sigma, tau, nu = np.genfromtxt(
f, unpack=True, usecols=(0,1,2,3))
else:
logp, sigma, tau = np.genfromtxt(f, unpack=True, usecols=(0,1,2))
f.close()
retdict = {
'logp': logp.reshape(dimx, dimy).T,
'sigma': sigma.reshape(dimx, dimy).T,
'tau': tau.reshape(dimx, dimy).T,
'nu': None,
'posx': posx,
'posy': posy,
'dimx': dimx,
'dimy': dimy,
}
if self.covfunc != "drw":
retdict['nu'] = nu.reshape(dimx, dimy).T
return(retdict)
def show_logp_map(self, fgrid2d, set_normalize=True, vmin=None, vmax=None,
set_contour=True, clevels=None, set_verbose=True,
figout=None, figext=None):
""" Display the grid output from `do_grid2d`.
Parameters
----------
fgrid2d: str
filename.
set_normalize: bool, optional
Whether to normalize the histogram.
vmin: float, optional
Minimum value of the histogram.
set_contour: bool, optional
Whether to overplot contours (default: True).
clevels: list, optional
Contour levels. `clevels` = None will set the levels as if the
likelihood is for a Gaussian model with two parameters.
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
figout: str, optional
Output figure name (default: None, i.e., using sequencial integers).
figext: str, optional
Output figure extension (default: None, i.e., using `show`).
"""
ln10 = np.log(10.0)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
retdict = self.read_logp_map(fgrid2d, set_verbose=set_verbose)
x = retdict[self.vars[retdict['posx']]]/ln10
y = retdict[self.vars[retdict['posy']]]/ln10
z = retdict['logp']
if x is None or y is None:
raise InputError("incompatible fgrid2d file"+fgrid2d)
xmin,xmax,ymin,ymax = np.min(x),np.max(x),np.min(y),np.max(y)
extent = (xmin,xmax,ymin,ymax)
if set_normalize:
zmax = np.max(z)
z = z - zmax
if vmin is None:
vmin = z.min()
if vmax is None:
vmax = z.max()
ax.imshow(z, origin='lower', vmin=vmin, vmax=vmax,
cmap='jet', interpolation="nearest", aspect="auto",
extent=extent)
if set_contour:
if clevels is None:
sigma3,sigma2,sigma1 = 11.8/2.0,6.17/2.0,2.30/2.0
levels = (vmax-sigma1, vmax-sigma2, vmax-sigma3)
else:
levels = clevels
ax.set_autoscale_on(False)
ax.contour(z,levels, hold='on',colors='k',
origin='lower',extent=extent)
ax.set_xlabel(self.texs[retdict['posx']])
ax.set_ylabel(self.texs[retdict['posy']])
return(figure_handler(fig=fig, figout=figout, figext=figext))
    def do_mcmc(self, conthpd=None, set_prior=True, taulimit="baseline",
                rank="Full", nwalkers=100, nburn=50, nchain=50, fburn=None,
                fchain=None, flogp=None, threads=1, set_verbose=True,
                fixed=None, p_fix=None):
        """ Run MCMC sampling over the parameter space.

        Parameters
        ----------
        conthpd: ndarray, optional
            Usually the `hpd` array generated from the MCMC chain
            using `Cont_Model` (default: None).
        set_prior: bool, optional
            Turn on/off priors that are predefined in `lnpostfn_single_p` (
            default: True).
        taulimit: str or tuple, optional
            Lower and upper boundary for tau; "baseline" sets the limits
            to [cont_cad, rj], i.e., from the median continuum cadence up
            to the time baseline of the data (default: "baseline").
        rank: str, optional
            Type of covariance matrix rank, "Full" or "NearlyFull" (
            default: "Full").
        nwalkers: integer, optional
            Number of walkers for `emcee` (default: 100).
        nburn: integer, optional
            Number of burn-in steps for `emcee` (default: 50).
        nchain: integer, optional
            Number of chains for `emcee` (default: 50).
        fburn: str, optional
            filename for burn-in output (default: None).
        fchain: str, optional
            filename for MCMC chain output (default: None).
        flogp: str, optional
            filename for logp output (default: None).
        threads: integer
            Number of threads (default: 1).
        set_verbose: bool, optional
            Turn on/off verbose mode (default: True).
        fixed: list
            Bit list indicating which parameters are to be fixed during
            minimization, `1` means varying, while `0` means fixed,
            so [1, 1, 0] means fixing only the third parameter, and `len(fixed)`
            equals the number of parameters (default: None, i.e., varying all
            the parameters simultaneously).
        p_fix: list
            parameter list, with p_fix[fixed==0] being fixed.
        """
        # initialize a multi-dim random number array in [0, 1)
        p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
        # initial values of sigma to be scattering around cont_std
        p0[:, 0] = p0[:, 0] - 0.5 + np.log(self.cont_std)
        # initial values of tau filling cont_cad : cont_cad + 0.5rj
        # p0[:, 1] = np.log(self.cont_cad + self.rj*0.5*p0[:, 1])
        p0[:, 1] = np.log(2.0 * self.cont_cad + self.rj*0.5*p0[:, 1])
        if self.covfunc == "pow_exp":
            # draw the initial third parameter in (0, 1.99) for pow_exp
            p0[:, 2] = p0[:, 2] * 1.99
        elif self.covfunc == "matern":
            p0[:, 2] = np.log(p0[:, 2] * 5)
        elif self.covfunc == "kepler2_exp" or self.covfunc == "wkepler2_exp":
            # p0[:, 2] = np.log(self.cont_cad * p0[:, 2])
            p0[:, 2] = np.log(2.0 * self.cont_cad * p0[:, 2])
            # p0[:, 2] = np.log(self.cont_cad * p0[:, 2])
            # p0[:, 2] = p0[:, 1] + np.log(0.2)
            # make sure the initial values of tau_cut are smaller than tau_d
        if set_verbose:
            print("start burn-in")
            print(("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"
                   % (nburn, nwalkers, nburn*nwalkers)))
        if taulimit == "baseline":
            taulimit = [self.cont_cad, self.rj]
        sampler = EnsembleSampler(
            nwalkers, self.ndim, lnpostfn_single_p,
            args=(self.zydata, self.covfunc, taulimit, set_prior, conthpd,
                  self.uselognu, rank, False, False, fixed, p_fix), threads=threads)
        pos, prob, state = sampler.run_mcmc(p0, nburn)
        if set_verbose:
            print("burn-in finished")
        if fburn is not None:
            if set_verbose:
                print(("save burn-in chains to %s" % fburn))
            if fixed is not None:
                # overwrite the fixed columns with their pinned values
                for i in range(self.ndim):
                    if fixed[i] == 0:
                        sampler.flatchain[:, i] = p_fix[i]
            np.savetxt(fburn, sampler.flatchain)
        # reset sampler, then continue from the burn-in end state
        sampler.reset()
        if set_verbose:
            print("start sampling")
        sampler.run_mcmc(pos, nchain, rstate0=state)
        if set_verbose:
            print("sampling finished")
        af = sampler.acceptance_fraction
        if set_verbose:
            print("acceptance fractions for all walkers are")
            print((" ".join([format(r, "3.2f") for r in af])))
        if fixed is not None:
            # overwrite the fixed columns with their pinned values
            for i in range(self.ndim):
                if fixed[i] == 0:
                    sampler.flatchain[:, i] = p_fix[i]
        if fchain is not None:
            if set_verbose:
                print(("save MCMC chains to %s" % fchain))
            np.savetxt(fchain, sampler.flatchain)
        if flogp is not None:
            if set_verbose:
                print(("save logp of MCMC chains to %s" % flogp))
            np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make chain an attribute; keep a pristine copy for restore_chain
        self.flatchain = sampler.flatchain
        self.flatchain_whole = np.copy(self.flatchain)
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        self.logp = np.ravel(sampler.lnprobability)
        self.logp_whole = np.copy(self.logp)
        self.get_bfp()
def get_hpd(self, set_verbose=True):
""" Get the 68% percentile range of each parameter to self.hpd.
Parameters
----------
set_verbose: bool, optional
Turn on/off verbose mode (default: True).
"""
hpd = _get_hpd(self.ndim, self.flatchain)
for i in range(self.ndim):
if set_verbose:
print(("HPD of %s" % self.vars[i]))
if (self.vars[i] == "nu" and (not self.uselognu)):
print(("low: %8.3f med %8.3f hig %8.3f" % tuple(hpd[:,i])))
else:
print(("low: %8.3f med %8.3f hig %8.3f" % tuple(
np.exp(hpd[:,i]))))
# register hpd to attr
self.hpd = hpd
    def get_bfp(self):
        """ Register the best-fit parameters (the chain sample with the
        maximum logp) to `self.bfp`.
        """
        self.bfp = _get_bfp(self.flatchain, self.logp)
def show_hist(self, bins=100, figout=None, figext=None):
""" Display histograms of the posterior distributions.
Parameters
----------
bins: integer, optional
Number of bins (default:100).
figout: str, optional
Output figure name (default: None, i.e., using sequencial integers).
figext: str, optional
Output figure extension (default: None, i.e., using `show`).
"""
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
ln10 = np.log(10.0)
fig = plt.figure(figsize=(8, 5))
for i in range(self.ndim):
ax = fig.add_subplot(1,self.ndim,i+1)
if (self.vars[i] == "nu" and (not self.uselognu)):
ax.hist(self.flatchain[:,i], bins)
if self.covfunc == "kepler2_exp" or self.covfunc == "wkepler2_exp":
ax.axvspan(self.cont_cad_min,
self.cont_cad, color="g", alpha=0.2)
else:
ax.hist(self.flatchain[:,i]/ln10, bins)
if self.vars[i] == "nu" and (self.covfunc == "kepler2_exp" or self.covfunc == "wkepler2_exp"):
ax.axvspan(np.log10(self.cont_cad_min),
np.log10(self.cont_cad), color="g", alpha=0.2)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
# plt.get_current_fig_manager().toolbar.zoom()
return(figure_handler(fig=fig, figout=figout, figext=figext))
    def load_chain(self, fchain, flogp=None, set_verbose=True):
        """ Load an existing chain file.

        Parameters
        ----------
        fchain: str
            filename for MCMC chain input.
        flogp: str, optional
            filename for the corresponding logp input; when given, the
            best-fit parameters are recovered as well (default: None).
        set_verbose: bool, optional
            Turn on/off verbose mode (default: True).
        """
        if set_verbose:
            print(("load MCMC chain from %s" % fchain))
        self.flatchain = np.genfromtxt(fchain)
        # keep a pristine copy so break_chain can be undone by restore_chain
        self.flatchain_whole = np.copy(self.flatchain)
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        if flogp is not None:
            self.logp = np.genfromtxt(flogp)
            self.logp_whole = np.copy(self.logp)
            self.get_bfp()
def break_chain(self, covpar_segments):
""" Break the chain into different segments.
Parameters
----------
covpar_segments: list of lists.
list with length that equals the number of dimensions of the
parameter space.
"""
if (len(covpar_segments) != self.ndim):
print(("Error: covpar_segments has to be a list of length %d" %
(self.ndim)))
return(1)
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
for i, covpar_seq in enumerate(covpar_segments):
if covpar_seq is None:
continue
indx = np.argsort(self.flatchain[:, i])
imin, imax = np.searchsorted(self.flatchain[indx, i], covpar_seq)
indx_cut = indx[imin: imax]
if len(indx_cut) < 10:
print("Warning: cut too aggressive!")
return(1)
self.flatchain = self.flatchain[indx_cut,:]
if hasattr(self, "logp"):
self.logp = self.logp[indx_cut]
def restore_chain(self):
""" Restore chain after `break_chain`.
"""
self.flatchain = np.copy(self.flatchain_whole)
if hasattr(self, "logp"):
self.logp = np.copy(self.logp_whole)
    def get_qlist(self, p_bst):
        """ get the best-fit linear responses.

        Parameters
        ----------
        p_bst: list
            best-fit parameters.
        """
        # index 4 of the set_retq output holds the q values
        self.qlist = lnpostfn_single_p(p_bst, self.zydata, self.covfunc,
                                       uselognu=self.uselognu, rank="Full",
                                       set_retq=True)[4]
def do_pred(self, p_bst=None, fpred=None, dense=10, rank="Full",
set_overwrite=True):
""" Predict light curves using the best-fit parameters.
Parameters
----------
p_bst: list
best-fit parameters.
fpred: str, optional
filename for saving the predicted light curves.
dense: integer, optional
factors by which the desired sampling is compared to the original
data sampling (default: 10).
rank: str, optional
Type of covariance matrix rank, "Full" or "NearlyFull" (
default: "Full").
set_overwrite: bool, optional
Whether to overwrite, if a `fpred` file already exists.
Returns
-------
zypred: LightCurve data.
Predicted LightCurve.
"""
if p_bst is None and hasattr(self, "bfp"):
p_bst = self.bfp
self.get_qlist(p_bst)
self.zydata.update_qlist(self.qlist)
sigma, tau, nu = unpacksinglepar(p_bst, self.covfunc,
uselognu=self.uselognu)
lcmean = self.zydata.blist[0]
P = PredictSignal(zydata=self.zydata, lcmean=lcmean, rank=rank,
covfunc=self.covfunc, sigma=sigma, tau=tau, nu=nu)
nwant = dense*self.cont_npt
jwant0 = self.jstart - 0.1*self.rj
jwant1 = self.jend + 0.1*self.rj
jwant = np.linspace(jwant0, jwant1, nwant)
mve, var = P.mve_var(jwant)
sig = np.sqrt(var)
zylclist_pred = [[jwant, mve, sig],]
zydata_pred = LightCurve(zylclist_pred)
if fpred is not None:
zydata_pred.save(fpred, set_overwrite=set_overwrite)
return(zydata_pred)
# ---------------------------------
# Rmap_Model: Spectroscopic RM
def unpackspearpar(p, nlc=None, hascontlag=False):
    """ Internal Function: unpack the physical parameters from input 1-d
    array for spear mode.

    The layout of `p` is [log(sigma), log(tau), lag_1, wid_1, scale_1,
    lag_2, wid_2, scale_2, ...], one (lag, wid, scale) triplet per line.
    """
    if nlc is None:
        # infer nlc from the size of p: two continuum parameters
        # plus three per line light curve
        nlc = (len(p) - 2)//3 + 1
    sigma = np.exp(p[0])
    tau = np.exp(p[1])
    nline = nlc - 1
    if hascontlag:
        # include the trivial continuum entry at index 0
        lags = np.zeros(nlc)
        wids = np.zeros(nlc)
        scales = np.ones(nlc)
        for iline in range(nline):
            lags[iline+1] = p[2+iline*3]
            wids[iline+1] = p[3+iline*3]
            scales[iline+1] = p[4+iline*3]
        return(sigma, tau, lags, wids, scales)
    llags = np.zeros(nline)
    lwids = np.zeros(nline)
    lscales = np.ones(nline)
    for iline in range(nline):
        llags[iline] = p[2+iline*3]
        lwids[iline] = p[3+iline*3]
        lscales[iline] = p[4+iline*3]
    return(sigma, tau, llags, lwids, lscales)
def lnpostfn_spear_p(p, zydata, conthpd=None, lagtobaseline=0.3, laglimit=None,
                     widtobaseline=1, widlimit=None,
                     set_threading=False, blocksize=10000, set_retq=False,
                     set_verbose=False, fixed=None, p_fix=None):
    """ log-posterior function of p.

    Parameters
    ----------
    p: array_like
        Rmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1,
        ...]
    zydata: LightCurve object
        Input LightCurve data.
    conthpd: ndarray, optional
        Priors on sigma and tau as an ndarray with shape (3, 2),
        np.array([[log(sigma_low), log(tau_low)],
                  [log(sigma_med), log(tau_med)],
                  [log(sigma_hig), log(tau_hig)]])
        where 'low', 'med', and 'hig' are defined as the 68% confidence
        limits around the median. conthpd usually comes in as an attribute
        of the `Cont_Model` object `hpd` (default: None).
    lagtobaseline: float, optional
        Prior on lags. When input lag exceeds lagtobaseline*baseline, a
        logarithmic prior will be applied.
    laglimit: str or list of tuples.
        hard boundaries for the lag searching during MCMC sampling.
        'baseline' means the boundaries are naturally determined by the
        duration of the light curves, or you can set them as a list with
        `nline` of tuples, with each tuple containing the (min, max) pair
        for each single line.
    widtobaseline: float, optional
        Prior on transfer function widths. When an input wid exceeds
        widtobaseline*baseline, a logarithmic prior will be applied
        (default: 1, i.e., no logarithmic penalty).
    widlimit: list of tuples, optional
        hard boundaries for the wid searching during MCMC sampling
        (default: None).
    set_threading: bool, optional
        True if you want threading in filling matrix. It conflicts with the
        'threads' option in Rmap_Model.run_mcmc (default: False).
    blocksize: int, optional
        Maximum matrix block size in threading (default: 10000).
    set_retq: bool, optional
        Return the value(s) of q along with each component of the
        log-likelihood if True (default: False).
    set_verbose: bool, optional
        True if you want verbosity (default: False).
    fixed: list
        Bit list indicating which parameters are to be fixed during
        minimization, `1` means varying, while `0` means fixed,
        so [1, 1, 0] means fixing only the third parameter, and `len(fixed)`
        equals the number of parameters (default: None, i.e., varying all
        the parameters simultaneously).
    p_fix: list
        parameter list, with p_fix[fixed==0] being fixed.

    Returns
    -------
    retval: float (set_retq is False) or list (set_retq is True)
        if `retval` returns a list, then it contains the full posterior info
        as a list of [log_posterior, chi2_component, det_component,
        DC_penalty, correction_to_the_mean].
    """
    if fixed is not None:
        # fix parameters during inference: blend the sampled values with
        # the pinned ones, element-wise.
        fixed = np.asarray(fixed)
        p_fix = np.asarray(p_fix)
        p = np.asarray(p)
        p = p * fixed + p_fix * (1. - fixed)
    # unpack the parameters from p
    sigma, tau, llags, lwids, lscales = unpackspearpar(p, zydata.nlc,
                                                       hascontlag=False)
    if set_retq:
        vals = list(lnlikefn_spear(zydata, sigma, tau, llags, lwids, lscales,
                                   set_retq=True, set_verbose=set_verbose,
                                   set_threading=set_threading,
                                   blocksize=blocksize))
    else:
        logl = lnlikefn_spear(zydata, sigma, tau, llags, lwids, lscales,
                              set_retq=False, set_verbose=set_verbose,
                              set_threading=set_threading, blocksize=blocksize)
    # conthpd is in natural log; a one-sided Gaussian prior is built from
    # the asymmetric 68% interval on each side of the median.
    if conthpd is not None:
        # for sigma
        if p[0] < conthpd[1,0]:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
        else:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
        # for tau
        if p[1] < conthpd[1,1]:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
        else:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
    else:
        prior0 = 0.0
        prior1 = 0.0
    # for each lag
    prior2 = 0.0
    for i in range(zydata.nlc-1):
        if lagtobaseline < 1.0:
            if np.abs(llags[i]) > lagtobaseline*zydata.rj:
                # penalize long lags when they are larger than 0.3 times the
                # baseline, as it is too easy to fit the model with
                # non-overlapping signals in the light curves.
                prior2 += np.log(np.abs(llags[i])/(lagtobaseline*zydata.rj))
        # penalize long lags to be impossible
        if laglimit is not None:
            if llags[i] > laglimit[i][1] or llags[i] < laglimit[i][0]:
                # try not stack priors; prior2 is subtracted from the
                # log-posterior below, so this effectively vetoes the sample
                prior2 += my_pos_inf
        # penalize on extremely large transfer function width
        if widtobaseline < 1.0:
            if np.abs(lwids[i]) > widtobaseline*zydata.rj:
                prior2 += np.log(np.abs(lwids[i])/(widtobaseline*zydata.rj))
        if widlimit is not None:
            if lwids[i] > widlimit[i][1] or lwids[i] < widlimit[i][0]:
                # print "wid prior applied"
                # print lwids[i]
                # print widlimit[i]
                prior2 += my_pos_inf
    # add logp of all the priors
    prior = -0.5*(prior0*prior0+prior1*prior1) - prior2
    if set_retq:
        vals[0] = vals[0] + prior
        vals.extend([prior0, prior1, prior2])
        return(vals)
    else:
        logp = logl + prior
        return(logp)
def lnlikefn_spear(zydata, sigma, tau, llags, lwids, lscales, set_retq=False,
                   set_verbose=False, set_threading=False, blocksize=10000):
    """ Internal function to calculate the log likelihood.

    Parameters
    ----------
    zydata: LightCurve object
        Input light curve data; must contain more than one light curve.
    sigma, tau: float
        Continuum variability amplitude and timescale (linear, not log).
    llags, lwids, lscales: array_like
        Lag, width, and scale for each line light curve.
    set_retq: bool, optional
        Return the q values along with each likelihood component if True
        (default: False).
    set_verbose: bool, optional
        True if you want verbosity (default: False).
    set_threading: bool, optional
        True if you want threading in filling matrix (default: False).
    blocksize: int, optional
        Maximum matrix block size in threading (default: 10000).
    """
    if zydata.issingle:
        raise UsageError("lnlikefn_spear does not work for single mode")
    # impossible scenarios: reject non-positive sigma/tau/scales, negative
    # widths, and lags beyond the data baseline
    if ((sigma <= 0.0 or tau <= 0.0 or np.min(lwids) < 0.0 or
         np.min(lscales) <= 0.0 or np.max(np.abs(llags)) > zydata.rj)):
        return(_exit_with_retval(zydata.nlc, set_retq,
               errmsg="Warning: illegal input of parameters",
               set_verbose=set_verbose))
    # fill in lags/wids/scales, index 0 being the (trivial) continuum entry
    lags = np.zeros(zydata.nlc)
    wids = np.zeros(zydata.nlc)
    scales = np.ones(zydata.nlc)
    lags[1:] = llags
    wids[1:] = lwids
    scales[1:] = lscales
    # calculate covariance matrix
    if set_threading:
        C = spear_threading(zydata.jarr, zydata.jarr, zydata.iarr, zydata.iarr,
                            sigma, tau, lags, wids, scales, blocksize=blocksize)
    else:
        C = spear(zydata.jarr, zydata.jarr, zydata.iarr, zydata.iarr, sigma,
                  tau, lags, wids, scales)
    # decompose C inplace
    U, info = cholesky(C, nugget=zydata.varr, inplace=True, raiseinfo=False)
    # handle exceptions here
    if info > 0:
        return(
            _exit_with_retval(
                zydata.nlc, set_retq,
                errmsg="Warning: non positive-definite covariance C #4",
                set_verbose=set_verbose))
    retval = _lnlike_from_U(U, zydata, set_retq=set_retq,
                            set_verbose=set_verbose)
    return(retval)
class Rmap_Model(object):
    """ Reverberation mapping model for spectroscopic light curves.

    Fits a continuum light curve plus one or more line light curves, each
    line parameterized by a lag, a transfer-function width, and a scale
    (see `lnpostfn_spear_p` for the parameter layout).
    """
    def __init__(self, zydata=None):
        """ Rmap Model object.

        Parameters
        ----------
        zydata: LightCurve object, optional
            Light curve data.
        """
        self.zydata = zydata
        if zydata is None:
            pass
        else:
            # cache frequently used data properties
            self.nlc = zydata.nlc
            self.npt = zydata.npt
            self.cont_npt = zydata.nptlist[0]
            self.cont_cad = zydata.cont_cad
            self.cont_std = zydata.cont_std
            self.rj = zydata.rj
            self.jstart = zydata.jstart
            self.jend = zydata.jend
            self.names = zydata.names
            # number of parameters: 2 continuum + 3 per line
            self.ndim = 2 + (self.nlc-1)*3
            self.vars = ["sigma", "tau"]
            self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
            for i in range(1, self.nlc):
                self.vars.append("_".join(["lag", self.names[i]]))
                self.vars.append("_".join(["wid", self.names[i]]))
                self.vars.append("_".join(["scale", self.names[i]]))
                self.texs.append("".join(
                    [r"$t_{", self.names[i].lstrip(r"$").rstrip(r"$"), r"}$"]))
                self.texs.append("".join(
                    [r"$w_{", self.names[i].lstrip(r"$").rstrip(r"$"), r"}$"]))
                self.texs.append("".join(
                    [r"$s_{", self.names[i].lstrip(r"$").rstrip(r"$"), r"}$"]))
    def __call__(self, p, **lnpostparams):
        """ Calculate the posterior value given one parameter set `p`.
        See `lnpostfn_spear_p` for doc.

        Parameters
        ----------
        p: array_like
            Rmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1,
            ...]
        lnpostparams: kwargs
            Keyword arguments for `lnpostfn_spear_p`.

        Returns
        -------
        retval: float (set_retq is False) or list (set_retq is True)
            if `retval` returns a list, then it contains the full posterior info
            as a list of [log_posterior, chi2_component, det_component,
            DC_penalty, correction_to_the_mean].
        """
        return(lnpostfn_spear_p(p, self.zydata, **lnpostparams))
    def do_map(self, p_ini, fixed=None, **lnpostparams):
        """ Do an optimization to find the Maximum a Posterior estimates.
        See `lnpostfn_spear_p` for doc.

        Parameters
        ----------
        p_ini: array_like
            Rmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1,
            ...]
        fixed: array_like, optional
            Same dimension as p_ini, but with 0 for parameters that is fixed in
            the optimization, and with 1 for parameters that is varying, e.g.,
            fixed = [0, 1, 1, 1, 1, ...] means sigma is fixed while others
            are varying. fixed=[1, 1, 1, 1, 1, ...] is equivalent to
            fixed=None (default: None).
        lnpostparams: kwargs
            Keyword arguments for `lnpostfn_spear_p`.

        Returns
        -------
        p_bst: array_like
            Best-fit parameters.
        l: float
            The maximum log-posterior.
        """
        set_verbose = lnpostparams.pop("set_verbose", True)
        set_retq = lnpostparams.pop("set_retq", False)
        if set_retq is True:
            raise InputError("set_retq has to be False")
        p_ini = np.asarray(p_ini)
        if fixed is not None:
            fixed = np.asarray(fixed)
            # minimize the negative log-posterior with fixed entries pinned
            func = lambda _p: -lnpostfn_spear_p(_p*fixed+p_ini*(1.-fixed),
                                                self.zydata, **lnpostparams)
        else:
            func = lambda _p: -lnpostfn_spear_p(_p,
                                                self.zydata, **lnpostparams)
        p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
        if fixed is not None:
            p_bst = p_bst*fixed+p_ini*(1.-fixed)
        sigma, tau, llags, lwids, lscales = unpackspearpar(
            p_bst, self.zydata.nlc, hascontlag=False)
        if set_verbose:
            print("Best-fit parameters are")
            print(("sigma %8.3f tau %8.3f" % (sigma, tau)))
            for i in range(self.nlc-1):
                ip = 2+i*3
                print(("%s %8.3f %s %8.3f %s %8.3f" % (
                    self.vars[ip+0], llags[i],
                    self.vars[ip+1], lwids[i],
                    self.vars[ip+2], lscales[i],
                    )))
            print(("with logp %10.5g " % -v_bst))
        return(p_bst, -v_bst)
    def do_mcmc(self, conthpd=None, lagtobaseline=0.3, laglimit="baseline",
                widtobaseline=1, widlimit="nyquist",
                nwalkers=100, nburn=100, nchain=100, threads=1, fburn=None,
                fchain=None, flogp=None, set_threading=False, blocksize=10000,
                set_verbose=True, fixed=None, p_fix=None):
        """ Run MCMC sampling over the parameter space.

        Parameters
        ----------
        conthpd: ndarray, optional
            Priors on sigma and tau as an ndarray with shape (3, 2),
            np.array([[log(sigma_low), log(tau_low)],
                      [log(sigma_med), log(tau_med)],
                      [log(sigma_hig), log(tau_hig)]])
            where 'low', 'med', and 'hig' are defined as the 68% confidence
            limits around the median. conthpd usually comes in as an attribute
            of the `Cont_Model` object `hpd` (default: None).
        lagtobaseline: float, optional
            Prior on lags. When input lag exceeds lagtobaseline*baseline, a
            logarithmic prior will be applied.
        laglimit: str or list of tuples.
            Hard boundaries for the lag searching during MCMC sampling.
            'baseline' means the boundaries are naturally determined by the
            duration of the light curves, or you can set them as a list
            with `nline` of tuples, with each tuple containing the (min, max)
            pair for each single line.
        widtobaseline: float, optional
            Prior on widths. When an input wid exceeds widtobaseline*baseline,
            a logarithmic prior will be applied (default: 1).
        widlimit: str or list of tuples, optional
            Hard boundaries for the wid searching; 'nyquist' sets the range
            to [0, 2*cont_cad] for each line (default: 'nyquist').
        nwalkers: integer, optional
            Number of walkers for `emcee` (default: 100).
        nburn: integer, optional
            Number of burn-in steps for `emcee` (default: 100).
        nchain: integer, optional
            Number of chains for `emcee` (default: 100).
        threads: integer
            Number of threads (default: 1).
        fburn: str, optional
            filename for burn-in output (default: None).
        fchain: str, optional
            filename for MCMC chain output (default: None).
        flogp: str, optional
            filename for logp output (default: None).
        set_threading: bool, optional
            True if you want threading in filling matrix. It conflicts with the
            'threads' option in Rmap_Model.run_mcmc (default: False).
        blocksize: int, optional
            Maximum matrix block size in threading (default: 10000).
        set_verbose: bool, optional
            Turn on/off verbose mode (default: True).
        fixed: list
            Bit list indicating which parameters are to be fixed during
            minimization, `1` means varying, while `0` means fixed,
            so [1, 1, 0] means fixing only the third parameter, and `len(fixed)`
            equals the number of parameters (default: None, i.e., varying all
            the parameters simultaneously).
        p_fix: list
            parameter list, with p_fix[fixed==0] being fixed.
        """
        if (threads > 1 and (not set_threading)):
            if set_verbose:
                print(("run parallel chains of number %2d " % threads))
        elif (threads == 1):
            if set_verbose:
                if set_threading:
                    print(("run single chain in submatrix blocksize %10d " %
                           blocksize))
                else:
                    print("run single chain without subdividing matrix ")
        else:
            raise InputError("conflicting set_threading and threads setup")
        if laglimit == "baseline":
            laglimit = [[-self.rj, self.rj],]*(self.nlc-1)
        elif len(laglimit) != (self.nlc - 1):
            raise InputError(
                "laglimit should be a list of lists matching number of lines")
        if widlimit == "nyquist":
            # two times the cadence, resembling Nyquist sampling.
            widlimit = [[0.0, 2.0*self.cont_cad],]*(self.nlc-1)
        elif len(widlimit) != (self.nlc - 1):
            raise InputError(
                "widlimit should be a list of lists matching number of lines")
        # generate array of random numbers
        p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
        # initialize array: scatter sigma/tau around the continuum fit (or
        # the data statistics when no continuum prior is available)
        if conthpd is None:
            p0[:, 0] += np.log(self.cont_std)-0.5
            p0[:, 1] += np.log(np.sqrt(self.rj*self.cont_cad))-0.5
        else:
            p0[:, 0] += conthpd[1,0]-0.5
            p0[:, 1] += conthpd[1,1]-0.5
        for i in range(self.nlc-1):
            p0[:, 2+i*3] = p0[:,2+i*3]*(laglimit[i][1]-laglimit[i][0]) + \
                laglimit[i][0]
        if set_verbose:
            print("start burn-in")
            if conthpd is None:
                print("no priors on sigma and tau")
            else:
                print("using priors on sigma and tau from continuum fitting")
                print((np.exp(conthpd)))
            if lagtobaseline < 1.0:
                print(("penalize lags longer than %3.2f of the baseline" %
                       lagtobaseline))
            else:
                print("no penalizing long lags, but within the baseline")
            if widtobaseline < 1.0:
                print(("penalize widths longer than %3.2f of the baseline" %
                       widtobaseline))
            else:
                print("no penalizing long widths, but within the baseline")
            print(("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"
                   % (nburn, nwalkers, nburn*nwalkers)))
        # initialize the ensemble sampler
        sampler = EnsembleSampler(nwalkers, self.ndim, lnpostfn_spear_p,
                                  args=(self.zydata, conthpd, lagtobaseline,
                                        laglimit, widtobaseline, widlimit,
                                        set_threading, blocksize,
                                        False, False, fixed, p_fix), threads=threads)
        pos, prob, state = sampler.run_mcmc(p0, nburn)
        if set_verbose:
            print("burn-in finished")
        if fburn is not None:
            if set_verbose:
                print(("save burn-in chains to %s" % fburn))
            if fixed is not None:
                # overwrite the fixed columns with their pinned values
                for i in range(self.ndim):
                    if fixed[i] == 0:
                        sampler.flatchain[:, i] = p_fix[i]
            np.savetxt(fburn, sampler.flatchain)
        # reset the sampler, then continue from the burn-in end state
        sampler.reset()
        if set_verbose:
            print("start sampling")
        sampler.run_mcmc(pos, nchain, rstate0=state)
        if set_verbose:
            print("sampling finished")
        af = sampler.acceptance_fraction
        if set_verbose:
            print("acceptance fractions are")
            print((" ".join([format(r, "3.2f") for r in af])))
        if fixed is not None:
            # overwrite the fixed columns with their pinned values
            for i in range(self.ndim):
                if fixed[i] == 0:
                    sampler.flatchain[:, i] = p_fix[i]
        if fchain is not None:
            if set_verbose:
                print(("save MCMC chains to %s" % fchain))
            np.savetxt(fchain, sampler.flatchain)
        if flogp is not None:
            if set_verbose:
                print(("save logp of MCMC chains to %s" % flogp))
            np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make chain an attribute; keep a pristine copy for restore_chain
        self.flatchain = sampler.flatchain
        self.flatchain_whole = np.copy(self.flatchain)
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        self.logp = np.ravel(sampler.lnprobability)
        self.logp_whole = np.copy(self.logp)
        self.get_bfp()
    def get_hpd(self, set_verbose=True):
        """ Get the 68% percentile range of each parameter to self.hpd.

        Parameters
        ----------
        set_verbose: bool, optional
            True if you want verbosity (default: True).
        """
        hpd = _get_hpd(self.ndim, self.flatchain)
        for i in range(self.ndim):
            if set_verbose:
                print(("HPD of %s" % self.vars[i]))
            # NOTE(review): the value prints below are not guarded by
            # set_verbose, so they always show -- confirm this is intended.
            if i < 2:
                # sigma and tau are stored in natural log
                print(("low: %8.3f med %8.3f hig %8.3f" %
                       tuple(np.exp(hpd[:,i]))))
            else:
                print(("low: %8.3f med %8.3f hig %8.3f" % tuple(hpd[:,i])))
        # register hpd to attr
        self.hpd = hpd
    def get_bfp(self):
        # best-fit parameters: the chain sample with the maximum logp
        self.bfp = _get_bfp(self.flatchain, self.logp)
    def show_hist(self, bins=100, lagbinsize=1.0, figout=None, figext=None):
        """ Display histograms of the posterior distributions.

        Parameters
        ----------
        bins: integer, optional
            Number of bins for parameters except for 'lag' (default: 100).
        lagbinsize: float, optional
            bin width for 'lag' (default: 1.0).
        figout: str, optional
            Output figure name (default: None, i.e., using sequential integers).
        figext: str, optional
            Output figure extension (default: None, i.e., using `show`).
        """
        if not hasattr(self, "flatchain"):
            print("Warning: need to run do_mcmc or load_chain first")
            return(1)
        ln10 = np.log(10.0)
        fig = plt.figure(figsize=(14, 2.8*self.nlc))
        for i in range(2):
            # sigma and tau shown in log10
            ax = fig.add_subplot(self.nlc,3,i+1)
            ax.hist(self.flatchain[:,i]/ln10, bins)
            ax.set_xlabel(self.texs[i])
            ax.set_ylabel("N")
        for k in range(self.nlc-1):
            for i in range(2+k*3, 5+k*3):
                ax = fig.add_subplot(self.nlc,3,i+1+1)
                if np.mod(i, 3) == 2:
                    # lag plots use fixed-width bins of lagbinsize
                    lagbins = np.arange(
                        int(np.min(self.flatchain[:,i])),
                        int(np.max(self.flatchain[:,i]))+lagbinsize, lagbinsize)
                    ax.hist(self.flatchain[:,i], bins=lagbins)
                else:
                    ax.hist(self.flatchain[:,i], bins)
                ax.set_xlabel(self.texs[i])
                ax.set_ylabel("N")
        return(figure_handler(fig=fig, figout=figout, figext=figext))
    def break_chain(self, llag_segments):
        """ Break the chain.

        Parameters
        ----------
        llag_segments: list of lists
            list of length self.nlc-1, with each element a two-element array
            bracketing the range of lags (usually the single most probable peak)
            you want to consider for each line.
        """
        if (len(llag_segments) != self.nlc-1):
            print(("Error: llag_segments has to be a list of length %d" %
                   (self.nlc-1)))
            return(1)
        if not hasattr(self, "flatchain"):
            print("Warning: need to run do_mcmc or load_chain first")
            return(1)
        for i, llag_seq in enumerate(llag_segments):
            if llag_seq is None:
                continue
            # keep only samples whose lag falls inside llag_seq
            indx = np.argsort(self.flatchain[:, 2+i*3])
            imin, imax = np.searchsorted(self.flatchain[indx, 2+i*3], llag_seq)
            indx_cut = indx[imin: imax]
            self.flatchain = self.flatchain[indx_cut,:]
            if hasattr(self, "logp"):
                self.logp = self.logp[indx_cut]
    def restore_chain(self):
        """ Restore chain after `break_chain`.
        """
        self.flatchain = np.copy(self.flatchain_whole)
        if hasattr(self, "logp"):
            self.logp = np.copy(self.logp_whole)
    def load_chain(self, fchain, flogp=None, set_verbose=True):
        """ Load stored MCMC chain.

        Parameters
        ----------
        fchain: string
            Name for the chain file.
        flogp: string, optional
            Name for the corresponding logp file; when given, the best-fit
            parameters are recovered as well (default: None).
        set_verbose: bool, optional
            True if you want verbosity (default: True).
        """
        if set_verbose:
            print(("load MCMC chain from %s" % fchain))
        self.flatchain = np.genfromtxt(fchain)
        self.flatchain_whole = np.copy(self.flatchain)
        self.ndim = self.flatchain.shape[1]
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        if flogp is not None:
            self.logp = np.genfromtxt(flogp)
            self.logp_whole = np.copy(self.logp)
            self.get_bfp()
    def get_qlist(self, p_bst):
        """ get the best-fit linear responses.

        Parameters
        ----------
        p_bst: list
            best-fit parameters.
        """
        # index 4 of the set_retq output holds the q values
        self.qlist = lnpostfn_spear_p(p_bst, self.zydata, set_retq=True,
                                      set_verbose=False)[4]
    def do_pred(self, p_bst=None, fpred=None, dense=10, set_overwrite=True):
        """ Calculate the predicted mean and variance of each light curve on a
        densely sampled time axis.

        Parameters
        ----------
        p_bst: array_like
            Input parameters; when None, `self.bfp` is used.
        fpred: string, optional
            Name of the output file for the predicted light curves, set it to
            None if you do not want output (default: None).
        dense: int, optional
            The factor by which the predicted light curves should be more
            densely sampled than the original data (default: 10).
        set_overwrite: bool, optional
            True if you want to overwrite existing fpred (default: True).

        Returns
        -------
        zydata_pred: LightCurve object
            Predicted light curves packaged as a LightCurve object.
        """
        if p_bst is None and hasattr(self, "bfp"):
            p_bst = self.bfp
        self.get_qlist(p_bst)
        sigma, tau, lags, wids, scales = unpackspearpar(
            p_bst, self.zydata.nlc, hascontlag=True)
        # update qlist
        self.zydata.update_qlist(self.qlist)
        # initialize PredictRmap object
        P = PredictRmap(zydata=self.zydata, sigma=sigma, tau=tau,
                        lags=lags, wids=wids, scales=scales)
        # predict on a grid `dense` times finer than the data, padded by
        # 10% of the baseline on each side
        nwant = dense*self.cont_npt
        jwant0 = self.jstart - 0.1*self.rj
        jwant1 = self.jend + 0.1*self.rj
        jwant = np.linspace(jwant0, jwant1, nwant)
        zylclist_pred = []
        for i in range(self.nlc):
            iwant = np.ones(nwant)*(i+1)
            mve, var = P.mve_var(jwant, iwant)
            sig = np.sqrt(var)
            zylclist_pred.append([jwant, mve, sig])
        zydata_pred = LightCurve(zylclist_pred)
        if fpred is not None:
            zydata_pred.save(fpred, set_overwrite=set_overwrite)
        return(zydata_pred)
# ---------------------------------
# Pmap_Model: Two-Band Photometric RM
def unpackphotopar(p, nlc=2, hascontlag=False):
    """ Unpack the physical parameters from input 1-d array for photo mode.
    Currently only two bands, one on and one off the line emission.

    The layout of `p` is [log(sigma), log(tau), lag, wid, scale, alpha],
    where alpha scales the continuum contribution in the line band.
    """
    if nlc != 2:
        raise InputError("Pmap_Model cannot cope with more than two bands yet")
    sigma, tau = np.exp(p[0]), np.exp(p[1])
    if hascontlag:
        # entries: [continuum, line, continuum contribution to line band]
        lags = np.zeros(3)
        wids = np.zeros(3)
        scales = np.ones(3)
        # line contribution
        lags[1], wids[1], scales[1] = p[2], p[3], p[4]
        # continuum contribution
        scales[2] = p[5]
        return(sigma, tau, lags, wids, scales)
    llags = np.zeros(2)
    lwids = np.zeros(2)
    lscales = np.ones(2)
    llags[0], lwids[0], lscales[0] = p[2], p[3], p[4]
    # continuum contribution
    lscales[1] = p[5]
    return(sigma, tau, llags, lwids, lscales)
def lnpostfn_photo_p(p, zydata, conthpd=None, set_extraprior=False,
                     lagtobaseline=0.3, laglimit=None, widtobaseline=1.0,
                     widlimit=None, set_threading=False, blocksize=10000,
                     set_retq=False, set_verbose=False, fixed=None, p_fix=None):
    """ log-posterior function of p for the two-band photometric model.
    Parameters
    ----------
    p: array_like
        Pmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1, alpha]
    zydata: LightCurve object
        Light curve data.
    conthpd: ndarray, optional
        Priors on sigma and tau as an ndarray with shape (3, 2),
        np.array([[log(sigma_low), log(tau_low)],
                  [log(sigma_med), log(tau_med)],
                  [log(sigma_hig), log(tau_hig)]])
        where 'low', 'med', and 'hig' are defined as the 68% confidence
        limits around the median. conthpd usually comes in as an attribute
        of the DRW_Model object DRW_Model.hpd (default: None).
    set_extraprior: bool, optional
        DEPRECATED, keep it for backward compatibility and debugging purposes.
    lagtobaseline: float, optional
        Prior on lags. When input lag exceeds lagtobaseline*baseline, a
        logarithmic prior will be applied.
    laglimit: list of tuples.
        hard boundaries for the lag searching.
    widtobaseline: float, optional
        Prior on wids. When input wid exceeds widtobaseline*baseline, a
        logarithmic prior will be applied.
    widlimit: list of tuples, optional
        hard boundaries for the wid searching.
    set_threading: bool, optional
        True if you want threading in filling matrix. It conflicts with the
        'threads' option in Pmap_Model.run_mcmc (default: False).
    blocksize: int, optional
        Maximum matrix block size in threading (default: 10000).
    set_retq: bool, optional
        Return the value(s) of q along with each component of the
        log-likelihood if True (default: False).
    set_verbose: bool, optional
        True if you want verbosity (default: False).
    fixed: array_like, optional
        0/1 mask per parameter; entries with 0 are held at p_fix
        (default: None).
    p_fix: array_like, optional
        Values used for the fixed parameters when `fixed` is given
        (default: None).
    """
    if fixed is not None:
        # fix parameters during inference: keep p where fixed==1, replace
        # with p_fix where fixed==0.
        fixed = np.asarray(fixed)
        p_fix = np.asarray(p_fix)
        p = np.asarray(p)
        p = p * fixed + p_fix * (1. - fixed)
    # unpack the parameters from p
    sigma, tau, llags, lwids, lscales = unpackphotopar(p, zydata.nlc,
                                                       hascontlag=False)
    if set_retq:
        vals = list(lnlikefn_photo(zydata, sigma, tau, llags, lwids, lscales,
                                   set_retq=True, set_verbose=set_verbose,
                                   set_threading=set_threading,
                                   blocksize=blocksize))
    else:
        logl = lnlikefn_photo(zydata, sigma, tau, llags, lwids, lscales,
                              set_retq=False, set_verbose=set_verbose,
                              set_threading=set_threading, blocksize=blocksize)
    # conthpd is in natural log; the prior is a one-sided Gaussian pull
    # toward the continuum-fit median, with asymmetric 1-sigma widths.
    if conthpd is not None:
        # for sigma
        if p[0] < conthpd[1,0]:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
        else:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
        # for tau
        if p[1] < conthpd[1,1]:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
        else:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
    else:
        prior0 = 0.0
        prior1 = 0.0
    # for each lag
    prior2 = 0.0
    if lagtobaseline < 1.0:
        if np.abs(llags[0]) > lagtobaseline*zydata.rj:
            # penalize long lags when larger than lagtobaseline times the
            # baseline, as it is too easy to fit the model with
            # non-overlapping signals in the light curves.
            prior2 += np.log(np.abs(llags[0])/(lagtobaseline*zydata.rj))
    # penalize lags outside the hard boundaries to be impossible
    if laglimit is not None:
        if llags[0] > laglimit[0][1] or llags[0] < laglimit[0][0]:
            prior2 += my_pos_inf
    # penalize on extremely large transfer function width
    if widtobaseline < 1.0:
        if np.abs(lwids[0]) > widtobaseline*zydata.rj:
            prior2 += np.log(np.abs(lwids[0])/(widtobaseline*zydata.rj))
    if widlimit is not None:
        if lwids[0] > widlimit[0][1] or lwids[0] < widlimit[0][0]:
            prior2 += my_pos_inf
    if set_extraprior:
        # deprecated extra penalizations, kept for debugging.
        # penalize on extremely short lags (below median cadence).
        if (np.abs(llags[0]) <= zydata.cont_cad or
            np.abs(llags[0]) <= np.abs(lwids[0])):
            prior2 += my_pos_inf
        # penalize on extremely small line responses (below mean error level).
        if sigma * np.abs(lscales[0]) <= np.mean(zydata.elist[1]):
            prior2 += my_pos_inf
    # add logp of all the priors
    prior = -0.5*(prior0*prior0+prior1*prior1) - prior2
    if set_retq:
        vals[0] = vals[0] + prior
        vals.extend([prior0, prior1, prior2])
        return(vals)
    else:
        logp = logl + prior
        return(logp)
def lnlikefn_photo(zydata, sigma, tau, llags, lwids, lscales, set_retq=False,
                   set_verbose=False, set_threading=False, blocksize=10000):
    """ Log-likelihood function for the two-band photometric model.

    Builds the covariance matrix via `spear` with set_pmap=True, Cholesky
    decomposes it, and delegates the likelihood evaluation to
    `_lnlike_from_U`.
    """
    if zydata.issingle:
        raise UsageError("lnlikefn_photo does not work for single mode")
    # impossible scenarios: reject nonphysical parameters outright
    if (sigma <= 0.0 or tau <= 0.0 or np.min(lwids) < 0.0 or
            np.min(lscales) < 0.0 or np.max(np.abs(llags)) > zydata.rj):
        return(_exit_with_retval(zydata.nlc, set_retq,
               errmsg="Warning: illegal input of parameters",
               set_verbose=set_verbose))
    # fill in lags/wids/scales: slot 0 is the continuum (zero lag/width,
    # unit scale), slots 1-2 hold the line/continuum contributions.
    lags = np.zeros(3)
    wids = np.zeros(3)
    scales = np.ones(3)
    lags[1:] = llags[:]
    wids[1:] = lwids[:]
    scales[1:] = lscales[:]
    if set_threading:
        C = spear_threading(zydata.jarr, zydata.jarr, zydata.iarr,
                            zydata.iarr, sigma, tau, lags, wids, scales,
                            set_pmap=True, blocksize=blocksize)
    else:
        C = spear(zydata.jarr, zydata.jarr, zydata.iarr, zydata.iarr, sigma,
                  tau, lags, wids, scales, set_pmap=True)
    # decompose C inplace (the measurement variances enter as the nugget)
    U, info = cholesky(C, nugget=zydata.varr, inplace=True, raiseinfo=False)
    # handle exceptions here
    if info > 0:
        return(_exit_with_retval(
            zydata.nlc, set_retq,
            errmsg="Warning: non positive-definite covariance C #3",
            set_verbose=set_verbose))
    retval = _lnlike_from_U(U, zydata, set_retq=set_retq,
                            set_verbose=set_verbose)
    return(retval)
class Pmap_Model(object):
    def __init__(self, zydata=None, linename="line"):
        """ Pmap Model object (two-band photometric mapping).
        Parameters
        ----------
        zydata: LightCurve object, optional
            Light curve data.
        linename: str, optional
            Name of the emission line (default: 'line').
        """
        self.zydata = zydata
        if zydata is None:
            # allow instantiation without data, e.g., to load a saved chain
            pass
        else:
            self.nlc = zydata.nlc
            self.npt = zydata.npt
            self.cont_npt = zydata.nptlist[0]
            self.cont_cad = zydata.cont_cad
            self.cont_std = zydata.cont_std
            self.rj = zydata.rj
            self.jstart = zydata.jstart
            self.jend = zydata.jend
            self.names = zydata.names
        # number of parameters:
        # [log(sigma), log(tau), lag, wid, scale, alpha]
        self.ndim = 6
        self.vars = ["sigma", "tau"]
        self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
        # line transfer-function parameters, labeled by the line name
        self.vars.append("_".join(["lag", linename]))
        self.vars.append("_".join(["wid", linename]))
        self.vars.append("_".join(["scale", linename]))
        self.texs.append("".join([r"$t_{", linename, r"}$"]))
        self.texs.append("".join([r"$w_{", linename, r"}$"]))
        self.texs.append("".join([r"$s_{", linename, r"}$"]))
        # continuum scale in the line band
        self.vars.append("alpha")
        self.texs.append(r"$\alpha$")
    def __call__(self, p, **lnpostparams):
        """ Calculate the posterior value given one parameter set `p`.
        Parameters
        ----------
        p: array_like
            Pmap_Model parameters, [log(sigma), log(tau), lag, wid, scale,
            alpha].
        lnpostparams: kwargs
            Keyword arguments for `lnpostfn_photo_p`.
        Returns
        -------
        retval: float (set_retq is False) or list (set_retq is True)
            if `retval` returns a list, then it contains the full posterior info
            as a list of [log_posterior, chi2_component, det_component,
            DC_penalty, correction_to_the_mean].
        """
        return(lnpostfn_photo_p(p, self.zydata, **lnpostparams))
    def do_map(self, p_ini, fixed=None, **lnpostparams):
        """ Do an optimization to find the Maximum a Posterior estimates.
        Parameters
        ----------
        p_ini: array_like
            Pmap_Model parameters [log(sigma), log(tau), lag, wid, scale,
            alpha].
        fixed: array_like, optional
            Same dimension as p_ini, but with 0 for parameters that is fixed in
            the optimization, and with 1 for parameters that is varying, e.g.,
            fixed = [0, 1, 1, 1, 1, 1] means sigma is fixed while others are
            varying. fixed=[1, 1, 1, 1, 1, 1] is equivalent to fixed=None
            (default: None).
        Returns
        -------
        p_bst: array_like
            Best-fit parameters.
        l: float
            The maximum log-posterior.
        """
        set_verbose = lnpostparams.pop("set_verbose", True)
        set_retq = lnpostparams.pop("set_retq", False)
        if set_retq is True:
            raise InputError("set_retq has to be False")
        p_ini = np.asarray(p_ini)
        if fixed is not None:
            fixed = np.asarray(fixed)
            # minimize the negative log-posterior with fixed entries pinned
            func = lambda _p: -lnpostfn_photo_p(_p*fixed+p_ini*(1.-fixed),
                                                self.zydata, **lnpostparams)
        else:
            func = lambda _p: -lnpostfn_photo_p(_p,
                                                self.zydata, **lnpostparams)
        p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
        if fixed is not None:
            # restore the fixed entries in the reported best-fit vector
            p_bst = p_bst*fixed+p_ini*(1.-fixed)
        sigma, tau, llags, lwids, lscales = unpackphotopar(
            p_bst, self.zydata.nlc, hascontlag=False)
        if set_verbose:
            print("Best-fit parameters are")
            print(("sigma %8.3f tau %8.3f" % (sigma, tau)))
            print(("%s %8.3f %s %8.3f %s %8.3f" % (
                self.vars[2], llags[0], self.vars[3], lwids[0],
                self.vars[4], lscales[0])))
            print(("alpha %8.3f" % (lscales[1])))
            print(("with logp  %10.5g " % -v_bst))
        return(p_bst, -v_bst)
    def do_mcmc(self, conthpd=None, set_extraprior=False, lagtobaseline=0.3,
                laglimit="baseline", widtobaseline=1, widlimit="nyquist",
                nwalkers=100, nburn=100, nchain=100, threads=1, fburn=None,
                fchain=None, flogp=None, set_threading=False, blocksize=10000,
                set_verbose=True, fixed=None, p_fix=None):
        """ See `lnpostfn_photo_p` for doc, except for `laglimit` and `widlimit`,
        both of which have different default values ('baseline' / 'nyquist').
        'baseline' means the boundaries are naturally determined by the
        duration of the light curves, and 'nyquist' means the transfer function
        width has to be within two times the typical cadence of light curves.

        Results are stored on the object as `flatchain`, `logp`, `hpd`, and
        `bfp`; optionally the chains are written to `fburn`/`fchain`/`flogp`.
        """
        if (threads > 1 and (not set_threading)):
            if set_verbose:
                print(("run parallel chains of number %2d " % threads))
        elif (threads == 1):
            if set_verbose:
                if set_threading:
                    print(("run single chain in submatrix blocksize %10d " %
                           blocksize))
                else:
                    print("run single chain without subdividing matrix ")
        else:
            raise InputError("conflicting set_threading and threads setup")
        if laglimit == "baseline":
            laglimit = [[-self.rj, self.rj],]
        elif len(laglimit) != 1:
            raise InputError("laglimit should be a list of a single list")
        if widlimit == "nyquist":
            # two times the cadence, resembling Nyquist sampling.
            widlimit = [[0.0, 2.0*self.cont_cad],]
        elif len(widlimit) != 1:
            raise InputError("widlimit should be a list of a single list")
        # generate array of random numbers in (0, 1) for the initial walkers
        p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
        # initialize array
        if conthpd is None:
            p0[:, 0] += np.log(self.cont_std)-0.5
            p0[:, 1] += np.log(np.sqrt(self.rj*self.cont_cad))-0.5
        else:
            # stretch the range from (0,1) to ( conthpd[0,0], conthpd[2,0] )
            p0[:, 0] = p0[:, 0] * (conthpd[2,0] - conthpd[0,0]) + conthpd[0,0]
            p0[:, 1] = p0[:, 1] * (conthpd[2,1] - conthpd[0,1]) + conthpd[0,1]
            # old way, just use 0.5 as the 1\sigma width.
            # p0[:, 0] += conthpd[1,0]-0.5
            # p0[:, 1] += conthpd[1,1]-0.5
        # spread lag and wid walkers uniformly across their hard limits
        p0[:, 2] = p0[:,2]*(laglimit[0][1]-laglimit[0][0]) + laglimit[0][0]
        p0[:, 3] = p0[:,3]*(widlimit[0][1]-widlimit[0][0]) + widlimit[0][0]
        if set_verbose:
            print("start burn-in")
            if conthpd is None:
                print("no priors on sigma and tau")
            else:
                print("using priors on sigma and tau from continuum fitting")
                print((np.exp(conthpd)))
            if lagtobaseline < 1.0:
                print(("penalize lags longer than %3.2f of the baseline" %
                       lagtobaseline))
            else:
                print("no penalizing long lags, restrict to < baseline")
            print(("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"
                   % (nburn, nwalkers, nburn*nwalkers)))
        # initialize the ensemble sampler
        sampler = EnsembleSampler(nwalkers, self.ndim, lnpostfn_photo_p,
                                  args=(self.zydata, conthpd, set_extraprior,
                                        lagtobaseline, laglimit, widtobaseline,
                                        widlimit, set_threading, blocksize,
                                        False, False, fixed, p_fix), threads=threads)
        pos, prob, state = sampler.run_mcmc(p0, nburn)
        if set_verbose:
            print("burn-in finished")
        if fburn is not None:
            if set_verbose:
                print(("save burn-in chains to %s" % fburn))
            if fixed is not None:
                # overwrite fixed columns with their pinned values
                for i in range(self.ndim):
                    if fixed[i] == 0:
                        sampler.flatchain[:, i] = p_fix[i]
            np.savetxt(fburn, sampler.flatchain)
        # reset the sampler
        sampler.reset()
        if set_verbose:
            print("start sampling")
        sampler.run_mcmc(pos, nchain, rstate0=state)
        if set_verbose:
            print("sampling finished")
        af = sampler.acceptance_fraction
        if set_verbose:
            print("acceptance fractions are")
            print((" ".join([format(r, "3.2f") for r in af])))
        if fixed is not None:
            # overwrite fixed columns with their pinned values
            for i in range(self.ndim):
                if fixed[i] == 0:
                    sampler.flatchain[:, i] = sampler.flatchain[:, i] * 0.0 + p_fix[i]
        if fchain is not None:
            if set_verbose:
                print(("save MCMC chains to %s" % fchain))
            np.savetxt(fchain, sampler.flatchain)
        if flogp is not None:
            if set_verbose:
                print(("save logp of MCMC chains to %s" % flogp))
            np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make chain an attribute
        self.flatchain = sampler.flatchain
        self.flatchain_whole = np.copy(self.flatchain)
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        self.logp = np.ravel(sampler.lnprobability)
        self.logp_whole = np.copy(self.logp)
        self.get_bfp()
    def get_hpd(self, set_verbose=True):
        """ Get the 68% percentile range of each parameter to self.hpd.
        Parameters
        ----------
        set_verbose: bool, optional
            True if you want verbosity (default: True).
        """
        hpd = _get_hpd(self.ndim, self.flatchain)
        for i in range(self.ndim):
            if set_verbose:
                print(("HPD of %s" % self.vars[i]))
                if i < 2:
                    # sigma and tau are sampled in natural log; report linear
                    print(("low: %8.3f med %8.3f hig %8.3f" %
                           tuple(np.exp(hpd[:,i]))))
                else:
                    print(("low: %8.3f med %8.3f hig %8.3f" %
                           tuple(hpd[:,i])))
        # register hpd to attr
        self.hpd = hpd
    def get_bfp(self):
        """ Register the highest-logp sample of the chain to self.bfp. """
        self.bfp = _get_bfp(self.flatchain, self.logp)
    def show_hist(self, bins=100, lagbinsize=1.0, figout=None, figext=None):
        """ Display histograms of the posterior distributions.
        Parameters
        ----------
        bins: integer, optional
            Number of bins for parameters except for 'lag' (default: 100).
        lagbinsize: float, optional
            bin width for 'lag' (default: 1.0).
        figout: str, optional
            Output figure name (default: None, i.e., using sequencial integers).
        figext: str, optional
            Output figure extension (default: None, i.e., using `show`).
        """
        if not hasattr(self, "flatchain"):
            print("Warning: need to run do_mcmc or load_chain first")
            return(1)
        ln10 = np.log(10.0)
        fig = plt.figure(figsize=(14, 2.8*self.nlc))
        # sigma and tau, converted from natural log to log10 for display
        for i in range(2):
            ax = fig.add_subplot(self.nlc,3,i+1)
            ax.hist(self.flatchain[:,i]/ln10, bins)
            ax.set_xlabel(self.texs[i])
            ax.set_ylabel("N")
        # alpha
        ax = fig.add_subplot(self.nlc,3,3)
        ax.hist(self.flatchain[:,5], bins)
        ax.set_xlabel(self.texs[5])
        ax.set_ylabel("N")
        # line
        for i in range(2, 5):
            ax = fig.add_subplot(self.nlc,3,i+1+1)
            if np.mod(i, 3) == 2:
                # lag plots: fixed-width bins spanning the sampled lag range
                lagbins = np.arange(int(np.min(self.flatchain[:,i])),
                                    int(np.max(self.flatchain[:,i]))+lagbinsize,
                                    lagbinsize)
                ax.hist(self.flatchain[:,i], bins=lagbins)
            else:
                ax.hist(self.flatchain[:,i], bins)
            ax.set_xlabel(self.texs[i])
            ax.set_ylabel("N")
        return(figure_handler(fig=fig, figout=figout, figext=figext))
    def break_chain(self, llag_segments):
        """ Break the chain, keeping only samples whose lag falls inside the
        given segment.
        Parameters
        ----------
        llag_segments: list of lists
            list of length 1, with the single element a two-element array
            bracketing the range of lags (usually the single most probable peak)
            you want to consider for each line.
        """
        if (len(llag_segments) != self.nlc-1):
            print(("Error: llag_segments has to be a list of length %d" %
                   (self.nlc-1)))
            return(1)
        if not hasattr(self, "flatchain"):
            print("Warning: need to run do_mcmc or load_chain first")
            return(1)
        llag_seq = llag_segments[0]
        if llag_seq is None:
            print("Warning: no rule to break chains with")
        else:
            # sort by lag (column 2) so searchsorted can bracket the segment
            indx = np.argsort(self.flatchain[:, 2])
            imin, imax = np.searchsorted(self.flatchain[indx, 2], llag_seq)
            indx_cut = indx[imin: imax]
            self.flatchain = self.flatchain[indx_cut,:]
            if hasattr(self, "logp"):
                self.logp = self.logp[indx_cut]
def restore_chain(self):
""" Restore chain after `break_chain`.
"""
self.flatchain = np.copy(self.flatchain_whole)
if hasattr(self, "logp"):
self.logp = np.copy(self.logp_whole)
    def load_chain(self, fchain, flogp=None, set_verbose=True):
        """ Load stored MCMC chain.
        Parameters
        ----------
        fchain: string
            Name for the chain file.
        flogp: string, optional
            Name for the log-posterior file; when given, also enables
            `get_bfp` (default: None).
        set_verbose: bool, optional
            True if you want verbosity (default: True).
        """
        if set_verbose:
            print(("load MCMC chain from %s" % fchain))
        self.flatchain = np.genfromtxt(fchain)
        self.flatchain_whole = np.copy(self.flatchain)
        # infer the dimensionality from the stored chain
        self.ndim = self.flatchain.shape[1]
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        if flogp is not None:
            self.logp = np.genfromtxt(flogp)
            self.logp_whole = np.copy(self.logp)
            self.get_bfp()
    def do_pred(self, p_bst=None, fpred=None, dense=10, set_overwrite=True,
                set_decompose=False):
        """ Calculate the predicted mean and variance of each light curve on a
        densely sampled time axis.
        Parameters
        ----------
        p_bst: array_like
            Input parameters; defaults to self.bfp when available.
        fpred: string, optional
            Name of the output file for the predicted light curves, set it to
            None if you do not want output (default: None).
        dense: int, optional
            The factor by which the predicted light curves should be more
            densely sampled than the original data (default: 10).
        set_overwrite: bool, optional
            True if you want to overwrite existing fpred (default: True).
        set_decompose: bool, optional
            If True, also return the continuum/line/constant decomposition of
            the line-band prediction (default: False).
        Returns
        -------
        zydata_pred: LightCurve object
            Predicted light curves packaged as a LightCurve object. When
            set_decompose is True, returns (zydata_pred, [jwant, mve_band,
            mve_line, mve_nonv]) instead.
        """
        if p_bst is None and hasattr(self, "bfp"):
            p_bst = self.bfp
        # recover the best-fit q (mean corrections) via the retq path
        qlist = lnpostfn_photo_p(p_bst, self.zydata, set_retq=True,
                                 set_verbose=False)[4]
        sigma, tau, lags, wids, scales = unpackphotopar(p_bst, self.zydata.nlc,
                                                        hascontlag=True)
        # update qlist
        self.zydata.update_qlist(qlist)
        # initialize PredictRmap object
        P = PredictDPmap(zydata=self.zydata, sigma=sigma, tau=tau, lags=lags, wids=wids, scales=scales)
        nwant = dense*self.cont_npt
        # pad the prediction window by 10% of the baseline on each side
        jwant0 = self.jstart - 0.1*self.rj
        jwant1 = self.jend + 0.1*self.rj
        jwant = np.linspace(jwant0, jwant1, nwant)
        zylclist_pred = []
        for i in range(self.nlc):
            iwant = np.ones(nwant)*(i+1)
            mve, var = P.mve_var(jwant, iwant)
            sig = np.sqrt(var)
            zylclist_pred.append([jwant, mve, sig])
        if set_decompose:
            # split the line-band prediction into the scaled continuum,
            # the line response, and the constant background terms.
            mve_band = (zylclist_pred[0][1] - self.zydata.blist[0])*scales[-1]
            mve_line = (zylclist_pred[1][1] - self.zydata.blist[1])-mve_band
            mve_nonv = jwant * 0.0 + self.zydata.blist[1]
        zydata_pred = LightCurve(zylclist_pred)
        if fpred is not None:
            zydata_pred.save(fpred, set_overwrite=set_overwrite)
        if set_decompose:
            return(zydata_pred, [jwant, mve_band, mve_line, mve_nonv])
        else:
            return(zydata_pred)
# ---------------------------------
# SPmap_Model: One-Band Photometric RM
def unpacksbphotopar(p, nlc=1):
    """ Convert the 1-d parameter vector into physical parameters for the
    single-band photometric mode.

    Parameters
    ----------
    p: array_like
        [log(sigma), log(tau), lag, wid, scale].
    nlc: int, optional
        Number of bands; must be 1 (default: 1).

    Returns
    -------
    (sigma, tau, lag, wid, scale): tuple
        DRW amplitude/timescale in linear units plus the transfer-function
        lag, width, and scale.
    """
    if nlc != 1:
        raise InputError("SPmap_Model cannot cope with more than one band.")
    # sigma and tau are sampled in natural log; the rest are linear
    return(np.exp(p[0]), np.exp(p[1]), p[2], p[3], p[4])
def lnpostfn_sbphoto_p(p, zydata, conthpd=None, scalehpd=None,
                       lagtobaseline=0.3, laglimit=None, widtobaseline=1,
                       widlimit=None, set_threading=False, blocksize=10000,
                       set_retq=False, set_verbose=False, fixed=None, p_fix=None):
    """ log-posterior function of p.
    Parameters
    ----------
    p: array_like
        SPmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1]
    zydata: LightCurve object
        Light curve data.
    conthpd: ndarray, optional
        Priors on sigma and tau as an ndarray with shape (3, 2),
        np.array([[log(sigma_low), log(tau_low)],
                  [log(sigma_med), log(tau_med)],
                  [log(sigma_hig), log(tau_hig)]])
        where 'low', 'med', and 'hig' are defined as the 68% confidence
        limits around the median. conthpd usually comes in as an attribute
        of the `Cont_Model` object `hpd` (default: None).
    scalehpd: ndarray, optional
        Prior on ln(scale) as an 1D ndarray with size 3.
        np.array([lnscale_low, lnscale_med, lnscale_hig])
        where 'low', 'med', and 'hig' are defined as the 68% confidence
        limits around the median. Use scalehpd if you have a rough idea of
        how large the ratio of line variation over the underlying continuum is.
    lagtobaseline: float, optional
        Prior on lags. When input lag exceeds lagtobaseline*baseline, a
        logarithmic prior will be applied.
    laglimit: list of tuples.
        hard boundaries for the lag searching.
    widtobaseline: float, optional
        Prior on wids. When input wid exceeds widtobaseline*baseline, a
        logarithmic prior will be applied.
    widlimit: list of tuples, optional
        hard boundaries for the wid searching.
    set_threading: bool, optional
        True if you want threading in filling matrix. It conflicts with the
        'threads' option in Pmap_Model.run_mcmc (default: False).
    blocksize: int, optional
        Maximum matrix block size in threading (default: 10000).
    set_retq: bool, optional
        Return the value(s) of q along with each component of the
        log-likelihood if True (default: False).
    set_verbose: bool, optional
        True if you want verbosity (default: False).
    fixed: array_like, optional
        0/1 mask per parameter; entries with 0 are held at p_fix
        (default: None).
    p_fix: array_like, optional
        Values used for the fixed parameters when `fixed` is given
        (default: None).
    """
    if fixed is not None:
        # fix parameters during inference.
        fixed = np.asarray(fixed)
        p_fix = np.asarray(p_fix)
        p = np.asarray(p)
        p = p * fixed + p_fix * (1. - fixed)
    sigma, tau, lag, wid, scale = unpacksbphotopar(p, zydata.nlc)
    if set_retq:
        vals = list(lnlikefn_sbphoto(zydata, sigma, tau, lag, wid, scale,
                                     set_retq=True, set_verbose=set_verbose,
                                     set_threading=set_threading,
                                     blocksize=blocksize))
    else:
        logl = lnlikefn_sbphoto(zydata, sigma, tau, lag, wid, scale,
                                set_retq=False, set_verbose=set_verbose,
                                set_threading=set_threading,
                                blocksize=blocksize)
    # both conthpd and p[0-1] are in natural log; one-sided Gaussian pull
    # toward the continuum-fit median with asymmetric 1-sigma widths.
    if conthpd is not None:
        # for sigma
        if p[0] < conthpd[1,0]:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
        else:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
        # for tau
        if p[1] < conthpd[1,1]:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
        else:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
    else:
        prior0 = 0.0
        prior1 = 0.0
    # for scale (prior expressed in ln-space)
    if scalehpd is not None:
        lnscale = np.log(scale)
        if lnscale < scalehpd[1]:
            prior3 = (lnscale - scalehpd[1])/(scalehpd[1]-scalehpd[0])
        else:
            prior3 = (lnscale - scalehpd[1])/(scalehpd[2]-scalehpd[1])
    else:
        prior3 = 0.0
    # for lags and wids
    prior2 = 0.0
    # penalize on extremely long lags.
    if lagtobaseline < 1.0:
        if np.abs(lag) > lagtobaseline*zydata.rj:
            prior2 += np.log(np.abs(lag)/(lagtobaseline*zydata.rj))
    # penalize lags outside the hard boundaries to be impossible
    if laglimit is not None:
        if lag > laglimit[0][1] or lag < laglimit[0][0]:
            prior2 += my_pos_inf
    # penalize on extremely large transfer function width
    # BUGFIX: the width prior previously compared against
    # lagtobaseline*zydata.rj; it should use widtobaseline, consistent with
    # the parallel logic in lnpostfn_photo_p.
    if widtobaseline < 1.0:
        if np.abs(wid) > widtobaseline*zydata.rj:
            prior2 += np.log(np.abs(wid)/(widtobaseline*zydata.rj))
    if widlimit is not None:
        if wid > widlimit[0][1] or wid < widlimit[0][0]:
            prior2 += my_pos_inf
    # add logp of all the priors
    prior = -0.5*(prior0*prior0+prior1*prior1+prior3*prior3) - prior2
    if set_retq:
        vals[0] = vals[0] + prior
        # NOTE: prior3 (scale prior) is folded into `prior` but not appended
        # here, keeping the returned list shape identical to lnpostfn_photo_p.
        vals.extend([prior0, prior1, prior2])
        return(vals)
    else:
        logp = logl + prior
        return(logp)
def lnlikefn_sbphoto(zydata, sigma, tau, lag, wid, scale, set_retq=False,
                     set_verbose=False, set_threading=False, blocksize=10000):
    """ Log-likelihood function for the SBmap (single-band photometric) model.

    Builds the covariance matrix via `spear` with set_pmap=True, Cholesky
    decomposes it, and delegates the likelihood evaluation to
    `_lnlike_from_U`.
    """
    if not zydata.issingle:
        raise UsageError("lnlikefn_sbphoto expects a single input light curve.")
    # impossible scenarios: reject nonphysical parameters outright
    if (sigma <= 0.0 or tau <= 0.0 or wid < 0.0 or scale < 0.0 or
            lag > zydata.rj):
        return(_exit_with_retval(zydata.nlc, set_retq,
               errmsg="Warning: illegal input of parameters",
               set_verbose=set_verbose))
    # fill in lags/wids/scales so that we can use spear.py with set_pmap=True.
    lags = np.zeros(3)
    wids = np.zeros(3)
    scales = np.ones(3)
    lags[1] = lag
    wids[1] = wid
    scales[1] = scale
    # we know all elements in zydata.iarr are 1, so we want them to be 2 here.
    if set_threading:
        C = spear_threading(zydata.jarr,zydata.jarr, zydata.iarr+1,
                            zydata.iarr+1,sigma,tau,lags,wids,scales,
                            set_pmap=True, blocksize=blocksize)
    else:
        C = spear(zydata.jarr,zydata.jarr, zydata.iarr+1,zydata.iarr+1,
                  sigma,tau,lags,wids,scales, set_pmap=True)
    # decompose C inplace (the measurement variances enter as the nugget)
    U, info = cholesky(C, nugget=zydata.varr, inplace=True, raiseinfo=False)
    # handle exceptions here
    if info > 0:
        return(_exit_with_retval(
            zydata.nlc, set_retq,
            errmsg="Warning: non positive-definite covariance C #2",
            set_verbose=set_verbose))
    retval = _lnlike_from_U(U, zydata, set_retq=set_retq,
                            set_verbose=set_verbose)
    return(retval)
class SPmap_Model(object):
    def __init__(self, zydata=None, linename="line"):
        """ SPmap Model object (Single-band Photometric mapping).
        Parameters
        ----------
        zydata: LightCurve object, optional
            Light curve data.
        linename: str, optional
            Name of the emission line (default: 'line').
        """
        self.zydata = zydata
        if zydata is None:
            # allow instantiation without data, e.g., to load a saved chain
            pass
        else:
            self.nlc = zydata.nlc
            self.npt = zydata.npt
            self.cont_npt = zydata.nptlist[0]
            self.cont_cad = zydata.cont_cad
            self.cont_std = zydata.cont_std
            self.rj = zydata.rj
            self.jstart = zydata.jstart
            self.jend = zydata.jend
            self.names = zydata.names
            # test if all elements in zydata.iarr are one.
            if not np.all(zydata.iarr == 1):
                raise UsageError("Element ids in zydata should all be ones.")
        # number of parameters: [log(sigma), log(tau), lag, wid, scale]
        self.ndim = 5
        self.vars = ["sigma", "tau"]
        self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
        # line transfer-function parameters, labeled by the line name
        self.vars.append("_".join(["lag", linename]))
        self.vars.append("_".join(["wid", linename]))
        self.vars.append("_".join(["scale", linename]))
        self.texs.append("".join([r"$t_{", linename, r"}$"]))
        self.texs.append("".join([r"$w_{", linename, r"}$"]))
        self.texs.append("".join([r"$s_{", linename, r"}$"]))
    def __call__(self, p, **lnpostparams):
        """ Calculate the posterior value given one parameter set `p`.
        See `lnpostfn_sbphoto_p` for the accepted keyword arguments.
        """
        return(lnpostfn_sbphoto_p(p, self.zydata, **lnpostparams))
def do_map(self, p_ini, fixed=None, **lnpostparams):
""" Do an optimization to find the Maximum a Posterior estimates.
Parameters
----------
p_ini: array_like
Pmap_Model parameters [log(sigma), log(tau), lag, wid, scale].
fixed: array_like, optional
Same dimension as p_ini, but with 0 for parameters that is fixed in
the optimization, and with 1 for parameters that is varying, e.g.,
fixed = [0, 1, 1, 1, 1] means sigma is fixed while others are
varying. fixed=[1, 1, 1, 1,] is equivalent to fixed=None
(default: None).
Returns
-------
p_bst: array_like
Best-fit parameters.
l: float
The maximum log-posterior.
"""
set_verbose = lnpostparams.pop("set_verbose", True)
set_retq = lnpostparams.pop("set_retq", False)
if set_retq is True:
raise InputError("set_retq has to be False")
p_ini = np.asarray(p_ini)
if fixed is not None:
fixed = np.asarray(fixed)
func = lambda _p: -lnpostfn_sbphoto_p(_p*fixed+p_ini*(1.-fixed),
self.zydata, **lnpostparams)
else:
func = lambda _p: -lnpostfn_sbphoto_p(_p, self.zydata,
**lnpostparams)
p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
if fixed is not None:
p_bst = p_bst*fixed+p_ini*(1.-fixed)
sigma, tau, lag, wid, scale = unpacksbphotopar(p_bst,
nlc=self.zydata.nlc)
if set_verbose:
print("Best-fit parameters are")
print(("sigma %8.3f tau %8.3f" % (sigma, tau)))
print(("%s %8.3f %s %8.3f %s %8.3f" % (
self.vars[2], lag,
self.vars[3], wid,
self.vars[3], scale,
)))
print(("with logp %10.5g " % -v_bst))
return(p_bst, -v_bst)
    def do_mcmc(self, conthpd=None, scalehpd=None, lagtobaseline=0.3,
                laglimit="baseline", widtobaseline=1, widlimit="nyquist",
                nwalkers=100, nburn=100, nchain=100, threads=1, fburn=None,
                fchain=None, flogp=None, set_threading=False, blocksize=10000,
                set_verbose=True, fixed=None, p_fix=None):
        """ See `lnpostfn_sbphoto_p` for doc, except for `laglimit` and
        `widlimit`, both of which have different default values
        ('baseline' / 'nyquist'). 'baseline' means the boundaries are
        naturally determined by the duration of the light curves,
        and 'nyquist' means the transfer function width has to be within two
        times the typical cadence of light curves.

        Results are stored on the object as `flatchain`, `logp`, `hpd`, and
        `bfp`; optionally the chains are written to `fburn`/`fchain`/`flogp`.
        """
        if (threads > 1 and (not set_threading)):
            if set_verbose:
                print(("run parallel chains of number %2d " % threads))
        elif (threads == 1):
            if set_verbose:
                if set_threading:
                    print(("run single chain in submatrix blocksize %10d " %
                           blocksize))
                else:
                    print("run single chain without subdividing matrix ")
        else:
            raise InputError("conflicting set_threading and threads setup:" +
                             "set_threading should be false when threads > 1")
        if laglimit == "baseline":
            laglimit = [[-self.rj, self.rj],]
        elif len(laglimit) != 1:
            raise InputError("laglimit should be a list of a single list")
        if widlimit == "nyquist":
            # two times the cadence, resembling Nyquist sampling.
            widlimit = [[0.0, 2.0*self.cont_cad],]
        elif len(widlimit) != 1:
            raise InputError("widlimit should be a list of a single list")
        # generate array of random numbers in (0, 1) for the initial walkers
        p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
        # initialize array
        if conthpd is None:
            p0[:, 0] += np.log(self.cont_std)-0.5
            p0[:, 1] += np.log(np.sqrt(self.rj*self.cont_cad))-0.5
        else:
            # stretch the range from (0,1) to ( conthpd[0,0], conthpd[2,0] )
            p0[:, 0] = p0[:, 0] * (conthpd[2,0] - conthpd[0,0]) + conthpd[0,0]
            p0[:, 1] = p0[:, 1] * (conthpd[2,1] - conthpd[0,1]) + conthpd[0,1]
            # old way, just use 0.5 as the 1\sigma width.
            # p0[:, 0] += conthpd[1,0]-0.5
            # p0[:, 1] += conthpd[1,1]-0.5
        # spread lag and wid walkers uniformly across their hard limits
        p0[:, 2] = p0[:, 2] * (laglimit[0][1] - laglimit[0][0]) + laglimit[0][0]
        p0[:, 3] = p0[:, 3] * (widlimit[0][1] - widlimit[0][0]) + widlimit[0][0]
        if scalehpd is None:
            pass  # (0, 1) is adequate.
        else:
            # scalehpd is in natural log-space; stretch then exponentiate
            p0[:, 4] = np.exp(p0[:, 4] * (scalehpd[2] - scalehpd[0]) +
                              scalehpd[0])
        if set_verbose:
            print("start burn-in")
            if conthpd is None:
                print("no priors on sigma and tau")
            else:
                print("use log-priors on sigma and tau from continuum fitting")
                print((np.exp(conthpd)))
            if lagtobaseline < 1.0:
                print(("penalize lags longer than %3.2f of the baseline" %
                       lagtobaseline))
            else:
                print("no penalizing long lags, restrict to < laglimit")
            if widtobaseline < 1.0:
                print(("penalize wids longer than %3.2f of the baseline" %
                       widtobaseline))
            else:
                print("no penalizing long wids, restrict to < widlimit")
            if scalehpd is None:
                print("no priors on scale")
            else:
                print("using log-priors on scale")
                print((np.exp(scalehpd)))
            print(("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"
                   % (nburn, nwalkers, nburn*nwalkers)))
        # initialize the ensemble sampler
        sampler = EnsembleSampler(nwalkers, self.ndim, lnpostfn_sbphoto_p,
                                  args=(self.zydata, conthpd, scalehpd,
                                        lagtobaseline, laglimit, widtobaseline,
                                        widlimit, set_threading, blocksize,
                                        False, False, fixed, p_fix), threads=threads)
        pos, prob, state = sampler.run_mcmc(p0, nburn)
        if set_verbose:
            print("burn-in finished")
        if fburn is not None:
            if set_verbose:
                print(("save burn-in chains to %s" % fburn))
            if fixed is not None:
                # overwrite fixed columns with their pinned values
                for i in range(self.ndim):
                    if fixed[i] == 0:
                        sampler.flatchain[:, i] = p_fix[i]
            np.savetxt(fburn, sampler.flatchain)
        # reset the sampler
        sampler.reset()
        if set_verbose:
            print("start sampling")
        sampler.run_mcmc(pos, nchain, rstate0=state)
        if set_verbose:
            print("sampling finished")
        af = sampler.acceptance_fraction
        if set_verbose:
            print("acceptance fractions are")
            print((" ".join([format(r, "3.2f") for r in af])))
        if fixed is not None:
            # overwrite fixed columns with their pinned values
            for i in range(self.ndim):
                if fixed[i] == 0:
                    sampler.flatchain[:, i] = p_fix[i]
        if fchain is not None:
            if set_verbose:
                print(("save MCMC chains to %s" % fchain))
            np.savetxt(fchain, sampler.flatchain)
        if flogp is not None:
            if set_verbose:
                print(("save logp of MCMC chains to %s" % flogp))
            np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make chain an attribute
        self.flatchain = sampler.flatchain
        self.flatchain_whole = np.copy(self.flatchain)
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        self.logp = np.ravel(sampler.lnprobability)
        self.logp_whole = np.copy(self.logp)
        self.get_bfp()
    def get_hpd(self, set_verbose=True):
        """ Get the 68% percentile range of each parameter to self.hpd.
        Parameters
        ----------
        set_verbose: bool, optional
            True if you want verbosity (default: True).
        """
        hpd = _get_hpd(self.ndim, self.flatchain)
        for i in range(self.ndim):
            if set_verbose:
                print(("HPD of %s" % self.vars[i]))
                if i < 2:
                    # sigma and tau are sampled in natural log; report linear
                    print(("low: %8.3f med %8.3f hig %8.3f" %
                           tuple(np.exp(hpd[:,i]))))
                else:
                    print(("low: %8.3f med %8.3f hig %8.3f" %
                           tuple(hpd[:,i])))
        # register hpd to attr
        self.hpd = hpd
    def get_bfp(self):
        """ Register the highest-logp sample of the chain to self.bfp. """
        self.bfp = _get_bfp(self.flatchain, self.logp)
    def show_hist(self, bins=100, lagbinsize=1.0, figout=None, figext=None):
        """ Display histograms of the posterior distributions.
        Parameters
        ----------
        bins: integer, optional
            Number of bins for parameters except for 'lag' (default: 100).
        lagbinsize: float, optional
            bin width for 'lag' (default: 1.0).
        figout: str, optional
            Output figure name (default: None, i.e., using sequencial integers).
        figext: str, optional
            Output figure extension (default: None, i.e., using `show`).
        """
        if not hasattr(self, "flatchain"):
            print("Warning: need to run do_mcmc or load_chain first")
            return(1)
        ln10 = np.log(10.0)
        fig = plt.figure(figsize=(14, 2.8*2))
        # first row: log(sigma) and log(tau), converted to log10 for display
        for i in range(2):
            ax = fig.add_subplot(2,3,i+1)
            ax.hist(self.flatchain[:,i]/ln10, bins)
            ax.set_xlabel(self.texs[i])
            ax.set_ylabel("N")
        # line
        for i in range(2, 5):
            ax = fig.add_subplot(2,3,i+1+1)
            if np.mod(i, 3) == 2:
                # lag plots
                # use fixed-width bins spanning the sampled lag range
                lagbins = np.arange(int(np.min(self.flatchain[:,i])),
                                    int(np.max(self.flatchain[:,i]))+lagbinsize,
                                    lagbinsize)
                ax.hist(self.flatchain[:,i], bins=lagbins)
            else:
                ax.hist(self.flatchain[:,i], bins)
            ax.set_xlabel(self.texs[i])
            ax.set_ylabel("N")
        return(figure_handler(fig=fig, figout=figout, figext=figext))
def break_chain(self, llag_segments):
""" Break the chain.
Parameters
----------
llag_segments: list of lists
list of a single list, which is a two-element array
bracketing the range of lags (usually the single most probable
peak).
"""
if (len(llag_segments) != 1):
print("Error: llag_segments has to be a list of length 1")
return(1)
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
llag_seq = llag_segments[0]
if llag_seq is None:
print("Warning: no rule to break chains with")
else:
indx = np.argsort(self.flatchain[:, 2])
imin, imax = np.searchsorted(self.flatchain[indx, 2], llag_seq)
indx_cut = indx[imin: imax]
self.flatchain = self.flatchain[indx_cut,:]
if hasattr(self, "logp"):
self.logp = self.logp[indx_cut]
def restore_chain(self):
self.flatchain = np.copy(self.flatchain_whole)
if hasattr(self, "logp"):
self.logp = np.copy(self.logp_whole)
    def load_chain(self, fchain, flogp=None, set_verbose=True):
        """ Load stored MCMC chain.
        Parameters
        ----------
        fchain: string
            Name for the chain file.
        flogp: string, optional
            Name for the logp file; when given, the best-fit parameters
            are also recomputed (default: None).
        set_verbose: bool, optional
            True if you want verbosity (default: True).
        """
        if set_verbose:
            print(("load MCMC chain from %s" % fchain))
        self.flatchain = np.genfromtxt(fchain)
        # keep a pristine copy so break_chain can be undone later
        self.flatchain_whole = np.copy(self.flatchain)
        self.ndim = self.flatchain.shape[1]
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        if flogp is not None:
            self.logp = np.genfromtxt(flogp)
            self.logp_whole = np.copy(self.logp)
            self.get_bfp()
    def do_pred(self, p_bst=None, fpred=None, dense=10, set_overwrite=True):
        """ Calculate the predicted mean and variance of each light curve on a
        densely sampled time axis.
        Parameters
        ----------
        p_bst: array_like
            Input parameters (default: None, i.e., use self.bfp if available).
        fpred: string, optional
            Name of the output file for the predicted light curves, set it to
            None if you do not want output (default: None).
        dense: int, optional
            The factor by which the predicted light curves should be more
            densely sampled than the original data (default: 10).
        set_overwrite: bool, optional
            True if you want to overwrite existing fpred (default: True).
        Returns
        -------
        zydata_pred: LightCurve object
            Predicted light curves packaged as a LightCurve object.
        """
        if p_bst is None and hasattr(self, "bfp"):
            p_bst = self.bfp
        # recover the best-fit linear responses (q) from the posterior call
        qlist = lnpostfn_sbphoto_p(p_bst, self.zydata, set_retq=True,
                                   set_verbose=False)[4]
        sigma, tau, lag, wid, scale = unpacksbphotopar(p_bst, self.zydata.nlc)
        # update qlist
        self.zydata.update_qlist(qlist)
        # initialize PredictSPmap object
        P = PredictSPmap(zydata=self.zydata, sigma=sigma, tau=tau, lag=lag,
                         wid=wid, scale=scale)
        # predict on a grid `dense` times finer than the data, padded by
        # 10% of the baseline on each side
        nwant = dense*self.cont_npt
        jwant0 = self.jstart - 0.1*self.rj
        jwant1 = self.jend + 0.1*self.rj
        jwant = np.linspace(jwant0, jwant1, nwant)
        zylclist_pred = []
        iwant = np.ones(nwant)
        mve, var = P.mve_var(jwant, iwant)
        sig = np.sqrt(var)
        zylclist_pred.append([jwant, mve, sig])
        zydata_pred = LightCurve(zylclist_pred)
        if fpred is not None:
            zydata_pred.save(fpred, set_overwrite=set_overwrite)
        return(zydata_pred)
# ---------------------------------
# SCmap_Model: Smoothed Continuum Spectroscopic RM
def unpackscspearpar(p, nlc=None):
    """ Unpack the physical parameters from input 1-d array for smoothed
    continuum spec mode.

    Layout of `p` is [log(sigma), log(tau), wid0, lag1, wid1, scale1, ...].
    The returned lag/wid/scale arrays carry one extra leading slot: index 0
    stands for an imaginary unsmoothed continuum (so `spear` can be called
    unchanged), index 1 is the smoothed continuum (zero lag, width `wid0`),
    and indices 2..nlc hold the emission lines.
    """
    if nlc is None:
        # infer the number of light curves: 3 global parameters plus 3
        # per emission line.
        nlc = (len(p) - 3)//3 + 1
    sigma, tau = np.exp(p[0]), np.exp(p[1])
    lags = np.zeros(nlc + 1)
    wids = np.zeros(nlc + 1)
    scales = np.ones(nlc + 1)
    # smoothing width of the continuum
    wids[1] = p[2]
    for k in range(2, nlc + 1):
        base = 3*k - 3  # = 3 + (k-2)*3, start of this line's triplet
        lags[k] = p[base]
        wids[k] = p[base + 1]
        scales[k] = p[base + 2]
    return(sigma, tau, lags, wids, scales)
def lnpostfn_scspear_p(p, zydata, lagtobaseline=0.3, laglimit=None,
                       set_threading=False, blocksize=10000, set_retq=False,
                       set_verbose=False, fixed=None, p_fix=None):
    """ log-posterior function of p.
    Parameters
    ----------
    p: array_like
        SCmap_Model parameters, [log(sigma), log(tau), wid0, lag1, wid1, scale1,
        ...]
    zydata: LightCurve object
        Input LightCurve data.
    lagtobaseline: float, optional
        Prior on lags. When input lag exceeds lagtobaseline*baseline, a
        logarithmic prior will be applied.
    laglimit: str or list of tuples.
        hard boundaries for the lag searching during MCMC sampling.
        'baseline' means the boundaries are naturally determined by the
        duration of the light curves, or you can set them as a list with
        `nline` of tuples, with each tuple containing the (min, max) pair
        for each single line.
    set_threading: bool, optional
        True if you want threading in filling matrix. It conflicts with the
        'threads' option in Rmap_Model.run_mcmc (default: False).
    blocksize: int, optional
        Maximum matrix block size in threading (default: 10000).
    set_retq: bool, optional
        Return the value(s) of q along with each component of the
        log-likelihood if True (default: False).
    set_verbose: bool, optional
        True if you want verbosity (default: False).
    fixed: list, optional
        Bit list, 1 for varying and 0 for fixed parameters (default: None).
    p_fix: list, optional
        Parameter values used for the fixed entries of `fixed`
        (default: None).
    Returns
    -------
    retval: float (set_retq is False) or list (set_retq is True)
        if `retval` returns a list, then it contains the full posterior info
        as a list of [log_posterior, chi2_component, det_component,
        DC_penalty, correction_to_the_mean].
    """
    if fixed is not None:
        # fix parameters during inference.
        fixed = np.asarray(fixed)
        p_fix = np.asarray(p_fix)
        p = np.asarray(p)
        p = p * fixed + p_fix * (1. - fixed)
    # unpack the parameters from p
    sigma, tau, lags, wids, scales = unpackscspearpar(p, zydata.nlc)
    if set_retq:
        vals = list(lnlikefn_scspear(zydata, sigma, tau, lags, wids, scales,
                                     set_retq=True, set_verbose=set_verbose,
                                     set_threading=set_threading,
                                     blocksize=blocksize))
    else:
        logl = lnlikefn_scspear(zydata, sigma, tau, lags, wids, scales,
                                set_retq=False, set_verbose=set_verbose,
                                set_threading=set_threading,
                                blocksize=blocksize)
    # XXX deprecated but left here for conformity.
    prior0 = 0.0
    prior1 = 0.0
    # for each lag
    prior2 = 0.0
    for _i in range(zydata.nlc-1):
        # lags/wids/scales carry a phantom slot 0 and the smoothed
        # continuum at slot 1, so line `_i` lives at index `_i + 2`.
        i = _i + 2
        if lagtobaseline < 1.0:
            if np.abs(lags[i]) > lagtobaseline*zydata.rj:
                # penalize long lags when they are larger than 0.3 times the
                # baseline,
                # as it is too easy to fit the model with non-overlapping
                # signals in the light curves.
                prior2 += np.log(np.abs(lags[i])/(lagtobaseline*zydata.rj))
        # penalize long lags to be impossible
        if laglimit is not None:
            # laglimit starts with the 1st line lightcurve.
            if lags[i] > laglimit[_i][1] or lags[i] < laglimit[_i][0]:
                # try not stack priors
                prior2 += my_pos_inf
    # add logp of all the priors
    prior = -0.5*(prior0*prior0+prior1*prior1) - prior2
    if set_retq:
        vals[0] = vals[0] + prior
        vals.extend([prior0, prior1, prior2])
        return(vals)
    else:
        logp = logl + prior
        return(logp)
def lnlikefn_scspear(zydata, sigma, tau, lags, wids, scales, set_retq=False,
                     set_verbose=False, set_threading=False, blocksize=10000):
    """ Internal function to calculate the log likelihood.

    `lags`, `wids`, and `scales` must follow the layout produced by
    `unpackscspearpar` (one extra leading slot for the imaginary
    unsmoothed continuum).
    """
    # impossible scenarios
    if (sigma <= 0.0 or tau <= 0.0 or np.min(wids) < 0.0 or
            np.min(scales) <= 0.0 or np.max(np.abs(lags)) > zydata.rj):
        return(_exit_with_retval(zydata.nlc, set_retq,
               errmsg="Warning: illegal input of parameters",
               set_verbose=set_verbose))
    # calculate covariance matrix
    # here we have to trick the program to think that we have line lightcurves
    # with zero lag rather than having a unsmoothed continuum by increasing
    # iarr by one.
    if set_threading:
        C = spear_threading(zydata.jarr, zydata.jarr, zydata.iarr+1,
                            zydata.iarr+1, sigma, tau, lags, wids, scales,
                            blocksize=blocksize)
    else:
        C = spear(zydata.jarr, zydata.jarr, zydata.iarr+1, zydata.iarr+1,
                  sigma, tau, lags, wids, scales)
    # decompose C inplace
    U, info = cholesky(C, nugget=zydata.varr, inplace=True, raiseinfo=False)
    # handle exceptions here
    if info > 0:
        return(_exit_with_retval(
            zydata.nlc, set_retq,
            errmsg="Warning: non positive-definite covariance C #1",
            set_verbose=set_verbose))
    retval = _lnlike_from_U(U, zydata, set_retq=set_retq,
                            set_verbose=set_verbose)
    return(retval)
class SCmap_Model(object):
    """ Smoothed-continuum spectroscopic reverberation-mapping model.

    Same parameterization as Rmap_Model except that the observed continuum
    is modeled as a tophat-smoothed (zero-lag) version of the underlying
    DRW process, adding one extra parameter `wid0` (the smoothing width).
    """
    def __init__(self, zydata=None):
        """ SCmap Model object.
        Parameters
        ----------
        zydata: LightCurve object, optional
            Light curve data.
        """
        self.zydata = zydata
        if zydata is None:
            pass
        else:
            self.nlc = zydata.nlc
            self.npt = zydata.npt
            self.cont_npt = zydata.nptlist[0]
            self.cont_cad = zydata.cont_cad
            self.cont_std = zydata.cont_std
            self.rj = zydata.rj
            self.jstart = zydata.jstart
            self.jend = zydata.jend
            self.names = zydata.names
            # number of parameters: 2 DRW + 1 smoothing width + 3 per line
            self.ndim = 2 + (self.nlc-1)*3 + 1
            self.vars = ["sigma", "tau", "smoothing"]
            self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$", r"$t_c$"]
            for i in range(1, self.nlc):
                self.vars.append("_".join(["lag", self.names[i]]))
                self.vars.append("_".join(["wid", self.names[i]]))
                self.vars.append("_".join(["scale", self.names[i]]))
                self.texs.append("".join([
                    r"$t_{", self.names[i].lstrip(r"$").rstrip(r"$"), r"}$"]))
                self.texs.append("".join([
                    r"$w_{", self.names[i].lstrip(r"$").rstrip(r"$"), r"}$"]))
                self.texs.append("".join([
                    r"$s_{", self.names[i].lstrip(r"$").rstrip(r"$"), r"}$"]))
    def __call__(self, p, **lnpostparams):
        """ Calculate the posterior value given one parameter set `p`.
        See `lnpostfn_scspear_p` for doc.
        Parameters
        ----------
        p: array_like
            SCmap_Model parameters, [log(sigma), log(tau), wid0, lag1, wid1,
            scale1, ...]
        lnpostparams: kwargs
            Keyword arguments for `lnpostfn_scspear_p`.
        Returns
        -------
        retval: float (set_retq is False) or list (set_retq is True)
            if `retval` returns a list, then it contains the full posterior info
            as a list of [log_posterior, chi2_component, det_component,
            DC_penalty, correction_to_the_mean].
        """
        return(lnpostfn_scspear_p(p, self.zydata, **lnpostparams))
    def do_map(self, p_ini, fixed=None, **lnpostparams):
        """ Do an optimization to find the Maximum a Posterior estimates.
        See `lnpostfn_scspear_p` for doc.
        Parameters
        ----------
        p_ini: array_like
            SCmap_Model parameters, [log(sigma), log(tau), wid0, lag1,
            wid1, scale1, ...]
        fixed: array_like, optional
            Same dimension as p_ini, but with 0 for parameters that is fixed in
            the optimization, and with 1 for parameters that is varying, e.g.,
            fixed = [0, 1, 1, 1, 1, 1, ...] means sigma is fixed while
            others are varying. fixed=[1, 1, 1, 1, 1, 1, ...] is
            equivalent to fixed=None (default: None).
        lnpostparams: kwargs
            Kewword arguments for `lnpostfn_scspear_p`.
        Returns
        -------
        p_bst: array_like
            Best-fit parameters.
        l: float
            The maximum log-posterior.
        """
        set_verbose = lnpostparams.pop("set_verbose", True)
        set_retq = lnpostparams.pop("set_retq", False)
        if set_retq is True:
            raise InputError("set_retq has to be False")
        p_ini = np.asarray(p_ini)
        if fixed is not None:
            fixed = np.asarray(fixed)
            func = lambda _p: -lnpostfn_scspear_p(_p*fixed+p_ini*(1.-fixed),
                                                  self.zydata, **lnpostparams)
        else:
            func = lambda _p: -lnpostfn_scspear_p(
                _p, self.zydata, **lnpostparams)
        p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
        if fixed is not None:
            p_bst = p_bst*fixed+p_ini*(1.-fixed)
        sigma, tau, lags, wids, scales = unpackscspearpar(p_bst,
                                                          self.zydata.nlc)
        if set_verbose:
            print("Best-fit parameters are")
            # NOTE(review): unpackscspearpar always leaves wids[0] == 0 (the
            # smoothing width is stored in wids[1]) -- confirm whether this
            # line should print wids[1] instead.
            print(("sigma %8.3f tau %8.3f wid0 %8.3f " % (sigma, tau, wids[0])))
            for i in range(self.nlc-1):
                ip = 2+i*3 + 1
                print(("%s %8.3f %s %8.3f %s %8.3f" % (
                    self.vars[ip+0], lags[i+1],
                    self.vars[ip+1], wids[i+1],
                    self.vars[ip+2], scales[i+1],
                    )))
            print(("with logp %10.5g " % -v_bst))
        return(p_bst, -v_bst)
    def do_mcmc(self, lagtobaseline=0.3, laglimit="baseline", nwalkers=100,
                nburn=100, nchain=100, threads=1, fburn=None, fchain=None,
                flogp=None, set_threading=False, blocksize=10000,
                set_verbose=True, fixed=None, p_fix=None):
        """ Run MCMC sampling over the parameter space.
        Parameters
        ----------
        lagtobaseline: float, optional
            Prior on lags. When input lag exceeds lagtobaseline*baseline, a
            logarithmic prior will be applied.
        laglimit: str or list of tuples.
            Hard boundaries for the lag searching during MCMC sampling.
            'baseline' means the boundaries are naturally determined by
            the duration of the light curves, or you can set them as a list
            with `nline` of tuples, with each tuple containing the (min, max)
            pair for each single line.
        nwalker: integer, optional
            Number of walkers for `emcee` (default: 100).
        nburn: integer, optional
            Number of burn-in steps for `emcee` (default: 50).
        nchain: integer, optional
            Number of chains for `emcee` (default: 50).
        thread: integer
            Number of threads (default: 1).
        fburn: str, optional
            filename for burn-in output (default: None).
        fchain: str, optional
            filename for MCMC chain output (default: None).
        flogp: str, optional
            filename for logp output (default: None).
        set_threading: bool, optional
            True if you want threading in filling matrix. It conflicts with the
            'threads' option in Rmap_Model.run_mcmc (default: False).
        blocksize: int, optional
            Maximum matrix block size in threading (default: 10000).
        set_verbose: bool, optional
            Turn on/off verbose mode (default: True).
        fixed: list, optional
            Bit list, 1 for varying and 0 for fixed parameters
            (default: None).
        p_fix: list, optional
            Parameter values used for the fixed entries (default: None).
        """
        if (threads > 1 and (not set_threading)):
            if set_verbose:
                print(("run parallel chains of number %2d " % threads))
        elif (threads == 1):
            if set_verbose:
                if set_threading:
                    print(("run single chain in submatrix blocksize %10d " %
                           blocksize))
                else:
                    print("run single chain without subdividing matrix ")
        else:
            raise InputError("conflicting set_threading and threads setup")
        if laglimit == "baseline":
            laglimit = [[-self.rj, self.rj],]*(self.nlc-1)
        elif len(laglimit) != (self.nlc - 1):
            raise InputError("laglimit should be a list of nline lists")
        # generate array of random numbers
        p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
        # initialize array
        # sigma and tau without prior
        p0[:, 0] += np.log(self.cont_std)-0.5
        p0[:, 1] += np.log(np.sqrt(self.rj*self.cont_cad))-0.5
        # make the initial wid0 to be [0, 10*cadence]
        p0[:, 2] *= 10. * self.cont_cad
        # spread initial lags uniformly over the allowed window
        for i in range(self.nlc-1):
            p0[:, 3+i*3] = p0[:,3+i*3] * (laglimit[i][1] -
                                          laglimit[i][0]) + laglimit[i][0]
        if set_verbose:
            print("start burn-in")
            if lagtobaseline < 1.0:
                print(("penalize lags longer than %3.2f of the baseline" %
                       lagtobaseline))
            else:
                print("no penalizing long lags, restrict to < baseline")
            print(("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"
                   % (nburn, nwalkers, nburn*nwalkers)))
        # initialize the ensemble sampler
        sampler = EnsembleSampler(nwalkers, self.ndim, lnpostfn_scspear_p,
                                  args=(self.zydata, lagtobaseline, laglimit,
                                        set_threading, blocksize, False, False, fixed, p_fix),
                                  threads=threads)
        pos, prob, state = sampler.run_mcmc(p0, nburn)
        if set_verbose:
            print("burn-in finished")
        if fburn is not None:
            if set_verbose:
                print(("save burn-in chains to %s" % fburn))
            if fixed is not None:
                # modify flatchain
                for i in range(self.ndim):
                    if fixed[i] == 0:
                        sampler.flatchain[:, i] = p_fix[i]
            np.savetxt(fburn, sampler.flatchain)
        # reset the sampler
        sampler.reset()
        if set_verbose:
            print("start sampling")
        sampler.run_mcmc(pos, nchain, rstate0=state)
        if set_verbose:
            print("sampling finished")
        af = sampler.acceptance_fraction
        if set_verbose:
            print("acceptance fractions are")
            print((" ".join([format(r, "3.2f") for r in af])))
        if fixed is not None:
            # modify flatchain
            for i in range(self.ndim):
                if fixed[i] == 0:
                    sampler.flatchain[:, i] = p_fix[i]
        if fchain is not None:
            if set_verbose:
                print(("save MCMC chains to %s" % fchain))
            np.savetxt(fchain, sampler.flatchain)
        if flogp is not None:
            if set_verbose:
                print(("save logp of MCMC chains to %s" % flogp))
            np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
        # make chain an attribute
        self.flatchain = sampler.flatchain
        self.flatchain_whole = np.copy(self.flatchain)
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        self.logp = np.ravel(sampler.lnprobability)
        self.logp_whole = np.copy(self.logp)
        self.get_bfp()
    def get_hpd(self, set_verbose=True):
        """ Get the 68% percentile range of each parameter to self.hpd.
        Parameters
        ----------
        set_verbose: bool, optional
            True if you want verbosity (default: True).
        """
        hpd = _get_hpd(self.ndim, self.flatchain)
        for i in range(self.ndim):
            if set_verbose:
                print(("HPD of %s" % self.vars[i]))
                if i < 2:
                    # sigma and tau are sampled in natural log
                    print(("low: %8.3f med %8.3f hig %8.3f" %
                           tuple(np.exp(hpd[:,i]))))
                else:
                    print(("low: %8.3f med %8.3f hig %8.3f" %
                           tuple(hpd[:,i])))
        # register hpd to attr
        self.hpd = hpd
    def get_bfp(self):
        """ Register the best-fit parameters (the chain sample with the
        highest logp) to self.bfp.
        """
        self.bfp = _get_bfp(self.flatchain, self.logp)
    def show_hist(self, bins=100, lagbinsize=1.0, figout=None, figext=None):
        """ Display histograms of the posterior distributions.
        Parameters
        ----------
        bins: integer, optional
            Number of bins for parameters except for 'lag' (default: 100).
        lagbinsize: float, optional
            bin width for 'lag' (default: 1.0).
        figout: str, optional
            Output figure name (default: None, i.e., using sequencial integers).
        figext: str, optional
            Output figure extension (default: None, i.e., using `show`).
        """
        if not hasattr(self, "flatchain"):
            print("Warning: need to run do_mcmc or load_chain first")
            return(1)
        ln10 = np.log(10.0)
        fig = plt.figure(figsize=(14, 2.8*self.nlc))
        for i in range(2):
            ax = fig.add_subplot(self.nlc,3,i+1)
            ax.hist(self.flatchain[:,i]/ln10, bins)
            ax.set_xlabel(self.texs[i])
            ax.set_ylabel("N")
        # for wid0
        ax = fig.add_subplot(self.nlc,3,3)
        ax.hist(self.flatchain[:,2], bins)
        ax.set_xlabel(self.texs[2])
        ax.set_ylabel("N")
        # go to lines
        for k in range(self.nlc-1):
            for i in range(3+k*3, 6+k*3):
                ax = fig.add_subplot(self.nlc,3,i+1)
                if np.mod(i, 3) == 0:
                    # lag plots
                    lagbins = np.arange(int(np.min(self.flatchain[:,i])),
                                        int(np.max(self.flatchain[:,i])) +
                                        lagbinsize, lagbinsize)
                    ax.hist(self.flatchain[:,i], bins=lagbins)
                else:
                    ax.hist(self.flatchain[:,i], bins)
                ax.set_xlabel(self.texs[i])
                ax.set_ylabel("N")
        return(figure_handler(fig=fig, figout=figout, figext=figext))
    def break_chain(self, llag_segments):
        """ Break the chain.
        Parameters
        ----------
        llag_segments: list of lists
            list of length self.nlc-1, wich each element a two-element array
            bracketing the range of lags (usually the single most probable peak)
            you want to consider for each line.
        """
        if (len(llag_segments) != self.nlc-1):
            print(("Error: llag_segments has to be a list of length %d" %
                   (self.nlc-1)))
            return(1)
        if not hasattr(self, "flatchain"):
            print("Warning: need to run do_mcmc or load_chain first")
            return(1)
        for i, llag_seq in enumerate(llag_segments):
            if llag_seq is None:
                continue
            # lag of line i sits in column 3+i*3 of the chain
            indx = np.argsort(self.flatchain[:, 3+i*3])
            imin, imax = np.searchsorted(self.flatchain[indx, 3+i*3], llag_seq)
            indx_cut = indx[imin: imax]
            self.flatchain = self.flatchain[indx_cut,:]
            if hasattr(self, "logp"):
                self.logp = self.logp[indx_cut]
    def restore_chain(self):
        """ Restore chain after `break_chain`.
        """
        self.flatchain = np.copy(self.flatchain_whole)
        if hasattr(self, "logp"):
            self.logp = np.copy(self.logp_whole)
    def load_chain(self, fchain, flogp=None, set_verbose=True):
        """ Load stored MCMC chain.
        Parameters
        ----------
        fchain: string
            Name for the chain file.
        flogp: string, optional
            Name for the logp file (default: None).
        set_verbose: bool, optional
            True if you want verbosity (default: True).
        """
        if set_verbose:
            print(("load MCMC chain from %s" % fchain))
        self.flatchain = np.genfromtxt(fchain)
        self.flatchain_whole = np.copy(self.flatchain)
        self.ndim = self.flatchain.shape[1]
        # get HPD
        self.get_hpd(set_verbose=set_verbose)
        if flogp is not None:
            self.logp = np.genfromtxt(flogp)
            self.logp_whole = np.copy(self.logp)
            self.get_bfp()
    def get_qlist(self, p_bst):
        """ get the best-fit linear responses.
        Parameters
        ----------
        p_bst: list
            best-fit parameters.
        """
        self.qlist = lnpostfn_scspear_p(p_bst, self.zydata, set_retq=True,
                                        set_verbose=False)[4]
    def do_pred(self, p_bst=None, fpred=None, dense=10, set_overwrite=True):
        """ Calculate the predicted mean and variance of each light curve on a
        densely sampled time axis.
        Parameters
        ----------
        p_bst: array_like
            Input parameters.
        fpred: string, optional
            Name of the output file for the predicted light curves, set it to
            None if you do not want output (default: None).
        dense: int, optional
            The factor by which the predicted light curves should be more
            densely sampled than the original data (default: 10).
        set_overwrite: bool, optional
            True if you want to overwrite existing fpred (default: True).
        Returns
        -------
        zydata_pred: LightCurve object
            Predicted light curves packaged as a LightCurve object.
        """
        if p_bst is None and hasattr(self, "bfp"):
            p_bst = self.bfp
        self.get_qlist(p_bst)
        sigma, tau, lags, wids, scales = unpackscspearpar(p_bst,
                                                          self.zydata.nlc)
        # update qlist
        self.zydata.update_qlist(self.qlist)
        # initialize PredictSCmap object
        P = PredictSCmap(zydata=self.zydata, sigma=sigma, tau=tau,
                         lags=lags, wids=wids, scales=scales)
        # predict on a grid `dense` times finer, padded by 10% of baseline
        nwant = dense*self.cont_npt
        jwant0 = self.jstart - 0.1*self.rj
        jwant1 = self.jend + 0.1*self.rj
        jwant = np.linspace(jwant0, jwant1, nwant)
        zylclist_pred = []
        for i in range(self.nlc):
            iwant = np.ones(nwant)*(i+1)
            mve, var = P.mve_var(jwant, iwant)
            sig = np.sqrt(var)
            zylclist_pred.append([jwant, mve, sig])
        zydata_pred = LightCurve(zylclist_pred)
        if fpred is not None:
            zydata_pred.save(fpred, set_overwrite=set_overwrite)
        return(zydata_pred)
# ---------------------------------
# Disk_Model: Thin Disk Continuum RM
def thin_disk_func(a, b, waves, refwave):
    """ Thin-disk parameterization of the accretion disk.

    Computes a * ((waves/refwave)**b - 1), i.e., the disk size (lag)
    of each band relative to the driving (reference) wavelength.

    Parameters
    ----------
    a: float
        accretion disk size at the reference wavelength (R_0)
    b: float
        power-law scaling of disk size as a function of wavelength (beta)
    waves: float or ndarray
        wavelengths at which the new disk sizes are to be calculated
    refwave: float
        wavelength of the driving light curve (lambda_0)
    Returns
    ------
    disk size at each wavelength provided in 'waves', as a numpy array
    (a 0-d value when `waves` is a scalar)
    """
    # bugfix: the old implementation used np.float, which was removed in
    # NumPy 1.20+ (fully gone in 1.24), and would also fail for array
    # inputs despite the documented contract; np.asarray handles both.
    wave_ratio = np.asarray(waves, dtype=float) / float(refwave)
    return(a*(np.power(wave_ratio, float(b)) - 1.))
def unpackthindiskpar(p, nlc=None, hascontlag=False, lwaves=None, refwave=None):
    """ Internal Function: unpack the physical parameters from input 1-d
    array for thin disk mode.
    Parameters
    ----------
    p: list
        sigma, tau, a, b, width1, scale1, ..., widthn, scalen
    nlc: int, optional
        Number of light curves; inferred from len(p) when None.
    hascontlag: bool, optional
        If True, return nlc-sized arrays including a zero-lag slot for the
        continuum itself; otherwise return (nlc-1)-sized line-only arrays.
    lwaves: ndarray
        array or list of floats that are wavelengths for the input light curves
    refwave: ndarray
        wavelength of the driving light curve
    Returns
    -------
    necessary info from walkers to ready for lnlike
    """
    if nlc is None:
        # possible to figure out nlc from the size of p: 4 global
        # parameters plus 2 per line light curve.
        # bugfix: use integer arithmetic -- the old float expression
        # `(len(p) - 4.)//2. + 1.` produced a float nlc, which breaks
        # np.zeros(nlc) and range(nlc) below.
        nlc = (len(p) - 4) // 2 + 1
    if lwaves is None:
        print("You need to provide values for \'lwaves\'.")
        sys.exit()
    sigma = np.exp(p[0])  # DRW amplitude
    tau = np.exp(p[1])  # DRW damping timescale
    alph = p[2]  # Thin disk normalization; disk size at refwave
    bet = p[3]  # Thin disk wavelength power-law scaling
    if hascontlag:
        lags = np.zeros(nlc)
        wids = np.zeros(nlc)
        scales = np.ones(nlc)
        for i in range(1, nlc):  # Get values needed for lnlikefn_spear
            lags[i] = thin_disk_func(alph, bet, lwaves[i], refwave)
            wids[i] = p[4+(i-1)*2]
            scales[i] = p[5+(i-1)*2]
        return(sigma, tau, lags, wids, scales, alph, bet)
    else:
        llags = np.zeros(nlc-1)
        lwids = np.zeros(nlc-1)
        lscales = np.ones(nlc-1)
        for i in range(nlc-1):
            llags[i] = thin_disk_func(alph, bet, lwaves[i + 1], refwave)
            lwids[i] = p[4+i*2]
            lscales[i] = p[5+i*2]
        return(sigma, tau, llags, lwids, lscales, alph, bet)
def lnpostfn_thindisk_p(p, zydata, bandwaves, ref_wave, conthpd=None,
                        lagtobaseline=0.3, laglimit=None,
                        set_threading=False, blocksize=10000, set_retq=False,
                        set_verbose=False, tophatminwidth=None,
                        a_lims = [0., np.inf], b_lims = [0., np.inf],
                        fixed=None, p_fix=None):
    """ log-posterior function of p.
    Parameters
    ----------
    p: array_like
        Disk_Model parameters, [log_e(sigma), log_e(tau), alpha, beta, width1,
        scale1, ..., widthn, scalen]
    zydata: LightCurve object
        Input LightCurve data.
    bandwaves: array_like
        The effective wavelengths for the photometric bands
    conthpd: ndarray, optional
        Priors on sigma and tau as an ndarray with shape (3, 2),
        np.array([[log_e(sigma_low), log_e(tau_low)],
                  [log_e(sigma_med), log_e(tau_med)],
                  [log_e(sigma_hig), log_e(tau_hig)]])
        where 'low', 'med', and 'hig' are defined as the 68% confidence
        limits around the median. conthpd usually comes in as an attribute
        of the `Cont_Model` object `hpd` (default: None).
    lagtobaseline: float, optional
        Prior on lags. When input lag exceeds lagtobaseline*baseline, a
        logarithmic prior will be applied.
    laglimit: str or list of tuples.
        hard boundaries for the lag searching during MCMC sampling.
        'baseline' means the boundaries are naturally determined by the
        duration of the light curves, or you can set them as a list with
        `nline` of tuples, with each tuple containing the (min, max) pair
        for each single line.
    set_threading: bool, optional
        True if you want threading in filling matrix. It conflicts with the
        'threads' option in Rmap_Model.run_mcmc (default: False).
    blocksize: int, optional
        Maximum matrix block size in threading (default: 10000).
    set_retq: bool, optional
        Return the value(s) of q along with each component of the
        log-likelihood if True (default: False).
    set_verbose: bool, optional
        True if you want verbosity (default: False).
    tophatminwidth: float, optional
        Used for prior on tophat transfer function width (prior4).
    a_lims: list of floats, optional
        The allowed limits for the disk size of the driving light curve
        in units of light-(time unit of light curve).
    b_lims: list of floats, optional
        The allowed limits for the power law index of the disk scaling
        as a function of wavelength.
    fixed: list
        Bit list indicating which parameters are to be fixed during
        minimization, `1` means varying, while `0` means fixed,
        so [1, 1, 0] means fixing only the third parameter, and `len(fixed)`
        equals the number of parameters (default: None, i.e., varying all
        the parameters simultaneously).
    p_fix: list
        parameter list, with p_fix[fixed==0] being fixed.
    Returns
    -------
    retval: float (set_retq is False) or list (set_retq is True)
        if `retval` returns a list, then it contains the full posterior info
        as a list of [log_posterior, chi2_component, det_component,
        DC_penalty, correction_to_the_mean].
    """
    if fixed is not None:
        # fix parameters during inference.
        fixed = np.asarray(fixed)
        p_fix = np.asarray(p_fix)
        p = np.asarray(p)
        p = p * fixed + p_fix * (1. - fixed)
    # unpack the parameters from p; line lags follow the thin-disk law
    sigma, tau, llags, lwids, lscales, alpha, beta = unpackthindiskpar(p,
                        zydata.nlc, hascontlag=False, lwaves=bandwaves,
                        refwave = ref_wave)
    if set_retq:
        vals = list(lnlikefn_spear(zydata, sigma, tau, llags, lwids, lscales,
                                   set_retq=True, set_verbose=set_verbose,
                                   set_threading=set_threading,
                                   blocksize=blocksize))
    else:
        logl = lnlikefn_spear(zydata, sigma, tau, llags, lwids, lscales,
                              set_retq=False, set_verbose=set_verbose,
                              set_threading=set_threading, blocksize=blocksize)
        # Deal with errors on logl, give large bad value.
        # bugfix: this check used to run unconditionally after the if/else,
        # raising a NameError whenever set_retq=True because `logl` is only
        # bound in this branch.
        if np.isnan(logl):
            return 6.*my_neg_inf
    # conthpd is in natural log
    if conthpd is not None:
        # for sigma
        if p[0] < conthpd[1,0]:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
        else:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
        # for tau
        if p[1] < conthpd[1,1]:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
        else:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
    else:
        prior0 = 0.0
        prior1 = 0.0
    # for each lag
    prior2 = 0.0
    prior3 = 0.0  # Don't let alpha or beta go negative or outside range
    prior4 = 0.0  # Don't let width go less than specified time units
    prior5 = 0.0  # Don't let scales go negative
    prior6 = 0.0  # Don't let logsigma go above ceiling, below floor
    prior7 = 0.0  # Don't let logtau go above ceiling, below floor
    for i in range(zydata.nlc-1):
        if lagtobaseline < 1.0:
            if np.abs(llags[i]) > lagtobaseline*zydata.rj:
                # penalize long lags when they are larger than 0.3 times the
                # baseline, as it is too easy to fit the model with
                # non-overlapping signals in the light curves.
                prior2 += np.log(np.abs(llags[i])/(lagtobaseline*zydata.rj))
        # penalize long lags to be impossible
        if laglimit is not None:
            if llags[i] > laglimit[i][1] or llags[i] < laglimit[i][0]:
                # try not stack priors
                prior2 += my_pos_inf
    # Add penalty for alpha, beta
    if (alpha <= a_lims[0]) or (beta <= b_lims[0]) or \
            (alpha >= a_lims[1]) or (beta >= b_lims[1]):
        prior3 = my_pos_inf
    # Add penalty for tophat widths if that parameter is provided
    if tophatminwidth is not None:
        if (lwids <= tophatminwidth).any():
            prior4 = my_pos_inf
    # Add penalty for zero scale
    if (lscales <= 0.).any():
        prior5 = my_pos_inf
    if (sigma <= sigma_floor) or (sigma >= sigma_ceiling):
        prior6 = my_pos_inf
    if (tau <= tau_floor) or (tau >= tau_ceiling):
        prior7 = my_pos_inf
    # add logp of all the priors
    # bugfix: dropped a stray `**2.` that squared the Gaussian (sigma, tau)
    # hyper-prior term a second time, inconsistent with lnpostfn_spear_p
    # and lnpostfn_scspear_p.
    prior = -0.5*(prior0*prior0+prior1*prior1) - prior2 - prior3 - prior4 \
        - prior5 - prior6 - prior7
    if set_retq:
        vals[0] = vals[0] + prior
        vals.extend([prior0, prior1, prior2])
        return(vals)
    else:
        logp = logl + prior
        return(logp)
class Disk_Model(object):
def __init__(self, zydata = None, effwave = None, tophatminwidth = None,
alpha_lims = [0., np.inf], beta_lims = [0., np.inf]):
""" Disk Model object.
Parameters
----------
zydata: LightCurve object, necessary
Light curve data.
effwave: list-like, necessary
Gives the effective wavelengths that the photometric data is at.
Assumes the first item in this list is that of the driving zylc.
tophatminwidth: float, optional
The smallest value to allow for the tophat transfer function in
the time units supplied by the zydata.
alpha_lims: list of floats, optional
The allowed limits for the disk size of the driving light curve
in units of light-(time unit of light curve).
beta_lims: list of floats, optional
The allowed limits for the power law index of the disk scaling
as a function of wavelength.
"""
if zydata is None:
raise UsageError("Disk_Model Object requires Light Curve data.")
else:
self.zydata = zydata
self.nlc = zydata.nlc
self.npt = zydata.npt
self.cont_npt = zydata.nptlist[0]
self.cont_cad = zydata.cont_cad
self.cont_std = zydata.cont_std
self.rj = zydata.rj
self.jstart = zydata.jstart
self.jend = zydata.jend
self.names = zydata.names
self.tophatminwidth = None
self.alpha_lims = np.asarray(alpha_lims, dtype = float)
self.beta_lims = np.asarray(beta_lims, dtype = float)
if len(effwave) != self.nlc:
raise UsageError("Number of wavelengths does not match " +
"number of light curves.")
self.effwaves = np.array(effwave, dtype=float)
self.refwave = self.effwaves[0]
# Note the code always assumes the driving zylc wavelength is the
# first entry in self.effwaves, skipping it when necessary
# number of parameters = 4 globals, then 2 more for each zylc
self.ndim = 4 + (self.nlc-1)*2
self.vars = ["sigma", "tau", "alpha", "beta"] # always used params
self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$", r"$\alpha$",
r"$\beta$"]
for i in range(1, self.nlc):
self.vars.append("_".join(["wid", self.names[i]]))
self.vars.append("_".join(["scale", self.names[i]]))
self.texs.append("".join(
[r"$w_{", self.names[i].lstrip(r"$").rstrip(r"$"), r"}$"]))
self.texs.append("".join(
[r"$s_{", self.names[i].lstrip(r"$").rstrip(r"$"), r"}$"]))
    def __call__(self, p, **lnpostparams):
        """ Calculate the posterior value given one parameter set `p`.
        See `lnpostfn_thindisk_p` for doc.
        Parameters
        ----------
        p: array_like
            Disk_Model parameters, [log_e(sigma), log_e(tau), alpha, beta, width1,
            scale1, ..., widthn, scalen]
        lnpostparams: kwargs
            Keyword arguments for `lnpostfn_thindisk_p`.
        Returns
        -------
        retval: float (set_retq is False) or list (set_retq is True)
            if `retval` returns a list, then it contains the full posterior info
            as a list of [log_posterior, chi2_component, det_component,
            DC_penalty, correction_to_the_mean].
        """
        # delegate to the module-level posterior, supplying the band
        # wavelengths and reference wavelength stored at construction
        return(lnpostfn_thindisk_p(p, self.zydata, self.effwaves, self.refwave,
                                   **lnpostparams))
def do_mcmc(self, conthpd=None, lagtobaseline=0.3, laglimit="baseline",
            nwalkers=100, nburn=100, nchain=100, threads=1, fburn=None,
            fchain=None, flogp=None, set_threading=False, blocksize=10000,
            set_verbose=True, fixed=None, p_fix=None):
    """ Run MCMC sampling over the parameter space.

    Results are stored on the object: `flatchain` / `flatchain_whole`,
    `logp` / `logp_whole`, and the derived `hpd` and `bfp` attributes.

    Parameters
    ----------
    conthpd: ndarray, optional
        Priors on sigma and tau as an ndarray with shape (3, 2),
        np.array([[log_e(sigma_low),  log_e(tau_low)],
                  [log_e(sigma_med),  log_e(tau_med)],
                  [log_e(sigma_hig),  log_e(tau_hig)]])
        where 'low', 'med', and 'hig' are defined as the 68% confidence
        limits around the median. conthpd usually comes in as an attribute
        of the `Cont_Model` object `hpd` (default: None).
    lagtobaseline: float, optional
        Prior on lags. When input lag exceeds lagtobaseline*baseline, a
        logarithmic prior will be applied (default: 0.3).
    laglimit: str or list of tuples.
        Hard boundaries for the lag searching during MCMC sampling.
        'baseline' means the boundaries are naturally determined by the
        duration of the light curves, or you can set them as a list
        with `nline` of tuples, with each tuple containing the (min, max)
        pair for each single line (default: 'baseline').
    nwalkers: integer, optional
        Number of walkers for `emcee` (default: 100).
    nburn: integer, optional
        Number of burn-in steps for `emcee` (default: 100).
    nchain: integer, optional
        Number of chains for `emcee` (default: 100).
    threads: integer
        Number of threads (default: 1).
    fburn: str, optional
        filename for burn-in output (default: None).
    fchain: str, optional
        filename for MCMC chain output (default: None).
    flogp: str, optional
        filename for logp output (default: None).
    set_threading: bool, optional
        True if you want threading in filling matrix. It conflicts with the
        'threads' option in Rmap_Model.run_mcmc (default: False).
    blocksize: int, optional
        Maximum matrix block size in threading (default: 10000).
    set_verbose: bool, optional
        Turn on/off verbose mode (default: True).
    fixed: list
        Bit list indicating which parameters are to be fixed during
        minimization, `1` means varying, while `0` means fixed,
        so [1, 1, 0] means fixing only the third parameter, and `len(fixed)`
        equals the number of parameters (default: None, i.e., varying all
        the parameters simultaneously).
    p_fix: list
        parameter list, with p_fix[fixed==0] being fixed.
    """
    # Report the threading configuration; 'threads' (parallel chains) and
    # 'set_threading' (matrix-fill threading) are mutually exclusive.
    if (threads > 1 and (not set_threading)):
        if set_verbose:
            print(("run parallel chains of number %2d " % threads))
    elif (threads == 1):
        if set_verbose:
            if set_threading:
                print(("run single chain in submatrix blocksize %10d " %
                       blocksize))
            else:
                print("run single chain without subdividing matrix ")
    else:
        raise InputError("conflicting set_threading and threads setup")
    # Resolve the lag boundaries: one [min, max] pair per line.
    if laglimit == "baseline":
        laglimit = [[-self.rj, self.rj],]*(self.nlc-1)
    elif len(laglimit) != (self.nlc - 1):
        raise InputError(
            "laglimit should be a list of lists matching number of lines")
    # Start every walker from a U(0,1) draw per dimension, then rescale
    # each column below into a physically sensible range.
    p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
    if conthpd is None:
        # No continuum prior: draw log(sigma) and log(tau) uniformly from
        # module-level bounds (logsigma_floor/... — defined elsewhere in
        # this module, not visible here).
        p0[:, 0] = np.random.uniform(low = logsigma_floor,
                                     high = logsigma_ceiling, size = nwalkers)
        p0[:, 1] = np.random.uniform(low = logtau_floor,
                                     high = logtau_ceiling, size = nwalkers)
    else:
        # Scatter log(sigma)/log(tau) around the continuum-fit medians.
        p0[:, 0] += conthpd[1,0]-0.5*np.random.rand(nwalkers)
        p0[:, 1] += conthpd[1,1]-0.5*np.random.rand(nwalkers)
    # Begin tophat widths at larger than the cadence, and apply the
    # minimum width offset if one was supplied.
    for i in range(0, self.nlc-1):
        p0[:, 4 + 2*i] *= 2.*self.cont_cad  # scatter widths around cadence
        if self.tophatminwidth is not None:
            p0[:, 4 + 2*i] += self.tophatminwidth
    # Reset alpha and beta initialization if finite limits were supplied.
    # NOTE(review): `~` works here only because np.isinf(...).any() returns
    # a numpy bool_ (invertible); on a plain Python bool `~` would be
    # arithmetic negation. `not (...)` would be the robust spelling.
    if ~(np.isinf(self.alpha_lims).any() or
         np.isinf(self.beta_lims).any()):
        p0[:, 2] = np.random.uniform(low = self.alpha_lims[0],
                                     high = self.alpha_lims[1], size = nwalkers)
        p0[:, 3] = np.random.uniform(low = self.beta_lims[0],
                                     high = self.beta_lims[1], size = nwalkers)
    # Go to town!
    if set_verbose:
        print("start burn-in")
        if conthpd is None:
            print("no priors on sigma and tau")
        else:
            print("using priors on sigma and tau from continuum fitting")
            print((np.exp(conthpd)))
        if lagtobaseline < 1.0:
            print(("penalize lags longer than %3.2f of the baseline" %
                   lagtobaseline))
        else:
            print("no penalizing long lags, but within the baseline")
        print(("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"
               % (nburn, nwalkers, nburn*nwalkers)))
    # Initialize the ensemble sampler; the two literal False entries are
    # set_retq and set_verbose for lnpostfn_thindisk_p.
    sampler = EnsembleSampler(nwalkers, self.ndim, lnpostfn_thindisk_p,
                              args=(self.zydata, self.effwaves,
                                    self.refwave, conthpd, lagtobaseline,
                                    laglimit, set_threading, blocksize, False,
                                    False, self.tophatminwidth, self.alpha_lims,
                                    self.beta_lims, fixed, p_fix),
                              threads=threads)
    pos, prob, state = sampler.run_mcmc(p0, nburn)
    if set_verbose:
        print("burn-in finished")
    if fburn is not None:
        if set_verbose:
            print(("save burn-in chains to %s" % fburn))
        if fixed is not None:
            # Overwrite fixed columns with their pinned values before saving.
            for i in range(self.ndim):
                if fixed[i] == 0:
                    sampler.flatchain[:, i] = p_fix[i]
        np.savetxt(fburn, sampler.flatchain)
    # Reset the sampler and continue from the burn-in end state.
    sampler.reset()
    # NOTE(review): self.lags is initialized here but nothing is ever
    # appended to it in this method — appears vestigial.
    self.lags = list()
    if set_verbose:
        print("start sampling")
    sampler.run_mcmc(pos, nchain, rstate0=state)
    if set_verbose:
        print("sampling finished")
    if fixed is not None:
        # Overwrite fixed columns with their pinned values in the chain.
        for i in range(self.ndim):
            if fixed[i] == 0:
                sampler.flatchain[:, i] = p_fix[i]
    af = sampler.acceptance_fraction
    # Turn lags into an array from a list for printing.
    self.lags = np.asarray(self.lags, dtype = float)
    if set_verbose:
        print("acceptance fractions are")
        print((" ".join([format(r, "3.2f") for r in af])))
    if fchain is not None:
        if set_verbose:
            print(("save MCMC chains to %s" % fchain))
        np.savetxt(fchain, sampler.flatchain)
    if flogp is not None:
        if set_verbose:
            print(("save logp of MCMC chains to %s" % flogp))
        np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
    # Make the chain an attribute, keeping a pristine copy so that
    # break_chain/restore_chain can operate on the working one.
    self.flatchain = sampler.flatchain
    self.flatchain_whole = np.copy(self.flatchain)
    # get HPD
    self.get_hpd(set_verbose=set_verbose)
    self.logp = np.ravel(sampler.lnprobability)
    self.logp_whole = np.copy(self.logp)
    self.get_bfp()
def do_map(self, p_ini, fixed=None, **lnpostparams):
    """ Do an optimization to find the Maximum a Posteriori estimates.

    See `lnpostfn_thindisk_p` for doc.

    Parameters
    ----------
    p_ini: array_like
        Rmap_Model parameters, [log_e(sigma), log_e(tau), alpha, beta,
        wid1, scale1, ..., widn, scalen]
    fixed: array_like, optional
        Same dimension as p_ini, but with 0 for parameters that are fixed in
        the optimization, and with 1 for parameters that are varying, e.g.,
        fixed = [0, 1, 1, 1, 1, ...] means sigma is fixed while others
        are varying. fixed=[1, 1, 1, 1, 1, ...] is equivalent to
        fixed=None (default: None).
    lnpostparams: kwargs
        Keyword arguments for `lnpostfn_thindisk_p`.

    Returns
    -------
    p_bst: array_like
        Best-fit parameters.
    l: float
        The maximum log-posterior.

    Raises
    ------
    InputError
        If `set_retq=True` is passed (incompatible with scalar optimization).
    """
    set_verbose = lnpostparams.pop("set_verbose", True)
    set_retq = lnpostparams.pop("set_retq", False)
    if set_retq is True:
        raise InputError("set_retq has to be False")
    p_ini = np.asarray(p_ini)
    if fixed is not None:
        # Blend the trial vector with p_ini so fixed entries never move:
        # fmin still varies all coordinates, but fixed ones are ignored.
        fixed = np.asarray(fixed)
        func = lambda _p: -lnpostfn_thindisk_p(_p*fixed+p_ini*(1.-fixed),
                self.zydata, self.effwaves, self.refwave,
                **lnpostparams)
    else:
        func = lambda _p: -lnpostfn_thindisk_p(_p,
                self.zydata, self.effwaves, self.refwave, **lnpostparams)
    # Nelder-Mead simplex on the negative log-posterior.
    p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
    if fixed is not None:
        # Re-impose the fixed values on the returned optimum.
        p_bst = p_bst*fixed+p_ini*(1.-fixed)
    # NOTE(review): this unpacks 7 values with hascontlag=False, whereas
    # do_pred unpacks 5 with hascontlag=True — confirm unpackthindiskpar
    # (defined elsewhere in this module) returns those arities.
    sigma, tau, llags, lwids, lscales, alpha, beta = unpackthindiskpar(p_bst, self.nlc,
            hascontlag=False, lwaves=self.effwaves, refwave = self.refwave)
    # j selects the first line entry in lwids/lscales: when the returned
    # arrays include a leading continuum slot, start printing at index 1.
    if len(llags) == self.nlc:
        j = 1
    else:
        j = 0
    if set_verbose:
        print("Best-fit parameters are")
        print(("sigma %8.3f tau %8.3f alpha %8.3f beta %8.3f" % (sigma, tau, alpha, beta)))
        for i in range(self.nlc-1):
            # ip indexes the per-line names in self.vars (pairs after the
            # 4 global parameters).
            ip = 4+i*2
            print(("%s %8.3f %s %8.3f" % (
                self.vars[ip+0], lwids[j],
                self.vars[ip+1], lscales[j],
                )))
            j = j + 1
        print(("with logp %10.5g " % -v_bst))
    return(p_bst, -v_bst)
def get_hpd(self, set_verbose=True):
    """Compute the 68% highest-posterior-density interval of each parameter.

    The (3, ndim) result — rows are low/med/hig — is stored in `self.hpd`.

    Parameters
    ----------
    set_verbose: bool, optional
        True to print the interval for every parameter (default: True).
    """
    hpd = _get_hpd(self.ndim, self.flatchain)
    if set_verbose:
        fmt = "low: %8.3f med %8.3f hig %8.3f"
        for i in range(self.ndim):
            print(("HPD of %s" % self.vars[i]))
            # sigma and tau are sampled in natural log; report them linear.
            column = np.exp(hpd[:, i]) if i < 2 else hpd[:, i]
            print((fmt % tuple(column)))
    # register hpd to attr
    self.hpd = hpd
def get_bfp(self):
    """Store the best-fit parameters (the highest-logp chain sample) in `self.bfp`."""
    self.bfp = _get_bfp(self.flatchain, self.logp)
def show_hist(self, bins=100, figout=None, figext=None):
    """ Display histograms of the posterior distributions.

    Parameters
    ----------
    bins: integer, optional
        Number of bins for each parameter histogram (default: 100).
    figout: str, optional
        Output figure name (default: None, i.e., using sequential integers).
    figext: str, optional
        Output figure extension (default: None, i.e., using `show`).

    Returns
    -------
    Whatever `figure_handler` returns, or 1 if no chain is loaded.
    """
    if not hasattr(self, "flatchain"):
        print("Warning: need to run do_mcmc or load_chain first")
        return(1)
    ln10 = np.log(10.0)
    # Grid of (nlc+1) rows x 2 columns; one panel per parameter.
    fig = plt.figure(figsize=(14, 2.8*(self.nlc+1)))
    # Row 1: sigma and tau, converted from natural log to log10 for display.
    for i in range(2):
        ax = fig.add_subplot(self.nlc + 1,2,i+1)
        ax.hist(self.flatchain[:,i]/ln10, bins)
        ax.set_xlabel(self.texs[i])
        ax.set_ylabel("N")
    # Row 2: alpha and beta (linear).
    for j in range(2,4):
        ax = fig.add_subplot(self.nlc + 1,2,j+1)
        ax.hist(self.flatchain[:,j], bins)
        ax.set_xlabel(self.texs[j])
        ax.set_ylabel("N")
    # One row per line: its (width, scale) pair at columns 4+2k and 5+2k.
    for k in range(self.nlc-1):
        for m in range(4+k*2, 6+k*2):
            ax = fig.add_subplot(self.nlc + 1, 2, m+1)
            ax.hist(self.flatchain[:,m], bins)
            ax.set_xlabel(self.texs[m])
            ax.set_ylabel("N")
    plt.tight_layout()
    return(figure_handler(fig=fig, figout=figout, figext=figext))
def restore_chain(self):
    """Undo any `break_chain` cut by reinstating the full MCMC sample.

    Copies the archived full chain — and the log-posterior values, when
    present — back over the working attributes.
    """
    self.flatchain = self.flatchain_whole.copy()
    if hasattr(self, "logp"):
        self.logp = self.logp_whole.copy()
def load_chain(self, fchain, flogp=None, set_verbose=True):
    """ Load a stored MCMC chain (and optionally its log-posterior values).

    Parameters
    ----------
    fchain: string
        Name of the chain file, as written by `do_mcmc`.
    flogp: string, optional
        Name of the log-posterior file; when given, `self.bfp` is also
        recomputed (default: None).
    set_verbose: bool, optional
        True if you want verbosity (default: True).
    """
    # Fix: the old unconditional debug prints ("Gen from text complete",
    # "flatchain complete") ignored set_verbose; all output is now gated.
    if set_verbose:
        print(("load MCMC chain from %s" % fchain))
    self.flatchain = np.loadtxt(fchain)
    # Keep a pristine copy so break_chain/restore_chain can operate.
    self.flatchain_whole = np.copy(self.flatchain)
    self.ndim = self.flatchain.shape[1]
    # get HPD
    self.get_hpd(set_verbose=set_verbose)
    if flogp is not None:
        self.logp = np.loadtxt(flogp)
        self.logp_whole = np.copy(self.logp)
        self.get_bfp()
def get_qlist(self, p_bst):
    """ Compute and store the best-fit linear responses in `self.qlist`.

    Parameters
    ----------
    p_bst: list
        Best-fit parameters.
    """
    # Index [4] of the set_retq return value is the q-list
    # (correction_to_the_mean) component.
    self.qlist = lnpostfn_thindisk_p(p_bst, self.zydata, self.effwaves, self.refwave, set_retq=True,
                                     set_verbose=False)[4]
def do_pred(self, p_bst=None, fpred=None, dense=10, set_overwrite=True):
    """ Calculate the predicted mean and variance of each light curve on a
    densely sampled time axis.

    Parameters
    ----------
    p_bst: array_like
        Input parameters; falls back to `self.bfp` when None and a
        best-fit has been computed.
    fpred: string, optional
        Name of the output file for the predicted light curves, set it to
        None if you do not want output (default: None).
    dense: int, optional
        The factor by which the predicted light curves should be more
        densely sampled than the original data (default: 10).
    set_overwrite: bool, optional
        True if you want to overwrite existing fpred (default: True).

    Returns
    -------
    zydata_pred: LightCurve object
        Predicted light curves packaged as a LightCurve object.
    """
    if p_bst is None and hasattr(self, "bfp"):
        p_bst = self.bfp
    # Side effect: refreshes self.qlist for the chosen parameters.
    self.get_qlist(p_bst)
    # NOTE(review): 5 values unpacked here with hascontlag=True, whereas
    # do_map unpacks 7 with hascontlag=False — confirm unpackthindiskpar's
    # branches return those arities.
    sigma, tau, llags, lwids, lscales = unpackthindiskpar(
        p_bst, self.zydata.nlc, lwaves = self.effwaves,
        refwave = self.refwave, hascontlag=True)
    # update qlist
    self.zydata.update_qlist(self.qlist)
    # initialize PredictRmap object
    P = PredictRmap(zydata=self.zydata, sigma=sigma, tau=tau,
                    lags=llags, wids=lwids, scales=lscales)
    nwant = dense*self.cont_npt
    # Pad the prediction window by 10% of the baseline on each side.
    jwant0 = self.jstart - 0.1*self.rj
    jwant1 = self.jend + 0.1*self.rj
    jwant = np.linspace(jwant0, jwant1, nwant)
    zylclist_pred = []
    for i in range(self.nlc):
        # Light-curve ids are 1-based in the prediction machinery.
        iwant = np.ones(nwant)*(i+1)
        mve, var = P.mve_var(jwant, iwant)
        sig = np.sqrt(var)
        zylclist_pred.append([jwant, mve, sig])
    zydata_pred = LightCurve(zylclist_pred)
    if fpred is not None:
        zydata_pred.save(fpred, set_overwrite=set_overwrite)
    return(zydata_pred)
# ---------------------------------
# DoubleDustEchoes:
def unpackdphotopar(p, nlc=2, hascontlag=False):
    """ Unpack physical parameters from the 1-d parameter array in dphoto mode.

    Currently restricted to two bands, one on and one off the double-line
    emission.

    Parameters
    ----------
    p: array_like
        [log(sigma), log(tau), lag1, wid1, scale1, lag2, wid2, scale2].
    nlc: int, optional
        Number of light curves; must be 2 (default: 2).
    hascontlag: bool, optional
        If True, prepend the trivial continuum entries (lag 0, width 0,
        scale 1) to the returned lag/width/scale arrays (default: False).

    Returns
    -------
    (sigma, tau, lags, wids, scales): tuple
        sigma and tau in linear units; arrays of length 3 when
        hascontlag is True, otherwise length 2.
    """
    if nlc != 2:
        raise InputError("DPmap_Model cannot cope with more than two bands yet")
    sigma = np.exp(p[0])
    tau = np.exp(p[1])
    # Number of leading continuum placeholder slots.
    noffset = 1 if hascontlag else 0
    lags = np.zeros(2 + noffset)
    wids = np.zeros(2 + noffset)
    scales = np.ones(2 + noffset)
    # Each line component occupies a (lag, wid, scale) triple in p.
    for k in range(2):
        lags[noffset + k] = p[2 + 3*k]
        wids[noffset + k] = p[3 + 3*k]
        scales[noffset + k] = p[4 + 3*k]
    return (sigma, tau, lags, wids, scales)
def lnpostfn_doublephoto_p(p, zydata, conthpd=None, set_extraprior=False,
                           lagtobaseline=0.3, laglimit=None, widtobaseline=1,
                           widlimit=None, set_threading=False, blocksize=10000,
                           set_retq=False, set_verbose=False, fixed=None, p_fix=None):
    """ log-posterior function of p for the double-photometric model.

    Parameters
    ----------
    p: array_like
        DPmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1, lag2, wid2, scale2].
    zydata: LightCurve object
        Light curve data.
    conthpd: ndarray, optional
        Priors on sigma and tau as an ndarray with shape (3, 2),
        np.array([[log(sigma_low),  log(tau_low)],
                  [log(sigma_med),  log(tau_med)],
                  [log(sigma_hig),  log(tau_hig)]])
        where 'low', 'med', and 'hig' are defined as the 68% confidence
        limits around the median. conthpd usually comes in as an attribute
        of the DRW_Model object DRW_Model.hpd (default: None).
    set_extraprior: bool, optional
        DEPRECATED, kept for backward compatibility and debugging purposes.
    lagtobaseline: float, optional
        Prior on lags. When input lag exceeds lagtobaseline*baseline, a
        logarithmic prior will be applied.
    laglimit: list of tuples.
        hard boundaries for the lag searching.
    widtobaseline: float, optional
        Prior on wids. When input wid exceeds widtobaseline*baseline, a
        logarithmic prior will be applied.
    widlimit: list of tuples, optional
        hard boundaries for the wid searching.
    set_threading: bool, optional
        True if you want threading in filling matrix. It conflicts with the
        'threads' option in Pmap_Model.run_mcmc (default: False).
    blocksize: int, optional
        Maximum matrix block size in threading (default: 10000).
    set_retq: bool, optional
        Return the value(s) of q along with each component of the
        log-likelihood if True (default: False).
    set_verbose: bool, optional
        True if you want verbosity (default: False).
    fixed: array_like, optional
        0/1 mask; entries with 0 are held at the corresponding p_fix value
        (default: None).
    p_fix: array_like, optional
        Fixed parameter values used where fixed == 0 (default: None).
    """
    if fixed is not None:
        # Fix parameters during inference by blending p with p_fix.
        fixed = np.asarray(fixed)
        p_fix = np.asarray(p_fix)
        p = np.asarray(p)
        p = p * fixed + p_fix * (1. - fixed)
    # unpack the parameters from p
    sigma, tau, llags, lwids, lscales = unpackdphotopar(p, zydata.nlc, hascontlag=False)
    if set_retq:
        vals = list(lnlikefn_doublephoto(zydata, sigma, tau, llags, lwids, lscales,
                                         set_retq=True, set_verbose=set_verbose,
                                         set_threading=set_threading,
                                         blocksize=blocksize))
    else:
        logl = lnlikefn_doublephoto(zydata, sigma, tau, llags, lwids, lscales,
                                    set_retq=False, set_verbose=set_verbose,
                                    set_threading=set_threading, blocksize=blocksize)
    # Gaussian-like prior on log(sigma)/log(tau), asymmetric about the
    # continuum-fit median (conthpd is in natural log).
    if conthpd is not None:
        # for sigma
        if p[0] < conthpd[1,0]:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[1,0]-conthpd[0,0])
        else:
            prior0 = (p[0] - conthpd[1,0])/(conthpd[2,0]-conthpd[1,0])
        # for tau
        if p[1] < conthpd[1,1]:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[1,1]-conthpd[0,1])
        else:
            prior1 = (p[1] - conthpd[1,1])/(conthpd[2,1]-conthpd[1,1])
    else:
        prior0 = 0.0
        prior1 = 0.0
    # prior2 accumulates lag/width penalties; it is SUBTRACTED from the
    # posterior below. my_pos_inf is a module-level constant — presumably
    # a large positive value, so adding it acts as a hard rejection.
    prior2 = 0.0
    if llags[0] < llags[1]:
        # Enforce the ordering convention: the first lag is the larger one.
        prior2 += my_pos_inf
    if lagtobaseline < 1.0:
        if np.abs(llags[0]) > lagtobaseline*zydata.rj:
            # Penalize long lags beyond lagtobaseline x baseline, as it is
            # too easy to fit the model with non-overlapping signals in
            # the light curves.
            prior2 += np.log(np.abs(llags[0])/(lagtobaseline*zydata.rj))
        # likewise for the second line component
        if np.abs(llags[1]) > lagtobaseline*zydata.rj:
            prior2 += np.log(np.abs(llags[1])/(lagtobaseline*zydata.rj))
    # Hard lag boundaries.
    if laglimit is not None:
        if llags[0] > laglimit[0][1] or llags[0] < laglimit[0][0]:
            prior2 += my_pos_inf
        if llags[1] > laglimit[1][1] or llags[1] < laglimit[1][0]:
            prior2 += my_pos_inf
    # Penalize extremely large transfer-function widths.
    if widtobaseline < 1.0:
        if np.abs(lwids[0]) > widtobaseline*zydata.rj:
            prior2 += np.log(np.abs(lwids[0])/(widtobaseline*zydata.rj))
        if np.abs(lwids[1]) > widtobaseline*zydata.rj:
            prior2 += np.log(np.abs(lwids[1])/(widtobaseline*zydata.rj))
    # Hard width boundaries.
    if widlimit is not None:
        if lwids[0] > widlimit[0][1] or lwids[0] < widlimit[0][0]:
            prior2 += my_pos_inf
        if lwids[1] > widlimit[1][1] or lwids[1] < widlimit[1][0]:
            prior2 += my_pos_inf
    if set_extraprior:
        # Deprecated extra penalizations, kept for debugging:
        # reject lags shorter than the cadence or than their own width.
        if (np.abs(llags[0]) <= zydata.cont_cad or np.abs(llags[0]) <= np.abs(lwids[0])):
            prior2 += my_pos_inf
        if (np.abs(llags[1]) <= zydata.cont_cad or np.abs(llags[1]) <= np.abs(lwids[1])):
            prior2 += my_pos_inf
        # Reject line responses below the mean error level.
        if sigma * np.abs(lscales[0]) <= np.mean(zydata.elist[1]):
            prior2 += my_pos_inf
        if sigma * np.abs(lscales[1]) <= np.mean(zydata.elist[2]):
            prior2 += my_pos_inf
    # Combine all priors into a single log-prior term.
    prior = -0.5*(prior0*prior0+prior1*prior1) - prior2
    if set_retq:
        vals[0] = vals[0] + prior
        vals.extend([prior0, prior1, prior2])
        return(vals)
    else:
        logp = logl + prior
        return(logp)
def lnlikefn_doublephoto(zydata, sigma, tau, llags, lwids, lscales, set_retq=False,
                         set_verbose=False, set_threading=False, blocksize=10000):
    """ Log-likelihood function for the double-photometric model.

    Builds the covariance matrix of all light-curve points under the DRW +
    double-tophat transfer-function model, Cholesky-decomposes it, and
    evaluates the Gaussian-process likelihood.

    Parameters
    ----------
    zydata: LightCurve object
        Light curve data (must contain more than one curve).
    sigma, tau: float
        DRW amplitude and timescale (linear units).
    llags, lwids, lscales: array_like, length 2
        Tophat lag, width, and scale for the two line components.
    set_retq, set_verbose, set_threading, blocksize:
        See `lnpostfn_doublephoto_p`.
    """
    if zydata.issingle:
        raise UsageError("lnlikefn_doublephoto does not work for single mode")
    # Reject impossible parameter combinations outright.
    if (sigma <= 0.0 or tau <= 0.0 or np.min(lwids) < 0.0 or
            np.min(lscales) < 0.0 or np.max(np.abs(llags)) > zydata.rj):
        return(_exit_with_retval(zydata.nlc, set_retq,
                                 errmsg="Warning: illegal input of parameters",
                                 set_verbose=set_verbose))
    # Prepend the trivial continuum entries (lag 0, width 0, scale 1).
    lags = np.zeros(3)
    wids = np.zeros(3)
    scales = np.ones(3)
    lags[1:] = llags[:]
    wids[1:] = lwids[:]
    scales[1:] = lscales[:]
    # Build the full covariance matrix (set_dpmap selects the
    # double-photometric kernel in spear).
    if set_threading:
        C = spear_threading(zydata.jarr, zydata.jarr, zydata.iarr,
                            zydata.iarr, sigma, tau, lags, wids, scales,
                            set_dpmap=True, blocksize=blocksize)
    else:
        C = spear(zydata.jarr, zydata.jarr, zydata.iarr, zydata.iarr, sigma,
                  tau, lags, wids, scales, set_dpmap=True)
    # Decompose C in place, adding the measurement variances on the diagonal.
    U, info = cholesky(C, nugget=zydata.varr, inplace=True, raiseinfo=False)
    # A positive info flags a non-positive-definite matrix.
    if info > 0:
        return(_exit_with_retval(
            zydata.nlc, set_retq,
            errmsg="Warning: non positive-definite covariance C #3",
            set_verbose=set_verbose))
    retval = _lnlike_from_U(U, zydata, set_retq=set_retq,
                            set_verbose=set_verbose)
    return(retval)
class DPmap_Model(object):
def __init__(self, zydata=None, linenames=["1", "2"]):
""" DPmap Model object.
Parameters
----------
zydata: LightCurve object, optional
Light curve data.
linenames: list, optional
Names of the lagged components (default: ['1', '2']).
"""
self.zydata = zydata
if zydata is None:
pass
else:
self.nlc = zydata.nlc
self.npt = zydata.npt
self.cont_npt = zydata.nptlist[0]
self.cont_cad = zydata.cont_cad
self.cont_std = zydata.cont_std
self.rj = zydata.rj
self.jstart = zydata.jstart
self.jend = zydata.jend
self.names = zydata.names
# number of parameters
self.ndim = 8
self.vars = ["sigma", "tau"]
self.texs = [r"$\log\,\sigma$", r"$\log\,\tau$"]
# first component
self.vars.append("_".join(["lag", linenames[0]]))
self.vars.append("_".join(["wid", linenames[0]]))
self.vars.append("_".join(["scale", linenames[0]]))
self.texs.append("".join([r"$t_{", linenames[0], r"}$"]))
self.texs.append("".join([r"$w_{", linenames[0], r"}$"]))
self.texs.append("".join([r"$s_{", linenames[0], r"}$"]))
# second component
self.vars.append("_".join(["lag", linenames[1]]))
self.vars.append("_".join(["wid", linenames[1]]))
self.vars.append("_".join(["scale", linenames[1]]))
self.texs.append("".join([r"$t_{", linenames[1], r"}$"]))
self.texs.append("".join([r"$w_{", linenames[1], r"}$"]))
self.texs.append("".join([r"$s_{", linenames[1], r"}$"]))
#
# self.vars.append("alpha")
# self.texs.append(r"$\alpha$")
def __call__(self, p, **lnpostparams):
""" Calculate the posterior value given one parameter set `p`.
Parameters
----------
p: array_like
DPmap_Model parameters, [log(sigma), log(tau), lag1, wid1, scale1, lag2, wid2, scale2].
lnpostparams: kwargs
Keyword arguments for `lnpostfn_doublephoto_p`.
Returns
-------
retval: float (set_retq is False) or list (set_retq is True)
if `retval` returns a list, then it contains the full posterior info
as a list of [log_posterior, chi2_component, det_component,
DC_penalty, correction_to_the_mean].
"""
return(lnpostfn_doublephoto_p(p, self.zydata, **lnpostparams))
def do_map(self, p_ini, fixed=None, **lnpostparams):
""" Do an optimization to find the Maximum a Posterior estimates.
Parameters
----------
p_ini: array_like
DPmap_Model parameters [log(sigma), log(tau), lag1, wid1, scale1,
lag2, wid2, scale2].
fixed: array_like, optional
Same dimension as p_ini, but with 0 for parameters that is fixed in
the optimization, and with 1 for parameters that is varying, e.g.,
fixed = [0, 1, 1, 1, 1, 1, 1, 1] means sigma is fixed while others are
varying. fixed=[1, 1, 1, 1, 1, 1, 1] is equivalent to fixed=None (
default: None).
Returns
-------
p_bst: array_like
Best-fit parameters.
l: float
The maximum log-posterior.
"""
set_verbose = lnpostparams.pop("set_verbose", True)
set_retq = lnpostparams.pop("set_retq", False)
if set_retq is True:
raise InputError("set_retq has to be False")
p_ini = np.asarray(p_ini)
if fixed is not None:
fixed = np.asarray(fixed)
func = lambda _p: -lnpostfn_doublephoto_p(_p*fixed+p_ini*(1.-fixed),
self.zydata, **lnpostparams)
else:
func = lambda _p: -lnpostfn_doublephoto_p(_p,
self.zydata, **lnpostparams)
p_bst, v_bst = fmin(func, p_ini, full_output=True)[:2]
if fixed is not None:
p_bst = p_bst*fixed+p_ini*(1.-fixed)
sigma, tau, llags, lwids, lscales = unpackdphotopar(
p_bst, self.zydata.nlc, hascontlag=False)
if set_verbose:
print("Best-fit parameters are")
print(("sigma %8.3f tau %8.3f" % (sigma, tau)))
print(("%s %8.3f %s %8.3f %s %8.3f" % (
self.vars[2], llags[0], self.vars[3], lwids[0], self.vars[4], lscales[0])))
print(("%s %8.3f %s %8.3f %s %8.3f" % (
self.vars[5], llags[1], self.vars[6], lwids[1], self.vars[7], lscales[1])))
print(("with logp %10.5g " % -v_bst))
return(p_bst, -v_bst)
def do_mcmc(self, conthpd=None, set_extraprior=False, lagtobaseline=0.3,
laglimit="baseline", widtobaseline=1, widlimit="nyquist",
nwalkers=100, nburn=100, nchain=100, threads=1, fburn=None,
fchain=None, flogp=None, set_threading=False, blocksize=10000,
set_verbose=True, fixed=None, p_fix=None):
""" See `lnpostfn_doublephoto_p` for doc, except for `laglimit` and `widlimit`,
both of which have different default values ('baseline' / 'nyquist').
'baseline' means the boundaries are naturally determined by the
duration of the light curves, and 'nyquist' means the transfer function
width has to be within two times the typical cadence of light curves.
"""
if (threads > 1 and (not set_threading)):
if set_verbose:
print(("run parallel chains of number %2d " % threads))
elif (threads == 1):
if set_verbose:
if set_threading:
print(("run single chain in submatrix blocksize %10d " %
blocksize))
else:
print("run single chain without subdividing matrix ")
else:
raise InputError("conflicting set_threading and threads setup")
if laglimit == "baseline":
laglimit = [[-self.rj, self.rj], [-self.rj, self.rj]]
elif len(laglimit) != 2:
raise InputError("laglimit should be a list of two lists")
if widlimit == "nyquist":
# two times the cadence, resembling Nyquist sampling.
widlimit = [[0.0, 2.0*self.cont_cad], [0.0, 2.0*self.cont_cad]]
elif len(widlimit) != 2:
raise InputError("widlimit should be a list of two lists")
# generate array of random numbers
p0 = np.random.rand(nwalkers*self.ndim).reshape(nwalkers, self.ndim)
# initialize array
if conthpd is None:
p0[:, 0] += np.log(self.cont_std)-0.5
p0[:, 1] += np.log(np.sqrt(self.rj*self.cont_cad))-0.5
else:
# XXX stretch the range from (0,1) to ( conthpd[0,0], conthpd[2,0] )
p0[:, 0] = p0[:, 0] * (conthpd[2,0] - conthpd[0,0]) + conthpd[0,0]
p0[:, 1] = p0[:, 1] * (conthpd[2,1] - conthpd[0,1]) + conthpd[0,1]
# old way, just use 0.5 as the 1\sigma width.
# p0[:, 0] += conthpd[1,0]-0.5
# p0[:, 1] += conthpd[1,1]-0.5
p0[:, 2] = p0[:,2]*(laglimit[0][1]-laglimit[0][0]) + laglimit[0][0]
p0[:, 3] = p0[:,3]*(widlimit[0][1]-widlimit[0][0]) + widlimit[0][0]
p0[:, 5] = p0[:,5]*(laglimit[1][1]-laglimit[1][0]) + laglimit[1][0]
p0[:, 6] = p0[:,6]*(widlimit[1][1]-widlimit[1][0]) + widlimit[1][0]
if set_verbose:
print("start burn-in")
if conthpd is None:
print("no priors on sigma and tau")
else:
print("using priors on sigma and tau from continuum fitting")
print((np.exp(conthpd)))
if lagtobaseline < 1.0:
print(("penalize lags longer than %3.2f of the baseline" %
lagtobaseline))
else:
print("no penalizing long lags, restrict to < baseline")
print(("nburn: %d nwalkers: %d --> number of burn-in iterations: %d"
% (nburn, nwalkers, nburn*nwalkers)))
# initialize the ensemble sampler
sampler = EnsembleSampler(nwalkers, self.ndim, lnpostfn_doublephoto_p,
args=(self.zydata, conthpd, set_extraprior,
lagtobaseline, laglimit, widtobaseline,
widlimit, set_threading, blocksize,
False, False, fixed, p_fix), threads=threads)
pos, prob, state = sampler.run_mcmc(p0, nburn)
if set_verbose:
print("burn-in finished")
if fburn is not None:
if set_verbose:
print(("save burn-in chains to %s" % fburn))
if fixed is not None:
# modify flatchain
for i in range(self.ndim):
if fixed[i] == 0:
sampler.flatchain[:, i] = p_fix[i]
np.savetxt(fburn, sampler.flatchain)
# reset the sampler
sampler.reset()
if set_verbose:
print("start sampling")
sampler.run_mcmc(pos, nchain, rstate0=state)
if set_verbose:
print("sampling finished")
af = sampler.acceptance_fraction
if set_verbose:
print("acceptance fractions are")
print((" ".join([format(r, "3.2f") for r in af])))
if fixed is not None:
# modify flatchain
for i in range(self.ndim):
if fixed[i] == 0:
sampler.flatchain[:, i] = p_fix[i]
if fchain is not None:
if set_verbose:
print(("save MCMC chains to %s" % fchain))
np.savetxt(fchain, sampler.flatchain)
if flogp is not None:
if set_verbose:
print(("save logp of MCMC chains to %s" % flogp))
np.savetxt(flogp, np.ravel(sampler.lnprobability), fmt='%16.8f')
# make chain an attritue
self.flatchain = sampler.flatchain
self.flatchain_whole = np.copy(self.flatchain)
# get HPD
self.get_hpd(set_verbose=set_verbose)
self.logp = np.ravel(sampler.lnprobability)
self.logp_whole = np.copy(self.logp)
self.get_bfp()
def get_hpd(self, set_verbose=True):
""" Get the 68% percentile range of each parameter to self.hpd.
Parameters
----------
set_verbose: bool, optional
True if you want verbosity (default: True).
"""
hpd = _get_hpd(self.ndim, self.flatchain)
for i in range(self.ndim):
if set_verbose:
print(("HPD of %s" % self.vars[i]))
if i < 2:
print(("low: %8.3f med %8.3f hig %8.3f" %
tuple(np.exp(hpd[:,i]))))
else:
print(("low: %8.3f med %8.3f hig %8.3f" %
tuple(hpd[:,i])))
# register hpd to attr
self.hpd = hpd
def get_bfp(self):
self.bfp = _get_bfp(self.flatchain, self.logp)
def show_hist(self, bins=100, lagbinsize=1.0, figout=None, figext=None):
""" Display histograms of the posterior distributions.
Parameters
----------
bins: integer, optional
Number of bins for parameters except for 'lag' (default:100).
lagbinsize: integer, optional
bin width for 'lag' (default:100).
figout: str, optional
Output figure name (default: None, i.e., using sequencial integers).
figext: str, optional
Output figure extension (default: None, i.e., using `show`).
"""
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
_nlc = self.nlc + 1
ln10 = np.log(10.0)
fig = plt.figure(figsize=(14, 2.8*_nlc))
for i in range(2):
ax = fig.add_subplot(_nlc,3,i+1)
ax.hist(self.flatchain[:,i]/ln10, bins)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
# line
for i in range(2, 8):
ax = fig.add_subplot(_nlc,3,i+1+1)
if np.mod(i, 3) == 2:
# lag plots
lagbins = np.arange(int(np.min(self.flatchain[:,i])),
int(np.max(self.flatchain[:,i]))+lagbinsize,
lagbinsize)
ax.hist(self.flatchain[:,i], bins=lagbins)
else:
ax.hist(self.flatchain[:,i], bins)
ax.set_xlabel(self.texs[i])
ax.set_ylabel("N")
# plt.get_current_fig_manager().toolbar.zoom()
return(figure_handler(fig=fig, figout=figout, figext=figext))
def break_chain(self, llag_segments):
""" Break the chain.
Parameters
----------
llag_segments: list of lists
list of length 2, with each element a two-element array
bracketing the range of lags (usually the single most probable peak)
you want to consider for each line.
"""
if (len(llag_segments) != 2):
print(("Error: llag_segments has to be a list of length %d" % 2))
return(1)
if not hasattr(self, "flatchain"):
print("Warning: need to run do_mcmc or load_chain first")
return(1)
for i in range(2):
llag_seq = llag_segments[i]
if llag_seq is None:
print("Warning: no rule to break chains with")
else:
indx = np.argsort(self.flatchain[:, 2+i*3])
imin, imax = np.searchsorted(self.flatchain[indx, 2+i*3], llag_seq)
indx_cut = indx[imin: imax]
self.flatchain = self.flatchain[indx_cut,:]
if hasattr(self, "logp"):
self.logp = self.logp[indx_cut]
def restore_chain(self):
""" Restore chain after `break_chain`.
"""
self.flatchain = np.copy(self.flatchain_whole)
if hasattr(self, "logp"):
self.logp = np.copy(self.logp_whole)
def load_chain(self, fchain, flogp=None, set_verbose=True):
""" Load stored MCMC chain.
Parameters
----------
fchain: string
Name for the chain file.
set_verbose: bool, optional
True if you want verbosity (default: True).
"""
if set_verbose:
print(("load MCMC chain from %s" % fchain))
self.flatchain = np.genfromtxt(fchain)
self.flatchain_whole = np.copy(self.flatchain)
self.ndim = self.flatchain.shape[1]
# get HPD
self.get_hpd(set_verbose=set_verbose)
if flogp is not None:
self.logp = np.genfromtxt(flogp)
self.logp_whole = np.copy(self.logp)
self.get_bfp()
    def do_pred(self, p_bst=None, fpred=None, dense=10, set_overwrite=True,
                set_decompose=False):
        """ Calculate the predicted mean and variance of each light curve on a
        densely sampled time axis.
        Parameters
        ----------
        p_bst: array_like, optional
            Input parameters; defaults to `self.bfp` when available
            (default: None).
        fpred: string, optional
            Name of the output file for the predicted light curves, set it to
            None if you do not want output (default: None).
        dense: int, optional
            The factor by which the predicted light curves should be more
            densely sampled than the original data (default: 10).
        set_overwrite: bool, optional
            True if you want to overwrite existing fpred (default: True).
        set_decompose: bool, optional
            True to additionally return the band, line, and non-varying
            components of the second light curve (default: False).
        Returns
        -------
        zydata_pred: LightCurve object
            Predicted light curves packaged as a LightCurve object.  When
            `set_decompose` is True, a list with the decomposed components
            is returned alongside it.
        """
        # Fall back on the best-fit parameters from a previous MCMC run.
        if p_bst is None and hasattr(self, "bfp"):
            p_bst = self.bfp
        # NOTE(review): if neither p_bst nor self.bfp is set, None is passed to
        # lnpostfn_doublephoto_p -- confirm callers run do_mcmc/load_chain first.
        # Index 4 of the posterior return tuple holds qlist (set_retq=True).
        qlist = lnpostfn_doublephoto_p(p_bst, self.zydata, set_retq=True, set_verbose=False)[4]
        sigma, tau, lags, wids, scales = unpackdphotopar(p_bst, self.zydata.nlc,
                                                         hascontlag=True)
        # update qlist
        self.zydata.update_qlist(qlist)
        # initialize PredictDPmap object
        P = PredictDPmap(zydata=self.zydata, sigma=sigma, tau=tau, lags=lags,
                         wids=wids, scales=scales)
        # Prediction grid: `dense` times finer than the continuum sampling,
        # padded by 10% of the observed time span on each side.
        nwant = dense*self.cont_npt
        jwant0 = self.jstart - 0.1*self.rj
        jwant1 = self.jend + 0.1*self.rj
        jwant = np.linspace(jwant0, jwant1, nwant)
        zylclist_pred = []
        for i in range(self.nlc):
            # light-curve ids passed to mve_var are 1-based
            iwant = np.ones(nwant)*(i+1)
            mve, var = P.mve_var(jwant, iwant)
            sig = np.sqrt(var)
            zylclist_pred.append([jwant, mve, sig])
        if set_decompose:
            # Split the second light curve into the scaled band component, the
            # line component, and the non-varying baseline.
            mve_band = (zylclist_pred[0][1] - self.zydata.blist[0])*scales[-1]
            mve_line = (zylclist_pred[1][1] - self.zydata.blist[1])-mve_band
            mve_nonv = jwant * 0.0 + self.zydata.blist[1]
        zydata_pred = LightCurve(zylclist_pred)
        if fpred is not None:
            zydata_pred.save(fpred, set_overwrite=set_overwrite)
        if set_decompose:
            return(zydata_pred, [jwant, mve_band, mve_line, mve_nonv])
        else:
            return(zydata_pred)
|
nye17REPO_NAMEjavelinPATH_START.@javelin_extracted@javelin-master@javelin@lcmodel.py@.PATH_END.py
|
{
"filename": "base.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/graph_vectorstores/base.py",
"type": "Python"
}
|
from __future__ import annotations
import logging
from abc import abstractmethod
from collections.abc import AsyncIterable, Collection, Iterable, Iterator
from typing import (
Any,
ClassVar,
Optional,
Sequence,
cast,
)
from langchain_core._api import beta
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.load import Serializable
from langchain_core.runnables import run_in_executor
from langchain_core.vectorstores import VectorStore, VectorStoreRetriever
from pydantic import Field
from langchain_community.graph_vectorstores.links import METADATA_LINKS_KEY, Link
logger = logging.getLogger(__name__)
def _has_next(iterator: Iterator) -> bool:
"""Checks if the iterator has more elements.
Warning: consumes an element from the iterator"""
sentinel = object()
return next(iterator, sentinel) is not sentinel
@beta()
class Node(Serializable):
    """Node in the GraphVectorStore.
    Edges exist from nodes with an outgoing link to nodes with a matching incoming link.
    For instance two nodes `a` and `b` connected over a hyperlink ``https://some-url``
    would look like:
    .. code-block:: python
        [
            Node(
                id="a",
                text="some text a",
                links= [
                    Link(kind="hyperlink", tag="https://some-url", direction="incoming")
                ],
            ),
            Node(
                id="b",
                text="some text b",
                links= [
                    Link(kind="hyperlink", tag="https://some-url", direction="outgoing")
                ],
            )
        ]
    """
    # Model fields declared with pydantic's Field(); Serializable is presumably
    # a pydantic BaseModel subclass -- confirm against langchain_core.load.
    id: Optional[str] = None
    """Unique ID for the node. Will be generated by the GraphVectorStore if not set."""
    text: str
    """Text contained by the node."""
    metadata: dict = Field(default_factory=dict)
    """Metadata for the node."""
    links: list[Link] = Field(default_factory=list)
    """Links associated with the node."""
def _texts_to_nodes(
    texts: Iterable[str],
    metadatas: Optional[Iterable[dict]],
    ids: Optional[Iterable[str]],
) -> Iterator[Node]:
    """Zip texts with optional metadatas and ids into Node objects.

    Raises ValueError if the iterables do not all have the same length.
    """
    metadata_iter = iter(metadatas) if metadatas else None
    id_iter = iter(ids) if ids else None
    for text in texts:
        if metadata_iter is None:
            metadata = {}
        else:
            try:
                # copy so popping the links key does not mutate caller data
                metadata = next(metadata_iter).copy()
            except StopIteration as exc:
                raise ValueError("texts iterable longer than metadatas") from exc
        if id_iter is None:
            node_id = None
        else:
            try:
                node_id = next(id_iter)
            except StopIteration as exc:
                raise ValueError("texts iterable longer than ids") from exc
        links = metadata.pop(METADATA_LINKS_KEY, [])
        if not isinstance(links, list):
            links = list(links)
        yield Node(
            id=node_id,
            metadata=metadata,
            text=text,
            links=links,
        )
    if id_iter is not None and _has_next(id_iter):
        raise ValueError("ids iterable longer than texts")
    if metadata_iter is not None and _has_next(metadata_iter):
        raise ValueError("metadatas iterable longer than texts")
def _documents_to_nodes(documents: Iterable[Document]) -> Iterator[Node]:
    """Convert Documents to Nodes, lifting links out of the metadata."""
    for document in documents:
        meta = document.metadata.copy()
        raw_links = meta.pop(METADATA_LINKS_KEY, [])
        yield Node(
            id=document.id,
            metadata=meta,
            text=document.page_content,
            links=raw_links if isinstance(raw_links, list) else list(raw_links),
        )
@beta()
def nodes_to_documents(nodes: Iterable[Node]) -> Iterator[Document]:
    """Convert nodes to documents.

    Args:
        nodes: The nodes to convert to documents.
    Returns:
        The documents generated from the nodes.
    """
    for node in nodes:
        doc_metadata = node.metadata.copy()
        # Re-wrap each link as the local `Link` type before storing it back
        # under the well-known metadata key.
        doc_metadata[METADATA_LINKS_KEY] = [
            Link(kind=link.kind, direction=link.direction, tag=link.tag)
            for link in node.links
        ]
        yield Document(
            id=node.id,
            page_content=node.text,
            metadata=doc_metadata,
        )
@beta(message="Added in version 0.3.1 of langchain_community. API subject to change.")
class GraphVectorStore(VectorStore):
    """A hybrid vector-and-graph graph store.
    Document chunks support vector-similarity search as well as edges linking
    chunks based on structural and semantic properties.
    .. versionadded:: 0.3.1
    """
    @abstractmethod
    def add_nodes(
        self,
        nodes: Iterable[Node],
        **kwargs: Any,
    ) -> Iterable[str]:
        """Add nodes to the graph store.
        Args:
            nodes: the nodes to add.
            **kwargs: Additional keyword arguments.
        Returns:
            The ids of the added nodes.
        """
    async def aadd_nodes(
        self,
        nodes: Iterable[Node],
        **kwargs: Any,
    ) -> AsyncIterable[str]:
        """Add nodes to the graph store.
        Args:
            nodes: the nodes to add.
            **kwargs: Additional keyword arguments.
        Returns:
            The ids of the added nodes.
        """
        # Default async implementation: run the sync `add_nodes` in an
        # executor and re-yield its results.  A fresh sentinel object is used
        # so exhaustion can be told apart from any legitimate yielded value.
        iterator = iter(await run_in_executor(None, self.add_nodes, nodes, **kwargs))
        done = object()
        while True:
            doc = await run_in_executor(None, next, iterator, done)
            if doc is done:
                break
            yield doc  # type: ignore[misc]
    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        *,
        ids: Optional[Iterable[str]] = None,
        **kwargs: Any,
    ) -> list[str]:
        """Run more texts through the embeddings and add to the vector store.
        The Links present in the metadata field `links` will be extracted to create
        the `Node` links.
        Eg if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
        function call would look like:
        .. code-block:: python
            store.add_texts(
                ids=["a", "b"],
                texts=["some text a", "some text b"],
                metadatas=[
                    {
                        "links": [
                            Link.incoming(kind="hyperlink", tag="https://some-url")
                        ]
                    },
                    {
                        "links": [
                            Link.outgoing(kind="hyperlink", tag="https://some-url")
                        ]
                    },
                ],
            )
        Args:
            texts: Iterable of strings to add to the vector store.
            metadatas: Optional list of metadatas associated with the texts.
                The metadata key `links` shall be an iterable of
                :py:class:`~langchain_community.graph_vectorstores.links.Link`.
            ids: Optional list of IDs associated with the texts.
            **kwargs: vector store specific parameters.
        Returns:
            List of ids from adding the texts into the vector store.
        """
        nodes = _texts_to_nodes(texts, metadatas, ids)
        return list(self.add_nodes(nodes, **kwargs))
    async def aadd_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        *,
        ids: Optional[Iterable[str]] = None,
        **kwargs: Any,
    ) -> list[str]:
        """Run more texts through the embeddings and add to the vector store.
        The Links present in the metadata field `links` will be extracted to create
        the `Node` links.
        Eg if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
        function call would look like:
        .. code-block:: python
            await store.aadd_texts(
                ids=["a", "b"],
                texts=["some text a", "some text b"],
                metadatas=[
                    {
                        "links": [
                            Link.incoming(kind="hyperlink", tag="https://some-url")
                        ]
                    },
                    {
                        "links": [
                            Link.outgoing(kind="hyperlink", tag="https://some-url")
                        ]
                    },
                ],
            )
        Args:
            texts: Iterable of strings to add to the vector store.
            metadatas: Optional list of metadatas associated with the texts.
                The metadata key `links` shall be an iterable of
                :py:class:`~langchain_community.graph_vectorstores.links.Link`.
            ids: Optional list of IDs associated with the texts.
            **kwargs: vector store specific parameters.
        Returns:
            List of ids from adding the texts into the vector store.
        """
        nodes = _texts_to_nodes(texts, metadatas, ids)
        return [_id async for _id in self.aadd_nodes(nodes, **kwargs)]
    def add_documents(
        self,
        documents: Iterable[Document],
        **kwargs: Any,
    ) -> list[str]:
        """Run more documents through the embeddings and add to the vector store.
        The Links present in the document metadata field `links` will be extracted to
        create the `Node` links.
        Eg if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
        function call would look like:
        .. code-block:: python
            store.add_documents(
                [
                    Document(
                        id="a",
                        page_content="some text a",
                        metadata={
                            "links": [
                                Link.incoming(kind="hyperlink", tag="http://some-url")
                            ]
                        }
                    ),
                    Document(
                        id="b",
                        page_content="some text b",
                        metadata={
                            "links": [
                                Link.outgoing(kind="hyperlink", tag="http://some-url")
                            ]
                        }
                    ),
                ]
            )
        Args:
            documents: Documents to add to the vector store.
                The document's metadata key `links` shall be an iterable of
                :py:class:`~langchain_community.graph_vectorstores.links.Link`.
        Returns:
            List of IDs of the added texts.
        """
        nodes = _documents_to_nodes(documents)
        return list(self.add_nodes(nodes, **kwargs))
    async def aadd_documents(
        self,
        documents: Iterable[Document],
        **kwargs: Any,
    ) -> list[str]:
        """Run more documents through the embeddings and add to the vector store.
        The Links present in the document metadata field `links` will be extracted to
        create the `Node` links.
        Eg if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
        function call would look like:
        .. code-block:: python
            store.add_documents(
                [
                    Document(
                        id="a",
                        page_content="some text a",
                        metadata={
                            "links": [
                                Link.incoming(kind="hyperlink", tag="http://some-url")
                            ]
                        }
                    ),
                    Document(
                        id="b",
                        page_content="some text b",
                        metadata={
                            "links": [
                                Link.outgoing(kind="hyperlink", tag="http://some-url")
                            ]
                        }
                    ),
                ]
            )
        Args:
            documents: Documents to add to the vector store.
                The document's metadata key `links` shall be an iterable of
                :py:class:`~langchain_community.graph_vectorstores.links.Link`.
        Returns:
            List of IDs of the added texts.
        """
        nodes = _documents_to_nodes(documents)
        return [_id async for _id in self.aadd_nodes(nodes, **kwargs)]
    @abstractmethod
    def traversal_search(
        self,
        query: str,
        *,
        k: int = 4,
        depth: int = 1,
        filter: dict[str, Any] | None = None,  # noqa: A002
        **kwargs: Any,
    ) -> Iterable[Document]:
        """Retrieve documents from traversing this graph store.
        First, `k` nodes are retrieved using a search for each `query` string.
        Then, additional nodes are discovered up to the given `depth` from those
        starting nodes.
        Args:
            query: The query string.
            k: The number of Documents to return from the initial search.
                Defaults to 4. Applies to each of the query strings.
            depth: The maximum depth of edges to traverse. Defaults to 1.
            filter: Optional metadata to filter the results.
            **kwargs: Additional keyword arguments.
        Returns:
            Collection of retrieved documents.
        """
    async def atraversal_search(
        self,
        query: str,
        *,
        k: int = 4,
        depth: int = 1,
        filter: dict[str, Any] | None = None,  # noqa: A002
        **kwargs: Any,
    ) -> AsyncIterable[Document]:
        """Retrieve documents from traversing this graph store.
        First, `k` nodes are retrieved using a search for each `query` string.
        Then, additional nodes are discovered up to the given `depth` from those
        starting nodes.
        Args:
            query: The query string.
            k: The number of Documents to return from the initial search.
                Defaults to 4. Applies to each of the query strings.
            depth: The maximum depth of edges to traverse. Defaults to 1.
            filter: Optional metadata to filter the results.
            **kwargs: Additional keyword arguments.
        Returns:
            Collection of retrieved documents.
        """
        # Default async implementation: delegate to the sync traversal search
        # in an executor and stream results via the sentinel pattern.
        iterator = iter(
            await run_in_executor(
                None,
                self.traversal_search,
                query,
                k=k,
                depth=depth,
                filter=filter,
                **kwargs,
            )
        )
        done = object()
        while True:
            doc = await run_in_executor(None, next, iterator, done)
            if doc is done:
                break
            yield doc  # type: ignore[misc]
    @abstractmethod
    def mmr_traversal_search(
        self,
        query: str,
        *,
        initial_roots: Sequence[str] = (),
        k: int = 4,
        depth: int = 2,
        fetch_k: int = 100,
        adjacent_k: int = 10,
        lambda_mult: float = 0.5,
        score_threshold: float = float("-inf"),
        filter: dict[str, Any] | None = None,  # noqa: A002
        **kwargs: Any,
    ) -> Iterable[Document]:
        """Retrieve documents from this graph store using MMR-traversal.
        This strategy first retrieves the top `fetch_k` results by similarity to
        the question. It then selects the top `k` results based on
        maximum-marginal relevance using the given `lambda_mult`.
        At each step, it considers the (remaining) documents from `fetch_k` as
        well as any documents connected by edges to a selected document
        retrieved based on similarity (a "root").
        Args:
            query: The query string to search for.
            initial_roots: Optional list of document IDs to use for initializing search.
                The top `adjacent_k` nodes adjacent to each initial root will be
                included in the set of initial candidates. To fetch only in the
                neighborhood of these nodes, set `fetch_k = 0`.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch via similarity.
                Defaults to 100.
            adjacent_k: Number of adjacent Documents to fetch.
                Defaults to 10.
            depth: Maximum depth of a node (number of edges) from a node
                retrieved via similarity. Defaults to 2.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding to maximum
                diversity and 1 to minimum diversity. Defaults to 0.5.
            score_threshold: Only documents with a score greater than or equal
                this threshold will be chosen. Defaults to negative infinity.
            filter: Optional metadata to filter the results.
            **kwargs: Additional keyword arguments.
        """
    async def ammr_traversal_search(
        self,
        query: str,
        *,
        initial_roots: Sequence[str] = (),
        k: int = 4,
        depth: int = 2,
        fetch_k: int = 100,
        adjacent_k: int = 10,
        lambda_mult: float = 0.5,
        score_threshold: float = float("-inf"),
        filter: dict[str, Any] | None = None,  # noqa: A002
        **kwargs: Any,
    ) -> AsyncIterable[Document]:
        """Retrieve documents from this graph store using MMR-traversal.
        This strategy first retrieves the top `fetch_k` results by similarity to
        the question. It then selects the top `k` results based on
        maximum-marginal relevance using the given `lambda_mult`.
        At each step, it considers the (remaining) documents from `fetch_k` as
        well as any documents connected by edges to a selected document
        retrieved based on similarity (a "root").
        Args:
            query: The query string to search for.
            initial_roots: Optional list of document IDs to use for initializing search.
                The top `adjacent_k` nodes adjacent to each initial root will be
                included in the set of initial candidates. To fetch only in the
                neighborhood of these nodes, set `fetch_k = 0`.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch via similarity.
                Defaults to 100.
            adjacent_k: Number of adjacent Documents to fetch.
                Defaults to 10.
            depth: Maximum depth of a node (number of edges) from a node
                retrieved via similarity. Defaults to 2.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding to maximum
                diversity and 1 to minimum diversity. Defaults to 0.5.
            score_threshold: Only documents with a score greater than or equal
                this threshold will be chosen. Defaults to negative infinity.
            filter: Optional metadata to filter the results.
            **kwargs: Additional keyword arguments.
        """
        # Default async implementation: delegate to the sync MMR traversal in
        # an executor and stream results via the sentinel pattern.
        iterator = iter(
            await run_in_executor(
                None,
                self.mmr_traversal_search,
                query,
                initial_roots=initial_roots,
                k=k,
                fetch_k=fetch_k,
                adjacent_k=adjacent_k,
                depth=depth,
                lambda_mult=lambda_mult,
                score_threshold=score_threshold,
                filter=filter,
                **kwargs,
            )
        )
        done = object()
        while True:
            doc = await run_in_executor(None, next, iterator, done)
            if doc is done:
                break
            yield doc  # type: ignore[misc]
    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> list[Document]:
        """Return the `k` documents most similar to the query.
        Implemented as a traversal search with depth 0 (no edge expansion).
        """
        # NOTE(review): **kwargs (e.g. `filter`) are accepted but not forwarded
        # to `traversal_search` -- confirm whether this is intentional.
        return list(self.traversal_search(query, k=k, depth=0))
    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> list[Document]:
        """Return documents selected by maximal marginal relevance.
        Implemented as an MMR traversal with depth 0 (no edge expansion).
        """
        if kwargs.get("depth", 0) > 0:
            logger.warning(
                "'mmr' search started with depth > 0. "
                "Maybe you meant to do a 'mmr_traversal' search?"
            )
        return list(
            self.mmr_traversal_search(
                query, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, depth=0
            )
        )
    async def asimilarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> list[Document]:
        """Async version of `similarity_search` (traversal with depth 0)."""
        return [doc async for doc in self.atraversal_search(query, k=k, depth=0)]
    def search(self, query: str, search_type: str, **kwargs: Any) -> list[Document]:
        """Dispatch to the search method named by `search_type`."""
        if search_type == "similarity":
            return self.similarity_search(query, **kwargs)
        elif search_type == "similarity_score_threshold":
            docs_and_similarities = self.similarity_search_with_relevance_scores(
                query, **kwargs
            )
            return [doc for doc, _ in docs_and_similarities]
        elif search_type == "mmr":
            return self.max_marginal_relevance_search(query, **kwargs)
        elif search_type == "traversal":
            return list(self.traversal_search(query, **kwargs))
        elif search_type == "mmr_traversal":
            return list(self.mmr_traversal_search(query, **kwargs))
        else:
            raise ValueError(
                f"search_type of {search_type} not allowed. Expected "
                "search_type to be 'similarity', 'similarity_score_threshold', "
                "'mmr', 'traversal', or 'mmr_traversal'."
            )
    async def asearch(
        self, query: str, search_type: str, **kwargs: Any
    ) -> list[Document]:
        """Async dispatch to the search method named by `search_type`."""
        if search_type == "similarity":
            return await self.asimilarity_search(query, **kwargs)
        elif search_type == "similarity_score_threshold":
            docs_and_similarities = await self.asimilarity_search_with_relevance_scores(
                query, **kwargs
            )
            return [doc for doc, _ in docs_and_similarities]
        elif search_type == "mmr":
            return await self.amax_marginal_relevance_search(query, **kwargs)
        elif search_type == "traversal":
            return [doc async for doc in self.atraversal_search(query, **kwargs)]
        elif search_type == "mmr_traversal":
            return [doc async for doc in self.ammr_traversal_search(query, **kwargs)]
        else:
            raise ValueError(
                f"search_type of {search_type} not allowed. Expected "
                "search_type to be 'similarity', 'similarity_score_threshold', "
                "'mmr', 'traversal', or 'mmr_traversal'."
            )
    def as_retriever(self, **kwargs: Any) -> GraphVectorStoreRetriever:
        """Return GraphVectorStoreRetriever initialized from this GraphVectorStore.
        Args:
            **kwargs: Keyword arguments to pass to the search function.
                Can include:
                - search_type (Optional[str]): Defines the type of search that
                    the Retriever should perform.
                    Can be ``traversal`` (default), ``similarity``, ``mmr``,
                    ``mmr_traversal``, or ``similarity_score_threshold``.
                - search_kwargs (Optional[Dict]): Keyword arguments to pass to the
                    search function. Can include things like:
                    - k(int): Amount of documents to return (Default: 4).
                    - depth(int): The maximum depth of edges to traverse (Default: 1).
                        Only applies to search_type: ``traversal`` and ``mmr_traversal``.
                    - score_threshold(float): Minimum relevance threshold
                        for similarity_score_threshold.
                    - fetch_k(int): Amount of documents to pass to MMR algorithm
                        (Default: 20).
                    - lambda_mult(float): Diversity of results returned by MMR;
                        1 for minimum diversity and 0 for maximum. (Default: 0.5).
        Returns:
            Retriever for this GraphVectorStore.
        Examples:
        .. code-block:: python
            # Retrieve documents traversing edges
            docsearch.as_retriever(
                search_type="traversal",
                search_kwargs={'k': 6, 'depth': 2}
            )
            # Retrieve documents with higher diversity
            # Useful if your dataset has many similar documents
            docsearch.as_retriever(
                search_type="mmr_traversal",
                search_kwargs={'k': 6, 'lambda_mult': 0.25, 'depth': 2}
            )
            # Fetch more documents for the MMR algorithm to consider
            # But only return the top 5
            docsearch.as_retriever(
                search_type="mmr_traversal",
                search_kwargs={'k': 5, 'fetch_k': 50, 'depth': 2}
            )
            # Only retrieve documents that have a relevance score
            # Above a certain threshold
            docsearch.as_retriever(
                search_type="similarity_score_threshold",
                search_kwargs={'score_threshold': 0.8}
            )
            # Only get the single most similar document from the dataset
            docsearch.as_retriever(search_kwargs={'k': 1})
        """
        return GraphVectorStoreRetriever(vectorstore=self, **kwargs)
@beta(message="Added in version 0.3.1 of langchain_community. API subject to change.")
class GraphVectorStoreRetriever(VectorStoreRetriever):
    """Retriever for GraphVectorStore.
    A graph vector store retriever is a retriever that uses a graph vector store to
    retrieve documents.
    It is similar to a vector store retriever, except that it uses both vector
    similarity and graph connections to retrieve documents.
    It uses the search methods implemented by a graph vector store, like traversal
    search and MMR traversal search, to query the texts in the graph vector store.
    Example::
        store = CassandraGraphVectorStore(...)
        retriever = store.as_retriever()
        retriever.invoke("What is ...")
    .. seealso::
        :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
    How to use a graph vector store as a retriever
    ==============================================
    Creating a retriever from a graph vector store
    ----------------------------------------------
    You can build a retriever from a graph vector store using its
    :meth:`~langchain_community.graph_vectorstores.base.GraphVectorStore.as_retriever`
    method.
    First we instantiate a graph vector store.
    We will use a store backed by Cassandra
    :class:`~langchain_community.graph_vectorstores.cassandra.CassandraGraphVectorStore`
    graph vector store::
        from langchain_community.document_loaders import TextLoader
        from langchain_community.graph_vectorstores import CassandraGraphVectorStore
        from langchain_community.graph_vectorstores.extractors import (
            KeybertLinkExtractor,
            LinkExtractorTransformer,
        )
        from langchain_openai import OpenAIEmbeddings
        from langchain_text_splitters import CharacterTextSplitter
        loader = TextLoader("state_of_the_union.txt")
        documents = loader.load()
        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        texts = text_splitter.split_documents(documents)
        pipeline = LinkExtractorTransformer([KeybertLinkExtractor()])
        pipeline.transform_documents(texts)
        embeddings = OpenAIEmbeddings()
        graph_vectorstore = CassandraGraphVectorStore.from_documents(texts, embeddings)
    We can then instantiate a retriever::
        retriever = graph_vectorstore.as_retriever()
    This creates a retriever (specifically a ``GraphVectorStoreRetriever``), which we
    can use in the usual way::
        docs = retriever.invoke("what did the president say about ketanji brown jackson?")
    Maximum marginal relevance traversal retrieval
    ----------------------------------------------
    By default, the graph vector store retriever uses similarity search, then expands
    the retrieved set by following a fixed number of graph edges.
    If the underlying graph vector store supports maximum marginal relevance traversal,
    you can specify that as the search type.
    MMR-traversal is a retrieval method combining MMR and graph traversal.
    The strategy first retrieves the top fetch_k results by similarity to the question.
    It then iteratively expands the set of fetched documents by following adjacent_k
    graph edges and selects the top k results based on maximum-marginal relevance using
    the given ``lambda_mult``::
        retriever = graph_vectorstore.as_retriever(search_type="mmr_traversal")
    Passing search parameters
    -------------------------
    We can pass parameters to the underlying graph vector store's search methods using
    ``search_kwargs``.
    Specifying graph traversal depth
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    For example, we can set the graph traversal depth to only return documents
    reachable through a given number of graph edges::
        retriever = graph_vectorstore.as_retriever(search_kwargs={"depth": 3})
    Specifying MMR parameters
    ^^^^^^^^^^^^^^^^^^^^^^^^^
    When using search type ``mmr_traversal``, several parameters of the MMR algorithm
    can be configured.
    The ``fetch_k`` parameter determines how many documents are fetched using vector
    similarity and ``adjacent_k`` parameter determines how many documents are fetched
    using graph edges.
    The ``lambda_mult`` parameter controls how the MMR re-ranking weights similarity to
    the query string vs diversity among the retrieved documents as fetched documents
    are selected for the set of ``k`` final results::
        retriever = graph_vectorstore.as_retriever(
            search_type="mmr",
            search_kwargs={"fetch_k": 20, "adjacent_k": 20, "lambda_mult": 0.25},
        )
    Specifying top k
    ^^^^^^^^^^^^^^^^
    We can also limit the number of documents ``k`` returned by the retriever.
    Note that if ``depth`` is greater than zero, the retriever may return more documents
    than is specified by ``k``, since both the original ``k`` documents retrieved using
    vector similarity and any documents connected via graph edges will be returned::
        retriever = graph_vectorstore.as_retriever(search_kwargs={"k": 1})
    Similarity score threshold retrieval
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    For example, we can set a similarity score threshold and only return documents with
    a score above that threshold::
        retriever = graph_vectorstore.as_retriever(search_kwargs={"score_threshold": 0.5})
    """  # noqa: E501
    vectorstore: VectorStore
    """VectorStore to use for retrieval."""
    search_type: str = "traversal"
    """Type of search to perform. Defaults to "traversal"."""
    allowed_search_types: ClassVar[Collection[str]] = (
        "similarity",
        "similarity_score_threshold",
        "mmr",
        "traversal",
        "mmr_traversal",
    )
    @property
    def graph_vectorstore(self) -> GraphVectorStore:
        """The underlying vector store, viewed as a GraphVectorStore."""
        return cast(GraphVectorStore, self.vectorstore)
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        # Graph-specific search types are handled here; everything else is
        # delegated to the base VectorStoreRetriever implementation.
        if self.search_type == "traversal":
            return list(
                self.graph_vectorstore.traversal_search(query, **self.search_kwargs)
            )
        elif self.search_type == "mmr_traversal":
            return list(
                self.graph_vectorstore.mmr_traversal_search(query, **self.search_kwargs)
            )
        else:
            return super()._get_relevant_documents(query, run_manager=run_manager)
    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> list[Document]:
        # Async mirror of _get_relevant_documents.
        if self.search_type == "traversal":
            return [
                doc
                async for doc in self.graph_vectorstore.atraversal_search(
                    query, **self.search_kwargs
                )
            ]
        elif self.search_type == "mmr_traversal":
            return [
                doc
                async for doc in self.graph_vectorstore.ammr_traversal_search(
                    query, **self.search_kwargs
                )
            ]
        else:
            return await super()._aget_relevant_documents(
                query, run_manager=run_manager
            )
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@graph_vectorstores@base.py@.PATH_END.py
|
{
"filename": "tfsa-2021-111.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2021-111.md",
"type": "Markdown"
}
|
## TFSA-2021-111: Null pointer dereference in `CompressElement`
### CVE Number
CVE-2021-37637
### Impact
It is possible to trigger a null pointer dereference in TensorFlow by passing an
invalid input to `tf.raw_ops.CompressElement`:
```python
import tensorflow as tf
tf.raw_ops.CompressElement(components=[[]])
```
The
[implementation](https://github.com/tensorflow/tensorflow/blob/47a06f40411a69c99f381495f490536972152ac0/tensorflow/core/data/compression_utils.cc#L34)
was accessing the size of a buffer obtained from the return of a separate
function call before validating that said buffer is valid.
### Patches
We have patched the issue in GitHub commit
[5dc7f6981fdaf74c8c5be41f393df705841fb7c5](https://github.com/tensorflow/tensorflow/commit/5dc7f6981fdaf74c8c5be41f393df705841fb7c5).
The fix will be included in TensorFlow 2.6.0. We will also cherrypick this
commit on TensorFlow 2.5.1, TensorFlow 2.4.3, and TensorFlow 2.3.4, as these are
also affected and still in supported range.
### For more information
Please consult [our security
guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for
more information regarding the security model and how to contact us with issues
and questions.
### Attribution
This vulnerability has been reported by members of the Aivul Team from Qihoo
360. Concurrently, it was resolved in `master` branch as it was also discovered
internally and fixed before the report was handled.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2021-111.md@.PATH_END.py
|
{
"filename": "_hovertext.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/_hovertext.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `hovertext` property of `scatterpolargl`."""

    def __init__(self, plotly_name="hovertext", parent_name="scatterpolargl", **kwargs):
        # Fill in the schema defaults unless the caller overrode them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "style")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolargl@_hovertext.py@.PATH_END.py
|
{
"filename": "StatsStart.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_support/metadata_schema_py_generated/StatsStart.md",
"type": "Markdown"
}
|
page_type: reference
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_support.metadata_schema_py_generated.StatsStart" />
<meta itemprop="path" content="Stable" />
</div>
# tflite_support.metadata_schema_py_generated.StatsStart
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L1865-L1866">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_support.metadata_schema_py_generated.StatsStart(
builder
)
</code></pre>
<!-- Placeholder for "Used in" -->
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_support@metadata_schema_py_generated@StatsStart.md@.PATH_END.py
|
{
"filename": "test_cpu_features.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_core/tests/test_cpu_features.py",
"type": "Python"
}
|
import os
import re
import sys
import pathlib
import platform
import subprocess
import pytest
from numpy._core._multiarray_umath import (
__cpu_features__,
__cpu_baseline__,
__cpu_dispatch__,
)
import numpy as np
def assert_features_equal(actual, desired, fname):
    """Assert that two feature values match (as strings); on mismatch, raise
    an AssertionError enriched with CPU-detection debug information."""
    __tracebackhide__ = True  # Hide traceback for py.test
    actual_str = str(actual)
    desired_str = str(desired)
    if actual_str == desired_str:
        return
    # Mismatch: gather extra context so CI failures are diagnosable.
    npy_detect = str(__cpu_features__).replace("'", "")
    try:
        with open("/proc/cpuinfo") as fd:
            sys_cpuinfo = fd.read(2048)
    except Exception as err:
        sys_cpuinfo = str(err)
    try:
        import subprocess
        raw_auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
        sys_auxv = raw_auxv.decode()
    except Exception as err:
        sys_auxv = str(err)
    import textwrap
    error_report = textwrap.indent(
"""
###########################################
### Extra debugging information
###########################################
-------------------------------------------
--- NumPy Detections
-------------------------------------------
%s
-------------------------------------------
--- SYS / CPUINFO
-------------------------------------------
%s....
-------------------------------------------
--- SYS / AUXV
-------------------------------------------
%s
""" % (npy_detect, sys_cpuinfo, sys_auxv), prefix='\r')
    raise AssertionError((
        "Failure Detection\n"
        " NAME: '%s'\n"
        " ACTUAL: %s\n"
        " DESIRED: %s\n"
        "%s"
    ) % (fname, actual_str, desired_str, error_report))
def _text_to_list(txt):
    """Parse a printed Python list (e.g. ``"['A', 'B']"``) back to a list.

    Returns ``None`` when the rendered list was empty.
    """
    stripped = txt.strip("][\n").replace("'", "")
    items = stripped.split(', ')
    if items[0] == "":
        return None
    return items
class AbstractTest:
    """Base class comparing kernel-reported CPU flags with NumPy's detection.

    Subclasses supply the architecture-specific ``features`` list, the
    optional ``features_groups``/``features_map`` tables, and implement
    ``load_flags`` to populate ``features_flags`` from the host.
    """
    features = []
    features_groups = {}
    features_map = {}
    features_flags = set()

    def load_flags(self):
        """Hook for subclasses to fill ``features_flags``."""
        pass

    def test_features(self):
        """Check every feature group and feature against NumPy's detection."""
        self.load_flags()
        for group_name, members in self.features_groups.items():
            member_results = [self.cpu_have(member) for member in members]
            assert_features_equal(
                __cpu_features__.get(group_name), all(member_results), group_name)
        for name in self.features:
            kernel_has = self.cpu_have(name)
            numpy_has = __cpu_features__.get(name)
            assert_features_equal(numpy_has, kernel_has, name)

    def cpu_have(self, feature_name):
        """Return True when the kernel reports *feature_name* or an alias."""
        aliases = self.features_map.get(feature_name, feature_name)
        if isinstance(aliases, str):
            return aliases in self.features_flags
        # A tuple/list of aliases: any one of them counts as support.
        return any(alias in self.features_flags for alias in aliases)

    def load_flags_cpuinfo(self, magic_key):
        """Populate flags from the /proc/cpuinfo line(s) named *magic_key*."""
        self.features_flags = self.get_cpuinfo_item(magic_key)

    def get_cpuinfo_item(self, magic_key):
        """Collect the upper-cased tokens of every matching cpuinfo line."""
        values = set()
        with open('/proc/cpuinfo') as fd:
            for line in fd:
                if not line.startswith(magic_key):
                    continue
                parts = [s.strip() for s in line.split(':', 1)]
                if len(parts) == 2:
                    values |= set(parts[1].upper().split())
        return values

    def load_flags_auxv(self):
        """Populate flags from AT_HWCAP/AT_HWCAP2 in the ELF auxiliary vector."""
        # LD_SHOW_AUXV=1 makes the loader print the auxiliary vector.
        auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
        for entry in auxv.split(b'\n'):
            # Matches both AT_HWCAP and AT_HWCAP2 lines.
            if not entry.startswith(b"AT_HWCAP"):
                continue
            key_value = [s.strip() for s in entry.split(b':', 1)]
            if len(key_value) == 2:
                self.features_flags = self.features_flags.union(
                    key_value[1].upper().decode().split()
                )
@pytest.mark.skipif(
    sys.platform == 'emscripten',
    reason= (
        "The subprocess module is not available on WASM platforms and"
        " therefore this test class cannot be properly executed."
    ),
)
class TestEnvPrivation:
    """Tests for runtime CPU-feature selection via environment variables.

    Each test launches ``SCRIPT`` in a subprocess with a tailored
    environment and inspects either which dispatchable features NumPy
    enables at import time, or the error message the import raises.
    """
    # Directory containing this test file; the helper script runs from here.
    cwd = pathlib.Path(__file__).parent.resolve()
    env = os.environ.copy()
    # Strip any pre-existing overrides so they cannot leak into the
    # subprocess environments built by the tests below.
    # NOTE(review): these pops mutate os.environ at class-creation time.
    _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None)
    _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None)
    SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True)
    # Dispatchable features that this machine does not support.
    unavailable_feats = [
        feat for feat in __cpu_dispatch__ if not __cpu_features__[feat]
    ]
    UNAVAILABLE_FEAT = (
        None if len(unavailable_feats) == 0
        else unavailable_feats[0]
    )
    # One feature compiled into the baseline, if the baseline is non-empty.
    BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0]
    # Helper script: prints the dispatched (non-baseline) features that are
    # actually enabled when NumPy is imported in the subprocess.
    SCRIPT = """
def main():
    from numpy._core._multiarray_umath import (
        __cpu_features__,
        __cpu_dispatch__
    )
    detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]]
    print(detected)
if __name__ == "__main__":
    main()
"""
    @pytest.fixture(autouse=True)
    def setup_class(self, tmp_path_factory):
        """Write the helper script to a fresh temp directory for each test."""
        file = tmp_path_factory.mktemp("runtime_test_script")
        file /= "_runtime_detect.py"
        file.write_text(self.SCRIPT)
        self.file = file
        return

    def _run(self):
        """Run the helper script with the (possibly modified) environment."""
        return subprocess.run(
            [sys.executable, self.file],
            env=self.env,
            **self.SUBPROCESS_ARGS,
        )

    # Helper function mimicking pytest.raises for subprocess call
    def _expect_error(
        self,
        msg,
        err_type,
        no_error_msg="Failed to generate error"
    ):
        """Run the script and assert it fails with *err_type* matching *msg*."""
        try:
            self._run()
        except subprocess.CalledProcessError as e:
            # `msg` and `err_type` are regex patterns searched in stderr.
            assertion_message = f"Expected: {msg}\nGot: {e.stderr}"
            assert re.search(msg, e.stderr), assertion_message

            assertion_message = (
                f"Expected error of type: {err_type}; see full "
                f"error:\n{e.stderr}"
            )
            assert re.search(err_type, e.stderr), assertion_message
        else:
            assert False, no_error_msg

    def setup_method(self):
        """Ensure that the environment is reset before every test method."""
        self.env = os.environ.copy()
        return

    def test_runtime_feature_selection(self):
        """
        Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the
        features exactly specified are dispatched.
        """
        # Capture runtime-enabled features
        out = self._run()
        non_baseline_features = _text_to_list(out.stdout)

        if non_baseline_features is None:
            pytest.skip(
                "No dispatchable features outside of baseline detected."
            )
        feature = non_baseline_features[0]

        # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
        # specified
        self.env['NPY_ENABLE_CPU_FEATURES'] = feature
        out = self._run()
        enabled_features = _text_to_list(out.stdout)

        # Ensure that only one feature is enabled, and it is exactly the one
        # specified by `NPY_ENABLE_CPU_FEATURES`
        assert set(enabled_features) == {feature}

        if len(non_baseline_features) < 2:
            pytest.skip("Only one non-baseline feature detected.")
        # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
        # specified
        self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features)
        out = self._run()
        enabled_features = _text_to_list(out.stdout)

        # Ensure that both features are enabled, and they are exactly the ones
        # specified by `NPY_ENABLE_CPU_FEATURES`
        assert set(enabled_features) == set(non_baseline_features)
        return

    @pytest.mark.parametrize("enabled, disabled",
    [
        ("feature", "feature"),
        ("feature", "same"),
    ])
    def test_both_enable_disable_set(self, enabled, disabled):
        """
        Ensure that when both environment variables are set then an
        ImportError is thrown
        """
        self.env['NPY_ENABLE_CPU_FEATURES'] = enabled
        self.env['NPY_DISABLE_CPU_FEATURES'] = disabled
        msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES"
        err_type = "ImportError"
        self._expect_error(msg, err_type)

    @pytest.mark.skipif(
        not __cpu_dispatch__,
        reason=(
            "NPY_*_CPU_FEATURES only parsed if "
            "`__cpu_dispatch__` is non-empty"
        )
    )
    @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"])
    def test_variable_too_long(self, action):
        """
        Test that an error is thrown if the environment variables are too long
        to be processed. Current limit is 1024, but this may change later.
        """
        MAX_VAR_LENGTH = 1024
        # Actual length is MAX_VAR_LENGTH + 1 due to null-termination
        self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH
        msg = (
            f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is "
            f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted"
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

    @pytest.mark.skipif(
        not __cpu_dispatch__,
        reason=(
            "NPY_*_CPU_FEATURES only parsed if "
            "`__cpu_dispatch__` is non-empty"
        )
    )
    def test_impossible_feature_disable(self):
        """
        Test that a RuntimeError is thrown if an impossible feature-disabling
        request is made. This includes disabling a baseline feature.
        """

        if self.BASELINE_FEAT is None:
            pytest.skip("There are no unavailable features to test with")
        bad_feature = self.BASELINE_FEAT
        self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature
        msg = (
            f"You cannot disable CPU feature '{bad_feature}', since it is "
            "part of the baseline optimizations"
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

    def test_impossible_feature_enable(self):
        """
        Test that a RuntimeError is thrown if an impossible feature-enabling
        request is made. This includes enabling a feature not supported by the
        machine, or disabling a baseline optimization.
        """

        if self.UNAVAILABLE_FEAT is None:
            pytest.skip("There are no unavailable features to test with")
        bad_feature = self.UNAVAILABLE_FEAT
        self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature
        msg = (
            f"You cannot enable CPU features \\({bad_feature}\\), since "
            "they are not supported by your machine."
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

        # Ensure that it fails even when providing garbage in addition
        feats = f"{bad_feature}, Foobar"
        self.env['NPY_ENABLE_CPU_FEATURES'] = feats
        msg = (
            f"You cannot enable CPU features \\({bad_feature}\\), since they "
            "are not supported by your machine."
        )
        self._expect_error(msg, err_type)

        if self.BASELINE_FEAT is not None:
            # Ensure that only the bad feature gets reported
            feats = f"{bad_feature}, {self.BASELINE_FEAT}"
            self.env['NPY_ENABLE_CPU_FEATURES'] = feats
            msg = (
                f"You cannot enable CPU features \\({bad_feature}\\), since "
                "they are not supported by your machine."
            )
            self._expect_error(msg, err_type)
# Host identification used by the skipif guards on the per-arch test classes.
machine = platform.machine()
is_linux = sys.platform.startswith('linux')
is_cygwin = sys.platform.startswith('cygwin')
# Truthy (a match object) on x86/x86-64 machines, None otherwise.
is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
@pytest.mark.skipif(
    not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
)
class Test_X86_Features(AbstractTest):
    """Compare NumPy's x86 feature detection against /proc/cpuinfo flags."""
    features = [
        "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
        "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
        "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
        "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
        "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16",
    ]
    # CPU-generation groups: a group is reported as enabled only when every
    # member feature is present (see AbstractTest.test_features).
    features_groups = dict(
        AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
        AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
                      "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
        AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
        AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
        AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
                      "AVX512VBMI"],
        AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
                      "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
        AVX512_SPR = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ",
                      "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI",
                      "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ",
                      "AVX512FP16"],
    )
    # NumPy feature name -> /proc/cpuinfo flag name, where the two differ.
    features_map = dict(
        SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
        AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
        AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
        AVX512FP16="AVX512_FP16",
    )
    def load_flags(self):
        # x86 kernels expose the feature list on the "flags" cpuinfo line.
        self.load_flags_cpuinfo("flags")
# Truthy only on 64-bit POWER/PowerPC machines.
is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
class Test_POWER_Features(AbstractTest):
    """Compare NumPy's POWER VSX detection against the ELF auxiliary vector."""
    features = ["VSX", "VSX2", "VSX3", "VSX4"]
    # NumPy feature name -> kernel hwcap token.
    features_map = {"VSX2": "ARCH_2_07", "VSX3": "ARCH_3_00", "VSX4": "ARCH_3_1"}

    def load_flags(self):
        self.load_flags_auxv()
# Truthy only on IBM Z (s390x) machines.
is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_zarch,
                    reason="Only for Linux and IBM Z")
class Test_ZARCH_Features(AbstractTest):
    """Compare NumPy's IBM Z vector-facility detection against hwcaps."""
    features = ["VX", "VXE", "VXE2"]

    def load_flags(self):
        self.load_flags_auxv()
# Truthy only on 32-bit ARM or AArch64 machines.
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
class Test_ARM_Features(AbstractTest):
    """Compare NumPy's ARM feature detection against /proc/cpuinfo."""
    features = [
        "SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
    ]
    features_groups = {
        "NEON_FP16": ["NEON", "HALF"],
        "NEON_VFPV4": ["NEON", "VFPV4"],
    }

    def load_flags(self):
        self.load_flags_cpuinfo("Features")
        reported_arch = self.get_cpuinfo_item("CPU architecture")
        # An aarch64 kernel may be mounted as a virtual filesystem under an
        # armv7 rootfs; treat any reported architecture above 7 as ARMv8.
        if reported_arch:
            is_rootfs_v8 = int('0' + next(iter(reported_arch))) > 7
        else:
            is_rootfs_v8 = 0
        if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
            # On ARMv8, NEON/half-precision/VFPv4 support is implied by ASIMD.
            self.features_map = {
                "NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD"
            }
        else:
            # The ELF auxiliary vector and /proc/cpuinfo on a Linux kernel
            # (armv8 aarch32) don't provide information about ASIMD, so assume
            # ASIMD is supported if the kernel reports any one of the
            # following ARMv8 features.
            self.features_map = {
                "ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32")
            }
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_core@tests@test_cpu_features.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/gmos/recipes/qa/tests/__init__.py",
"type": "Python"
}
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@gmos@recipes@qa@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "test_grids.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/chart-studio/chart_studio/tests/test_plot_ly/test_api/test_v2/test_grids.py",
"type": "Python"
}
|
from __future__ import absolute_import
import json as _json
from chart_studio.api.v2 import grids
from chart_studio.tests.test_plot_ly.test_api import PlotlyApiTestCase
class GridsTest(PlotlyApiTestCase):
    """Tests for the v2 grids API wrapper functions.

    Every wrapper is expected to issue exactly one HTTP request.  The
    request itself is mocked out, and each test asserts on the recorded
    method, URL, query parameters and body via ``_single_request``,
    replacing the unpacking boilerplate previously duplicated in every
    test method.
    """

    def setUp(self):
        super(GridsTest, self).setUp()

        # Mock the actual api call, we don't want to do network tests here.
        self.request_mock = self.mock("chart_studio.api.v2.utils.requests.request")
        self.request_mock.return_value = self.get_response()

        # Mock the validation function since we can test that elsewhere.
        self.mock("chart_studio.api.v2.utils.validate_response")

    def _single_request(self):
        """Assert exactly one request was issued; return (method, url, kwargs)."""
        assert self.request_mock.call_count == 1
        args, kwargs = self.request_mock.call_args
        method, url = args
        return method, url, kwargs

    def test_create(self):
        filename = "a grid"
        grids.create({"filename": filename})
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "post")
        self.assertEqual(url, "{}/v2/grids".format(self.plotly_api_domain))
        self.assertEqual(kwargs["data"], '{{"filename": "{}"}}'.format(filename))

    def test_retrieve(self):
        grids.retrieve("hodor:88")
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "get")
        self.assertEqual(url, "{}/v2/grids/hodor:88".format(self.plotly_api_domain))
        self.assertEqual(kwargs["params"], {})

    def test_retrieve_share_key(self):
        grids.retrieve("hodor:88", share_key="foobar")
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "get")
        self.assertEqual(url, "{}/v2/grids/hodor:88".format(self.plotly_api_domain))
        self.assertEqual(kwargs["params"], {"share_key": "foobar"})

    def test_update(self):
        new_filename = "..zzZ ..zzZ"
        grids.update("hodor:88", body={"filename": new_filename})
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "put")
        self.assertEqual(url, "{}/v2/grids/hodor:88".format(self.plotly_api_domain))
        self.assertEqual(kwargs["data"], '{{"filename": "{}"}}'.format(new_filename))

    def test_trash(self):
        grids.trash("hodor:88")
        method, url, _ = self._single_request()
        self.assertEqual(method, "post")
        self.assertEqual(
            url, "{}/v2/grids/hodor:88/trash".format(self.plotly_api_domain)
        )

    def test_restore(self):
        grids.restore("hodor:88")
        method, url, _ = self._single_request()
        self.assertEqual(method, "post")
        self.assertEqual(
            url, "{}/v2/grids/hodor:88/restore".format(self.plotly_api_domain)
        )

    def test_permanent_delete(self):
        grids.permanent_delete("hodor:88")
        method, url, _ = self._single_request()
        self.assertEqual(method, "delete")
        self.assertEqual(
            url, "{}/v2/grids/hodor:88/permanent_delete".format(self.plotly_api_domain)
        )

    def test_lookup(self):
        # requests does urlencode, so don't worry about the `' '` character!
        path = "/mah grid"
        parent = 43
        user = "someone"
        exists = True
        grids.lookup(path=path, parent=parent, user=user, exists=exists)
        method, url, kwargs = self._single_request()
        expected_params = {
            "path": path,
            "parent": parent,
            "exists": "true",
            "user": user,
        }
        self.assertEqual(method, "get")
        self.assertEqual(url, "{}/v2/grids/lookup".format(self.plotly_api_domain))
        self.assertEqual(kwargs["params"], expected_params)

    def test_col_create(self):
        cols = [
            {"name": "foo", "data": [1, 2, 3]},
            {"name": "bar", "data": ["b", "a", "r"]},
        ]
        body = {"cols": _json.dumps(cols, sort_keys=True)}
        grids.col_create("hodor:88", body)
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "post")
        self.assertEqual(url, "{}/v2/grids/hodor:88/col".format(self.plotly_api_domain))
        self.assertEqual(kwargs["data"], _json.dumps(body, sort_keys=True))

    def test_col_retrieve(self):
        grids.col_retrieve("hodor:88", "aaaaaa,bbbbbb")
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "get")
        self.assertEqual(url, "{}/v2/grids/hodor:88/col".format(self.plotly_api_domain))
        self.assertEqual(kwargs["params"], {"uid": "aaaaaa,bbbbbb"})

    def test_col_update(self):
        cols = [
            {"name": "foo", "data": [1, 2, 3]},
            {"name": "bar", "data": ["b", "a", "r"]},
        ]
        body = {"cols": _json.dumps(cols, sort_keys=True)}
        grids.col_update("hodor:88", "aaaaaa,bbbbbb", body)
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "put")
        self.assertEqual(url, "{}/v2/grids/hodor:88/col".format(self.plotly_api_domain))
        self.assertEqual(kwargs["params"], {"uid": "aaaaaa,bbbbbb"})
        self.assertEqual(kwargs["data"], _json.dumps(body, sort_keys=True))

    def test_col_delete(self):
        grids.col_delete("hodor:88", "aaaaaa,bbbbbb")
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "delete")
        self.assertEqual(url, "{}/v2/grids/hodor:88/col".format(self.plotly_api_domain))
        self.assertEqual(kwargs["params"], {"uid": "aaaaaa,bbbbbb"})

    def test_row(self):
        body = {"rows": [[1, "A"], [2, "B"]]}
        grids.row("hodor:88", body)
        method, url, kwargs = self._single_request()
        self.assertEqual(method, "post")
        self.assertEqual(url, "{}/v2/grids/hodor:88/row".format(self.plotly_api_domain))
        self.assertEqual(kwargs["data"], _json.dumps(body, sort_keys=True))
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@chart-studio@chart_studio@tests@test_plot_ly@test_api@test_v2@test_grids.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/box/hoverlabel/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``box.hoverlabel.font.shadow`` property."""

    def __init__(
        self, plotly_name="shadow", parent_name="box.hoverlabel.font", **kwargs
    ):
        # Apply the schema defaults unless the caller overrides them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@box@hoverlabel@font@_shadow.py@.PATH_END.py
|
{
"filename": "paper.md",
"repo_name": "ejhigson/perfectns",
"repo_path": "perfectns_extracted/perfectns-master/docs/paper/paper.md",
"type": "Markdown"
}
|
---
title: '``perfectns``: perfect dynamic and standard nested sampling for spherically symmetric likelihoods and priors'
tags:
- Python
- nested sampling
- dynamic nested sampling
- Bayesian inference
authors:
- name: Edward Higson
orcid: 0000-0001-8383-4614
affiliation: "1, 2"
affiliations:
- name: Cavendish Astrophysics Group, Cavendish Laboratory, J.J.Thomson Avenue, Cambridge, CB3 0HE, UK
index: 1
- name: Kavli Institute for Cosmology, Madingley Road, Cambridge, CB3 0HA, UK
index : 2
date: 21 September 2018
bibliography: paper.bib
---
# Summary
Nested sampling [@Skilling2006] is a popular Monte Carlo method for computing Bayesian evidences and generating posterior samples given some likelihood and prior.
Both standard nested sampling and its more general variant dynamic nested sampling [@Higson2017b] require sampling randomly from the prior within a hard likelihood constraint.
This is a computationally challenging problem, and can typically only be done approximately for practical problems.
Popular methods include rejection sampling, utilized by ``MultiNest`` [@Feroz2008; @Feroz2009; @Feroz2013], and slice sampling, which is used by ``PolyChord`` [@Handley2015a; @Handley2015b] and ``dyPolyChord`` [@Higson2018dypolychord; @Higson2017b].
However all such approximate techniques can lead to additional errors, for example due to correlated samples or missing a mode of a multimodal posterior; for more details see [@Higson2018a].
In order to test the statistical properties of the nested sampling algorithm or check numerical implementations, it is useful to follow the approach introduced by [@Keeton2011] and consider special cases where totally uncorrelated samples can be produced within hard likelihood constraints.
As a result the nested sampling algorithm can be performed perfectly; we term this *perfect nested sampling*.
``perfectns`` performs perfect nested sampling for spherically symmetric likelihoods and priors; its specialised design and ability to produce perfectly uncorrelated samples makes it highly effective for use with spherically symmetric problems.
Furthermore, such problems provide a rich source of test cases for assessing the capacity of other software implementations to perform the nested sampling algorithm accurately, and for use in statistical research into nested sampling.
In fact, Section 3 of [@Higson2017a] shows that any perfect nested sampling calculation can in principle be transformed into a spherically symmetric form compatible with ``perfectns`` while retaining its statistical properties.
Such transformations can be used to generate a wider range of test cases, although it can be mathematically challenging and is not feasible for most practical problems.
``perfectns`` requires ``mpmath`` [@mpmath], ``matplotlib`` [@matplotlib], ``nestcheck`` [@Higson2018nestcheck; @Higson2018a; @Higson2017a], ``pandas`` [@pandas], ``numpy`` [@numpy] and ``scipy`` [@scipy].
``perfectns`` was used in the development of the dynamic nested sampling algorithm [@Higson2017b], and for making many of the numerical tests and plots in the dynamic nested sampling paper.
It was also used in testing ``dyPolyChord`` [@Higson2018dypolychord], and numerical tests and plots in [@Higson2017a] were made using earlier versions of the package.
The source code for ``perfectns`` has been archived to Zenodo [@zenodoperfectns].
# Acknowledgements
I am grateful to Will Handley, Anthony Lasenby and Mike Hobson for their help and advice in the research which led to the creation of ``perfectns``.
# References
|
ejhigsonREPO_NAMEperfectnsPATH_START.@perfectns_extracted@perfectns-master@docs@paper@paper.md@.PATH_END.py
|
{
"filename": "_ypad.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/_ypad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``scattercarpet.marker.colorbar.ypad``."""

    def __init__(
        self, plotly_name="ypad", parent_name="scattercarpet.marker.colorbar", **kwargs
    ):
        # Apply the schema defaults unless the caller overrides them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        super(YpadValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattercarpet@marker@colorbar@_ypad.py@.PATH_END.py
|
{
"filename": "acs_basic_fullframe_hstaxe_cookbook.ipynb",
"repo_name": "spacetelescope/hstaxe",
"repo_path": "hstaxe_extracted/hstaxe-main/cookbooks/ACS/WFC_basic_fullframe_extraction/acs_basic_fullframe_hstaxe_cookbook.ipynb",
"type": "Jupyter Notebook"
}
|
## HSTaXe Cookbook: Extraction for ACS/WFC Full Frame G800L Data
This notebook contains a step-by-step guide for performing a basic spectral extraction with HSTaXe for G800L full frame data from ACS/WFC.
## 1. HSTaXe Installation
Follow instructions from https://github.com/spacetelescope/hstaxe to install HSTaXe software
## 2. Cookbook Data
The example data we use in this notebook are available [here](https://stsci.app.box.com/folder/196021174459). Keep this notebook and the 'cookbook_data' folder in the same directory.
## 3. Load a Few Python Modules
```python
%matplotlib inline
import glob
from astropy.io import fits
import numpy as np
import os, shutil
import matplotlib.pyplot as plt
from drizzlepac import astrodrizzle
from hstaxe import axetasks
from stwcs import updatewcs
```
```python
# Check hstaxe version
import hstaxe
hstaxe.__version__
```
We save the current working directory
```python
cwd = os.getcwd()
print("The current directory is %s" % (cwd))
```
## 4. Copying Data from the 'cookbook_data' Folder
We will create a G800L subdirectory to copy all of the G800L files into. This is where we will prepare the G800L data.
Creating the directory, removing any existing one
```python
os.chdir(cwd)
if os.path.isdir("G800L"):
shutil.rmtree("G800L")
os.mkdir("G800L")
```
Copying the G800L data (which we grab from our cookbook_data directory)
```python
os.system("cp cookbook_data/G800L/*flc.fits G800L/")
os.system("cp cookbook_data/G800L/G800L.lis G800L/")
```
We move into the G800L directory and check the content of the G800L.lis file **(in this case, we have only 1 image but you could have multiple images)**
```python
os.chdir(cwd)
os.chdir("G800L")
!cat G800L.lis
print()
os.system(f"fitsheader -f -e 0 -k APERTURE -k FILTER1 *fits")
```
We will create an F775W subdirectory to copy all of the F775W files into. This is where we will prepare the F775W data.
Creating the directory, removing any existing one
```python
os.chdir(cwd)
if os.path.isdir("F775W"):
shutil.rmtree("F775W")
os.mkdir("F775W")
```
Copy the F775W data (which we grab from our cookbook_data directory)
```python
os.system("cp cookbook_data/F775W/*flc.fits F775W/")
os.system("cp cookbook_data/F775W/F775W.lis F775W/")
```
We move into the F775W directory and check the content of the F775W.lis file
```python
os.chdir(cwd)
os.chdir("F775W")
!cat F775W.lis
print()
os.system(f"fitsheader -f -e 0 -k APERTURE -k FILTER1 *fits")
```
## 5. Check/Verify Matching WCS Information
It is possible that the WCS in the direct and grism images differ. In this section we will use a function to process all the direct and grism images to verify that the WCS information is consistent throughout. If there is any disagreement in WCS information we call `updatewcs` with the database keyword set to False, which will roll back all the solutions to the original distortion-corrected WCS. For more information regarding HST WCS and improved absolute astrometry please see [WFC3 Instrument Science Report 2022-06 (Mack et al. 2022)](https://ui.adsabs.harvard.edu/abs/2022wfc..rept....6M/abstract). For documentations on `updatewcs` please see [here](https://stwcs.readthedocs.io/en/latest/updatewcs.html).
Before running _updatewcs_, we need to [set CRDS environment variables](https://hst-crds.stsci.edu/docs/cmdline_bestrefs/). We will point to a subdirectory called 'crds_cache/' using the JREF environment variable. The JREF variable is used for ACS reference files and different instruments use other variables, e.g., IREF for WFC3. You have the option to permanently add these environment variables to your user profile by adding the path in your shell's configuration file. If you're using bash, you would edit the '~/.bash_profile' file with lines such as:
- export CRDS_PATH="$HOME/crds_cache"
- export CRDS_SERVER_URL="https://hst-crds.stsci.edu"
- export jref="${CRDS_PATH}/references/hst/jref/"
If you have already set up the CRDS environment variables you may skip running the cell below.
```python
os.environ["CRDS_SERVER_URL"] = "https://hst-crds.stsci.edu"
os.environ['CRDS_SERVER'] = 'https://hst-crds.stsci.edu'
if "CRDS_PATH" not in os.environ.keys():
os.environ["CRDS_PATH"] = os.path.join(os.environ["HOME"],"crds_cache")
if "jref" not in os.environ.keys():
os.environ["jref"] = "$HOME/crds_cache/references/hst/jref/"
```
```python
def check_wcs(images):
    """Verify that the active WCS solutions of direct and grism images match.

    Rolls every image back to the original distortion-corrected WCS
    (``updatewcs`` with ``use_db=False``) whenever the active solutions
    disagree, either within the direct/grism sets or between them.
    (written by the WFC3 team -- Aidan Pidgeon, Benjamin Kuhn, Debopam Som)

    Parameter
    ---------
    images : list
        a list of grism and direct images

    Return
    ------
    N/A
    """
    direct_wcs = []
    grism_wcs = []
    for f in images:
        # get filter for image to distinguish between direct and grism
        filt = fits.getval(f, 'FILTER1')
        # Query the astrometry database only when the file has never been
        # WCS-processed (no WCSCORR table present yet).
        hdul = fits.open(f)
        db_bool = 'WCSCORR' not in hdul
        hdul.close()
        try:
            # get the active solution from the file's "SCI" extension
            wcsname = fits.getval(f, 'WCSNAME', ext=('SCI', 1))
            if db_bool:
                updatewcs.updatewcs(f, use_db=db_bool)
        except KeyError:
            # No WCSNAME yet: run updatewcs first, then read the solution.
            updatewcs.updatewcs(f, use_db=db_bool)
            wcsname = fits.getval(f, 'WCSNAME', ext=('SCI', 1))
        # separate between direct and grism
        if 'G' in filt:
            grism_wcs.append(wcsname)
        if 'F' in filt:
            direct_wcs.append(wcsname)

    # get the number of unique active solutions in the direct and grism images
    num_wcs_direct = len(set(direct_wcs))
    num_wcs_grism = len(set(grism_wcs))

    # roll back the WCS on all files if either set holds more than one active
    # solution, or if the direct solutions do not match the grism solutions
    if num_wcs_direct > 1 or num_wcs_grism > 1 or set(direct_wcs) != set(grism_wcs):
        for image in images:
            updatewcs.updatewcs(image, use_db=False)
        print('WCS reset complete')
    else:
        # One unique active solution shared by both sets: nothing to do.
        # (typo fix: "us WCS" -> "use WCS")
        print(f"No WCS update needed. All grism and direct images use WCS: {grism_wcs[0]}.")
```
```python
os.chdir(cwd)
all_images = glob.glob('F775W/*_flc.fits')+\
glob.glob('G800L/*_flc.fits')
# to populate 'crds_cache' folder with all reference files
for image in all_images:
command_line_input = f'crds bestrefs --files {image} --sync-references=1 --update-bestrefs'
os.system(command_line_input)
# or to get only IDCTAB and MDRIZTAB files
#for image in all_images:
# command_line_input = f'crds bestrefs --files {image} --types IDCTAB MDRIZTAB --sync-references=1 --update-bestrefs'
# os.system(command_line_input)
check_wcs(all_images)
```
## 6. Drizzling the Input Data
We now create a G800L mosaic using the G800L data **(in this case, only 1 image is drizzled but you could have multiple images)**
Note, in the cell below we have set the AstroDrizzle parameters for processing only a single image. **If you have more than one direct image to drizzle together please set the parameters appropriately.** For example `driz_separate`, `driz_sep_wcs`, `median, blot`, and `driz_cr` should all be set to True. For more information please see the AstroDrizzle documentation [here](https://drizzlepac.readthedocs.io/en/latest/astrodrizzle.html). Finally, if your input images were FLT images rather than FLC images, change the extension to 'drz.fits' in the cell below.
This mosaic will be used to set up the proper astrometry for each individual FLC (CTE corrected) files. We can only extract G800L spectra from FLC files which have been used to make this mosaic
```python
# Drizzle grism images.
os.chdir(cwd)
os.chdir("G800L")
astrodrizzle.AstroDrizzle("@G800L.lis", output="G800L", build=True, mdriztab=True, in_memory=False,
preserve=False, skysub=False, driz_separate=False, median=False,
blot=False, driz_cr=False, driz_sep_wcs=False,
driz_combine=True, final_wcs=False)
```
We already created a mosaic of all the G800L data for astrometric purposes, and we now create an F775W mosaic using the G800L mosaic as the astrometric reference frame. This will ensure that the G800L and F775W mosaics have pixels with the same RA and DEC.
We create a F775W mosaic using the F775W data and the G800L mosaic as a reference **(in this case, only 1 image is drizzled)**
Note, in the cell below we have set the AstroDrizzle parameters for processing only a single image. **If you have more than one direct image to drizzle together please set the parameters appropriately.** For example `driz_separate`, `driz_sep_wcs`, `median, blot`, and `driz_cr` should all be set to True. For more information please see the AstroDrizzle documentation [here](https://drizzlepac.readthedocs.io/en/latest/astrodrizzle.html). Finally, if your input images were FLT images rather than FLC images, change the extension to 'drz.fits' in the cell below.
```python
# Drizzle direct images.
os.chdir(cwd)
os.chdir("F775W")
ref = "../G800L/G800L_drc.fits[1]"
astrodrizzle.AstroDrizzle("@F775W.lis", output="F775W", build=True, mdriztab=True, in_memory=False,
preserve=False, skysub=True, driz_separate=False, median=False,
blot=False, driz_cr=False, driz_sep_wcs=False,
driz_combine=True, final_wcs=True, driz_sep_refimage=ref, final_refimage=ref)
```
The F775W and G800L should be aligned and bright objects should generate bright spectra in the expected position. We should see very little offset in the y-direction for ACS WFC grism data
```python
os.chdir(cwd)
plt.rcParams["figure.figsize"] = (10,7)
plt.subplot(1,2,1)
d = fits.open("F775W/F775W_drc.fits")[1].data
im1 = plt.imshow(d,origin="lower")
im1.set_clim(0,8.5)
plt.subplot(1,2,2)
d = fits.open("G800L/G800L_drc.fits")[1].data
im1 = plt.imshow(d,origin="lower")
im1.set_clim(0,0.85)
```
## 7. Creating a Catalog with SExtractor
We create an object catalog using sextractor
This is one step that needs to be done carefully as several things can go wrong.
- Make sure you set the magnitude zeropoint properly for the image you are using
- One can generate a simple catalog using:
sex -c default.sex F775W_drc.fits[1] -DETECT_THRESH 5 -MAG_ZEROPOINT 25.656
or use a full command as given in 'run_sext.e' file (cookbook_data/catalog)
- See default.param for the required parameters that aXe will be looking for.
- Check the resulting regions file and catalog to make sure that all objects have good magnitudes (i.e. no mag of 99.)
- Edit F775W.cat and rename column 'MAG_AUTO' with 'MAG_F7692', or you will get an "aXeError: Catalogue: test.cat does not contain any magnitude column!" error when running iolprep
This catalog, when doing a simple extraction, will be used to compute the SED of each sources. These SEDs will be used to compute our contamination models. In this example, we used a single band, F775W, but we could have added information in other bands such as F814W or F606W for example. This requires running Sextractor in matched photometry mode, and the creation of a catalog where magnitudes in multiple bands are properly listed
For simplicity, here, we copy an already generated catalog:
```python
os.chdir(cwd)
os.system("cp cookbook_data/catalog/F775W.cat ./F775W/")
!cat ./F775W/F775W.cat
```
## 8. Running HSTaXe
We can now run HSTaXe. We start by setting up some necessary environment variables that point to the various HSTaXe directories. **Make sure that you keep the path length to be less than 80 characters when the code points to individual data files, or about 60 characters without the file names to be safe**
Create a directory called CONF and copy the ACS WFC G800L Calibration files in there.
```python
os.chdir(cwd)
if os.path.isdir("CONF"):
shutil.rmtree("CONF")
os.mkdir("CONF")
os.system("cp cookbook_data/CONF/* CONF/")
```
Set up some work directories and environment variables required by HSTaXe:
```python
os.chdir(cwd)
import os
if os.path.isdir("DATA"):
shutil.rmtree("DATA")
os.mkdir("DATA")
os.environ['AXE_IMAGE_PATH'] = cwd+'/DATA/'
print ('--> variable AXE_IMAGE_PATH set to '+cwd+'/DATA/')
os.environ['AXE_CONFIG_PATH'] = cwd+'/CONF/'
print ('--> variable AXE_CONFIG_PATH set to '+cwd+'/CONF/')
if os.path.isdir("OUTPUT"):
shutil.rmtree("OUTPUT")
os.mkdir("OUTPUT")
os.environ['AXE_OUTPUT_PATH'] = cwd+'/OUTPUT/'
print ('--> variable AXE_OUTPUT_PATH set to '+cwd+'/OUTPUT/')
if os.path.isdir("DRIZZLE"):
shutil.rmtree("DRIZZLE")
os.mkdir("DRIZZLE")
os.environ['AXE_DRIZZLE_PATH'] = cwd+'/DRIZZLE/'
print ('--> variable AXE_DRIZZLE_PATH set to '+cwd+'/DRIZZLE/')
os.mkdir("DRIZZLE/tmp")
print ("Length of AXE_IMAGE_PATH is",len(os.environ['AXE_IMAGE_PATH']),"characters")
```
We define the FOV boundaries for the ACS observations.
- Four numbers to specify the modifications
[left, right, bottom, top] to the target area on the input
images. E.g. 100,500,10,0 would include in the Input Object
Lists all objects with -100 < x < x_size + 500 and
-10 < y < y_size.
- at present using 0,0,0,0 -- **it is only important when objects are at the edges**
```python
dimension_info = "0,0,0,0"
```
We copy the G800L FLC files and the F775W FLC files in the DATA directory
You can either use the original data or optionally the FLC files used to create the G800L mosaic earlier, which will have some extra bad pixels flagging
```python
os.chdir(cwd)
os.system("cp G800L/*flc.fits DATA/")
os.system("cp F775W/*flc.fits DATA/")
```
We use the iolprep aXe task to generate individual F775W catalogs
This task will create the individual F775W extraction catalogs, TWO (1 for chip1 and 1 for chip2) for each of the files listed in the F775W.lis file. We pass the F775W mosaic to it, as it contains all the information about all the individual F775W FLC file. The catalog 'filename_flc_1.cat' corresponds to chip2 and catalog 'filename_flc_2.cat' corresponds to chip1
```python
os.chdir(cwd)
os.chdir("F775W")
axetasks.iolprep(drizzle_image='F775W_drc.fits',
input_cat='F775W.cat',
dimension_in=dimension_info)
```
We copy the newly created catalog files into the DATA directory
```python
os.chdir(cwd)
os.system("cp ./F775W/jdql01jvq*_1.cat DATA/")
os.system("cp ./F775W/jdql01jvq*_2.cat DATA/")
```
```python
!ls ./DATA
```
We are almost ready to extract the spectra. We need to create a file aXe.lis containing the G800L images, expected catalog names and associated F775W direct images
The G800L mosaic we created earlier is not used directly during the aXe extraction process. However, the F775W mosaic was used to create an object master catalog. This catalog will be processed to generate individual object catalogs for the files used to create the F775W mosaic. The aXe.lis file lists which F775W images are logically associated with a particular G800L image. Ideally, these are images taken in the same HST visit so that we can be sure that the WCS of both files are consistent.
The aXe.lis file is a simple text file, with a slightly different format than the one above. In this file, each line contains 3 items:
- The name of a G800L FLC file (e.g. [grism_rootname]_flc.fits)
- TWO catalog names, separated by comma, corresponding to two chips on ACS/WFC (e.g., [direct_rootname]_flc_2.cat, [direct_rootname]_flc_1.cat) --- be careful about catalog numbering and chip numbers -- they are reversed because of reversed extension numbers: [sci,1] corresponds to chip2 and [sci,2] corresponds to chip1
- The name of the direct imaging file [direct_rootname]_flc.fits associated with the G800L data and the catalog.
```python
os.chdir(cwd)
os.system("cp cookbook_data/aXe.lis .")
!cat aXe.lis
```
We run aXeprep. This task will amongst other things take care of background subtracting the G800L data using a single master sky.
```python
config_file='ACS.WFC.CHIP1.Cycle13.5.conf,ACS.WFC.CHIP2.Cycle13.5.conf'
msky_file='ACS.WFC.CHIP1.msky.1.smooth.fits,ACS.WFC.CHIP2.msky.1.smooth.fits'
```
```python
os.chdir(cwd)
axetasks.axeprep(inlist="aXe.lis",
configs=config_file,
backgr=True,
backims=msky_file,
norm=True,
mfwhm=3.0)
```
**We can now proceed with a simple box extraction of our G800L spectra.** This will not combine individual 1D spectra; we create one extracted spectrum per object and per G800L FLC file we are processing. The contamination is estimated using the Gaussian model of each object that is included in the SExtractor object catalog.
For each of the G800L input FLC file, this will create the following in the OUTPUT/ directory:
- [rootname]_flc_2.cat : Object catalog for the FLC file [rootname]_flc.fits
- [rootname]_flc_2.OAF : Aperture file
- [rootname]_flc_2.PET.fits : The Pixel Extraction Table, containing all the unbinned information about each spectrum
- [rootname]_flc_2.SPC.fits : 1D extracted spectra
- [rootname]_flc_2.CONT.fits : Contamination estimate for each of the spectra
- [rootname]_flc_2_opt.SPC.fits : Optimally extracted version of the 1D spectra
Note that the next notebook cell might take a few minutes to run
```python
os.chdir(cwd)
# infwhm (extrfwhm) & outfwhm (drzfwhm) can be (5,4)(wide), (4,3)(default) or (3,2)(narrow)
axetasks.axecore('aXe.lis',
config_file,
fconfterm=msky_file,
extrfwhm=4.,
drzfwhm=3.,
backfwhm=0.,
orient=False,
weights=True,
slitless_geom=False,
cont_model='gauss',
sampling='drizzle',
exclude=True)
```
Results are in the directory pointed to by os.environ['AXE_OUTPUT_PATH'], i.e. ./OUTPUT 1D and 2D spectra extracted from individual FLC files are available. These are not combined. SPC files contained 1D spectra, opt.SPC files contained optimally extracted spectra (using gaussian profiles), STP files contain 2D stamps. CONT files contain the contamination estimate (gaussian based)
## 9. Review Output Spectra
```python
os.chdir(cwd)
!ls OUTPUT/*SPC.fits
!ls OUTPUT/*STP.fits
```
```python
ID = 5 # object ID from the Source Extractor catalog
f1_STP = "OUTPUT/jdql01jxq_flc_2.STP.fits"
f1_SPC = "OUTPUT/jdql01jxq_flc_2.SPC.fits"
f1_OPT_SPC = "OUTPUT/jdql01jxq_flc_2_opt.SPC.fits"
```
We can examine individual 2D spectra from the STP files. Note that the STP files are meant for quality control and are not calibrated versions of the 2D spectra.
Files 'filename_flc_2.STP.fits' are for chip2 and 'filename_flc_5.STP.fits' are for chip1
```python
plt.rcParams["figure.figsize"] = (10,3)
try:
d1 = fits.open(f1_STP)["BEAM_%dA" % (ID)].data
im1 = plt.imshow(d1,origin="lower")
im1.set_clim(0,400)
except:
pass
```
We now examine the calibrated 1D spectra of one of the sources:
```python
for s in glob.glob(f1_SPC):
print( s)
d1 = fits.open(s)["BEAM_%dA" % (ID)].data
w = d1["LAMBDA"]
f = d1["FLUX"]
e = d1["FERROR"]
vg = (w>5500) & (w<10500)
plt.errorbar(w[vg],f[vg],e[vg])
plt.xlabel(r'Wavelength ($\AA$)')
plt.ylabel(r'Flux ($erg/s/cm^2/\AA$)');
```
```python
for s in glob.glob(f1_SPC):
print( s)
d1 = fits.open(s)["BEAM_%dA" % (ID)].data
w = d1["LAMBDA"]
f = d1["COUNT"]
vg = (w>5500) & (w<10500)
plt.plot(w[vg],f[vg])
plt.xlabel(r'Wavelength ($\AA$)')
plt.ylabel(r'COUNT (Count)');
```
Contamination is not automatically removed but has been estimated and we can plot it
```python
for s in glob.glob(f1_SPC):
print (s)
d1 = fits.open(s)["BEAM_%dA" % (ID)].data
w = d1["LAMBDA"]
c = d1["CONTAM"]
vg = (w>5500) & (w<12000)
plt.plot(w[vg],c[vg],label=s)
plt.legend()
plt.xlabel(r'Wavelength ($\AA$)')
plt.ylabel(r'Flux ($erg/s/cm^2/\AA$)');
```
## 10. Additional Steps to Use 'aXedrizzle' Task to Combine Multiple Exposures
```python
os.chdir(cwd)
axetasks.drzprep(inlist = "aXe.lis",
configs=config_file,
back = False,
opt_extr=True)
```
```python
!ls -altr OUTPUT/*DPP*
```
```python
os.chdir(cwd)
# infwhm (extrfwhm) & outfwhm (drzfwhm) can be (5,4)(wide), (4,3)(default) or (3,2)(narrow)
axetasks.axecrr(inlist="aXe.lis",
configs=config_file,
infwhm = 4.0,
outfwhm = 3.0,
back = False,
driz_separate = 'yes',
opt_extr=True)
```
The extraction results are in the DRIZZLE directory we created, and we can examine a 2D, rectified and wavelength calibrated version of the spectrum we looked at earlier:
```python
plt.rcParams["figure.figsize"] = (10,3)
d = fits.open("./DRIZZLE/aXedrizzle_2.STP.fits")["BEAM_%dA" % (ID)].data
im = plt.imshow(d)
im.set_clim(0,100)
```
We plot the extracted 1D spectra of our source and the estimate of the contamination:
```python
fin = fits.open("./DRIZZLE/aXedrizzle_2.SPC.fits")
tdata = fin["BEAM_%dA" % (ID)].data
x = tdata["LAMBDA"]
f = tdata["FLUX"]
e = tdata["FERROR"]
c = tdata["CONTAM"]
vg = (x>5500) & (x<10500)
plt.plot(x[vg],f[vg])
plt.errorbar(x[vg],f[vg],e[vg])
plt.plot(x[vg],c[vg])
```
The MEF files in the DRIZZLE directory contain the 2D version of the spectrum of a source as well as estimate of the contamination:
```python
plt.subplot(2,1,1)
d = fits.open("./DRIZZLE/aXedrizzle_mef_ID%d.fits" % (ID))["SCI"].data
im = plt.imshow(d)
im.set_clim(0,50)
plt.subplot(2,1,2)
d = fits.open("./DRIZZLE/aXedrizzle_mef_ID%d.fits" % (ID))["CON"].data
im = plt.imshow(d)
im.set_clim(0,10)
```
The individually extracted spectra are in the OUTPUT directory and the combined ones in the DRIZZLE directory. We can plot and compare them:
```python
for s in glob.glob(f1_SPC):
print(s)
d1 = fits.open(s)["BEAM_%dA" % (ID)].data
w = d1["LAMBDA"]
f = d1["FLUX"]
e = d1["FERROR"]
vg = (w>5500) & (w<10500)
plt.errorbar(w[vg],f[vg],e[vg])
plt.xlabel(r'Wavelength ($\AA$)')
plt.ylabel(r'Flux ($erg/s/cm^2/\AA$)');
fin = fits.open("./DRIZZLE/aXedrizzle_2.SPC.fits")
tdata = fin["BEAM_%dA" % (ID)].data
x = tdata["LAMBDA"]
f = tdata["FLUX"]
e = tdata["FERROR"]
c = tdata["CONTAM"]
vg = (x>5500) & (x<10500)
plt.plot(x[vg],f[vg],color='k',lw=2)
plt.errorbar(x[vg],f[vg],e[vg],color='k',lw=2)
```
## Conclusions
Thank you for walking through this spectral extraction workflow. You should now be able to perform a basic extraction on ACS/WFC data using HSTaXe.
For detailed information on HSTaXe, please visit the [documentation webpage](https://hstaxe.readthedocs.io/en/latest/index.html).
Lastly, if you have questions regarding this notebook or using ACS data with HSTaXe please contact our [ACS Help Desk](https://stsci.service-now.com/hst).
## About this Notebook
**Author:** Nimish Hathi, ACS Instrument Branch (STScI)
**Updated On:** March 8, 2023
```python
```
|
spacetelescopeREPO_NAMEhstaxePATH_START.@hstaxe_extracted@hstaxe-main@cookbooks@ACS@WFC_basic_fullframe_extraction@acs_basic_fullframe_hstaxe_cookbook.ipynb@.PATH_END.py
|
{
"filename": "_showticksuffix.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/contour/colorbar/_showticksuffix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the 'showticksuffix' property of contour.colorbar."""

    def __init__(
        self, plotly_name="showticksuffix", parent_name="contour.colorbar", **kwargs
    ):
        # Install defaults only where the caller has not supplied a value,
        # then forward everything to the enumerated-validator base class.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("values", ["all", "first", "last", "none"])
        super(ShowticksuffixValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@contour@colorbar@_showticksuffix.py@.PATH_END.py
|
{
"filename": "cmb.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/cmb.py",
"type": "Python"
}
|
from scipy.interpolate import interp1d
import numpy as np
from pkg_resources import resource_filename
from hazma.parameters import temp_cmb_formation
"""
Functions required for computing CMB limits and related quantities.
"""
# Get paths to the tabulated f_eff data files shipped inside the module
f_eff_ep_rf = resource_filename(__name__, "cmb_data/f_eff_ep.dat")
f_eff_g_rf = resource_filename(__name__, "cmb_data/f_eff_g.dat")
# Load f_eff^{e+ e-}: CSV of (DM mass [eV], f_eff); interpolate vs mass in MeV
f_eff_ep_data = np.loadtxt(f_eff_ep_rf, delimiter=",").T
f_eff_ep = interp1d(f_eff_ep_data[0] / 1.0e6, f_eff_ep_data[1])  # eV -> MeV
# Load f_eff^{gamma} (photons) -- the original comment here wrongly
# duplicated "f_eff^{e+ e-}"; this block reads the photon table f_eff_g.dat
f_eff_g_data = np.loadtxt(f_eff_g_rf, delimiter=",").T
f_eff_g = interp1d(f_eff_g_data[0] / 1.0e6, f_eff_g_data[1])  # eV -> MeV
#: Planck 2018 95% upper limit on p_ann from temperature + polarization
#: measurements, in cm^3 s^-1 MeV^-1
p_ann_planck_temp_pol = 3.5e-31  # temperature + polarization
#: Planck 2018 95% upper limit on p_ann from temperature + polarization +
#: lensing measurements, in cm^3 s^-1 MeV^-1
p_ann_planck_temp_pol_lensing = 3.3e-31  # temp + pol + lensing
#: Planck 2018 95% upper limit on p_ann from temperature + polarization +
#: lensing + BAO measurements, in cm^3 s^-1 MeV^-1
p_ann_planck_temp_pol_lensing_bao = 3.2e-31  # temp + pol + lensing + BAO
def vx_cmb(mx, x_kd):
    """Computes the DM relative velocity at CMB using eq. 28 from `this
    reference <https://arxiv.org/abs/1309.4091>`_.
    Parameters
    ----------
    mx : float
        Dark matter mass in MeV.
    x_kd: float
        T_kd / m_x, where T_kd is the dark matter's kinetic decoupling
        temperature.
    Returns
    -------
    v_x : float
        The DM relative velocity at the time of CMB formation.
    """
    # NOTE(review): the prefactor 2.0e-4 * 10e6 equals 2.0e3. If `10e6` was
    # intended as a 1.0e6 eV->MeV unit conversion this is off by a factor of
    # ten -- verify against eq. 28 of arXiv:1309.4091 before changing it.
    return 2.0e-4 * 10e6 * temp_cmb_formation / mx * np.sqrt(1.0e-4 / x_kd)
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@cmb.py@.PATH_END.py
|
{
"filename": "_lat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/geo/center/_lat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LatValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the 'lat' property of layout.geo.center."""

    def __init__(self, plotly_name="lat", parent_name="layout.geo.center", **kwargs):
        # Provide defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("role", "info")
        super(LatValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@geo@center@_lat.py@.PATH_END.py
|
{
"filename": "get_d_perp.py",
"repo_name": "CPM-project/MCPM",
"repo_path": "MCPM_extracted/MCPM-master/examples/get_d_perp.py",
"type": "Python"
}
|
import numpy as np
from astropy.coordinates import SkyCoord
from astropy import units as u
import MulensModel as MM
def get_d_perp(ra_deg, dec_deg, t_0_par, ephemeris_file):
    """
    Extract D_perp (projected separation of the satellite from the
    Earth-Sun line, in the microlensing trajectory plane) for the
    satellite whose ephemeris is given, at the epoch ``t_0_par``.
    """
    # A dummy event centred on t_0_par; u_0=0 and a unit-length parallax
    # vector (pi_E_N = pi_E_E = 1/sqrt(2)) make the trajectory offset
    # equal to D_perp directly.
    model_params = MM.ModelParameters({
        't_0': t_0_par, 'u_0': 0, 't_E': 100.,
        'pi_E_N': 2.**-0.5, 'pi_E_E': 2.**-0.5, 't_0_par': t_0_par})
    event_coords = MM.Coordinates(SkyCoord(ra_deg, dec_deg, unit=u.deg))
    satellite = MM.SatelliteSkyCoord(ephemerides_file=ephemeris_file)
    sat_position = satellite.get_satellite_coords([t_0_par])
    trajectory = MM.Trajectory(
        [t_0_par], model_params, coords=event_coords,
        parallax={'satellite': True},
        satellite_skycoord=sat_position)
    return np.sqrt(trajectory.x**2 + trajectory.y**2)[0]
if __name__ == '__main__':
    # Example: D_perp for a Galactic-bulge target (deg, deg) at a K2 C9 epoch
    # (HJD'), using the bundled K2 ephemeris file.
    ra = 271.390792
    dec = -28.542811
    epoch = 2457563.
    ephemeris_file = 'K2_ephemeris_01.dat'
    print(get_d_perp(ra, dec, epoch, ephemeris_file))
|
CPM-projectREPO_NAMEMCPMPATH_START.@MCPM_extracted@MCPM-master@examples@get_d_perp.py@.PATH_END.py
|
{
"filename": "_bordercolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sunburst/hoverlabel/_bordercolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source validator for 'bordercolorsrc' on sunburst.hoverlabel."""

    def __init__(
        self, plotly_name="bordercolorsrc", parent_name="sunburst.hoverlabel", **kwargs
    ):
        # Default the edit type unless explicitly overridden by the caller.
        kwargs.setdefault("edit_type", "none")
        super(BordercolorsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sunburst@hoverlabel@_bordercolorsrc.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/candlestick/hoverlabel/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for 'shadow' on candlestick.hoverlabel.font."""

    def __init__(
        self, plotly_name="shadow", parent_name="candlestick.hoverlabel.font", **kwargs
    ):
        # Defaults are applied only when absent from kwargs.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@candlestick@hoverlabel@font@_shadow.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/outsidetextfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for 'color' on histogram.outsidetextfont."""

    def __init__(
        self, plotly_name="color", parent_name="histogram.outsidetextfont", **kwargs
    ):
        # Fall back to the 'style' edit type when none is supplied.
        kwargs.setdefault("edit_type", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@outsidetextfont@_color.py@.PATH_END.py
|
{
"filename": "test_mapper_kv450.py",
"repo_name": "xC-ell/growth-history",
"repo_path": "growth-history_extracted/growth-history-main/xCell/xcell/tests/test_mapper_kv450.py",
"type": "Python"
}
|
import numpy as np
import xcell as xc
import healpy as hp
import os
def get_config():
    """Return the baseline MapperKV450 configuration used throughout these tests."""
    cfg = {
        'data_catalogs': ['xcell/tests/data/catalog.fits',
                          'xcell/tests/data/catalog_stars.fits'],
        'file_nz': 'xcell/tests/data/Nz_DIR_z0.1t0.3.asc',
        'zbin': 0,
        'nside': 32,
        'mask_name': 'mask',
    }
    return cfg
def get_mapper():
    """Instantiate a MapperKV450 from the default test configuration."""
    cfg = get_config()
    return xc.mappers.MapperKV450(cfg)
def test_smoke():
    """The mapper should construct from the default config without raising."""
    _ = get_mapper()
def get_es():
    """Return the expected flattened ellipticity pattern [0, 1, 2, 3, 0, 1, ...]
    covering every pixel of an nside=32 map."""
    npix = hp.nside2npix(32)
    # Tiling [0, 1, 2, 3] npix//4 times reproduces the repeated-row pattern.
    return np.tile(np.arange(4), npix // 4)
def test_lite():
    # Building a mapper with 'path_lite' set should write per-zbin "lite"
    # catalogs to disk on the first get_catalog() call.
    config = get_config()
    config['path_lite'] = 'xcell/tests/data/'
    # Remove lite catalogs left over from a previous run so we actually
    # exercise their creation, not a stale cache.
    ifile = 0
    while os.path.isfile(f'xcell/tests/data/KV450_lite_cat_zbin{ifile}.fits'):
        os.remove(f'xcell/tests/data/KV450_lite_cat_zbin{ifile}.fits')
        ifile += 1
    m = xc.mappers.MapperKV450(config)
    m.get_catalog()
    assert os.path.isfile('xcell/tests/data/KV450_lite_cat_zbin0.fits')
    # Non-existing FITS inputs: the mapper should fall back to reading the
    # lite catalogs just written, so construction must not raise.
    config['data_catalogs'] = ['whatever', 'whatever']
    xc.mappers.MapperKV450(config)
def test_get_signal_map():
    """Signal maps must be (2, npix) and match the analytic test pattern."""
    m = get_mapper()
    es = get_es()
    npix = hp.nside2npix(32)
    maps = {kind: np.array(m.get_signal_map(kind))
            for kind in ('shear', 'PSF', 'stars')}
    for arr in maps.values():
        assert arr.shape == (2, npix)
    # Shear is mean-subtracted and divided by (1 + multiplicative bias);
    # PSF and star maps carry the raw (sign-flipped) ellipticities.
    assert np.all(np.fabs(-maps['shear'] + (np.mean(es) - es) / (1 + m.m[0])) < 1E-5)
    assert np.all(np.fabs(-maps['PSF'] - es) < 1E-5)
    assert np.all(np.fabs(-maps['stars'] - es) < 1E-5)
def test_get_mask():
    """Each mask must cover the full nside=32 sky and equal 2 everywhere
    (one count per input catalog, and the test config has two)."""
    m = get_mapper()
    npix = hp.nside2npix(32)
    for kind in ('shear', 'PSF', 'stars'):
        mask = m.get_mask(kind)
        assert len(mask) == npix
        assert np.all(np.fabs(mask - 2) < 1E-5)
def test_get_nl_coupled():
    # The coupled noise power N_l should vanish at l<2 and be flat (white)
    # above, matching the analytic shot-noise level of the test catalog.
    m = get_mapper()
    aa = hp.nside2pixarea(32)  # pixel area in steradians
    sh = m.get_nl_coupled()
    # Shear noise: ellipticity variance, corrected by (1+m)^2 for the
    # multiplicative bias; factor 4 from the two catalogs x two components.
    shp = 4*np.std(np.arange(4))**2*aa/(1+m.m[0])**2
    assert np.all(sh[0][:2] == 0)
    assert np.fabs(np.mean(sh[0][2:])-shp) < 1E-5
    psf = m.get_nl_coupled('PSF')
    # PSF/star noise uses the raw mean-square ellipticity (no bias correction).
    psfp = 4*np.mean(np.arange(4)**2)*aa
    assert np.all(psf[0][:2] == 0)
    assert np.fabs(np.mean(psf[0][2:])-psfp) < 1E-5
    star = m.get_nl_coupled('stars')
    starp = 4*np.mean(np.arange(4)**2)*aa
    assert np.all(star[0][:2] == 0)
    assert np.fabs(np.mean(star[0][2:])-starp) < 1E-5
|
xC-ellREPO_NAMEgrowth-historyPATH_START.@growth-history_extracted@growth-history-main@xCell@xcell@tests@test_mapper_kv450.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "handley-lab/anesthetic",
"repo_path": "anesthetic_extracted/anesthetic-master/docs/source/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
import sys
import os
sys.path.append(os.path.abspath('../../'))
def get_version(short=False):
    """Extract the project version from the ':Version:' line of ../../README.rst.

    With ``short=True`` only the 'major.minor' prefix is returned.
    Returns ``None`` implicitly if no ':Version:' line is found.
    """
    with open('../../README.rst') as readme:
        for line in readme:
            if ':Version:' not in line:
                continue
            # Line looks like ':Version: X.Y.Z' -> split(':') == ['', 'Version', ' X.Y.Z']
            version = line.split(':')[2].strip()
            if not short:
                return version
            return '%s.%s' % tuple(version.split('.')[:2])
# -- Project information -----------------------------------------------------
project = 'anesthetic'
copyright = '2019, Will Handley'
author = 'Will Handley and Lukas Hergt'
# The short X.Y version
version = get_version(True)
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.autosectionlabel',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.imgconverter',
'sphinx_copybutton',
'sphinx_autodoc_typehints',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for autodoc -------------------------------------------------
autodoc_default_options = {
'members': True,
'undoc-members': True,
}
autosummary_generate = True
# -- Options for autosectionlabel------------------------------------------
autosectionlabel_prefix_document = True
# -- Options for sphinx-copybutton ----------------------------------------
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
# -- Options for numpydoc -------------------------------------------------
numpydoc_show_inherited_class_members = False
numpydoc_show_class_members = False
# -- Options for matplotlib extension ----------------------------------------
plot_rcparams = {'savefig.bbox': 'tight'}
plot_apply_rcparams = True # if context option is used
plot_include_source = True
plot_html_show_source_link = False
plot_html_show_formats = False
plot_pre_code = "import numpy as np; from matplotlib import pyplot as plt; import pandas as pd"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'anestheticdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'anesthetic.tex', 'anesthetic Documentation',
'Will Handley and Lukas Hergt', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'anesthetic', 'anesthetic Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'anesthetic', 'anesthetic Documentation',
author, 'anesthetic', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'numpy':('https://numpy.org/doc/stable', None),
'scipy':('https://docs.scipy.org/doc/scipy', None),
'pandas':('https://pandas.pydata.org/pandas-docs/stable', None),
'matplotlib':('https://matplotlib.org/stable', None),
'getdist':('https://getdist.readthedocs.io/en/latest/', None)
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
handley-labREPO_NAMEanestheticPATH_START.@anesthetic_extracted@anesthetic-master@docs@source@conf.py@.PATH_END.py
|
{
"filename": "sampler.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/references/classification/sampler.py",
"type": "Python"
}
|
import math
import torch
import torch.distributed as dist
class RASampler(torch.utils.data.Sampler):
    """Distributed sampler with repeated augmentation.

    Every sample is repeated ``repetitions`` times and the repeated stream is
    striped across processes, so different augmented copies of the same sample
    are seen by different processes (GPUs). Heavily based on
    'torch.utils.data.DistributedSampler'; borrowed from the DeiT repo:
    https://github.com/facebookresearch/deit/blob/main/samplers.py
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, seed=0, repetitions=3):
        # Fall back to the process group for any unspecified distributed info.
        if num_replicas is None or rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available!")
            if num_replicas is None:
                num_replicas = dist.get_world_size()
            if rank is None:
                rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Per-process share of the repeated (len * repetitions) index stream.
        self.num_samples = int(math.ceil(len(self.dataset) * float(repetitions) / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # Only the largest multiple of 256 of the dataset is actually yielded,
        # split evenly over replicas.
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle
        self.seed = seed
        self.repetitions = repetitions

    def __iter__(self):
        # Deterministic order: seeded permutation per epoch, or identity.
        if self.shuffle:
            gen = torch.Generator()
            gen.manual_seed(self.seed + self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))

        # Repeat each index, then wrap around so the list splits evenly.
        repeated = []
        for sample_idx in order:
            repeated.extend([sample_idx] * self.repetitions)
        repeated += repeated[: (self.total_size - len(repeated))]
        assert len(repeated) == self.total_size

        # Stripe across processes and truncate to the selected count.
        own = repeated[self.rank : self.total_size : self.num_replicas]
        assert len(own) == self.num_samples
        return iter(own[: self.num_selected_samples])

    def __len__(self):
        # Length reflects what __iter__ actually yields.
        return self.num_selected_samples

    def set_epoch(self, epoch):
        # Called by the training loop so each epoch reshuffles differently.
        self.epoch = epoch
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@references@classification@sampler.py@.PATH_END.py
|
{
"filename": "DOCS.md",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/cli/DOCS.md",
"type": "Markdown"
}
|
# `langchain`
**Usage**:
```console
$ langchain [OPTIONS] COMMAND [ARGS]...
```
**Options**:
* `--help`: Show this message and exit.
* `-v, --version`: Print current CLI version.
**Commands**:
* `app`: Manage LangChain apps
* `serve`: Start the LangServe app, whether it's a...
* `template`: Develop installable templates.
## `langchain app`
Manage LangChain apps
**Usage**:
```console
$ langchain app [OPTIONS] COMMAND [ARGS]...
```
**Options**:
* `--help`: Show this message and exit.
**Commands**:
* `add`: Adds the specified template to the current...
* `new`: Create a new LangServe application.
* `remove`: Removes the specified package from the...
* `serve`: Starts the LangServe app.
### `langchain app add`
Adds the specified template to the current LangServe app.
e.g.:
langchain app add extraction-openai-functions
langchain app add git+ssh://git@github.com/efriis/simple-pirate.git
**Usage**:
```console
$ langchain app add [OPTIONS] [DEPENDENCIES]...
```
**Arguments**:
* `[DEPENDENCIES]...`: The dependency to add
**Options**:
* `--api-path TEXT`: API paths to add
* `--project-dir PATH`: The project directory
* `--repo TEXT`: Install templates from a specific github repo instead
* `--branch TEXT`: Install templates from a specific branch
* `--help`: Show this message and exit.
### `langchain app new`
Create a new LangServe application.
**Usage**:
```console
$ langchain app new [OPTIONS] NAME
```
**Arguments**:
* `NAME`: The name of the folder to create [required]
**Options**:
* `--package TEXT`: Packages to seed the project with
* `--help`: Show this message and exit.
### `langchain app remove`
Removes the specified package from the current LangServe app.
**Usage**:
```console
$ langchain app remove [OPTIONS] API_PATHS...
```
**Arguments**:
* `API_PATHS...`: The API paths to remove [required]
**Options**:
* `--help`: Show this message and exit.
### `langchain app serve`
Starts the LangServe app.
**Usage**:
```console
$ langchain app serve [OPTIONS]
```
**Options**:
* `--port INTEGER`: The port to run the server on
* `--host TEXT`: The host to run the server on
* `--app TEXT`: The app to run, e.g. `app.server:app`
* `--help`: Show this message and exit.
## `langchain serve`
Start the LangServe app, whether it's a template or an app.
**Usage**:
```console
$ langchain serve [OPTIONS]
```
**Options**:
* `--port INTEGER`: The port to run the server on
* `--host TEXT`: The host to run the server on
* `--help`: Show this message and exit.
## `langchain template`
Develop installable templates.
**Usage**:
```console
$ langchain template [OPTIONS] COMMAND [ARGS]...
```
**Options**:
* `--help`: Show this message and exit.
**Commands**:
* `new`: Creates a new template package.
* `serve`: Starts a demo app for this template.
### `langchain template new`
Creates a new template package.
**Usage**:
```console
$ langchain template new [OPTIONS] NAME
```
**Arguments**:
* `NAME`: The name of the folder to create [required]
**Options**:
* `--with-poetry / --no-poetry`: Don't run poetry install [default: no-poetry]
* `--help`: Show this message and exit.
### `langchain template serve`
Starts a demo app for this template.
**Usage**:
```console
$ langchain template serve [OPTIONS]
```
**Options**:
* `--port INTEGER`: The port to run the server on
* `--host TEXT`: The host to run the server on
* `--help`: Show this message and exit.
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@cli@DOCS.md@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "afd-illinois/grim",
"repo_path": "grim_extracted/grim-master/src/physics/conftest.py",
"type": "Python"
}
|
import sys
import pytest
def pytest_addoption(parser):
    """Register the grid/physics command-line options for the test session."""
    # (flag, default, help) triples; every option uses action="store".
    option_specs = [
        ("--build_path", None, 'set build directory path'),
        ("--N1", 64, 'grid zones in X1'),
        ("--N2", 64, 'grid zones in X2'),
        ("--N3", 64, 'grid zones in X3'),
        ("--dim", 3, 'grid dimension'),
        ("--blackHoleSpin", 0.5, 'black hole spin'),
        ("--hSlope", 0.7, 'Refinement factor in theta: 1 - no refinement'),
    ]
    for flag, default, help_text in option_specs:
        parser.addoption(flag, action="store", default=default, help=help_text)
def pytest_configure(config):
    """Make the build tree's grid/geometry/physics modules importable."""
    buildPath = config.getvalue('build_path')
    for subdir in ('grid', 'geometry', 'physics'):
        sys.path.append(buildPath + '/' + subdir + '/')
|
afd-illinoisREPO_NAMEgrimPATH_START.@grim_extracted@grim-master@src@physics@conftest.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/isosurface/caps/__init__.py",
"type": "Python"
}
|
import sys

# Python < 3.7 lacks module-level __getattr__ (PEP 562), so the validator
# classes must be imported eagerly; newer interpreters defer loading the
# submodules until first attribute access via plotly's relative_import helper.
if sys.version_info < (3, 7):
    from ._z import ZValidator
    from ._y import YValidator
    from ._x import XValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._z.ZValidator", "._y.YValidator", "._x.XValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@isosurface@caps@__init__.py@.PATH_END.py
|
{
"filename": "vis_sampler.py",
"repo_name": "HydraRadio/Hydra",
"repo_path": "Hydra_extracted/Hydra-main/hydra/vis_sampler.py",
"type": "Python"
}
|
import numpy as np
import numpy.fft as fft
def apply_sqrt_pspec(sqrt_pspec, v, group_id, ifft=False):
    """
    Multiply complex Fourier coefficients by the square root of the power
    spectrum. Implements "S^1/2 x" for a diagonal signal covariance S that is
    described by a 2D power spectrum in delay and fringe rate, optionally
    followed by a 2D inverse FFT back to real space.

    Parameters:
        sqrt_pspec (dict of array_like):
            Dictionary of power spectra, keyed by (redundant-group) ID.
        v (array_like):
            Complex Fourier coefficients, shape (Nvis, Ntau, Nfrate).
        group_id (array_like):
            Integer group ID of each baseline (length Nvis).
        ifft (bool):
            If True, apply a 2D inverse FFT to each weighted slice.

    Returns:
        z (array_like):
            Coefficients multiplied by the sqrt power spectrum (and
            optionally transformed), same shape as v. The input v is not
            modified.
    """
    assert group_id.size == v.shape[0], "Must have a group_id for each visibility"
    # Work on a copy so the caller's array is untouched.
    z = v.copy()
    for i, gid in enumerate(group_id):
        weighted = z[i] * sqrt_pspec[gid]
        z[i] = fft.ifft2(weighted) if ifft else weighted
    return z
def apply_operator(v, inv_noise_var, sqrt_pspec, group_id, gains, ants, antpairs):
    """
    Apply LHS operator to a vector of complex visibility Fourier coefficients.

    Parameters:
        v (array_like):
            Vector of model visibility Fourier modes to apply the operator to.
            Shape (Nvis, Ntau, Nfrate).
        inv_noise_var (array_like):
            Inverse noise variance array, with the same shape as the visibility
            data.
        sqrt_pspec (dict of array_like):
            Dictionary of power spectra. The key would normally be the ID of
            the redundant group that a baseline belongs to.
        group_id (array_like):
            Integer group ID of each baseline.
        gains (array_like):
            Complex gains, with the same ordering as `ants`. Expected shape is
            (Nants, Nfreqs, Ntimes).
        ants (array_like):
            Array of antenna IDs.
        antpairs (list of tuples):
            List of antenna pair tuples.

    Returns:
        lhs (array_like):
            Result of applying the LHS operator to the input vector, v.
    """
    assert len(v.shape) == 3, "v must have shape (Nvis, Ntau, Nfrate)"
    assert len(antpairs) == v.shape[0]

    # Apply sqrt power spectrum, inverse FFT, and divide by noise variance
    # (i.e. N^{-1} F^{-1} S^{1/2} v, per visibility).
    y = (
        apply_sqrt_pspec(sqrt_pspec=sqrt_pspec, v=v, group_id=group_id, ifft=True)
        * inv_noise_var
    )

    # Multiply by gains, then FFT back
    for k, bl in enumerate(antpairs):
        # Get antenna indices
        ant1, ant2 = bl
        i1 = np.where(ants == ant1)[0][0]
        i2 = np.where(ants == ant2)[0][0]

        # Apply S^1/2 to input vector, transform to data space, multiply by
        # gain product, and divide by noise variance.
        # The gain factor g1 * conj(g2) * g2 * conj(g1) equals |g1|^2 |g2|^2,
        # i.e. the squared magnitude of the baseline's gain product.
        y[k, :, :] = fft.fft2(
            y[k, :, :] * gains[i1] * gains[i2].conj() * gains[i2] * gains[i1].conj()
        )

    # Return full application of the LHS operator: identity term plus the
    # second S^{1/2} factor applied in Fourier space.
    return v + apply_sqrt_pspec(
        sqrt_pspec=sqrt_pspec, v=y, group_id=group_id, ifft=False
    )
def construct_rhs(
    data, inv_noise_var, sqrt_pspec, group_id, gains, ants, antpairs, realisation=False
):
    """
    Construct the RHS vector of the linear system. This will have shape
    (2*Nvis), as the real and imaginary parts are separated.

    Parameters:
        data (array_like):
            Observed visibility data.
        inv_noise_var (array_like):
            Inverse noise variance array, with the same shape as the visibility
            data.
        sqrt_pspec (dict of array_like):
            Dictionary of power spectra. The key would normally be the ID of
            the redundant group that a baseline belongs to.
        group_id (array_like):
            Integer group ID of each baseline.
        gains (array_like):
            Complex gains, with the same ordering as `ants`. Expected shape is
            (Nants, Nfreqs, Ntimes).
        ants (array_like):
            Array of antenna IDs.
        antpairs (list of tuples):
            List of antenna pair tuples.
        realisation (bool):
            Whether to include the random realisation terms in the RHS
            (constrained realisation), or just the deterministic terms (Wiener
            filter).

    Returns:
        rhs (array_like):
            The RHS of the linear system.
    """
    # fft: data -> fourier
    # ifft: fourier -> data
    # NOTE(review): axes are unpacked as (Nvis, Ntimes, Nfreqs) here and then
    # aliased Nfrate=Ntimes, Ntau=Nfreqs, while apply_operator documents
    # v as (Nvis, Ntau, Nfrate) -- confirm the intended axis order.
    Nvis, Ntimes, Nfreqs = data.shape
    Nfrate = Ntimes
    Ntau = Nfreqs

    # Switch to turn random realisations on or off (0 gives the pure
    # Wiener-filter RHS).
    realisation_switch = 1.0 if realisation else 0.0

    # (Term 2): \omega_y -- unit-variance complex white noise (the 1/sqrt(2)
    # splits the variance equally between real and imaginary parts).
    b = (
        realisation_switch
        * (
            1.0 * np.random.randn(Nvis, Nfrate, Ntau)
            + 1.0j * np.random.randn(Nvis, Nfrate, Ntau)
        )
        / np.sqrt(2.0)
    )

    # (Terms 1+3): S^1/2 F^dagger A^\dagger [ N^{-1} r + N^{-1/2} \omega_r ]
    omega_r = (
        realisation_switch
        * (1.0 * np.random.randn(*data.shape) + 1.0j * np.random.randn(*data.shape))
        / np.sqrt(2.0)
    )

    # Loop over visibilities, weight terms 1 and 3 by N^-1 and N^-1/2, then
    # apply conjugate of gains, FFT, and apply sqrt power spectrum
    y = (data * inv_noise_var) + (omega_r * np.sqrt(inv_noise_var))
    for k, bl in enumerate(antpairs):
        # Get antenna indices
        ant1, ant2 = bl
        i1 = np.where(ants == ant1)[0][0]
        i2 = np.where(ants == ant2)[0][0]
        # NOTE(review): gain factor here is conj(g1) * g2 -- verify this is
        # the intended adjoint of the gain application in apply_operator.
        y[k, :, :] = fft.fft2(y[k, :, :] * gains[i1].conj() * gains[i2])

    # Apply sqrt(S) operator
    y = apply_sqrt_pspec(sqrt_pspec, y, group_id, ifft=False)

    # Add the transformed Terms 1+3 to b vector
    return b + y
|
HydraRadioREPO_NAMEHydraPATH_START.@Hydra_extracted@Hydra-main@hydra@vis_sampler.py@.PATH_END.py
|
{
"filename": "_textsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolar/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``textsrc`` property of ``scatterpolar`` traces.

    All validation logic lives in the ``SrcValidator`` base class; this
    subclass only pins the property and trace names plus the defaults below.
    """

    def __init__(self, plotly_name="textsrc", parent_name="scatterpolar", **kwargs):
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults may be overridden by the caller via kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolar@_textsrc.py@.PATH_END.py
|
{
"filename": "exochat.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/exochat.py",
"type": "Python"
}
|
import openai
import sys, os
#import numpy as np
import json
from PyQt6.QtCore import Qt, QTextStream, QIODevice
from PyQt6.QtWidgets import (QApplication, QWidget, QVBoxLayout, QHBoxLayout,
QTextEdit, QLineEdit, QPushButton)
from PyQt6.QtGui import QColor,QIcon
from exochat_opt import show_chat_opt
#try:
# with open('./openai_api_key.txt', 'r') as fileai:
# api_key= str(fileai.readline().rstrip())
# openai.api_key = api_key
# key_found = True
#except:
# openai.api_key = ''
# key_found = False
class ChatWidget(QWidget):
    """Minimal chat window that forwards the typed message to the OpenAI
    completion API and appends the reply to a read-only transcript."""

    def __init__(self):
        super().__init__()
        self.setGeometry(1,1, 495, 325)
        # Read-only transcript of the conversation.
        self.conversation = QTextEdit()
        self.conversation.setReadOnly(True)
        # Single-line input plus the "Send" and options buttons.
        self.message = QLineEdit()
        self.send_button = QPushButton("Send")
        self.send_button.clicked.connect(self.send_message)
        self.opt_button = QPushButton("")
        self.opt_button.setIcon(QIcon('./lib/UI/opt_icon.png'))
        self.opt_button.clicked.connect(self.get_symbol)
        # Dialog used to edit the OpenAI request options.
        self.dialog_symbols = show_chat_opt(self)
        # Create the layout
        layout = QVBoxLayout()
        input_layout = QHBoxLayout()
        input_layout.addWidget(self.message)
        input_layout.addWidget(self.opt_button)
        input_layout.addWidget(self.send_button)
        layout.addWidget(self.conversation)
        layout.addLayout(input_layout)
        self.setLayout(layout)
        #self.redColor = QColor(255, 0, 0)
        # Text colors: black for the user's lines, blue for the model reply.
        self.blackColor = QColor(0, 0, 0)
        self.blueColor = QColor(0, 0, 255)
        self.read_settings()

    def read_settings(self):
        """Load the OpenAI request parameters and API key from the settings
        JSON; a ``*_dev`` settings file takes precedence when present."""
        if os.path.isfile('./lib/ES_settings_dev.json'):
            AIsettings = './lib/ES_settings_dev.json'
        else:
            AIsettings = './lib/ES_settings.json'
        f = open(AIsettings)
        sett = json.load(f)
        f.close()
        # Request parameters forwarded verbatim to openai.Completion.create.
        self.model = str(sett["openAI"]["model"])
        self.temperature = float(sett["openAI"]["temperature"])
        self.top_p = float(sett["openAI"]["top_p"])
        self.max_tokens = int(sett["openAI"]["max_tokens"])
        self.frequency_penalty = float(sett["openAI"]["frequency_penalty"])
        self.presence_penalty = float(sett["openAI"]["presence_penalty"])
        self.api_key=str(sett["openAI"]["api_key"])
        openai.api_key = self.api_key
        return

    def get_symbol(self):
        """Open the options dialog; settings are re-read on the next send."""
        but_n = self.dialog_symbols.get_window()
        # if but_n != None:
        #     print("Test")
        #     model=str(self.dialog_symbols.model)
        #     print(model)
        # else:
        #     return

    def send_message(self):
        """Send the typed message to the completion endpoint and append the
        reply (or a human-readable error summary) to the transcript."""
        # Get the message from the QLineEdit
        message = self.message.text()
        # Clear the QLineEdit
        self.message.clear()
        #if key_found == False:
        #    self.conversation.append(f"You do not have a valid openai api key\n")
        # Append the message to the conversation
        self.conversation.setTextColor(self.blackColor)
        self.conversation.append(f"You: {message}\n")
        # Use ChatGPT to generate a response
        try:
            # Re-read settings so option-dialog changes take effect
            # immediately.
            self.read_settings()
            # NOTE(review): uses the legacy ``openai.Completion`` API, which
            # requires openai-python < 1.0 -- confirm the pinned dependency.
            response = openai.Completion.create(
                model=self.model,
                prompt=f"You: {message}\n",
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                top_p=self.top_p,
                frequency_penalty=self.frequency_penalty,
                presence_penalty=self.presence_penalty,
                stop=["You:"]
            )
            #print(response)
            # Append the response to the conversation
            self.conversation.setTextColor(self.blueColor)
            self.conversation.append(f"{response['choices'][0]['text']}\n")
            self.conversation.setTextColor(self.blackColor)
        # except openai.error.RateLimitError:
        #     self.conversation.append(f"openai.error.RateLimitError: You exceeded your current quota, please check your plan and billing details.")
        # except openai.error.AuthenticationError:
        #     self.conversation.append(f"openai.error.AuthenticationError: Incorrect API key provided.")
        except Exception as e:
            # Deliberate catch-all: any failure (bad key, no network, API
            # outage) is reported to the user instead of crashing the GUI.
            self.conversation.append(f"Your Exo-Striker ChatBoot does not work! Mostlikely you: \n \n * Have not provided, or you do not have a valid openai API key. You can obtain an API key from https://beta.openai.com. Then add your API key as a single pressing the 'option' button, and try again! \n \n * You do not have an internet! \n The Exo-Striker ChatBoot is a clone of Chat GPT, which requres an internet connection. \n \n * Or... maybe OpenAI is busy... \n" )
            self.conversation.append(f"The exact error was: \n %s\n"%e)
        # Scroll to the bottom of the conversation
        self.conversation.verticalScrollBar().setValue(self.conversation.verticalScrollBar().maximum())
if __name__ == "__main__":
    # Standalone entry point: create the Qt application and show the chat
    # window.
    app = QApplication(sys.argv)
    chat = ChatWidget()
    chat.show()
    # PyQt6 removed the Qt4-era ``exec_`` alias (this file imports PyQt6);
    # the event loop must be started with ``exec()``.
    sys.exit(app.exec())
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@exochat.py@.PATH_END.py
|
{
"filename": "test_donutImageCheck.py",
"repo_name": "lsst-ts/ts_wep",
"repo_path": "ts_wep_extracted/ts_wep-main/tests/test_donutImageCheck.py",
"type": "Python"
}
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import unittest
import numpy as np
from lsst.ts.wep.donutImageCheck import DonutImageCheck
from lsst.ts.wep.utils import getModulePath
class TestDonutImageCheck(unittest.TestCase):
    """Test the DonutImageCheck class."""

    def setUp(self):
        # Fresh checker with default settings for every test.
        self.donutImgCheck = DonutImageCheck()

    def testIsEffDonutWithEffImg(self):
        """A real donut image is flagged effective; when ``returnEntro`` is
        set the entropy value is returned alongside the flag."""
        imgFile = os.path.join(
            getModulePath(),
            "tests",
            "testData",
            "testImages",
            "LSST_NE_SN25",
            "z11_0.25_intra.txt",
        )
        donutImg = np.loadtxt(imgFile)
        # This assumes this "txt" file is in the format
        # I[0,0] I[0,1]
        # I[1,0] I[1,1]
        # Flip vertically to match the expected image orientation.
        donutImg = donutImg[::-1, :]
        # test that by default the returnEntro is False
        self.assertFalse(self.donutImgCheck.returnEntro)
        # change that to True
        self.donutImgCheck.returnEntro = True
        # first check that now two outputs are present
        self.assertTrue(len(self.donutImgCheck.isEffDonut(donutImg)) == 2)
        # then test that the values are what is expected
        effective, entro = self.donutImgCheck.isEffDonut(donutImg)
        self.assertTrue(effective)
        np.testing.assert_allclose(
            entro,
            0.027858272652433826,
        )
        # change back to the default
        self.donutImgCheck.returnEntro = False
        # test that now we only get the boolean as before
        self.assertTrue(self.donutImgCheck.isEffDonut(donutImg))

    def testIsEffDonutWithConstImg(self):
        """Constant (all-zero / all-one) images are not effective donuts."""
        zeroDonutImg = np.zeros((120, 120))
        self.assertFalse(self.donutImgCheck.isEffDonut(zeroDonutImg))
        onesDonutImg = np.ones((120, 120))
        self.assertFalse(self.donutImgCheck.isEffDonut(onesDonutImg))

    def testIsEffDonutWithRandImg(self):
        """Pure noise is not an effective donut."""
        donutImg = np.random.rand(120, 120)
        self.assertFalse(self.donutImgCheck.isEffDonut(donutImg))
if __name__ == "__main__":
    # Do the unit test (run this file directly to execute the suite).
    unittest.main()
|
lsst-tsREPO_NAMEts_wepPATH_START.@ts_wep_extracted@ts_wep-main@tests@test_donutImageCheck.py@.PATH_END.py
|
{
"filename": "move.py",
"repo_name": "dfm/emcee",
"repo_path": "emcee_extracted/emcee-main/src/emcee/moves/move.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import numpy as np
__all__ = ["Move"]
class Move(object):
    """Base class for ensemble proposal moves."""

    def tune(self, state, accepted):
        """Hook for adapting proposal parameters; no-op by default."""
        pass

    def update(self, old_state, new_state, accepted, subset=None):
        """Copy accepted walkers from ``new_state`` into ``old_state``.

        Args:
            old_state: State holding the current ensemble; modified in place.
            new_state: State holding the proposed ensemble (may describe only
                the walkers selected by ``subset``).
            accepted: Boolean vector over the full ensemble indicating which
                walkers were accepted.
            subset (Optional): Boolean mask over the full ensemble indicating
                which walkers were included in the proposal. This can be
                used, for example, when updating only the primary ensemble
                in a :class:`RedBlueMove`.

        Returns:
            The updated ``old_state``.
        """
        # Default: the proposal covered every walker.
        if subset is None:
            subset = np.ones(len(old_state.coords), dtype=bool)

        # ``put`` indexes the full ensemble; ``take`` indexes new_state,
        # which only covers the subset.
        put = subset & accepted
        take = accepted[subset]

        old_state.coords[put] = new_state.coords[take]
        old_state.log_prob[put] = new_state.log_prob[take]
        if new_state.blobs is not None:
            if old_state.blobs is None:
                raise ValueError(
                    "If you start sampling with a given log_prob, "
                    "you also need to provide the current list of "
                    "blobs at that position."
                )
            old_state.blobs[put] = new_state.blobs[take]

        return old_state
|
dfmREPO_NAMEemceePATH_START.@emcee_extracted@emcee-main@src@emcee@moves@move.py@.PATH_END.py
|
{
"filename": "eccen_calc_orderl2.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/TidalPy/tides/modes/mode_calc_helper/eccen_calc_orderl2.py",
"type": "Python"
}
|
from typing import Dict, TYPE_CHECKING
from TidalPy.utilities.performance import njit
from ...eccentricity_funcs import orderl2
if TYPE_CHECKING:
from TidalPy.utilities.types import FloatArray
from ...eccentricity_funcs import EccenOutput
@njit(cacheable=True)
def eccentricity_truncation_2_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 2.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc2(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_4_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 4.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc4(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_6_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 6.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc6(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_8_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 8.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc8(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_10_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 10.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc10(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_12_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 12.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc12(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_14_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 14.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc14(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_16_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 16.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc16(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_18_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 18.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc18(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_20_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 20.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc20(eccentricity)}
@njit(cacheable=True)
def eccentricity_truncation_22_maxl_2(eccentricity: 'FloatArray') -> Dict[int, 'EccenOutput']:
    """Eccentricity functions (squared) for each order l, truncated at level 22.

    Max supported l = 2.

    Parameters
    ----------
    eccentricity : FloatArray
        Orbital eccentricity.

    Returns
    -------
    result_by_orderl : Dict[int, EccenOutput]
        Truncated eccentricity function G^2_lpq, keyed by order-l.
    """
    return {2: orderl2.eccentricity_funcs_trunc22(eccentricity)}
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@TidalPy@tides@modes@mode_calc_helper@eccen_calc_orderl2.py@.PATH_END.py
|
{
"filename": "GetRefractiveIndexRange.py",
"repo_name": "claudiok/clsim",
"repo_path": "clsim_extracted/clsim-master/python/util/GetRefractiveIndexRange.py",
"type": "Python"
}
|
#
# Copyright (c) 2012
# Claudio Kopper <claudio.kopper@icecube.wisc.edu>
# and the IceCube Collaboration <http://www.icecube.wisc.edu>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#
# $Id$
#
# @file GetRefractiveIndexRange.py
# @version $Revision$
# @date $Date$
# @author Claudio Kopper
#
import numpy
def getGroupRefIndex_derivative(wavelength):
    # Despite the "_derivative" suffix, this computes the group refractive
    # index from the phase index n and its dispersion y = dn/dlambda:
    #   n_group = n / (1 + y * lambda / n)
    # (same formula as __MakeGroupRefIndexFunctionFromPhaseRefIndex below).
    # NOTE(review): ``getPhaseRefIndex`` and ``getDispersionPhase`` are not
    # defined or imported anywhere in this module, so calling this function
    # as-is raises NameError -- confirm where these are meant to come from.
    n_inv = 1./getPhaseRefIndex(wavelength)
    y = getDispersionPhase(wavelength);
    return 1./((1.0 + y*wavelength*n_inv) * n_inv)
def __MakeGroupRefIndexFunctionFromPhaseRefIndex(phaseRefIndex):
    """Build a vectorized group-refractive-index function from a phase-index
    curve: n_group(w) = n(w) / (1 + n'(w) * w / n(w)).

    Raises RuntimeError if the phase index cannot supply its derivative.
    """
    if not phaseRefIndex.HasDerivative():
        raise RuntimeError("Phase refractive index does not have the option to calculate the derivative. Cannot calculate group refractive index.")

    def _groupIndexAt(wlen):
        n = phaseRefIndex.GetValue(wlen)
        return n / (1. + phaseRefIndex.GetDerivative(wlen) * wlen / n)

    return numpy.vectorize(_groupIndexAt)
def GetGroupRefractiveIndexRange(mediumProperties):
    """
    Returns the minimum and maximum group refractive indices as tuple
    over all ice layers for a given mediumProperties object.
    """
    minIndex = None
    maxIndex = None
    wlenLo = mediumProperties.GetMinWavelength()
    wlenHi = mediumProperties.GetMaxWavelength()

    for layer in range(mediumProperties.LayersNum):
        lo = wlenLo
        hi = wlenHi

        # Prefer an explicit per-layer group index; otherwise derive one
        # from the phase index and its derivative.
        override = mediumProperties.GetGroupRefractiveIndexOverride(layer)
        if override:
            lo = max(lo, override.GetMinWlen())
            hi = min(hi, override.GetMaxWlen())
            evalFunc = numpy.vectorize(override.GetValue)
        else:
            phase = mediumProperties.GetPhaseRefractiveIndex(layer)
            lo = max(lo, phase.GetMinWlen())
            hi = min(hi, phase.GetMaxWlen())
            evalFunc = __MakeGroupRefIndexFunctionFromPhaseRefIndex(phase)

        # Dense sampling of the valid wavelength range for this layer.
        samples = evalFunc(numpy.linspace(lo, hi, 10000, endpoint=True))
        layerMin = samples.min()
        layerMax = samples.max()

        if (minIndex is None) or (layerMin < minIndex):
            minIndex = layerMin
        if (maxIndex is None) or (layerMax > maxIndex):
            maxIndex = layerMax

    return (minIndex, maxIndex)
def GetPhaseRefractiveIndexRange(mediumProperties):
    """
    Returns the minimum and maximum phase refractive indices as tuple
    over all ice layers for a given mediumProperties object.
    """
    globalMinWlen = mediumProperties.GetMinWavelength()
    globalMaxWlen = mediumProperties.GetMaxWavelength()
    minIndex = None
    maxIndex = None
    for layer in range(mediumProperties.LayersNum):
        phase = mediumProperties.GetPhaseRefractiveIndex(layer)
        # clamp the sampling range to where this layer's curve is defined
        wlenLo = max(globalMinWlen, phase.GetMinWlen())
        wlenHi = min(globalMaxWlen, phase.GetMaxWlen())
        sampler = numpy.vectorize(lambda wlen: phase.GetValue(wlen))
        samples = sampler(numpy.linspace(wlenLo, wlenHi, 10000, endpoint=True))
        layerMin = samples[numpy.argmin(samples)]
        layerMax = samples[numpy.argmax(samples)]
        if (minIndex is None) or (layerMin < minIndex):
            minIndex = layerMin
        if (maxIndex is None) or (layerMax > maxIndex):
            maxIndex = layerMax
    return (minIndex, maxIndex)
|
claudiokREPO_NAMEclsimPATH_START.@clsim_extracted@clsim-master@python@util@GetRefractiveIndexRange.py@.PATH_END.py
|
{
"filename": "ex_misc_tmodel.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/examples/ex_misc_tmodel.py",
"type": "Python"
}
|
# Example script: linear regression with t-distributed errors (TLinearModel),
# fit by maximum likelihood and compared against OLS on simulated data.
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.miscmodels import TLinearModel
from statsmodels.tools.numdiff import approx_hess

#Example:
#np.random.seed(98765678)
nobs = 50    # number of observations
nvars = 6    # number of regressors including the constant
df = 3       # degrees of freedom of the t-distributed noise

# simulate exog (5 random regressors + constant) and endog with t noise
rvs = np.random.randn(nobs, nvars-1)
data_exog = sm.add_constant(rvs, prepend=False)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(df, size=nobs)
print('variance of endog:', data_endog.var())
# NOTE(review): the true slopes are [0.1]*(nvars-1) with intercept 0.9;
# this print shows nvars+1 values -- verify against the DGP above
print('true parameters:', [0.1]*nvars + [0.9])

# baseline OLS fit for comparison and for starting values
res_ols = sm.OLS(data_endog, data_exog).fit()
print('\nResults with ols')
print('----------------')
print(res_ols.scale)
print(np.sqrt(res_ols.scale))
print(res_ols.params)
print(res_ols.bse)

# method-of-moments estimate of df from the kurtosis of OLS residuals
# (excess kurtosis of t(df) is 6/(df-4), inverted here)
kurt = stats.kurtosis(res_ols.resid)
df_fromkurt = 6./kurt + 4
print('df_fromkurt from ols residuals', df_fromkurt)
print(stats.t.stats(df_fromkurt, moments='mvsk'))
print(stats.t.stats(df, moments='mvsk'))

# MLE model; parameter vector is [beta..., df, scale]
modp = TLinearModel(data_endog, data_exog)
start_value = 0.1*np.ones(data_exog.shape[1]+2)
#start_value = np.zeros(data_exog.shape[1]+2)
#start_value[:nvars] = sm.OLS(data_endog, data_exog).fit().params
start_value[:nvars] = res_ols.params
start_value[-2] = df_fromkurt  #10
start_value[-1] = np.sqrt(res_ols.scale) #0.5
modp.start_params = start_value

#adding fixed parameters
# fixdf holds NaN for free parameters; a value fixes that parameter
fixdf = np.nan * np.zeros(modp.start_params.shape)
fixdf[-2] = 5    # would fix df at 5 when fixone is enabled

fixone = 0   # toggle: 1 = fix df, 0 = estimate all parameters
if fixone:
    modp.fixed_params = fixdf
    modp.fixed_paramsmask = np.isnan(fixdf)
    modp.start_params = modp.start_params[modp.fixed_paramsmask]
else:
    modp.fixed_params = None
    modp.fixed_paramsmask = None

print('\nResults with TLinearModel')
print('-------------------------')
# first optimize with Nelder-Mead (robust, derivative-free) ...
resp = modp.fit(start_params = modp.start_params, disp=1, method='nm',
                maxfun=10000, maxiter=5000)#'newton')
#resp = modp.fit(start_params = modp.start_params, disp=1, method='newton')
print('using Nelder-Mead')
print(resp.params)
print(resp.bse)

# ... then polish with Newton from the Nelder-Mead solution
resp2 = modp.fit(start_params = resp.params, method='Newton')
print('using Newton')
print(resp2.params)
print(resp2.bse)

# inspect curvature of the log-likelihood at the starting values
hb=-approx_hess(modp.start_params, modp.loglike, epsilon=-1e-4)
tmp = modp.loglike(modp.start_params)
print(tmp.shape)
print('eigenvalues of numerical Hessian')
print(np.linalg.eigh(np.linalg.inv(hb))[0])

#store_params is only available in original test script
##pp=np.array(store_params)
##print pp.min(0)
##print pp.max(0)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@examples@ex_misc_tmodel.py@.PATH_END.py
|
{
"filename": "read_class.py",
"repo_name": "pyspeckit/pyspeckit",
"repo_path": "pyspeckit_extracted/pyspeckit-master/pyspeckit/spectrum/readers/read_class.py",
"type": "Python"
}
|
"""
------------------------
GILDAS CLASS file reader
------------------------
Read a CLASS file into an :class:`pyspeckit.spectrum.ObsBlock`
"""
from __future__ import print_function
from six.moves import xrange
from six import iteritems
import six
import astropy.io.fits as pyfits
import numpy
import numpy as np
from numpy import pi
from astropy import log
# from astropy.time import Time
from astropy import units as u
import pyspeckit
import sys
import re
try:
from astropy.utils.console import ProgressBar
except ImportError:
ProgressBar = lambda x: None
ProgressBar.update = lambda x: None
import struct
import time
# 'range' is needed as a keyword
irange = range
def print_timing(func):
    """
    Prints execution time of decorated function.

    Included here because CLASS files can take a little while to read;
    this should probably be replaced with a progressbar.

    Uses functools.wraps so the wrapped function keeps its full
    metadata; the original only copied ``__doc__`` by hand, leaving
    ``__name__`` pointing at 'wrapper'.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*arg, **kwargs):
        t1 = time.time()
        res = func(*arg, **kwargs)
        t2 = time.time()
        # log the wall-clock duration of this call
        log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))
        return res
    return wrapper
def ensure_bytes(string):
    """
    Ensure a given string is in byte form
    """
    # on Python 2, str already is the byte type
    if not six.PY3:
        return str(string)
    return bytes(string, 'utf-8')
""" Specification: http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html """
filetype_dict = {'1A ':'Multiple_IEEE',
'1 ':'Multiple_Vax',
'1B ':'Multiple_EEEI',
'2A ':'v2',
'2 ':'v2',
'2B ':'v2',
'9A ':'Single_IEEE',
'9 ':'Single_Vax',
'9B ':'Single_EEEI'}
for key in list(filetype_dict.keys()):
filetype_dict[ensure_bytes(key)] = filetype_dict[key]
fileversion_dict = {'1A ':'v1',
'2A ':'v2',
'9A ':'v1', # untested
}
for key in list(fileversion_dict.keys()):
fileversion_dict[ensure_bytes(key)] = fileversion_dict[key]
record_lengths = {'1A': 512,
'2A': 1024*4}
header_id_numbers = {0: 'USER CODE',
-1: 'COMMENT',
-2: 'GENERAL',
-3: 'POSITION',
-4: 'SPECTRO',
-5: 'BASELINE',
-6: 'HISTORY',
-7: 'UNKNOWN-APEX',
# -8: 'SWITCH',
-9: 'GAUSSFIT', # "private"; see class-interfaces-private.f90
-10: 'DRIFT',
-11: 'BEAMSWITCH', # "private"; see class-interfaces-private.f90
-12: 'SHELLFIT', # "private"; see class-interfaces-private.f90
-13: 'NH3FIT', # "private"; see class-interfaces-private.f90
-14: 'CALIBRATION',
-18: 'ABSFIT', # "private"; see class-interfaces-private.f90
}
header_id_lengths = {-2: 9, # may really be 10?
-3: 17,
-4: 17,
-5: None, # variable length
-6: 3, # variable length
-14: 25,
}
# from packages/classic/lib/classic_mod.f90
filedescv2_nw1=14
"""
GENERAL
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
! Written in the entry
real(kind=8) :: ut ! 1-2 [ rad] UT of observation
real(kind=8) :: st ! 3-4 [ rad] LST of observation
real(kind=4) :: az ! 5 [ rad] Azimuth
real(kind=4) :: el ! 6 [ rad] Elevation
real(kind=4) :: tau ! 7 [neper] Opacity
real(kind=4) :: tsys ! 8 [ K] System temperature
real(kind=4) :: time ! 9 [ s] Integration time
! Not in this section in file
integer(kind=4) :: xunit ! [ code] X unit (if X coordinates section is present)
! NOT in data ---
character(len=12) :: cdobs ! [string] Duplicate of dobs
character(len=12) :: cdred ! [string] Duplicate of dred
"""
keys_lengths = {
'unknown': [
('NUM' ,1,'int32'), # Observation number
('VER' ,1,'int32'), # Version number
('TELES' ,3,'|S12') , # Telescope name
('DOBS' ,1,'int32'), # Date of observation
('DRED' ,1,'int32'), # Date of reduction
('TYPEC' ,1,'int32'), # Type of coordinates
('KIND' ,1,'int32'), # Type of data
('QUAL' ,1,'int32'), # Quality of data
('SCAN' ,1,'int32'), # Scan number
('SUBSCAN' ,1,'int32'), # Subscan number
],
'COMMENT': [ # -1
('LTEXT',1,'int32'), # integer(kind=4) :: ltext ! Length of comment
('CTEXT',1024//4,'|S1024'), # character ctext*1024 ! Comment string
],
'GENERAL': [ # -2
('UT' ,2,'float64'), # rad UT of observation
('ST' ,2,'float64'), # rad LST of observation
('AZ' ,1,'float32'), # rad Azimuth
('EL' ,1,'float32'), # rad Elevation
('TAU' ,1,'float32'), # neper Opacity
('TSYS' ,1,'float32'), # K System temperature
('TIME' ,1,'float32'), # s Integration time
# XUNIT should not be there?
#( 'XUNIT' ,1,'int32'), # code X unit (if xcoord_sec is present)
] ,
'POSITION': [ # -3
('SOURC',3,'|S12') , # [ ] Source name
('EPOCH',1,'float32'), # [ ] Epoch of coordinates
('LAM' ,2,'float64'), #[rad] Lambda
('BET' ,2,'float64'), #[rad] Beta
('LAMOF',1,'float32'), # [rad] Offset in Lambda
('BETOF',1,'float32'), # [rad] Offset in Beta
('PROJ' ,1,'int32') , # [rad] Projection system
('SL0P' ,1,'float64'), # lambda of descriptive system # MAY NOT EXIST IN OLD CLASS
('SB0P' ,1,'float64'), # beta of descriptive system # MAY NOT EXIST IN OLD CLASS
('SK0P' ,1,'float64'), # angle of descriptive system # MAY NOT EXIST IN OLD CLASS
],
'SPECTRO': [ # -4
#('align' ,1,'int32'), # [ ] Alignment padding
('LINE' ,3,'|S12'), # [ ] Line name
('RESTF' ,2,'float64'), # [ MHz] Rest frequency
('NCHAN' ,1,'int32'), # [ ] Number of channels
('RCHAN' ,1,'float32'), # [ ] Reference channels
('FRES' ,1,'float32'), # [ MHz] Frequency resolution
('FOFF' ,1,'float32'), # [ MHz] Frequency offset
('VRES' ,1,'float32'), # [km/s] Velocity resolution
('VOFF' ,1,'float32'), # [km/s] Velocity at reference channel
('BAD' ,1,'float32'), # [ ] Blanking value
#('ALIGN_1',1,'int32'), # [ ] Alignment padding
('IMAGE' ,2,'float64'), # [ MHz] Image frequency
#('ALIGN_2',1,'int32'), # [ ] Alignment padding
('VTYPE' ,1,'int32'), # [code] Type of velocity
('DOPPLER',2,'float64'), # [ ] Doppler factor = -V/c (CLASS convention)
],
'CALIBRATION': [ # -14
('ALIGN',1,'int32'), # BUFFER (it's a zero - it is not declared in the docs!!!!)
('BEEFF',1,'float32'), # [ ] Beam efficiency
('FOEFF',1,'float32'), # [ ] Forward efficiency
('GAINI',1,'float32'), # [ ] Image/Signal gain ratio
('H2OMM',1,'float32'), # [ mm] Water vapor content
('PAMB',1,'float32'), # [ hPa] Ambient pressure
('TAMB',1,'float32'), # [ K] Ambient temperature
('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band
('TCHOP',1,'float32'), # [ K] Chopper temperature
('TCOLD',1,'float32'), # [ K] Cold load temperature
('TAUS',1,'float32'), # [neper] Opacity in signal band
('TAUI',1,'float32'), # [neper] Opacity in image band
('TATMI',1,'float32'), # [ K] Atmosphere temp. in image band
('TREC',1,'float32'), # [ K] Receiver temperature
('CMODE',1,'int32'), # [ code] Calibration mode
('ATFAC',1,'float32'), # [ ] Applied calibration factor
('ALTI',1,'float32'), # [ m] Site elevation
('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold
('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement
('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement
('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS
('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS
],
'BASELINE':[
('DEG',1,'int32'), #! [ ] Degree of last baseline
('SIGFI',1,'float32'), #! [Int. unit] Sigma
('AIRE',1,'float32'), #! [Int. unit] Area under windows
('NWIND',1,'int32'), #! [ ] Number of line windows
# WARNING: These should probably have 'n', the second digit, = NWIND
# The docs are really unclear about this, they say "W1(MWIND)"
('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows
('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows
('SINUS',3,'float32'), #![] Sinus baseline results
],
'DRIFT':[ # 16?
('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::
('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::
('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::
('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::
('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::
('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::
('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::
('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::
('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::
('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::
('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::
('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::
('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::
('COLLE',1,'float32'), #! [ ?] Collimation error El real(kind=4) ::
],
}
def _read_bytes(f, n):
    '''Read the next `n` bytes (from idlsave)'''
    return f.read(n)

"""
Warning: UNCLEAR what endianness should be!
Numpy seemed to get it right, and I think numpy assumes NATIVE endianness
"""

def _read_byte(f):
    '''Read a single byte (from idlsave)

    NOTE(review): consumes a full 4-byte word and keeps only its first
    byte -- presumably because CLASS records are word-aligned; confirm
    before reusing elsewhere.
    '''
    return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])

def _read_int16(f):
    '''Read a signed 16-bit integer (from idlsave)

    NOTE(review): consumes 4 bytes and unpacks only the upper half
    ([2:4]) of the word -- confirm this matches the on-disk layout.
    '''
    return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])

def _read_int32(f):
    '''Read a signed 32-bit integer (from idlsave)'''
    return numpy.int32(struct.unpack('=i', f.read(4))[0])

def _read_int64(f):
    '''Read a signed 64-bit integer '''
    return numpy.int64(struct.unpack('=q', f.read(8))[0])

def _read_float32(f):
    '''Read a 32-bit float (from idlsave)'''
    return numpy.float32(struct.unpack('=f', f.read(4))[0])

def _align_32(f):
    '''Align to the next 32-bit position in a file (from idlsave)'''
    pos = f.tell()
    if pos % 4 != 0:
        # skip forward to the next 4-byte boundary
        f.seek(pos + 4 - pos % 4)
    return

def _read_word(f,length):
    # Read a `length`-byte character word and realign the stream to the
    # next 32-bit boundary; returns None when length <= 0.
    if length > 0:
        chars = _read_bytes(f, length)
        _align_32(f)
    else:
        chars = None
    return chars

def _read_int(f):
    # NOTE(review): unlike the other _read_* helpers this returns the raw
    # 1-tuple from struct.unpack rather than a scalar; changing it would
    # alter the interface, so the inconsistency is only documented here.
    return struct.unpack('i',f.read(4))
def is_ascii(s):
    """Check if there are non-ascii characters in a byte string

    Parameters
    ----------
    s : bytes
        The byte string to be checked

    Returns
    -------
    is_ascii : bool
        Returns True if all characters in the string are ascii.  False
        otherwise.
    """
    try:
        # a successful strict-ascii decode means every byte is ascii;
        # the original implementation let UnicodeDecodeError propagate
        # here, contradicting its own docstring ("False otherwise")
        s.decode('ascii')
    except UnicodeDecodeError:
        return False
    return True
def is_all_null(s):
    """Return True if every element of `s` is a NUL character/byte.

    Works for both str and bytes on Python 2 and 3.  Iterating over
    bytes yields ints on Python 3, so the original pair of comparisons
    (x=='\\x00' / x==b'\\x00') was always False for Py3 bytes input;
    comparing against 0 as well fixes that.
    """
    return all(x in ('\x00', b'\x00', 0) for x in s)
"""
from clic_file.f90: v1, v2
integer(kind=4) :: bloc ! 1 : observation address [records] integer(kind=8) :: bloc ! 1- 2: observation address [records] integer(kind=4) :: bloc ! 1 : block read from index
integer(kind=4) :: num ! 2 : observation number integer(kind=4) :: word ! 3 : address offset [4-bytes] integer(kind=4) :: num ! 2 : number read
integer(kind=4) :: ver ! 3 : observation version integer(kind=4) :: ver ! 4 : observation version integer(kind=4) :: ver ! 3 : version read from index
integer(kind=4) :: sourc(3) ! 4- 6: source name integer(kind=8) :: num ! 5- 6: observation number character(len=12) :: csour ! 4- 6: source read from index
integer(kind=4) :: line(3) ! 7- 9: line name integer(kind=4) :: sourc(3) ! 7- 9: source name character(len=12) :: cline ! 7- 9: line read from index
integer(kind=4) :: teles(3) ! 10-12: telescope name integer(kind=4) :: line(3) ! 10-12: line name character(len=12) :: ctele ! 10-12: telescope read from index
integer(kind=4) :: dobs ! 13 : observation date [class_date] integer(kind=4) :: teles(3) ! 13-15: telescope name integer(kind=4) :: dobs ! 13 : date obs. read from index
integer(kind=4) :: dred ! 14 : reduction date [class_date] integer(kind=4) :: dobs ! 16 : observation date [class_date] integer(kind=4) :: dred ! 14 : date red. read from index
real(kind=4) :: off1 ! 15 : lambda offset [radian] integer(kind=4) :: dred ! 17 : reduction date [class_date] real(kind=4) :: off1 ! 15 : read offset 1
real(kind=4) :: off2 ! 16 : beta offset [radian] real(kind=4) :: off1 ! 18 : lambda offset [radian] real(kind=4) :: off2 ! 16 : read offset 2
integer(kind=4) :: typec ! 17 : coordinates types real(kind=4) :: off2 ! 19 : beta offset [radian] integer(kind=4) :: type ! 17 : type of read offsets
integer(kind=4) :: kind ! 18 : data kind integer(kind=4) :: typec ! 20 : coordinates types integer(kind=4) :: kind ! 18 : type of observation
integer(kind=4) :: qual ! 19 : data quality integer(kind=4) :: kind ! 21 : data kind integer(kind=4) :: qual ! 19 : Quality read from index
integer(kind=4) :: scan ! 20 : scan number integer(kind=4) :: qual ! 22 : data quality integer(kind=4) :: scan ! 20 : Scan number read from index
integer(kind=4) :: proc ! 21 : procedure type integer(kind=4) :: scan ! 23 : scan number real(kind=4) :: posa ! 21 : Position angle
integer(kind=4) :: itype ! 22 : observation type integer(kind=4) :: proc ! 24 : procedure type integer(kind=4) :: subscan ! 22 : Subscan number
real(kind=4) :: houra ! 23 : hour angle [radian] integer(kind=4) :: itype ! 25 : observation type integer(kind=4) :: pad(10) ! 23-32: Pad to 32 words
integer(kind=4) :: project ! 24 : project name real(kind=4) :: houra ! 26 : hour angle [radian]
integer(kind=4) :: pad1 ! 25 : unused word integer(kind=4) :: project(2) ! 27 : project name
integer(kind=4) :: bpc ! 26 : baseline bandpass cal status integer(kind=4) :: bpc ! 29 : baseline bandpass cal status
integer(kind=4) :: ic ! 27 : instrumental cal status integer(kind=4) :: ic ! 30 : instrumental cal status
integer(kind=4) :: recei ! 28 : receiver number integer(kind=4) :: recei ! 31 : receiver number
real(kind=4) :: ut ! 29 : UT [s] real(kind=4) :: ut ! 32 : UT [s]
integer(kind=4) :: pad2(3) ! 30-32: padding to 32 4-bytes word
equivalently
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
"""
"""
index.f90:
call conv%read%i8(data(1), indl%bloc, 1) ! bloc
call conv%read%i4(data(3), indl%word, 1) ! word
call conv%read%i8(data(4), indl%num, 1) ! num
call conv%read%i4(data(6), indl%ver, 1) ! ver
call conv%read%cc(data(7), indl%csour, 3) ! csour
call conv%read%cc(data(10),indl%cline, 3) ! cline
call conv%read%cc(data(13),indl%ctele, 3) ! ctele
call conv%read%i4(data(16),indl%dobs, 1) ! dobs
call conv%read%i4(data(17),indl%dred, 1) ! dred
call conv%read%r4(data(18),indl%off1, 1) ! off1
call conv%read%r4(data(19),indl%off2, 1) ! off2
call conv%read%i4(data(20),indl%type, 1) ! type
call conv%read%i4(data(21),indl%kind, 1) ! kind
call conv%read%i4(data(22),indl%qual, 1) ! qual
call conv%read%r4(data(23),indl%posa, 1) ! posa
call conv%read%i8(data(24),indl%scan, 1) ! scan
call conv%read%i4(data(26),indl%subscan,1) ! subscan
if (isv3) then
call conv%read%r8(data(27),indl%ut, 1) ! ut
else
"""
def _read_indices(f, file_description):
    """
    Read all entry indices from an open CLASS file.

    Parameters
    ----------
    f : file-like
        Open binary file; each index seeks to its own position.
    file_description : dict
        Parsed first record (see ``_read_first_record``).  'xnext' is
        the next *available* entry number, so xnext-1 entries exist.

    Returns
    -------
    list of dict
        One index dictionary (from ``_read_index``) per entry.
    """
    # NOTE: an older, commented-out code path computed the extension
    # record offsets here ((aex-1)*reclen*4) but never used them;
    # _read_index locates each entry itself via _find_index, so that
    # dead computation has been removed.
    all_indices = [_read_index(f,
                               filetype=file_description['version'],
                               # 1-indexed files
                               entry_number=ii+1,
                               file_description=file_description,
                              )
                   for ii in range(file_description['xnext']-1)]
    return all_indices
def _find_index(entry_number, file_description, return_position=False):
    """
    Locate the Entry Index for the 1-based `entry_number`.

    Returns the absolute byte position of the index when
    `return_position` is True, otherwise the pair
    (absolute record number, word offset within the record).
    The kex/ken/kb/kbl/k names mirror the GILDAS Fortran original.
    """
    if file_description['gex'] == 10:
        # linear extension growth: every extension holds 'lex1' entries
        kex=(entry_number-1)//file_description['lex1'] + 1
    else:
        # exponential growth:
        #kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1
        # linear scan over the cumulative entry counts instead of the
        # Fortran dichotomic search
        kex = len([xx for xx in file_description['lexn'] if xx<entry_number])
    # relative entry number within extension kex (starts from 1)
    ken = entry_number - file_description['lexn'][kex-1]
    #! Find ken (relative entry number in the extension, starts from 1)
    #ken = entry_num - file%desc%lexn(kex-1)
    kb = ((ken-1)*file_description['lind'])//file_description['reclen']
    #kb = ((ken-1)*file%desc%lind)/file%desc%reclen ! In the extension, the
    # ! relative record position (as an offset, starts from 0) where the
    # ! Entry Index starts. NB: there can be a non-integer number of Entry
    # ! Indexes per record
    # Subtract 1: 'aex' is 1-indexed
    kbl = (file_description['aex'][kex-1]+kb)-1
    # kbl = file%desc%aex(kex)+kb ! The absolute record number where the Entry Index goes
    k = ((ken-1)*file_description['lind']) % file_description['reclen']
    #k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1 ! = in the record, the
    # ! first word of the Entry Index of the entry number 'entry_num'
    if return_position:
        # records and words are 4-byte units, hence the *4
        return (kbl*file_description['reclen']+k)*4
    else:
        return kbl,k
def _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,
                entry_number=None, file_description=None):
    """
    Read a single entry index from a CLASS file.

    Give either `position` (absolute byte offset) or `entry_number`
    (1-based; located via ``_find_index``, which then also requires
    `file_description`).  Returns a dict of index keywords; the v1 and
    v2 layouts are normalized to share common keys (SOURC, LINE, DOBS,
    OFF1/OFF2, SCAN, ...).
    """
    if position is not None:
        f.seek(position)
    if entry_number is not None:
        indpos = _find_index(entry_number, file_description, return_position=True)
        f.seek(indpos)
    x0 = f.tell()  # start offset, used for validation/error messages
    if filetype in ('1A ','v1', 1):
        # ---- version 1 index layout (see the clic_file.f90 notes above) ----
        log.debug('Index filetype 1A')
        index = {
                "XBLOC":_read_int32(f),
                "XNUM":_read_int32(f),
                "XVER":_read_int32(f),
                "XSOURC":_read_word(f,12),
                "XLINE":_read_word(f,12),
                "XTEL":_read_word(f,12),
                "XDOBS":_read_int32(f),
                "XDRED":_read_int32(f),
                "XOFF1":_read_float32(f),# first offset (real, radians)
                "XOFF2":_read_float32(f),# second offset (real, radians)
                "XTYPE":_read_int32(f),# coordinate system ('EQ'', 'GA', 'HO')
                "XKIND":_read_int32(f),# Kind of observation (0: spectral, 1: continuum, )
                "XQUAL":_read_int32(f),# Quality (0-9)
                "XSCAN":_read_int32(f),# Scan number
                }
        # alias the v1 'X*' names onto the v2 key names for compatibility
        index['BLOC'] = index['XBLOC'] # v2 compatibility
        index['WORD'] = 1 # v2 compatibility
        index['SOURC'] = index['CSOUR'] = index['XSOURC']
        index['DOBS'] = index['CDOBS'] = index['XDOBS']
        index['CTELE'] = index['XTEL']
        index['LINE'] = index['XLINE']
        index['OFF1'] = index['XOFF1']
        index['OFF2'] = index['XOFF2']
        index['QUAL'] = index['XQUAL']
        index['SCAN'] = index['XSCAN']
        index['KIND'] = index['XKIND']
        if clic: # use header set up in clic
            nextchunk = {
                "XPROC":_read_int32(f),# "procedure type"
                "XITYPE":_read_int32(f),#
                "XHOURANG":_read_float32(f),#
                "XPROJNAME":_read_int32(f),#
                "XPAD1":_read_int32(f),
                "XBPC" :_read_int32(f),
                "XIC" :_read_int32(f),
                "XRECEI" :_read_int32(f),
                "XUT":_read_float32(f),
                "XPAD2":numpy.fromfile(f,count=3,dtype='int32') # BLANK is NOT ALLOWED!!! It is a special KW
                }
        else:
            nextchunk = {"XPOSA":_read_float32(f),
                         "XSUBSCAN":_read_int32(f),
                         'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),
                        }
            nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']
            nextchunk['POSA'] = nextchunk['XPOSA']
        index.update(nextchunk)
        # a v1 index should span exactly 128 bytes; skip any remainder
        # (NOTE(review): the message says "bits" but these are bytes)
        if (f.tell() - x0 != 128):
            missed_bits = (f.tell()-x0)
            X = f.read(128-missed_bits)
            if DEBUG: print("read_index missed %i bits: %s" % (128-missed_bits,X))
            #raise IndexError("read_index did not successfully read 128 bytes at %i. Read %i bytes." % (x0,f.tell()-x0))
        # sanity check: the character words must be plain ascii
        if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):
            raise ValueError("Invalid index read from {0}.".format(x0))
    elif filetype in ('2A ','v2', 2):
        # ---- version 2 index layout (see the index.f90 notes above) ----
        log.debug('Index filetype 2A')
        index = {
                "BLOC" : _read_int64(f) , #(data(1), 1) ! bloc
                "WORD" : _read_int32(f) , #(data(3), 1) ! word
                "NUM" : _read_int64(f) , #(data(4), 1) ! num
                "VER" : _read_int32(f) , #(data(6), 1) ! ver
                "CSOUR" : _read_word(f,12), #(data(7), 3) ! csour
                "CLINE" : _read_word(f,12), #(data(10), 3) ! cline
                "CTELE" : _read_word(f,12), #(data(13), 3) ! ctele
                "DOBS" : _read_int32(f) , #(data(16), 1) ! dobs
                "DRED" : _read_int32(f) , #(data(17), 1) ! dred
                "OFF1" : _read_float32(f), #(data(18), 1) ! off1
                "OFF2" : _read_float32(f), #(data(19), 1) ! off2
                "TYPE" : _read_int32(f) , #(data(20), 1) ! type
                "KIND" : _read_int32(f) , #(data(21), 1) ! kind
                "QUAL" : _read_int32(f) , #(data(22), 1) ! qual
                "POSA" : _read_float32(f), #(data(23), 1) ! posa
                "SCAN" : _read_int64(f) , #(data(24), 1) ! scan
                "SUBSCAN": _read_int32(f) , #(data(26), 1) ! subscan
                }
        #last24bits = f.read(24)
        #log.debug("Read 24 bits: '{0}'".format(last24bits))
        # sanity check: character words must be non-null, plain ascii
        if any((is_all_null(index[x]) or not is_ascii(index[x]))
               for x in ('CSOUR','CLINE','CTELE')):
            raise ValueError("Invalid index read from {0}.".format(x0))
        # alias the v2 names onto the v1-style 'X*' keys for compatibility
        index['SOURC'] = index['XSOURC'] = index['CSOUR']
        index['LINE'] = index['XLINE'] = index['CLINE']
        index['XKIND'] = index['KIND']
        try:
            index['DOBS'] = index['XDOBS'] = index['CDOBS']
        except KeyError:
            index['CDOBS'] = index['XDOBS'] = index['DOBS']
    else:
        raise NotImplementedError("Filetype {0} not implemented.".format(filetype))
    # from kernel/lib/gsys/date.f90: gag_julda
    # DOBS on disk is MJD-60549; convert to MJD, then to a year-like
    # number (NOTE(review): the 365.2425 conversion looks approximate --
    # verify against gag_julda before relying on DOBS downstream)
    index['MJD'] = index['DOBS'] + 60549
    class_dobs = index['DOBS']
    index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)
    # SLOW
    #index['DATEOBS'] = Time(index['DOBS'], format='jyear')
    #index['DATEOBSS'] = index['DATEOBS'].iso
    log.debug("Indexing finished at {0}".format(f.tell()))
    return index
def _read_header(f, type=0, position=None):
    """
    Read a header entry from a CLASS file
    (helper function)

    Parameters
    ----------
    f : file-like
        Open binary CLASS file positioned at (or seeked to) the section.
    type : str or int
        Section name; must be a key of the module-level `keys_lengths`
        table (e.g. 'GENERAL', 'POSITION', 'SPECTRO') to be parsed.
    position : int, optional
        Absolute byte offset to seek to before reading.

    Returns
    -------
    dict
        Mapping of header keyword -> value, or an empty dict when
        `type` has no entry in `keys_lengths`.
    """
    if position is not None:
        f.seek(position)
    if type in keys_lengths:
        # read each keyword with its declared count and dtype, in order
        hdrsec = [(x[0],numpy.fromfile(f,count=1,dtype=x[2])[0])
                  for x in keys_lengths[type]]
        return dict(hdrsec)
    # Unknown section types are silently skipped.  (The original had an
    # unreachable `raise ValueError` after this return; it was removed.)
    return {}
def _read_first_record(f):
    """Dispatch to the v1 or v2 first-record parser based on the 4-byte
    file code at the start of the file."""
    f.seek(0)
    filetype = f.read(4)
    version = fileversion_dict[filetype]
    if version == 'v1':
        return _read_first_record_v1(f)
    if version == 'v2':
        return _read_first_record_v2(f)
    raise ValueError("Unrecognized filetype {0}".format(filetype))
def _read_first_record_v1(f, record_length_words=128):
    r"""
    Parse the first record of a version-1 CLASS file into a
    file-description dict (with v2-compatible keys filled in).

    Position & Parameter & Fortran Kind & Purpose \\
    \hline
    1 & {\tt code} & Character*4 & File code \\
    2 & {\tt next} & Integer*4 & Next free record \\
    3 & {\tt lex} & Integer*4 & Length of first extension (number of entries) \\
    4 & {\tt nex} & Integer*4 & Number of extensions \\
    5 & {\tt xnext} & Integer*4 & Next available entry number \\
    6:2*{\tt reclen} & {\tt ex(:)} & Integer*4 & Array of extension addresses

    from classic_mod.f90:
    integer(kind=4) :: code ! 1 File code
    integer(kind=4) :: next ! 2 Next free record
    integer(kind=4) :: lex ! 3 Extension length (number of entries)
    integer(kind=4) :: nex ! 4 Number of extensions
    integer(kind=4) :: xnext ! 5 Next available entry number
    integer(kind=4) :: aex(mex_v1) ! 6:256 Extension addresses

    from old (<dec2013) class, file.f90:
    read(ilun,rec=1,err=11,iostat=ier) ibx%code,ibx%next, &
    & ibx%ilex,ibx%imex,ibx%xnext

    also uses filedesc_v1tov2 from classic/lib/file.f90
    """
    # OLD NOTES
    # hdr = header
    # hdr.update(obshead) # re-overwrite things
    # hdr.update({'OBSNUM':obsnum,'RECNUM':spcount})
    # hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
    # hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
    # hdr.update({'OBJECT':hdr['SOURC'].strip()})
    # hdr.update({'BUNIT':'Tastar'})
    # hdr.update({'EXPOSURE':hdr['TIME']})
    f.seek(0)
    # the first five fields are read from disk; the rest are the fixed
    # v1 defaults needed to emulate a v2 file description
    file_description = {
        'code': f.read(4),
        'next': _read_int32(f),
        'lex': _read_int32(f),
        'nex': _read_int32(f),
        'xnext': _read_int32(f),
        'gex': 10.,
        'vind': 1, # classic_vind_v1 packages/classic/lib/classic_mod.f90
        'version': 1,
        'nextrec': 3,
        'nextword': 1,
        'lind': 32, #classic_lind_v1 packages/classic/lib/classic_mod.f90
        'kind': 'unknown',
        'flags': 0,
    }
    file_description['reclen'] = record_length_words # should be 128w = 512 bytes
    # the remainder of the first two records holds the extension addresses
    ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')
    file_description['ex'] = ex[ex!=0]
    file_description['nextrec'] = file_description['next'] # this can't be...
    file_description['lex1'] = file_description['lex'] # number of entries
    # cumulative entry counts per extension (linear growth in v1)
    file_description['lexn'] = (np.arange(file_description['nex']+1) *
                                file_description['lex1'])
    file_description['nentries'] = np.sum(file_description['lexn'])
    file_description['aex'] = file_description['ex'][:file_description['nex']]
    #file_description['version'] = fileversion_dict[file_description['code']]
    assert f.tell() == 1024
    # Something is not quite right with the 'ex' parsing
    #assert len(file_description['ex']) == file_description['nex']
    return file_description
def _read_first_record_v2(f):
    r""" packages/classic/lib/file.f90

    Parse the first record of a version-2 CLASS file into a
    file-description dict.

    Position & Parameter & Fortran Kind & Purpose & Unit \\
    \hline
    1 & {\tt code} & Character*4 & File code & - \\
    2 & {\tt reclen} & Integer*4 & Record length & words \\
    3 & {\tt kind} & Integer*4 & File kind & - \\
    4 & {\tt vind} & Integer*4 & Index version & - \\
    5 & {\tt lind} & Integer*4 & Index length & words \\
    6 & {\tt flags} & Integer*4 & Bit flags. \#1: single or multiple, & - \\
    & & & \#2-32: provision (0-filled) & \\
    \hline
    7:8 & {\tt xnext} & Integer*8 & Next available entry number & - \\
    9:10 & {\tt nextrec} & Integer*8 & Next record which contains free space & record \\
    11 & {\tt nextword} & Integer*4 & Next free word in this record & word \\
    \hline
    12 & {\tt lex1} & Integer*4 & Length of first extension index & entries \\
    13 & {\tt nex} & Integer*4 & Number of extensions & - \\
    14 & {\tt gex} & Integer*4 & Extension growth rule & - \\
    15:{\tt reclen} & {\tt aex(:)} & Integer*8 & Array of extension addresses & record
    """
    f.seek(0)
    file_description = {
        'code': f.read(4),
        'reclen': _read_int32(f),
        'kind': _read_int32(f),
        'vind': _read_int32(f),
        'lind': _read_int32(f),
        'flags': _read_int32(f),
        'xnext': _read_int64(f),
        'nextrec': _read_int64(f),
        'nextword': _read_int32(f),
        'lex1': _read_int32(f),
        'nex': _read_int32(f),
        'gex': _read_int32(f),
    }
    # build the cumulative entry count per extension ('lexn')
    file_description['lexn'] = [0]
    if file_description['gex'] == 10:
        # linear growth: every extension holds lex1 entries
        for ii in range(1, file_description['nex']+1):
            file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])
    else:
        #! Exponential growth. Only growth with mantissa 2.0 is supported
        # NOTE(review): this loop runs to nex-1 while the linear branch
        # runs to nex -- confirm against the Fortran before changing
        for ii in range(1, file_description['nex']):
            # I don't know what the fortran does here!!!
            # ahh, maybe 2_8 means int(2, dtype='int64')
            nent = int(file_description['lex1'] * 2**(ii-1))
            #nent = int(file%desc%lex1,kind=8) * 2_8**(iex-1)
            file_description['lexn'].append(file_description['lexn'][-1]+nent)
            #file%desc%lexn(iex) = file%desc%lexn(iex-1) + nent
    file_description['nentries'] = np.sum(file_description['lexn'])
    record_length_words = file_description['reclen']
    # the rest of the record holds 64-bit extension addresses
    aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')
    file_description['aex'] = aex[aex!=0]
    assert len(file_description['aex']) == file_description['nex']
    file_description['version'] = 2
    return file_description
def gi8_dicho(ninp,lexn,xval,ceil=True):
    """
    Dichotomic (binary) search in the ordered array `lexn` (1-based
    result, mirroring the GILDAS Fortran routine).

    ! @ public
    ! Find ival such as
    ! X(ival-1) < xval <= X(ival) (ceiling mode)
    ! or
    ! X(ival) <= xval < X(ival+1) (floor mode)
    ! for input data ordered. Use a dichotomic search for that.
    call gi8_dicho(nex,file%desc%lexn,entry_num,.true.,kex,error)

    Parameters
    ----------
    ninp : int
        Number of valid points in `lexn`.
    lexn : sequence of int
        Ordered input values.
    xval : int
        The value searched for.
    ceil : bool
        Ceiling (True) or floor (False) mode.  The original accepted
        this flag but always performed a ceiling search; floor mode is
        now implemented per the specification above.

    Returns
    -------
    ival : int
        1-based position in the array.
    """
    #integer(kind=size_length), intent(in) :: np ! Number of input points
    #integer(kind=8), intent(in) :: x(np) ! Input ordered Values
    #integer(kind=8), intent(in) :: xval ! The value we search for
    #logical, intent(in) :: ceil ! Ceiling or floor mode?
    #integer(kind=size_length), intent(out) :: ival ! Position in the array
    #logical, intent(inout) :: error ! Logical error flag
    iinf = 1
    isup = ninp
    if ceil:
        #! Ceiling mode
        while isup > (iinf+1):
            imid = int(np.floor((isup + iinf)/2.))
            if (lexn[imid-1] < xval):
                iinf = imid
            else:
                isup = imid
        ival = isup
    else:
        #! Floor mode
        while isup > (iinf+1):
            imid = int(np.floor((isup + iinf)/2.))
            if (lexn[imid-1] <= xval):
                iinf = imid
            else:
                isup = imid
        ival = iinf
    return ival
def _read_obshead(f, file_description, position=None, verbose=False):
if file_description['version'] == 1:
return _read_obshead_v1(f, position=position, verbose=verbose)
if file_description['version'] == 2:
return _read_obshead_v2(f, position=position)
else:
raise ValueError("Invalid file version {0}.".
format(file_description['version']))
def _read_obshead_v2(f, position=None):
    """
    Read a version-2 CLASS observation entry descriptor.

    Returns ``(entry_number, header_dict, {section_number: section_address})``.
    The reads below are strictly sequential and must match the on-disk
    layout described by the Fortran struct:

    ! Version 2 (public)
    integer(kind=4), parameter :: entrydescv2_nw1=11 ! Number of words, in 1st part
    integer(kind=4), parameter :: entrydescv2_nw2=5 ! Number of words for 1 section in 2nd part
    type classic_entrydesc_t
    sequence
    integer(kind=4) :: code ! 1 : code observation icode
    integer(kind=4) :: version ! 2 : observation version
    integer(kind=4) :: nsec ! 3 : number of sections
    integer(kind=4) :: pad1 ! - : memory padding (not in data)
    integer(kind=8) :: nword ! 4- 5: number of words
    integer(kind=8) :: adata ! 6- 7: data address
    integer(kind=8) :: ldata ! 8- 9: data length
    integer(kind=8) :: xnum ! 10-11: entry number
    ! Out of the 'sequence' block:
    integer(kind=4) :: msec ! Not in data: maximum number of sections the
    ! Observation Index can hold
    integer(kind=4) :: pad2 ! Memory padding for 8 bytes alignment
    integer(kind=4) :: seciden(classic_maxsec) ! Section Numbers (on disk: 1 to ed%nsec)
    integer(kind=8) :: secleng(classic_maxsec) ! Section Lengths (on disk: 1 to ed%nsec)
    integer(kind=8) :: secaddr(classic_maxsec) ! Section Addresses (on disk: 1 to ed%nsec)
    end type classic_entrydesc_t
    """
    if position is not None:
        f.seek(position)
    else:
        position = f.tell()
    # The first 4 bytes hold the entry code; b'2' marks an observation header.
    IDcode = f.read(4)
    if IDcode.strip() != b'2':
        raise IndexError("Observation Header reading failure at {0}. "
                         "Record does not appear to be an observation header.".
                         format(position))
    # Rewind so the code bytes are re-read into the header dict below.
    f.seek(position)
    # Word counts from the spec (1st part / per-section 2nd part); kept for
    # reference, currently unused.
    entrydescv2_nw1 = 11
    entrydescv2_nw2 = 5
    # Sequential field reads; the pad1/msec/pad2 fields are memory-only
    # ("not in data"), hence the commented-out entries.
    obshead = {
        'CODE': f.read(4),
        'VERSION': _read_int32(f),
        'NSEC': _read_int32(f),
        #'_blank': _read_int32(f),
        'NWORD': _read_int64(f),
        'ADATA': _read_int64(f),
        'LDATA': _read_int64(f),
        'XNUM': _read_int64(f),
        #'MSEC': _read_int32(f),
        #'_blank2': _read_int32(f),
    }
    # Per-section arrays: numbers (int32), then lengths and addresses (int64).
    section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')
    section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
    section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
    return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))
def _read_obshead_v1(f, position=None, verbose=False):
    """
    Read the observation header of a CLASS file
    (helper function for read_class; should not be used independently)

    Returns ``(obsnum, header_dict, {section_code: section_address})``.
    All reads are sequential int32 fields; the order must match the
    on-disk v1 entry layout.
    """
    if position is not None:
        f.seek(position)
    # The first 4 bytes hold the entry code; b'2' marks an observation header.
    IDcode = f.read(4)
    if IDcode.strip() != b'2':
        raise IndexError("Observation Header reading failure at {0}. "
                         "Record does not appear to be an observation header.".
                         format(f.tell() - 4))
    # Eight consecutive int32 fields describing the observation record.
    (nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,
     obsnum) = numpy.fromfile(f, count=8, dtype='int32')
    if verbose:
        print("nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)
        print("DATA_LENGTH: ",data_length)
    # Per-section metadata: codes, then lengths, then addresses (int32 each).
    seccodes = numpy.fromfile(f,count=nsec,dtype='int32')
    # Documentation says addresses then length: It is apparently wrong
    seclen = numpy.fromfile(f,count=nsec,dtype='int32')
    secaddr = numpy.fromfile(f,count=nsec,dtype='int32')
    if verbose:
        print("Section codes, addresses, lengths: ",seccodes,secaddr,seclen)
    hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,
           'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,
           'NSEC':nsec, 'OBSNUM':obsnum}
    #return obsnum,seccodes
    return obsnum,hdr,dict(zip(seccodes,secaddr))
# THIS IS IN READ_OBSHEAD!!!
# def _read_preheader(f):
# """
# Not entirely clear what this is, but it is stuff that precedes the actual data
#
# Looks something like this:
# array([ 1, -2, -3, -4, -14,
# 9, 17, 18, 25, 55,
# 64, 81, 99, -1179344801, 979657591,
#
# -2, -3, -4, -14 indicate the 4 header types
# 9,17,18,25 *MAY* indicate the number of bytes in each
#
#
# HOW is it indicated how many entries there are?
# """
# # 13 comes from counting 1, -2,....99 above
# numbers = np.fromfile(f, count=13, dtype='int32')
# sections = [n for n in numbers if n in header_id_numbers]
# return sections
def downsample_1d(myarr, factor, estimator=np.mean, weight=None):
    """
    Downsample a 1D array by averaging over *factor* pixels.
    Crops right side if the shape is not a multiple of factor.
    This code is pure numpy and should be fast.

    Parameters
    ----------
    myarr : np.ndarray
        1D input array.
    factor : int
        Downsampling factor (float-valued integers such as 2.0 are
        accepted and coerced).
    estimator : callable
        Defaults to mean. You can downsample by summing or
        something else if you want a different estimator
        (e.g., downsampling error: you want to sum & divide by sqrt(n)).
    weight : np.ndarray
        An array of weights to use for the downsampling. If None,
        assumes uniform 1.

    Returns
    -------
    np.ndarray
        The downsampled array (length ``myarr.size // factor``).
    """
    if myarr.ndim != 1:
        raise ValueError("Only works on 1d data. Says so in the title.")
    # Bug fix: coerce once up front. Previously only the crop used
    # int(factor) while range()/slicing used the raw value, so a float
    # factor (e.g. 2.0) crashed.
    factor = int(factor)
    xs = myarr.size
    crarr = myarr[:xs - (xs % factor)]
    if weight is None:
        # Stack the factor interleaved slices (shape: factor x nbins) and
        # reduce along axis 0, i.e. within each bin.
        dsarr = estimator(np.concatenate([[crarr[i::factor] for i in
                                           range(factor)]]), axis=0)
    else:
        # Weighted variant: estimator over value*weight, normalized by the
        # estimator of the weights (all-zero-weight bins yield nan).
        dsarr = estimator(np.concatenate([[crarr[i::factor]*weight[i::factor] for i in
                                           range(factor)]]), axis=0)
        warr = estimator(np.concatenate([[weight[i::factor] for i in
                                          range(factor)]]), axis=0)
        dsarr = dsarr/warr
    return dsarr
# unit test
def test_downsample1d():
    """Weighted downsampling must ignore zero-weight samples."""
    values = np.arange(10)
    wts = np.ones(10)
    wts[5] = 0
    expected = np.array([0.5, 2.5, 4.0, 6.5, 8.5])
    result = downsample_1d(values, 2, weight=wts, estimator=np.mean)
    assert np.all(result == expected)
def read_observation(f, obsid, file_description=None, indices=None,
                     my_memmap=None, memmap=True, verbose=False):
    """
    Read a single observation (spectrum + header) from a CLASS file.

    Parameters
    ----------
    f : str or file object
        Filename or an already-open binary file object. If a file object
        is passed with ``memmap=True``, ``my_memmap`` must be supplied.
    obsid : int
        Index of the observation within ``indices``.
    file_description : dict, optional
        Parsed first record; read from the file if not given.
    indices : list of dict, optional
        Parsed index entries; read from the file if not given.
    my_memmap : np.memmap, optional
        float32 memmap of the whole file (used when ``memmap=True``).
    memmap : bool
        Slice data out of the memmap instead of reading from the file.
    verbose : bool
        Passed through to the header reader.

    Returns
    -------
    (spectrum, header_dict)
    """
    if isinstance(f, str):
        f = open(f, 'rb')
        opened = True
        if memmap:
            my_memmap = numpy.memmap(f, offset=0, dtype='float32',
                                     mode='r')
        else:
            my_memmap = None
    elif my_memmap is None and memmap:
        raise ValueError("Must pass in a memmap object if passing in a file object.")
    else:
        opened = False
    if file_description is None:
        file_description = _read_first_record(f)
    if indices is None:
        indices = _read_indices(f, file_description)
    index = indices[obsid]
    # BLOC/WORD are 1-indexed record/word positions; records are
    # reclen 4-byte words long.
    obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4
    log.debug("Reading observation at position {0}".format(obs_position))
    obsnum, obshead, sections = _read_obshead(f, file_description,
                                              position=obs_position,
                                              verbose=verbose)
    header = obshead
    datastart = 0
    for section_id, section_address in iteritems(sections):
        # Section addresses are 1-indexed byte addresses
        # in the current "block"
        sec_position = obs_position + (section_address-1)*4
        temp_hdr = _read_header(f, type=header_id_numbers[section_id],
                                position=sec_position)
        header.update(temp_hdr)
        # Data begin after the furthest-read section.
        datastart = max(datastart, f.tell())
    hdr = header
    hdr.update(obshead)  # re-overwrite things
    hdr.update({'OBSNUM': obsnum, 'RECNUM': obsid})
    # LAM/BET (and offsets) are stored in radians; convert to degrees.
    hdr.update({'RA': hdr['LAM']/pi*180, 'DEC': hdr['BET']/pi*180})
    hdr.update({'RAoff': hdr['LAMOF']/pi*180, 'DECoff': hdr['BETOF']/pi*180})
    hdr.update({'OBJECT': hdr['SOURC'].strip()})
    hdr.update({'BUNIT': 'Tastar'})
    hdr.update({'EXPOSURE': float(hdr['TIME'])})
    hdr['HDRSTART'] = obs_position
    hdr['DATASTART'] = datastart
    hdr.update(indices[obsid])
    # Define MJD as mid-exposure time in MJD
    hdr.update({'OBSDATE': hdr['MJD'] + hdr['UT']/2./pi})
    # Apparently the data are still valid in this case?
    #if hdr['XNUM'] != obsid+1:
    #    log.error("The spectrum read was {0} but {1} was requested.".
    #              format(hdr['XNUM']-1, obsid))
    if hdr['KIND'] == 1:  # continuum
        nchan = hdr['NPOIN']
    elif 'NCHAN' in hdr:
        nchan = hdr['NCHAN']
    else:
        log.error("No NCHAN in header. This is not a spectrum.")
        # Bug fix: this used to drop into the ipdb debugger (a development
        # leftover); fail loudly instead.
        raise ValueError("No NCHAN in header. This is not a spectrum.")
    # There may be a 1-channel offset? CHECK!!!
    # (changed by 1 pixel - October 14, 2014)
    # (changed back - October 21, 2014 - I think the ends are just bad, but not
    # zero.)
    f.seek(datastart-1)
    spec = _read_spectrum(f, position=datastart-1, nchan=nchan,
                          memmap=memmap, my_memmap=my_memmap)
    if opened:
        f.close()
    return spec, hdr
def _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):
if position != f.tell():
log.warning("Reading data from {0}, but the file is wound "
"to {1}.".format(position, f.tell()))
if memmap:
here = position
#spectrum = numpy.memmap(filename, offset=here, dtype='float32',
# mode='r', shape=(nchan,))
spectrum = my_memmap[here//4:here//4+nchan]
f.seek(here+nchan*4)
else:
f.seek(position)
spectrum = numpy.fromfile(f,count=nchan,dtype='float32')
return spectrum
def _spectrum_from_header(fileobj, header, memmap=None):
    """Read the spectrum described by *header* from *fileobj*.

    Uses ``header['DATASTART']`` for the byte position and NCHAN (spectra)
    or NPOIN (continuum) for the channel count.

    Bug fix: the channel-count expression previously referenced an
    undefined name ``hdr`` (NameError at runtime); it now consistently
    uses the ``header`` argument.
    """
    nchan = header['NCHAN'] if 'NCHAN' in header else header['NPOIN']
    return _read_spectrum(fileobj, position=header['DATASTART'],
                          nchan=nchan,
                          my_memmap=memmap)
def clean_header(header):
    """Return a copy of *header* made FITS-safe.

    Keys are truncated to 8 characters.  Scalar int/float/str values pass
    through unchanged; small arrays (2-10 elements) are exploded into
    numbered keys (7-char stem + index); anything else is stringified.
    Arrays larger than 10 elements raise ``ValueError``.
    """
    cleaned = {}
    for key in header:
        value = header[key]
        if isinstance(value, (int, float, str)):
            cleaned[key[:8]] = value
        elif isinstance(value, np.ndarray) and value.size > 1:
            if value.size > 10:
                raise ValueError("Large array being put in header. That's no good. key={0}".format(key))
            for idx, item in enumerate(value):
                cleaned[key[:7] + str(idx)] = item
        else:
            cleaned[key[:8]] = str(value)
    return cleaned
class ClassObject(object):
    """
    Lazy reader for a GILDAS CLASS binary file.

    Opens the file, parses the file descriptor and the index of all
    observations, memory-maps the raw data, and exposes lazily-loaded
    spectra/headers plus selection and listing helpers.
    """
    def __init__(self, filename, verbose=False):
        t0 = time.time()
        self._file = open(filename, 'rb')
        self.file_description = _read_first_record(self._file)
        self.allind = _read_indices(self._file, self.file_description)
        # Memory-map the whole file so spectra can be sliced without reads.
        self._data = np.memmap(self._file, dtype='float32', mode='r')
        if verbose: log.info("Setting _spectra")
        self._spectra = LazyItem(self)
        t1 = time.time()
        if verbose: log.info("Setting posang. t={0}".format(t1-t0))
        self.set_posang()
        t2 = time.time()
        if verbose: log.info("Identifying otf scans. t={0}".format(t2-t1))
        self._identify_otf_scans(verbose=verbose)
        t3 = time.time()
        #self._load_all_spectra()
        if verbose:
            log.info("Loaded CLASS object with {3} indices. Time breakdown:"
                     " {0}s for indices, "
                     "{1}s for posang, and {2}s for OTF scan identification"
                     .format(t1-t0, t2-t1, t3-t2, len(self.allind)))

    def __repr__(self):
        s = "\n".join(["{k}: {v}".format(k=k,v=v)
                       for k,v in iteritems(self.getinfo())])
        return "ClassObject({id}) with {nspec} entries\n".format(id=id(self),
                                                                 nspec=len(self.allind)) + s

    def getinfo(self, allsources=False):
        """Return a dict summarizing telescopes, lines, scans, and sources."""
        info = dict(
            tels = self.tels,
            lines = self.lines,
            scans = self.scans,
            sources = self.sources if allsources else self.sci_sources,
        )
        return info

    def set_posang(self):
        """Attach 'COMPPOSA': position angle (deg) between consecutive pointings."""
        h0 = self.headers[0]
        for h in self.headers:
            dx = h['OFF1'] - h0['OFF1']
            dy = h['OFF2'] - h0['OFF2']
            h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi
            h0 = h

    def _identify_otf_scans(self, verbose=False):
        """Group index entries into OTF scans and assign per-scan metadata.

        Sets 'OTFSCAN' on each index entry and a median 'SCANPOSA'
        (scan position angle, mod 180) on completed groups.
        """
        # NOTE(review): h0 is never advanced inside the loop, so all
        # comparisons are against the first index entry — confirm intended.
        h0 = self.allind[0]
        st = 0
        otfscan = 0
        posangs = [h['COMPPOSA'] for h in self.allind]
        if verbose:
            pb = ProgressBar(len(self.allind))
        for ii,h in enumerate(self.allind):
            if (h['SCAN'] != h0['SCAN']
                or h['SOURC'] != h0['SOURC']):
                h0['FIRSTSCAN'] = st
                cpa = np.median(posangs[st:ii])
                for hh in self.allind[st:ii]:
                    hh['SCANPOSA'] = cpa % 180
                st = ii
                if h['SCAN'] == h0['SCAN']:
                    h0['OTFSCAN'] = otfscan
                    otfscan += 1
                    h['OTFSCAN'] = otfscan
                else:
                    otfscan = 0
                    h['OTFSCAN'] = otfscan
            else:
                h['OTFSCAN'] = otfscan
            if verbose:
                pb.update(ii)

    def listscans(self, source=None, telescope=None, out=sys.stdout):
        """Print a table summarizing each scan; optionally filter by regex
        on source and/or telescope names."""
        minid=0
        scan = -1
        sourc = ""
        #tel = ''
        minoff1,maxoff1 = np.inf,-np.inf
        minoff2,maxoff2 = np.inf,-np.inf
        ttlangle,nangle = 0.0,0
        print("{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} "
              "[ {RAmin:>12s}, {RAmax:>12s} ] "
              "[ {DECmin:>12s}, {DECmax:>12s} ] "
              "{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}"
              .format(entries='Scans', SOURC='Source', XTEL='Telescope',
                      SCAN='Scan', SUBSCAN='Subscan',
                      RAmin='min(RA)', RAmax='max(RA)',
                      DECmin='min(DEC)', DECmax='max(DEC)',
                      SCANPOSA='Scan PA',
                      angle='Angle', OTFSCAN='OTFscan',
                      TSYS='TSYS', UTD='UTD'),
              file=out)
        data_rows = []
        for ii,row in enumerate(self.headers):
            if (row['SCAN'] == scan
                and row['SOURC'] == sourc
                #and row['XTEL'] == tel
                ):
                # Same scan: accumulate offset extrema and pointing angle.
                minoff1 = min(minoff1, row['OFF1'])
                maxoff1 = max(maxoff1, row['OFF1'])
                minoff2 = min(minoff2, row['OFF2'])
                maxoff2 = max(maxoff2, row['OFF2'])
                ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],
                                       row['OFF1'] - prevrow['OFF1'])%np.pi
                nangle += 1
                prevrow = row
            else:
                if scan == -1:
                    # First row: initialize the running scan state.
                    scan = row['SCAN']
                    sourc = row['SOURC']
                    #tel = row['XTEL']
                    prevrow = row
                    continue
                ok = True
                if source is not None:
                    if isinstance(source, (list,tuple)):
                        ok = ok and any(re.search((s), prevrow['SOURC'])
                                        for s in source)
                    else:
                        ok = ok and re.search((source), prevrow['SOURC'])
                if telescope is not None:
                    ok = ok and re.search((telescope), prevrow['XTEL'])
                if ok:
                    # Offsets are radians; report arcseconds.
                    data = dict(RAmin=minoff1*180/np.pi*3600,
                                RAmax=maxoff1*180/np.pi*3600,
                                DECmin=minoff2*180/np.pi*3600,
                                DECmax=maxoff2*180/np.pi*3600,
                                angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,
                                e0=minid,
                                e1=ii-1,
                                #TSYS=row['TSYS'] if 'TSYS' in row else '--',
                                UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,
                                **prevrow)
                    print("{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} "
                          "[ {RAmin:12f}, {RAmax:12f} ] "
                          "[ {DECmin:12f}, {DECmax:12f} ] "
                          "{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}"
                          " {TSYS:>8.1f} {UTD:12f}".
                          format(**data),
                          file=out)
                    data_rows.append(data)
                # Reset accumulators for the next scan.
                minoff1,maxoff1 = np.inf,-np.inf
                minoff2,maxoff2 = np.inf,-np.inf
                ttlangle,nangle = 0.0,0
                scan = row['SCAN']
                sourc = row['SOURC']
                #tel = row['XTEL']
                minid = ii
        # NOTE(review): only the last printed scan's dict is returned;
        # `data_rows` holds all of them but is never returned — confirm intent.
        return data

    @property
    def tels(self):
        # Cached set of telescope identifiers seen in the index.
        if hasattr(self,'_tels'):
            return self._tels
        else:
            self._tels = set([h['XTEL'] for h in self.allind])
            return self._tels

    @property
    def sources(self):
        # Cached set of all source names (including calibration sources).
        if hasattr(self,'_source'):
            return self._source
        else:
            self._source = set([h['SOURC'] for h in self.allind])
            return self._source

    @property
    def scans(self):
        # Cached set of scan numbers.
        if hasattr(self,'_scan'):
            return self._scan
        else:
            self._scan = set([h['SCAN'] for h in self.allind])
            return self._scan

    @property
    def sci_sources(self):
        """Science sources only: calibration/sky entries filtered out by prefix."""
        return set([s for s in self.sources
                    if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',
                                     'COLD')])

    @property
    def lines(self):
        # Cached set of line names.
        if hasattr(self,'_lines'):
            return self._lines
        else:
            self._lines = set([h['LINE'] for h in self.allind])
            return self._lines

    def _load_all_spectra(self, indices=None):
        """Eagerly load spectra for *indices* (default: every entry)."""
        if indices is None:
            indices = range(self.file_description['xnext']-1)
        if hasattr(self, '_loaded_indices'):
            indices_set = set(indices)
            indices_to_load = (indices_set.difference(self._loaded_indices))
            self._loaded_indices = self._loaded_indices.union(indices_set)
            # Bug fix: `any(...)` treated {0} as empty (0 is falsy), and
            # `xrange(indices_to_load)` raised TypeError (xrange needs an
            # int, not a set) — iterate the missing indices directly.
            if len(indices_to_load) > 0:
                pb = ProgressBar(len(indices_to_load))
                for ii,k in enumerate(indices_to_load):
                    self._spectra[k]
                    pb.update(ii)
        else:
            self._loaded_indices = set(indices)
            self._spectra.load_all()

    @property
    def spectra(self):
        # Data arrays of the loaded spectra.
        return [x[0] for x in self._spectra]

    @property
    def headers(self):
        # Full header where a spectrum is loaded, otherwise the raw index entry.
        return [self._spectra[ii][1]
                if ii in self._spectra else x
                for ii,x in enumerate(self.allind)]

    def select_spectra(self,
                       all=None,
                       line=None,
                       linere=None,
                       linereflags=re.IGNORECASE,
                       number=None,
                       scan=None,
                       offset=None,
                       source=None,
                       sourcere=None,
                       sourcereflags=re.IGNORECASE,
                       range=None,
                       quality=None,
                       telescope=None,
                       telescopere=None,
                       telescopereflags=re.IGNORECASE,
                       subscan=None,
                       entry=None,
                       posang=None,
                       #observed=None,
                       #reduced=None,
                       frequency=None,
                       section=None,
                       user=None,
                       include_old_versions=False,
                       ):
        """
        Return the indices of all headers matching every given criterion.

        Parameters
        ----------
        include_old_versions: bool
            Include spectra with XVER numbers <0? These are CLASS spectra that
            have been "overwritten" (re-reduced?)
        """
        if entry is not None and len(entry)==2:
            return irange(entry[0], entry[1])
        if frequency is not None:
            # Frequency selection needs RESTF, which requires full headers.
            self._load_all_spectra()
        sel = [(re.search(re.escape(ensure_bytes(line)), h['LINE'], re.IGNORECASE)
                if line is not None else True) and
               (re.search(ensure_bytes(linere), h['LINE'], linereflags)
                if linere is not None else True) and
               (h['SCAN'] == scan if scan is not None else True) and
               ((h['OFF1'] == offset or
                 h['OFF2'] == offset) if offset is not None else True) and
               (re.search(re.escape(ensure_bytes(source)), h['CSOUR'], re.IGNORECASE)
                if source is not None else True) and
               (re.search(ensure_bytes(sourcere), h['CSOUR'], sourcereflags)
                if sourcere is not None else True) and
               (h['OFF1']>range[0] and h['OFF1'] < range[1] and
                h['OFF2']>range[2] and h['OFF2'] < range[3]
                if range is not None and len(range)==4 else True) and
               (h['QUAL'] == quality if quality is not None else True) and
               (re.search(re.escape(ensure_bytes(telescope)), h['CTELE'], re.IGNORECASE)
                if telescope is not None else True) and
               (re.search(ensure_bytes(telescopere), h['CTELE'], telescopereflags)
                if telescopere is not None else True) and
               (h['SUBSCAN']==subscan if subscan is not None else True) and
               (h['NUM'] >= number[0] and h['NUM'] < number[1]
                if number is not None else True) and
               ('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way
                h['RESTF'] > frequency[0] and
                h['RESTF'] < frequency[1]
                if frequency is not None and len(frequency)==2
                else True) and
               (h['COMPPOSA']%180 > posang[0] and
                h['COMPPOSA']%180 < posang[1]
                if posang is not None and len(posang)==2
                else True) and
               # 1A uses XVER, 2A uses VER. If neither are present, it's
               # probably not a valid spectrum?
               (h.get('XVER', h.get('VER', -999)) > 0
                if not include_old_versions else True)
               for h in self.headers
               ]
        return [ii for ii,k in enumerate(sel) if k]

    def get_spectra(self, progressbar=True, **kwargs):
        """Select, load, and return (spectrum, header) pairs matching **kwargs."""
        selected_indices = self.select_spectra(**kwargs)
        # Bug fix: `any(...)` was False for a selection containing only
        # index 0; test for emptiness explicitly.
        if len(selected_indices) == 0:
            raise ValueError("Selection yielded empty.")
        self._spectra.load(selected_indices, progressbar=progressbar)
        return [self._spectra[ii] for ii in selected_indices]

    def get_pyspeckit_spectra(self, progressbar=True, **kwargs):
        """Like get_spectra, but wrap each result in a pyspeckit Spectrum."""
        spdata = self.get_spectra(progressbar=progressbar, **kwargs)
        spectra = [pyspeckit.Spectrum(data=data,
                                      xarr=make_axis(header),
                                      header=clean_header(header))
                   for data,header in spdata]
        return spectra

    def read_observations(self, observation_indices, progressbar=True):
        """Load and return (spectrum, header) pairs for the given indices."""
        self._spectra.load(observation_indices, progressbar=progressbar)
        return [self._spectra[ii] for ii in observation_indices]
@print_timing
def read_class(filename, downsample_factor=None, sourcename=None,
               telescope=None, line=None, posang=None, verbose=False,
               flag_array=None):
    """
    Read a binary class file.
    Based on the
    `GILDAS CLASS file type Specification
    <http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html>`_

    Parameters
    ----------
    filename: str
    downsample_factor: None or int
        Factor by which to downsample data by averaging. Useful for
        overresolved data.
    sourcename: str or list of str
        Source names to match to the data (uses regex)
    telescope: str or list of str
        'XTEL' or 'TELE' parameters: the telescope & instrument
    line: str or list of str
        The line name
    posang: tuple of 2 floats
        The first float is the minimum value for the position angle. The second
        float is the maximum value for the position angle.
    verbose: bool
        Log messages with severity INFO
    flag_array: np.ndarray
        An array with the same shape as the data used to flag out
        (remove) data when downsampling. True = flag out

    Returns
    -------
    (spectra, headers, indexes), or None if no spectra matched.
    """
    classobj = ClassObject(filename)
    # Normalize scalar selectors to single-element lists for the loop below.
    if not isinstance(sourcename, (list,tuple)):
        sourcename = [sourcename]
    if not isinstance(telescope, (list,tuple)):
        telescope = [telescope]
    if not isinstance(line, (list,tuple)):
        line = [line]
    spectra,headers = [],[]
    if verbose:
        log.info("Reading...")
    # Union of all (source, telescope, line) selector combinations.
    selection = [ii
                 for source in sourcename
                 for tel in telescope
                 for li in line
                 for ii in classobj.select_spectra(sourcere=source,
                                                   telescope=tel,
                                                   line=li,
                                                   posang=posang)]
    sphdr = classobj.read_observations(selection)
    if len(sphdr) == 0:
        return None
    spec,hdr = zip(*sphdr)
    spectra += spec
    headers += hdr
    # NOTE(review): `indexes` aliases `headers` here; after downsampling,
    # `headers` is rebound to a new list while `indexes` keeps the
    # un-downsampled originals — confirm this aliasing is intended.
    indexes = headers
    # Inverted flags serve as weights: flagged (True) samples get weight 0.
    weight = ~flag_array if flag_array is not None else None
    if downsample_factor is not None:
        if verbose:
            log.info("Downsampling...")
        spectra = [downsample_1d(spec, downsample_factor,
                                 weight=weight)
                   for spec in ProgressBar(spectra)]
        headers = [downsample_header(h, downsample_factor)
                   for h in ProgressBar(headers)]
    for hdr in headers:
        stringify_header(hdr)
    return spectra,headers,indexes
def stringify_header(header):
    """Coerce header values in place into FITS-storable types.

    Bytes are decoded to str; any value that is not a permitted scalar
    type is stringified with non-printable/control characters stripped.
    Mutates *header* and returns None.
    """
    from six import string_types, integer_types
    import string
    allowed_types = (string_types + integer_types +
                     (float, complex, bool, np.floating, np.integer,
                      np.complexfloating, np.bool_))
    # string.printable[96:] holds whitespace/control characters.
    bad_chars = string.printable[96:]
    badcharre = re.compile("[{0}]".format(bad_chars))
    for key, value in header.items():
        if isinstance(value, bytes):
            header[key] = value.decode()
        elif not isinstance(value, allowed_types):
            header[key] = badcharre.sub("", str(header[key]))
def downsample_header(hdr, downsample_factor):
    """Adjust CLASS header keywords in place for a downsampled spectrum.

    Channel counts and data length shrink by the factor; the reference
    channel is remapped to the new grid; frequency/velocity resolutions
    grow by the factor.  Returns the (mutated) header.
    """
    for key in ('NCHAN', 'NPOIN', 'DATALEN'):
        if key in hdr:
            hdr[key] = int(hdr[key] / downsample_factor)
    # maybe wrong? h['RCHAN'] = (h['RCHAN']-1) / downsample_factor + 1
    scale = 1. / downsample_factor
    hdr['RCHAN'] = (hdr['RCHAN'] - 1) * scale + 0.5 + scale / 2.
    for key in ('FRES', 'VRES'):
        if key in hdr:
            hdr[key] *= downsample_factor
    return hdr
def make_axis(header, imagfreq=False):
    """
    Create a :class:`pyspeckit.spectrum.units.SpectroscopicAxis` from the CLASS "header"

    The axis is built in MHz from the rest frequency, frequency offset,
    channel resolution, and reference channel.  With ``imagfreq=True``
    the image-sideband frequency axis is returned instead.
    """
    from .. import units
    rest_frequency = header.get('RESTF')
    nchan = header.get('NCHAN')
    voff = header.get('VOFF')
    foff = header.get('FOFF')
    fres = header.get('FRES')
    refchan = header.get('RCHAN')
    imfreq = header.get('IMAGE')
    # No explicit frequency offset: derive one from the velocity offset
    # using the radio convention.
    if foff in (None, 0.0) and voff not in (None, 0.0):
        foff = -voff / 2.997924580e5 * rest_frequency
    channel_offsets = numpy.arange(1, nchan + 1) - refchan
    if imagfreq:
        xarr = imfreq - channel_offsets * fres
        return units.SpectroscopicAxis(xarr, unit='MHz', refX=imfreq*u.MHz)
    xarr = rest_frequency + foff + channel_offsets * fres
    return units.SpectroscopicAxis(xarr, unit='MHz', refX=rest_frequency*u.MHz)
@print_timing
def class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,
                       imagfreq=False, DEBUG=False, **kwargs):
    """
    Load an entire CLASS observing session into a list of ObsBlocks based on
    matches to the 'telescope', 'line' and 'source' names

    Parameters
    ----------
    filename : string
        The Gildas CLASS data file to read the spectra from.
    telescope : list
        List of telescope names to be matched.
    line : list
        List of line names to be matched.
    source : list (optional)
        List of source names to be matched. Defaults to None.
    imagfreq : bool
        Create a SpectroscopicAxis with the image frequency.
    """
    if datatuple is None:
        spectra,header,indexes = read_class(filename, **kwargs)
    else:
        spectra,header,indexes = datatuple
    obslist = []
    lastscannum = -1
    spectrumlist = None
    for sp,hdr,ind in zip(spectra,header,indexes):
        hdr.update(ind)
        # this is slow but necessary...
        H = pyfits.Header()
        for k,v in iteritems(hdr):
            if hasattr(v,"__len__") and not isinstance(v,str):
                # make an array of header entries, but this
                # supports only up to 10 of them...
                if len(v) > 1:
                    if len(v) < 10:
                        # e.g. KEY -> KEY0..KEY9 (7-char stem + digit)
                        for ii,vv in enumerate(v):
                            newkey = k[:7]+str(ii)
                            H[newkey] = vv
                    elif len(v) < 100:
                        # two-digit suffix needs a 6-char stem
                        for ii,vv in enumerate(v):
                            newkey = k[:6]+str(ii)
                            H[newkey] = vv
                    else:
                        raise ValueError("Too many entries for {0}".format(k))
                else:
                    H[k] = v[0]
            #elif not any(x in str(v).lower() for x in ('comment', 'end', 'history')):
            #    # do not try to add comments...
            # This commented out block used to attempt to reject comments
            # using a private regex in the old pyfits which no longer exists.
            # I don't know if it was necessary.
            else:
                H[k] = v
        scannum = hdr['SCAN']
        # Filter on telescope, line, and (optionally) source names.
        if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:
            continue
        if hdr['LINE'].strip() not in line:
            continue
        if (source is not None) and (hdr['SOURC'].strip() not in source):
            continue
        hdr['RESTFREQ'] = hdr.get('RESTF')
        H['RESTFREQ'] = hdr.get('RESTF')
        #print "Did not skip %s,%s. Scannum, last: %i,%i" % (hdr['XTEL'],hdr['LINE'],scannum,lastscannum)
        if scannum != lastscannum:
            # New scan: flush the previous scan's spectra as an ObsBlock
            # and start a fresh list with a new axis.
            lastscannum = scannum
            if spectrumlist is not None:
                obslist.append(pyspeckit.ObsBlock(spectrumlist))
            xarr = make_axis(hdr,imagfreq=imagfreq)
            spectrumlist = [(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))]
        else:
            # Same scan: reuse the axis built at the start of the scan.
            spectrumlist.append(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))
    # NOTE(review): the final scan's `spectrumlist` is never appended to
    # `obslist` here, so the last ObsBlock appears to be dropped — confirm.
    return obslist
class LazyItem(object):
    """
    Simple lazy spectrum-retriever wrapper.

    Maps observation index -> (spectrum, header), reading each
    observation from the parent ClassObject's file on first access and
    caching the result in ``self.sphdr``.
    """
    def __init__(self, parent):
        self.parent = parent
        # Cache of already-read (spectrum, header) tuples keyed by index.
        self.sphdr = {}
        self.nind = len(self.parent.allind)
        self.nloaded = 0

    def __repr__(self):
        return ("Set of {0} spectra & headers, {1} loaded"
                " ({2:0.2f}%)".format(self.nind, self.nloaded,
                                      (float(self.nloaded)/self.nind)*100))

    def load_all(self, progressbar=True):
        """Load every observation in the parent index."""
        self.load(range(self.nind))

    def load(self, indices, progressbar=True):
        """Load (and cache) the observations at the given indices."""
        pb = ProgressBar(len(indices))
        counter = 0
        for k in indices:
            self[k]
            counter += 1
            pb.update(counter)

    def __getitem__(self, key):
        if key in self.sphdr:
            return self.sphdr[key]
        elif isinstance(key, slice):
            # Bug fix: slice objects expose .stop, not .end (the old
            # `key.end` raised AttributeError for every slice access).
            return [self[k] for k in xrange(key.start or 0,
                                            key.stop or len(self.parent.allind),
                                            key.step or 1)]
        else:
            sphd = read_observation(self.parent._file, key,
                                    file_description=self.parent.file_description,
                                    indices=self.parent.allind,
                                    my_memmap=self.parent._data)
            # Update the header with OTFSCAN and POSANG info
            sphd[1].update(self.parent.allind[key])
            self.sphdr[key] = sphd
            self.nloaded += 1
            return sphd

    def __iter__(self):
        # Bug fix: this used to call self.next(), which does not exist in
        # Python 3 (only __next__ was defined), and the generator then
        # read a nonexistent attribute `spheader`.  Iterate the cache of
        # loaded (spectrum, header) tuples directly.
        for k in self.sphdr:
            yield self.sphdr[k]

    def __next__(self):
        # Kept for backward compatibility; fixed to read `sphdr` rather
        # than the nonexistent `spheader`.
        for k in self.sphdr:
            yield self.sphdr[k]

    def __contains__(self, key):
        return key in self.sphdr
@print_timing
def class_to_spectra(filename, datatuple=None, **kwargs):
    """
    Load each individual spectrum within a CLASS file into a list of Spectrum
    objects

    If *datatuple* is given it must be the ``(spectra, header, indexes)``
    triple produced by :func:`read_class`; otherwise the file is read
    with any extra keyword arguments forwarded to ``read_class``.
    """
    if datatuple is not None:
        spectra, header, indexes = datatuple
    else:
        spectra, header, indexes = read_class(filename, **kwargs)
    speclist = []
    for data, hdr, index in zip(spectra, header, indexes):
        hdr.update(index)
        axis = make_axis(hdr)
        speclist.append(pyspeckit.Spectrum(xarr=axis,
                                           header=hdr,
                                           data=data))
    return pyspeckit.Spectra(speclist)
def tests():
    """
    Tests are specific to the machine on which this code was developed.
    """
    fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'
    # Upper (N2H+) and lower (HCO+) sideband observing blocks.
    n2hp = class_to_obsblocks(fn1,
                              telescope=['SMT-F1M-HU', 'SMT-F1M-VU'],
                              line=['N2HP(3-2)', 'N2H+(3-2)'])
    hcop = class_to_obsblocks(fn1,
                              telescope=['SMT-F1M-HL', 'SMT-F1M-VL'],
                              line=['HCOP(3-2)', 'HCO+(3-2)'])
|
pyspeckitREPO_NAMEpyspeckitPATH_START.@pyspeckit_extracted@pyspeckit-master@pyspeckit@spectrum@readers@read_class.py@.PATH_END.py
|
{
"filename": "model_resnet.py",
"repo_name": "mommermi/cloudynight",
"repo_path": "cloudynight_extracted/cloudynight-master/scripts/model_resnet.py",
"type": "Python"
}
|
""" Licensed under a 3-clause BSD style license - see LICENSE.rst
This stand-alone script contains the ResNet-18 adaptation for cloudynight.
The script only works with raw image data and labels for training and does
thus not depend on the cloudynight module. While this script shows the
training and prediction processes with this model, the image example data
provided with this repository are not sufficient for training a meaningful
model.
Note that this implementation requires a cuda-compatible GPU.
This implementation is based on the pytorch ResNet example provided by
https://zablo.net/blog/post/using-resnet-for-mnist-in-pytorch-tutorial/index.html
(c) 2020, Michael Mommert (mommermiscience@gmail.com)
"""
import time
import inspect
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.visualization import (ZScaleInterval, LinearStretch,
ContrastBiasStretch)
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Compose
from torchvision.models.resnet import ResNet, BasicBlock, Bottleneck
from tqdm.autonotebook import tqdm
# set random seeds
# Fix both torch and numpy RNGs so training runs are reproducible.
torch.manual_seed(3)
np.random.seed(3)
# read in data
# basedir: location of the example image/label data; outdir: output workspace.
basedir = '../example_data/images/'
outdir = '../workbench/images/'
class CloudynightDataset(Dataset):
    """Dataset of all-sky camera FITS images with 33 per-subregion cloud labels."""

    def __init__(self, imagedir=basedir,
                 maskfile=basedir+'mask.fits',
                 transform=None, maxlen=None):
        """Model Constructor.

        :param imagedir: path to image data for training
        :param maskfile: path and name of mask file
        :param transform: transformation to be applied to data
        :param maxlen: maximum number of examples to be used in training
        """
        self.imagedir = imagedir
        # Whitespace-separated label table; first column is the example index.
        self.labels = pd.read_csv(basedir+'y_train.dat',
                                  delim_whitespace=True,
                                  index_col=0, header=None)
        if maxlen is not None:
            self.labels = self.labels[:maxlen]
        # Crop 5 columns per side so the mask matches the image crop below.
        self.mask = fits.open(maskfile)[0].data[:, 5:-5]
        # force quadratic image size for data loader collate_fn
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, rawidx):
        """Return {'image': masked image (1,H,W), 'clouds': 33 labels}."""
        # translate Dataset idx to training data idx
        idx = rawidx + self.labels.index.min()
        if torch.is_tensor(rawidx):
            rawidx = rawidx.tolist()
            idx = idx.tolist()
        # read image
        hdu = fits.open(self.imagedir+'{:03d}.fits.bz2'.format(idx))
        # force quadratic image for dataloader collate_fn
        img = hdu[0].data[:960, 225:1185]*self.mask
        clouds = self.labels.loc[idx].values
        sample = {'image': img.copy().reshape(1, *img.shape),
                  'clouds': clouds}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def display(self, idx):
        """Helper function to display image with index `idx`. Returns plot."""
        sample = self[idx]
        f, ax = plt.subplots(figsize=(10, 10))
        # plot image in original scale
        img = ax.imshow(
            np.sqrt(sample['image'][0].numpy()), origin='lower', cmap='gray')
        # plot clouds
        shape = np.array(sample['image'][0].shape)
        center_coo = shape/2
        radius_borders = np.linspace(0, min(shape)/2, 6)
        azimuth_borders = np.linspace(-np.pi, np.pi, 9)
        n_subregions = 33
        # build templates for radius and azimuth
        y, x = np.indices(shape)
        # Bug fix: np.int and np.bool were deprecated aliases removed in
        # NumPy 1.24; use the builtin types instead.
        r_map = np.sqrt((x-center_coo[0])**2 +
                        (y-center_coo[1])**2).astype(int)
        az_map = np.arctan2(y-center_coo[1],
                            x-center_coo[0])
        # build subregion maps
        subregions = np.zeros([n_subregions, *shape], dtype=bool)
        # polygons around each source region in original image dimensions
        for i in range(5):
            for j in range(8):
                if i == 0 and j == 0:
                    # Central circular region (index 0).
                    subregions[0][(r_map < radius_borders[i+1])] = True
                elif i == 0 and j > 0:
                    break
                else:
                    # Annular sector (i-1)*8 + j + 1.
                    subregions[(i-1)*8+j+1][
                        ((r_map > radius_borders[i]) &
                         (r_map < radius_borders[i+1]) &
                         (az_map > azimuth_borders[j]) &
                         (az_map < azimuth_borders[j+1]))] = True
        # create subregion map
        submap = np.zeros(sample['image'][0].shape)
        for i in range(33):
            if sample['clouds'].numpy()[i]:
                submap += subregions[i]
        # plot subregion map
        overlay_img = ax.imshow(submap, cmap='Oranges',
                                origin='lower',
                                vmin=0,
                                alpha=0.2,
                                extent=[0, submap.shape[1],
                                        0, submap.shape[0]])
        overlay_img.axes.get_xaxis().set_visible(False)
        overlay_img.axes.get_yaxis().set_visible(False)
        plt.axis('off')
        img.axes.get_xaxis().set_visible(False)
        img.axes.get_yaxis().set_visible(False)
        return img
# Helper classes for image transformations
class ToTensor(object):
    """Convert the numpy arrays of a sample into torch tensors."""

    def __call__(self, sample):
        # Image becomes float32, label vector becomes int64.
        image = torch.from_numpy(sample['image']).float()
        clouds = torch.from_numpy(sample['clouds']).long()
        return {'image': image, 'clouds': clouds}
class Normalize(object):
    """Normalize image to fixed scale.

    Z-scale clips the image using limits derived from a central window,
    applies a contrast/bias stretch, subtracts the corner value, and
    rescales to [0, 512].
    """
    def __call__(self, sample):
        scale = ZScaleInterval()
        # Limits come from the central 600x600 window only.
        # NOTE(review): assumes the image is at least 800x800 pixels —
        # confirm against the dataset crop above.
        vmin, vmax = scale.get_limits(sample['image'][0][200:800, 200:800])
        # Clip to [vmin, vmax] and rescale to [0, 1].
        newimage = (np.clip(sample['image'][0], vmin, vmax)-vmin)/(vmax-vmin)
        # deactivate stretching: linear stretch
        stretch = ContrastBiasStretch(
            contrast=0.5, bias=0.2) # SquaredStretch()
        newimage = stretch(newimage)
        # Zero the corner pixel as a crude background reference.
        newimage -= newimage[0, 0]
        newimage = LinearStretch()(newimage)*512
        # Restore the leading channel dimension.
        return {'image': newimage.reshape(1, *newimage.shape),
                'clouds': sample['clouds']}
# define image transformation and create dataset
# Normalize first (z-scale + stretch), then convert to torch tensors.
data_transform = Compose([Normalize(), ToTensor()])
alldata = CloudynightDataset(transform=data_transform, maxlen=None)
# split training data set
# 70/30 random train/test split (reproducible via the RNG seeds set above).
train_size = int(0.7 * len(alldata))
test_size = len(alldata) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(
    alldata, [train_size, test_size])
# define model
class CResNet(ResNet):
    """ResNet-18 variant adapted for 33-subregion cloud classification."""

    def __init__(self):
        # Standard ResNet-18 block layout, but with 33 output classes
        # (one logit per sky subregion).
        super(CResNet, self).__init__(BasicBlock, [2, 2, 2, 2], num_classes=33)
        # Replace the stock 7x7 RGB stem with a single-channel 16x16
        # convolution, since the camera images are greyscale.
        self.conv1 = torch.nn.Conv2d(1, 64, kernel_size=(16, 16),
                                     stride=(2, 2), padding=(3, 3),
                                     bias=False)
# initialize model
# run on the first GPU when available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = CResNet().to(device)
# parameters
epochs = 10
# binary cross-entropy with logits: one independent binary label per subregion
loss_function = nn.BCEWithLogitsLoss(reduction='mean')
optimizer = optim.SGD(model.parameters(), lr=0.025, momentum=0.9)
# decay the learning rate by a factor of 0.3 every 5 epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, 5, gamma=0.3)
# define batch sizes
# NOTE(review): with batch size 1, 'batches' equals the number of samples;
# confirm the loss normalization below if batch sizes are ever changed.
batches = len(train_dataset)
val_batches = len(test_dataset)
train_batch_size = 1
val_batch_size = 1
# create data loaders
train_loader = DataLoader(
    train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=4)
val_loader = DataLoader(
    test_dataset, batch_size=val_batch_size, shuffle=True, num_workers=4)
def calculate_metric(metric_fn, true_y, pred_y):
    """Evaluate ``metric_fn`` on the given true/predicted labels.

    Metric functions that accept an ``average`` argument (multi-class
    metrics) are called with weighted averaging restricted to the labels
    actually predicted; all other metrics are called as-is.
    """
    accepts_average = "average" in inspect.getfullargspec(metric_fn).args
    if not accepts_average:
        return metric_fn(true_y, pred_y)
    return metric_fn(true_y, pred_y, average='weighted',
                     labels=np.unique(pred_y))
def print_scores(p, r, f1, a, batch_size):
    """Pretty-print batch-averaged precision/recall/F1/accuracy scores."""
    labelled = (("precision", p), ("recall", r), ("F1", f1), ("accuracy", a))
    for name, scores in labelled:
        mean_score = sum(scores) / batch_size
        print(f"\t{name.rjust(14, ' ')}: {mean_score:.4f}")
# per-epoch bookkeeping of losses and validation accuracy
trainingloss_epoch = []
validationloss_epoch = []
accuracy_epoch = []
start_ts = time.time()
for epoch in range(epochs):
    total_loss = 0
    progress = tqdm(enumerate(train_loader), desc="Loss: ", total=batches)
    # training
    model.train()
    for i, data in progress:
        X, y = data['image'].to(device), data['clouds'].to(device).float()
        # single batch
        model.zero_grad()
        outputs = model(X)
        loss = loss_function(outputs, y)
        loss.backward()
        optimizer.step()
        # running mean loss shown in the progress bar
        current_loss = loss.item()
        total_loss += current_loss
        progress.set_description("Loss: {:.4f}".format(total_loss/(i+1)))
    # release cached GPU memory between epochs
    torch.cuda.empty_cache()
    trainingloss_epoch.append(total_loss)
    # validation
    val_losses = 0
    precision, recall, f1, accuracy = [], [], [], []
    # set model to evaluating (testing)
    model.eval()
    with torch.no_grad():
        for i, data in enumerate(val_loader):
            X, y = data['image'].to(device), data['clouds'].to(device).float()
            outputs = model(X)
            # NOTE(review): outputs are raw logits (BCEWithLogitsLoss);
            # thresholding them at 0.5 is not the same as probability 0.5 —
            # confirm this threshold is intended.
            predicted_labels = torch.zeros(outputs.shape)
            predicted_labels[outputs > 0.5] = 1
            current_loss = loss_function(outputs, y)
            val_losses += current_loss
            # calculate P/R/F1/A metrics for batch
            for acc, metric in zip((precision, recall, f1, accuracy),
                                   (precision_score, recall_score, f1_score, accuracy_score)):
                # reshape vectors to count each subregion individually
                acc.append(
                    calculate_metric(metric, y.reshape(-1, 1).cpu(),
                                     predicted_labels.reshape(-1, 1).cpu())
                )
    validationloss_epoch.append(val_losses)
    print(f"Epoch {epoch+1}/{epochs}, training loss: {total_loss/batches}, validation loss: {val_losses/val_batches}")
    print_scores(precision, recall, f1, accuracy, val_batches)
    accuracy_epoch.append(np.sum(accuracy)/val_batches)
    scheduler.step()
print(f"Training time: {time.time()-start_ts}s")
print('Test score after {} epochs: {}.'.format(
    epochs, accuracy_epoch[-1]))
# normalize loss
trainingloss_epoch_normalized = np.array(
    [t/batches for t in trainingloss_epoch])
validationloss_epoch_normalized = np.array(
    [v.cpu().numpy()/val_batches for v in validationloss_epoch])
# plot loss
plt.plot(range(len(trainingloss_epoch)), np.log(
    trainingloss_epoch_normalized), color='red', label='training', alpha=0.5)
plt.plot(range(len(validationloss_epoch)), np.log(np.array(
    validationloss_epoch_normalized)), color='blue', label='validation', alpha=0.5)
plt.legend()
plt.title('Loss')
plt.xlabel('Epochs')
plt.savefig(outdir+'resnet_loss.png')
plt.close()
# plot accuracy
plt.plot(range(len(accuracy_epoch)), accuracy_epoch,
         color='green', label='val accuracy')
plt.title('Validation Sample Accuracy')
plt.xlabel('Epochs')
plt.savefig(outdir+'resnet_accuracy.png')
|
mommermiREPO_NAMEcloudynightPATH_START.@cloudynight_extracted@cloudynight-master@scripts@model_resnet.py@.PATH_END.py
|
{
"filename": "concatenate.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/merging/concatenate.py",
"type": "Python"
}
|
import copy
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Concatenate")
class Concatenate(Merge):
    """Concatenates a list of inputs.

    It takes as input a list of tensors, all of the same shape except
    for the concatenation axis, and returns a single tensor that is the
    concatenation of all inputs.

    Examples:

    >>> x = np.arange(20).reshape(2, 2, 5)
    >>> y = np.arange(20, 30).reshape(2, 1, 5)
    >>> keras.layers.Concatenate(axis=1)([x, y])

    Usage in a Keras model:

    >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
    >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
    >>> y = keras.layers.Concatenate()([x1, x2])

    Args:
        axis: Axis along which to concatenate.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the concatenation of the inputs alongside axis `axis`.
    """

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.supports_masking = True
        self._reshape_required = False

    def build(self, input_shape):
        # Used purely for shape validation.
        if len(input_shape) < 1 or not isinstance(
            input_shape[0], (tuple, list)
        ):
            raise ValueError(
                "A `Concatenate` layer should be called on a list of "
                f"at least 1 input. Received: input_shape={input_shape}"
            )
        if all(shape is None for shape in input_shape):
            return
        reduced_inputs_shapes = [list(shape) for shape in input_shape]
        reduced_inputs_shapes_copy = copy.copy(reduced_inputs_shapes)
        shape_set = set()
        for i in range(len(reduced_inputs_shapes_copy)):
            # Convert self.axis to positive axis for each input
            # in case self.axis is a negative number
            concat_axis = self.axis % len(reduced_inputs_shapes_copy[i])
            # Skip batch axis.
            # BUG FIX: iterate over the axes of the i-th input shape.
            # Previously this enumerated the outer list of shapes, so
            # `axis_value` was a whole shape list, `axis_value == 1` was
            # never true, and the squeezable-axis removal below never ran.
            for axis, axis_value in enumerate(
                reduced_inputs_shapes_copy[i][1:], start=1
            ):
                # Remove squeezable axes (axes with value of 1)
                # if not in the axis that will be used for concatenation
                # otherwise leave it.
                # This approach allows building the layer,
                # but if tensor shapes are not the same when
                # calling, an exception will be raised.
                if axis != concat_axis and axis_value == 1:
                    del reduced_inputs_shapes[i][axis]
            # Drop the concatenation axis itself before comparing shapes.
            if len(reduced_inputs_shapes[i]) > self.axis:
                del reduced_inputs_shapes[i][self.axis]
            shape_set.add(tuple(reduced_inputs_shapes[i]))
        if len(shape_set) != 1:
            err_msg = (
                "A `Concatenate` layer requires inputs with matching shapes "
                "except for the concatenation axis. "
                f"Received: input_shape={input_shape}"
            )
            # Make sure all the shapes have same ranks.
            ranks = set(len(shape) for shape in shape_set)
            if len(ranks) != 1:
                raise ValueError(err_msg)
            # Get the only rank for the set.
            (rank,) = ranks
            for axis in range(rank):
                # Skip the Nones in the shape since they are dynamic, also the
                # axis for concat has been removed above.
                unique_dims = set(
                    shape[axis]
                    for shape in shape_set
                    if shape[axis] is not None
                )
                if len(unique_dims) > 1:
                    raise ValueError(err_msg)
        self.built = True

    def _merge_function(self, inputs):
        return ops.concatenate(inputs, axis=self.axis)

    def compute_output_shape(self, input_shape):
        if (not isinstance(input_shape, (tuple, list))) or (
            not isinstance(input_shape[0], (tuple, list))
        ):
            raise ValueError(
                "A `Concatenate` layer should be called on a list of inputs. "
                f"Received: input_shape={input_shape}"
            )
        input_shapes = input_shape
        output_shape = list(input_shapes[0])
        for shape in input_shapes[1:]:
            # Any dynamic (None) dimension on the concat axis makes the
            # output dimension dynamic as well.
            if output_shape[self.axis] is None or shape[self.axis] is None:
                output_shape[self.axis] = None
                break
            output_shape[self.axis] += shape[self.axis]
        return tuple(output_shape)

    def compute_mask(self, inputs, mask=None):
        if mask is None:
            return None
        if not isinstance(mask, (tuple, list)):
            raise ValueError(f"`mask` should be a list. Received mask={mask}")
        if not isinstance(inputs, (tuple, list)):
            raise ValueError(
                f"`inputs` should be a list. Received: inputs={inputs}"
            )
        if len(mask) != len(inputs):
            raise ValueError(
                "The lists `inputs` and `mask` should have the same length. "
                f"Received: inputs={inputs} of length {len(inputs)}, and "
                f"mask={mask} of length {len(mask)}"
            )
        if all(m is None for m in mask):
            return None
        # Make a list of masks while making sure
        # the dimensionality of each mask
        # is the same as the corresponding input.
        masks = []
        for input_i, mask_i in zip(inputs, mask):
            if mask_i is None:
                # Input is unmasked. Append all 1s to masks,
                masks.append(ops.ones_like(input_i, dtype="bool"))
            elif mask_i.ndim < input_i.ndim:
                # Mask is smaller than the input, expand it
                masks.append(
                    ops.broadcast_to(
                        ops.expand_dims(mask_i, axis=-1), ops.shape(input_i)
                    )
                )
            else:
                masks.append(mask_i)
        concatenated = ops.concatenate(masks, axis=self.axis)
        return ops.any(concatenated, axis=-1, keepdims=False)

    def get_config(self):
        config = {"axis": self.axis}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
@keras_export("keras.layers.concatenate")
def concatenate(inputs, axis=-1, **kwargs):
    """Functional interface to the `Concatenate` layer.

    Args:
        inputs: A list of input tensors.
        axis: Concatenation axis.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the concatenation of the inputs alongside axis `axis`.
    """
    layer = Concatenate(axis=axis, **kwargs)
    return layer(inputs)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@merging@concatenate.py@.PATH_END.py
|
{
"filename": "her_replay_buffer.py",
"repo_name": "DLR-RM/stable-baselines3",
"repo_path": "stable-baselines3_extracted/stable-baselines3-master/stable_baselines3/her/her_replay_buffer.py",
"type": "Python"
}
|
import copy
import warnings
from typing import Any, Optional, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common.buffers import DictReplayBuffer
from stable_baselines3.common.type_aliases import DictReplayBufferSamples
from stable_baselines3.common.vec_env import VecEnv, VecNormalize
from stable_baselines3.her.goal_selection_strategy import KEY_TO_GOAL_STRATEGY, GoalSelectionStrategy
class HerReplayBuffer(DictReplayBuffer):
    """
    Hindsight Experience Replay (HER) buffer.
    Paper: https://arxiv.org/abs/1707.01495
    Replay buffer for sampling HER (Hindsight Experience Replay) transitions.
    .. note::
      Compared to other implementations, the ``future`` goal sampling strategy is inclusive:
      the current transition can be used when re-sampling.
    :param buffer_size: Max number of element in the buffer
    :param observation_space: Observation space
    :param action_space: Action space
    :param env: The training environment
    :param device: PyTorch device
    :param n_envs: Number of parallel environments
    :param optimize_memory_usage: Enable a memory efficient variant
        Disabled for now (see https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702)
    :param handle_timeout_termination: Handle timeout termination (due to timelimit)
        separately and treat the task as infinite horizon task.
        https://github.com/DLR-RM/stable-baselines3/issues/284
    :param n_sampled_goal: Number of virtual transitions to create per real transition,
        by sampling new goals.
    :param goal_selection_strategy: Strategy for sampling goals for replay.
        One of ['episode', 'final', 'future']
    :param copy_info_dict: Whether to copy the info dictionary and pass it to
        ``compute_reward()`` method.
        Please note that the copy may cause a slowdown.
        False by default.
    """

    # Reference to the training env; excluded from pickling and restored
    # afterwards via set_env().
    env: Optional[VecEnv]

    def __init__(
        self,
        buffer_size: int,
        observation_space: spaces.Dict,
        action_space: spaces.Space,
        env: VecEnv,
        device: Union[th.device, str] = "auto",
        n_envs: int = 1,
        optimize_memory_usage: bool = False,
        handle_timeout_termination: bool = True,
        n_sampled_goal: int = 4,
        goal_selection_strategy: Union[GoalSelectionStrategy, str] = "future",
        copy_info_dict: bool = False,
    ):
        super().__init__(
            buffer_size,
            observation_space,
            action_space,
            device=device,
            n_envs=n_envs,
            optimize_memory_usage=optimize_memory_usage,
            handle_timeout_termination=handle_timeout_termination,
        )
        self.env = env
        self.copy_info_dict = copy_info_dict
        # convert goal_selection_strategy into GoalSelectionStrategy if string
        if isinstance(goal_selection_strategy, str):
            self.goal_selection_strategy = KEY_TO_GOAL_STRATEGY[goal_selection_strategy.lower()]
        else:
            self.goal_selection_strategy = goal_selection_strategy
        # check if goal_selection_strategy is valid
        assert isinstance(
            self.goal_selection_strategy, GoalSelectionStrategy
        ), f"Invalid goal selection strategy, please use one of {list(GoalSelectionStrategy)}"
        self.n_sampled_goal = n_sampled_goal
        # Compute ratio between HER replays and regular replays in percent
        self.her_ratio = 1 - (1.0 / (self.n_sampled_goal + 1))
        # In some environments, the info dict is used to compute the reward. Then, we need to store it.
        self.infos = np.array([[{} for _ in range(self.n_envs)] for _ in range(self.buffer_size)])
        # To create virtual transitions, we need to know for each transition
        # when an episode starts and ends.
        # We use the following arrays to store the indices,
        # and update them when an episode ends.
        self.ep_start = np.zeros((self.buffer_size, self.n_envs), dtype=np.int64)
        self.ep_length = np.zeros((self.buffer_size, self.n_envs), dtype=np.int64)
        self._current_ep_start = np.zeros(self.n_envs, dtype=np.int64)

    def __getstate__(self) -> dict[str, Any]:
        """
        Gets state for pickling.
        Excludes self.env, as in general Env's may not be pickleable.
        """
        state = self.__dict__.copy()
        # these attributes are not pickleable
        del state["env"]
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        """
        Restores pickled state.
        User must call ``set_env()`` after unpickling before using.
        :param state:
        """
        self.__dict__.update(state)
        assert "env" not in state
        self.env = None

    def set_env(self, env: VecEnv) -> None:
        """
        Sets the environment.
        :param env:
        """
        if self.env is not None:
            raise ValueError("Trying to set env of already initialized environment.")
        self.env = env

    def add(  # type: ignore[override]
        self,
        obs: dict[str, np.ndarray],
        next_obs: dict[str, np.ndarray],
        action: np.ndarray,
        reward: np.ndarray,
        done: np.ndarray,
        infos: list[dict[str, Any]],
    ) -> None:
        # When the buffer is full, we rewrite on old episodes. When we start to
        # rewrite on an old episodes, we want the whole old episode to be deleted
        # (and not only the transition on which we rewrite). To do this, we set
        # the length of the old episode to 0, so it can't be sampled anymore.
        for env_idx in range(self.n_envs):
            episode_start = self.ep_start[self.pos, env_idx]
            episode_length = self.ep_length[self.pos, env_idx]
            if episode_length > 0:
                episode_end = episode_start + episode_length
                # modulo handles episodes that wrap around the circular buffer
                episode_indices = np.arange(self.pos, episode_end) % self.buffer_size
                self.ep_length[episode_indices, env_idx] = 0
        # Update episode start
        self.ep_start[self.pos] = self._current_ep_start.copy()
        if self.copy_info_dict:
            self.infos[self.pos] = infos
        # Store the transition
        super().add(obs, next_obs, action, reward, done, infos)
        # When episode ends, compute and store the episode length
        for env_idx in range(self.n_envs):
            if done[env_idx]:
                self._compute_episode_length(env_idx)

    def _compute_episode_length(self, env_idx: int) -> None:
        """
        Compute and store the episode length for environment with index env_idx
        :param env_idx: index of the environment for which the episode length should be computed
        """
        episode_start = self._current_ep_start[env_idx]
        episode_end = self.pos
        if episode_end < episode_start:
            # Occurs when the buffer becomes full, the storage resumes at the
            # beginning of the buffer. This can happen in the middle of an episode.
            episode_end += self.buffer_size
        episode_indices = np.arange(episode_start, episode_end) % self.buffer_size
        self.ep_length[episode_indices, env_idx] = episode_end - episode_start
        # Update the current episode start
        self._current_ep_start[env_idx] = self.pos

    def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> DictReplayBufferSamples:  # type: ignore[override]
        """
        Sample elements from the replay buffer.
        :param batch_size: Number of element to sample
        :param env: Associated VecEnv to normalize the observations/rewards when sampling
        :return: Samples
        """
        # When the buffer is full, we rewrite on old episodes. We don't want to
        # sample incomplete episode transitions, so we have to eliminate some indexes.
        is_valid = self.ep_length > 0
        if not np.any(is_valid):
            raise RuntimeError(
                "Unable to sample before the end of the first episode. We recommend choosing a value "
                "for learning_starts that is greater than the maximum number of timesteps in the environment."
            )
        # Get the indices of valid transitions
        # Example:
        # if is_valid = [[True, False, False], [True, False, True]],
        # is_valid has shape (buffer_size=2, n_envs=3)
        # then valid_indices = [0, 3, 5]
        # they correspond to is_valid[0, 0], is_valid[1, 0] and is_valid[1, 2]
        # or in numpy format ([rows], [columns]): (array([0, 1, 1]), array([0, 0, 2]))
        # Those indices are obtained back using np.unravel_index(valid_indices, is_valid.shape)
        valid_indices = np.flatnonzero(is_valid)
        # Sample valid transitions that will constitute the minibatch of size batch_size
        sampled_indices = np.random.choice(valid_indices, size=batch_size, replace=True)
        # Unravel the indexes, i.e. recover the batch and env indices.
        # Example: if sampled_indices = [0, 3, 5], then batch_indices = [0, 1, 1] and env_indices = [0, 0, 2]
        batch_indices, env_indices = np.unravel_index(sampled_indices, is_valid.shape)
        # Split the indexes between real and virtual transitions.
        nb_virtual = int(self.her_ratio * batch_size)
        virtual_batch_indices, real_batch_indices = np.split(batch_indices, [nb_virtual])
        virtual_env_indices, real_env_indices = np.split(env_indices, [nb_virtual])
        # Get real and virtual data
        real_data = self._get_real_samples(real_batch_indices, real_env_indices, env)
        # Create virtual transitions by sampling new desired goals and computing new rewards
        virtual_data = self._get_virtual_samples(virtual_batch_indices, virtual_env_indices, env)
        # Concatenate real and virtual data
        observations = {
            key: th.cat((real_data.observations[key], virtual_data.observations[key]))
            for key in virtual_data.observations.keys()
        }
        actions = th.cat((real_data.actions, virtual_data.actions))
        next_observations = {
            key: th.cat((real_data.next_observations[key], virtual_data.next_observations[key]))
            for key in virtual_data.next_observations.keys()
        }
        dones = th.cat((real_data.dones, virtual_data.dones))
        rewards = th.cat((real_data.rewards, virtual_data.rewards))
        return DictReplayBufferSamples(
            observations=observations,
            actions=actions,
            next_observations=next_observations,
            dones=dones,
            rewards=rewards,
        )

    def _get_real_samples(
        self,
        batch_indices: np.ndarray,
        env_indices: np.ndarray,
        env: Optional[VecNormalize] = None,
    ) -> DictReplayBufferSamples:
        """
        Get the samples corresponding to the batch and environment indices.
        :param batch_indices: Indices of the transitions
        :param env_indices: Indices of the environments
        :param env: associated gym VecEnv to normalize the
            observations/rewards when sampling, defaults to None
        :return: Samples
        """
        # Normalize if needed and remove extra dimension (we are using only one env for now)
        obs_ = self._normalize_obs({key: obs[batch_indices, env_indices, :] for key, obs in self.observations.items()}, env)
        next_obs_ = self._normalize_obs(
            {key: obs[batch_indices, env_indices, :] for key, obs in self.next_observations.items()}, env
        )
        assert isinstance(obs_, dict)
        assert isinstance(next_obs_, dict)
        # Convert to torch tensor
        observations = {key: self.to_torch(obs) for key, obs in obs_.items()}
        next_observations = {key: self.to_torch(obs) for key, obs in next_obs_.items()}
        return DictReplayBufferSamples(
            observations=observations,
            actions=self.to_torch(self.actions[batch_indices, env_indices]),
            next_observations=next_observations,
            # Only use dones that are not due to timeouts
            # deactivated by default (timeouts is initialized as an array of False)
            dones=self.to_torch(
                self.dones[batch_indices, env_indices] * (1 - self.timeouts[batch_indices, env_indices])
            ).reshape(-1, 1),
            rewards=self.to_torch(self._normalize_reward(self.rewards[batch_indices, env_indices].reshape(-1, 1), env)),
        )

    def _get_virtual_samples(
        self,
        batch_indices: np.ndarray,
        env_indices: np.ndarray,
        env: Optional[VecNormalize] = None,
    ) -> DictReplayBufferSamples:
        """
        Get the samples, sample new desired goals and compute new rewards.
        :param batch_indices: Indices of the transitions
        :param env_indices: Indices of the environments
        :param env: associated gym VecEnv to normalize the
            observations/rewards when sampling, defaults to None
        :return: Samples, with new desired goals and new rewards
        """
        # Get infos and obs
        obs = {key: obs[batch_indices, env_indices, :] for key, obs in self.observations.items()}
        next_obs = {key: obs[batch_indices, env_indices, :] for key, obs in self.next_observations.items()}
        if self.copy_info_dict:
            # The copy may cause a slow down
            infos = copy.deepcopy(self.infos[batch_indices, env_indices])
        else:
            infos = [{} for _ in range(len(batch_indices))]
        # Sample and set new goals
        new_goals = self._sample_goals(batch_indices, env_indices)
        obs["desired_goal"] = new_goals
        # The desired goal for the next observation must be the same as the previous one
        next_obs["desired_goal"] = new_goals
        assert (
            self.env is not None
        ), "You must initialize HerReplayBuffer with a VecEnv so it can compute rewards for virtual transitions"
        # Compute new reward
        rewards = self.env.env_method(
            "compute_reward",
            # the new state depends on the previous state and action
            # s_{t+1} = f(s_t, a_t)
            # so the next achieved_goal depends also on the previous state and action
            # because we are in a GoalEnv:
            # r_t = reward(s_t, a_t) = reward(next_achieved_goal, desired_goal)
            # therefore we have to use next_obs["achieved_goal"] and not obs["achieved_goal"]
            next_obs["achieved_goal"],
            # here we use the new desired goal
            obs["desired_goal"],
            infos,
            # we use the method of the first environment assuming that all environments are identical.
            indices=[0],
        )
        rewards = rewards[0].astype(np.float32)  # env_method returns a list containing one element
        obs = self._normalize_obs(obs, env)  # type: ignore[assignment]
        next_obs = self._normalize_obs(next_obs, env)  # type: ignore[assignment]
        # Convert to torch tensor
        observations = {key: self.to_torch(obs) for key, obs in obs.items()}
        next_observations = {key: self.to_torch(obs) for key, obs in next_obs.items()}
        return DictReplayBufferSamples(
            observations=observations,
            actions=self.to_torch(self.actions[batch_indices, env_indices]),
            next_observations=next_observations,
            # Only use dones that are not due to timeouts
            # deactivated by default (timeouts is initialized as an array of False)
            dones=self.to_torch(
                self.dones[batch_indices, env_indices] * (1 - self.timeouts[batch_indices, env_indices])
            ).reshape(-1, 1),
            rewards=self.to_torch(self._normalize_reward(rewards.reshape(-1, 1), env)),  # type: ignore[attr-defined]
        )

    def _sample_goals(self, batch_indices: np.ndarray, env_indices: np.ndarray) -> np.ndarray:
        """
        Sample goals based on goal_selection_strategy.
        :param batch_indices: Indices of the transitions
        :param env_indices: Indices of the environments
        :return: Sampled goals
        """
        batch_ep_start = self.ep_start[batch_indices, env_indices]
        batch_ep_length = self.ep_length[batch_indices, env_indices]
        if self.goal_selection_strategy == GoalSelectionStrategy.FINAL:
            # Replay with final state of current episode
            transition_indices_in_episode = batch_ep_length - 1
        elif self.goal_selection_strategy == GoalSelectionStrategy.FUTURE:
            # Replay with random state which comes from the same episode and was observed after current transition
            # Note: our implementation is inclusive: current transition can be sampled
            current_indices_in_episode = (batch_indices - batch_ep_start) % self.buffer_size
            transition_indices_in_episode = np.random.randint(current_indices_in_episode, batch_ep_length)
        elif self.goal_selection_strategy == GoalSelectionStrategy.EPISODE:
            # Replay with random state which comes from the same episode as current transition
            transition_indices_in_episode = np.random.randint(0, batch_ep_length)
        else:
            raise ValueError(f"Strategy {self.goal_selection_strategy} for sampling goals not supported!")
        transition_indices = (transition_indices_in_episode + batch_ep_start) % self.buffer_size
        return self.next_observations["achieved_goal"][transition_indices, env_indices]

    def truncate_last_trajectory(self) -> None:
        """
        If called, we assume that the last trajectory in the replay buffer was finished
        (and truncate it).
        If not called, we assume that we continue the same trajectory (same episode).
        """
        # If we are at the start of an episode, no need to truncate
        if (self._current_ep_start != self.pos).any():
            warnings.warn(
                "The last trajectory in the replay buffer will be truncated.\n"
                "If you are in the same episode as when the replay buffer was saved,\n"
                "you should use `truncate_last_trajectory=False` to avoid that issue."
            )
        # only consider episodes that are not finished
        for env_idx in np.where(self._current_ep_start != self.pos)[0]:
            # set done = True for last episodes
            self.dones[self.pos - 1, env_idx] = True
            # make sure that last episodes can be sampled and
            # update next episode start (self._current_ep_start)
            self._compute_episode_length(env_idx)
            # handle infinite horizon tasks
            if self.handle_timeout_termination:
                self.timeouts[self.pos - 1, env_idx] = True  # not an actual timeout, but it allows bootstrapping
|
DLR-RMREPO_NAMEstable-baselines3PATH_START.@stable-baselines3_extracted@stable-baselines3-master@stable_baselines3@her@her_replay_buffer.py@.PATH_END.py
|
{
"filename": "audio.py",
"repo_name": "ledatelescope/bifrost",
"repo_path": "bifrost_extracted/bifrost-master/python/bifrost/blocks/audio.py",
"type": "Python"
}
|
# Copyright (c) 2016-2023, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bifrost.pipeline import SourceBlock
import bifrost.portaudio as audio
from bifrost import telemetry
telemetry.track_module()
class AudioSourceBlock(SourceBlock):
    """Source block that streams frames from a portaudio input device."""

    def create_reader(self, kwargs):
        # Open the portaudio stream in read mode; kwargs is one of the
        # per-source dicts passed to read_audio().
        self.reader = audio.open(mode='r', **kwargs)
        return self.reader

    def on_sequence(self, reader, kwargs):
        # Default the buffer size to one gulp of frames.
        # NOTE(review): this mutates kwargs after the reader was already
        # created in create_reader() — confirm the value is still honored.
        if 'frames_per_buffer' not in kwargs:
            kwargs['frames_per_buffer'] = self.gulp_nframe
        # Output header: signed-integer samples of the reader's bit depth,
        # shaped [time, pol] with one 'pol' per audio channel.
        ohdr = {
            '_tensor': {
                'dtype': 'i' + str(reader.nbits),
                'shape': [-1, reader.channels],
                'labels': ['time', 'pol'],
                'scales': [1. / reader.rate, None],
                'units': ['s', None]
            },
            'frame_rate': reader.rate,
            'input_device': reader.input_device,
            'name': str(id(reader))
        }
        return [ohdr]

    def on_data(self, reader, ospans):
        ospan = ospans[0]
        try:
            reader.readinto(ospan.data)
        except audio.PortAudioError:
            # On a stream error, report zero frames read so the pipeline
            # can end the sequence gracefully.
            #raise StopIteration
            return [0]
        nframe = ospan.shape[0]
        return [nframe]

    def stop(self):
        self.reader.stop()
def read_audio(audio_kwargs, gulp_nframe, *args, **kwargs):
    """Read data from an audio input device.

    Requires the portaudio library to be installed::

        $ sudo apt-get install portaudio19-dev

    Args:
        audio_kwargs (list): List of dicts containing audio input parameters
            Defaults:
            ``rate=44100``
            ``channels=2``
            ``nbits=16``
            ``frames_per_buffer=1024``
            ``input_device=None``
        gulp_nframe (int): No. frames to read at a time.
        *args: Arguments to ``bifrost.pipeline.TransformBlock``.
        **kwargs: Keyword Arguments to ``bifrost.pipeline.TransformBlock``.

    **Tensor semantics**::

        Output: ['time', 'pol'], dtype = i*, space = SYSTEM

    Returns:
        AudioBlock: A new block instance.
    """
    # audio_kwargs plays the role of 'sourcenames' in the SourceBlock API.
    block = AudioSourceBlock(audio_kwargs, gulp_nframe, *args, **kwargs)
    return block
|
ledatelescopeREPO_NAMEbifrostPATH_START.@bifrost_extracted@bifrost-master@python@bifrost@blocks@audio.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "magic-sph/magic",
"repo_path": "magic_extracted/magic-master/src/README.md",
"type": "Markdown"
}
|
# Makefile options
### 1. Select compiler
Set a suitable compiler in the first line of the Makefile: `COMPILER = <compiler_phrase>`. The options are `intel`, `gnu` or `portland` - depending on your available compilers.
**Table** : List of default compilers
| Compiler Option | Normal | With MPI |
|:---------------:|:-------------:|:----------------:|
| intel | ifort, icc | mpiifort, mpiicc |
| gnu | gfortran, gcc | mpif90, mpicc |
| portland | pgf95, pgcc | mpif90, mpicc |
### 2. Select compiling options
* `PRODRUN` Set it to `yes` for production run, `no` for debugging.
* `PRECISION` Set it to 'dble' for double-precision calculations or to 'sngl' for single-precision calculations
* `OUT_PREC` Set it to 'dble' for double-precision binary outputs or to 'sngl' for single-precision binary outputs
* `DEBUG` Set to `yes` to run in debugging mode. *While running in debugging mode, set* `PRODRUN` *to* `no`. The debug mode with intel compilers uses `marmot90`.
* `USE_MPI` Set to `yes` to use MPI
* `USE_OMP` Set to `yes` to use openmp (cannot work without MPI)
* `USE_PRECOND` Set to `yes` to perform some pre-conditioning of the matrices
* `USE_FFTLIB` This option lets you select the library you want to use for Fast Fourier Transforms. This can be set to `JW`, `FFTW` or `MKL`. `JW` refers to the inbuilt library by **J**ohannes **W**icht, while `MKL` refers to the [Intel **M**ath **K**ernel **L**ibrary](https://software.intel.com/en-us/intel-mkl). Use `JW` if you don't have Intel MKL installed.
* `USE_DCTLIB` This option lets you select the library you want to use for Discrete Cosine Transforms. This can be set to `JW`, `FFTW` or `MKL`.
* `USE_LAPACKLIB` This option allows you to select the library you want to use for LU factorisation. This can be set to `JW`, `MKL`, `LIBFLAME` or `LAPACK`.
* `USE_SHTNS` Set to `yes` to use SHTns library for spherical harmonics transforms. The helper script `install-shtns.sh` is available in the `bin` directory to help installing SHTns.
### 3. MPI_INCPATH
Make sure you set the path for your mpi header file `mpif.h` in `MPI_INCPATH`. The path depends on the computer. For PCs, this is commonly `/usr/include` or `/usr/include/mpi`. Use [Open MPI](http://www.open-mpi.de/) for running MagIC on a PC. For computing clusters, please look through the documentation of the respective cluster for their MPI implementation.
### 4. Other compilers
If your available compilers are different from the options provided in the Makefile, then change them suitably using the options `COMP_FC` and `COMP_CC` for serial fortran and C compilers and `COMP_MPFC` and `COMP_MPCC` for compilers with mpi implementation.
|
magic-sphREPO_NAMEmagicPATH_START.@magic_extracted@magic-master@src@README.md@.PATH_END.py
|
{
"filename": "_yperiod0.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contour/_yperiod0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Yperiod0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``yperiod0`` property of ``contour`` traces."""

    def __init__(self, plotly_name="yperiod0", parent_name="contour", **kwargs):
        # Default the edit type to "calc" unless the caller supplied one.
        kwargs.setdefault("edit_type", "calc")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contour@_yperiod0.py@.PATH_END.py
|
{
"filename": "test_doppler_greedy.py",
"repo_name": "rodluger/starry",
"repo_path": "starry_extracted/starry-master/tests/greedy/test_doppler_greedy.py",
"type": "Python"
}
|
import starry
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import diags
import pytest
@pytest.fixture(scope="module", params=[1, 2])
def map(request):
    """Module-scoped DopplerMap fixture, parametrized over nc = 1 and 2 spectral components."""
    nc = request.param
    # NOTE(review): shadows the builtin `map`; name kept because tests request
    # the fixture by this name.
    map = starry.DopplerMap(ydeg=10, udeg=2, nt=3, nc=nc, veq=50000)
    # Load one named surface map per spectral component.
    map.load(maps=["spot", "earth"][:nc])
    yield map
@pytest.fixture(scope="function")
def random():
    """Per-test NumPy random generator with a fixed seed for reproducibility."""
    yield np.random.default_rng(0)
def test_flux(map):
    """
    Check that every flux-computation backend agrees with the first one.
    """
    methods = ("convdot", "dotconv", "conv", "design")
    reference, *others = [map.flux(method=m) for m in methods]
    for flux in others:
        assert np.allclose(reference, flux)
@pytest.mark.parametrize("ranktwo", [False, True])
@pytest.mark.parametrize("transpose", [False, True])
@pytest.mark.parametrize("fix_spectrum", [False, True])
@pytest.mark.parametrize("fix_map", [False, True])
def test_dot(map, random, ranktwo, transpose, fix_spectrum, fix_map):
    """
    Test that our fast dot product method yields the same result as
    instantiating the full design matrix and dotting it in.
    """
    # Skip invalid combo: spectrum and map cannot both be held fixed.
    if fix_spectrum and fix_map:
        return
    # Get the design matrix
    D = map.design_matrix(fix_spectrum=fix_spectrum, fix_map=fix_map)
    # Instantiate the thing we're dotting it into; its leading-axis length
    # must match the trailing axis of the (possibly transposed) D.
    if transpose:
        D = D.transpose()
        size = [map.nt * map.nw]
    else:
        if fix_spectrum:
            size = [map.nc * map.Ny]
        elif fix_map:
            size = [map.nc * map.nw0_]
        else:
            size = [map.nw0_ * map.Ny]
    if ranktwo:
        # Also exercise rank-2 (matrix) operands, not just vectors.
        size += [5]
    matrix = random.normal(size=size)
    # Slow product: explicit design-matrix multiplication
    product1 = D.dot(matrix)
    # Fast product: map.dot computes the same contraction internally
    product2 = map.dot(
        matrix, transpose=transpose, fix_spectrum=fix_spectrum, fix_map=fix_map
    )
    assert np.allclose(np.squeeze(product1), np.squeeze(product2))
def test_D_fixed_spectrum(map, random):
    """
    Test that our fast method for computing the design matrix
    for fixed input spectrum yields the same result as instantiating
    the full design matrix and dotting the spectral block matrix `S` in.
    """
    # Compute it manually: accumulate one D @ S block per spectral component.
    DS = np.zeros((map.nt * map.nw, 0))
    D = map.design_matrix().todense()
    for k in range(map.nc):
        # S is block-diagonal with Ny copies of the k-th spectrum as columns.
        S = block_diag(
            *[map.spectrum_[k].reshape(-1, 1) for n in range(map.Ny)]
        )
        DS = np.hstack((DS, D @ S))
    # Compute it with starry
    DS_fast = map.design_matrix(fix_spectrum=True)
    # Check that the matrices are the same
    assert np.allclose(DS, DS_fast)
    # Check that this procedure yields the correct flux
    flux1 = (DS_fast @ map.y.T.reshape(-1)).reshape(map.nt, map.nw)
    flux2 = (D @ map.spectral_map).reshape(map.nt, map.nw)
    assert np.allclose(flux1, flux2)
def test_D_fixed_map(map, random):
    """
    Test that our fast method for computing the design matrix
    for fixed input map yields the same result as instantiating
    the full design matrix and dotting the map block matrix `Y` in.
    """
    # Compute it manually: accumulate one D @ Y block per spectral component.
    DY = np.zeros((map.nt * map.nw, 0))
    D = map.design_matrix().todense()
    # With a single component, map.y is 1-D; promote it to a column matrix
    # so the indexing below is uniform across nc values.
    if map.nc == 1:
        y = np.reshape(map.y, [-1, 1])
    else:
        y = map.y
    for k in range(map.nc):
        # Y stacks Ny diagonal blocks, the n-th scaled by coefficient y[n, k],
        # placed at successive row offsets of nw0_.
        Y = diags(
            [np.ones(map.nw0_) * y[n, k] for n in range(map.Ny)],
            offsets=-np.arange(0, map.Ny) * map.nw0_,
            shape=(map.Ny * map.nw0_, map.nw0_),
        ).todense()
        DY = np.hstack((DY, D @ Y))
    # Compute it with starry
    DY_fast = map.design_matrix(fix_map=True)
    # Check that the matrices are the same
    assert np.allclose(DY, DY_fast)
    # Check that this procedure yields the correct flux
    flux1 = (DY_fast @ map.spectrum_.reshape(-1)).reshape(map.nt, map.nw)
    flux2 = (D @ map.spectral_map).reshape(map.nt, map.nw)
    assert np.allclose(flux1, flux2)
def test_ld_indices(map):
    """
    Exercise limb-darkening coefficient setting/getting.
    """
    # Assign both free coefficients at once via a slice.
    map[1:] = [0.5, 0.25]
    expected = [-1, 0.5, 0.25]
    assert np.array_equal(map._u, expected)
    assert np.array_equal(map[:], expected)
    # Assign coefficients one at a time and read them back.
    for index, value in ((1, 0.75), (2, 0.10)):
        map[index] = value
        assert map._u[index] == value
        assert map[index] == value
    # Writing the full slice (which includes u_0) must be rejected.
    with pytest.raises(ValueError):
        map[:] = [0.5, 0.25]
def test_ylm_indices(map, random):
    """
    Test sph harm coeff setting/getting.

    Coefficients live in a flat vector indexed by l**2 + l + m; with
    multiple components (nc > 1) a trailing component axis is added.
    """
    if map.nc == 1:
        # --- Single-component maps: index is (l, m) with optional c = 0 ---
        # Set all coeffs (1st method)
        y = random.normal(size=map.Ny)
        map[:, :] = y
        assert np.array_equal(map.y, y)
        # Set all coeffs (2nd method)
        y = random.normal(size=map.Ny)
        map[:, :, :] = y
        assert np.array_equal(map.y, y)
        # Set all coeffs (3rd method)
        y = random.normal(size=map.Ny)
        map[:, :, 0] = y
        assert np.array_equal(map.y, y)
        # Set all coeffs (4th method: column-vector input is flattened)
        y = random.normal(size=(map.Ny, 1))
        map[:, :, 0] = y
        assert np.array_equal(map.y, y.reshape(-1))
        # Set one coeff; flat index is l**2 + l + m
        y = random.normal()
        l, m = (5, -3)
        map[l, m] = y
        assert map[l, m] == y
        assert map.y[l ** 2 + l + m] == y
        # Set several coeffs (single l, all ms): 2l+1 entries starting at l**2
        l = 5
        y = random.normal(size=(2 * l + 1))
        map[l, :] = y
        assert np.array_equal(map[l, :].reshape(-1), y)
        assert np.array_equal(
            map.y[l ** 2 : l ** 2 + 2 * l + 1].reshape(-1), y
        )
        # Set several coeffs (l = (4, 5) and m = (3, 4))
        y = random.normal(size=4)
        map[4:6, 3:5] = y
        assert np.array_equal(map[4:6, 3:5].reshape(-1), y)
        assert np.array_equal(
            np.array([map[4, 3], map[4, 4], map[5, 3], map[5, 4]]).reshape(-1),
            y,
        )
    elif map.nc == 2:
        # --- Two-component maps: index is (l, m, c) ---
        # Set all coeffs
        y = random.normal(size=(map.Ny, map.nc))
        map[:, :, :] = y
        assert np.array_equal(map.y, y)
        # Set all coeffs for one component
        y = random.normal(size=map.Ny)
        map[:, :, 0] = y
        assert np.array_equal(map.y[:, 0].reshape(-1), y)
        assert np.array_equal(map[:, :, 0].reshape(-1), y)
        # Set all coeffs for one component (matrix input)
        y = random.normal(size=(map.Ny, 1))
        map[:, :, 0] = y
        assert np.array_equal(map.y[:, 0].reshape(-1), y.reshape(-1))
        # Set one coeff
        y = random.normal()
        l, m, c = (5, -3, 0)
        map[l, m, c] = y
        assert map[l, m, c] == y
        assert map.y[l ** 2 + l + m, c] == y
        # Set several coeffs (single l, all ms, single c)
        l = 5
        c = 0
        y = random.normal(size=(2 * l + 1))
        map[l, :, c] = y
        assert np.array_equal(map[l, :, c].reshape(-1), y)
        assert np.array_equal(
            map.y[l ** 2 : l ** 2 + 2 * l + 1, c].reshape(-1), y
        )
        # Set several coeffs (l = (4, 5) and m = (3, 4), c = 0)
        y = random.normal(size=4)
        map[4:6, 3:5, 0] = y
        assert np.array_equal(map[4:6, 3:5, 0].reshape(-1), y)
        assert np.array_equal(
            np.array(
                [map[4, 3, 0], map[4, 4, 0], map[5, 3, 0], map[5, 4, 0]]
            ).reshape(-1),
            y,
        )
|
rodlugerREPO_NAMEstarryPATH_START.@starry_extracted@starry-master@tests@greedy@test_doppler_greedy.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "toshiyan/cmblensplus",
"repo_path": "cmblensplus_extracted/cmblensplus-master/wrap/basic/__init__.py",
"type": "Python"
}
|
import basic.delens
import basic.bispec
import basic.flat
import basic.galaxy
import basic.cosmofuncs
import basic.wigner_funcs
|
toshiyanREPO_NAMEcmblensplusPATH_START.@cmblensplus_extracted@cmblensplus-master@wrap@basic@__init__.py@.PATH_END.py
|
{
"filename": "prep_input.py",
"repo_name": "Astro-Sean/autophot",
"repo_path": "autophot_extracted/autophot-master/autophot/prep_input.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Load AutoPhOT's default input commands so that users can
update the values for their own work.
'''
def load():
    """Load AutoPhOT's default input settings.

    Reads ``databases/default_input.yml`` (located next to this script)
    through :class:`yaml_autophot_input` and returns the parsed settings
    so the caller can customize them.

    Returns
    -------
    dict
        The default AutoPhOT input settings parsed from the YAML file.
    """
    import os

    from autophot.packages.call_yaml import yaml_autophot_input as cs

    # Directory containing this script
    filepath = os.path.dirname(os.path.abspath(__file__))

    # Name of default input yaml file - do not change
    default_input = 'default_input.yml'

    # Full path of default_input.yml; os.path.join is variadic, so the
    # original functools.reduce round-trip is unnecessary.
    default_input_filepath = os.path.join(filepath, 'databases', default_input)

    # Load default commands
    default_autophot_input = cs(default_input_filepath, 'AutoPhOT_input').load_vars()

    print('Default input loaded in from: \n%s' % default_input_filepath)

    return default_autophot_input
|
Astro-SeanREPO_NAMEautophotPATH_START.@autophot_extracted@autophot-master@autophot@prep_input.py@.PATH_END.py
|
{
"filename": "plot_constant_temporal.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/examples/models/temporal/plot_constant_temporal.py",
"type": "Python"
}
|
r"""
.. _constant-temporal-model:
Constant temporal model
=======================
This model parametrises a constant time model.
.. math:: F(t) = k
"""
# %%
# Example plot
# ------------
# Here is an example plot of the model:
from astropy import units as u
from astropy.time import Time
import matplotlib.pyplot as plt
from gammapy.modeling.models import (
ConstantTemporalModel,
Models,
PowerLawSpectralModel,
SkyModel,
)
time_range = [Time.now(), Time.now() + 1 * u.d]
constant_model = ConstantTemporalModel(const=1)
constant_model.plot(time_range)
plt.grid(which="both")
# %%
# YAML representation
# -------------------
# Here is an example YAML file using the model:
model = SkyModel(
spectral_model=PowerLawSpectralModel(),
temporal_model=constant_model,
name="constant-model",
)
models = Models([model])
print(models.to_yaml())
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@examples@models@temporal@plot_constant_temporal.py@.PATH_END.py
|
{
"filename": "api_test.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/tests/api_test.py",
"type": "Python"
}
|
# Copyright 2018 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import collections
import collections.abc
from collections.abc import Callable
import concurrent.futures
from contextlib import contextmanager
import copy
import enum
import functools
from functools import partial
import gc
import importlib
import inspect
import itertools as it
import operator
import operator as op
import os
import re
import subprocess
import sys
import traceback
import types
from typing import NamedTuple
import unittest
import weakref
from absl import logging
from absl.testing import absltest, parameterized
import jax
from jax import custom_derivatives as custom_derivatives_public
from jax import device_put, float0, grad, hessian, jacfwd, jacrev, jit
from jax import lax
from jax import tree_util
from jax._src import api, api_util, dtypes, lib
from jax._src import array
from jax._src import config
from jax._src import core
from jax._src import custom_derivatives
from jax._src import linear_util as lu
from jax._src import test_util as jtu
from jax._src import xla_bridge
from jax._src import debugging
from jax._src import pjit as pjit_lib
from jax._src.ad_checkpoint import saved_residuals
from jax._src.interpreters import mlir
from jax._src.interpreters import partial_eval as pe
from jax._src.compilation_cache import is_persistent_cache_enabled
from jax._src.lib import xla_extension
import jax._src.util as jax_util
from jax.ad_checkpoint import checkpoint_name, checkpoint as new_checkpoint
import jax.custom_batching
import jax.custom_derivatives
import jax.custom_transpose
from jax.errors import (UnexpectedTracerError, TracerIntegerConversionError,
ConcretizationTypeError, TracerBoolConversionError)
from jax.experimental import pjit
from jax.interpreters import ad
from jax.interpreters import batching
from jax.interpreters import xla
import jax.numpy as jnp
from jax.sharding import PartitionSpec as P
import numpy as np
config.parse_flags_with_absl()
def _check_instance(self, x):
    # Shared helper: assert that `x` is a jax ArrayImpl.
    self.assertIsInstance(x, array.ArrayImpl)
class JitTest(jtu.BufferDonationTestCase):
"""Shared tests between the Python and the C++ jax,jit implementations.
Because the Python implementation supports more features, we need to have the
Python tests that extend the C++ tests (and not the other way around).
"""
def test_jit_repr(self):
def my_function():
return
jitted = jit(my_function)
self.assertEqual(repr(jitted), f"<PjitFunction of {repr(my_function)}>")
def test_jit_repr_errors(self):
class Callable:
def __call__(self): pass
def __repr__(self):
raise ValueError("invalid repr")
# repr succeeds when underlying function repr fails.
jitted = jit(Callable())
self.assertEqual(repr(jitted), "<PjitFunction>")
# repr succeeds when object is malformed.
del jitted.__wrapped__
self.assertEqual(repr(jitted), "<PjitFunction>")
def test_jit_of_noncallable(self):
self.assertRaisesRegex(TypeError, "Expected a callable value.*",
lambda: jit(3))
def test_jit_of_generator(self):
def gen(x):
yield x
self.assertRaisesRegex(TypeError,
"Expected a function, got a generator function.*",
lambda: jit(gen))
@parameterized.parameters([
# Integer support
(1, 2, 3, 4, 5),
# Numpy array support
(
np.asarray(1, np.int32),
np.asarray(2, np.int32),
np.asarray(3, np.int32),
np.asarray(4, np.int32),
np.asarray(5, np.int32),
),
])
def test_jit_static_args(self, one, two, three, four, five):
side = []
def f(x, y, z, flag=False, flag2=False):
del flag2 # unused
assert flag
side.append(None)
return 100 * x + 10 * y + z
f1 = jit(f, static_argnums=(3, 4))
assert f1(one, two, three, True, False) == 123
assert len(side) == 1
assert f1(one, two, three, True, False) == 123
assert len(side) == 1 # Obvious cache hit.
assert f1(two, one, three, True, False) == 213
assert len(side) == 1 # Should cache hit because same signature.
assert f1(two, one, three, True, True) == 213
assert len(side) == 2
side[:] = []
f2 = jit(f, static_argnums=(0, 2, 3, 4))
assert f2(1, 2, 3, True, False) == 123
assert len(side) == 1
assert f2(1, 3, 3, True, False) == 133
assert len(side) == 1
assert f2(2, 2, 3, True, False) == 223
assert len(side) == 2
assert f2(2, 4, 3, True, False) == 243
assert len(side) == 2
assert f2(2, 4, 3, True, True) == 243
assert len(side) == 3
assert f2(2, 5, 3, True, True) == 253
assert len(side) == 3
def test_static_args_equality(self):
class A():
def __hash__(self):
return 1
def __eq__(self, other):
return isinstance(other, A)
side = []
def f(x, static_arg):
del static_arg
side.append(None)
return x * 100
f1 = jit(f, static_argnums=(1,))
self.assertEqual(f1(1, A()), 100)
self.assertLen(side, 1)
self.assertEqual(f1(1, A()), 100)
self.assertLen(side, 1)
f1_cpp = getattr(f1, "_cpp_jitted_f", f1)
self.assertEqual(f1_cpp._cache_size(), 1)
@parameterized.parameters([
(1, 2, 3),
(
np.asarray(1, np.int32),
np.asarray(2, np.int32),
np.asarray(3, np.int32),
),
])
def test_jit_kwargs(self, one, two, three):
side = []
# For the CPP jit, we need to clear the cache to prevent cache hits between
# parameterized tests.
if hasattr(jit, "cache_clear"):
jit.cache_clear()
def f(x, y, z):
side.append(None)
return 100 * x + 10 * y + z.astype(y.dtype)
f = jit(f)
assert f(one, two, three) == 123
assert len(side) == 1
assert f(one, two, three) == 123
assert len(side) == 1
assert f(one, two, z=three) == 123
assert len(side) == 2 # actually recompiles from kwarg
assert f(one, two, z=three) == 123
assert len(side) == 2 # but should still cache
f(one, two, z=np.zeros(3)) # doesn't crash
if config.enable_x64.value:
# In the above call, three is of a new type (int64), thus it should
# trigger a new compilation.
assert len(side) == 3
def test_jit_device(self):
device = jax.devices()[-1]
with jtu.ignore_warning(category=DeprecationWarning,
message="backend and device argument"):
x = jit(lambda x: x, device=device)(3.)
_check_instance(self, x)
self.assertEqual(x.devices(), {device})
@parameterized.named_parameters(
('jit', jax.jit),
('pjit', pjit.pjit),
)
@jtu.skip_on_devices("cpu")
def test_jit_default_device(self, module):
if jax.device_count() == 1:
raise unittest.SkipTest("Test requires multiple devices")
system_default_devices = jnp.add(1, 1).devices()
self.assertLen(system_default_devices, 1)
system_default_device = list(system_default_devices)[0]
test_device = jax.devices()[-1]
self.assertNotEqual(system_default_device, test_device)
f = module(lambda x: x + 1)
self.assertEqual(f(1).devices(), system_default_devices)
with jax.default_device(test_device):
self.assertEqual(jnp.add(1, 1).devices(), {test_device})
self.assertEqual(f(1).devices(), {test_device})
self.assertEqual(jnp.add(1, 1).devices(), system_default_devices)
self.assertEqual(f(1).devices(), system_default_devices)
with jax.default_device(test_device):
# Explicit `device` or `backend` argument to jit overrides default_device
with jtu.ignore_warning(category=DeprecationWarning,
message="backend and device argument"):
self.assertEqual(
module(f, device=system_default_device)(1).devices(),
system_default_devices)
out = module(f, backend="cpu")(1)
self.assertEqual(next(iter(out.devices())).platform, "cpu")
# Sticky input device overrides default_device
sticky = jax.device_put(1, system_default_device)
self.assertEqual(jnp.add(sticky, 1).devices(), system_default_devices)
self.assertEqual(f(sticky).devices(), system_default_devices)
# Test nested default_devices
with jax.default_device(system_default_device):
self.assertEqual(f(1).devices(), system_default_devices)
self.assertEqual(f(1).devices(), {test_device})
# Test a few more non-default_device calls for good luck
self.assertEqual(jnp.add(1, 1).devices(), system_default_devices)
self.assertEqual(f(sticky).devices(), system_default_devices)
self.assertEqual(f(1).devices(), system_default_devices)
def test_jit_default_platform(self):
with jax.default_device("cpu"):
result = jax.jit(lambda x: x + 1)(1)
self.assertEqual(result.device.platform, "cpu")
self.assertEqual(result.device, jax.local_devices(backend="cpu")[0])
result = jax.jit(lambda x: x + 1)(1)
self.assertEqual(result.device.platform, jax.default_backend())
self.assertEqual(result.device, jax.local_devices()[0])
def test_complex_support(self):
self.assertEqual(jit(lambda x: x + 1)(1 + 1j), 2 + 1j)
@parameterized.parameters("static_argnums", "donate_argnums")
def test_jit_argnums_overflow_error(self, argnum_type: str):
def f(a, b, c):
...
def g(a, /, b, *, c):
...
def h(a, *args):
...
def i():
...
# Simplest cases
jit(f, **{argnum_type: (0, 1)})
jit(g, **{argnum_type: (0, 1)})
jit(f, **{argnum_type: (0, 1, -3)})
# Out of bounds without *args
with self.assertRaises(ValueError):
jit(f, **{argnum_type: (0, 1, 3)})
with self.assertRaises(ValueError):
jit(f, **{argnum_type: (0, 1, -4)})
with self.assertRaises(ValueError):
jit(g, **{argnum_type: (0, 1, 3)})
with self.assertRaises(ValueError):
jit(g, **{argnum_type: (0, 1, -3)})
# Out of bounds with *args
jit(h, **{argnum_type: (0, 999)})
jit(h, **{argnum_type: (0, -999)})
# No positional arguments
jit(i, static_argnums=())
jit(i)
@parameterized.parameters("static_argnames", "donate_argnames")
def test_jit_argnames_validation(self, argnum_type: str):
def f(a, b, c):
...
def g(a, b, **kwargs):
...
def h(a, /, b, c, *args, **kwargs):
...
# Simplest case
jit(f, **{argnum_type: ("b", "c")})
# Undefined arg without **kwargs
with self.assertRaises(ValueError):
jit(f, **{argnum_type: ("b", "c", "not_defined")})
# Undefined arg with **kwargs
jit(g, **{argnum_type: ("a", "b", "not_defined")})
jit(h, **{argnum_type: ("b", "c")})
jit(h, **{argnum_type: ("b", "c", "not_defined")})
# Positional only
with self.assertRaises(ValueError):
jit(h, **{argnum_type: ("a", "c")})
# Var positional
with self.assertRaises(ValueError):
jit(h, **{argnum_type: ("args", "c")})
def test_jit_with_many_args_works(self):
@jit
def f(args_list):
return sum(args_list)
self.assertEqual(f(list(range(500))), sum(range(500)))
# Jit and Donate arguments
def test_donate_argnames_signature_fail(self):
inp = np.arange(4)
with self.assertRaisesRegex(
ValueError,
"Getting the signature of function.*failed. Pass donate_argnums "
"instead of donate_argnames."):
jax.jit(np.dot, donate_argnames='a')(inp, inp)
@parameterized.named_parameters(
("argnums", "donate_argnums", (0, 1)),
("argnames", "donate_argnames", ('x', 'y')),
)
def test_jit_donate_warning_raised(self, argnum_type, argnum_val):
x = jnp.array([1.0, 2.0], jnp.float32)
y = jnp.array([1, 2], jnp.int32)
f = jit(lambda x, y: x.sum() + jnp.float32(y.sum()),
**{argnum_type: argnum_val})
with self.assertWarnsRegex(UserWarning, "Some donated buffers were not usable"):
f(x, y)
@parameterized.named_parameters(
("argnums", "donate_argnums", 0),
("argnames", "donate_argnames", 'x'),
)
@jtu.device_supports_buffer_donation()
def test_jit_donate_invalidates_input(self, argnum_type, argnum_val):
# We can't just use `lambda x: x` because JAX simplifies this away to an
# empty XLA computation.
move = jit(lambda x: x + x - x, **{argnum_type: argnum_val})
x = jnp.ones([])
y = move(x)
self.assertDeleted(x)
self.assertEqual(y, 1.)
@parameterized.named_parameters(
("donate_argnums", "donate_argnums", (2, 3)),
("donate_argnames", "donate_argnames", ('c', 'd')),
)
@jtu.device_supports_buffer_donation()
def test_jit_donate_static_argnums(self, argnum_type, argnum_val):
jit_fun = jit(
lambda a, b, c, d: ((a + b + c), (a + b + d)),
static_argnums=(0, 1),
**{argnum_type: argnum_val})
c = jax.device_put(jnp.array([2., 2.]))
d = jax.device_put(jnp.array([1., 1., 1., 1.]))
e, f = jit_fun(1, 2, c, d)
np.testing.assert_allclose(e, jnp.array([5., 5.]))
np.testing.assert_allclose(f, jnp.array([4., 4., 4., 4.]))
self.assertDeleted(c)
self.assertDeleted(d)
@jtu.device_supports_buffer_donation()
def test_jit_donate_argnames_kwargs_static_argnums(self):
jit_fun = jit(
lambda a, b, c, d, e: ((a + b + c), (a + b + d), (a + b + e)),
static_argnums=(0, 1),
donate_argnames=('d', 'e'))
c = jax.device_put(jnp.array([2., 2.]))
d = jax.device_put(jnp.array([1., 1., 1., 1.]))
e = jax.device_put(jnp.array([3., 3., 3., 3.]))
f, g, h = jit_fun(1, 2, c, d=d, e=e)
np.testing.assert_allclose(f, jnp.array([5., 5.]))
np.testing.assert_allclose(g, jnp.array([4., 4., 4., 4.]))
np.testing.assert_allclose(h, jnp.array([6., 6., 6., 6.]))
self.assertNotDeleted(c)
self.assertDeleted(d)
self.assertDeleted(e)
def test_device_put_aliasing(self):
arr = jax.device_put(np.arange(8), jax.devices()[0])
out = jax.device_put(arr, may_alias=True, donate=False)
self.assertEqual(id(arr), id(out))
out = jax.device_put(arr, may_alias=False, donate=False)
self.assertNotEqual(id(arr), id(out))
with self.assertRaisesRegex(
ValueError, "may_alias and donate cannot be True at the same time."):
jax.device_put(arr, may_alias=True, donate=True)
out = jax.device_put(arr,
jax.sharding.SingleDeviceSharding(jax.devices()[0]),
may_alias=True, donate=False)
self.assertEqual(id(arr), id(out))
out = jax.device_put(arr,
jax.sharding.SingleDeviceSharding(jax.devices()[0]),
may_alias=False, donate=False)
self.assertNotEqual(id(arr), id(out))
@parameterized.named_parameters(
("argnums", "donate_argnums", 0),
("argnames", "donate_argnames", 'x'),
)
@jtu.device_supports_buffer_donation()
def test_jit_donate_weak_type(self, argnum_type, argnum_val):
# input has weak-type, output does not have weak-type
move = jit(lambda x: x.astype(int), **{argnum_type: argnum_val})
x = jnp.broadcast_to(2, (3,))
move(x)
self.assertDeleted(x)
@parameterized.named_parameters(
("argnums", "donate_argnums", (0,)),
("argnames", "donate_argnames", ('array',)),
)
def test_jnp_array_copy(self, argnum_type, argnum_val):
# https://github.com/jax-ml/jax/issues/3412
@partial(jit, **{argnum_type: argnum_val})
def _test(array):
return array.at[0].set(77)
x = jnp.asarray([0, 1])
x_copy = jnp.array(x, copy=True)
with jtu.ignore_warning():
_test(x) # donation
# Gives: RuntimeError: Invalid argument: CopyToHostAsync() called on invalid buffer.
print(x_copy) # doesn't crash
@jtu.device_supports_buffer_donation()
def test_specify_donate_argnums_and_argnames(self):
@partial(jax.jit, donate_argnums=0, donate_argnames=('inp2', 'inp3'))
def f(inp1, inp2, inp3):
return inp1 * 2, inp2 * 2, inp3 * 2
x = jnp.ones((2, 5)) * 4
y = jnp.ones((2, 5)) * 2
z = jnp.ones((2, 5))
f(x, inp2=y, inp3=z)
self.assertDeleted(x)
self.assertDeleted(y)
self.assertDeleted(z)
def test_resolve_argnums_signature_fail(self):
api_util.resolve_argnums(int, None, None, None, None, None) # doesn't crash
@jtu.device_supports_buffer_donation()
def test_donate_argnames_with_args(self):
@partial(jax.jit, donate_argnames='inp1')
def f(inp1):
return inp1 * 2
x = jax.device_put(jnp.ones((2, 5)) * 4, jax.devices()[0])
f(x)
self.assertDeleted(x)
@jtu.device_supports_buffer_donation()
def test_donate_argnums_with_kwargs(self):
@partial(jax.jit, donate_argnums=0)
def f(inp1):
return inp1 * 2
x = jax.device_put(jnp.ones((2, 5)) * 4, jax.devices()[0])
f(inp1=x)
self.assertDeleted(x)
def test_donate_args_info_aot(self):
def fn(x, y):
return jax.tree.map(lambda i: i * 2, x), y * 2
x = jax.device_put({"A": np.array(1.0), "B": np.array(2.0)},
jax.devices()[0])
y = jax.device_put(np.array(3.0), jax.devices()[0])
f = jax.jit(fn, donate_argnums=1)
lowered = f.lower(x, y)
args_info = lowered.args_info[0]
# x is not donated.
self.assertFalse(args_info[0]['A'].donated)
self.assertFalse(args_info[0]['B'].donated)
# y is donated.
self.assertTrue(args_info[1].donated)
g = jax.jit(fn, donate_argnums=0)
lowered = g.lower(x, y)
args_info = lowered.args_info[0]
# x is donated.
self.assertTrue(args_info[0]['A'].donated)
self.assertTrue(args_info[0]['B'].donated)
# y is not donated.
self.assertFalse(args_info[1].donated)
def test_double_donation(self):
def add(x, y):
return x + y
f = jax.jit(add, donate_argnums=(0,))
x = jnp.zeros((10,), jnp.float32)
with self.assertRaises(RuntimeError):
result = f(x, x)
result.block_until_ready()
@parameterized.named_parameters(
('argnames', {'donate_argnames': ('z', 'y')}),
('argnums', {'donate_argnums': (0, 1)})
)
def test_dict_donation(self, jit_kwargs):
@partial(jax.jit, **jit_kwargs)
def f(z, y, x):
return z, y, x
z = {'c': 3.}
y = {'b': 2.}
x = {'a': 1.}
_, kwargs_info = f.lower(z=z, y=y, x=x).args_info
self.assertTrue(kwargs_info['z']['c'].donated)
self.assertTrue(kwargs_info['y']['b'].donated)
self.assertFalse(kwargs_info['x']['a'].donated)
@parameterized.named_parameters(
('argnames', {'donate_argnames': ('z', 'y')}),
('argnums', {'donate_argnums': (0, 1)})
)
def test_dict_donation_args_kwargs(self, jit_kwargs):
@partial(jax.jit, **jit_kwargs)
def f(z, y, x):
return z, y, x
z = {'c': 3.}
y = {'b': 2.}
x = {'a': 1.}
args_info, kwargs_info = f.lower(z, y=y, x=x).args_info
self.assertTrue(args_info[0]['c'].donated)
self.assertTrue(kwargs_info['y']['b'].donated)
self.assertFalse(kwargs_info['x']['a'].donated)
def test_intersecting_static_and_donate_argnames(self):
with self.assertRaisesRegex(
ValueError, "static_argnames and donate_argnames cannot intersect"):
jax.jit(lambda x: x, static_argnames='x', donate_argnames='x')
def test_jit_global_cache(self):
def f(x):
assert python_should_be_executing
return x
python_should_be_executing = True
jit(f)(2)
python_should_be_executing = False
jit(f)(3)
def test_jit_cache_clear(self):
@jit
def f(x, y):
return x + y
client = jax.devices()[0].client
gc.collect()
num_live_initial = len(client.live_executables())
f(1, 2).block_until_ready()
gc.collect()
num_live = len(client.live_executables())
self.assertEqual(num_live_initial + 1, num_live)
f.clear_cache()
gc.collect()
num_live = len(client.live_executables())
self.assertEqual(num_live_initial, num_live)
def test_jit_shallow_copy(self):
def f(x):
return copy.copy(x)
jit(f)(1)
def test_jit_deep_copy(self):
def f(x):
return copy.deepcopy(x)
jit(f)(1)
def test_disable_jit(self):
effects = []
@jit
def f(x):
effects.append(1)
return x
with api.disable_jit():
f(2)
f(2)
assert len(effects) == 2
f(2)
f(2)
assert len(effects) == 3
def test_static_argnum_on_method(self):
class A:
@functools.partial(jit, static_argnums=(0,))
def my_func_jit(self, x):
return x+2
A().my_func_jit(3)
def test_static_argnum_on_static_method_is_not_supported(self):
with self.assertRaisesRegex(TypeError, "Expected a callable value"):
class A:
@functools.partial(jit, static_argnums=(0,))
@classmethod
def my_classmethod_jit(cls, x):
return x+2
def test_staticmethod_is_not_supported(self):
with self.assertRaisesRegex(TypeError,
"staticmethod arguments are not supported"):
class A:
@functools.partial(jit)
@staticmethod
def my_staticmethod_jit(x):
return x + 2
def test_concurrent_jit(self):
@jit
def f(x):
return x + x - 3.
xs = [self.rng().randn(i) for i in range(10)]
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(partial(f, x)) for x in xs]
ys = [f.result() for f in futures]
for x, y in zip(xs, ys):
self.assertAllClose(x * 2 - 3., y)
def test_trivial_computations(self):
x = jnp.array([1, 2, 3])
y = jit(lambda x: x)(x)
self.assertNotEqual(x.unsafe_buffer_pointer(), y.unsafe_buffer_pointer())
z1, z2 = jit(lambda x: (x, x))(x)
self.assertNotEqual(z1.unsafe_buffer_pointer(), z2.unsafe_buffer_pointer())
x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])
z1, z2, z3 = jit(lambda x, y: (y, 1, x))(x1, x2)
self.assertNotEqual(z1.unsafe_buffer_pointer(), x2.unsafe_buffer_pointer())
self.assertNotEqual(z3.unsafe_buffer_pointer(), x1.unsafe_buffer_pointer())
self.assertEqual(z2, 1)
def test_print_token_buffer_error(self):
token = jax.lax.create_token()
with self.assertRaisesRegex(
RuntimeError, "Cannot convert a token-shape buffer to a numpy array."
):
token._buf._value
def test_trivial_computations_with_tokens(self):
@jit
def noop(arr, token):
return arr, token
arr = jnp.ones(10)
token = jax.lax.create_token()
_, out_token = noop(arr, token)
self.assertIsInstance(token, core.Token)
self.assertIsInstance(out_token, core.Token)
# Different token objects.
self.assertIsNot(token, out_token)
def test_jit_bad_input(self):
def f(x):
return x
err_str = ("Error interpreting argument to .* as an abstract array. The problematic "
"value is of type .* and was passed to the function at path x.")
with self.assertRaisesRegex(TypeError, err_str):
jit(f)("foo")
# Jax type objects aren't valid data arguments.
with self.assertRaisesRegex(TypeError, err_str):
jit(f)(jnp.int32)
def test_jit_masked_array(self):
x = np.ma.array([1, 2, 3], mask=[True, False, True])
f = jit(lambda x: x)
with self.assertRaisesRegex(ValueError, "numpy masked arrays are not supported"):
f(x)
def test_jit_on_all_devices(self):
# Verifies we can run the same computation on every device present, even
# if they are, for example, different models of GPU.
data = self.rng().rand(1000).astype(np.float32)
f = jit(jnp.negative)
for device in jax.local_devices():
x = device_put(data, device=device)
np.testing.assert_array_equal(-data, f(x))
def test_jit_nested_donate_ignored(self):
jit_fun = jit(lambda x: jit(lambda y: y**2, donate_argnums=0)(x))
a = jax.device_put(jnp.array(1))
# NOTE(mattjj): stopped raising error here and instead just ignored
# with self.assertRaisesRegex(ValueError, "nested.*not supported"):
# jit_fun(a)
jit_fun(a) # doesn't crash
def test_jit_reference_dropping(self):
x = jnp.ones(10)
f = (lambda x: lambda: x)(x) # reference to x in f's closure
g = jit(f)
x = weakref.ref(x) # no more strong ref to x in this scope
assert x() is not None # x is still around
f() # f runs
g() # g runs
g() # g runs a second time
del f # delete the raw callable
assert x() is not None # x is still around
g() # g still runs
del g # no more references to x
assert x() is None # x is gone
def test_jit_of_nonweakreferenceable_function(self):
class CallableWithSlots:
__slots__ = []
def __call__(self, x):
return x + 1
c = CallableWithSlots()
with self.assertRaisesRegex(TypeError, "cannot create weak reference.*"):
weakref.ref(c)
# Building a jit object does not crash.
f = jit(c)
with self.assertRaisesRegex(TypeError, "cannot create weak reference.*"):
# Calling the jit object will fail, but not because of the C++ JIT. The
# Python-level jit cache requires weak reference support.
f(3)
  def test_jit_raises_on_first_invocation_on_non_hashable_static_argnum(self):
    # An unhashable value for a static argument fails on the very first
    # call with a clear error (static args are used as cache keys).
    f = lambda x, y: x + 3
    jitted_f = jit(f, static_argnums=(1,))

    msg = "Non-hashable static arguments are not supported"
    with self.assertRaisesRegex(ValueError, msg):
      jitted_f(1, np.asarray(1))
def test_cpp_jit_raises_on_non_hashable_static_argnum(self):
f = lambda x, y: x + 3
jitted_f = jit(f, static_argnums=[1])
jitted_f(1, 1)
msg = "Non-hashable static arguments are not supported"
with self.assertRaisesRegex(ValueError, msg):
jitted_f(1, np.asarray(1))
class HashableWithoutEq:
def __hash__(self):
return 1
def __eq__(self, other):
raise NotImplementedError(
"A Python error is as is, without stack trace")
with self.assertRaisesRegex(
ValueError,
re.escape("static arguments should be comparable using __eq__")):
jitted_f(1, HashableWithoutEq())
# __eq__ would only be called if we might have a cache hit. Call the
# function a second time with exactly the same arguments to make sure that
# we could.
jitted_f(1, HashableWithoutEq())
def test_cpp_jit_raises_other_exceptions_when_hashing_fails(self):
class A:
def __hash__(self):
raise ValueError
f = jax.jit(lambda x: x + 1, static_argnums=(0,))
a = A()
with self.assertRaisesRegex(ValueError, '^$'): # no extra message
f(a)
def test_cpp_jitted_function_returns_PyBuffer(self):
jitted_f = jit(lambda a: a + 1)
jitted_f(1)
out = jitted_f(2)
self.assertIsInstance(out.sharding, jax.sharding.SingleDeviceSharding)
self.assertIsInstance(out, array.ArrayImpl)
@parameterized.named_parameters(
('jit', jax.jit),
('pjit', pjit.pjit)
)
@jtu.skip_on_devices("cpu")
def test_explicit_backend(self, module):
f = lambda x: x + 1
with jtu.ignore_warning(category=DeprecationWarning,
message="backend and device argument"):
jitted_f = module(f, backend=jtu.device_under_test())
jitted_f_cpu = module(f, backend="cpu")
result = jitted_f(1.)
result_cpu = jitted_f_cpu(1.)
self.assertEqual(list(result.devices())[0].platform, jtu.device_under_test())
self.assertEqual(list(result_cpu.devices())[0].platform, "cpu")
@parameterized.named_parameters(
('jit', jax.jit),
('pjit', pjit.pjit)
)
@jtu.skip_on_devices("cpu")
def test_device_to_device_copy_between_backends(self, module):
# b/186624243
f = lambda x: x + 1
with jtu.ignore_warning(category=DeprecationWarning,
message="backend and device argument"):
jitted_f = module(f, backend=jtu.device_under_test())
jitted_f_cpu = module(f, backend="cpu")
x = np.arange(30).reshape(1, 10, 3)
result = jitted_f(x)
result_cpu = jitted_f_cpu(result)
result_2 = jitted_f(result_cpu)
result_cpu_2 = jitted_f_cpu(result_2)
self.assertAllClose(result_2, x + 3)
self.assertAllClose(result_cpu_2, x + 4)
@jtu.skip_on_devices("cpu")
@jtu.ignore_warning(category=DeprecationWarning,
message="backend and device argument")
def test_mismatched_nested_backends(self):
@partial(jax.jit, backend=jtu.device_under_test())
def f(x):
return jax.jit(lambda x: x + 1, backend="cpu")(x)
msg = 'Received incompatible devices for jitted computation'
with self.assertRaisesRegex(ValueError, msg):
f(1.)
@jax.legacy_prng_key('allow')
def test_omnistaging(self):
# See https://github.com/jax-ml/jax/issues/5206
# TODO(frostig): remove `wrap` once we always enable_custom_prng
def wrap(arr):
arr = np.array(arr, dtype=np.uint32)
if config.enable_custom_prng.value:
return jax.random.wrap_key_data(arr)
else:
return arr
key_list = [None]
def init():
key, subkey = jax.random.split(key_list[0])
key_list[0] = key
return jax.random.normal(subkey, ())
key_list[0] = wrap([2384771982, 3928867769])
init()
jit(init)()
self.assertIsInstance(key_list[0], core.Tracer)
del key_list[0]
def test_jit_wrapped_attributes(self):
def f(x: int) -> int:
"""docstring of f."""
return x + 1
f.some_value = 4
jf = jit(f)
for attr in ["doc", "name", "module", "qualname", "annotations"]:
self.assertEqual(
{attr: getattr(f, f"__{attr}__")},
{attr: getattr(jf, f"__{attr}__")})
self.assertEqual(f.some_value, jf.some_value)
def test_jit_python_builtin(self):
x = jnp.array([1, 2])
expected = x + 1
jit_add = jit(operator.add, static_argnums=(1,))
actual = jit_add(x, 1)
self.assertArraysEqual(expected, actual)
def test_infer_argnums_and_argnames(self):
def f(x, y=1):
pass
sig = inspect.signature(f)
argnums, argnames = api_util.infer_argnums_and_argnames(
sig, argnums=None, argnames=None)
assert argnums == ()
assert argnames == ()
argnums, argnames = api_util.infer_argnums_and_argnames(
sig, argnums=0, argnames=None)
assert argnums == (0,)
assert argnames == ('x',)
argnums, argnames = api_util.infer_argnums_and_argnames(
sig, argnums=None, argnames='y')
assert argnums == (1,)
assert argnames == ('y',)
argnums, argnames = api_util.infer_argnums_and_argnames(
sig, argnums=0, argnames='y') # no validation
assert argnums == (0,)
assert argnames == ('y',)
def g(x, y, *args):
pass
sig = inspect.signature(g)
argnums, argnames = api_util.infer_argnums_and_argnames(
sig, argnums=(1, 2), argnames=None)
assert argnums == (1, 2)
assert argnames == ('y',)
def h(x, y, **kwargs):
pass
sig = inspect.signature(h)
argnums, argnames = api_util.infer_argnums_and_argnames(
sig, argnums=None, argnames=('foo', 'bar'))
assert argnums == ()
assert argnames == ('foo', 'bar')
def test_jit_with_static_argnames(self):
def f(x):
assert x == 'foo'
return 1
f_nums = jit(f, static_argnums=0)
assert f_nums('foo') == 1
assert f_nums(x='foo') == 1
f_names = jit(f, static_argnames='x')
assert f_names('foo') == 1
assert f_names(x='foo') == 1
def test_new_static_argnum_on_keyword_arguments(self):
f = jit(lambda x: x, static_argnums=0)
y = f(x=4)
assert y == 4
def test_new_static_argnum_with_default_arguments(self):
f = jit(lambda x=4: x, static_argnums=0)
y = f()
assert y == 4
  def test_jit_with_mismatched_static_argnames(self):
    # `f` reads the two flags from this closure, so each call below first
    # records whether x / y are expected to be tracers (i.e. non-static)
    # and then invokes the jitted function, which asserts on them.
    x_is_tracer, y_is_tracer = False, False
    def f(x, y):
      assert isinstance(x, core.Tracer) == x_is_tracer
      assert isinstance(y, core.Tracer) == y_is_tracer
      return 1

    # If both static_argnums and static_argnames are provided, they are allowed
    # to disagree and `jit` will respect the user's choices.
    f_nums = jit(f, static_argnums=1, static_argnames=())
    x_is_tracer, y_is_tracer = True, False
    assert f_nums(2, 'foo') == 1
    x_is_tracer, y_is_tracer = True, True
    assert f_nums(1, y=2) == 1

    f_names = jit(f, static_argnums=(), static_argnames='y')
    x_is_tracer, y_is_tracer = True, True
    assert f_names(2, 3) == 1
    x_is_tracer, y_is_tracer = True, False
    assert f_names(1, y='foo') == 1

    f_mixed = jit(f, static_argnums=(1,), static_argnames='x')
    x_is_tracer, y_is_tracer = True, False
    assert f_mixed(2, 'foo') == 1
    x_is_tracer, y_is_tracer = True, True
    assert f_mixed(1, y=3) == 1
    x_is_tracer, y_is_tracer = False, True
    assert f_mixed(x='foo', y=3) == 1
# TODO(zhangqiaorjc): Test pruning constants after DCE pass prunes primitive
# applications.
@parameterized.parameters(2, 3, 4)
def test_jit_with_pruned_args(self, num_args):
def f(*args):
used = np.array(2)
return args[1] + used
f_pruned = jit(f)
args = range(num_args)
with jtu.count_device_put() as count:
np.testing.assert_allclose(f_pruned(*args), 3)
self.assertEqual(count[0], 1)
def testBuffersAreFreedPromptly(self):
# Regression test for a bug where garbage collection was delayed too long
# for NumPy buffers that are aliased zero-copy by the runtime.
@jit
def f(x):
return x + 1
refs = []
x = np.ones((10000,), np.float32)
for step in range(1000):
x = f(x)
refs.append(weakref.ref(x))
x = np.asarray(x)
# We expect most of the input buffers to have been garbage
# collected in parallel with the execution. We can't call
# block_until_ready() here because it would force a garbage collection.
live_refs = len([ref for ref in refs if ref() is not None])
self.assertLessEqual(live_refs, 100)
def test_jit_lower_compile(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = jit(f)
lowered = f_jit.lower(1.)
compiled = lowered.compile()
self.assertAllClose(compiled(1.), 2.)
self.assertEqual(lowered.in_avals, compiled.in_avals)
expected_dtype = np.float64 if config.enable_x64.value else np.float32
for obj in [lowered, compiled]:
self.assertEqual(
obj.in_avals,
((core.ShapedArray([], expected_dtype, weak_type=True),), {}))
self.assertEqual(obj.in_tree, jax.tree.flatten(((0,), {}))[1])
def test_jit_lower_duck_typing(self):
f_jit = jit(lambda x: 2 * x)
f_low = f_jit.lower(jax.ShapeDtypeStruct((), 'float32')) # doesn't crash
f_exe = f_low.compile()
self.assertAllClose(f_exe(jnp.float32(1.)), jnp.float32(2.))
def test_jit_lower_compile_in_tree_mismatch(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = jit(f)
f_low = f_jit.lower(1.)
f_exe = f_low.compile()
self.assertRaisesRegex(
TypeError,
'Function compiled with input pytree does not match the input pytree it'
' was called with',
lambda: f_exe([1.]))
def test_jit_lower_compile_trivial(self):
def f(x): return x
out = jit(f).lower(1.).compile()(4.)
self.assertAllClose(out, 4.)
def test_jit_lower_compile_sharding_computation(self):
s = jax.sharding.SingleDeviceSharding(jax.devices()[0])
def f(x): return jax.lax.with_sharding_constraint(x, s)
out = jit(f).lower(1.).compile()(4.)
self.assertAllClose(out, 4.)
def test_jit_lower_compile_trivial_in_tree_mismatch(self):
def f(x): return x
f_exe = jit(f).lower(1.).compile()
self.assertRaisesRegex(
TypeError,
"Function compiled with input pytree does not match the input pytree it"
" was called with",
lambda: f_exe([4.0]),
)
def test_jit_lower_compile_arg_type_mismatch(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
x = jnp.array(1, dtype=int)
x_f32 = x.astype(jnp.float32)
x_i32 = x.astype(jnp.int32)
f_exe = jit(f).lower(x_f32).compile()
self.assertRaisesRegex(
TypeError,
r"Argument types differ .*"
r"The mismatches are:\n"
r"Argument 'x' compiled with.*float32.*and called with.*int32.*",
lambda: f_exe(x_i32))
def test_jit_lower_compile_multi_arg(self):
def f(*args):
x, *_ = args
return jnp.sqrt(x ** 2) + 1.
f_exe = jit(f).lower(1., 1.).compile()
self.assertAllClose(f_exe(1., 1.), 2.)
def test_jit_lower_compile_trivial_multi_arg(self):
def f(*args):
x, *_ = args
return x
f_exe = jit(f).lower(1., 1.).compile()
self.assertAllClose(f_exe(1., 1.), 1.)
def test_jit_lower_donate_argnums_available(self):
def f(*args):
x, *_ = args
return x + 4.
f_low = jit(f, donate_argnums=(0,)).lower(1., 1.)
f_com = f_low.compile()
f_low.donate_argnums == f_com.donate_argnums == (0,)
def test_jit_lower_compile_vmap(self):
f = jit(lambda x: x + 4).lower(1.).compile()
def err():
return jax.vmap(lambda x: f(x) + 2)(jnp.ones(3))
self.assertRaisesRegex(
TypeError,
"Cannot apply JAX transformations to a function lowered and compiled "
"for a particular signature. Detected .*BatchTracer",
err)
def test_jit_lower_as_text(self):
f = jit(lambda x: x + 4).lower(1.)
self.assertIsInstance(f.as_text(), str)
self.assertIsInstance(f.as_text(dialect='hlo'), str)
self.assertIsInstance(f.as_text(dialect="stablehlo"), str)
def test_jit_lower_compiler_ir(self):
f = jit(lambda x: x + 4).lower(1.)
self.assertIsNotNone(f.compiler_ir())
self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
self.assertIsNotNone(f.compiler_ir(dialect="stablehlo"))
def test_jit_lower_trivial_compiler_ir(self):
f = jit(lambda x: x).lower(1.)
self.assertIsNotNone(f.compiler_ir())
self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
self.assertIsNotNone(f.compiler_ir(dialect="stablehlo"))
def test_jit_replica_attributes(self):
hlo = jit(lambda x: x + 4).lower(1.).as_text("stablehlo")
self.assertIn("mhlo.num_partitions = 1", hlo)
self.assertIn("mhlo.num_replicas = 1", hlo)
def test_jit_lower_no_pruning(self):
compiled = jit(lambda x, y: x + y).lower(1., 2.).compile()
self.assertEqual(compiled._executable._kept_var_idx, {0, 1})
self.assertLen(compiled._executable.in_avals, 2)
compiled = jit(lambda x, y: x).lower(1., 2.).compile()
self.assertEqual(compiled._executable._kept_var_idx, {0})
self.assertLen(compiled._executable.in_avals, 1)
compiled = jit(lambda x, y: x, keep_unused=True).lower(
1., 2.).compile()
self.assertEqual(compiled._executable._kept_var_idx, {0, 1})
self.assertLen(compiled._executable.in_avals, 2)
# Also works with jax.jit
jitted_f = jit(lambda x, y: x, keep_unused=True)
with jtu.count_pjit_cpp_cache_miss() as count:
_ = jitted_f(1, 2)
self.assertEqual(count[0], 1)
def test_jit_lower_compile_compiler_ir(self):
f = jit(lambda x: x + 4).lower(1.).compile()
self.assertIsNotNone(f.runtime_executable())
def test_jit_lower_trivial_compile_compiler_ir(self):
f = jit(lambda x: x).lower(1.).compile()
self.assertIsNotNone(f.runtime_executable())
def test_jit_lower_compile_as_text(self):
f = jit(lambda x: x).lower(1.).compile()
g = jit(lambda x: x + 4).lower(1.).compile()
self.assertIsInstance(f.as_text(), (str, type(None)))
self.assertIsInstance(g.as_text(), (str, type(None)))
def test_jit_lower_cost_analysis(self):
# TODO(b/261771737): add support for uncompiled cost analysis in C API.
if "PJRT C API" in xla_bridge.get_backend().platform_version:
raise unittest.SkipTest("C API does not support uncompiled cost analysis")
f = jit(lambda x: x).lower(1.)
g = jit(lambda x: x + 4).lower(1.)
f.cost_analysis() # doesn't raise
g.cost_analysis() # doesn't raise
def test_jit_lower_compile_cost_analysis(self):
f = jit(lambda x: x).lower(1.).compile()
g = jit(lambda x: x + 4).lower(1.).compile()
self.assertIsNotNone(f.cost_analysis())
self.assertIsNotNone(g.cost_analysis())
def test_jit_lower_compile_memory_analysis(self):
f = jit(lambda x: x).lower(1.).compile()
g = jit(lambda x: x + 4).lower(1.).compile()
f.memory_analysis() # doesn't raise
g.memory_analysis() # doesn't raise
def test_jit_lower_compile_executable(self):
f = jit(lambda x: x).lower(1.).compile()
g = jit(lambda x: x + 4).lower(1.).compile()
self.assertIsNotNone(f.runtime_executable())
self.assertIsNotNone(g.runtime_executable())
def test_jit_lower_arg_info(self):
def f(x, y, *args, **kwargs):
return y['hi'] + args[1] + sum(kwargs.values())
lowered = jax.jit(f).lower({'hi': 1.}, {'hi': 2.}, 3., 4., z=5., w=6.)
hlo_str = mlir.module_to_string(lowered.compiler_ir('stablehlo'))
self.assertNotIn("\"x\"", hlo_str)
self.assertIn("y['hi']", hlo_str)
self.assertNotIn("args[0]", hlo_str)
self.assertIn("args[1]", hlo_str)
self.assertIn("kwargs['z']", hlo_str)
self.assertIn("kwargs['w']", hlo_str)
hlo_str = mlir.module_to_string(
lowered.compiler_ir('stablehlo'),
enable_debug_info=False,
)
for s in ("\"x\"", "y['hi']", "args[0]", "args[1]", "kwargs['z']", "kwargs['w']"):
self.assertNotIn(s, hlo_str)
@parameterized.parameters([0, 2, [(0, 2)]])
def test_jit_lower_arg_info_static_argnums(self, static_argnums):
def f(x, y, *args, **kwargs):
return y['hi'] + args[1] + sum(kwargs.values())
ir = jax.jit(f, static_argnums=static_argnums).lower(
(1.,), {'hi': 2.}, 3., 4., z=5., w=6.).compiler_ir('stablehlo')
hlo_str = mlir.module_to_string(ir)
self.assertNotIn("\"x\"", hlo_str)
self.assertIn("y['hi']", hlo_str)
self.assertNotIn("args[0]", hlo_str)
self.assertIn("args[1]", hlo_str)
self.assertIn("kwargs['z']", hlo_str)
self.assertIn("kwargs['w']", hlo_str)
hlo_str = mlir.module_to_string(ir, enable_debug_info=False)
for s in ("\"x\"", "y['hi']", "args[0]", "args[1]", "kwargs['z']", "kwargs['w']"):
self.assertNotIn(s, hlo_str)
@parameterized.parameters(['a', 'b', [('a', 'b')]])
def test_jit_lower_arg_info_static_argnames(self, static_argnames):
def f(x, y, *args, **kwargs):
return y['hi'] + args[1] + kwargs['z'] + kwargs['w']
ir = jax.jit(f, static_argnames=static_argnames).lower(
(1.,), {'hi': 2.}, 3., 4., z=5., w=6., a=7., b=8.).compiler_ir('stablehlo')
hlo_str = mlir.module_to_string(ir)
self.assertNotIn("\"x\"", hlo_str)
self.assertIn("y['hi']", hlo_str)
self.assertNotIn("args[0]", hlo_str)
self.assertIn("args[1]", hlo_str)
self.assertIn("kwargs['z']", hlo_str)
self.assertIn("kwargs['w']", hlo_str)
self.assertNotIn("kwargs['a']", hlo_str)
self.assertNotIn("kwargs['b']", hlo_str)
hlo_str = mlir.module_to_string(ir, enable_debug_info=False)
for s in (
"\"x\"", "y['hi']", "args[0]", "args[1]", "kwargs['z']",
"kwargs['w']", "kwargs['a']", "kwargs['b']"
):
self.assertNotIn(s, hlo_str)
def test_jit_lower_result_info(self):
def f(x, y, z):
return {'a': x, 'b': [y]}
ir = jax.jit(f).lower(1., (2,), [3]).compiler_ir('stablehlo')
hlo_str = mlir.module_to_string(ir)
self.assertIn("jax.result_info = \"['a']\"", hlo_str)
self.assertIn("jax.result_info = \"['b'][0][0]\"", hlo_str)
def test_jit_lower_compile_with_compiler_options(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = jit(f)
lowered = f_jit.lower(1.)
lowered.compile( # doesn't crash
compiler_options={
"xla_embed_ir_in_executable": True,
"xla_dump_max_hlo_modules": 200,
"xla_gpu_auto_spmd_partitioning_memory_budget_ratio": 0.5,
}
)
def test_compile_options_jit(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = jit(
f,
compiler_options={
"xla_embed_ir_in_executable": True,
"xla_dump_max_hlo_modules": 200,
"xla_gpu_auto_spmd_partitioning_memory_budget_ratio": 0.5,
})(1.0) # doesn't crash.
def test_exec_time_optimization_effort_compiler_option(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = jit(
f,
compiler_options={
"exec_time_optimization_effort": 0.0,
})(1.0) # doesn't crash.
with self.assertRaisesRegex(xla_extension.XlaRuntimeError, "No such"):
f_jit = jit(
f,
compiler_options={
"exec_time_compilation_effort": 0.0,
})(1.0)
def test_jit_lower_compile_with_compiler_options_invalid(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = jit(f)
lowered = f_jit.lower(1.)
self.assertRaisesRegex(
xla_extension.XlaRuntimeError, "No such compile option: 'invalid_key'",
lambda: lowered.compile(
compiler_options={"invalid_key": "invalid_value"}))
self.assertRaisesRegex(
xla_extension.XlaRuntimeError, "is not a valid bool value.",
lambda: lowered.compile(
compiler_options={"xla_embed_ir_in_executable": "invalid_value"}))
def test_jit_compile_with_compiler_options_multiple(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
with jtu.count_jit_compilation_cache_miss() as count:
jit(f, compiler_options={"xla_embed_ir_in_executable": True})(1.)
jit(f, compiler_options={"xla_embed_ir_in_executable": False})(1.)
self.assertEqual(count[0], 2)
# We should still error on invalid options after some valid compiles
with self.assertRaisesRegex(
xla_extension.XlaRuntimeError, "No such compile option: 'invalid_key'"):
jit(f, compiler_options={"invalid_key": "invalid_value"})(1.)
def test_lower_compile_with_compiler_options_multiple(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = jit(f)
lowered = f_jit.lower(1.)
l1 = lowered.compile()
l2 = lowered.compile(
compiler_options={"xla_embed_ir_in_executable": True})
l3 = lowered.compile(
compiler_options={"xla_embed_ir_in_executable": False})
# Ideally we could test that these objects are different only in
# that they respect the different options. Object identity is a
# heuristic proxy for that.
self.assertTrue(l1 is not l2)
self.assertTrue(l1 is not l3)
self.assertTrue(l2 is not l3)
# We should still error on invalid options after some valid compiles
self.assertRaisesRegex(
xla_extension.XlaRuntimeError, "No such compile option: 'invalid_key'",
lambda: lowered.compile(
compiler_options={"invalid_key": "invalid_value"}))
def test_jit_enum_as_dict_keys_fails(self):
class E(enum.Enum):
A = 0
B = 1
@jit
def f(d) -> float:
return d[E.A]
with self.assertRaisesRegex(
(TypeError, ValueError),
"('<' not supported|Comparator raised exception).*"):
f({E.A: 1.0, E.B: 2.0})
  def test_jit_static_argnums_requires_type_equality(self):
    # See: https://github.com/jax-ml/jax/pull/9311
    # `f` asserts on the closed-over flag, so an unexpected retrace (or an
    # unexpected cache hit) is directly observable.
    @partial(jit, static_argnums=(0,))
    def f(k):
      assert python_should_be_executing
      return k

    # Values of 'x' that compare as equal but have different types do not lead
    # to cache hits.
    for x in [1, True, 1.0]:
      python_should_be_executing = True
      self.assertEqual(x, f(x))

      python_should_be_executing = False
      self.assertEqual(x, f(x))
def test_caches_depend_on_axis_env(self):
# https://github.com/jax-ml/jax/issues/9187
f = lambda: lax.psum(1, "i")
g = jax.jit(f)
expected = jax.vmap(f, axis_name="i", axis_size=2, out_axes=None)()
ans = jax.vmap(g, axis_name="i", axis_size=2, out_axes=None)()
self.assertEqual(ans, expected)
# This second call to g could erroneously get a cache hit.
expected = jax.vmap(f, axis_name="i", axis_size=3, out_axes=None)()
ans = jax.vmap(g, axis_name="i", axis_size=3, out_axes=None)()
self.assertEqual(ans, expected)
# Since stackless, the vmap(f) version gets compiled a second time
@unittest.skip
def test_caches_dont_depend_on_unnamed_axis_env(self):
# https://github.com/jax-ml/jax/issues/9187
f = jax.jit(lambda: jnp.sin(1))
expected = f()
with jtu.count_jit_and_pmap_lowerings() as count: # noqa: F841
ans = jax.vmap(f, axis_size=2, out_axes=None)()
self.assertEqual(count[0], 0) # no compiles
self.assertArraysAllClose(ans, expected, check_dtypes=True)
def test_cache_key_defaults(self):
# https://github.com/jax-ml/jax/discussions/11875
f = jit(lambda x: (x ** 2).sum())
self.assertEqual(f._cache_size(), 0)
x = jnp.arange(5.0)
for _ in range(3):
_ = f(x)
self.assertEqual(f._cache_size(), 1)
def test_jit_nan_times_zero(self):
# https://github.com/jax-ml/jax/issues/4780
def f(x):
return 1 + x * 0
self.assertAllClose(f(np.nan), np.nan)
self.assertAllClose(jit(f)(np.nan), np.nan)
def test_no_tracing(self):
@jax.jit
def f(x):
return x
x = jnp.arange(3)
y = jnp.arange(4)
_ = f(x) # no crash
with self.assertRaisesRegex(RuntimeError, 'no_tracing'):
with jax.no_tracing():
_ = f(y) # crash!
class APITest(jtu.JaxTestCase):
def test_grad_item(self):
def f(x):
if x.astype(bool).item():
return x ** 2
else:
return x
out = jax.grad(f)(2.0)
self.assertEqual(out, 4)
def test_jit_item(self):
def f(x):
return x.item()
x = jnp.array(1.0)
self.assertEqual(f(x), x)
with self.assertRaisesRegex(core.ConcretizationTypeError, "Abstract tracer value"):
jax.jit(f)(x)
@parameterized.named_parameters(
('grad', jax.grad),
('jacfwd', jax.jacfwd),
('jacref', jax.jacrev),
)
def test_grad_wrap(self, transform):
# Ensures that transforms wrap transformed functions with the correct signature.
@partial(jit, static_argnames=['flag'])
@transform
def my_function(x, flag):
return x if flag else jnp.zeros_like(x)
self.assertEqual(my_function(1.0, False), 0.0)
self.assertEqual(my_function(1.0, True), 1.0)
def test_grad_bad_input(self):
def f(x):
return x
self.assertRaisesRegex(
TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
lambda: grad(f)("foo"))
def test_grad_argnums(self):
def f(x, y, z, flag=False):
assert flag
return 1.0 * x + 2.0 * y + 3.0 * z
assert grad(f)(1.0, 1.0, 1.0, flag=True) == 1.0
assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0
assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0)
def test_value_and_grad_argnums(self):
def f(x, y, z, flag=False):
assert flag
return 1.0 * x + 2.0 * y + 3.0 * z
y = f(1.0, 1.0, 1.0, flag=True)
assert api.value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0)
assert api.value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0)
assert api.value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0))
def test_grad_of_jit(self):
side = []
@jit
def f(x):
side.append(None)
return x * x
assert grad(f)(1.0) == 2.0
assert len(side) == 1
assert grad(f)(2.0) == 4.0
assert len(side) == 1
def test_jit_of_grad(self):
side = []
@jit
def f(x):
side.append(None)
return x * x
g = jit(grad(f))
assert g(1.0) == 2.0
assert len(side) == 1
assert g(2.0) == 4.0
assert len(side) == 1
@parameterized.named_parameters(
{"testcase_name": f"_{transform.__name__}", "transform": transform}
for transform in [grad, jacfwd, jacrev])
def test_ad_weak_types(self, transform):
out = transform(lambda x: x)(1.0)
self.assertTrue(dtypes.is_weakly_typed(out))
def test_bad_input(self):
def f(x):
return x
with self.assertRaisesRegex(TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type"):
grad(f)("foo")
err_str = ("Error interpreting argument to .* as an abstract array. The problematic "
"value is of type .* and was passed to the function at path x.")
with self.assertRaisesRegex(TypeError, err_str):
jit(f)("foo")
def test_grad_tuple_output(self):
jtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,
"Gradient only defined for scalar-output functions. ")
def test_grad_unit_output(self):
jtu.check_raises(lambda: grad(lambda x: ())(np.zeros(3)), TypeError,
"Gradient only defined for scalar-output functions. ")
def test_grad_nonscalar_output(self):
jtu.check_raises(lambda: grad(lambda x: x)(np.zeros(3)), TypeError,
"Gradient only defined for scalar-output functions. ")
def test_unwrapped_numpy(self):
def f(x):
return np.exp(x)
with self.assertRaisesRegex(Exception, "The numpy.ndarray conversion .*"):
grad(f)(np.zeros(3))
def test_binop_mismatch(self):
def f(x, y):
return x + y
jtu.check_raises(
lambda: f(jnp.zeros(3), jnp.zeros(4)),
TypeError,
"add got incompatible shapes for broadcasting: (3,), (4,).")
jtu.check_raises(
lambda: grad(f)(np.zeros(3), np.zeros(4)),
TypeError,
"add got incompatible shapes for broadcasting: (3,), (4,).")
def test_dot_mismatch(self):
def f(x, y):
return jnp.dot(x, y)
self.assertRaisesRegex(
TypeError, ("dot_general requires contracting dimensions to have "
"the same shape, got \\(3L?,\\) and \\(4L?,\\)."),
lambda: grad(f)(np.zeros(3), np.zeros(4)))
def test_abstract_error_message(self):
for castfun in [float, complex, int]:
def f(x):
return castfun(x)
self.assertRaisesRegex(
TypeError,
f"[Tt]ry using `x.astype\\({castfun.__name__}\\)`",
lambda: jit(f)(1.0))
def test_switch_value_jit(self):
def f(x):
y = x > 0
if y:
return x
else:
return -x
assert grad(f)(1.0) == 1.0
assert grad(f)(-1.0) == -1.0
with self.assertRaisesRegex(core.ConcretizationTypeError,
"Attempted boolean conversion"):
jit(f)(1)
def test_list_index_err(self):
L = [1, 2, 3]
def f(n):
return L[n]
assert jit(f, static_argnums=(0,))(0) == L[0]
self.assertRaisesRegex(
TypeError,
r"The __index__\(\) method was called on traced array.*",
lambda: jit(f)(0))
def test_range_err(self):
def f(x, n):
for i in range(n):
x = x + i
return x
assert jit(f, static_argnums=(1,))(0, 5) == 10
self.assertRaisesRegex(
TypeError,
r"The __index__\(\) method was called on traced array.*",
lambda: jit(f)(0, 5))
def test_cast_int(self):
f = lambda x: int(x)
self.assertRaisesRegex(
TypeError,
"('(?:JaxprTracer|DynamicJaxprTracer)' object cannot be interpreted as an integer"
"|Abstract tracer value encountered where concrete value is expected.*)", lambda: jit(f)(0))
def test_casts(self):
for castfun in [hex, oct]:
f = lambda x: castfun(x)
self.assertRaisesRegex(
TypeError,
r"The __index__\(\) method was called on traced array.*", lambda: jit(f)(0))
def test_unimplemented_interpreter_rules(self):
foo_p = core.Primitive('foo')
def foo(x):
return foo_p.bind(x)
jtu.check_raises(lambda: foo(1.0), NotImplementedError,
"Evaluation rule for 'foo' not implemented")
jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
"Abstract evaluation for 'foo' not implemented")
jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
"Differentiation rule for 'foo' not implemented")
foo_p.def_abstract_eval(lambda x: x)
jtu.check_raises_regexp(lambda: jit(foo)(1.0), NotImplementedError,
".* rule for primitive 'foo' not found.*")
foo_p.def_impl(lambda x: x)
ad.defjvp(foo_p, lambda g, x: foo(g))
jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
"Transpose rule (for reverse-mode differentiation) for 'foo' not implemented")
def test_wrong_output_abstract_eval(self):
foo_p = core.Primitive('foo')
def foo(x):
return foo_p.bind(x)
foo_p.def_abstract_eval(lambda x: [x]) # Shouldn't return a list.
foo_p.def_impl(lambda x: x)
jitted = jit(lambda x: foo(x))
jtu.check_raises(lambda: jitted(1.0), ValueError,
"foo.abstract_eval() method should return a tuple or")
foo2_p = core.Primitive('foo2')
foo2_p.multiple_results = True
def foo2(x):
return foo2_p.bind(x),
foo2_p.def_abstract_eval(lambda x: x) # Should return a list.
foo2_p.def_impl(lambda x: [x])
jitted = jit(lambda x: foo2(x))
jtu.check_raises(lambda: jitted(1.0), ValueError,
"foo2.abstract_eval() method should return a tuple or")
def test_is_subclass(self):
self.assertFalse(issubclass(np.ndarray, jax.Array))
def test_is_instance(self):
def f(x):
self.assertIsInstance(x, jax.Array)
self.assertNotIsInstance(x, np.ndarray)
return x + 2
jit(f)(3)
jax.vmap(f)(np.arange(3))
def test_device_put_and_get(self):
x = np.arange(12.).reshape((3, 4)).astype("float32")
dx = api.device_put(x)
_check_instance(self, dx)
self.assertIsInstance(dx, jax.Array)
self.assertNotIsInstance(dx, np.ndarray)
x2 = api.device_get(dx)
self.assertNotIsInstance(x2, jax.Array)
self.assertIsInstance(x2, np.ndarray)
assert np.all(x == x2)
y = [x, (2 * x, 3 * x)]
dy = api.device_put(y)
y2 = api.device_get(dy)
self.assertIsInstance(y2, list)
self.assertIsInstance(y2[0], np.ndarray)
assert np.all(y2[0] == x)
self.assertIsInstance(y2[1], tuple)
self.assertIsInstance(y2[1][0], np.ndarray)
assert np.all(y2[1][0] == 2 * x)
self.assertIsInstance(y2[1][1], np.ndarray)
assert np.all(y2[1][1] == 3 * x)
def test_device_put_sharding(self):
mesh = jax.sharding.Mesh(jax.devices(), ('x',))
s = jax.NamedSharding(mesh, P('x'))
x = jnp.arange(len(jax.devices()))
y = jax.device_put(x, s)
self.assertEqual(y.sharding, s)
self.assertArraysAllClose(y, x)
# this might hit a special fast path
z = jax.device_put(y, s)
self.assertEqual(z.sharding, s)
self.assertArraysAllClose(z, x)
self.assertIs(z, y) # no copy
w = jax.device_put(z)
self.assertIs(w, z)
u = jax.device_put(y, jax.devices()[0])
self.assertArraysAllClose(u, y)
self.assertEqual(u.devices(), {jax.devices()[0]})
def test_device_put_sharding_tree(self):
if jax.device_count() < 2:
raise unittest.SkipTest("Test requires >= 2 devices")
mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]).reshape((2, 1)),
("x", "y"))
s1 = jax.NamedSharding(mesh, P("x"))
s2 = jax.NamedSharding(mesh, P("y"))
s3 = jax.NamedSharding(mesh, P("x", "y"))
x = jnp.arange(2)
y = jnp.arange(2) + 10
z = (jnp.arange(2) + 100).reshape((2, 1))
out = jax.device_put((x, (y, z)), device=(s1, (s2, s3)))
self.assertEqual(out[0].sharding, s1)
self.assertEqual(out[1][0].sharding, s2)
self.assertEqual(out[1][1].sharding, s3)
self.assertArraysAllClose(out[0], x)
self.assertArraysAllClose(out[1][0], y)
self.assertArraysAllClose(out[1][1], z)
def test_device_put_sharding_tree_prefix(self):
if jax.device_count() < 2:
raise unittest.SkipTest("Test requires >= 2 devices")
mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]).reshape((2, 1)), ("x", "y"))
s1 = jax.sharding.NamedSharding(mesh, P("x"))
s2 = jax.sharding.NamedSharding(mesh, P("y"))
x = jnp.arange(2)
y = jnp.arange(2) + 10
z = jnp.arange(2) + 100
out = jax.device_put((x, (y, z)), device=(s1, s2))
self.assertEqual(out[0].sharding, s1)
self.assertEqual(out[1][0].sharding, s2)
self.assertEqual(out[1][1].sharding, s2)
self.assertArraysAllClose(out[0], x)
self.assertArraysAllClose(out[1][0], y)
self.assertArraysAllClose(out[1][1], z)
def test_device_put_sharding_mismatched_tree_same_leaf_count(self):
if jax.device_count() < 2:
raise unittest.SkipTest("Test requires >= 2 devices")
mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]).reshape((2, 1)), ("x", "y"))
s1 = jax.sharding.NamedSharding(mesh, P("x"))
s2 = jax.sharding.NamedSharding(mesh, P("y"))
x = jnp.arange(2)
y = jnp.arange(2) + 10
z = jnp.arange(2) + 100
with self.assertRaisesRegex(
ValueError,
"device_put device specification must be a tree prefix of the "
r"corresponding value, got specification \(\(NamedSharding\(.*\), "
r"NamedSharding\(.*\)\), NamedSharding\(.*\)\) for value tree "
r"PyTreeDef\(\(\*, \(\*, \*\)\)\)."
):
jax.device_put((x, (y, z)), device=((s1, s2), s2))
  def test_device_put_sharding_mismatched_tree_different_leaf_count(self):
    """A sharding tree with a different leaf count than the value raises ValueError."""
    if jax.device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices")
    mesh = jax.sharding.Mesh(np.array(jax.devices()[:2]).reshape((2, 1)), ("x", "y"))
    s1 = jax.sharding.NamedSharding(mesh, P("x"))
    s2 = jax.sharding.NamedSharding(mesh, P("y"))
    x = jnp.arange(2)
    y = jnp.arange(2) + 10
    z = jnp.arange(2) + 100
    # Three value leaves but only two shardings, and (s1, s2) is not a prefix
    # of the 3-tuple value tree.
    with self.assertRaisesRegex(
        ValueError,
        "device_put device specification must be a tree prefix of the "
        r"corresponding value, got specification \(NamedSharding\(.*\), "
        r"NamedSharding\(.*\)\) for value tree PyTreeDef\(\(\*, \*, \*\)\)."
    ):
      jax.device_put((x, y, z), device=(s1, s2))
  def test_device_put_custom_type_not_accepting_none_leaves(self):
    """device_put must not feed None leaves to a custom pytree's unflatten."""
    class CustomNode(list):
      pass
    def unflatten(unused_aux_data, children):
      # The custom node rejects None children; device_put should never
      # produce them when rebuilding the tree.
      self.assertIsNotNone(children[0])
      return CustomNode(children)
    # NOTE(review): registration is global and is not undone after the test.
    tree_util.register_pytree_node(CustomNode, lambda x: (x, None), unflatten)
    jax.device_put(CustomNode([0.1]))
def test_vmap_inconsistent_sizes_constructs_proper_error_message(self):
def f(x1, x2, g):
return g(x1, x2)
with self.assertRaisesRegex(
ValueError,
"vmap got inconsistent sizes for array axes to be mapped:"
):
jax.vmap(f, (0, 0, None))(jnp.ones(2), jnp.ones(3), jnp.add)
  def test_vmap_inconsistent_sizes_constructs_proper_error_message_kwargs(self):
    """The vmap size-mismatch error names keyword arguments correctly."""
    # regression test for https://github.com/jax-ml/jax/issues/24406
    def f(x1, x2, a3):
      return x1 + x2 + a3
    # The error must identify the offending kwarg as kwargs['a3'], not by
    # positional index.
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"  \* most axes \(2 of them\) had size 2, e.g. axis 0 of argument x1 of type float32\[2\];\n"
        r"  \* one axis had size 1: axis 0 of kwargs\['a3'\] of type float32\[1\]",
    ):
      jax.vmap(f)(
        jnp.ones(2, dtype=jnp.float32),
        a3=jnp.ones(1, dtype=jnp.float32),
        x2=jnp.ones(2, dtype=jnp.float32)
      )
  def test_device_get_scalar(self):
    """device_get round-trips arrays to numpy and passes non-array leaves through."""
    x = np.arange(12.).reshape((3, 4)).astype("float32")
    x = api.device_put(x)
    _check_instance(self, x)
    # A plain device_put lands on a single device.
    self.assertIsInstance(x.sharding, jax.sharding.SingleDeviceSharding)
    for s in x.addressable_shards:
      self.assertArraysEqual(s.data, x)
      self.assertEqual(s.replica_id, 0)
      self.assertEqual(s.index, (slice(None), slice(None)))
    y = [x, 2]
    y2 = api.device_get(y)
    self.assertIsInstance(y2, list)
    self.assertIsInstance(y2[0], np.ndarray)
    assert np.all(y2[0] == x)
    # The Python int leaf is returned unchanged, not converted to an array.
    self.assertIsInstance(y2[1], int)
    self.assertEqual(y2[1], 2)
  @parameterized.parameters([(3,)], [(2, 0)])
  def test_device_put_across_devices(self, shape):
    """device_put can move a committed array between two local devices."""
    if len(jax.local_devices()) < 2:
      raise unittest.SkipTest("this test requires multiple devices")
    d1, d2 = jax.local_devices()[:2]
    data = self.rng().randn(*shape).astype(np.float32)
    x = api.device_put(data, device=d1)
    self.assertEqual(x.devices(), {d1})
    y = api.device_put(x, device=d2)
    self.assertEqual(y.devices(), {d2})
    # The data is preserved by the transfer.
    np.testing.assert_array_equal(data, np.array(y))
    # Make sure these don't crash
    api.device_put(x)
    api.device_put(y)
@jtu.skip_on_devices("cpu")
def test_device_put_across_platforms(self):
default_device = jax.devices()[0]
cpu_device = jax.devices("cpu")[0]
np_arr = np.array([1,2,3])
scalar = 1
device_arr = jnp.array([1,2,3])
assert device_arr.devices() == {default_device}
for val in [np_arr, device_arr, scalar]:
x = api.device_put(val, device=cpu_device)
self.assertEqual(x.devices(), {cpu_device})
def test_device_put_on_single_device_donated_buffer_fails(self):
@partial(jax.jit, donate_argnums=0)
def f(inp1):
return inp1 * 2
x = jnp.zeros((10,), jnp.float32)
f(x)
with self.assertRaises(RuntimeError):
result = jax.device_put(x, jax.devices()[0])
result.block_until_ready()
with self.assertRaises(RuntimeError):
result = jax.device_put(x, jax.devices()[-1])
result.block_until_ready()
def test_device_put_on_multi_device_donated_buffer_fails(self):
@partial(jax.jit, donate_argnums=0)
def f(inp1):
return inp1 * 2
mesh1 = jax.sharding.Mesh(jax.devices(), ("x",))
s1 = jax.NamedSharding(mesh1, P("x"))
mesh2 = jax.sharding.Mesh(tuple(reversed(jax.devices())), ("x",))
s2 = jax.NamedSharding(mesh2, P("x"))
x = jax.device_put(np.arange(len(jax.devices()), dtype=jnp.float32), s1)
f(x)
with self.assertRaises(RuntimeError):
result = jax.device_put(x, s1)
result.block_until_ready()
with self.assertRaises(RuntimeError):
result = jax.device_put(x, s2)
result.block_until_ready()
@jax.default_matmul_precision("float32")
def test_jacobian(self):
R = self.rng().randn
A = R(4, 3)
x = R(3)
f = lambda x: jnp.dot(A, x)
assert np.allclose(jacfwd(f)(x), A)
assert np.allclose(jacrev(f)(x), A)
f = lambda x: jnp.tanh(jnp.dot(A, x))
assert np.allclose(jacfwd(f)(x), jacrev(f)(x))
@jax.default_matmul_precision("float32")
def test_hessian(self):
R = self.rng().randn
A = R(4, 4)
x = R(4)
f = lambda x: jnp.dot(x, jnp.dot(A, x))
assert np.allclose(hessian(f)(x), A + A.T)
@jax.default_matmul_precision("float32")
def test_hessian_holomorphic(self):
R = self.rng().randn
A = R(4, 4)
x = R(4).astype('complex64') * (1 + 2j)
f = lambda x: jnp.dot(x, jnp.dot(A.astype(x.dtype), x))
assert np.allclose(hessian(f, holomorphic=True)(x), A + A.T)
@jax.default_matmul_precision("float32")
def test_hessian_aux(self):
R = self.rng().randn
A = R(4, 4)
x = R(4)
f = lambda x: (jnp.dot(x, jnp.dot(A, x)), x)
h, aux = hessian(f, has_aux=True)(x)
assert np.allclose(h, A + A.T)
assert np.allclose(aux, x)
  def test_std_basis(self):
    """api._std_basis builds one-hot standard bases over arrays and pytrees."""
    basis = api._std_basis(jnp.zeros(3))
    assert getattr(basis, "shape", None) == (3, 3)
    assert np.allclose(basis, np.eye(3))
    # For an (3, 3) input the basis has one leading axis per input element.
    basis = api._std_basis(jnp.zeros((3, 3)))
    assert getattr(basis, "shape", None) == (9, 3, 3)
    assert np.allclose(basis, np.eye(9).reshape(9, 3, 3))
    # For a pytree, the leading axis size is the total element count
    # (1 + 3 + 3*4 = 16), shared across all leaves.
    basis = api._std_basis([0., (jnp.zeros(3), jnp.zeros((3, 4)))])
    assert isinstance(basis, list) and len(basis) == 2
    assert getattr(basis[0], "shape", None) == (16,)
    assert isinstance(basis[1], tuple) and len(basis[1]) == 2
    assert getattr(basis[1][0], "shape", None) == (16, 3)
    assert getattr(basis[1][1], "shape", None) == (16, 3, 4)
@jtu.skip_on_devices("tpu")
def test_jacobian_on_pytrees(self):
for jacfun in [jacfwd, jacrev]:
ans = jacfun(lambda x, y: (x, y))(0., 1.)
expected = (1., 0.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x, y: (x, y), 1)(0., 1.)
expected = (0., 1.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.)
expected = ((1., 0.),
(0., 1.),)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x: x[:2])((1., 2., 3.))
expected = ((1., 0., 0.),
(0., 1., 0.))
self.assertAllClose(ans, expected, check_dtypes=False)
R = self.rng().randn
x = jnp.array(R(2))
y = jnp.array(R(3))
ans = jacfun(lambda x, y: {'x': x, 'xy': jnp.outer(x, y)})(x, y)
expected = {'x': np.eye(2),
'xy': np.kron(np.eye(2), y[:, None]).reshape(2, 3, 2)}
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("tpu")
def test_hessian_on_pytrees(self):
ans = hessian(lambda x: jnp.array(x)**2)((1., 2.))
expected = ((np.array([2., 0.]), np.array([0., 0.])),
(np.array([0., 0.]), np.array([0., 2.])))
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("tpu")
def test_issue1372(self):
def quad(x):
return jnp.dot(x, x)
def f(x, u):
return quad(x) + quad(u)
x, u = jnp.ones(5), jnp.ones(2)
rev = jacrev
fwd = jacfwd
# Diagonal entries
self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2))
# Off-diagonal entries by reverse-mode on the outside
self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2))
self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2))
# Off-diagonal entries by forward-mode on the outside
self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2))
self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2))
def test_large_device_constant(self):
ans = jit(lambda x: 2 * x)(jnp.ones(int(2e6))) # doesn't crash
self.assertAllClose(ans, np.ones(int(2e6)) * 2., check_dtypes=False)
def test_grad_and_aux_basic(self):
g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.)
self.assertAllClose(g, grad(lambda x: x**3)(3.))
self.assertAllClose(aux, [9.], check_dtypes=False)
def test_grad_and_aux_error(self):
with self.assertRaisesRegex(TypeError, "two-element tuple"):
grad(lambda x: (1, 2, 3), has_aux=True)(1.)
with self.assertRaisesRegex(TypeError, "two-element tuple"):
grad(lambda x: x, has_aux=True)(1.)
with self.assertRaisesRegex(TypeError, "two-element tuple"):
grad(lambda x: (x,), has_aux=True)(1.)
def test_grad_and_aux_nested(self):
def f(x):
g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0]
f2 = lambda x: x**3
self.assertEqual(grad(f)(4.), grad(f2)(4.))
self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
def f(x):
g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0] * jnp.sin(x)
f2 = lambda x: x**3 * jnp.sin(x)
self.assertEqual(grad(f)(4.), grad(f2)(4.))
self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
def test_grad_and_aux_constant(self):
g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.)
self.assertEqual(g, grad(lambda x: x**3)(4.))
self.assertEqual(aux, [4.])
g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.)
self.assertEqual(g, grad(lambda x: x**3)(4.))
self.assertEqual(aux, [4.**2, 4.])
  def test_grad_and_aux_no_tracers(self):
    """Aux values returned by grad must be concrete, never leaked tracers."""
    # see https://github.com/jax-ml/jax/issues/1950
    def f(x):
      aux = dict(identity=x, p1=x+1)
      return x ** 2, aux
    _, aux = jax.grad(f, has_aux=True)(3.)
    self.assertIsInstance(aux, dict)
    for val in aux.values():
      self.assertNotIsInstance(val, core.Tracer)
def test_jacfwd_and_aux_basic(self):
jac, aux = jacfwd(lambda x: (x**3, [x**2]), has_aux=True)(3.)
self.assertAllClose(jac, jacfwd(lambda x: x**3)(3.))
self.assertAllClose(aux, [9.], check_dtypes=False)
def test_jacrev_and_aux_basic(self):
jac, aux = jacrev(lambda x: (x**3, [x**2]), has_aux=True)(3.)
self.assertAllClose(jac, jacrev(lambda x: x**3)(3.))
self.assertAllClose(aux, [9.], check_dtypes=False)
def test_jacfwd_and_aux_nested(self):
def f(x):
jac, aux = jacfwd(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0]
f2 = lambda x: x**3
self.assertEqual(jacfwd(f)(4.), jacfwd(f2)(4.))
self.assertEqual(jit(jacfwd(f))(4.), jacfwd(f2)(4.))
self.assertEqual(jit(jacfwd(jit(f)))(4.), jacfwd(f2)(4.))
def f(x):
jac, aux = jacfwd(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0] * jnp.sin(x)
f2 = lambda x: x**3 * jnp.sin(x)
self.assertEqual(jacfwd(f)(4.), jacfwd(f2)(4.))
self.assertEqual(jit(jacfwd(f))(4.), jacfwd(f2)(4.))
self.assertEqual(jit(jacfwd(jit(f)))(4.), jacfwd(f2)(4.))
def test_jacrev_and_aux_nested(self):
def f(x):
jac, aux = jacrev(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0]
f2 = lambda x: x**3
self.assertEqual(jacrev(f)(4.), jacrev(f2)(4.))
self.assertEqual(jit(jacrev(f))(4.), jacrev(f2)(4.))
self.assertEqual(jit(jacrev(jit(f)))(4.), jacrev(f2)(4.))
def f(x):
jac, aux = jacrev(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0] * jnp.sin(x)
f2 = lambda x: x**3 * jnp.sin(x)
self.assertEqual(jacrev(f)(4.), jacrev(f2)(4.))
self.assertEqual(jit(jacrev(f))(4.), jacrev(f2)(4.))
self.assertEqual(jit(jacrev(jit(f)))(4.), jacrev(f2)(4.))
def test_jvp_and_aux_basic(self):
fun = lambda x: (x**3, [x**2])
primals, tangents, aux = api.jvp(fun, (3.,), (4.,), has_aux=True)
expected_primals, expected_tangents = api.jvp(lambda x: x**3, (3.,), (4.,))
self.assertAllClose(primals, expected_primals, check_dtypes=True)
self.assertAllClose(tangents, expected_tangents, check_dtypes=True)
self.assertEqual(aux, [3.**2])
  def test_jvp_mismatched_arguments(self):
    """jvp rejects primals/tangents with mismatched structure, dtype, or shape."""
    # Different tuple lengths -> tree-structure error.
    self.assertRaisesRegex(
      TypeError,
      ("primal and tangent arguments to jax.jvp must have the same tree "
       "structure"),
      lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), ()))
    # If primals and tangents must both be tuples or both lists
    self.assertRaisesRegex(
      TypeError,
      ("primal and tangent arguments to jax.jvp must have the same tree "
       "structure"),
      lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), [np.float32(2)]))
    # Mismatched dtypes between primal and tangent.
    self.assertRaisesRegex(
      TypeError,
      "primal and tangent arguments to jax.jvp do not match.",
      lambda: api.jvp(lambda x: -x, (np.float16(2),), (np.float32(4),)))
    # If primals and tangents are not of the same shape then raise error
    fun = lambda x: x+1
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (jnp.array([1.,2.,3.,4.]),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.float32(10.),), (jnp.array([1.,2.,3.], dtype=jnp.float32),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.], dtype=jnp.float32),), (jnp.float32(20.),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (20.,))
  def test_jvp_non_tuple_arguments(self):
    """jvp requires both primals and tangents to be tuples or lists."""
    def f(x, y): return x + y
    # Bare float primal.
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found float and tuple.",
        lambda: api.jvp(f, 0., (1.,)))
    # Bare ndarray tangent.
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found tuple and ndarray.",
        lambda: api.jvp(f, (0.,), np.array([1., 2.])))
  def test_vjp_mismatched_arguments(self):
    """The vjp pullback rejects cotangents with wrong structure or dtype."""
    _, pullback = api.vjp(lambda x, y: x * y, np.float32(3), np.float32(4))
    # Cotangent pytree shaped like the inputs, not the (single) output.
    self.assertRaisesRegex(
      ValueError, "unexpected tree structure",
      lambda: pullback((np.float32(7), np.float32(100))))
    # Cotangent with a dtype that does not match the output.
    self.assertRaisesRegex(
      ValueError, "unexpected JAX type",
      lambda: pullback(np.float16(42)))
def test_vjp_bad_cotangent_shape(self):
x = np.ones((2, 5), dtype=np.float32)
y = np.ones((5, 3), dtype=np.float32)
def f_jax(x, y):
return jnp.matmul(x, y)
res, pullback = jax.vjp(f_jax, x, y)
with self.assertRaisesRegex(ValueError, "unexpected JAX type"):
pullback(np.ones((2, 4), dtype=np.float32))
  def test_jvp_jit_cached(self):
    """Bug in caching in presence of JVP and JIT."""
    def func(x):
      def inner(y):
        return y * x
      # Must have two calls to the inner jit (the second one hits the cache)
      res1 = api.jit(inner)(4.)
      res2 = api.jit(inner)(5.)
      return res1 + res2
    # primal: 4*5 + 5*5 = 45; tangent w.r.t. x: 4 + 5 = 9.
    self.assertAllClose((45., 9.), api.jvp(func, (5.,), (1.,)))
def test_linear_transpose_abstract(self):
x = types.SimpleNamespace(shape=(3,), dtype=np.dtype(np.float32))
y = jnp.arange(3, dtype=np.float32)
transpose_fun = api.linear_transpose(lambda x: 2 * x, x)
z, = transpose_fun(y)
self.assertArraysEqual(2 * y, z, check_dtypes=True)
def test_linear_transpose_integer(self):
f = lambda x: 2 * x
transpose = api.linear_transpose(f, 1)
actual, = transpose(3)
expected = 6
self.assertEqual(actual, expected)
  def test_linear_transpose_dce(self):
    """linear_transpose works when dead-code elimination drops a nonlinear output."""
    # https://github.com/jax-ml/jax/issues/15660
    # f has a nonlinear second output (x > 0) that g discards; transposing g
    # must succeed once that output is DCE'd.
    f = jit(lambda x: (2 * x, x > 0))
    g = lambda x: f(x)[0]
    api.linear_transpose(g, 1.)(1.)
  def test_linear_transpose_error(self):
    """linear_transpose raises on unsupported primals and mismatched cotangents."""
    # Integer example primal with a float-valued function is unsupported.
    with self.assertRaisesRegex(
        TypeError, "linear_transpose only supports"):
      api.linear_transpose(lambda x: 2. * x, 1)
    # Cotangent pytree structure must match the output structure.
    transpose_fun = api.linear_transpose(lambda x: [x, x], 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent tree does not match"):
      transpose_fun(1.0)
    # Cotangent shape must match the output shape.
    transpose_fun = api.linear_transpose(lambda x: jnp.stack([x, x]), 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1.0)
    # Real cotangent for a complex-valued output is rejected.
    transpose_fun = api.linear_transpose(lambda x: 1j * x, 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1.0)
    # Complex cotangent for a real-valued output is rejected.
    transpose_fun = api.linear_transpose(lambda x: x, 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1j)
def test_linear_transpose_complex(self):
f = lambda x: (1 + 2j) * x
transpose = api.linear_transpose(f, 1j)
actual, = transpose(3 + 4j)
expected = -5 + 10j
self.assertEqual(actual, expected)
def test_linear_transpose_zeros(self):
f = lambda x: x[0]
transpose = api.linear_transpose(f, [1., 2.])
actual, = transpose(3.)
expected = [3., 0.]
self.assertEqual(actual, expected)
def test_complex_grad_raises_error(self):
self.assertRaises(TypeError, lambda: grad(lambda x: jnp.sin(x))(1 + 2j))
def test_holomorphic_grad(self):
out = grad(lambda x: jnp.sin(x), holomorphic=True)(1 + 2j)
expected = 2.0327230070196656 - 3.0518977991518j
self.assertAllClose(out, expected, check_dtypes=False)
  def test_nonholomorphic_grad(self):
    """grad of a real-valued function of complex inputs matches golden values."""
    zs = 0.5j * np.arange(5) + np.arange(5)
    def f(z):
      return jnp.sum(jnp.cos(jnp.abs(z)))
    ans = grad(f)(zs)
    # Golden values; compared within the standard gradient tolerances.
    expected = np.array([ 0.          + 0.j,
                         -0.80430663 + 0.40215331j,
                         -0.70368982 + 0.35184491j,
                          0.1886467  - 0.09432335j,
                          0.86873727 - 0.43436864j])
    self.assertAllClose(ans, expected, check_dtypes=False,
                        atol=jtu.default_gradient_tolerance,
                        rtol=jtu.default_gradient_tolerance)
def test_complex_output_jacrev_raises_error(self):
self.assertRaises(TypeError, lambda: jacrev(lambda x: jnp.sin(x))(1 + 2j))
def test_nonholomorphic_jacrev(self):
# code based on https://github.com/jax-ml/jax/issues/603
zs = 0.5j * np.arange(5) + np.arange(5)
def f(z):
return jnp.cos(jnp.linalg.norm(2 * z))
ans = jacrev(f)(zs)
expected = grad(f)(zs)
self.assertAllClose(ans, expected)
  @jax.numpy_dtype_promotion('standard')  # Test explicitly exercises implicit dtype promotion.
  def test_heterogeneous_jacfwd(self):
    """jacfwd preserves per-input dtypes in each output row (fwd: dtype follows output)."""
    # See https://github.com/jax-ml/jax/issues/7157
    # See https://github.com/jax-ml/jax/issues/7780
    x = np.array([2.0], dtype=np.float16)
    y = np.array([3.0], dtype=np.float32)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return x, y, x + y
    actual = jacfwd(f)(a)
    # Each row of the Jacobian carries the dtype of the corresponding OUTPUT:
    # rows for x, y, and the promoted x+y respectively.
    desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float16)),
               (np.array(0., dtype=np.float32), np.array(1., dtype=np.float32)),
               (np.array(1., dtype=np.float32), np.array(1., dtype=np.float32)))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
  @jax.numpy_dtype_promotion('standard')  # Test explicitly exercises implicit dtype promotion.
  def test_heterogeneous_jacrev(self):
    """jacrev preserves per-input dtypes in each output column (rev: dtype follows input)."""
    # See https://github.com/jax-ml/jax/issues/7157
    # See https://github.com/jax-ml/jax/issues/7780
    x = np.array([2.0], dtype=np.float16)
    y = np.array([3.0], dtype=np.float32)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return x, y, x + y
    actual = jacrev(f)(a)
    # Each column of the Jacobian carries the dtype of the corresponding INPUT:
    # float16 for derivatives w.r.t. x, float32 w.r.t. y.
    desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float32)),
               (np.array(0., dtype=np.float16), np.array(1., dtype=np.float32)),
               (np.array(1., dtype=np.float16), np.array(1., dtype=np.float32)))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
  def test_heterogeneous_grad(self):
    """grad over a pytree with mixed complex and real leaves keeps each leaf's dtype."""
    # See https://github.com/jax-ml/jax/issues/7157
    x = np.array(1.0+1j)
    y = np.array(2.0)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return jnp.square(jnp.abs(x)) + y
    actual = grad(f)(a)
    # d/dx |x|^2 = 2*conj(x) = 2 - 2j; d/dy = 1, staying real.
    desired = (np.array(2 - 2j), np.array(1.))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
def test_complex_input_jacfwd_raises_error(self):
self.assertRaises(TypeError, lambda: jacfwd(lambda x: jnp.sin(x))(1 + 2j))
def test_legacy_devicearray_repr(self):
dx = device_put(3.)
str(dx.item()) # doesn't crash
def test_devicearray_repr(self):
x = device_put(jnp.zeros(3))
_check_instance(self, x)
repr(x) # doesn't crash
x = device_put(jnp.full(3, 1 + 1j))
_check_instance(self, x)
repr(x) # doesn't crash
def test_devicearray_delete(self):
x = device_put(1.)
x.delete()
self.assertRaisesRegex(RuntimeError, "Array has been deleted.",
lambda: repr(x))
def test_devicearray_block_until_ready(self):
x = device_put(1.)
y = x.block_until_ready()
# Tests mostly that block_until_ready() does not produce an error.
self.assertTrue(y is x)
def test_block_until_ready_function(self):
# Just tests that we don't error...
pytree = (device_put(1.), np.ones(3))
pytree = jax.block_until_ready(pytree)
self.assertAllClose(pytree[0], jnp.array(1.), check_dtypes=False)
self.assertAllClose(pytree[1], np.ones(3), check_dtypes=False)
def test_block_until_ready_numpy_arrays(self):
pytree = (np.ones(1), np.ones(2))
pytree = jax.block_until_ready(pytree)
self.assertAllClose(pytree[0], np.ones(1), check_dtypes=False)
self.assertAllClose(pytree[1], np.ones(2), check_dtypes=False)
def test_block_until_ready_mixed(self):
pytree = (device_put(1.), device_put(2.), np.ones(3), 4)
pytree = jax.block_until_ready(pytree)
self.assertAllClose(pytree[0], jnp.array(1.), check_dtypes=False)
self.assertAllClose(pytree[1], jnp.array(2.), check_dtypes=False)
self.assertAllClose(pytree[2], np.ones(3), check_dtypes=False)
self.assertEqual(pytree[3], 4)
def test_devicearray_weakref_friendly(self):
x = device_put(1.)
y = weakref.ref(x)
self.assertEqual(y(), 1.)
del x
self.assertIsNone(y())
  def test_namedtuple_transparency(self):
    """namedtuples are transparent pytrees for grad and jit."""
    # See https://github.com/jax-ml/jax/issues/446
    Point = collections.namedtuple("Point", ["x", "y"])
    def f(pt):
      return jnp.sqrt(pt.x ** 2 + pt.y ** 2)
    pt = Point(1., 2.)
    f(pt)  # doesn't crash
    g = api.grad(f)(pt)
    # The gradient is returned as the same namedtuple type.
    self.assertIsInstance(g, Point)
    f_jit = api.jit(f)
    self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False)
  def test_namedtuple_subclass_transparency(self):
    """Subclasses of namedtuples also work as pytrees under grad."""
    # See https://github.com/jax-ml/jax/issues/806
    Point = collections.namedtuple("Point", ["x", "y"])
    class ZeroPoint(Point):
      def is_zero(self):
        return (self.x == 0) and (self.y == 0)
    pt = ZeroPoint(0., 0.)
    def f(pt):
      return 0. if pt.is_zero() else jnp.sqrt(pt.x ** 2 + pt.y ** 2)
    f(pt)  # doesn't crash
    _ = api.grad(f)(pt)
    self.assertIsInstance(pt, ZeroPoint)
@parameterized.parameters(1, 2, 3)
def test_shape_dtype_struct(self, i):
s = api.ShapeDtypeStruct(shape=(i, 2, 3), dtype=jnp.float32)
self.assertEqual(s.shape, (i, 2, 3))
self.assertEqual(s.dtype, jnp.float32)
self.assertEqual(s.ndim, 3)
self.assertEqual(s.size, i * 2 * 3)
self.assertLen(s, i)
for f in (str, repr):
self.assertEqual(
f(s), f"ShapeDtypeStruct(shape=({i}, 2, 3), dtype=float32)")
  def test_shape_dtype_struct_scalar(self):
    """A scalar ShapeDtypeStruct has empty shape, size 1, ndim 0, and no len()."""
    s = api.ShapeDtypeStruct(shape=(), dtype=jnp.float32)
    self.assertEmpty(s.shape)
    self.assertEqual(s.size, 1)
    self.assertEqual(s.ndim, 0)
    # len() of a 0-d struct raises, mirroring numpy's unsized-object error.
    with self.assertRaisesRegex(TypeError, "len[(][)] of unsized object"):
      _ = len(s)
def test_shape_dtype_struct_hash(self):
s1 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)
s2 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)
s3 = api.ShapeDtypeStruct(shape=(2, 4), dtype=jnp.float32)
self.assertEqual(hash(s1), hash(s2))
self.assertNotEqual(hash(s1), hash(s3))
def test_shape_dtype_struct_invalid_shape(self):
with self.assertRaisesRegex(TypeError, "'int' object is not iterable"):
api.ShapeDtypeStruct(shape=4, dtype='float32')
def test_shape_dtype_struct_dtype_none(self):
with self.assertRaisesRegex(ValueError, "dtype must be specified"):
api.ShapeDtypeStruct(shape=(), dtype=None)
def test_eval_shape(self):
def fun(x, y):
return jnp.tanh(jnp.dot(x, y) + 3.)
x = jnp.ones((2, 3))
y = jnp.ones((3, 4))
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2, 4))
def test_eval_shape_constants(self):
def fun():
x = jnp.ones((2, 3))
y = jnp.ones((3, 4))
return jnp.tanh(jnp.dot(x, y) + 3.)
out_shape = api.eval_shape(fun)
self.assertEqual(out_shape.shape, (2, 4))
def test_eval_shape_tuple_unpacking(self):
def fun(x, y):
a, b = x
return a + b + y
x = (jnp.ones(2), jnp.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2,))
def test_eval_shape_tuple_itemgetting(self):
def fun(x, y):
return x[0] + x[1] + y
x = (jnp.ones(2), jnp.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2,))
def test_eval_shape_output_dict(self):
def fun(x, y):
return {'hi': x[0] + x[1] + y}
x = (jnp.ones(2), jnp.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
out_shape = jax.tree.map(np.shape, out_shape)
self.assertEqual(out_shape, {'hi': (2,)})
def test_eval_shape_shape_error(self):
def fun(x, y):
return jnp.tanh(jnp.dot(x, y) + 3.)
x = jnp.ones((3, 3))
y = jnp.ones((4, 4))
self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y))
  def test_eval_shape_trace_cache_share(self):
    """eval_shape and jit share the tracing cache: one trace serves both."""
    def f(x):
      return x
    inp = np.arange(8)
    with jtu.count_jit_tracing_cache_miss() as count:
      jax.eval_shape(f, inp)
      jax.jit(f)(inp)
    # Exactly one tracing-cache miss: the jit call reuses eval_shape's trace.
    self.assertEqual(count[0], 1)
  def test_jit_infer_params_cache(self):
    """The jit infer-params cache misses once per function and holds only weak refs."""
    def f(x):
      return x
    f_jit = jax.jit(f)
    def g(x):
      # Two calls to the same jitted function: the second hits the cache.
      x = f_jit(x)  # noqa: F821
      x = f_jit(x)  # noqa: F821
      return x
    g_jit = jax.jit(g)
    inp = np.arange(8)
    with jtu.count_jit_infer_params_cache_miss() as count:
      g_jit(inp)
    self.assertDictEqual(count, {f: 1, g: 1})
    cache_size = pjit_lib._infer_params_cached.cache_info().currsize
    del count, f, f_jit, g, g_jit
    # Cache should only keep a weak reference to f and g.
    self.assertLess(pjit_lib._infer_params_cached.cache_info().currsize,
                    cache_size, msg=pjit_lib._infer_params_cached.cache_keys())
def test_eval_shape_out_shardings(self):
s = jax.sharding.SingleDeviceSharding(jax.devices()[0])
@partial(jax.jit, out_shardings=s)
def f(x):
return x * 2
inp = np.arange(8)
out = f.eval_shape(inp)
self.assertEqual(out.sharding, s)
self.assertEqual(out.shape, (inp * 2).shape)
def test_eval_shape_duck_typing(self):
def fun(A, b, x):
return jnp.dot(A, x) + b
class MyArgArray:
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = np.dtype(dtype)
A = MyArgArray((3, 4), jnp.float32)
b = MyArgArray((1, 5), jnp.float32)
x = MyArgArray((4, 5), jnp.float32)
out_shape = api.eval_shape(fun, A, b, x)
self.assertEqual(out_shape.shape, (3, 5))
  def test_eval_shape_duck_typing2(self):
    """eval_shape accepts dict-like objects with shape/dtype attribute access."""
    # https://github.com/jax-ml/jax/issues/5683
    class EasyDict(dict):
      def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Alias the instance dict to the mapping, so d['shape'] == d.shape.
        self.__dict__ = self
    x = EasyDict(shape=(3,), dtype=np.dtype('float32'))
    out_shape = api.eval_shape(lambda x: x, x)  # doesn't crash
    self.assertEqual(out_shape.shape, (3,))
  def test_issue_871(self):
    """A linearized function rejects tangents inconsistent with the primals."""
    T = jnp.array([[1., 2.], [3., 4.], [5., 6.]])  # wrong shape for the tangent
    x = jnp.array([1, 2, 3])
    msg = ("linearized function called on tangent values inconsistent with "
           "the original primal values")
    y, f_jvp = api.linearize(jnp.sum, x)
    with self.assertRaisesRegex(ValueError, msg):
      f_jvp(T)
    # Same check when the linearized function is wrapped in jit.
    y, f_jvp = api.linearize(api.jit(jnp.sum), x)
    with self.assertRaisesRegex(ValueError, msg):
      f_jvp(T)
  def test_grad_of_int_errors(self):
    """grad of an integer input raises unless allow_int=True is passed."""
    # Errors without allow_int=True
    dfn = grad(lambda x: x ** 2)
    self.assertRaisesRegex(
      TypeError,
      (r"grad requires real- or complex-valued inputs \(input dtype that is a "
       r"sub-dtype of np.inexact\), but got int.*."),
      lambda: dfn(3))
def test_jvp_of_int_identity(self):
primals = (1,)
tangents = (np.zeros(shape=(), dtype=float0),)
_, out = api.jvp(lambda x: x, primals, tangents)
self.assertEqual(out, np.zeros(shape=(), dtype=float0))
def test_jvp_of_int_add(self):
primals = (2,)
tangents = (np.zeros(shape=(), dtype=float0),)
_, out_tangent = api.jvp(lambda x: x+1, primals, tangents)
self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))
def test_jit_jvp_of_int(self):
primals = (2,)
tangents = (np.zeros(shape=(), dtype=float0),)
_, out_tangent = api.jvp(jax.jit(lambda x: x+1), primals, tangents)
self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))
def test_jvp_of_convert_element_type(self):
fun = lambda x: x.astype(np.int32) + 1
primal, tangent = jax.jvp(fun, (2.,), (1.,))
self.assertAllClose(primal, np.int32(3))
self.assertEqual(tangent, np.zeros((), dtype=float0))
def test_vjp_of_int_index(self):
primal, fn_vjp = api.vjp(lambda x, i: x[i], np.ones(2)*2, 1)
tangent_x, tangent_i = fn_vjp(1.)
self.assertEqual(primal, 2.)
self.assertAllClose(tangent_x, jnp.array([0., 1.]))
self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))
  def test_vjp_of_int_shapes(self):
    """float0 cotangents through a reshape keep the input's shape."""
    out, fn_vjp = api.vjp(
        lambda x: lax.reshape(x, (2, 2)), np.ones((4, 1), dtype=int))
    # Feeding a (2, 2) float0 cotangent yields a (4, 1) float0 tangent.
    tangent, = fn_vjp(np.zeros((2, 2), dtypes.float0))
    self.assertArraysEqual(tangent, np.zeros(shape=(4, 1), dtype=float0))
def test_jit_vjp_of_int(self):
primal, fn_vjp = api.vjp(lambda x, y: x+y, 2, 1)
tangent_x, tangent_i = jax.jit(fn_vjp)(np.zeros((), dtypes.float0))
self.assertEqual(primal, 3)
self.assertEqual(tangent_x, np.zeros(shape=(), dtype=float0))
self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))
  def test_vjp_of_int_fulllike(self):
    """vjp through full_like with an int fill value yields a float0 cotangent."""
    # Regression test for tangent and cotangent mismatch in convert_element_type
    # transpose rule wrt a ConstVar
    f = lax.full_like
    out, vjp = api.vjp(f, jnp.zeros((2, 2)), 1)
    self.assertAllClose(out, jnp.ones((2, 2)))
    tangent_x, tangent_y = vjp(out)
    # No gradient flows into the template array, and the int fill value gets
    # a float0 cotangent.
    self.assertAllClose(tangent_x, jnp.zeros((2, 2)))
    self.assertEqual(tangent_y, np.zeros(shape=(), dtype=float0))
def test_grad_of_int(self):
# Need real-valued output, but testing integer input.
out = api.grad(lambda x: x+0., allow_int=True)(1)
self.assertEqual(out, np.zeros(shape=(), dtype=float0))
def test_grad_of_bool(self):
def cond(pred):
return lax.cond(pred, lambda _: 1., lambda _: 2., 1.)
value, grd = api.value_and_grad(cond, allow_int=True)(True)
self.assertEqual(value, 1.)
self.assertEqual(grd, np.zeros(shape=(), dtype=float0))
def test_grad_of_int_index(self):
grad_x, grad_i = api.grad(lambda x, i: x[i], argnums=(0, 1),
allow_int=True)(np.ones(2), 1)
self.assertAllClose(grad_x, jnp.array([0., 1.]))
self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))
def test_jit_grad_of_int(self):
grad_f = api.grad(lambda x, i: x[i], argnums=(0, 1), allow_int=True)
grad_x, grad_i = jax.jit(grad_f)(np.ones(2), 1)
self.assertAllClose(grad_x, jnp.array([0., 1.]))
self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))
  def test_float0_reshape(self):
    """float0 arrays support dtype-agnostic ops such as reshape and transpose."""
    # dtype-agnostic operations are supported
    float0_array = jax.grad(lambda x: jnp.sum(x+0.),
                            allow_int=True)(np.ones((2, 4), dtype=int))
    self.assertArraysEqual(float0_array.reshape((4, 2)),
                           np.zeros((4, 2), dtype=float0))
    self.assertArraysEqual(float0_array.transpose(),
                           np.zeros((4, 2), dtype=float0))
  def test_float0_error(self):
    """Arithmetic on float0 arrays raises a TypeError by design."""
    # float0 is incompatible with other dtypes
    float0_array = jax.grad(lambda x: x+0., allow_int=True)(1)
    error_text = "float0s do not support any operations by design"
    with self.assertRaisesRegex(TypeError, error_text):
      # dispatch via Array
      _ = float0_array + jnp.zeros(())
    with self.assertRaisesRegex(TypeError, error_text):
      # dispatch via lax
      _ = lax.add(float0_array, jnp.zeros(()))
  def test_grad_complex_result_errors(self):
    """grad rejects complex-valued outputs without holomorphic=True."""
    dfn = grad(lambda x: x ** 2 + 1j)
    self.assertRaisesRegex(
      TypeError,
      (r"grad requires real-valued outputs \(output dtype that is a "
       r"sub-dtype of np.floating\), but got complex.*"),
      lambda: dfn(3.))
  def test_holomorphic_grad_of_float_errors(self):
    """grad(..., holomorphic=True) rejects real (non-complex) inputs."""
    dfn = grad(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"grad with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_holomorphic_jacrev_of_float_errors(self):
    """jacrev(..., holomorphic=True) rejects real (non-complex) inputs."""
    dfn = jacrev(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"jacrev with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_holomorphic_jacfwd_of_float_errors(self):
    """jacfwd(..., holomorphic=True) rejects real (non-complex) inputs."""
    dfn = jacfwd(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"jacfwd with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_jacfwd_of_complex_errors(self):
    """jacfwd without holomorphic=True rejects complex inputs."""
    dfn = jacfwd(lambda x: x ** 2)
    self.assertRaisesRegex(
      TypeError,
      (r"jacfwd requires real-valued inputs \(input dtype that is a "
       r"sub-dtype of np.floating\), but got complex.*"),
      lambda: dfn(3. + 1j))
  def test_compiler_ir(self):
    """lower(...).compiler_ir emits the expected ops in HLO and StableHLO dialects."""
    # TODO(phawkins): merge these tests with the `xla_computation` tests.
    def e(x):
      return jnp.sin(jnp.cos(x))
    hlo = api.jit(e).lower(2.).compiler_ir(dialect="hlo").as_hlo_text()
    self.assertIn(' cosine', hlo)
    self.assertIn(' sine', hlo)
    stablehlo = str(api.jit(e).lower(2.).compiler_ir(dialect="stablehlo"))
    self.assertIn("stablehlo.cosine", stablehlo)
    self.assertIn("stablehlo.sine", stablehlo)
  def test_concurrent_device_get_and_put(self):
    """Round-tripping arrays through device_put/device_get is thread-safe."""
    def f(x):
      # Repeated put/get round-trips should return the original value.
      for _ in range(100):
        y = jax.device_put(x)
        x = jax.device_get(y)
      return x

    xs = [self.rng().randn(i) for i in range(10)]
    # Make sure JAX backend is initialised on the main thread since some JAX
    # backends install signal handlers.
    jax.device_put(0)
    with concurrent.futures.ThreadPoolExecutor() as executor:
      futures = [executor.submit(partial(f, x)) for x in xs]
      ys = [f.result() for f in futures]

    for x, y in zip(xs, ys):
      self.assertAllClose(x, y)
def test_dtype_from_builtin_types(self):
for dtype in [bool, int, float, complex]:
with self.assertNoWarnings():
x = jnp.array(0, dtype=dtype)
self.assertEqual(x.dtype, dtypes.canonicalize_dtype(dtype))
  def test_dtype_warning(self):
    """Requesting 64-bit dtypes warns when x64 mode is disabled."""
    # cf. issue #1230
    if config.enable_x64.value:
      raise unittest.SkipTest("test only applies when x64 is disabled")

    def check_warning(warn, nowarn):
      # `warn` must emit the truncation warning; `nowarn` must be silent.
      with self.assertWarnsRegex(UserWarning, "Explicitly requested dtype"):
        warn()
      with self.assertNoWarnings():
        nowarn()

    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype="float32"))
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype=float))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3, dtype=float))
    check_warning(lambda: jnp.ones_like(3, dtype=np.int64),
                  lambda: jnp.ones_like(3, dtype=np.int32))
    check_warning(lambda: jnp.zeros(3, dtype="int64"),
                  lambda: jnp.zeros(3, dtype="int32"))
    check_warning(lambda: jnp.zeros_like(3, dtype="float64"),
                  lambda: jnp.zeros_like(3, dtype="float32"))
    check_warning(lambda: jnp.full((2, 3), 1, dtype="int64"),
                  lambda: jnp.full((2, 3), 1))
    check_warning(lambda: jnp.ones(3).astype("float64"),
                  lambda: jnp.ones(3).astype("float32"))
    check_warning(lambda: jnp.eye(3, dtype=np.float64),
                  lambda: jnp.eye(3))
    check_warning(lambda: jnp.arange(3, dtype=np.float64),
                  lambda: jnp.arange(3, dtype=np.float32))
    check_warning(lambda: jnp.linspace(0, 3, dtype=np.float64),
                  lambda: jnp.linspace(0, 3, dtype=np.float32))
    check_warning(lambda: jnp.tri(2, dtype="float64"),
                  lambda: jnp.tri(2, dtype="float32"))
    check_warning(lambda: jnp.arange(1).astype("float64"),
                  lambda: jnp.arange(1).astype(float))
    check_warning(lambda: jnp.arange(1.0).astype("int64"),
                  lambda: jnp.arange(1.0).astype(int))
  def test_error_for_invalid_dtype(self):
    """Passing a non-array-typed value (string ndarray) to lax.add errors."""
    err_str = ("Error interpreting argument to .* as an abstract array. The problematic "
               r"value is of type .* and was passed to the function at path args\[1\].")
    with jax.enable_checks(False):
      with self.assertRaisesRegex(TypeError, err_str):
        lax.add(jnp.array(7), np.array("hello"))
    # TODO(dougalm): re-enable checks at the beginning of `bind`. We just
    # need to know which arguments to a generic primitive are ordinary operands vs functions.
    # with jax.enable_checks(True):
    #   with self.assertRaises(AssertionError):
    #     lax.add(jnp.array(7), np.array("hello"))
  def test_vmap_preserves_docstr(self):
    """vmap embeds the wrapped function's docstring in its own __doc__."""
    def superfun(a):
      """Does things with stuff."""
      pass

    self.assertRegex(api.vmap(superfun).__doc__, "\n".join([
        "Vectorized version of superfun.*",
        "",
        "Original documentation:",
        "",
        superfun.__doc__,
    ]))
def test_vmap_in_axes_list(self):
# https://github.com/jax-ml/jax/issues/2367
dictionary = {'a': 5., 'b': jnp.ones(2)}
x = jnp.zeros(3)
y = jnp.arange(3.)
def f(dct, x, y):
return dct['a'] + dct['b'] + x + y
out1 = api.vmap(f, (None, 0, 0))(dictionary, x, y)
out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y)
self.assertAllClose(out1, out2)
  def test_vmap_in_axes_non_tuple_error(self):
    """A dict-valued in_axes spec is rejected with a descriptive TypeError."""
    # https://github.com/jax-ml/jax/issues/18548
    with self.assertRaisesRegex(
        TypeError,
        re.escape("vmap in_axes must be an int, None, or a tuple of entries corresponding "
                  "to the positional arguments passed to the function, but got {'a': 0}.")):
      jax.vmap(lambda x: x['a'], in_axes={'a': 0})
  def test_vmap_in_axes_wrong_length_tuple_error(self):
    """An in_axes tuple whose length mismatches the argument count errors."""
    # https://github.com/jax-ml/jax/issues/18548
    with self.assertRaisesRegex(
        ValueError,
        re.escape("vmap in_axes must be an int, None, or a tuple of entries corresponding to the "
                  "positional arguments passed to the function, but got len(in_axes)=2, len(args)=1")):
      jax.vmap(lambda x: x['a'], in_axes=(0, {'a': 0}))({'a': jnp.zeros((3, 3))})
  def test_vmap_in_axes_tree_prefix_error(self):
    """An in_axes spec that is not a tree prefix of the value errors."""
    # https://github.com/jax-ml/jax/issues/795
    value_tree = jnp.ones(3)
    self.assertRaisesRegex(
        ValueError,
        "vmap in_axes specification must be a tree prefix of the corresponding "
        r"value, got specification \(\[0\],\) for value tree "
        + re.escape(f"{jax.tree.structure((value_tree,))}."),
        lambda: api.vmap(lambda x: x, in_axes=([0],))(value_tree)
    )
  def test_vmap_in_axes_leaf_types(self):
    """An array leaf in the in_axes spec is rejected with a TypeError."""
    with self.assertRaisesRegex(
        TypeError, r"vmap in_axes must be an int, None, or .*"):
      api.vmap(lambda x: x, in_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))
  def test_vmap_out_axes_leaf_types(self):
    """An array leaf in the out_axes spec is rejected with a TypeError."""
    with self.assertRaisesRegex(
        TypeError, r"vmap out_axes must be an int, None, or .*"):
      api.vmap(lambda x: x, out_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))
def test_vmap_unbatched_object_passthrough_issue_183(self):
# https://github.com/jax-ml/jax/issues/183
fun = lambda f, x: f(x)
vfun = api.vmap(fun, (None, 0))
ans = vfun(lambda x: x + 1, jnp.arange(3))
self.assertAllClose(ans, np.arange(1, 4), check_dtypes=False)
  def test_vmap_mismatched_keyword(self):
    """Mismatched mapped-axis sizes across positional and keyword args error."""
    # https://github.com/jax-ml/jax/issues/10193
    @jax.vmap
    def f(x, y):
      return x + y

    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"  \* one axis had size 1: axis 0 of argument x of type int32\[1\];"
        "\n"
        r"  \* one axis had size 2: axis 0 of kwargs\['y'\] of type int32\[2\]"):
      f(jnp.array([1], 'int32'), y=jnp.array([1, 2], 'int32'))
  def test_vmap_mismatched_axis_sizes_error_message_issue_705(self):
    """vmap error messages name the offending arguments, axes, and sizes."""
    # https://github.com/jax-ml/jax/issues/705
    def h(a, b):
      return jnp.sum(a) + jnp.sum(b)

    X = self.rng().randn(10, 4).astype('float32')
    U = self.rng().randn(10, 2).astype('float32')

    # Two arguments disagree on mapped-axis size.
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"  \* one axis had size 10: axis 0 of argument a of type float32\[10,4\];""\n"
        r"  \* one axis had size 2: axis 1 of argument b of type float32\[10,2\]"):
      api.vmap(h, in_axes=(0, 1))(X, U)

    # A majority of arguments agree; the minority is singled out.
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"  \* most axes \(2 of them\) had size 10, e.g. axis 0 of argument x "
        r"of type float32\[10,4\];" "\n"
        r"  \* one axis had size 2: axis 1 of argument y of type float32\[10,2\]"):
      api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X)

    # Pytree leaves are named with their path (b[0]).
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"  \* most axes \(2 of them\) had size 2, e.g. axis 1 of argument b\[0\] "
        r"of type float32\[10,2\];" "\n"
        r"  \* one axis had size 10: axis 0 of argument a of type float32\[10,4\]"):
      api.vmap(h, in_axes=(0, 1))(X, [U, U])

    error = (r"vmap was requested to map its argument along axis 0, which "
             r"implies that its rank should be at least 1, but is only 0 "
             r"\(its shape is \(\)\)")
    with self.assertRaisesRegex(ValueError, error):
      # The mapped inputs cannot be scalars
      api.vmap(lambda x: x)(1.)

    with self.assertRaisesRegex(
        ValueError, "vmap must have at least one non-None value in in_axes"):
      # If the output is mapped, there must be a non-None in_axes
      api.vmap(lambda x: x, in_axes=None)(jnp.array([1., 2.]))

    error = (r"vmap was requested to map its argument along axis 1, which "
             r"implies that its rank should be at least 2, but is only 1 "
             r"\(its shape is \(2,\)\)")
    with self.assertRaisesRegex(ValueError, error):
      api.vmap(lambda x: x, in_axes=1)(jnp.array([1., 2.]))

    # Error is: TypeError: only integer scalar arrays can be converted to a scalar index
    with self.assertRaisesRegex(
        ValueError,
        "vmap out_axes specification must be a tree prefix of the "
        "corresponding value.*"):
      api.vmap(lambda x: x, in_axes=0, out_axes=(2, 3))(jnp.array([1., 2.]))

    with self.assertRaisesRegex(
        ValueError,
        r"vmap has mapped output \(axis_name='foo'\) but out_axes is None"):
      # If the output is mapped (user-named axis), then there must be some
      # out_axes specified.
      api.vmap(lambda x: x, out_axes=None, axis_name="foo")(jnp.array([1., 2.]))

    with self.assertRaisesRegex(
        ValueError,
        "at vmap out_axes"):
      # If the output is mapped (unnamed axis), then there must be some out_axes
      # specified.
      api.vmap(lambda x: x, out_axes=None)(jnp.array([1., 2.]))
  def test_vmap_structured_in_axes(self):
    """in_axes specs work as tuples, namedtuples, dicts, and OrderedDicts."""
    A, B, C, D = 2, 3, 4, 5
    K = 6  # batch size
    x = np.ones((K, A, B))  # batch axis in different locations
    y = np.ones((B, K, C))
    z = np.ones((C, D, K))

    def foo(tree_arg):
      x, (y, z) = tree_arg
      return jnp.dot(x, jnp.dot(y, z))

    tree = (x, (y, z))
    vfoo = api.vmap(foo, in_axes=((0, (1, 2)),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))

    # namedtuple spec mirrors the namedtuple value.
    Point = collections.namedtuple("Point", ["x", "y"])
    tree = (x, Point(y, z))
    vfoo = api.vmap(foo, in_axes=((0, Point(1, 2)),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))

    def foo(tree_arg):
      x, dct = tree_arg
      y, z = dct['a'], dct['b']
      return jnp.dot(x, jnp.dot(y, z))

    # plain-dict spec mirrors the dict value.
    tree = (x, {'a': y, 'b': z})
    vfoo = api.vmap(foo, in_axes=((0, {'a': 1, 'b': 2}),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))

    tree = (x, collections.OrderedDict([('a', y), ('b', z)]))
    vfoo = api.vmap(
        foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
def test_vmap_in_axes_bool_error(self):
# https://github.com/jax-ml/jax/issues/6372
with self.assertRaisesRegex(TypeError, "must be an int"):
api.vmap(lambda x: x, in_axes=False)(jnp.zeros(3))
  def test_pmap_in_axes_bool_error(self):
    """A boolean in_axes spec for pmap is rejected with a TypeError."""
    # https://github.com/jax-ml/jax/issues/6372
    with self.assertRaisesRegex(TypeError, "must be an int"):
      api.pmap(lambda x: x, in_axes=False)(jnp.zeros(1))
  def test_vmap_empty_arguments(self):
    """vmap errors when no argument contains an array to map over."""
    with self.assertRaisesRegex(
        ValueError,
        "vmap wrapped function must be passed at least one argument "
        r"containing an array, got empty \*args=\(\{\},\) and \*\*kwargs=\{\}"):
      api.vmap(lambda x: x)({})
  def test_pmap_empty_arguments(self):
    """pmap errors when no argument contains an array to map over."""
    with self.assertRaisesRegex(
        ValueError,
        "pmap wrapped function must be passed at least one argument "
        r"containing an array, got empty \*args=\(\{\},\) and \*\*kwargs=\{\}"):
      api.pmap(lambda x: x)({})
  def test_pmap_global_cache(self):
    """Repeated identical pmap calls hit the compilation cache (one compile)."""
    def f(x, y):
      return x, y

    x = np.ones((1, 1, 1))

    # All defaults
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.pmap(f)(x, x)

    # With axis name
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.pmap(f, 'i')(x, x)

    # With in_axes and out_axes
    for x_in, y_in, x_out, y_out in it.product(*((0, 1, 2) for _ in range(4))):
      with jtu.assert_num_jit_and_pmap_compilations(1):
        for _ in range(2):
          api.pmap(f, 'i', in_axes=(x_in, y_in), out_axes=(x_out, y_out))(x, x)

    # Forward-mode AD on the outside
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.jvp(api.pmap(f), (x, x), (x, x))

    # Reverse-mode AD on the outside. One compilation for forward, one for backward.
    with jtu.assert_num_jit_and_pmap_compilations(2):
      for _ in range(2):
        api.vjp(api.pmap(f), x, x)[1]((x, x))
def test_device_array_repr(self):
rep = jnp.ones(()) + 1.
self.assertStartsWith(repr(rep), 'Array')
  def test_device_array_hash(self):
    """Device arrays are not hashable and raise TypeError under hash()."""
    rep = jnp.ones((1,)) + 1.
    _check_instance(self, rep)
    self.assertNotIsInstance(rep, collections.abc.Hashable)
    with self.assertRaisesRegex(TypeError, 'unhashable type'):
      hash(rep)
  def test_grad_without_enough_args_error_message(self):
    """grad errors clearly when argnums exceeds the positional args given."""
    # https://github.com/jax-ml/jax/issues/1696
    def f(x, y): return x + y
    df = api.grad(f, argnums=0)
    self.assertRaisesRegex(
        TypeError,
        "differentiating with respect to argnums=0 requires at least 1 "
        "positional arguments to be passed by the caller, but got only 0 "
        "positional arguments.",
        lambda: partial(df, x=0.)(y=1.))
def test_grad_object_array_error(self):
x = np.array([1, 2, 3], dtype=object)
with self.assertRaisesRegex(TypeError, ".*is not a valid JAX type"):
jax.grad(lambda x: x)(x)
  def test_jit_compilation_time_logging(self):
    """At DEBUG verbosity, a jit compile logs tracing/compiling/XLA lines."""
    @api.jit
    def f(x):
      return x * 2

    # make sure some initial warnings & cached operations already happen.
    f(jnp.ones(2))

    prev_level = logging.get_verbosity()
    try:
      logging.set_verbosity('DEBUG')
      with self.assertLogs(level=logging.DEBUG) as l:
        # New argument type (float) forces a fresh trace + compile.
        f(2.)
    finally:
      logging.set_verbosity(prev_level)
    self.assertGreaterEqual(len(l.output), 3)  # 3 lines
    self.assertTrue(any('Finished tracing' in line for line in l.output))
    self.assertTrue(any('Compiling f' in line for line in l.output))
    self.assertTrue(any('Finished XLA compilation' in line for line in l.output))
  def test_grad_of_jit_compilation_caching(self):
    """grad-of-jit compiles once for fwd and once for bwd, then caches."""
    if not hasattr(self, "assertLogs"):
      raise unittest.SkipTest("test requires assertLogs (python 3)")

    # make sure some initial warnings & cached operations already happen.
    api.grad(api.jit(lambda x: x))(1.0)

    @api.jit
    def f(x):
      return jnp.sin(x)

    prev_level = logging.get_verbosity()
    try:
      logging.set_verbosity('DEBUG')
      with self.assertLogs(level=logging.DEBUG) as l:
        ans1 = api.grad(f)(2.)
        # Second call should reuse both compilations (no new log lines).
        ans2 = api.grad(f)(3.)
    finally:
      logging.set_verbosity(prev_level)
    self.assertGreaterEqual(len(l.output), 2 * 3)  # one for fwd, one for bwd, 3 lines each

    self.assertAllClose(ans1, np.cos(2.), check_dtypes=False)
    self.assertAllClose(ans2, np.cos(3.), check_dtypes=False)
  def test_grad_of_jit_compilation_caching2(self):
    """Same as above, verified via lowering counters instead of log output."""
    # Like the above test, but instead of logging use our compile counters.

    # make sure some initial convert element type operations are pre-cached.
    api.grad(api.jit(lambda x: x))(1.0)

    @api.jit
    def f(x):
      return jnp.sin(x)

    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      _  = jax.grad(f)(3.)
    self.assertEqual(count[0], 2)  # one for fwd, one for bwd

    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      _  = jax.grad(f)(3.)
      _  = jax.grad(f)(4.)
    self.assertEqual(count[0], 0)  # cache hits on both fwd and bwd
  def test_grad_does_not_unflatten_tree_with_none(self):
    """grad never rebuilds a custom pytree with None children."""
    # https://github.com/jax-ml/jax/issues/7546
    class CustomNode(list):
      pass
    def unflatten(unused_aux_data, children):
      # The unflatten callback asserts children are real values, not None.
      self.assertIsNotNone(children[0])
      return CustomNode(children)
    tree_util.register_pytree_node(CustomNode, lambda x: (x, None), unflatten)
    grad(lambda x: x[0])(CustomNode([0.]))
  def test_trivial_computations(self):
    """jit of identity-like functions returns new buffers, not aliases."""
    x = jnp.array([1, 2, 3])
    y = api.jit(lambda x: x)(x)
    self.assertNotEqual(x.unsafe_buffer_pointer(), y.unsafe_buffer_pointer())

    z1, z2 = api.jit(lambda x: (x, x))(x)
    self.assertNotEqual(z1.unsafe_buffer_pointer(), z2.unsafe_buffer_pointer())

    x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])
    z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2)
    self.assertNotEqual(z1.unsafe_buffer_pointer(), x2.unsafe_buffer_pointer())
    self.assertNotEqual(z3.unsafe_buffer_pointer(), x1.unsafe_buffer_pointer())
    self.assertEqual(z2, 1)
  def test_nested_jit_hoisting(self):
    """A nested jit lowers to a single pjit eqn wrapping the inner jaxpr."""
    @api.jit
    def f(x, y):
      z = 2 * x
      return y + z, 3

    @api.jit
    def g(x):
      return f(2, x)

    # Intercept jaxpr_subcomp to record every jaxpr lowered to MLIR.
    mlir_jaxpr_subcomp = mlir.jaxpr_subcomp

    jaxprs = []
    def mlir_jaxpr_subcomp_and_collect(c, jaxpr, *args, **kwargs):
      jaxprs.append(jaxpr)
      return mlir_jaxpr_subcomp(c, jaxpr, *args, **kwargs)

    try:
      mlir.jaxpr_subcomp = mlir_jaxpr_subcomp_and_collect
      ans = g(3)
    finally:
      # Always restore the real function, even if lowering raised.
      mlir.jaxpr_subcomp = mlir_jaxpr_subcomp

    self.assertEqual(ans, (7, 3))

    self.assertLen(jaxprs, 2)
    outer_jaxpr, inner_jaxpr = jaxprs

    self.assertLen(outer_jaxpr.eqns, 1)
    prim_name = 'pjit'
    jaxpr_param = 'jaxpr'
    self.assertEqual(outer_jaxpr.eqns[0].primitive.name, f'{prim_name}')
    subjaxpr_1 = outer_jaxpr.eqns[0].params[f"{jaxpr_param}"]
    self.assertEqual(str(subjaxpr_1), str(inner_jaxpr))
    self.assertLen(inner_jaxpr.eqns, 2)
    self.assertEqual(inner_jaxpr.eqns[-2].primitive.name, 'mul')
    self.assertEqual(inner_jaxpr.eqns[-1].primitive.name, 'add')
  def test_primitive_compilation_cache(self):
    """Two calls to the same primitive with same arg types compile once."""
    with jtu.count_primitive_compiles() as count:
      lax.add(1, 2)
      lax.add(2, 3)
    self.assertEqual(count[0], 1)
def test_arange_jit(self):
# see https://github.com/jax-ml/jax/issues/553
def fun(x):
r = jnp.arange(x.shape[0])[x]
return r
jit(fun)(jnp.array([0, 1, 2], dtype=jnp.int32)) # doesn't crash
  def helper_save_tracer(self, x):
    """Stash `x` on the instance (used by the escaped-tracer tests) and return it."""
    self._saved_tracer = x
    return x
  def test_escaped_tracers_different_top_level_traces(self):
    """Reusing a tracer saved from a prior jit trace raises UnexpectedTracerError."""
    api.jit(self.helper_save_tracer)(0.)
    with self.assertRaisesRegex(
        UnexpectedTracerError, "Encountered an unexpected tracer"):
      api.jit(lambda x: self._saved_tracer)(0.)
  def test_escaped_tracers_cant_lift_sublevels(self):
    """Passing a saved tracer as an argument to a new jit trace errors."""
    api.jit(self.helper_save_tracer)(0.)
    with self.assertRaisesRegex(
        UnexpectedTracerError,
        re.compile(
          "Encountered an unexpected tracer",
          re.DOTALL)):
      api.jit(lambda x: x)(self._saved_tracer)
  @unittest.skip # TODO(dougalm): rethink what this should do under stackless
  def test_escaped_tracers_tracer_from_higher_level(self):
    """A tracer escaped from a grad trace errors when reused under grad."""
    api.grad(self.helper_save_tracer)(0.)
    with self.assertRaises(UnexpectedTracerError):
      api.grad(lambda x: x)(self._saved_tracer)
  def test_escaped_tracers_incompatible_sublevel(self):
    """Using a tracer escaped from an inner jit inside an outer jit errors."""
    def func1(x):
      api.jit(self.helper_save_tracer)(0.)
      # Use the tracer
      return x + self._saved_tracer
    with self.assertRaisesRegex(
        UnexpectedTracerError,
        re.compile("Encountered an unexpected tracer",
                   re.DOTALL)):
      api.jit(func1)(2.)
  def test_escaped_tracers_cant_lift(self):
    """Using a tracer escaped from an inner grad inside an outer grad errors."""
    def func1(x):
      api.grad(self.helper_save_tracer)(0.)
      return x + self._saved_tracer
    with self.assertRaisesRegex(
        UnexpectedTracerError,
        re.compile("unexpected tracer")):
      api.grad(func1)(2.)
  def test_escaped_tracers_not_among_input_tracers(self):
    """A tracer escaped from grad-of-input errors inside an enclosing jit."""
    def func1(x):
      api.grad(self.helper_save_tracer)(x)
      # Use the tracer
      return x + self._saved_tracer

    msg = "Encountered an unexpected tracer"
    with self.assertRaisesRegex(
        UnexpectedTracerError, re.compile(msg, re.DOTALL)):
      api.jit(func1)(2.0)
  def test_escaped_tracer_omnistaging(self):
    """A tracer leaked via a nonlocal is reported with its creation line."""
    count = 1

    @jit
    def f():
      nonlocal count
      count = jnp.add(count, 1)
    f()  # leaked a tracer! but currently undetected

    def f(x, c):
      # Uses the leaked tracer stored in `count`.
      jnp.add(count, 1)
      return None, None

    @jit
    def g():
      lax.scan(f, None, None, length=2)

    with self.assertRaisesRegex(UnexpectedTracerError,
                                "was created on line"):
      g()
  def test_escaped_tracer_omnistaging_top_trace(self):
    """A tracer leaked from a top-level scan errors when used under jit."""
    count = 1

    def f(_, __):
      nonlocal count
      count = jnp.add(count, 1)
      return None, None

    lax.scan(f, None, None, length=2)  # leaked a tracer! (of level 1!)

    with self.assertRaisesRegex(UnexpectedTracerError,
                                "was created on line"):
      # The following call will try and raise the ones array to the count tracer
      # level, which is no longer live.
      jax.jit(jnp.add)(jnp.ones(()), count)
  def test_escaped_tracer_transform_name(self):
    """UnexpectedTracerError messages name the transform that created the tracer."""
    with self.assertRaisesRegex(UnexpectedTracerError,
                                "for jit"):
      jax.jit(self.helper_save_tracer)(1)
      _ = self._saved_tracer+1

    with self.assertRaisesRegex(UnexpectedTracerError,
                                "for pmap"):
      jax.pmap(self.helper_save_tracer)(jnp.ones((1, 2)))
      _ = self._saved_tracer+1

    # eval_shape traces via jit, so the message says "for jit".
    with self.assertRaisesRegex(UnexpectedTracerError,
                                "for jit"):
      jax.eval_shape(self.helper_save_tracer, 1)
      _ = self._saved_tracer+1
  def test_escaped_tracer_shape_dtype(self):
    """The escaped-tracer error message includes the tracer's dtype and shape."""
    with self.assertRaisesRegex(core.UnexpectedTracerError, r"int32\[4,3\]"):
      jax.jit(self.helper_save_tracer)(jnp.ones((4, 3), dtype=jnp.int32))
      _ = self._saved_tracer+1
  def test_pmap_static_kwarg_error_message(self):
    """Passing a static_broadcasted arg by keyword yields a clear error."""
    # https://github.com/jax-ml/jax/issues/3007
    def f(a, b):
      return a + b
    g = jax.pmap(f, static_broadcasted_argnums=(1,))

    msg = (r"pmapped function has static_broadcasted_argnums=\(1,\) but was "
           r"called with only 1 positional argument. All static broadcasted "
           r"arguments must be passed positionally.")
    with self.assertRaisesRegex(ValueError, msg):
      g(jnp.ones((1, 1)), b=1)
def test_vmap_unmapped_last(self):
@partial(jax.vmap, out_axes=-1)
def f(x):
return np.zeros((2,))
f(np.zeros((5,)))
  # TODO(jakevdp): re-enable this if possible.
  @unittest.skipIf(True, "broken by convert_element_type change.")
  def test_xla_constant_dedup(self):
    """A constant used twice appears exactly once in the lowered HLO."""
    y = np.array([7, 14], dtype=np.float32)
    def f(x):
      return x + y + y
    x = np.array([1, 2], dtype=np.float32)
    hlo_lines = jax.jit(f).lower(x).as_text('hlo').split('\n')
    hlo_lines = {s.strip() for s in hlo_lines}
    self.assertIn('constant.1 = f32[2]{0} constant({7, 14})', hlo_lines)
    self.assertNotIn('constant.2 = f32[2]{0} constant({7, 14})', hlo_lines)
def test_eval_context(self):
@jit
def f():
with core.eval_context():
assert jnp.add(1, 1) == 2
f() # doesn't crash
  def test_concrete_error_because_arg_unary(self):
    """Branching on one traced argument names that argument in the error."""
    @jax.jit
    def f(x):
      if x > 0:
        return x
      else:
        return 0

    msg = r"on the value of the argument x"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1)
  def test_concrete_error_because_arg_binary(self):
    """Branching on two traced arguments names both in the error."""
    @jax.jit
    def f(x, y):
      if x > y:
        return x
      else:
        return y

    msg = r"on the values of the arguments x and y"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2)
  def test_concrete_error_because_arg_ternary(self):
    """Only the arguments actually branched on (x and z) are named."""
    @jax.jit
    def f(x, y, z):
      if x > z:
        return x
      else:
        return y

    msg = r"on the values of the arguments x and z"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2, 3)

    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2, z=3)

    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, y=2, z=3)
  def test_concrete_error_because_arg_varargs(self):
    """Branching on *args values names `args` in the error message."""
    @jax.jit
    def f(*args):
      x, y, z = args
      if x > z:
        return x
      else:
        return y

    msg = r"on the values of the arguments args"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2, 3)
  def test_concrete_error_because_arg_kwargs(self):
    """Branching on **kwargs values names `kwargs` in the error message."""
    @jax.jit
    def f(**kwargs):
      x, y, z = kwargs['x'], kwargs['y'], kwargs['z']
      if x > z:
        return x
      else:
        return y

    msg = r"on the values of the arguments kwargs"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(x=1, y=2, z=3)
  def test_concrete_error_because_arg_pytree(self):
    """Branching on a pytree argument's leaf names the whole argument (xy)."""
    @jax.jit
    def f(xy, z):
      x, y = xy
      if x > 0:
        return x
      else:
        return y

    msg = r"on the value of the argument xy"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f((1, 2), z=3)
  def test_concrete_error_because_const(self):
    """Branching on a traced constant points at the originating lines."""
    @jax.jit
    def f():
      assert jnp.add(1, 1) > 0

    msg = "on these lines"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f()
  def test_concrete_error_because_const_2(self):
    """Many originating lines are truncated with an explanatory note."""
    @jax.jit
    def f():
      result = sum(jnp.add(1, 1) for _ in range(6))
      assert result > 0

    msg = "Additional originating lines are not shown."
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f()
  def test_concrete_error_with_nested_call(self):
    """Branching inside a nested jit call still names the inner argument."""
    @jax.jit
    def f(x, y):
      if y:
        return x

    @jax.jit
    def g(x):
      return f(x, True)

    msg = r"on the value of the argument y"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      g(1)
def test_linearize_aux(self):
def fn(x):
return x * 2 - 3, x > 0
f, lin_fn, aux = api.linearize(fn, 3.4, has_aux=True)
tang = lin_fn(5.)
self.assertAllClose(f, 3.8)
self.assertAllClose(tang, 10.)
self.assertEqual(aux, True)
  def test_linearize_aval_error(self):
    """The linearized function rejects tangents with mismatched avals."""
    # https://github.com/jax-ml/jax/issues/4622
    f = lambda x: x

    # these should not error
    _, f_jvp = api.linearize(f, 1.)
    f_jvp(1.)
    _, f_jvp = api.linearize(f, np.ones(2, np.int32))
    f_jvp(np.zeros(2, float0))

    # these should error
    _, f_jvp = api.linearize(f, 1.)
    with self.assertRaisesRegex(ValueError, "tangent values inconsistent"):
      # int tangent for a float primal is inconsistent.
      f_jvp(1)
    _, f_jvp = api.linearize(f, np.ones(2, np.int32))
    with self.assertRaisesRegex(ValueError, "tangent values inconsistent"):
      # int primal requires a float0 tangent, not an int32 one.
      f_jvp(np.ones(2, np.int32))
  def test_grad_of_token_consuming_primitive(self):
    """vjp through a primitive that consumes a token does not crash."""
    # https://github.com/jax-ml/jax/issues/5463
    tokentest_p = core.Primitive("tokentest")
    tokentest_p.def_impl(partial(xla.apply_primitive, tokentest_p))
    tokentest_p.def_abstract_eval(lambda x, y: x)
    mlir.register_lowering(tokentest_p, lambda ctx, x, y: [x])
    # JVP is defined for the array operand only; the token has no tangent.
    ad.defjvp(tokentest_p, (lambda g, x, token: x), None)

    token = jax.lax.create_token(123)
    arr = jnp.ones((3, 2))
    res, vjp_fun = jax.vjp(lambda x: tokentest_p.bind(x, token), arr)
    # Should not crash.
    vjp_fun(arr)
def test_jit_returning_token(self):
x = jax.jit(jax.lax.create_token)(1.0)
self.assertIsInstance(x, core.Token)
  def test_jit_capturing_token(self):
    """A token closed over by a jitted function is returned as a Token."""
    tok = jax.lax.create_token()
    _, y = jax.jit(lambda x: (x + 2, tok))(7)
    self.assertIsInstance(y, core.Token)
  def test_leak_checker_catches_a_jit_leak(self):
    """Appending a jit tracer to an outer list trips the leak checker."""
    with jax.checking_leaks():
      lst = []

      @jit
      def f(x):
        lst.append(x)
        return x

      with self.assertRaisesRegex(Exception, r"Leaked"):
        f(3)
  def test_leak_checker_catches_a_pmap_leak(self):
    """Appending a pmap tracer to an outer list trips the leak checker."""
    with jax.checking_leaks():
      lst = []

      @api.pmap
      def f(x):
        lst.append(x)
        return x

      with self.assertRaisesRegex(Exception, r"Leaked"):
        f(np.ones(1))
  def test_leak_checker_catches_a_grad_leak(self):
    """Appending a grad tracer to an outer list trips the leak checker."""
    with jax.checking_leaks():
      lst = []

      def f(x):
        lst.append(x)
        return x

      with self.assertRaisesRegex(Exception, r"Leaked trace"):
        api.grad(f)(3.)
  def test_leak_checker_avoids_false_positives(self):
    """Well-behaved vmap/jit/grad/pmap uses do not trip the leak checker."""
    with jax.checking_leaks():
      api.vmap(lambda x: x)(np.arange(3.))  # doesn't crash
      @jit
      def f(x):
        return x
      f(3)  # doesn't crash
      api.vmap(f)(np.arange(3))  # doesn't crash
      api.grad(f)(3.)  # doesn't crash

      @api.pmap
      def f(x):
        return x
      f(np.ones(1))  # doesn't crash
      api.vmap(f)(np.ones((1, 1)))  # doesn't crash
  def test_leak_checker_catches_a_scan_leak(self):
    """A scan body that appends its carry tracer to a list trips the checker."""
    with jax.checking_leaks():
      lst = []

      to_scan = lambda c, x: (lst.append(c) or jnp.sin(c), None)

      with self.assertRaisesRegex(Exception, r"Leaked trace"):
        lax.scan(to_scan, 1., np.arange(3.))
def test_leak_checker_avoids_false_positives_scan(self):
with jax.checking_leaks():
to_scan = lambda c, x: (jnp.sin(c), None)
lax.scan(to_scan, 1., np.arange(3.)) # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_jvp(self):
    """jvp through a scan does not trip the tracer leak checker."""
    with jax.checking_leaks():
      to_scan = lambda c, x: (c, None)

      def f(x):
        lax.scan(to_scan, x, None, length=1)
      api.jvp(f, (3.,), (1.,))  # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_vmap(self):
    """vmap over a scan with a constant carry does not trip the checker."""
    with jax.checking_leaks():
      to_scan = lambda c, _: (1., None)

      @api.vmap
      def f(x):
        lax.scan(to_scan, x, None, length=1)
      f(np.arange(5.))  # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_vmap_2(self):
    """vmap over a scan with a pass-through carry does not trip the checker."""
    with jax.checking_leaks():
      to_scan = lambda c, _: (c, None)

      @api.vmap
      def f(x):
        lax.scan(to_scan, x, None, length=1)
      f(np.arange(5.))  # doesn't crash
  def test_leak_checker_catches_a_sublevel_leak(self):
    """A tracer leaked from an inner jit into the outer jit is detected."""
    with jax.checking_leaks():
      @jit
      def f(x):
        lst = []
        @jit
        def g(x):
          lst.append(x)
          return x

        x = g(x)
        return x

      msg = r'Leaked trace DynamicJaxprTrace'
      with self.assertRaisesRegex(Exception, f"{msg}"):
        f(3)
  def test_leak_checker_avoids_false_positive_custom_jvp(self):
    """A custom_jvp function under jit does not trip the leak checker."""
    # see https://github.com/jax-ml/jax/issues/5636
    with jax.checking_leaks():
      @jax.custom_jvp
      def t(y):
        return y

      def t_jvp(p, t):
        pass

      t.defjvp(t_jvp)

      @jit
      def s(y):
        return t(y)
      s(3)  # doesn't crash
  def test_leak_checker_internal_error(self):
    """vjp of a checkpointed function under jit passes the leak checker."""
    def apply_fn(inp):
      fn = jax.checkpoint(lambda x: jax.nn.relu(1.0 * x))
      return jax.vjp(fn, inp)

    with jax.check_tracer_leaks():
      jax.jit(apply_fn)(1.0)  # don't crash
  def test_leak_checker_reference_chain(self):
    """The leak error reports where the tracer was created and who refers to it."""
    class A:
      def __init__(self, dct):
        self.dct = dct

    a = A({})
    x = jnp.arange(3)

    def sketch(x):
      # Leak the vmap tracer via a closure stored on an outer object.
      def foo():
        return x
      a.dct['hi'] = [foo]
      return x

    # TODO(mattjj): full test msg below fails (harmlessly) on CI, investigate
    msg = (
        r"This BatchTracer with object id [0-9]+ was created on line:\n"
        r"  .*\n"
        r"<BatchTracer [0-9]+> is referred to by"
    )

    # msg = (
    #     r"This BatchTracer with object id [0-9]+ was created on line:\n"
    #     r"  .*\n"
    #     r"<BatchTracer [0-9]+> is referred to by <function [0-9]+> \(foo\) "
    #     r"closed-over variable x\n"
    #     r"<function [0-9]+> is referred to by <list [0-9]+>\[0\]\n"
    #     r"<list [0-9]+> is referred to by <dict [0-9]+>\['hi'\]\n"
    #     r"<dict [0-9]+> is referred to by <A [0-9]+>\.dct\n"
    # )

    with jax.check_tracer_leaks():
      with self.assertRaisesRegex(Exception, msg):
        jax.vmap(sketch)(x)
def test_default_backend(self):
first_local_device = jax.local_devices()[0]
self.assertEqual(first_local_device.platform, jax.default_backend())
  @jtu.skip_on_devices("cpu")
  def test_default_device(self):
    """jax.default_device context manager redirects array placement."""
    system_default_devices = jnp.add(1, 1).devices()
    self.assertLen(system_default_devices, 1)
    test_device = jax.devices("cpu")[-1]

    # Sanity check creating array using system default device
    self.assertEqual(jnp.ones(1).devices(), system_default_devices)

    # Create array with default_device set
    with jax.default_device(test_device):
      # Hits cached primitive path
      self.assertEqual(jnp.ones(1).devices(), {test_device})
      # Uncached
      self.assertEqual(jnp.zeros((1, 2)).devices(), {test_device})

    # Test that we can reset to system default device
    self.assertEqual(jnp.ones(1).devices(), system_default_devices)
  def test_dunder_jax_array(self):
    """Objects with __jax_array__ are accepted by jnp ops and transforms."""
    # https://github.com/jax-ml/jax/pull/4725

    class AlexArray:
      def __init__(self, jax_val):
        self.jax_val = jax_val
      def __jax_array__(self):
        return self.jax_val
      dtype = property(lambda self: self.jax_val.dtype)
      shape = property(lambda self: self.jax_val.shape)

    x = AlexArray(jnp.array([1., 2., 3.]))
    y = jnp.sin(x)
    self.assertAllClose(y, jnp.sin(jnp.array([1., 2., 3.])))
    y = api.grad(api.jit(lambda x: jnp.sin(x).sum()))(x)
    self.assertAllClose(y, jnp.cos(jnp.array([1., 2., 3.])))

    x = AlexArray(jnp.array([[1., 2., 3.]]))
    y = api.pmap(jnp.sin)(x)
    self.assertAllClose(y, jnp.sin(jnp.array([[1., 2., 3.]])))

    x = jnp.array(1)
    a = AlexArray(x)
    for f in [jnp.isscalar, jnp.size, jnp.shape, jnp.dtype]:
      self.assertEqual(f(x), f(a))

    # jnp.array unwraps __jax_array__ objects, including nested in pytrees.
    x = AlexArray(jnp.array(1))
    a1 = jnp.array(x)
    self.assertAllClose(1, a1)

    a2 = jnp.array(((x, x), [x, x]))
    self.assertAllClose(np.array(((1, 1), (1, 1))), a2)
  def test_eval_shape_weak_type(self):
    """eval_shape caches across calls and preserves weak_type."""
    # https://github.com/jax-ml/jax/issues/23302
    arr = jax.numpy.array(1)

    with jtu.count_jit_tracing_cache_miss() as count:
      jax.eval_shape(jax.numpy.array, 1)
      out = jax.eval_shape(jax.numpy.array, 1)

    # Second identical call must hit the tracing cache.
    self.assertEqual(count[0], 1)
    self.assertTrue(out.weak_type)
    self.assertEqual(out.weak_type, arr.weak_type)
  def test_dunder_jax_array_bug(self):
    """A pytree node that also defines __jax_array__ works with asarray/jit."""
    @jax.tree_util.register_pytree_node_class
    class A:
      x: jax.Array

      def __init__(self, x: jax.Array):
        self.x = x

      def tree_flatten(self):
        return ((self.x,), None)

      @classmethod
      def tree_unflatten(cls, _, children):
        x, = children
        return cls(x)

      def __jax_array__(self) -> jax.Array:
        return self.x

      # Forward array-protocol attributes to the wrapped value.
      ndim = property(operator.attrgetter('x.ndim'))
      dtype = property(operator.attrgetter('x.dtype'))
      shape = property(operator.attrgetter('x.shape'))

    a = A(jnp.ones((3, 3)))
    jnp.asarray(a)  # don't crash

    f = jax.jit(jnp.matmul)
    f(a, a)  # don't crash
  def test_constant_handler_mro(self):
    """IntEnum constants are handled by the int constant handler (MRO lookup)."""
    # https://github.com/jax-ml/jax/issues/6129

    class Foo(enum.IntEnum):
      bar = 1

    @api.pmap
    def f(_):
      return Foo.bar

    ans = f(jnp.arange(1))  # doesn't crash
    expected = jnp.arange(1) + 1
    self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters([
      {"testcase_name": f"{dtype.__name__}", "dtype": dtype}
      for dtype in jtu.dtypes.all])
  def test_constant_handlers(self, dtype):
    """Closed-over scalar constants of every supported dtype lower cleanly."""
    # https://github.com/jax-ml/jax/issues/9380
    @jax.jit
    def f():
      return jnp.exp(dtype(0))
    f()  # doesn't error
  def test_vmap_make_jaxpr_close_over_tracer(self):
    """make_jaxpr of a function closing over a vmap tracer does not crash."""
    def run(inp):
      def f(x, y):
        return x + y
      g = lambda x: f(x, inp)
      jaxpr = jax.make_jaxpr(g)(1)
      return jax.core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 1)

    jax.vmap(run)(jnp.arange(2))  # doesn't crash
def test_large_python_ints(self):
with self.assertRaises(OverflowError):
jnp.multiply(2 ** 100, 3.)
out = lax.convert_element_type(2 ** 100, jnp.float32) # doesn't crash
self.assertArraysEqual(out, np.float32(2 ** 100))
  def test_dot_precision_context_manager(self):
    """jax.default_matmul_precision controls the precision recorded in jaxprs."""
    x = jnp.zeros((2, 2))
    with jax.default_matmul_precision(None):
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    # self.assertIn('precision=None', str(jaxpr))
    self.assertIs(jaxpr.jaxpr.eqns[0].params['precision'], None)
    with jax.default_matmul_precision("bfloat16"):
      x @ x  # doesn't crash
      jaxpr = jax.make_jaxpr(op.matmul)(x, x)
      self.assertIn('Precision.DEFAULT', str(jaxpr))
    with jax.default_matmul_precision("tensorfloat32"):
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
      self.assertIn('Precision.HIGH', str(jaxpr))
    with jax.default_matmul_precision("float32"):
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
      self.assertIn('Precision.HIGHEST', str(jaxpr))
    # An explicit precision= argument overrides the context manager.
    dot = partial(jnp.dot, precision=lax.Precision.HIGHEST)
    with jax.default_matmul_precision("tensorfloat32"):
      dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(dot)(x, x)
      self.assertIn('Precision.HIGHEST', str(jaxpr))
  def test_dot_precision_flag(self):
    """The config-level default_matmul_precision setting is reflected in jaxprs."""
    # NOTE(review): both stanzas below are identical — presumably one was meant
    # to exercise a different configuration mechanism; confirm intent.
    x = jnp.zeros((2, 2))
    with config.default_matmul_precision("tensorfloat32"):
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    self.assertIn('Precision.HIGH', str(jaxpr))
    with config.default_matmul_precision("tensorfloat32"):
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    self.assertIn('Precision.HIGH', str(jaxpr))
  def test_dot_precision_forces_retrace(self):
    """Changing the default matmul precision invalidates the tracing cache."""
    num_traces = 0
    def g(x):
      nonlocal num_traces
      num_traces += 1
      return jnp.dot(x, x)
    def f_cond(x):
      return lax.cond(True, g, g, x)
    @jax.jit
    def f_jit(x):
      nonlocal num_traces
      num_traces += 1
      return jnp.dot(x, x)
    for f in [f_jit, f_cond]:
      # Use _read() to read the flag value rather than threadlocal value.
      precision = config._read("jax_default_matmul_precision")
      try:
        num_traces = 0
        x = jnp.zeros((2, 2))
        f(x)
        self.assertEqual(num_traces, 1)
        f(x)  # second identical call: cache hit, no new trace
        self.assertEqual(num_traces, 1)
        with jax.default_matmul_precision("tensorfloat32"):
          f(x)  # precision change forces a retrace
          self.assertEqual(num_traces, 2)
        config.update("jax_default_matmul_precision", "float32")
        f(x)
        self.assertGreaterEqual(num_traces, 2)
        nt = num_traces
        f(x)
        self.assertEqual(num_traces, nt + 1)
        f(x)
        self.assertEqual(num_traces, nt + 1)
      finally:
        config.update("jax_default_matmul_precision", precision)
  def test_backward_pass_ref_dropping(self):
    """The backward pass drops references to cotangents as soon as they are consumed."""
    refs = []
    @jax.custom_vjp
    def f(x):
      return x
    def f_fwd(x):
      return x, None
    def f_rev(_, g):
      # By the time the next rule runs, the previous zero must be collectable.
      assert len(refs) != 2 or refs[0]() is None
      zero = np.zeros(())
      refs.append(weakref.ref(zero))
      return (zero,)
    f.defvjp(f_fwd, f_rev)
    api.grad(lambda x: f(f(f(x))))(1.)
def test_jit_inline(self):
@partial(api.jit, inline=False)
def f(x):
return x * 2
jaxpr = api.make_jaxpr(f)(3)
self.assertIn('pjit', str(jaxpr))
@partial(api.jit, inline=True)
def f(x):
return x * 2
jaxpr = api.make_jaxpr(f)(3)
self.assertNotIn('pjit', str(jaxpr))
  # Repro for https://github.com/jax-ml/jax/issues/7229.
  def test_compute_with_large_transfer(self):
    """jit results match eager when inputs require large, possibly async transfers."""
    def f(x, delta):
      return x + jnp.asarray(delta, x.dtype)
    # A large and potentially unaligned array to trigger non-zero-copy and
    # async device array copy.
    xs = self.rng().uniform(0., 1., size=(10, 131, 111, 3)).astype(np.float32)
    for x in xs:
      delta = self.rng().uniform(-0.5, 0.5, size=())
      jitted_f = api.jit(f)
      np.testing.assert_allclose(jitted_f(x, delta), f(x, delta))
  def test_vjp_fun_jit(self):
    """The function returned by vjp can be returned from and passed to jitted functions."""
    # test that the function returned by vjp can be returned
    # from and passed to jitted functions
    f = lambda x: 2. * x
    @partial(jit, static_argnums=0)
    def linearize_vjp(f, x):
      _, vjp_fun = api.vjp(f, x)
      return vjp_fun
    linearized = linearize_vjp(f, 1.)
    actual = jit(lambda f, x: f(x))(linearized, 3.)
    expected = (6.,)
    self.assertEqual(actual, expected)
  def test_linearize_fun_jit(self):
    """The function returned by linearize can be returned from and passed to jitted functions."""
    # test that the function returned by linearize can be returned
    # from and passed to jitted functions
    f = lambda x: 2. * x
    @partial(jit, static_argnums=0)
    def linearize(f, x):
      _, jvp_fun = api.linearize(f, x)
      return jvp_fun
    linearized = linearize(f, 1.)
    actual = jit(lambda f, x: f(x))(linearized, 3.)
    expected = 6.
    self.assertEqual(actual, expected)
  def test_linear_transpose_fun_jit(self):
    """The function returned by linear_transpose can be returned from and passed to jitted functions."""
    # test that the function returned by linear_transpose can be returned
    # from and passed to jitted functions
    f = lambda x: 2. * x
    @partial(jit, static_argnums=0)
    def transpose(f, x):
      return api.linear_transpose(f, x)
    transposed = transpose(f, 1.)
    actual = jit(lambda f, x: f(x))(transposed, 3.)
    expected = (6.,)
    self.assertEqual(actual, expected)
def test_leaked_tracer_issue_7613(self):
# from https://github.com/jax-ml/jax/issues/7613
import numpy.random as npr
def sigmoid(x):
return 1. / (1. + jnp.exp(-x))
x = jnp.ones((1, 50))
A = jnp.array(npr.randn(50, 50), dtype=x.dtype)
@jax.jit
def loss(A, x):
h = jax.nn.sigmoid(A * x)
return jnp.sum((h - x)**2)
with jax.checking_leaks():
_ = jax.grad(loss)(A, x) # doesn't crash
  def test_vmap_caching(self):
    """Repeated hessian-of-jit calls stop triggering new lowerings after warmup (#7621)."""
    # https://github.com/jax-ml/jax/issues/7621
    f = lambda x: jnp.square(x).mean()
    jf = jax.jit(f)
    x = jax.random.uniform(jax.random.key(0), shape=(8, 4))
    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      for _ in range(5):
        jax.hessian(jf)(x).block_until_ready()
      n = count[0]
      # The exact number of compilations may vary depending on the number of
      # jit decorators in the function above, but it should not grow after an
      # initial warmup phase.
      for _ in range(5):
        jax.hessian(jf)(x).block_until_ready()
      self.assertEqual(count[0], n)
  def test_jnp_array_doesnt_device_put(self):
    """Tracing jnp.array of a Python scalar must not trigger a device transfer."""
    with jtu.count_device_put() as count:
      api.make_jaxpr(lambda: jnp.array(3))()
    self.assertEqual(count[0], 0)
def test_rank_promotion_forces_retrace(self):
num_traces = 0
def g(x):
nonlocal num_traces
num_traces += 1
return x + x
def f_cond(x):
return lax.cond(True, g, g, x)
@jax.jit
def f_jit(x):
nonlocal num_traces
num_traces += 1
return x + x
for f in [f_jit, f_cond]:
# Use _read() to read the flag value rather than threadlocal value.
allow_promotion = config._read("jax_numpy_rank_promotion")
try:
config.update("jax_numpy_rank_promotion", "allow")
num_traces = 0
@jax.jit
def f(x):
nonlocal num_traces
num_traces += 1
return x + x
x = jnp.zeros((2, 2))
f(x)
self.assertEqual(num_traces, 1)
f(x)
self.assertEqual(num_traces, 1)
with jax.numpy_rank_promotion("warn"):
f(x)
self.assertEqual(num_traces, 2)
config.update("jax_numpy_rank_promotion", "raise")
f(x)
self.assertGreaterEqual(num_traces, 2)
nt = num_traces
f(x)
self.assertEqual(num_traces, nt + 1)
f(x)
self.assertEqual(num_traces, nt + 1)
finally:
config.update("jax_numpy_rank_promotion", allow_promotion)
  def test_grad_negative_argnums(self):
    """grad accepts negative argnums indexing from the end of the argument list."""
    def f(x, y):
      return x.sum() * y.sum()
    x = jax.random.normal(jax.random.key(0), (16, 16))
    y = jax.random.normal(jax.random.key(1), (16, 16))
    g = jax.grad(f, argnums=-1)
    g(x, y)  # doesn't crash
def test_jit_negative_static_argnums(self):
@partial(jax.jit, static_argnums=-1)
def g(x, y):
assert isinstance(y, int)
return x * y
for i in range(3): # Loop verifies we exercise both Python and C++ dispatch
self.assertEqual(2 * i, g(2, i), msg=i)
  def test_fastpath_cache_confusion(self):
    """Evaluating a jaxpr of one nested-jit pair must not corrupt another's fastpath cache (#12542)."""
    # https://github.com/jax-ml/jax/issues/12542
    @jax.jit
    def a(x):
      return ()
    @jax.jit
    def b(x):
      return a(x)
    @jax.jit
    def g(x):
      return x, x
    @jax.jit
    def h(x):
      return g(x)
    jaxpr = jax.make_jaxpr(h)(7)
    core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 7)
    b(8)  # don't crash
  def test_fastpath_cache_confusion2(self):
    """Same as test_fastpath_cache_confusion, with a nullary inner jitted function."""
    @jax.jit
    def a():  # note nullary function, still staged out though
      return ()
    @jax.jit
    def b(x):
      return a()
    @jax.jit
    def g(x):
      return x, x
    @jax.jit
    def h(x):
      return g(x)
    jaxpr = jax.make_jaxpr(h)(7)
    core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 7)
    b(8)  # don't crash
  def test_vjp_multiple_arguments_error_message(self):
    """Calling a vjp function with the wrong arity names the original function (#13099)."""
    # https://github.com/jax-ml/jax/issues/13099
    def foo(x):
      return (x, x)
    _, f_vjp = jax.vjp(foo, 1.0)
    with self.assertRaisesRegex(TypeError, "applied to foo"):
      f_vjp(1.0, 1.0)
  def test_shapedtypestruct_sharding_error(self):
    """ShapeDtypeStruct rejects a PartitionSpec where a Sharding is required."""
    with self.assertRaisesRegex(
        ValueError,
        "sharding should be an instance of `jax.sharding.Sharding`."):
      jax.ShapeDtypeStruct((8, 2), np.float32,
                           sharding=jax.sharding.PartitionSpec('x'))
  def test_make_jaxpr_weakref(self):
    """make_jaxpr works on callables (e.g. NamedTuple instances) that cannot be weakly referenced."""
    class Foo(NamedTuple):
      x: int
      def __call__(self, y):
        return self.x + y
    jax.make_jaxpr(Foo(1))(3)  # don't crash
  def test_make_jaxpr_name(self):
    """make_jaxpr wrappers expose a derived __name__/__qualname__ and the jax module."""
    def foo(x, y, z):
      return x + y + z
    jfoo = jax.make_jaxpr(foo)
    self.assertEqual(jfoo.__name__, f"make_jaxpr({foo.__name__})")
    self.assertEqual(jfoo.__qualname__, f"make_jaxpr({foo.__qualname__})")
    self.assertEqual(jfoo.__module__, "jax")
  def test_inner_jit_function_retracing(self):
    """An inner jitted function called repeatedly by an outer jit is traced only once (#7155)."""
    # https://github.com/jax-ml/jax/issues/7155
    inner_count = outer_count = 0
    @jax.jit
    def inner_fn(state):
      nonlocal inner_count
      inner_count += 1
      return 2*state
    @jax.jit
    def outer_fn(x):
      nonlocal outer_count
      outer_count += 1
      old_x = x
      for _ in range(10):
        x = inner_fn(x)
      x = x + old_x
      return x
    state = jnp.arange(5, dtype=jnp.uint32)
    inner_fn(state)
    outer_fn(state)
    self.assertEqual(inner_count, 1)
    self.assertEqual(outer_count, 1)
  def test_grad_conj_symbolic_zeros(self):
    """grad through conj with a symbolic-zero cotangent does not crash (#15400)."""
    # https://github.com/jax-ml/jax/issues/15400
    f = lambda x: jax.jit(lambda x, y: (x, y))(x, jax.lax.conj(x))[0]
    out = jax.grad(f)(3.0)  # doesn't crash
    self.assertAllClose(out, 1., check_dtypes=False)
  def test_cache_clear_pmap(self):
    """jax.clear_caches empties a pmapped function's executable cache."""
    @jax.pmap
    def f(i):
      return i * 2
    f(np.arange(1, dtype='float32')).block_until_ready()
    self.assertEqual(f._cache_size, 1)
    jax.clear_caches()
    self.assertEqual(f._cache_size, 0)
  def test_invalid_value_device_put(self):
    """device_put with an invalid device specifier (a bare string) raises ValueError."""
    with self.assertRaisesRegex(ValueError, r".*Received invalid value.*"):
      jax.device_put(jnp.arange(8), 'cpu')
def test_clear_cache(self):
@jax.jit
def add(x):
return x * 2
inp = jnp.arange(8)
with config.log_compiles(True):
with self.assertLogs(level='WARNING') as cm:
add(inp)
jax.clear_caches()
add(inp)
tracing_add_count = 0
for m in cm.output:
if 'Finished tracing + transforming add for pjit' in m:
tracing_add_count += 1
self.assertEqual(tracing_add_count, 2)
  def test_cache_miss_explanations(self):
    """explain_cache_misses logs a reason for each kind of tracing cache miss."""
    @jax.jit
    def f(x, y):
      return jnp.sin(x) * y['hi']
    x = jnp.float32(1.)
    y = {'hi': jnp.arange(3., dtype='float32')}
    # With the persistent compilation cache enabled, extra warnings are logged.
    expected_log_len = 1 if not is_persistent_cache_enabled() else 3
    # print on first miss, not on hit
    with config.explain_cache_misses(True):
      with self.assertLogs(level='WARNING') as cm:
        f(x, y)
        f(x, y)
    self.assertLen(cm.output, expected_log_len)
    msg = cm.output[0]
    self.assertIn('TRACING CACHE MISS', msg)
    self.assertIn('never seen function', msg)
    # shape change
    y_ = {'hi': jnp.arange(4, dtype='float32')}
    with config.explain_cache_misses(True):
      with self.assertLogs(level='WARNING') as cm:
        f(x, y_)
    self.assertLen(cm.output, expected_log_len)
    msg = cm.output[0]
    self.assertIn('never seen input type signature', msg)
    self.assertIn('closest seen input type signature has 1 mismatches', msg)
    self.assertIn('seen f32[3], but now given f32[4]', msg)
    # weak type change (assuming no x64)
    if not config.enable_x64.value:
      with config.explain_cache_misses(True):
        with self.assertLogs(level='WARNING') as cm:
          f(1., y)
      self.assertLen(cm.output, expected_log_len)
      msg = cm.output[0]
      self.assertIn('weak_type=True', msg)
      self.assertIn('https://jax.readthedocs.io/en/latest/type_promotion.html#weak-types', msg)
    # kwarg change
    with config.explain_cache_misses(True):
      with self.assertLogs(level='WARNING') as cm:
        f(1, y=y)
    self.assertLen(cm.output, expected_log_len)
    msg = cm.output[0]
    self.assertIn('never seen passing 1 positional args and 1 keyword args', msg)
    # tracing config change
    with config.explain_cache_misses(True):
      with self.assertLogs(level='WARNING') as cm:
        with jax.numpy_rank_promotion('warn'):
          f(x, y)
    # depending on the backend, we may or may not get persistent cache warnings
    self.assertTrue(1 <= len(cm.output) <= expected_log_len)
    msg = cm.output[0]
    self.assertIn("tracing context doesn't match", msg)
def test_cache_miss_explanations_new_function_in_loop(self):
@jax.jit
def f(x, y):
return jnp.sin(x) * y['hi']
x = jnp.float32(1.)
with config.explain_cache_misses(True):
with self.assertLogs(level='WARNING') as cm:
for _ in range(2):
jax.jit(lambda x: 2 * x)(3)
if is_persistent_cache_enabled():
# number of warnings depends on the backend
self.assertTrue(4 <= len(cm.output) <= 6)
msg = cm.output[3]
self.assertIn('another function defined on the same line', msg)
else:
self.assertLen(cm.output, 2)
_, msg = cm.output
self.assertIn('another function defined on the same line', msg)
  def test_cache_miss_explanations_unpacks_transforms(self):
    """explain_tracing_cache_miss handles deeply transformed functions without error."""
    # Tests that the explain_tracing_cache_miss() function does not throw an
    # error when unpacking `transforms` with a length greater than 3.
    @jax.jit
    def f(key):
      return jax.random.truncated_normal(key, 1, 1, dtype=jax.numpy.float32)
    with config.explain_cache_misses(True):
      with self.assertLogs(level="WARNING") as cm:
        f(jax.random.key(seed=123))
    if is_persistent_cache_enabled():
      # 5 warnings from tracing cache, 5-10 from persistent cache depending on
      # the backend
      self.assertTrue(10 <= len(cm.output) <= 15)
      self.assertTrue(any("TRACING CACHE MISS" in msg for msg in cm.output))
    else:
      self.assertLen(cm.output, 5)
      for msg in cm.output:
        self.assertIn("TRACING CACHE MISS", msg)
  def test_cache_miss_explanations_no_source_info(self):
    """Cache-miss explanations handle callables without Python source info."""
    # ``operator.add`` is a built-in function and does not have source info.
    with config.explain_cache_misses(True):
      jax.jit(operator.add)(42, 24)
  @parameterized.named_parameters([
      {"testcase_name": f"{np.dtype(dtype)}", "dtype": dtype}
      for dtype in jtu.dtypes.custom_floats])
  def test_jit_custom_floats(self, dtype):
    """jit compiles and runs with custom float dtypes (bfloat16, float8, ...)."""
    f = lambda x: x + 1
    args_maker = lambda: [jnp.ones((), dtype=dtype)]
    self._CompileAndCheck(f, args_maker)
  def test_jvp_asarray_returns_array(self):
    """jvp of jnp.asarray returns jax Arrays for both primal and tangent (#15676)."""
    # https://github.com/jax-ml/jax/issues/15676
    p, t = jax.jvp(jax.numpy.asarray, (1.,), (2.,))
    _check_instance(self, p)
    _check_instance(self, t)
  def test_scalar_conversion_errors(self):
    """Python scalar conversions (index/int/float/bool) raise the right errors for arrays and tracers."""
    array_int = jnp.arange(10, dtype=int)
    scalar_float = jnp.float32(0)
    scalar_int = jnp.int32(0)
    empty_int = jnp.arange(0, dtype='int32')
    array1_float = jnp.arange(1, dtype='float32')
    assertIntError = partial(self.assertRaisesRegex, TypeError,
                             "Only integer scalar arrays can be converted to a scalar index.")
    for func in [operator.index, hex, oct]:
      assertIntError(func, array_int)
      assertIntError(func, empty_int)
      assertIntError(func, scalar_float)
      assertIntError(jax.jit(func), array_int)
      assertIntError(jax.jit(func), empty_int)
      assertIntError(jax.jit(func), scalar_float)
      # Under jit, a scalar int is a tracer, so indexing raises a tracer error.
      self.assertRaises(TracerIntegerConversionError, jax.jit(func), scalar_int)
      _ = func(scalar_int)  # no error
    assertScalarError = partial(self.assertRaisesRegex, TypeError,
                                "Only scalar arrays can be converted to Python scalars.")
    for func in [int, float, complex]:
      assertScalarError(func, array_int)
      assertScalarError(jax.jit(func), array_int)
      self.assertRaises(ConcretizationTypeError, jax.jit(func), scalar_int)
      _ = func(scalar_int)  # no error
      assertScalarError(func, array1_float)
    assertEmptyBoolError = partial(
        self.assertRaisesRegex, ValueError,
        "The truth value of an empty array is ambiguous.")
    assertEmptyBoolError(bool, empty_int)
    assertEmptyBoolError(jax.jit(bool), empty_int)
    assertBoolError = partial(
        self.assertRaisesRegex, ValueError,
        "The truth value of an array with more than one element is ambiguous.")
    assertBoolError(bool, array_int)
    assertBoolError(jax.jit(bool), array_int)
    self.assertRaises(TracerBoolConversionError, jax.jit(bool), scalar_int)
    _ = bool(scalar_int)  # no error
@jtu.run_on_devices('cpu')
def test_asarray_no_copy_np(self):
x = np.random.uniform(0, 1, (1000, 2000)).astype("float32")
out = jnp.asarray(x)
x_ptr = x.__array_interface__["data"][0]
# This is because the PJRT CPU client shares memory if it is 16-byte aligned.
if (x_ptr & 15) != 0:
self.assertTrue(np.shares_memory(out, x))
  def test_mesh_creation_error_message(self):
    """Mesh rejects a flat device list when two axis names are given."""
    with self.assertRaisesRegex(ValueError, "ndim of its first argument"):
      jax.sharding.Mesh(jax.devices(), ("x", "y"))
  def test_jit_boundmethod_reference_cycle(self):
    """jit of a bound method must not keep the instance alive via a reference cycle."""
    class A:
      def __init__(self):
        self._foo = jax.jit(self.foo)
      def foo(self):
        pass
    a = weakref.ref(A())
    gc.collect()
    assert a() is None
  def test_forwarding_bug(self):
    """grad through an inner jit that forwards a constant output does not crash (#20267)."""
    # Test for issue #20267.
    def f(x):
      @jax.jit
      def inner(a, x):
        return a, jnp.exp(x)
      return inner(0.0, x)[0]
    jax.grad(f)(1.)  # don't crash
  @parameterized.parameters(it.product(range(4), repeat=3))
  @jtu.run_on_devices("cpu")
  def test_jit_forwarding_correctness(self, seed, num_input_fwd, num_output_fwd):
    """Gradients are correct when jit forwards permuted inputs to outputs."""
    num_args = 3
    rng = np.random.RandomState(seed)
    in_perm = rng.permutation(num_args)
    out_perm = rng.permutation(num_args)
    @jax.jit
    def f(inputs):
      inputs = [inputs[i] for i in in_perm]
      # Forward the first `num_input_fwd` inputs unchanged; compute on the rest.
      outputs = inputs[:num_input_fwd] + [
          jnp.exp(inputs[i]) if i < num_output_fwd else jnp.sin(inputs[i])
          for i in range(num_args - num_input_fwd)]
      return [outputs[i] for i in out_perm]
    jtu.check_grads(f, (list(jnp.arange(float(num_args))),), order=1,
                    modes=['rev'], atol=1e-3, rtol=1e-3)
  @jtu.run_on_devices("cpu")
  def test_inner_jit_forwarding_happens(self):
    """A constant forwarded through an inner jit appears as a Literal in the outer jaxpr."""
    jaxpr = jax.make_jaxpr(lambda: jax.jit(lambda x: x)(3))()
    self.assertLen(jaxpr.jaxpr.outvars, 1)
    self.assertIsInstance(jaxpr.jaxpr.outvars[0], core.Literal)
    self.assertEqual(jaxpr.jaxpr.outvars[0].val, 3)
  @parameterized.parameters(range(8))
  @jtu.run_on_devices("cpu")
  def test_inner_jit_forwarding_correctness(self, num_input_fwd):
    """jit-of-jit with forwarded outputs matches the single-jit result."""
    num_args = 8
    rng = np.random.RandomState(0)
    @jax.jit
    def f(inputs):
      # NOTE(review): rng.permutation runs at trace time; this relies on f2's
      # trace hitting f's pjit cache so both calls see the same permutations —
      # confirm if the cache behavior changes.
      inputs = [inputs[i] for i in rng.permutation(num_args)]
      outputs = (inputs[:num_input_fwd] +
                 [jnp.sin(inputs[i]) for i in range(num_args - num_input_fwd)])
      return [outputs[i] for i in rng.permutation(num_args)]
    f2 = jax.jit(f)
    inputs = list(jnp.arange(float(num_args)))
    expected = f(inputs)
    ans = f2(inputs)
    for a, b in zip(ans, expected):
      self.assertAllClose(a, b)
  @unittest.skip  # TODO(dougalm): figure out with Matt what to do with this feature
  def test_inner_jit_forwarded_consts_stay_const(self):
    """A constant forwarded through an inner jit stays concrete (int() works outside)."""
    out = jax.jit(lambda: int(jax.jit(lambda x: x)(3)))()  # don't crash
    self.assertEqual(out, 3)
  def test_lowering_platform_aot(self):
    """AOT trace(...).lower accepts an explicit lowering_platforms tuple."""
    @jax.jit
    def f(x):
      return x * 2
    f.trace(jnp.arange(8)).lower(lowering_platforms=('tpu',))  # doesn't crash
  def test_no_double_dots_in_error_message(self):
    """Tracer-bool error messages do not contain a doubled period."""
    @jax.jit
    def f(x):
      return 1 if x > 0 else 0
    with self.assertRaisesRegex(TracerBoolConversionError, r"with shape bool\[\]\.[^\.]"):
      f(0)
  def test_inlined_literals_with_error(self):
    """Branching on the value of an inlined-jit result raises the usual tracer error."""
    @jax.jit
    def f():
      @partial(jax.jit, inline=True)
      def g():
        return jnp.sin(1.)
      if g() > 0:
        return 1.
      return 0.
    with self.assertRaisesRegex(TracerBoolConversionError, "Attempted boolean"):
      f()
  def test_inline_return_twice(self):
    """An inlined jit returning the same value twice does not crash eval_shape (#22944)."""
    # https://github.com/jax-ml/jax/issues/22944
    @jax.jit
    def add_one(x: int) -> int:
      return x + 1
    def add_one_and_dupe(x: int) -> tuple[int, int]:
      y = add_one(x)
      return (y, y)
    jit_add_one_dupe = jax.jit(add_one_and_dupe, inline=True)
    jax.eval_shape(jit_add_one_dupe, 0)  # don't crash
def test_use_direct_linearize(self):
def check_invariant_to_use_direct_linearize(f):
with config.use_direct_linearize(False):
ans1 = f()
with config.use_direct_linearize(True):
ans2 = f()
self.assertEqual(ans1, ans2)
def sin_of_sin(x):
return lax.sin(jax.jit(lax.sin)(x))
check_invariant_to_use_direct_linearize(lambda: jax.grad(sin_of_sin)(1.0))
class RematTest(jtu.JaxTestCase):
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_basic(self, remat):
    """remat preserves values/gradients and recomputes sin on the backward pass."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x)), 3.
    def f(x):
      x, _ = g(x)
      return x
    ans = f(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans, f_lin = api.linearize(f, 2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = f_lin(3.)
    expected = np.cos(np.sin(2.)) * np.cos(2.) * 3.
    self.assertAllClose(ans, expected, check_dtypes=False)
    # Count primitive evaluations during the linearized call to confirm that
    # sin is recomputed (rematerialized) while cos runs for the tangents.
    sin_calls = []
    cos_calls = []
    sin_impl = lax.sin_p.impl
    cos_impl = lax.cos_p.impl
    try:
      lax.sin_p.def_impl(lambda x: sin_calls.append(1) or sin_impl(x))
      lax.cos_p.def_impl(lambda x: cos_calls.append(1) or cos_impl(x))
      f_lin(3.)
    finally:
      lax.sin_p.def_impl(sin_impl)
      lax.cos_p.def_impl(cos_impl)
    self.assertEqual(len(sin_calls), 1)
    self.assertEqual(len(cos_calls), 2)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_freevars(self, remat):
    """remat of a closure over a traced free variable matches the unrematted function."""
    def f1(x):
      y = 2 * jnp.sin(x)
      z = jnp.cos(x) * jnp.sin(y)
      return z
    def f2(x):
      y = 2 * jnp.sin(x)
      z = remat(lambda x: jnp.cos(x) * jnp.sin(y))(x)  # closes over y
      return z
    ans, f_lin = api.linearize(f2, 2.)
    expected, f_lin_expected = api.linearize(f1, 2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = f_lin(3.)
    expected = f_lin_expected(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_concrete_error(self):
    """Branching on a traced value under remat raises an error mentioning static_argnums only when relevant."""
    @jax.remat  # no static_argnums or concrete
    def g(x):
      if x > 0:
        return lax.sin(x)
      else:
        return lax.cos(x)
    with self.assertRaisesRegex(core.ConcretizationTypeError, "static_argnums"):
      g(3.)
    @partial(jax.remat, static_argnums=(0,))  # using static_argnums but...
    def g(x):
      if x > 0:  # jnp operations still get staged!
        return lax.sin(x)
      else:
        return lax.cos(x)
    with self.assertRaisesRegex(core.ConcretizationTypeError, "static_argnums"):
      g(jnp.array(3.))
    # But don't raise an error mentioning static_argnums here:
    @jax.remat
    def g(x):
      jax.jit(lambda: 0 if jnp.add(1, 1) else 0)()
      return lax.sin(x)
    try:
      g(jnp.array(3.))
    except core.ConcretizationTypeError as e:
      msg = str(e)
      self.assertNotIn('static_argnums', msg)
  @unittest.skip
  def test_remat_grad_python_control_flow_static_argnums(self):
    """(skipped) Python control flow on a static arg under remat, via ensure_compile_time_eval."""
    @partial(jax.remat, static_argnums=(0,))
    def g(x):
      with jax.ensure_compile_time_eval():
        x_pos = x > 0
      if x_pos:
        return lax.sin(x), 3.
      else:
        return lax.cos(x), 4.
    def f(x):
      x, _ = g(x)
      return x
    ans = f(2.)
    expected = np.sin(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(f)(2.)
    expected = np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @unittest.skip
  def test_remat_grad_python_control_flow_unhashable_static_argnums(self):
    """(skipped) static_argnums under remat with an unhashable argument wrapper."""
    @partial(jax.remat, static_argnums=(0,))
    def g(x):
      x = x.val
      with jax.ensure_compile_time_eval():
        x_pos = x > 0
      if x_pos:
        return lax.sin(x), 3.
      else:
        return lax.cos(x), 4.
    def f(x):
      x, _ = g(x)
      return x
    class A:
      def __init__(self, val):
        self.val = val
      def __hash__(self):
        raise TypeError
    ans = f(A(2.))
    expected = np.sin(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: f(A(x)))(2.)
    expected = np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_retracing(self):
    """A remat-decorated function is traced only once across repeated eager calls."""
    # This is *not* a very important behavior; remat doesn't need to provide
    # caching guarantees with the same importance as jit. But even so, in the
    # interest of not redoing tracing work (and thus make jax.remat more
    # feasible to use in eager mode), this test checks that we don't re-trace
    # the remat-decorated function.
    count = 0
    @jax.remat
    def g(x):
      nonlocal count
      count += 1
      return lax.sin(x), 3.
    def f(x):
      x, _ = g(x)
      return x
    for _ in range(10):
      y = f(2.)
      y.block_until_ready()
    self.assertEqual(count, 1)
  def test_remat_static_agnums_retracing(self):
    """remat with static_argnums is also traced only once across repeated calls.

    (The 'agnums' typo is in the test name itself; renaming would change the
    test id, so it is kept.)
    """
    # This is *not* a super important behavior; remat doesn't need to provide
    # caching guarantees with the same importance as jit. But even so, in the
    # interest of not redoing tracing work (and thus make jax.remat more
    # feasible to use in eager mode), this test checks that we don't re-trace
    # the remat-decorated function *even with static_argnums*. See also the
    # above test, which doesn't check for static_argnums.
    count = 0
    @partial(jax.remat, static_argnums=(0,))
    def g(x):
      nonlocal count
      count += 1
      with jax.ensure_compile_time_eval():
        x_pos = x > 0
      if x_pos:
        return lax.sin(x), 3.
      else:
        return lax.cos(x), 4.
    def f(x):
      x, _ = g(x)
      return x
    for _ in range(10):
      y = f(2.)
      y.block_until_ready()
    self.assertEqual(count, 1)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_jit(self, remat):
    """remat composes with jit and grad in either order."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x))
    def f_(x):
      return g(x)
    f = api.jit(f_)
    ans = f(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(f)(2.)
    expected = np.cos(np.sin(2.)) * np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jit(api.grad(f_))(2.)
    expected = np.cos(np.sin(2.)) * np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_vmap(self, remat):
    """remat composes with vmap/jacfwd/jacrev and with batchers that introduce constants."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x))
    x = np.arange(3.)
    ans = api.vmap(g)(x)
    expected = np.sin(np.sin(x))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jacfwd(g)(x)
    expected = np.diag(np.cos(np.sin(x)) * np.cos(x))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jacrev(g)(x)
    expected = np.diag(np.cos(np.sin(x)) * np.cos(x))
    self.assertAllClose(ans, expected, check_dtypes=False)
    # Make sure that introducing constants in vmap works.
    constant_introducing_p = core.Primitive('introduce_constant')
    constant_introducing_p.def_abstract_eval(core.raise_to_shaped)
    def _constant_introducing_batcher(xs, ds):
      (x,), (d,) = xs, ds
      return (x + np.arange(x.size, dtype=x.dtype).reshape(x.shape)), d
    batching.primitive_batchers[constant_introducing_p] = _constant_introducing_batcher
    api.vmap(remat(constant_introducing_p.bind))(jnp.ones(20))
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_vmap_not_leading_dim(self, remat):
    """vmap over a non-leading input axis of a rematted function works."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x))
    x = np.arange(3 * 5.).reshape(3, 5)
    ans = api.vmap(g, 1, 0)(x)
    expected = np.sin(np.sin(x)).T
    self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_higher_order_autodiff(self, remat):
    """Second-order gradients through remat match the unrematted function."""
    def f(x):
      return lax.cos(lax.sin(x))
    g = remat(f)
    ans = api.grad(api.grad(g))(3.)
    expected = api.grad(api.grad(f))(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_remat_scan(self, remat):
    """remat of a scan body matches the unrematted scan, and its linearization stages cos."""
    to_scan = lambda c, x: (jnp.sin(c), None)
    def f_noremat(x):
      y, _ = lax.scan(to_scan, x, np.arange(3.))
      return y
    def f_yesremat(x):
      y, _ = lax.scan(remat(to_scan), x, np.arange(3.))
      return y
    ans = f_yesremat(4.)
    expected = f_noremat(4.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(f_yesremat)(4.)
    expected = api.grad(f_noremat)(4.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jaxpr = api.make_jaxpr(api.linearize(f_yesremat, 4.)[1])(1.)
    scan_eqn, = jaxpr.jaxpr.eqns
    self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))
    jaxpr = api.make_jaxpr(api.vjp(f_yesremat, 4.)[1])(1.)
    scan_eqn, = jaxpr.jaxpr.eqns
    self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_no_redundant_flops(self, remat):
    """Under jit, remat does not cause extra forward-pass recomputation."""
    # see https://github.com/jax-ml/jax/pull/1749#issuecomment-558267584
    @api.jit
    def g(x):
      return f(2., x)
    @remat
    def f(x, y):
      return jnp.sin(x) * y
    # We swap out sin_p's impl rule to count how many times it's invoked
    called = []
    sin_impl = lax.sin_p.impl
    try:
      lax.sin_p.def_impl(lambda x: called.append(1) or sin_impl(x))
      api.grad(g)(3.)
    finally:
      lax.sin_p.def_impl(sin_impl)
    num_calls = len(called)
    self.assertLessEqual(num_calls, 1)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_binomial_checkpointing(self, remat):
    """Recursive binomial checkpointing of a function chain preserves values and grads."""
    def binom_checkpoint(funs):
      if len(funs) == 1:
        return funs[0]
      else:
        # Split the chain in half and remat the composition of the halves.
        f1 = binom_checkpoint(funs[:len(funs)//2])
        f2 = binom_checkpoint(funs[len(funs)//2:])
        return remat(lambda x: f1(f2(x)))
    f1 = binom_checkpoint([jnp.sin, jnp.sin, jnp.sin, jnp.sin])
    f2 = lambda x: jnp.sin(jnp.sin(jnp.sin(jnp.sin(x))))
    x = 4.
    self.assertAllClose(f1(x), f2(x), check_dtypes=False)
    self.assertAllClose(api.grad(f1)(x), api.grad(f2)(x), check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_remat_symbolic_zeros(self, remat):
    """grad through a rematted scan body with symbolic-zero cotangents does not crash (#1907)."""
    # code from https://github.com/jax-ml/jax/issues/1907
    key = jax.random.key(0)
    key, split = jax.random.split(key)
    n = 5
    def func(D0):
      def shift(R, dR, **unused_kwargs):
        return R + dR
      def apply_fn(R):
        return D0 * R
      Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0,
                                 dtype=jnp.float32)
      def move(R,i):
        F = apply_fn(R)
        return shift(R, 0.001 * F), jnp.array([0.])
      move = remat(move)
      R, temp = lax.scan(move, Rinit, jnp.arange(2))
      return R[0, 0]
    api.grad(func)(5.0)  # doesn't crash
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_jit2(self, remat):
    """A nullary rematted closure over a jit tracer evaluates correctly."""
    @api.jit
    def f(x):
      y = 2 * x
      @remat
      def g():
        return y
      return g()
    self.assertAllClose(f(3), 6, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_remat_nontrivial_env(self, remat):
    """remat inside a jitted scan with extra closed-over values does not crash (#2030)."""
    # simplified from https://github.com/jax-ml/jax/issues/2030
    @remat
    def foo(state, dt=0.5, c=1):
      u, u_t = state
      u_tt = c**2 * u
      u_t = u_t + u_tt * dt
      return (u, u_t)
    @partial(api.jit, static_argnums=(1,))
    def _multi_step(state, count, dt, c):
      f = lambda s, _: (foo(s, dt, c), _)
      return lax.scan(f, state, None, count)
    def multi_step(state, count, dt=1/jnp.sqrt(2), c=1):
      return _multi_step(state, count, dt, c)
    def loss(u0, target, steps, dt=1/jnp.sqrt(2), c=1):
      init = (u0, jnp.zeros_like(u0))
      (uf, _), _ = multi_step(init, steps, dt, c)
      return ((uf - target) ** 2).mean()
    target = jnp.zeros((128, 128))
    u0 = jnp.ones_like(target)
    loss(u0, target, 10)  # doesn't crash
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_jit3(self, remat):
    """grad of remat works when the function calls jitted sub-computations (#2180)."""
    # https://github.com/jax-ml/jax/issues/2180
    def f(w, x):
      a = jnp.dot(x, w)
      b = jnp.einsum("btd,bTd->btT", a, a)
      c = jnp.einsum("btT,btd->btd", b, a)
      return jnp.sum(c)
    w = jnp.ones([1, 1])
    x = jnp.ones([1, 1, 1])
    f = remat(f)
    api.grad(f)(w, x)  # doesn't crash
    @api.jit
    def mul(a, b):
      return a * b
    def f(w, x):
      a = mul(w, x)
      b = mul(a, a)
      return b
    w = 1.
    x = 1.
    f = remat(f)
    api.grad(f)(w, x)  # doesn't crash
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_remat_scan2(self, remat):
    """grad through remat of a function that itself scans does not crash (#1963)."""
    # https://github.com/jax-ml/jax/issues/1963
    def scan_bug(x0):
      f = lambda x, _: (x + 1, None)
      def scanned_f(x, _):
        return lax.scan(f, x, xs=None, length=1)[0], None
      x, _ = remat(scanned_f)(x0, None)
      return x
    jax.grad(scan_bug)(1.0)  # doesn't crash
  def test_remat_jit_static_argnum_omnistaging(self):
    """jit with static_argnums over a core.call-wrapped function doesn't crash."""
    # https://github.com/jax-ml/jax/issues/2833
    # NOTE(mattjj): after #3370, this test doesn't actually call remat...
    def named_call(f):
      # Wrap f in a core.call_p bind so it is staged as a call primitive.
      def named_f(*args):
        f_ = lu.wrap_init(lambda: (f(*args),))
        out, = core.call_p.bind(f_)
        return out
      return named_f
    def f(a_bool, y):
      if a_bool:
        return y + 1
      else:
        return y
    api.jit(named_call(f), static_argnums=0)(True, 1)  # no crash
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_eval_counter(self, remat):
    """Counts primitive evaluations to verify remat's rematerialization behavior."""
    # https://github.com/jax-ml/jax/issues/2737
    # Custom primitive whose impl increments num_evals on every evaluation.
    add_one_p = core.Primitive('add_one')
    add_one = add_one_p.bind
    num_evals = 0
    @contextmanager
    def assertEvals(n):
      # Asserts exactly n impl evaluations happen inside the with-block.
      start = num_evals
      yield
      assert num_evals - start == n
    def add_one_impl(x):
      nonlocal num_evals
      num_evals += 1
      return x + 1
    add_one_p.def_impl(add_one_impl)
    def add_one_jvp(pin, tin):
      pout = add_one(pin[0])
      return pout, pout * tin[0]
    ad.primitive_jvps[add_one_p] = add_one_jvp
    add_one_p.def_abstract_eval(lambda x: x)
    v = np.zeros((1,))
    f = remat(add_one)
    g = remat(lambda x: add_one(f(x)))
    # 2 calls needed to evaluate g
    with assertEvals(2):
      _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, 1 call made while transposing f
    with assertEvals(3):
      vjp(v)
    @jax_util.curry
    def call(f, *args):
      return core.call(
          lu.wrap_init(lambda *args: [f(*args)]),
          *args, name='foo')[0]
    f = call(add_one)
    g = remat(lambda x: add_one(f(x)))
    # 2 calls needed to evaluate g
    with assertEvals(2):
      _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, no reevaluation for transposition of f
    with assertEvals(2):
      vjp(v)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_escaped_tracer_remat(self, remat):
    """A tracer leaked via global state under remat raises UnexpectedTracerError."""
    # b/169779185
    def f():
      seq = [jnp.zeros([])]
      def g():
        seq[0] += 1  # this is line 7 btw
        return seq[0]
      remat(g)()
      remat(lambda: g())()  # lambda defeats caching
    with self.assertRaisesRegex(UnexpectedTracerError, "global state"):
      api.jit(f)()
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_no_cse_widget_on_primals(self, remat):
    """CSE-prevention widgets appear only in the grad lowering, not the primal one."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x)), 3.
    def f(x):
      x, _ = g(x)
      return x
    # Primal-only lowering: no while/conditional/opt-barrier widget expected.
    text = jax.jit(f).lower(2.).as_text('hlo')
    self.assertNotIn('while', text)
    self.assertNotIn('conditional', text)
    self.assertNotIn('opt-barrier', text)
    # Grad lowering: one of the CSE-prevention widgets must be present.
    text = jax.jit(grad(f)).lower(2.).as_text('hlo')
    self.assertTrue('while' in text or 'conditional' in text
                    or 'opt-barrier' in text)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_no_cse_widget_with_prevent_cse_false(self, remat):
    """With prevent_cse=False, no CSE widget appears even in the grad lowering."""
    @partial(remat, prevent_cse=False)
    def g(x):
      return lax.sin(lax.sin(x)), 3.
    def f(x):
      x, _ = g(x)
      return x
    text = jax.jit(f).lower(2.).as_text('hlo')
    self.assertNotIn('while', text)
    self.assertNotIn('conditional', text)
    text = jax.jit(grad(f)).lower(2.).as_text('hlo')
    self.assertNotIn('while', text)
    self.assertNotIn('conditional', text)
  @parameterized.named_parameters(
      {"testcase_name": f"_{policy_name}_{remat_name}", "remat": remat,
       "policy": policy, "in_jaxpr2": in_jaxpr2, "not_in_jaxpr2": not_in_jaxpr2}
      for remat_name, remat in [
          ('old_remat', jax.remat),
          ('new_remat', new_checkpoint),
      ]
      for policy_name, policy, in_jaxpr2, not_in_jaxpr2 in [
          ('save_anything', lambda *_, **__: True, [], [' sin ', ' cos ']),
          ('save_nothing',  lambda *_, **__: False, [' sin ', ' cos '], []),
          ('save_sin',      lambda p, *_, **__: str(p) == 'sin', [' cos '], [' sin ']),
      ])
  def test_remat_custom_policy(self, remat, policy, in_jaxpr2, not_in_jaxpr2):
    """Custom save policies control which primitives appear in the linearized jaxpr."""
    for square in [lambda x: x * x, api.jit(lambda x: x * x)]:
      f = remat(lambda x: jnp.sin(square(jnp.sin(x))), policy=policy)
      y, f_lin = api.linearize(f, 1.)
      ydot = f_lin(2.)
      # Inspect the tangent-side jaxpr for the expected primitive names.
      jaxpr_text = str(f_lin.func.args[0])
      for substr in in_jaxpr2:
        self.assertIn(substr, jaxpr_text)
      for substr in not_in_jaxpr2:
        self.assertNotIn(substr, jaxpr_text)
      # Numerical agreement with a plain jvp, plus higher-order grad checks.
      y_expected, ydot_expected = api.jvp(lambda x: jnp.sin(square(jnp.sin(x))),
                                          [1.], [2.])
      self.assertAllClose(y, y_expected)
      self.assertAllClose(ydot, ydot_expected)
      jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])
  @parameterized.named_parameters(
      {"testcase_name": f"_{remat_name}", "remat": remat}
      for remat_name, remat in [
          ('old_remat', jax.remat),
          ('new_remat', new_checkpoint),
      ])
  def test_remat_custom_policy_save_cos(self, remat):
    """Saving 'cos' residuals removes both sin and cos from the linearized jaxpr."""
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    f = remat(lambda x: jnp.sin(jnp.sin(x)),  # different function
              policy=save_cos)
    _, f_lin = api.linearize(f, 1.)
    jaxpr_text = str(f_lin.func.args[0])
    self.assertNotIn(' sin ', jaxpr_text)
    self.assertNotIn(' cos ', jaxpr_text)
    jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])
  @parameterized.named_parameters(
      {"testcase_name": f"_{remat_name}", "remat": remat}
      for remat_name, remat in [
          ('old_remat', jax.remat),
          ('new_remat', new_checkpoint),
      ])
  def test_remat_checkpoint_dots(self, remat):
    """checkpoint_dots policy: dots are saved, sines rematerialized."""
    @partial(remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      return x
    _, f_lin = api.linearize(f, jnp.ones((2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_'), 6)
    jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
  @parameterized.named_parameters(
      {"testcase_name": f"_{remat_name}", "remat": remat}
      for remat_name, remat in [
          ('old_remat', jax.remat),
          ('new_remat', new_checkpoint),
      ])
  def test_remat_checkpoint_dots_with_no_batch_dims(self, remat):
    """checkpoint_dots_with_no_batch_dims saves batch-free einsum contractions."""
    @partial(remat, policy=jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims)
    def f(x):
      x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      return x
    _, f_lin = api.linearize(f, jnp.ones((2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_general'), 6)
    jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
  @parameterized.named_parameters(
      {"testcase_name": f"_{remat_name}", "remat": remat}
      for remat_name, remat in [
          ('old_remat', jax.remat),
          ('new_remat', new_checkpoint),
      ])
  def test_remat_checkpoint_dots_with_no_batch_dims2(self, remat):
    """Batched ('n...') einsums are NOT saved by the no-batch-dims policy."""
    @partial(remat, policy=jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims)
    def f(x):
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      return x
    _, f_lin = api.linearize(f, jnp.ones((3, 2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    # 9 (not 6): batched dots aren't saved, so primal dots are recomputed too.
    self.assertEqual(jaxpr_text.count(' dot_general'), 9)
    jtu.check_grads(f, (jnp.ones((3, 2, 2)),), order=2, modes=['fwd', 'rev'])
  @parameterized.named_parameters(
      {"testcase_name": f"_{remat_name}", "remat": remat}
      for remat_name, remat in [
          ('old_remat', jax.remat),
          ('new_remat', new_checkpoint),
      ])
  def test_remat_checkpoint_dots_jit(self, remat):
    """checkpoint_dots policy gives the same counts when remat is under jit."""
    @api.jit
    @partial(remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x * 1e-3)
      return x
    _, f_lin = api.linearize(f, jnp.ones((2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_'), 6)
    jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots_inside_scan(self):
    """checkpoint_dots policy applied to the body of a scan."""
    x = jnp.ones((5,))
    def f(W):
      @partial(jax.remat, policy=jax.checkpoint_policies.checkpoint_dots)
      def f(x):
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        return x
      def body(x, _): return f(x), None
      return lax.scan(body, x, None, length=2)[0]
    _, f_vjp = api.vjp(f, jnp.ones((5, 5)))
    jaxpr_text = str(f_vjp.args[0].func.args[1])
    # Two sine calls in the backward pass because while we don't save sines
    # within the (rematted) body function, we can save the scan carry, which
    # effectively saves one sine. Three cosines for the Jacobian coefficients.
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' cos '), 3)
    # Six calls to dot_general in the backward pass because we save the primal
    # matmuls and only compute the backward pass ones (two for each primal one).
    self.assertEqual(jaxpr_text.count(' dot_'), 6)
    jtu.check_grads(api.jit(f), (jnp.ones((5, 5)),), order=2,
                    modes=['fwd', 'rev'])
  def test_remat_custom_jvp_policy(self):
    """checkpoint_dots policy interacts correctly with a custom_jvp primitive."""
    @jax.custom_jvp
    def sin(x):
      return jnp.sin(x)
    def sin_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return sin(x), jnp.cos(x) * g
    sin.defjvp(sin_jvp)
    @partial(jax.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      return x
    jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])
    # Same check with f iterated under scan.
    def g(x):
      return lax.scan(lambda x, _: (f(x), None), x, None, length=2)[0]
    jtu.check_grads(g, (3.,), order=2, modes=['fwd', 'rev'])
  def test_remat_custom_vjp_policy(self):
    """checkpoint_dots policy interacts correctly with a custom_vjp primitive."""
    @jax.custom_vjp
    def sin(x):
      return jnp.sin(x)
    def sin_fwd(x):
      return sin(x), x
    def sin_bwd(x, y_bar):
      return (jnp.cos(x) * y_bar,)
    sin.defvjp(sin_fwd, sin_bwd)
    @partial(jax.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      @partial(api.named_call, name="dot")
      def dot2(y, z):
        return jnp.dot(x, jnp.dot(y, z, precision=lax.Precision.HIGHEST),
                       precision=lax.Precision.HIGHEST)
      x = dot2(x, x)
      x = sin(x * 1e-3)
      x = dot2(x, x)
      x = sin(x * 1e-3)
      x = dot2(x, x)
      x = sin(x * 1e-3)
      return x
    # custom_vjp only supports reverse-mode differentiation.
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    def g(x):
      return lax.scan(lambda x, _: (f(x), None), x, None, length=2)[0]
    jtu.check_grads(g, (3.,), order=2, modes=['rev'])
  @parameterized.named_parameters(
      {"testcase_name": f"_{remat_name}", "remat": remat}
      for remat_name, remat in [
          ('old_remat', jax.remat),
          ('new_remat', new_checkpoint),
      ])
  def test_remat_dropvar_policy(self, remat):
    """grad inside a policy-rematted function (producing dropped vars) doesn't crash."""
    def f(x):
      return x, x
    @partial(remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def g(x):
      x = api.grad(lambda x: f(x)[0])(x)
      return x
    api.grad(g)(3.)
  def test_remat_custom_jvp_linear_policy(self):
    """remat with a policy over a linear custom_jvp function checks out."""
    @jax.custom_jvp
    def sum(x):
      return jnp.sum(x, axis=0)
    @sum.defjvp
    def sum_jvp(primals, tangents):
      (x,), (xdot,) = primals, tangents
      return sum(x), sum(xdot)
    @partial(jax.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      return sum(x)
    jtu.check_grads(f, (jnp.ones(3),), order=2, modes=['fwd', 'rev'])
    # Also under scan over the leading axis.
    def g(x):
      return lax.scan(lambda _, x: (None, f(x)), None, x)[1]
    jtu.check_grads(g, (jnp.ones((2, 3)),), order=2, modes=['fwd', 'rev'])
  def test_constants_not_hoisted(self):
    """new_checkpoint does not save constants created inside the function as residuals."""
    # The old implementation of remat worked by data dependence, and so
    # (potentially large) constants would not be rematerialized and could be
    # wastefully instantiated. This test checks that the newer remat
    # implementation avoids that. See https://github.com/jax-ml/jax/pull/8191.
    # no residuals from constants created inside jnp.einsum
    @partial(new_checkpoint, policy=lambda *_, **__: False)
    def f(x):
      return jnp.einsum('ii->i', x)
    res_avals = saved_residuals(f, jnp.ones((2, 2)))
    self.assertLen(res_avals, 0)
    # no residuals from jnp.zeros
    @partial(new_checkpoint, policy=lambda *_, **__: False)
    def f(x):
      return jnp.zeros_like(x) * x
    res_avals = saved_residuals(f, jnp.ones((2, 2)))
    self.assertLen(res_avals, 0)
    # no residuals from jnp.zeros, but input must be saved
    @partial(new_checkpoint, policy=lambda *_, **__: False)
    def f(x):
      return jnp.zeros_like(x) * jnp.sin(x)
    res_avals = saved_residuals(f, jnp.ones((2, 2)))
    self.assertLen(res_avals, 1)
  def test_name_saveable_input(self):
    """A checkpoint_name-tagged value shows up in saved_residuals with its name."""
    @partial(jax.remat, policy=lambda p, *_, **__: 'mul' in str(p))
    def f(x):
      x = checkpoint_name(x * x, 'foo')
      x = x * x
      return x
    res = saved_residuals(f, 3.)
    self.assertStartsWith(res[1][1], "named 'foo'")
  def test_name_denylist(self):
    """save_any_names_but_these excludes the listed checkpoint names from residuals."""
    def f(x):
      y = checkpoint_name(jnp.multiply(2., 2.), 'y')
      z = checkpoint_name(jnp.multiply(2., 2.), 'z')
      w = checkpoint_name(jnp.multiply(2., 2.), 'w')
      u = jnp.multiply(2., 2.)
      return (((x * y) * z) * w) * u
    policy = jax.checkpoint_policies.save_any_names_but_these('y', 'z', 'w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 0)  # can't save anything
    policy = jax.checkpoint_policies.save_any_names_but_these('z', 'w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 1)  # can save only y
    policy = jax.checkpoint_policies.save_any_names_but_these('w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 2)  # can save y and z
    policy = jax.checkpoint_policies.save_any_names_but_these()
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 3)  # can save y, z, and w
  def test_name_allowlist(self):
    """save_only_these_names restricts residuals to the listed checkpoint names."""
    def f(x):
      y = checkpoint_name(jnp.multiply(2., 2.), 'y')
      z = checkpoint_name(jnp.multiply(2., 2.), 'z')
      w = checkpoint_name(jnp.multiply(2., 2.), 'w')
      u = jnp.multiply(2., 2.)
      return (((x * y) * z) * w) * u
    policy = jax.checkpoint_policies.save_only_these_names('y', 'z', 'w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 3)  # can save y, z, and w
    policy = jax.checkpoint_policies.save_only_these_names('z', 'w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 2)  # can save z and w
    policy = jax.checkpoint_policies.save_only_these_names('w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 1)  # can save w
    policy = jax.checkpoint_policies.save_only_these_names()
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 0)  # can't save anything!
  def test_saved_residuals_utility(self):
    """saved_residuals reports each residual's shape and human-readable provenance."""
    def f(x, y):
      x1, x2 = x
      z = checkpoint_name(jnp.sin(3.), 'z')
      return z * ((x1 * x2) * y) * np.array([3.])
    res = saved_residuals(f, (2., 3.), y=4.)
    self.assertLen(res, 6)
    self.assertEqual(res[0][0].shape, (1,))
    self.assertEqual(res[0][1], "from a constant")
    self.assertEqual(res[1][0].shape, ())
    self.assertEqual(res[1][1], "from the argument x[0]")
    self.assertEqual(res[2][0].shape, ())
    self.assertEqual(res[2][1], "from the argument x[1]")
    self.assertEqual(res[3][0].shape, ())
    self.assertEqual(res[3][1], "from the argument y")
    self.assertEqual(res[4][0].shape, ())
    self.assertStartsWith(res[4][1], "named 'z'")
    self.assertEqual(res[5][0].shape, ())
  def test_saved_residuals_utility_jit(self):
    """saved_residuals provenance strings when the function is jitted."""
    @jax.jit
    def f(x, y):
      x1, x2 = x
      z = checkpoint_name(jnp.sin(3.), 'z')
      return z * ((x1 * x2) * y) * np.array([3.])
    res = saved_residuals(f, (2., 3.), y=4.)
    self.assertLen(res, 6)
    self.assertEqual(res[0][0].shape, ())
    self.assertEqual(res[0][1], "from the argument x[0]")
    self.assertEqual(res[1][0].shape, ())
    self.assertEqual(res[1][1], "from the argument x[1]")
    self.assertEqual(res[2][0].shape, ())
    self.assertEqual(res[2][1], "from the argument y")
    self.assertEqual(res[3][0].shape, ())
    self.assertStartsWith(res[3][1], "output of jitted function 'f'")
    self.assertEqual(res[4][0].shape, ())
    self.assertEqual(res[5][0].shape, (1,))
    self.assertStartsWith(res[5][1], "output of jitted function 'f'")
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_policy', partial(jax.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_checkpoint_dropvars(self, remat):
    """grad through remat with a discarded jit output (dropvar) doesn't crash."""
    @remat
    def f(x):
      _, x = api.jit(lambda: (x, x))()
      return x
    _ = api.grad(f)(3.)  # doesn't crash
  def test_dce_keeps_eqns_with_used_outputs_but_no_used_inputs(self):
    """DCE keeps a zero-input jit call whose output is used; grad doesn't crash."""
    @new_checkpoint
    def f(x):
      c = jax.jit(lambda: 3.)()
      return c * x
    _ = jax.grad(f)(3.)  # doesn't crash
  def test_linearize_caching(self):
    """Repeated calls to a linearized checkpointed function hit the lowering cache."""
    # https://github.com/jax-ml/jax/issues/9661
    identity = jax.checkpoint(jax.jit(lambda x: 2 * x))
    _, f_lin = jax.linearize(identity, 1.)
    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      for _ in range(20):
        f_lin(1.).block_until_ready()
    self.assertEqual(count[0], 1)  # cached after first execution
  def test_vjp_caching(self):
    """Repeated VJP applications of a checkpointed function hit the pjit cache."""
    # https://github.com/jax-ml/jax/issues/9661
    identity = jax.checkpoint(jax.jit(lambda x: 2 * x))
    _, f_vjp = jax.vjp(identity, 1.)
    with jtu.count_pjit_cpp_cache_miss() as count:  # noqa: F841
      for _ in range(20):
        f_vjp(1.)[0].block_until_ready()
    self.assertEqual(count[0], 2)  # fwd execute_trivial, backward_pass on bwd
  def test_vjp_caching_static_argnums(self):
    """VJP caching also holds when remat uses static_argnums."""
    identity = jax.remat(lambda x, y: jax.jit(lambda x: 2 * x if y else x)(x),
                         static_argnums=(1,))
    _, f_vjp = jax.vjp(lambda x: identity(x, True), 1.)
    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      for _ in range(20):
        f_vjp(1.)[0].block_until_ready()
    self.assertEqual(count[0], 2)  # fwd execute_trivial, backward_pass on bwd
  def test_fwd_caching(self):
    """Forward pass of a checkpointed function is lowered only once over many vjp calls."""
    # see above test also
    identity = jax.checkpoint(jax.jit(lambda x: 2 * x))
    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      for _ in range(20):
        y, _ = jax.vjp(identity, 1.)
        y.block_until_ready()
    self.assertEqual(count[0], 1)
  def test_fwd_caching_static_argnums(self):
    """Forward caching also holds when checkpoint uses static_argnums."""
    # see above test also
    identity = jax.checkpoint(jax.jit(lambda x: 2 * x), static_argnums=(0,))
    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      for _ in range(20):
        y = identity(1.)
        y.block_until_ready()
    self.assertEqual(count[0], 1)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_remat_of_scan(self, remat):
    """remat applied around a lax.scan: grads check out, sin/cos rematerialized."""
    to_scan = lambda c, _: (jnp.sin(c), jnp.sin(c))
    f = lambda x: lax.scan(to_scan, x, None, length=3)
    jtu.check_grads(remat(f), (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(remat(f), 4.)[1])(1.)
    self.assertIn(' sin ', str(jaxpr))
    self.assertIn(' cos ', str(jaxpr))
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_const_in_jvp_scan(self, remat):
    """A constant (jnp.arange) appearing in a custom JVP rule under scan doesn't crash."""
    @jax.custom_jvp
    def f(x):
      return x * jnp.arange(3.)
    @f.defjvp
    def f_jvp(primals, tangents):
      (x,), (xdot,) = primals, tangents
      return f(x), xdot * jnp.arange(3.)
    @remat
    def g(x):
      def body(c, _):
        return f(c), None
      y, _ = jax.lax.scan(body, x, None, length=1)
      return y.sum()
    jax.grad(g)(jnp.arange(3.))  # doesn't crash
  def test_remat_checkpoint_dots_outside_scan(self):
    """checkpoint_dots policy applied around (not inside) a scan."""
    # see also above test test_remat_checkpoint_dots_inside_scan
    x = jnp.ones((5,))
    @partial(new_checkpoint, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(W):
      def f(x):
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        return x
      def body(x, _): return f(x), None
      return lax.scan(body, x, None, length=2)[0]
    _, f_vjp = api.vjp(f, jnp.ones((5, 5)))
    jaxpr = f_vjp.args[0].func.args[1]
    jaxpr_text = str(jaxpr)
    # Three sines here (vs two for the inside-scan variant): with remat on the
    # outside there is no saved scan carry to stand in for one of them.
    self.assertEqual(jaxpr_text.count(' sin '), 3)
    self.assertEqual(jaxpr_text.count(' cos '), 3)
    # Six calls to dot_general in the backward pass because we save the primal
    # matmuls and only compute the backward pass ones (two for each primal one).
    self.assertEqual(jaxpr_text.count(' dot_'), 6)
    jtu.check_grads(api.jit(f), (jnp.ones((5, 5)),), order=2,
                    modes=['fwd', 'rev'])
  def test_remat_of_scan_policy(self):
    """A save-cos policy on remat-of-scan removes sin/cos from the tangent jaxpr."""
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    to_scan = lambda c, _: (jnp.sin(c), jnp.sin(c))
    f = new_checkpoint(lambda x: lax.scan(to_scan, x, None, length=3),
                       policy=save_cos)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
  def test_remat_of_scan_funky_custom_jvp(self):
    """Policies over remat-of-scan of a custom_jvp whose rule hides work in jit."""
    def scan_apply(f, x):
      y, _ = lax.scan(lambda x, _: (f(x), None), x, None, length=1)
      return y
    @jax.custom_jvp
    def sin(x):
      return jnp.sin(x)
    def sin_jvp(primals, tangents):
      x, = primals
      xdot, = tangents
      # The primal/cotangent pair is computed inside a nested jit call.
      y, c = jax.jit(lambda: (jnp.sin(x), jnp.cos(x)))()
      ydot = c * xdot
      return y, ydot
    sin.defjvp(sin_jvp)
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    f = new_checkpoint(partial(scan_apply, sin), policy=save_cos)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
    save_sin = lambda prim, *_, **__: str(prim) == 'sin'
    f = new_checkpoint(partial(scan_apply, sin), policy=save_sin)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 1)
    f = new_checkpoint(partial(scan_apply, sin),
                       policy=jax.checkpoint_policies.everything_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
    f = new_checkpoint(partial(scan_apply, sin),
                       policy=jax.checkpoint_policies.nothing_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 1)  # +1 b/c dce fixed point
    self.assertEqual(jaxpr_text.count(' cos '), 1)
    f = new_checkpoint(lambda x: scan_apply(sin, scan_apply(sin, x)),
                       policy=jax.checkpoint_policies.nothing_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 2)  # +1 b/c dce fixed point
    self.assertEqual(jaxpr_text.count(' cos '), 2)
  def test_remat_of_scan_funky_custom_jvp2(self):
    """Variant of the above where the custom JVP rule hides its work in a scan."""
    # Like the above test but instead of using jit inside custom_jvp, use scan.
    def scan_apply(f, x):
      y, _ = lax.scan(lambda x, _: (f(x), None), x, None, length=1)
      return y
    @jax.custom_jvp
    def sin(x):
      return jnp.sin(x)
    def sin_jvp(primals, tangents):
      x, = primals
      xdot, = tangents
      y, c = scan_apply(lambda xs: (jnp.sin(xs[0]), jnp.cos(xs[1])), (x, x))
      ydot = c * xdot
      return y, ydot
    sin.defjvp(sin_jvp)
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    f = new_checkpoint(partial(scan_apply, sin), policy=save_cos)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 1)  # +1 b/c dce fixed point
    self.assertEqual(jaxpr_text.count(' cos '), 0)
    save_sin = lambda prim, *_, **__: str(prim) == 'sin'
    f = new_checkpoint(partial(scan_apply, sin), policy=save_sin)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 1)
    f = new_checkpoint(partial(scan_apply, sin),
                       policy=jax.checkpoint_policies.everything_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
    f = new_checkpoint(partial(scan_apply, sin),
                       policy=jax.checkpoint_policies.nothing_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 1)  # +1 b/c dce fixed point
    self.assertEqual(jaxpr_text.count(' cos '), 1)
    f = new_checkpoint(lambda x: scan_apply(sin, scan_apply(sin, x)),
                       policy=jax.checkpoint_policies.nothing_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 2)  # +1 b/c dce fixed point
    self.assertEqual(jaxpr_text.count(' cos '), 2)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_remat_of_cond(self, remat):
    """remat applied around lax.cond: grads and expected primitive residuals."""
    true_fn  = lambda c: (jnp.sin(c), jnp.sin(c))
    false_fn = lambda c: (jnp.sin(c), jnp.sin(c))
    f = lambda x: lax.cond(x > 0., true_fn, false_fn, x)
    jtu.check_grads(remat(f), (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(remat(f), 4.)[1])(1.)
    self.assertNotIn(' sin ', str(jaxpr))
    self.assertIn(' cos ', str(jaxpr))
    # Nested-sin branches: sin survives in the tangent jaxpr here.
    true_fn  = lambda c: jnp.sin(jnp.sin(c))
    false_fn = lambda c: c
    f = lambda x: lax.cond(x > 0., true_fn, false_fn, x)
    jtu.check_grads(remat(f), (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(remat(f), 4.)[1])(1.)
    self.assertIn(' sin ', str(jaxpr))
    self.assertIn(' cos ', str(jaxpr))
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_const_in_jvp_cond(self, remat):
    """A constant (jnp.arange) appearing in a custom JVP rule under cond doesn't crash."""
    @jax.custom_jvp
    def f(x):
      return x * jnp.arange(3.)
    @f.defjvp
    def f_jvp(primals, tangents):
      (x,), (xdot,) = primals, tangents
      return f(x), xdot * jnp.arange(3.)
    @remat
    def g(x):
      y = jax.lax.cond(x.sum() > 0, f, lambda x: x, x)
      return y.sum()
    jax.grad(g)(jnp.arange(3.))  # doesn't crash
  def test_remat_checkpoint_dots_inside_cond(self):
    """checkpoint_dots policy applied to one branch of a lax.cond."""
    x = jnp.ones((5,))
    def f(W):
      @partial(jax.remat, policy=jax.checkpoint_policies.checkpoint_dots)
      def f(x):
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        return x
      return lax.cond(x.sum() > 0, f, lambda x: x, x)
    _, f_vjp = api.vjp(f, jnp.ones((5, 5)))
    jaxpr_text = str(f_vjp.args[0].func.args[1])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' cos '), 3)
    # Five calls to dot_general in the backward pass because we have two for
    # each forward-pass dot, except for the first which only has one (as we are
    # differentiating with respect to only W and not x).
    self.assertEqual(jaxpr_text.count(' dot_'), 5)
    jtu.check_grads(api.jit(f), (jnp.ones((5, 5)),), order=2,
                    modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots_outside_cond(self):
    """checkpoint_dots policy applied around (not inside) a lax.cond."""
    # see also above test test_remat_checkpoint_dots_inside_cond
    # The behavior between the two tests is essentially identical, whereas for
    # scan different things are saved based on this difference in remat
    # placement (because of the carry).
    x = jnp.ones((5,))
    @partial(new_checkpoint, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(W):
      def f(x):
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        return x
      return lax.cond(x.sum() > 0, f, lambda x: x, x)
    _, f_vjp = api.vjp(f, jnp.ones((5, 5)))
    jaxpr = f_vjp.args[0].func.args[1]
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' cos '), 3)
    self.assertEqual(jaxpr_text.count(' dot_'), 5)
    jtu.check_grads(api.jit(f), (jnp.ones((5, 5)),), order=2,
                    modes=['fwd', 'rev'])
  def test_remat_of_cond_policy(self):
    """A save-cos policy on remat-of-cond removes sin/cos from the tangent jaxpr."""
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    f = new_checkpoint(lambda x: lax.cond(x > 0, jnp.sin, lambda x: x, x),
                       policy=save_cos)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
  def test_remat_of_cond_funky_custom_jvp(self):
    """Policies over remat-of-cond of a custom_jvp whose rule hides work in jit."""
    def cond_apply(f, x):
      # Condition is always true; just routes f through lax.cond.
      return lax.cond(x.sum() > -jnp.inf, f, lambda x: x, x)
    @jax.custom_jvp
    def sin(x):
      return jnp.sin(x)
    def sin_jvp(primals, tangents):
      x, = primals
      xdot, = tangents
      y, c = jax.jit(lambda: (jnp.sin(x), jnp.cos(x)))()
      ydot = c * xdot
      return y, ydot
    sin.defjvp(sin_jvp)
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    f = new_checkpoint(partial(cond_apply, sin), policy=save_cos)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
    save_sin = lambda prim, *_, **__: str(prim) == 'sin'
    f = new_checkpoint(partial(cond_apply, sin), policy=save_sin)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 1)
    f = new_checkpoint(partial(cond_apply, sin),
                       policy=jax.checkpoint_policies.everything_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
    f = new_checkpoint(partial(cond_apply, sin),
                       policy=jax.checkpoint_policies.nothing_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 1)
    f = new_checkpoint(lambda x: cond_apply(sin, cond_apply(sin, x)),
                       policy=jax.checkpoint_policies.nothing_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 1)
    self.assertEqual(jaxpr_text.count(' cos '), 2)
  def test_remat_of_cond_funky_custom_jvp2(self):
    """Variant of the above where the custom JVP rule hides its work in a cond."""
    # Like the above test but instead of using jit inside custom_jvp, use cond.
    def cond_apply(f, x):
      return lax.cond(True, f, lambda x: x, x)
    @jax.custom_jvp
    def sin(x):
      return jnp.sin(x)
    def sin_jvp(primals, tangents):
      x, = primals
      xdot, = tangents
      y, c = cond_apply(lambda xs: (jnp.sin(xs[0]), jnp.cos(xs[1])), (x, x))
      ydot = c * xdot
      return y, ydot
    sin.defjvp(sin_jvp)
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    f = new_checkpoint(partial(cond_apply, sin), policy=save_cos)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
    save_sin = lambda prim, *_, **__: str(prim) == 'sin'
    f = new_checkpoint(partial(cond_apply, sin), policy=save_sin)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 1)
    f = new_checkpoint(partial(cond_apply, sin),
                       policy=jax.checkpoint_policies.everything_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 0)
    f = new_checkpoint(partial(cond_apply, sin),
                       policy=jax.checkpoint_policies.nothing_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 0)
    self.assertEqual(jaxpr_text.count(' cos '), 1)
    f = new_checkpoint(lambda x: cond_apply(sin, cond_apply(sin, x)),
                       policy=jax.checkpoint_policies.nothing_saveable)
    jtu.check_grads(f, (3.,), order=2, modes=['rev'])
    jaxpr = api.make_jaxpr(api.linearize(f, 4.)[1])(1.)
    jaxpr_text = str(jaxpr)
    self.assertEqual(jaxpr_text.count(' sin '), 1)
    self.assertEqual(jaxpr_text.count(' cos '), 2)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def test_remat_of_while_loop(self, remat):
    # remat around a while_loop: residuals cannot be stashed across loop
    # iterations, so the linearized computation must recompute the primal
    # ('sin') and apply the tangent op ('cos') inside the staged jaxpr.
    def cond_fn(carry):
      i, _ = carry
      return i < 3
    def body_fn(carry):
      i, x = carry
      return i + 1, jnp.sin(x)
    def f(x):
      _, y = lax.while_loop(cond_fn, body_fn, (0, x))
      return y

    _, f_lin = jax.linearize(remat(f), 3.)
    y_dot = f_lin(1.0)
    # The loop body runs three times (i = 0, 1, 2), so f == sin∘sin∘sin.
    expected = jax.grad(lambda x: jnp.sin(jnp.sin(jnp.sin(x))))(3.)
    self.assertArraysAllClose(y_dot, expected, check_dtypes=False)

    jaxpr = api.make_jaxpr(jax.linearize(remat(f), 4.)[1])(1.)
    # Both the recomputed primal and the tangent op appear in the jaxpr.
    self.assertIn(' sin ', str(jaxpr))
    self.assertIn(' cos ', str(jaxpr))
  def test_remat_of_while_loop_policy(self):
    # Like test_remat_of_while_loop, but with a save-'cos' rematerialization
    # policy. The policy cannot take effect across while_loop iterations (the
    # iteration count is dynamic), so the jaxpr looks the same as without it.
    def cond_fn(carry):
      i, _ = carry
      return i < 3
    def body_fn(carry):
      i, x = carry
      return i + 1, jnp.sin(x)
    def f(x):
      _, y = lax.while_loop(cond_fn, body_fn, (0, x))
      return y

    # even with a policy, we can't save residuals (w/o dynamic shapes)!
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    g = new_checkpoint(f, policy=save_cos)
    jaxpr = api.make_jaxpr(jax.linearize(g, 4.)[1])(1.)
    # Primal recompute ('sin') and tangent op ('cos') both still present.
    self.assertIn(' sin ', str(jaxpr))
    self.assertIn(' cos ', str(jaxpr))
def test_remat_residual_logging(self):
def f(x):
x = jnp.sin(x)
x = jnp.cos(x.sum())
return x
x = jnp.arange(3.)
f1 = jax.remat(f)
f2 = jax.remat(f, policy=lambda *_, **__: True)
f3 = jax.remat(f, policy=lambda p, *_, **__: str(p) == 'cos')
prev_level = logging.get_verbosity()
try:
logging.set_verbosity('DEBUG')
with self.assertLogs(level=logging.DEBUG) as l:
jax.grad(f1)(x)
finally:
logging.set_verbosity(prev_level)
self.assertTrue(any('remat-decorated function saving inputs with shapes:'
in line for line in l.output))
self.assertFalse(any('intermediates' in line for line in l.output))
prev_level = logging.get_verbosity()
try:
logging.set_verbosity('DEBUG')
with self.assertLogs(level=logging.DEBUG) as l:
jax.grad(f2)(x)
finally:
logging.set_verbosity(prev_level)
self.assertFalse(any('saving inputs' in line for line in l.output))
self.assertTrue(any('remat-decorated function saving these intermediates:'
in line for line in l.output))
self.assertTrue(any(' sin ' in line for line in l.output))
self.assertTrue(any(' cos ' in line for line in l.output))
prev_level = logging.get_verbosity()
try:
logging.set_verbosity('DEBUG')
with self.assertLogs(level=logging.DEBUG) as l:
jax.grad(f3)(x)
finally:
logging.set_verbosity(prev_level)
self.assertTrue(any('remat-decorated function saving inputs with shapes:'
in line for line in l.output))
self.assertTrue(any('and saving these intermediates:'
in line for line in l.output))
self.assertFalse(any(' sin ' in line for line in l.output))
self.assertTrue(any(' cos ' in line for line in l.output))
  def test_excess_precision_hell(self):
    # With a save-everything remat policy, bfloat16 residuals must not be
    # round-tripped through higher (float32) precision between the forward
    # and backward passes: the primal output and the cotangent must match
    # bit-for-bit (atol=0, rtol=0).
    finfo = jnp.finfo('bfloat16')
    # eps is the gap between 1 and the next representable bfloat16; 1 + eps
    # would round differently if carried at excess precision.
    eps = finfo.eps

    @jax.custom_vjp
    def dot(x):
      return jnp.dot(x, x)
    def dot_fwd(x):
      return dot(x), None
    def dot_bwd(_, g):
      return g,
    dot.defvjp(dot_fwd, dot_bwd)

    @jax.custom_vjp
    def foo(x):
      # Upcast to float32; the backward rule does the same to the residual.
      return jnp.float32(1.) * x.astype('float32')
    def foo_fwd(x):
      return foo(x), x
    def foo_bwd(x, _):
      return jnp.float32(1.) * x.astype('float32'),
    foo.defvjp(foo_fwd, foo_bwd)

    @jax.jit
    @partial(jax.remat, policy=lambda *_, **__: True)
    def f(x):
      x = dot(x)
      return foo(x)

    x = (jnp.bfloat16(1) + eps) * jnp.eye(2, dtype='bfloat16')
    y, vjp = jax.vjp(f, x)
    y_, = vjp(jnp.ones_like(y))
    # Exact equality: any excess-precision leak shows up as a bit difference.
    self.assertAllClose(y, y_, atol=0, rtol=0)
def test_concreteness_error_includes_user_code(self):
@jax.remat
def f(x):
if x > 0:
return x
else:
return jnp.sin(x)
try:
f(3.)
except TracerBoolConversionError:
self.assertIn('x > 0', traceback.format_exc())
else:
assert False
@jtu.with_config(jax_pprint_use_color=False)
class JaxprTest(jtu.JaxTestCase):
  """Tests of api.make_jaxpr: staging, constants, pretty-printing, metadata."""

  def test_scalar_literals(self):
    # Scalar literals are inlined into equations, not hoisted to constvars.
    jaxpr = api.make_jaxpr(lambda x: x + 2)(42)
    self.assertLen(jaxpr.jaxpr.constvars, 0)

  def test_abstract_inputs(self):
    # Any object with .shape/.dtype can serve as an abstract input spec.
    jaxpr = api.make_jaxpr(lambda x: x + 2.)(
        types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32)))
    self.assertEqual(jaxpr.in_avals[0].shape, ())
    self.assertEqual(jaxpr.in_avals[0].dtype, np.float32)

  def test_const(self):
    # Array constants become constvars ('a' below); scalars stay inline.
    def fun(x):
      return (x, 1., np.zeros(1, dtype=jnp.float32))

    expected = "{ lambda a:f32[1]; b:f32[]. let in (b, 1.0, a) }"
    jaxpr = api.make_jaxpr(fun)(jnp.float32(0.))
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))

  def test_cond(self):
    def f(x):
      return lax.cond(x >= 0.,
                      x + 1.,
                      lambda xt: xt + x,
                      x + 2.,
                      lambda xf: xf - x)
    # Both branches are staged out; the boolean predicate is converted to an
    # int32 branch index ('e' below).
    expected = """{ lambda ; a:f32[]. let
    b:bool[] = ge a 0.0
    c:f32[] = add a 1.0
    d:f32[] = add a 2.0
    e:i32[] = convert_element_type[new_dtype=int32 weak_type=False] b
    f:f32[] = cond[
      branches=(
        { lambda ; g_:f32[] h:f32[] i:f32[] j:f32[]. let
            k:f32[] = sub j h
          in (k,) }
        { lambda ; l:f32[] m_:f32[] n:f32[] o:f32[]. let
            p:f32[] = add n l
          in (p,) }
      )
    ] e a a c d
  in (f,) }"""
    jaxpr = api.make_jaxpr(f)(jnp.float32(3.))
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))

  def test_make_jaxpr_static_argnums(self):
    # Static arguments are baked into the jaxpr as literals.
    def f(x, y):
      return x + y

    jaxpr = api.make_jaxpr(f, static_argnums=(1,))(2, 3)
    self.assertIn('3', str(jaxpr))

  def test_make_jaxpr_return_shape(self):
    # return_shape=True additionally yields a pytree of ShapeDtypeStructs.
    _, shape_tree = api.make_jaxpr(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
                                   return_shape=True)(jnp.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)

  def test_make_jaxpr_axis_env(self):
    # axis_env lets collectives over named axes be staged without a pmap.
    def f(x):
      return x - lax.psum(x, 'i')

    jaxpr = api.make_jaxpr(f, axis_env=[('i', 4)])(2)
    self.assertIn('psum', str(jaxpr))

  def test_weak_type_jit_invariance(self):
    y = jnp.broadcast_to(3., (3,))
    self.assertTrue(y.aval.weak_type)

    def f():
      return lax.convert_element_type(y, 'float32')

    # jit must not change the weak_type of the result.
    self.assertEqual(f().aval.weak_type, api.jit(f)().aval.weak_type)

  def test_elide_trivial_convert_element_types(self):
    # since we apply convert_element_type to a numpy.ndarray, the primitive is
    # still bound and thus would appear in the jaxpr if we didn't clean it up
    if config.enable_x64.value:
      x = np.arange(3, dtype='float64')
    else:
      x = np.arange(3, dtype='float32')
    cet = partial(lax.convert_element_type, new_dtype=x.dtype)
    jaxpr = api.make_jaxpr(lambda: cet(cet(cet(x))))()
    self.assertLen(jaxpr.eqns, 0)

  def test_elide_trivial_broadcasts(self):
    # since we apply broadcast to a numpy.ndarray, the primitive is still bound
    # and thus would appear in the jaxpr if we didn't clean it up
    jaxpr = api.make_jaxpr(lambda: lax.broadcast(np.float32(3), ()))()
    self.assertLen(jaxpr.jaxpr.eqns, 0)

  def test_convert_element_type_literal_constant_folding(self):
    # this convert_element_type is nontrivial, but because it's on a scalar we
    # constant-fold it
    cet = partial(lax.convert_element_type, new_dtype='float16')
    jaxpr = api.make_jaxpr(lambda: cet(3.))()
    self.assertLen(jaxpr.eqns, 0)

  def test_eqn_repr_with_no_lhs(self):
    # An equation with no outputs (debug_callback) must still pretty-print.
    def f(x):
      jax.debug.print("{}", x)
      return x
    jaxpr = jax.make_jaxpr(f)(np.int32(0))
    self.assertEqual(jaxpr.eqns[0].primitive, debugging.debug_callback_p)
    self.assertStartsWith(str(jaxpr.eqns[0]), "debug_callback[", )
class DCETest(jtu.JaxTestCase):
  """Tests of dead-code elimination on jaxprs (pe.dce_jaxpr)."""

  def assert_dce_result(self, jaxpr: core.Jaxpr, used_outputs: list[bool],
                        expected_used_inputs: list[bool],
                        expected_num_eqns: int | None = None,
                        check_diff: bool = True):
    # DCE the jaxpr w.r.t. the requested outputs, then check (1) which inputs
    # survive, (2) optionally the total equation count (including subjaxprs),
    # (3) that the pruned jaxpr agrees with the original on kept outputs, and
    # (4) optionally that the pruned jaxpr is still differentiable.
    jaxpr_dce, used_inputs = pe.dce_jaxpr(jaxpr, used_outputs)
    core.check_jaxpr(jaxpr_dce)
    self.assertEqual(used_inputs, expected_used_inputs)
    if expected_num_eqns is not None:
      # Count equations in the DCE'd jaxpr and all of its subjaxprs.
      all_jaxprs = it.chain([jaxpr_dce], core.subjaxprs(jaxpr_dce))
      num_eqns = sum(len(subjaxpr.eqns) for subjaxpr in all_jaxprs)
      self.assertEqual(num_eqns, expected_num_eqns, msg=str(jaxpr_dce))

    # Semantics check on random (small) inputs.
    rand_ = jtu.rand_small(np.random.RandomState(0))
    rand  = lambda v: rand_(v.aval.shape, v.aval.dtype)
    consts = [rand(v) for v in jaxpr.constvars]
    inputs = [rand(v) for v in jaxpr.invars ]
    inputs_dce = [x for x, used in zip(inputs, used_inputs) if used]
    full_outs = core.eval_jaxpr(jaxpr , consts, *inputs)
    expected_outs_dce = [y for y, used in zip(full_outs, used_outputs) if used]
    outs = core.eval_jaxpr(jaxpr_dce, consts, *inputs_dce)
    self.assertAllClose(outs, expected_outs_dce)

    if check_diff and expected_num_eqns != 0:
      f = lambda *args: core.eval_jaxpr(jaxpr_dce, consts, *args)
      jtu.check_grads(f, inputs_dce, order=2, modes=['rev'])

  def test_dce_jaxpr_scan_nontrivial_fixedpoint_carry(self):
    # The idea is that each element of the output carry tuple depends on the
    # corresponding carried input as well as the one to the left. The extensive
    # inputs and outputs aren't used here; just the carry depending on itself.
    def f(lst):
      def body(c, _):
        return [c[0]] + [c1 + c2 for c1, c2 in zip(c[:-1], c[1:])], None
      out, _ = jax.lax.scan(body, lst, None, length=len(lst))
      return out
    jaxpr = api.make_jaxpr(f)([1., 2., 3., 4.]).jaxpr
    self.assertLen(jaxpr.eqns, 1)
    self.assertLen(jaxpr.eqns[0].params['jaxpr'].jaxpr.eqns, 3)

    # If we use all but the last element, all but the first input is used, and
    # only one eqn is pruned.
    self.assert_dce_result(
        jaxpr,    used_outputs=[True, True, True, False],
        expected_used_inputs=[True, True, True, False],
        expected_num_eqns=1 + 2)  # one outer scan eqn, two adds in the body

    # Same as above if we just pull on the third element.
    self.assert_dce_result(
        jaxpr,    used_outputs=[False, False, True, False],
        expected_used_inputs=[True, True, True, False],
        expected_num_eqns=1 + 2)  # one outer scan eqn, two adds in the body

    # If we use all but the last two elements, the last two inputs are not used,
    # and two eqns can be pruned.
    self.assert_dce_result(
        jaxpr,    used_outputs=[True, True, False, False],
        expected_used_inputs=[True, True, False, False],
        expected_num_eqns=1 + 1)  # one outer scan eqn, one add in body

    # If we only use the last element, no eqns can be pruned.
    self.assert_dce_result(
        jaxpr,    used_outputs=[False, False, False, True],
        expected_used_inputs=[True, True, True, True],
        expected_num_eqns=1 + 3)  # one outer scan eqn, three adds in body

  def test_dce_jaxpr_scan_nontrivial_fixedpoint_carry_2(self):
    # This is much like the above test, except with a more interesting
    # dependence structure among the carry elements. Also add a const and
    # extensive input.
    hidden_sequence = [1, 2, 3, 5, 8]
    def f(lst):
      def body(c, _):
        _ = jnp.sin(np.array([3., 1., 4.]))
        sub_c = [c[i] for i in hidden_sequence]
        sub_c = [sub_c[0]] + [c1 * c2 for c1, c2 in zip(sub_c[:-1], sub_c[1:])]
        new_c = list(c)
        for i, elt in zip(hidden_sequence, sub_c):
          new_c[i] = elt
        return new_c, None
      out, _ = jax.lax.scan(body, lst, np.arange(len(lst), dtype='float32'))
      return out
    jaxpr = api.make_jaxpr(f)([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]).jaxpr
    self.assertLen(jaxpr.eqns, 1)
    self.assertLen(jaxpr.eqns[0].params['jaxpr'].jaxpr.eqns, 5)

    # If we use the value at index 8 only, all the hidden sequence must be kept
    # and no eqns can be pruned.
    used_outputs = [False] * 10
    used_outputs[8] = True
    expected_used_inputs = [False] * 10
    for i in hidden_sequence:
      expected_used_inputs[i] = True
    self.assert_dce_result(
        jaxpr,    used_outputs=used_outputs,
        expected_used_inputs=expected_used_inputs,
        expected_num_eqns=1 + 4)

    # If we use the value at any indices not in the hidden sequence, none of the
    # hidden sequence must be kept and we can prune all body eqns.
    used_outputs = [False] * 10
    expected_used_inputs = [False] * 10
    used_outputs[9] = expected_used_inputs[9] = True
    self.assert_dce_result(
        jaxpr,    used_outputs=used_outputs,
        expected_used_inputs=expected_used_inputs,
        expected_num_eqns=1)  # 1 b/c scan doesn't have fwding rule
    used_outputs[7] = expected_used_inputs[7] = True
    used_outputs[6] = expected_used_inputs[6] = True
    self.assert_dce_result(
        jaxpr,    used_outputs=used_outputs,
        expected_used_inputs=expected_used_inputs,
        expected_num_eqns=1)

    # If we use the value at index 3 only, some of the hidden sequence must be
    # kept but the rest pruned.
    used_outputs = [False] * 10
    used_outputs[3] = True
    expected_used_inputs = [False] * 10
    expected_used_inputs[1] = expected_used_inputs[2] = \
        expected_used_inputs[3] = True
    self.assert_dce_result(
        jaxpr,    used_outputs=used_outputs,
        expected_used_inputs=expected_used_inputs,
        expected_num_eqns=1 + 2)

  def test_dce_jaxpr_scan_nontrivial_fixedpoint_extensive_output(self):
    # Here we test how using the extensive output affects the carry.
    def f(lst):
      def body(c, _):
        return [c[-1], *c[:-1]], c[-1]
      _, ys = jax.lax.scan(body, lst, None, length=len(lst))
      return ys
    jaxpr = api.make_jaxpr(f)([1., 2., 3., 4.]).jaxpr
    self.assertLen(jaxpr.eqns, 1)

    # If we only use the extensive output, all carry elements are needed, and we
    # need to keep the scan itself.
    self.assert_dce_result(
        jaxpr,    used_outputs=[True],
        expected_used_inputs=[True, True, True, True],
        expected_num_eqns=1)

    # If we don't use the extensive output, no carry elements are needed, and we
    # don't need to keep the scan.
    self.assert_dce_result(
        jaxpr,    used_outputs=[False],
        expected_used_inputs=[False, False, False, False],
        expected_num_eqns=0)

  def test_dce_jaxpr_scan_extensive_input(self):
    # Here we test an extensive input affecting the carry.
    def cumprod(xs):
      def body(c, x):
        return c * x, c
      c, ys = jax.lax.scan(body, jnp.float32(1.), xs)
      return c, ys
    jaxpr = api.make_jaxpr(cumprod)(jnp.arange(1., 5., dtype='float32')).jaxpr

    # If we only use the carry output or extensive output, we need the input.
    self.assert_dce_result(
        jaxpr,    used_outputs=[True, False],
        expected_used_inputs=[True],
        expected_num_eqns=2)
    self.assert_dce_result(
        jaxpr,    used_outputs=[False, True],
        expected_used_inputs=[True],
        expected_num_eqns=2)

    # If we don't use either output, the scan is eliminated.
    self.assert_dce_result(
        jaxpr,    used_outputs=[False, False],
        expected_used_inputs=[False],
        expected_num_eqns=0)

  def test_dce_jaxpr_scan_overpruning(self):
    # This is a regression test for a specific issue.
    @jax.remat
    def scanned_f(c, x):
      out = jnp.tanh(c * x)
      return out, out

    def f(xs):
      return lax.scan(scanned_f, jnp.array(1., 'float32'), xs)

    xs = jnp.arange(10., dtype='float32')
    jaxpr = api.make_jaxpr(lambda xs: api.linearize(f, xs)[1])(xs).jaxpr
    jaxpr, used_inputs = pe.dce_jaxpr(jaxpr, [True] * len(jaxpr.outvars))
    # DCE must not prune away the scan's tangent computation.
    self.assertLen(jaxpr.eqns, 1)
    self.assertLen(jaxpr.eqns[-1].params['jaxpr'].jaxpr.eqns, 2)

  def test_dce_jaxpr_scan_const_in_jvp(self):
    # The main point of this test is to check for a crash.
    @jax.custom_jvp
    def f(x):
      return x * np.arange(3.)
    @f.defjvp
    def f_jvp(primals, tangents):
      (x,), (xdot,) = primals, tangents
      return f(x), xdot * np.arange(3.)

    def g(x):
      def body(c, _):
        return f(c), None
      y, _ = jax.lax.scan(body, x, None, length=1)
      return y

    jaxpr = api.make_jaxpr(lambda x, xdot: api.jvp(g, (x,), (xdot,))
                           )(np.arange(3.), np.arange(3.)).jaxpr

    self.assert_dce_result(
        jaxpr,    used_outputs=[True, True],
        expected_used_inputs=[True, True])

    self.assert_dce_result(
        jaxpr,    used_outputs=[True, False],
        expected_used_inputs=[True, False])

  def test_dce_jaxpr_scan_results(self):
    # This doesn't test whether DCE is doing nontrivial work; instead it tests
    # whether the result after applying DCE computes different values. If
    # dce_jaxpr were an identity function, it'd pass this test!
    def f(cs, xs):
      def body(c, x):
        return (c[0], c[0] + c[1], jnp.arange(3.)), x
      cs, xs = jax.lax.scan(body, cs, xs)
      return cs[::2], xs[::2]
    cs = 1., 2., jnp.arange(3.)
    xs = jnp.arange(3.), jnp.arange(3.) + 5
    jaxpr_ = jax.make_jaxpr(f)(cs, xs)
    jaxpr, consts = jaxpr_.jaxpr, jaxpr_.consts
    jaxpr_pruned, used_inputs = pe.dce_jaxpr(jaxpr, [True] * len(jaxpr.outvars))

    # Pruned and unpruned jaxprs must evaluate to the same values.
    args = (*cs, *xs)
    result1 = core.eval_jaxpr(jaxpr       , consts, *cs, *xs)
    pruned_args = [x for x, used in zip(args, used_inputs) if used]
    result2 = core.eval_jaxpr(jaxpr_pruned, consts, *pruned_args)
    self.assertAllClose(result1, result2)

  def test_dce_jaxpr_cond_trivial(self):
    x = jnp.array(1., dtype='float32')

    # start with 7 eqns, use both outputs so nothing can be pruned
    def f(x1, x2):
      return lax.cond(x1 > 0,
                      lambda x1, x2: (jnp.sin(x1), jnp.sin(x2)),
                      lambda x1, x2: (jnp.sin(x1), jnp.sin(x2)),
                      x1, x2)
    jaxpr = jax.make_jaxpr(f)(x, x).jaxpr
    self.assert_dce_result(jaxpr, [True, True], [True, True], 7)

    # use neither output so everything can be pruned
    self.assert_dce_result(jaxpr, [False, False], [False, False], 0)

  def test_dce_jaxpr_cond_nontrivial(self):
    x = jnp.array(1., dtype='float32')

    # start with 7 eqns, don't use an output so an eqn can be trimmed on each
    # side and x2 _can_ be pruned
    def f(x1, x2):
      return lax.cond(x1 > 0,
                      lambda x1, x2: (jnp.sin(x1), jnp.sin(x2)),
                      lambda x1, x2: (jnp.sin(x1), jnp.sin(x1)),
                      x1, x2)
    jaxpr = jax.make_jaxpr(f)(x, x).jaxpr
    self.assert_dce_result(jaxpr, [True, False], [True, False], 5)

    # start with 7 eqns, don't use an output so an eqn can be trimmed on each
    # side, but x2 _can't_ be pruned b/c of a swap
    def f(x1, x2):
      return lax.cond(x1 > 0,
                      lambda x1, x2: (jnp.sin(x1), jnp.sin(x2)),
                      lambda x1, x2: (jnp.sin(x2), jnp.sin(x1)),
                      x1, x2)
    jaxpr = jax.make_jaxpr(f)(x, x).jaxpr
    self.assert_dce_result(jaxpr, [True, False], [True, True], 5)

    # start with 7 eqns, only use x1 on one side and x2 on the other, so we
    # can't prune any inputs or eqns
    def f(x1, x2):
      return lax.cond(x1 > 0,
                      lambda x1, x2: (jnp.sin(x1), jnp.sin(x1)),
                      lambda x1, x2: (jnp.sin(x2), jnp.sin(x2)),
                      x1, x2)
    jaxpr = jax.make_jaxpr(f)(x, x).jaxpr
    self.assert_dce_result(jaxpr, [True, True], [True, True], 7)
    # use only one output, so we can prune eqns but not inputs
    self.assert_dce_result(jaxpr, [True, False], [True, True], 5)
class CustomJVPTest(jtu.JaxTestCase):
def test_basic(self):
@jax.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
x = 3.
self.assertAllClose(f(x), jnp.sin(x))
self.assertAllClose(api.jvp(f, (x,), (1.,)),
(jnp.sin(x), 2 * jnp.cos(x)))
self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
def test_invariance(self):
@jax.custom_jvp
def f(x):
return jnp.cos(2 * x) / 2.
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return (f(x), 3 * g)
f.defjvp(f_jvp)
def f2(x):
y, _ = api.jvp(f, (x,), (x,))
return y
def f3(x):
y, _ = api.jvp(f2, (x,), (x,))
return y
x = 1.
self.assertAllClose(api.jvp(f, (x,), (x,)),
api.jvp(f2, (x,), (x,)),
check_dtypes=False)
self.assertAllClose(api.jvp(f, (x,), (x,)),
api.jvp(f3, (x,), (x,)),
check_dtypes=False)
def test_python_control_flow(self):
@jax.custom_jvp
def f(x):
if x > 0:
return jnp.sin(x)
else:
return jnp.cos(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
if x > 0:
return f(x), 2 * g
else:
return f(x), 3 * g
f.defjvp(f_jvp)
x = 2.
self.assertAllClose(f(x), jnp.sin(x))
self.assertAllClose(f(-x), jnp.cos(-x))
self.assertAllClose(api.jvp(f, (x,), (1.,)),
(jnp.sin(x), 2.),
check_dtypes=False)
self.assertAllClose(api.jvp(f, (-x,), (1.,)),
(jnp.cos(-x), 3.),
check_dtypes=False)
self.assertAllClose(api.grad(f)(x), 2., check_dtypes=False)
self.assertAllClose(api.grad(f)(-x), 3., check_dtypes=False)
def test_vmap(self):
@jax.custom_jvp
def f(x):
assert jnp.ndim(x) == 0
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
assert jnp.ndim(x) == jnp.ndim(g) == 0
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
x = jnp.arange(3.)
xx = jnp.arange(6.).reshape(2, 3)
# vmap of f
self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))
# vmap of jvp of f
self.assertAllClose(api.vmap(lambda x: api.jvp(f, (x,), (x,)))(x),
(jnp.sin(x), 2 * jnp.cos(x) * x))
self.assertAllClose(api.vmap(api.vmap(lambda x: api.jvp(f, (x,), (x,))))(xx),
(jnp.sin(xx), 2 * jnp.cos(xx) * xx))
# jvp of vmap of f
self.assertAllClose(api.jvp(api.vmap(f), (x,), (x,)),
(jnp.sin(x), 2 * jnp.cos(x) * x))
self.assertAllClose(api.jvp(api.vmap(api.vmap(f)), (xx,), (xx,)),
(jnp.sin(xx), 2 * jnp.cos(xx) * xx))
# vmap of jvp of vmap of f
self.assertAllClose(api.vmap(lambda x: api.jvp(api.vmap(f), (x,), (x,)))(xx),
(jnp.sin(xx), 2 * jnp.cos(xx) * xx))
def test_jit(self):
@jax.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
x = 3.
# jit
self.assertAllClose(api.jit(f)(x), jnp.sin(x))
self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
# jit of jvp
self.assertAllClose(api.jit(lambda x: api.jvp(f, (x,), (x,)))(x),
(jnp.sin(x), 2 * jnp.cos(x) * x),
check_dtypes=False)
# jvp of jit
self.assertAllClose(api.jvp(api.jit(f), (x,), (x,)),
(jnp.sin(x), 2 * jnp.cos(x) * x),
check_dtypes=False)
def test_pytrees(self):
@jax.custom_jvp
def f(x):
return {'b': jnp.sin(x['a'])}
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), {'b': 2 * jnp.cos(x['a']) * g['a']}
f.defjvp(f_jvp)
x = {'a': 3.}
self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
self.assertAllClose(api.jvp(f, (x,), (x,)),
({'b': jnp.sin(x['a'])},
{'b': 2 * jnp.cos(x['a']) * x['a']}),
check_dtypes=False)
def test_kwargs(self):
# from https://github.com/jax-ml/jax/issues/1938
@jax.custom_jvp
def my_fun(x, y, c=1.):
return c * (x + y)
def my_jvp(primals, tangents):
x, y, c = primals
t_x, t_y, t_c = tangents
return my_fun(x, y, c), t_c
my_fun.defjvp(my_jvp)
f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
f(10., 5.) # doesn't crash
api.jvp(f, (10., 5.), (1., 1.)) # doesn't crash
def test_initial_style(self):
@jax.custom_jvp
def f(x):
return 3 * x
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * g
f.defjvp(f_jvp)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.grad(foo)(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.jit(foo))(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(foo))(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(foo))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(api.jit(foo)))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.jit(api.grad(foo)))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(api.grad(foo)))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap(self):
@jax.custom_jvp
def f(x):
assert jnp.ndim(x) == 0
return 3 * x
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * g
f.defjvp(f_jvp)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.vmap(foo)(jnp.ones(3))
expected = 3. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.vmap(api.jit(foo))(jnp.ones(3))
expected = 3. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.vmap(foo))(jnp.ones(3))
expected = 3. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap_with_collective(self):
@jax.custom_jvp
def f(x):
return lax.psum(x, 'foo')
@f.defjvp
def f_jvp(xs, ts):
x, = xs
t, = ts
return lax.psum(x, 'foo'), t
def g(x):
jaxpr = api.make_jaxpr(f)(x)
return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]
v = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),
out_axes=None)(jnp.arange(4.), 2.)
self.assertAllClose(v, 8.)
def test_closed_over_tracers_error_message(self):
def f(x):
@jax.custom_jvp
def g(y):
return x + y
def g_jvp(primals, tangents):
return g(x), 2 * primals[0]
g.defjvp(g_jvp)
return g(1.)
self.assertRaises(UnexpectedTracerError, lambda: api.jvp(f, (3.,), (1.,)))
self.assertRaises(UnexpectedTracerError, lambda: api.grad(f)(3.))
def test_nondiff_arg(self):
@partial(jax.custom_jvp, nondiff_argnums=(0,))
def app(f, x):
return f(x)
def app_jvp(f, primals, tangents):
(x,), (t,) = primals, tangents
return app(f, x), 3 * t
app.defjvp(app_jvp)
ans = app(lambda x: 2 * x, 1)
expected = 2
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jvp(lambda x: app(lambda y: 2 * y, x), (1.,), (1.,))
expected = (2., 3.)
self.assertAllClose(ans, expected, check_dtypes=False)
  def test_nondiff_arg_jit_tracer(self):
    # This test would pass with "final-style" JIT tracing, but that was
    # misleading: it doesn't work with "initial-style" staging, i.e. control
    # flow primitives like jax.lax.scan or even pjit. The behavior isn't very
    # useful either: instead of using nondiff_argnums here, a user can just pass
    # such inputs as ordinary arguments, and ignore the corresponding tangents.
    # Then nondiff_argnums can be reserved for (1) non jaxtype data (like a
    # string- or callable-valued argument which parameterizes the function or
    # rule) or (2) static data (e.g. integers which parameterize shapes).
    raise unittest.SkipTest("behavior no longer supported")

    # NOTE: everything below is intentionally unreachable; it documents the
    # formerly-supported usage being skipped above.
    @partial(jax.custom_jvp, nondiff_argnums=(0,))
    def f(x, y):
      return x * y
    def f_jvp(x, primals, tangents):
      (y,), (t_y,) = primals, tangents
      return f(x, y), 5 * t_y
    f.defjvp(f_jvp)

    @jit
    def g(x, y):
      return f(x, y)

    ans = api.jvp(lambda y: g(2., y), (3.,), (1.,))
    expected = (6., 5.)
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg_vmap_tracer(self):
@partial(jax.custom_jvp, nondiff_argnums=(0,))
def f(x, y):
return x * y
def f_jvp(x, primals, tangents):
(y,), (t_y,) = primals, tangents
return f(x, y), 5 * t_y
f.defjvp(f_jvp)
g = jax.vmap(f)
ans = api.jvp(lambda y: g(jnp.array([2.]), y),
(jnp.array([3.]),), (jnp.array([1.]),))
expected = (jnp.array([6.]), jnp.array([5.]))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg_hiding_jvp_tracer(self):
def f(x):
@partial(jax.custom_jvp, nondiff_argnums=(0,))
def g(h, x):
return h(x)
@g.defjvp
def g_jvp(h, primals, tangents):
x, = primals
t, = tangents
return g(h, x), 2. * t
h = lambda y: x + y # capture x
return g(h, x)
with self.assertRaises(UnexpectedTracerError):
api.jvp(f, (2.,), (1.,))
  def test_vmap_axes(self):
    # Placeholder: custom_jvp interaction with vmap axis specs is untested.
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test
  def test_pmap(self):
    # Placeholder: custom_jvp interaction with pmap is untested.
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test
def test_missing_jvp_rule_error_message(self):
@jax.custom_jvp
def foo(x):
return x ** 2
self.assertRaisesRegex(
AttributeError,
r"No JVP defined for custom_jvp function foo using defjvp.",
lambda: foo(2))
self.assertRaisesRegex(
AttributeError,
r"No JVP defined for custom_jvp function foo using defjvp.",
lambda: api.jvp(foo, (2.,), (1.,)))
self.assertRaisesRegex(
AttributeError,
r"No JVP defined for custom_jvp function foo using defjvp.",
lambda: api.grad(foo)(2.))
  def test_jvp_rule_inconsistent_pytree_structures_error_message(self):
    # The JVP rule returns a tuple primal but a list tangent; the error must
    # name the rule, the function, and both pytree structures.
    @jax.custom_jvp
    def f(x):
      return (x**2,)

    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), [2 * x * t, x]

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule foo_jvp for function f "
            "must produce primal and tangent outputs "
            "with equal container (pytree) structures, but got "
            "{} and {} respectively.".format(
                jax.tree.structure((1,)),
                jax.tree.structure([1, 2]))
        ),
        lambda: api.jvp(f, (2.,), (1.,)))
  def test_primal_tangent_aval_disagreement_error_message(self):
    # The rule returns a tangent of shape (1,) for a scalar primal; the error
    # must report the expected tangent type and the offending one.
    @jax.custom_jvp
    def f(x):
      return x ** 2

    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), jnp.reshape(t, (1,))

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce primal and tangent outputs "
            "with corresponding shapes and dtypes. "
            "Expected float32[] (tangent type of float32[]) but got float32[1]."),
        lambda: api.jvp(f, (jnp.float32(2.),), (jnp.float32(1.),)))
  def test_jvp_rule_doesnt_return_pair_error_message(self):
    # https://github.com/jax-ml/jax/issues/2516
    # The rule returns a bare tangent instead of a (primal, tangent) pair;
    # the error must name the rule, the function, and the bad value.
    @jax.custom_jvp
    def f(x):
      return x ** 2

    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return t

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule foo_jvp for function f "
            "must produce a pair (list or tuple of length two) "
            "representing primal and tangent outputs, but got 1.0"),
        lambda: api.jvp(f, (2.,), (1.,)))
  def test_jvp_rule_primal_out_type_doesnt_match_primal_error_message(self):
    # https://github.com/lucidrains/flash-attention-jax/issues/7
    # Two mismatch flavors between the rule's primal output and the function's
    # output: (1) different pytree structure, (2) different shapes/dtypes.
    # Both must be reported with explicit before/after descriptions.
    def scan_apply(f, x):
      y, _ = jax.lax.scan(lambda x, _: (f(x), None), x, None, length=1)
      return y

    @jax.custom_jvp
    def f(x):
      return x

    # (1) Rule returns a 2-tuple primal for a scalar-output function.
    @f.defjvp
    def f_jvp(primals, tangents):
      (x,), (xdot,) = primals, tangents
      return (x, x), (xdot, xdot)

    x = jnp.float32(1.)
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule f_jvp for function f must produce a pair "
            "(list or tuple of length two) where the first element represents "
            "the primal output (equal in value to the output of the "
            "custom_jvp-decorated function f, and in particular of the "
            "same container/pytree structure), but instead the JVP rule "
            "output's first element had container/pytree structure:\n"
            "    (float32[], float32[])\n"
            "while the custom_jvp-decorated function f had output "
            "container/pytree structure:\n"
            "    float32[]."
        ),
        lambda: jax.jvp(lambda x: scan_apply(f, x), (x,), (x,)))

    # (2) Rule returns a primal of shape (3,) for a scalar-output function.
    @f.defjvp
    def f_jvp2(primals, tangents):
      (x,), (xdot,) = primals, tangents
      return jnp.zeros((3, *x.shape), x.dtype), xdot

    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule f_jvp2 for function f must produce a pair "
            "(list or tuple of length two) where the first element represents "
            "the primal output (equal in value to the output of the "
            "custom_jvp-decorated function f, and in particular "
            "with leaves of the same shape/dtype), but instead the JVP rule "
            "output's first element had shapes/dtypes of:\n"
            "    float32[3]\n"
            "while the custom_jvp-decorated function f had output shapes/dtypes"
            " of:\n"
            "    float32[]"
        ),
        lambda: jax.jvp(lambda x: scan_apply(f, x), (x,), (x,)))
def test_multiple_rule_invocations(self):
@jax.custom_jvp
def expit(x):
return 1 / (1 + lax.exp(-x))
@expit.defjvp
def _expit_jvp(primals, tangents):
(x,), (t,) = primals, tangents
ans = expit(x)
t_out = t * ans * (1 - ans)
return ans, t_out
def scanned_fun(c, _):
return [expit(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None
def foo(x):
zero = jnp.zeros_like(x)
c, _ = lax.scan(scanned_fun, [x, zero, zero, zero, zero], None, length=10)
return c[-1]
# just make sure these don't crash
foo(3.)
grad(foo)(3.)
grad(lambda x: jax.vmap(foo)(x).sum())(jnp.arange(3.))
def test_hard_stuff(self):
arr = jnp.ones((5, 2, 2))
api.jit(jax.vmap(jnp.linalg.det))(arr) # doesn't crash
def test_hard_stuff2(self):
@jax.custom_jvp
def f(x):
return np.zeros(x.shape, x.dtype)
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
t, = tangents
return f(x), t
# don't crash
jax.jit(jax.vmap(f))(jnp.arange(3.))
jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
jax.jvp(jax.vmap(f), (jnp.arange(3.),), (jnp.ones(3),))
def test_hard_stuff3(self):
@jax.custom_jvp
def relu(x):
return jnp.maximum(x, 0)
@relu.defjvp
def _relu_jvp(primals, tangents):
x, = primals
t, = tangents
return relu(x), lax.select(x > 0, t, lax.full_like(t, 0))
def scanned_fun(c, _):
return [relu(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None
def f(x):
zero = jnp.zeros_like(x)
c, _ = lax.scan(scanned_fun, [x, zero, zero, zero, zero], None, length=10)
return c[-1]
# don't crash
jax.jit(jax.vmap(f))(jnp.arange(3.))
jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
jax.jvp(jax.jit(jax.vmap(f)), (jnp.arange(3.),), (jnp.ones(3),))
def test_eval_shape(self):
    """eval_shape works on a custom_jvp function and on its gradient."""
    @jax.custom_jvp
    def expit(x):
        return 1 / (1 + lax.exp(-x))
    @expit.defjvp
    def _expit_jvp(primals, tangents):
        (x,), (t,) = primals, tangents
        ans = expit(x)
        t_out = t * ans * (1 - ans)
        return ans, t_out
    # don't crash
    api.eval_shape(expit, jnp.ones((2, 3)))
    api.eval_shape(api.grad(lambda x: expit(x).sum()), jnp.ones((2, 3)))
def test_jaxpr_zeros(self):
    """Regression test: grad through a scan whose body calls a custom_jvp with constant inputs."""
    # from https://github.com/jax-ml/jax/issues/2657
    @jax.custom_jvp
    def f(A, b):
        return A @ b
    def f_jvp(primals, tangents):
        A, b = primals
        dA, db = tangents
        z = f(A, b)
        # product rule for the matrix-vector product
        dz = A @ db + dA @ b
        return z, dz
    f.defjvp(f_jvp)
    def experiment(theta):
        def step(q, _):
            z = f(jnp.eye(3), jnp.ones(3) * theta)
            q += z[0]
            return q, q
        q = 0.
        q, _ = lax.scan(step, q, None, 4)
        return q
    grad(experiment)(1.)  # doesn't crash
def test_linear_in_scan(self):
    """grad through a scan of a linear custom_jvp function gives the correct value."""
    @jax.custom_jvp
    def f(x):
        return -x
    @f.defjvp
    def f_jvp(primals, tangents):
        x, = primals
        x_dot, = tangents
        # f is linear, so its JVP is f applied to the tangent
        return f(x), f(x_dot)
    def foo(x):
        out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
        return out
    ans = api.grad(foo)(3.)
    expected = -1.
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_jvps_first_rule_is_none(self):
    """defjvps accepts None for an argument's rule; grad w.r.t. the other argument still works."""
    # https://github.com/jax-ml/jax/issues/3389
    @jax.custom_jvp
    def f(x, y):
        return x ** 2 * y
    # no rule for x's tangent; rule for y's tangent only
    f.defjvps(None, lambda x_dot, primal_out, x, y: 2 * x * y * x_dot)
    ans = grad(f, 1)(2., 3.)  # doesn't crash
    expected = 12.
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_concurrent_initial_style(self):
    """grad of a scan is safe to run concurrently from multiple threads."""
    # https://github.com/jax-ml/jax/issues/3843
    def unroll(param, sequence):
        def scan_f(prev_state, inputs):
            return prev_state, jax.nn.sigmoid(param * inputs)
        return jnp.sum(jax.lax.scan(scan_f, None, sequence)[1])
    def run():
        return jax.grad(unroll)(jnp.array(1.0), jnp.array([1.0]))
    expected = run()
    # we just don't want this to crash
    n_workers = 2
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_workers) as e:
        futures = []
        for _ in range(n_workers):
            futures.append(e.submit(run))
        results = [f.result() for f in futures]
    # all threads must also agree with the single-threaded result
    for ans in results:
        self.assertAllClose(ans, expected)
def test_nondiff_argnums_vmap_tracer(self):
    """custom_jvp with nondiff_argnums works when a nondiff arg carries a vmap tracer (PRNG key)."""
    # https://github.com/jax-ml/jax/issues/3964
    @partial(jax.custom_jvp, nondiff_argnums=(0, 2))
    def sample(shape, param, seed):
        return jax.random.uniform(key=seed, shape=shape, minval=param)
    @sample.defjvp
    def sample_jvp(shape, seed, primals, tangents):
        param, = primals
        dparam, = tangents
        dparam = jnp.broadcast_to(dparam, shape)
        samples = sample(shape, param, seed)
        return samples, samples * dparam  # dummy jvp for proof of concept
    # check these don't crash
    jax.vmap(lambda seed: sample((2,3), 1., seed))(
        jax.random.split(jax.random.key(1), 10))
    jax.jvp(lambda x: sample((2, 3), x, jax.random.key(1)),
            (1.,), (1.,))
def test_fun_with_nested_calls_2(self):
    """Deeply nested custom_jvp calls closing over outer-scope values survive jit and vmap."""
    def call(f, *args):
        # wrap an arbitrary function with a trivial sum-of-tangents JVP rule
        f = jax.custom_jvp(f)
        f.defjvp(lambda primals, tangents: (f(*primals), sum(tangents)))
        return f(*args)
    def fun_with_nested_calls_2(x):
        def bar(y):
            def baz(w):
                # each call closes over a different mix of x, y, w
                q = call(lambda x: y, x)
                q = q + call(lambda: y)
                q = q + call(lambda y: w + y, y)
                q = call(lambda w: call(jnp.sin, x) * y, 1.0) + q
                return q
            return api.jit(baz)(x)
        return call(bar, x)
    # test these don't crash
    self.assertAllClose(api.jit(fun_with_nested_calls_2)(3.),
                        fun_with_nested_calls_2(3.))
    api.vmap(fun_with_nested_calls_2)(jnp.arange(3.))
def test_closure_with_vmap(self):
    """A custom_jvp defined inside a vmapped function (closing over the batched seed) works."""
    # https://github.com/jax-ml/jax/issues/3822
    alpha = np.float32(2.)
    def sample(seed):
        @jax.custom_jvp
        def f(alpha):
            return jax.random.gamma(seed, alpha, shape=[])
        @f.defjvp
        def f_jvp(primal, tangent):
            alpha = primal
            dalpha = tangent
            sample = f(alpha)
            # implicit reparameterization gradient for the gamma sampler
            partial_alpha = lax.random_gamma_grad(alpha, sample)
            return sample, partial_alpha * dalpha
        return f(alpha)
    api.vmap(sample)(jax.random.split(jax.random.key(1), 3))  # don't crash
def test_closure_with_vmap2(self):
    """A custom_jvp rule closing over a vmap tracer, applied to a non-batched argument, is correct."""
    # https://github.com/jax-ml/jax/issues/8783
    def h(z):
        def f(x):
            @jax.custom_jvp
            def g(y):
                return x * y
            # NOTE: rule closes over vmap tracer
            @g.defjvp
            def g_jvp(primals, tangents):
                (y,), (ydot,) = primals, tangents
                return x * y, x * ydot
            return g(z)  # NOTE: no vmapped arg
        return jax.vmap(f)(jnp.arange(3., dtype='float32'))
    primals, tangents = jax.jvp(h, (jnp.float32(1.),), (jnp.float32(2.),))
    self.assertAllClose(primals , jnp.arange(3., dtype='float32'))
    self.assertAllClose(tangents, 2 * jnp.arange(3., dtype='float32'))
def test_float0(self):
    """custom_jvp handles integer (non-differentiable) inputs via float0 tangents."""
    scalar_float0 = jnp.zeros((), dtype=float0)
    @jax.custom_jvp
    def f(x, y):
        return x, y
    def f_jvp(primals, _):
        x, y = primals
        # y is an int, so its tangent must be a float0 zero
        return (x, y), (2., custom_derivatives_public.zero_from_primal(y))
    f.defjvp(f_jvp)
    primals = (2., 3)
    tangents = (np.ones(()), scalar_float0)
    expected_tangents = (2., scalar_float0)
    self.assertAllClose(api.jvp(f, primals, tangents),
                        (primals, expected_tangents))
def test_float0_initial_style(self):
    """float0 tangents flow correctly through a custom_jvp used inside a scan ('initial style')."""
    scalar_float0 = jnp.zeros((), dtype=float0)
    @jax.custom_jvp
    def f(x, y):
        return x, y
    def f_jvp(primals, _):
        x, y = primals
        return (x, y), (2., custom_derivatives_public.zero_from_primal(y))
    f.defjvp(f_jvp)
    def foo(x, y):
        out, _ = lax.scan(lambda c, _: (f(*c), None), (x, y), None, length=1)
        return out
    primals = (2., 3)
    tangents = (np.ones(()), scalar_float0)
    expected_tangents = (2., scalar_float0)
    self.assertAllClose(api.jvp(foo, primals, tangents),
                        (primals, expected_tangents))
def test_remat(self):
    """remat of a composition of custom_jvp functions uses the custom rule (factor 2 per call)."""
    @jax.custom_jvp
    def f(x):
        return jnp.sin(x)
    def f_jvp(primals, tangents):
        x, = primals
        g, = tangents
        # deliberately 2x the true derivative so use of the rule is observable
        return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    @jax.remat
    def g(x):
        return f(f(x))
    ans = g(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(g)(2.)
    # two chained custom rules -> 2 * 2 = 4 times the true gradient
    expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_higher_order(self):
    """Higher-order grad through new_checkpoint matches grad without checkpointing."""
    @jax.custom_jvp
    def f(x):
        return jnp.sin(x)
    def f_jvp(primals, tangents):
        x, = primals
        g, = tangents
        # deliberately 2x the true derivative so use of the rule is observable
        return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    def g(x):
        return f(f(x))
    ans = api.grad(api.grad(new_checkpoint(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(new_checkpoint(api.grad(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.grad(api.grad(new_checkpoint(g))))(2.)
    expected = api.grad(api.grad(api.grad(g)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap_2(self):
    """Like test_initial_style_vmap, but the primal closes over an array constant; all jit/vmap orderings agree."""
    # This is like test_initial_style_vmap except the primal function closes
    # over an array constant.
    y = jnp.arange(1., 4.)
    @jax.custom_jvp
    def f(x):
        assert jnp.ndim(x) == 0
        return 3 * x * jnp.sum(y)
    def f_jvp(primals, tangents):
        x, = primals
        g, = tangents
        # custom rule deliberately disagrees with the primal (2*g), making its use observable
        return f(x), 2 * g
    f.defjvp(f_jvp)
    def foo(x):
        out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
        return out
    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_jvp_vmap_broadcasting_interaction(self):
    """value_and_grad of a vmapped function broadcasting a scalar yields scalar outputs."""
    # https://github.com/jax-ml/jax/issues/6452
    def f2(y, z):
        v1 = z
        v2 = jnp.sum(y) + z
        return jnp.logaddexp(v1, v2)
    def f1(y, z):
        # z is broadcast (not batched) across the vmap over y
        v = api.vmap(lambda _y: f2(_y, z))(y)
        return jnp.sum(v)
    y = jnp.ones((3, 2))
    f = lambda z: f1(y, z)
    z = 0.1
    val, g = api.value_and_grad(f)(z)
    self.assertEqual(val.shape, ())
    self.assertEqual(g.shape, ())
def test_custom_jvp_vmap_broadcasting_interaction_2(self):
    """grad w.r.t. a scalar broadcast through nested vmaps of a custom_jvp stays scalar-shaped."""
    # https://github.com/jax-ml/jax/issues/5849
    @jax.custom_jvp
    def transform(box, R):
        # scalar box: uniform scaling; 2-D box: general linear transform
        if jnp.isscalar(box) or box.size == 1:
            return R * box
        elif box.ndim == 2:
            return jnp.einsum('ij,j->i', box, R)
        raise ValueError()
    @transform.defjvp
    def transform_jvp(primals, tangents):
        box, R = primals
        dbox, dR = tangents
        return (transform(box, R), dR + transform(dbox, R))
    def periodic_general(box):
        def displacement_fn(Ra, Rb, **kwargs):
            _box = kwargs.get('box', box)
            return transform(_box, Ra - Rb)
        return displacement_fn
    N = 250
    scalar_box = 1.0
    displacement = periodic_general(scalar_box)
    key = jax.random.key(0)
    R = jax.random.uniform(key, (N, 2))
    def energy_fn(box):
        d = partial(displacement, box=box)
        # all-pairs displacements via a doubly-vmapped kernel
        d = api.vmap(api.vmap(d, (None, 0)), (0, None))
        return jnp.sum(d(R, R) ** 2)
    self.assertEqual(grad(energy_fn)(scalar_box).shape, ())
def test_custom_jvp_implicit_broadcasting(self):
    """A custom_jvp simplex projection has consistent jacfwd/jacrev and vmapped directional derivatives."""
    # https://github.com/jax-ml/jax/issues/6357
    if config.enable_x64.value:
        raise unittest.SkipTest("test only applies when x64 is disabled")
    @jax.custom_jvp
    def projection_unit_simplex(x: jax.Array) -> jax.Array:
        """Projection onto the unit simplex."""
        s = 1.0
        n_features = x.shape[0]
        u = jnp.sort(x)[::-1]
        cssv = jnp.cumsum(u) - s
        ind = jnp.arange(n_features, dtype=x.dtype) + 1
        cond = u - cssv / ind > 0
        idx = jnp.count_nonzero(cond)
        threshold = cssv[idx - 1] / idx.astype(x.dtype)
        return jax.nn.relu(x - threshold)
    @projection_unit_simplex.defjvp
    def projection_unit_simplex_jvp(primals, tangents):
        x, = primals
        x_dot, = tangents
        primal_out = projection_unit_simplex(x)
        # Jacobian acts on the support of the projection only
        supp = (primal_out > 0).astype(x_dot.dtype)
        card = jnp.count_nonzero(supp).astype(x_dot.dtype)
        tangent_out = supp * x_dot - (jnp.dot(supp, x_dot) / card) * supp
        return primal_out, tangent_out
    rng = self.rng()
    x = rng.rand(5).astype(np.float32)
    J_rev = jax.jacrev(projection_unit_simplex)(x)
    J_fwd = jax.jacfwd(projection_unit_simplex)(x)
    p = projection_unit_simplex(x)
    support = (p > 0).astype(jnp.float32)
    cardinality = jnp.count_nonzero(support).astype(support.dtype)
    # analytic Jacobian: diag(support) - outer(support, support) / |support|
    J_true = jnp.diag(support) - jnp.outer(support, support) / cardinality
    self.assertAllClose(J_true, J_fwd)
    self.assertAllClose(J_true, J_rev)
    proj = jax.vmap(projection_unit_simplex)
    def fun(X):
        return jnp.sum(proj(X) ** 2)
    rng = self.rng()
    X = rng.rand(4, 5).astype(np.float32)
    U = rng.rand(4, 5)
    U /= np.sqrt(np.sum(U ** 2))
    U = U.astype(np.float32)
    # compare the AD directional derivative against a central finite difference
    eps = 1e-3
    dir_deriv_num = (fun(X + eps * U) - fun(X - eps * U)) / (2 * eps)
    dir_deriv = jnp.vdot(jax.grad(fun)(X), U)
    self.assertAllClose(dir_deriv, dir_deriv_num, atol=1e-3)
def test_vmap_inside_defjvp(self):
    """Using vmap inside a defjvp rule does not crash under grad."""
    # https://github.com/jax-ml/jax/issues/3201
    seed = 47
    key = jax.random.key(seed)
    mat = jax.random.normal(key, (2, 3))
    @jax.custom_jvp
    def f(mat, aux):
        num_rows, num_cols = mat.shape
        return jnp.ones((num_rows, 1)) / num_cols
    @f.defjvp
    def f_jvp(primals, tangents):
        mat, aux = primals
        vec, _ = tangents
        output = f(*primals)
        num_rows, num_cols = mat.shape
        size = num_rows * num_cols
        # ----- build one (num_rows, num_cols) copy of mat per matrix entry
        bd_mat = mat.reshape(1, 1, num_rows, num_cols)
        bd_mat = jnp.tile(bd_mat, reps=(num_rows, num_cols))
        bd_mat = bd_mat.reshape(size, num_rows, num_cols)
        # ----- row/column sums broadcast to matching batch shapes
        rowsum = jnp.sum(mat, axis=1, keepdims=True)
        colsum = jnp.sum(mat, axis=0, keepdims=True)
        bd_rowsum = jnp.tile(rowsum, reps=(1, num_rows))
        bd_colsum = jnp.tile(colsum, reps=(num_cols, 1))
        # -----
        bd_vec = vec.reshape(size, 1)
        # -----
        def operate(mx, val):
            buf = 0
            for i in range(2):
                buf = buf + jnp.matmul(mx, bd_colsum) / jnp.power(aux, i)
            buf = jnp.matmul(bd_rowsum, buf)
            return buf * val[None, :]
        # -----
        # Vertorizing will raise shape error
        bd_buf = jax.vmap(operate, in_axes=(0, 0), out_axes=0)(bd_mat, bd_vec)
        # -----
        bd_buf = bd_buf / aux
        jvp = jnp.sum(bd_buf, axis=0)
        jvp = jnp.mean(jvp, axis=1, keepdims=True)
        # -----
        # JVP ends successfully, but still raise an error
        return (output, jvp)
    jax.grad(lambda mat, aux: jnp.sum(f(mat, aux)))(mat, 0.5)  # doesn't crash
def test_custom_jvp_unbroadcasting(self):
    """grad of a scalar->vector custom_jvp correctly sums (unbroadcasts) back to scalar shape."""
    # https://github.com/jax-ml/jax/issues/3056
    a = jnp.array([1., 1.])
    @jax.custom_jvp
    def f(x):
        return a * x
    @f.defjvp
    def f_jvp(primals, tangents):
        x, = primals
        dx, = tangents
        return a * x, a * dx
    shape = grad(lambda x: jnp.sum(f(x)))(jnp.array(1.)).shape
    self.assertEqual(shape, ())
def test_maybe_perturbed_internal_helper_function(self):
    """Internal _maybe_perturbed reports True for values derived from a perturbed input inside scan."""
    # This is a unit test for an internal API. We include it so as not to
    # regress https://github.com/jax-ml/jax/issues/9567. For an explanation of
    # this helper function, see https://github.com/jax-ml/jax/issues/6415.
    def f(x):
        def g(y, _):
            z = y * x
            # z depends on the differentiated input x, so it must be flagged as perturbed
            self.assertTrue(custom_derivatives._maybe_perturbed(z))
            return y, None
        g(1, None)
        return lax.scan(g, 1, xs=None, length=1)[0]
    jax.jvp(f, (1.0,), (1.0,))  # assertions inside f
def test_maybe_perturbed_int_regression(self):
    """closure_convert of a function closing over an int array yields no hoisted aux args."""
    # see https://github.com/jax-ml/jax/discussions/9951
    @jax.jit
    def f():
        x = jnp.array(1)
        _, aux_args = custom_derivatives.closure_convert(lambda: x)
        self.assertEmpty(aux_args)
    f()
def test_sinc_constant_function_batching(self):
    """grad through a vmapped lax.map of jnp.sinc matches an unrolled stack reference."""
    # https://github.com/jax-ml/jax/pull/10756
    batch_data = jnp.arange(15.).reshape(5, 3)
    @jax.vmap
    def f(x):
        return jax.lax.map(jnp.sinc, x)
    g = lambda param: f(param * batch_data).sum()
    # reference: same computation without lax.map
    @jax.vmap
    def f_ref(x):
        return jnp.stack([jnp.sinc(x_) for x_ in x])
    g_ref = lambda param: f_ref(param * batch_data).sum()
    grad = jax.grad(g )(0.1)  # doesn't crash
    grad_ref = jax.grad(g_ref)(0.1)
    self.assertAllClose(grad, grad_ref, check_dtypes=False)
@parameterized.named_parameters(
    ('jit_vmap', True, True),
    ('jit', True, False),
    ('vmap', False, True),
    ('', False, False),
)
def test_symbolic_zero_custom_jvp(self, maybe_jit, maybe_vmap):
    """With symbolic_zeros=True, tangents of non-differentiated args arrive as SymbolicZero, under any jit/vmap combination."""
    def f(static_scalar, static_array, dyn_scalar, dyn_array):
        out1 = static_scalar + dyn_scalar
        out2 = static_array + dyn_array
        return out1, out2
    def _pack(x):
        return lax.broadcast(x, (1,))
    def _unpack(x):
        (x,) = x
        return x
    def _vmap(fun):
        # wrap fun so it runs under vmap over a singleton leading axis
        def _fun(*args):
            args = jax.tree.map(_pack, args)
            out = jax.vmap(fun)(*args)
            out = jax.tree.map(_unpack, out)
            return out
        return _fun
    f = jax.custom_jvp(f)
    @partial(f.defjvp, symbolic_zeros=True)
    def f_jvp(primals, tangents):
        static_scalar, *_ = primals
        t_static, t_static_arr, t_dyn_scalar, t_dyn_array = tangents
        # the two 'static' inputs get no tangent, so they must show up as SymbolicZero
        self.assertIs(type(t_static) , custom_derivatives_public.SymbolicZero)
        self.assertIs(type(t_static_arr), custom_derivatives_public.SymbolicZero)
        self.assertEqual(t_static.shape, ())
        self.assertEqual(t_static_arr.shape, (2,))
        # tangent outputs chosen to be distinctive so the rule's use is observable
        return f(*primals), (static_scalar + 90, t_dyn_array + 91)
    def g(dyn_scalar, dyn_array):
        if maybe_vmap:
            f_ = _vmap(f)
        else:
            f_ = f
        return f_(1., jnp.array([2., 3.]), dyn_scalar, dyn_array)
    def run(primal_ins, tangent_ins):
        return jax.jvp(g, primal_ins, tangent_ins)
    if maybe_jit:
        run = jax.jit(run)
    primal_ins = (4., jnp.array([5., 6.]))
    tangent_ins = (7., jnp.array([8., 9.]))
    primal_outs, tangent_outs = run(primal_ins, tangent_ins)
    primal_out1, primal_out2 = primal_outs
    tangent_out1, tangent_out2 = tangent_outs
    # python floats survive only the fully-untransformed path
    scalar_type = jax.Array if maybe_jit or maybe_vmap else float
    self.assertIsInstance(primal_out1, scalar_type)
    self.assertAllClose(primal_out1, 5.)
    self.assertIsInstance(tangent_out1, scalar_type)
    self.assertAllClose(tangent_out1, 91.)
    self.assertIsInstance(primal_out2, jax.Array)
    self.assertArraysAllClose(primal_out2, jnp.array([7., 9.]))
    self.assertIsInstance(tangent_out2, jax.Array)
    self.assertArraysAllClose(tangent_out2, jnp.array([99., 100.]))
def test_symbolic_zero_custom_jvp_vmap_output(self):
    """A rule may return a SymbolicZero tangent as an output under vmap + grad."""
    @jax.custom_jvp
    def f(x, y):
        return x * y
    @partial(f.defjvp, symbolic_zeros=True)
    def f_jvp(primals, tangents):
        x, y = primals
        x_dot, y_dot = tangents
        self.assertIs(type(y_dot), custom_derivatives_public.SymbolicZero)
        # returning the symbolic zero directly as the output tangent
        return f(x, y), y_dot
    jax.grad(lambda x, y: jax.vmap(f)(x, y).sum())(jnp.ones(3), jnp.ones(3))
def test_symbolic_zeros_memoization_caching(self):
    """linearize with different symbolic-zero patterns reuses partial_eval._memoize stores safely."""
    # Tests multiple zero patterns for partial_eval._memoize, and also tests
    # that we're okay with stores being occupied with equal values.
    @jax.custom_jvp
    def f(x, y):
        return x * y
    @partial(f.defjvp, symbolic_zeros=True)
    def f_jvp(primals, tangents):
        x, y = primals
        x_dot, y_dot = tangents
        return f(x, y), y_dot
    f_ = core.jaxpr_as_fun(jax.make_jaxpr(f)(2., 3.))
    _ = jax.linearize(f_, 2., 3.)
    _ = jax.linearize(lambda x: f_(x, 3.), 2.)  # don't crash!
def test_symbolic_zeros_under_jit(self):
    """jacfwd of a jitted symbolic-zeros custom_jvp function does not crash."""
    # https://github.com/jax-ml/jax/issues/14833
    Zero = jax.custom_derivatives.SymbolicZero
    @jax.custom_jvp
    def f(x, y):
        return x * y
    @partial(f.defjvp, symbolic_zeros=True)
    def fjvp(primals, tangents):
        x, y = primals
        tx, ty = tangents
        # at least one tangent must be non-zero when the rule is invoked
        assert type(tx) is not Zero or type(ty) is not Zero
        return f(x, y), (
            ty if type(tx) is Zero else
            tx if type(ty) is Zero else
            tx + ty)
    jax.jacfwd(jax.jit(f))(0.1, 0.2)  # don't crash
def test_custom_jvp_functools_partial(self):
    """jax.custom_jvp accepts a functools.partial as the decorated function."""
    def fun(x, y, a):
        return x + y * a
    fun_wrapped = functools.partial(fun, a = 0.1)
    def jvp_fn(primals, tangents):
        return jax.jvp(fun_wrapped, primals, tangents)
    fn = jax.custom_jvp(fun_wrapped)
    fn.defjvp(jvp_fn)
    # d/dx = 1, d/dy = a = 0.1
    self.assertEqual((1.0, 0.1), jax.grad(lambda args: fn(*args))((1.0, 2.0)))
def test_run_rules_more_than_once(self):
    """A symbolic-zeros JVP rule re-run across scan iterations does not crash under grad."""
    # https://github.com/jax-ml/jax/issues/16614
    @jax.custom_jvp
    def f(x, y):
        return x
    @partial(f.defjvp, symbolic_zeros=True)
    def f_jvp(primals, tangents):
        x, _ = primals
        x_dot, _ = tangents
        return x, x_dot
    def body(x_y, _):
        x, y = x_y
        return (f(x, y), x), None
    @jax.grad
    def g(x):
        (out, _), _ = lax.scan(body, (x, 1.), xs=None, length=2)
        return out
    g(1.)  # doesn't crash
class CustomVJPTest(jtu.JaxTestCase):
def test_basic(self):
@jax.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
x = 3.
self.assertAllClose(f(x), jnp.sin(x))
self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
self.assertAllClose(api.value_and_grad(f)(x),
(jnp.sin(x), 2 * jnp.cos(x)))
def test_invariance(self):
    """value_and_grad of a custom_vjp function returns the same primal under repeated nesting."""
    @jax.custom_vjp
    def f(x):
        return jnp.cos(2 * x) / 2.
    def f_fwd(x):
        return (f(x), x)
    def f_rev(x, g):
        # arbitrary fake gradient; only primal invariance is checked for f2/f3
        return (g * 3,)
    f.defvjp(f_fwd, f_rev)
    def f2(x):
        y, _ = api.value_and_grad(f)(x)
        return y
    def f3(x):
        y, _ = api.value_and_grad(f2)(x)
        return y
    x = 1.
    self.assertAllClose(f(x), f2(x), check_dtypes=False)
    self.assertAllClose(f(x), f3(x), check_dtypes=False)
    self.assertAllClose(api.grad(f)(x), api.grad(f2)(x),
                        check_dtypes=False)
    self.assertAllClose(api.grad(f)(x), api.grad(f3)(x),
                        check_dtypes=False)
def test_python_control_flow(self):
    """Python-level branching on concrete values works in custom_vjp primal, fwd, and bwd rules."""
    @jax.custom_vjp
    def f(x):
        if x > 0:
            return jnp.sin(x)
        else:
            return jnp.cos(x)
    def f_fwd(x):
        if x > 0:
            return f(x), x
        else:
            return f(x), x
    def f_rev(x, g):
        # gradient rule also branches on the residual's sign
        if x > 0:
            return (2 * g,)
        else:
            return (3 * g,)
    f.defvjp(f_fwd, f_rev)
    x = 2.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(f(-x), jnp.cos(-x))
    self.assertAllClose(api.value_and_grad(f)(x), (jnp.sin(x), 2.),
                        check_dtypes=False)
    self.assertAllClose(api.value_and_grad(f)(-x), (jnp.cos(-x), 3.),
                        check_dtypes=False)
def test_vmap(self):
    """custom_vjp composes with vmap in every order: vmap(f), vmap(grad), grad(vmap), vmap(grad(vmap))."""
    @jax.custom_vjp
    def f(x):
        # the primal is written for scalars only; vmap must supply scalars
        assert jnp.ndim(x) == 0
        return jnp.sin(x)
    def f_fwd(x):
        assert jnp.ndim(x) == 0
        return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
        return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    x = jnp.arange(3.)
    xx = jnp.arange(6.).reshape(2, 3)
    # vmap of f
    self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
    self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))
    # vmap of grad of f
    self.assertAllClose(api.vmap(api.grad(f))(x), 2 * jnp.cos(x))
    self.assertAllClose(api.vmap(api.value_and_grad(f))(x),
                        (jnp.sin(x), 2 * jnp.cos(x)))
    self.assertAllClose(api.vmap(api.vmap(api.grad(f)))(xx), 2 * jnp.cos(xx))
    self.assertAllClose(api.vmap(api.vmap(api.value_and_grad(f)))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx)))
    # grad of vmap of f
    self.assertAllClose(api.grad(lambda x: api.vmap(f)(x).sum())(x),
                        2 * jnp.cos(x))
    self.assertAllClose(api.grad(lambda x: api.vmap(api.vmap(f))(x).sum())(xx),
                        2 * jnp.cos(xx))
    # vmap of grad of vmap of f
    self.assertAllClose(api.vmap(api.grad(lambda x: api.vmap(f)(x).sum()))(xx),
                        2 * jnp.cos(xx))
def test_jit(self):
    """custom_vjp composes with jit: jit(f), jit(jit(f)), jit(grad(f)), grad(jit(f))."""
    @jax.custom_vjp
    def f(x):
        return jnp.sin(x)
    def f_fwd(x):
        return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
        # deliberately 2x the true derivative so rule use is observable
        return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    x = 3.
    # jit
    self.assertAllClose(api.jit(f)(x), jnp.sin(x))
    self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
    # jit of grad
    self.assertAllClose(api.jit(api.grad(f))(x), 2 * jnp.cos(x),
                        check_dtypes=False)
    # grad of jit
    self.assertAllClose(api.grad(api.jit(f))(x), 2 * jnp.cos(x),
                        check_dtypes=False)
def test_pytrees(self):
    """custom_vjp supports pytree (dict) inputs, outputs, residuals, and cotangents."""
    @jax.custom_vjp
    def f(x):
        return {'b': jnp.sin(x['a'])}
    def f_fwd(x):
        return f(x), {'r': jnp.cos(x['a'])}
    def f_bwd(res, g):
        cos_x = res['r']
        # cotangent arrives with the output's pytree structure; gradient must
        # be returned with the input's structure
        return ({'a': 2 * cos_x * g['b']},)
    f.defvjp(f_fwd, f_bwd)
    x = {'a': 3.}
    self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
    self.assertAllClose(api.grad(lambda x: f(x)['b'])(x),
                        {'a': 2 * jnp.cos(x['a'])})
def test_jvp_error(self):
    """Applying forward-mode AD (jvp) to a custom_vjp function raises a clear TypeError."""
    @jax.custom_vjp
    def f(x):
        return jnp.sin(x)
    def f_fwd(x):
        return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
        return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    # the error must surface directly, under vmap, and under jit
    self.assertRaisesRegex(
        TypeError,
        r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
        lambda: api.jvp(f, (3.,), (1.,)))
    self.assertRaisesRegex(
        TypeError,
        r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
        lambda: api.jvp(api.vmap(f), (jnp.arange(3.),), (jnp.ones(3),)))
    self.assertRaisesRegex(
        TypeError,
        r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
        lambda: api.jvp(jit(f), (3.,), (1.,)))
def test_kwargs(self):
    """Calling a custom_vjp function with a keyword argument does not crash under grad."""
    # from https://github.com/jax-ml/jax/issues/1938
    @jax.custom_vjp
    def my_fun(x, y, c=1.):
        return c * (x + y)
    # NOTE(review): the fwd rule computes my_fun(c, y, c) rather than
    # my_fun(x, y, c) — presumably a typo, but the test only checks that
    # nothing crashes, so the primal value is never asserted. Confirm intent
    # before "fixing".
    my_fun.defvjp(lambda x, y, c=1.: (my_fun(c, y, c), None),
                  lambda _, g: (g, g, g))
    f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
    f(10., 5.)  # doesn't crash
    api.grad(f)(10., 5.)  # doesn't crash
def test_initial_style(self):
    """First- and second-order grad through a custom_vjp used inside scan ('initial style')."""
    @jax.custom_vjp
    def f(x):
        return jnp.sin(x)
    def f_fwd(x):
        return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
        # deliberately 2x the true derivative so rule use is observable
        return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    def foo(x):
        out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
        return out
    ans = api.grad(foo)(3.)
    expected = 2. * jnp.cos(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.grad(foo))(3.)
    expected = -2. * jnp.sin(3.)
    self.assertAllClose(ans, expected)
def test_initial_style_vmap(self):
    """vmap and grad-of-vmap through a custom_vjp used inside scan."""
    @jax.custom_vjp
    def f(x):
        assert jnp.ndim(x) == 0
        return 3 * x
    def f_fwd(x):
        return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
        # rule deliberately differs from the true derivative (3) so its use is observable
        return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    def foo(x):
        out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
        return out
    ans = api.vmap(foo)(jnp.arange(3.))
    expected = 3. * jnp.arange(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))
    expected = 2. * jnp.cos(jnp.arange(3.))
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg(self):
    """custom_vjp with nondiff_argnums: the nondiff arg is passed to fwd and bwd rules."""
    @partial(jax.custom_vjp, nondiff_argnums=(0,))
    def app(f, x):
        return f(x)
    def app_fwd(f, x):
        return app(f, x), jnp.cos(x)
    def app_rev(f, cos_x, g):
        # gradient only for the differentiable arg x
        return (cos_x * g,)
    app.defvjp(app_fwd, app_rev)
    ans = app(lambda x: 2 * x, 1)
    expected = 2
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.value_and_grad(lambda x: app(lambda y: 2 * y, x))(1.)
    expected = (2., jnp.cos(1.))
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_closed_over_jit_tracer(self):
    """SKIPPED: closing a custom_vjp over a jit tracer is no longer supported."""
    # See the comment in CustomJVPTest.test_nondiff_arg_jit_tracer.
    raise unittest.SkipTest("behavior no longer supported")
    # This test is similar to test_nondiff_arg_tracer except it uses lexical
    # closure rather than the nondiff_argnums mechanism. We decided to disallow
    # tracers in nondiff_argnums to greatly simplify bookkeeping while still
    # supporting the cases for which it is necessary.
    def outer(x):
        @jax.custom_vjp
        def f(y):
            return x * y
        def f_fwd(y):
            return f(y), jnp.cos(y)
        def f_rev(cos_y, g):
            return (cos_y * g,)
        f.defvjp(f_fwd, f_rev)
        return f
    @jit
    def g(x, y):
        return outer(x)(y)
    ans = g(2, 3.)
    expected = 6.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(g, 1)(2., 3.)
    expected = jnp.cos(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_closed_over_vmap_tracer(self):
    """A custom_vjp closing over a vmap tracer evaluates its primal correctly."""
    def outer(x):
        @jax.custom_vjp
        def f(y):
            return x * y
        def f_fwd(y):
            return f(y), jnp.cos(y)
        def f_rev(cos_y, g):
            return (cos_y * g,)
        f.defvjp(f_fwd, f_rev)
        return f
    @api.vmap
    def g(x):
        # x is a vmap tracer captured by the custom_vjp closure
        return outer(x)(3.)
    ans = g(np.arange(3.))
    expected = np.arange(3.) * 3
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_closed_over_tracer3(self):
    """grad of a custom_vjp closing over a vmap tracer, with the tracer stored in the residuals."""
    def outer(x):
        @jax.custom_vjp
        def f(y):
            return x * y
        def f_fwd(y):
            # residual carries the closed-over tracer x alongside cos(y)
            return f(y), (x, jnp.cos(y))
        def f_rev(res, g):
            x, cos_y = res
            return (cos_y * g * x,)
        f.defvjp(f_fwd, f_rev)
        return api.grad(f)
    @api.vmap
    def g(x):
        return outer(x)(3.)
    ans = g(np.arange(3.))
    expected = np.cos(3.) * np.arange(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg_tracer_error(self):
    """Passing a jit tracer as a nondiff_argnums arg raises UnexpectedTracerError."""
    # This is similar to the old (now skipped) test_nondiff_arg_tracer, except
    # we're testing for the error message that usage pattern now raises.
    @partial(jax.custom_vjp, nondiff_argnums=(0,))
    def f(x, y):
        return x * y
    def f_fwd(x, y):
        return f(x, y), jnp.cos(y)
    def f_rev(x, cos_y, g):
        return (cos_y * g,)
    f.defvjp(f_fwd, f_rev)
    @jit
    def g(x, y):
        # x becomes a jit tracer, which is disallowed in nondiff_argnums
        return f(x, y)
    with self.assertRaisesRegex(UnexpectedTracerError, "custom_vjp"):
        _ = g(2, 3.)
    with self.assertRaisesRegex(UnexpectedTracerError, "custom_vjp"):
        _ = api.grad(g, 1)(2., 3.)
def test_vmap_axes(self):
    """Placeholder: custom_vjp with explicit vmap axes is not yet covered."""
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test
def test_pmap(self):
    """Placeholder: custom_vjp under pmap is not yet covered."""
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test
def test_missing_vjp_rule_error(self):
    """Calling or differentiating a custom_vjp with no defvjp raises AttributeError."""
    @jax.custom_vjp
    def foo(x):
        return x ** 2
    self.assertRaisesRegex(
        AttributeError,
        r"No VJP defined for custom_vjp function foo using defvjp.",
        lambda: foo(2))
    self.assertRaisesRegex(
        AttributeError,
        r"No VJP defined for custom_vjp function foo using defvjp.",
        lambda: api.grad(foo)(2.))
def test_vjp_rule_inconsistent_pytree_structures_error(self):
    """A bwd rule whose output pytree structure mismatches the primal args raises a descriptive TypeError."""
    @jax.custom_vjp
    def f(x):
        return x
    def foo_fwd(x):
        return x, None
    def foo_bwd(_, g):
        # wrong: returns two cotangents for a one-argument primal
        return (g, g)
    f.defvjp(foo_fwd, foo_bwd)
    f(2)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom VJP bwd rule must produce an output with the same container "
            "(pytree) structure as the args tuple of the primal function, "
            "and in particular must produce a tuple of length equal to the "
            "number of arguments to the primal function, but got bwd output "
            "structure {} for primal input structure {}.".format(
                jax.tree.structure((1, 1)),
                jax.tree.structure((1,)))
        ),
        lambda: api.grad(f)(2.))
def test_vjp_bwd_returns_non_tuple_error(self):
    """A bwd rule returning a bare value instead of a tuple raises a TypeError."""
    @jax.custom_vjp
    def f(x):
        return x
    def foo_fwd(x):
        return x, None
    def foo_bwd(_, g):
        return 2. * g  # Should be a tuple
    f.defvjp(foo_fwd, foo_bwd)
    with self.assertRaisesRegex(TypeError, "Custom VJP bwd rule .* must produce a tuple"):
        api.grad(f)(3.)
def test_fwd_rule_primal_out_type_doesnt_match_primal_error_message(self):
    """fwd rules whose primal output mismatches the primal function (structure or shape/dtype) raise descriptive errors."""
    # https://github.com/lucidrains/flash-attention-jax/issues/7
    def scan_apply(f, x):
        y, _ = jax.lax.scan(lambda x, _: (f(x), None), x, None, length=1)
        return y
    @jax.custom_vjp
    def f(x):
        return x
    def f_fwd(x):
        # wrong: primal output is a pair, but f returns a single value
        return (x, x), None
    def f_bwd(_, y_bar):
        return (y_bar,)
    f.defvjp(f_fwd, f_bwd)
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom VJP fwd rule f_fwd for function f must produce a pair "
            "(list or tuple of length two) where the first element represents "
            "the primal output (equal to the output of the "
            "custom_vjp-decorated function f) and the second element "
            "represents residuals (i.e. values stored from the forward "
            "pass for use on the backward pass), but instead the fwd rule "
            "output's first element had container/pytree structure:\n"
            "    (float32[], float32[])\n"
            "while the custom_vjp-decorated function f had output "
            "container/pytree structure:\n"
            "    float32[]."
        ),
        lambda: jax.grad(lambda x: scan_apply(f, x))(jnp.float32(1.)))
    def f_fwd2(x):
        # wrong: primal output has a different shape than f's output
        return jnp.zeros((3, *x.shape), x.dtype), None
    def f_bwd2(_, y_bar):
        return (y_bar,)
    f.defvjp(f_fwd2, f_bwd2)
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom VJP fwd rule f_fwd2 for function f must produce a pair "
            "(list or tuple of length two) where the first element represents "
            "the primal output (equal to the output of the "
            "custom_vjp-decorated function f) and the second element "
            "represents residuals (i.e. values stored from the forward "
            "pass for use on the backward pass), but instead the fwd rule "
            "output's first element had shapes/dtypes of:\n"
            "    float32[3]\n"
            "while the custom_vjp-decorated function f had output "
            "shapes/dtypes of:\n"
            "    float32[]"
        ),
        lambda: jax.grad(lambda x: scan_apply(f, x))(jnp.float32(1.)))
def test_issue2511(self):
arr = jnp.ones((5, 2, 2))
foo = lambda x: api.vmap(jnp.linalg.det, (0,))(x)
api.jit(foo)(arr) # doesn't crash
def test_lowering_out_of_traces(self):
    """grad of a jitted function taking a namedtuple callable argument does not crash."""
    # https://github.com/jax-ml/jax/issues/2578
    class F(collections.namedtuple("F", ["a"])):
        def __call__(self, x):
            return jax.nn.relu(self.a) * x
    @jax.jit
    def g(f, x):
        return f(x)
    # differentiate w.r.t. x only; f is a pytree-of-one-leaf callable
    jax.grad(g, argnums=(1,))(F(2.0), 0.)  # doesn't crash
def test_clip_gradient(self):
    """A custom_vjp gradient-clipping identity returns the clipped gradient."""
    # https://github.com/jax-ml/jax/issues/2784
    @jax.custom_vjp
    def _clip_gradient(lo, hi, x):
        return x  # identity function when not differentiating
    def clip_gradient_fwd(lo, hi, x):
        return x, (lo, hi,)
    def clip_gradient_bwd(res, g):
        lo, hi = res
        # no gradient w.r.t. the clip bounds themselves
        return (None, None, jnp.clip(g, lo, hi),)
    _clip_gradient.defvjp(clip_gradient_fwd, clip_gradient_bwd)
    def clip_gradient(x):
        lo = -0.1
        hi = x + 0.1
        return _clip_gradient(lo, hi, x)
    g = jax.grad(clip_gradient)(0.1)  # doesn't crash
    # identity gradient 1.0 clipped to hi = x + 0.1 = 0.2
    self.assertAllClose(g, jnp.array(0.2))
def test_nestable_vjp(self):
    """api.vjp results (including closures over tracers) can be nested and returned from jit."""
    # Verify that https://github.com/jax-ml/jax/issues/3667 is resolved.
    def f(x):
        return x ** 2
    @jax.custom_vjp
    def g(x):
        return f(x)
    def g_fwd(x):
        # the residual is itself a vjp closure
        y, f_vjp = api.vjp(f, x)
        return y, f_vjp
    def g_bwd(f_vjp, y_bar):
        return f_vjp(y_bar)
    g.defvjp(g_fwd, g_bwd)
    # Check that VJP can be nested in simple situations. For this to pass,
    # vjp has to return a PyTree.
    _, g_vjp = api.vjp(g, 1.0)
    y, = g_vjp(1.0)
    self.assertAllClose(y, jnp.array(2.0))
    # Check that VJP can be nested in complex situations. For this to pass,
    # vjp can't treat the closed-over tracer x as a static argument.
    @jit
    def z(x):
        _, g_vjp = api.vjp(g, x)
        return g_vjp
    y, = z(1.0)(3.0)
    self.assertAllClose(y, jnp.array(6.0))
def test_initial_style_vmap_2(self):
    """grad of a vmapped custom_vjp inside a scan body does not crash."""
    # https://github.com/jax-ml/jax/issues/4173
    x = jnp.ones((10, 3))
    # Create the custom function
    @jax.custom_vjp
    def custom_fun(x):
        return x.sum()
    def forward(x):
        return x.sum(), (jnp.ones_like(x),)
    def backward(res, g):
        return g * res[0],
    custom_fun.defvjp(forward, backward)
    def train_fun(x):
        def summed_fun(x):
            return api.vmap(custom_fun)(x).sum()
        return api.grad(summed_fun)(x)
    def scan_body(carry, inputs):
        x = carry
        return carry, train_fun(x)
    scan_range = jnp.arange(4)
    lax.scan(scan_body, x, scan_range)  # don't crash
def test_initial_style_vmap_3(self):
    """Like test_initial_style_vmap, but the primal closes over an array constant."""
    # This is like test_initial_style_vmap except the primal function closes
    # over an array constant.
    y = jnp.arange(1., 4.)
    @jax.custom_vjp
    def f(x):
        assert jnp.ndim(x) == 0
        # sum(y) == 6, so the primal is 18 * x
        return 3 * x * jnp.sum(y)
    def f_fwd(x):
        return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
        # rule deliberately differs from the true derivative so its use is observable
        return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    def foo(x):
        out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
        return out
    ans = api.vmap(foo)(jnp.arange(3.))
    expected = 3. * jnp.arange(3.) * 6
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))
    expected = 2. * jnp.cos(jnp.arange(3.))
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap_with_collective(self):
    """eval_jaxpr of a custom_vjp containing a psum works under vmap with a named axis."""
    @jax.custom_vjp
    def f(x):
        return lax.psum(x, 'foo')
    def f_fwd(x):
        return lax.psum(x, 'foo'), None
    def f_bwd(res, dx):
        # NOTE(review): returns a bare value rather than a tuple; the bwd rule
        # is never invoked in this test (no differentiation), so it goes
        # unnoticed here.
        return dx
    f.defvjp(f_fwd, f_bwd)
    def g(x):
        # round-trip through an explicit jaxpr to exercise 'initial style'
        jaxpr = api.make_jaxpr(f)(x)
        return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]
    out = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),
                   out_axes=None)(jnp.arange(4.), 2.)
    self.assertAllClose(out, 8.)
def test_bwd_closes_over_tracer(self):
    """A bwd rule closing over an outer tracer works under jit, vmap, and grad."""
    def f(y):
        @jax.custom_vjp
        def f(x):
            return 2. * jnp.sin(x)
        def fwd(x):
            return f(x), ()
        def bwd(_, g):
            return (2. * jnp.cos(y) * g,)  # capture!
        f.defvjp(fwd, bwd)
        return jax.grad(f)(1.)
    ans = jax.jit(f)(2.)
    self.assertAllClose(ans, 2. * jnp.cos(2.))
    ans = jax.vmap(f)(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.jit(jax.vmap(f))(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.vmap(jax.jit(f))(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.grad(f)(4.)
    self.assertAllClose(ans, -2. * jnp.sin(4.))
def test_fwd_closes_over_tracer(self):
  # Like test_bwd_closes_over_tracer, but here the *fwd* rule closes over the
  # outer tracer `y` and passes it through as a residual to bwd.
  def f(y):
    @jax.custom_vjp
    def f(x):
      return 2. * jnp.sin(x)
    def fwd(x):
      return f(x), y
    def bwd(y, g):
      return (2. * jnp.cos(y) * g,)  # capture!
    f.defvjp(fwd, bwd)
    return jax.grad(f)(1.)

  ans = jax.jit(f)(2.)
  self.assertAllClose(ans, 2. * jnp.cos(2.))

  ans = jax.vmap(f)(jnp.arange(3.))
  self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))

  ans = jax.jit(jax.vmap(f))(jnp.arange(3.))
  self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))

  ans = jax.vmap(jax.jit(f))(jnp.arange(3.))
  self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))

  # Second-order: differentiating the tracer-residual path.
  ans = jax.grad(f)(4.)
  self.assertAllClose(ans, -2. * jnp.sin(4.))
def test_float0(self):
  # Gradients with respect to an integer (non-differentiable) argument should
  # come back as float0 zeros rather than erroring, even though the custom
  # rule itself returns an int cotangent for that slot.
  @jax.custom_vjp
  def f(x, _):
    return x
  def f_fwd(x, _):
    # we need a defined (non-float0) tangent to trigger the rule
    return x, (2., 1)
  def f_rev(*_):
    return (2., 1)
  f.defvjp(f_fwd, f_rev)

  x = 2.
  y = 3  # int input: its gradient should be a float0 zero
  self.assertEqual(api.grad(f, allow_int=True, argnums=(0, 1))(x, y),
                   (2., np.zeros(shape=(), dtype=float0)))
def test_float0_initial_style(self):
@jax.custom_vjp
def f(x):
return x
def f_fwd(x):
return x, (2., x)
def f_rev(*_):
return ((2., jnp.zeros(shape=(), dtype=float0)),)
f.defvjp(f_fwd, f_rev)
def foo(x, y):
out, _ = lax.scan(lambda c, _: (f(c), None), (x, y), None, length=1)
return out[0]
x = 2.
y = 3
self.assertEqual(api.grad(foo, allow_int=True, argnums=(0, 1))(x, y),
(2., np.zeros(shape=(), dtype=float0)))
def test_remat(self):
  # custom_vjp composed with jax.remat: the custom rule (which deliberately
  # doubles the true cotangent) must still be used under rematerialization.
  @jax.custom_vjp
  def f(x):
    return jnp.sin(x)
  def f_fwd(x):
    return f(x), jnp.cos(x)
  def f_rev(cos_x, g):
    return (2 * cos_x * g,)
  f.defvjp(f_fwd, f_rev)

  @jax.remat
  def g(x):
    return f(f(x))

  ans = g(2.)
  expected = np.sin(np.sin(2.))
  self.assertAllClose(ans, expected, check_dtypes=False)

  ans = api.grad(g)(2.)
  # Two nested applications, each doubling the cotangent: factor of 4.
  expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
  self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_higher_order(self):
@jax.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
def g(x):
return f(f(x))
ans = api.grad(api.grad(jax.remat(g)))(2.)
expected = api.grad(api.grad(g))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(jax.remat(api.grad(g)))(2.)
expected = api.grad(api.grad(g))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(api.grad(jax.remat(g))))(2.)
expected = api.grad(api.grad(api.grad(g)))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones(self):
  # A bwd rule may return None in place of an argument's cotangent to mean
  # "symbolic zero" for that argument.
  @jax.custom_vjp
  def f(x, y):
    return x * jnp.sin(y)
  def f_fwd(x, y):
    return f(x, y), jnp.cos(y)
  def f_rev(cos, g):
    return (None, 2 * cos * g)  # None: zero cotangent for x
  f.defvjp(f_fwd, f_rev)

  # Since d/dx contributes zero, the total gradient is only the y-slot value.
  ans = api.grad(lambda x: f(x, x))(3.)
  expected = 2 * jnp.cos(3.)
  self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones_vmap(self):
@jax.custom_vjp
def f(x, y):
return x * jnp.sin(y)
def f_fwd(x, y):
return f(x, y), jnp.cos(y)
def f_rev(cos, g):
return (None, 2 * cos * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(lambda x: api.vmap(f)(x, x).sum())(jnp.arange(3.))
expected = 2 * jnp.cos(jnp.arange(3.))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones_pytree(self):
@jax.custom_vjp
def f(xs, y):
x1, x2 = xs
return x1 * x2 * jnp.sin(y)
def f_fwd(xs, y):
return f(xs, y), jnp.cos(y)
def f_rev(cos, g):
return (None, 2 * cos * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(lambda x: f((x, x), x))(3.)
expected = 2 * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_vjp_closure_4521(self):
# https://github.com/jax-ml/jax/issues/4521
@jax.custom_vjp
def g(x, y):
return None
def g_fwd(x, y):
return None, y
def g_bwd(residuals, z_bar):
assert False
g.defvjp(g_fwd, g_bwd)
def f(xs, y):
v_g = api.vmap(g, in_axes=(0, None), out_axes=None)
v_g(xs, y)
def scan_body(xs, _):
y = jnp.zeros(1)
_, vjp_f = api.vjp(f, xs, y)
vjp_f(None)
return xs, None
lax.scan(scan_body, jnp.ones(5), None, 100) # doesn't crash
def test_float0_bwd_none(self):
@jax.custom_vjp
def f(i, x):
return jnp.sin(x)
def f_fwd(i, x):
return f(i, x), jnp.cos(x)
def f_rev(cos_x, g):
return (None, 2 * cos_x * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(f, 1)(jnp.array([1, 2]), 3.) # doesn't crash
expected = 2 * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_gradient(self):
@jax.custom_gradient
def f(x):
return x ** 2, lambda g: (g * x,)
self.assertAllClose(f(3.), 9., check_dtypes=False)
self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
def test_custom_gradient_2(self):
@jax.custom_gradient
def f(x, y):
return x * y, lambda g: (y, x)
self.assertAllClose(f(3., 4.), 12., check_dtypes=False)
self.assertAllClose(api.grad(f, argnums=(0, 1))(3., 4.), (4., 3.),
check_dtypes=False)
def test_custom_gradient_3(self):
@jax.custom_gradient
def f(x):
vjp = lambda g: (jnp.cos(x) * jnp.arange(3., 6.),)
return jnp.sum(jnp.sin(x)), vjp
self.assertAllClose(f(jnp.arange(3)), jnp.sum(jnp.sin(jnp.arange(3.))),
check_dtypes=False)
self.assertAllClose(
api.grad(f)(jnp.arange(3.)),
api.grad(lambda x: jnp.sum(jnp.sin(x)))(jnp.arange(3.)) * jnp.arange(3., 6.),
check_dtypes=False)
def test_custom_gradient_can_return_singleton_value_in_vjp(self):
@jax.custom_gradient
def f(x):
return x ** 2, lambda g: g * x
self.assertAllClose(f(3.), 9., check_dtypes=False)
self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
def test_closure_convert(self):
  # jax.closure_convert turns values closed over by `fn` into explicit extra
  # arguments, so they can be routed through a custom_vjp and receive
  # cotangents from the custom rule.
  def cos_after(fn, x):
    converted_fn, aux_args = jax.closure_convert(fn, x)
    self.assertLessEqual(len(aux_args), 1)
    return _cos_after(converted_fn, x, *aux_args)

  @partial(jax.custom_vjp, nondiff_argnums=(0,))
  def _cos_after(fn, x, *args):
    return jnp.cos(fn(x, *args))

  def fwd(fn, x, *args):
    y = _cos_after(fn, x, *args)
    return y, (x, args)

  def rev(fn, res, g):
    x, args = res
    # Deliberately made-up cotangents (17*x and 42*a) so the assertions below
    # can tell the custom rule was actually used.
    x_bar = 17. * x
    args_bars = [42. * a for a in args]
    return (x_bar, *args_bars)

  _cos_after.defvjp(fwd, rev)

  def dist(c, x):
    return jnp.sum((x - c) ** 2.)

  def solve(c, x):
    # `closure` closes over `c`; closure_convert exposes it as an aux arg.
    def closure(x):
      return dist(c, x)
    return cos_after(closure, x)

  c, x = 2. * jnp.ones(2), jnp.ones(2)
  expected = jnp.cos(dist(c, x))
  self.assertAllClose(solve(c, x), expected, check_dtypes=False)

  g_c, g_x = api.grad(solve, argnums=(0, 1))(c, x)
  self.assertAllClose(g_c, 42. * c, check_dtypes=False)
  self.assertAllClose(g_x, 17. * x, check_dtypes=False)
def test_closure_convert_mixed_consts(self):
# Like test_closure_convert, but close over values that
# participate in AD as well as values that do not.
# See https://github.com/jax-ml/jax/issues/6415
def cos_after(fn, x):
converted_fn, aux_args = jax.closure_convert(fn, x)
self.assertLessEqual(len(aux_args), 1)
return _cos_after(converted_fn, x, *aux_args)
@partial(jax.custom_vjp, nondiff_argnums=(0,))
def _cos_after(fn, x, *args):
return jnp.cos(fn(x, *args))
def fwd(fn, x, *args):
y = _cos_after(fn, x, *args)
return y, (x, args)
def rev(fn, res, g):
x, args = res
x_bar = 17. * x
args_bars = [42. * a for a in args]
return (x_bar, *args_bars)
_cos_after.defvjp(fwd, rev)
def dist(c, s, x):
return jnp.sum(s * (x - c) ** 2.)
def solve(c, s, x):
def closure(x):
return dist(c, s, x)
return cos_after(closure, x)
c, s, x = 2. * jnp.ones(2), 3. * jnp.ones(2), jnp.ones(2)
expected = jnp.cos(dist(c, s, x))
self.assertAllClose(solve(c, s, x), expected, check_dtypes=False)
g_c, g_x = api.grad(solve, argnums=(0, 2))(c, s, x)
self.assertAllClose(g_c, 42. * c, check_dtypes=False)
self.assertAllClose(g_x, 17. * x, check_dtypes=False)
def test_closure_convert_pytree_mismatch(self):
# See https://github.com/jax-ml/jax/issues/23588
def f(x, z):
return z * x
x, z = 2.0, 3.0
_, vjp = api.vjp(f, x, z)
vjp_pure, vjp_aux_args = jax.closure_convert(vjp, x)
vjp_pure(x, *vjp_aux_args)
with self.assertRaisesRegex(
TypeError, "The inputs to the closure produced by closure_convert"):
vjp_pure(x, vjp_aux_args)
def test_float0_cotangents_automatically_handled(self):
@jax.custom_vjp
def f(x, y):
return x
def f_fwd(x, y):
return x, None
def f_bwd(_, zbar):
return (0., 1)
f.defvjp(f_fwd, f_bwd)
jax.jit(lambda x: jax.vjp(f, 0., x)[1](1.))(1) # doesn't crash
def test_custom_vjp_scan_batching_edge_case(self):
# https://github.com/jax-ml/jax/issues/5832
@jax.custom_vjp
def mul(x, coeff): return x * coeff
def mul_fwd(x, coeff): return mul(x, coeff), (x, coeff)
def mul_bwd(res, g):
x, coeff = res
g_x = g * coeff
g_coeff = (x * g).sum()
return g_x, g_coeff
mul.defvjp(mul_fwd, mul_bwd)
def scan_over_mul(x, coeff):
def f_(x, t):
return mul(x, coeff), None
y, _ = jax.lax.scan(f_, x, jnp.arange(3))
return y
key = jax.random.key(0)
key1, key2 = jax.random.split(key, 2)
x_batch = jax.random.normal(key1, (3, 2))
covector_batch = jax.random.normal(key2, (3, 2))
coeff = jnp.array(1., dtype=x_batch.dtype)
batched_scan_over_mul = jax.vmap(scan_over_mul, in_axes=(0, None), out_axes=0)
res, vjp_fun = jax.vjp(batched_scan_over_mul, x_batch, coeff)
vjp_fun(covector_batch) # doesn't crash
jtu.check_grads(batched_scan_over_mul, (x_batch, coeff), order=2,
modes=['rev'])
def test_closure_with_vmap2(self):
# https://github.com/jax-ml/jax/issues/8783
def h(z):
def f(x):
@jax.custom_vjp
def g(y):
return x * y
def g_fwd(y):
return x * y, (x, x * y, y)
def g_rev(res, w_bar):
x, *_ = res
return (x * w_bar,)
g.defvjp(g_fwd, g_rev)
return g(z)
return jax.vmap(f)(jnp.arange(3., dtype='float32')).sum()
jtu.check_grads(h, (jnp.float32(3.14),), order=1, modes=['rev'])
def test_pytrees_not_required_to_contain_nones(self):
class A(list):
pass
def unflatten(_, children):
assert children[0] is not None
return A(children)
tree_util.register_pytree_node(A, lambda x: (x, None), unflatten)
@jax.custom_vjp
def f(x):
return x[0]
def f_fwd(x):
return x[0], None
def f_bwd(_, g):
return A([g]),
f.defvjp(f_fwd, f_bwd)
jax.grad(f)(A([1.])) # doesn't crash
def test_vmap_vjp_called_twice(self):
# https://github.com/jax-ml/jax/pull/14728
@jax.custom_vjp
def f(x):
return x
f.defvjp(lambda x: (x, None), lambda _, y_bar: (y_bar,))
_, f_vjp = jax.vjp(jax.vmap(f), jnp.array([3.]))
f_vjp(jnp.array([3.]))
f_vjp(jnp.array([3.])) # doesn't crash
def test_symbolic_zero_custom_vjp_basic(self):
ZERO = custom_derivatives_public.SymbolicZero
@jax.custom_vjp
def f(x, y, z):
return x, x
def fwd(x, y, z):
self.assertIsInstance(x, jax.custom_derivatives.CustomVJPPrimal)
self.assertIsInstance(y, jax.custom_derivatives.CustomVJPPrimal)
self.assertIsInstance(z, jax.custom_derivatives.CustomVJPPrimal)
self.assertTrue(x.perturbed)
self.assertFalse(y.perturbed)
self.assertFalse(z.perturbed)
return (x.value, x.value), None
def fwd_all(x, y, z):
self.assertIsInstance(x, jax.custom_derivatives.CustomVJPPrimal)
self.assertIsInstance(y, jax.custom_derivatives.CustomVJPPrimal)
self.assertIsInstance(z, jax.custom_derivatives.CustomVJPPrimal)
self.assertTrue(x.perturbed)
self.assertTrue(y.perturbed)
self.assertTrue(z.perturbed)
return (x.value, x.value), None
def bwd_all(_, g):
x1, x2 = g
self.assertFalse(type(x1) is ZERO)
self.assertFalse(type(x2) is ZERO)
return x1, x1, x2
def bwd_fst(_, g):
x1, x2 = g
self.assertFalse(type(x1) is ZERO)
self.assertIs(type(x2), ZERO)
return x1, x1, x2
def bwd_snd(_, g):
x1, x2 = g
self.assertIs(type(x1), ZERO)
self.assertFalse(type(x2) is ZERO)
return x1, x1, x2
x, y, z = 4., 5., 6.
i = np.array(7, np.int32)
zero = np.array(0.)
f.defvjp(fwd, bwd_all, symbolic_zeros=True)
h = jax.jit(f)
jax.jacrev(h)(x, y, z)
jax.jacrev(lambda x: h(x, y, z))(x)
jax.jacrev(h, argnums=(0, 1, 2), allow_int=True)(x, i, i)
f.defvjp(fwd_all, bwd_fst, symbolic_zeros=True)
fst_f = lambda *xs: f(*xs)[0]
_, vjp = jax.vjp(fst_f, x, y, z)
_, _, gz = vjp(x)
self.assertArraysAllClose(gz, zero)
f.defvjp(fwd_all, bwd_snd, symbolic_zeros=True)
snd_f = lambda *xs: f(*xs)[1]
_, vjp = jax.vjp(snd_f, x, y, z)
gx, gy, _ = vjp(x)
self.assertArraysAllClose(gx, zero)
self.assertArraysAllClose(gy, zero)
f.defvjp(fwd, bwd_snd, symbolic_zeros=True)
_, vjp = jax.vjp(lambda x: snd_f(x, y, z), x)
gx, = vjp(x)
self.assertArraysAllClose(gx, zero)
def test_symbolic_zero_custom_vjp_bwd_shape_error(self):
@jax.custom_vjp
def f(x, y, z):
return x, y, z
def fwd(x, y, z):
return f(x.value, y.value, z.value), None
def bwd(_, gs):
x_bar, y_bar, z_bar = gs
return y_bar, x_bar, z_bar # swapped!
f.defvjp(fwd, bwd, symbolic_zeros=True)
with self.assertRaisesRegex(
ValueError,
r'Consider just returning a None here'):
jax.grad(lambda x, y, z: f(x, y, z)[2].sum())(
jnp.ones(1), jnp.ones(2), jnp.ones(3))
@parameterized.named_parameters(
('jit_vmap', True, True),
('jit', True, False),
('vmap', False, True),
('', False, False),
)
def test_symbolic_zero_custom_vjp(self, maybe_jit, maybe_vmap):
# below:
# * static_scalar will be static in and out
# * static_array will be static in, but dynamic out
# * dyn_scalar and dyn_array will be dynamic in and out
ZERO = custom_derivatives_public.SymbolicZero
def f(static_scalar, static_array, dyn_scalar, dyn_array):
out1 = static_scalar + dyn_scalar
out2 = static_array + dyn_array
return static_scalar, static_array, out1, out2
def _pack(x):
return lax.broadcast(x, (1,))
def _unpack(x):
(x,) = x
return x
def _vmap(fun):
def _fun(*args):
args = jax.tree.map(_pack, args)
out = jax.vmap(fun)(*args)
out = jax.tree.map(_unpack, out)
return out
return _fun
f = jax.custom_vjp(f)
def fwd(*args):
xs, pert = [x.value for x in args], [x.perturbed for x in args]
self.assertFalse(pert[0])
self.assertFalse(pert[1])
self.assertTrue(pert[2])
self.assertTrue(pert[3])
return f(*xs), xs
def bwd(res, g):
static_scalar, *_ = res
t_static, t_static_arr, t_dyn_scalar, t_dyn_array = g
self.assertIs(type(t_static), ZERO)
self.assertFalse(type(t_static_arr) is ZERO)
self.assertFalse(type(t_dyn_scalar) is ZERO)
self.assertFalse(type(t_dyn_array) is ZERO)
self.assertEqual(t_static.shape, ())
self.assertEqual(t_static_arr.shape, (2,))
return (static_scalar + 90,
t_static_arr + 91,
t_dyn_scalar + 92,
t_dyn_array + 93)
f.defvjp(fwd, bwd, symbolic_zeros=True)
def g(dyn_scalar, dyn_array):
if maybe_vmap:
f_ = _vmap(f)
else:
f_ = f
outs = f_(1., jnp.array([2., 3.]), dyn_scalar, dyn_array)
return outs[1:]
def run(primal_ins, cotangent_outs):
primal_outs, vjp = jax.vjp(g, *primal_ins)
cotangent_ins = vjp(cotangent_outs)
return primal_outs, cotangent_ins
if maybe_jit:
run = jax.jit(run)
scalar_type = jax.Array if maybe_jit or maybe_vmap else float
primal_ins = (4., jnp.array([5., 6.]))
cotangent_outs = (jnp.array([10., 11.]), 7., jnp.array([8., 9.]))
primal_outs, cotangent_ins = run(primal_ins, cotangent_outs)
primal_out1, primal_out2, primal_out3 = primal_outs
self.assertIsInstance(primal_out1, jax.Array)
self.assertAllClose(primal_out1, jnp.array([2., 3.]))
self.assertIsInstance(primal_out2, scalar_type)
self.assertAllClose(primal_out2, 5.)
self.assertIsInstance(primal_out3, jax.Array)
self.assertAllClose(primal_out3, jnp.array([7., 9.]))
ct_in1, ct_in2 = cotangent_ins
self.assertIsInstance(ct_in1, scalar_type)
self.assertAllClose(ct_in1, 99.)
self.assertIsInstance(ct_in2, jax.Array)
self.assertArraysAllClose(ct_in2, jnp.array([101., 102.]))
def test_symbolic_zero_custom_vjp_vmap_output(self):
@jax.custom_vjp
def f(x, y):
return x, y
def fwd(x, y):
self.assertTrue(x.perturbed)
self.assertFalse(y.perturbed)
return f(x.value, y.value), None
def bwd(_, g):
_, ct_y = g
self.assertIs(type(ct_y), custom_derivatives_public.SymbolicZero)
return g
f.defvjp(fwd, bwd, symbolic_zeros=True)
jax.grad(lambda x, y: jax.vmap(f)(x, y)[0].sum())(jnp.ones(3), jnp.ones(3))
def test_symbolic_zero_custom_vjp_custom_pytree(self):
tree_values = custom_derivatives_public.custom_vjp_primal_tree_values
@tree_util.register_pytree_node_class
class Box:
def __init__(self_, strict, val):
if strict:
# make sure we aren't getting special arguments that should only
# come up when symbolic_zeros is True
self.assertFalse(hasattr(val, 'perturbed'))
self_.strict = strict
self_.x = val
def tree_flatten(self_):
return [self_.x], self_.strict
@classmethod
def tree_unflatten(cls, strict, xs):
x, = xs
return cls(strict, x)
x, y = Box(False, jnp.array(72.)), jnp.array(73.)
@jax.custom_vjp
def f(box, y):
return box.x * y
def fwd0(box, y):
self.assertTrue(box.x.perturbed)
self.assertFalse(y.perturbed)
box, y = map(tree_values, [box, y])
return f(box, y), (box, y)
def bwd0(res, g):
box, y = res
return y * g, box.x * g
def fwd1(box, y):
self.assertFalse(box.x.perturbed)
self.assertTrue(y.perturbed)
box, y = map(tree_values, [box, y])
return f(box, y), (box, y)
def bwd1(res, g):
box, y = res
return y * g, box.x * g
f.defvjp(fwd0, bwd0, symbolic_zeros=True)
jax.grad(f, argnums=0)(x, y)
f.defvjp(fwd1, bwd1, symbolic_zeros=True)
jax.grad(f, argnums=1)(x, y)
def fwd_strict(box, y):
return f(box, y), (box, y)
def bwd_strict(res, g):
box, y = res
return y * g, box.x * g
f.defvjp(fwd_strict, bwd_strict)
jax.grad(f)(x, y)
def test_symbolic_zeros_memoization_caching(self):
# Tests multiple zero patterns for partial_eval._memoize, and also tests
# that we're okay with stores being occupied with equal values.
@jax.custom_vjp
def f(x, y):
return x * y
def f_fwd(x, y):
return x.value, None
def f_bwd(_, z_bar):
return z_bar, None
f.defvjp(f_fwd, f_bwd, symbolic_zeros=True)
f_ = core.jaxpr_as_fun(jax.make_jaxpr(f)(2., 3.))
_ = jax.linearize(f_, 2., 3.)
_ = jax.linearize(lambda x: f_(x, 3.), 2.) # don't crash!
def test_run_rules_more_than_once(self):
# https://github.com/jax-ml/jax/issues/16614
@jax.custom_vjp
def f(x, y):
return x + y
def f_fwd(x, y):
if y.perturbed:
res = None
else:
res = []
return x.value + y.value, res
def f_bwd(res, ct):
return ct, ct
f.defvjp(f_fwd, f_bwd, symbolic_zeros=True)
def body(x_y, _):
x, y = x_y
return (f(x, y), x), None
@jax.grad
def g(x):
(out, _), _ = lax.scan(body, (x, 1.), xs=None, length=2)
return out
g(1.) # doesn't crash
def test_nones_representing_zeros_in_subtrees_returned_by_bwd(self):
# https://github.com/jax-ml/jax/issues/8356
@jax.custom_vjp
def f(x):
return x[0]
def f_fwd(x):
return f(x), None
def f_bwd(_, z_bar):
return (z_bar, (None, None)),
f.defvjp(f_fwd, f_bwd)
jax.grad(f)((1.0, (2.0, 3.0))) # don't crash
def test_pytree_nones_returned_by_bwd(self):
@jax.custom_vjp
def f(x):
return x[0]
def f_fwd(x):
return f(x), None
def f_bwd(_, z_bar):
return (z_bar, (None, None)),
f.defvjp(f_fwd, f_bwd)
jax.grad(f)((1.0, (2.0, None))) # don't crash
def test_bwd_rule_shape_mismatch(self):
  # A bwd rule whose output shape disagrees with the corresponding primal
  # input shape must raise a clear error instead of silently misbehaving.
  @jax.custom_vjp
  def foo(x, y):
    return x
  def foo_fwd(x, y):
    return x, None
  def foo_bwd(_, g):
    # Second output has shape (3,), but y has shape (4,) below: mismatch.
    return jnp.zeros(3), jnp.zeros(3)
  foo.defvjp(foo_fwd, foo_bwd)

  with self.assertRaisesRegex(
      ValueError,
      r'output\[1\] the bwd rule produced an output of shape/dtype float..\[3\]'):
    jax.grad(lambda x, y: foo(x, y * y).sum(), 1)(jnp.ones(3), jnp.ones(4))
def test_bwd_rule_shape_mismatch_disable(self):
# TODO(mattjj): remove this test when the config option is removed
@jax.custom_vjp
def foo(x, y):
return x
def foo_fwd(x, y):
return x, None
def foo_bwd(_, g):
return jnp.zeros(3), jnp.zeros(3)
foo.defvjp(foo_fwd, foo_bwd)
try:
jax.config.update('jax_custom_vjp_disable_shape_check', True)
jax.grad(lambda x, y: foo(x, y).sum(), 1)(jnp.ones(3), jnp.ones(4))
finally:
jax.config.update('jax_custom_vjp_disable_shape_check', False)
def test_bwd_rule_can_produce_list_or_tuple(self):
@jax.custom_vjp
def f(x, y):
return x * y
def f_fwd(x, y):
return f(x, y), (x, y)
def f_bwd(xy, g):
x, y = xy
return [g * y, x * g] # list, not tuple
f.defvjp(f_fwd, f_bwd)
jax.grad(f)(1., 2.) # don't crash
def test_optimize_remat(self):
  # optimize_remat_of_custom_vjp_fwd: when only the primal output of `fwd` is
  # needed (residuals are dead), the rewritten fwd should be DCEed down to
  # `fun` itself.
  def fun(x):
    # This array is included to make sure that we handle consts appropriately
    return np.array([1.0])*x
  def fwd(x):
    return np.array([2.0])*x*x/np.array([1.0]), (x,)
  fwd = custom_derivatives.optimize_remat_of_custom_vjp_fwd(fun, fwd)

  x = jnp.linspace(0, 5.0, 10)
  # Full output (primal + residuals) requested: fwd runs as written (2*x*x).
  self.assertAllClose(jax.jit(fwd)(x)[0], 2*x*x)  # Shouldn't hit custom DCE
  # Only the primal requested: DCE replaces fwd with fun (identity-like, x).
  self.assertAllClose(jax.jit(lambda x: fwd(x)[0])(x), x)  # Should be DCEed
def test_optimize_remat_vmap(self):
def fun(x):
return (np.array([1.0])*x)[0]
def fwd(x):
return (np.array([2.0])*x*x/np.array([1.0]))[0], (x,)
fwd = custom_derivatives.optimize_remat_of_custom_vjp_fwd(fun, fwd)
x = jnp.linspace(0, 5.0, 10)
self.assertAllClose(jax.jit(jax.vmap(fwd))(x)[0], 2*x*x)
self.assertAllClose(jax.jit(lambda x: jax.vmap(fwd)(x)[0])(x), x)
def test_optimize_remat_cond(self):
def fun(x):
return x
def fwd(x):
return x*x, (x,)
fwd = custom_derivatives.optimize_remat_of_custom_vjp_fwd(fun, fwd)
def g(x):
return jax.lax.cond(True, fwd, lambda x: (2.0 * x, (x,)), x)
x = jnp.linspace(0, 5.0, 10)
self.assertAllClose(jax.jit(g)(x)[0], x*x)
self.assertAllClose(jax.jit(lambda x: g(x)[0])(x), x)
def test_optimize_remat_jvp(self):
def fun(x):
return x**2
def fwd_(x):
return x*x, (x,)
fwd = custom_derivatives.optimize_remat_of_custom_vjp_fwd(fun, fwd_)
calc = jax.jvp(fwd, (3.2,), (1.0,))
expected = jax.jvp(fwd_, (3.2,), (1.0,))
self.assertAllClose(calc, expected)
@jax.jit
def g(x, t):
(y, r), (y_dot, r_dot) = jax.jvp(fwd, (x,), (t,))
return y, y_dot
calc = g(3.2, 1.0)
expected = jax.jvp(fun, (3.2,), (1.0,))
self.assertAllClose(calc, expected)
def test_optimize_remat_gh21303(self):
@jax.custom_vjp
def f(x):
return jnp.tan(x)
def f_fwd(x):
return jnp.sin(x), (x,)
def f_bwd(res, g):
x, = res
cos_x = jnp.cos(x)
return (cos_x * g,)
f.defvjp(f_fwd, f_bwd, optimize_remat=True)
def temp(x):
out = jax.remat(f)(x)
out = out ** 2
return out
v, g = jax.value_and_grad(temp)(3.2)
self.assertAllClose(v, jnp.tan(3.2)**2)
def test_optimize_remat_multiple_args(self):
def f_(x, y):
return jnp.sin(x) * y
@jax.custom_vjp
def f(x, y):
return f_(x, y)
def f_fwd(x, y):
return f(x, y), (jnp.cos(x), jnp.sin(x), y)
def f_bwd(res, g):
cos_x, sin_x, y = res
return (cos_x * g * y, sin_x * g)
f.defvjp(f_fwd, f_bwd, optimize_remat=True)
x, y = 3.2, 1.0
self.assertAllClose(jax.grad(f)(x, y), jax.grad(f_)(x, y))
def test_optimize_remat_kwargs(self):
@jax.custom_vjp
def f(x, y):
return jnp.sin(x) * y
def f_fwd(x, y, *, keyword=False):
del keyword
return f(x, y), (jnp.cos(x), jnp.sin(x), y)
def f_bwd(res, g):
cos_x, sin_x, y = res
return (cos_x * g * y, sin_x * g)
f.defvjp(f_fwd, f_bwd, optimize_remat=True)
x, y = 3.2, 1.0
jax.grad(f)(x, y) # Doesn't error
def test_optimize_remat_custom_vmap(self):
# See https://github.com/jax-ml/jax/pull/23000
@jax.custom_vjp
def f(x, y):
return jnp.sin(x) * y
@jax.custom_batching.custom_vmap
def f_fwd(x, y):
return f(x, y), (jnp.cos(x), jnp.sin(x), y)
@f_fwd.def_vmap
def f_fwd_vmap(_, in_batched, x, y):
# Insert a new const here to test the optimize_remat batching rule.
out = np.array([2.0])*f(x, y)
out_batched = (True, (True, True, True))
return (out, (jnp.cos(x), jnp.sin(x), y)), out_batched
def f_bwd(res, g):
cos_x, sin_x, y = res
return (cos_x * g * y, sin_x * g)
f.defvjp(f_fwd, f_bwd, optimize_remat=True)
x, y = jnp.linspace(0.0, 1.0, 5), jnp.linspace(2.0, 5.0, 5)
jax.jit(jax.vmap(jax.grad(f)))(x, y) # Doesn't error
def transpose_unary(f, x_example):
  """Return the transpose of the linear unary function `f`.

  `x_example` fixes the shape/dtype of `f`'s input for `linear_transpose`.
  The returned callable maps a cotangent to the single transposed result.
  """
  def apply_transpose(cotangent):
    transpose_fn = api.linear_transpose(f, x_example)
    result, = transpose_fn(cotangent)
    return result
  return apply_transpose
# This class wraps jax.custom_transpose.custom_transpose in order to pass in a
# particular tree of output type on each call. Otherwise it forwards
# all attribute access.
class _custom_transpose:
  def __init__(self, out_types, fun):
    # Tree of output types supplied to the wrapped callable on every call.
    self.out_types = out_types
    # The underlying custom_transpose-wrapped function.
    self.fun = jax.custom_transpose.custom_transpose(fun)

  def __getattr__(self, name):
    # Forward anything we don't define (notably `def_transpose`) to the
    # wrapped custom_transpose object.
    return getattr(self.fun, name)

  def __call__(self, *args):
    # Prepend the fixed output-type tree on each invocation.
    return self.fun(self.out_types, *args)
# This function is meant to be used as a decorator that delegates to
# custom_transpose but makes it easy to specify output argument types
# by example. If used directly a decorator (i.e. not invoked with
# example arguments), assumes a scalar-valued function.
#
# TODO(frostig): remove this (and its uses) once custom_transpose offers
# an option of inferring output types.
def custom_transpose(example_out):
  """Wrap a function in `_custom_transpose`, with output types by example.

  Applied directly to a callable (bare-decorator usage), a scalar output
  type is assumed. Otherwise `example_out` is an example output pytree and
  a decorator awaiting the function is returned.
  """
  if isinstance(example_out, Callable):
    # Bare decorator: assume a scalar-valued function.
    scalar_out_type = core.get_aval(0.).to_tangent_aval()
    return _custom_transpose(scalar_out_type, example_out)
  # Map each example-output leaf to its tangent aval, then defer to a
  # partial application expecting the function to wrap.
  out_types = jax.tree.map(
      lambda leaf: core.get_aval(leaf).to_tangent_aval(), example_out)
  return partial(_custom_transpose, out_types)
class CustomTransposeTest(jtu.JaxTestCase):
def test_linear_call(self):
  # linear_call(fn, tp, residual_args, linear_args): check both the forward
  # value and the linear transpose against a reference implementation.
  def f(x, y):
    def fn(r, x): return x / r
    def tp(r, t): return t / r
    return x + jax.custom_derivatives.linear_call(fn, tp, y, x)

  def f_ref(x, y):
    return x + x / y

  x = jnp.ones(2) * 6.
  y = jnp.ones(2) * 3.
  self.assertAllClose(f(x, y), f_ref(x, y))

  # Transpose in the linear argument x (y held fixed as a residual).
  f1 = lambda x: f(x, y)
  f1_ref = lambda x: f_ref(x, y)
  self.assertAllClose(transpose_unary(f1, x)(x),
                      transpose_unary(f1_ref, x)(x))
def test_linear_call_incorrect_transpose(self):
def f(x, y):
def fn(r, x): return x / r
def tp(r, t): return t / (2. * r) # nb: not the true transpose
return x + jax.custom_derivatives.linear_call(fn, tp, y, x)
def f_ref(x, y):
return x + x / y
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), f_ref(x, y))
f1 = lambda x: f(x, y)
f1_ref = lambda x: f_ref(x, 2. * y) # nb: double the reference divisor
self.assertAllClose(transpose_unary(f1, x)(x),
transpose_unary(f1_ref, x)(x))
def test_linear_call_transpose_transpose_transpose(self):
def fn(r, x): return x / r
def tp(r, t): return t / (2. * r) # nb: untrue transpose
def f_(x, y):
return x + jax.custom_derivatives.linear_call(fn, tp, y, x)
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
f = lambda x: f_(x, y)
ft = transpose_unary(f, x)
ftt = transpose_unary(ft, x)
fttt = transpose_unary(ftt, x)
self.assertAllClose(ft(x), x + tp(y, x))
self.assertAllClose(f(x), ftt(x))
self.assertAllClose(ft(x), fttt(x))
def test_linear_call_scalar_to_vector(self):
def f(c, x):
def fn(_, x):
return [x, x]
def tp(_, t):
t1, t2 = t
return t1 + t2
return jax.custom_derivatives.linear_call(fn, tp, (), c * x)
def f_ref(c, x):
return [c * x, c * x]
c, x = 2., 3.
t = [4., 5.]
self.assertAllClose(f(c, x), f_ref(c, x))
self.assertAllClose(transpose_unary(partial(f, c), x)(t),
transpose_unary(partial(f_ref, c), x)(t))
def test_linear_call_nested(self):
# identity function with an untrue transpose of 0
def id_(x):
def f(_, x): return x
def t(_, t): return 0.
return jax.custom_derivatives.linear_call(f, t, (), x)
# identity function with an untrue transpose of 7, and where both
# forward and transpose have custom transpositions that should
# never end up invoked.
def f(x):
def f_(_, x): return id_(x)
def t_(_, t): return id_(7.)
return jax.custom_derivatives.linear_call(f_, t_, (), x)
x = 5.
id_t = transpose_unary(id_, x)
id_tt = transpose_unary(id_t, x)
ft = transpose_unary(f, x)
ftt = transpose_unary(ft, x)
fttt = transpose_unary(ftt, x)
self.assertAllClose(id_(x), x)
self.assertAllClose(id_t(x), 0.)
self.assertAllClose(id_tt(x), x)
self.assertAllClose(f(x), x)
self.assertAllClose(ft(x), 7.)
self.assertAllClose(ftt(x), x)
self.assertAllClose(fttt(x), 7.)
def test_linear_call_jit(self):
def f(x, y):
def fn(r, x): return x / r
def tp(r, t): return t / r
return x + jax.custom_derivatives.linear_call(fn, tp, y, x)
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), jax.jit(f)(x, y))
f1 = lambda x: f(x, y)
self.assertAllClose(transpose_unary(f1, x)(x),
jax.jit(transpose_unary(f1, x))(x))
def test_basic(self):
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return t / r
return x + fn(y, x)
def f_ref(x, y):
return x + x / y
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), f_ref(x, y))
f1 = lambda x: f(x, y)
f1_ref = lambda x: f_ref(x, y)
self.assertAllClose(transpose_unary(f1, x)(x),
transpose_unary(f1_ref, x)(x))
def test_incorrect_transpose(self):
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return t / (2. * r) # nb: not the true transpose
return x + fn(y, x)
def f_ref(x, y):
return x + x / y
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), f_ref(x, y))
f1 = lambda x: f(x, y)
f1_ref = lambda x: f_ref(x, 2. * y) # nb: double the reference divisor
self.assertAllClose(transpose_unary(f1, x)(x),
transpose_unary(f1_ref, x)(x))
def test_transpose_transpose_transpose(self):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@custom_transpose(jnp.ones(2))
def tp(r, t): return t / (2. * r) # nb: untrue transpose
fn.def_transpose(tp)
tp.def_transpose(fn)
def f_(x, y):
return x + fn(y, x)
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
f = lambda x: f_(x, y)
ft = transpose_unary(f, x)
ftt = transpose_unary(ft, x)
fttt = transpose_unary(ftt, x)
self.assertAllClose(ft(x), x + tp(y, x))
self.assertAllClose(f(x), ftt(x))
self.assertAllClose(ft(x), fttt(x))
def test_scalar_to_vector(self):
def f(c, x):
@custom_transpose([0., 0.])
def fn(_, x):
return [x, x]
@fn.def_transpose
def tp(_, t):
t1, t2 = t
return t1 + t2
return fn((), c * x)
def f_ref(c, x):
return [c * x, c * x]
c, x = 2., 3.
t = [4., 5.]
self.assertAllClose(f(c, x), f_ref(c, x))
self.assertAllClose(transpose_unary(partial(f, c), x)(t),
transpose_unary(partial(f_ref, c), x)(t))
def test_nested(self):
# identity function with an untrue transpose of 0
def id_(x):
f = custom_transpose(lambda _, x: x)
t = custom_transpose(lambda _, t: 0.)
f.def_transpose(t)
t.def_transpose(f)
return f((), x)
# identity function with an untrue transpose of 7, and where both
# forward and transpose have custom transpositions that should
# never end up invoked.
def f(x):
f_ = custom_transpose(lambda _, x: id_(x))
t_ = custom_transpose(lambda _, t: id_(7.))
f_.def_transpose(t_)
t_.def_transpose(f_)
return f_((), x)
x = 5.
id_t = transpose_unary(id_, x)
id_tt = transpose_unary(id_t, x)
ft = transpose_unary(f, x)
ftt = transpose_unary(ft, x)
fttt = transpose_unary(ftt, x)
self.assertAllClose(id_(x), x)
self.assertAllClose(id_t(x), 0.)
self.assertAllClose(id_tt(x), x)
self.assertAllClose(f(x), x)
self.assertAllClose(ft(x), 7.)
self.assertAllClose(ftt(x), x)
self.assertAllClose(fttt(x), 7.)
def test_one_degree(self):
  # One level of custom transposition: the first transpose uses `ft` (3x);
  # further transposes appear to stay at 3x, since `ft` itself has no
  # registered transpose of its own.
  T = lambda f: transpose_unary(f, 0.)

  @custom_transpose
  def f(_, z): return 2. * z
  @f.def_transpose
  def ft(_, z): return 3. * z

  f = partial(f, ())  # bind the empty residual tuple
  self.assertAllClose(2., f(1.))
  self.assertAllClose(3., T(f)(1.))
  self.assertAllClose(3., T(T(f))(1.))
  self.assertAllClose(3., T(T(T(f)))(1.))
  self.assertAllClose(3., T(T(T(T(f))))(1.))  # ...
def test_two_degrees(self):
T = lambda f: transpose_unary(f, 0.)
@custom_transpose
def f(_, z): return 2. * z
@f.def_transpose
@custom_transpose
def ft(_, z): return 3. * z
@ft.def_transpose
def ftt(_, z): return 7. * z
f = partial(f, ())
self.assertAllClose(2., f(1.))
self.assertAllClose(3., T(f)(1.))
self.assertAllClose(7., T(T(f))(1.))
self.assertAllClose(7., T(T(T(f)))(1.))
self.assertAllClose(7., T(T(T(T(f))))(1.)) # ...
def test_symmetric(self):
T = lambda f: transpose_unary(f, 0.)
@custom_transpose
def f(_, z): return 2. * z
@custom_transpose
def g(_, z): return 3. * z
f.def_transpose(g)
g.def_transpose(f)
f = partial(f, ())
self.assertAllClose(2., f(1.))
self.assertAllClose(3., T(f)(1.))
self.assertAllClose(2., T(T(f))(1.))
self.assertAllClose(3., T(T(T(f)))(1.))
self.assertAllClose(2., T(T(T(T(f))))(1.)) # ...
def test_recursive(self):
  # The transpose rule calls back into `f` with the constant incremented, so
  # each additional transposition bumps the multiplier by one.
  T = lambda f: transpose_unary(f, 0.)

  @custom_transpose
  def f(c, z): return c * z
  @f.def_transpose
  def ft(c, z): return f(c + 1., z)

  g = partial(f, 1.)  # start with multiplier 1
  self.assertAllClose(1., g(1.))
  self.assertAllClose(2., T(g)(1.))
  self.assertAllClose(3., T(T(g))(1.))
  self.assertAllClose(4., T(T(T(g)))(1.))
  self.assertAllClose(5., T(T(T(T(g))))(1.))  # ...
def test_jvp_lin(self):
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return t / r
return x + fn(y, x)
def f_ref(x, y): return x + x / y
x, y, tx = 6., 3., 1.
g = lambda x: f(x, y)
g_ref = lambda x: f_ref(x, y)
self.assertAllClose(api.jvp(g, [x], [tx]), api.jvp(g_ref, [x], [tx]))
def test_jvp_res(self):
raise unittest.SkipTest('unimplemented') # TODO(frostig)
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return t / r
return x + fn(y, x)
def f_ref(x, y): return x + x / y
x, y, ty = 6., 3., 1.
g = lambda y: f(x, y)
g_ref = lambda y: f_ref(x, y)
self.assertAllClose(api.jvp(g, [y], [ty]), api.jvp(g_ref, [y], [ty]))
def test_jvp_both(self):
raise unittest.SkipTest('unimplemented') # TODO(frostig)
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return t / r
return x + fn(y, x)
def f_ref(x, y): return x + x / y
x, y, tx, ty = 6., 3., 1., 1.
self.assertAllClose(api.jvp(f, [x, y], [tx, ty]),
api.jvp(f_ref, [x, y], [tx, ty]))
def test_make_jaxpr(self):
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return 2 * t / r
return x + fn(y, x)
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
f_ = lambda x: f(x, y)
f_t = transpose_unary(f_, x)
jaxpr = api.make_jaxpr(f_)(x)
self.assertIn('custom_transpose_call', str(jaxpr))
jaxpr_t = api.make_jaxpr(f_t)(x)
self.assertNotIn('custom_transpose_call', str(jaxpr_t))
def test_jit(self):
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return 2 * t / r
return x + fn(y, x)
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), jax.jit(f)(x, y))
f_ = lambda x: f(x, y)
f_t = transpose_unary(f_, x)
g_ = jax.jit(f_)
g_t = transpose_unary(g_, x)
self.assertAllClose(f_(x), jax.jit(f_)(x))
self.assertAllClose(f_t(x), jax.jit(f_t)(x))
self.assertAllClose(f_(x), g_(x))
self.assertAllClose(f_t(x), g_t(x))
def test_jit_recursive(self):
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return 2 * fn(r, t)
return x + fn(y, x)
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), jax.jit(f)(x, y))
f_ = lambda x: f(x, y)
f_t = transpose_unary(f_, x)
g_ = jax.jit(f_)
g_t = transpose_unary(g_, x)
self.assertAllClose(f_(x), jax.jit(f_)(x))
self.assertAllClose(f_t(x), jax.jit(f_t)(x))
self.assertAllClose(f_(x), g_(x))
self.assertAllClose(f_t(x), g_t(x))
def test_cond(self):
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return 2 * t / r
return x + fn(y, x)
def cond_wrap(f):
return lambda i, x: lax.cond(i > 0, f, lambda x: x, x)
i = 7.
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
f_ = lambda x: f(x, y)
f_t = transpose_unary(f_, x)
g_ = partial(cond_wrap(f_), i)
g_t = transpose_unary(g_, x)
self.assertAllClose(f_(x), g_(x))
self.assertAllClose(f_t(x), g_t(x))
def test_cond_recursive(self):
def f(x, y):
@custom_transpose(jnp.ones(2))
def fn(r, x): return x / r
@fn.def_transpose
def tp(r, t): return 2 * fn(r, t)
return x + fn(y, x)
def cond_wrap(f):
return lambda i, x: lax.cond(i > 0, f, lambda x: x, x)
i = 7.
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
f_ = lambda x: f(x, y)
f_t = transpose_unary(f_, x)
g_ = partial(cond_wrap(f_), i)
g_t = transpose_unary(g_, x)
self.assertAllClose(f_(x), g_(x))
self.assertAllClose(f_t(x), g_t(x))
class CustomVmapTest(jtu.JaxTestCase):
def test_basic(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
xs_batched, = in_batched
self.assertEqual(xs_batched, True)
self.assertEqual(axis_size, xs.shape[0])
return jnp.cos(xs), xs_batched
x, xs = jnp.array(1.), jnp.arange(3)
y = f(x)
self.assertAllClose(y, jnp.sin(x))
ys = api.vmap(f)(xs)
self.assertAllClose(ys, jnp.cos(xs))
@jax.numpy_dtype_promotion('standard')
def test_closure(self):
z = jnp.array([2., 1., 3.])
@jax.custom_batching.custom_vmap
def f(x): return z + jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, *args):
self.assertEqual(len(in_batched), 1)
self.assertEqual(len(args), 1)
xs, = args
xs_batched, = in_batched
self.assertEqual(xs_batched, True)
self.assertEqual(axis_size, xs.shape[0])
return z + jnp.cos(xs), xs_batched
x, xs = jnp.array(1.), jnp.arange(3)
y = f(x)
self.assertAllClose(y, z + jnp.sin(x))
ys = api.vmap(f)(xs)
self.assertAllClose(ys, z + jnp.cos(xs))
def test_rule_multi_output(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x), jnp.cos(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
return (jnp.cos(xs), jnp.sin(xs)), tuple(in_batched * 2)
x, xs = jnp.array(1.), jnp.arange(3)
y1, y2 = f(x)
self.assertAllClose(y1, jnp.sin(x))
self.assertAllClose(y2, jnp.cos(x))
ys1, ys2 = api.vmap(f)(xs)
self.assertAllClose(ys1, jnp.cos(xs))
self.assertAllClose(ys2, jnp.sin(xs))
def test_nary(self):
@jax.custom_batching.custom_vmap
def f(x, y): return jnp.sin(x) + y ** 2.
@f.def_vmap
def rule(axis_size, in_batched, xs, ys):
self.assertEqual(in_batched, [True, True])
self.assertEqual(axis_size, 3)
self.assertEqual(axis_size, xs.shape[0])
self.assertEqual(axis_size, ys.shape[0])
return jnp.cos(xs) + ys ** 2., True
xs, ys = jnp.arange(3.0), jnp.arange(3.0)
zs = api.vmap(f)(xs, ys)
self.assertAllClose(zs, jnp.cos(xs) + ys ** 2.)
def test_nary_mixed_batching(self):
@jax.custom_batching.custom_vmap
def vector_dot(u, v):
self.assertEqual(u.ndim, 1)
self.assertEqual(v.ndim, 1)
return u @ v
size = 4
vlen = 3
in_batched_log = []
@vector_dot.def_vmap
def vector_dot_vmap_rule(axis_size, in_batched, u, v):
in_batched_log.append(in_batched)
self.assertEqual(axis_size, size)
u_batched, v_batched = in_batched
if u_batched:
self.assertEqual(u.ndim, 2)
self.assertEqual(u.shape[0], size)
else:
self.assertEqual(u.ndim, 1)
self.assertEqual(u.shape[0], vlen)
if v_batched:
self.assertEqual(v.ndim, 2)
self.assertEqual(v.shape[0], size)
else:
self.assertEqual(v.ndim, 1)
self.assertEqual(v.shape[0], vlen)
if u_batched and v_batched:
out = jnp.sum(u * v, axis=1)
else:
out = u @ v if u_batched else v @ u
return out, u_batched or v_batched
f = vector_dot
v = lambda *shape: jnp.ones(shape)
y = api.vmap(f, in_axes=(0, None))(v(4, 3), v(3))
self.assertAllClose(y, v(4, 3) @ v(3))
y = api.vmap(f, in_axes=(1, None))(v(3, 4), v(3))
self.assertAllClose(y, v(3, 4).T @ v(3))
y = api.vmap(f, in_axes=(None, 0))(v(3), v(4, 3))
self.assertAllClose(y, v(3) @ v(4, 3).T)
y = api.vmap(f, in_axes=(0, 0))(v(4, 3), v(4, 3))
self.assertAllClose(y, jnp.sum(v(4, 3) * v(4, 3), axis=1))
self.assertEqual(in_batched_log[0], [True, False])
self.assertEqual(in_batched_log[1], [True, False])
self.assertEqual(in_batched_log[2], [False, True])
self.assertEqual(in_batched_log[3], [True, True])
def test_rule_input_signature(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
rule_args = []
@f.def_vmap
def rule(axis_size, in_batched, xs):
rule_args.append((axis_size, in_batched))
return jnp.cos(xs), in_batched[0]
xs = jnp.arange(3)
_ = api.vmap(f)(xs)
(axis_size, in_batched), = rule_args
self.assertIs(type(axis_size), int)
self.assertIs(type(in_batched), list)
self.assertEqual(len(in_batched), 1)
def test_rule_output_vs_batching_output_mismatch(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def test_rule_abc(axis_size, in_batched, xs):
return [jnp.sin(xs), jnp.cos(xs)], in_batched
xs = jnp.arange(3)
self.assertRaisesRegex(
ValueError,
'structure of output value and output batching specification '
r'returned by custom vmap rule \(test_rule_abc\) do not match.*',
lambda: api.vmap(f)(xs))
def test_rule_vs_call_output_mismatch(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def test_rule_abc2(axis_size, in_batched, xs):
return [jnp.sin(xs)], in_batched
xs = jnp.arange(3)
self.assertRaisesRegex(
ValueError,
r'structure of output returned by custom vmap rule \(test_rule_abc2\) '
r'does not match that of original custom-vmapped function.*',
lambda: api.vmap(f)(xs))
def test_jvp_basic(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [True])
return jnp.cos(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
x, tx = jnp.array(1.), jnp.array(2.)
xs, txs = jnp.arange(3.), jnp.arange(3.) * 2.
y, ty = f_jvp(x, tx)
self.assertAllClose(y, jnp.sin(x))
self.assertAllClose(ty, jnp.cos(x) * tx)
ys, tys = api.vmap(f_jvp)(xs, txs)
self.assertAllClose(ys, jnp.cos(xs))
self.assertAllClose(tys, -jnp.sin(xs) * txs)
ys, tys = api.jvp(api.vmap(f), [xs], [txs])
self.assertAllClose(ys, jnp.cos(xs))
self.assertAllClose(tys, -jnp.sin(xs) * txs)
@jax.numpy_dtype_promotion('standard')
def test_jvp_closure(self):
z = jnp.array([2., 1., 3.])
def bcast(x): return z + x - z
@jax.custom_batching.custom_vmap
def f(x): return z + jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [True])
return z + jnp.cos(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
x, tx = jnp.array(1.), jnp.array(2.)
xs, txs = jnp.arange(3.), jnp.arange(3.) * 2.
y, ty = f_jvp(x, tx)
self.assertAllClose(y, z + jnp.sin(x))
self.assertAllClose(ty, bcast(jnp.cos(x)) * tx)
ys, tys = api.vmap(f_jvp)(xs, txs)
self.assertAllClose(ys, z + jnp.cos(xs))
self.assertAllClose(tys, bcast(-jnp.sin(xs)) * txs)
ys, tys = api.jvp(api.vmap(f), [xs], [txs])
self.assertAllClose(ys, z + jnp.cos(xs))
self.assertAllClose(tys, bcast(-jnp.sin(xs)) * txs)
def test_jvp_nary(self):
@jax.custom_batching.custom_vmap
def f(x, y): return jnp.sin(x) + y
@f.def_vmap
def rule(axis_size, in_batched, xs, ys):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [True, True])
return jnp.cos(xs) + ys, True
f_jvp = lambda x, y, tx, ty: api.jvp(f, [x, y], [tx, ty])
x, y, tx, ty = jnp.arange(4.)
xs, ys, txs, tys = 4. + jnp.arange(3. * 4).reshape((4, 3))
zs, tzs = api.vmap(f_jvp)(xs, ys, txs, tys)
self.assertAllClose(zs, jnp.cos(xs) + ys)
self.assertAllClose(tzs, -jnp.sin(xs) * txs + tys)
zs, tzs = api.jvp(api.vmap(f), [xs, ys], [txs, tys])
self.assertAllClose(zs, jnp.cos(xs) + ys)
self.assertAllClose(tzs, -jnp.sin(xs) * txs + tys)
def test_jvp_extra_batched_tangents(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [False])
return jnp.cos(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
txs = 2. + jnp.arange(3.)
x = jnp.array(1, dtype=txs.dtype)
y, tys = api.vmap(f_jvp, in_axes=(None, 0), out_axes=(None, 0))(x, txs)
self.assertAllClose(y, jnp.cos(x))
self.assertAllClose(tys, -jnp.sin(x) * txs)
def test_jacfwd(self):
# jacfwd is another way to exercise extra-batched tangents
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [False])
return jnp.cos(xs), in_batched[0]
x = jnp.arange(3.) + .72
j = api.jacfwd(f)(x)
self.assertAllClose(j, -jnp.diag(jnp.sin(x)))
def test_jvp_extra_batched_primals(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [False])
return jnp.cos(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
xs = jnp.arange(3.)
tx = jnp.array(4, dtype=xs.dtype)
ys, tys = api.vmap(f_jvp, in_axes=(0, None))(xs, tx)
self.assertAllClose(ys, jnp.cos(xs))
self.assertAllClose(tys, -jnp.sin(xs) * tx)
def test_jvp_extra_batched_primals_with_linear_vmap_rule(self):
# When a function is linear, its Jacobian is constant. JAX's JVP
# of linear functions takes advantage of this: when mapping over a
# batch of primals relative to a fixed (i.e. symbolically
# replicated) tangent, output tangents remain replicated as well
# (i.e. JAX will not broadcast them). This is true in general, and
# this test checks that vmapped JVPs continue to behave this way
# when custom_vmap is involved and the custom vmap rule is linear.
@jax.custom_batching.custom_vmap
def f_linear(x): return 7. * x
@f_linear.def_vmap
def linear_rule(axis_size, in_batched, xs):
return 11. * xs, in_batched[0]
@jax.custom_batching.custom_vmap
def f_nonlinear(x): return jnp.sin(x)
@f_nonlinear.def_vmap
def nonlinear_rule(axis_size, in_batched, xs):
return jnp.cos(xs), in_batched[0]
f_lin_jvp = lambda x, tx: api.jvp(f_linear, [x], [tx])
f_non_jvp = lambda x, tx: api.jvp(f_nonlinear, [x], [tx])
xs = jnp.arange(3.)
tx = jnp.array(4., dtype=xs.dtype)
# doesn't err
_ = api.vmap(f_lin_jvp, in_axes=(0, None), out_axes=(0, None))(xs, tx)
# does err
self.assertRaisesRegex(
ValueError, "at vmap out_axes",
lambda: api.vmap(
f_non_jvp, in_axes=(0, None), out_axes=(0, None))(xs, tx))
def test_jvp_dataflow_violation(self):
# The jvp-of-custom-vmap machinery should not assume the standard
# dataflow constraint on the JVP of the custom vmap rule (primal
# outputs independent of tangent inputs). Both jvp and vmap are
# "forward" transformations under which, at present, we don't
# enforce the JVP dependence diagram. Because output primals can
# depend on input tangents, extra-batched input tangents can
# create batched output primals, as this test checks.
@jax.custom_jvp
def cos_with_invalid_dataflow_jvp(x): return jnp.cos(x)
@cos_with_invalid_dataflow_jvp.defjvp
def invalid_dataflow_jvp(x, tx):
[x], [tx] = x, tx
return jnp.cos(x * tx), tx
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
return cos_with_invalid_dataflow_jvp(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
txs = 2. + jnp.arange(3.)
x = jnp.array(1, dtype=txs.dtype)
# doesn't err
ys, tys = api.vmap(f_jvp, in_axes=(None, 0))(x, txs)
self.assertAllClose(ys, jnp.cos(x * txs))
self.assertAllClose(tys, txs)
# does err
self.assertRaisesRegex(
ValueError, "at vmap out_axes",
lambda: api.vmap(
f_jvp, in_axes=(None, 0), out_axes=(None, 0))(x, txs))
def test_tree(self):
tree_sin = partial(jax.tree.map, jnp.sin)
tree_cos = partial(jax.tree.map, jnp.cos)
x, xs = jnp.array(1.), jnp.arange(3)
x = (x, [x + 1, x + 2], [x + 3], x + 4)
xs = (xs, [xs + 1, xs + 2], [xs + 3], xs + 4)
in_batched_ref = jax.tree.map(lambda _: True, x)
@jax.custom_batching.custom_vmap
def f(xs): return tree_sin(xs)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(in_batched, [in_batched_ref])
sz, = {z.shape[0] for z in jax.tree.leaves(xs)}
self.assertEqual(axis_size, sz)
return tree_cos(xs), in_batched[0]
y = f(x)
self.assertAllClose(y, tree_sin(x))
ys = api.vmap(f)(xs)
self.assertAllClose(ys, tree_cos(xs))
def test_tree_with_nones(self):
tree_sin = partial(jax.tree.map, jnp.sin)
tree_cos = partial(jax.tree.map, jnp.cos)
x, xs = jnp.array(1.), jnp.arange(3)
x = (x, [x + 1, None], [x + 3], None)
xs = (xs, [xs + 1, None], [xs + 3], None)
in_batched_ref = jax.tree.map(lambda _: True, x)
@jax.custom_batching.custom_vmap
def f(xs): return tree_sin(xs)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(in_batched, [in_batched_ref])
sz, = {z.shape[0] for z in jax.tree.leaves(xs)}
self.assertEqual(axis_size, sz)
return tree_cos(xs), in_batched[0]
y = f(x)
self.assertAllClose(y, tree_sin(x))
ys = api.vmap(f)(xs)
self.assertAllClose(ys, tree_cos(xs))
def test_jit(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(in_batched, [True])
self.assertEqual(axis_size, xs.shape[0])
return jnp.cos(xs), in_batched[0]
x, xs = jnp.array(1.), jnp.arange(3)
self.assertAllClose(f(x), jit(f)(x))
self.assertAllClose(jit(api.vmap(f))(xs), api.vmap(f)(xs))
self.assertAllClose(api.vmap(jit(f))(xs), api.vmap(f)(xs))
def test_sequential_vmap_basic(self):
@jax.custom_batching.sequential_vmap
def f(x):
return x + 1.
def vmap_ref(xs):
return lax.map(f, xs)
xs = jnp.arange(3.)
jaxpr = api.make_jaxpr(api.vmap(f))(xs)
jaxpr_ref = api.make_jaxpr(vmap_ref)(xs)
self.assertEqual(str(jaxpr), str(jaxpr_ref))
def test_sequential_vmap_nary_same_batching(self):
@jax.custom_batching.sequential_vmap
def f(x, y):
return x + y
def vmap_ref(xs, ys):
return lax.map(lambda args: f(*args), (xs, ys))
xs, ys = jnp.arange(3.), 4. + jnp.arange(3.)
jaxpr = api.make_jaxpr(api.vmap(f))(xs, ys)
jaxpr_ref = api.make_jaxpr(vmap_ref)(xs, ys)
self.assertEqual(str(jaxpr), str(jaxpr_ref))
def test_sequential_vmap_nary_mixed_batching(self):
@jax.custom_batching.sequential_vmap
def f(x, y):
return x + y
def vmap_ref(xs, y):
return lax.map(lambda x: f(x, y), xs)
xs, y = jnp.arange(3.), 4.
jaxpr = api.make_jaxpr(api.vmap(f, in_axes=(0, None)))(xs, y)
jaxpr_ref = api.make_jaxpr(vmap_ref)(xs, y)
self.assertEqual(str(jaxpr), str(jaxpr_ref))
@parameterized.named_parameters(
("1", 1),
("8", 4),
("12", 8),
("16", 16),
)
def test_batch_map_basic(self, batch_size: int):
def f(x):
self.assertEqual(x.shape, ())
return x**2
x = np.arange(16)
y = jax.lax.map(f, x, batch_size=batch_size)
np.testing.assert_array_equal(y, x**2)
@parameterized.named_parameters(
("1", 1),
("8", 4),
("12", 8),
("16", 16),
)
def test_batch_map_pytrees(self, batch_size: int):
f = lambda x: {'b': x['a'] ** 2}
inputs = {'a': np.arange(16)}
expected = np.arange(16) ** 2
outputs = jax.lax.map(f, inputs, batch_size=batch_size)
self.assertAllClose(outputs['b'], expected)
outputs = jax.lax.map(
f, inputs, batch_size=batch_size
)
self.assertAllClose(outputs['b'], expected)
def test_batch_divides_axis(self):
def f(t):
x, a = t
self.assertEqual(x.shape, (4,))
return (x + a)**2
x = jax.random.randint(jax.random.key(0), (16, 4), -10, 10)
a = jax.random.randint(jax.random.key(1), (16, 4), -10, 10)
@jax.jit
def g(x, a):
return jax.lax.map(f, (x, a), batch_size=8)
y = g(x, a)
self.assertAllClose(y, (x + a)**2)
def test_undefined_rule(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
with self.assertRaisesRegex(
AttributeError, "No batching rule defined for custom_vmap function f"):
f(0.5)
def test_kwargs(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
xs_batched, = in_batched
self.assertEqual(xs_batched, True)
self.assertEqual(axis_size, xs.shape[0])
return jnp.cos(xs), xs_batched
x, xs = jnp.array(1.), jnp.arange(3)
y = f(x=x)
self.assertAllClose(y, jnp.sin(x))
ys = api.vmap(f)(x=xs)
self.assertAllClose(ys, jnp.cos(xs))
def test_partial_eval_raises(self):
@jax.custom_batching.custom_vmap
def f(x):
return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
del axis_size # unused
return jnp.cos(xs), in_batched[0]
with self.assertRaisesRegex(
ValueError,
"Linearization failed to produce known values for all output primals",
):
jax.grad(f)(0.5)
def test_compose_custom_vjp(self):
@jax.custom_vjp
@jax.custom_batching.custom_vmap
def f(x, y):
return jnp.sin(x) * y
@f.def_vmap
def f_vmap_rule(axis_size, in_batched, xs, ys):
return jnp.cos(xs) * ys, True
def f_fwd(x, y):
return f(x, y), (jnp.cos(x), jnp.sin(x), y)
def f_bwd(res, g):
cos_x, sin_x, y = res
return (cos_x * g * y, sin_x * g)
f.defvjp(f_fwd, f_bwd)
xs = jnp.linspace(0, 1, 5)
ys = jnp.linspace(-0.1, 0.1, 5)
self.assertAllClose(jax.vmap(f)(xs, ys), jnp.cos(xs) * ys)
jax.grad(f)(xs[0], ys[0]) # Doesn't crash.
def test_compose_custom_vjp_bwd_rule(self):
# This tests the case where both the forward and backward rules are wrapped
# in custom_vmap.
@jax.custom_batching.sequential_vmap
def fun_fwd(x, y):
return jnp.sin(x) * y, (x, y)
@jax.custom_batching.sequential_vmap
def fun_bwd(res, ct):
x, y = res
return x * ct, y * ct
fun = jax.custom_vjp(lambda *args: fun_fwd(*args)[0])
fun.defvjp(fun_fwd, fun_bwd)
xs = jnp.linspace(0, 1, 5)
y = jnp.array(0.5, dtype=xs.dtype)
f = jax.vmap(jax.jit(fun), in_axes=(0, None))
out, f_vjp = jax.vjp(f, xs, y)
f_vjp(out) # Doesn't crash.
class CustomApiTest(jtu.JaxTestCase):
"""Test interactions among the custom_{vmap,jvp,vjp,transpose,*} APIs"""
def test_method_forwarding(self):
@jax.custom_batching.custom_vmap
@jax.custom_jvp
@jax.custom_transpose.custom_transpose
def f(x): return 2. * x
# none of these err:
@f.def_vmap
def f_batch(sz, b, xs): return 2. * xs
@f.defjvp
def f_jvp(x, tx): return 2. * x, 2. * tx
@f.def_transpose
def f_transpose(x): return 2. * x
def test_def_method_forwarding_all_permutations(self):
for wraps in it.permutations([
jax.custom_jvp, jax.custom_transpose.custom_transpose, jax.custom_batching.custom_vmap]):
f = lambda x: x + 1.
for wrap in wraps:
f = wrap(f)
for methods in it.permutations(['defjvp', 'def_vmap', 'def_transpose']):
for method in methods:
self.assertIsInstance(getattr(f, method), Callable)
for decorators in it.permutations([
jax.custom_vjp, jax.custom_transpose.custom_transpose, jax.custom_batching.custom_vmap]):
f = lambda x: x + 1.
for decorator in decorators:
f = decorator(f)
for methods in it.permutations(['defvjp', 'def_vmap', 'def_transpose']):
for method in methods:
self.assertIsInstance(getattr(f, method), Callable)
class BufferDonationTest(jtu.BufferDonationTestCase):
@jtu.device_supports_buffer_donation()
def test_pmap_donate_argnums_invalidates_input(self):
move = api.pmap(lambda x: x + x - x, donate_argnums=0)
n = jax.local_device_count()
x = api.pmap(lambda x: x)(jnp.ones([n]))
y = move(x)
self.assertDeleted(x)
np.testing.assert_allclose(y, [1.] * n)
@jtu.device_supports_buffer_donation()
def test_pmap_nested_donate_ignored(self):
pmap_fun = jit(lambda x: api.pmap(lambda y: y ** 2, donate_argnums=0)(x))
a = api.pmap(lambda x: x)(jnp.array([1]))
# NOTE(mattjj): stopped raising error here and instead just ignored
# with self.assertRaisesRegex(ValueError, "nested.*not supported"):
# pmap_fun(a)
pmap_fun(a) # doesn't crash
class NamedCallTest(jtu.JaxTestCase):
def test_non_jaxtype_arg(self):
# For the test to fail without the invalid JaxType filter we need to pass
# in a valid JaxType that forces the invalid Jaxtype to be raised to an
# abstract value.
def f(not_a_jaxtype, a_jaxtype):
# then Jax needs to try and evaluate the abstractified non-JaxType
if not_a_jaxtype:
return a_jaxtype
return 0
f = api.named_call(f, name="test")
out = jax.jit(f, static_argnums=(0,))("not a Jaxtype", 1)
self.assertEqual(out, 1)
@parameterized.parameters(jax.jit, jax.grad, jax.vmap, jax.remat)
def test_jax_transforms(self, transform):
f = jnp.sum
x = jnp.array([1.])
unnamed_out = transform(f)(x)
named_out = transform(api.named_call(f, name="test"))(x)
self.assertEqual(unnamed_out, named_out)
def test_static_argnums(self):
f = api.named_call(lambda x, y: y if x else None, name="test")
f = jax.jit(f, static_argnums=(0,))
out = f(True, 5)
self.assertEqual(out, 5)
def test_partial_eval(self):
f = api.named_call(lambda x, y: y if x else None, name="test")
f = jax.jit(functools.partial(f, True))
out = f(5)
self.assertEqual(out, 5)
@parameterized.parameters(
[dict(func=func, jit=jit)
for func in ['identity_trivial', 'identity', 'closure_trivial', 'closure',
'asarray', 'device_put']
for jit in jtu.JIT_IMPLEMENTATION
if not (jit._name == "noop" and func in ('identity', 'identity_trivial'))
],
)
def test_integer_overflow(self, jit, func):
funcdict = {
'identity_trivial': lambda x: x, # may hit trivial dispatch path
'identity': lambda x: x + 0,
'closure_trivial': lambda x: jax.jit(lambda: x)(),
'closure': lambda x: jax.jit(lambda: x + 0)(),
'asarray': lambda x: jnp.asarray(x), # add lambdas so no cross-test cache
'device_put': lambda x: api.device_put(x),
}
f = jit(funcdict[func])
int_dtype = dtypes.canonicalize_dtype(jnp.int64)
int_max = np.iinfo(int_dtype).max
int_min = np.iinfo(int_dtype).min
# check before any jit cache entries
self.assertRaises(OverflowError, f, int_max + 1)
self.assertRaises(OverflowError, f, int_min - 1)
self.assertEqual(f(int_max).dtype, int_dtype)
self.assertEqual(f(int_min).dtype, int_dtype)
self.assertAllClose(f(int_max), int_max)
self.assertAllClose(f(int_min), int_min)
# check after any cache entries
self.assertRaises(OverflowError, f, int_max + 1)
self.assertRaises(OverflowError, f, int_min - 1)
if func in ('trivial', 'identity'):
self.assertRaisesRegex(
OverflowError, 'An overflow.*whose argument path is x.', f,
int_max + 1)
class BackendsTest(jtu.JaxTestCase):
@unittest.skipIf(not sys.executable, "test requires sys.executable")
@jtu.run_on_devices("cpu")
def test_no_backend_warning_on_cpu_if_platform_specified(self):
warning_not_expected = (
"import jax; "
"jax.config.update('jax_platform_name', 'cpu'); "
"jax.numpy.arange(10)")
result = subprocess.run([sys.executable, '-c', warning_not_expected],
check=True, capture_output=True)
assert "may be present" not in result.stderr.decode()
class CleanupTest(jtu.JaxTestCase):
def test_call_wrapped_second_phase_cleanup(self):
try:
jax.vmap(lambda x: x, out_axes=None)(jnp.arange(3))
except:
assert core.trace_state_clean() # this is the hard one
assert core.trace_state_clean()
class EnvironmentInfoTest(jtu.JaxTestCase):
@parameterized.parameters([True, False])
def test_print_environment_info(self, return_string):
# Flush stdout buffer before checking.
sys.stdout.flush()
with jtu.capture_stdout() as stdout:
result = jax.print_environment_info(return_string=return_string)
if return_string:
self.assertEmpty(stdout())
else:
self.assertIsNone(result)
result = stdout()
assert f"jax: {jax.__version__}" in result
assert f"jaxlib: {lib.version_str}" in result
assert f"numpy: {np.__version__}" in result
class AutodidaxTest(jtu.JaxTestCase):
def test_autodidax_smoketest(self):
autodidax_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'docs',
'autodidax.py')
if not os.path.exists(autodidax_file):
self.skipTest("Cannot locate autodidax.py")
spec = importlib.util.spec_from_file_location('autodidax', autodidax_file)
autodidax_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(autodidax_module)
class GarbageCollectionTest(jtu.JaxTestCase):
def test_xla_gc_callback(self):
# https://github.com/jax-ml/jax/issues/14882
x_np = np.arange(10, dtype='int32')
x_jax = jax.device_put(x_np)
x_np_weakref = weakref.ref(x_np)
del x_np
del x_jax
gc.collect()
assert x_np_weakref() is None
class OverrideLoweringTest(jtu.JaxTestCase):
def test_sharding_constraint_as_noop(self):
def f(x):
return jax.lax.with_sharding_constraint(
x, jax.sharding.SingleDeviceSharding(jax.devices()[0]))
def wsc_as_noop(ctx, operand, *args, **kwargs):
del ctx, args, kwargs
return [operand]
rules = ((jax.lax.sharding_constraint_p, wsc_as_noop),)
lowered_ir = (
jax.jit(f)
.trace(jax.ShapeDtypeStruct((2, 4), dtype=jnp.bfloat16))
.lower(_private_parameters=mlir.LoweringParameters(
override_lowering_rules=rules))
.as_text()
)
self.assertNotIn("stablehlo.custom_call @Sharding", lowered_ir)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@api_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/rbsp/rbspice_lib/__init__.py",
"type": "Python"
}
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@rbsp@rbspice_lib@__init__.py@.PATH_END.py
|
|
{
"filename": "_nticks.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/parcats/line/colorbar/_nticks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="nticks", parent_name="parcats.line.colorbar", **kwargs
):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@parcats@line@colorbar@_nticks.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "sibirrer/psfr",
"repo_path": "psfr_extracted/psfr-main/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [ ]
test_requirements = ['pytest>=3', ]
setup(
author="Simon Birrer",
author_email='sibirrer@gmail.com',
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.11',
],
description="Telescope Images Point Spread Function Reconstruction",
install_requires=requirements,
long_description=readme + '\n\n' + history,
include_package_data=True,
license = 'BSD 3-Clause',
license_file = 'LICENSE.rst',
keywords='psfr',
name='psfr',
packages=find_packages(include=['psfr', 'psfr.*']),
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/sibirrer/psfr',
version='0.1.0',
zip_safe=False,
)
|
sibirrerREPO_NAMEpsfrPATH_START.@psfr_extracted@psfr-main@setup.py@.PATH_END.py
|
{
"filename": "plotter_interface.py",
"repo_name": "Jammy2211/PyAutoLens",
"repo_path": "PyAutoLens_extracted/PyAutoLens-main/autolens/imaging/model/plotter_interface.py",
"type": "Python"
}
|
from os import path
from typing import List
import autoarray.plot as aplt
from autogalaxy.imaging.model.plotter_interface import PlotterInterfaceImaging as AgPlotterInterfaceImaging
from autolens.analysis.plotter_interface import PlotterInterface
from autolens.imaging.fit_imaging import FitImaging
from autolens.imaging.plot.fit_imaging_plotters import FitImagingPlotter
from autolens.analysis.plotter_interface import plot_setting
class PlotterInterfaceImaging(PlotterInterface):
    """
    Visualizes imaging-dataset model fits during and after a non-linear search.

    The dataset-level visualization methods are reused from autogalaxy; only the
    fit visualization below is lensing specific (it adds tracer / plane figures).
    """
    # Borrow the dataset plotting methods from the autogalaxy interface unchanged.
    imaging = AgPlotterInterfaceImaging.imaging
    imaging_combined = AgPlotterInterfaceImaging.imaging_combined
    def fit_imaging(
        self, fit: FitImaging, during_analysis: bool, subfolders: str = "fit_dataset"
    ):
        """
        Visualizes a `FitImaging` object, which fits an imaging dataset.
        Images are output to the `image` folder of the `image_path` in a subfolder called `fit_dataset`. When
        used with a non-linear search the `image_path` points to the search's results folder and this function
        visualizes the maximum log likelihood `FitImaging` inferred by the search so far.
        Visualization includes individual images of attributes of the `FitImaging` (e.g. the model data, residual map)
        and a subplot of all `FitImaging`'s images on the same figure.
        The images output by the `PlotterInterface` are customized using the file `config/visualize/plots.yaml` under the
        [fit] header.
        Parameters
        ----------
        fit
            The maximum log likelihood `FitImaging` of the non-linear search which is used to plot the fit.
        during_analysis
            Whether visualization is performed during a non-linear search or once it is completed.
        subfolders
            The subfolder of the `image` folder that the individual fit images are output to.
        """
        # The tracer subplot is toggled by the [tracer] section of plots.yaml (not
        # [fit]) and is written to the top-level image folder (empty subfolder).
        if plot_setting(section="tracer", name="subplot_tracer"):
            mat_plot_2d = self.mat_plot_2d_from(subfolders="")
            fit_plotter = FitImagingPlotter(
                fit=fit, mat_plot_2d=mat_plot_2d, include_2d=self.include_2d
            )
            fit_plotter.subplot_tracer()
        # Helper reading the on/off switch for a named figure from plots.yaml.
        def should_plot(name):
            return plot_setting(section=["fit", "fit_imaging"], name=name)
        # Individual figures of the fit, each gated by its plots.yaml setting.
        mat_plot_2d = self.mat_plot_2d_from(subfolders=subfolders)
        fit_plotter = FitImagingPlotter(
            fit=fit, mat_plot_2d=mat_plot_2d, include_2d=self.include_2d
        )
        fit_plotter.figures_2d(
            data=should_plot("data"),
            noise_map=should_plot("noise_map"),
            signal_to_noise_map=should_plot("signal_to_noise_map"),
            model_image=should_plot("model_data"),
            residual_map=should_plot("residual_map"),
            chi_squared_map=should_plot("chi_squared_map"),
            normalized_residual_map=should_plot("normalized_residual_map"),
        )
        fit_plotter.figures_2d_of_planes(
            subtracted_image=should_plot("subtracted_images_of_planes"),
            model_image=should_plot("model_images_of_planes"),
            plane_image=should_plot("plane_images_of_planes"),
        )
        # Subplots go to the top-level image folder, so a new plotter is built
        # with an empty subfolder.
        mat_plot_2d = self.mat_plot_2d_from(subfolders="")
        fit_plotter = FitImagingPlotter(
            fit=fit, mat_plot_2d=mat_plot_2d, include_2d=self.include_2d
        )
        if should_plot("subplot_fit"):
            fit_plotter.subplot_fit()
        if should_plot("subplot_fit_log10"):
            # log10 scaling can fail for maps with non-positive values.
            try:
                fit_plotter.subplot_fit_log10()
            except ValueError:
                pass
        if should_plot("subplot_of_planes"):
            fit_plotter.subplot_of_planes()
        if plot_setting(section="inversion", name="subplot_mappings"):
            # Mappings are plotted for the last plane; IndexError is raised when
            # the fit has no inversion mappings to plot, which is not an error.
            try:
                fit_plotter.subplot_mappings_of_plane(plane_index=len(fit.tracer.planes) - 1)
            except IndexError:
                pass
        # Once the search is finished, optionally re-output every figure as .png ...
        if not during_analysis and should_plot("all_at_end_png"):
            mat_plot_2d = self.mat_plot_2d_from(
                subfolders=path.join("fit_dataset", "end"),
            )
            fit_plotter = FitImagingPlotter(
                fit=fit, mat_plot_2d=mat_plot_2d, include_2d=self.include_2d
            )
            fit_plotter.figures_2d(
                data=True,
                noise_map=True,
                signal_to_noise_map=True,
                model_image=True,
                residual_map=True,
                normalized_residual_map=True,
                chi_squared_map=True,
            )
            fit_plotter.figures_2d_of_planes(
                subtracted_image=True, model_image=True, plane_image=True
            )
        # ... and/or as .fits files for quantitative inspection.
        if not during_analysis and should_plot("all_at_end_fits"):
            mat_plot_2d = self.mat_plot_2d_from(
                subfolders=path.join("fit_dataset", "fits"), format="fits"
            )
            fit_plotter = FitImagingPlotter(
                fit=fit, mat_plot_2d=mat_plot_2d, include_2d=self.include_2d
            )
            fit_plotter.figures_2d(
                data=True,
                noise_map=True,
                signal_to_noise_map=True,
                model_image=True,
                residual_map=True,
                normalized_residual_map=True,
                chi_squared_map=True,
            )
            fit_plotter.figures_2d_of_planes(
                subtracted_image=True, model_image=True, plane_image=True, interpolate_to_uniform=True
            )
    def fit_imaging_combined(self, fit_list: List[FitImaging]):
        """
        Output visualization of all `FitImaging` objects in a summed combined analysis, typically during or after a
        model-fit is performed.
        Images are output to the `image` folder of the `image_path` in a subfolder called `combined`. When used
        with a non-linear search the `image_path` is the output folder of the non-linear search.
        Visualization includes individual images of attributes of each fit (e.g. data, normalized residual-map) on
        a single subplot, such that the full suite of multiple datasets can be viewed on the same figure.
        The images output by the `PlotterInterface` are customized using the file `config/visualize/plots.yaml` under
        the `fit` header.
        Parameters
        ----------
        fit_list
            The list of imaging fits which are visualized.
        """
        # Same plots.yaml switch helper as in `fit_imaging`.
        def should_plot(name):
            return plot_setting(section=["fit", "fit_imaging"], name=name)
        mat_plot_2d = self.mat_plot_2d_from(subfolders="combined")
        fit_plotter_list = [
            FitImagingPlotter(
                fit=fit, mat_plot_2d=mat_plot_2d, include_2d=self.include_2d
            )
            for fit in fit_list
        ]
        # One row per dataset, six columns: data, subtracted image, two model
        # images, plane image and normalized residuals (see calls below).
        subplot_columns = 6
        subplot_shape = (len(fit_list), subplot_columns)
        multi_plotter = aplt.MultiFigurePlotter(
            plotter_list=fit_plotter_list, subplot_shape=subplot_shape
        )
        if should_plot("subplot_fit"):
            # Each call below fills one column of the combined subplot; the
            # subplot_index_offset places the column, and open/close_subplot
            # keep the same figure open across calls.
            def make_subplot_fit(filename_suffix):
                multi_plotter.subplot_of_figures_multi(
                    func_name_list=["figures_2d"],
                    figure_name_list=[
                        "data",
                    ],
                    filename_suffix=filename_suffix,
                    number_subplots=len(fit_list) * subplot_columns,
                    close_subplot=False,
                )
                # NOTE(review): plane_index=1 assumes a source plane exists at
                # index 1 — confirm for single-plane fits.
                multi_plotter.subplot_of_figures_multi(
                    func_name_list=["figures_2d_of_planes"],
                    figure_name_list=[
                        "subtracted_image",
                    ],
                    filename_suffix=filename_suffix,
                    number_subplots=len(fit_list) * subplot_columns,
                    open_subplot=False,
                    close_subplot=False,
                    subplot_index_offset=1,
                    plane_index=1
                )
                multi_plotter.subplot_of_figures_multi(
                    func_name_list=["figures_2d_of_planes"],
                    figure_name_list=[
                        "model_image",
                    ],
                    filename_suffix=filename_suffix,
                    number_subplots=len(fit_list) * subplot_columns,
                    open_subplot=False,
                    close_subplot=False,
                    subplot_index_offset=2,
                    plane_index=0
                )
                multi_plotter.subplot_of_figures_multi(
                    func_name_list=["figures_2d_of_planes"],
                    figure_name_list=[
                        "model_image",
                    ],
                    filename_suffix=filename_suffix,
                    number_subplots=len(fit_list) * subplot_columns,
                    open_subplot=False,
                    close_subplot=False,
                    subplot_index_offset=3,
                    plane_index=len(fit_list[0].tracer.planes) - 1
                )
                multi_plotter.subplot_of_figures_multi(
                    func_name_list=["figures_2d_of_planes"],
                    figure_name_list=[
                        "plane_image",
                    ],
                    filename_suffix=filename_suffix,
                    number_subplots=len(fit_list) * subplot_columns,
                    open_subplot=False,
                    close_subplot=False,
                    subplot_index_offset=4,
                    plane_index=len(fit_list[0].tracer.planes) - 1
                )
                multi_plotter.subplot_of_figures_multi(
                    func_name_list=["figures_2d"],
                    figure_name_list=[
                        "normalized_residual_map",
                    ],
                    filename_suffix=filename_suffix,
                    number_subplots=len(fit_list) * subplot_columns,
                    subplot_index_offset=5,
                    open_subplot=False,
                )
            make_subplot_fit(filename_suffix="fit")
            # for plotter in multi_plotter.plotter_list:
            #     plotter.mat_plot_2d.use_log10 = True
            #
            # make_subplot_fit(filename_suffix="fit_log10")
|
Jammy2211REPO_NAMEPyAutoLensPATH_START.@PyAutoLens_extracted@PyAutoLens-main@autolens@imaging@model@plotter_interface.py@.PATH_END.py
|
{
"filename": "filters.py",
"repo_name": "bccp/nbodykit",
"repo_path": "nbodykit_extracted/nbodykit-master/nbodykit/filters.py",
"type": "Python"
}
|
from nbodykit.base.mesh import MeshFilter
import numpy
class TopHat(MeshFilter):
    """ A TopHat filter defined in Fourier space.

    Notes
    -----
    A Fourier space filter is different from a configuration space
    filter. The TopHat in Fourier space creates ringing effects
    due to the truncation / discretization of modes.
    """
    kind = 'wavenumber'
    mode = 'complex'
    def __init__(self, r):
        """
        Parameters
        ----------
        r : float
            radius of the TopHat filter
        """
        self.r = r
    def filter(self, k, v):
        """
        Apply the spherical top-hat window
        W(kr) = 3 (sin(kr) - kr cos(kr)) / (kr)^3 to the complex field ``v``.

        Parameters
        ----------
        k :
            sequence of wavenumber component arrays, one per dimension
        v :
            complex field values on the mesh

        Returns
        -------
        the windowed field; the k == 0 (mean) mode is passed through unchanged.
        """
        r = self.r
        kmag = sum(ki ** 2 for ki in k) ** 0.5
        kr = kmag * r
        # kr == 0 entries produce a 0/0 below; suppress the harmless
        # RuntimeWarning and overwrite those entries afterwards, since
        # W(kr) -> 1 as kr -> 0.
        with numpy.errstate(invalid='ignore', divide='ignore'):
            w = 3 * (numpy.sin(kr) / kr ** 3 - numpy.cos(kr) / kr ** 2)
        w[kmag == 0] = 1.0
        return w * v
class Gaussian(MeshFilter):
    """ A gaussian filter
    .. math ::
        G(r) = exp(-0.5 k^2 r^2)
    """
    kind = 'wavenumber'
    mode = 'complex'
    def __init__(self, r):
        """
        Parameters
        ----------
        r : float
            radius of the Gaussian filter
        """
        self.r = r
    def filter(self, k, v):
        """Multiply the complex field ``v`` by the Gaussian kernel exp(-0.5 k^2 r^2)."""
        # |k|^2 accumulated from the per-dimension wavenumber arrays.
        ksq = sum(ki * ki for ki in k)
        rsq = self.r ** 2
        return numpy.exp(-0.5 * ksq * rsq) * v
|
bccpREPO_NAMEnbodykitPATH_START.@nbodykit_extracted@nbodykit-master@nbodykit@filters.py@.PATH_END.py
|
{
"filename": "get_data.ipynb",
"repo_name": "astrockragh/Mangrove",
"repo_path": "Mangrove_extracted/Mangrove-main/pysr_final/get_data.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import torch, os, pickle, time
import torch_geometric as tg
from torch_geometric.data import Data
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
import os.path as osp
import networkx as nx
path='~/../../tigress/cj1223/merger_trees/isotrees/'
transform_path='~/../../tigress/cj1223/gmdata/transformer'
all_cols=np.array([0,2,4,10,11,12,13,14,15,16,23,24,25,35]+list(range(37,60)))
```
```python
os.listdir(osp.expanduser('~/../../../scratch/gpfs/cj1223/GraphStorage/'))
```
['vlarge_all_4t_z1.0_standard_quant',
'vlarge_all_4t_z0.3_quantile_raw',
'vlarge_4t_quantile_raw_redshift_75_all',
'vlarge_all_4t_z1.0_quantile_raw',
'vlarge_all_4t_z0.3_None',
'vlarge_all_4t_z3.0_quantile_raw',
'test_all_8t_z0.0_None',
'vlarge_all_4t_z2.0_standard_quant',
'vlarge_all_4t_z0.8_quantile_raw',
'tvt_idx',
'vlarge_all_4t_z2.0_None',
'redshift_scan_0',
'testid_all_4t_z2.0_None',
'vlarge_all_4t_z0.0_quantile_stand',
'vlarge_all_multi_try1',
'vlarge_4t_quantile_raw_redshift_99_all',
'vlarge_all_4t_z2.0_quantile_raw',
'vlarge_all_4t_z0.0_standard_quant',
'raw_raw_final_6t',
'vlarge_all_4t_z0.5_quantile_quant',
'vlarge_4t_quantile_raw_redshift_50_all',
'vlarge_all_4t_z2.0_quantile_stand',
'vlarge_all_t_quantile_raw_rm_final',
'vlarge_all_4t_z1.0_quantile_quant',
'vlarge_all_allt_z0.0_quantile_raw_rm',
'transformers',
'vlarge_all_4t_z0.0_standard_raw',
'vlarge_all_4t_quantile_raw_final',
'vlarge_all_4t_z0.5_standard_stand',
'vlarge_all_4t_z1.8_quantile_raw',
'vlarge_all_allt_z0.0_quantile_raw_floor',
'vlarge_all_4t_z0.5_standard_quant',
'vlarge_all_4t_zall_quantile_raw_trainandtest',
'vlarge_all_4t_z0.0_quantile_raw',
'standard_raw_final_6t',
'old',
'vlarge_standard_raw_rm_final',
'vlarge_all_4t_z1.0_None',
'vlarge_all_4t_z1.5_quantile_raw',
'vlarge_all_4t_z1.0_standard_stand',
'vlarge_all_4t_z0.8_None',
'vlarge_all_4t_z1.8_None',
'vlarge_all_4t_z2.0_standard_raw',
'vlarge_4t_quantile_raw_redshift_95_all',
'testid_all_4t_z0.0_None',
'vlarge_all_all_t_z0.0_None',
'vlarge_all_4t_z3.0_None',
'vlarge_all_4t_z0.5_standard_raw',
'vlarge_all_4t_z1.5_None',
'vlarge_all_4t_z0.0_None',
'vlarge_4t_quantile_raw_redshift_85_all',
'vlarge_all_4t_z0.5_quantile_raw',
'vlarge_all_4t_z1.0_standard_raw',
'vlarge_all_4t_quantile_raw',
'vlarge__all_8t_z0.0_None',
'testt_all_4t_z0.0_None',
'vlarge_all_smass',
'vlarge_all_4t_z0.0_quantile_quant',
'vlarge_all_4t_z0.5_quantile_stand',
'vlarge_all_4t_zall_quantile_raw',
'vlarge_all_4t_z0.0_standard_stand',
'vlarge_all_4t_z1.0_quantile_stand',
'vlarge_all_4t_z2.0_quantile_quant',
'vlarge_all_4t_z2.0_standard_stand',
'vlarge_all_4t_z0.5_None',
'haloids.pkl']
```python
case='vlarge_all_all_t_z0.0_None'
# case = 'vlarge_all_allt_z0.0_quantile_raw_rm'
data=pickle.load(open(osp.expanduser(f'~/../../../scratch/gpfs/cj1223/GraphStorage/{case}/data.pkl'), 'rb'))
```
```python
xs=[]
ys=[]
ls=[]
# for d in data[:int(len(data)*0.8)]:
for d in data:
xs.append(d.x.numpy()[0,:])
# xs.append(d.x.numpy()[0])
ys.append(d.y.numpy())
ls.append(len(d.x.numpy()))
xs=np.vstack(xs)
# xs[:,40]=np.log10(xs[:,40])
ys=np.vstack(ys)
ls=np.array(ls)
splits=np.cumsum(ls)
```
```python
halos=pd.read_table(path+f'isotree_0_0_0.dat', skiprows=0, nrows=1, delimiter='\s+')
halos.columns[all_cols], halos.columns
```
(Index(['#scale(0)', 'desc_scale(2)', 'num_prog(4)', 'Mvir(10)', 'Rvir(11)',
'rs(12)', 'vrms(13)', 'mmp?(14)', 'scale_of_last_MM(15)', 'vmax(16)',
'Jx(23)', 'Jy(24)', 'Jz(25)', 'Tidal_Force(35)', 'Rs_Klypin',
'Mvir_all', 'M200b', 'M200c', 'M500c', 'M2500c', 'Xoff', 'Voff',
'Spin_Bullock', 'b_to_a', 'c_to_a', 'A[x]', 'A[y]', 'A[z]',
'b_to_a(500c)', 'c_to_a(500c)', 'A[x](500c)', 'A[y](500c)',
'A[z](500c)', 'T/|U|', 'M_pe_Behroozi', 'M_pe_Diemer',
'Halfmass_Radius'],
dtype='object'),
Index(['#scale(0)', 'id(1)', 'desc_scale(2)', 'desc_id(3)', 'num_prog(4)',
'pid(5)', 'upid(6)', 'desc_pid(7)', 'phantom(8)', 'sam_Mvir(9)',
'Mvir(10)', 'Rvir(11)', 'rs(12)', 'vrms(13)', 'mmp?(14)',
'scale_of_last_MM(15)', 'vmax(16)', 'x(17)', 'y(18)', 'z(19)', 'vx(20)',
'vy(21)', 'vz(22)', 'Jx(23)', 'Jy(24)', 'Jz(25)', 'Spin(26)',
'Breadth_first_ID(27)', 'Depth_first_ID(28)', 'Tree_root_ID(29)',
'Orig_halo_ID(30)', 'Snap_idx(31)',
'Next_coprogenitor_depthfirst_ID(32)',
'Last_progenitor_depthfirst_ID(33)', 'Last_mainleaf_depthfirst_ID(34)',
'Tidal_Force(35)', 'Tidal_ID(36)', 'Rs_Klypin', 'Mvir_all', 'M200b',
'M200c', 'M500c', 'M2500c', 'Xoff', 'Voff', 'Spin_Bullock', 'b_to_a',
'c_to_a', 'A[x]', 'A[y]', 'A[z]', 'b_to_a(500c)', 'c_to_a(500c)',
'A[x](500c)', 'A[y](500c)', 'A[z](500c)', 'T/|U|', 'M_pe_Behroozi',
'M_pe_Diemer', 'Halfmass_Radius'],
dtype='object'))
```python
cols_h = []
for i, col in enumerate(halos.columns[all_cols]):
if col[-1] == ')':
cols_h.append(col[:-3]+f'({i})')
else:
cols_h.append(col+f'({i})')
cols_h.append('c_NFW')
```
```python
c_nfw = np.log10(xs[:,4]/xs[:,14])
```
```python
xs = np.vstack([xs.T, c_nfw]).T
```
```python
log_cols = [4, 5,6,9, 14, 20, 21]
xs[:,log_cols] = np.log10(xs[:,log_cols])
```
```python
fig,ax=plt.subplots(nrows=8,ncols=5, figsize=(30,23))
ax=ax.flatten()
for i in tqdm(range(len(cols_h))):
ax[i].hist(xs[:,i], bins=100, density=1, histtype='step');
ax[i].set(title=cols_h[i])
fig.tight_layout()
```
100%|██████████████████████████████████████████████████████████████| 38/38 [00:00<00:00, 182.15it/s]

```python
z0_feats = np.array([ 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36,37])
targets = np.array([8, 15, 19, 21, 23, 27])
```
```python
xss = []
yss = []
for x, y in zip(xs, ys):
if np.all( y[targets] > 0):
xss.append(x[z0_feats])
yss.append(np.log10(y[targets]))
else:
continue
xss=np.vstack(xss)
yss=np.vstack(yss)
len(xss)
```
108630
```python
# mus, scales = np.mean(xss,axis=0), np.std(xss,axis=0)
# mus, scales =(array([ 1.3043818e+00, 1.0482839e+01, 6.8827255e+01, 5.5631461e+00,
# 6.1627102e+01, 3.5166168e-01, 6.4789871e+01, -6.0743326e-04,
# -7.2524161e-04, -1.0647699e-03, 4.9036452e-01, 5.4335928e+00,
# 1.0488175e+01, 1.0514258e+01, 1.0427412e+01, 1.0331637e+01,
# 1.0102284e+01, 2.4800856e+00, 4.9633760e+00, 3.8996551e-02,
# 8.3892757e-01, 6.9374627e-01, 1.3949696e+00, 1.4162083e+00,
# 1.4180782e+00, 8.3473778e-01, 6.7949873e-01, 1.1404462e+00,
# 1.1499077e+00, 1.1641036e+00, 5.5684698e-01, 1.0600431e+01,
# 1.0140233e+01, 2.2547022e+01], dtype=float32),
# array([1.5931481e+00, 4.6887007e-01, 3.7005737e+01, 7.3534260e+00,
# 3.1689447e+01, 2.3791949e-01, 3.1388607e+01, 7.0497632e-02,
# 9.0221323e-02, 9.6819788e-02, 3.3502483e-01, 5.4994922e+00,
# 4.6876633e-01, 4.7106558e-01, 4.6538469e-01, 4.6102148e-01,
# 4.5496431e-01, 3.6341157e+00, 7.2529106e+00, 2.8715109e-02,
# 1.3576227e-01, 1.3383374e-01, 2.8440909e+00, 2.8351688e+00,
# 2.8433828e+00, 1.2989864e-01, 1.2603475e-01, 2.2782235e+00,
# 2.2766337e+00, 2.2788014e+00, 4.6202425e-02, 4.5650178e-01,
# 5.0597996e-01, 1.5053839e+01], dtype=float32))
```
```python
np.array(cols_h)[z0_feats]
```
array(['num_prog(2)', 'Mvir((3)', 'Rvir((4)', 'rs((5)', 'vrms((6)',
'scale_of_last_MM((8)', 'vmax((9)', 'Jx((10)', 'Jy((11)',
'Jz((12)', 'Tidal_Force((13)', 'Rs_Klypin(14)', 'Mvir_all(15)',
'M200b(16)', 'M200c(17)', 'M500c(18)', 'M2500c(19)', 'Xoff(20)',
'Voff(21)', 'Spin_Bullock(22)', 'b_to_a(23)', 'c_to_a(24)',
'A[x](25)', 'A[y](26)', 'A[z](27)', 'b_to_a(50(28)',
'c_to_a(50(29)', 'A[x](50(30)', 'A[y](50(31)', 'A[z](50(32)',
'T/|U|(33)', 'M_pe_Behroozi(34)', 'M_pe_Diemer(35)',
'Halfmass_Radius(36)', 'c_NFW'], dtype='<U20')
```python
# scale_feats = np.array([ 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
# 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33])
# xss[:, scale_feats] = (xss[:,scale_feats]-mus[scale_feats])/scales[scale_feats]
```
```python
fig,ax=plt.subplots(nrows=7,ncols=5, figsize=(30,23))
ax=ax.flatten()
cols = []
for i, col in tqdm(enumerate(np.array(cols_h)[z0_feats])):
ax[i].hist(xss[:,i], bins=100, density=1, histtype='step');
ax[i].set(title=col)
cols.append(col)
fig.tight_layout()
```
35it [00:00, 185.02it/s]

```python
fig,ax=plt.subplots(nrows=3,ncols=2, figsize=(30,23))
ax=ax.flatten()
cols = []
for i in range(len(yss[0])):
ax[i].hist(yss[:,i], bins=100, density=1, histtype='step');
# ax[i].set(title=col)
# cols.append(col)
fig.tight_layout()
```

```python
plt.plot(xss[:,1], yss[:,0], 'ro', alpha=0.1, markersize=1)
```
[<matplotlib.lines.Line2D at 0x2b490ab7c3d0>]

```python
from torch.utils.data import DataLoader, Dataset
class HaloData(Dataset):
    """Map-style dataset pairing halo feature rows with target rows."""
    def __init__(self, x, y, xcols = np.arange(34), ycols = np.arange(6)):
        """Keep only the requested feature columns of ``x`` and target columns of ``y``."""
        self.x = x[:, xcols]
        self.y = y[:, ycols]
    def __len__(self):
        """Number of haloes in the dataset."""
        return self.y.shape[0]
    def __getitem__(self, idx):
        """Return the (features, targets) pair for halo ``idx``."""
        features = self.x[idx]
        targets = self.y[idx]
        return features, targets
train_data = HaloData(xss, yss)
test_data = HaloData(xss, yss)
train_loader = DataLoader(train_data, batch_size=64, shuffle=True)
test_loader = DataLoader(test_data, batch_size=64, shuffle=False)
for b in train_loader:
print(b)
break
```
[tensor([[ 1.0000, 10.0711, 1.6653, ..., 10.2453, 9.6444, 12.5240],
[ 1.0000, 10.0648, 1.6631, ..., 10.1801, 9.6195, 14.0350],
[ 1.0000, 10.0133, 1.6459, ..., 10.1978, 9.6062, 11.6510],
...,
[ 1.0000, 10.5571, 1.8272, ..., 10.7354, 10.1626, 17.9940],
[ 1.0000, 11.1082, 2.0109, ..., 11.2844, 10.7128, 27.3220],
[ 2.0000, 10.4714, 1.7987, ..., 10.7448, 10.1461, 13.5780]]), tensor([[-1.9363e+00, -6.8118e-01, -3.7747e+00, -3.4757e+00, -3.5134e+00,
-4.2996e+00],
[-2.0475e+00, -3.6470e-01, -3.9489e+00, -3.2291e+00, -3.2699e+00,
-4.1537e+00],
[-1.9972e+00, -5.7311e-01, -3.8291e+00, -3.5380e+00, -3.5592e+00,
-3.9991e+00],
[ 8.9280e-01, 8.4086e-01, 4.6519e-01, 7.8384e-02, 6.3993e-02,
-2.6189e+00],
[-1.4544e+00, -4.1460e-01, -3.0403e+00, -3.1986e+00, -3.2210e+00,
-4.0446e+00],
[-1.8287e+00, -4.2058e-01, -3.5919e+00, -3.4689e+00, -2.5636e+00,
-4.2206e+00],
[-1.5318e+00, -1.0965e+00, -3.0791e+00, -3.6994e+00, -3.7138e+00,
-3.9578e+00],
[-1.9795e+00, -5.4638e-01, -3.8010e+00, -3.6780e+00, -3.7035e+00,
-4.5221e+00],
[-2.2482e+00, -5.2410e-01, -4.2984e+00, -2.6573e+00, -2.6703e+00,
-4.3002e+00],
[-1.3832e+00, 2.6557e-01, -2.9120e+00, -2.7524e+00, -2.7882e+00,
-3.8851e+00],
[ 1.6940e-01, 2.0550e-01, -5.8107e-01, -1.2834e+00, -1.3113e+00,
-3.4673e+00],
[-2.3991e+00, -4.7638e-01, -4.6149e+00, -3.1167e+00, -3.1326e+00,
-3.9993e+00],
[-1.6463e+00, -5.9656e-01, -3.2973e+00, -2.9781e+00, -2.9889e+00,
-3.9196e+00],
[-2.0430e+00, -8.3266e-01, -3.9422e+00, -3.3645e+00, -3.3936e+00,
-4.1538e+00],
[-2.2804e+00, -1.4471e-01, -4.3650e+00, -3.0702e+00, -3.1045e+00,
-4.0450e+00],
[-1.8817e+00, -5.0871e-01, -3.7962e+00, -3.0536e+00, -3.0749e+00,
-3.9990e+00],
[-2.0773e+00, -4.8461e-01, -4.0099e+00, -3.6696e+00, -3.6967e+00,
-4.0960e+00],
[-1.2313e+00, -2.8741e-01, -2.7835e+00, -2.9141e+00, -2.9520e+00,
-4.0448e+00],
[-3.1325e-02, 1.5879e-01, -9.1568e-01, -1.3807e+00, -1.3818e+00,
-3.0809e+00],
[-2.1004e+00, 1.6478e-02, -4.0988e+00, -3.1070e+00, -3.1407e+00,
-3.9990e+00],
[-2.3418e+00, -2.9150e-01, -4.4306e+00, -4.0218e+00, -4.0798e+00,
-3.9201e+00],
[-1.2906e+00, -1.1567e+00, -2.6490e+00, -3.8807e+00, -3.8917e+00,
-4.0448e+00],
[-1.2693e+00, -1.5720e-01, -2.7800e+00, -2.3305e+00, -2.3657e+00,
-4.0960e+00],
[-1.7959e+00, -5.6308e-01, -3.5522e+00, -3.2450e+00, -3.2657e+00,
-3.8851e+00],
[-1.6085e+00, 3.2989e-02, -3.3365e+00, -2.1647e+00, -2.1878e+00,
-3.9574e+00],
[-9.3403e-01, -2.4362e+00, -2.1810e+00, -2.4682e+00, -2.4953e+00,
-3.9994e+00],
[-8.2817e-01, 9.5079e-02, -2.1165e+00, -2.4033e+00, -2.1348e+00,
-3.7438e+00],
[-2.4783e+00, 4.4547e-01, -4.7262e+00, -3.4652e+00, -3.5018e+00,
-3.9197e+00],
[-2.0622e+00, -6.9854e-01, -3.9421e+00, -3.7745e+00, -3.7986e+00,
-3.9990e+00],
[-1.7044e+00, -5.5271e-01, -3.4438e+00, -3.2809e+00, -3.3017e+00,
-4.0956e+00],
[-1.8672e+00, -3.1950e-01, -3.6711e+00, -2.4843e+00, -2.5141e+00,
-4.3966e+00],
[-1.7858e+00, -1.7266e-01, -3.6105e+00, -2.6076e+00, -2.6335e+00,
-3.9989e+00],
[-7.2550e-01, 7.2129e-01, -2.1490e+00, -1.7555e+00, -1.7929e+00,
-3.3409e+00],
[-8.4260e-01, -8.3853e-02, -2.1366e+00, -2.1549e+00, -2.1789e+00,
-3.9248e+00],
[-1.9286e+00, -8.7144e-01, -3.7992e+00, -3.4199e+00, -3.4338e+00,
-4.3964e+00],
[-1.7616e+00, -1.1167e+00, -3.4963e+00, -3.7680e+00, -3.7826e+00,
-4.1538e+00],
[-2.0227e+00, -9.2275e-01, -3.8516e+00, -3.9190e+00, -3.9403e+00,
-4.0448e+00],
[-1.4267e-01, 3.0847e-01, -1.1085e+00, -1.1199e+00, -1.1386e+00,
-3.9195e+00],
[ 1.4320e+00, 1.5923e+00, 1.2526e+00, 6.9279e-01, 7.1896e-01,
-1.9548e+00],
[-1.5757e+00, -2.8441e-01, -3.2707e+00, -2.9581e+00, -2.9806e+00,
-3.7685e+00],
[-1.7721e+00, -5.1497e-01, -3.4633e+00, -2.8938e+00, -2.9127e+00,
-4.3968e+00],
[-1.4706e-01, 6.5852e-01, -1.2672e+00, -9.5982e-01, -9.9642e-01,
-3.6761e+00],
[-9.3811e-01, -1.0825e+00, -2.0494e+00, -3.0489e+00, -3.0834e+00,
-4.1541e+00],
[-1.8093e+00, -2.6870e-01, -3.6467e+00, -2.6039e+00, -2.6320e+00,
-3.9574e+00],
[-1.5985e+00, -5.9623e-01, -3.3035e+00, -3.1476e+00, -3.1871e+00,
-4.2207e+00],
[-2.3482e+00, -3.0949e-01, -4.3751e+00, -3.3028e+00, -3.3402e+00,
-3.9200e+00],
[ 3.6131e-01, 1.3915e+00, -4.8966e-01, -3.5410e-01, -3.9714e-01,
-2.8856e+00],
[-1.6581e+00, -4.0996e-01, -3.3732e+00, -2.8702e+00, -2.9581e+00,
-3.8848e+00],
[-1.6501e+00, -4.1972e-01, -3.3286e+00, -3.1213e+00, -3.1512e+00,
-3.9987e+00],
[-1.7673e+00, -9.0075e-01, -3.5084e+00, -3.6050e+00, -3.6218e+00,
-3.9195e+00],
[-1.7778e-01, 4.7068e-01, -1.1324e+00, -1.2424e+00, -1.2741e+00,
-3.5800e+00],
[-5.6082e-01, -2.0068e-01, -1.7546e+00, -1.7267e+00, -1.7593e+00,
-3.6115e+00],
[-1.7041e+00, -9.4336e-01, -3.3550e+00, -3.3103e+00, -3.3448e+00,
-4.3000e+00],
[-1.8630e+00, -6.8961e-01, -3.5941e+00, -3.4450e+00, -3.4596e+00,
-4.2998e+00],
[-8.4786e-01, 5.9985e-01, -2.2009e+00, -1.9574e+00, -1.9889e+00,
-3.7947e+00],
[-1.8160e+00, -1.0168e+00, -3.5534e+00, -3.5003e+00, -3.5035e+00,
-4.1535e+00],
[-6.1658e-01, 3.3086e-01, -1.6659e+00, -1.3751e+00, -1.4162e+00,
-3.8229e+00],
[ 8.3738e-04, 5.5342e-01, -9.4686e-01, -1.3080e+00, -1.2680e+00,
-2.9876e+00],
[-2.3386e-01, 6.5756e-01, -1.3118e+00, -1.0832e+00, -1.1423e+00,
-3.2202e+00],
[-1.7580e+00, 3.0101e-01, -3.6146e+00, -2.5058e+00, -2.5424e+00,
-3.9576e+00],
[-2.2596e+00, -5.4267e-01, -4.1992e+00, -3.7644e+00, -3.8244e+00,
-4.0961e+00],
[-8.7394e-01, -7.3834e-02, -2.1292e+00, -2.2487e+00, -2.2755e+00,
-3.9990e+00],
[ 4.8658e-01, 4.1574e-02, 4.2642e-02, -6.7940e-01, -7.2612e-01,
-2.8601e+00],
[-6.8432e-01, -4.3532e-01, -1.8726e+00, -2.2612e+00, -2.3028e+00,
-3.8228e+00]])]
```python
os.mkdir(osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/raw_raw_final_6t2"))
data_path=osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/raw_raw_final_6t2/xs.pkl")
with open(data_path, 'wb') as handle:
pickle.dump(xss, handle)
data_path=osp.expanduser(f"~/../../../scratch/gpfs/cj1223/GraphStorage/raw_raw_final_6t2/ys.pkl")
with open(data_path, 'wb') as handle:
pickle.dump(yss, handle)
```
```python
```
|
astrockraghREPO_NAMEMangrovePATH_START.@Mangrove_extracted@Mangrove-main@pysr_final@get_data.ipynb@.PATH_END.py
|
{
"filename": "obs_campaign_LST.ipynb",
"repo_name": "astro-transients/tilepy",
"repo_path": "tilepy_extracted/tilepy-master/examples/visualization/obs_campaign_LST.ipynb",
"type": "Jupyter Notebook"
}
|
```python
from tilepy.tools.VisualizationTools import Pretty_Plot
from tilepy.include.PointingTools import ObservationParameters
import datetime
```
/Users/md274436/software/miniforge3/envs/tilepy/lib/python3.11/site-packages/ligo/lw/lsctables.py:89: UserWarning: Wswiglal-redir-stdio:
SWIGLAL standard output/error redirection is enabled in IPython.
This may lead to performance penalties. To disable locally, use:
with lal.no_swig_redirect_standard_output_error():
...
To disable globally, use:
lal.swig_redirect_standard_output_error(False)
Note however that this will likely lead to error messages from
LAL functions being either misdirected or lost when called from
Jupyter notebooks.
To suppress this warning, use:
import warnings
warnings.filterwarnings("ignore", "Wswiglal-redir-stdio")
import lal
import lal
/Users/md274436/software/miniforge3/envs/tilepy/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm
Adjust the following as desired
```python
#adjust the following as desired
configDir = '../config/'
datasetDir = '../../tilepy/dataset/'
outDir = '../paperplotss/'
galcatName = "Gladeplus.h5"
pointingsFile = None
```
Choose one of the following 4 examples
```python
skymap = 'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/triggers/2023/bn231012231/quicklook/glg_healpix_all_bn231012231.fit'
#skymap = 'https://gracedb.ligo.org/api/superevents/S240624cd/files/cwb.multiorder.fits,1'
obsTime = datetime.datetime.fromisoformat("2023-10-12 20:42:18")
name = 'GRB231012A_PWG'
PointingsFile1 = '../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST1.txt'
PointingsFile2 = '../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST2.txt'
dirName = '../paperplots'
```
Proceed with the cells one by one
```python
cfgFile = "../config/FollowupParameters_LST.ini"
obspar = ObservationParameters()
obspar.add_parsed_args(skymap, obsTime, datasetDir, galcatName, outDir, pointingsFile)
obspar.from_configfile(cfgFile)
print(obspar)
```
============== Main parsed observation parameters ==============
Observatory Name: LST
Observatory: 28.761944 deg
Observatory: -17.89 deg
Observatory: 2200.0 m
Max zenith: 70
Using Greytime is: False
FOV: 2.5
Max runs: 4
Duration: 20
High Resolution NSIDE: 512
Low Resolution NSIDE: 256
The strategy is (3D, None, mangrove=False)
The level of details is (doPlot=True, doRank = True)
```python
from tilepy.tools.VisualizationTools import CompareTwoTilings
plotType = 'gnomonic'
CompareTwoTilings(skymap, PointingsFile1, PointingsFile2, 2.0,plotType)
plotType = 'mollweide'
CompareTwoTilings(skymap, PointingsFile1, PointingsFile2, 2.0,plotType)
```
===========================================================================================
Starting the pointing plotting from the following files
Loading map from https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/triggers/2023/bn231012231/quicklook/glg_healpix_all_bn231012231.fit
Filename 1: ../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST1.txt
Filename 2: ../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST2.txt
Loading pointings
Loading pointings from ../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST1.txt
Loading pointings from ../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST2.txt
Summary of 1st file: sum(PW)= 0.2486 total pointings 6
Summary of 2st file: sum(PW)= 0.2066 total pointings 6
===========================================================================================

===========================================================================================
Starting the pointing plotting from the following files
Loading map from https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/triggers/2023/bn231012231/quicklook/glg_healpix_all_bn231012231.fit
Filename 1: ../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST1.txt
Filename 2: ../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST2.txt
Loading pointings
Loading pointings from ../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST1.txt
Loading pointings from ../sciencecases/bn231012231/PGWinFoV_NObs/SuggestedPointings_GWOptimisation_LST2.txt
Summary of 1st file: sum(PW)= 0.2486 total pointings 6
Summary of 2st file: sum(PW)= 0.2066 total pointings 6
===========================================================================================

```python
```
|
astro-transientsREPO_NAMEtilepyPATH_START.@tilepy_extracted@tilepy-master@examples@visualization@obs_campaign_LST.ipynb@.PATH_END.py
|
{
"filename": "test_you.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/retrievers/test_you.py",
"type": "Python"
}
|
from unittest.mock import AsyncMock, patch
import responses
from langchain_community.retrievers.you import YouRetriever
from ..utilities.test_you import (
LIMITED_PARSED_OUTPUT,
MOCK_PARSED_OUTPUT,
MOCK_RESPONSE_RAW,
NEWS_RESPONSE_PARSED,
NEWS_RESPONSE_RAW,
TEST_ENDPOINT,
)
class TestYouRetriever:
    """Unit tests for ``YouRetriever`` using mocked HTTP responses (no network)."""
    @responses.activate
    def test_invoke(self) -> None:
        """`invoke` parses a mocked /search response into the expected documents."""
        responses.add(
            responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
        )
        query = "Test query text"
        you_wrapper = YouRetriever(ydc_api_key="test")
        results = you_wrapper.invoke(query)
        expected_result = MOCK_PARSED_OUTPUT
        assert results == expected_result
    @responses.activate
    def test_invoke_max_docs(self) -> None:
        """``k`` caps the number of documents returned by `invoke`."""
        responses.add(
            responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
        )
        query = "Test query text"
        you_wrapper = YouRetriever(k=2, ydc_api_key="test")
        results = you_wrapper.invoke(query)
        expected_result = [MOCK_PARSED_OUTPUT[0], MOCK_PARSED_OUTPUT[1]]
        assert results == expected_result
    @responses.activate
    def test_invoke_limit_snippets(self) -> None:
        """``n_snippets_per_hit`` limits snippets per hit (exercised via `results`)."""
        responses.add(
            responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
        )
        query = "Test query text"
        you_wrapper = YouRetriever(n_snippets_per_hit=1, ydc_api_key="test")
        results = you_wrapper.results(query)
        expected_result = LIMITED_PARSED_OUTPUT
        assert results == expected_result
    @responses.activate
    def test_invoke_news(self) -> None:
        """``endpoint_type="news"`` routes to /news and parses its payload."""
        responses.add(
            responses.GET, f"{TEST_ENDPOINT}/news", json=NEWS_RESPONSE_RAW, status=200
        )
        query = "Test news text"
        # ensure limit on number of docs returned
        you_wrapper = YouRetriever(endpoint_type="news", ydc_api_key="test")
        results = you_wrapper.results(query)
        expected_result = NEWS_RESPONSE_PARSED
        assert results == expected_result
    async def test_ainvoke(self) -> None:
        """Async path: `ainvoke` with a patched aiohttp session yields parsed docs."""
        instance = YouRetriever(ydc_api_key="test_api_key")
        # Mock response object to simulate aiohttp response
        mock_response = AsyncMock()
        mock_response.__aenter__.return_value = (
            mock_response  # Make the context manager return itself
        )
        mock_response.__aexit__.return_value = None  # No value needed for exit
        mock_response.status = 200
        mock_response.json = AsyncMock(return_value=MOCK_RESPONSE_RAW)
        # Patch the aiohttp.ClientSession object
        with patch("aiohttp.ClientSession.get", return_value=mock_response):
            results = await instance.ainvoke("test query")
            assert results == MOCK_PARSED_OUTPUT
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@retrievers@test_you.py@.PATH_END.py
|
{
"filename": "param_plotter_example.py",
"repo_name": "htjb/maxsmooth",
"repo_path": "maxsmooth_extracted/maxsmooth-master/example_codes/param_plotter_example.py",
"type": "Python"
}
|
"""
We can assess the parameter space around the optimum solution
found using ``maxsmooth`` with the param_plotter() function.
This can help us identify how well a problem can be solved using the
sign sampling approach employed by ``maxsmooth`` or simply
be used to identify correlations between the foreground parameters.
For more details on this see the ``maxsmooth`` paper.
We begin by importing and fitting the data as with the chi_plotter()
function illustrated above.
"""
import numpy as np
x = np.load('Data/x.npy')
y = np.load('Data/y.npy')
from maxsmooth.DCF import smooth
# Fit a 5th-order derivative-constrained function to the data using the
# quadratic-programming backend; outputs are written under 'examples/'.
N = 5
result = smooth(x, y, N, base_dir='examples/', fit_type='qp')
"""
We have changed the order of the fit to 5 to illustrate that for
order :math:`{N \leq 5}` and fits with derivatives :math:`{m \geq 2}` constrained
the function will plot each region of the graph corresponding to
different sign functions in a different colourmap. If the constraints are
different or the order is greater than 5 then the viable regions will have
a single colourmap. Invalid regions are plotted as black shaded colourmaps
and the contour lines are contours of :math:`{\chi^2}`.
Specifically, invalid regions violate the condition
.. math::
\pm_m \frac{\delta^m y}{\delta x^m} \leq 0
where :math:`{m}` represents the derivative order, :math:`{y}` is the dependent
variable and :math:`{x}` is the independent variable. Violation of the
condition means that one or more of the constrained derivatives crosses 0 in the
band of interest. For an MSF, as mentioned, :math:`{m \geq 2}` and the sign :math:`{\pm_m}`
applies to specific derivative orders. For this specific example there are
3 constrained derivatives, :math:`{m = 2, 3, 4}` and consequently 3 signs to
optimise for alongside the parameters :math:`{a_k}`. The coloured valid regions
therefore correspond to a specific combination of :math:`{\pm_m}` for the problem.
:math:`{\pm_m}` is also referred to as :math:`{\mathbf{s}}` in the theory
section and the ``maxsmooth`` paper.
We can import the function like so,
"""
from maxsmooth.parameter_plotter import param_plotter
"""
and access it using,
"""
param_plotter(result.optimum_params, result.optimum_signs,
x, y, N, base_dir='examples/')
"""
The function takes in the optimum parameters and signs found after the fit
as well as the data and order of the fit. There are a number of keyword arguments
detailed in the following section and the resultant fit is shown below. The
function by default samples the parameter ranges 50% either side of the optimum
and calculates 50 samples for each parameter. In each panel the two
labelled parameters are varied while the others are maintained at their optimum
values.
.. image:: https://github.com/htjb/maxsmooth/raw/master/docs/images/Parameter_plot.png
We are also able to plot the data, fit and residuals alongside the parameter
plot and this can be done by setting data_plot=True. We can also highlight the
central region in each panel of the parameter space by setting center_plot=True.
"""
param_plotter(result.optimum_params, result.optimum_signs,
x, y, N, base_dir='examples/', data_plot=True, center_plot=True)
"""
which gives us the graph below.
.. image:: https://github.com/htjb/maxsmooth/raw/master/docs/images/Parameter_plot_extended.png
"""
|
htjbREPO_NAMEmaxsmoothPATH_START.@maxsmooth_extracted@maxsmooth-master@example_codes@param_plotter_example.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "ExoSim/ExoSimPublic",
"repo_path": "ExoSimPublic_extracted/ExoSimPublic-master/exosim/classes/__init__.py",
"type": "Python"
}
|
from exosim.classes.options import *
from exosim.classes.planet import *
from exosim.classes.sed import *
from exosim.classes.zodi import *
from exosim.classes.channel import *
|
ExoSimREPO_NAMEExoSimPublicPATH_START.@ExoSimPublic_extracted@ExoSimPublic-master@exosim@classes@__init__.py@.PATH_END.py
|
{
"filename": "test_processor_master.py",
"repo_name": "loostrum/darc",
"repo_path": "darc_extracted/darc-master/test/test_processor_master.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import os
import logging
import logging.handlers
import unittest
import multiprocessing as mp
import shutil
import socket
from astropy.time import Time, TimeDelta
import yaml
from darc import ProcessorMaster
from darc import util
from darc.definitions import TIME_UNIT
# disable debug log messages from matplotlib
logging.getLogger('matplotlib').setLevel(logging.ERROR)
@unittest.skipUnless(socket.gethostname() in ('arts041', 'zeus'), "Test can only run on arts041 or zeus")
class TestProcessorMaster(unittest.TestCase):
    """Integration test for ProcessorMaster.

    Builds a fake finished observation on disk (summary YAML, trigger list,
    empty PDF), then runs a full start-observation cycle. Requires the
    site-specific result directories on arts041 or zeus, hence the host guard.
    """

    def setUp(self):
        """Create the queue logger, the processor, and the fake observation files."""
        # site-specific result directory; must exist on the test host
        if socket.gethostname() == 'zeus':
            self.result_dir = '/data/arts/darc/central_dir_processor_master'
        else:
            self.result_dir = '/tank/users/arts/darc_automated_testing/processor_master'
        # create log handler
        # (QueueListener drains log records produced by the processor process)
        log_queue = mp.Queue()
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s.%(levelname)s.%(name)s: %(message)s')
        handler.setFormatter(formatter)
        self.ql = logging.handlers.QueueListener(log_queue, handler)
        self.ql.start()
        # command queue feeding the processor under test
        self.processor_queue = mp.Queue()
        self.processor = ProcessorMaster(log_queue, self.processor_queue)
        # a short (5 s) fake observation starting now
        tstart = Time.now()
        duration = TimeDelta(5, format='sec')
        parset = {'task.duration': str(duration.sec),
                  'task.startTime': tstart.isot,
                  'task.stopTime': (tstart + duration).isot,
                  'task.source.name': 'FAKE',
                  'task.taskID': '001122',
                  'task.beamSet.0.compoundBeam.0.phaseCenter': '[293.94876deg, 16.27778deg]',
                  'task.directionReferenceFrame': 'HADEC',
                  'task.telescopes': '[RT2, RT3, RT4, RT5, RT6, RT7, RT8, RT9]'}
        self.obs_config = {'beams': [0],
                           'home': os.path.expanduser('~'),
                           'freq': 1370,
                           'startpacket': int(tstart.unix * TIME_UNIT),
                           'date': '20200101',
                           'datetimesource': '2020-01-01-00:00:00.FAKE',
                           'result_dir': self.result_dir,
                           'parset': parset}
        # output directory
        output_dir = os.path.join(self.result_dir, self.obs_config['date'], self.obs_config['datetimesource'])
        # web directory
        webdir = '{home}/public_html/darc/{webdir}/{date}/{datetimesource}'.format(webdir=self.processor.webdir,
                                                                                  **self.obs_config)
        # remove existing output
        # (stale results from a previous run would make the test non-deterministic)
        for d in (output_dir, webdir):
            try:
                shutil.rmtree(d)
            except FileNotFoundError:
                pass
        util.makedirs(output_dir)
        # create empty PDF
        open(os.path.join(output_dir, 'CB00.pdf'), 'w').close()
        # create trigger overview
        # classifier cands must be > 0 to test processing of PDF attachments
        trigger_results = {'ncand_raw': 1000,
                           'ncand_post_clustering': 100,
                           'ncand_post_thresholds': 10,
                           'ncand_post_classifier': 1}
        with open(os.path.join(output_dir, 'CB00_summary.yaml'), 'w') as f:
            yaml.dump(trigger_results, f, default_flow_style=False)
        # create file with trigger list
        with open(os.path.join(output_dir, 'CB00_triggers.txt'), 'w') as f:
            f.write('#cb snr dm time downsamp sb p\n')
            f.write('00 10.00 20.00 30.0000 50 35 1.00\n')

    def test_processor_master(self):
        """Run one full observation cycle and wait for the processor to finish."""
        # override result dir
        self.processor.result_dir = self.result_dir
        # start processor
        self.processor.start()
        # start observation
        self.processor_queue.put({'command': 'start_observation', 'obs_config': self.obs_config, 'reload': False})
        # wait until processor is done
        self.processor.join()
        # stop queue logger
        self.ql.stop()
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
loostrumREPO_NAMEdarcPATH_START.@darc_extracted@darc-master@test@test_processor_master.py@.PATH_END.py
|
{
"filename": "hierarchy_test_data.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/cluster/tests/hierarchy_test_data.py",
"type": "Python"
}
|
from numpy import array
# Q_X: 30 observations in 3 dimensions forming three well-separated groups
# (rows 0-9 near the origin, 10-19 around ~1.6, 20-29 around ~3), used as a
# raw-observation fixture for the hierarchical-clustering tests.
Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02],
             [7.50205180e-01, 4.60299830e-01, 8.98696460e-01],
             [6.65461230e-01, 6.94011420e-01, 9.10465700e-01],
             [9.64047590e-01, 1.43082200e-03, 7.39874220e-01],
             [1.08159060e-01, 5.53028790e-01, 6.63804780e-02],
             [9.31359130e-01, 8.25424910e-01, 9.52315440e-01],
             [6.78086960e-01, 3.41903970e-01, 5.61481950e-01],
             [9.82730940e-01, 7.04605210e-01, 8.70978630e-02],
             [6.14691610e-01, 4.69989230e-02, 6.02406450e-01],
             [5.80161260e-01, 9.17354970e-01, 5.88163850e-01],
             [1.38246310e+00, 1.96358160e+00, 1.94437880e+00],
             [2.10675860e+00, 1.67148730e+00, 1.34854480e+00],
             [1.39880070e+00, 1.66142050e+00, 1.32224550e+00],
             [1.71410460e+00, 1.49176380e+00, 1.45432170e+00],
             [1.54102340e+00, 1.84374950e+00, 1.64658950e+00],
             [2.08512480e+00, 1.84524350e+00, 2.17340850e+00],
             [1.30748740e+00, 1.53801650e+00, 2.16007740e+00],
             [1.41447700e+00, 1.99329070e+00, 1.99107420e+00],
             [1.61943490e+00, 1.47703280e+00, 1.89788160e+00],
             [1.59880600e+00, 1.54988980e+00, 1.57563350e+00],
             [3.37247380e+00, 2.69635310e+00, 3.39981700e+00],
             [3.13705120e+00, 3.36528090e+00, 3.06089070e+00],
             [3.29413250e+00, 3.19619500e+00, 2.90700170e+00],
             [2.65510510e+00, 3.06785900e+00, 2.97198540e+00],
             [3.30941040e+00, 2.59283970e+00, 2.57714110e+00],
             [2.59557220e+00, 3.33477370e+00, 3.08793190e+00],
             [2.58206180e+00, 3.41615670e+00, 3.26441990e+00],
             [2.71127000e+00, 2.77032450e+00, 2.63466500e+00],
             [2.79617850e+00, 3.25473720e+00, 3.41801560e+00],
             [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]])
# ytdist: condensed (upper-triangular) distance matrix for 6 observations
# (15 pairwise distances), plus the expected 4-column linkage matrices
# [idx1, idx2, distance, cluster size] for several linkage methods.
ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754.,
                564., 138., 219., 869., 669.])
# expected single-linkage result on ytdist
linkage_ytdist_single = array([[2., 5., 138., 2.],
                               [3., 4., 219., 2.],
                               [0., 7., 255., 3.],
                               [1., 8., 268., 4.],
                               [6., 9., 295., 6.]])
# expected complete-linkage result on ytdist
linkage_ytdist_complete = array([[2., 5., 138., 2.],
                                 [3., 4., 219., 2.],
                                 [1., 6., 400., 3.],
                                 [0., 7., 412., 3.],
                                 [8., 9., 996., 6.]])
# expected average-linkage (UPGMA) result on ytdist
linkage_ytdist_average = array([[2., 5., 138., 2.],
                                [3., 4., 219., 2.],
                                [0., 7., 333.5, 3.],
                                [1., 6., 347.5, 3.],
                                [8., 9., 680.77777778, 6.]])
# expected weighted-linkage (WPGMA) result on ytdist
linkage_ytdist_weighted = array([[2., 5., 138., 2.],
                                 [3., 4., 219., 2.],
                                 [0., 7., 333.5, 3.],
                                 [1., 6., 347.5, 3.],
                                 [8., 9., 670.125, 6.]])
# the optimal leaf ordering of linkage_ytdist_single
linkage_ytdist_single_olo = array([[5., 2., 138., 2.],
                                   [4., 3., 219., 2.],
                                   [7., 0., 255., 3.],
                                   [1., 8., 268., 4.],
                                   [6., 9., 295., 6.]])
# X: 6 observations in 2-D used for the geometry-based linkage methods below
X = array([[1.43054825, -7.5693489],
           [6.95887839, 6.82293382],
           [2.87137846, -9.68248579],
           [7.87974764, -6.05485803],
           [8.24018364, -6.09495602],
           [7.39020262, 8.54004355]])
linkage_X_centroid = array([[3., 4., 0.36265956, 2.],
                            [1., 5., 1.77045373, 2.],
                            [0., 2., 2.55760419, 2.],
                            [6., 8., 6.43614494, 4.],
                            [7., 9., 15.17363237, 6.]])
linkage_X_median = array([[3., 4., 0.36265956, 2.],
                          [1., 5., 1.77045373, 2.],
                          [0., 2., 2.55760419, 2.],
                          [6., 8., 6.43614494, 4.],
                          [7., 9., 15.17363237, 6.]])
linkage_X_ward = array([[3., 4., 0.36265956, 2.],
                        [1., 5., 1.77045373, 2.],
                        [0., 2., 2.55760419, 2.],
                        [6., 8., 9.10208346, 4.],
                        [7., 9., 24.7784379, 6.]])
# the optimal leaf ordering of linkage_X_ward
linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.],
                            [5., 1., 1.77045373, 2.],
                            [2., 0., 2.55760419, 2.],
                            [6., 8., 9.10208346, 4.],
                            [7., 9., 24.7784379, 6.]])
# Expected inconsistency matrices for the single-linkage result on ytdist,
# keyed by depth d. Columns: [mean height, std of heights, count, coefficient].
inconsistent_ytdist = {
    1: array([[138., 0., 1., 0.],
              [219., 0., 1., 0.],
              [255., 0., 1., 0.],
              [268., 0., 1., 0.],
              [295., 0., 1., 0.]]),
    2: array([[138., 0., 1., 0.],
              [219., 0., 1., 0.],
              [237., 25.45584412, 2., 0.70710678],
              [261.5, 9.19238816, 2., 0.70710678],
              [233.66666667, 83.9424406, 3., 0.7306594]]),
    3: array([[138., 0., 1., 0.],
              [219., 0., 1., 0.],
              [237., 25.45584412, 2., 0.70710678],
              [247.33333333, 25.38372182, 3., 0.81417007],
              [239., 69.36377537, 4., 0.80733783]]),
    4: array([[138., 0., 1., 0.],
              [219., 0., 1., 0.],
              [237., 25.45584412, 2., 0.70710678],
              [247.33333333, 25.38372182, 3., 0.81417007],
              [235., 60.73302232, 5., 0.98793042]])}
# Expected flat-cluster assignments for the 30-point Q_X hierarchy,
# keyed by the fcluster threshold for each criterion.
fcluster_inconsistent = {
    0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1]),
    1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1]),
    2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1])}
fcluster_distance = {
    0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3,
                1, 1, 1, 2, 1, 1, 1, 1, 1]),
    1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1]),
    2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1])}
fcluster_maxclust = {
    8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4,
                1, 1, 1, 3, 1, 1, 1, 1, 2]),
    4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2,
                1, 1, 1, 1, 1, 1, 1, 1, 1]),
    1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1])}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@cluster@tests@hierarchy_test_data.py@.PATH_END.py
|
{
"filename": "test_simpleforms.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/glue/core/tests/test_simpleforms.py",
"type": "Python"
}
|
import pytest
from ..simpleforms import IntOption, FloatOption, BoolOption
class Stub(object):
    """Test fixture declaring one of each simpleforms option descriptor."""

    int_opt = IntOption(min=0, max=10, default=3)  # inclusive integer range
    float_opt = FloatOption(min=1, max=2, default=1.5)  # inclusive float range
    bool_opt = BoolOption()  # starts out False (see test_get_set_bool)
class TestSimpleForms(object):
    """Behavioral tests for the Int/Float/Bool option descriptors."""

    def test_get_set_int(self):
        """Bounds are exposed on the class; instances see the default."""
        assert Stub.int_opt.min == 0
        assert Stub.int_opt.max == 10
        assert Stub().int_opt == 3

    def test_get_set_bool(self):
        """Bool options start False and round-trip assignment."""
        s = Stub()
        assert s.bool_opt is False
        s.bool_opt = True
        assert s.bool_opt

    def test_get_set_float(self):
        """Float options coerce int assignments to float."""
        s = Stub()
        assert s.float_opt == 1.5
        s.float_opt = 1
        assert s.float_opt == 1.0
        assert isinstance(s.float_opt, float)

    def test_invalid_int(self):
        """Out-of-range or non-integral values raise ValueError."""
        s = Stub()
        s.int_opt = 4
        with pytest.raises(ValueError):
            s.int_opt = -1
        with pytest.raises(ValueError):
            s.int_opt = 11
        with pytest.raises(ValueError):
            s.int_opt = 2.5

    def test_invalid_float(self):
        """Values outside [min, max] raise ValueError."""
        s = Stub()
        with pytest.raises(ValueError):
            s.float_opt = -0.1
        with pytest.raises(ValueError):
            s.float_opt = 10.1

    def test_invalid(self):
        """Non-bool assignment to a bool option raises ValueError."""
        s = Stub()
        with pytest.raises(ValueError):
            s.bool_opt = 3
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@glue@core@tests@test_simpleforms.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/sankey/hoverlabel/__init__.py",
"type": "Python"
}
|
# Auto-generated validator package init for sankey.hoverlabel.
import sys
from typing import TYPE_CHECKING

# On Python < 3.7 (no module-level __getattr__, PEP 562) or when a static
# type checker is inspecting the package, import every validator eagerly;
# otherwise defer to plotly's lazy relative_import machinery so that the
# validators are only loaded on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._namelengthsrc import NamelengthsrcValidator
    from ._namelength import NamelengthValidator
    from ._font import FontValidator
    from ._bordercolorsrc import BordercolorsrcValidator
    from ._bordercolor import BordercolorValidator
    from ._bgcolorsrc import BgcolorsrcValidator
    from ._bgcolor import BgcolorValidator
    from ._alignsrc import AlignsrcValidator
    from ._align import AlignValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns the module-level __all__, __getattr__ and
    # __dir__ that implement the lazy loading.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._namelengthsrc.NamelengthsrcValidator",
            "._namelength.NamelengthValidator",
            "._font.FontValidator",
            "._bordercolorsrc.BordercolorsrcValidator",
            "._bordercolor.BordercolorValidator",
            "._bgcolorsrc.BgcolorsrcValidator",
            "._bgcolor.BgcolorValidator",
            "._alignsrc.AlignsrcValidator",
            "._align.AlignValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@sankey@hoverlabel@__init__.py@.PATH_END.py
|
{
"filename": "speed_estimation.py",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/ultralytics/solutions/speed_estimation.py",
"type": "Python"
}
|
# Ultralytics YOLO 🚀, AGPL-3.0 license
from time import time
import numpy as np
from ultralytics.solutions.solutions import BaseSolution
from ultralytics.utils.plotting import Annotator, colors
class SpeedEstimator(BaseSolution):
    """
    A class to estimate the speed of objects in a real-time video stream based on their tracks.
    This class extends the BaseSolution class and provides functionality for estimating object speeds using
    tracking data in video streams.
    Attributes:
        spd (Dict[int, float]): Dictionary storing speed data for tracked objects.
        trkd_ids (List[int]): List of tracked object IDs that have already been speed-estimated.
        trk_pt (Dict[int, float]): Dictionary storing previous timestamps for tracked objects.
        trk_pp (Dict[int, Tuple[float, float]]): Dictionary storing previous positions for tracked objects.
        annotator (Annotator): Annotator object for drawing on images.
        region (List[Tuple[int, int]]): List of points defining the speed estimation region.
        track_line (List[Tuple[float, float]]): List of points representing the object's track.
        r_s (LineString): LineString object representing the speed estimation region.
    Methods:
        initialize_region: Initializes the speed estimation region.
        estimate_speed: Estimates the speed of objects based on tracking data.
        store_tracking_history: Stores the tracking history for an object.
        extract_tracks: Extracts tracks from the current frame.
        display_output: Displays the output with annotations.
    Examples:
        >>> estimator = SpeedEstimator()
        >>> frame = cv2.imread("frame.jpg")
        >>> processed_frame = estimator.estimate_speed(frame)
        >>> cv2.imshow("Speed Estimation", processed_frame)
    """
    def __init__(self, **kwargs):
        """Initializes the SpeedEstimator object with speed estimation parameters and data structures."""
        super().__init__(**kwargs)
        self.initialize_region()  # Initialize speed region
        self.spd = {}  # set for speed data
        self.trkd_ids = []  # list for already speed_estimated and tracked ID's
        self.trk_pt = {}  # set for tracks previous time
        self.trk_pp = {}  # set for tracks previous point
    def estimate_speed(self, im0):
        """
        Estimates the speed of objects based on tracking data.
        Args:
            im0 (np.ndarray): Input image for processing. Shape is typically (H, W, C) for RGB images.
        Returns:
            (np.ndarray): Processed image with speed estimations and annotations.
        Examples:
            >>> estimator = SpeedEstimator()
            >>> image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
            >>> processed_image = estimator.estimate_speed(image)
        """
        self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
        self.extract_tracks(im0)  # Extract tracks
        self.annotator.draw_region(
            reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2
        )  # Draw region
        for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
            self.store_tracking_history(track_id, box)  # Store track history
            # Check if track_id is already in self.trk_pp or trk_pt initialize if not
            # NOTE(review): a first-seen track gets a previous-time of 0 (the epoch),
            # so the first time_difference below is ~current unix time and the first
            # speed estimate comes out near zero — confirm this is intended upstream.
            if track_id not in self.trk_pt:
                self.trk_pt[track_id] = 0
            if track_id not in self.trk_pp:
                self.trk_pp[track_id] = self.track_line[-1]
            speed_label = f"{int(self.spd[track_id])} km/h" if track_id in self.spd else self.names[int(cls)]
            self.annotator.box_label(box, label=speed_label, color=colors(track_id, True))  # Draw bounding box
            # Draw tracks of objects
            self.annotator.draw_centroid_and_tracks(
                self.track_line, color=colors(int(track_id), True), track_thickness=self.line_width
            )
            # Calculate object speed and direction based on region intersection
            if self.LineString([self.trk_pp[track_id], self.track_line[-1]]).intersects(self.r_s):
                direction = "known"
            else:
                direction = "unknown"
            # Perform speed calculation and tracking updates if direction is valid
            # (each track is speed-estimated at most once, via trkd_ids)
            if direction == "known" and track_id not in self.trkd_ids:
                self.trkd_ids.append(track_id)
                time_difference = time() - self.trk_pt[track_id]
                if time_difference > 0:
                    # vertical pixel displacement / elapsed seconds; labeled km/h
                    # but no pixel-to-metric calibration is visible here — TODO confirm units
                    self.spd[track_id] = np.abs(self.track_line[-1][1] - self.trk_pp[track_id][1]) / time_difference
            self.trk_pt[track_id] = time()
            self.trk_pp[track_id] = self.track_line[-1]
        self.display_output(im0)  # display output with base class function
        return im0  # return output image for more usage
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@ultralytics@solutions@speed_estimation.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/barpolar/hoverlabel/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``barpolar.hoverlabel.font.shadow`` property."""

    def __init__(
        self, plotly_name="shadow", parent_name="barpolar.hoverlabel.font", **kwargs
    ):
        # Install the schema defaults only when the caller did not override
        # them, then hand everything to the base StringValidator.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@barpolar@hoverlabel@font@_shadow.py@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/funnel/_hoverlabel.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Auto-generated styling container for the hover labels of ``funnel``
    traces; each property below proxies the validated entry of the same
    name in the underlying plotly JSON structure."""

    # class properties
    # --------------------
    _parent_path_str = "funnel"
    _path_str = "funnel.hoverlabel"
    _valid_props = {
        "align",
        "alignsrc",
        "bgcolor",
        "bgcolorsrc",
        "bordercolor",
        "bordercolorsrc",
        "font",
        "namelength",
        "namelengthsrc",
    }
    # align
    # -----
    @property
    def align(self):
        """
        Sets the horizontal alignment of the text content within hover
        label box. Has an effect only if the hover label text spans
        more two or more lines
        The 'align' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'right', 'auto']
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        Any|numpy.ndarray
        """
        return self["align"]
    @align.setter
    def align(self, val):
        self["align"] = val
    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `align`.
        The 'alignsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["alignsrc"]
    @alignsrc.setter
    def alignsrc(self, val):
        self["alignsrc"] = val
    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Sets the background color of the hover labels for this trace
        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["bgcolor"]
    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val
    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `bgcolor`.
        The 'bgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["bgcolorsrc"]
    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val
    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Sets the border color of the hover labels for this trace.
        The 'bordercolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["bordercolor"]
    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val
    # bordercolorsrc
    # --------------
    @property
    def bordercolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `bordercolor`.
        The 'bordercolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["bordercolorsrc"]
    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self["bordercolorsrc"] = val
    # font
    # ----
    @property
    def font(self):
        """
        Sets the font used in hover labels.
        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.funnel.hoverlabel.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor
            Supported dict properties:
                color
                colorsrc
                    Sets the source reference on Chart Studio Cloud
                    for `color`.
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                familysrc
                    Sets the source reference on Chart Studio Cloud
                    for `family`.
                lineposition
                    Sets the kind of decoration line(s) with text,
                    such as an "under", "over" or "through" as well
                    as combinations e.g. "under+over", etc.
                linepositionsrc
                    Sets the source reference on Chart Studio Cloud
                    for `lineposition`.
                shadow
                    Sets the shape and color of the shadow behind
                    text. "auto" places minimal shadow and applies
                    contrast text font color. See
                    https://developer.mozilla.org/en-
                    US/docs/Web/CSS/text-shadow for additional
                    options.
                shadowsrc
                    Sets the source reference on Chart Studio Cloud
                    for `shadow`.
                size
                sizesrc
                    Sets the source reference on Chart Studio Cloud
                    for `size`.
                style
                    Sets whether a font should be styled with a
                    normal or italic face from its family.
                stylesrc
                    Sets the source reference on Chart Studio Cloud
                    for `style`.
                textcase
                    Sets capitalization of text. It can be used to
                    make text appear in all-uppercase or all-
                    lowercase, or with each word capitalized.
                textcasesrc
                    Sets the source reference on Chart Studio Cloud
                    for `textcase`.
                variant
                    Sets the variant of the font.
                variantsrc
                    Sets the source reference on Chart Studio Cloud
                    for `variant`.
                weight
                    Sets the weight (or boldness) of the font.
                weightsrc
                    Sets the source reference on Chart Studio Cloud
                    for `weight`.
        Returns
        -------
        plotly.graph_objs.funnel.hoverlabel.Font
        """
        return self["font"]
    @font.setter
    def font(self, val):
        self["font"] = val
    # namelength
    # ----------
    @property
    def namelength(self):
        """
        Sets the default length (in number of characters) of the trace
        name in the hover labels for all traces. -1 shows the whole
        name regardless of length. 0-3 shows the first 0-3 characters,
        and an integer >3 will show the whole name if it is less than
        that many characters, but if it is longer, will truncate to
        `namelength - 3` characters and add an ellipsis.
        The 'namelength' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [-1, 9223372036854775807]
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        int|numpy.ndarray
        """
        return self["namelength"]
    @namelength.setter
    def namelength(self, val):
        self["namelength"] = val
    # namelengthsrc
    # -------------
    @property
    def namelengthsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `namelength`.
        The 'namelengthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["namelengthsrc"]
    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self["namelengthsrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.
        """
    def __init__(
        self,
        arg=None,
        align=None,
        alignsrc=None,
        bgcolor=None,
        bgcolorsrc=None,
        bordercolor=None,
        bordercolorsrc=None,
        font=None,
        namelength=None,
        namelengthsrc=None,
        **kwargs,
    ):
        """
        Construct a new Hoverlabel object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.funnel.Hoverlabel`
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.
        Returns
        -------
        Hoverlabel
        """
        super(Hoverlabel, self).__init__("hoverlabel")
        # internal fast path: adopt an existing parent without re-validation
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.funnel.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.Hoverlabel`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # explicit keyword arguments take precedence over entries in ``arg``
        _v = arg.pop("align", None)
        _v = align if align is not None else _v
        if _v is not None:
            self["align"] = _v
        _v = arg.pop("alignsrc", None)
        _v = alignsrc if alignsrc is not None else _v
        if _v is not None:
            self["alignsrc"] = _v
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bgcolorsrc", None)
        _v = bgcolorsrc if bgcolorsrc is not None else _v
        if _v is not None:
            self["bgcolorsrc"] = _v
        _v = arg.pop("bordercolor", None)
        _v = bordercolor if bordercolor is not None else _v
        if _v is not None:
            self["bordercolor"] = _v
        _v = arg.pop("bordercolorsrc", None)
        _v = bordercolorsrc if bordercolorsrc is not None else _v
        if _v is not None:
            self["bordercolorsrc"] = _v
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("namelength", None)
        _v = namelength if namelength is not None else _v
        if _v is not None:
            self["namelength"] = _v
        _v = arg.pop("namelengthsrc", None)
        _v = namelengthsrc if namelengthsrc is not None else _v
        if _v is not None:
            self["namelengthsrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@funnel@_hoverlabel.py@.PATH_END.py
|
{
"filename": "_token.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/stream/_token.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the `treemap.stream.token` string property."""

    def __init__(self, plotly_name="token", parent_name="treemap.stream", **kwargs):
        # Apply the property defaults only where the caller has not
        # supplied an override; everything else is forwarded untouched.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("strict", True)
        super(TokenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@stream@_token.py@.PATH_END.py
|
{
"filename": "regexopt.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py2/pygments/regexopt.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
pygments.regexopt
~~~~~~~~~~~~~~~~~
An algorithm that generates optimized regexes for matching long lists of
literal strings.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from re import escape
from os.path import commonprefix
from itertools import groupby
from operator import itemgetter
# Characters that must be backslash-escaped inside a [...] character class.
CS_ESCAPE = re.compile(r'[\^\\\-\]]')
# Sort/group key selecting the first element of a sequence.
FIRST_ELEMENT = itemgetter(0)


def make_charset(letters):
    """Return a regex character class matching any single char in *letters*."""
    joined = ''.join(letters)
    escaped = CS_ESCAPE.sub(lambda match: '\\' + match.group(), joined)
    return '[%s]' % escaped
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings.

    *open_paren* is the opening group token ('(' , '(?:' or '') to wrap the
    result in; recursion factors out common prefixes/suffixes and collapses
    runs of one-character strings into character classes.
    """
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # An empty string in the (sorted) list means the whole group is
        # optional: match the rest, then append '?'.
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        # Note: the stripped strings must be re-sorted before recursing.
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    # groupby splits the sorted list into the run sharing first's initial
    # character and the rest; each group is optimized independently.
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren
def regex_opt(strings, prefix='', suffix=''):
    """Return an optimized regex pattern string matching any of *strings*.

    The strings must be literal strings, not regexes; they are
    regex-escaped.  *prefix* and *suffix* are pre- and appended to the
    final pattern.  Note the return value is a pattern string (suitable
    for :func:`re.compile`), not a compiled regex object.
    """
    alternation = regex_opt_inner(sorted(strings), '(')
    return '%s%s%s' % (prefix, alternation, suffix)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py2@pygments@regexopt.py@.PATH_END.py
|
{
"filename": "corner.ipynb",
"repo_name": "sotzee/CPREX",
"repo_path": "CPREX_extracted/CPREX-main/corner.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import pandas as pd
import seaborn
import corner
import matplotlib.pyplot as plt
from load_all_data import *
likelihood_name=['none', 'all', 'crex', 'prex']
```
```python
```
```python
Skyrme_Sv_fit=np.array([[-527.63517486, 960.90032367, 32.57190263], [7.9034604, 5.54229394, np.sqrt(0.26283722**2+2.932327395**2)]])
RMF_Sv_fit =np.array([[-603.92661734, 915.16211132, 33.69134024], [2.877702, 2.3558963, np.sqrt(0.08202593**2+3.706824**2)]])
Skyrme_L_fit=np.array([[1794.30156274, 2642.18405652, -91.44462774], [27.28591802, 19.09387008, np.sqrt(0.90854965**2+10.142446**2)]])
RMF_L_fit =np.array([[3292.32977663, 2349.18446447, -163.44830784], [23.34141903, 16.16647948, np.sqrt(0.81640617**2+25.37046**2)]])
RMF_b4_fit =np.array([[-32.67247477, 24.88743999, 0.9238387], [0.08176161, 0.06204431, np.sqrt(0.00240444**2+0.1179605**2)]])
Skyrme_b4_fit=np.array([[-52.88068838, 28.11911958, 1.69036289], [0.41022367, 0.28844105, np.sqrt(0.01364027**2+0.1535242**2)]])
PrexCrex_weighted=(np.array([Skyrme_Sv_fit[0,0],Skyrme_L_fit[0,0],Skyrme_b4_fit[0,0]])[:,np.newaxis]*(PrexCrex_Skyrme[0]-PrexCrex_Skyrme[2])+np.array([Skyrme_Sv_fit[0,1],Skyrme_L_fit[0,1],Skyrme_b4_fit[0,1]])[:,np.newaxis]*(PrexCrex_Skyrme[1]-PrexCrex_Skyrme[3]))
PrexCrex_RMF_weighted=(np.array([RMF_Sv_fit[0,0],RMF_L_fit[0,0],RMF_b4_fit[0,0]])[:,np.newaxis]*(PrexCrex_RMF[0]-PrexCrex_RMF[2])+np.array([RMF_Sv_fit[0,1],RMF_L_fit[0,1],RMF_b4_fit[0,1]])[:,np.newaxis]*(PrexCrex_RMF[1]-PrexCrex_RMF[3]))
PrexCrex_RMF_to_Skyrme_weighted=(np.array([RMF_Sv_fit[0,0],RMF_L_fit[0,0],RMF_b4_fit[0,0]])[:,np.newaxis]*(PrexCrex_RMF_to_Skyrme[0]-PrexCrex_RMF_to_Skyrme[2])+np.array([RMF_Sv_fit[0,1],RMF_L_fit[0,1],RMF_b4_fit[0,1]])[:,np.newaxis]*(PrexCrex_RMF_to_Skyrme[1]-PrexCrex_RMF_to_Skyrme[3]))
```
```python
data=np.concatenate((eos_args_Skyrme[:2],[eos_args_Skyrme[2]*(eos_args_Skyrme[6]+5/4)],eos_args_Skyrme[3:],SAT_Skyrme,(PrexCrex_Skyrme[0:2]-PrexCrex_Skyrme[2:4]),PrexCrex_weighted),axis=0)
data_list=[data[:,index_list_Skyrme[i]:index_list_Skyrme[i+1]] for i in range(4)]
data_name=['$t_0$ [MeV fm$^3$]','$t_1$ [MeV fm$^5$]','$t_2(x_2+\\frac{5}{4})$ [MeV fm$^5$]','$t_3$ [MeV fm$^{3+\\alpha}$]','$x_0$ []','$x_1$ []','$x_2$ []','$x_3$ []','$\\alpha$ []','$b_4$ [fm$^4$]','$b\'_4$ [fm$^4$]','$m^*$ [MeV]','$n_s$[fm$^{-3}$]','B [MeV]','S$_v$ [MeV]','L [MeV]','K [MeV]','K$_{sym}$ [MeV]','$\Delta F_{Ca}$ []','$\Delta F_{Pb}$ []','$a\Delta F_{Ca}+b\Delta F_{Pb}$[]','$a\'\Delta F_{Ca}+b\'\Delta F_{Pb}$[]','$a\'\'\Delta F_{Ca}+b\'\'\Delta F_{Pb}$[]']
print(data.shape,len(data_name))
data_basic=np.concatenate((PrexCrex_Skyrme[4:10],PrexCrex_Skyrme[0:2],(PrexCrex_Skyrme[0:2]-PrexCrex_Skyrme[2:4]),[PrexCrex_Skyrme[12]-PrexCrex_Skyrme[13],PrexCrex_Skyrme[10]-PrexCrex_Skyrme[11]]),axis=0)
data_basic_list=[data_basic[:,index_list_Skyrme[i]:index_list_Skyrme[i+1]] for i in range(4)]
# FchCa FchPb FwCa FwPb RchCa(fm) RchZr(fm) RchPb(fm) BECa(MeV) BEZr(MeV) BEPb(MeV)
# Rn_Pb208 (fm) Rp_Pb208(fm) Rn_Ca48(fm) Rp_Ca48(fm)
data_basic_name=['$R_{ch}^{Ca}$ [fm]','$R_{ch}^{Zr}$ [fm]','$R_{ch}^{Pb}$ [fm]','(E/A)$_{Ca}$ [MeV]','(E/A)$_{Zr}$ [MeV]','(E/A)$_{Pb}$ [MeV]','$F^{Ca}_{ch}$ []','$F^{Pb}_{ch}$ []','$\Delta F_{Ca}$ []','$\Delta F_{Pb}$ []','$\Delta R_{Ca}$ [fm]','$\Delta R_{Pb}$ [fm]']
print(data_basic.shape,len(data_basic_name))
```
(23, 9498) 23
(12, 9498) 12
```python
b4p=(eos_args_RMF[4]/763**2+eos_args_RMF[2]/980**2)/(8*939**2)*197.3**4
b4 =(eos_args_RMF[1]/eos_args_RMF[0]**2+eos_args_RMF[3]/782.5**2)/(4*939**2)*197.3**4-b4p/2
data_RMF=np.concatenate((eos_args_RMF,[b4,b4p],SAT_RMF,(PrexCrex_RMF[0:2]-PrexCrex_RMF[2:4]),PrexCrex_RMF_weighted),axis=0)
data_RMF_list=[data_RMF[:,index_list_RMF[i]:index_list_RMF[i+1]] for i in range(4)]
data_RMF_name=['m$_\sigma$ [MeV]','g$_\sigma^2$ []','g$_\delta^2$ []','g$_{\omega}^2$ []','g$_\\rho^2$ []','$\kappa$ [MeV]','$\lambda$ []','$\Lambda_{\omega\\rho}$','$\zeta_\omega$ []','$b_4$ [fm$^4$]','$b\'_4$ [fm$^4$]','$m^*$ [MeV]','$n_s$[fm$^{-3}$]','BE [MeV]','S$_v$ [MeV]','L [MeV]','K [MeV]','K$_{sym}$ [MeV]','$\Delta F_{Ca}$ []','$\Delta F_{Pb}$ []','$a\Delta F_{Ca}+b\Delta F_{Pb}$[]','$a\'\Delta F_{Ca}+b\'\Delta F_{Pb}$[]','$a\'\'\Delta F_{Ca}+b\'\'\Delta F_{Pb}$[]']
print(data_RMF.shape,len(data_RMF_name))
data_RMF_basic=np.concatenate((PrexCrex_RMF[4:10],PrexCrex_RMF[0:2],(PrexCrex_RMF[0:2]-PrexCrex_RMF[2:4]),[PrexCrex_RMF[12]-PrexCrex_RMF[13],PrexCrex_RMF[10]-PrexCrex_RMF[11]]),axis=0)
data_RMF_basic_list=[data_RMF_basic[:,index_list_RMF[i]:index_list_RMF[i+1]] for i in range(4)]
# FchCa FchPb FwCa FwPb RchCa(fm) RchZr(fm) RchPb(fm) BECa(MeV) BEZr(MeV) BEPb(MeV)
# Rn_Pb208 (fm) Rp_Pb208(fm) Rn_Ca48(fm) Rp_Ca48(fm)
data_RMF_basic_name=['$R_{ch}^{Ca}$ [fm]','$R_{ch}^{Zr}$ [fm]','$R_{ch}^{Pb}$ [fm]','(E/A)$_{Ca}$ [MeV]','(E/A)$_{Zr}$ [MeV]','(E/A)$_{Pb}$ [MeV]','$F^{Ca}_{ch}$ []','$F^{Pb}_{ch}$ []','$\Delta F_{Ca}$ []','$\Delta F_{Pb}$ []','$\Delta R_{Ca}$ [fm]','$\Delta R_{Pb}$ [fm]']
print(data_RMF_basic.shape,len(data_RMF_basic_name))
```
(23, 26992) 23
(12, 26992) 12
```python
b4p=(eos_args_RMF_to_Skyrme[4]/763**2+eos_args_RMF_to_Skyrme[2]/980**2)/(8*939**2)*197.3**4
b4 =(eos_args_RMF_to_Skyrme[1]/eos_args_RMF_to_Skyrme[0]**2+eos_args_RMF_to_Skyrme[3]/782.5**2)/(4*939**2)*197.3**4-b4p/2
data_RMF_to_Skyrme=np.concatenate((eos_args_RMF_to_Skyrme,[b4,b4p],SAT_RMF_to_Skyrme,(PrexCrex_RMF_to_Skyrme[0:2]-PrexCrex_RMF_to_Skyrme[2:4]),PrexCrex_RMF_to_Skyrme_weighted),axis=0)
data_RMF_to_Skyrme_list=[data_RMF_to_Skyrme[:,index_list_RMF_to_Skyrme[i]:index_list_RMF_to_Skyrme[i+1]] for i in range(4)]
data_RMF_name=['m$_\sigma$ [MeV]','g$_\sigma^2$ []','g$_\delta^2$ []','g$_{\omega}^2$ []','g$_\\rho^2$ []','$\kappa$ [MeV]','$\lambda$ []','$\Lambda_{\omega\\rho}$','$\zeta_\omega$ []','$b_4$ [fm$^4$]','$b\'_4$ [fm$^4$]','$m^*$ [MeV]','$n_s$[fm$^{-3}$]','B [MeV]','S$_v$ [MeV]','L [MeV]','K [MeV]','K$_{sym}$ [MeV]','$\Delta F_{Ca}$ []','$\Delta F_{Pb}$ []','$a\Delta F_{Ca}+b\Delta F_{Pb}$[]','$a\'\Delta F_{Ca}+b\'\Delta F_{Pb}$[]','$a\'\'\Delta F_{Ca}+b\'\'\Delta F_{Pb}$[]']
print(data_RMF_to_Skyrme.shape,len(data_RMF_name))
data_RMF_to_Skyrme_basic=np.concatenate((PrexCrex_RMF_to_Skyrme[4:10],PrexCrex_RMF_to_Skyrme[0:2],(PrexCrex_RMF_to_Skyrme[0:2]-PrexCrex_RMF_to_Skyrme[2:4]),[PrexCrex_RMF_to_Skyrme[12]-PrexCrex_RMF_to_Skyrme[13],PrexCrex_RMF_to_Skyrme[10]-PrexCrex_RMF_to_Skyrme[11]]),axis=0)
data_RMF_to_Skyrme_basic_list=[data_RMF_to_Skyrme_basic[:,index_list_RMF_to_Skyrme[i]:index_list_RMF_to_Skyrme[i+1]] for i in range(4)]
# FchCa FchPb FwCa FwPb RchCa(fm) RchZr(fm) RchPb(fm) BECa(MeV) BEZr(MeV) BEPb(MeV)
# Rn_Pb208 (fm) Rp_Pb208(fm) Rn_Ca48(fm) Rp_Ca48(fm)
data_RMF_basic_name=['$R_{ch}^{Ca}$ [fm]','$R_{ch}^{Zr}$ [fm]','$R_{ch}^{Pb}$ [fm]','(E/A)$_{Ca}$ [MeV]','(E/A)$_{Zr}$ [MeV]','(E/A)$_{Pb}$ [MeV]','$F^{Ca}_{ch}$ []','$F^{Pb}_{ch}$ []','$\Delta F_{Ca}$ []','$\Delta F_{Pb}$ []','$\Delta R_{Ca}$ [fm]','$\Delta R_{Pb}$ [fm]']
print(data_RMF_to_Skyrme_basic.shape,len(data_RMF_basic_name))
```
(23, 31431) 23
(12, 31431) 12
```python
pd_Skyrme = pd.DataFrame(data=data.transpose(),columns=data_name)
pd_RMF = pd.DataFrame(data=data_RMF.transpose(),columns=data_RMF_name)
pd_RMF_to_Skyrme = pd.DataFrame(data=data_RMF_to_Skyrme.transpose(),columns=data_RMF_name)
pd_list =[pd_Skyrme,pd_RMF,pd_RMF_to_Skyrme]
name_list=['Skyrme','RMF','RMF_to_Skyrme']
```
```python
seaborn.set_theme(style="white")
for pd,name in zip(pd_list,name_list):
# Compute the correlation matrix
corr = pd.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
#cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
seaborn.heatmap(corr, mask=mask, cmap='jet', vmax=1,vmin=-1, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": 0.6,'anchor':(-1.5,0.8)})
plt.savefig('./figures/pearson_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
```



```python
```
```python
#This block generates big corner plots which take long time and memory
data_list_all=[data_list,data_RMF_list,data_RMF_to_Skyrme_list]
data_name_list=[data_name,data_RMF_name,data_RMF_name]
for name_i,data_list_i,data_name_list_i in zip(name_list,data_list_all,data_name_list):
for data_i,likelihood_name_i in zip(data_list_i,likelihood_name):
figure = corner.corner(data_i.transpose(),labels=data_name_list_i,label_kwargs={'size':16},show_titles=True,title_fmt=".2E")
plt.savefig('./figures/'+name_i+'_'+likelihood_name_i+'.pdf',bbox_inches = 'tight',format='pdf')
data_basic_list_all=[data_basic_list,data_RMF_basic_list,data_RMF_to_Skyrme_basic_list]
data_basic_name_list=[data_basic_name,data_RMF_basic_name,data_RMF_basic_name]
for name_i,data_list_i,data_name_list_i in zip(name_list,data_basic_list_all,data_basic_name_list):
for data_i,likelihood_name_i in zip(data_list_i,likelihood_name):
figure = corner.corner(data_i.transpose(),labels=data_name_list_i,label_kwargs={'size':20},show_titles=True,title_fmt=".2E")
plt.savefig('./figures/'+name_i+'_basic_'+likelihood_name_i+'.pdf',bbox_inches = 'tight',format='pdf')
```
|
sotzeeREPO_NAMECPREXPATH_START.@CPREX_extracted@CPREX-main@corner.ipynb@.PATH_END.py
|
{
"filename": "test_integrate.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/integrate/tests/test_integrate.py",
"type": "Python"
}
|
# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
"""
Tests for numerical integration.
"""
import numpy as np
from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
allclose)
from numpy.testing import (
assert_, assert_array_almost_equal,
assert_allclose, assert_array_equal, assert_equal, assert_warns)
from pytest import raises as assert_raises
from scipy.integrate import odeint, ode, complex_ode
#------------------------------------------------------------------------------
# Test ODE integrators
#------------------------------------------------------------------------------
class TestOdeint:
    # Check integrate.odeint
    """Exercise ``odeint`` on every non-complex problem in PROBLEMS."""
    def _do_problem(self, problem):
        # Dense time grid over the problem's integration interval.
        t = arange(0.0, problem.stop_t, 0.05)
        # Basic case
        z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
        assert_(problem.verify(z, t))
        # Use tfirst=True
        z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
                             full_output=True, tfirst=True)
        assert_(problem.verify(z, t))
        if hasattr(problem, 'jac'):
            # Use Dfun
            z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
                                 full_output=True)
            assert_(problem.verify(z, t))
            # Use Dfun and tfirst=True
            z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
                                 Dfun=lambda t, y: problem.jac(y, t),
                                 full_output=True, tfirst=True)
            assert_(problem.verify(z, t))
    def test_odeint(self):
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            # odeint is real-valued only; skip complex problems.
            if problem.cmplx:
                continue
            self._do_problem(problem)
class TestODEClass:
    """Shared driver for testing the object-oriented ``ode`` interfaces."""
    ode_class = None  # Set in subclass.
    def _do_problem(self, problem, integrator, method='adams'):
        # ode has callback arguments in different order than odeint
        def f(t, z):
            return problem.f(z, t)
        jac = None
        if hasattr(problem, 'jac'):
            def jac(t, z):
                return problem.jac(z, t)
        integrator_params = {}
        if problem.lband is not None or problem.uband is not None:
            integrator_params['uband'] = problem.uband
            integrator_params['lband'] = problem.lband
        ig = self.ode_class(f, jac)
        # Tolerances are tightened by 10x relative to what verify() checks.
        ig.set_integrator(integrator,
                          atol=problem.atol/10,
                          rtol=problem.rtol/10,
                          method=method,
                          **integrator_params)
        ig.set_initial_value(problem.z0, t=0.0)
        z = ig.integrate(problem.stop_t)
        # integrate() must return the same state that ig.y holds.
        assert_array_equal(z, ig.y)
        assert_(ig.successful(), (problem, method))
        assert_(ig.get_return_code() > 0, (problem, method))
        assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
class TestOde(TestODEClass):
    """Run the problem suite through the real-valued ``ode`` wrapper."""
    ode_class = ode
    def test_vode(self):
        # Check the vode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            # Adams (non-stiff method) only on non-stiff problems; BDF always.
            if not problem.stiff:
                self._do_problem(problem, 'vode', 'adams')
            self._do_problem(problem, 'vode', 'bdf')
    def test_zvode(self):
        # Check the zvode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if not problem.stiff:
                self._do_problem(problem, 'zvode', 'adams')
            self._do_problem(problem, 'zvode', 'bdf')
    def test_lsoda(self):
        # Check the lsoda solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            self._do_problem(problem, 'lsoda')
    def test_dopri5(self):
        # Check the dopri5 solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            if problem.stiff:
                continue
            # dopri5 does not use a Jacobian; skip problems that define one.
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dopri5')
    def test_dop853(self):
        # Check the dop853 solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dop853')
    def test_concurrent_fail(self):
        # vode/zvode/lsoda keep global Fortran state: interleaving two
        # instances must raise rather than silently corrupt results.
        for sol in ('vode', 'zvode', 'lsoda'):
            def f(t, y):
                return 1.0
            r = ode(f).set_integrator(sol)
            r.set_initial_value(0, 0)
            r2 = ode(f).set_integrator(sol)
            r2.set_initial_value(0, 0)
            r.integrate(r.t + 0.1)
            r2.integrate(r2.t + 0.1)
            assert_raises(RuntimeError, r.integrate, r.t + 0.1)
    def test_concurrent_ok(self):
        def f(t, y):
            return 1.0
        # These interleavings are allowed and must give correct results.
        for k in range(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)
                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)
                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)
            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)
                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)
                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
class TestComplexOde(TestODEClass):
    """Run the problem suite (including complex ones) through ``complex_ode``."""
    ode_class = complex_ode
    def test_vode(self):
        # Check the vode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            # Adams for non-stiff problems, BDF for stiff ones.
            if not problem.stiff:
                self._do_problem(problem, 'vode', 'adams')
            else:
                self._do_problem(problem, 'vode', 'bdf')
    def test_lsoda(self):
        # Check the lsoda solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            self._do_problem(problem, 'lsoda')
    def test_dopri5(self):
        # Check the dopri5 solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.stiff:
                continue
            # dopri5/dop853 do not use a Jacobian.
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dopri5')
    def test_dop853(self):
        # Check the dop853 solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dop853')
class TestSolout:
    # Check integrate.ode correctly handles solout for dopri5 and dop853
    """The solout callback must see the first and last steps and may stop
    the integration by returning -1."""
    def _run_solout_test(self, integrator):
        # Check correct usage of solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]
        def solout(t, y):
            # Record every accepted step; y must be copied since the
            # integrator reuses the buffer.
            ts.append(t)
            ys.append(y.copy())
        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]
        ig = ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)
    def test_solout(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_test(integrator)
    def _run_solout_after_initial_test(self, integrator):
        # Check if solout works even if it is set after the initial value.
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]
        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())
        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]
        ig = ode(rhs).set_integrator(integrator)
        ig.set_initial_value(y0, t0)
        ig.set_solout(solout)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)
    def test_solout_after_initial(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_after_initial_test(integrator)
    def _run_solout_break_test(self, integrator):
        # Check correct usage of stopping via solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]
        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())
            # Returning -1 asks the integrator to stop early.
            if t > tend/2.0:
                return -1
        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]
        ig = ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        # The run must have stopped strictly between tend/2 and tend.
        assert_(ts[-1] > tend/2.0)
        assert_(ts[-1] < tend)
    def test_solout_break(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_break_test(integrator)
class TestComplexSolout:
    # Check integrate.ode correctly handles solout for dopri5 and dop853
    """Same solout contract as TestSolout, but for a complex-valued system."""
    def _run_solout_test(self, integrator):
        # Check correct usage of solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 20.0
        y0 = [0.0]
        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())
        def rhs(t, y):
            # Complex integrand with a pole off the real axis at t = 10 + 1j.
            return [1.0/(t - 10.0 - 1j)]
        ig = complex_ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)
    def test_solout(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_test(integrator)
    def _run_solout_break_test(self, integrator):
        # Check correct usage of stopping via solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 20.0
        y0 = [0.0]
        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())
            # Returning -1 stops the integration early.
            if t > tend/2.0:
                return -1
        def rhs(t, y):
            return [1.0/(t - 10.0 - 1j)]
        ig = complex_ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_(ts[-1] > tend/2.0)
        assert_(ts[-1] < tend)
    def test_solout_break(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_break_test(integrator)
#------------------------------------------------------------------------------
# Test problems
#------------------------------------------------------------------------------
class ODE:
    """Base class describing an ODE test problem.

    Subclasses provide the right-hand side ``f`` (and optionally ``jac``)
    plus a ``verify`` method, and override the attributes below.
    """
    # Problem classification flags.
    stiff = False
    cmplx = False
    # End of the integration interval [0, stop_t] and initial condition.
    stop_t = 1
    z0 = []
    # Jacobian bandwidths (None means the Jacobian is not banded).
    lband = None
    uband = None
    # Tolerances used both for integration and for verification.
    atol = 1e-6
    rtol = 1e-5
class SimpleOscillator(ODE):
    r"""
    Free vibration of a simple oscillator::
        m \ddot{u} + k u = 0,  u(0) = u_0,  \dot{u}(0) = \dot{u}_0
    Solution::
        u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
    """
    stop_t = 1 + 0.09
    # State vector is z = [u, u']; z0 packs u(0) and u'(0).
    z0 = array([1.0, 0.1], float)
    k = 4.0
    m = 1.0
    def f(self, z, t):
        # First-order form: z' = [[0, 1], [-k/m, 0]] @ z.
        tmp = zeros((2, 2), float)
        tmp[0, 1] = 1.0
        tmp[1, 0] = -self.k / self.m
        return dot(tmp, z)
    def verify(self, zs, t):
        # Compare the computed u-component against the analytic solution.
        omega = sqrt(self.k / self.m)
        u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
        return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
class ComplexExp(ODE):
    r"""The equation \dot{u} = i u, whose solution is u(t) = u0 * exp(i t)."""
    stop_t = 1.23*pi
    # Five complex initial values on the unit circle.
    z0 = exp([1j, 2j, 3j, 4j, 5j])
    cmplx = True
    def f(self, z, t):
        return 1j*z
    def jac(self, z, t):
        return 1j*eye(5)
    def verify(self, zs, t):
        # Analytic solution: rotation of the initial values by angle t.
        u = self.z0 * exp(1j*t)
        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
class Pi(ODE):
    r"""Integrate 1/(s + 1j) for s from -10 to 10, via the shift s = t - 10
    so that the integration variable t runs over [0, 20]."""
    stop_t = 20
    z0 = [0]
    cmplx = True
    def f(self, z, t):
        return array([1./(t - 10 + 1j)])
    def verify(self, zs, t):
        # Closed form of the integral: -2j * arctan(10).
        u = -2j * np.arctan(10)
        return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
class CoupledDecay(ODE):
    r"""
    3 coupled decays suited for banded treatment
    (banded mode makes it necessary when N>>3)
    """
    stiff = True
    stop_t = 0.5
    z0 = [5.0, 7.0, 13.0]
    # One sub-diagonal, no super-diagonals: a lower-bidiagonal Jacobian.
    lband = 1
    uband = 0
    lmbd = [0.17, 0.23, 0.29]  # fictitious decay constants
    def f(self, z, t):
        # Chain decay: species i feeds species i+1.
        lmbd = self.lmbd
        return np.array([-lmbd[0]*z[0],
                         -lmbd[1]*z[1] + lmbd[0]*z[0],
                         -lmbd[2]*z[2] + lmbd[1]*z[1]])
    def jac(self, z, t):
        # The full Jacobian is
        #
        #    [-lmbd[0]      0         0   ]
        #    [ lmbd[0]  -lmbd[1]      0   ]
        #    [    0      lmbd[1]  -lmbd[2]]
        #
        # The lower and upper bandwidths are lband=1 and uband=0, resp.
        # The representation of this array in packed format is
        #
        #    [-lmbd[0]  -lmbd[1]  -lmbd[2]]
        #    [ lmbd[0]   lmbd[1]      0   ]
        lmbd = self.lmbd
        j = np.zeros((self.lband + self.uband + 1, 3), order='F')
        def set_j(ri, ci, val):
            # Map full-matrix indices (ri, ci) into banded storage.
            j[self.uband + ri - ci, ci] = val
        set_j(0, 0, -lmbd[0])
        set_j(1, 0, lmbd[0])
        set_j(1, 1, -lmbd[1])
        set_j(2, 1, lmbd[1])
        set_j(2, 2, -lmbd[2])
        return j
    def verify(self, zs, t):
        # Formulae derived by hand
        lmbd = np.array(self.lmbd)
        d10 = lmbd[1] - lmbd[0]
        d21 = lmbd[2] - lmbd[1]
        d20 = lmbd[2] - lmbd[0]
        e0 = np.exp(-lmbd[0] * t)
        e1 = np.exp(-lmbd[1] * t)
        e2 = np.exp(-lmbd[2] * t)
        u = np.vstack((
            self.z0[0] * e0,
            self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
            self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
            lmbd[1] * lmbd[0] * self.z0[0] / d10 *
            (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
#------------------------------------------------------------------------------
def f(t, x):
    """Harmonic-oscillator RHS: x'' = -x written as a first-order system."""
    return [x[1], -x[0]]


def jac(t, x):
    """Jacobian of ``f``."""
    return array([[0.0, 1.0],
                  [-1.0, 0.0]])


def f1(t, x, omega):
    """Oscillator RHS with a single scalar frequency parameter."""
    return [omega * x[1], -omega * x[0]]


def jac1(t, x, omega):
    """Jacobian of ``f1``."""
    return array([[0.0, omega],
                  [-omega, 0.0]])


def f2(t, x, omega1, omega2):
    """Oscillator RHS with two scalar parameters."""
    return [omega1 * x[1], -omega2 * x[0]]


def jac2(t, x, omega1, omega2):
    """Jacobian of ``f2``."""
    return array([[0.0, omega1],
                  [-omega2, 0.0]])


def fv(t, x, omega):
    """Oscillator RHS with the two parameters packed in a vector."""
    return [omega[0] * x[1], -omega[1] * x[0]]


def jacv(t, x, omega):
    """Jacobian of ``fv``."""
    return array([[0.0, omega[0]],
                  [-omega[1], 0.0]])
class ODECheckParameterUse:
    """Call an ode-class solver with several cases of parameter use."""
    # solver_name must be set before tests can be run with this class.
    # Set these in subclasses.
    solver_name = ''
    solver_uses_jac = False
    def _get_solver(self, f, jac):
        solver = ode(f, jac)
        if self.solver_uses_jac:
            solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
                                  with_jacobian=self.solver_uses_jac)
        else:
            # XXX Shouldn't set_integrator *always* accept the keyword arg
            # 'with_jacobian', and perhaps raise an exception if it is set
            # to True if the solver can't actually use it?
            solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
        return solver
    def _check_solver(self, solver):
        # Integrate the oscillator from [1, 0] over [0, pi]; since the
        # solution is [cos(t), -sin(t)], the result must be [-1, 0].
        ic = [1.0, 0.0]
        solver.set_initial_value(ic, 0.0)
        solver.integrate(pi)
        assert_array_almost_equal(solver.y, [-1.0, 0.0])
    def test_no_params(self):
        solver = self._get_solver(f, jac)
        self._check_solver(solver)
    def test_one_scalar_param(self):
        solver = self._get_solver(f1, jac1)
        omega = 1.0
        solver.set_f_params(omega)
        if self.solver_uses_jac:
            solver.set_jac_params(omega)
        self._check_solver(solver)
    def test_two_scalar_params(self):
        solver = self._get_solver(f2, jac2)
        omega1 = 1.0
        omega2 = 1.0
        solver.set_f_params(omega1, omega2)
        if self.solver_uses_jac:
            solver.set_jac_params(omega1, omega2)
        self._check_solver(solver)
    def test_vector_param(self):
        solver = self._get_solver(fv, jacv)
        omega = [1.0, 1.0]
        solver.set_f_params(omega)
        if self.solver_uses_jac:
            solver.set_jac_params(omega)
        self._check_solver(solver)
    def test_warns_on_failure(self):
        # Set nsteps small to ensure failure
        solver = self._get_solver(f, jac)
        solver.set_integrator(self.solver_name, nsteps=1)
        ic = [1.0, 0.0]
        solver.set_initial_value(ic, 0.0)
        assert_warns(UserWarning, solver.integrate, pi)
class TestDOPRI5CheckParameterUse(ODECheckParameterUse):
    """Parameter-passing checks for the dopri5 solver (no Jacobian support)."""
    solver_name = 'dopri5'
    solver_uses_jac = False
class TestDOP853CheckParameterUse(ODECheckParameterUse):
    """Parameter-passing checks for the dop853 solver (no Jacobian support)."""
    solver_name = 'dop853'
    solver_uses_jac = False
class TestVODECheckParameterUse(ODECheckParameterUse):
    """Parameter-passing checks for the vode solver (uses the Jacobian)."""
    solver_name = 'vode'
    solver_uses_jac = True
class TestZVODECheckParameterUse(ODECheckParameterUse):
    """Parameter-passing checks for the zvode solver (uses the Jacobian)."""
    solver_name = 'zvode'
    solver_uses_jac = True
class TestLSODACheckParameterUse(ODECheckParameterUse):
    """Parameter-passing checks for the lsoda solver (uses the Jacobian)."""
    solver_name = 'lsoda'
    solver_uses_jac = True
def test_odeint_trivial_time():
    """odeint must succeed for a single time point with full_output=True.

    Regression test for gh-4282.
    """
    initial = 1
    times = [0]
    sol, _info = odeint(lambda y, t: -y, initial, times, full_output=True)
    assert_array_equal(sol, np.array([[initial]]))
def test_odeint_banded_jacobian():
    # Test the use of the `Dfun`, `ml` and `mu` options of odeint.
    """Full, transposed and banded Jacobians must all give the same solution
    and the same number of Jacobian evaluations."""
    def func(y, t, c):
        # Linear system y' = c @ y.
        return c.dot(y)
    def jac(y, t, c):
        return c
    def jac_transpose(y, t, c):
        return c.T.copy(order='C')
    def bjac_rows(y, t, c):
        # Banded storage of c for ml=2, mu=1: rows hold the diagonals of c,
        # from the uppermost super-diagonal down, zero-padded at the ends.
        # np.row_stack was removed in NumPy 2.0; np.vstack is the supported
        # equivalent (row_stack was an alias of vstack).
        jac = np.vstack((np.r_[0, np.diag(c, 1)],
                         np.diag(c),
                         np.r_[np.diag(c, -1), 0],
                         np.r_[np.diag(c, -2), 0, 0]))
        return jac
    def bjac_cols(y, t, c):
        return bjac_rows(y, t, c).T.copy(order='C')
    c = array([[-205, 0.01, 0.00, 0.0],
               [0.1, -2.50, 0.02, 0.0],
               [1e-3, 0.01, -2.0, 0.01],
               [0.00, 0.00, 0.1, -1.0]])
    y0 = np.ones(4)
    t = np.array([0, 5, 10, 100])
    # Use the full Jacobian.
    sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=jac)
    # Use the transposed full Jacobian, with col_deriv=True.
    sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=jac_transpose, col_deriv=True)
    # Use the banded Jacobian.
    sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=bjac_rows, ml=2, mu=1)
    # Use the transposed banded Jacobian, with col_deriv=True.
    sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)
    assert_allclose(sol1, sol2, err_msg="sol1 != sol2")
    assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3")
    assert_allclose(sol3, sol4, err_msg="sol3 != sol4")
    # Verify that the number of jacobian evaluations was the same for the
    # calls of odeint with a full jacobian and with a banded jacobian. This is
    # a regression test--there was a bug in the handling of banded jacobians
    # that resulted in an incorrect jacobian matrix being passed to the LSODA
    # code. That would cause errors or excessive jacobian evaluations.
    assert_array_equal(info1['nje'], info2['nje'])
    assert_array_equal(info3['nje'], info4['nje'])
    # Test the use of tfirst
    sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),
                             full_output=True, atol=1e-13, rtol=1e-11,
                             mxstep=10000,
                             Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)
    # The code should execute the exact same sequence of floating point
    # calculations, so these should be exactly equal. We'll be safe and use
    # a small tolerance.
    assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty")
def test_odeint_errors():
    """Exceptions raised inside user RHS/Jacobian callables propagate out."""
    def decaying(x, t):
        return -100 * x

    def rhs_zero_div(x, t):
        return 1.0 / 0

    def rhs_non_numeric(x, t):
        return "foo"

    def jac_zero_div(x, t):
        return 1.0 / 0

    def jac_non_numeric(x, t):
        return [["foo"]]

    def decaying_2d(x, t):
        return [-100 * x[0], -0.1 * x[1]]

    def jac_2d_zero_div(x, t):
        return [[1.0 / 0, 0], [0, -0.1]]

    # Failures raised inside the right-hand side.
    assert_raises(ZeroDivisionError, odeint, rhs_zero_div, 1.0, [0, 1])
    assert_raises(ValueError, odeint, rhs_non_numeric, 1.0, [0, 1])
    # Failures raised inside the Jacobian callable.
    assert_raises(ZeroDivisionError, odeint, decaying, 1.0, [0, 1],
                  Dfun=jac_zero_div)
    assert_raises(ValueError, odeint, decaying, 1.0, [0, 1],
                  Dfun=jac_non_numeric)
    assert_raises(ZeroDivisionError, odeint, decaying_2d, [1.0, 1.0], [0, 1],
                  Dfun=jac_2d_zero_div)
def test_odeint_bad_shapes():
    """Malformed y0, t, RHS output or Jacobian shapes raise errors."""
    def mismatched_rhs(x, t):
        return [1, -1]

    def decaying(x, t):
        return -100 * x

    def wide_jac(x, t):
        return [[0, 0, 0]]

    # y0 must be at most 1-d.
    assert_raises(ValueError, odeint, decaying, [[0, 0], [0, 0]], [0, 1])
    # t must be at most 1-d.
    assert_raises(ValueError, odeint, decaying, [10.0], [[0, 1], [2, 3]])
    # y0 is scalar (10), but mismatched_rhs(x, t) returns two components.
    assert_raises(RuntimeError, odeint, mismatched_rhs, 10, [0, 1])
    # The Jacobian's shape does not match the 2-component system.
    assert_raises(RuntimeError, odeint, decaying, [10, 10], [0, 1],
                  Dfun=wide_jac)
def test_repeated_t_values():
    """Repeated, empty and non-monotonic time values (regression for gh-8217)."""
    def decay(x, t):
        return -0.25 * x

    # All-identical time points: the solution never moves.
    times = np.zeros(10)
    result = odeint(decay, [1.], times)
    assert_array_equal(result, np.ones((len(times), 1)))

    # tau is the half-life of exp(-t/4): every tau step halves the state.
    tau = 4 * np.log(2)
    times = [0] * 9 + [tau, 2 * tau, 2 * tau, 3 * tau]
    result = odeint(decay, [1, 2], times, rtol=1e-12, atol=1e-12)
    expected = np.array([[1.0, 2.0]] * 9 +
                        [[0.5, 1.0],
                         [0.25, 0.5],
                         [0.25, 0.5],
                         [0.125, 0.25]])
    assert_allclose(result, expected)

    # Edge case: an empty t sequence gives an empty (0, 1) solution array.
    result = odeint(decay, [1.], [])
    assert_array_equal(result, np.array([], dtype=np.float64).reshape((0, 1)))

    # Non-monotonic time sequences are rejected.
    assert_raises(ValueError, odeint, decay, [1.], [0, 1, 0.5, 0])
    assert_raises(ValueError, odeint, decay, [1, 2, 3], [0, -1, -2, 3])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@integrate@tests@test_integrate.py@.PATH_END.py
|
{
"filename": "SOPRANOS.md",
"repo_name": "EranOfek/AstroPack",
"repo_path": "AstroPack_extracted/AstroPack-main/matlab/astro/+astro/+supernova/+SOPRANOS/SOPRANOS.md",
"type": "Markdown"
}
|
# Overview
# List of Subpackages
# Subpackages
# Usage
# Notes
# Known Issues
# See Also
|
EranOfekREPO_NAMEAstroPackPATH_START.@AstroPack_extracted@AstroPack-main@matlab@astro@+astro@+supernova@+SOPRANOS@SOPRANOS.md@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/treemap/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``treemap.hoverlabel.font`` compound property.

    Auto-generated from the plotly schema: it accepts a ``Font`` graph
    object (or a dict of compatible properties) and is wired into the
    validator tree through ``plotly_name``/``parent_name``.
    """

    def __init__(self, plotly_name="font", parent_name="treemap.hoverlabel", **kwargs):
        # data_class_str names the graph-objects class this validator
        # instantiates; data_docs is the user-facing property description
        # surfaced in docstrings and validation error messages.  Both may
        # be overridden by the caller via kwargs.
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for `family`.
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            linepositionsrc
                Sets the source reference on Chart Studio Cloud
                for `lineposition`.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            shadowsrc
                Sets the source reference on Chart Studio Cloud
                for `shadow`.
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            stylesrc
                Sets the source reference on Chart Studio Cloud
                for `style`.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            textcasesrc
                Sets the source reference on Chart Studio Cloud
                for `textcase`.
            variant
                Sets the variant of the font.
            variantsrc
                Sets the source reference on Chart Studio Cloud
                for `variant`.
            weight
                Sets the weight (or boldness) of the font.
            weightsrc
                Sets the source reference on Chart Studio Cloud
                for `weight`.
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@treemap@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/mms/fpi/__init__.py",
"type": "Python"
}
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@mms@fpi@__init__.py@.PATH_END.py
|
|
{
"filename": "_xaxis.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/scene/_xaxis.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XaxisValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``layout.scene.xaxis`` compound property.

    Auto-generated from the plotly schema: it accepts an ``XAxis`` graph
    object (or a dict of compatible properties) and is wired into the
    validator tree through ``plotly_name``/``parent_name``.
    """

    def __init__(self, plotly_name="xaxis", parent_name="layout.scene", **kwargs):
        # data_class_str names the graph-objects class this validator
        # instantiates; data_docs is the user-facing property description
        # surfaced in docstrings and validation error messages.  Both may
        # be overridden by the caller via kwargs.
        super(XaxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "XAxis"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autorange
                Determines whether or not the range of this
                axis is computed in relation to the input data.
                See `rangemode` for more info. If `range` is
                provided, then `autorange` is set to False.
            autotypenumbers
                Using "strict" a numeric string in trace data
                is not converted to a number. Using *convert
                types* a numeric string in trace data may be
                treated as a number during automatic axis
                `type` detection. Defaults to
                layout.autotypenumbers.
            backgroundcolor
                Sets the background color of this axis' wall.
            calendar
                Sets the calendar system to use for `range` and
                `tick0` if this is a date axis. This does not
                set the calendar for interpreting data on this
                axis, that's specified in the trace or via the
                global `layout.calendar`
            categoryarray
                Sets the order in which categories on this axis
                appear. Only has an effect if `categoryorder`
                is set to "array". Used with `categoryorder`.
            categoryarraysrc
                Sets the source reference on Chart Studio Cloud
                for  categoryarray .
            categoryorder
                Specifies the ordering logic for the case of
                categorical variables. By default, plotly uses
                "trace", which specifies the order that is
                present in the data supplied. Set
                `categoryorder` to *category ascending* or
                *category descending* if order should be
                determined by the alphanumerical order of the
                category names. Set `categoryorder` to "array"
                to derive the ordering from the attribute
                `categoryarray`. If a category is not found in
                the `categoryarray` array, the sorting behavior
                for that attribute will be identical to the
                "trace" mode. The unspecified categories will
                follow the categories in `categoryarray`. Set
                `categoryorder` to *total ascending* or *total
                descending* if order should be determined by
                the numerical order of the values. Similarly,
                the order can be determined by the min, max,
                sum, mean or median of all the values.
            color
                Sets default for all colors associated with
                this axis all at once: line, font, tick, and
                grid colors. Grid color is lightened by
                blending this with the plot background
                Individual pieces can override this.
            dtick
                Sets the step in-between ticks on this axis.
                Use with `tick0`. Must be a positive number, or
                special strings available to "log" and "date"
                axes. If the axis `type` is "log", then ticks
                are set every 10^(n*dtick) where n is the tick
                number. For example, to set a tick mark at 1,
                10, 100, 1000, ... set dtick to 1. To set tick
                marks at 1, 100, 10000, ... set dtick to 2. To
                set tick marks at 1, 5, 25, 125, 625, 3125, ...
                set dtick to log_10(5), or 0.69897000433. "log"
                has several special values; "L<f>", where `f`
                is a positive number, gives ticks linearly
                spaced in value (but not position). For example
                `tick0` = 0.1, `dtick` = "L0.5" will put ticks
                at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
                plus small digits between, use "D1" (all
                digits) or "D2" (only 2 and 5). `tick0` is
                ignored for "D1" and "D2". If the axis `type`
                is "date", then you must convert the time to
                milliseconds. For example, to set the interval
                between ticks to one day, set `dtick` to
                86400000.0. "date" also has special values
                "M<n>" gives ticks spaced by a number of
                months. `n` must be a positive integer. To set
                ticks on the 15th of every third month, set
                `tick0` to "2000-01-15" and `dtick` to "M3". To
                set ticks every 4 years, set `dtick` to "M48"
            exponentformat
                Determines a formatting rule for the tick
                exponents. For example, consider the number
                1,000,000,000. If "none", it appears as
                1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
                "power", 1x10^9 (with 9 in a super script). If
                "SI", 1G. If "B", 1B.
            gridcolor
                Sets the color of the grid lines.
            gridwidth
                Sets the width (in px) of the grid lines.
            hoverformat
                Sets the hover text formatting rule using d3
                formatting mini-languages which are very
                similar to those in Python. For numbers, see:
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
                And for dates see:
                https://github.com/d3/d3-time-
                format#locale_format We add one item to d3's
                date formatter: "%{n}f" for fractional seconds
                with n digits. For example, *2016-10-13
                09:15:23.456* with tickformat "%H~%M~%S.%2f"
                would display "09~15~23.46"
            linecolor
                Sets the axis line color.
            linewidth
                Sets the width (in px) of the axis line.
            minexponent
                Hide SI prefix for 10^n if |n| is below this
                number. This only has an effect when
                `tickformat` is "SI" or "B".
            mirror
                Determines if the axis lines or/and ticks are
                mirrored to the opposite side of the plotting
                area. If True, the axis lines are mirrored. If
                "ticks", the axis lines and ticks are mirrored.
                If False, mirroring is disable. If "all", axis
                lines are mirrored on all shared-axes subplots.
                If "allticks", axis lines and ticks are
                mirrored on all shared-axes subplots.
            nticks
                Specifies the maximum number of ticks for the
                particular axis. The actual number of ticks
                will be chosen automatically to be less than or
                equal to `nticks`. Has an effect only if
                `tickmode` is set to "auto".
            range
                Sets the range of this axis. If the axis `type`
                is "log", then you must take the log of your
                desired range (e.g. to set the range from 1 to
                100, set the range from 0 to 2). If the axis
                `type` is "date", it should be date strings,
                like date data, though Date objects and unix
                milliseconds will be accepted and converted to
                strings. If the axis `type` is "category", it
                should be numbers, using the scale where each
                category is assigned a serial number from zero
                in the order it appears.
            rangemode
                If "normal", the range is computed in relation
                to the extrema of the input data. If *tozero*`,
                the range extends to 0, regardless of the input
                data If "nonnegative", the range is non-
                negative, regardless of the input data. Applies
                only to linear axes.
            separatethousands
                If "true", even 4-digit integers are separated
            showaxeslabels
                Sets whether or not this axis is labeled
            showbackground
                Sets whether or not this axis' wall has a
                background color.
            showexponent
                If "all", all exponents are shown besides their
                significands. If "first", only the exponent of
                the first tick is shown. If "last", only the
                exponent of the last tick is shown. If "none",
                no exponents appear.
            showgrid
                Determines whether or not grid lines are drawn.
                If True, the grid lines are drawn at every tick
                mark.
            showline
                Determines whether or not a line bounding this
                axis is drawn.
            showspikes
                Sets whether or not spikes starting from data
                points to this axis' wall are shown on hover.
            showticklabels
                Determines whether or not the tick labels are
                drawn.
            showtickprefix
                If "all", all tick labels are displayed with a
                prefix. If "first", only the first tick is
                displayed with a prefix. If "last", only the
                last tick is displayed with a suffix. If
                "none", tick prefixes are hidden.
            showticksuffix
                Same as `showtickprefix` but for tick suffixes.
            spikecolor
                Sets the color of the spikes.
            spikesides
                Sets whether or not spikes extending from the
                projection data points to this axis' wall
                boundaries are shown on hover.
            spikethickness
                Sets the thickness (in px) of the spikes.
            tick0
                Sets the placement of the first tick on this
                axis. Use with `dtick`. If the axis `type` is
                "log", then you must take the log of your
                starting tick (e.g. to set the starting tick to
                100, set the `tick0` to 2) except when
                `dtick`=*L<f>* (see `dtick` for more info). If
                the axis `type` is "date", it should be a date
                string, like date data. If the axis `type` is
                "category", it should be a number, using the
                scale where each category is assigned a serial
                number from zero in the order it appears.
            tickangle
                Sets the angle of the tick labels with respect
                to the horizontal. For example, a `tickangle`
                of -90 draws the tick labels vertically.
            tickcolor
                Sets the tick color.
            tickfont
                Sets the tick font.
            tickformat
                Sets the tick label formatting rule using d3
                formatting mini-languages which are very
                similar to those in Python. For numbers, see:
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
                And for dates see:
                https://github.com/d3/d3-time-
                format#locale_format We add one item to d3's
                date formatter: "%{n}f" for fractional seconds
                with n digits. For example, *2016-10-13
                09:15:23.456* with tickformat "%H~%M~%S.%2f"
                would display "09~15~23.46"
            tickformatstops
                A tuple of :class:`plotly.graph_objects.layout.
                scene.xaxis.Tickformatstop` instances or dicts
                with compatible properties
            tickformatstopdefaults
                When used in a template (as layout.template.lay
                out.scene.xaxis.tickformatstopdefaults), sets
                the default property values to use for elements
                of layout.scene.xaxis.tickformatstops
            ticklen
                Sets the tick length (in px).
            tickmode
                Sets the tick mode for this axis. If "auto",
                the number of ticks is set via `nticks`. If
                "linear", the placement of the ticks is
                determined by a starting position `tick0` and a
                tick step `dtick` ("linear" is the default
                value if `tick0` and `dtick` are provided). If
                "array", the placement of the ticks is set via
                `tickvals` and the tick text is `ticktext`.
                ("array" is the default value if `tickvals` is
                provided).
            tickprefix
                Sets a tick label prefix.
            ticks
                Determines whether ticks are drawn or not. If
                "", this axis' ticks are not drawn. If
                "outside" ("inside"), this axis' are drawn
                outside (inside) the axis lines.
            ticksuffix
                Sets a tick label suffix.
            ticktext
                Sets the text displayed at the ticks position
                via `tickvals`. Only has an effect if
                `tickmode` is set to "array". Used with
                `tickvals`.
            ticktextsrc
                Sets the source reference on Chart Studio Cloud
                for  ticktext .
            tickvals
                Sets the values at which ticks on this axis
                appear. Only has an effect if `tickmode` is set
                to "array". Used with `ticktext`.
            tickvalssrc
                Sets the source reference on Chart Studio Cloud
                for  tickvals .
            tickwidth
                Sets the tick width (in px).
            title
                :class:`plotly.graph_objects.layout.scene.xaxis
                .Title` instance or dict with compatible
                properties
            titlefont
                Deprecated: Please use
                layout.scene.xaxis.title.font instead. Sets
                this axis' title font. Note that the title's
                font used to be customized by the now
                deprecated `titlefont` attribute.
            type
                Sets the axis type. By default, plotly attempts
                to determined the axis type by looking into the
                data of the traces that referenced the axis in
                question.
            visible
                A single toggle to hide the axis while
                preserving interaction like dragging. Default
                is true when a cheater plot is present on the
                axis, otherwise false
            zeroline
                Determines whether or not a line is drawn at
                along the 0 value of this axis. If True, the
                zero line is drawn on top of the grid lines.
            zerolinecolor
                Sets the line color of the zero line.
            zerolinewidth
                Sets the width (in px) of the zero line.
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@scene@_xaxis.py@.PATH_END.py
|
{
"filename": "FBR.py",
"repo_name": "AdiPandya/Fast_Bayesian_Regression",
"repo_path": "Fast_Bayesian_Regression_extracted/Fast_Bayesian_Regression-main/FBR.py",
"type": "Python"
}
|
import numpy as np
from numba import jit, njit, types, vectorize
import matplotlib.pyplot as plt
import seaborn
@njit(nogil=True)
def ln_likelihood(param, x, y, xerr, yerr, method=0):
    """Log-likelihood of a straight-line model with errors in both coordinates.

    Args:
        param (float array eg. np.array((0.0, 0.0, 0.0))): the 3 parameters
            (slope, intercept, intrinsic scatter)
        x (array): x data
        y (array): y data
        xerr (array): error in x
        yerr (array): error in y
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR. Defaults to Y|X or 0.

    Returns:
        float: likelihood function in log
    """
    slope, icept, scat = param[0], param[1], param[2]
    if method == 0:
        # Y|X: model y from x; combine y errors with slope-scaled x errors
        # and the intrinsic scatter in quadrature.
        model = slope*x + icept
        eff_sigma = np.sqrt((yerr**2) + np.square(slope * xerr) + (scat**2))
        lnL = -0.5 * np.sum((((y - model) / eff_sigma) ** 2) + np.log(2*np.pi*(eff_sigma**2)))
    elif method == 1:
        # X|Y: the inverse fit, modelling x from y.
        model = slope*y + icept
        eff_sigma = np.sqrt((xerr**2) + np.square(slope * yerr) + (scat**2))
        lnL = -0.5 * np.sum((((x - model) / eff_sigma) ** 2) + np.log(2*np.pi*(eff_sigma**2)))
    elif method == 2:
        # ODR: measurement errors projected perpendicular to the line.
        model = slope*x + icept
        eff_sigma = np.sqrt(((yerr**2)/(1 + slope**2)) + (np.square(slope * xerr)/(1 + slope**2)) + (scat**2))
        lnL = -0.5 * np.sum((((y - model)/(np.sqrt(1 + slope**2)*eff_sigma))**2) + np.log(2*np.pi*(eff_sigma**2)))
    return lnL
@njit(nogil=True)
def ln_prior(param, method=0):
    """Log-prior for the three line parameters: flat inside fixed boxes.

    Args:
        param (float array): the 3 parameters (slope, intercept, intrinsic scatter)
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR. Defaults to Y|X or 0.

    Returns:
        float: 0.0 when the parameters lie inside the prior range,
        -np.inf otherwise (including for an unrecognised method).
    """
    m, c, sig = param[0], param[1], param[2]
    # BUG FIX: the original condition was `method == 0 or 2`, which is
    # always true because the literal 2 is truthy, so the X|Y prior
    # below was unreachable.  Test each value explicitly.
    if method == 0 or method == 2:
        if not (-10 < m < 10):
            return -np.inf
        if not (-10 < c < 10):
            return -np.inf
        if not (0 < sig < 10):
            return -np.inf
        return 0.0
    if method == 1:
        # For X|Y fits the boxes are placed on the implied Y|X slope (1/m)
        # and intercept (-c/m).  NOTE(review): 1/m diverges at m == 0;
        # assumed acceptable because the prior then simply rejects — confirm.
        if not (-10 < 1/m < 30):
            return -np.inf
        if not (-10 < -c/m < 10):
            return -np.inf
        if not (0 < sig < 10):
            return -np.inf
        return 0.0
    # Unknown method: exclude everything instead of falling off the end.
    return -np.inf
@njit(nogil=True)
def ln_posterior(theta, x, y, xerr, yerr, method=0):
    """Log-posterior: sum of the log-prior and the log-likelihood.

    Args:
        theta (float array eg. np.array((0.0, 0.0, 0.0))): the 3 parameters
        x (array): x data
        y (array): y data
        xerr (array): error in x
        yerr (array): error in y
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR. Defaults to Y|X or 0.

    Returns:
        float: value of posterior in log
    """
    prior = ln_prior(theta, method)
    likelihood = ln_likelihood(theta, x, y, xerr, yerr, method)
    return prior + likelihood
@njit(nogil=True)
def multivariate_sample(mean, cov):
    """Draw one sample from a multivariate normal distribution.

    Args:
        mean (float array eg. np.array((0.0, 0.0, 0.0))): mean vector
        cov (float diagonal matrix eg. np.diag((1e-3, 1e-4, 1e-4))):
            covariance matrix (must be positive definite)

    Returns:
        float array: one random draw, same shape as ``mean``
    """
    # Transform a standard-normal draw by the Cholesky factor of cov.
    z = np.random.standard_normal(mean.size)
    chol = np.linalg.cholesky(cov)
    return mean + chol @ z
@njit(nogil=True)
def metropolis_step(x, y, xerr, yerr, ln_post_0, theta_0=np.array((0.0, 0.0, 0.0)), step_cov=np.diag((1e-3, 1e-4, 1e-4)), method=0):
    """Take one Metropolis-Hastings step of the MCMC chain.

    Args:
        x (array): x data
        y (array): y data
        xerr (array): error in x
        yerr (array): error in y
        ln_post_0 (float): log posterior at the current position
        theta_0 (float, array): current parameter values (mean of the
            Gaussian proposal). Defaults to np.array((0.0, 0.0, 0.0)).
        step_cov (float, array): proposal covariance controlling the step
            size. Defaults to np.diag((1e-3, 1e-4, 1e-4)).
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR.

    Returns:
        (array, float): accepted parameter values and their log posterior
    """
    proposal = multivariate_sample(theta_0, step_cov)
    ln_post_prop = ln_posterior(proposal, x, y, xerr, yerr, method)
    # Accept with probability min(1, exp(delta)); comparing the log ratio
    # against log(u) keeps the test stable for large |delta|.
    if ln_post_prop - ln_post_0 > np.log(np.random.rand()):
        return proposal, ln_post_prop
    return theta_0, ln_post_0
@njit(nogil=True)
def diff(arr):
    """Return first differences along axis 0: arr[i+1] - arr[i]."""
    leading = arr[1:]
    trailing = arr[:-1]
    return leading - trailing
@njit(nogil=True)
def any_numba(array):
    """Numba-compatible element-wise ``np.any`` over the last axis.

    Args:
        array: 3-d array of shape (steps, chains, params)

    Returns:
        2-d float array of shape (steps, chains): 1.0 where any element of
        ``array[i, j, :]`` is non-zero, 0.0 where all are zero.  Used to
        flag MCMC steps whose parameter vector changed (i.e. was accepted).
    """
    n_rows, n_cols = array.shape[0], array.shape[1]
    flags = np.ones(n_rows*n_cols)
    for i in range(n_rows):
        for j in range(n_cols):
            # Idiom fix: use `not np.any(...)` instead of comparing to False.
            if not np.any(array[i, j, :]):
                flags[i*n_cols + j] = 0
    return flags.reshape(n_rows, n_cols)
@njit(nogil=True)
def MCMC(x, y, xerr, yerr, theta_0=np.array((0.0, 0.0, 0.0)), step_cov=np.diag((1e-3, 1e-4, 1e-4)), n_steps=20000, method=0, n_chains=1, acc_frac=False):
    """Run one or more Metropolis-Hastings MCMC chains.

    Args:
        x (array): x data
        y (array): y data
        xerr (array): error in x
        yerr (array): error in y
        theta_0 (float, array): initial parameter values (all chains start
            here). Defaults to np.array((0.0, 0.0, 0.0)).
        step_cov (float, array): proposal covariance controlling the step
            size. Defaults to np.diag((1e-3, 1e-4, 1e-4)).
        n_steps (int): total number of samples, split evenly across the
            chains. Defaults to 20000.
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR.
        n_chains (int): number of independent chains. Defaults to 1.
        acc_frac (bool): when True, also compute per-chain acceptance
            fractions. Defaults to False.

    Returns:
        (array, array): chain of shape (n_steps//n_chains, n_chains, 3)
        and the per-chain acceptance fractions (zeros unless acc_frac).
    """
    # Split the requested samples evenly across chains.  The original
    # two-flag bookkeeping (n_step_split) was redundant and left the flag
    # undefined for n_chains < 1; a single guarded division is equivalent.
    if n_chains > 1:
        n_steps = n_steps // n_chains
    acc = np.zeros(n_chains)
    chain = np.zeros((n_steps, n_chains, len(theta_0)))
    for i in range(n_chains):
        t_0 = theta_0
        lp0 = ln_posterior(t_0, x, y, xerr, yerr, method)
        for j in range(n_steps):
            t_0, lp0 = metropolis_step(x, y, xerr, yerr, lp0, t_0, step_cov, method)
            chain[j, i, :] = t_0
    if acc_frac:
        # A step counts as accepted when any parameter changed between
        # consecutive samples.
        for i in range(n_chains):
            acc[i] = any_numba(diff(chain))[:, i].sum() / (len(chain) - 1)
    return chain, acc
@njit(nogil=True)
def get_param(x, y, xerr, yerr, theta_0=np.array((0.0, 0.0, 0.0)), step_cov=np.diag((1e-3, 1e-4, 1e-4)), n_steps=20000, burn_in=2000, method=0, n_chains=1, acc_frac=False, pow_law=False):
    """Run MCMC and return the posterior mean and std of each parameter.

    Args:
        x (array): x data
        y (array): y data
        xerr (array): error in x
        yerr (array): error in y
        theta_0 (float, array): initial parameter values. Defaults to
            np.array((0.0, 0.0, 0.0)).
        step_cov (float, array): proposal covariance. Defaults to
            np.diag((1e-3, 1e-4, 1e-4)).
        n_steps (int): total number of MCMC samples. Defaults to 20000.
        burn_in (int): number of burn-in samples (split across the chains).
            Defaults to 2000.
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR.
        n_chains (int): number of independent chains. Defaults to 1.
        acc_frac (bool): forwarded to MCMC. Defaults to False.
        pow_law (bool): report 10**intercept (a power-law normalization)
            instead of the intercept. Defaults to False.

    Returns:
        float: (slope, slope std, intercept, intercept std,
        intrinsic scatter, intrinsic scatter std)
    """
    chain0, _ = MCMC(x, y, xerr, yerr, theta_0, step_cov, n_steps, method, n_chains, acc_frac)
    burn_in = burn_in//n_chains
    # Drop burn-in from every chain and stack the chains into one 2-d array.
    chain_0 = np.zeros((chain0[burn_in:,0,].shape[0]*n_chains, chain0[burn_in:,0,].shape[1]))
    for i in range(n_chains):
        chain_0[(len(chain0)-burn_in)*i:(len(chain0)-burn_in)*(i+1),:] = chain0[burn_in:,i,:]
    # BUG FIX: the original condition was `method == 0 or 2`, which is
    # always true, so both blocks ran for method == 1 and the first pass
    # was silently overwritten (wasted work).
    if method == 0 or method == 2:
        slope, slope_err = chain_0[:,0].mean(), chain_0[:,0].std()
        if pow_law:
            Norm = 10**(chain_0[:,1])
            intercept, intercept_err = Norm.mean(), Norm.std()
        else:
            intercept, intercept_err = chain_0[:,1].mean(), chain_0[:,1].std()
        int_sigma, int_sigma_err = chain_0[:,2].mean(), chain_0[:,2].std()
    else:
        # method == 1 (X|Y): convert the fitted x = m*y + c back to the
        # Y|X form y = x/m - c/m, sample by sample.
        inv_slope = 1/chain_0[:,0]
        slope, slope_err = inv_slope.mean(), inv_slope.std()
        if pow_law:
            Norm = 10**(-chain_0[:,1]/chain_0[:,0])
            intercept, intercept_err = Norm.mean(), Norm.std()
        else:
            # BUG FIX: the error was previously the std of -c/mean(m)
            # (scaling by a constant instead of the per-sample slope);
            # use the element-wise ratio, consistent with get_raw_param
            # and get_a_param.
            ratio = -chain_0[:,1]/chain_0[:,0]
            intercept, intercept_err = ratio.mean(), ratio.std()
        int_sigma, int_sigma_err = chain_0[:,2].mean(), chain_0[:,2].std()
    return slope, slope_err, intercept, intercept_err, int_sigma, int_sigma_err
@njit(nogil=True)
def get_raw_param(x, y, xerr, yerr, theta_0=np.array((0.0, 0.0, 0.0)), step_cov=np.diag((1e-3, 1e-4, 1e-4)), n_steps=20000, burn_in=2000, method=0, n_chains=1, acc_frac=False, pow_law=False):
    """Run MCMC and return the raw post-burn-in parameter samples.

    Args:
        x (array): x data
        y (array): y data
        xerr (array): error in x
        yerr (array): error in y
        theta_0 (float, array): initial parameter values. Defaults to
            np.array((0.0, 0.0, 0.0)).
        step_cov (float, array): proposal covariance. Defaults to
            np.diag((1e-3, 1e-4, 1e-4)).
        n_steps (int): total number of MCMC samples. Defaults to 20000.
        burn_in (int): number of burn-in samples (split across the chains).
            Defaults to 2000.
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR.
        n_chains (int): number of independent chains. Defaults to 1.
        acc_frac (bool): forwarded to MCMC. Defaults to False.
        pow_law (bool): report 10**intercept instead of the intercept.
            Defaults to False.

    Returns:
        float array: slope, intercept, intrinsic scatter sample arrays
        (chains concatenated after burn-in removal)
    """
    chain0, _ = MCMC(x, y, xerr, yerr, theta_0, step_cov, n_steps, method, n_chains, acc_frac)
    burn_in = burn_in//n_chains
    # Drop burn-in from every chain and stack the chains into one 2-d array.
    chain_0 = np.zeros((chain0[burn_in:,0,].shape[0]*n_chains, chain0[burn_in:,0,].shape[1]))
    for i in range(n_chains):
        chain_0[(len(chain0)-burn_in)*i:(len(chain0)-burn_in)*(i+1),:] = chain0[burn_in:,i,:]
    # BUG FIX: the original condition was `method == 0 or 2`, which is
    # always true, so both conversion blocks ran for method == 1 and the
    # first pass was silently overwritten (wasted work).
    if method == 0 or method == 2:
        slope = chain_0[:,0]
        if pow_law:
            intercept = 10**(chain_0[:,1])
        else:
            intercept = chain_0[:,1]
    else:
        # method == 1 (X|Y): convert back to Y|X form sample by sample.
        slope = 1/chain_0[:,0]
        if pow_law:
            intercept = 10**(-chain_0[:,1]/chain_0[:,0])
        else:
            intercept = -(chain_0[:,1]/chain_0[:,0])
    int_sigma = chain_0[:,2]
    return slope, intercept, int_sigma
@njit(nogil=True)
def get_a_param(x, y, xerr, yerr, theta_0=np.array((0.0, 0.0, 0.0)), step_cov=np.diag((1e-3, 1e-4, 1e-4)), n_steps=20000, burn_in=2000, method=0, n_chains=1, acc_frac=False, pow_law=False):
    """Run MCMC and return the median and 16th/84th percentiles of each parameter.

    Args:
        x (array): x data
        y (array): y data
        xerr (array): error in x
        yerr (array): error in y
        theta_0 (float, array): initial parameter values. Defaults to
            np.array((0.0, 0.0, 0.0)).
        step_cov (float, array): proposal covariance. Defaults to
            np.diag((1e-3, 1e-4, 1e-4)).
        n_steps (int): total number of MCMC samples. Defaults to 20000.
        burn_in (int): number of burn-in samples (split across the chains).
            Defaults to 2000.
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR.
        n_chains (int): number of independent chains. Defaults to 1.
        acc_frac (bool): forwarded to MCMC. Defaults to False.
        pow_law (bool): report 10**intercept instead of the intercept.
            Defaults to False.

    Returns:
        float: (slope median, slope 84th pct, slope 16th pct,
        intercept median, 84th pct, 16th pct,
        intrinsic scatter median, 84th pct, 16th pct)
    """
    chain0, _ = MCMC(x, y, xerr, yerr, theta_0, step_cov, n_steps, method, n_chains, acc_frac)
    burn_in = burn_in//n_chains
    # Drop burn-in from every chain and stack the chains into one 2-d array.
    chain_0 = np.zeros((chain0[burn_in:,0,].shape[0]*n_chains, chain0[burn_in:,0,].shape[1]))
    for i in range(n_chains):
        chain_0[(len(chain0)-burn_in)*i:(len(chain0)-burn_in)*(i+1),:] = chain0[burn_in:,i,:]
    # BUG FIX: the original condition was `method == 0 or 2`, which is
    # always true, so both blocks ran for method == 1 and the first pass
    # was silently overwritten (wasted work).
    if method == 0 or method == 2:
        slope = np.median(chain_0[:,0])
        slope_u, slope_l = np.percentile(chain_0[:,0], [84, 16])
        if pow_law:
            Norm = 10**(chain_0[:,1])
            A = np.median(Norm)
            A_u, A_l = np.percentile(Norm, [84, 16])
        else:
            A = np.median(chain_0[:,1])
            A_u, A_l = np.percentile(chain_0[:,1], [84, 16])
        int_sigma = np.median(chain_0[:,2])
        int_sigma_u, int_sigma_l = np.percentile(chain_0[:,2], [84, 16])
    else:
        # method == 1 (X|Y): convert back to Y|X form sample by sample.
        slope = np.median(1/chain_0[:,0])
        slope_u, slope_l = np.percentile(1/chain_0[:,0], [84, 16])
        if pow_law:
            Norm = 10**(-chain_0[:,1]/chain_0[:,0])
            A = np.median(Norm)
            A_u, A_l = np.percentile(Norm, [84, 16])
        else:
            A = np.median(-chain_0[:,1]/chain_0[:,0])
            A_u, A_l = np.percentile(-chain_0[:,1]/chain_0[:,0], [84, 16])
        int_sigma = np.median(chain_0[:,2])
        int_sigma_u, int_sigma_l = np.percentile(chain_0[:,2], [84, 16])
    return slope, slope_u, slope_l, A, A_u, A_l, int_sigma, int_sigma_u, int_sigma_l
def plot_trace(x, y, xerr, yerr, theta_0=np.array((0.0, 0.0, 0.0)), step_cov=np.diag((1e-3, 1e-4, 1e-4)), n_steps=20000, burn_in=2000, method=0, n_chains=1, acc_frac=False, pow_law=False, m_chains=False):
    """Plot MCMC trace plots and marginal KDEs for the three parameters.

    Args:
        x (array): x data
        y (array): y data
        xerr (array): error in x
        yerr (array): error in y
        theta_0 (float, array): initial parameter values. Defaults to
            np.array((0.0, 0.0, 0.0)).
        step_cov (float, array): proposal covariance. Defaults to
            np.diag((1e-3, 1e-4, 1e-4)).
        n_steps (int): total number of MCMC samples. Defaults to 20000.
        burn_in (int): number of burn-in samples (split across the chains).
            Defaults to 2000.
        method (int): 0 for Y|X, 1 for X|Y, and 2 for ODR.
        n_chains (int): number of independent chains. Defaults to 1.
        acc_frac (bool): forwarded to MCMC (shown in the legend).
        pow_law (bool): plot 10**intercept as "Normalization".
        m_chains (bool): when True, plot each chain separately; otherwise
            the chains are concatenated into a single trace.

    Returns:
        None (draws a matplotlib figure)
    """
    fig, ax = plt.subplots(3, 2, figsize=(8, 6), sharey='row', gridspec_kw={'width_ratios': [3, 1]})
    # NOTE: the original also called get_raw_param() here, but its results
    # were unconditionally overwritten below before being used; that
    # redundant (and expensive) second MCMC run has been removed.
    chain_0, acc = MCMC(x, y, xerr, yerr, theta_0, step_cov, n_steps, method, n_chains, acc_frac)
    burn_in = burn_in//n_chains
    if m_chains == True:
        # Keep the chains separate, burn-in removed.
        chain_0 = chain_0[burn_in:,:,:]
        slope = np.zeros((chain_0.shape[0], n_chains))
        intercept = np.zeros((chain_0.shape[0], n_chains))
        int_sigma = np.zeros((chain_0.shape[0], n_chains))
    else:
        # Concatenate the post-burn-in parts of all chains into one column.
        chain0 = np.zeros((chain_0[burn_in:,0,:].shape[0]*n_chains, chain_0[:,0,:].shape[1]))
        for i in range(n_chains):
            chain0[(chain_0.shape[0] - burn_in)*i:(chain_0.shape[0] - burn_in)*(i+1)] = chain_0[burn_in:,i,:]
        chain_0 = chain0.reshape(chain_0[burn_in:,0,:].shape[0]*n_chains, 1, chain_0.shape[2])
        slope = np.zeros((chain_0.shape[0], 1))
        intercept = np.zeros((chain_0.shape[0], 1))
        int_sigma = np.zeros((chain_0.shape[0], 1))
    # BUG FIX: the original condition was `method == 0 or 2` (always true),
    # so both conversion loops ran for method == 1 and the first pass was
    # wasted work.
    if method == 0 or method == 2:
        for i in range(chain_0.shape[1]):
            slope[:,i] = chain_0[:,i,0]
            if pow_law:
                intercept[:,i] = 10**(chain_0[:,i,1])
            else:
                intercept[:,i] = chain_0[:,i,1]
            int_sigma[:,i] = chain_0[:,i,2]
    else:
        # method == 1 (X|Y): convert back to Y|X form sample by sample.
        for i in range(chain_0.shape[1]):
            slope[:,i] = 1/(chain_0[:,i,0])
            if pow_law:
                intercept[:,i] = 10**(-chain_0[:,i,1]/chain_0[:,i,0])
            else:
                intercept[:,i] = -(chain_0[:,i,1]/chain_0[:,i,0])
            int_sigma[:,i] = chain_0[:,i,2]
    for i in range(chain_0.shape[1]):
        ax[0,0].plot(slope[:,i])
        ax[1,0].plot(intercept[:,i])
        ax[2,0].plot(int_sigma[:,i])
        # NOTE(review): burn-in was already removed from chain_0 above, so
        # slicing with [burn_in:] here drops extra samples from the KDEs.
        # Preserved from the original -- confirm whether it is intended.
        seaborn.kdeplot(y=slope[burn_in:,i], ax=ax[0,1], label=f'Chain {i+1} acc frac = {round(acc[i],3)}')
        seaborn.kdeplot(y=intercept[burn_in:,i], ax=ax[1,1])
        seaborn.kdeplot(y=int_sigma[burn_in:,i], ax=ax[2,1])
    ax[2,0].set_xlabel('number of steps')
    ax[0,0].set_ylabel('Slope')
    ax[2,0].set_ylabel('Intrinsic scatter')
    ax[0,1].legend(bbox_to_anchor=(2.6, 1.0))
    ax[0,1].yaxis.tick_right()
    ax[1,1].yaxis.tick_right()
    ax[2,1].yaxis.tick_right()
    ax[0,1].set_xticklabels([])
    ax[1,1].set_xticklabels([])
    ax[2,1].set_xticklabels([])
    if pow_law:
        ax[1,0].set_ylabel('Normalization')
    else:
        ax[1,0].set_ylabel('Intercept')
    fig.subplots_adjust(wspace=.05, hspace=.0)
    return
|
AdiPandyaREPO_NAMEFast_Bayesian_RegressionPATH_START.@Fast_Bayesian_Regression_extracted@Fast_Bayesian_Regression-main@FBR.py@.PATH_END.py
|
{
"filename": "example_7dq2.ipynb",
"repo_name": "vijayvarma392/surfinBH",
"repo_path": "surfinBH_extracted/surfinBH-master/examples/example_7dq2.ipynb",
"type": "Jupyter Notebook"
}
|
# Example usage of surfinBH7dq2 fit.
```python
import surfinBH
```
```python
fit_name = 'surfinBH7dq2'
```
## Load the fit, this only needs to be done once at the start of a script
```python
fit = surfinBH.LoadFits(fit_name)
```
Loaded surfinBH7dq2 fit.
## Read the documentation
```python
help(fit)
```
Help on Fit7dq2 in module surfinBH._fit_evaluators.fit_7dq2 object:
class Fit7dq2(surfinBH.surfinBH.SurFinBH)
| A class for the surfinBH7dq2 model presented in Varma et al.,
| arxiv:1809.09125. This model predicts the final mass mf, final spin vector
| chif and final kick velocity vector vf, for the remnants of precessing
| binary black hole systems. The fits are done using Gaussian Process
| Regression (GPR) and also provide an error estimate along with the fit
| value.
|
| This model has been trained in the parameter space:
| q <= 2, |chiA| <= 0.8, |chiB| <= 0.8
|
| However, it extrapolates reasonably to:
| q <= 4, |chiA| <= 1, |chiB| <= 1
|
| =========================================================================
| Usage:
|
| import surfinBH
|
| # Load the fit
| fit = surfinBH.LoadFits('surfinBH7dq2')
|
| We provide the following call methods:
| # remnant mass and 1-sigma error estimate
| mf, mf_err = fit.mf(q, chiA, chiB, **kwargs)
|
| # remnant spin and 1-sigma error estimate
| chif, chif_err = fit.chif(q, chiA, chiB, **kwargs)
|
| # remnant recoil kick and 1-sigma error estimate
| vf, vf_err = fit.vf(q, chiA, chiB, **kwargs)
|
| # All of these together
| mf, chif, vf, mf_err, chif_err, vf_err
| = fit.all(q, chiA, chiB, **kwargs)
|
| The arguments for each of these call methods are as follows:
| Arguments:
| q: Mass ratio (q>=1)
|
| chiA: Dimensionless spin of the larger BH (array of size 3)
|
| chiB: Dimensionless spin of the smaller BH (array of size 3)
|
| By default, the spins are assumed to be the component spins at
| t=-100 M from the peak of the waveform, and in the coorbital frame,
| defined as:
| The z-axis is along the orbital angular momentum at t=-100M.
| The x-axis is along the line of separation from the smaller BH to
| the larger BH at this time.
| The y-axis completes the triad.
| We obtain this frame from the waveform as defined in
| arxiv:1705.07089.
| The returned spin and kick vectors are also in the same frame.
|
| If 'omega0' is given, instead the spins are assumed to be the
| component spins when the PN orbital frequency = omega0. The
| spins are assumed to be in the inertial frame, defined as:
| The z-axis is along the Newtonian orbital angular momentum when the
| PN orbital frequency = omega0.
| The x-axis is along the line of separation from the smaller BH to
| the larger BH at this frequency.
| The y-axis completes the triad.
| We obtain this frame from PN.
| Given the spins at omega0, we evolve the spins using PN until
| the orbital frequency = omega_switch, then we further evolve the
| spins using the NRSur7dq2 model (arxiv:1705.07089) until t=-100M
| from the peak of the waveform. Then we evaluate the fits using the
| spins at t=-100 M. Finally, we transform the remnant spin and kick
| vectors back to the inertial frame defined above.
|
| Optional arguments:
| allow_extrap:
| If False, raises a warning when q > 2.1 or |chiA|,|chiB| > 0.81,
| and raises an error when q > 4.1 or |chiA|,|chiB| > 1.
| If True, allows extrapolation to any q and |chiA|,|chiB| <= 1.
| Use at your own risk.
| Default: False.
|
| omega0:
| Initial dimensionless orbital frequency in units of 1/M, where M is
| the total mass. If this is given, the spins in x are assumed to be
| the component spins at this orbital frequency, and in the inertial
| frame as defined above. The returned remnant spin and kick vectors
| are also in the same frame.
| Default: None.
|
| PN_approximant:
| Approximant used to do the PN spin evolution. Choose from
| 'SpinTaylorT4', 'SpinTaylorT1' or 'SpinTaylorT2'.
| Default: 'SpinTaylorT4'.
|
| PN_dt:
| Dimensionless time step size in units of M (total mass), used for
| the PN evolution. You may need to increase this if omega0 is very
| low.
| Default: 0.1
|
| PN_spin_order:
| Twice the PN order of spin effects. E.g., use 7 for 3.5PN.
| Default: 7
|
| PN_phase_order:
| Twice the PN order in phase. E.g., use 7 for 3.5PN.
| Default: 7
|
| omega_switch:
| Dimensionless orbital frequency at which to switch from PN to
| NRSur7dq2 model. You may need to increase this if the NRSur7dq2
| model raises an exception like:
| "Got omega_ref = 0.0180 < 0.0184 = omega_0, too small!"
| Default: 0.018
|
| Method resolution order:
| Fit7dq2
| surfinBH.surfinBH.SurFinBH
| __builtin__.object
|
| Methods defined here:
|
| __init__(self, name, load_nrsur=False)
| #-------------------------------------------------------------------------
|
| ----------------------------------------------------------------------
| Methods inherited from surfinBH.surfinBH.SurFinBH:
|
| all(self, *args, **kwargs)
| Evaluates fit and 1-sigma error estimate for remnant mass, spin
| and kick velocity.
| Returns:
| mf, chif, vf, mf_err_est, chif_err_est, vf_err_est
|
| chif, vf, chif_err_est and vf_err_est are arrays of size 3.
|
| chif(self, *args, **kwargs)
| Evaluates fit and 1-sigma error estimate for remnant spin.
| Returns:
| chif, chif_err_est
|
| chif and chif_err_est are arrays of size 3.
|
| mf(self, *args, **kwargs)
| Evaluates fit and 1-sigma error estimate for remnant mass.
| Returns:
| mf, mf_err_est
|
| vf(self, *args, **kwargs)
| Evaluates fit and 1-sigma error estimate for remnant kick velocity.
| Returns:
| vf, vf_err_est
|
| vf and vf_err_est are arrays of size 3.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from surfinBH.surfinBH.SurFinBH:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
## Evaluate the fits (using spins at t=-100 M)
```python
# Note: The component spins, remnant spin and kick are all
# assumed to be in the coorbital frame at t=-100M.
# Mass ratio and component spins
q = 1.2
chiA = [0.5, 0.05, 0.3]
chiB = [-0.5, -0.05, 0.1]
# remnant mass and 1-sigma error estimate
mf, mf_err = fit.mf(q, chiA, chiB)
# remnant spin and 1-sigma error estimate
chif, chif_err = fit.chif(q, chiA, chiB)
# remnant recoil kick and 1-sigma error estimate
vf, vf_err = fit.vf(q, chiA, chiB)
# All of these together
mf, chif, vf, mf_err, chif_err, vf_err = fit.all(q, chiA, chiB)
```
# Evaluate the fits using spins at earlier frequencies
```python
# Note: The component spins, remnant spin and kick are all
# assumed to be in the inertial frame defined at orbital frequency=omega0.
# Initial dimensionless orbital frequency (in units of 1/M)
omega0 = 7e-3
# remnant mass and 1-sigma error estimate
mf, mf_err = fit.mf(q, chiA, chiB, omega0=omega0)
# remnant spin and 1-sigma error estimate
chif, chif_err = fit.chif(q, chiA, chiB, omega0=omega0)
# remnant recoil kick and 1-sigma error estimate
vf, vf_err = fit.vf(q, chiA, chiB, omega0=omega0)
# All of these together
mf, chif, vf, mf_err, chif_err, vf_err = fit.all(q, chiA, chiB, omega0=omega0)
```
Loaded NRSur7dq2 waveform model
```python
```
|
vijayvarma392REPO_NAMEsurfinBHPATH_START.@surfinBH_extracted@surfinBH-master@examples@example_7dq2.ipynb@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "ArgonneCPAC/diffstar",
"repo_path": "diffstar_extracted/diffstar-main/diffstar/fitting_helpers/utils.py",
"type": "Python"
}
|
"""
"""
import numpy as np
from jax import jit as jjit
from jax import value_and_grad
from jax.example_libraries import optimizers as jax_opt
from scipy.optimize import minimize
def minimizer(loss_func, loss_func_deriv, p_init, loss_data, nstep, *args, **kwargs):
    """Minimize loss_func with scipy's L-BFGS-B, falling back to JAX's Adam.

    Parameters
    ----------
    loss_func : callable
        Differentiable scalar function with signature loss_func(params, data).

    loss_func_deriv : callable
        Gradient of loss_func with respect to params, same signature.

    p_init : ndarray of shape (n_params, )
        Starting point for the optimization.

    loss_data : sequence
        Auxiliary data forwarded to loss_func and loss_func_deriv.

    nstep : int
        Number of gradient steps taken by the Adam fallback
        (ignored when L-BFGS-B converges).

    Returns
    -------
    p_best : ndarray of shape (n_params, )
        Best-fit parameters.

    loss_best : float
        Loss evaluated at p_best.

    success : int
        1 if L-BFGS-B converged cleanly,
        0 if the Adam fallback terminated without problems,
        -1 if the Adam fallback hit NaN/inf and stopped early.
    """
    # Primary attempt: deterministic quasi-Newton descent via scipy.
    scipy_res = minimize(
        loss_func,
        x0=p_init,
        method="L-BFGS-B",
        jac=loss_func_deriv,
        args=(loss_data,),
    )
    if scipy_res.success:
        return scipy_res.x, float(scipy_res.fun), 1

    # Fallback: Adam descent (with a short warmup) implemented with JAX.
    adam_res = jax_adam_wrapper_v2(
        loss_func, p_init, loss_data, nstep, n_warmup=2, *args, **kwargs
    )
    p_best, loss_best = adam_res[0], adam_res[1]
    fit_terminates = adam_res[4]
    return p_best, loss_best, (0 if fit_terminates else -1)
def return_random_pinit(p_init, loss_data, loss_func_deriv):
    """Slightly perturb the initial guess with a Gaussian perturbation.

    Draws from Normal(p_init[0], p_init[1]) and redraws (up to 1000 times)
    until the gradient of the loss at the drawn point is NaN-free.

    Parameters
    ----------
    p_init : sequence of two ndarrays
        p_init[0] holds the means and p_init[1] the standard deviations
        of the Gaussian used to draw the perturbed initial guess.

    loss_data : sequence
        Auxiliary data forwarded to loss_func_deriv.

    loss_func_deriv : callable
        Gradient of the loss, called as loss_func_deriv(params, loss_data).

    Returns
    -------
    p_init_2 : ndarray
        The drawn initial guess (the last draw if all attempts failed).

    isnan : bool
        True if the gradient still contains NaN after all attempts;
        callers should check this flag before using p_init_2.
    """
    # Original code had identical if/else return branches; collapsed into a
    # single draw-and-retry loop with one return (behavior unchanged).
    p_init_2 = np.random.normal(p_init[0], p_init[1])
    isnan = np.isnan(loss_func_deriv(p_init_2, loss_data)).any()
    n_tries = 0
    while isnan and n_tries < 1000:
        p_init_2 = np.random.normal(p_init[0], p_init[1])
        isnan = np.isnan(loss_func_deriv(p_init_2, loss_data)).any()
        n_tries += 1
    return p_init_2, isnan
def minimizer_wrapper(
    loss_func,
    loss_func_deriv,
    p_init,
    loss_data,
    loss_tol=0.1,
    max_iter=10,
):
    """Convenience function wrapping scipy's L-BFGS-B optimizer

    Starting from p_init, L-BFGS-B goes down the gradient
    to calculate the returned value p_best.

    Parameters
    ----------
    loss_func : callable
        Differentiable function to minimize.
        Must accept inputs (params, data) and return a scalar,
        and be differentiable using jax.grad.

    loss_func_deriv : callable
        Returns the gradient wrt the parameters of loss_func.
        Must accept inputs (params, data) and return a scalar.

    p_init : ndarray of shape (n_params, )
        Initial guess at the parameters. The fitter uses this guess to draw
        random initial guesses with small perturbations around these values.

    loss_data : sequence
        Sequence of floats and arrays storing whatever data is needed
        to compute loss_func(p_init, loss_data)

    loss_tol : float
        When loss_best < loss_tol the fitter stops. Otherwise it starts from
        a slightly different intial guess.

    max_iter : int
        While loss_best > loss_tol the fitter will restart from a slightly
        different initial guess, but it will stop after max_iter times.

    Returns
    -------
    p_best : ndarray of shape (n_params, )
        Stores the best-fit value of the parameters after n_step steps

    loss_best : float
        Final value of the loss

    success : int
        -1 if NaN or inf is encountered by the fitter, causing termination before n_step
        1 for a fit that terminates with no such problems
    """
    # NOTE(review): return_random_pinit unpacks p_init as (means, stddevs),
    # so p_init is expected to be a 2-element sequence here even though the
    # docstring describes a flat ndarray — confirm against callers.
    iter_id = 0
    # Per-restart bookkeeping: candidate params, losses, and status flags.
    p_best_list = []
    loss_best_list = []
    success_list = []
    loss_best_current = np.inf
    # Restart from perturbed initial guesses until the best loss drops below
    # loss_tol or the restart budget max_iter is exhausted.
    while (iter_id < max_iter) & (loss_best_current > loss_tol):
        p_init_run, isnan = return_random_pinit(p_init, loss_data, loss_func_deriv)
        if isnan:
            # No NaN-free starting point could be drawn: record the input
            # means with a sentinel loss of 999.99 and a failure flag.
            p_best_list.append(p_init[0])
            loss_best_list.append(999.99)
            success_list.append(-1)
            loss_best_current = np.min(loss_best_list)
            iter_id += 1
        else:
            res = minimize(
                loss_func,
                x0=p_init_run,
                method="L-BFGS-B",
                jac=loss_func_deriv,
                args=(loss_data,),
            )
            p_best = res.x
            loss_best = float(res.fun)
            success = 1
            p_best_list.append(p_best)
            loss_best_list.append(loss_best)
            success_list.append(success)
            # Guard against an all-NaN loss history before taking nanmin,
            # which would otherwise warn and return NaN.
            if np.isnan(loss_best_list).all():
                loss_best_current = np.inf
            else:
                loss_best_current = np.nanmin(loss_best_list)
            iter_id += 1
    # Report the best restart overall; fall back to the input means with the
    # sentinel loss if every recorded loss is NaN.
    if np.isnan(loss_best_list).all():
        loss_best = 999.99
        success = -1
        p_best = p_init[0]
    else:
        argmin = np.nanargmin(loss_best_list)
        p_best = p_best_list[argmin]
        loss_best = loss_best_list[argmin]
        success = success_list[argmin]
    return p_best, loss_best, success
def jax_adam_wrapper_v2(
    loss_func,
    params_init,
    loss_data,
    n_step,
    n_warmup=0,
    step_size=0.01,
    warmup_n_step=50,
    warmup_step_size=None,
):
    """Run JAX's Adam optimizer, optionally preceded by warmup bursts.

    Parameters
    ----------
    loss_func : callable
        Differentiable scalar function with signature loss_func(params, data),
        differentiable via jax.grad.

    params_init : ndarray of shape (n_params, )
        Starting point for the optimization.

    loss_data : sequence
        Auxiliary data forwarded to loss_func.

    n_step : int
        Number of Adam steps for the final descent.

    n_warmup : int, optional
        Number of warmup bursts; each burst's endpoint seeds the next.
        Default is 0.

    step_size : float, optional
        Adam step size for the final descent. Default is 0.01.

    warmup_n_step : int, optional
        Number of Adam steps per warmup burst. Default is 50.

    warmup_step_size : float, optional
        Adam step size during warmup. Default is 5 * step_size.

    Returns
    -------
    params_step_n : ndarray of shape (n_params, )
        Best-fit parameters found during the final descent.

    loss : float
        Loss at the returned parameters.

    loss_arr : ndarray
        Loss recorded at every completed step of the final descent.

    params_arr : ndarray
        Parameters recorded at every completed step of the final descent.

    fit_terminates : int
        0 if the final descent aborted early on NaN/inf, 1 otherwise.
    """
    if warmup_step_size is None:
        warmup_step_size = 5 * step_size

    # Warmup: a few short, larger-step Adam bursts to escape a poor guess.
    warm_params = np.copy(params_init)
    for _ in range(n_warmup):
        warm_params = _jax_adam_wrapper(
            loss_func, warm_params, loss_data, warmup_n_step,
            step_size=warmup_step_size,
        )[0]

    # Discard the warmup result if it drifted into non-finite territory.
    start = warm_params if np.all(np.isfinite(warm_params)) else params_init

    burn = _jax_adam_wrapper(loss_func, start, loss_data, n_step, step_size=step_size)
    # A truncated loss history signals an early NaN/inf abort.
    fit_terminates = 0 if len(burn[2]) < n_step else 1
    return (*burn, fit_terminates)
def _jax_adam_wrapper(loss_func, params_init, loss_data, n_step, step_size=0.01):
    """Take n_step Adam steps down the gradient of loss_func.

    Parameters
    ----------
    loss_func : callable
        Differentiable scalar function with signature loss_func(params, data),
        differentiable via jax.grad.

    params_init : ndarray of shape (n_params, )
        Starting point for the optimization.

    loss_data : sequence
        Auxiliary data forwarded to loss_func.

    n_step : int
        Number of Adam steps to take.

    step_size : float, optional
        Adam step size. Default is 0.01.

    Returns
    -------
    params_step_n : ndarray of shape (n_params, )
        Best-fit parameters over all completed steps.

    loss : float
        Loss at the returned parameters (999.99 sentinel if the very first
        evaluation was already non-finite).

    loss_arr : ndarray
        Loss recorded at each completed step (truncated on early abort).

    params_arr : ndarray
        Parameters recorded at each completed step (truncated on early abort).
    """
    n_params = len(params_init)
    # -1.0 sentinel marks steps that were never reached.
    loss_arr = np.full(n_step, -1.0).astype("f4")
    params_arr = np.zeros((n_step, n_params)).astype("f4")

    opt_init, opt_update, get_params = jax_opt.adam(step_size)
    opt_state = opt_init(params_init)
    loss_and_grad = jjit(value_and_grad(loss_func, argnums=0))

    for istep in range(n_step):
        params = np.array(get_params(opt_state))
        loss, grads = loss_and_grad(params, loss_data)

        finite = (
            np.all(np.isfinite(params))
            and np.isfinite(loss)
            and np.all(np.isfinite(grads))
        )
        if not finite:
            # Abort: hand back the best point seen so far, or the starting
            # point with a 999.99 sentinel loss if nothing completed yet.
            if istep > 0:
                indx_best = np.nanargmin(loss_arr[:istep])
                return (
                    params_arr[indx_best],
                    loss_arr[indx_best],
                    loss_arr[:istep],
                    params_arr[:istep, :],
                )
            return np.copy(params), 999.99, loss_arr[:0], params_arr[:0, :]

        params_arr[istep, :] = params
        loss_arr[istep] = loss
        opt_state = opt_update(istep, grads, opt_state)

    indx_best = np.nanargmin(loss_arr)
    return params_arr[indx_best], loss_arr[indx_best], loss_arr, params_arr
|
ArgonneCPACREPO_NAMEdiffstarPATH_START.@diffstar_extracted@diffstar-main@diffstar@fitting_helpers@utils.py@.PATH_END.py
|
{
"filename": "theil_sen.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py2/sklearn/linear_model/theil_sen.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
A Theil-Sen Estimator for Multiple Linear Regression Model
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import warnings
from itertools import combinations
import numpy as np
from scipy import linalg
from scipy.special import binom
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import check_random_state
from ..utils import check_X_y, _get_n_jobs
from ..utils.random import choice
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange as range
from ..exceptions import ConvergenceWarning
# Machine epsilon for double precision; used as a near-zero threshold.
_EPSILON = np.finfo(np.double).eps


def _modified_weiszfeld_step(X, x_old):
    """Perform one modified Weiszfeld iteration.

    One step of the iteratively re-weighted least squares scheme that
    approximates the spatial (L1) median of the rows of X.

    Parameters
    ----------
    X : array, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    x_old : array, shape = [n_features]
        Current estimate of the spatial median.

    Returns
    -------
    x_new : array, shape = [n_features]
        Updated estimate after one iteration step.

    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      T. Kärkkäinen and S. Äyrämö
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    """
    displacement = X - x_old
    dist = np.sqrt(np.sum(displacement ** 2, axis=1))
    far_enough = dist >= _EPSILON
    # x_old coincides with one of the samples exactly when some distance
    # fell below the epsilon threshold.
    is_x_old_in_X = int(far_enough.sum() < X.shape[0])
    displacement = displacement[far_enough]
    dist = dist[far_enough][:, np.newaxis]
    quotient_norm = linalg.norm(np.sum(displacement / dist, axis=0))
    if quotient_norm > _EPSILON:  # avoid division by (near) zero below
        new_direction = (np.sum(X[far_enough, :] / dist, axis=0)
                         / np.sum(1 / dist, axis=0))
    else:
        new_direction = 1.
        quotient_norm = 1.
    # Convex-style combination of the re-weighted mean and the old point.
    eta = is_x_old_in_X / quotient_norm
    return max(0., 1. - eta) * new_direction + min(1., eta) * x_old
def _spatial_median(X, max_iter=300, tol=1.e-3):
    """Compute the spatial median (L1 median) of the rows of X.

    The spatial median is the point minimizing the sum of Euclidean
    distances to all sample points, found here by repeated modified
    Weiszfeld steps.

    Parameters
    ----------
    X : array, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    max_iter : int, optional
        Maximum number of Weiszfeld iterations. Default is 300.

    tol : float, optional
        Stop when the (squared) step norm drops below tol**2.
        Default is 1.e-3.

    Returns
    -------
    n_iter : int
        Number of iterations performed.

    spatial_median : array, shape = [n_features]
        Estimated spatial median.

    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      T. Kärkkäinen and S. Äyrämö
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    """
    # In one dimension the spatial median reduces to the ordinary median.
    if X.shape[1] == 1:
        return 1, np.median(X.ravel())

    tol **= 2  # convergence is tested on the squared step norm
    estimate = np.mean(X, axis=0)
    converged = False
    for n_iter in range(max_iter):
        refined = _modified_weiszfeld_step(X, estimate)
        converged = np.sum((estimate - refined) ** 2) < tol
        if converged:
            break
        estimate = refined
    if not converged:
        warnings.warn("Maximum number of iterations {max_iter} reached in "
                      "spatial median for TheilSen regressor."
                      "".format(max_iter=max_iter), ConvergenceWarning)
    return n_iter, refined
def _breakdown_point(n_samples, n_subsamples):
    """Approximate the breakdown point of the Theil-Sen estimator.

    Parameters
    ----------
    n_samples : int
        Number of samples.

    n_subsamples : int
        Number of subsamples used per least-squares fit.

    Returns
    -------
    breakdown_point : float
        Approximation of the breakdown point, i.e. the fraction of
        contaminated samples the estimator can tolerate.
    """
    # Fraction of "clean" samples: 2^(-1/k) * (n - k + 1) + k - 1, over n.
    clean_fraction = (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1)
                      + n_subsamples - 1) / n_samples
    return 1 - clean_fraction
def _lstsq(X, y, indices, fit_intercept):
    """Least Squares Estimator for TheilSenRegressor class.

    Solves one least-squares problem per row of `indices`, each restricted
    to the corresponding subset of samples. An intercept column of ones is
    prepended when fit_intercept is true.

    Parameters
    ----------
    X : array, shape = [n_samples, n_features]
        Design matrix, where n_samples is the number of samples and
        n_features is the number of features.

    y : array, shape = [n_samples]
        Target vector, where n_samples is the number of samples.

    indices : array, shape = [n_subpopulation, n_subsamples]
        Indices of all subsamples with respect to the chosen subpopulation.

    fit_intercept : bool
        Fit intercept or not.

    Returns
    -------
    weights : array, shape = [n_subpopulation, n_features + intercept]
        Solution matrix of n_subpopulation solved least square problems.
    """
    intercept_cols = int(fit_intercept)
    n_features = X.shape[1] + intercept_cols
    n_subsamples = indices.shape[1]
    weights = np.empty((indices.shape[0], n_features))
    # Design matrix is reused across subsets; when fitting an intercept,
    # its first column stays permanently filled with ones.
    design = np.ones((n_subsamples, n_features))
    # gelss requires the rhs to be padded to the larger design dimension.
    rhs = np.zeros((max(n_subsamples, n_features)))
    lstsq, = get_lapack_funcs(('gelss',), (design, rhs))
    for row, subset in enumerate(indices):
        design[:, intercept_cols:] = X[subset, :]
        rhs[:n_subsamples] = y[subset]
        # gelss returns the solution vector as its second output.
        weights[row] = lstsq(design, rhs)[1][:n_features]
    return weights
class TheilSenRegressor(LinearModel, RegressorMixin):
    """Theil-Sen Estimator: robust multivariate regression model.

    The algorithm calculates least square solutions on subsets with size
    n_subsamples of the samples in X. Any value of n_subsamples between the
    number of features and samples leads to an estimator with a compromise
    between robustness and efficiency. Since the number of least square
    solutions is "n_samples choose n_subsamples", it can be extremely large
    and can therefore be limited with max_subpopulation. If this limit is
    reached, the subsets are chosen randomly. In a final step, the spatial
    median (or L1 median) is calculated of all least square solutions.

    Read more in the :ref:`User Guide <theil_sen_regression>`.

    Parameters
    ----------
    fit_intercept : boolean, optional, default True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    max_subpopulation : int, optional, default 1e4
        Instead of computing with a set of cardinality 'n choose k', where n is
        the number of samples and k is the number of subsamples (at least
        number of features), consider only a stochastic subpopulation of a
        given maximal size if 'n choose k' is larger than max_subpopulation.
        For other than small problem sizes this parameter will determine
        memory usage and runtime if n_subsamples is not changed.

    n_subsamples : int, optional, default None
        Number of samples to calculate the parameters. This is at least the
        number of features (plus 1 if fit_intercept=True) and the number of
        samples as a maximum. A lower number leads to a higher breakdown
        point and a low efficiency while a high number leads to a low
        breakdown point and a high efficiency. If None, take the
        minimum number of subsamples leading to maximal robustness.
        If n_subsamples is set to n_samples, Theil-Sen is identical to least
        squares.

    max_iter : int, optional, default 300
        Maximum number of iterations for the calculation of spatial median.

    tol : float, optional, default 1.e-3
        Tolerance when calculating spatial median.

    random_state : RandomState or an int seed, optional, default None
        A random number generator instance to define the state of the
        random permutations generator.

    n_jobs : integer, optional, default 1
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (median of distribution).

    intercept_ : float
        Estimated intercept of regression model.

    breakdown_ : float
        Approximated breakdown point.

    n_iter_ : int
        Number of iterations needed for the spatial median.

    n_subpopulation_ : int
        Number of combinations taken into account from 'n choose k', where n is
        the number of samples and k is the number of subsamples.

    References
    ----------
    - Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
      Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang
      http://home.olemiss.edu/~xdang/papers/MTSE.pdf
    """

    def __init__(self, fit_intercept=True, copy_X=True,
                 max_subpopulation=1e4, n_subsamples=None, max_iter=300,
                 tol=1.e-3, random_state=None, n_jobs=1, verbose=False):
        # sklearn convention: store constructor args as-is (only
        # max_subpopulation is coerced to int so 1e4-style floats work);
        # all validation is deferred to fit() via _check_subparams().
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.max_subpopulation = int(max_subpopulation)
        self.n_subsamples = n_subsamples
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.verbose = verbose

    def _check_subparams(self, n_samples, n_features):
        """Validate n_subsamples against the data shape and derive the
        number of subsets (subpopulation size) to solve."""
        n_subsamples = self.n_subsamples
        # n_dim is the number of unknowns per least-squares subproblem
        # (one extra for the intercept column).
        if self.fit_intercept:
            n_dim = n_features + 1
        else:
            n_dim = n_features
        if n_subsamples is not None:
            if n_subsamples > n_samples:
                raise ValueError("Invalid parameter since n_subsamples > "
                                 "n_samples ({0} > {1}).".format(n_subsamples,
                                                                 n_samples))
            if n_samples >= n_features:
                if n_dim > n_subsamples:
                    plus_1 = "+1" if self.fit_intercept else ""
                    # NOTE(review): the message formats n_samples for the
                    # right-hand value although the comparison is against
                    # n_subsamples — looks like an upstream message bug;
                    # verify against current scikit-learn before changing.
                    raise ValueError("Invalid parameter since n_features{0} "
                                     "> n_subsamples ({1} > {2})."
                                     "".format(plus_1, n_dim, n_samples))
            else:  # if n_samples < n_features
                if n_subsamples != n_samples:
                    raise ValueError("Invalid parameter since n_subsamples != "
                                     "n_samples ({0} != {1}) while n_samples "
                                     "< n_features.".format(n_subsamples,
                                                            n_samples))
        else:
            # Default: smallest valid subset size, which maximizes robustness.
            n_subsamples = min(n_dim, n_samples)
        if self.max_subpopulation <= 0:
            raise ValueError("Subpopulation must be strictly positive "
                             "({0} <= 0).".format(self.max_subpopulation))
        # Cap the number of subsets at max_subpopulation.
        all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
        n_subpopulation = int(min(self.max_subpopulation, all_combinations))
        return n_subsamples, n_subpopulation

    def fit(self, X, y):
        """Fit linear model.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        random_state = check_random_state(self.random_state)
        X, y = check_X_y(X, y, y_numeric=True)
        n_samples, n_features = X.shape
        n_subsamples, self.n_subpopulation_ = self._check_subparams(n_samples,
                                                                    n_features)
        self.breakdown_ = _breakdown_point(n_samples, n_subsamples)

        if self.verbose:
            print("Breakdown point: {0}".format(self.breakdown_))
            print("Number of samples: {0}".format(n_samples))
            tol_outliers = int(self.breakdown_ * n_samples)
            print("Tolerable outliers: {0}".format(tol_outliers))
            print("Number of subpopulations: {0}".format(
                self.n_subpopulation_))

        # Determine indices of subpopulation: enumerate all subsets if that
        # is affordable, otherwise draw them at random without replacement.
        if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
            indices = list(combinations(range(n_samples), n_subsamples))
        else:
            indices = [choice(n_samples,
                              size=n_subsamples,
                              replace=False,
                              random_state=random_state)
                       for _ in range(self.n_subpopulation_)]

        # Solve one least-squares problem per subset, spread over n_jobs
        # workers, then take the spatial median of all the solutions.
        n_jobs = _get_n_jobs(self.n_jobs)
        index_list = np.array_split(indices, n_jobs)
        weights = Parallel(n_jobs=n_jobs,
                           verbose=self.verbose)(
            delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
            for job in range(n_jobs))
        weights = np.vstack(weights)
        self.n_iter_, coefs = _spatial_median(weights,
                                              max_iter=self.max_iter,
                                              tol=self.tol)

        # When an intercept is fit, it occupies the first coefficient slot
        # (see the ones column prepended in _lstsq).
        if self.fit_intercept:
            self.intercept_ = coefs[0]
            self.coef_ = coefs[1:]
        else:
            self.intercept_ = 0.
            self.coef_ = coefs

        return self
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py2@sklearn@linear_model@theil_sen.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolar/stream/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated validator package initializer.
# On Python < 3.7 (no module-level __getattr__, PEP 562) or during static
# type checking, import the validator classes eagerly; otherwise install
# lazy relative imports so each validator module is only loaded on first
# attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._token import TokenValidator
    from ._maxpoints import MaxpointsValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolar@stream@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "tardis-sn/tardis",
"repo_path": "tardis_extracted/tardis-main/tardis/util/__init__.py",
"type": "Python"
}
|
# Utilities for TARDIS
|
tardis-snREPO_NAMEtardisPATH_START.@tardis_extracted@tardis-main@tardis@util@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "AndreasKvammen/ML_dust_detection",
"repo_path": "ML_dust_detection_extracted/ML_dust_detection-main/README.md",
"type": "Markdown"
}
|
## Machine learning detection of dust impact signals<br>observed by the Solar Orbiter
[](https://doi.org/10.5281/zenodo.7404457)
This repository contains the code and data to reproduce some of the main results from the paper "Machine learning detection of dust impact signals observed by the Solar Orbiter", freely accessible at https://angeo.copernicus.org/articles/41/69/2023/angeo-41-69-2023.html
<img src="https://angeo.copernicus.org/articles/41/69/2023/angeo-41-69-2023-f09.png" width="300" height="300">
### Article Abstract
This article presents results from automatic detection of dust impact signals observed by the Solar Orbiter - Radio and Plasma Waves instrument.
A sharp and characteristic electric field signal is observed by the Radio and Plasma Waves instrument when a dust particle impacts the spacecraft at high velocity. In this way, ~5-20 dust impacts are daily detected as the Solar Orbiter travels through the interstellar medium. The dust distribution in the inner solar system is largely uncharted and statistical studies of the detected dust impacts will enhance our understanding of the role of dust in the solar system.
It is however challenging to automatically detect and separate dust signals from the plurality of other signal shapes for two main reasons: firstly, because spacecraft charging causes variable shapes of the impact signals, and secondly, because electromagnetic waves (such as solitary waves) may induce resembling electric field signals.
In this article, we propose a novel machine learning-based framework for detection of dust impacts. We consider two different supervised machine learning approaches: the support vector machine classifier and the convolutional neural network classifier. Furthermore, we compare the performance of the machine learning classifiers to the currently used on-board classification algorithm and analyze one and a half year of Radio and Plasma Waves instrument data.
Overall, we conclude that classification of dust impact signals is a suitable task for supervised machine learning techniques. In particular, the convolutional neural network achieves a 96% $\pm$ 1% overall classification accuracy and 94\% $\pm$ 2\% dust detection precision, a significant improvement to the currently used on-board classifier with 85\% overall classification accuracy and 75\% dust detection precision. In addition, both the support vector machine and the convolutional neural network detect more dust particles (on average) than the on-board classification algorithm, with 14\% $\pm$ 1\% and 16\% $\pm$ 7\% detection enhancement respectively.
The proposed convolutional neural network classifier (or similar tools) should therefore be considered for post-processing of the electric field signals observed by the Solar Orbiter.
## Installation
The scripts and functions in this repository can be used on your local machine by downloading a clone of this repository using: <br />
git clone https://github.com/AndreasKvammen/ML_dust_detection.git
This requires:
- GitHub (for cloning the repository)
- Python, Jupyter and Tensorflow working together on your local machine (for Convolutional Neural Network (CNN) Classification). For this work, Python 3.10.9, Jupyter Lab 3.5.3 and Tensorflow 2.9.0 were used.
- MatLab - the results were produced using MatLab2022b (for Support Vector Machine (SVM) Classification, CNN Classification of Triggered Snapshot WaveForm (TSWF) data and Dust impact rates), including the additional function subplot_tight that can be downloaded at: https://www.mathworks.com/matlabcentral/fileexchange/30884-controllable-tight-subplot
## Training and Testing Data
The folder "Data_train_and_test" contains the training and testing data in .csv format:
1. Test_data.csv - is the testing data with dimension (600 x 12288), 600 observations consisting of 12288 (3x4096) measurements - 4096 time steps observed at 3 antennas.
2. Test_labels.csv - is the binary testing labels with dimension (600x1) where the values (1 = dust) and (0 = no dust)
3. Train_data.csv - is the training data with dimension (2400x12288), 2400 observations consisting of 12288 (3x4096) measurements - 4096 time steps observed at 3 antennas.
4. Train_labels.csv - is the binary training labels with dimension (2400x1) where the values (1 = dust) and (0 = no dust)
## Support Vector Machine (SVM) Classification
The folder "SVM_train_and_test" contains the MatLab code to train and test the Support Vector Machine (SVM) classifier. The folder contain the following files:
1. extract_SVM_features.m - is the MatLab code to extract the 2D feature vector from the training and testing data.
2. SVM_dust_detection.m - is the MatLab script to run the training and testing of the SVM, based on the extracted feature vectors.
3. SVM_dust_detection.pdf - presents the MatLab code and resulting plots in pdf format
The SVM achieved a 94\%± 1\% average class-wise accuracy and a 90\%± 3\% precision, trained and tested over 10 runs with randomly selected training and testing data sets.
## Convolutional Neural Network (CNN) Classification
The folder "CNN_train_and_test" contains the JupyterLab (Tensorflow) code to train and test the Convolutional Neural Network (CNN) classifier, proposed for time series classification in Wang et al. (2017). The folder contains the following files:
1. CNN_dust_detection.ipynb - Jupyter Notebook that import the training and testing data and runs the training and testing of the CNN classifier.
2. run_GitHub.h5 - is the trained model in .h5 format
3. model_run_GitHub - is a folder containing the trained model in .pb format
The CNN achieved a 96\%± 1\% overall classification accuracy and a 94\%± 2\% precision, trained and tested over 10 runs with randomly selected training and testing data sets.
## CNN Classification of Triggered Snapshot WaveForm (TSWF) data
The folder "CDF_file_classification" contains the trained CNN classifier and the MatLab code to classify the Triggered Snapshot WaveForm (TSWF) data product (.cdf files). A sample script and the needed functions are included in order to classify a sample (.cdf) file "solo_L2_rpw-tds-surv-tswf-e_20211004_V01.cdf". The folder contains the following files:
1. model_run_GitHub - is a folder containing the trained model in .pb format
2. solo_L2_rpw-tds-surv-tswf-e_20211004_V01.cdf - is a sample file containing all triggered waveforms from October 4th, 2021, downloaded from https://rpw.lesia.obspm.fr/roc/data/pub/solo/rpw/data/L2/tds_wf_e/
3. cdf_CNN_classifier.m - is a MatLab script that automatically classifies the triggered waveforms, contained in file "solo_L2_rpw-tds-surv-tswf-e_20211004_V01.cdf", and plots the classification results using the trained CNN classifier in folder "model_run_GitHub"
4. classify_file.m - is a MatLab function that classifies a .cdf file
5. preprocess_cdf.m - is a MatLab function that imports the .cdf file and pre-processes it for classification
6. preprocess_signal.m - is a MatLab function that performs the 4-step pre-processing procedure
7. cdf_CNN_classifier.pdf - presents the MatLab code and resulting plots in pdf format
## Dust Impact Rates
The folder "Dust_impact_rates" contains the MatLab script and files to reproduce the daily dust impact rates (Figure 11 from the article)
1. dust_impact_rates.m - is a MatLab script to read the daily dust count (classified by the TDS, SVM and CNN approach) and convert it to impact rates using the RPW-TDS duty cycle. The script plots the daily impact rates along with the associated fit using the dust flux
model from Zaslavsky et al. (2021) with an included offset
2. TDS_ddc.txt - is a table containing the date and the daily dust count using the TDS approach
3. SVM_ddc.txt - is a table containing the date, the median of the daily dust count (using the SVM approach) and the standard deviation of the daily dust count (calculated using 10 different training/testing data splits)
4. CNN_ddc.txt - is a table containing the date, the median of the daily dust count (using the CNN approach) and the standard deviation of the daily dust count (calculated using 10 different training/testing data splits)
5. fits.csv - is a table containing the date, the RPW-TDS duty cycle and the TDS/SVM/CNN fit to data using the dust flux model from Zaslavsky et al. (2021) with an included offset
6. dust_impact_rates.pdf - presents the MatLab code and resulting plots in pdf format
## Citation
Please, consider citing the original paper if you are using this library in your research
```
@article{kvammen2023machine,
author = {Kvammen, A. and Wickstr{\o}m, K. and Kociscak, S. and Vaverka, J. and Nouzak, L. and Zaslavsky, A. and Rackovic Babic, K. and Gjelsvik, A. and Pisa, D. and Soucek, J. and Mann, I.},
title = {Machine learning detection of dust impact signals observed by the Solar Orbiter},
journal = {Annales Geophysicae},
volume = {41},
year = {2023},
number = {1},
pages = {69--86},
url = {https://angeo.copernicus.org/articles/41/69/2023/},
doi = {10.5194/angeo-41-69-2023}
}
```
## References
Wang, Z., Yan,W., and Oates, T.: Time series classification from scratch with deep neural networks: A strong baseline, in: 2017 International
joint conference on neural networks (IJCNN), pp. 1578–1585, IEEE, 2017.
Zaslavsky, A., Mann, I., Soucek, J., Czechowski, A., Píša, D., Vaverka, J., Meyer-Vernet, N., Maksimovic, M., Lorfèvre, E., Issautier, K., et al.: First dust measurements with the Solar Orbiter Radio and Plasma Wave instrument, Astronomy & Astrophysics, 656, A30, 2021.
|
AndreasKvammenREPO_NAMEML_dust_detectionPATH_START.@ML_dust_detection_extracted@ML_dust_detection-main@README.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/signal/spectral/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2017-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""FFT routines for GWpy
This sub-package provides PSD estimation routines based on functionality
provided by :mod:`scipy.signal`.
The methods provided here aren't meant to be called directly by the user,
but rather via instance methods of the :class:`~gwpy.timeseries.TimeSeries`
object.
"""
from ...utils.decorators import deprecated_function
from ._registry import (get_method, register_method)
from ._scipy import (
bartlett,
coherence,
csd,
median,
rayleigh,
welch,
)
from ._ui import (psd, spectrogram, average_spectrogram)
# register deprecated methods
from . import (
_pycbc, # deprecated
_lal, # deprecated
_median_mean, # deprecated
)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
@deprecated_function
def get_default_fft_api():
    """Return the name of the preferred FFT-API library.

    For gwpy > 0.14.0|1.0.0 the answer is always ``'scipy'``; this
    deprecated function is kept for backwards compatibility, and is
    referenced when setting default methods for
    `~gwpy.timeseries.TimeSeries` (amongst others).

    Examples
    --------
    >>> get_default_fft_api()
    'scipy'
    """
    api = "scipy"
    return api
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@signal@spectral@__init__.py@.PATH_END.py
|
{
"filename": "pipeTestServiceClient.py",
"repo_name": "mhammond/pywin32",
"repo_path": "pywin32_extracted/pywin32-main/win32/Demos/service/pipeTestServiceClient.py",
"type": "Python"
}
|
# A Test Program for pipeTestService.py
#
# Install and start the Pipe Test service, then run this test
# either from the same machine, or from another using the "-s" param.
#
# Eg: pipeTestServiceClient.py -s server_name Hi There
# Should work.
import os
import sys
import traceback
import pywintypes
import win32api
import winerror
# # Use "import *" to keep this looking as much as a "normal" service
# as possible. Real code shouldn't do this.
from win32event import * # nopycln: import
from win32file import * # nopycln: import
from win32pipe import * # nopycln: import
verbose = 0
# def ReadFromPipe(pipeName):
# Could (Should?) use CallNamedPipe, but this technique allows variable size
# messages (whereas you must supply a buffer size for CallNamedPipe!
# hPipe = CreateFile(pipeName, GENERIC_WRITE, 0, None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0)
# more = 1
# while more:
# hr = ReadFile(hPipe, 256)
# if hr==0:
# more = 0
# except win32api.error (hr, fn, desc):
# if hr==winerror.ERROR_MORE_DATA:
# data = dat
#
def CallPipe(fn, args):
    """Call ``fn(*args)``, retrying while the named pipe is busy.

    Makes up to 8 attempts, sleeping 5 seconds between attempts whenever the
    server reports ERROR_PIPE_BUSY.  Any other win32 error propagates
    immediately.

    Raises RuntimeError if every attempt found the pipe busy.
    """
    retryCount = 0
    while retryCount < 8:  # bounded number of retries
        retryCount += 1
        try:
            return fn(*args)
        except win32api.error as exc:
            if exc.winerror == winerror.ERROR_PIPE_BUSY:
                win32api.Sleep(5000)
                continue
            else:
                raise
    raise RuntimeError("Could not make a connection to the server")
def testClient(server, msg):
    """Send a single message to the pipe test service and report success."""
    pipe_name = "\\\\%s\\pipe\\PyPipeTest" % server
    if verbose:
        print("Sending", msg)
    reply = CallPipe(CallNamedPipe, (pipe_name, msg, 256, NMPWAIT_WAIT_FOREVER))
    if verbose:
        print("Server sent back '%s'" % reply)
    print("Sent and received a message!")
def testLargeMessage(server, size=4096):
    """Send a ``size``-byte message and check the echoed reply length.

    Prints a diagnostic if the number of bytes returned differs from the
    number sent; otherwise silent.
    """
    if verbose:
        print("Sending message of size %d" % (size))
    msg = "*" * size
    data = CallPipe(
        CallNamedPipe,
        ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 512, NMPWAIT_WAIT_FOREVER),
    )
    # Explicit inequality (was the obscure truthiness test "len(data) - size")
    if len(data) != size:
        print("Sizes are all wrong - send %d, got back %d" % (size, len(data)))
def stressThread(server, numMessages, wait):
    """Worker thread body: send ``numMessages`` 512-byte messages.

    On failure, prints the traceback and the index of the failing message.
    The ``wait`` event is always set on exit so the spawning thread does
    not hang waiting for this worker.
    """
    try:
        try:
            for i in range(numMessages):
                # Reply is discarded - this is purely a load generator.
                CallPipe(
                    CallNamedPipe,
                    (
                        "\\\\%s\\pipe\\PyPipeTest" % server,
                        "#" * 512,
                        1024,
                        NMPWAIT_WAIT_FOREVER,
                    ),
                )
        except:
            traceback.print_exc()
            print("Failed after %d messages" % i)
    finally:
        SetEvent(wait)
def stressTestClient(server, numThreads, numMessages):
    """Spawn ``numThreads`` workers, each sending ``numMessages`` messages,
    and block until every worker has signalled its completion event.
    """
    import _thread

    thread_waits = []
    for t_num in range(numThreads):
        # Note I could just wait on thread handles (after calling DuplicateHandle)
        # See the service itself for an example of waiting for the clients...
        # Auto-reset event, initially unsignalled; set by stressThread on exit.
        wait = CreateEvent(None, 0, 0, None)
        thread_waits.append(wait)
        _thread.start_new_thread(stressThread, (server, numMessages, wait))
    # Wait for all threads to finish.
    # NOTE(review): WaitForMultipleObjects accepts at most 64 handles -
    # confirm numThreads stays below that limit.
    WaitForMultipleObjects(thread_waits, 1, INFINITE)
def main():
    """Parse command-line options and run the selected pipe tests.

    Options:
      -s server        target machine (default ".", the local machine)
      -t thread_count  also run the stress test with this many threads
      -m msg_count     messages per stress thread (default 500)
      -v               verbose output
      -l               also run the large-message test
    Remaining arguments are joined to form the message to send.
    """
    import getopt

    server = "."
    thread_count = 0
    msg_count = 500
    try:
        opts, args = getopt.getopt(sys.argv[1:], "s:t:m:vl")
        for o, a in opts:
            if o == "-s":
                server = a
            if o == "-m":
                msg_count = int(a)
            if o == "-t":
                thread_count = int(a)
            if o == "-v":
                global verbose
                verbose = 1
            if o == "-l":
                testLargeMessage(server)
        # NOTE(review): with no positional args this sends an empty message -
        # confirm that is intended.
        msg = " ".join(args).encode("mbcs")
    except getopt.error as msg:
        # On a parse error "msg" is rebound to the exception; print usage.
        print(msg)
        my_name = os.path.split(sys.argv[0])[1]
        print(
            "Usage: %s [-v] [-s server] [-t thread_count=0] [-m msg_count=500] msg ..."
            % my_name
        )
        print(" -v = verbose")
        print(
            " Specifying a value for -t will stress test using that many threads."
        )
        return
    testClient(server, msg)
    if thread_count > 0:
        print(
            "Spawning %d threads each sending %d messages..."
            % (thread_count, msg_count)
        )
        stressTestClient(server, thread_count, msg_count)
if __name__ == "__main__":
main()
|
mhammondREPO_NAMEpywin32PATH_START.@pywin32_extracted@pywin32-main@win32@Demos@service@pipeTestServiceClient.py@.PATH_END.py
|
{
"filename": "OTF.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/ObitSD/python/OTF.py",
"type": "Python"
}
|
""" Python Obit "On-the-fly" (OTF) single dish data class
This class contains single dish data and allows access.
An ObitOTF is the front end to a persistent disk resident structure.
There maybe (usually are) associated tables which either describe
the data or contain calibration and/or editing information.
OTF Members with python interfaces:
List - used to pass instructions to processing
Desc - Astronomical labeling of the image
TableList - List of tables attached
RecBuf - memory pointer into I/O Buffer
Additional Functions are available in OTFUtil, OTFSoln2Cal, OTFGetSoln,
OTFGetAtmCor, CleanOTF
There are a number of utility routines in this module which take
control parameters in the form of python dictionaries
(e.g. AtmCal, Clean, Concat, Image, ResidCal, Soln2Cal, Split)
which each have defined dictionaries with default values and names of the
routine and "Input" appended.
Care should he taken not to change the data types of the entries in these
dictionaries.
These dictionaries can be listed in semi human readable form using the OTF.input
function.
Data selection, calibration and editing parameters on List member
"doCalSelect" bool (1,1,1) Select/calibrate/edit data?
"doCalib" int (1,1,1) >0 -> calibrate,
"gainUse" int (1,1,1) OTFSoln/OTFCal table version number, 0-> use highest
"doBand" int (1,1,1) >0 -> calibrate,
"BPVer" int (1,1,1) OTFBP table version number, 0-> use highest
"flagVer" int (1,1,1) OTFFlag table version, 0-> use highest, <0-> none
"BChan" int (1,1,1) First spectral channel selected. [def all]
"EChan" int (1,1,1) Highest spectral channel selected. [def all]
"Targets" string (?,?,1) Target names selected. [def all]
"timeRange" float (2,1,1) Selected timerange in days. [def all]
"Scans" int (2,1,1) Lowest and highest selected scan numbers. [def all]
"Feeds" int (?,1,1) a list of selected feed numbers, [def all.]
"keepCal" bool (1,1,1) If true keep cal-on data, otherwise drop [def True.]
"""
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2004-2013
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: bcotton@nrao.edu.
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Obit On the Fly (OTF) calibration and Imaging
import Obit, OErr, Image, ImageDesc, FArray, Table, InfoList, OTFDesc, types
import CleanOTF, OTFUtil, OTFRec, TableList, string
import OData, OTFArrayGeom
# Python shadow class to ObitOTF class
# class name in C
myClass = "ObitOTF"
class OTF(OData.OData):
""" Python Obit "On-the-fly" (OTF) single dish data class
This class contains single dish data and allows access.
An ObitOTF is the front end to a persistent disk resident structure.
There maybe (usually are) associated tables which either describe
the data or contain calibration and/or editing information.
OTF Members with python interfaces:
List - used to pass instructions to processing
Desc - Astronomical labeling of the image
TableList - List of tables attached
RecBuf - memory pointer into I/O Buffer
"""
def __init__(self,name) :
self.this = Obit.new_OTF(name)
self.thisown = 1
self.myClass = myClass
def __del__(self):
if Obit!=None:
Obit.delete_OTF(self.this)
def __setattr__(self,name,value):
if name == "me" :
# Out with the old
Obit.OTFUnref(Obit.OTF_me_get(self.this))
# In with the new
Obit.OTF_me_set(self.this,value)
return
self.__dict__[name] = value
def __getattr__(self,name):
if name == "me" :
return Obit.OTF_me_get(self.this)
# Functions to return members
if name=="List":
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
out = InfoList.InfoList()
out.me = Obit.InfoListUnref(out.me)
out.me = Obit.OTFGetList(self.cast(myClass))
return out
if name=="TableList":
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
out = TableList.TableList("TL")
out.me = Obit.TableListUnref(out.me)
out.me = Obit.OTFGetTableList(self.cast(myClass))
return out
if name=="Desc":
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
out = OTFDesc.OTFDesc("None")
out.me = Obit.OTFGetDesc(self.cast(myClass))
return out
if name=="ArrayGeom":
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
out = OTFArrayGeom.OTFArrayGeom("None")
out.me = Obit.OTFGetArrayGeom(self.cast(myClass))
return out
if name=="RecBuf":
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
return Obit.OTFGetRecBuf(self.cast(myClass))
raise AttributeError,name
def __repr__(self):
if self.__class__ != OTF:
return
if self==None:
return "None"
return "<C OTF instance> " + Obit.OTFGetName(self.me)
def cast(self, toClass):
""" Casts object pointer to specified class
self = object whose cast pointer is desired
toClass = Class string to cast to ("ObitOTF")
"""
# Get pointer with type of this class
out = self.me
out = out.replace(self.myClass, toClass)
return out
# end cast
def NewTable (self, access, tabType, tabVer, err,
numDet=1, numPoly=0, numParm=0):
""" Return the specified associated table
self = Python OTF object
access = access code 1=READONLY, 2=WRITEONLY, 3=READWRITE
tabType = Table type, e.g. "OTFSoln"
tabVer = table version, if > 0 on input that table returned,
if 0 on input, the highest version is used.
err = Python Obit Error/message stack
Optional parameters, values only used if table created
numDet = Number of Detectors (OTFCal, OTFSoln, OTFScanData)
numPoly = Number of polynomial terms (OTFCal, OTFSoln)
numParm = Number of model parameters (OTFModel)
"""
inOData = self
# Checks
if not self.ODataIsA():
raise TypeError,"input MUST be a Python Obit OData"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
outTable = Table.Table("None")
id = inOData.cast(OData.myClass) # Cast to OData
outTab = None
if tabType=="OTFArrayGeom":
outTab = Obit.TableOTFArrayGeom(id, [tabVer], access, tabType, err.me)
elif tabType=="OTFCal":
outTab = Obit.TableOTFCal(id, [tabVer], access, tabType, numDet, numPoly, err.me)
elif tabType=="OTFFlag":
outTab = Obit.TableOTFFlag(id, [tabVer], access, tabType, err.me)
elif tabType=="OTFIndex":
outTab = Obit.TableOTFIndex(id, [tabVer], access, tabType, err.me)
elif tabType=="OTFModel":
outTab = Obit.TableOTFModel(id, [tabVer], access, tabType, numParm, err.me)
elif tabType=="OTFScanData":
outTab = Obit.TableOTFScanData(id, [tabVer], access, tabType, numDet, err.me)
elif tabType=="OTFSoln":
outTab = Obit.TableOTFSoln(id, [tabVer], access, tabType, numDet, numPoly, err.me)
elif tabType=="SkyModel":
outTab = Obit.TableSkyModel(id, [tabVer], access, tabType, numDet, numPoly, err.me)
elif tabType=="OTFTarget":
outTab = Obit.TableOTFTarget(id, [tabVer], access, tabType, err.me)
else: # Generic
ret = Obit.newODataTable (inOData.cast(myClass), access, tabType, [tabVer], err.me)
# Table and version returned in a list
outTab = ret[0]
# Did it work? error may not be set
if (outTab.__class__!=str) or (string.find(outTab,"_ObitTable_p") <0):
OErr.printErrMsg(err, "Error getting OData data Table")
raise RuntimeError,"Failed to extract "+tabType+" table from "+inOData.GetName()
# Error?
if err.isErr:
OErr.printErrMsg(err, "Error getting OData data Table")
# Test validity of outTab
if not Obit.TableIsA(outTab):
OErr.printErrMsg(err, "Error getting OData data Table")
raise RuntimeError,"Failed to extract "+tabType+" table from "+inOData.GetName()
# Open and close to fully instantiate - should exist
outTable.me = outTab
Table.PFullInstantiate (outTable, access, err)
# Make sure that it worked - the output should be a table
if not Table.PIsA(outTable):
raise RuntimeError,"Failed to extract "+tabType+" table from "+inOData.GetName()
return outTable
# end NewTable
def Open (self, access, err):
""" Open a OTF data persistent (disk) form
Returns 0 on success, else failure
self = Python OTF object
access = access READONLY (1), WRITEONLY (2), READWRITE(3)
err = Python Obit Error/message stack
"""
inOTF = self
# Checks
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
ret = Obit.OTFOpen(inOTF.cast(myClass), access, err.me)
if err.isErr:
OErr.printErrMsg(err, "Error Opening OTF data")
return ret
# end Open
def Read (self, err):
""" Read a OTF persistent (disk) form
Reads into buffer attached to OTF data, use VisBuf for access
Returns 0 on success, else failure
self = Python OTF object
err = Python Obit Error/message stack
"""
inOTF = self
# Checks
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
ret = Obit.OTFRead (inOTF.cast(myClass), err.me)
if err.isErr:
OErr.printErrMsg(err, "Error Reading OTF data")
return ret
# end Read
def Write (self, err):
""" Write a OTF persistent (disk) form
Writes buffer attached to OTF data, use VisBuf for access
returns 0 on success, else failure
self = Python OTF object
err = Python Obit Error/message stack
"""
inOTF = self
# Checks
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
ret = Obit.OTFWrite (inOTF.cast(myClass), err.me)
if err.isErr:
OErr.printErrMsg(err, "Error Writing OTF data")
return ret
# end Write
def ReadRec (self, err):
""" Read a OTF persistent (disk) form
Returns OTFRec structure from next record
self = Python OTF object
err = Python Obit Error/message stack
"""
# Checks
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
ret = OTFRec.PGet(self, err)
if err.isErr:
OErr.printErrMsg(err, "Error Reading OTF data")
return ret
# end ReadRec
def WriteRec (self, outRec, err):
""" Write a OTF persistent (disk) form
Writes buffer attached to OTF data, use VisBuf for access
returns 0 on success, else failure
self = Python OTF object
outRec = OTFRec structure to write
err = Python Obit Error/message stack
"""
# Checks
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
OTFRec.PSet(outRec, self, err)
if err.isErr:
OErr.printErrMsg(err, "Error Writing OTF data")
# end Write
def Close (self, err):
""" Close a OTF persistent (disk) form
returns 0 on success, else failure
self = Python OTF object
err = Python Obit Error/message stack
"""
inOTF = self
# Checks
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
ret = Obit.OTFClose (inOTF.cast(myClass), err.me)
if err.isErr:
OErr.printErrMsg(err, "Error Closing OTF data")
return ret
# end Close
def Copy (self, outOTF, err):
""" Make a deep copy of input object.
Makes structure the same as self, copies data, tables
self = Python OTF object to copy
outOTF = Output Python OTF object, must be defined
err = Python Obit Error/message stack
"""
# Checks
if not self.OTFIsA():
raise TypeError,"self MUST be a Python Obit OTF"
if not outOTF.OTFIsA():
raise TypeError,"outOTF MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
Obit.OTFCopy (self.cast(myClass), outOTF.cast(myClass), err.me)
if err.isErr:
OErr.printErrMsg(err, "Error copying OTF data")
# end Copy
def CopyList (self, outOTF, ScanList, err):
""" Make a deep copy of input object with a list of scans.
Makes structure the same as self, copies data, tables
self = Python OTF object to copy
outOTF = Output Python OTF object, must be defined
ScanList = List of scan numbers to copy
err = Python Obit Error/message stack
"""
# Checks
if not self.OTFIsA():
raise TypeError,"self MUST be a Python Obit OTF"
if not outOTF.OTFIsA():
raise TypeError,"outOTF MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
# copy first scan
inInfo = self.List
inInfo.set("Scans",[ScanList[0],ScanList[0]])
self.Copy(outOTF, err)
if err.isErr:
OErr.printErrMsg(err, "Error copying OTF data")
# Concatenate rest
ConcatInput["InData"] = self
ConcatInput["OutData"] = outOTF
for scan in ScanList[1:]:
inInfo.set("Scans",[scan,scan])
Concat(err, ConcatInput)
if err.isErr:
OErr.printErrMsg(err, "Error copying OTF data")
# end CopyList
def Clone (self, outOTF, err):
""" Make a copy of a object but do not copy the actual data
This is useful to create an OTF similar to the input one.
self = Python OTF object
outOTF = Output Python OTF object, must be defined
err = Python Obit Error/message stack
"""
# Checks
if not self.OTFIsA():
raise TypeError,"self MUST be a Python Obit OTF"
if not outOTF.OTFIsA():
raise TypeError,"outOTF MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
Obit.OTFClone (self.cast(myClass), outOTF.cast(myClass), err.me)
if err.isErr:
OErr.printErrMsg(err, "Error copying OTF data")
# end Clone
def Scratch (self, err):
""" Create a scratch file suitable for accepting the data to be read from self
A scratch OTF is more or less the same as a normal OTF except that it is
automatically deleted on the final unreference.
self = Python OTF object
err = Python Obit Error/message stack
"""
################################################################
# Checks
if not self.OTFIsA():
raise TypeError,"self MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
#
outOTF = OTF("None")
outOTF.me = Obit.OTFScratch (self.cast(myClass), err.me);
if err.isErr:
OErr.printErrMsg(err, "Error creating scratch file")
outOTF.Info(err) # Update info
return outOTF
# end Scratch
def Header (self, err):
""" Write image header on output
self = Python Obit OTF object
err = Python Obit Error/message stack
"""
PHeader (self, err)
# end Header
def Info (self, err):
""" Get underlying data file info
self = Python Obit OTF object
err = Python Obit Error/message stack
"""
POTFInfo(self, err)
# end Info
def UpdateDesc (self, err, Desc=None):
""" Update any disk resident structures about descriptor
self = Python OTF object
err = Python Obit Error/message stack
Desc = Descriptor, if None then use current descriptor
Contents can be accessed throuth the Dict member
"""
# Checks
inOTF = self
if not self.OTFIsA():
raise TypeError,"input MUST be a Python Obit OTF"
#
# if Desc=None make copy of current contents
if Desc == None:
d = inOTF.Desc.Dict
else:
d = Desc.Dict
# Open for write
inOTF.Open(READWRITE,err) # Open
inOTF.Desc.Dict = d # Update header
Obit.OTFDirty(inOTF.cast(myClass)) # force update
inOTF.Close(err) # Close to update
# end UpdateDesc
    def OTFIsA (self):
        """ Tells if input really a Python Obit OTF
        return true, false (1,0)
        self = Python OTF object
        """
        ################################################################
        # Allow derived types: cast() presents the proper C pointer type
        return Obit.OTFIsA(self.cast(myClass))
    # end OTFIsA
# End of class member functions (i.e. invoked by x.func())
err=OErr.OErr()  # Module-level default error/message stack
# Commonly used, dangerous variables (shared, mutable module state)
dim=[1,1,1,1,1]           # scratch dimension array for InfoList calls
blc=[1,1,1,1,1,1,1]       # default bottom-left corner (1-rel, full image)
trc=[0,0,0,0,0,0,0]       # default top-right corner (0 => whole axis)
# Symbolic names for access codes
READONLY = OData.READONLY # 1
WRITEONLY = OData.WRITEONLY # 2
READWRITE = OData.READWRITE # 3
def ObitName(ObitObject):
    """Return name of an Obit object or input if not an Obit Object

    ObitObject = object to query.  Anything without a "me" shadow-pointer
                 member (strings, numbers, ...) is returned unchanged, as
                 the docstring always promised; previously such inputs
                 crashed with AttributeError.
    """
    ################################################################
    out = ObitObject    # returned unchanged if not an Obit object
    # Non-Obit objects have no "me" shadow pointer
    if not hasattr(ObitObject, "me"):
        return out
    # Dispatch on the SWIG pointer-type string embedded in "me"
    if ObitObject.me.find("_ObitImage_p") >= 0:
        return Obit.ImageGetName(ObitObject.me)
    if ObitObject.me.find("_ObitOTF_p") >= 0:
        return Obit.OTFGetName(ObitObject.me)
    if ObitObject.me.find("_ObitTable_p") >= 0:
        return Obit.TableGetName(ObitObject.me)
    if ObitObject.me.find("_Obit_p") >= 0:
        return Obit.GetName(ObitObject.me)
    return out
# end ObitName
def input(inputDict):
    """ Print the contents of an input Dictionary
    inputDict = Python Dictionary containing the parameters for a routine
    There should be a member of the dictionary ('structure') with a value
    being a list containing:
    1) The name for which the input is intended (string)
    2) a list of tuples consisting of (parameter name, doc string)
    with an entry for each parameter in the dictionary.
    The display of the the inputs dictionary will be in the order of
    the tuples and display the doc string after the value.
    An example:
    Soln2CalInput={'structure':['Soln2Cal',[('InData','Input OTF'),
    ('soln','input soln table version'),
    ('oldCal','input cal table version, -1=none'),
    ('newCal','output cal table')]],
    'InData':None, 'soln':0, 'oldCal':-1, 'newCal':0}

    NOTE: this function shadows the python builtin input() at module scope.
    """
    ################################################################
    structure = inputDict['structure'] # Structure information
    print 'Inputs for ',structure[0]
    # Display parameters in the order given by the structure tuples
    for k,v in structure[1]:
        print '  ',k,' = ',inputDict[k],' : ',v
    # end input
def newPOTF(name, filename, disk, exists, err, nrec=1000):
    """ Create and initialize an OTF structure

    Create, set initial access information (nrec records)
    and if exists verifies the file.
    Returns the Python OTF object
    name = name desired for object (labeling purposes)
    filename = name of FITS file
    disk = FITS directory number
    exists = if true then the file is opened and closed to verify
    err = Python Obit Error/message stack
    nrec = Number of records read/written per call
    """
    ################################################################
    otf = OTF(name)
    # Bail out early if the error stack already has a condition
    if err.isErr:
        return otf
    # Attach FITS file info
    Obit.OTFSetFITS(otf.me, nrec, disk, filename, err.me)
    if exists:
        # Fully instantiate to verify the file really exists
        Obit.OTFfullInstantiate (otf.me, READWRITE, err.me)
    # show any errors
    OErr.printErrMsg(err, "newPOTF: Error verifying file")
    # Record file identity on the Python object
    otf.FileType = 'FITS'
    otf.FileName = filename
    otf.Fname    = filename
    otf.Disk     = disk
    otf.Otype    = "OTF"
    return otf
# end newPOTF
def ClearCal(inOTF, err):
    """ Delete calibration tables on an OTF

    Removes all OTFSoln and OTFCal tables
    inOTF = Extant Python OTF
    err = Python Obit Error/message stack
    """
    ################################################################
    if err.isErr:   # existing error?
        return
    # Open/close to ensure the table list is up to date
    inOTF.Open(READWRITE, err)
    inOTF.Close(err)
    OErr.printErrMsg(err, "ClearCal: Error verifying file")
    # Zap each table type from the highest version down
    for tabType in ["OTFCal", "OTFSoln"]:
        ver = Obit.OTFGetHighVer(inOTF.cast(myClass), tabType)
        while ver > 0:
            Obit.OTFZapTable (inOTF.cast(myClass), tabType, ver, err.me)
            ver -= 1
        OErr.printErrMsg(err, "ClearCal: Error removing "+tabType)
    # end ClearCal
# Define AtmCal input dictionary; see AtmCal() for the meaning of each entry
AtmCalInput={'structure':['AtmCal',[('InData','Input OTF'),
                                    ('solInt','Solution interval(sec)'),
                                    ('Tau0','Zenith opacity'),
                                    ('minEl','Min elev. (deg)'),
                                    ('aTemp','Atm. temperature (K)'),
                                    ('tRx','Recvr. Temp. (K)'),
                                    ('calJy','Cal. signal in Jy'),
                                    ('raOff','RA pointing offset (deg)'),
                                    ('decOff','Dec pointing offset (deg)')]],
             'InData':None, 'solInt':10000.0, 'Tau0':0.0, 'minEl':0.0,
             'aTemp':[0.0,0.0], 'tRx':[0.0,0.0], 'calJy':[1.0,1.0],
             'raOff':0.0, 'decOff':0.0}
def AtmCal (err, input=AtmCalInput):
    """ Basic atmospheric calibration.

    Applies Atmospheric calibration and optionally gross pointing offsets
    Returns the version number of the Soln Table on success.
    err = Python Obit Error/message stack
    input = input parameter dictionary
    Input dictionary entries:
    InData = input Python OTF to calibrate
    solInt = solution interval (sec)
    Tau0 = zenith opacity (nepers)
    minEl = minimum elevation (deg)
    aTemp = effective atmospheric temperature per detector (K)
    tRx = Receiver temperature per detector (K)
    calJy = Noise cal value in Jy per detector
    raOff = RA pointing offset (deg)
    decOff = Dec pointing offset (deg)
    """
    ################################################################
    # Get input parameters
    inData = input["InData"]
    solInt = input["solInt"]/86400.0 # convert to days
    Tau0 = input["Tau0"]
    minEl = input["minEl"]
    aTemp = input["aTemp"]
    tRx = input["tRx"]
    calJy = input["calJy"]
    RAoff = input["raOff"]
    Decoff = input["decOff"]
    # Checks
    if not PIsA(inData):
        raise TypeError("AtmCal: Bad input OTF")
    if err.isErr: # existing error? (guard consistent with sibling routines)
        return None
    #
    # Set calibration parameters on the OTF's info list
    inInfo = inData.List;
    inInfo.set("solInt", solInt)
    inInfo.set("Tau0", Tau0)
    inInfo.set("minEl", minEl)
    inInfo.set("RAoff", RAoff)
    inInfo.set("Decoff", Decoff)
    inInfo.set("aTemp", aTemp)
    inInfo.set("tRx", tRx)
    inInfo.set("calJy", calJy)
    #
    # Determine calibration (returns an Obit table object)
    solnTable = Obit.OTFGetAtmCor (inData.me, inData.me, err.me);
    #
    # show any errors
    OErr.printErrMsg(err, "AtmCal: Error determining calibration")
    #
    # Get table version number
    tabVer = Obit.TableGetVer(solnTable)
    #
    # Cleanup Obit objects
    solnTable = Obit.TableUnref (solnTable)
    #
    return tabVer
# end AtmCal
# Define PolyBLCal input dictionary; see PolyBLCal() for the meaning of each entry
PolyBLCalInput={'structure':['PolyBLCal',[('InData','Input OTF'),
                                          ('solInt','Solution interval(sec)'),
                                          ('order','polynomial order'),
                                          ('gainUse','cal. table version, -1=none'),
                                          ('flagVer','flag table version, -1=none'),
                                          ('minEl','Min elev. (deg)')]],
                'InData':None, 'solInt':10.0, 'order':1, 'minEl':0.0,
                'gainUse':-1, 'flagVer':-1}
def PolyBLCal (err, input=PolyBLCalInput):
    """ Polynomial baseline fit to residual data

    Each solution interval in a scan is median averaged
    (average of 9 points around the median) and then a polynomial fitted.
    Returns the version number of the Soln Table on success.
    err = Python Obit Error/message stack
    input = input parameter dictionary
    Input dictionary entries:
    InData = input Python OTF to calibrate
    solInt = solution interval (sec)
    order = polynomial order
    minEl = minimum elevation (deg)
    gainUse = version number of prior table (Soln or Cal) to apply, -1 is none
    flagVer = version number of flagging table to apply, -1 is none
    """
    ################################################################
    # Get input parameters
    inData = input["InData"]
    solInt = input["solInt"]/86400.0 # convert to days
    order = input["order"]
    minEl = input["minEl"]
    gainUse = input["gainUse"]
    flagVer = input["flagVer"]
    # Checks
    if not PIsA(inData):
        raise TypeError("PolyBLCal: Bad input OTF")
    if err.isErr: # existing error?
        return None
    #
    # Set calibration parameters
    dim[0] = 1; dim[1] = 1
    inInfo = inData.List;
    InfoList.PAlwaysPutFloat(inInfo, "solInt", dim, [solInt]);
    InfoList.PAlwaysPutInt(inInfo, "order", dim, [order]);
    InfoList.PAlwaysPutFloat(inInfo, "minEl", dim, [minEl]);
    InfoList.PAlwaysPutInt (inInfo, "flagVer", dim, [flagVer])
    InfoList.PAlwaysPutInt (inInfo, "gainUse", dim, [gainUse])
    # doCalib: 1 => apply prior calibration, -1 => none
    if gainUse>=0:
        itemp = 1
    else:
        itemp = -1
    InfoList.PAlwaysPutInt (inInfo, "doCalib", dim, [itemp])
    # Select/calibrate on read if any calibration or flagging requested
    # (removed dead typo'd local "doClaSelect" which was never used)
    doCalSelect = (gainUse >= 0) or (flagVer>0)
    InfoList.PAlwaysPutBoolean (inInfo, "doCalSelect", dim, [doCalSelect])
    #
    # Determine calibration (returns an Obit table object)
    solnTable = Obit.OTFGetSolnPolyBL (inData.me, inData.me, err.me);
    #
    # show any errors
    OErr.printErrMsg(err, "PolyBLCal: Error determining calibration")
    #
    # Get table version number
    tabVer = Obit.TableGetVer(solnTable)
    #
    # Cleanup Obit objects
    solnTable = Obit.TableUnref (solnTable)
    #
    return tabVer
# end PolyBL
# Define MBBaseCal input dictionary; see MBBaseCal() for the meaning of each entry
MBBaseCalInput={'structure':['MBBaseCal',[('InData','Input OTF'),
                                          ('solInt','Solution interval(sec)'),
                                          ('order','polynomial order'),
                                          ('gainUse','cal. table version, -1=none'),
                                          ('flagVer','flag table version, -1=none'),
                                          ('clipSig','data outside of +/- clipsig ignored [def large]'),
                                          ('plotDet','Detector number (1-rel) to plot per scan [def =-1 = none]'),
                                          ('minEl','Min elev. (deg)')]],
                'InData':None, 'solInt':10.0, 'order':1, 'clipSig':1.0e20, 'plotDet':-1,'minEl':0.0,
                'gainUse':-1, 'flagVer':-1}
def MBBaseCal (err, input=MBBaseCalInput):
    """ Continuum baseline fitting for multibeam instrument.

    Fit one term, time variable common, atmospheric polynomial and a single offset
    per detector.
    Since the different detectors each have an individual multiplicative term, the
    Atmospheric + offset are places in the the detector's additive term and the
    polynomial is set to zero.
    Scans in excess of 5000 samples will be broken into several.
    Returns the version number of the Soln Table on success.
    err = Python Obit Error/message stack
    input = input parameter dictionary
    Input dictionary entries:
    InData = input Python OTF to calibrate
    solInt = solution interval (sec), entries 4 times per SolInt
    order = polynomial order
    clipSig = Data outside of +/- clipsig ignored [def large]
    plotDet = Detector number (1-rel) to plot per scan [def =-1 = none]
    minEl = minimum elevation (deg)
    gainUse = version number of prior table (Soln or Cal) to apply, -1 is none
    flagVer = version number of flagging table to apply, -1 is none
    """
    ################################################################
    # Get input parameters
    # Bug fix: key is "solInt" (was input["Solint"] -> KeyError) and the
    # variable was assigned as "Solint" but used below as "solInt" -> NameError
    inData = input["InData"]
    solInt = input["solInt"]/86400.0 # convert to days
    order = input["order"]
    minEl = input["minEl"]
    clipSig = input["clipSig"]
    plotDet = input["plotDet"]
    gainUse = input["gainUse"]
    flagVer = input["flagVer"]
    # Checks
    if not PIsA(inData):
        raise TypeError("MBBaseCal: Bad input OTF")
    if err.isErr: # existing error?
        return None
    #
    # Set calibration parameters
    dim[0] = 1; dim[1] = 1
    inInfo = inData.List;
    InfoList.PAlwaysPutFloat(inInfo, "solInt", dim, [solInt]);
    InfoList.PAlwaysPutFloat(inInfo, "clipSig", dim, [clipSig]);
    InfoList.PAlwaysPutFloat(inInfo, "minEl", dim, [minEl]);
    InfoList.PAlwaysPutInt(inInfo, "plotDet", dim, [plotDet]);
    # NOTE(review): key is "Order" here but "order" in PolyBLCal -- presumably
    # what OTFGetSolnMBBase expects; confirm against the C routine before changing.
    InfoList.PAlwaysPutInt(inInfo, "Order", dim, [order]);
    InfoList.PAlwaysPutInt(inInfo, "flagVer", dim, [flagVer])
    InfoList.PAlwaysPutInt(inInfo, "gainUse", dim, [gainUse])
    # doCalib: 1 => apply prior calibration, -1 => none
    if gainUse>=0:
        itemp = 1
    else:
        itemp = -1
    InfoList.PAlwaysPutInt (inInfo, "doCalib", dim, [itemp])
    # Select/calibrate on read if any calibration or flagging requested
    # (removed dead typo'd local "doClaSelect" which was never used)
    doCalSelect = (gainUse >= 0) or (flagVer>0)
    InfoList.PAlwaysPutBoolean (inInfo, "doCalSelect", dim, [doCalSelect])
    #
    # Determine calibration (returns an Obit table object)
    solnTable = Obit.OTFGetSolnMBBase (inData.me, inData.me, err.me);
    #
    # show any errors
    OErr.printErrMsg(err, "MBBaseCal: Error determining calibration")
    #
    # Get table version number
    tabVer = Obit.TableGetVer(solnTable)
    #
    # Cleanup Obit objects
    solnTable = Obit.TableUnref (solnTable)
    #
    return tabVer
# end MBBase
# Define ResidCal input dictionary; see ResidCal() for the meaning of each entry
ResidCalInput={'structure':['ResidCal',[('InData','Input OTF'),
                                        ('Model','Model FArray'),
                                        ('ModelDesc','Model Descriptor'),
                                        ('solType','Soln type, Filter,Offset,Gain'),
                                        ('solInt','Solution interval (sec)'),
                                        ('minEl','Minimum elev (deg)'),
                                        ('minRMS','min. RMS residual (gain)'),
                                        ('minFlux','Minimum flux density in Model to use'),
                                        ('maxFlux','Maximum flux density in Model to use'),
                                        ('Clip','Clipping level for residuals'),
                                        ('calJy','Cal. signal in Jy'),
                                        ('gainUse','cal. table version, -1=none'),
                                        ('flagVer','flag table version, -1=none')]],
               'InData':None, 'Model':None, 'ModelDesc':None, 'solType':"Filter",
               'solInt':10000.0, 'minEl':0.0, 'minRMS':0.0, 'minFlux':-10000.0,'maxFlux':None,
               'Clip':1.0e20,'calJy':[1.0,1.0], 'gainUse':-1, 'flagVer':-1};
def ResidCal (err, input=ResidCalInput):
""" Determine residual calibration for an OTF.
Determines a solution table for an OTF by one of a number of techniques using
residuals from a model image.
Returns the version number of the Soln Table on success.
err = Python Obit Error/message stack
input = input parameter dictionary
Input dictionary entries:
InData = Python input OTF to calibrate
Model = Python input model FArray, "None" means do not subtract model image
ModelDesc= Python input model ImageDesc
minFlux = Minimum brightness in model
solInt = solution interval (sec)
solType = solution type:
"Gain" solve for multiplicative term from "cals" in data.
(solInt, minRMS, minEl, calJy)
"Offset" Solve for additive terms from residuals to the model.
(solInt, minEl)
"GainOffset" Solve both gain and offset
(solInt, minRMS, minEl, calJy)
"Filter" Additive terms from filters residuals to the model.
(solInt, minEl)
"MultiBeam" Multibeam solution
(solInt, minEl)
minEl = minimum elevation (deg)
minRMS = Minimum RMS residual to solution
calJy = Noise cal value in Jy per detector
gainUse = version number of prior table (Soln or Cal) to apply, -1 is none
flagVer = version number of flagging table to apply, -1 is none
"""
################################################################
# Get input parameters
inData = input["InData"]
model = input["Model"]
modDesc = input["ModelDesc"]
calType = input["solType"]
solInt = input["solInt"]/86400.0 # convert to days
minEl = input["minEl"]
minRMS = input["minRMS"]
minFlux = input["minFlux"]
maxFlux = input["maxFlux"]
Clip = input["Clip"]
calJy = input["calJy"]
gainUse = input["gainUse"]
flagVer = input["flagVer"]
# Checks
if not PIsA(inData):
raise TypeError,'ResidCal: Bad input OTF'
if err.isErr: # existing error?
return None
#
# Apply prior calibration as requested
inInfo = inData.List
inInfo.set("flagVer", flagVer)
inInfo.set("gainUse", gainUse)
if gainUse>=0:
itemp = 1
else:
itemp = -1
inInfo.set("doCalib", itemp)
doCalSelect = (gainUse >= 0) or (flagVer>0)
doClaSelect = True
inInfo.set("doCalSelect", doCalSelect)
#
if model != None:
zapIt = True # A scratch file - delete
# clip image below minFlux
print "Clip model below ", minFlux
FArray.PClip (model, minFlux, 1.0e20, minFlux)
if maxFlux:
print "Clip model above ", maxFlux
FArray.PClip (model, -1.0e20, maxFlux, maxFlux)
# Scratch file for residual data
scrData = PScratch (inData, err)
OErr.printErrMsg(err, "ResidCal: Error creating scratch file")
#
# Subtract image from inData to scrData
OTFUtil.PSubImage(inData, scrData, model, modDesc, err)
# error check
OErr.printErrMsg(err, "ResidCal: Error subtracting model")
else:
scrData = inData # use input data
zapIt = False # Not a scratch file - don't delete
#
# Set calibration parameters
scrInfo = scrData.List
scrInfo.set("solInt", solInt)
scrInfo.set("minRMS", minRMS)
scrInfo.set("minEl", minEl)
scrInfo.set("Clip", Clip)
scrInfo.set("calJy", calJy)
scrInfo.set("calType", calType)
#
# Determine calibration by type
if calType == "Filter": # Time filtering
solnTable = Obit.OTFGetSolnFilter (scrData.me, inData.me, err.me)
elif calType == "MultiBeam": # Multibeam
solnTable = Obit.OTFGetSolnCal (scrData.me, inData.me, err.me)
else: # offset or gain
solnTable = Obit.OTFGetSolnGain (scrData.me, inData.me, err.me)
#
# show any errors
OErr.printErrMsg(err, "ResidCal: Error determining calibration")
#
# Get table version number
tabVer = Obit.TableGetVer(solnTable)
#
# Cleanup Obit Objects
if zapIt:
scrData = PZap(scrData, err) # Zap scratch file
solnTable = Obit.TableUnref (solnTable)
#
return tabVer
# end ResidCal
# Define Soln2Cal input dictionary; see Soln2Cal() for the meaning of each entry
Soln2CalInput={'structure':['Soln2Cal',[('InData','Input OTF'),
                                        ('soln','input soln table version'),
                                        ('oldCal','input cal table version, -1=none'),
                                        ('newCal','output cal table')]],
               'InData':None, 'soln':0, 'oldCal':-1, 'newCal':0}
def Soln2Cal (err, input=Soln2CalInput):
""" Apply a Soln (solution) table to a Cal (calibration) table.
err = Python Obit Error/message stack
input = input parameter dictionary
Input dictionary entries:
InData = Python input OTF to calibrate
soln = Soln table version number to apply, 0-> high
oldCal = input Cal table version number, -1 means none, 0->high
newCal = output Cal table version number, 0->new
"""
################################################################
# Get input parameters
inData = input["InData"]
soln = input["soln"]
oldCal = input["oldCal"]
newCal = input["newCal"]
# Checks
if not PIsA(inData):
raise TypeError,'Soln2Cal: Bad input OTF'
if err.isErr: # existing error?
return
# Default table versions
if soln == 0:
soln = Obit.OTFGetHighVer(inData.me, "OTFSoln")
if oldCal == 0:
oldCal = Obit.OTFGetHighVer(inData.me, "OTFCal")
if oldCal == 0: # Must not be one
oldCal = -1
if newCal == 0:
newCal = oldCal + 1;
#
# Specify desired tables
inInfo = inData.List
inInfo.set("solnUse", soln)
inInfo.set("calIn", oldCal)
inInfo.set("calOut", newCal)
#
print "Soln2Cal: Soln ",soln," applied to cal",oldCal," write cal",newCal
# Update calibration return Obit object
calTable = Obit.OTFSoln2Cal (inData.me, inData.me, err.me);
#
# error check
OErr.printErrMsg(err, "Soln2Cal: Error updating calibration")
#
# Cleanup
calTable = Obit.TableUnref(calTable)
# end Soln2Cal
# Define Split input dictionary; see Split() for the meaning of each entry.
# NOTE: key is "Average" (capitalized), matching the 'structure' entry and
# what Split() reads; the original lower-case 'average' key made the
# default dictionary unusable (KeyError in Split and in input()).
SplitInput={'structure':['Split',[('InData','Input OTF'),
                                  ('OutData','Extant output OTF'),
                                  ('Average','if true (1) average in frequency'),
                                  ('gainUse','cal. table version, -1=none'),
                                  ('flagVer','flag table version, -1=none')]],
            'InData':None, 'OutData':None, 'Average':0, 'gainUse':-1, 'flagVer':-1};
def Split (err, input=SplitInput):
    """ Select and calibrate an OTF writing a new one.

    Applies calibration and editing/selection to inData and writes outData.
    err = Python Obit Error/message stack
    input = input parameter dictionary
    Input dictionary entries:
    InData = input Python OTF to calibrate
    OutData = output Python OTF, must be previously defined
    Average = if true average in frequency
    gainUse = version number of prior table (Soln or Cal) to apply, -1 is none
    flagVer = version number of flagging table to apply, -1 is none
    """
    ################################################################
    # Get input parameters
    inData = input["InData"]
    outData = input["OutData"]
    Average = input["Average"]
    gainUse = input["gainUse"]
    flagVer = input["flagVer"]
    # Checks
    if not PIsA(inData):
        raise TypeError("Split: Bad input OTF")
    if not PIsA(outData):
        raise TypeError("Split: Bad output OTF")
    if err.isErr: # existing error?
        return
    #
    # Apply calibration as requested
    dim[0] = 1; dim[1] = 1
    inInfo = inData.List
    InfoList.PAlwaysPutInt (inInfo, "flagVer", dim, [flagVer])
    InfoList.PAlwaysPutInt (inInfo, "gainUse", dim, [gainUse])
    if gainUse >= 0:
        itemp = 1
    else:
        itemp=0
    InfoList.PAlwaysPutInt (inInfo, "doCalib", dim, [itemp])
    # Select/calibrate on read if any calibration or flagging requested
    # (removed dead typo'd local "doClaSelect" which was never used)
    doCalSelect = (gainUse >= 0) or (flagVer>0)
    InfoList.PAlwaysPutBoolean (inInfo, "doCalSelect", dim, [doCalSelect])
    #
    # Copy data from inData to outData
    # Bug fix: tested lower-case "average" which was never defined (NameError)
    if Average:
        Obit.OTFAver (inData.me, outData.me, err.me);  # Average in frequency
    else:
        Obit.OTFCopy(inData.me, outData.me, err.me);   # Simple copy
    #
    # error check
    OErr.printErrMsg(err, "Split: Error copying data")
    #
# end Split
# Define Concat input dictionary; see Concat() for the meaning of each entry
ConcatInput={'InData':None, 'OutData':None};
def Concat (err, input=ConcatInput):
    """ Concatenates OTFs.

    Copies InData to the end of OutData.
    The files must be compatible (not checked)
    err = Python Obit Error/message stack
    input = input parameter dictionary
    Input dictionary entries:
    InData = Python input OTF
    OutData = Python output OTF, must be previously defined
    """
    ################################################################
    # Get input parameters
    inData = input["InData"]
    outData = input["OutData"]
    # Guard against a pre-existing error condition
    # (consistent with the other routines in this module)
    if err.isErr:
        return
    #
    # Append inData to the end of outData
    Obit.OTFConcat(inData.me, outData.me, err.me); # Simple append
    #
    # error check
    OErr.printErrMsg(err, "Concat: Error copying data")
    #
# end Concat
# Define Image input dictionary; see makeImage() for the meaning of each entry
ImageInput={'structure':['Image',[('InData','Input OTF'),
                                  ('OutName','Output image file name'),
                                  ('OutWeight','Output gridding weight file name'),
                                  ('Disk','disk number for output image file'),
                                  ('ra','center RA (deg)'),
                                  ('dec','center Dec (deg)'),
                                  ('nx','number of pixels in x = RA'),
                                  ('ny','number of pixels in y = de'),
                                  ('xCells','Cell spacing in x (asec)'),
                                  ('yCells','Cell spacing in y (asec)'),
                                  ('minWt','minimum summed weight in gridded image wrt max '),
                                  ('Clip','flag data with abs. value grweater than Clip'),
                                  ('ConvType','Conv. fn type, 0=pillbox,3=Gaussian,4=exp*sinc,5=Sph wave'),
                                  ('ConvParm','Conv. fn parameters'),
                                  ('gainUse','cal. table version, -1=none'),
                                  ('doFilter','Filter out of band noise? [True]'),
                                  ('doBeam','Convolved Beam image desired? [def True]'),
                                  ('Beam','Instrumental response beam [def None]'),
                                  ('Wt','Image to save gridding weight array [def None], overrides OutWeight'),
                                  ('flagVer','flag table version, -1=none')]],
            'InData':None, 'OutName':None, 'OutWeight':None, 'Disk':1,
            'ra':0.0, 'dec':0.0, 'nx':100, 'ny':100,
            'xCells':0.001, 'yCells':0.001, 'minWt':0.01, 'Clip':1.0e19,
            'ConvParm':[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0], 'ConvType':3, 'doFilter':True,
            'gainUse':-1, 'doBeam':True, 'Beam':None, 'Wt':None, 'flagVer':-1};
def makeImage (err, input=ImageInput):
    """ Image an OTF.
    Data is convolved and resampled onto the specified grid.
    Image is created and returned on success.
    err = Python Obit Error/message stack
    input = input parameter dictionary
    Input dictionary entries:
    InData = input Python OTF to image
    Additional optional parameters on InfoList member:
    "beamNx" int scalar "X" size of Beam (pixels)
    "beamNy" int scalar "Y" size of Beam(pixels)
    "doScale" bool scalar If true, convolve/scale beam [def True]
    Only use False if providing a dirty beam which already
    includes the effects of gridding.
    "deMode" bool scalar Subtract image mode from image? [def False]
    "deBias" bool scalar Subtract calibration bias from image? [def False]
    Note, this doesn't really work the way you would like
    OutName = name of output image file
    OutWeight = Output gridding weight file name
    Disk = disk number for output image file
    ra = center RA (deg)
    dec = center Dec (deg)
    nx = number of pixels in "x" = RA
    ny = number of pixels in 'Y' = dec
    xCells = Cell spacing in x (asec)
    yCells = Cell spacing in y (asec)
    minWt = minimum summed weight in gridded image wrt max [def 0.01]
    Clip = data values with abs. value larger are set zero weight
    ConvType= Convolving function Type 0=pillbox,3=Gaussian,4=exp*sinc,5=Sph wave
    ConvParm= Convolving function parameters depends on ConvType
    Type 2 = Sinc, (poor function - don't use)
    Parm[0] = halfwidth in cells,
    Parm[1] = Expansion factor
    Type 3 = Gaussian,
    Parm[0] = halfwidth in cells,[def 3.0]
    Parm[1] = Gaussian with as fraction or raw beam [def 1.0]
    Type 4 = Exp*Sinc
    Parm[0] = halfwidth in cells, [def 2.0]
    Parm[1] = 1/sinc factor (cells) [def 1.55]
    Parm[2] = 1/exp factor (cells) [def 2.52]
    Parm[3] = exp power [def 2.0]
    Type 5 = Spherodial wave
    Parm[0] = halfwidth in cells [def 3.0]
    Parm[1] = Alpha [def 5.0]
    Parm[2] = Expansion factor [not used]
    gainUse = version number of prior table (Soln or Cal) to apply, -1 is none
    flagVer = version number of flagging table to apply, -1 is none
    doFilter= Filter out of band noise?
    doBeam = Beam convolved with convolving Fn image desired? [def True]
    Beam = Actual instrumental Beam to use, else Gaussian [def None]
    Wt = Image to save gridding weight array [def None], overrides OutWeight
    """
    ################################################################
    # Get input parameters
    inData = input["InData"]
    outname = input["OutName"]
    outwt = input["OutWeight"]
    disk = input["Disk"]
    RA = input["ra"]
    Dec = input["dec"]
    nx = input["nx"]
    ny = input["ny"]
    xCells = input["xCells"]
    yCells = input["yCells"]
    minWt = input["minWt"]
    Clip = input["Clip"]
    ConvType= input["ConvType"]
    ConvParm= input["ConvParm"]
    gainUse = input["gainUse"]
    flagVer = input["flagVer"]
    doBeam = input["doBeam"]
    Beam = input["Beam"]
    Wt = input["Wt"]
    doFilter= input["doFilter"]
    # Default table versions (the Obit routines will do this as well)
    if gainUse == 0: # Get highest numbered OTFCal table
        gainUse = Obit.OTFGetHighVer(inData.me, "OTFCal")
    if gainUse == 0: # Doesn't seem to be one, try OTFSoln
        gainUse = Obit.OTFGetHighVer(inData.me, "OTFSoln")
    if gainUse == 0: # Must not be one
        gainUse = -1
    if Beam:
        lbeam = Beam # Actual beam given
    else:
        lbeam = Image.Image("NoBeam") # Not given
    #print 'Image: using cal table ',gainUse
    # Checks
    if not PIsA(inData):
        raise TypeError,'Image: Bad input OTF'
    if err.isErr: # existing error?
        return None
    #
    # Set imaging/calibration parameters on the OTF's info list
    inInfo = inData.List
    dim[0] = 1; dim[1] = 1
    InfoList.PAlwaysPutFloat(inInfo, "RA", dim, [RA])
    InfoList.PAlwaysPutFloat(inInfo, "Dec", dim, [Dec])
    InfoList.PAlwaysPutInt(inInfo, "nx", dim, [nx])
    InfoList.PAlwaysPutInt(inInfo, "ny", dim, [ny])
    InfoList.PAlwaysPutInt(inInfo, "ConvType", dim, [ConvType])
    xCells = -abs(xCells) # RA goes backwards
    InfoList.PAlwaysPutFloat(inInfo, "xCells", dim, [xCells])
    InfoList.PAlwaysPutFloat(inInfo, "yCells", dim, [yCells])
    InfoList.PAlwaysPutFloat(inInfo, "minWt", dim, [minWt])
    InfoList.PAlwaysPutFloat(inInfo, "Clip", dim, [Clip])
    docal = gainUse >= 0   # apply prior calibration?
    InfoList.PAlwaysPutBoolean(inInfo, "doCalSelect", dim, [True])
    InfoList.PAlwaysPutBoolean(inInfo, "doFilter", dim, [doFilter])
    dim[0] = len(ConvParm)
    InfoList.PAlwaysPutFloat(inInfo, "ConvParm", dim, ConvParm)
    dim[0] = 1
    if docal:
        dcal = 1
    else:
        dcal = 0
    InfoList.PAlwaysPutInt(inInfo, "doCalib", dim, [dcal])
    InfoList.PAlwaysPutInt(inInfo, "gainUse", dim, [gainUse])
    InfoList.PAlwaysPutInt(inInfo, "flagVer", dim, [flagVer])
    #
    # Create output image object from OTF
    outImage = Obit.OTFUtilCreateImage (inData.me, err.me)
    #
    # Define output as a FITS file on disk
    Obit.ImageSetFITS(outImage, 2, disk, outname, blc, trc, err.me)
    Obit.ImagefullInstantiate (outImage, 2, err.me)
    # error check
    OErr.printErrMsg(err, "Image: Error verifying output file")
    # Define Weight image
    if Wt:
        lWt = Wt # Actual weight image given
    else:
        # Create output Weight image?
        if outwt:
            # Define output, cloned from the output image geometry
            lWt = Image.Image("Gridding Weight")
            Obit.ImageSetFITS(lWt.me, 2, disk, outwt, blc, trc, err.me)
            Obit.ImageClone(outImage, lWt.me, err.me)
            Obit.ImagefullInstantiate (lWt.me, 2, err.me)
            # error check
            OErr.printErrMsg(err, "Image: Error verifying weight file")
            input["Wt"] = lWt # save weight image back on input dict
        else:
            lWt = Image.Image("NoWt") # Not given
    # Form image (grid, convolve, normalize)
    Obit.OTFUtilMakeImage (inData.me, outImage, doBeam, lbeam.me, lWt.me, err.me);
    # error check
    OErr.printErrMsg(err, "Image: Error imaging OTF")
    #
    # Wrap output image in a Python object
    out = Image.Image(" ")
    out.me = outImage
    #
    return out
# end makeImage
def SelfCal (err, ImageInp=ImageInput, CleanInp=None,
             ResidCalInp=ResidCalInput, Soln2CalInp=Soln2CalInput, ):
    """ Self calibrate an OTF

    Image an OTF, optionally Clean, determine residual calibration,
    apply to Soln to Cal table. If the Clean is done, then the CLEAN result is
    used as the model in the ResidCal, otherwise the dirty image from Image is.
    err = Python Obit Error/message stack
    ImageInp = input parameter dictionary for Image
    CleanInp = input parameter dictionary for Clean,
               None (or legacy "None") -> no Clean requested
               May be modified to point to the result of the Image step
    ResidCalInp = input parameter dictionary for ResidCal
                  Will be modified to give correct derived model image
    Soln2CalInp = input parameter dictionary for Soln2Cal
    """
    ################################################################
    #
    if err.isErr: # existing error?
        return
    # Dirty Image
    image = makeImage(err, ImageInp)
    image.Open(READONLY, err)
    image.Read(err)
    OErr.printErrMsg(err, "OTF:SelfCal: Error reading model")
    model = image.FArray
    modelDesc = image.Desc
    image.Close(err)
    OErr.printErrMsg(err, "OTF:SelfCal: Error imaging OTF")
    # Clean if requested.  Bug fix: the original only compared against the
    # string "None" and crashed on the default CleanInp=None.
    if (CleanInp is not None) and (CleanInp != "None"):
        CleanObj = CleanInp["CleanOTF"] # Clean object
        resid = CleanObj.Clean # Copy image just produced
        Image.PCopy(image, resid, err) # to clean(resid)
        CleanOTF.PClean (err, CleanInp) # Do Clean
        OErr.printErrMsg(err, "OTF:SelfCal: Error Cleaning")
        CCimage = CleanObj.Clean # Get Clean for image model
        CCTab = CCimage.NewTable(1,"AIPS CC", 0, err)
        # Use all CLEAN components (BComp=1, EComp=0 => to end)
        dim[0] = 1; dim[1] = 1
        InfoList.PAlwaysPutInt(CCTab.List, "BComp", dim, [1]);
        InfoList.PAlwaysPutInt(CCTab.List, "EComp", dim, [0]);
        OErr.printErrMsg(err, "OTF:SelfCal: Error getting CCTable")
        # Make model image, CC convolved with beam
        model = OTFUtil.PConvBeam (CCTab, CleanObj.Beam, model, err);
        OErr.printErrMsg(err, "OTF:SelfCal: Error making model")
    #
    # Residual calibration
    ResidCalInp['Model'] = model         # Set reference image array
    ResidCalInp['ModelDesc'] = modelDesc # Set reference image descriptor
    ResidCal (err, ResidCalInp)          # Determine calibration
    OErr.printErrMsg(err, "OTF:SelfCal: Error Calibrating")
    #
    # Apply solution to calibration table
    Soln2Cal (err, Soln2CalInp)
    #
# end SelfCal
def PScratch (inOTF, err):
    """ Create a scratch file suitable for accepting the data to be read from inOTF

    A scratch OTF is more or less the same as a normal OTF except that it is
    automatically deleted on the final unreference.
    Returns the new scratch Python OTF object.
    inOTF = Python OTF object
    err = Python Obit Error/message stack
    """
    ################################################################
    # Delegate to the class member function
    return inOTF.Scratch(err)
    # end PScratch
def PZap (inOTF, err):
    """ Delete underlying files and the basic object.

    inOTF = Python OTF object
    err = Python Obit Error/message stack
    """
    ################################################################
    # Delegate to the class member function
    inOTF.Zap(err)
    # end PZap
def PRename (inOTF, err, newFITSName=None):
    """ Rename underlying files

    inOTF = Python OTF object
    err = Python Obit Error/message stack
    For FITS files:
    newFITSName = new name for FITS file (None => keep current)
    """
    ################################################################
    # Delegate to the class member function
    inOTF.Rename(err,newFITSName=newFITSName)
    # end PRename
def PCopy (inOTF, outOTF, err):
    """ Make a deep copy of input object.

    Makes structure the same as inOTF, copies data, tables
    inOTF = Python OTF object to copy
    outOTF = Output Python OTF object, must be defined
    err = Python Obit Error/message stack
    """
    ################################################################
    # Delegate to the class member function
    inOTF.Copy (outOTF, err)
    # end PCopy
def PClone (inOTF, outOTF, err):
    """ Make a copy of a object but do not copy the actual data

    This is useful to create an OTF similar to the input one.
    inOTF = Python OTF object
    outOTF = Output Python OTF object, must be defined
    err = Python Obit Error/message stack
    """
    ################################################################
    # Delegate to the class member function
    inOTF.Clone (outOTF, err)
    # end PClone
def PConcat (inOTF, outOTF, err):
    """ Copy data from inOTF to the end of outOTF

    inOTF = Python OTF object
    outOTF = Output Python OTF object, must be defined
    err = Python Obit Error/message stack
    """
    ################################################################
    # Checks.  Parenthesized raise form works in both python 2 and 3.
    if not PIsA(inOTF):
        raise TypeError("inOTF MUST be a Python Obit OTF")
    if not PIsA(outOTF):
        raise TypeError("outOTF MUST be a Python Obit OTF")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    if err.isErr: # existing error?
        return
    #
    Obit.OTFConcat (inOTF.cast(myClass), outOTF.cast(myClass), err.me)
    # end PConcat
def PNewOTFTable (inOTF, access, tabType, tabVer, err,
                  numDet=1, numPoly=0, numParm=0):
    """ Return the specified associated table

    inOTF = Python OTF object
    access = access code 1=READONLY, 2=WRITEONLY, 3=READWRITE
    tabType = Table type, e.g. "OTFSoln"
    tabVer = table version, if > 0 on input that table returned,
    if 0 on input, the highest version is used.
    err = Python Obit Error/message stack
    Optional parameters, values only used if table created
    numDet = Number of Detectors (OTFCal, OTFSoln, OTFScanData)
    numPoly = Number of polynomial terms (OTFCal, OTFSoln)
    numParm = Number of model parameters (OTFModel)
    """
    ################################################################
    # Delegate to the class member function
    return inOTF.NewTable (access, tabType, tabVer, err, \
                           numDet=numDet, numPoly=numPoly, numParm=numParm)
    # end PNewOTFTable
def POpen (inOTF, access, err):
    """ Open an OTF persistent (disk) form
    Returns 0 on success, else failure
    inOTF   = Python OTF object
    access  = access 1=READONLY, 2=WRITEONLY, 3=READWRITE
    err     = Python Obit Error/message stack
    """
    ################################################################
    return inOTF.Open (access, err)
    # end POpen
def PDirty (inOTF):
    """ Mark OTF as needing a header update to disk file
    The header will be rewritten the next time the object is flushed/closed.
    inOTF = Python OTF object
    """
    ################################################################
    inOTF.Dirty()
    # end PDirty
def PClose (inOTF, err):
    """ Close an OTF persistent (disk) form
    Any errors are added to the err stack.
    inOTF = Python OTF object
    err   = Python Obit Error/message stack
    """
    ################################################################
    inOTF.Close (err)
    # end PClose
def PZapTable (inOTF, tabType, tabVer, err):
    """ Destroy specified table
    Deletes the given version of a table attached to the OTF.
    inOTF    = Python OTF object
    tabType  = Table type, e.g. "OTFSoln"
    tabVer   = table version, integer
    err      = Python Obit Error/message stack
    """
    ################################################################
    inOTF.ZapTable (tabType, tabVer, err)
    # end PZapTable
def PCopyTables (inOTF, outOTF, exclude, include, err):
    """ Copy Tables from one OTF to another
    inOTF    = Python OTF object
    outOTF   = Output Python OTF object, must be defined
    exclude  = list of table types to exclude (list of strings)
               has priority over include
    include  = list of table types to include (list of strings)
    err      = Python Obit Error/message stack
    """
    ################################################################
    inOTF.CopyTables (outOTF, exclude, include, err)
    # end PCopyTables
def PUpdateTables (inOTF, err):
    """ Update any disk resident structures about the current tables
    inOTF = Python OTF object
    err   = Python Obit Error/message stack
    """
    ################################################################
    inOTF.UpdateTables (err)
    # end PUpdateTables
def PFullInstantiate (inOTF, access, err):
""" Fully instantiate an OTF by opening and closing
return 0 on success, else failure
inOTF = Python OTF object
access = access code 1=READONLY, 2=WRITEONLY, 3=READWRITE
err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(inOTF):
raise TypeError,"inOTF MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
if err.isErr: # existing error?
return None
#
return Obit.OTFfullInstantiate (inOTF.cast(myClass), access, err.me)
# end PFullInstantiate
def PSetTarget (inOTF, Target, Flux, RA, Dec, err):
""" Set target flux density and position
inOTF = Python OTF object
Target = Target name
Flux = Target Flux density
RA = RA in deg at mean equinox and epoch
Dec = Dec in deg at mean equinox and epoch
err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(inOTF):
raise TypeError,"inOTF MUST be a Python Obit OTF"
if not OErr.OErrIsA(err):
raise TypeError,"err MUST be an OErr"
if err.isErr: # existing error?
return
#
return Obit.OTFSetTarget (inOTF.cast(myClass), Target, Flux, RA, Dec, err.me)
# end PSetTarget
def PGetList (inOTF):
    """ Return the member InfoList
    returns InfoList
    inOTF = Python OTF object
    """
    ################################################################
    return inOTF.List
    # end PGetList
def PGetTableList (inOTF):
    """ Return the member tableList
    returns tableList
    inOTF = Python OTF object
    """
    ################################################################
    return inOTF.TableList
    # end PGetTableList
def PHeader (inOTF, err):
""" Print data descriptor
inOTF = Python Obit OTF object
err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(inOTF):
raise TypeError,"inOTF MUST be a Python Obit OTF data"
#
# Fully instantiate
PFullInstantiate (inOTF, READONLY, err)
# File info
if inOTF.FileType=="AIPS":
print "AIPS OTF Data Name: %12s Class: %6s seq: %d8 disk: %4d" % \
(inOTF.Aname, inOTF.Aclass, inOTF.Aseq, inOTF.Disk)
elif inOTF.FileType=="FITS":
print "FITS OTF Data Disk: %5d File Name: %s " % \
(inOTF.Disk, inOTF.FileName)
# print in OTFDesc
OTFDesc.PHeader(inOTF.Desc)
# Tables
TL = inOTF.TableList
Tlist = TableList.PGetList(TL, err)
Tdict = {}
# Once to find everything
for item in Tlist:
Tdict[item[1]] = item[0]
# Again to get Max
for item in Tlist:
count = max (Tdict[item[1]], item[0])
Tdict[item[1]] = count
for item,count in Tdict.items():
print "Maximum version number of %s tables is %d " % \
(item, count)
# end PHeader
def PGetDesc (inOTF):
    """ Return the member OTFDesc
    returns OTFDesc as a Python Dictionary
    inOTF = Python OTF object
    """
    ################################################################
    # Fixed: previously returned "inOTD.Desc" -- "inOTD" is an undefined
    # name (typo for the "inOTF" parameter), raising NameError on every call.
    return inOTF.Desc
    # end PGetDesc
def PGetArrayGeom (inOTF):
    """ Return the member OTFArrayGeom
    returns OTFArrayGeom as a Python Dictionary
    inOTF = Python OTF object
    """
    ################################################################
    # Fixed: previously returned "inOTD.ArrayGeom" -- "inOTD" is an
    # undefined name (typo for the "inOTF" parameter), raising NameError.
    return inOTF.ArrayGeom
    # end PGetArrayGeom
def PUpdateDesc (inOTF, err, Desc=None):
    """ Update external representation of descriptor
    inOTF = Python OTF object
    err   = Python Obit Error/message stack
    Desc  = OTF descriptor, if None then use current descriptor
    """
    ################################################################
    inOTF.UpdateDesc (err, Desc=Desc)
    # end PUpdateDesc
def POTFInfo (inOTF, err):
    """ Get file info for extant OTF data object
    Fills in information on object, useful for scratch files
    inOTF = Python OTF object
    err   = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not PIsA(inOTF):
        raise TypeError,"inOTF MUST be a Python Obit OTF"
    if not OErr.OErrIsA(err):
        raise TypeError,"err MUST be an OErr"
    #
    # file info from the underlying C object
    info = Obit.OTFInfo (inOTF.cast(myClass), err.me);
    if err.isErr:
        OErr.printErrMsg(err, "Error creating scratch file")
    # NOTE(review): FileType is only set for FITS here; the remaining
    # attributes are set unconditionally -- confirm AIPS is not expected.
    if info["type"]=="FITS":
        inOTF.FileType = 'FITS'
    inOTF.Disk = info["disk"]
    inOTF.Otype = "OTF"
    inOTF.FileName = info["filename"]
    inOTF.Fname = info["filename"]
    # end POTFInfo
def PGetRecBuf (inOTF):
    """ Return the member record (I/O) buffer
    inOTF = Python OTF object
    """
    return inOTF.RecBuf
def PGetHighVer (inOTF, tabType):
    """ Get highest version number of a specified Table
    returns highest tabType version number, 0 if none.
    inOTF    = Python OTF object
    tabType  = Table type, e.g. "OTFSoln"
    """
    ################################################################
    return inOTF.GetHighVer (tabType)
    # end PGetHighVer
def PIsScratch (inOTF):
    """ Tells if OTF is a scratch object
    return true, false (1,0)
    inOTF = Python OTF object
    """
    ################################################################
    return inOTF.IsScratch ()
    # end PIsScratch
def PIsA (inOTF):
    """ Tells if input really a Python Obit OTF
    return true, false (1,0)
    inOTF = Python OTF object
    """
    ################################################################
    return Obit.OTFIsA(inOTF.cast(myClass))
    # end PIsA
def PGetName (inOTF):
    """ Tells OTF object name (label)
    returns name as character string
    inOTF = Python OTF object
    """
    ################################################################
    return inOTF.GetName()
    # end PGetName
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@ObitSD@python@OTF.py@.PATH_END.py
|
{
"filename": "_tickformat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolar/marker/colorbar/_tickformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
    # Auto-generated string validator for the `tickformat` property of
    # `scatterpolar.marker.colorbar`.
    def __init__(
        self,
        plotly_name="tickformat",
        parent_name="scatterpolar.marker.colorbar",
        **kwargs
    ):
        super(TickformatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # kwargs may override the generated defaults below
            edit_type=kwargs.pop("edit_type", "colorbars"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolar@marker@colorbar@_tickformat.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/portalocker/py2/tests/conftest.py",
"type": "Python"
}
|
import py
import pytest
@pytest.fixture
def tmpfile(tmpdir_factory):
    """Yield the path of a temporary file; remove it afterwards (best effort)."""
    tmpdir = tmpdir_factory.mktemp('temp')
    filename = tmpdir.join('tmpfile')
    yield str(filename)
    # Cleanup may race with a process still holding the file open,
    # or the test may never have created it -- ignore both cases.
    try:
        filename.remove(ignore_errors=True)
    except (py.error.EBUSY, py.error.ENOENT):
        pass
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@portalocker@py2@tests@conftest.py@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/box/_hoverlabel.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated compound validator for the `hoverlabel` property of the
    # `box` trace; `data_docs` is the user-facing help text for each subfield.
    def __init__(self, plotly_name="hoverlabel", parent_name="box", **kwargs):
        super(HoverlabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the text
                content within hover label box. Has an effect
                only if the hover label text spans more two or
                more lines
            alignsrc
                Sets the source reference on Chart Studio Cloud
                for `align`.
            bgcolor
                Sets the background color of the hover labels
                for this trace
            bgcolorsrc
                Sets the source reference on Chart Studio Cloud
                for `bgcolor`.
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on Chart Studio Cloud
                for `bordercolor`.
            font
                Sets the font used in hover labels.
            namelength
                Sets the default length (in number of
                characters) of the trace name in the hover
                labels for all traces. -1 shows the whole name
                regardless of length. 0-3 shows the first 0-3
                characters, and an integer >3 will show the
                whole name if it is less than that many
                characters, but if it is longer, will truncate
                to `namelength - 3` characters and add an
                ellipsis.
            namelengthsrc
                Sets the source reference on Chart Studio Cloud
                for `namelength`.
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@box@_hoverlabel.py@.PATH_END.py
|
{
"filename": "_legend.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/waterfall/_legend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendValidator(_plotly_utils.basevalidators.SubplotidValidator):
    # Auto-generated subplot-id validator for the `legend` property of the
    # `waterfall` trace.
    def __init__(self, plotly_name="legend", parent_name="waterfall", **kwargs):
        super(LegendValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=kwargs.pop("dflt", "legend"),
            edit_type=kwargs.pop("edit_type", "style"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@waterfall@_legend.py@.PATH_END.py
|
{
"filename": "synthetic_obs.py",
"repo_name": "antoinemarchal/ROHSA",
"repo_path": "ROHSA_extracted/ROHSA-master/publication/SIMU/synthetic_obs.py",
"type": "Python"
}
|
#!/home/amarchal/py2env/bin/python
'''This program build synthetic obs (21cm line) from T,n and vz which are the three-dimensional
field of the numerical simulation based on the work of Saury et al. 2014'''
import numpy as np
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import units
from astropy import constants as const
from scipy import ndimage
import scipy.integrate as integrate
import FITS_tools
plt.ion()  # interactive plotting mode
plot = False  # toggle for optional diagnostic plots
cm = plt.get_cmap('viridis')
cm.set_bad(color='black')  # render NaN/masked pixels in black
imkw = dict(origin='lower', interpolation='none', cmap=cm)
def I_Tb(params):
    """Optically-thin brightness integral along the line of sight (axis 0).

    params is a tuple (u, vz, n_Delta, T, C, Delta2, dz):
      u       = spectral velocity channel (km/s)
      vz      = line-of-sight velocity field
      n_Delta = density divided by sqrt(Delta2)
      T       = temperature field (unused here; kept for interface stability)
      C       = HI column-density conversion constant
      Delta2  = squared thermal broadening
      dz      = cell depth used as the integration step
    Returns the integrated intensity map (Simpson's rule over axis 0).
    """
    (u, vz, n_Delta, T, C, Delta2, dz) = params
    dI = n_Delta * np.exp(- (u - (vz))**2 / (2.*Delta2))
    # Zero out NaNs (cells with Delta2 == 0); np.isnan replaces the
    # obscure dI != dI self-comparison trick.
    dI[np.isnan(dI)] = 0.
    # scipy renamed integrate.simps -> integrate.simpson and removed the
    # old alias in SciPy >= 1.14; support both.
    _simpson = getattr(integrate, "simpson", None)
    if _simpson is None:
        _simpson = integrate.simps
    I = 1./(C * np.sqrt(2.*np.pi)) * _simpson(dI, dx=dz, axis=0)
    return I
# Constant
m_h = 1.6737236e-27 #kg
C = 1.82243e18 #K-1cm-2 / (km.s-1)
pc2cm = units.pc.to(units.m) * 1.e2
box_size = 40. # pc
resolution = 1024.
dz = (box_size / resolution) * pc2cm  # cell depth along the line of sight (cm)
# Open data
path_simu = '/data/amarchal/ROHSA_paper/data/Saury2014/'
path_out = '/data/amarchal/ROHSA_paper/data/synthetic_obs/'
hdu_list_rho = fits.open(path_simu + 'rho_016_subgrid_256.fits')
hdu_list_T = fits.open(path_simu + 'T_016_subgrid_256.fits')
hdu_list_vz = fits.open(path_simu + 'vz_016_subgrid_256.fits')
reso = 0.8 #km.s-1
rho_cube = hdu_list_rho[0].data #g.cm-3
T_cube = hdu_list_T[0].data
vz_cube = hdu_list_vz[0].data * 1.e-5 #km.s-1 ATTENTION
## CUT TEMPERATURE
# Keep only gas in the selected temperature range; other cells stay zero.
Tk_lim_inf = 0
Tk_lim_sup = np.inf
idx_phase = np.where((T_cube > Tk_lim_inf) & (T_cube < Tk_lim_sup))
rho_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
T_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
vz_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
rho_cube_phase[idx_phase] = rho_cube[idx_phase]
T_cube_phase[idx_phase] = T_cube[idx_phase]
vz_cube_phase[idx_phase] = vz_cube[idx_phase]
##
# Preliminary calculation
Delta2 = ((const.k_B.value * T_cube_phase / m_h)) * 1.e-6 #km.s-1
n = rho_cube_phase/(m_h*1.e3)
n_Delta = n / np.sqrt(Delta2)
# Spectral range
u = np.arange(-40,40+reso, reso)
map_u = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
for i in np.arange(T_cube_phase.shape[1]):
    for j in np.arange(T_cube_phase.shape[2]):
        map_u[:,i,j] = u
Tb = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
Tb_thin = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
tau_in_front = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
# Radiative transfer: march through the cube plane by plane along axis 0,
# accumulating the opacity in front (tau_in_front) so emission from deeper
# layers is attenuated by the foreground gas.
for i in tqdm(range(T_cube_phase.shape[0])):
    Tb_z = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
    tau_z = 1. / (C * np.sqrt(2.*np.pi)) * n_Delta[i] / T_cube_phase[i] * np.exp(- (map_u - (vz_cube_phase[i]))**2 / (2.*Delta2[i])) * dz
    idx_nonzero = ~np.isnan(tau_z[0])
    Tb_z[:,idx_nonzero] = T_cube_phase[i,idx_nonzero] * (1. - np.exp(-1.*tau_z[:,idx_nonzero])) * np.exp(-1.*tau_in_front[:,idx_nonzero])
    tau_in_front[:,idx_nonzero] += tau_z[:,idx_nonzero]
    Tb += Tb_z
    Tb_thin[:,idx_nonzero] += tau_z[:,idx_nonzero] * T_cube_phase[i,idx_nonzero]
# Tb_thin_fast = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
# for i in tqdm(range(len(u))):
#     Tb_thin_fast[i] = I_Tb((u[i], vz_cube_phase, n_Delta, T_cube_phase, C, Delta2, dz))
fileout = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA.fits'
fileout_thin = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA_thin.fits'
# Write PPV cube
hdu0 = fits.PrimaryHDU(Tb)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
# NOTE(review): FITS NAXIS1 is the fastest-varying axis; shape[1]/shape[2]
# look swapped but agree here because the maps are square -- confirm.
hdu0.header['NAXIS1'] = Tb.shape[1]
hdu0.header['NAXIS2'] = Tb.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
# NOTE(review): CRPIX is 1-indexed in FITS, so u[40] with CRPIX3=40 may be
# off by one channel -- confirm intended reference pixel.
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout, overwrite=True)
# Write PPV cube thin limit
hdu0 = fits.PrimaryHDU(Tb_thin)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb_thin.shape[1]
hdu0.header['NAXIS2'] = Tb_thin.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout_thin, overwrite=True)
|
antoinemarchalREPO_NAMEROHSAPATH_START.@ROHSA_extracted@ROHSA-master@publication@SIMU@synthetic_obs.py@.PATH_END.py
|
{
"filename": "dashboard_objs.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/chart-studio/chart_studio/dashboard_objs/dashboard_objs.py",
"type": "Python"
}
|
"""
dashboard_objs
==========
A module for creating and manipulating dashboard content. You can create
a Dashboard object, insert boxes, swap boxes, remove a box and get an HTML
preview of the Dashboard.
"""
import pprint
import _plotly_utils.exceptions
from _plotly_utils import optional_imports
from chart_studio import exceptions
IPython = optional_imports.get_module("IPython")
# default parameters for HTML preview
MASTER_WIDTH = 500
MASTER_HEIGHT = 500
FONT_SIZE = 9
ID_NOT_VALID_MESSAGE = (
"Your box_id must be a number in your dashboard. To view a "
"representation of your dashboard run get_preview()."
)
def _empty_box():
empty_box = {"type": "box", "boxType": "empty"}
return empty_box
def _box(fileId="", shareKey=None, title=""):
box = {
"type": "box",
"boxType": "plot",
"fileId": fileId,
"shareKey": shareKey,
"title": title,
}
return box
def _container(box_1=None, box_2=None, size=50, sizeUnit="%", direction="vertical"):
if box_1 is None:
box_1 = _empty_box()
if box_2 is None:
box_2 = _empty_box()
container = {
"type": "split",
"size": size,
"sizeUnit": sizeUnit,
"direction": direction,
"first": box_1,
"second": box_2,
}
return container
dashboard_html = """
<!DOCTYPE HTML>
<html>
<head>
<style>
body {{
margin: 0px;
padding: 0px;
}}
</style>
</head>
<body>
<canvas id="myCanvas" width="{width}" height="{height}"></canvas>
<script>
var canvas = document.getElementById('myCanvas');
var context = canvas.getContext('2d');
<!-- Dashboard -->
context.beginPath();
context.rect(0, 0, {width}, {height});
context.lineWidth = 2;
context.strokeStyle = 'black';
context.stroke();
</script>
</body>
</html>
""".format(
width=MASTER_WIDTH, height=MASTER_HEIGHT
)
def _draw_line_through_box(
dashboard_html,
top_left_x,
top_left_y,
box_w,
box_h,
is_horizontal,
direction,
fill_percent=50,
):
if is_horizontal:
new_top_left_x = top_left_x + box_w * (fill_percent / 100.0)
new_top_left_y = top_left_y
new_box_w = 1
new_box_h = box_h
else:
new_top_left_x = top_left_x
new_top_left_y = top_left_y + box_h * (fill_percent / 100.0)
new_box_w = box_w
new_box_h = 1
html_box = """<!-- Draw some lines in -->
context.beginPath();
context.rect({top_left_x}, {top_left_y}, {box_w}, {box_h});
context.lineWidth = 1;
context.strokeStyle = 'black';
context.stroke();
""".format(
top_left_x=new_top_left_x,
top_left_y=new_top_left_y,
box_w=new_box_w,
box_h=new_box_h,
)
index_for_new_box = dashboard_html.find("</script>") - 1
dashboard_html = (
dashboard_html[:index_for_new_box]
+ html_box
+ dashboard_html[index_for_new_box:]
)
return dashboard_html
def _add_html_text(dashboard_html, text, top_left_x, top_left_y, box_w, box_h):
    """Splice centered text (a box id) into the canvas HTML at the box center."""
    html_text = """<!-- Insert box numbers -->
          context.font = '{}pt Times New Roman';
          context.textAlign = 'center';
          context.fillText({}, {} + 0.5*{}, {} + 0.5*{});
    """.format(
        FONT_SIZE, text, top_left_x, box_w, top_left_y, box_h
    )
    insert_at = dashboard_html.find("</script>") - 1
    return dashboard_html[:insert_at] + html_text + dashboard_html[insert_at:]
class Dashboard(dict):
    """
    Dashboard class for creating interactive dashboard objects.
    Dashboards are dicts that contain boxes which hold plot information.
    These boxes can be arranged in various ways. The most basic form of
    a box is:
    ```
    {
        'type': 'box',
        'boxType': 'plot'
    }
    ```
    where 'fileId' can be set to the 'username:#' of your plot. The other
    parameters a box takes are `shareKey` (default is None) and `title`
    (default is '').
    `.get_preview()` should be called quite regularly to get an HTML
    representation of the dashboard in which the boxes in the HTML
    are labelled with on-the-fly-generated numbers or box ids which
    change after each modification to the dashboard.
    `.get_box()` returns the box located in the dashboard by calling
    its box id as displayed via `.get_preview()`.
    Example 1: Create a simple Dashboard object
    ```
    import plotly.dashboard_objs as dashboard
    box_a = {
        'type': 'box',
        'boxType': 'plot',
        'fileId': 'username:some#',
        'title': 'box a'
    }
    box_b = {
        'type': 'box',
        'boxType': 'plot',
        'fileId': 'username:some#',
        'title': 'box b'
    }
    box_c = {
        'type': 'box',
        'boxType': 'plot',
        'fileId': 'username:some#',
        'title': 'box c'
    }
    my_dboard = dashboard.Dashboard()
    my_dboard.insert(box_a)
    # my_dboard.get_preview()
    my_dboard.insert(box_b, 'above', 1)
    # my_dboard.get_preview()
    my_dboard.insert(box_c, 'left', 2)
    # my_dboard.get_preview()
    my_dboard.swap(1, 2)
    # my_dboard.get_preview()
    my_dboard.remove(1)
    # my_dboard.get_preview()
    ```
    Example 2: 4 vertical boxes of equal height
    ```
    import plotly.dashboard_objs as dashboard
    box_a = {
        'type': 'box',
        'boxType': 'plot',
        'fileId': 'username:some#',
        'title': 'box a'
    }
    my_dboard = dashboard.Dashboard()
    my_dboard.insert(box_a)
    my_dboard.insert(box_a, 'below', 1)
    my_dboard.insert(box_a, 'below', 1)
    my_dboard.insert(box_a, 'below', 3)
    # my_dboard.get_preview()
    ```
    """
    def __init__(self, content=None):
        """Initialize from an existing dashboard dict, or create an empty one."""
        if content is None:
            content = {}
        if not content:
            # Fresh dashboard: no layout yet, current schema version.
            self["layout"] = None
            self["version"] = 2
            self["settings"] = {}
        else:
            self["layout"] = content["layout"]
            self["version"] = content["version"]
            self["settings"] = content["settings"]
    def _compute_box_ids(self):
        """Assign sequential ids (1, 2, ...) to all non-empty boxes.

        Returns a dict mapping box id -> tuple of 'first'/'second' keys
        giving the path to that box in the layout tree. Ids are regenerated
        on every call, so they change whenever the layout changes.
        """
        from plotly.utils import node_generator
        box_ids_to_path = {}
        all_nodes = list(node_generator(self["layout"]))
        all_nodes.sort(key=lambda x: x[1])
        for node in all_nodes:
            if (
                node[1] != ()
                and node[0]["type"] == "box"
                and node[0]["boxType"] != "empty"
            ):
                try:
                    max_id = max(box_ids_to_path.keys())
                except ValueError:
                    # no ids assigned yet
                    max_id = 0
                box_ids_to_path[max_id + 1] = node[1]
        return box_ids_to_path
    def _insert(self, box_or_container, path):
        """Place box_or_container at the node addressed by the given path."""
        if any(first_second not in ["first", "second"] for first_second in path):
            raise _plotly_utils.exceptions.PlotlyError(
                "Invalid path. Your 'path' list must only contain "
                "the strings 'first' and 'second'."
            )
        if "first" in self["layout"]:
            loc_in_dashboard = self["layout"]
            for index, first_second in enumerate(path):
                if index != len(path) - 1:
                    loc_in_dashboard = loc_in_dashboard[first_second]
                else:
                    loc_in_dashboard[first_second] = box_or_container
        else:
            # layout has no split yet: replace it wholesale
            self["layout"] = box_or_container
    def _make_all_nodes_and_paths(self):
        """Return (all_nodes, all_paths) for the layout tree, sorted by path."""
        from plotly.utils import node_generator
        all_nodes = list(node_generator(self["layout"]))
        all_nodes.sort(key=lambda x: x[1])
        # remove path 'second' as it's always an empty box
        all_paths = []
        for node in all_nodes:
            all_paths.append(node[1])
        path_second = ("second",)
        if path_second in all_paths:
            all_paths.remove(path_second)
        return all_nodes, all_paths
    def _path_to_box(self, path):
        """Follow a path of 'first'/'second' keys and return the node there."""
        loc_in_dashboard = self["layout"]
        for first_second in path:
            loc_in_dashboard = loc_in_dashboard[first_second]
        return loc_in_dashboard
    def _set_dashboard_size(self):
        """Resize the root layout based on the number of boxes."""
        # set dashboard size to keep consistent with GUI
        num_of_boxes = len(self._compute_box_ids())
        if num_of_boxes == 0:
            pass
        elif num_of_boxes == 1:
            self["layout"]["size"] = 800
            self["layout"]["sizeUnit"] = "px"
        elif num_of_boxes == 2:
            self["layout"]["size"] = 1500
            self["layout"]["sizeUnit"] = "px"
        else:
            # beyond two boxes, grow by 350px per additional box
            self["layout"]["size"] = 1500 + 350 * (num_of_boxes - 2)
            self["layout"]["sizeUnit"] = "px"
    def get_box(self, box_id):
        """Returns box from box_id number."""
        box_ids_to_path = self._compute_box_ids()
        loc_in_dashboard = self["layout"]
        if box_id not in box_ids_to_path.keys():
            raise _plotly_utils.exceptions.PlotlyError(ID_NOT_VALID_MESSAGE)
        for first_second in box_ids_to_path[box_id]:
            loc_in_dashboard = loc_in_dashboard[first_second]
        return loc_in_dashboard
    def get_preview(self):
        """
        Returns JSON or HTML representation of the dashboard.
        If IPython is not imported, returns a pretty print of the dashboard
        dict. Otherwise, returns an IPython.core.display.HTML display of the
        dashboard.
        The algorithm used to build the HTML preview involves going through
        the paths of the node generator of the dashboard. The paths of the
        dashboard are sequenced through from shorter to longer and whether
        it's a box or container that lies at the end of the path determines
        the action.
        If it's a container, draw a line in the figure to divide the current
        box into two and store the specs of the resulting two boxes. If the
        path points to a terminal box (often containing a plot), then draw
        the box id in the center of the box.
        It's important to note that these box ids are generated on-the-fly and
        they do not necessarily stay assigned to the boxes they were once
        assigned to.
        """
        if IPython is None:
            pprint.pprint(self)
            return
        elif self["layout"] is None:
            # empty dashboard: just the outer frame
            return IPython.display.HTML(dashboard_html)
        top_left_x = 0
        top_left_y = 0
        box_w = MASTER_WIDTH
        box_h = MASTER_HEIGHT
        html_figure = dashboard_html
        box_ids_to_path = self._compute_box_ids()
        # used to store info about box dimensions
        path_to_box_specs = {}
        first_box_specs = {
            "top_left_x": top_left_x,
            "top_left_y": top_left_y,
            "box_w": box_w,
            "box_h": box_h,
        }
        # uses tuples to store paths as for hashable keys
        path_to_box_specs[("first",)] = first_box_specs
        # generate all paths
        all_nodes, all_paths = self._make_all_nodes_and_paths()
        max_path_len = max(len(path) for path in all_paths)
        # walk paths breadth-first so parent box specs exist before children
        for path_len in range(1, max_path_len + 1):
            for path in [path for path in all_paths if len(path) == path_len]:
                current_box_specs = path_to_box_specs[path]
                if self._path_to_box(path)["type"] == "split":
                    fill_percent = self._path_to_box(path)["size"]
                    direction = self._path_to_box(path)["direction"]
                    is_horizontal = direction == "horizontal"
                    top_left_x = current_box_specs["top_left_x"]
                    top_left_y = current_box_specs["top_left_y"]
                    box_w = current_box_specs["box_w"]
                    box_h = current_box_specs["box_h"]
                    html_figure = _draw_line_through_box(
                        html_figure,
                        top_left_x,
                        top_left_y,
                        box_w,
                        box_h,
                        is_horizontal=is_horizontal,
                        direction=direction,
                        fill_percent=fill_percent,
                    )
                    # determine the specs for resulting two box split
                    if is_horizontal:
                        new_top_left_x = top_left_x
                        new_top_left_y = top_left_y
                        new_box_w = box_w * (fill_percent / 100.0)
                        new_box_h = box_h
                        new_top_left_x_2 = top_left_x + new_box_w
                        new_top_left_y_2 = top_left_y
                        new_box_w_2 = box_w * ((100 - fill_percent) / 100.0)
                        new_box_h_2 = box_h
                    else:
                        new_top_left_x = top_left_x
                        new_top_left_y = top_left_y
                        new_box_w = box_w
                        new_box_h = box_h * (fill_percent / 100.0)
                        new_top_left_x_2 = top_left_x
                        new_top_left_y_2 = top_left_y + box_h * (fill_percent / 100.0)
                        new_box_w_2 = box_w
                        new_box_h_2 = box_h * ((100 - fill_percent) / 100.0)
                    first_box_specs = {
                        "top_left_x": top_left_x,
                        "top_left_y": top_left_y,
                        "box_w": new_box_w,
                        "box_h": new_box_h,
                    }
                    second_box_specs = {
                        "top_left_x": new_top_left_x_2,
                        "top_left_y": new_top_left_y_2,
                        "box_w": new_box_w_2,
                        "box_h": new_box_h_2,
                    }
                    path_to_box_specs[path + ("first",)] = first_box_specs
                    path_to_box_specs[path + ("second",)] = second_box_specs
                elif self._path_to_box(path)["type"] == "box":
                    for box_id in box_ids_to_path:
                        if box_ids_to_path[box_id] == path:
                            number = box_id
                    html_figure = _add_html_text(
                        html_figure,
                        number,
                        path_to_box_specs[path]["top_left_x"],
                        path_to_box_specs[path]["top_left_y"],
                        path_to_box_specs[path]["box_w"],
                        path_to_box_specs[path]["box_h"],
                    )
        # display HTML representation
        return IPython.display.HTML(html_figure)
    def insert(self, box, side="above", box_id=None, fill_percent=50):
        """
        Insert a box into your dashboard layout.
        :param (dict) box: the box you are inserting into the dashboard.
        :param (str) side: specifies where your new box is going to be placed
            relative to the given 'box_id'. Valid values are 'above', 'below',
            'left', and 'right'.
        :param (int) box_id: the box id which is used as a reference for the
            insertion of the new box. Box ids are memoryless numbers that are
            generated on-the-fly and assigned to boxes in the layout each time
            .get_preview() is run.
        :param (float) fill_percent: specifies the percentage of the container
            box from the given 'side' that the new box occupies. For example
            if you apply the method\n
            .insert(box=new_box, box_id=2, side='left', fill_percent=20)\n
            to a dashboard object, a new box is inserted 20% from the left
            side of the box with id #2. Run .get_preview() to see the box ids
            assigned to each box in the dashboard layout.
        Default = 50
        Example:
        ```
        import plotly.dashboard_objs as dashboard
        box_a = {
            'type': 'box',
            'boxType': 'plot',
            'fileId': 'username:some#',
            'title': 'box a'
        }
        my_dboard = dashboard.Dashboard()
        my_dboard.insert(box_a)
        my_dboard.insert(box_a, 'left', 1)
        my_dboard.insert(box_a, 'below', 2)
        my_dboard.insert(box_a, 'right', 3)
        my_dboard.insert(box_a, 'above', 4, fill_percent=20)
        my_dboard.get_preview()
        ```
        """
        box_ids_to_path = self._compute_box_ids()
        # doesn't need box_id or side specified for first box
        if self["layout"] is None:
            self["layout"] = _container(
                box, _empty_box(), size=MASTER_HEIGHT, sizeUnit="px"
            )
        else:
            if box_id is None:
                raise _plotly_utils.exceptions.PlotlyError(
                    "Make sure the box_id is specfied if there is at least "
                    "one box in your dashboard."
                )
            if box_id not in box_ids_to_path:
                raise _plotly_utils.exceptions.PlotlyError(ID_NOT_VALID_MESSAGE)
            if fill_percent < 0 or fill_percent > 100:
                raise _plotly_utils.exceptions.PlotlyError(
                    "fill_percent must be a number between 0 and 100 " "inclusive"
                )
            # The box being inserted beside replaces its target's slot with a
            # new split container holding both.
            if side == "above":
                old_box = self.get_box(box_id)
                self._insert(
                    _container(box, old_box, direction="vertical", size=fill_percent),
                    box_ids_to_path[box_id],
                )
            elif side == "below":
                old_box = self.get_box(box_id)
                self._insert(
                    _container(
                        old_box, box, direction="vertical", size=100 - fill_percent
                    ),
                    box_ids_to_path[box_id],
                )
            elif side == "left":
                old_box = self.get_box(box_id)
                self._insert(
                    _container(box, old_box, direction="horizontal", size=fill_percent),
                    box_ids_to_path[box_id],
                )
            elif side == "right":
                old_box = self.get_box(box_id)
                self._insert(
                    _container(
                        old_box, box, direction="horizontal", size=100 - fill_percent
                    ),
                    box_ids_to_path[box_id],
                )
            else:
                raise _plotly_utils.exceptions.PlotlyError(
                    "If there is at least one box in your dashboard, you "
                    "must specify a valid side value. You must choose from "
                    "'above', 'below', 'left', and 'right'."
                )
        self._set_dashboard_size()
    def remove(self, box_id):
        """
        Remove a box from the dashboard by its box_id.
        Example:
        ```
        import plotly.dashboard_objs as dashboard
        box_a = {
            'type': 'box',
            'boxType': 'plot',
            'fileId': 'username:some#',
            'title': 'box a'
        }
        my_dboard = dashboard.Dashboard()
        my_dboard.insert(box_a)
        my_dboard.remove(1)
        my_dboard.get_preview()
        ```
        """
        box_ids_to_path = self._compute_box_ids()
        if box_id not in box_ids_to_path:
            raise _plotly_utils.exceptions.PlotlyError(ID_NOT_VALID_MESSAGE)
        path = box_ids_to_path[box_id]
        if path != ("first",):
            # replace the parent container with the removed box's sibling
            container_for_box_id = self._path_to_box(path[:-1])
            if path[-1] == "first":
                adjacent_path = "second"
            elif path[-1] == "second":
                adjacent_path = "first"
            adjacent_box = container_for_box_id[adjacent_path]
            self._insert(adjacent_box, path[:-1])
        else:
            # removing the only box empties the dashboard
            self["layout"] = None
        self._set_dashboard_size()
    def swap(self, box_id_1, box_id_2):
        """
        Swap two boxes with their specified ids.
        Example:
        ```
        import plotly.dashboard_objs as dashboard
        box_a = {
            'type': 'box',
            'boxType': 'plot',
            'fileId': 'username:first#',
            'title': 'box a'
        }
        box_b = {
            'type': 'box',
            'boxType': 'plot',
            'fileId': 'username:second#',
            'title': 'box b'
        }
        my_dboard = dashboard.Dashboard()
        my_dboard.insert(box_a)
        my_dboard.insert(box_b, 'above', 1)
        # check box at box id 1
        box_at_1 = my_dboard.get_box(1)
        print(box_at_1)
        my_dboard.swap(1, 2)
        box_after_swap = my_dboard.get_box(1)
        print(box_after_swap)
        ```
        """
        box_ids_to_path = self._compute_box_ids()
        box_a = self.get_box(box_id_1)
        box_b = self.get_box(box_id_2)
        box_a_path = box_ids_to_path[box_id_1]
        box_b_path = box_ids_to_path[box_id_2]
        # write each box into the other's slot
        for pairs in [(box_a_path, box_b), (box_b_path, box_a)]:
            loc_in_dashboard = self["layout"]
            for first_second in pairs[0][:-1]:
                loc_in_dashboard = loc_in_dashboard[first_second]
            loc_in_dashboard[pairs[0][-1]] = pairs[1]
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@chart-studio@chart_studio@dashboard_objs@dashboard_objs.py@.PATH_END.py
|
{
"filename": "flip_frequencies.py",
"repo_name": "JordanHoffmann3/RedisperseFRB",
"repo_path": "RedisperseFRB_extracted/RedisperseFRB-main/src/flip_frequencies.py",
"type": "Python"
}
|
"""
Program: flip_frequencies.py
Author: Jordan Hoffmann
Date: 07/03/2023
Purpose: Reverse the frequency (channel) ordering of a filterbank file,
         updating the header values fch1 and foff to match the flipped data.
Last update: Initial creation

Inputs:
    filename = Input filterbank (.fil) file name
    outfile  = Output filterbank file name

Outputs:
    Frequency-flipped filterbank written to <outfile>
"""
import numpy as np
import sigproc
import logging
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
def main():
    """Reverse the frequency ordering of a filterbank file.

    Reads the input filterbank, negates the channel bandwidth (foff),
    moves the reference frequency (fch1) to the opposite edge of the
    band, flips the per-sample channel ordering of the data, and writes
    the result to a new filterbank file.
    """
    # Parse command line parameters.  (`store_true` already defaults
    # verbose to False; the old unused `nxy` default has been dropped.)
    parser = ArgumentParser(
        description='Reverse the frequency ordering of a filterbank file',
        formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Be verbose')
    parser.add_argument(dest='filename', help='Filterbank file name', type=str)
    parser.add_argument(dest='outfile', help='Output file name', type=str)
    values = parser.parse_args()

    if values.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    f = sigproc.SigprocFile(values.filename)

    # Reverse header: negate the channel width, then shift the reference
    # frequency to the opposite end of the band using the *new* foff.
    header = f.header
    # Leftover debug print routed through logging so it honors --verbose.
    logging.debug('Original foff: %s', header['foff'])
    header['foff'] = -header['foff']
    header['fch1'] = header['fch1'] + (header['nchans'] - 1) * header['foff']

    # Reverse frequency ordering in data (one column per channel).
    f.seek_data()
    v = np.fromfile(f.fin, dtype=np.uint8, count=-1)
    v.shape = (-1, f.nchans)
    v = np.fliplr(v)

    # Write to new filterbank file and close both files.
    f_out = sigproc.SigprocFile(values.outfile, 'wb', header)
    f_out.seek_data()
    v.tofile(f_out.fin)

    f.fin.flush()
    f.fin.close()
    f_out.fin.flush()
    f_out.fin.close()
# Guard the entry point so importing this module does not run the script.
if __name__ == '__main__':
    main()
|
JordanHoffmann3REPO_NAMERedisperseFRBPATH_START.@RedisperseFRB_extracted@RedisperseFRB-main@src@flip_frequencies.py@.PATH_END.py
|
{
"filename": "init_testrunner.py",
"repo_name": "bwinkel/pycraf",
"repo_path": "pycraf_extracted/pycraf-master/pycraf/init_testrunner.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
# Create the test function for self test
from astropy.tests.runner import TestRunner
test = TestRunner.make_test_runner_in(os.path.dirname(__file__))
except ImportError:
def test():
import warnings
warnings.warn(
'Package "astropy" is needed for using the "test()" function'
)
test.__test__ = False
__all__ = ['test']
|
bwinkelREPO_NAMEpycrafPATH_START.@pycraf_extracted@pycraf-master@pycraf@init_testrunner.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "njcuk9999/apero-drs",
"repo_path": "apero-drs_extracted/apero-drs-main/apero/tools/recipes/bin/README.md",
"type": "Markdown"
}
|
# General tools
General tools for use by the user should be placed here.
|
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@apero@tools@recipes@bin@README.md@.PATH_END.py
|
{
"filename": "helpers.py",
"repo_name": "smfactor/Argus",
"repo_path": "Argus_extracted/Argus-main/helpers.py",
"type": "Python"
}
|
import numpy as np
def get8(im, i, j):
    """Return the values of the (up to 8) in-bounds neighbours of pixel (i, j).

    Generalized from the original hard-coded 256x256 image (indices 0..255)
    to any 2-D array: neighbours that fall outside the array are omitted.
    For a 256x256 input this returns the same set of neighbour values as
    before (the ordering within the returned array is not part of the
    contract -- callers use the values for medians).

    :param im: 2-D image array.
    :param i: row index of the pixel.
    :param j: column index of the pixel.
    :return: 1-D array of neighbour values, row-major order.
    """
    nrow, ncol = im.shape[0], im.shape[1]
    vals = []
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue  # skip the centre pixel itself
            ii, jj = i + di, j + dj
            if 0 <= ii < nrow and 0 <= jj < ncol:
                vals.append(im[ii, jj])
    return np.array(vals)
def fixim(im, mask):
    """Repair masked pixels by iterative 8-neighbour median filling.

    Bad pixels (non-zero entries of ``mask``) are first replaced with NaN
    and then repeatedly overwritten by the nanmedian of their neighbours
    (via ``get8``) until no NaN remains.  Several passes may be required
    when bad pixels form clusters.
    """
    # Flag the bad pixels with NaN so they are excluded from medians.
    working = np.copy(im)
    working[np.where(mask)] = np.nan

    repaired = np.copy(working)
    while np.isnan(repaired).any():
        rows, cols = np.where(np.isnan(repaired))
        # Medians are drawn from the previous pass's snapshot (`working`)
        # so all pixels in a pass see the same data.
        for r, c in zip(rows, cols):
            repaired[r, c] = np.nanmedian(get8(working, r, c))
        working = np.copy(repaired)
    return repaired
def findStar(im, xg, yg, r):
    '''
    Finds the flux weighted centroid of im in a circle centered at (xg, yg)
    with radius r.

    :param im:
        image array. convention is im[y,x].
    :param (xg, yg):
        initial guess for the center of the flux peak.
    :param r:
        radius to find the flux weighted centroid within (around (xg, yg)).
    '''
    ny, nx = np.shape(im)

    total_flux = 0
    xsum = 0
    ysum = 0
    # Accumulate flux and flux-weighted offsets over a disc of radius r.
    # Coordinates wrap around the image edges (modulo the image size).
    for dx in range(-r, r + 1):
        for dy in range(-r, r + 1):
            if np.sqrt(dx**2 + dy**2) <= r:
                flux = im[(yg + dy) % ny, (xg + dx) % nx]
                total_flux += flux
                xsum += flux * dx
                ysum += flux * dy

    # Normalise by the total flux and shift back to absolute coordinates.
    return (xsum / total_flux + xg, ysum / total_flux + yg)
def readMNAout(path, params, chi2):
    '''
    Reads output of analyzeMN, appends median and 1sigma errorbar values to params array.
    returns array of log likelihood, chi2, and median and 1sigma parameter values

    :param path:
        path to output file
    :param params:
        array to append median and 1sigma parameter values to
    :param chi2:
        chi^2 value of model from kerphCor (also gets appended onto params)
    '''
    with open(path, 'r') as fh:
        # Skip the two intro lines, then read the evidence line.
        fh.readline()
        fh.readline()
        fields = fh.readline().split()

        # lgZ and its uncertainty slgZ.
        params = np.append(params, float(fields[2]))
        params = np.append(params, float(fields[4]))

        # chi^2 supplied by the caller.
        params = np.append(params, chi2)

        # Skip the separator line before the parameter table.
        fh.readline()

        # Median and 1-sigma value for each remaining parameter line.
        for line in fh:
            fields = line.split()
            params = np.append(params, float(fields[1]))
            params = np.append(params, float(fields[3]))
    return params
def bpix_close(bad_pix_map, xc, yc):
    '''
    returns the distance to the closest bad pixel from the flux centroid

    :param bad_pix_map:
        bad pixel map: non zero values are bad pixels
    :param (xc,yc):
        flux centroid from output of findStar
    '''
    bad_rows, bad_cols = np.where(bad_pix_map)
    # Euclidean distance from the centroid to every bad pixel
    # (rows are y, columns are x).
    dx = bad_cols - xc
    dy = bad_rows - yc
    return np.min(np.sqrt(dx * dx + dy * dy))
|
smfactorREPO_NAMEArgusPATH_START.@Argus_extracted@Argus-main@helpers.py@.PATH_END.py
|
{
"filename": "_customdatasrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymapbox/_customdatasrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the densitymapbox ``customdatasrc`` property."""

    def __init__(
        self, plotly_name="customdatasrc", parent_name="densitymapbox", **kwargs
    ):
        # Default edit_type is "none" unless explicitly overridden.
        edit_type = kwargs.pop("edit_type", "none")
        super(CustomdatasrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymapbox@_customdatasrc.py@.PATH_END.py
|
{
"filename": "convergence.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/pymc/stats/convergence.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import enum
import logging
from collections.abc import Sequence
from typing import Any
import arviz
from pymc.util import get_untransformed_name, is_transformed_name
# Map the string levels carried by SamplerWarning.level onto the stdlib
# logging level constants; unknown strings fall back to WARNING in
# log_warning().
_LEVELS = {
    "info": logging.INFO,
    "error": logging.ERROR,
    "warn": logging.WARN,
    "debug": logging.DEBUG,
    "critical": logging.CRITICAL,
}

# Module-level logger used to emit sampler warnings.
logger = logging.getLogger(__name__)
@enum.unique
class WarningType(enum.Enum):
    """Categories of diagnostic warnings attached to SamplerWarning.kind."""

    # For HMC and NUTS
    DIVERGENCE = 1
    TUNING_DIVERGENCE = 2
    DIVERGENCES = 3
    TREEDEPTH = 4
    # Problematic sampler parameters
    BAD_PARAMS = 5
    # Indications that chains did not converge, eg Rhat
    CONVERGENCE = 6
    BAD_ACCEPTANCE = 7
    BAD_ENERGY = 8
@dataclasses.dataclass
class SamplerWarning:
    """A single diagnostic warning produced during sampling or by the
    convergence checks; rendered through the module logger."""

    kind: WarningType
    message: str
    level: str  # one of the _LEVELS keys ("info", "warn", "error", ...)
    step: int | None = None
    exec_info: Any | None = None
    extra: Any | None = None
    divergence_point_source: dict | None = None
    divergence_point_dest: dict | None = None
    divergence_info: Any | None = None
def run_convergence_checks(idata: arviz.InferenceData, model) -> list[SamplerWarning]:
    """Run convergence diagnostics on a posterior and collect warnings.

    Checks, in order: presence of a posterior, divergences, tree-depth
    saturation, trace length, number of chains, and finally R-hat and
    effective sample size computed with arviz.

    Parameters
    ----------
    idata : arviz.InferenceData
        Trace to diagnose; must carry a "posterior" group for the full
        set of checks to run.
    model
        PyMC model whose free RVs/deterministics name the variables to
        check.

    Returns
    -------
    list[SamplerWarning]
        Possibly-empty list of diagnostics; never raises for bad traces.
    """
    warnings: list[SamplerWarning] = []
    if not hasattr(idata, "posterior"):
        msg = "No posterior samples. Unable to run convergence checks"
        warn = SamplerWarning(WarningType.BAD_PARAMS, msg, "info", None, None, None)
        warnings.append(warn)
        return warnings

    warnings += warn_divergences(idata)
    warnings += warn_treedepth(idata)

    # R-hat/ESS are unreliable on very short traces; bail out early.
    if idata["posterior"].sizes["draw"] < 100:
        msg = "The number of samples is too small to check convergence reliably."
        warn = SamplerWarning(WarningType.BAD_PARAMS, msg, "info", None, None, None)
        warnings.append(warn)
        return warnings

    # Cross-chain diagnostics need more than one chain.
    if idata["posterior"].sizes["chain"] == 1:
        msg = "Only one chain was sampled, this makes it impossible to run some convergence checks"
        warn = SamplerWarning(WarningType.BAD_PARAMS, msg, "info")
        warnings.append(warn)
        return warnings
    elif idata["posterior"].sizes["chain"] < 4:
        msg = (
            "We recommend running at least 4 chains for robust computation of "
            "convergence diagnostics"
        )
        warn = SamplerWarning(WarningType.BAD_PARAMS, msg, "info")
        warnings.append(warn)

    # Map transformed RV names back to their untransformed counterparts
    # where those exist in the posterior.
    valid_name = [rv.name for rv in model.free_RVs + model.deterministics]
    varnames = []
    for rv in model.free_RVs:
        rv_name = rv.name
        if is_transformed_name(rv_name):
            rv_name2 = get_untransformed_name(rv_name)
            rv_name = rv_name2 if rv_name2 in valid_name else rv_name
        if rv_name in idata["posterior"]:
            varnames.append(rv_name)
    ess = arviz.ess(idata, var_names=varnames)
    rhat = arviz.rhat(idata, var_names=varnames)

    # 1.01 threshold per Vehtari et al. (arXiv:1903.08008).
    rhat_max = max(val.max() for val in rhat.values())
    if rhat_max > 1.01:
        msg = (
            "The rhat statistic is larger than 1.01 for some "
            "parameters. This indicates problems during sampling. "
            "See https://arxiv.org/abs/1903.08008 for details"
        )
        warn = SamplerWarning(WarningType.CONVERGENCE, msg, "info", extra=rhat)
        warnings.append(warn)

    # Require at least ~100 effective samples per chain.
    eff_min = min(val.min() for val in ess.values())
    eff_per_chain = eff_min / idata["posterior"].sizes["chain"]
    if eff_per_chain < 100:
        msg = (
            "The effective sample size per chain is smaller than 100 for some parameters. "
            " A higher number is needed for reliable rhat and ess computation. "
            "See https://arxiv.org/abs/1903.08008 for details"
        )
        warn = SamplerWarning(WarningType.CONVERGENCE, msg, "error", extra=ess)
        warnings.append(warn)

    return warnings
def warn_divergences(idata: arviz.InferenceData) -> list[SamplerWarning]:
    """Check sampler stats and creates a list of warnings about divergences."""
    sampler_stats = idata.get("sample_stats", None)
    if sampler_stats is None:
        return []

    diverging = sampler_stats.get("diverging", None)
    if diverging is None:
        return []

    # Count post-tuning divergences; no divergences means no warning.
    n_div = int(diverging.sum())
    if n_div == 0:
        return []
    return [
        SamplerWarning(
            WarningType.DIVERGENCES,
            f"There were {n_div} divergences after tuning. Increase `target_accept` or reparameterize.",
            "error",
        )
    ]
def warn_treedepth(idata: arviz.InferenceData) -> list[SamplerWarning]:
    """Check sampler stats and creates a list of warnings about tree depth."""
    sampler_stats = idata.get("sample_stats", None)
    if sampler_stats is None:
        return []

    rmtd = sampler_stats.get("reached_max_treedepth", None)
    if rmtd is None:
        return []

    # One warning per chain whose fraction of max-tree-depth hits
    # exceeds 5% of draws.
    return [
        SamplerWarning(
            WarningType.TREEDEPTH,
            f"Chain {int(c)} reached the maximum tree depth."
            " Increase `max_treedepth`, increase `target_accept` or reparameterize.",
            "warn",
        )
        for c in rmtd.chain
        if (rmtd.sel(chain=c).mean("draw") > 0.05).any()
    ]
def log_warning(warn: SamplerWarning):
    """Emit a single SamplerWarning through the module logger."""
    # Unknown level strings fall back to WARNING.
    logger.log(_LEVELS.get(warn.level, logging.WARNING), warn.message)
def log_warnings(warnings: Sequence[SamplerWarning]):
    """Emit every warning in the sequence through the module logger."""
    for item in warnings:
        log_warning(item)
def log_warning_stats(stats: Sequence[dict[str, Any]]):
    """Log 'warning' stats if present.

    Each entry of `stats` may carry a "warning" key holding either a
    SamplerWarning (logged at its own level via log_warning) or
    arbitrary text (logged at WARNING level).  A `None` stats sequence
    is silently ignored.
    """
    if stats is None:
        return

    for sts in stats:
        warn = sts.get("warning", None)
        if warn is None:
            continue
        if isinstance(warn, SamplerWarning):
            log_warning(warn)
        else:
            logger.warning(warn)
    # (redundant trailing bare `return` removed)
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@pymc@stats@convergence.py@.PATH_END.py
|
{
"filename": "photometry.py",
"repo_name": "desihub/fastspecfit",
"repo_path": "fastspecfit_extracted/fastspecfit-main/py/fastspecfit/photometry.py",
"type": "Python"
}
|
"""
fastspecfit.photometry
======================
Tools for handling filters and photometry calculations.
"""
import os
import numpy as np
import fitsio
from astropy.table import Table
from fastspecfit.logger import log
from fastspecfit.util import trapz, C_LIGHT, FLUXNORM
class Photometry(object):
    """Class to load filters and containing filter- and dust-related methods.

    Loads a photometric parameter (YAML) file describing bands, flux
    columns, and speclite filter names, and exposes filter sequences for
    observed-frame, synthesized, fiber, and (band-shifted) absolute
    magnitude photometry.
    """
    def __init__(self, fphotofile=None, fitstack=False, ignore_photometry=False):
        """
        Parameters
        ----------
        fphotofile : :class:`str` or path-like, optional
            Photometric parameter file; defaults to a file shipped with
            fastspecfit (stacked-phot.yaml when `fitstack`, otherwise
            legacysurvey-dr9.yaml).
        fitstack : :class:`bool`
            Select the stacked-photometry parameter file.
        ignore_photometry : :class:`bool`
            Boolean flag indicating whether or not to ignore the broadband
            photometry.
        """
        from speclite import filters
        import yaml

        # Fall back to a packaged parameter file when none is given.
        if fphotofile is None:
            from importlib import resources
            if fitstack:
                fphotofile = resources.files('fastspecfit').joinpath('data/stacked-phot.yaml')
            else:
                fphotofile = resources.files('fastspecfit').joinpath('data/legacysurvey-dr9.yaml')
        self.fphotofile = fphotofile

        try:
            with open(fphotofile, 'r') as F:
                fphoto = yaml.safe_load(F)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider narrowing to (OSError, yaml.YAMLError).
        except:
            errmsg = f'Unable to read parameter file {fphotofile}'
            log.critical(errmsg)
            raise IOError(errmsg)

        # Required top-level keys.
        self.uniqueid_col = fphoto['uniqueid']
        self.photounits = fphoto['photounits']

        # Optional column lists: attributes only exist when present in
        # the parameter file.
        if 'readcols' in fphoto:
            self.readcols = np.array(fphoto['readcols'])
        if 'dropcols' in fphoto:
            self.dropcols = np.array(fphoto['dropcols'])
        if 'outcols' in fphoto:
            self.outcols = np.array(fphoto['outcols'])

        self.bands = np.array(fphoto['bands'])
        self.bands_to_fit = np.array(fphoto['bands_to_fit'])
        self.fluxcols = np.array(fphoto['fluxcols'])
        self.fluxivarcols = np.array(fphoto['fluxivarcols'])
        self.min_uncertainty = np.array(fphoto['min_uncertainty'])

        # Optional viewer/survey metadata.
        if 'legacysurveydr' in fphoto:
            self.legacysurveydr = fphoto['legacysurveydr']
        if 'viewer_layer' in fphoto:
            self.viewer_layer = fphoto['viewer_layer']
        if 'viewer_pixscale' in fphoto:
            self.viewer_pixscale = fphoto['viewer_pixscale']
        if 'synth_bands' in fphoto:
            self.synth_bands = np.array(fphoto['synth_bands'])
        if 'fiber_bands' in fphoto:
            self.fiber_bands = np.array(fphoto['fiber_bands'])

        self.absmag_bands = np.array(fphoto['absmag_bands'])
        self.band_shift = np.array(fphoto['band_shift'])

        # Deprecated - trim excessive filter wavelengths where the response is
        # effectively ~zero.
        #trim = {
        #    'decam2014-u': [None, 4100.],
        #    'decam2014-g': [3850., 5700.],
        #    'decam2014-r': [5500., 7300.],
        #    'decam2014-i': [6750., 8750.],
        #    'decam2014-z': [8200., 10200.],
        #    'decam2014-Y': [9300., 10800.],
        #    'BASS-g': [None, 5750.],
        #    'BASS-r': [None, None],
        #    'MzLS-z': [8200., 10300.],
        #}
        #
        #def trim_filter(filt):
        #    """Trim a filter."""
        #    lo, hi = 0, filt.wavelength.size
        #    if trim[filtname][0] is not None:
        #        lo = np.searchsorted(filt.wavelength, trim[filtname][0], 'left')
        #    if trim[filtname][1] is not None:
        #        hi = np.searchsorted(filt.wavelength, trim[filtname][1], 'left')
        #    filtwave = filt.wavelength[lo:hi]
        #    filtresp = filt.response[lo:hi]
        #    # response has to go to zero
        #    filtwave = np.hstack((filtwave[0]-0.1, filtwave, filtwave[-1]+0.1))
        #    filtresp = np.hstack((0., filtresp, 0.))
        #    return filters.FilterResponse(filtwave, filtresp, filt.meta)

        # If fphoto['filters'] is a dictionary, then assume that there
        # are N/S filters (as indicated by photsys).
        self.filters = {}
        for key in fphoto['filters']:
            filts = []
            for filtname in fphoto['filters'][key]:
                filt = filters.load_filter(filtname)
                #if filtname in trim.keys():
                #    filt = trim_filter(filt)
                filts.append(filt)
            self.filters[key] = filters.FilterSequence(filts)

        self.synth_filters = {}
        for key in fphoto['synth_filters']:
            self.synth_filters[key] = filters.FilterSequence([filters.load_filter(filtname)
                                                              for filtname in fphoto['synth_filters'][key]])
        if 'fiber_bands' in fphoto:
            self.fiber_filters = {}
            for key in fphoto['fiber_filters']:
                self.fiber_filters[key] = filters.FilterSequence([filters.load_filter(filtname)
                                                                  for filtname in fphoto['fiber_filters'][key]])

        # absmag filters
        self.absmag_filters = filters.FilterSequence([filters.load_filter(filtname) for filtname in fphoto['absmag_filters']])

        # shifted absmag filters for use in kcorr_and_absmag
        self.filters_out = \
            filters.FilterSequence( [ f.create_shifted(band_shift=bs) for f, bs in zip(self.absmag_filters, self.band_shift) ])

        if len(self.absmag_bands) != len(self.band_shift):
            errmsg = 'absmag_bands and band_shift must have the same number of elements.'
            log.critical(errmsg)
            raise ValueError(errmsg)

        if self.photounits != 'nanomaggies':
            errmsg = 'nanomaggies is the only currently supported photometric unit!'
            log.critical(errmsg)
            raise ValueError(errmsg)

        # Do not fit the photometry.
        if ignore_photometry:
            self.bands_to_fit *= [False]
def synth_absmag(self, redshift, dmod, zmodelwave, zmodelflux):
    """Synthesize absolute magnitudes from the best-fitting SED.

    Parameters
    ----------
    redshift : :class:`float`
        Galaxy or QSO redshift.
    dmod : :class:`float`
        Distance modulus corresponding to `redshift`.
    zmodelwave : `numpy.ndarray`
        Observed-frame (redshifted) model wavelength array.
    zmodelflux : `numpy.ndarray`
        Observed-frame (redshifted) model spectrum.

    Returns
    -------
    synth_absmag : `numpy.ndarray`
        Absolute magnitudes based on synthesized photometry.
    synth_maggies_rest : `numpy.ndarray`
        Synthesized rest-frame photometry.
    """
    if redshift <= 0.:
        # Cannot shift to the rest frame without a positive redshift;
        # return zero-filled arrays of the right length.
        log.warning('Input redshift not defined, zero, or negative!')
        nabs = len(self.absmag_filters)
        return np.zeros(nabs, dtype='f8'), np.zeros(nabs, dtype='f8')

    # Blueshift the model to the rest frame (flux scales by 1+z,
    # wavelength divides by 1+z) and synthesize photometry through the
    # band-shifted output filters.
    zfactor = 1. + redshift
    synth_maggies_rest = self.get_ab_maggies_unchecked(
        self.filters_out, zmodelflux * zfactor / FLUXNORM,
        zmodelwave / zfactor)
    synth_absmag = -2.5 * np.log10(synth_maggies_rest) - dmod
    return synth_absmag, synth_maggies_rest
def kcorr_and_absmag(self, nanomaggies, ivar_nanomaggies, redshift, dmod,
                     photsys, zmodelwave, zmodelflux, synth_absmag,
                     synth_maggies_rest, snrmin=2.):
    """Compute K-corrected rest-frame photometry.

    Parameters
    ----------
    nanomaggies : `numpy.ndarray`
        Input photometric fluxes in the `filters_obs` bandpasses.
    ivar_nanomaggies : `numpy.ndarray`
        Inverse variance photometry corresponding to `nanomaggies`.
    redshift : :class:`float`
        Galaxy or QSO redshift.
    dmod : :class:`float`
        Distance modulus corresponding to `redshift`.
    photsys : hashable
        Key into `self.filters` selecting the observed-frame filter set.
    zmodelwave : `numpy.ndarray`
        Observed-frame (redshifted) model wavelength array.
    zmodelflux : `numpy.ndarray`
        Observed-frame (redshifted) model spectrum.
    synth_absmag : `numpy.ndarray`
        Absolute magnitudes based on synthesized photometry.
    synth_maggies_rest : `numpy.ndarray`
        Synthesized rest-frame photometry.
    snrmin : :class:`float`, defaults to 2.
        Minimum signal-to-noise ratio in the input photometry (`maggies`) in
        order for that bandpass to be used to compute a K-correction.

    Returns
    -------
    kcorr : `numpy.ndarray`
        K-corrections for each bandpass in `absmag_filters`.
    absmag : `numpy.ndarray`
        Absolute magnitudes, band-shifted according to `band_shift` (if
        provided) for each bandpass in `absmag_filters`.
    ivarabsmag : `numpy.ndarray`
        Inverse variance corresponding to `absmag`.
    synth_maggies_obs : `numpy.ndarray`
        Synthesized observed-frame photometry.

    Notes
    -----
    By default, the K-correction is computed by finding the observed-frame
    bandpass closest in wavelength (and with a minimum signal-to-noise ratio) to
    the desired band-shifted absolute magnitude bandpass. In other words, by
    default we endeavor to minimize the K-correction. The inverse variance,
    `ivarabsmag`, is derived from the inverse variance of the K-corrected
    photometry. If no bandpass is available then `ivarabsmag` is set to zero and
    `absmag` is derived from the synthesized rest-frame photometry.
    """
    nabs = len(self.absmag_filters)
    if redshift <= 0.:
        # No meaningful rest frame: return zeros of the right shapes.
        log.warning('Input redshift not defined, zero, or negative!')
        kcorr = np.zeros(nabs, dtype='f8')
        absmag = np.zeros(nabs, dtype='f8')
        ivarabsmag = np.zeros(nabs, dtype='f8')
        synth_maggies_obs = np.zeros(len(nanomaggies))
        return kcorr, absmag, ivarabsmag, synth_maggies_obs

    # Convert to maggies; zero out the ivar of bands excluded from the fit.
    maggies = nanomaggies * 1e-9
    ivarmaggies = (ivar_nanomaggies / 1e-9**2) * self.bands_to_fit

    # Input bandpasses, observed frame; maggies and synth_maggies_obs
    # should be very close.
    filters_obs = self.filters[photsys]
    lambda_obs = filters_obs.effective_wavelengths.value
    lambda_out = self.filters_out.effective_wavelengths.value

    # Synthesize observed-frame photometry (should be close to maggies).
    synth_maggies_obs = self.get_ab_maggies_unchecked(
        filters_obs, zmodelflux / FLUXNORM, zmodelwave)

    # K-correct from the nearest "good" bandpass (to minimize the
    # K-correction); the +1e10 penalty pushes low-S/N bands out of argmin.
    oband = np.empty(nabs, dtype=np.int16)
    for jj in range(nabs):
        lambdadist = np.abs(lambda_obs / (1. + redshift) - lambda_out[jj])
        oband[jj] = np.argmin(lambdadist + (maggies * np.sqrt(ivarmaggies) < snrmin) * 1e10)

    kcorr = + 2.5 * np.log10(synth_maggies_rest / synth_maggies_obs[oband])

    # m_R = M_Q + DM(z) + K_QR(z) or
    # M_Q = m_R - DM(z) - K_QR(z)
    absmag = np.copy(synth_absmag)
    ivarabsmag = np.zeros_like(absmag)

    # if we use synthesized photometry then ivarabsmag is zero
    # (which should never happen?)
    I = (maggies[oband] * np.sqrt(ivarmaggies[oband]) > snrmin)
    if np.any(I):
        C = 0.8483036976765437 # (0.4 * np.log(10.))**2
        absmag[I] = -2.5 * np.log10(maggies[oband[I]]) - dmod - kcorr[I]
        ivarabsmag[I] = maggies[oband[I]]**2 * ivarmaggies[oband[I]] * C

    return kcorr, absmag, ivarabsmag, synth_maggies_obs
@staticmethod
def get_ab_maggies_pre(filters, wave):
    """
    Compute preprocessing data for get_ab_maggies_unchecked() for given filter list
    and target wavelength.

    Raises RuntimeError if any filter response extends beyond `wave`.
    """
    # AB reference spectrum in erg/s/cm2/Hz times the speed of light in A/s
    # and converted to erg/s/cm2/A.
    ab_ref = 3.631e-20 * C_LIGHT * 1e13 / wave**2

    pre = []
    for filt in filters:
        # Every filter must lie strictly within the wavelength array.
        if wave[0] > filt.wavelength[0] or filt.wavelength[-1] > wave[-1]:
            raise RuntimeError('Filter boundaries exceed wavelength array')

        # Window of `wave` covering this filter's response; no padding is
        # needed because the bounds were just checked.
        lo = np.searchsorted(wave, filt.wavelength[0], 'right')
        hi = np.searchsorted(wave, filt.wavelength[-1], 'left') + 1
        window = wave[lo:hi]

        resp = np.interp(window, filt.wavelength, filt.response, left=0., right=0.) * window
        idenom = 1. / trapz(resp * ab_ref[lo:hi], x=window)
        pre.append((lo, hi, resp, idenom))

    return tuple(pre)
@staticmethod
def get_ab_maggies_unchecked(filters, flux, wave, pre=None):
    """Like `get_ab_maggies()`, but by-passing the units and, more
    importantly, the padding and interpolation of wave/flux that
    speclite does. We assume that the response function for each
    filter lies strictly within the bounds of wave, and that the
    response functions don't change so fast that we would need to
    interpolate wave to get an accurate integral.

    When wave comes from a stellar template, it has a very large
    wavelength range, so these assumptions are reasonable. When
    wave comes from an actual camera, however, the filter
    responses are known to exceed the cameras' range of observed
    wavelengths.

    flux and wave are assumed to be in erg/s/cm2/A and A, respectively.
    """
    # `is None` (PEP 8) rather than `== None`: equality can trigger an
    # element-wise comparison if a caller ever passes an array-like here.
    if pre is None:
        pre = Photometry.get_ab_maggies_pre(filters, wave)

    maggies = np.empty(len(pre))
    for ifilt, (lo, hi, resp, idenom) in enumerate(pre):
        # Band flux integral divided by the AB reference integral.
        numer = trapz(resp * flux[lo:hi], x=wave[lo:hi])
        maggies[ifilt] = numer * idenom
    return maggies
@staticmethod
def get_ab_maggies(filters, flux, wave):
    """This version of get_ab_maggies() is robust to wavelength vectors
    that do not entirely cover one the response range of one or more
    filters.
    """
    def _synth(flux, wave):
        # Synthesize photometry for a single spectrum or a stack of them.
        if flux.ndim > 1:
            nflux = flux.shape[0]
            maggies = np.empty((nflux, len(filters)))
            for ii in range(nflux):
                maggies[ii, :] = Photometry.get_ab_maggies_unchecked(filters, flux[ii, :], wave)
            return maggies
        return Photometry.get_ab_maggies_unchecked(filters, flux, wave)

    try:
        maggies = _synth(flux, wave)
    # `except Exception` instead of a bare `except` so that
    # KeyboardInterrupt/SystemExit still propagate; the retry path is a
    # deliberate best-effort fallback, not error suppression.
    except Exception:
        # pad in case of an object at very high redshift (z > 5.5)
        log.debug('Padding input spectrum due to insufficient wavelength coverage to synthesize photometry.')
        padflux, padwave = filters.pad_spectrum(flux, wave, axis=0, method='edge')
        maggies = _synth(padflux, padwave)

    return maggies
@staticmethod
def to_nanomaggies(maggies):
return maggies * 1e9
@staticmethod
def get_photflam(maggies, lambda_eff):
    """Convert maggies to f_lambda (erg/s/cm2/A) at the given effective
    wavelength."""
    # AB zero point (48.6 mag) combined with c/lambda^2 to go from
    # maggies to erg/s/cm2/A.
    conv = 10.**(-0.4 * 48.6) * C_LIGHT * 1e13 / lambda_eff**2
    return conv * maggies
@staticmethod
def parse_photometry(bands, maggies, lambda_eff, ivarmaggies=None,
                     nanomaggies=True, nsigma=2., min_uncertainty=None,
                     get_abmag=False):
    """Parse input (nano)maggies to various outputs and pack into a table.

    Parameters
    ----------
    bands : array-like
        Band names, one per row of the output table.
    maggies : `numpy.ndarray`
        Input fluxes; interpreted as nanomaggies when `nanomaggies=True`.
    lambda_eff : `numpy.ndarray`
        Effective wavelength of each band [Angstrom].
    ivarmaggies : `numpy.ndarray`, optional
        Inverse variances of `maggies` (zeros if not given).
    nanomaggies : bool
        True if the input maggies are actually 1e-9 maggies.
    nsigma : float
        Detection threshold separating measurements from upper limits.
    min_uncertainty : `numpy.ndarray`, optional
        Per-band minimum magnitude uncertainty added in quadrature
        (affects flam_ivar only).
    get_abmag : bool
        True iff table will be used by fastqa (which needs columns that
        fastspec does not).

    Returns
    -------
    phot : `astropy.table.Table`
        Photometric table with band, lambda_eff, nanomaggies(+ivar) and
        flam (10-17 erg/s/cm2/A, +ivar); plus AB-magnitude columns when
        `get_abmag` is set.
    """
    if ivarmaggies is None:
        ivarmaggies = np.zeros_like(maggies)

    # Gaia-only targets can sometimes have grz=-99.
    if np.any(ivarmaggies < 0.) or np.any(maggies == -99.):
        errmsg = 'All ivarmaggies must be zero or positive!'
        log.critical(errmsg)
        raise ValueError(errmsg)

    # Normalize to nanomaggies for the output columns.
    if nanomaggies:
        nanofactor = 1e-9 # [nanomaggies-->maggies]
        nmg = maggies
        nmg_ivar = ivarmaggies.copy()
    else:
        nanofactor = 1.
        nmg = maggies * 1e9
        nmg_ivar = ivarmaggies * 1e-18

    if get_abmag:
        # compute columns used only by fastqa
        abmag = np.zeros_like(maggies)
        abmag_limit = np.zeros_like(maggies)
        abmag_brighterr = np.zeros_like(maggies)
        abmag_fainterr = np.zeros_like(maggies)
        abmag_ivar = np.zeros_like(maggies)

        # deal with measurements
        good = (maggies > 0.)
        abmag[good] = -2.5 * np.log10(nanofactor * maggies[good])

        # deal with upper limits (S/N below nsigma)
        snr = maggies * np.sqrt(ivarmaggies)
        upper = ((ivarmaggies > 0.) & (snr <= nsigma))
        abmag_limit[upper] = - 2.5 * np.log10(nanofactor * nsigma / np.sqrt(ivarmaggies[upper]))

        # significant detections
        C = 0.4 * np.log(10.)
        good = (snr > nsigma)
        maggies_good = maggies[good]
        ivarmaggies_good = ivarmaggies[good]
        errmaggies = 1. / np.sqrt(ivarmaggies_good)
        abmag_brighterr[good] = errmaggies / (C * (maggies_good + errmaggies)) # bright end (flux upper limit)
        abmag_fainterr[good] = errmaggies / (C * (maggies_good - errmaggies)) # faint end (flux lower limit)
        abmag_ivar[good] = ivarmaggies_good * (C * maggies_good)**2

    # Add a minimum uncertainty in quadrature **but only for flam**, which
    # is used in the fitting.
    if min_uncertainty is not None:
        log.debug('Propagating minimum photometric uncertainties (mag): [{}]'.format(
            ' '.join(min_uncertainty.astype(str))))
        good = ((maggies != 0.) & (ivarmaggies > 0.))
        maggies_good = maggies[good]
        factor = 2.5 / np.log(10.)
        magerr = factor / (np.sqrt(ivarmaggies[good]) * maggies_good)
        magerr2 = magerr**2 + min_uncertainty[good]**2
        ivarmaggies[good] = factor**2 / (maggies_good**2 * magerr2)

    # Convert to f_lambda; for a stack of models, broadcast the
    # conversion factor across the extra dimension.
    factor = nanofactor * 10**(-0.4 * 48.6) * C_LIGHT * 1e13 / lambda_eff**2 # [maggies-->erg/s/cm2/A]
    ngal = 1 if maggies.ndim == 1 else maggies.shape[1]
    if ngal > 1:
        factor = factor[:, None] # broadcast for the models
    flam = maggies * factor
    flam_ivar = ivarmaggies / factor**2

    data = {
        'band': bands,
        'lambda_eff': lambda_eff,
        'nanomaggies': nmg,
        'nanomaggies_ivar': nmg_ivar,
        'flam': flam,
        'flam_ivar': flam_ivar,
    }

    dtypes = [
        bands.dtype,
        'f4',
        'f4',
        'f4',
        'f8', # flam
        'f8', # flam_ivar
    ]

    if get_abmag:
        # add columns used only by fastqa
        data_qa = {
            'abmag': abmag,
            'abmag_ivar': abmag_ivar,
            'abmag_brighterr': abmag_brighterr,
            'abmag_fainterr': abmag_fainterr,
            'abmag_limit': abmag_limit,
        }
        data |= data_qa

        dtypes_qa = [
            'f4',
            'f4',
            'f4',
            'f4',
            'f4',
        ]
        dtypes.extend(dtypes_qa)

    phot = Table(data=data, dtype=dtypes)
    return phot
@staticmethod
def get_dn4000(wave, flam, flam_ivar=None, redshift=None, rest=True):
    """Compute DN(4000) and, optionally, the inverse variance.

    Parameters
    ----------
    wave : `numpy.ndarray`
        Wavelength array [Angstrom].
    flam : `numpy.ndarray`
        Flux density in f_lambda units matching `wave`.
    flam_ivar : `numpy.ndarray`, optional
        Inverse variance of `flam`; when absent, uniform weights are
        used and the returned ivar is zero.
    redshift : :class:`float`, optional
        Required when `rest` is False to shift `wave` to the rest frame.
    rest : :class:`bool`
        True if `wave` is already in the rest frame.

    Returns
    -------
    dn4000, dn4000_ivar : :class:`float`
        Break amplitude and its inverse variance (both 0. on failure).

    Notes
    -----
    If `rest`=``False`` then `redshift` input is required.

    Require full wavelength coverage over the definition of the index.

    See eq. 11 in Bruzual 1983
    (https://articles.adsabs.harvard.edu/pdf/1983ApJ...273..105B) but with
    the "narrow" definition of Balogh et al. 1999.
    """
    dn4000, dn4000_ivar = 0., 0.

    # Work in the rest frame; the index is defined on f_nu, so convert
    # f_lambda --> f_nu (C_LIGHT is in km/s, hence the 1e5 factor).
    if rest is False or redshift is not None:
        restwave = wave / (1. + redshift) # [Angstrom]
        flam2fnu = (1. + redshift) * restwave**2 / (C_LIGHT * 1e5) # [erg/s/cm2/A-->erg/s/cm2/Hz, rest]
    else:
        restwave = wave
        flam2fnu = restwave**2 / (C_LIGHT * 1e5) # [erg/s/cm2/A-->erg/s/cm2/Hz, rest]

    # Require a 2-Angstrom pad around the break definition.
    wpad = 2.
    if np.min(restwave) > (3850.-wpad) or np.max(restwave) < (4100.+wpad):
        log.debug('Too little wavelength coverage to compute Dn(4000).')
        return dn4000, dn4000_ivar

    fnu = flam * flam2fnu # [erg/s/cm2/Hz]

    if flam_ivar is not None:
        fnu_ivar = flam_ivar / flam2fnu**2
    else:
        fnu_ivar = np.ones_like(flam) # uniform weights

    def _integrate(wave, flux, ivar, w1, w2):
        # Inverse-variance-weighted mean of `flux` over [w1, w2] via
        # Simpson integration; returns (index, index_var).
        from scipy import integrate
        # trim for speed
        I = ((wave > w1-wpad) & (wave < w2+wpad))
        J = np.logical_and(I, ivar > 0.)
        # Require no more than 10% of pixels are masked.
        if np.sum(I) == 0:
            return 0., 0.
        if np.sum(J) / np.sum(I) < 0.9:
            log.warning('More than 10% of pixels in Dn(4000) definition are masked.')
            return 0., 0.
        wave = wave[J]
        flux = flux[J]
        ivar = ivar[J]
        srt = np.argsort(wave)
        # should never have to extrapolate
        f1, f2 = np.interp((w1, w2), wave[srt], flux[srt])
        i1, i2 = np.interp((w1, w2), wave[srt], ivar[srt])
        # insert the boundary wavelengths then integrate
        I = ((wave > w1) & (wave < w2))
        wave = np.hstack((w1, wave[I], w2))
        flux = np.hstack((f1, flux[I], f2))
        ivar = np.hstack((i1, ivar[I], i2))
        #index_var = 1. / trapz(ivar, x=wave)
        #index = trapz(flux*ivar, x=wave) * index_var
        index_var = 1. / integrate.simpson(y=ivar, x=wave)
        index = integrate.simpson(y=flux*ivar, x=wave) * index_var
        return index, index_var

    blufactor = 3950. - 3850.
    redfactor = 4100. - 4000.
    try:
        # yes, blue wavelength go with red integral bounds
        numer, numer_var = _integrate(restwave, fnu, fnu_ivar, 4000., 4100.)
        denom, denom_var = _integrate(restwave, fnu, fnu_ivar, 3850., 3950.)
    # NOTE(review): bare except also catches KeyboardInterrupt; consider
    # narrowing to Exception.
    except:
        log.warning('Integration failed when computing DN(4000).')
        return dn4000, dn4000_ivar

    if denom == 0. or numer == 0.:
        log.warning('DN(4000) is ill-defined or could not be computed.')
        return dn4000, dn4000_ivar

    dn4000 = (blufactor / redfactor) * numer / denom
    if flam_ivar is not None:
        # Standard error propagation for a ratio of two integrals.
        dn4000_ivar = (1. / (dn4000**2)) / (denom_var / (denom**2) + numer_var / (numer**2))

    return dn4000, dn4000_ivar
def tractorphot_datamodel(from_file=False, datarelease='dr9'):
    """Initialize the tractorphot data model for a given Legacy Surveys data
    release.

    Args:
        from_file (bool, optional): read the datamodel from a file on-disk.
        datarelease (str, optional): data release to read; currently only `dr9`
            and `dr10` are supported.

    Returns an `astropy.table.Table` which follows the Tractor catalog
    datamodel for the given data release.  The table has a single zeroed row;
    only the column names, shapes, and dtypes are meaningful.

    Raises IOError if `datarelease` is not recognized (hard-coded branch only).
    """
    if from_file:
        # Read one row of a reference Tractor catalog, then zero out every
        # column so only names/shapes/dtypes survive as the data model.
        from desispec.io.meta import get_desi_root_readonly
        desi_root = get_desi_root_readonly()
        datamodel_file = f'{desi_root}/external/legacysurvey/{datarelease}/south/tractor/000/tractor-0001m002.fits'
        datamodel = Table(fitsio.read(datamodel_file, rows=0, upper=True))
        for col in datamodel.colnames:
            datamodel[col] = np.zeros(datamodel[col].shape, dtype=datamodel[col].dtype)
        #for col in datamodel.colnames:
        #    print("('{}', {}, '{}'),".format(col, datamodel[col].shape, datamodel[col].dtype))
    else:
        # Hard-coded data models (generated with the commented-out snippet
        # above).  Dtype strings are big-endian ('>f4', '>i2', ...) to match
        # the FITS on-disk byte order; the leading 1 in each shape reflects
        # the single-row reference catalog.
        if datarelease.lower() == 'dr9':
            COLS = [
                ('RELEASE', (1,), '>i2'),
                ('BRICKID', (1,), '>i4'),
                ('BRICKNAME', (1,), '<U8'),
                ('OBJID', (1,), '>i4'),
                ('BRICK_PRIMARY', (1,), 'bool'),
                ('MASKBITS', (1,), '>i2'),
                ('FITBITS', (1,), '>i2'),
                ('TYPE', (1,), '<U3'),
                ('RA', (1,), '>f8'),
                ('DEC', (1,), '>f8'),
                ('RA_IVAR', (1,), '>f4'),
                ('DEC_IVAR', (1,), '>f4'),
                ('BX', (1,), '>f4'),
                ('BY', (1,), '>f4'),
                ('DCHISQ', (1, 5), '>f4'),
                ('EBV', (1,), '>f4'),
                ('MJD_MIN', (1,), '>f8'),
                ('MJD_MAX', (1,), '>f8'),
                ('REF_CAT', (1,), '<U2'),
                ('REF_ID', (1,), '>i8'),
                ('PMRA', (1,), '>f4'),
                ('PMDEC', (1,), '>f4'),
                ('PARALLAX', (1,), '>f4'),
                ('PMRA_IVAR', (1,), '>f4'),
                ('PMDEC_IVAR', (1,), '>f4'),
                ('PARALLAX_IVAR', (1,), '>f4'),
                ('REF_EPOCH', (1,), '>f4'),
                ('GAIA_PHOT_G_MEAN_MAG', (1,), '>f4'),
                ('GAIA_PHOT_G_MEAN_FLUX_OVER_ERROR', (1,), '>f4'),
                ('GAIA_PHOT_G_N_OBS', (1,), '>i2'),
                ('GAIA_PHOT_BP_MEAN_MAG', (1,), '>f4'),
                ('GAIA_PHOT_BP_MEAN_FLUX_OVER_ERROR', (1,), '>f4'),
                ('GAIA_PHOT_BP_N_OBS', (1,), '>i2'),
                ('GAIA_PHOT_RP_MEAN_MAG', (1,), '>f4'),
                ('GAIA_PHOT_RP_MEAN_FLUX_OVER_ERROR', (1,), '>f4'),
                ('GAIA_PHOT_RP_N_OBS', (1,), '>i2'),
                ('GAIA_PHOT_VARIABLE_FLAG', (1,), 'bool'),
                ('GAIA_ASTROMETRIC_EXCESS_NOISE', (1,), '>f4'),
                ('GAIA_ASTROMETRIC_EXCESS_NOISE_SIG', (1,), '>f4'),
                ('GAIA_ASTROMETRIC_N_OBS_AL', (1,), '>i2'),
                ('GAIA_ASTROMETRIC_N_GOOD_OBS_AL', (1,), '>i2'),
                ('GAIA_ASTROMETRIC_WEIGHT_AL', (1,), '>f4'),
                ('GAIA_DUPLICATED_SOURCE', (1,), 'bool'),
                ('GAIA_A_G_VAL', (1,), '>f4'),
                ('GAIA_E_BP_MIN_RP_VAL', (1,), '>f4'),
                ('GAIA_PHOT_BP_RP_EXCESS_FACTOR', (1,), '>f4'),
                ('GAIA_ASTROMETRIC_SIGMA5D_MAX', (1,), '>f4'),
                ('GAIA_ASTROMETRIC_PARAMS_SOLVED', (1,), 'uint8'),
                ('FLUX_G', (1,), '>f4'),
                ('FLUX_R', (1,), '>f4'),
                ('FLUX_Z', (1,), '>f4'),
                ('FLUX_W1', (1,), '>f4'),
                ('FLUX_W2', (1,), '>f4'),
                ('FLUX_W3', (1,), '>f4'),
                ('FLUX_W4', (1,), '>f4'),
                ('FLUX_IVAR_G', (1,), '>f4'),
                ('FLUX_IVAR_R', (1,), '>f4'),
                ('FLUX_IVAR_Z', (1,), '>f4'),
                ('FLUX_IVAR_W1', (1,), '>f4'),
                ('FLUX_IVAR_W2', (1,), '>f4'),
                ('FLUX_IVAR_W3', (1,), '>f4'),
                ('FLUX_IVAR_W4', (1,), '>f4'),
                ('FIBERFLUX_G', (1,), '>f4'),
                ('FIBERFLUX_R', (1,), '>f4'),
                ('FIBERFLUX_Z', (1,), '>f4'),
                ('FIBERTOTFLUX_G', (1,), '>f4'),
                ('FIBERTOTFLUX_R', (1,), '>f4'),
                ('FIBERTOTFLUX_Z', (1,), '>f4'),
                ('APFLUX_G', (1, 8), '>f4'),
                ('APFLUX_R', (1, 8), '>f4'),
                ('APFLUX_Z', (1, 8), '>f4'),
                ('APFLUX_RESID_G', (1, 8), '>f4'),
                ('APFLUX_RESID_R', (1, 8), '>f4'),
                ('APFLUX_RESID_Z', (1, 8), '>f4'),
                ('APFLUX_BLOBRESID_G', (1, 8), '>f4'),
                ('APFLUX_BLOBRESID_R', (1, 8), '>f4'),
                ('APFLUX_BLOBRESID_Z', (1, 8), '>f4'),
                ('APFLUX_IVAR_G', (1, 8), '>f4'),
                ('APFLUX_IVAR_R', (1, 8), '>f4'),
                ('APFLUX_IVAR_Z', (1, 8), '>f4'),
                ('APFLUX_MASKED_G', (1, 8), '>f4'),
                ('APFLUX_MASKED_R', (1, 8), '>f4'),
                ('APFLUX_MASKED_Z', (1, 8), '>f4'),
                ('APFLUX_W1', (1, 5), '>f4'),
                ('APFLUX_W2', (1, 5), '>f4'),
                ('APFLUX_W3', (1, 5), '>f4'),
                ('APFLUX_W4', (1, 5), '>f4'),
                ('APFLUX_RESID_W1', (1, 5), '>f4'),
                ('APFLUX_RESID_W2', (1, 5), '>f4'),
                ('APFLUX_RESID_W3', (1, 5), '>f4'),
                ('APFLUX_RESID_W4', (1, 5), '>f4'),
                ('APFLUX_IVAR_W1', (1, 5), '>f4'),
                ('APFLUX_IVAR_W2', (1, 5), '>f4'),
                ('APFLUX_IVAR_W3', (1, 5), '>f4'),
                ('APFLUX_IVAR_W4', (1, 5), '>f4'),
                ('MW_TRANSMISSION_G', (1,), '>f4'),
                ('MW_TRANSMISSION_R', (1,), '>f4'),
                ('MW_TRANSMISSION_Z', (1,), '>f4'),
                ('MW_TRANSMISSION_W1', (1,), '>f4'),
                ('MW_TRANSMISSION_W2', (1,), '>f4'),
                ('MW_TRANSMISSION_W3', (1,), '>f4'),
                ('MW_TRANSMISSION_W4', (1,), '>f4'),
                ('NOBS_G', (1,), '>i2'),
                ('NOBS_R', (1,), '>i2'),
                ('NOBS_Z', (1,), '>i2'),
                ('NOBS_W1', (1,), '>i2'),
                ('NOBS_W2', (1,), '>i2'),
                ('NOBS_W3', (1,), '>i2'),
                ('NOBS_W4', (1,), '>i2'),
                ('RCHISQ_G', (1,), '>f4'),
                ('RCHISQ_R', (1,), '>f4'),
                ('RCHISQ_Z', (1,), '>f4'),
                ('RCHISQ_W1', (1,), '>f4'),
                ('RCHISQ_W2', (1,), '>f4'),
                ('RCHISQ_W3', (1,), '>f4'),
                ('RCHISQ_W4', (1,), '>f4'),
                ('FRACFLUX_G', (1,), '>f4'),
                ('FRACFLUX_R', (1,), '>f4'),
                ('FRACFLUX_Z', (1,), '>f4'),
                ('FRACFLUX_W1', (1,), '>f4'),
                ('FRACFLUX_W2', (1,), '>f4'),
                ('FRACFLUX_W3', (1,), '>f4'),
                ('FRACFLUX_W4', (1,), '>f4'),
                ('FRACMASKED_G', (1,), '>f4'),
                ('FRACMASKED_R', (1,), '>f4'),
                ('FRACMASKED_Z', (1,), '>f4'),
                ('FRACIN_G', (1,), '>f4'),
                ('FRACIN_R', (1,), '>f4'),
                ('FRACIN_Z', (1,), '>f4'),
                ('ANYMASK_G', (1,), '>i2'),
                ('ANYMASK_R', (1,), '>i2'),
                ('ANYMASK_Z', (1,), '>i2'),
                ('ALLMASK_G', (1,), '>i2'),
                ('ALLMASK_R', (1,), '>i2'),
                ('ALLMASK_Z', (1,), '>i2'),
                ('WISEMASK_W1', (1,), 'uint8'),
                ('WISEMASK_W2', (1,), 'uint8'),
                ('PSFSIZE_G', (1,), '>f4'),
                ('PSFSIZE_R', (1,), '>f4'),
                ('PSFSIZE_Z', (1,), '>f4'),
                ('PSFDEPTH_G', (1,), '>f4'),
                ('PSFDEPTH_R', (1,), '>f4'),
                ('PSFDEPTH_Z', (1,), '>f4'),
                ('GALDEPTH_G', (1,), '>f4'),
                ('GALDEPTH_R', (1,), '>f4'),
                ('GALDEPTH_Z', (1,), '>f4'),
                ('NEA_G', (1,), '>f4'),
                ('NEA_R', (1,), '>f4'),
                ('NEA_Z', (1,), '>f4'),
                ('BLOB_NEA_G', (1,), '>f4'),
                ('BLOB_NEA_R', (1,), '>f4'),
                ('BLOB_NEA_Z', (1,), '>f4'),
                ('PSFDEPTH_W1', (1,), '>f4'),
                ('PSFDEPTH_W2', (1,), '>f4'),
                ('PSFDEPTH_W3', (1,), '>f4'),
                ('PSFDEPTH_W4', (1,), '>f4'),
                ('WISE_COADD_ID', (1,), '<U8'),
                ('WISE_X', (1,), '>f4'),
                ('WISE_Y', (1,), '>f4'),
                # WISE light curves: 15 epochs in DR9.
                ('LC_FLUX_W1', (1, 15), '>f4'),
                ('LC_FLUX_W2', (1, 15), '>f4'),
                ('LC_FLUX_IVAR_W1', (1, 15), '>f4'),
                ('LC_FLUX_IVAR_W2', (1, 15), '>f4'),
                ('LC_NOBS_W1', (1, 15), '>i2'),
                ('LC_NOBS_W2', (1, 15), '>i2'),
                ('LC_FRACFLUX_W1', (1, 15), '>f4'),
                ('LC_FRACFLUX_W2', (1, 15), '>f4'),
                ('LC_RCHISQ_W1', (1, 15), '>f4'),
                ('LC_RCHISQ_W2', (1, 15), '>f4'),
                ('LC_MJD_W1', (1, 15), '>f8'),
                ('LC_MJD_W2', (1, 15), '>f8'),
                ('LC_EPOCH_INDEX_W1', (1, 15), '>i2'),
                ('LC_EPOCH_INDEX_W2', (1, 15), '>i2'),
                ('SERSIC', (1,), '>f4'),
                ('SERSIC_IVAR', (1,), '>f4'),
                ('SHAPE_R', (1,), '>f4'),
                ('SHAPE_R_IVAR', (1,), '>f4'),
                ('SHAPE_E1', (1,), '>f4'),
                ('SHAPE_E1_IVAR', (1,), '>f4'),
                ('SHAPE_E2', (1,), '>f4'),
                ('SHAPE_E2_IVAR', (1,), '>f4'),
                # added columns
                ('LS_ID', (1,), '>i8'),
                ('TARGETID', (1,), '>i8'),
                ]
        elif datarelease.lower() == 'dr10':
            # DR10 adds the i band (FLUX_I, NOBS_I, ...), the NGOOD_* columns,
            # and carries 17 WISE light-curve epochs instead of 15.
            COLS = [
                ('RELEASE', (1,), '>i2'),
                ('BRICKID', (1,), '>i4'),
                ('BRICKNAME', (1,), '<U8'),
                ('OBJID', (1,), '>i4'),
                ('BRICK_PRIMARY', (1,), 'bool'),
                ('MASKBITS', (1,), '>i2'),
                ('FITBITS', (1,), '>i2'),
                ('TYPE', (1,), '<U3'),
                ('RA', (1,), '>f8'),
                ('DEC', (1,), '>f8'),
                ('RA_IVAR', (1,), '>f4'),
                ('DEC_IVAR', (1,), '>f4'),
                ('BX', (1,), '>f4'),
                ('BY', (1,), '>f4'),
                ('DCHISQ', (1, 5), '>f4'),
                ('EBV', (1,), '>f4'),
                ('MJD_MIN', (1,), '>f8'),
                ('MJD_MAX', (1,), '>f8'),
                ('REF_CAT', (1,), '<U2'),
                ('REF_ID', (1,), '>i8'),
                ('PMRA', (1,), '>f4'),
                ('PMDEC', (1,), '>f4'),
                ('PARALLAX', (1,), '>f4'),
                ('PMRA_IVAR', (1,), '>f4'),
                ('PMDEC_IVAR', (1,), '>f4'),
                ('PARALLAX_IVAR', (1,), '>f4'),
                ('REF_EPOCH', (1,), '>f4'),
                ('GAIA_PHOT_G_MEAN_MAG', (1,), '>f4'),
                ('GAIA_PHOT_G_MEAN_FLUX_OVER_ERROR', (1,), '>f4'),
                ('GAIA_PHOT_G_N_OBS', (1,), '>i2'),
                ('GAIA_PHOT_BP_MEAN_MAG', (1,), '>f4'),
                ('GAIA_PHOT_BP_MEAN_FLUX_OVER_ERROR', (1,), '>f4'),
                ('GAIA_PHOT_BP_N_OBS', (1,), '>i2'),
                ('GAIA_PHOT_RP_MEAN_MAG', (1,), '>f4'),
                ('GAIA_PHOT_RP_MEAN_FLUX_OVER_ERROR', (1,), '>f4'),
                ('GAIA_PHOT_RP_N_OBS', (1,), '>i2'),
                ('GAIA_PHOT_VARIABLE_FLAG', (1,), 'bool'),
                ('GAIA_ASTROMETRIC_EXCESS_NOISE', (1,), '>f4'),
                ('GAIA_ASTROMETRIC_EXCESS_NOISE_SIG', (1,), '>f4'),
                ('GAIA_ASTROMETRIC_N_OBS_AL', (1,), '>i2'),
                ('GAIA_ASTROMETRIC_N_GOOD_OBS_AL', (1,), '>i2'),
                ('GAIA_ASTROMETRIC_WEIGHT_AL', (1,), '>f4'),
                ('GAIA_DUPLICATED_SOURCE', (1,), 'bool'),
                ('GAIA_A_G_VAL', (1,), '>f4'),
                ('GAIA_E_BP_MIN_RP_VAL', (1,), '>f4'),
                ('GAIA_PHOT_BP_RP_EXCESS_FACTOR', (1,), '>f4'),
                ('GAIA_ASTROMETRIC_SIGMA5D_MAX', (1,), '>f4'),
                ('GAIA_ASTROMETRIC_PARAMS_SOLVED', (1,), 'uint8'),
                ('FLUX_G', (1,), '>f4'),
                ('FLUX_R', (1,), '>f4'),
                ('FLUX_I', (1,), '>f4'),
                ('FLUX_Z', (1,), '>f4'),
                ('FLUX_W1', (1,), '>f4'),
                ('FLUX_W2', (1,), '>f4'),
                ('FLUX_W3', (1,), '>f4'),
                ('FLUX_W4', (1,), '>f4'),
                ('FLUX_IVAR_G', (1,), '>f4'),
                ('FLUX_IVAR_R', (1,), '>f4'),
                ('FLUX_IVAR_I', (1,), '>f4'),
                ('FLUX_IVAR_Z', (1,), '>f4'),
                ('FLUX_IVAR_W1', (1,), '>f4'),
                ('FLUX_IVAR_W2', (1,), '>f4'),
                ('FLUX_IVAR_W3', (1,), '>f4'),
                ('FLUX_IVAR_W4', (1,), '>f4'),
                ('FIBERFLUX_G', (1,), '>f4'),
                ('FIBERFLUX_R', (1,), '>f4'),
                ('FIBERFLUX_I', (1,), '>f4'),
                ('FIBERFLUX_Z', (1,), '>f4'),
                ('FIBERTOTFLUX_G', (1,), '>f4'),
                ('FIBERTOTFLUX_R', (1,), '>f4'),
                ('FIBERTOTFLUX_I', (1,), '>f4'),
                ('FIBERTOTFLUX_Z', (1,), '>f4'),
                ('APFLUX_G', (1, 8), '>f4'),
                ('APFLUX_R', (1, 8), '>f4'),
                ('APFLUX_I', (1, 8), '>f4'),
                ('APFLUX_Z', (1, 8), '>f4'),
                ('APFLUX_RESID_G', (1, 8), '>f4'),
                ('APFLUX_RESID_R', (1, 8), '>f4'),
                ('APFLUX_RESID_I', (1, 8), '>f4'),
                ('APFLUX_RESID_Z', (1, 8), '>f4'),
                ('APFLUX_BLOBRESID_G', (1, 8), '>f4'),
                ('APFLUX_BLOBRESID_R', (1, 8), '>f4'),
                ('APFLUX_BLOBRESID_I', (1, 8), '>f4'),
                ('APFLUX_BLOBRESID_Z', (1, 8), '>f4'),
                ('APFLUX_IVAR_G', (1, 8), '>f4'),
                ('APFLUX_IVAR_R', (1, 8), '>f4'),
                ('APFLUX_IVAR_I', (1, 8), '>f4'),
                ('APFLUX_IVAR_Z', (1, 8), '>f4'),
                ('APFLUX_MASKED_G', (1, 8), '>f4'),
                ('APFLUX_MASKED_R', (1, 8), '>f4'),
                ('APFLUX_MASKED_I', (1, 8), '>f4'),
                ('APFLUX_MASKED_Z', (1, 8), '>f4'),
                ('APFLUX_W1', (1, 5), '>f4'),
                ('APFLUX_W2', (1, 5), '>f4'),
                ('APFLUX_W3', (1, 5), '>f4'),
                ('APFLUX_W4', (1, 5), '>f4'),
                ('APFLUX_RESID_W1', (1, 5), '>f4'),
                ('APFLUX_RESID_W2', (1, 5), '>f4'),
                ('APFLUX_RESID_W3', (1, 5), '>f4'),
                ('APFLUX_RESID_W4', (1, 5), '>f4'),
                ('APFLUX_IVAR_W1', (1, 5), '>f4'),
                ('APFLUX_IVAR_W2', (1, 5), '>f4'),
                ('APFLUX_IVAR_W3', (1, 5), '>f4'),
                ('APFLUX_IVAR_W4', (1, 5), '>f4'),
                ('MW_TRANSMISSION_G', (1,), '>f4'),
                ('MW_TRANSMISSION_R', (1,), '>f4'),
                ('MW_TRANSMISSION_I', (1,), '>f4'),
                ('MW_TRANSMISSION_Z', (1,), '>f4'),
                ('MW_TRANSMISSION_W1', (1,), '>f4'),
                ('MW_TRANSMISSION_W2', (1,), '>f4'),
                ('MW_TRANSMISSION_W3', (1,), '>f4'),
                ('MW_TRANSMISSION_W4', (1,), '>f4'),
                ('NOBS_G', (1,), '>i2'),
                ('NOBS_R', (1,), '>i2'),
                ('NOBS_I', (1,), '>i2'),
                ('NOBS_Z', (1,), '>i2'),
                ('NOBS_W1', (1,), '>i2'),
                ('NOBS_W2', (1,), '>i2'),
                ('NOBS_W3', (1,), '>i2'),
                ('NOBS_W4', (1,), '>i2'),
                ('RCHISQ_G', (1,), '>f4'),
                ('RCHISQ_R', (1,), '>f4'),
                ('RCHISQ_I', (1,), '>f4'),
                ('RCHISQ_Z', (1,), '>f4'),
                ('RCHISQ_W1', (1,), '>f4'),
                ('RCHISQ_W2', (1,), '>f4'),
                ('RCHISQ_W3', (1,), '>f4'),
                ('RCHISQ_W4', (1,), '>f4'),
                ('FRACFLUX_G', (1,), '>f4'),
                ('FRACFLUX_R', (1,), '>f4'),
                ('FRACFLUX_I', (1,), '>f4'),
                ('FRACFLUX_Z', (1,), '>f4'),
                ('FRACFLUX_W1', (1,), '>f4'),
                ('FRACFLUX_W2', (1,), '>f4'),
                ('FRACFLUX_W3', (1,), '>f4'),
                ('FRACFLUX_W4', (1,), '>f4'),
                ('FRACMASKED_G', (1,), '>f4'),
                ('FRACMASKED_R', (1,), '>f4'),
                ('FRACMASKED_I', (1,), '>f4'),
                ('FRACMASKED_Z', (1,), '>f4'),
                ('FRACIN_G', (1,), '>f4'),
                ('FRACIN_R', (1,), '>f4'),
                ('FRACIN_I', (1,), '>f4'),
                ('FRACIN_Z', (1,), '>f4'),
                ('NGOOD_G', (1,), '>i2'),
                ('NGOOD_R', (1,), '>i2'),
                ('NGOOD_I', (1,), '>i2'),
                ('NGOOD_Z', (1,), '>i2'),
                ('ANYMASK_G', (1,), '>i2'),
                ('ANYMASK_R', (1,), '>i2'),
                ('ANYMASK_I', (1,), '>i2'),
                ('ANYMASK_Z', (1,), '>i2'),
                ('ALLMASK_G', (1,), '>i2'),
                ('ALLMASK_R', (1,), '>i2'),
                ('ALLMASK_I', (1,), '>i2'),
                ('ALLMASK_Z', (1,), '>i2'),
                ('WISEMASK_W1', (1,), 'uint8'),
                ('WISEMASK_W2', (1,), 'uint8'),
                ('PSFSIZE_G', (1,), '>f4'),
                ('PSFSIZE_R', (1,), '>f4'),
                ('PSFSIZE_I', (1,), '>f4'),
                ('PSFSIZE_Z', (1,), '>f4'),
                ('PSFDEPTH_G', (1,), '>f4'),
                ('PSFDEPTH_R', (1,), '>f4'),
                ('PSFDEPTH_I', (1,), '>f4'),
                ('PSFDEPTH_Z', (1,), '>f4'),
                ('GALDEPTH_G', (1,), '>f4'),
                ('GALDEPTH_R', (1,), '>f4'),
                ('GALDEPTH_I', (1,), '>f4'),
                ('GALDEPTH_Z', (1,), '>f4'),
                ('NEA_G', (1,), '>f4'),
                ('NEA_R', (1,), '>f4'),
                ('NEA_I', (1,), '>f4'),
                ('NEA_Z', (1,), '>f4'),
                ('BLOB_NEA_G', (1,), '>f4'),
                ('BLOB_NEA_R', (1,), '>f4'),
                ('BLOB_NEA_I', (1,), '>f4'),
                ('BLOB_NEA_Z', (1,), '>f4'),
                ('PSFDEPTH_W1', (1,), '>f4'),
                ('PSFDEPTH_W2', (1,), '>f4'),
                ('PSFDEPTH_W3', (1,), '>f4'),
                ('PSFDEPTH_W4', (1,), '>f4'),
                ('WISE_COADD_ID', (1,), '<U8'),
                ('WISE_X', (1,), '>f4'),
                ('WISE_Y', (1,), '>f4'),
                # WISE light curves: 17 epochs in DR10.
                ('LC_FLUX_W1', (1, 17), '>f4'),
                ('LC_FLUX_W2', (1, 17), '>f4'),
                ('LC_FLUX_IVAR_W1', (1, 17), '>f4'),
                ('LC_FLUX_IVAR_W2', (1, 17), '>f4'),
                ('LC_NOBS_W1', (1, 17), '>i2'),
                ('LC_NOBS_W2', (1, 17), '>i2'),
                ('LC_FRACFLUX_W1', (1, 17), '>f4'),
                ('LC_FRACFLUX_W2', (1, 17), '>f4'),
                ('LC_RCHISQ_W1', (1, 17), '>f4'),
                ('LC_RCHISQ_W2', (1, 17), '>f4'),
                ('LC_MJD_W1', (1, 17), '>f8'),
                ('LC_MJD_W2', (1, 17), '>f8'),
                ('LC_EPOCH_INDEX_W1', (1, 17), '>i2'),
                ('LC_EPOCH_INDEX_W2', (1, 17), '>i2'),
                ('SERSIC', (1,), '>f4'),
                ('SERSIC_IVAR', (1,), '>f4'),
                ('SHAPE_R', (1,), '>f4'),
                ('SHAPE_R_IVAR', (1,), '>f4'),
                ('SHAPE_E1', (1,), '>f4'),
                ('SHAPE_E1_IVAR', (1,), '>f4'),
                ('SHAPE_E2', (1,), '>f4'),
                ('SHAPE_E2_IVAR', (1,), '>f4'),
                # added columns
                ('LS_ID', (1,), '>i8'),
                ('TARGETID', (1,), '>i8'),
                ]
        else:
            errmsg = f'Unrecognized data release {datarelease}; only dr9 and dr10 currently supported.'
            log.critical(errmsg)
            raise IOError(errmsg)
        # Materialize the single-row, all-zeros table from the column specs.
        datamodel = Table()
        for col in COLS:
            datamodel[col[0]] = np.zeros(shape=col[1], dtype=col[2])
    return datamodel
def _gather_tractorphot_onebrick(input_cat, legacysurveydir, radius_match, racolumn, deccolumn,
                                 datamodel, restrict_region):
    """Support routine for gather_tractorphot.

    Gathers Tractor photometry for a set of objects that all lie on a single
    brick.  Rows with fully-populated targeting metadata (RELEASE, BRICKID,
    BRICK_OBJID, PHOTSYS) are matched by catalog ID; all remaining rows are
    matched positionally within `radius_match` arcsec.  Returns a table with
    the same length and row order as `input_cat`, following `datamodel`.
    """
    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from desitarget import geomask
    from desitarget.io import desitarget_resolve_dec
    # The caller guarantees a single brick per call.
    assert(np.all(input_cat['BRICKNAME'] == input_cat['BRICKNAME'][0]))
    brick = input_cat['BRICKNAME'][0]
    # Split rows into ID-matchable (idr9) and position-matchable (ipos) sets.
    idr9 = np.where((input_cat['RELEASE'] > 0) * (input_cat['BRICKID'] > 0) *
                    (input_cat['BRICK_OBJID'] > 0) * (input_cat['PHOTSYS'] != ''))[0]
    ipos = np.delete(np.arange(len(input_cat)), idr9)
    # Zero-filled output, one datamodel row per input row.
    out = Table(np.hstack(np.repeat(datamodel, len(np.atleast_1d(input_cat)))))
    out['TARGETID'] = input_cat['TARGETID']
    # DR9 targeting photometry exists
    if len(idr9) > 0:
        assert(np.all(input_cat['PHOTSYS'][idr9] == input_cat['PHOTSYS'][idr9][0]))
        # find the catalog; PHOTSYS is expected to be 'S' or 'N' here
        # (guaranteed non-blank by the idr9 selection above).
        photsys = input_cat['PHOTSYS'][idr9][0]
        if photsys == 'S':
            region = 'south'
        elif photsys == 'N':
            region = 'north'
        #raslice = np.array(['{:06d}'.format(int(ra*1000))[:3] for ra in input_cat['RA']])
        tractorfile = os.path.join(legacysurveydir, region, 'tractor', brick[:3], f'tractor-{brick}.fits')
        if not os.path.isfile(tractorfile):
            errmsg = f'Unable to find Tractor catalog {tractorfile}'
            log.critical(errmsg)
            raise IOError(errmsg)
        # Some commissioning and SV targets can have brick_primary==False, so don't require it here.
        #<Table length=1>
        #     TARGETID     BRICKNAME BRICKID BRICK_OBJID RELEASE CMX_TARGET     DESI_TARGET     SV1_DESI_TARGET SV2_DESI_TARGET SV3_DESI_TARGET SCND_TARGET
        #      int64          str8    int32     int32     int16    int64           int64              int64           int64           int64        int64
        #----------------- --------- ------- ----------- ------- ---------- ----------- ------------------- --------------- --------------- -----------
        #39628509856927757  0352p315  503252        4109    9010          0           0 2305843009213693952               0               0           0
        #<Table length=1>
        #     TARGETID        TARGET_RA          TARGET_DEC     TILEID SURVEY PROGRAM
        #      int64           float64            float64       int32   str7   str6
        #----------------- ------------------ ------------------ ------ ------ -------
        #39628509856927757 35.333944142134406 31.496490061792002  80611    sv1  bright
        # Read only OBJID first to find which rows we need, then re-read just
        # those rows in full -- keeps memory use down for large bricks.
        _tractor = fitsio.read(tractorfile, columns=['OBJID', 'BRICK_PRIMARY'], upper=True)
        #I = np.where(_tractor['BRICK_PRIMARY'] * np.isin(_tractor['OBJID'], input_cat['BRICK_OBJID']))[0]
        I = np.where(np.isin(_tractor['OBJID'], input_cat['BRICK_OBJID'][idr9]))[0]
        ## Some secondary programs have BRICKNAME!='' and BRICK_OBJID==0 (i.e.,
        ## not populated). However, there should always be a match here because
        ## we "repair" brick_objid in the main function.
        #if len(I) == 0:
        #    return Table()
        tractor_dr9 = Table(fitsio.read(tractorfile, rows=I, upper=True))
        # sort explicitly in order to ensure order
        srt = geomask.match_to(tractor_dr9['OBJID'], input_cat['BRICK_OBJID'][idr9])
        tractor_dr9 = tractor_dr9[srt]
        assert(np.all((tractor_dr9['BRICKID'] == input_cat['BRICKID'][idr9])*(tractor_dr9['OBJID'] == input_cat['BRICK_OBJID'][idr9])))
        tractor_dr9['LS_ID'] = np.int64(0) # will be filled in at the end
        tractor_dr9['TARGETID'] = input_cat['TARGETID'][idr9]
        out[idr9] = tractor_dr9
        del tractor_dr9
    # use positional matching
    if len(ipos) > 0:
        rad = radius_match * u.arcsec
        # resolve north/south unless restrict region is set
        if restrict_region is not None:
            tractorfile = os.path.join(legacysurveydir, restrict_region, 'tractor', brick[:3], f'tractor-{brick}.fits')
            if not os.path.isfile(tractorfile):
                # no catalog in the requested region; leave those rows zeroed
                return out
        else:
            # Prefer whichever hemisphere catalog exists; if both do, pick by
            # the median declination relative to the DESI resolve boundary.
            tractorfile_north = os.path.join(legacysurveydir, 'north', 'tractor', brick[:3], f'tractor-{brick}.fits')
            tractorfile_south = os.path.join(legacysurveydir, 'south', 'tractor', brick[:3], f'tractor-{brick}.fits')
            if os.path.isfile(tractorfile_north) and not os.path.isfile(tractorfile_south):
                tractorfile = tractorfile_north
            elif not os.path.isfile(tractorfile_north) and os.path.isfile(tractorfile_south):
                tractorfile = tractorfile_south
            elif os.path.isfile(tractorfile_north) and os.path.isfile(tractorfile_south):
                if np.median(input_cat[deccolumn][ipos]) < desitarget_resolve_dec():
                    tractorfile = tractorfile_south
                else:
                    tractorfile = tractorfile_north
            elif not os.path.isfile(tractorfile_north) and not os.path.isfile(tractorfile_south):
                return out
        _tractor = fitsio.read(tractorfile, columns=['RA', 'DEC', 'BRICK_PRIMARY'], upper=True)
        iprimary = np.where(_tractor['BRICK_PRIMARY'])[0] # only primary targets
        if len(iprimary) == 0:
            log.warning(f'No primary photometric targets on brick {brick}.')
        else:
            _tractor = _tractor[iprimary]
            coord_tractor = SkyCoord(ra=_tractor['RA']*u.deg, dec=_tractor['DEC']*u.deg)
            # Some targets can appear twice (with different targetids), so
            # to make sure we do it right, we have to loop. Example:
            #
            #    TARGETID     SURVEY PROGRAM      TARGET_RA         TARGET_DEC    OBJID BRICKID RELEASE  SKY GAIADR    RA     DEC   GROUP BRICKNAME
            #     int64        str7    str6       float64            float64      int64  int64   int64  int64 int64 float64 float64 int64    str8
            # --------------- ------ ------- ------------------ ----------------- ----- ------- ------- ----- ------ ------- ------- ----- ---------
            # 234545047666699    sv1   other 150.31145983340912 2.587887211205909    11  345369      53     0      0     0.0     0.0     0  1503p025
            # 243341140688909    sv1   other 150.31145983340912 2.587887211205909    13  345369      55     0      0     0.0     0.0     0  1503p025
            for indx_cat, (ra, dec, targetid) in enumerate(zip(input_cat[racolumn][ipos],
                                                               input_cat[deccolumn][ipos],
                                                               input_cat['TARGETID'][ipos])):
                coord_cat = SkyCoord(ra=ra*u.deg, dec=dec*u.deg)
                indx_tractor, d2d, _ = coord_cat.match_to_catalog_sky(coord_tractor)
                if d2d < rad:
                    # Re-read the single matched row (iprimary maps back to
                    # the on-disk row index) with all columns.
                    _tractor = Table(fitsio.read(tractorfile, rows=iprimary[indx_tractor], upper=True))
                    _tractor['LS_ID'] = np.int64(0) # will be filled in at the end
                    _tractor['TARGETID'] = targetid
                    out[ipos[indx_cat]] = _tractor[0]
    # Add a unique DR9 identifier.
    out['LS_ID'] = (out['RELEASE'].astype(np.int64) << 40) | (out['BRICKID'].astype(np.int64) << 16) | (out['OBJID'].astype(np.int64))
    assert(np.all(input_cat['TARGETID'] == out['TARGETID']))
    return out
def gather_tractorphot(input_cat, racolumn='TARGET_RA', deccolumn='TARGET_DEC',
                       legacysurveydir=None, dr9dir=None, radius_match=1.0,
                       restrict_region=None, columns=None):
    """Retrieve the Tractor catalog for all the objects in this catalog (one brick).

    Args:
        input_cat (astropy.table.Table): input table with the following
            (required) columns: TARGETID plus `racolumn` and `deccolumn`.
            Additional optional columns that will ensure proper matching are
            BRICKNAME, RELEASE, PHOTSYS, BRICKID, and BRICK_OBJID.
        racolumn (str): name of the RA column in `input_cat`.
        deccolumn (str): name of the Dec column in `input_cat`.
        legacysurveydir (str): full path to the location of the Tractor catalogs.
        dr9dir (str): deprecated keyword; please use `legacysurveydir`.
        radius_match (float, arcsec): matching radius (default, 1 arcsec).
        restrict_region (str): when positional matching, restrict the region to
            check for photometry, otherwise check both 'north' and 'south'
            (default None).
        columns (str array): return this subset of columns.

    Returns a table of Tractor photometry, row-matched to `input_cat`. Matches
    are identified either using BRICKID and BRICK_OBJID or using positional
    matching (`radius_match` arcsec radius).

    Raises:
        ValueError: if a required column is missing or `restrict_region` is invalid.
        IOError: if the Legacy Surveys directory cannot be found.
    """
    from desitarget.targets import decode_targetid
    from desiutil.brick import brickname

    if len(input_cat) == 0:
        log.warning('No objects in input catalog.')
        return Table()

    for col in ('TARGETID', racolumn, deccolumn):
        if col not in input_cat.colnames:
            errmsg = f'Missing required input column {col}'
            log.critical(errmsg)
            raise ValueError(errmsg)

    # If the matching metadata columns don't exist, add them with blank
    # entries so the per-brick worker can rely on their presence.
    # NOTE(review): np.zeros((1,)) relies on astropy broadcasting the length-1
    # array over multi-row tables -- confirm for catalogs with len > 1.
    COLS = [('RELEASE', (1,), '>i2'), ('BRICKID', (1,), '>i4'),
            ('BRICKNAME', (1,), '<U8'), ('BRICK_OBJID', (1,), '>i4'),
            ('PHOTSYS', (1,), '<U1')]
    for col in COLS:
        if col[0] not in input_cat.colnames:
            input_cat[col[0]] = np.zeros(col[1], dtype=col[2])

    if dr9dir is not None:
        log.warning('Keyword dr9dir is deprecated; please use legacysurveydir.')
        legacysurveydir = dr9dir
    if legacysurveydir is None:
        from desispec.io.meta import get_desi_root_readonly
        desi_root = get_desi_root_readonly()
        legacysurveydir = os.path.join(desi_root, 'external', 'legacysurvey', 'dr9')
    if not os.path.isdir(legacysurveydir):
        errmsg = f'Legacy Surveys directory {legacysurveydir} not found.'
        log.critical(errmsg)
        raise IOError(errmsg)

    # Infer the data release from the directory name.
    if 'dr9' in legacysurveydir:
        datarelease = 'dr9'
    elif 'dr10' in legacysurveydir:
        datarelease = 'dr10'
    else:
        errmsg = f'Unable to determine data release from {legacysurveydir}; falling back to DR9.'
        log.warning(errmsg)
        datarelease = 'dr9'

    if restrict_region is not None:
        if restrict_region not in ('north', 'south'):
            errmsg = "Optional input restrict_region must be either 'north' or 'south'."
            log.critical(errmsg)
            raise ValueError(errmsg)

    # BRICKNAME can sometimes be blank; infer it from the position.
    inobrickname = np.where(input_cat['BRICKNAME'] == '')[0]
    if len(inobrickname) > 0:
        log.debug(f'Inferring brickname for {len(inobrickname):,d} objects')
        input_cat['BRICKNAME'][inobrickname] = brickname(input_cat[racolumn][inobrickname],
                                                         input_cat[deccolumn][inobrickname])

    # Split into unique brickname(s), initialize the data model, and gather
    # the photometry one brick at a time.
    bricknames = input_cat['BRICKNAME']
    datamodel = tractorphot_datamodel(datarelease=datarelease)
    out = Table(np.hstack(np.repeat(datamodel, len(np.atleast_1d(input_cat)))))
    for onebrickname in set(bricknames):
        I = np.where(onebrickname == bricknames)[0]
        out[I] = _gather_tractorphot_onebrick(input_cat[I], legacysurveydir, radius_match,
                                              racolumn, deccolumn, datamodel, restrict_region)

    # Repair objects whose TARGETID encodes a different RELEASE than the one
    # we matched: reset their targeting metadata and fall back to positional
    # matching for just those rows.
    if 'RELEASE' in input_cat.colnames:
        _, _, check_release, _, _, _ = decode_targetid(input_cat['TARGETID'])
        bug = np.where(out['RELEASE'] != check_release)[0]
        if len(bug) > 0:
            input_cat['BRICKNAME'][bug] = brickname(input_cat[racolumn][bug], input_cat[deccolumn][bug])
            input_cat['RELEASE'][bug] = 0
            input_cat['BRICKID'][bug] = 0
            input_cat['BRICK_OBJID'][bug] = 0
            input_cat['PHOTSYS'][bug] = ''
            bugout = Table(np.hstack(np.repeat(datamodel, len(bug))))
            for onebrickname in set(input_cat['BRICKNAME'][bug]):
                I = np.where(onebrickname == input_cat['BRICKNAME'][bug])[0]
                bugout[I] = _gather_tractorphot_onebrick(input_cat[bug][I], legacysurveydir,
                                                         radius_match, racolumn, deccolumn,
                                                         datamodel, restrict_region)
            # BUGFIX: copy the re-gathered rows back into the output table;
            # previously `bugout` was built and then silently discarded.
            out[bug] = bugout

    if columns is not None:
        if type(columns) is not list:
            columns = columns.tolist()
        out = out[columns]
    return out
|
desihubREPO_NAMEfastspecfitPATH_START.@fastspecfit_extracted@fastspecfit-main@py@fastspecfit@photometry.py@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolar/marker/colorbar/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``y`` attribute of
    ``scatterpolar.marker.colorbar``.
    """

    def __init__(
        self, plotly_name="y", parent_name="scatterpolar.marker.colorbar", **kwargs
    ):
        # Default the edit type to "colorbars" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolar@marker@colorbar@_y.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/experimental/sparse/__init__.py",
"type": "Python"
}
|
# Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. currentmodule:: jax.experimental.sparse
The :mod:`jax.experimental.sparse` module includes experimental support for sparse matrix
operations in JAX. It is under active development, and the API is subject to change. The
primary interfaces made available are the :class:`BCOO` sparse array type, and the
:func:`sparsify` transform.
Batched-coordinate (BCOO) sparse matrices
-----------------------------------------
The main high-level sparse object currently available in JAX is the :class:`BCOO`,
or *batched coordinate* sparse array, which offers a compressed storage format compatible
with JAX transformations, in particular JIT (e.g. :func:`jax.jit`), batching
(e.g. :func:`jax.vmap`) and autodiff (e.g. :func:`jax.grad`).
Here is an example of creating a sparse array from a dense array:
>>> from jax.experimental import sparse
>>> import jax.numpy as jnp
>>> import numpy as np
>>> M = jnp.array([[0., 1., 0., 2.],
... [3., 0., 0., 0.],
... [0., 0., 4., 0.]])
>>> M_sp = sparse.BCOO.fromdense(M)
>>> M_sp
BCOO(float32[3, 4], nse=4)
Convert back to a dense array with the ``todense()`` method:
>>> M_sp.todense()
Array([[0., 1., 0., 2.],
[3., 0., 0., 0.],
[0., 0., 4., 0.]], dtype=float32)
The BCOO format is a somewhat modified version of the standard COO format, and the dense
representation can be seen in the ``data`` and ``indices`` attributes:
>>> M_sp.data # Explicitly stored data
Array([1., 2., 3., 4.], dtype=float32)
>>> M_sp.indices # Indices of the stored data
Array([[0, 1],
[0, 3],
[1, 0],
[2, 2]], dtype=int32)
BCOO objects have familiar array-like attributes, as well as sparse-specific attributes:
>>> M_sp.ndim
2
>>> M_sp.shape
(3, 4)
>>> M_sp.dtype
dtype('float32')
>>> M_sp.nse # "number of specified elements"
4
BCOO objects also implement a number of array-like methods, to allow you to use them
directly within jax programs. For example, here we compute the transposed matrix-vector
product:
>>> y = jnp.array([3., 6., 5.])
>>> M_sp.T @ y
Array([18., 3., 20., 6.], dtype=float32)
>>> M.T @ y # Compare to dense version
Array([18., 3., 20., 6.], dtype=float32)
BCOO objects are designed to be compatible with JAX transforms, including :func:`jax.jit`,
:func:`jax.vmap`, :func:`jax.grad`, and others. For example:
>>> from jax import grad, jit
>>> def f(y):
... return (M_sp.T @ y).sum()
...
>>> jit(grad(f))(y)
Array([3., 3., 4.], dtype=float32)
Note, however, that under normal circumstances :mod:`jax.numpy` and :mod:`jax.lax` functions
do not know how to handle sparse matrices, so attempting to compute things like
``jnp.dot(M_sp.T, y)`` will result in an error (however, see the next section).
Sparsify transform
------------------
An overarching goal of the JAX sparse implementation is to provide a means to switch from
dense to sparse computation seamlessly, without having to modify the dense implementation.
This sparse experiment accomplishes this through the :func:`sparsify` transform.
Consider this function, which computes a more complicated result from a matrix and a vector input:
>>> def f(M, v):
... return 2 * jnp.dot(jnp.log1p(M.T), v) + 1
...
>>> f(M, y)
Array([17.635532, 5.158883, 17.09438 , 7.591674], dtype=float32)
Were we to pass a sparse matrix to this directly, it would result in an error, because ``jnp``
functions do not recognize sparse inputs. However, with :func:`sparsify`, we get a version of
this function that does accept sparse matrices:
>>> f_sp = sparse.sparsify(f)
>>> f_sp(M_sp, y)
Array([17.635532, 5.158883, 17.09438 , 7.591674], dtype=float32)
Support for :func:`sparsify` includes a large number of the most common primitives, including:
- generalized (batched) matrix products & einstein summations (:obj:`~jax.lax.dot_general_p`)
- zero-preserving elementwise binary operations (e.g. :obj:`~jax.lax.add_p`, :obj:`~jax.lax.mul_p`, etc.)
- zero-preserving elementwise unary operations (e.g. :obj:`~jax.lax.abs_p`, :obj:`jax.lax.neg_p`, etc.)
- summation reductions (:obj:`~jax.lax.reduce_sum_p`)
- general indexing operations (:obj:`~jax.lax.slice_p`, `lax.dynamic_slice_p`, `lax.gather_p`)
- concatenation and stacking (:obj:`~jax.lax.concatenate_p`)
- transposition & reshaping ((:obj:`~jax.lax.transpose_p`, :obj:`~jax.lax.reshape_p`,
:obj:`~jax.lax.squeeze_p`, :obj:`~jax.lax.broadcast_in_dim_p`)
- some higher-order functions (:obj:`~jax.lax.cond_p`, :obj:`~jax.lax.while_p`, :obj:`~jax.lax.scan_p`)
- some simple 1D convolutions (:obj:`~jax.lax.conv_general_dilated_p`)
Nearly any :mod:`jax.numpy` function that lowers to these supported primitives can be used
within a sparsify transform to operate on sparse arrays. This set of primitives is enough
to enable relatively sophisticated sparse workflows, as the next section will show.
Example: sparse logistic regression
-----------------------------------
As an example of a more complicated sparse workflow, let's consider a simple logistic regression
implemented in JAX. Notice that the following implementation has no reference to sparsity:
>>> import functools
>>> from sklearn.datasets import make_classification
>>> from jax.scipy import optimize
>>> def sigmoid(x):
... return 0.5 * (jnp.tanh(x / 2) + 1)
...
>>> def y_model(params, X):
... return sigmoid(jnp.dot(X, params[1:]) + params[0])
...
>>> def loss(params, X, y):
... y_hat = y_model(params, X)
... return -jnp.mean(y * jnp.log(y_hat) + (1 - y) * jnp.log(1 - y_hat))
...
>>> def fit_logreg(X, y):
... params = jnp.zeros(X.shape[1] + 1)
... result = optimize.minimize(functools.partial(loss, X=X, y=y),
... x0=params, method='BFGS')
... return result.x
>>> X, y = make_classification(n_classes=2, random_state=1701)
>>> params_dense = fit_logreg(X, y)
>>> print(params_dense) # doctest: +SKIP
[-0.7298445 0.29893667 1.0248291 -0.44436368 0.8785025 -0.7724008
-0.62893456 0.2934014 0.82974285 0.16838408 -0.39774987 -0.5071844
0.2028872 0.5227761 -0.3739224 -0.7104083 2.4212713 0.6310087
-0.67060554 0.03139788 -0.05359547]
This returns the best-fit parameters of a dense logistic regression problem.
To fit the same model on sparse data, we can apply the :func:`sparsify` transform:
>>> Xsp = sparse.BCOO.fromdense(X) # Sparse version of the input
>>> fit_logreg_sp = sparse.sparsify(fit_logreg) # Sparse-transformed fit function
>>> params_sparse = fit_logreg_sp(Xsp, y)
>>> print(params_sparse) # doctest: +SKIP
[-0.72971725 0.29878938 1.0246326 -0.44430563 0.8784217 -0.77225566
-0.6288222 0.29335397 0.8293481 0.16820715 -0.39764675 -0.5069753
0.202579 0.522672 -0.3740134 -0.7102678 2.4209507 0.6310593
-0.670236 0.03132951 -0.05356663]
"""
# Note: import <name> as <name> is required for names to be exported.
# See PEP 484 & https://github.com/jax-ml/jax/issues/7570
from jax.experimental.sparse.ad import (
jacfwd as jacfwd,
jacobian as jacobian,
jacrev as jacrev,
grad as grad,
value_and_grad as value_and_grad,
)
from jax.experimental.sparse.bcoo import (
bcoo_broadcast_in_dim as bcoo_broadcast_in_dim,
bcoo_concatenate as bcoo_concatenate,
bcoo_conv_general_dilated as bcoo_conv_general_dilated,
bcoo_dot_general as bcoo_dot_general,
bcoo_dot_general_p as bcoo_dot_general_p,
bcoo_dot_general_sampled as bcoo_dot_general_sampled,
bcoo_dot_general_sampled_p as bcoo_dot_general_sampled_p,
bcoo_dynamic_slice as bcoo_dynamic_slice,
bcoo_extract as bcoo_extract,
bcoo_extract_p as bcoo_extract_p,
bcoo_fromdense as bcoo_fromdense,
bcoo_fromdense_p as bcoo_fromdense_p,
bcoo_gather as bcoo_gather,
bcoo_multiply_dense as bcoo_multiply_dense,
bcoo_multiply_sparse as bcoo_multiply_sparse,
bcoo_update_layout as bcoo_update_layout,
bcoo_reduce_sum as bcoo_reduce_sum,
bcoo_reshape as bcoo_reshape,
bcoo_rev as bcoo_rev,
bcoo_slice as bcoo_slice,
bcoo_sort_indices as bcoo_sort_indices,
bcoo_sort_indices_p as bcoo_sort_indices_p,
bcoo_spdot_general_p as bcoo_spdot_general_p,
bcoo_squeeze as bcoo_squeeze,
bcoo_sum_duplicates as bcoo_sum_duplicates,
bcoo_sum_duplicates_p as bcoo_sum_duplicates_p,
bcoo_todense as bcoo_todense,
bcoo_todense_p as bcoo_todense_p,
bcoo_transpose as bcoo_transpose,
bcoo_transpose_p as bcoo_transpose_p,
BCOO as BCOO,
)
from jax.experimental.sparse.bcsr import (
bcsr_broadcast_in_dim as bcsr_broadcast_in_dim,
bcsr_concatenate as bcsr_concatenate,
bcsr_dot_general as bcsr_dot_general,
bcsr_dot_general_p as bcsr_dot_general_p,
bcsr_extract as bcsr_extract,
bcsr_extract_p as bcsr_extract_p,
bcsr_fromdense as bcsr_fromdense,
bcsr_fromdense_p as bcsr_fromdense_p,
bcsr_sum_duplicates as bcsr_sum_duplicates,
bcsr_todense as bcsr_todense,
bcsr_todense_p as bcsr_todense_p,
BCSR as BCSR,
)
from jax.experimental.sparse._base import (
JAXSparse as JAXSparse
)
from jax.experimental.sparse.api import (
empty as empty,
eye as eye,
todense as todense,
todense_p as todense_p,
)
from jax.experimental.sparse.util import (
CuSparseEfficiencyWarning as CuSparseEfficiencyWarning,
SparseEfficiencyError as SparseEfficiencyError,
SparseEfficiencyWarning as SparseEfficiencyWarning,
)
from jax.experimental.sparse.coo import (
coo_fromdense as coo_fromdense,
coo_fromdense_p as coo_fromdense_p,
coo_matmat as coo_matmat,
coo_matmat_p as coo_matmat_p,
coo_matvec as coo_matvec,
coo_matvec_p as coo_matvec_p,
coo_todense as coo_todense,
coo_todense_p as coo_todense_p,
COO as COO,
)
from jax.experimental.sparse.csr import (
csr_fromdense as csr_fromdense,
csr_fromdense_p as csr_fromdense_p,
csr_matmat as csr_matmat,
csr_matmat_p as csr_matmat_p,
csr_matvec as csr_matvec,
csr_matvec_p as csr_matvec_p,
csr_todense as csr_todense,
csr_todense_p as csr_todense_p,
CSC as CSC,
CSR as CSR,
)
from jax.experimental.sparse.random import random_bcoo as random_bcoo
from jax.experimental.sparse.transform import (
sparsify as sparsify,
SparseTracer as SparseTracer,
)
from jax.experimental.sparse import linalg as linalg
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@experimental@sparse@__init__.py@.PATH_END.py
|
{
"filename": "test_pysemenov.py",
"repo_name": "DariaGangardt/pAGN",
"repo_path": "pAGN_extracted/pAGN-main/utils/opacity_sources/Semenov2003/test_pysemenov.py",
"type": "Python"
}
|
"""
test_pysemenov.py
Authors: Alessandro A. Trani, Daria Gangardt
This file performs a simple test of the python interface for the Semenov2003 opacity fortran code
Check pysemenov_module.sh for instructions how to compile the interface,
and check generate_combined_tables.py for how to generate a table of opacities for tabulation
"""
import pysemenov
# This file follows the flow of the Fortran main
# except that input and ouput is via Python
# See opacitypy.f for description of the options
ross = True # False for Planck opacities
model = 'nrm' # Normal iron abundances
top = 'c' # composite grains
shap = 'a' # aggregate shape
rho = 1e-11 # gas density, g/cm^3
T = 1e2 # temperature, kelvin
kappa = pysemenov.compute_kappa(top=top,ross=ross,model=model,shap=shap,rho=rho,T=T)
print("kappa in cm^2/g", kappa) # cm^2/g
print("should be ", 2.941995025939745)
|
DariaGangardtREPO_NAMEpAGNPATH_START.@pAGN_extracted@pAGN-main@utils@opacity_sources@Semenov2003@test_pysemenov.py@.PATH_END.py
|
{
"filename": "train.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/modes/train.md",
"type": "Markdown"
}
|
---
comments: true
description: Learn how to efficiently train object detection models using YOLO11 with comprehensive instructions on settings, augmentation, and hardware utilization.
keywords: Ultralytics, YOLO11, model training, deep learning, object detection, GPU training, dataset augmentation, hyperparameter tuning, model performance, apple silicon training
---
# Model Training with Ultralytics YOLO
<img width="1024" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-ecosystem-integrations.avif" alt="Ultralytics YOLO ecosystem and integrations">
## Introduction
Training a [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) model involves feeding it data and adjusting its parameters so that it can make accurate predictions. Train mode in Ultralytics YOLO11 is engineered for effective and efficient training of object detection models, fully utilizing modern hardware capabilities. This guide aims to cover all the details you need to get started with training your own models using YOLO11's robust set of features.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/LNwODJXcvt4?si=7n1UvGRLSd9p5wKs"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Train a YOLO model on Your Custom Dataset in Google Colab.
</p>
## Why Choose Ultralytics YOLO for Training?
Here are some compelling reasons to opt for YOLO11's Train mode:
- **Efficiency:** Make the most out of your hardware, whether you're on a single-GPU setup or scaling across multiple GPUs.
- **Versatility:** Train on custom datasets in addition to readily available ones like COCO, VOC, and ImageNet.
- **User-Friendly:** Simple yet powerful CLI and Python interfaces for a straightforward training experience.
- **Hyperparameter Flexibility:** A broad range of customizable hyperparameters to fine-tune model performance.
### Key Features of Train Mode
The following are some notable features of YOLO11's Train mode:
- **Automatic Dataset Download:** Standard datasets like COCO, VOC, and ImageNet are downloaded automatically on first use.
- **Multi-GPU Support:** Scale your training efforts seamlessly across multiple GPUs to expedite the process.
- **Hyperparameter Configuration:** The option to modify hyperparameters through YAML configuration files or CLI arguments.
- **Visualization and Monitoring:** Real-time tracking of training metrics and visualization of the learning process for better insights.
!!! tip
* YOLO11 datasets like COCO, VOC, ImageNet and many others automatically download on first use, i.e. `yolo train data=coco.yaml`
## Usage Examples
Train YOLO11n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. The training device can be specified using the `device` argument. If no argument is passed GPU `device=0` will be used if available, otherwise `device='cpu'` will be used. See Arguments section below for a full list of training arguments.
!!! example "Single-GPU and CPU Training Example"
Device is determined automatically. If a GPU is available then it will be used, otherwise training will start on CPU.
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.yaml") # build a new model from YAML
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
model = YOLO("yolo11n.yaml").load("yolo11n.pt") # build from YAML and transfer weights
# Train the model
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Build a new model from YAML and start training from scratch
yolo detect train data=coco8.yaml model=yolo11n.yaml epochs=100 imgsz=640
# Start training from a pretrained *.pt model
yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640
# Build a new model from YAML, transfer pretrained weights to it and start training
yolo detect train data=coco8.yaml model=yolo11n.yaml pretrained=yolo11n.pt epochs=100 imgsz=640
```
### Multi-GPU Training
Multi-GPU training allows for more efficient utilization of available hardware resources by distributing the training load across multiple GPUs. This feature is available through both the Python API and the command-line interface. To enable multi-GPU training, specify the GPU device IDs you wish to use.
!!! example "Multi-GPU Training Example"
To train with 2 GPUs, CUDA devices 0 and 1 use the following commands. Expand to additional GPUs as required.
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model with 2 GPUs
results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device=[0, 1])
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model using GPUs 0 and 1
yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=0,1
```
### Apple Silicon MPS Training
With the support for Apple silicon chips integrated in the Ultralytics YOLO models, it's now possible to train your models on devices utilizing the powerful Metal Performance Shaders (MPS) framework. The MPS offers a high-performance way of executing computation and image processing tasks on Apple's custom silicon.
To enable training on Apple silicon chips, you should specify 'mps' as your device when initiating the training process. Below is an example of how you could do this in Python and via the command line:
!!! example "MPS Training Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model with MPS
results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device="mps")
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model using MPS
yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=mps
```
While leveraging the computational power of the Apple silicon chips, this enables more efficient processing of the training tasks. For more detailed guidance and advanced configuration options, please refer to the [PyTorch MPS documentation](https://pytorch.org/docs/stable/notes/mps.html).
### Resuming Interrupted Trainings
Resuming training from a previously saved state is a crucial feature when working with deep learning models. This can come in handy in various scenarios, like when the training process has been unexpectedly interrupted, or when you wish to continue training a model with new data or for more epochs.
When training is resumed, Ultralytics YOLO loads the weights from the last saved model and also restores the optimizer state, [learning rate](https://www.ultralytics.com/glossary/learning-rate) scheduler, and the epoch number. This allows you to continue the training process seamlessly from where it was left off.
You can easily resume training in Ultralytics YOLO by setting the `resume` argument to `True` when calling the `train` method, and specifying the path to the `.pt` file containing the partially trained model weights.
Below is an example of how to resume an interrupted training using Python and via the command line:
!!! example "Resume Training Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/last.pt") # load a partially trained model
# Resume training
results = model.train(resume=True)
```
=== "CLI"
```bash
# Resume an interrupted training
yolo train resume model=path/to/last.pt
```
By setting `resume=True`, the `train` function will continue training from where it left off, using the state stored in the 'path/to/last.pt' file. If the `resume` argument is omitted or set to `False`, the `train` function will start a new training session.
Remember that checkpoints are saved at the end of every epoch by default, or at fixed intervals using the `save_period` argument, so you must complete at least 1 epoch to resume a training run.
## Train Settings
The training settings for YOLO models encompass various hyperparameters and configurations used during the training process. These settings influence the model's performance, speed, and [accuracy](https://www.ultralytics.com/glossary/accuracy). Key training settings include batch size, learning rate, momentum, and weight decay. Additionally, the choice of optimizer, [loss function](https://www.ultralytics.com/glossary/loss-function), and training dataset composition can impact the training process. Careful tuning and experimentation with these settings are crucial for optimizing performance.
{% include "macros/train-args.md" %}
!!! info "Note on Batch-size Settings"
The `batch` argument can be configured in three ways:
- **Fixed [Batch Size](https://www.ultralytics.com/glossary/batch-size)**: Set an integer value (e.g., `batch=16`), specifying the number of images per batch directly.
- **Auto Mode (60% GPU Memory)**: Use `batch=-1` to automatically adjust batch size for approximately 60% CUDA memory utilization.
- **Auto Mode with Utilization Fraction**: Set a fraction value (e.g., `batch=0.70`) to adjust batch size based on the specified fraction of GPU memory usage.
## Augmentation Settings and Hyperparameters
Augmentation techniques are essential for improving the robustness and performance of YOLO models by introducing variability into the [training data](https://www.ultralytics.com/glossary/training-data), helping the model generalize better to unseen data. The following table outlines the purpose and effect of each augmentation argument:
{% include "macros/augmentation-args.md" %}
These settings can be adjusted to meet the specific requirements of the dataset and task at hand. Experimenting with different values can help find the optimal augmentation strategy that leads to the best model performance.
!!! info
For more information about training augmentation operations, see the [reference section](../reference/data/augment.md).
## Logging
In training a YOLO11 model, you might find it valuable to keep track of the model's performance over time. This is where logging comes into play. Ultralytics' YOLO provides support for three types of loggers - Comet, ClearML, and TensorBoard.
To use a logger, select it from the dropdown menu in the code snippet above and run it. The chosen logger will be installed and initialized.
### Comet
[Comet](../integrations/comet.md) is a platform that allows data scientists and developers to track, compare, explain and optimize experiments and models. It provides functionalities such as real-time metrics, code diffs, and hyperparameters tracking.
To use Comet:
!!! example
=== "Python"
```python
# pip install comet_ml
import comet_ml
comet_ml.init()
```
Remember to sign in to your Comet account on their website and get your API key. You will need to add this to your environment variables or your script to log your experiments.
### ClearML
[ClearML](https://clear.ml/) is an open-source platform that automates tracking of experiments and helps with efficient sharing of resources. It is designed to help teams manage, execute, and reproduce their ML work more efficiently.
To use ClearML:
!!! example
=== "Python"
```python
# pip install clearml
import clearml
clearml.browser_login()
```
After running this script, you will need to sign in to your ClearML account on the browser and authenticate your session.
### TensorBoard
[TensorBoard](https://www.tensorflow.org/tensorboard) is a visualization toolkit for [TensorFlow](https://www.ultralytics.com/glossary/tensorflow). It allows you to visualize your TensorFlow graph, plot quantitative metrics about the execution of your graph, and show additional data like images that pass through it.
To use TensorBoard in [Google Colab](https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb):
!!! example
=== "CLI"
```bash
%load_ext tensorboard
%tensorboard --logdir ultralytics/runs # replace with 'runs' directory
```
To use TensorBoard locally run the below command and view results at http://localhost:6006/.
!!! example
=== "CLI"
```bash
tensorboard --logdir ultralytics/runs # replace with 'runs' directory
```
This will load TensorBoard and direct it to the directory where your training logs are saved.
After setting up your logger, you can then proceed with your model training. All training metrics will be automatically logged in your chosen platform, and you can access these logs to monitor your model's performance over time, compare different models, and identify areas for improvement.
## FAQ
### How do I train an [object detection](https://www.ultralytics.com/glossary/object-detection) model using Ultralytics YOLO11?
To train an object detection model using Ultralytics YOLO11, you can either use the Python API or the CLI. Below is an example for both:
!!! example "Single-GPU and CPU Training Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For more details, refer to the [Train Settings](#train-settings) section.
### What are the key features of Ultralytics YOLO11's Train mode?
The key features of Ultralytics YOLO11's Train mode include:
- **Automatic Dataset Download:** Automatically downloads standard datasets like COCO, VOC, and ImageNet.
- **Multi-GPU Support:** Scale training across multiple GPUs for faster processing.
- **Hyperparameter Configuration:** Customize hyperparameters through YAML files or CLI arguments.
- **Visualization and Monitoring:** Real-time tracking of training metrics for better insights.
These features make training efficient and customizable to your needs. For more details, see the [Key Features of Train Mode](#key-features-of-train-mode) section.
### How do I resume training from an interrupted session in Ultralytics YOLO11?
To resume training from an interrupted session, set the `resume` argument to `True` and specify the path to the last saved checkpoint.
!!! example "Resume Training Example"
=== "Python"
```python
from ultralytics import YOLO
# Load the partially trained model
model = YOLO("path/to/last.pt")
# Resume training
results = model.train(resume=True)
```
=== "CLI"
```bash
yolo train resume model=path/to/last.pt
```
Check the section on [Resuming Interrupted Trainings](#resuming-interrupted-trainings) for more information.
### Can I train YOLO11 models on Apple silicon chips?
Yes, Ultralytics YOLO11 supports training on Apple silicon chips utilizing the Metal Performance Shaders (MPS) framework. Specify 'mps' as your training device.
!!! example "MPS Training Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a pretrained model
model = YOLO("yolo11n.pt")
# Train the model on Apple silicon chip (M1/M2/M3/M4)
results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device="mps")
```
=== "CLI"
```bash
yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=mps
```
For more details, refer to the [Apple Silicon MPS Training](#apple-silicon-mps-training) section.
### What are the common training settings, and how do I configure them?
Ultralytics YOLO11 allows you to configure a variety of training settings such as batch size, learning rate, epochs, and more through arguments. Here's a brief overview:
| Argument | Default | Description |
| -------- | ------- | ---------------------------------------------------------------------- |
| `model` | `None` | Path to the model file for training. |
| `data` | `None` | Path to the dataset configuration file (e.g., `coco8.yaml`). |
| `epochs` | `100` | Total number of training epochs. |
| `batch` | `16` | Batch size, adjustable as integer or auto mode. |
| `imgsz` | `640` | Target image size for training. |
| `device` | `None` | Computational device(s) for training like `cpu`, `0`, `0,1`, or `mps`. |
| `save` | `True` | Enables saving of training checkpoints and final model weights. |
For an in-depth guide on training settings, check the [Train Settings](#train-settings) section.
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@modes@train.md@.PATH_END.py
|
{
"filename": "onsite_create_cat_B_calibration_files_with_batch.py",
"repo_name": "cta-observatory/cta-lstchain",
"repo_path": "cta-lstchain_extracted/cta-lstchain-main/lstchain/scripts/onsite/onsite_create_cat_B_calibration_files_with_batch.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
Onsite script to process (in batch) the interleaved events of several runs
and to create the Cat-B calibration files
--> onsite_create_catB_calibration_files_with_batch -r xxx yyy zzz
"""
import argparse
import shutil
import subprocess
import sys
from datetime import datetime
from pathlib import Path
import lstchain
from lstchain.onsite import (
DEFAULT_BASE_PATH,
CAT_B_PIXEL_DIR,
find_interleaved_subruns,
find_r0_subrun,
DEFAULT_CONFIG_CAT_B_CALIB,
)
# parse arguments
# Command-line interface.  The ``required``/``optional`` argument groups are
# used purely to get a nicer --help layout; argparse does not enforce them.
parser = argparse.ArgumentParser(
    description='Reconstruct filter scan, this must be run after the night calibration scripts',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)

required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')

required.add_argument('-r', '--run_list', help="Run numbers of interleaved data",
                      type=int, nargs="+")

optional.add_argument('-f', '--filters_list', help="Filter list (same order as run list)",
                      type=int, nargs="+")
optional.add_argument('-s', '--statistics',
                      help="Number of events for the flat-field and pedestal statistics",
                      type=int,
                      default=2500)
optional.add_argument('-b', '--base_dir', help="Root dir for the output directory tree", type=Path,
                      default=DEFAULT_BASE_PATH)
optional.add_argument('--interleaved-dir',
                      help="Root dir for the input interleaved files. By default, "
                           "<base_dir>/DL1/date/version/interleaved will be used",
                      type=Path)
optional.add_argument('--r0-dir', help="Root dir for the input r0 tree. By default, <base_dir>/R0 will be used",
                      type=Path)
optional.add_argument('--n_subruns', help="Number of subruns to be processed",
                      type=int)
optional.add_argument('--sys_date',
                      help="Date of systematic corrections (format YYYYMMDD). \n"
                           "Default: automatically search the best date \n")
optional.add_argument('--no_sys_correction',
                      help="Systematic corrections are not applied. \n",
                      action='store_true',
                      default=False)
optional.add_argument('-y', '--yes', action="store_true",
                      help='Do not ask interactively for permissions, assume true')
optional.add_argument('--no_pro_symlink', action="store_true",
                      help='Do not update the pro dir symbolic link, assume true')
optional.add_argument('--config', help="Config file", default=DEFAULT_CONFIG_CAT_B_CALIB, type=Path)
optional.add_argument('--queue',
                      help="Slurm queue. Default: short",
                      default="short")
def main():
    """Submit one slurm job per run to create Cat-B calibration files.

    For each run in ``--run_list`` this function locates the run date from
    the R0 tree, finds the interleaved DL1 subruns, prepares the output and
    log directory trees, writes an sbatch job script that invokes
    ``onsite_create_cat_B_calibration_file`` and submits it with ``sbatch``.

    Exits with an error message if no slurm batch system is available, if
    the config file is missing, or if the filter list length does not match
    the run list length.
    """
    args, remaining_args = parser.parse_known_args()

    run_list = args.run_list
    n_subruns = args.n_subruns
    filters_list = args.filters_list
    prod_id = f"v{lstchain.__version__}"
    stat_events = args.statistics
    base_dir = args.base_dir
    config_file = args.config
    sys_date = args.sys_date
    no_sys_correction = args.no_sys_correction
    yes = args.yes
    queue = args.queue
    r0_dir = args.r0_dir or args.base_dir / 'R0'
    calib_dir = base_dir / CAT_B_PIXEL_DIR

    # This script only makes sense where a slurm batch system is installed.
    if shutil.which('srun') is None:
        sys.exit(">>> This script needs a slurm batch system. Stop")

    print(f"\n--> Start to reconstruct runs {run_list}")

    # verify config file
    if not config_file.exists():
        sys.exit(f"Config file {config_file} does not exist. \n")
    print(f"\n--> Config file {config_file}")

    # For old runs, or when the data-base is not available, the filter list
    # can be given explicitly; it must pair one-to-one with the run list.
    if filters_list is not None and len(filters_list) != len(run_list):
        sys.exit("Filter list length must be equal to run list length. Verify \n")

    # loop over runs and send one batch job each
    filters = None
    for i, run in enumerate(run_list):
        print(f"\n--> Run {run} ")
        if filters_list is not None:
            filters = filters_list[i]

        # look in R0 to find the date of the run (parent dir name is the date)
        r0_list = find_r0_subrun(run, 0, r0_dir)
        date = r0_list.parent.name

        # find input path: the DL1 tree is organised by date and by the
        # major.minor part of the lstchain version (e.g. "v0.10")
        major_minor = ".".join(prod_id.split(".")[:2])
        input_path = args.interleaved_dir or args.base_dir / 'DL1' / f"{date}/{major_minor}/interleaved"

        input_files = find_interleaved_subruns(run, input_path)
        print(f"--> Found {len(input_files)} interleaved subruns in {input_path}")
        if n_subruns:
            print(f"--> Process {n_subruns} subruns")

        # verify output dir
        output_dir = calib_dir / "calibration" / date / prod_id
        if not output_dir.exists():
            print(f"--> Create directory {output_dir}")
        output_dir.mkdir(parents=True, exist_ok=True)

        # make log dir (also holds the generated .job files)
        log_dir = output_dir / "log"
        if not log_dir.exists():
            print(f"--> Create directory {log_dir}")
        log_dir.mkdir(parents=True, exist_ok=True)

        # write the sbatch job file; the timestamp keeps resubmissions apart
        now = datetime.now().replace(microsecond=0).isoformat(sep='T')
        job_file = log_dir / f"run_{run}_date_{now}.job"

        with job_file.open(mode="w") as fh:
            fh.write("#!/bin/bash\n")
            fh.write(f"#SBATCH --job-name={run}.job\n")
            fh.write(f"#SBATCH --output=log/run_{run}_date_{now}.out\n")
            fh.write(f"#SBATCH --error=log/run_{run}_date_{now}.err\n")
            fh.write(f"#SBATCH -p {queue}\n")
            fh.write("#SBATCH --cpus-per-task=1\n")
            fh.write("#SBATCH --mem-per-cpu=10G\n")
            fh.write(f"#SBATCH -D {output_dir} \n")

            cmd = [
                "srun",
                "onsite_create_cat_B_calibration_file",
                f"-r {run}",
                f"-v {prod_id}",
                f"--interleaved-dir {input_path}",
                f"--r0-dir {r0_dir}",
                f"-b {base_dir}",
                f"-s {stat_events}",
                f"--config={config_file}",
            ]

            if filters is not None:
                cmd.append(f"--filters={filters}")
            if sys_date is not None:
                cmd.append(f"--sys_date={sys_date}")
            if yes:
                cmd.append("--yes")
            if no_sys_correction:
                cmd.append("--no_sys_correction")
            if n_subruns:
                cmd.append(f"--n_subruns={n_subruns}")
            if args.no_pro_symlink is True:
                cmd.append("--no_pro_symlink")

            # forward any unrecognised CLI options to the per-run script
            cmd.extend(remaining_args)

            # join command together with newline, line continuation and indentation
            fh.write(" \\\n    ".join(cmd))
            fh.write('\n')

        subprocess.run(["sbatch", job_file], check=True)


if __name__ == '__main__':
    main()
|
cta-observatoryREPO_NAMEcta-lstchainPATH_START.@cta-lstchain_extracted@cta-lstchain-main@lstchain@scripts@onsite@onsite_create_cat_B_calibration_files_with_batch.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.