metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "_tickfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/parcats/_tickfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
    """
    Font used for the `category` labels of a parcats (parallel
    categories) trace.  Auto-generated plotly graph-object wrapper
    exposing the ``color``, ``family`` and ``size`` font properties,
    all stored through the BaseTraceHierarchyType item protocol
    (``self[...]``).
    """

    # class properties
    # --------------------
    _parent_path_str = "parcats"
    _path_str = "parcats.tickfont"
    # Only these keys are accepted as properties of this object.
    _valid_props = {"color", "family", "size"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object

        Sets the font for the `category` labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.parcats.Tickfont`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")

        # Internal fast path: when a parent object constructs this child
        # it passes itself via `_parent`; all validation is skipped.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so the pops below don't mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.parcats.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcats.Tickfont`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # An explicit keyword argument takes precedence over the same
        # key inside `arg`; values left as None are simply not set.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@parcats@_tickfont.py@.PATH_END.py
|
{
"filename": "_tick0.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolar/marker/colorbar/_tick0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the `scatterpolar.marker.colorbar.tick0` property."""

    def __init__(
        self, plotly_name="tick0", parent_name="scatterpolar.marker.colorbar", **kwargs
    ):
        # Pull the defaults out of kwargs first so a caller may override
        # any of them; whatever remains is forwarded untouched.
        edit_type = kwargs.pop("edit_type", "colorbars")
        implied_edits = kwargs.pop("implied_edits", {"tickmode": "linear"})
        role = kwargs.pop("role", "style")
        super(Tick0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolar@marker@colorbar@_tick0.py@.PATH_END.py
|
{
"filename": "problem_setup.py",
"repo_name": "dullemond/radmc3d-2.0",
"repo_path": "radmc3d-2.0_extracted/radmc3d-2.0-master/examples/run_ppdisk_analytic_1/problem_setup.py",
"type": "Python"
}
|
#
# Import NumPy for array handling
#
import numpy as np
#
# A simple grid refinement function
#
def grid_refine_inner_edge(x_orig, nlev, nspan):
    """
    Refine a 1-D grid toward its first edge.

    Each of the ``nlev`` refinement cycles inserts the midpoints of the
    first ``nspan`` intervals, so cells shrink geometrically toward the
    inner edge.  Works for ascending or descending input; the original
    orientation is preserved.
    """
    grid = x_orig.copy()
    descending = grid[0] > grid[1]
    for _ in range(nlev):
        # Midpoints of the first nspan intervals (counted from grid[0]).
        midpoints = 0.5 * (grid[:nspan] + grid[1:nspan + 1])
        grid = np.sort(np.hstack((grid, midpoints)))
        if descending:
            grid = grid[::-1]
    return grid
#
# Some natural constants (CGS units throughout)
#
au  = 1.49598e13     # Astronomical Unit       [cm]
pc  = 3.08572e18     # Parsec                  [cm]
ms  = 1.98892e33     # Solar mass              [g]
ts  = 5.78e3         # Solar temperature       [K]
ls  = 3.8525e33      # Solar luminosity        [erg/s]
rs  = 6.96e10        # Solar radius            [cm]
ss  = 5.6703e-5      # Stefan-Boltzmann const  [erg/cm^2/K^4/s]
kk  = 1.3807e-16     # Boltzmann's constant    [erg/K]
mp  = 1.6726e-24     # Mass of proton          [g]
GG  = 6.67408e-08    # Gravitational constant  [cm^3/g/s^2]
pi  = np.pi          # Pi
#
# Star parameters (a Sun-like star at the origin)
#
mstar    = ms
rstar    = rs
tstar    = ts
pstar    = np.array([0.,0.,0.])   # star position [cm]
#
# Disk parameters
#
sigmag0  = 1e1                    # Sigma gas at 1 AU
sigmad0  = sigmag0 * 0.01         # Sigma dust at 1 AU (dust-to-gas = 0.01)
plsig    = -1.0e0                 # Powerlaw of the surface density
#
# First make a simple analytical disk model roughly along the
# lines of Chiang & Goldreich (1997), but with just a single
# vertical layer and with a constant radiative incidence angle.
#
flang    = 0.05                   # The assumed constant radiative incidence angle
nr       = 32                     # Nr of radial grid points
rin      = 10*au                  # Inner radius
rout     = 100*au                 # Outer radius
nlev_rin = 12                     # Grid refinement at the inner edge: nr of cycles
nspan_rin= 3                      # Grid refinement at the inner edge: nr of cells each cycle
ri       = np.logspace(np.log10(rin),np.log10(rout),nr+1)
ri       = grid_refine_inner_edge(ri,nlev_rin,nspan_rin)  # Refinement at inner edge
rc       = 0.5 * ( ri[:-1] + ri[1:] )   # cell centers from the cell walls
nr       = len(rc)                # Recompute nr, because of refinement at inner edge
r        = rc                     # The radial grid of the analytic disk model
lstar    = 4*pi*rstar**2*ss*tstar**4    # Stellar luminosity (blackbody)
firr     = flang*lstar/(4*pi*r**2)      # Irradiative flux absorbed by the disk surface
tmid     = (firr/ss)**0.25              # Estimate of midplane temperature (re-emission balance)
cs       = np.sqrt(kk*tmid/(2.3*mp))    # Isothermal sound speed at midplane (mu = 2.3)
omk      = np.sqrt(GG*mstar/r**3)       # The Kepler angular frequency
hp       = cs/omk                       # The pressure scale height
hpr      = hp/r                         # The dimensionless hp
sigmad   = sigmad0 * (r/au)**plsig      # The surface density profile
#
# Vertical grid parameters (theta-grid in spherical coordinates)
#
ntheta   = 32                    # nr of theta grid points (midplane hemisphere only)
zrmax    = 0.5                   # max angle above the midplane covered by the grid [rad]
# Upper theta boundary of the grid.  Previously this repeated the magic
# constant 0.5e0 and left zrmax unused; using zrmax keeps the two in sync.
thetaup  = np.pi*0.5 - zrmax
#
# Make the theta and phi coordinates
#
nphi     = 1                     # axisymmetric model: a single phi cell
thetai   = np.linspace(thetaup,0.5e0*np.pi,ntheta+1)   # theta cell walls
phii     = np.linspace(0.e0,np.pi*2.e0,nphi+1)         # phi cell walls
thetac   = 0.5 * ( thetai[0:ntheta] + thetai[1:ntheta+1] )   # theta cell centers
phic     = 0.5 * ( phii[0:nphi] + phii[1:nphi+1] )           # phi cell centers
#
# Make the 2-D grid (actually 3-D but with axisymmetry)
#
qq       = np.meshgrid(rc,thetac,phic,indexing='ij')
rr       = qq[0]                  # spherical radius at each cell center
tt       = qq[1]                  # colatitude theta at each cell center
zr       = np.pi/2.e0 - qq[1]     # angle above the midplane (~ z/r close to the midplane)
#
# Expand the 1-D analytic model to 2-D
#
sigmad_3d = np.meshgrid(sigmad,thetac,phic,indexing='ij')[0]
hh       = np.meshgrid(hp,thetac,phic,indexing='ij')[0]
hhr      = np.meshgrid(hpr,thetac,phic,indexing='ij')[0]
#
# Make the dust density model: Gaussian vertical profile with surface
# density sigmad_3d and dimensionless scale height hhr
#
rhod     = ( sigmad_3d / (np.sqrt(2.e0*np.pi)*hh) ) * np.exp(-(zr**2/hhr**2)/2.e0)
#
# Monte Carlo parameters
#
nphot    = 1000000       # nr of photon packages for the thermal Monte Carlo
#
# Build the discrete wavelength grid for wavelength_micron.inp:
# three logarithmically spaced segments that join continuously because
# the first two omit their right endpoint.
#
lam1, lam2, lam3, lam4 = 0.1e0, 7.0e0, 25.e0, 1.0e4   # segment borders [micron]
n12, n23, n34 = 20, 100, 30                           # points per segment
lam12 = np.logspace(np.log10(lam1), np.log10(lam2), n12, endpoint=False)
lam23 = np.logspace(np.log10(lam2), np.log10(lam3), n23, endpoint=False)
lam34 = np.logspace(np.log10(lam3), np.log10(lam4), n34, endpoint=True)
lam   = np.concatenate([lam12, lam23, lam34])
nlam  = lam.size
#
# Write the wavelength file
#
with open('wavelength_micron.inp','w+') as f:
    f.write('%d\n'%(nlam))
    for value in lam:
        f.write('%13.6e\n'%(value))
#
#
# Write the stars.inp file
#
with open('stars.inp','w+') as f:
    f.write('2\n')                          # format number
    f.write('1 %d\n\n'%(nlam))              # one star, nlam wavelength points
    # Radius, mass and position of the star
    f.write('%13.6e %13.6e %13.6e %13.6e %13.6e\n\n'%(rstar,mstar,pstar[0],pstar[1],pstar[2]))
    for value in lam:
        f.write('%13.6e\n'%(value))
    # Negative value: per the RADMC-3D manual this requests a blackbody
    # spectrum of temperature |tstar| instead of a tabulated spectrum.
    f.write('\n%13.6e\n'%(-tstar))
#
# Write the grid file
#
with open('amr_grid.inp','w+') as f:
    f.write('1\n')                          # iformat
    f.write('0\n')                          # AMR grid style  (0=regular grid, no AMR)
    f.write('100\n')                        # Coordinate system: spherical
    f.write('0\n')                          # gridinfo
    f.write('1 1 0\n')                      # Include r,theta coordinates
    f.write('%d %d %d\n'%(nr,ntheta,1))     # Size of grid
    for value in ri:
        f.write('%13.6e\n'%(value))         # X coordinates (cell walls)
    for value in thetai:
        f.write('%13.6e\n'%(value))         # Y coordinates (cell walls)
    for value in phii:
        f.write('%13.6e\n'%(value))         # Z coordinates (cell walls)
#
# Write the density file
#
with open('dust_density.inp','w+') as f:
    f.write('1\n')                          # Format number
    f.write('%d\n'%(nr*ntheta*nphi))        # Nr of cells
    f.write('1\n')                          # Nr of dust species
    data = rhod.ravel(order='F')            # Create a 1-D view, fortran-style indexing
    data.tofile(f, sep='\n', format="%13.6e")
    f.write('\n')
#
# Dust opacity control file
#
with open('dustopac.inp','w+') as f:
    f.write('2 Format number of this file\n')
    f.write('1 Nr of dust species\n')
    f.write('============================================================================\n')
    f.write('1 Way in which this dust species is read\n')
    f.write('0 0=Thermal grain\n')
    f.write('silicate Extension of name of dustkappa_***.inp file\n')
    f.write('----------------------------------------------------------------------------\n')
#
# Write the radmc3d.inp control file
#
with open('radmc3d.inp','w+') as f:
    f.write('nphot = %d\n'%(nphot))
    f.write('scattering_mode_max = 1\n')    # isotropic scattering
    f.write('iranfreqmode = 1\n')
|
dullemondREPO_NAMEradmc3d-2.0PATH_START.@radmc3d-2.0_extracted@radmc3d-2.0-master@examples@run_ppdisk_analytic_1@problem_setup.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "brinckmann/montepython_public",
"repo_path": "montepython_public_extracted/montepython_public-master/montepython/likelihoods/ska1_lensing/__init__.py",
"type": "Python"
}
|
########################################################
# ska1_lensing likelihood
########################################################
# Copied from Euclid_lensing 05.2017
# Tim Sprenger: changed galaxy_distribution and photo_z_distribution
# to match ska1 specifications from 1601.03947
from montepython.likelihood_class import Likelihood
import io_mp
#import time
import scipy.integrate
from scipy import interpolate as itp
import os
import numpy as np
import math
import warnings
# Python 2/3 compatibility: Python 3 has no xrange builtin, so alias it
# to range (which is lazy on Python 3, like Python 2's xrange was).
try:
    xrange
except NameError:
    xrange = range
class ska1_lensing(Likelihood):
def __init__(self, path, data, command_line):
Likelihood.__init__(self, path, data, command_line)
# Force the cosmological module to store Pk for redshifts up to
# max(self.z) and for k up to k_max
self.need_cosmo_arguments(data, {'output': 'mPk'})
self.need_cosmo_arguments(data, {'z_max_pk': self.zmax})
self.need_cosmo_arguments(data, {'P_k_max_1/Mpc': 0.75*self.k_max_h_by_Mpc})
# Compute non-linear power spectrum if requested
if (self.use_halofit):
self.need_cosmo_arguments(data, {'non linear':'halofit'})
# Warn if theoretical error and linear cutoff are requested
if (self.use_lmax_lincut and self.theoretical_error!=0):
warnings.warn("A lmax cutoff infered from kmax and a theoretical error are requested. This combination is not implemented and in most cases not necessary as the theoretical error should induce a cutoff by itself.")
# Define array of l values, and initialize them
# It is a logspace
# find nlmax in order to reach lmax with logarithmic steps dlnl
self.nlmax = np.int(np.log(self.lmax/self.lmin)/self.dlnl)+1
# redefine slightly dlnl so that the last point is always exactly lmax
self.dlnl = np.log(self.lmax/self.lmin)/(self.nlmax-1)
self.l = self.lmin*np.exp(self.dlnl*np.arange(self.nlmax))
########################################################
# Find distribution of dn_dz (not normalized) in each bin
########################################################
# Assuming each bin contains the same number of galaxies, we find the
# bin limits in z space
# Compute the total number of galaxies until zmax (no normalization
# yet), that is the integral of the galaxy distribution function from 0
# to self.zmax
n_tot, error = scipy.integrate.quad(
self.galaxy_distribution, 0, self.zmax)
assert error <= 1e-7, (
"The integration of the galaxy distribution is not as "
"precise as expected.")
# For each bin, compute the limit in z space
# Create the array that will contain the z boundaries for each bin. The
# first value is already correctly set to 0.
self.z_bin_edge = np.zeros(self.nbin+1, 'float64')
total_count = 0.
for Bin in xrange(self.nbin-1):
bin_count = 0.
z = self.z_bin_edge[Bin]
while (bin_count <= (n_tot-total_count)/(self.nbin-Bin)):
gd_1 = self.galaxy_distribution(z)
gd_2 = self.galaxy_distribution(z+self.dz)
bin_count += 0.5*(gd_1+gd_2)*self.dz
z += self.dz
self.z_bin_edge[Bin+1] = z
total_count += bin_count
self.z_bin_edge[self.nbin] = self.zmax
# Fill array of discrete z values
self.z = np.linspace(0, self.zmax, num=self.nzmax)
# check normalizations:
#print("n_tot = %s" % (n_tot))
#int_nz = self.nzmax
#int_z = np.linspace(0, self.zmax, num=int_nz)
#DDD = np.zeros(int_nz, 'float64')
#for nz in xrange(self.nzmax):
# #for nz2 in xrange(int_nz):
# # DDD[nz2] = self.photo_z_distribution(self.z[nz], int_z[nz2],False)
# DDD = self.photo_z_distribution(self.z[nz], int_z)
# D_tot = scipy.integrate.trapz(DDD,int_z)
# print("%s\t%s\t%s" % (self.z[nz], self.galaxy_distribution(self.z[nz]), D_tot ))
# Fill distribution for each bin (convolving with photo_z distribution)
self.eta_z = np.zeros((self.nzmax, self.nbin), 'float64')
gal = self.galaxy_distribution(self.z, True)
for Bin in xrange(self.nbin):
low = self.z_bin_edge[Bin]
hig = self.z_bin_edge[Bin+1]
for nz in xrange(self.nzmax):
z = self.z[nz]
integrand = gal*self.photo_z_distribution(z, self.z, True)
integrand = np.array([
elem if low <= self.z[index] <= hig else 0
for index, elem in enumerate(integrand)])
self.eta_z[nz, Bin] = scipy.integrate.trapz(
integrand,
self.z)
# integrate eta(z) over z (in view of normalizing it to one)
self.eta_norm = np.zeros(self.nbin, 'float64')
for Bin in xrange(self.nbin):
self.eta_norm[Bin] = np.sum(0.5*(
self.eta_z[1:, Bin]+self.eta_z[:-1, Bin])*(
self.z[1:]-self.z[:-1]))
################
# Noise spectrum
################
# Number of galaxies per steradian
self.noise = 3600.*self.gal_per_sqarcmn*(180./math.pi)**2
# Number of galaxies per steradian per bin
self.noise = self.noise/self.nbin
# Noise spectrum (diagonal in bin*bin space, independent of l and Bin)
self.noise = self.rms_shear**2/self.noise
###########
# Read data
###########
# If the file exists, initialize the fiducial values
# It has been stored flat, so we use the reshape function to put it in
# the right shape.
self.Cl_fid = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')
self.fid_values_exist = False
fid_file_path = os.path.join(self.data_directory, self.fiducial_file)
if os.path.exists(fid_file_path):
self.fid_values_exist = True
flat_Cl = np.loadtxt(fid_file_path)
self.Cl_fid = flat_Cl.reshape((self.nlmax, self.nbin, self.nbin))
return
def galaxy_distribution(self, z, array=False):
"""
Galaxy distribution returns the function D(z) from the notes
If the array flag is set to True, z is then interpretated as an array,
and not as a single value.
Replaced by Tim Sprenger due to switch from euclid to ska
"""
z0 = self.par_zm/self.par_a
if not array:
galaxy_dist = z**(self.par_b)*math.exp(-(z/z0)**(self.par_c))
else:
return z**(self.par_b)*np.exp(-(z/z0)**(self.par_c))
return galaxy_dist
    def photo_z_distribution(self, z, zph, array=True):
        """
        Photo z distribution

        If the array flag is set to True, zph is then interpreted as an array,
        and not as a single value.

        Below self.z_spec a fraction self.f_spec of galaxies is treated as
        having exact redshifts (a discrete spike where z == zph) and the
        rest follow a Gaussian of width sigma_phot*(1+z); between z_spec
        and z_phot only the sigma_phot Gaussian is used; above z_phot the
        (presumably broader) sigma_noz Gaussian is used.

        Replaced by Tim Sprenger due to switch from euclid to ska
        """
        # Note: you must normalize it yourself to one if you want to get nice
        # plots of the galaxy distribution function in each bin (otherwise, the
        # spectra will remain correct, but each D_i(x) will look strangely
        # normalized when compared to the original D(z)
        if not array:
            if (z<=self.z_spec):
                photo_z_dist = (1.-self.f_spec)*math.exp(
                    -0.5*((z-zph)/self.sigma_phot/(1.+z))**2)/self.sigma_phot/(
                    1.+z)/math.sqrt(2.*math.pi)
                # Spectroscopic spike: weight f_spec concentrated in one grid
                # cell of width zmax/(nzmax-1)
                if (z==zph):
                    photo_z_dist += self.f_spec/self.zmax*(self.nzmax-1.)
                # The z == 0 grid point gets the spike twice -- presumably to
                # compensate the trapezoidal half-weight at the boundary
                # (TODO confirm against the Euclid_lensing original)
                if (z==0.):
                    photo_z_dist += self.f_spec/self.zmax*(self.nzmax-1.)
            elif (z<=self.z_phot):
                photo_z_dist = math.exp(
                    -0.5*((z-zph)/self.sigma_phot/(1.+z))**2)/self.sigma_phot/(
                    1.+z)/math.sqrt(2.*math.pi)
            else:
                photo_z_dist = math.exp(
                    -0.5*((z-zph)/self.sigma_noz/(1.+z))**2)/self.sigma_noz/(
                    1.+z)/math.sqrt(2.*math.pi)
        else:
            if (z<=self.z_spec):
                photo_z_dist = (1.-self.f_spec)*np.exp(
                    -0.5*((z-zph)/self.sigma_phot/(1.+z))**2)/self.sigma_phot/(
                    1.+z)/math.sqrt(2.*math.pi)
                # Add the spike at the single grid point where zph equals z;
                # at most one entry can match, hence the break.
                for index_z in xrange(len(zph)):
                    if (z==zph[index_z]):
                        photo_z_dist[index_z] += self.f_spec/self.zmax*(self.nzmax-1.)
                        # Doubled boundary contribution, mirroring the scalar
                        # branch above
                        if (z==0.):
                            photo_z_dist[index_z] += self.f_spec/self.zmax*(self.nzmax-1.)
                        break
            elif (z<=self.z_phot):
                photo_z_dist = np.exp(
                    -0.5*((z-zph)/self.sigma_phot/(1.+z))**2)/self.sigma_phot/(
                    1.+z)/math.sqrt(2.*math.pi)
            else:
                photo_z_dist = np.exp(
                    -0.5*((z-zph)/self.sigma_noz/(1.+z))**2)/self.sigma_noz/(
                    1.+z)/math.sqrt(2.*math.pi)
        return photo_z_dist
def loglkl(self, cosmo, data):
#start = time.time()
# One wants to obtain here the relation between z and r, this is done
# by asking the cosmological module with the function z_of_r
self.r = np.zeros(self.nzmax, 'float64')
self.dzdr = np.zeros(self.nzmax, 'float64')
self.r, self.dzdr = cosmo.z_of_r(self.z)
# Compute now the selection function eta(r) = eta(z) dz/dr normalized
# to one. The np.newaxis helps to broadcast the one-dimensional array
# dzdr to the proper shape. Note that eta_norm is also broadcasted as
# an array of the same shape as eta_z
self.eta_r = self.eta_z*(self.dzdr[:, np.newaxis]/self.eta_norm)
# Compute function g_i(r), that depends on r and the bin
# g_i(r) = 2r(1+z(r)) int_0^+\infty drs eta_r(rs) (rs-r)/rs
# TODO is the integration from 0 or r ?
g = np.zeros((self.nzmax, self.nbin), 'float64')
for Bin in xrange(self.nbin):
for nr in xrange(1, self.nzmax-1):
fun = self.eta_r[nr:, Bin]*(self.r[nr:]-self.r[nr])/self.r[nr:]
g[nr, Bin] = np.sum(0.5*(
fun[1:]+fun[:-1])*(self.r[nr+1:]-self.r[nr:-1]))
g[nr, Bin] *= 2.*self.r[nr]*(1.+self.z[nr])
# compute the maximum l where most contributions are linear
# as a function of the lower bin number
if self.use_lmax_lincut:
lintegrand_lincut_o = np.zeros((self.nzmax, self.nbin, self.nbin), 'float64')
lintegrand_lincut_u = np.zeros((self.nzmax, self.nbin, self.nbin), 'float64')
l_lincut = np.zeros((self.nbin, self.nbin), 'float64')
l_lincut_mean = np.zeros(self.nbin, 'float64')
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1,self.nbin):
lintegrand_lincut_o[1:,Bin1, Bin2] = g[1:, Bin1]*g[1:, Bin2]/(
self.r[1:])
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1,self.nbin):
lintegrand_lincut_u[1:,Bin1, Bin2] = g[1:, Bin1]*g[1:, Bin2]/(
self.r[1:]**2)
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1,self.nbin):
l_lincut[Bin1, Bin2] = np.sum(0.5*(
lintegrand_lincut_o[1:, Bin1, Bin2] +
lintegrand_lincut_o[:-1, Bin1, Bin2])*(
self.r[1:]-self.r[:-1]))
l_lincut[Bin1, Bin2] /= np.sum(0.5*(
lintegrand_lincut_u[1:, Bin1, Bin2] +
lintegrand_lincut_u[:-1, Bin1, Bin2])*(
self.r[1:]-self.r[:-1]))
z_peak = np.zeros((self.nbin, self.nbin), 'float64')
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1,self.nbin):
z_peak[Bin1, Bin2] = self.zmax
for index_z in xrange(self.nzmax):
if (self.r[index_z]>l_lincut[Bin1, Bin2]):
z_peak[Bin1, Bin2] = self.z[index_z]
break
if self.use_zscaling:
l_lincut[Bin1, Bin2] *= self.kmax_hMpc*cosmo.h()*pow(1.+z_peak[Bin1, Bin2],2./(2.+cosmo.n_s()))
else:
l_lincut[Bin1, Bin2] *= self.kmax_hMpc*cosmo.h()
l_lincut_mean[Bin1] = np.sum(l_lincut[Bin1, :])/(self.nbin-Bin1)
#for Bin1 in xrange(self.nbin):
#for Bin2 in xrange(Bin1,self.nbin):
#print("%s\t%s\t%s\t%s" % (Bin1, Bin2, l_lincut[Bin1, Bin2], l_lincut_mean[Bin1]))
#for nr in xrange(1, self.nzmax-1):
# print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (self.z[nr], g[nr, 0], g[nr, 1], g[nr, 2], g[nr, 3], g[nr, 4], g[nr, 5], g[nr, 6], g[nr, 7], g[nr, 8], g[nr, 9]))
# Get power spectrum P(k=l/r,z(r)) from cosmological module
kmin_in_inv_Mpc = self.k_min_h_by_Mpc * cosmo.h()
kmax_in_inv_Mpc = self.k_max_h_by_Mpc * cosmo.h()
pk = np.zeros((self.nlmax, self.nzmax), 'float64')
for index_l in xrange(self.nlmax):
for index_z in xrange(1, self.nzmax):
# These lines would return an error when you ask for P(k,z) out of computed range
# if (self.l[index_l]/self.r[index_z] > self.k_max):
# raise io_mp.LikelihoodError(
# "you should increase euclid_lensing.k_max up to at least %g" % (self.l[index_l]/self.r[index_z]))
# pk[index_l, index_z] = cosmo.pk(
# self.l[index_l]/self.r[index_z], self.z[index_z])
# These lines set P(k,z) to zero out of [k_min, k_max] range
k_in_inv_Mpc = self.l[index_l]/self.r[index_z]
if (k_in_inv_Mpc < kmin_in_inv_Mpc) or (k_in_inv_Mpc > kmax_in_inv_Mpc):
pk[index_l, index_z] = 0.
else:
pk[index_l, index_z] = cosmo.pk(self.l[index_l]/self.r[index_z], self.z[index_z])
#print("%s\t%s\t%s" %(self.l[index_l], self.z[index_z], pk[index_l, index_z]))
# Recover the non_linear scale computed by halofit. If no scale was
# affected, set the scale to one, and make sure that the nuisance
# parameter epsilon is set to zero
k_sigma = np.zeros(self.nzmax, 'float64')
if (cosmo.nonlinear_method == 0):
k_sigma[:] = 1.e6
else:
k_sigma = cosmo.nonlinear_scale(self.z, self.nzmax)
if not (cosmo.nonlinear_method == 0):
k_sigma_problem = False
for index_z in xrange(self.nzmax-1):
if (k_sigma[index_z+1]<k_sigma[index_z]) or (k_sigma[index_z+1]>2.5):
k_sigma[index_z+1] = 2.5
k_sigma_problem = True
#print("%s\t%s" % (k_sigma[index_z], self.z[index_z]))
if k_sigma_problem:
warnings.warn("There were unphysical (decreasing in redshift or exploding) values of k_sigma (=cosmo.nonlinear_scale(...)). To proceed they were set to 2.5, the highest scale that seems to be stable.")
# Define the alpha function, that will characterize the theoretical
# uncertainty. Chosen to be 0.001 at low k, raise between 0.1 and 0.2
# to self.theoretical_error
alpha = np.zeros((self.nlmax, self.nzmax), 'float64')
# self.theoretical_error = 0.1
if self.theoretical_error != 0:
#MArchi for index_l in range(self.nlmax):
#k = self.l[index_l]/self.r[1:]
#alpha[index_l, 1:] = np.log(1.+k[:]/k_sigma[1:])/(
#1.+np.log(1.+k[:]/k_sigma[1:]))*self.theoretical_error
for index_l in xrange(self.nlmax):
for index_z in xrange(1, self.nzmax):
k = self.l[index_l]/self.r[index_z]
alpha[index_l, index_z] = np.log(1.+k/k_sigma[index_z])/(
1.+np.log(1.+k/k_sigma[index_z]))*self.theoretical_error
# recover the e_th_nu part of the error function
e_th_nu = self.coefficient_f_nu*cosmo.Omega_nu/cosmo.Omega_m()
# Compute the Error E_th_nu function
if 'epsilon' in self.use_nuisance:
E_th_nu = np.zeros((self.nlmax, self.nzmax), 'float64')
for index_l in range(1, self.nlmax):
E_th_nu[index_l, :] = np.log(
1.+self.l[index_l]/k_sigma[:]*self.r[:]) / (
1.+np.log(1.+self.l[index_l]/k_sigma[:]*self.r[:]))*e_th_nu
# Add the error function, with the nuisance parameter, to P_nl_th, if
# the nuisance parameter exists
for index_l in range(self.nlmax):
epsilon = data.mcmc_parameters['epsilon']['current']*(
data.mcmc_parameters['epsilon']['scale'])
pk[index_l, :] *= (1.+epsilon*E_th_nu[index_l, :])
# Start loop over l for computation of C_l^shear
Cl_integrand = np.zeros((self.nzmax, self.nbin, self.nbin), 'float64')
Cl = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')
# Start loop over l for computation of E_l
if self.theoretical_error != 0:
El_integrand = np.zeros((self.nzmax, self.nbin, self.nbin),
'float64')
El = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')
for nl in xrange(self.nlmax):
# find Cl_integrand = (g(r) / r)**2 * P(l/r,z(r))
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1,self.nbin):
Cl_integrand[1:, Bin1, Bin2] = g[1:, Bin1]*g[1:, Bin2]/(
self.r[1:]**2)*pk[nl, 1:]
if self.theoretical_error != 0:
El_integrand[1:, Bin1, Bin2] = g[1:, Bin1]*(
g[1:, Bin2])/(
self.r[1:]**2)*pk[nl, 1:]*alpha[nl, 1:]
# Integrate over r to get C_l^shear_ij = P_ij(l)
# C_l^shear_ij = 9/16 Omega0_m^2 H_0^4 \sum_0^rmax dr (g_i(r)
# g_j(r) /r**2) P(k=l/r,z(r))
# It it then multiplied by 9/16*Omega_m**2 to be in units of Mpc**4
# and then by (h/2997.9)**4 to be dimensionless
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1,self.nbin):
Cl[nl, Bin1, Bin2] = np.sum(0.5*(
Cl_integrand[1:, Bin1, Bin2] +
Cl_integrand[:-1, Bin1, Bin2])*(
self.r[1:]-self.r[:-1]))
Cl[nl, Bin1, Bin2] *= 9./16.*(cosmo.Omega_m())**2
Cl[nl, Bin1, Bin2] *= (cosmo.h()/2997.9)**4
if self.theoretical_error != 0:
El[nl, Bin1, Bin2] = np.sum(0.5*(
El_integrand[1:, Bin1, Bin2] +
El_integrand[:-1, Bin1, Bin2])*(
self.r[1:]-self.r[:-1]))
El[nl, Bin1, Bin2] *= 9./16.*(cosmo.Omega_m())**2
El[nl, Bin1, Bin2] *= (cosmo.h()/2997.9)**4
if Bin1 == Bin2:
Cl[nl, Bin1, Bin2] += self.noise
# Write fiducial model spectra if needed (exit in that case)
if self.fid_values_exist is False:
# Store the values now, and exit.
fid_file_path = os.path.join(
self.data_directory, self.fiducial_file)
with open(fid_file_path, 'w') as fid_file:
fid_file.write('# Fiducial parameters')
for key, value in io_mp.dictitems(data.mcmc_parameters):
fid_file.write(
', %s = %.5g' % (key, value['current']*value['scale']))
fid_file.write('\n')
for nl in range(self.nlmax):
for Bin1 in range(self.nbin):
for Bin2 in range(self.nbin):
fid_file.write("%.8g\n" % Cl[nl, Bin1, Bin2])
print('\n')
warnings.warn(
"Writing fiducial model in %s, for %s likelihood\n" % (
self.data_directory+'/'+self.fiducial_file, self.name))
return 1j
# Now that the fiducial model is stored, we add the El to both Cl and
# Cl_fid (we create a new array, otherwise we would modify the
# self.Cl_fid from one step to the other)
# Spline Cl[nl,Bin1,Bin2] along l
spline_Cl = np.empty((self.nbin, self.nbin), dtype=(list, 3))
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1, self.nbin):
spline_Cl[Bin1, Bin2] = list(itp.splrep(
self.l, Cl[:, Bin1, Bin2]))
if Bin2 > Bin1:
spline_Cl[Bin2, Bin1] = spline_Cl[Bin1, Bin2]
# Spline El[nl,Bin1,Bin2] along l
if self.theoretical_error != 0:
spline_El = np.empty((self.nbin, self.nbin), dtype=(list, 3))
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1, self.nbin):
spline_El[Bin1, Bin2] = list(itp.splrep(
self.l, El[:, Bin1, Bin2]))
if Bin2 > Bin1:
spline_El[Bin2, Bin1] = spline_El[Bin1, Bin2]
# Spline Cl_fid[nl,Bin1,Bin2] along l
spline_Cl_fid = np.empty((self.nbin, self.nbin), dtype=(list, 3))
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1, self.nbin):
spline_Cl_fid[Bin1, Bin2] = list(itp.splrep(
self.l, self.Cl_fid[:, Bin1, Bin2]))
if Bin2 > Bin1:
spline_Cl_fid[Bin2, Bin1] = spline_Cl_fid[Bin1, Bin2]
# Compute likelihood
# Prepare interpolation for every integer value of l, from the array
# self.l, to finally compute the likelihood (sum over all l's)
dof = 1./(int(self.l[-1])-int(self.l[0])+1)
ells = list(range(int(self.l[0]), int(self.l[-1])+1))
# Define cov theory, observ and error on the whole integer range of ell
# values
Cov_theory = np.zeros((len(ells), self.nbin, self.nbin), 'float64')
Cov_observ = np.zeros((len(ells), self.nbin, self.nbin), 'float64')
Cov_error = np.zeros((len(ells), self.nbin, self.nbin), 'float64')
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(Bin1, self.nbin):
Cov_theory[:, Bin1, Bin2] = itp.splev(
ells, spline_Cl[Bin1, Bin2])
Cov_observ[:, Bin1, Bin2] = itp.splev(
ells, spline_Cl_fid[Bin1, Bin2])
if self.theoretical_error > 0:
Cov_error[:, Bin1, Bin2] = itp.splev(
ells, spline_El[Bin1, Bin2])
if Bin2 > Bin1:
Cov_theory[:, Bin2, Bin1] = Cov_theory[:, Bin1, Bin2]
Cov_observ[:, Bin2, Bin1] = Cov_observ[:, Bin1, Bin2]
Cov_error[:, Bin2, Bin1] = Cov_error[:, Bin1, Bin2]
chi2 = 0.
# TODO parallelize this
for index, ell in enumerate(ells):
if self.use_lmax_lincut:
CutBin = -1
for zBin in xrange(self.nbin):
if (ell<l_lincut_mean[zBin]):
CutBin = zBin
det_theory = np.linalg.det(Cov_theory[index, CutBin:, CutBin:])
det_observ = np.linalg.det(Cov_observ[index, CutBin:, CutBin:])
break
if (CutBin==-1):
break
else:
det_theory = np.linalg.det(Cov_theory[index, :, :])
det_observ = np.linalg.det(Cov_observ[index, :, :])
if (self.theoretical_error > 0):
det_cross_err = 0
for i in range(self.nbin):
newCov = np.copy(Cov_theory[index,:,:]) #MArchi#newCov = np.copy(Cov_theory)
newCov[:, i] = Cov_error[index,:, i] #MArchi#newCov[:, i] = Cov_error[:, i]
det_cross_err += np.linalg.det(newCov)
# Newton method
# Find starting point for the method:
start = 0
step = 0.001*det_theory/det_cross_err
error = 1
old_chi2 = -1.*data.boundary_loglike
error_tol = 0.01
epsilon_l = start
while error > error_tol:
vector = np.array([epsilon_l-step,
epsilon_l,
epsilon_l+step])
#print(vector.shape)
# Computing the function on three neighbouring points
function_vector = np.zeros(3, 'float64')
for k in range(3):
Cov_theory_plus_error = Cov_theory+vector[k]*Cov_error
det_theory_plus_error = np.linalg.det(Cov_theory_plus_error[index,:,:]) #MArchi#det_theory_plus_error = np.linalg.det(Cov_theory_plus_error)
det_theory_plus_error_cross_obs = 0
for i in range(self.nbin):
newCov = np.copy(Cov_theory_plus_error[index,:,:])#MArchi#newCov = np.copy(Cov_theory_plus_error)
newCov[:, i] = Cov_observ[index,:, i]#MArchi#newCov[:, i] = Cov_observ[:, i]
det_theory_plus_error_cross_obs += np.linalg.det(
newCov)
try:
function_vector[k] = (2.*ell+1.)*self.fsky*(det_theory_plus_error_cross_obs/det_theory_plus_error + math.log(det_theory_plus_error/det_observ) - self.nbin ) + dof*vector[k]**2
except ValueError:
warnings.warn("ska1_lensing: Could not evaluate chi2 including theoretical error with the current parameters. The corresponding chi2 is now set to nan!")
break
break
break
chi2 = np.nan
# Computing first
first_d = (function_vector[2]-function_vector[0]) / (vector[2]-vector[0])
second_d = (function_vector[2]+function_vector[0]-2*function_vector[1]) / (vector[2]-vector[1])**2
# Updating point and error
epsilon_l = vector[1] - first_d/second_d
error = abs(function_vector[1] - old_chi2)
old_chi2 = function_vector[1]
# End Newton
Cov_theory_plus_error = Cov_theory + epsilon_l * Cov_error
det_theory_plus_error = np.linalg.det(Cov_theory_plus_error[index,:,:]) #MArchi#det_theory_plus_error = np.linalg.det(Cov_theory_plus_error)
det_theory_plus_error_cross_obs = 0
for i in range(self.nbin):
newCov = np.copy(Cov_theory_plus_error[index,:,:]) #MArchi#newCov = np.copy(Cov_theory_plus_error)
newCov[:, i] = Cov_observ[index,:, i] #MArchi#newCov[:, i] = Cov_observ[:, i]
det_theory_plus_error_cross_obs += np.linalg.det(newCov)
chi2 += (2.*ell+1.)*self.fsky*(det_theory_plus_error_cross_obs/det_theory_plus_error + math.log(det_theory_plus_error/det_observ) - self.nbin ) + dof*epsilon_l**2
else:
if self.use_lmax_lincut:
det_cross = 0.
for i in xrange(0,self.nbin-CutBin):
newCov = np.copy(Cov_theory[index, CutBin:, CutBin:])
newCov[:, i] = Cov_observ[index, CutBin:, CutBin+i]
det_cross += np.linalg.det(newCov)
else:
det_cross = 0.
for i in xrange(self.nbin):
newCov = np.copy(Cov_theory[index, :, :])
newCov[:, i] = Cov_observ[index, :, i]
det_cross += np.linalg.det(newCov)
if self.use_lmax_lincut:
chi2 += (2.*ell+1.)*self.fsky*(det_cross/det_theory + math.log(det_theory/det_observ) - self.nbin+CutBin)
else:
chi2 += (2.*ell+1.)*self.fsky*(det_cross/det_theory + math.log(det_theory/det_observ) - self.nbin)
# Finally adding a gaussian prior on the epsilon nuisance parameter, if
# present
if 'epsilon' in self.use_nuisance:
epsilon = data.mcmc_parameters['epsilon']['current'] * \
data.mcmc_parameters['epsilon']['scale']
chi2 += epsilon**2
#end = time.time()
#print("time needed in s:",(end-start))
return -chi2/2.
|
brinckmannREPO_NAMEmontepython_publicPATH_START.@montepython_public_extracted@montepython_public-master@montepython@likelihoods@ska1_lensing@__init__.py@.PATH_END.py
|
{
"filename": "plot_save_mesa_individual_fin1p25.py",
"repo_name": "NikolayBritavskiyAstro/fast_rotating_binaries",
"repo_path": "fast_rotating_binaries_extracted/fast_rotating_binaries-main/src/scripts/plot_save_mesa_individual_fin1p25.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import os
import mesaPlot as mp
from showyourwork.paths import user as Paths
# Resolve the showyourwork project paths (data/, scripts/, figures/).
paths = Paths()

# Ensure the MESA output has been unzipped: probe one representative history
# file and, only if it is missing, run the extraction helper script.
# (Rewrote the original ``if exists: pass / else: os.system(...)`` into a
# direct negative test — the empty ``pass`` branch was a no-op.)
_probe = os.path.join(paths.data,
                      'post_interaction/30_20_1p25_g1_new/LOGS3/history.data')
if not os.path.exists(_probe):
    os.system(f'python {os.path.join(paths.scripts / "unzip_MESA_output.py")}')

# Apply the repository-wide matplotlib style sheet.
plt.style.use(paths.scripts / "matplotlibrc")
def find_nearest(array, value):
    """Return the index of the entry of *array* closest to *value*."""
    deltas = np.abs(np.asarray(array) - value)
    return deltas.argmin()
# ===========================================================================
# System 1: initial period P_ini = 1.25 days (30 + 20 Msun).
# One mesaPlot reader per history file: "_newtides"/"posydon" = POSYDON tide
# prescription, "_gN_new" = MESA default with gamma = N; plain names = MESA
# default with gamma = 0.  LOGS1/LOGS2 are the two stars, LOGS3 the binary.
# ===========================================================================
m_200 = mp.MESA()
m2_200 = mp.MESA()
m3_200 = mp.MESA()
m_200_newtides = mp.MESA()
m2_200_newtides = mp.MESA()
m3_200_newtides = mp.MESA()
m3_200_g1_new = mp.MESA()
m3_200_g2_new = mp.MESA()
m3_200_g10_new = mp.MESA()
name = 'post_interaction/30_20_1p25'
# Point each reader at its LOGS directory and load history.data.
m3_200_g1_new.log_fold = os.path.join(paths.data, name + '_g1_new/LOGS3')
print(os.path.join(paths.data, name + '_g1_new/LOGS3'))
m3_200_g1_new.loadHistory()
m3_200_g2_new.log_fold = os.path.join(paths.data, name + '_g2_new/LOGS3')
# m3_200_g2_new.log_fold=name+'_g1_pos_new/LOGS3'
m3_200_g2_new.loadHistory()
m3_200_g10_new.log_fold = os.path.join(paths.data, name + '_g10_new/LOGS3')
m3_200_g10_new.loadHistory()
m_200.log_fold = os.path.join(paths.data, name + '/LOGS1')
m_200.loadHistory()
m2_200.log_fold = os.path.join(paths.data, name + '/LOGS2')
m2_200.loadHistory()
m3_200.log_fold = os.path.join(paths.data, name + '/LOGS3')
m3_200.loadHistory()
m_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS1')
m_200_newtides.loadHistory()
m2_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS2')
m2_200_newtides.loadHistory()
m3_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS3')
m3_200_newtides.loadHistory()
# --- Extract history columns for the gamma = 0 run --------------------------
star_age_200 = m_200.hist.star_age
surf_avg_vtor_1 = m_200.hist.surf_avg_v_rot
surf_avg_vtor_2 = m2_200.hist.surf_avg_v_rot
surf_avg_omega200 = m_200.hist.surf_avg_omega
star_1_radius200 = m3_200.hist.star_1_radius
star_1_J_orb_200 = m3_200.hist.J_orb
star_1_J_spin_200 = m3_200.hist.J_spin_1
star_2_J_spin_200 = m3_200.hist.J_spin_2
rl_relative_gap_1 = m3_200.hist.rl_relative_overflow_1
rl_relative_gap_2 = m3_200.hist.rl_relative_overflow_2
star_1_mass = m3_200.hist.star_1_mass
star_2_mass = m3_200.hist.star_2_mass
# Boolean masks: timesteps where star 1 / star 2 overflows its Roche lobe.
iRLOF_1 = rl_relative_gap_1 > 0
iRLOF_2 = rl_relative_gap_2 > 0
period_class = m3_200.hist.period_days
# --- Same columns for the gamma = 1, 2 and 10 runs --------------------------
rl_relative_gap_1_g1_new = m3_200_g1_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g1_new = m3_200_g1_new.hist.rl_relative_overflow_2
star_1_mass_g1_new = m3_200_g1_new.hist.star_1_mass
star_2_mass_g1_new = m3_200_g1_new.hist.star_2_mass
iRLOF_1_g1_new = rl_relative_gap_1_g1_new > 0
iRLOF_2_g1_new = rl_relative_gap_2_g1_new > 0
period_class_g1_new = m3_200_g1_new.hist.period_days
rl_relative_gap_1_g2_new = m3_200_g2_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g2_new = m3_200_g2_new.hist.rl_relative_overflow_2
star_1_mass_g2_new = m3_200_g2_new.hist.star_1_mass
star_2_mass_g2_new = m3_200_g2_new.hist.star_2_mass
iRLOF_1_g2_new = rl_relative_gap_1_g2_new > 0
iRLOF_2_g2_new = rl_relative_gap_2_g2_new > 0
period_class_g2_new = m3_200_g2_new.hist.period_days
rl_relative_gap_1_g10_new = m3_200_g10_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g10_new = m3_200_g10_new.hist.rl_relative_overflow_2
star_1_mass_g10_new = m3_200_g10_new.hist.star_1_mass
star_2_mass_g10_new = m3_200_g10_new.hist.star_2_mass
iRLOF_1_g10_new = rl_relative_gap_1_g10_new > 0
iRLOF_2_g10_new = rl_relative_gap_2_g10_new > 0
period_class_g10_new = m3_200_g10_new.hist.period_days
age_200 = m3_200.hist.age
jtotal_200 = m3_200.hist.J_total
log_total_angular_momentum_200 = m_200.hist.log_total_angular_momentum
surf_avg_j_rot_200 = m_200.hist.surf_avg_j_rot
center_h1_200 = m_200.hist.center_h1
LOGL_1 = m_200.hist.log_L
LOGL_2 = m2_200.hist.log_L
LOGL_1_newtides = m_200_newtides.hist.log_L
LOGL_2_newtides = m2_200_newtides.hist.log_L
log_Teff_1 = m_200.hist.log_Teff
log_Teff_2 = m2_200.hist.log_Teff
log_Teff_1_newtides = m_200_newtides.hist.log_Teff
log_Teff_2_newtides = m2_200_newtides.hist.log_Teff
# --- POSYDON-tides run ------------------------------------------------------
star_age_200_newtides = m_200_newtides.hist.star_age
surf_avg_vtor_1_newtides = m_200_newtides.hist.surf_avg_v_rot
surf_avg_vtor_2_newtides = m2_200_newtides.hist.surf_avg_v_rot
surf_avg_omega200_newtides = m_200_newtides.hist.surf_avg_omega
star_1_radius200_newtides = m3_200_newtides.hist.star_1_radius
star_1_J_orb_200_newtides = m3_200_newtides.hist.J_orb
star_1_J_spin_200_newtides = m3_200_newtides.hist.J_spin_1
star_2_J_spin_200_newtides = m3_200_newtides.hist.J_spin_2
age_200_newtides = m3_200_newtides.hist.age
jtotal_200_newtides = m3_200_newtides.hist.J_total
log_total_angular_momentum_200_newtides = m_200_newtides.hist.log_total_angular_momentum
surf_avg_j_rot_200_newtides = m_200_newtides.hist.surf_avg_j_rot
center_h1_200_newtides = m_200_newtides.hist.center_h1
# --- "class" (MESA default) vs "posydon" aliases used by the plots ----------
period_class = m3_200.hist.period_days
period_posydon = m3_200_newtides.hist.period_days
star_1_radius_class = m3_200.hist.star_1_radius
star_1_radius_posydon = m3_200_newtides.hist.star_1_radius
star_2_radius_class = m3_200.hist.star_2_radius
star_2_radius_posydon = m3_200_newtides.hist.star_2_radius
J_orb_class = m3_200.hist.J_orb
J_orb_posydon = m3_200_newtides.hist.J_orb
J_spin2_class = m3_200.hist.J_spin_2
J_spin2_posydon = m3_200_newtides.hist.J_spin_2
J_spin1_class = m3_200.hist.J_spin_1
J_spin1_posydon = m3_200_newtides.hist.J_spin_1
star_1_mass_posydon = m3_200_newtides.hist.star_1_mass
star_2_mass_posydon = m3_200_newtides.hist.star_2_mass
surf_avg_omega_1_class = m_200.hist.surf_avg_omega
surf_avg_omega_1_pos = m_200_newtides.hist.surf_avg_omega
surf_avg_omega_2_class = m2_200.hist.surf_avg_omega
surf_avg_omega_2_pos = m2_200_newtides.hist.surf_avg_omega
star_age_pos = m2_200_newtides.hist.star_age
star_age_class = m2_200.hist.star_age
# p1.log_fold='LOGS1'
# p1.loadProfile(num=-1)
# p=mp.plot()
rl_relative_gap_1_posydon = m3_200_newtides.hist.rl_relative_overflow_1
rl_relative_gap_2_posydon = m3_200_newtides.hist.rl_relative_overflow_2
age_class = m3_200.hist.age
age_posydon = m3_200_newtides.hist.age
lg_t_sync_2_class = m3_200.hist.lg_t_sync_2
lg_t_sync_2_posydon = m3_200_newtides.hist.lg_t_sync_2
lg_t_sync_1_class = m3_200.hist.lg_t_sync_1
lg_t_sync_1_posydon = m3_200_newtides.hist.lg_t_sync_1
iRLOF_1_posydon = rl_relative_gap_1_posydon > 0
iRLOF_2_posydon = rl_relative_gap_2_posydon > 0
# Slices restricted to the first Roche-lobe-overflow phase of star 1, plus
# pre-/post-RLOF masks bounded by the first and last RLOF timestep.
age_class_rlof = age_class[iRLOF_1]
age_posydon_rlof = age_posydon[iRLOF_1_posydon]
J_orb_class_rlof = J_orb_class[iRLOF_1]
J_orb_posydon_rlof = J_orb_posydon[iRLOF_1_posydon]
i_pre_RLOF_class = age_class < min(age_class[iRLOF_1])
i_pre_RLOF_pos = age_posydon < min(age_posydon[iRLOF_1_posydon])
i_post_RLOF_class = age_class > max(age_class[iRLOF_1])
i_post_RLOF_pos = age_posydon > max(age_posydon[iRLOF_1_posydon])
# Index of the star-age sample closest to RLOF onset (binary and star
# histories are separate files, so ages are matched by nearest value).
star_age_rlof_ind = find_nearest(star_age_class, min(age_class[iRLOF_1]))
star_age_rlof_ind_pos = find_nearest(star_age_pos, min(age_posydon[iRLOF_1_posydon]))
pp1_all_panel = PdfPages(paths.figures / 'p_q_1p25days.pdf')
fig = plt.figure(figsize=(10, 10))
plt.title('$\it{P}_\mathrm{ini}$ = 1.25 [days]', fontsize=30)
plt.plot(star_2_mass / star_1_mass, period_class, color='k', linestyle='-', label='MESA default, $\it{\gamma}$ = 0',
lw=2)
plt.plot(star_2_mass[iRLOF_1] / star_1_mass[iRLOF_1], period_class[iRLOF_1], lw=7, c='k')
plt.plot(star_2_mass_g1_new / star_1_mass_g1_new, period_class_g1_new, label='MESA default, $\it{\gamma}$ = 1', lw=2,
linestyle='-', color='orange')
# plt.plot(star_2_mass_g1_new[:-5000]/star_1_mass_g1_new[:-5000], period_class_g1_new[:-5000],label='MESA default, $\gamma$ = 1',lw=2,linestyle='-',color='orange')
plt.plot(star_2_mass_g1_new[iRLOF_1_g1_new] / star_1_mass_g1_new[iRLOF_1_g1_new], period_class_g1_new[iRLOF_1_g1_new],
lw=7, linestyle='-', color='orange')
plt.plot(star_2_mass_g2_new / star_1_mass_g2_new, period_class_g2_new, label='MESA default, $\it{\gamma}$ = 2', lw=2,
linestyle='-', color='blue')
plt.plot(star_2_mass_g2_new[iRLOF_1_g2_new] / star_1_mass_g2_new[iRLOF_1_g2_new], period_class_g2_new[iRLOF_1_g2_new],
lw=7, linestyle='-', color='blue')
plt.plot(star_2_mass_g10_new / star_1_mass_g10_new, period_class_g10_new, label='MESA default, $\it{\gamma}$ = 10',
lw=2, linestyle='-', color='red')
plt.plot(star_2_mass_g10_new[iRLOF_1_g10_new] / star_1_mass_g10_new[iRLOF_1_g10_new],
period_class_g10_new[iRLOF_1_g10_new], lw=7, linestyle='-', color='red')
plt.plot(star_2_mass_posydon / star_1_mass_posydon, period_posydon, linestyle='-', label='POSYDON, $\it{\gamma}$ = 0',
color='green', lw=2)
plt.plot(star_2_mass_posydon[iRLOF_1_posydon] / star_1_mass_posydon[iRLOF_1_posydon], period_posydon[iRLOF_1_posydon],
lw=7, c='green')
'''
if any(iRLOF_2_g1_new) == True:
indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
indx2 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_2_g1_new]))
print('(len(iRLOF_1_g1_new) > 0):', indx1)
print('(len(iRLOF_2_g1_new) > 0):', indx2)
plt.plot(star_2_mass_g1_new[0:indx1]/star_1_mass_g1_new[0:indx1],period_class_g1_new[0:indx1],color='orange',linestyle='-',label='MESA default, $\gamma$ = 1',lw=2)
plt.plot(star_2_mass_g1_new[indx1:indx2]/star_1_mass_g1_new[indx1:indx2],period_class_g1_new[indx1:indx2], lw=7, c='orange')
plt.plot(star_2_mass_g1_new[indx2]/star_1_mass_g1_new[indx2],period_class_g1_new[indx2], marker='o', c='orange', mfc = 'orange', ms = 25)
else:
indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
print('(len(iRLOF_1_g1_new) > 0):', indx1)
plt.plot(star_2_mass_g1_new[0:indx1]/star_1_mass_g1_new[0:indx1],period_class_g1_new[0:indx1],color='orange',linestyle='-',label='MESA default, $\gamma$ = 1',lw=2)
plt.plot(star_2_mass_g1_new[indx1:-1]/star_1_mass_g1_new[indx1:-1],period_class_g1_new[indx1:-1], lw=7, c='orange')
plt.plot(star_2_mass_g1_new[-2]/star_1_mass_g1_new[-2],period_class_g1_new[-2], marker='o', c='orange',ms=25 ,mfc = 'None')
if any(iRLOF_2_g2_new) == True:
indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
indx2 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_2_g2_new]))
print('(len(iRLOF_1_g2_new) > 0):', indx1)
print('(len(iRLOF_2_g2_new) > 0):', indx2)
plt.plot(star_2_mass_g2_new[0:indx1]/star_1_mass_g2_new[0:indx1],period_class_g2_new[0:indx1],color='blue',linestyle='-',label='MESA default, $\gamma$ = 2',lw=2)
plt.plot(star_2_mass_g2_new[indx1:indx2]/star_1_mass_g2_new[indx1:indx2],period_class_g2_new[indx1:indx2], lw=7, c='blue')
plt.plot(star_2_mass_g2_new[indx2]/star_1_mass_g2_new[indx2],period_class_g2_new[indx2], marker='o', c='blue', mfc = 'blue', ms = 25)
else:
indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
print('(len(iRLOF_1_g2_new) > 0):', indx1)
plt.plot(star_2_mass_g2_new[0:indx1]/star_1_mass_g2_new[0:indx1],period_class_g2_new[0:indx1],color='blue',linestyle='-',label='MESA default, $\gamma$ = 2',lw=2)
plt.plot(star_2_mass_g2_new[indx1:-1]/star_1_mass_g2_new[indx1:-1],period_class_g2_new[indx1:-1], lw=7, c='blue')
plt.plot(star_2_mass_g2_new[-1]/star_1_mass_g2_new[-1],period_class_g2_new[-1], marker='o', c='blue',ms=25 ,mfc = 'None')
if any(iRLOF_2_g10_new) == True:
indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
indx2 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_2_g10_new]))
print('(len(iRLOF_1_g10_new) > 0):', indx1)
print('(len(iRLOF_2_g10_new) > 0):', indx2)
plt.plot(star_2_mass_g10_new[0:indx1]/star_1_mass_g10_new[0:indx1],period_class_g10_new[0:indx1],color='red',linestyle='-',label='MESA default, $\gamma$ = 10',lw=2)
plt.plot(star_2_mass_g10_new[indx1:indx2]/star_1_mass_g10_new[indx1:indx2],period_class_g10_new[indx1:indx2], lw=7, c='red')
plt.plot(star_2_mass_g10_new[indx2]/star_1_mass[indx2],period_class_g10_new[indx2], marker='o', c='red', mfc = 'red', ms = 25)
else:
indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
print('(len(iRLOF_1_g10_new) > 0):', indx1)
plt.plot(star_2_mass_g10_new[0:indx1]/star_1_mass_g10_new[0:indx1],period_class_g10_new[0:indx1],color='red',linestyle='-',label='MESA default, $\gamma$ = 10',lw=2)
plt.plot(star_2_mass_g10_new[indx1:-1]/star_1_mass_g10_new[indx1:-1],period_class_g10_new[indx1:-1], lw=7, c='red')
plt.plot(star_2_mass_g10_new[-1]/star_1_mass_g10_new[-1],period_class_g10_new[-1], marker='o', c='red',ms=25 ,mfc = 'None')
if any(iRLOF_2_posydon) == True:
indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
indx2 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_2_posydon]))
print('(len(iRLOF_1_posydon) > 0):', indx1)
print('(len(iRLOF_2_posydon) > 0):', indx2)
plt.plot(star_2_mass_posydon[0:indx1]/star_1_mass_posydon[0:indx1],period_posydon[0:indx1],color='green',linestyle='-',label='POSYDON, $\gamma$ = 0',lw=2)
plt.plot(star_2_mass_posydon[indx1:indx2]/star_1_mass_posydon[indx1:indx2],period_posydon[indx1:indx2], lw=7, c='green')
plt.plot(star_2_mass_posydon[indx2]/star_1_mass_posydon[indx2],period_posydon[indx2], marker='o', c='green', mfc = 'green', ms = 25)
else:
indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
print('(len(iRLOF_1_posydon) > 0):', indx1)
plt.plot(star_2_mass_posydon[0:indx1]/star_1_mass_posydon[0:indx1],period_posydon[0:indx1],color='green',linestyle='-',label='POSYDON, $\gamma$ = 0',lw=2)
plt.plot(star_2_mass_posydon[indx1:-1]/star_1_mass_posydon[indx1:-1],period_posydon[indx1:-1], lw=7, c='green')
plt.plot(star_2_mass_posydon[-1]/star_1_mass_posydon[-1],period_posydon[-1], marker='o', c='green',ms=25 ,mfc = 'None')
'''
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# plt.xlim([5e6,7.5e6])
plt.ylim([0.9, 1.4]) # 1.25 days
# plt.ylim([1.9,7.5]) # 5 days
# plt.ylim([1,5]) # 3 days
# plt.ylim([0,17]) # 10 days
# plt.ylim([0,125]) # 70 days
# plt.ylim([0,90]) # 50 days
plt.xlabel('$\it{Q}$', fontsize=25)
plt.ylabel('Period [days]', fontsize=25)
plt.legend(loc=2, fontsize=20)
##plt.xlim([0.3,1.8])
###plt.xlim([0.3,1.5])
# plt.xlim([0.64,0.72])
plt.xlim([0.64, 0.72]) # 1.25 days
# plt.xlim([0.25,1.8]) # 5 days
# plt.xlim([0.3,2]) # 3 days
# plt.xlim([0.3,1.8]) # 10 days
# plt.xlim([0.3,1.8]) # 70 days
plt.savefig(pp1_all_panel, format='pdf')
pp1_all_panel.close()
# ===========================================================================
# System 2: initial period P_ini = 3 days (30 + 20 Msun).
# This repeats the loading/extraction of system 1 verbatim, rebinding the
# same module-level names to the new runs.  NOTE(review): a loop or helper
# function over the three systems would remove this ~130-line duplication.
# ===========================================================================
m_200 = mp.MESA()
m2_200 = mp.MESA()
m3_200 = mp.MESA()
m_200_newtides = mp.MESA()
m2_200_newtides = mp.MESA()
m3_200_newtides = mp.MESA()
m3_200_g1_new = mp.MESA()
m3_200_g2_new = mp.MESA()
m3_200_g10_new = mp.MESA()
name = 'post_interaction/30_20_3'
print(os.path.join(paths.data, name + '_g1_new/LOGS3'))
m3_200_g1_new.log_fold = os.path.join(paths.data, name + '_g1_new/LOGS3')
m3_200_g1_new.loadHistory()
m3_200_g2_new.log_fold = os.path.join(paths.data, name + '_g2_new/LOGS3')
# m3_200_g2_new.log_fold=name+'_g1_pos_new/LOGS3'
m3_200_g2_new.loadHistory()
m3_200_g10_new.log_fold = os.path.join(paths.data, name + '_g10_new/LOGS3')
m3_200_g10_new.loadHistory()
m_200.log_fold = os.path.join(paths.data, name + '/LOGS1')
m_200.loadHistory()
m2_200.log_fold = os.path.join(paths.data, name + '/LOGS2')
m2_200.loadHistory()
m3_200.log_fold = os.path.join(paths.data, name + '/LOGS3')
m3_200.loadHistory()
m_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS1')
m_200_newtides.loadHistory()
m2_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS2')
m2_200_newtides.loadHistory()
m3_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS3')
m3_200_newtides.loadHistory()
# --- Column extraction (same layout as system 1) ----------------------------
star_age_200 = m_200.hist.star_age
surf_avg_vtor_1 = m_200.hist.surf_avg_v_rot
surf_avg_vtor_2 = m2_200.hist.surf_avg_v_rot
surf_avg_omega200 = m_200.hist.surf_avg_omega
star_1_radius200 = m3_200.hist.star_1_radius
star_1_J_orb_200 = m3_200.hist.J_orb
star_1_J_spin_200 = m3_200.hist.J_spin_1
star_2_J_spin_200 = m3_200.hist.J_spin_2
rl_relative_gap_1 = m3_200.hist.rl_relative_overflow_1
rl_relative_gap_2 = m3_200.hist.rl_relative_overflow_2
star_1_mass = m3_200.hist.star_1_mass
star_2_mass = m3_200.hist.star_2_mass
iRLOF_1 = rl_relative_gap_1 > 0
iRLOF_2 = rl_relative_gap_2 > 0
period_class = m3_200.hist.period_days
rl_relative_gap_1_g1_new = m3_200_g1_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g1_new = m3_200_g1_new.hist.rl_relative_overflow_2
star_1_mass_g1_new = m3_200_g1_new.hist.star_1_mass
star_2_mass_g1_new = m3_200_g1_new.hist.star_2_mass
iRLOF_1_g1_new = rl_relative_gap_1_g1_new > 0
iRLOF_2_g1_new = rl_relative_gap_2_g1_new > 0
period_class_g1_new = m3_200_g1_new.hist.period_days
rl_relative_gap_1_g2_new = m3_200_g2_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g2_new = m3_200_g2_new.hist.rl_relative_overflow_2
star_1_mass_g2_new = m3_200_g2_new.hist.star_1_mass
star_2_mass_g2_new = m3_200_g2_new.hist.star_2_mass
iRLOF_1_g2_new = rl_relative_gap_1_g2_new > 0
iRLOF_2_g2_new = rl_relative_gap_2_g2_new > 0
period_class_g2_new = m3_200_g2_new.hist.period_days
rl_relative_gap_1_g10_new = m3_200_g10_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g10_new = m3_200_g10_new.hist.rl_relative_overflow_2
star_1_mass_g10_new = m3_200_g10_new.hist.star_1_mass
star_2_mass_g10_new = m3_200_g10_new.hist.star_2_mass
iRLOF_1_g10_new = rl_relative_gap_1_g10_new > 0
iRLOF_2_g10_new = rl_relative_gap_2_g10_new > 0
period_class_g10_new = m3_200_g10_new.hist.period_days
age_200 = m3_200.hist.age
jtotal_200 = m3_200.hist.J_total
log_total_angular_momentum_200 = m_200.hist.log_total_angular_momentum
surf_avg_j_rot_200 = m_200.hist.surf_avg_j_rot
center_h1_200 = m_200.hist.center_h1
LOGL_1 = m_200.hist.log_L
LOGL_2 = m2_200.hist.log_L
LOGL_1_newtides = m_200_newtides.hist.log_L
LOGL_2_newtides = m2_200_newtides.hist.log_L
log_Teff_1 = m_200.hist.log_Teff
log_Teff_2 = m2_200.hist.log_Teff
log_Teff_1_newtides = m_200_newtides.hist.log_Teff
log_Teff_2_newtides = m2_200_newtides.hist.log_Teff
star_age_200_newtides = m_200_newtides.hist.star_age
surf_avg_vtor_1_newtides = m_200_newtides.hist.surf_avg_v_rot
surf_avg_vtor_2_newtides = m2_200_newtides.hist.surf_avg_v_rot
surf_avg_omega200_newtides = m_200_newtides.hist.surf_avg_omega
star_1_radius200_newtides = m3_200_newtides.hist.star_1_radius
star_1_J_orb_200_newtides = m3_200_newtides.hist.J_orb
star_1_J_spin_200_newtides = m3_200_newtides.hist.J_spin_1
star_2_J_spin_200_newtides = m3_200_newtides.hist.J_spin_2
age_200_newtides = m3_200_newtides.hist.age
jtotal_200_newtides = m3_200_newtides.hist.J_total
log_total_angular_momentum_200_newtides = m_200_newtides.hist.log_total_angular_momentum
surf_avg_j_rot_200_newtides = m_200_newtides.hist.surf_avg_j_rot
center_h1_200_newtides = m_200_newtides.hist.center_h1
period_class = m3_200.hist.period_days
period_posydon = m3_200_newtides.hist.period_days
star_1_radius_class = m3_200.hist.star_1_radius
star_1_radius_posydon = m3_200_newtides.hist.star_1_radius
star_2_radius_class = m3_200.hist.star_2_radius
star_2_radius_posydon = m3_200_newtides.hist.star_2_radius
J_orb_class = m3_200.hist.J_orb
J_orb_posydon = m3_200_newtides.hist.J_orb
J_spin2_class = m3_200.hist.J_spin_2
J_spin2_posydon = m3_200_newtides.hist.J_spin_2
J_spin1_class = m3_200.hist.J_spin_1
J_spin1_posydon = m3_200_newtides.hist.J_spin_1
star_1_mass_posydon = m3_200_newtides.hist.star_1_mass
star_2_mass_posydon = m3_200_newtides.hist.star_2_mass
surf_avg_omega_1_class = m_200.hist.surf_avg_omega
surf_avg_omega_1_pos = m_200_newtides.hist.surf_avg_omega
surf_avg_omega_2_class = m2_200.hist.surf_avg_omega
surf_avg_omega_2_pos = m2_200_newtides.hist.surf_avg_omega
star_age_pos = m2_200_newtides.hist.star_age
star_age_class = m2_200.hist.star_age
# p1.log_fold='LOGS1'
# p1.loadProfile(num=-1)
# p=mp.plot()
rl_relative_gap_1_posydon = m3_200_newtides.hist.rl_relative_overflow_1
rl_relative_gap_2_posydon = m3_200_newtides.hist.rl_relative_overflow_2
age_class = m3_200.hist.age
age_posydon = m3_200_newtides.hist.age
lg_t_sync_2_class = m3_200.hist.lg_t_sync_2
lg_t_sync_2_posydon = m3_200_newtides.hist.lg_t_sync_2
lg_t_sync_1_class = m3_200.hist.lg_t_sync_1
lg_t_sync_1_posydon = m3_200_newtides.hist.lg_t_sync_1
iRLOF_1_posydon = rl_relative_gap_1_posydon > 0
iRLOF_2_posydon = rl_relative_gap_2_posydon > 0
age_class_rlof = age_class[iRLOF_1]
age_posydon_rlof = age_posydon[iRLOF_1_posydon]
J_orb_class_rlof = J_orb_class[iRLOF_1]
J_orb_posydon_rlof = J_orb_posydon[iRLOF_1_posydon]
i_pre_RLOF_class = age_class < min(age_class[iRLOF_1])
i_pre_RLOF_pos = age_posydon < min(age_posydon[iRLOF_1_posydon])
i_post_RLOF_class = age_class > max(age_class[iRLOF_1])
i_post_RLOF_pos = age_posydon > max(age_posydon[iRLOF_1_posydon])
star_age_rlof_ind = find_nearest(star_age_class, min(age_class[iRLOF_1]))
star_age_rlof_ind_pos = find_nearest(star_age_pos, min(age_posydon[iRLOF_1_posydon]))
# ---------------------------------------------------------------------------
# Figure 2: period vs mass ratio Q for P_ini = 3 days
# (figures/p_q_3days.pdf).  Per model: thin line up to RLOF onset, thick
# line during RLOF, and an end marker -- filled if a second RLOF phase
# (star 2) occurred, open otherwise.  indx1/indx2 locate the onset of each
# RLOF phase via the index of the minimum star-2 mass within that phase.
# ---------------------------------------------------------------------------
pp1_all_panel3 = PdfPages(paths.figures / 'p_q_3days.pdf')
fig = plt.figure(figsize=(10, 10))
plt.title('$\it{P}_\mathrm{ini}$ = 3 [days]', fontsize=30)
# plt.plot(star_2_mass/star_1_mass,period_class,color='k',linestyle='-',label='MESA default, $\gamma$ = 0',lw=2)
# plt.plot(star_2_mass[iRLOF_1]/star_1_mass[iRLOF_1],period_class[iRLOF_1], lw=7, c='k')
# gamma = 0 track (black).
if any(iRLOF_2) == True:
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    indx2 = list(star_2_mass).index(min(star_2_mass[iRLOF_2]))
    print('(len(iRLOF_1) > 0):', indx1)
    print('(len(iRLOF_2) > 0):', indx2)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label='MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:indx2] / star_1_mass[indx1:indx2], period_class[indx1:indx2], lw=7, c='k')
    plt.plot(star_2_mass[indx2] / star_1_mass[indx2], period_class[indx2], marker='o', c='k', mfc='k', ms=25)
else:
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    print('(len(iRLOF_1) > 0):', indx1)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label='MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:-1] / star_1_mass[indx1:-1], period_class[indx1:-1], lw=7, c='k')
    plt.plot(star_2_mass[-1] / star_1_mass[-1], period_class[-1], marker='o', c='k', ms=25, mfc='None')
# gamma = 1 track (orange).
if any(iRLOF_2_g1_new) == True:
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    indx2 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_2_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    print('(len(iRLOF_2_g1_new) > 0):', indx2)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:indx2] / star_1_mass_g1_new[indx1:indx2], period_class_g1_new[indx1:indx2], lw=7,
             c='orange')
    plt.plot(star_2_mass_g1_new[indx2] / star_1_mass_g1_new[indx2], period_class_g1_new[indx2], marker='o', c='orange',
             mfc='orange', ms=25)
else:
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:-1] / star_1_mass_g1_new[indx1:-1], period_class_g1_new[indx1:-1], lw=7,
             c='orange')
    # NOTE(review): this branch marks index [-2] while the sibling branches
    # use [-1] -- looks inconsistent; confirm whether the last gamma=1 sample
    # is deliberately skipped.
    plt.plot(star_2_mass_g1_new[-2] / star_1_mass_g1_new[-2], period_class_g1_new[-2], marker='o', c='orange', ms=25,
             mfc='None')
# gamma = 2 track (blue).
if any(iRLOF_2_g2_new) == True:
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    indx2 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_2_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    print('(len(iRLOF_2_g2_new) > 0):', indx2)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:indx2] / star_1_mass_g2_new[indx1:indx2], period_class_g2_new[indx1:indx2], lw=7,
             c='blue')
    plt.plot(star_2_mass_g2_new[indx2] / star_1_mass_g2_new[indx2], period_class_g2_new[indx2], marker='o', c='blue',
             mfc='blue', ms=25)
else:
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:-1] / star_1_mass_g2_new[indx1:-1], period_class_g2_new[indx1:-1], lw=7, c='blue')
    plt.plot(star_2_mass_g2_new[-1] / star_1_mass_g2_new[-1], period_class_g2_new[-1], marker='o', c='blue', ms=25,
             mfc='None')
# gamma = 10 track (red): thin pre-RLOF line, thick RLOF segment, and an
# end-of-RLOF marker (filled when a second RLOF phase exists, open otherwise).
# indx1/indx2 are the positions, in the full history, of the minimum star-2
# mass seen during RLOF phase 1 / phase 2 respectively.
if any(iRLOF_2_g10_new):  # idiom: dropped redundant "== True"
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    indx2 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_2_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    print('(len(iRLOF_2_g10_new) > 0):', indx2)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:indx2] / star_1_mass_g10_new[indx1:indx2], period_class_g10_new[indx1:indx2],
             lw=7, c='red')
    # BUG FIX: the marker previously divided by star_1_mass (the gamma = 0
    # run's primary mass) instead of star_1_mass_g10_new, unlike every
    # sibling branch, placing the point at the wrong mass ratio.
    plt.plot(star_2_mass_g10_new[indx2] / star_1_mass_g10_new[indx2], period_class_g10_new[indx2], marker='o', c='red',
             mfc='red', ms=25)
else:
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:-1] / star_1_mass_g10_new[indx1:-1], period_class_g10_new[indx1:-1], lw=7,
             c='red')
    plt.plot(star_2_mass_g10_new[-1] / star_1_mass_g10_new[-1], period_class_g10_new[-1], marker='o', c='red', ms=25,
             mfc='None')
# POSYDON-tides track (green), same thin/thick/marker scheme.
if any(iRLOF_2_posydon) == True:
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    indx2 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_2_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    print('(len(iRLOF_2_posydon) > 0):', indx2)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label='POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:indx2] / star_1_mass_posydon[indx1:indx2], period_posydon[indx1:indx2], lw=7,
             c='green')
    plt.plot(star_2_mass_posydon[indx2] / star_1_mass_posydon[indx2], period_posydon[indx2], marker='o', c='green',
             mfc='green', ms=25)
else:
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label='POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:-1] / star_1_mass_posydon[indx1:-1], period_posydon[indx1:-1], lw=7, c='green')
    plt.plot(star_2_mass_posydon[-1] / star_1_mass_posydon[-1], period_posydon[-1], marker='o', c='green', ms=25,
             mfc='None')
# Axis limits hand-tuned for the 3-day system; commented values belong to the
# other P_ini figures.
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# plt.xlim([5e6,7.5e6])
# plt.ylim([0.9,1.4]) # 1.25 days
# plt.ylim([1.9,7.5]) # 5 days
plt.ylim([1, 5]) # 3 days
# plt.ylim([0,17]) # 10 days
# plt.ylim([0,125]) # 70 days
# plt.ylim([0,90]) # 50 days
plt.xlabel('$\it{Q}$', fontsize=25)
plt.ylabel('Period [days]', fontsize=25)
plt.legend(loc=2, fontsize=20)
##plt.xlim([0.3,1.8])
###plt.xlim([0.3,1.5])
# plt.xlim([0.64,0.72])
# plt.xlim([0.64,0.72]) # 1.25 days
# plt.xlim([0.25,1.8]) # 5 days
plt.xlim([0.3, 2]) # 3 days
# plt.xlim([0.3,1.8]) # 10 days
# plt.xlim([0.3,1.8]) # 70 days
plt.savefig(pp1_all_panel3, format='pdf')
pp1_all_panel3.close()
# ===========================================================================
# System 3: fresh mesaPlot readers for the next run set.
# FIX: the original instantiated this exact group of nine readers twice in a
# row; the second, redundant group was removed -- immediately re-binding the
# same names just discards the first set of objects with no other effect.
# ===========================================================================
m_200 = mp.MESA()
m2_200 = mp.MESA()
m3_200 = mp.MESA()
m_200_newtides = mp.MESA()
m2_200_newtides = mp.MESA()
m3_200_newtides = mp.MESA()
m3_200_g1_new = mp.MESA()
m3_200_g2_new = mp.MESA()
m3_200_g10_new = mp.MESA()
name = 'post_interaction/30_20_5'
print(os.path.join(paths.data, name + '_g1_new/LOGS3'))
# Point each reader at its LOGS directory and load the MESA history,
# in the same order as before: gamma runs, default-tides run, POSYDON run.
for reader, subdir in [
    (m3_200_g1_new, '_g1_new/LOGS3'),
    (m3_200_g2_new, '_g2_new/LOGS3'),
    (m3_200_g10_new, '_g10_new/LOGS3'),
    (m_200, '/LOGS1'),
    (m2_200, '/LOGS2'),
    (m3_200, '/LOGS3'),
    (m_200_newtides, '_posydon/LOGS1'),
    (m2_200_newtides, '_posydon/LOGS2'),
    (m3_200_newtides, '_posydon/LOGS3'),
]:
    reader.log_fold = os.path.join(paths.data, name + subdir)
    reader.loadHistory()
# --- Pull history columns from the freshly loaded runs into module-level
# arrays. Naming convention: no suffix / "_class" = MESA-default-tides run,
# "_newtides" / "_posydon" / "_pos" = POSYDON-tides run, "_gN_new" = the
# gamma = N mass-transfer runs. Not every column extracted here is used by
# the plotting code below.
star_age_200 = m_200.hist.star_age
surf_avg_vtor_1 = m_200.hist.surf_avg_v_rot
surf_avg_vtor_2 = m2_200.hist.surf_avg_v_rot
surf_avg_omega200 = m_200.hist.surf_avg_omega
star_1_radius200 = m3_200.hist.star_1_radius
star_1_J_orb_200 = m3_200.hist.J_orb
star_1_J_spin_200 = m3_200.hist.J_spin_1
star_2_J_spin_200 = m3_200.hist.J_spin_2
rl_relative_gap_1 = m3_200.hist.rl_relative_overflow_1
rl_relative_gap_2 = m3_200.hist.rl_relative_overflow_2
star_1_mass = m3_200.hist.star_1_mass
star_2_mass = m3_200.hist.star_2_mass
# Boolean masks: True where star 1 / star 2 overflows its Roche lobe.
iRLOF_1 = rl_relative_gap_1 > 0
iRLOF_2 = rl_relative_gap_2 > 0
period_class = m3_200.hist.period_days
# Same quantities for the gamma = 1 run.
rl_relative_gap_1_g1_new = m3_200_g1_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g1_new = m3_200_g1_new.hist.rl_relative_overflow_2
star_1_mass_g1_new = m3_200_g1_new.hist.star_1_mass
star_2_mass_g1_new = m3_200_g1_new.hist.star_2_mass
iRLOF_1_g1_new = rl_relative_gap_1_g1_new > 0
iRLOF_2_g1_new = rl_relative_gap_2_g1_new > 0
period_class_g1_new = m3_200_g1_new.hist.period_days
# Same quantities for the gamma = 2 run.
rl_relative_gap_1_g2_new = m3_200_g2_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g2_new = m3_200_g2_new.hist.rl_relative_overflow_2
star_1_mass_g2_new = m3_200_g2_new.hist.star_1_mass
star_2_mass_g2_new = m3_200_g2_new.hist.star_2_mass
iRLOF_1_g2_new = rl_relative_gap_1_g2_new > 0
iRLOF_2_g2_new = rl_relative_gap_2_g2_new > 0
period_class_g2_new = m3_200_g2_new.hist.period_days
# Same quantities for the gamma = 10 run.
rl_relative_gap_1_g10_new = m3_200_g10_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g10_new = m3_200_g10_new.hist.rl_relative_overflow_2
star_1_mass_g10_new = m3_200_g10_new.hist.star_1_mass
star_2_mass_g10_new = m3_200_g10_new.hist.star_2_mass
iRLOF_1_g10_new = rl_relative_gap_1_g10_new > 0
iRLOF_2_g10_new = rl_relative_gap_2_g10_new > 0
period_class_g10_new = m3_200_g10_new.hist.period_days
# Additional default-run and POSYDON-run columns (ages, angular momenta,
# luminosities, temperatures, rotation).
age_200 = m3_200.hist.age
jtotal_200 = m3_200.hist.J_total
log_total_angular_momentum_200 = m_200.hist.log_total_angular_momentum
surf_avg_j_rot_200 = m_200.hist.surf_avg_j_rot
center_h1_200 = m_200.hist.center_h1
LOGL_1 = m_200.hist.log_L
LOGL_2 = m2_200.hist.log_L
LOGL_1_newtides = m_200_newtides.hist.log_L
LOGL_2_newtides = m2_200_newtides.hist.log_L
log_Teff_1 = m_200.hist.log_Teff
log_Teff_2 = m2_200.hist.log_Teff
log_Teff_1_newtides = m_200_newtides.hist.log_Teff
log_Teff_2_newtides = m2_200_newtides.hist.log_Teff
star_age_200_newtides = m_200_newtides.hist.star_age
surf_avg_vtor_1_newtides = m_200_newtides.hist.surf_avg_v_rot
surf_avg_vtor_2_newtides = m2_200_newtides.hist.surf_avg_v_rot
surf_avg_omega200_newtides = m_200_newtides.hist.surf_avg_omega
star_1_radius200_newtides = m3_200_newtides.hist.star_1_radius
star_1_J_orb_200_newtides = m3_200_newtides.hist.J_orb
star_1_J_spin_200_newtides = m3_200_newtides.hist.J_spin_1
star_2_J_spin_200_newtides = m3_200_newtides.hist.J_spin_2
age_200_newtides = m3_200_newtides.hist.age
jtotal_200_newtides = m3_200_newtides.hist.J_total
log_total_angular_momentum_200_newtides = m_200_newtides.hist.log_total_angular_momentum
surf_avg_j_rot_200_newtides = m_200_newtides.hist.surf_avg_j_rot
center_h1_200_newtides = m_200_newtides.hist.center_h1
# Side-by-side "class" (MESA default) vs "posydon" comparison arrays.
# NOTE: period_class is re-assigned here with the same value as above.
period_class = m3_200.hist.period_days
period_posydon = m3_200_newtides.hist.period_days
star_1_radius_class = m3_200.hist.star_1_radius
star_1_radius_posydon = m3_200_newtides.hist.star_1_radius
star_2_radius_class = m3_200.hist.star_2_radius
star_2_radius_posydon = m3_200_newtides.hist.star_2_radius
J_orb_class = m3_200.hist.J_orb
J_orb_posydon = m3_200_newtides.hist.J_orb
J_spin2_class = m3_200.hist.J_spin_2
J_spin2_posydon = m3_200_newtides.hist.J_spin_2
J_spin1_class = m3_200.hist.J_spin_1
J_spin1_posydon = m3_200_newtides.hist.J_spin_1
star_1_mass_posydon = m3_200_newtides.hist.star_1_mass
star_2_mass_posydon = m3_200_newtides.hist.star_2_mass
surf_avg_omega_1_class = m_200.hist.surf_avg_omega
surf_avg_omega_1_pos = m_200_newtides.hist.surf_avg_omega
surf_avg_omega_2_class = m2_200.hist.surf_avg_omega
surf_avg_omega_2_pos = m2_200_newtides.hist.surf_avg_omega
star_age_pos = m2_200_newtides.hist.star_age
star_age_class = m2_200.hist.star_age
# p1.log_fold='LOGS1'
# p1.loadProfile(num=-1)
# p=mp.plot()
# RLOF masks and RLOF-phase selections for the POSYDON run.
rl_relative_gap_1_posydon = m3_200_newtides.hist.rl_relative_overflow_1
rl_relative_gap_2_posydon = m3_200_newtides.hist.rl_relative_overflow_2
age_class = m3_200.hist.age
age_posydon = m3_200_newtides.hist.age
lg_t_sync_2_class = m3_200.hist.lg_t_sync_2
lg_t_sync_2_posydon = m3_200_newtides.hist.lg_t_sync_2
lg_t_sync_1_class = m3_200.hist.lg_t_sync_1
lg_t_sync_1_posydon = m3_200_newtides.hist.lg_t_sync_1
iRLOF_1_posydon = rl_relative_gap_1_posydon > 0
iRLOF_2_posydon = rl_relative_gap_2_posydon > 0
# Ages and orbital angular momenta restricted to the star-1 RLOF phase,
# plus pre-/post-RLOF masks and nearest-age indices (find_nearest is a
# helper defined elsewhere in this script).
age_class_rlof = age_class[iRLOF_1]
age_posydon_rlof = age_posydon[iRLOF_1_posydon]
J_orb_class_rlof = J_orb_class[iRLOF_1]
J_orb_posydon_rlof = J_orb_posydon[iRLOF_1_posydon]
i_pre_RLOF_class = age_class < min(age_class[iRLOF_1])
i_pre_RLOF_pos = age_posydon < min(age_posydon[iRLOF_1_posydon])
i_post_RLOF_class = age_class > max(age_class[iRLOF_1])
i_post_RLOF_pos = age_posydon > max(age_posydon[iRLOF_1_posydon])
star_age_rlof_ind = find_nearest(star_age_class, min(age_class[iRLOF_1]))
star_age_rlof_ind_pos = find_nearest(star_age_pos, min(age_posydon[iRLOF_1_posydon]))
# Open a new PDF for the period vs. mass-ratio panel (5-day initial period).
pp1_all_panel5 = PdfPages(paths.figures / 'p_q_5days.pdf')
fig = plt.figure(figsize=(10, 10))
# Raw string: '\i' and '\m' are invalid escape sequences (SyntaxWarning on
# modern Python); the rendered LaTeX title is unchanged.
plt.title(r'$\it{P}_\mathrm{ini}$ = 5 [days]', fontsize=30)
# MESA-default tides, gamma = 0 track: thin line before index indx1, thick
# overlay from indx1 on, end marker filled if star 2 also reached RLOF,
# open otherwise. (Idiom fix: `any(x)` instead of `any(x) == True`; raw
# strings fix the invalid '\g' escape in the legend labels.)
if any(iRLOF_2):
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    indx2 = list(star_2_mass).index(min(star_2_mass[iRLOF_2]))
    print('(len(iRLOF_1) > 0):', indx1)
    print('(len(iRLOF_2) > 0):', indx2)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label=r'MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:indx2] / star_1_mass[indx1:indx2], period_class[indx1:indx2], lw=7, c='k')
    plt.plot(star_2_mass[indx2] / star_1_mass[indx2], period_class[indx2], marker='o', c='k', mfc='k', ms=25)
else:
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    print('(len(iRLOF_1) > 0):', indx1)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label=r'MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:-1] / star_1_mass[indx1:-1], period_class[indx1:-1], lw=7, c='k')
    plt.plot(star_2_mass[-1] / star_1_mass[-1], period_class[-1], marker='o', c='k', ms=25, mfc='None')
# Same track for the gamma = 1 run (orange).
if any(iRLOF_2_g1_new):
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    indx2 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_2_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    print('(len(iRLOF_2_g1_new) > 0):', indx2)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:indx2] / star_1_mass_g1_new[indx1:indx2], period_class_g1_new[indx1:indx2], lw=7,
             c='orange')
    plt.plot(star_2_mass_g1_new[indx2] / star_1_mass_g1_new[indx2], period_class_g1_new[indx2], marker='o', c='orange',
             mfc='orange', ms=25)
else:
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:-1] / star_1_mass_g1_new[indx1:-1], period_class_g1_new[indx1:-1], lw=7,
             c='orange')
    # NOTE(review): this branch marks point [-2] while every other color
    # marks [-1] -- possibly deliberate (skipping a bad last model); confirm.
    plt.plot(star_2_mass_g1_new[-2] / star_1_mass_g1_new[-2], period_class_g1_new[-2], marker='o', c='orange', ms=25,
             mfc='None')
# Same track for the gamma = 2 run (blue).
if any(iRLOF_2_g2_new):
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    indx2 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_2_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    print('(len(iRLOF_2_g2_new) > 0):', indx2)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:indx2] / star_1_mass_g2_new[indx1:indx2], period_class_g2_new[indx1:indx2], lw=7,
             c='blue')
    plt.plot(star_2_mass_g2_new[indx2] / star_1_mass_g2_new[indx2], period_class_g2_new[indx2], marker='o', c='blue',
             mfc='blue', ms=25)
else:
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:-1] / star_1_mass_g2_new[indx1:-1], period_class_g2_new[indx1:-1], lw=7, c='blue')
    plt.plot(star_2_mass_g2_new[-1] / star_1_mass_g2_new[-1], period_class_g2_new[-1], marker='o', c='blue', ms=25,
             mfc='None')
# Same track for the gamma = 10 run (red).
if any(iRLOF_2_g10_new):
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    indx2 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_2_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    print('(len(iRLOF_2_g10_new) > 0):', indx2)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:indx2] / star_1_mass_g10_new[indx1:indx2], period_class_g10_new[indx1:indx2],
             lw=7, c='red')
    # Fix: the original divided by star_1_mass[indx2] (the gamma = 0 run's
    # array) for this end marker; use the gamma = 10 run's own primary mass,
    # consistent with every other curve in this panel.
    plt.plot(star_2_mass_g10_new[indx2] / star_1_mass_g10_new[indx2], period_class_g10_new[indx2], marker='o', c='red',
             mfc='red', ms=25)
else:
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:-1] / star_1_mass_g10_new[indx1:-1], period_class_g10_new[indx1:-1], lw=7,
             c='red')
    plt.plot(star_2_mass_g10_new[-1] / star_1_mass_g10_new[-1], period_class_g10_new[-1], marker='o', c='red', ms=25,
             mfc='None')
# Same track for the POSYDON-tides run (green).
if any(iRLOF_2_posydon):
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    indx2 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_2_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    print('(len(iRLOF_2_posydon) > 0):', indx2)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label=r'POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:indx2] / star_1_mass_posydon[indx1:indx2], period_posydon[indx1:indx2], lw=7,
             c='green')
    plt.plot(star_2_mass_posydon[indx2] / star_1_mass_posydon[indx2], period_posydon[indx2], marker='o', c='green',
             mfc='green', ms=25)
else:
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label=r'POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:-1] / star_1_mass_posydon[indx1:-1], period_posydon[indx1:-1], lw=7, c='green')
    plt.plot(star_2_mass_posydon[-1] / star_1_mass_posydon[-1], period_posydon[-1], marker='o', c='green', ms=25,
             mfc='None')
# Finalize the 5-day initial-period panel and write it to the PDF.
# (Commented-out presets for the other initial-period panels kept below.)
# plt.ylim([0.9,1.4]) # 1.25 days
plt.ylim([1.9, 7.5])  # 5 days
# plt.ylim([1,5]) # 3 days
# plt.ylim([0,17]) # 10 days
# plt.ylim([0,125]) # 70 days
# plt.ylim([0,90]) # 50 days
# Raw string: '\i' is an invalid escape sequence (SyntaxWarning on modern
# Python); the rendered LaTeX label is unchanged.
plt.xlabel(r'$\it{Q}$', fontsize=25)
plt.ylabel('Period [days]', fontsize=25)
plt.legend(loc=2, fontsize=20)
# plt.xlim([0.64,0.72]) # 1.25 days
plt.xlim([0.25, 1.8])  # 5 days
# plt.xlim([0.3,2]) # 3 days
# plt.xlim([0.3,1.8]) # 10 days
# plt.xlim([0.3,1.8]) # 70 days
plt.savefig(pp1_all_panel5, format='pdf')
pp1_all_panel5.close()
# One fresh MESA history reader per log directory for the 10-day
# configuration (same reader names are reused across sections).
m_200, m2_200, m3_200 = mp.MESA(), mp.MESA(), mp.MESA()
m_200_newtides, m2_200_newtides, m3_200_newtides = mp.MESA(), mp.MESA(), mp.MESA()
m3_200_g1_new, m3_200_g2_new, m3_200_g10_new = mp.MESA(), mp.MESA(), mp.MESA()
name = 'post_interaction/30_20_10'
print(os.path.join(paths.data, name + '_g1_new/LOGS3'))
# Point each reader at its LOGS directory and load the MESA history,
# in the same order as before: gamma runs, default-tides run, POSYDON run.
for reader, subdir in [
    (m3_200_g1_new, '_g1_new/LOGS3'),
    (m3_200_g2_new, '_g2_new/LOGS3'),
    (m3_200_g10_new, '_g10_new/LOGS3'),
    (m_200, '/LOGS1'),
    (m2_200, '/LOGS2'),
    (m3_200, '/LOGS3'),
    (m_200_newtides, '_posydon/LOGS1'),
    (m2_200_newtides, '_posydon/LOGS2'),
    (m3_200_newtides, '_posydon/LOGS3'),
]:
    reader.log_fold = os.path.join(paths.data, name + subdir)
    reader.loadHistory()
# --- Pull history columns from the freshly loaded runs into module-level
# arrays. Naming convention: no suffix / "_class" = MESA-default-tides run,
# "_newtides" / "_posydon" / "_pos" = POSYDON-tides run, "_gN_new" = the
# gamma = N mass-transfer runs. Not every column extracted here is used by
# the plotting code below.
star_age_200 = m_200.hist.star_age
surf_avg_vtor_1 = m_200.hist.surf_avg_v_rot
surf_avg_vtor_2 = m2_200.hist.surf_avg_v_rot
surf_avg_omega200 = m_200.hist.surf_avg_omega
star_1_radius200 = m3_200.hist.star_1_radius
star_1_J_orb_200 = m3_200.hist.J_orb
star_1_J_spin_200 = m3_200.hist.J_spin_1
star_2_J_spin_200 = m3_200.hist.J_spin_2
rl_relative_gap_1 = m3_200.hist.rl_relative_overflow_1
rl_relative_gap_2 = m3_200.hist.rl_relative_overflow_2
star_1_mass = m3_200.hist.star_1_mass
star_2_mass = m3_200.hist.star_2_mass
# Boolean masks: True where star 1 / star 2 overflows its Roche lobe.
iRLOF_1 = rl_relative_gap_1 > 0
iRLOF_2 = rl_relative_gap_2 > 0
period_class = m3_200.hist.period_days
# Same quantities for the gamma = 1 run.
rl_relative_gap_1_g1_new = m3_200_g1_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g1_new = m3_200_g1_new.hist.rl_relative_overflow_2
star_1_mass_g1_new = m3_200_g1_new.hist.star_1_mass
star_2_mass_g1_new = m3_200_g1_new.hist.star_2_mass
iRLOF_1_g1_new = rl_relative_gap_1_g1_new > 0
iRLOF_2_g1_new = rl_relative_gap_2_g1_new > 0
period_class_g1_new = m3_200_g1_new.hist.period_days
# Same quantities for the gamma = 2 run.
rl_relative_gap_1_g2_new = m3_200_g2_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g2_new = m3_200_g2_new.hist.rl_relative_overflow_2
star_1_mass_g2_new = m3_200_g2_new.hist.star_1_mass
star_2_mass_g2_new = m3_200_g2_new.hist.star_2_mass
iRLOF_1_g2_new = rl_relative_gap_1_g2_new > 0
iRLOF_2_g2_new = rl_relative_gap_2_g2_new > 0
period_class_g2_new = m3_200_g2_new.hist.period_days
# Same quantities for the gamma = 10 run.
rl_relative_gap_1_g10_new = m3_200_g10_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g10_new = m3_200_g10_new.hist.rl_relative_overflow_2
star_1_mass_g10_new = m3_200_g10_new.hist.star_1_mass
star_2_mass_g10_new = m3_200_g10_new.hist.star_2_mass
iRLOF_1_g10_new = rl_relative_gap_1_g10_new > 0
iRLOF_2_g10_new = rl_relative_gap_2_g10_new > 0
period_class_g10_new = m3_200_g10_new.hist.period_days
# Additional default-run and POSYDON-run columns (ages, angular momenta,
# luminosities, temperatures, rotation).
age_200 = m3_200.hist.age
jtotal_200 = m3_200.hist.J_total
log_total_angular_momentum_200 = m_200.hist.log_total_angular_momentum
surf_avg_j_rot_200 = m_200.hist.surf_avg_j_rot
center_h1_200 = m_200.hist.center_h1
LOGL_1 = m_200.hist.log_L
LOGL_2 = m2_200.hist.log_L
LOGL_1_newtides = m_200_newtides.hist.log_L
LOGL_2_newtides = m2_200_newtides.hist.log_L
log_Teff_1 = m_200.hist.log_Teff
log_Teff_2 = m2_200.hist.log_Teff
log_Teff_1_newtides = m_200_newtides.hist.log_Teff
log_Teff_2_newtides = m2_200_newtides.hist.log_Teff
star_age_200_newtides = m_200_newtides.hist.star_age
surf_avg_vtor_1_newtides = m_200_newtides.hist.surf_avg_v_rot
surf_avg_vtor_2_newtides = m2_200_newtides.hist.surf_avg_v_rot
surf_avg_omega200_newtides = m_200_newtides.hist.surf_avg_omega
star_1_radius200_newtides = m3_200_newtides.hist.star_1_radius
star_1_J_orb_200_newtides = m3_200_newtides.hist.J_orb
star_1_J_spin_200_newtides = m3_200_newtides.hist.J_spin_1
star_2_J_spin_200_newtides = m3_200_newtides.hist.J_spin_2
age_200_newtides = m3_200_newtides.hist.age
jtotal_200_newtides = m3_200_newtides.hist.J_total
log_total_angular_momentum_200_newtides = m_200_newtides.hist.log_total_angular_momentum
surf_avg_j_rot_200_newtides = m_200_newtides.hist.surf_avg_j_rot
center_h1_200_newtides = m_200_newtides.hist.center_h1
# Side-by-side "class" (MESA default) vs "posydon" comparison arrays.
# NOTE: period_class is re-assigned here with the same value as above.
period_class = m3_200.hist.period_days
period_posydon = m3_200_newtides.hist.period_days
star_1_radius_class = m3_200.hist.star_1_radius
star_1_radius_posydon = m3_200_newtides.hist.star_1_radius
star_2_radius_class = m3_200.hist.star_2_radius
star_2_radius_posydon = m3_200_newtides.hist.star_2_radius
J_orb_class = m3_200.hist.J_orb
J_orb_posydon = m3_200_newtides.hist.J_orb
J_spin2_class = m3_200.hist.J_spin_2
J_spin2_posydon = m3_200_newtides.hist.J_spin_2
J_spin1_class = m3_200.hist.J_spin_1
J_spin1_posydon = m3_200_newtides.hist.J_spin_1
star_1_mass_posydon = m3_200_newtides.hist.star_1_mass
star_2_mass_posydon = m3_200_newtides.hist.star_2_mass
surf_avg_omega_1_class = m_200.hist.surf_avg_omega
surf_avg_omega_1_pos = m_200_newtides.hist.surf_avg_omega
surf_avg_omega_2_class = m2_200.hist.surf_avg_omega
surf_avg_omega_2_pos = m2_200_newtides.hist.surf_avg_omega
star_age_pos = m2_200_newtides.hist.star_age
star_age_class = m2_200.hist.star_age
# p1.log_fold='LOGS1'
# p1.loadProfile(num=-1)
# p=mp.plot()
# RLOF masks and RLOF-phase selections for the POSYDON run.
rl_relative_gap_1_posydon = m3_200_newtides.hist.rl_relative_overflow_1
rl_relative_gap_2_posydon = m3_200_newtides.hist.rl_relative_overflow_2
age_class = m3_200.hist.age
age_posydon = m3_200_newtides.hist.age
lg_t_sync_2_class = m3_200.hist.lg_t_sync_2
lg_t_sync_2_posydon = m3_200_newtides.hist.lg_t_sync_2
lg_t_sync_1_class = m3_200.hist.lg_t_sync_1
lg_t_sync_1_posydon = m3_200_newtides.hist.lg_t_sync_1
iRLOF_1_posydon = rl_relative_gap_1_posydon > 0
iRLOF_2_posydon = rl_relative_gap_2_posydon > 0
# Ages and orbital angular momenta restricted to the star-1 RLOF phase,
# plus pre-/post-RLOF masks and nearest-age indices (find_nearest is a
# helper defined elsewhere in this script).
age_class_rlof = age_class[iRLOF_1]
age_posydon_rlof = age_posydon[iRLOF_1_posydon]
J_orb_class_rlof = J_orb_class[iRLOF_1]
J_orb_posydon_rlof = J_orb_posydon[iRLOF_1_posydon]
i_pre_RLOF_class = age_class < min(age_class[iRLOF_1])
i_pre_RLOF_pos = age_posydon < min(age_posydon[iRLOF_1_posydon])
i_post_RLOF_class = age_class > max(age_class[iRLOF_1])
i_post_RLOF_pos = age_posydon > max(age_posydon[iRLOF_1_posydon])
star_age_rlof_ind = find_nearest(star_age_class, min(age_class[iRLOF_1]))
star_age_rlof_ind_pos = find_nearest(star_age_pos, min(age_posydon[iRLOF_1_posydon]))
# Open a new PDF for the period vs. mass-ratio panel (10-day initial period).
pp1_all_panel10 = PdfPages(paths.figures / 'p_q_10days.pdf')
fig = plt.figure(figsize=(10, 10))
# Raw string: '\i' and '\m' are invalid escape sequences (SyntaxWarning on
# modern Python); the rendered LaTeX title is unchanged.
plt.title(r'$\it{P}_\mathrm{ini}$ = 10 [days]', fontsize=30)
# MESA-default tides, gamma = 0 track: thin line before index indx1, thick
# overlay from indx1 on, end marker filled if star 2 also reached RLOF,
# open otherwise. (Idiom fix: `any(x)` instead of `any(x) == True`; raw
# strings fix the invalid '\g' escape in the legend labels.)
if any(iRLOF_2):
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    indx2 = list(star_2_mass).index(min(star_2_mass[iRLOF_2]))
    print('(len(iRLOF_1) > 0):', indx1)
    print('(len(iRLOF_2) > 0):', indx2)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label=r'MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:indx2] / star_1_mass[indx1:indx2], period_class[indx1:indx2], lw=7, c='k')
    plt.plot(star_2_mass[indx2] / star_1_mass[indx2], period_class[indx2], marker='o', c='k', mfc='k', ms=25)
else:
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    print('(len(iRLOF_1) > 0):', indx1)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label=r'MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:-1] / star_1_mass[indx1:-1], period_class[indx1:-1], lw=7, c='k')
    plt.plot(star_2_mass[-1] / star_1_mass[-1], period_class[-1], marker='o', c='k', ms=25, mfc='None')
# Same track for the gamma = 1 run (orange).
if any(iRLOF_2_g1_new):
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    indx2 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_2_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    print('(len(iRLOF_2_g1_new) > 0):', indx2)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:indx2] / star_1_mass_g1_new[indx1:indx2], period_class_g1_new[indx1:indx2], lw=7,
             c='orange')
    plt.plot(star_2_mass_g1_new[indx2] / star_1_mass_g1_new[indx2], period_class_g1_new[indx2], marker='o', c='orange',
             mfc='orange', ms=25)
else:
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:-1] / star_1_mass_g1_new[indx1:-1], period_class_g1_new[indx1:-1], lw=7,
             c='orange')
    # NOTE(review): this branch marks point [-2] while every other color
    # marks [-1] -- possibly deliberate (skipping a bad last model); confirm.
    plt.plot(star_2_mass_g1_new[-2] / star_1_mass_g1_new[-2], period_class_g1_new[-2], marker='o', c='orange', ms=25,
             mfc='None')
# Same track for the gamma = 2 run (blue).
if any(iRLOF_2_g2_new):
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    indx2 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_2_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    print('(len(iRLOF_2_g2_new) > 0):', indx2)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:indx2] / star_1_mass_g2_new[indx1:indx2], period_class_g2_new[indx1:indx2], lw=7,
             c='blue')
    plt.plot(star_2_mass_g2_new[indx2] / star_1_mass_g2_new[indx2], period_class_g2_new[indx2], marker='o', c='blue',
             mfc='blue', ms=25)
else:
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:-1] / star_1_mass_g2_new[indx1:-1], period_class_g2_new[indx1:-1], lw=7, c='blue')
    plt.plot(star_2_mass_g2_new[-1] / star_1_mass_g2_new[-1], period_class_g2_new[-1], marker='o', c='blue', ms=25,
             mfc='None')
# Same track for the gamma = 10 run (red).
if any(iRLOF_2_g10_new):
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    indx2 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_2_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    print('(len(iRLOF_2_g10_new) > 0):', indx2)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:indx2] / star_1_mass_g10_new[indx1:indx2], period_class_g10_new[indx1:indx2],
             lw=7, c='red')
    # Fix: the original divided by star_1_mass[indx2] (the gamma = 0 run's
    # array) for this end marker; use the gamma = 10 run's own primary mass,
    # consistent with every other curve in this panel.
    plt.plot(star_2_mass_g10_new[indx2] / star_1_mass_g10_new[indx2], period_class_g10_new[indx2], marker='o', c='red',
             mfc='red', ms=25)
else:
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label=r'MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:-1] / star_1_mass_g10_new[indx1:-1], period_class_g10_new[indx1:-1], lw=7,
             c='red')
    plt.plot(star_2_mass_g10_new[-1] / star_1_mass_g10_new[-1], period_class_g10_new[-1], marker='o', c='red', ms=25,
             mfc='None')
# Same track for the POSYDON-tides run (green).
if any(iRLOF_2_posydon):
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    indx2 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_2_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    print('(len(iRLOF_2_posydon) > 0):', indx2)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label=r'POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:indx2] / star_1_mass_posydon[indx1:indx2], period_posydon[indx1:indx2], lw=7,
             c='green')
    plt.plot(star_2_mass_posydon[indx2] / star_1_mass_posydon[indx2], period_posydon[indx2], marker='o', c='green',
             mfc='green', ms=25)
else:
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label=r'POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:-1] / star_1_mass_posydon[indx1:-1], period_posydon[indx1:-1], lw=7, c='green')
    plt.plot(star_2_mass_posydon[-1] / star_1_mass_posydon[-1], period_posydon[-1], marker='o', c='green', ms=25,
             mfc='None')
# Finalize the 10-day initial-period panel and write it to the PDF.
# (Commented-out presets for the other initial-period panels kept below.)
# plt.ylim([0.9,1.4]) # 1.25 days
# plt.ylim([1.9,7.5]) # 5 days
# plt.ylim([1,5]) # 3 days
plt.ylim([0, 17])  # 10 days
# plt.ylim([0,125]) # 70 days
# plt.ylim([0,90]) # 50 days
# Raw string: '\i' is an invalid escape sequence (SyntaxWarning on modern
# Python); the rendered LaTeX label is unchanged.
plt.xlabel(r'$\it{Q}$', fontsize=25)
plt.ylabel('Period [days]', fontsize=25)
plt.legend(loc=2, fontsize=20)
# plt.xlim([0.64,0.72]) # 1.25 days
# plt.xlim([0.25,1.8]) # 5 days
# plt.xlim([0.3,2]) # 3 days
plt.xlim([0.3, 1.8])  # 10 days
# plt.xlim([0.3,1.8]) # 70 days
plt.savefig(pp1_all_panel10, format='pdf')
pp1_all_panel10.close()
# One fresh MESA history reader per log directory for the 50-day
# configuration (same reader names are reused across sections).
m_200, m2_200, m3_200 = mp.MESA(), mp.MESA(), mp.MESA()
m_200_newtides, m2_200_newtides, m3_200_newtides = mp.MESA(), mp.MESA(), mp.MESA()
m3_200_g1_new, m3_200_g2_new, m3_200_g10_new = mp.MESA(), mp.MESA(), mp.MESA()
name = 'post_interaction/30_20_50'
print(os.path.join(paths.data, name + '_g1_new/LOGS3'))
# Point each reader at its LOGS directory and load the MESA history,
# in the same order as before: gamma runs, default-tides run, POSYDON run.
for reader, subdir in [
    (m3_200_g1_new, '_g1_new/LOGS3'),
    (m3_200_g2_new, '_g2_new/LOGS3'),
    (m3_200_g10_new, '_g10_new/LOGS3'),
    (m_200, '/LOGS1'),
    (m2_200, '/LOGS2'),
    (m3_200, '/LOGS3'),
    (m_200_newtides, '_posydon/LOGS1'),
    (m2_200_newtides, '_posydon/LOGS2'),
    (m3_200_newtides, '_posydon/LOGS3'),
]:
    reader.log_fold = os.path.join(paths.data, name + subdir)
    reader.loadHistory()
# --- Pull history columns from the freshly loaded runs into module-level
# arrays. Naming convention: no suffix / "_class" = MESA-default-tides run,
# "_newtides" / "_posydon" / "_pos" = POSYDON-tides run, "_gN_new" = the
# gamma = N mass-transfer runs. Not every column extracted here is used by
# the plotting code below.
star_age_200 = m_200.hist.star_age
surf_avg_vtor_1 = m_200.hist.surf_avg_v_rot
surf_avg_vtor_2 = m2_200.hist.surf_avg_v_rot
surf_avg_omega200 = m_200.hist.surf_avg_omega
star_1_radius200 = m3_200.hist.star_1_radius
star_1_J_orb_200 = m3_200.hist.J_orb
star_1_J_spin_200 = m3_200.hist.J_spin_1
star_2_J_spin_200 = m3_200.hist.J_spin_2
rl_relative_gap_1 = m3_200.hist.rl_relative_overflow_1
rl_relative_gap_2 = m3_200.hist.rl_relative_overflow_2
star_1_mass = m3_200.hist.star_1_mass
star_2_mass = m3_200.hist.star_2_mass
# Boolean masks: True where star 1 / star 2 overflows its Roche lobe.
iRLOF_1 = rl_relative_gap_1 > 0
iRLOF_2 = rl_relative_gap_2 > 0
period_class = m3_200.hist.period_days
# Same quantities for the gamma = 1 run.
rl_relative_gap_1_g1_new = m3_200_g1_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g1_new = m3_200_g1_new.hist.rl_relative_overflow_2
star_1_mass_g1_new = m3_200_g1_new.hist.star_1_mass
star_2_mass_g1_new = m3_200_g1_new.hist.star_2_mass
iRLOF_1_g1_new = rl_relative_gap_1_g1_new > 0
iRLOF_2_g1_new = rl_relative_gap_2_g1_new > 0
period_class_g1_new = m3_200_g1_new.hist.period_days
# Same quantities for the gamma = 2 run.
rl_relative_gap_1_g2_new = m3_200_g2_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g2_new = m3_200_g2_new.hist.rl_relative_overflow_2
star_1_mass_g2_new = m3_200_g2_new.hist.star_1_mass
star_2_mass_g2_new = m3_200_g2_new.hist.star_2_mass
iRLOF_1_g2_new = rl_relative_gap_1_g2_new > 0
iRLOF_2_g2_new = rl_relative_gap_2_g2_new > 0
period_class_g2_new = m3_200_g2_new.hist.period_days
# Same quantities for the gamma = 10 run.
rl_relative_gap_1_g10_new = m3_200_g10_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g10_new = m3_200_g10_new.hist.rl_relative_overflow_2
star_1_mass_g10_new = m3_200_g10_new.hist.star_1_mass
star_2_mass_g10_new = m3_200_g10_new.hist.star_2_mass
iRLOF_1_g10_new = rl_relative_gap_1_g10_new > 0
iRLOF_2_g10_new = rl_relative_gap_2_g10_new > 0
period_class_g10_new = m3_200_g10_new.hist.period_days
# Additional default-run and POSYDON-run columns (ages, angular momenta,
# luminosities, temperatures, rotation).
age_200 = m3_200.hist.age
jtotal_200 = m3_200.hist.J_total
log_total_angular_momentum_200 = m_200.hist.log_total_angular_momentum
surf_avg_j_rot_200 = m_200.hist.surf_avg_j_rot
center_h1_200 = m_200.hist.center_h1
LOGL_1 = m_200.hist.log_L
LOGL_2 = m2_200.hist.log_L
LOGL_1_newtides = m_200_newtides.hist.log_L
LOGL_2_newtides = m2_200_newtides.hist.log_L
log_Teff_1 = m_200.hist.log_Teff
log_Teff_2 = m2_200.hist.log_Teff
log_Teff_1_newtides = m_200_newtides.hist.log_Teff
log_Teff_2_newtides = m2_200_newtides.hist.log_Teff
star_age_200_newtides = m_200_newtides.hist.star_age
surf_avg_vtor_1_newtides = m_200_newtides.hist.surf_avg_v_rot
surf_avg_vtor_2_newtides = m2_200_newtides.hist.surf_avg_v_rot
surf_avg_omega200_newtides = m_200_newtides.hist.surf_avg_omega
star_1_radius200_newtides = m3_200_newtides.hist.star_1_radius
star_1_J_orb_200_newtides = m3_200_newtides.hist.J_orb
star_1_J_spin_200_newtides = m3_200_newtides.hist.J_spin_1
star_2_J_spin_200_newtides = m3_200_newtides.hist.J_spin_2
age_200_newtides = m3_200_newtides.hist.age
jtotal_200_newtides = m3_200_newtides.hist.J_total
log_total_angular_momentum_200_newtides = m_200_newtides.hist.log_total_angular_momentum
surf_avg_j_rot_200_newtides = m_200_newtides.hist.surf_avg_j_rot
center_h1_200_newtides = m_200_newtides.hist.center_h1
# Side-by-side "class" (MESA default) vs "posydon" comparison arrays.
# NOTE: period_class is re-assigned here with the same value as above.
period_class = m3_200.hist.period_days
period_posydon = m3_200_newtides.hist.period_days
star_1_radius_class = m3_200.hist.star_1_radius
star_1_radius_posydon = m3_200_newtides.hist.star_1_radius
star_2_radius_class = m3_200.hist.star_2_radius
star_2_radius_posydon = m3_200_newtides.hist.star_2_radius
J_orb_class = m3_200.hist.J_orb
J_orb_posydon = m3_200_newtides.hist.J_orb
J_spin2_class = m3_200.hist.J_spin_2
J_spin2_posydon = m3_200_newtides.hist.J_spin_2
J_spin1_class = m3_200.hist.J_spin_1
J_spin1_posydon = m3_200_newtides.hist.J_spin_1
star_1_mass_posydon = m3_200_newtides.hist.star_1_mass
star_2_mass_posydon = m3_200_newtides.hist.star_2_mass
surf_avg_omega_1_class = m_200.hist.surf_avg_omega
surf_avg_omega_1_pos = m_200_newtides.hist.surf_avg_omega
surf_avg_omega_2_class = m2_200.hist.surf_avg_omega
surf_avg_omega_2_pos = m2_200_newtides.hist.surf_avg_omega
star_age_pos = m2_200_newtides.hist.star_age
star_age_class = m2_200.hist.star_age
# p1.log_fold='LOGS1'
# p1.loadProfile(num=-1)
# p=mp.plot()
# RLOF masks and RLOF-phase selections for the POSYDON run.
rl_relative_gap_1_posydon = m3_200_newtides.hist.rl_relative_overflow_1
rl_relative_gap_2_posydon = m3_200_newtides.hist.rl_relative_overflow_2
age_class = m3_200.hist.age
age_posydon = m3_200_newtides.hist.age
lg_t_sync_2_class = m3_200.hist.lg_t_sync_2
lg_t_sync_2_posydon = m3_200_newtides.hist.lg_t_sync_2
lg_t_sync_1_class = m3_200.hist.lg_t_sync_1
lg_t_sync_1_posydon = m3_200_newtides.hist.lg_t_sync_1
iRLOF_1_posydon = rl_relative_gap_1_posydon > 0
iRLOF_2_posydon = rl_relative_gap_2_posydon > 0
# Ages and orbital angular momenta restricted to the star-1 RLOF phase,
# plus pre-/post-RLOF masks and nearest-age indices (find_nearest is a
# helper defined elsewhere in this script).
age_class_rlof = age_class[iRLOF_1]
age_posydon_rlof = age_posydon[iRLOF_1_posydon]
J_orb_class_rlof = J_orb_class[iRLOF_1]
J_orb_posydon_rlof = J_orb_posydon[iRLOF_1_posydon]
i_pre_RLOF_class = age_class < min(age_class[iRLOF_1])
i_pre_RLOF_pos = age_posydon < min(age_posydon[iRLOF_1_posydon])
i_post_RLOF_class = age_class > max(age_class[iRLOF_1])
i_post_RLOF_pos = age_posydon > max(age_posydon[iRLOF_1_posydon])
star_age_rlof_ind = find_nearest(star_age_class, min(age_class[iRLOF_1]))
star_age_rlof_ind_pos = find_nearest(star_age_pos, min(age_posydon[iRLOF_1_posydon]))
# --- Figure: mass ratio q vs orbital period for the P_ini = 50 d system -----
pp1_all_panel50 = PdfPages(paths.figures / 'p_q_50days.pdf')
fig = plt.figure(figsize=(10, 10))
plt.title('$\it{P}_\mathrm{ini}$ = 50 [days]', fontsize=30)
# plt.plot(star_2_mass/star_1_mass,period_class,color='k',linestyle='-',label='MESA default, $\gamma$ = 0',lw=2)
# plt.plot(star_2_mass[iRLOF_1]/star_1_mass[iRLOF_1],period_class[iRLOF_1], lw=7, c='k')
# MESA default (gamma = 0), black: thin line = track up to RLOF onset, thick
# segment = star-1 RLOF phase, filled circle = onset of reverse (star-2) RLOF;
# when star 2 never overflows, an open circle marks the end of the track.
if any(iRLOF_2) == True:
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    indx2 = list(star_2_mass).index(min(star_2_mass[iRLOF_2]))
    print('(len(iRLOF_1) > 0):', indx1)
    print('(len(iRLOF_2) > 0):', indx2)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label='MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:indx2] / star_1_mass[indx1:indx2], period_class[indx1:indx2], lw=7, c='k')
    plt.plot(star_2_mass[indx2] / star_1_mass[indx2], period_class[indx2], marker='o', c='k', mfc='k', ms=25)
else:
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    print('(len(iRLOF_1) > 0):', indx1)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label='MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:-1] / star_1_mass[indx1:-1], period_class[indx1:-1], lw=7, c='k')
    plt.plot(star_2_mass[-1] / star_1_mass[-1], period_class[-1], marker='o', c='k', ms=25, mfc='None')
# gamma = 1 MESA-default run (orange), same plotting scheme as above.
if any(iRLOF_2_g1_new) == True:
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    indx2 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_2_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    print('(len(iRLOF_2_g1_new) > 0):', indx2)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:indx2] / star_1_mass_g1_new[indx1:indx2], period_class_g1_new[indx1:indx2], lw=7,
             c='orange')
    plt.plot(star_2_mass_g1_new[indx2] / star_1_mass_g1_new[indx2], period_class_g1_new[indx2], marker='o', c='orange',
             mfc='orange', ms=25)
else:
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:-1] / star_1_mass_g1_new[indx1:-1], period_class_g1_new[indx1:-1], lw=7,
             c='orange')
    # NOTE(review): endpoint marker uses index -2 here, unlike -1 in every
    # other branch -- possibly to skip a bad final row; worth confirming.
    plt.plot(star_2_mass_g1_new[-2] / star_1_mass_g1_new[-2], period_class_g1_new[-2], marker='o', c='orange', ms=25,
             mfc='None')
# gamma = 2 MESA-default run (blue).
if any(iRLOF_2_g2_new) == True:
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    indx2 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_2_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    print('(len(iRLOF_2_g2_new) > 0):', indx2)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:indx2] / star_1_mass_g2_new[indx1:indx2], period_class_g2_new[indx1:indx2], lw=7,
             c='blue')
    plt.plot(star_2_mass_g2_new[indx2] / star_1_mass_g2_new[indx2], period_class_g2_new[indx2], marker='o', c='blue',
             mfc='blue', ms=25)
else:
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    # NOTE(review): the label below reads "$\it{\gamma}$= 2" (missing space
    # before "= 2"), unlike the other branches.
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label='MESA default, $\it{\gamma}$= 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:-1] / star_1_mass_g2_new[indx1:-1], period_class_g2_new[indx1:-1], lw=7, c='blue')
    plt.plot(star_2_mass_g2_new[-1] / star_1_mass_g2_new[-1], period_class_g2_new[-1], marker='o', c='blue', ms=25,
             mfc='None')
# gamma = 10 MESA-default run (red), same plotting scheme as above.
if any(iRLOF_2_g10_new):
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    indx2 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_2_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    print('(len(iRLOF_2_g10_new) > 0):', indx2)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:indx2] / star_1_mass_g10_new[indx1:indx2], period_class_g10_new[indx1:indx2],
             lw=7, c='red')
    # BUGFIX: the mass ratio previously divided by star_1_mass[indx2] (the
    # gamma=0 run's array) instead of star_1_mass_g10_new[indx2], misplacing
    # the RLOF-2 onset marker for this track.
    plt.plot(star_2_mass_g10_new[indx2] / star_1_mass_g10_new[indx2], period_class_g10_new[indx2], marker='o', c='red',
             mfc='red', ms=25)
else:
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:-1] / star_1_mass_g10_new[indx1:-1], period_class_g10_new[indx1:-1], lw=7,
             c='red')
    plt.plot(star_2_mass_g10_new[-1] / star_1_mass_g10_new[-1], period_class_g10_new[-1], marker='o', c='red', ms=25,
             mfc='None')
# POSYDON run (green), same plotting scheme as above.
if any(iRLOF_2_posydon) == True:
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    indx2 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_2_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    print('(len(iRLOF_2_posydon) > 0):', indx2)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label='POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:indx2] / star_1_mass_posydon[indx1:indx2], period_posydon[indx1:indx2], lw=7,
             c='green')
    plt.plot(star_2_mass_posydon[indx2] / star_1_mass_posydon[indx2], period_posydon[indx2], marker='o', c='green',
             mfc='green', ms=25)
else:
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label='POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:-1] / star_1_mass_posydon[indx1:-1], period_posydon[indx1:-1], lw=7, c='green')
    plt.plot(star_2_mass_posydon[-1] / star_1_mass_posydon[-1], period_posydon[-1], marker='o', c='green', ms=25,
             mfc='None')
# Axis limits for the 50-day panel; the commented alternatives are the limits
# used for the other initial periods.
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# plt.xlim([5e6,7.5e6])
# plt.ylim([0.9,1.4]) # 1.25 days
# plt.ylim([1.9,7.5]) # 5 days
# plt.ylim([1,5]) # 3 days
# plt.ylim([0,17]) # 10 days
# plt.ylim([0,125]) # 70 days
plt.ylim([0, 90]) # 50 days
plt.xlabel('$\it{Q}$', fontsize=25)
plt.ylabel('Period [days]', fontsize=25)
plt.legend(loc=2, fontsize=20)
##plt.xlim([0.3,1.8])
###plt.xlim([0.3,1.5])
# plt.xlim([0.64,0.72])
# plt.xlim([0.64,0.72]) # 1.25 days
# plt.xlim([0.25,1.8]) # 5 days
# plt.xlim([0.3,2]) # 3 days
plt.xlim([0.3, 1.8]) # 10 days
# plt.xlim([0.3,1.8]) # 70 days
# Write the page and close the multi-page PDF handle.
plt.savefig(pp1_all_panel50, format='pdf')
pp1_all_panel50.close()
# ---------------------------------------------------------------------------
# Load the MESA history files for the next system (the 30+20 Msun,
# "post_interaction/30_20_70" models, i.e. the P_ini = 70 d case) and
# re-extract the same columns as above, rebinding the module-level names.
# Suffix convention: no suffix = MESA default; _gN_new = default with
# gamma = N; _newtides / _posydon = POSYDON tides prescription.
m_200 = mp.MESA()
m2_200 = mp.MESA()
m3_200 = mp.MESA()
m_200_newtides = mp.MESA()
m2_200_newtides = mp.MESA()
m3_200_newtides = mp.MESA()
m3_200_g1_new = mp.MESA()
m3_200_g2_new = mp.MESA()
m3_200_g10_new = mp.MESA()
name = 'post_interaction/30_20_70'
print(os.path.join(paths.data, name + '_g1_new/LOGS3'))
# LOGS1/LOGS2 = primary/secondary star histories, LOGS3 = binary history.
m3_200_g1_new.log_fold = os.path.join(paths.data, name + '_g1_new/LOGS3')
m3_200_g1_new.loadHistory()
m3_200_g2_new.log_fold = os.path.join(paths.data, name + '_g2_new/LOGS3')
# m3_200_g2_new.log_fold=name+'_g1_pos_new/LOGS3'
m3_200_g2_new.loadHistory()
m3_200_g10_new.log_fold = os.path.join(paths.data, name + '_g10_new/LOGS3')
m3_200_g10_new.loadHistory()
m_200.log_fold = os.path.join(paths.data, name + '/LOGS1')
m_200.loadHistory()
m2_200.log_fold = os.path.join(paths.data, name + '/LOGS2')
m2_200.loadHistory()
m3_200.log_fold = os.path.join(paths.data, name + '/LOGS3')
m3_200.loadHistory()
m_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS1')
m_200_newtides.loadHistory()
m2_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS2')
m2_200_newtides.loadHistory()
m3_200_newtides.log_fold = os.path.join(paths.data, name + '_posydon/LOGS3')
m3_200_newtides.loadHistory()
# --- MESA-default run columns ---
star_age_200 = m_200.hist.star_age
surf_avg_vtor_1 = m_200.hist.surf_avg_v_rot
surf_avg_vtor_2 = m2_200.hist.surf_avg_v_rot
surf_avg_omega200 = m_200.hist.surf_avg_omega
star_1_radius200 = m3_200.hist.star_1_radius
star_1_J_orb_200 = m3_200.hist.J_orb
star_1_J_spin_200 = m3_200.hist.J_spin_1
star_2_J_spin_200 = m3_200.hist.J_spin_2
rl_relative_gap_1 = m3_200.hist.rl_relative_overflow_1
rl_relative_gap_2 = m3_200.hist.rl_relative_overflow_2
star_1_mass = m3_200.hist.star_1_mass
star_2_mass = m3_200.hist.star_2_mass
# RLOF masks: positive relative overflow = star overfills its Roche lobe.
iRLOF_1 = rl_relative_gap_1 > 0
iRLOF_2 = rl_relative_gap_2 > 0
period_class = m3_200.hist.period_days
# --- gamma = 1 run columns ---
rl_relative_gap_1_g1_new = m3_200_g1_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g1_new = m3_200_g1_new.hist.rl_relative_overflow_2
star_1_mass_g1_new = m3_200_g1_new.hist.star_1_mass
star_2_mass_g1_new = m3_200_g1_new.hist.star_2_mass
iRLOF_1_g1_new = rl_relative_gap_1_g1_new > 0
iRLOF_2_g1_new = rl_relative_gap_2_g1_new > 0
period_class_g1_new = m3_200_g1_new.hist.period_days
# --- gamma = 2 run columns ---
rl_relative_gap_1_g2_new = m3_200_g2_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g2_new = m3_200_g2_new.hist.rl_relative_overflow_2
star_1_mass_g2_new = m3_200_g2_new.hist.star_1_mass
star_2_mass_g2_new = m3_200_g2_new.hist.star_2_mass
iRLOF_1_g2_new = rl_relative_gap_1_g2_new > 0
iRLOF_2_g2_new = rl_relative_gap_2_g2_new > 0
period_class_g2_new = m3_200_g2_new.hist.period_days
# --- gamma = 10 run columns ---
rl_relative_gap_1_g10_new = m3_200_g10_new.hist.rl_relative_overflow_1
rl_relative_gap_2_g10_new = m3_200_g10_new.hist.rl_relative_overflow_2
star_1_mass_g10_new = m3_200_g10_new.hist.star_1_mass
star_2_mass_g10_new = m3_200_g10_new.hist.star_2_mass
iRLOF_1_g10_new = rl_relative_gap_1_g10_new > 0
iRLOF_2_g10_new = rl_relative_gap_2_g10_new > 0
period_class_g10_new = m3_200_g10_new.hist.period_days
age_200 = m3_200.hist.age
jtotal_200 = m3_200.hist.J_total
log_total_angular_momentum_200 = m_200.hist.log_total_angular_momentum
surf_avg_j_rot_200 = m_200.hist.surf_avg_j_rot
center_h1_200 = m_200.hist.center_h1
# Luminosities and effective temperatures for both components, both runs.
LOGL_1 = m_200.hist.log_L
LOGL_2 = m2_200.hist.log_L
LOGL_1_newtides = m_200_newtides.hist.log_L
LOGL_2_newtides = m2_200_newtides.hist.log_L
log_Teff_1 = m_200.hist.log_Teff
log_Teff_2 = m2_200.hist.log_Teff
log_Teff_1_newtides = m_200_newtides.hist.log_Teff
log_Teff_2_newtides = m2_200_newtides.hist.log_Teff
# --- POSYDON ("new tides") run columns ---
star_age_200_newtides = m_200_newtides.hist.star_age
surf_avg_vtor_1_newtides = m_200_newtides.hist.surf_avg_v_rot
surf_avg_vtor_2_newtides = m2_200_newtides.hist.surf_avg_v_rot
surf_avg_omega200_newtides = m_200_newtides.hist.surf_avg_omega
star_1_radius200_newtides = m3_200_newtides.hist.star_1_radius
star_1_J_orb_200_newtides = m3_200_newtides.hist.J_orb
star_1_J_spin_200_newtides = m3_200_newtides.hist.J_spin_1
star_2_J_spin_200_newtides = m3_200_newtides.hist.J_spin_2
age_200_newtides = m3_200_newtides.hist.age
jtotal_200_newtides = m3_200_newtides.hist.J_total
log_total_angular_momentum_200_newtides = m_200_newtides.hist.log_total_angular_momentum
surf_avg_j_rot_200_newtides = m_200_newtides.hist.surf_avg_j_rot
center_h1_200_newtides = m_200_newtides.hist.center_h1
# Side-by-side "classic" vs POSYDON quantities for the comparison plots.
period_class = m3_200.hist.period_days
period_posydon = m3_200_newtides.hist.period_days
star_1_radius_class = m3_200.hist.star_1_radius
star_1_radius_posydon = m3_200_newtides.hist.star_1_radius
star_2_radius_class = m3_200.hist.star_2_radius
star_2_radius_posydon = m3_200_newtides.hist.star_2_radius
J_orb_class = m3_200.hist.J_orb
J_orb_posydon = m3_200_newtides.hist.J_orb
J_spin2_class = m3_200.hist.J_spin_2
J_spin2_posydon = m3_200_newtides.hist.J_spin_2
J_spin1_class = m3_200.hist.J_spin_1
J_spin1_posydon = m3_200_newtides.hist.J_spin_1
star_1_mass_posydon = m3_200_newtides.hist.star_1_mass
star_2_mass_posydon = m3_200_newtides.hist.star_2_mass
surf_avg_omega_1_class = m_200.hist.surf_avg_omega
surf_avg_omega_1_pos = m_200_newtides.hist.surf_avg_omega
surf_avg_omega_2_class = m2_200.hist.surf_avg_omega
surf_avg_omega_2_pos = m2_200_newtides.hist.surf_avg_omega
star_age_pos = m2_200_newtides.hist.star_age
star_age_class = m2_200.hist.star_age
# p1.log_fold='LOGS1'
# p1.loadProfile(num=-1)
# p=mp.plot()
rl_relative_gap_1_posydon = m3_200_newtides.hist.rl_relative_overflow_1
rl_relative_gap_2_posydon = m3_200_newtides.hist.rl_relative_overflow_2
age_class = m3_200.hist.age
age_posydon = m3_200_newtides.hist.age
lg_t_sync_2_class = m3_200.hist.lg_t_sync_2
lg_t_sync_2_posydon = m3_200_newtides.hist.lg_t_sync_2
lg_t_sync_1_class = m3_200.hist.lg_t_sync_1
lg_t_sync_1_posydon = m3_200_newtides.hist.lg_t_sync_1
iRLOF_1_posydon = rl_relative_gap_1_posydon > 0
iRLOF_2_posydon = rl_relative_gap_2_posydon > 0
age_class_rlof = age_class[iRLOF_1]
age_posydon_rlof = age_posydon[iRLOF_1_posydon]
J_orb_class_rlof = J_orb_class[iRLOF_1]
J_orb_posydon_rlof = J_orb_posydon[iRLOF_1_posydon]
# Pre-/post-RLOF masks bracket the first and last RLOF timestep of star 1.
i_pre_RLOF_class = age_class < min(age_class[iRLOF_1])
i_pre_RLOF_pos = age_posydon < min(age_posydon[iRLOF_1_posydon])
i_post_RLOF_class = age_class > max(age_class[iRLOF_1])
i_post_RLOF_pos = age_posydon > max(age_posydon[iRLOF_1_posydon])
star_age_rlof_ind = find_nearest(star_age_class, min(age_class[iRLOF_1]))
star_age_rlof_ind_pos = find_nearest(star_age_pos, min(age_posydon[iRLOF_1_posydon]))
# --- Figure: mass ratio q vs orbital period for the P_ini = 70 d system -----
pp1_all_panel70 = PdfPages(paths.figures / 'p_q_70days.pdf')
fig = plt.figure(figsize=(10, 10))
plt.title('$\it{P}_\mathrm{ini}$ = 70 [days]', fontsize=30)
# plt.plot(star_2_mass/star_1_mass,period_class,color='k',linestyle='-',label='MESA default, $\gamma$ = 0',lw=2)
# plt.plot(star_2_mass[iRLOF_1]/star_1_mass[iRLOF_1],period_class[iRLOF_1], lw=7, c='k')
# MESA default (gamma = 0), black: thin line = track up to RLOF onset, thick
# segment = star-1 RLOF phase, filled circle = onset of reverse (star-2) RLOF;
# when star 2 never overflows, an open circle marks the end of the track.
if any(iRLOF_2) == True:
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    indx2 = list(star_2_mass).index(min(star_2_mass[iRLOF_2]))
    print('(len(iRLOF_1) > 0):', indx1)
    print('(len(iRLOF_2) > 0):', indx2)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label='MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:indx2] / star_1_mass[indx1:indx2], period_class[indx1:indx2], lw=7, c='k')
    plt.plot(star_2_mass[indx2] / star_1_mass[indx2], period_class[indx2], marker='o', c='k', mfc='k', ms=25)
else:
    indx1 = list(star_2_mass).index(min(star_2_mass[iRLOF_1]))
    print('(len(iRLOF_1) > 0):', indx1)
    plt.plot(star_2_mass[0:indx1] / star_1_mass[0:indx1], period_class[0:indx1], color='k', linestyle='-',
             label='MESA default, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass[indx1:-1] / star_1_mass[indx1:-1], period_class[indx1:-1], lw=7, c='k')
    plt.plot(star_2_mass[-1] / star_1_mass[-1], period_class[-1], marker='o', c='k', ms=25, mfc='None')
# gamma = 1 MESA-default run (orange).
if any(iRLOF_2_g1_new) == True:
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    indx2 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_2_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    print('(len(iRLOF_2_g1_new) > 0):', indx2)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:indx2] / star_1_mass_g1_new[indx1:indx2], period_class_g1_new[indx1:indx2], lw=7,
             c='orange')
    plt.plot(star_2_mass_g1_new[indx2] / star_1_mass_g1_new[indx2], period_class_g1_new[indx2], marker='o', c='orange',
             mfc='orange', ms=25)
else:
    indx1 = list(star_2_mass_g1_new).index(min(star_2_mass_g1_new[iRLOF_1_g1_new]))
    print('(len(iRLOF_1_g1_new) > 0):', indx1)
    plt.plot(star_2_mass_g1_new[0:indx1] / star_1_mass_g1_new[0:indx1], period_class_g1_new[0:indx1], color='orange',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 1', lw=2)
    plt.plot(star_2_mass_g1_new[indx1:-1] / star_1_mass_g1_new[indx1:-1], period_class_g1_new[indx1:-1], lw=7,
             c='orange')
    # NOTE(review): endpoint marker uses index -2 here, unlike -1 elsewhere.
    plt.plot(star_2_mass_g1_new[-2] / star_1_mass_g1_new[-2], period_class_g1_new[-2], marker='o', c='orange', ms=25,
             mfc='None')
# gamma = 2 MESA-default run (blue).
if any(iRLOF_2_g2_new) == True:
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    indx2 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_2_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    print('(len(iRLOF_2_g2_new) > 0):', indx2)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:indx2] / star_1_mass_g2_new[indx1:indx2], period_class_g2_new[indx1:indx2], lw=7,
             c='blue')
    plt.plot(star_2_mass_g2_new[indx2] / star_1_mass_g2_new[indx2], period_class_g2_new[indx2], marker='o', c='blue',
             mfc='blue', ms=25)
else:
    indx1 = list(star_2_mass_g2_new).index(min(star_2_mass_g2_new[iRLOF_1_g2_new]))
    print('(len(iRLOF_1_g2_new) > 0):', indx1)
    plt.plot(star_2_mass_g2_new[0:indx1] / star_1_mass_g2_new[0:indx1], period_class_g2_new[0:indx1], color='blue',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 2', lw=2)
    plt.plot(star_2_mass_g2_new[indx1:-1] / star_1_mass_g2_new[indx1:-1], period_class_g2_new[indx1:-1], lw=7, c='blue')
    plt.plot(star_2_mass_g2_new[-1] / star_1_mass_g2_new[-1], period_class_g2_new[-1], marker='o', c='blue', ms=25,
             mfc='None')
# gamma = 10 MESA-default run (red), same plotting scheme as above.
if any(iRLOF_2_g10_new):
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    indx2 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_2_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    print('(len(iRLOF_2_g10_new) > 0):', indx2)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:indx2] / star_1_mass_g10_new[indx1:indx2], period_class_g10_new[indx1:indx2],
             lw=7, c='red')
    # BUGFIX: the mass ratio previously divided by star_1_mass[indx2] (the
    # gamma=0 run's array) instead of star_1_mass_g10_new[indx2], misplacing
    # the RLOF-2 onset marker for this track.
    plt.plot(star_2_mass_g10_new[indx2] / star_1_mass_g10_new[indx2], period_class_g10_new[indx2], marker='o', c='red',
             mfc='red', ms=25)
else:
    indx1 = list(star_2_mass_g10_new).index(min(star_2_mass_g10_new[iRLOF_1_g10_new]))
    print('(len(iRLOF_1_g10_new) > 0):', indx1)
    plt.plot(star_2_mass_g10_new[0:indx1] / star_1_mass_g10_new[0:indx1], period_class_g10_new[0:indx1], color='red',
             linestyle='-', label='MESA default, $\it{\gamma}$ = 10', lw=2)
    plt.plot(star_2_mass_g10_new[indx1:-1] / star_1_mass_g10_new[indx1:-1], period_class_g10_new[indx1:-1], lw=7,
             c='red')
    plt.plot(star_2_mass_g10_new[-1] / star_1_mass_g10_new[-1], period_class_g10_new[-1], marker='o', c='red', ms=25,
             mfc='None')
# POSYDON run (green), same plotting scheme as above.
if any(iRLOF_2_posydon) == True:
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    indx2 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_2_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    print('(len(iRLOF_2_posydon) > 0):', indx2)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label='POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:indx2] / star_1_mass_posydon[indx1:indx2], period_posydon[indx1:indx2], lw=7,
             c='green')
    plt.plot(star_2_mass_posydon[indx2] / star_1_mass_posydon[indx2], period_posydon[indx2], marker='o', c='green',
             mfc='green', ms=25)
else:
    indx1 = list(star_2_mass_posydon).index(min(star_2_mass_posydon[iRLOF_1_posydon]))
    print('(len(iRLOF_1_posydon) > 0):', indx1)
    plt.plot(star_2_mass_posydon[0:indx1] / star_1_mass_posydon[0:indx1], period_posydon[0:indx1], color='green',
             linestyle='-', label='POSYDON, $\it{\gamma}$ = 0', lw=2)
    plt.plot(star_2_mass_posydon[indx1:-1] / star_1_mass_posydon[indx1:-1], period_posydon[indx1:-1], lw=7, c='green')
    plt.plot(star_2_mass_posydon[-1] / star_1_mass_posydon[-1], period_posydon[-1], marker='o', c='green', ms=25,
             mfc='None')
# Axis limits for the 70-day panel; commented alternatives are for the other
# initial periods.
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# plt.xlim([5e6,7.5e6])
# plt.ylim([0.9,1.4]) # 1.25 days
# plt.ylim([1.9,7.5]) # 5 days
# plt.ylim([1,5]) # 3 days
# plt.ylim([0,17]) # 10 days
plt.ylim([0, 125]) # 70 days
# plt.ylim([0,90]) # 50 days
plt.xlabel('$\it{Q}$', fontsize=25)
plt.ylabel('Period [days]', fontsize=25)
plt.legend(loc=2, fontsize=20)
##plt.xlim([0.3,1.8])
###plt.xlim([0.3,1.5])
# plt.xlim([0.64,0.72])
# plt.xlim([0.64,0.72]) # 1.25 days
# plt.xlim([0.25,1.8]) # 5 days
# plt.xlim([0.3,2]) # 3 days
plt.xlim([0.3, 1.8]) # 10 days
# plt.xlim([0.3,1.8]) # 70 days
# Write the page and close the multi-page PDF handle.
plt.savefig(pp1_all_panel70, format='pdf')
pp1_all_panel70.close()
|
NikolayBritavskiyAstroREPO_NAMEfast_rotating_binariesPATH_START.@fast_rotating_binaries_extracted@fast_rotating_binaries-main@src@scripts@plot_save_mesa_individual_fin1p25.py@.PATH_END.py
|
{
"filename": "parse_maestro_params.py",
"repo_name": "AMReX-Astro/MAESTROeX",
"repo_path": "MAESTROeX_extracted/MAESTROeX-main/Source/param/parse_maestro_params.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""This script parses the list of C++ runtime parameters and writes
the necessary header and source files to make them available in
Maestro's C++ routines.
parameters have the format:
name type default ifdef
the first three (name, type, default) are mandatory:
name: the name of the parameter. This will be the same name as the
variable in C++ unless a pair is specified as (name, cpp_name)
type: the C++ data type (int, Real, bool, string)
default: the default value. If specified as a pair, (a, b), then
the first value is the normal default and the second is for
debug mode (#ifdef AMREX_DEBUG)
the next are optional:
ifdef: only define this parameter if the name provided is #ifdef-ed
Any line beginning with a "#" is ignored
Commands begin with a "@":
@namespace: sets the namespace that these will be under (see below)
e.g. @namespace maestro
Note: categories listed in the input file aren't used for code generation
but are used for the documentation generation
For a namespace, name, we write out:
-- name_params.H (for maestro, included in Maestro.H):
sets up the namespace and extern parameters
-- name_queries.H (for maestro, included in Maestro.cpp):
does the parmparse query to override the default in C++
-- name_job_info_tests.H
this tests the current value against the default and outputs
into a file
-- runtime_params.cpp
has the actual definition of the variables (without extern)
"""
import argparse
import re
import sys
import runtime_parameters as rp
# Warning banner prepended to every generated file.  This is runtime text
# written verbatim into the output headers; the trailing "\n" escape inside
# the triple-quoted string is intentional (extra blank line in the output).
CWARNING = """
// This file is automatically created by parse_maestro_params.py at build time.
// To update or add runtime parameters, please edit _cpp_parameters and rebuild.\n
"""
def read_param_file(infile):
    """Parse the runtime-parameter file *infile*.

    Each non-comment, non-blank line has the form ``name type default
    [ifdef]`` (see the module docstring for the pair syntaxes); lines
    beginning with ``@`` are commands, of which only ``@namespace: <nm>``
    is recognized.

    Returns a list of ``rp.Param`` objects.  Exits the process on an
    unreadable file, an unknown command, or a parameter appearing before
    any ``@namespace`` command.
    """
    params = []
    namespace = None
    try:
        f = open(infile, encoding="UTF-8")
    except OSError:
        sys.exit("error opening the input file")
    # FIX: the file handle was previously never closed; `with` guarantees
    # closure even if sys.exit() fires mid-parse.
    with f:
        for line in f:
            if line[0] == "#":
                continue
            if line.strip() == "":
                continue
            if line[0] == "@":
                # a command such as "@namespace: maestro"
                # FIX: split only on the first ":" so a stray colon in the
                # value no longer raises ValueError.
                cmd, value = line.split(":", 1)
                if cmd == "@namespace":
                    fields = value.split()
                    namespace = fields[0]
                else:
                    sys.exit("invalid command")
                continue
            # this splits the line into separate fields. A field is a
            # single word or a pair in parentheses like "(a, b)"
            fields = re.findall(r'".+"|[\w\"\+\.-]+|\([\w+\.-]+\s*,\s*[\w\+\.-]+\)', line)
            name = fields[0]
            if name[0] == "(":
                # (name, cpp_name) pair
                name, cpp_var_name = re.findall(r"\w+", name)
            else:
                cpp_var_name = name
            dtype = fields[1].lower()
            default = fields[2]
            if default[0] == "(":
                # (default, debug_default) pair
                default, debug_default = re.findall(r"\w+", default)
            else:
                debug_default = None
            try:
                ifdef = fields[3]
            except IndexError:
                ifdef = None
            if namespace is None:
                sys.exit("namespace not set")
            params.append(rp.Param(name, dtype, default,
                                   cpp_var_name=cpp_var_name,
                                   namespace=namespace,
                                   debug_default=debug_default,
                                   ifdef=ifdef))
    return params
def write_headers_and_source(params, out_directory):
    """Write the generated headers and the single definitions source file.

    For every namespace appearing in *params*, writes into *out_directory*:

      * ``<nm>_params.H``          -- extern declarations inside the namespace
      * ``<nm>_queries.H``         -- ParmParse default/query snippets
      * ``<nm>_job_info_tests.H``  -- default-vs-current job_info tests

    and, once for all namespaces together, ``runtime_params.cpp`` with the
    actual (non-extern) definitions.  Exits the process if any output file
    cannot be opened.
    """
    # find all the namespaces
    namespaces = sorted({q.namespace for q in params})
    for nm in namespaces:
        params_nm = [q for q in params if q.namespace == nm]
        # sort by repr since None may be present
        ifdefs = sorted({q.ifdef for q in params_nm}, key=repr)

        # write name_params.H (FIX: all output files are now managed with
        # `with`, so handles are closed even on an error path)
        try:
            cp = open(f"{out_directory}/{nm}_params.H", "w", encoding="UTF-8")
        except OSError:
            sys.exit(f"unable to open {nm}_params.H for writing")
        with cp:
            cp.write(CWARNING)
            cp.write(f"#ifndef {nm.upper()}_PARAMS_H\n")
            cp.write(f"#define {nm.upper()}_PARAMS_H\n")
            cp.write("\n")
            cp.write(f"namespace {nm} {{\n")
            for ifdef in ifdefs:
                if ifdef is None:
                    for p in [q for q in params_nm if q.ifdef is None]:
                        cp.write(p.get_declare_string(with_extern=True))
                else:
                    cp.write(f"#ifdef {ifdef}\n")
                    for p in [q for q in params_nm if q.ifdef == ifdef]:
                        cp.write(p.get_declare_string(with_extern=True))
                    cp.write("#endif\n")
            cp.write("}\n\n")
            cp.write("#endif\n")

        # write name_queries.H
        try:
            cq = open(f"{out_directory}/{nm}_queries.H", "w", encoding="UTF-8")
        except OSError:
            sys.exit(f"unable to open {nm}_queries.H for writing")
        with cq:
            cq.write(CWARNING)
            for ifdef in ifdefs:
                if ifdef is None:
                    for p in [q for q in params_nm if q.ifdef is None]:
                        cq.write(p.get_default_string())
                        cq.write(p.get_query_string())
                        cq.write("\n")
                else:
                    cq.write(f"#ifdef {ifdef}\n")
                    for p in [q for q in params_nm if q.ifdef == ifdef]:
                        cq.write(p.get_default_string())
                        cq.write(p.get_query_string())
                        cq.write("\n")
                    cq.write("#endif\n")
                cq.write("\n")

        # write the job info tests
        try:
            jo = open(f"{out_directory}/{nm}_job_info_tests.H", "w", encoding="UTF-8")
        except OSError:
            sys.exit(f"unable to open {nm}_job_info_tests.H")
        with jo:
            for ifdef in ifdefs:
                if ifdef is None:
                    for p in [q for q in params_nm if q.ifdef is None]:
                        jo.write(p.get_job_info_test())
                else:
                    jo.write(f"#ifdef {ifdef}\n")
                    for p in [q for q in params_nm if q.ifdef == ifdef]:
                        jo.write(p.get_job_info_test())
                    jo.write("#endif\n")

    # write a single C++ source file that actually defines the parameters
    # (one file for all namespaces)
    try:
        pf = open(f"{out_directory}/runtime_params.cpp", "w", encoding="UTF-8")
    except OSError:
        # FIX: was an f-string with no placeholders
        sys.exit("unable to open runtime_params.cpp")
    with pf:
        pf.write("#include <AMReX_REAL.H>\n")
        pf.write("#include <AMReX_Gpu.H>\n")
        for nm in namespaces:
            pf.write(f"#include <{nm}_params.H>\n")
        pf.write("\n")
        for nm in namespaces:
            params_nm = [q for q in params if q.namespace == nm]
            # sort by repr since None may be present
            ifdefs = sorted({q.ifdef for q in params_nm}, key=repr)
            pf.write(f"namespace {nm} {{\n")
            for ifdef in ifdefs:
                if ifdef is None:
                    for p in [q for q in params_nm if q.ifdef is None]:
                        pf.write(p.get_declare_string())
                else:
                    pf.write(f"#ifdef {ifdef}\n")
                    for p in [q for q in params_nm if q.ifdef == ifdef]:
                        pf.write(p.get_declare_string())
                    pf.write("#endif\n")
            pf.write("}\n\n")
def main():
    """the main driver"""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-o", type=str, default=None,
                            help="output directory for the generated files")
    arg_parser.add_argument("input_file", type=str, nargs=1,
                            help="input file containing the list of parameters we will define")
    options = arg_parser.parse_args()
    # parse the parameter list, then emit the headers and the source file
    param_list = read_param_file(options.input_file[0])
    write_headers_and_source(param_list, options.o)


if __name__ == "__main__":
    main()
|
AMReX-AstroREPO_NAMEMAESTROeXPATH_START.@MAESTROeX_extracted@MAESTROeX-main@Source@param@parse_maestro_params.py@.PATH_END.py
|
{
"filename": "hash_test.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/tests/internal/hash_test.py",
"type": "Python"
}
|
from vaex.superutils import *
import vaex.strings
from vaex.utils import dropnan
import numpy as np
import sys
import pytest
import pyarrow as pa
@pytest.mark.parametrize('counter_cls', [counter_string])  # , counter_stringview])
def test_counter_string(counter_cls):
    """String counts accumulate over updates, and merge unions key sets."""
    counter = counter_cls(1)
    counter.update(vaex.strings.array(['aap', 'noot', 'mies']))
    counts = counter.extract()[0]
    for word in ('aap', 'noot', 'mies'):
        assert counts[word] == 1
    # a second update adds to existing counts and introduces new keys
    counter.update(vaex.strings.array(['aap', 'n00t']))
    counts = counter.extract()[0]
    for word, expected in {'aap': 2, 'noot': 1, 'n00t': 1, 'mies': 1}.items():
        assert counts[word] == expected
    # merging two independently built counters combines their key sets,
    # including the null entry
    left = counter_cls(1)
    left.update(vaex.strings.array(['aap', 'noot', 'mies']))
    right = counter_cls(1)
    right.update(vaex.strings.array(['kees', None]))
    left.merge(right)
    assert set(left.key_array().tolist()) == {'aap', 'noot', 'mies', 'kees', None}
    assert left.counts().tolist() == [1, 1, 1, 1, 1]
def test_counter_string_nulls_issue():
    """Regression: a second update used to grow the null array incorrectly
    when the first batch contained a null."""
    initial = [None, 'b', 'c']
    counter = counter_string(1)
    counter.update(vaex.strings.array(initial))
    extra = ['d', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o']
    counter.update(vaex.strings.array(extra))
    assert set(counter.key_array().tolist()) == set(initial + extra)
def test_set_string_keys():
    """Keys of a sealed ordered string set come back in insertion order.

    FIX: this test was previously named ``test_set_string`` and was silently
    shadowed (never collected by pytest) by a later function of the same name
    in this file; it is renamed so it runs again.  The local variable ``set``
    also shadowed the builtin and is renamed.
    """
    oset = ordered_set_string(1)
    strings = ['aap', 'noot', 'mies']
    strings_array = vaex.strings.array(strings)
    oset.update(strings_array)
    oset.seal()
    assert oset.keys() == strings
    keys = oset.key_array()
    # round-trip the key array through numpy into an arrow array
    keys = pa.array(keys.to_numpy())
    assert keys.tolist() == strings
def test_set_bool():
    """A boolean set collapses any input to at most two distinct keys."""
    bool_set = ordered_set_bool(4)
    values = np.array([True, True, False, False, True])
    chunk = 1024**2
    bool_set.update(values, -1, chunk_size=chunk, bucket_size=chunk*4)
    keys = bool_set.key_array()
    assert len(keys) == 2
    assert set(keys.tolist()) == {True, False}
@pytest.mark.parametrize("nan", [False, True])
@pytest.mark.parametrize("missing", [False, True])
@pytest.mark.parametrize("nmaps", [1, 2, 3])
def test_set_float(repickle, nan, missing, nmaps):
    # Exercise ordered_set_float64 across all combinations of: NaN present,
    # missing (masked) value present, and number of internal hash maps.
    # Checks key extraction, ordinal mapping, the copy constructor, and
    # pickling round-trips.
    ar = np.arange(4, dtype='f8')[::-1].copy()
    keys_expected = [3, 2, 1, 0]
    null_index = 2
    if missing:
        mask = [0, 0, 1, 0]
        keys_expected[null_index] = None
    if nan:
        ar[1] = np.nan
        keys_expected[1] = np.nan
    oset = ordered_set_float64(nmaps)
    # update() with return_values=True yields per-map ordinals plus the map
    # index, which flatten_values() combines into global ordinals.
    if missing:
        ordinals_local, map_index = oset.update(ar, mask, return_values=True)
    else:
        ordinals_local, map_index = oset.update(ar, return_values=True)
    ordinals = np.empty(len(keys_expected), dtype='i8')
    ordinals = oset.flatten_values(ordinals_local, map_index, ordinals)
    keys = oset.keys()
    # if missing:
    #     ordinals[oset.null_index] = oset.null_index
    assert dropnan(np.take(keys, ordinals).tolist()) == dropnan(keys_expected)
    # plain object keys
    oset.seal()
    keys = oset.keys()
    expect_nan = 1 if nan else None
    assert dropnan(set(keys), expect=expect_nan) == dropnan(set(keys_expected), expect=expect_nan)
    assert oset.map_ordinal(keys).dtype.name == 'int8'
    # arrays
    keys = oset.key_array().tolist()
    if missing:
        keys[oset.null_index] = None
    assert dropnan(set(keys), expect=expect_nan) == dropnan(set(keys_expected), expect=expect_nan)
    if nan:
        assert np.isnan(keys[oset.nan_index])
    ordinals = oset.map_ordinal(keys).tolist()
    if missing:
        ordinals[oset.null_index] = oset.null_index
    assert ordinals == list(range(4))
    # tests extraction and constructor
    keys = oset.key_array()
    set_copy = ordered_set_float64(keys, oset.null_index, oset.nan_count, oset.null_count, '')
    keys = set_copy.key_array().tolist()
    if missing:
        keys[oset.null_index] = None
    assert dropnan(set(keys)) == dropnan(set(keys_expected))
    if nan:
        assert np.isnan(keys[set_copy.nan_index])
    ordinals = set_copy.map_ordinal(keys).tolist()
    if missing:
        ordinals[set_copy.null_index] = set_copy.null_index
    assert ordinals == list(range(4))
    # test pickle
    set_copy = repickle(oset)
    keys = set_copy.key_array().tolist()
    if missing:
        keys[oset.null_index] = None
    assert dropnan(set(keys)) == dropnan(set(keys_expected))
    if nan:
        assert np.isnan(keys[set_copy.nan_index])
    ordinals = set_copy.map_ordinal(keys).tolist()
    if missing:
        ordinals[set_copy.null_index] = set_copy.null_index
    assert ordinals == list(range(4))
@pytest.mark.parametrize("missing", [False, True])
@pytest.mark.parametrize("nmaps", [1, 2, 3])
def test_set_string(repickle, missing, nmaps):
    """Exercise ordered_set_string: update/seal, ordinal mapping, key
    extraction, reconstruction from a key array, pickling, and merging of
    partial sets (with and without a missing/None entry)."""
    ar = ["aap", "noot", "mies", "teun"]
    keys_expected = ar
    null_index = 1
    if missing:
        ar[null_index] = None
        keys_expected[null_index] = None
        keys_expected = ar
    arlist = ar
    ar = vaex.strings.array(ar)
    oset = ordered_set_string(nmaps)
    ordinals_local, map_index = oset.update(ar, return_values=True)
    ordinals = np.empty(len(keys_expected), dtype='i8')
    ordinals = oset.flatten_values(ordinals_local, map_index, ordinals)
    keys = oset.key_array()
    assert keys.to_numpy()[ordinals].tolist() == keys_expected
    # plain object keys
    oset.seal()
    keys = oset.keys()
    assert set(keys) == set(keys_expected)
    # return
    assert oset.map_ordinal(vaex.strings.array(keys)).dtype.name == 'int8'
    # return
    # arrays
    keys = oset.key_array()
    assert set(keys.tolist()) == set(keys_expected)
    ordinals = oset.map_ordinal(keys).tolist()
    if missing:
        ordinals[oset.null_index] = oset.null_index
    assert ordinals == list(range(4))
    # tests extraction and constructor
    keys = oset.key_array()
    set_copy = ordered_set_string(keys, oset.null_index, oset.nan_count, oset.null_count, '')
    keys = set_copy.key_array()
    assert set(keys.tolist()) == set(keys_expected)
    ordinals = set_copy.map_ordinal(keys).tolist()
    if missing:
        ordinals[set_copy.null_index] = set_copy.null_index
    assert ordinals == list(range(4))
    # test pickle
    set_copy = repickle(oset)
    keys = set_copy.key_array()
    assert set(keys.tolist()) == set(keys_expected)
    ordinals = set_copy.map_ordinal(keys).tolist()
    if missing:
        ordinals[set_copy.null_index] = set_copy.null_index
    assert ordinals == list(range(4))
    # merging two disjoint partial sets must reproduce the full key set
    ar1 = vaex.strings.array(arlist[:2])
    ar2 = vaex.strings.array(arlist[2:])
    oset1 = ordered_set_string(nmaps)
    oset1.update(ar1)
    oset2 = ordered_set_string(nmaps)
    oset2.update(ar2)
    oset1.merge([oset2])
    assert set(oset1.keys()) == set(keys_expected)
    assert set(oset1.key_array().tolist()) == set(keys_expected)
    # same, but split so the (possibly missing) element lands in the second set
    ar1 = vaex.strings.array(arlist[:1])
    ar2 = vaex.strings.array(arlist[1:])
    oset1 = ordered_set_string(nmaps)
    oset1.update(ar1)
    oset2 = ordered_set_string(nmaps)
    oset2.update(ar2)
    oset1.merge([oset2])
    assert set(oset1.keys()) == set(keys_expected)
    assert set(oset1.key_array().tolist()) == set(keys_expected)
def test_counter_float64(repickle):
    """counter_float64 tracks per-key counts plus a separate nan_count;
    successive updates accumulate and merge() combines two counters."""
    ar = np.arange(3, dtype='f8')
    counter = counter_float64(1)
    counter.update(ar)
    counts = counter.extract()[0]
    assert set(counts.keys()) == {0, 1, 2}
    assert set(counts.values()) == {1, 1, 1}
    counter.update([np.nan, 0])
    counts = counter.extract()[0]
    assert counter.nan_count == 1
    assert counts[0] == 2
    counter.update([np.nan, 0])
    counts = counter.extract()[0]
    assert counter.nan_count == 2
    assert counts[0] == 3
    # merging adds both the key counts and the nan counts
    counter2 = counter_float64(1)
    counter2.update([np.nan, 0, 10])
    assert counter2.nan_count == 1
    counter.merge(counter2)
    counts = counter.extract()[0]
    assert set(counts.keys()) == {0, 1, 2, 10}
    assert counter.nan_count == 3
    assert counts[0] == 4
    assert counts[10] == 1
    assert counts[1] == 1
def test_ordered_set_object():
    """ordered_set_object maps arbitrary Python objects (incl. None and
    duplicates) to consecutive ordinals; a later update with nan shifts
    ordinals of pre-existing keys by one."""
    s = str("hi there!!")
    ar = np.array([0, 1.5, s, None, s], dtype='O')
    oset = ordered_set_object(-1)
    oset.update(ar)
    keys = np.array(oset.keys())
    assert set(oset.map_ordinal(keys)) == set(list(range(len(keys))))
    ar2 = np.array([np.nan, None, s], dtype='O')
    oset.update(ar2)
    keys = np.array(oset.keys())
    # nan occupies ordinal 0 after the second update, hence the offset of 1
    assert set(oset.map_ordinal(keys)) == set(list(range(1, 1 + len(keys))))
def test_counter_object():
    """counter_object must manage CPython reference counts correctly: every
    store into the counter / extracted dict adds exactly one reference, and
    every del releases it.  The exact statement order matters — do not
    reorder or batch these asserts."""
    s = str("hi there!!")
    s2 = str("hi there!!2")
    start_ref_count = sys.getrefcount(s)
    start_ref_count2 = sys.getrefcount(s2)
    counter = counter_object(-1)
    ar = np.array([0, 1.5, s, None, s], dtype='O')
    # the object array holds two references to s
    assert sys.getrefcount(s) == start_ref_count+2
    counter.update(ar)
    assert sys.getrefcount(s) == start_ref_count+3
    counts = counter.extract()
    assert sys.getrefcount(s) == start_ref_count+4, 'stored in the dics'
    assert set(counts.keys()) == {0, 1.5, s, None}
    assert counts[0] == 1
    assert counts[1.5] == 1
    assert counts[s] == 2
    assert counts[None] == 1
    del counts
    assert sys.getrefcount(s) == start_ref_count+3, 'released from the dict'
    assert sys.getrefcount(s) == start_ref_count+3
    counter.update(np.array([np.nan, None, s], dtype='O'))
    assert sys.getrefcount(s) == start_ref_count+3
    counts = counter.extract()
    assert counter.nan_count == 1
    assert counts[0] == 1
    assert counts[None] == 2
    assert counts[s] == 3
    counter.update(np.array([np.nan, 0], dtype='O'))
    counts = counter.extract()
    assert counter.nan_count == 2
    assert counts[0] == 2
    # merge must transfer ownership of keys held by the second counter
    counter2 = counter_object(-1)
    ar2 = np.array([np.nan, np.nan, 0, 10, s, s2], dtype='O')
    assert sys.getrefcount(s) == start_ref_count+5
    assert sys.getrefcount(s2) == start_ref_count2+1
    counter2.update(ar2)
    assert sys.getrefcount(s2) == start_ref_count2+2
    assert sys.getrefcount(s) == start_ref_count+6
    assert counter2.nan_count == 2
    counter.merge(counter2)
    assert sys.getrefcount(s2) == start_ref_count2+3
    assert sys.getrefcount(s) == start_ref_count+6
    del counter2
    assert sys.getrefcount(s2) == start_ref_count2+2
    assert sys.getrefcount(s) == start_ref_count+5
    del ar2
    assert sys.getrefcount(s2) == start_ref_count2+1
    assert sys.getrefcount(s) == start_ref_count+4
    counts = counter.extract()
    assert set(counts.keys()) == {0, 1.5, s, s2, None, 10}
    assert counter.nan_count == 4
    assert counts[0] == 3
    assert counts[10] == 1
    # after releasing everything, refcounts must return to their baselines
    del ar
    assert sys.getrefcount(s) == start_ref_count+2
    del counter
    assert sys.getrefcount(s) == start_ref_count+1
    del counts
    assert sys.getrefcount(s) == start_ref_count
    assert sys.getrefcount(s2) == start_ref_count2
def test_index():
    """index_hash_float64.update(values, start) assigns consecutive indices
    starting at ``start``; map_index returns them back."""
    ar1 = np.arange(3, dtype='f8')
    ar2 = np.arange(10, 13, dtype='f8')
    ar = np.concatenate([ar1, ar2])
    index = index_hash_float64(1)
    index.update(ar1, 0)
    assert index.map_index(ar1).tolist() == [0, 1, 2]
    index.update(ar2, 3)
    assert index.map_index(ar).tolist() == [0, 1, 2, 3, 4, 5]
def test_index_non_existing_primitive():
    """Values absent from the index map to -1; a nan probe against an index
    without nan must also yield -1 (nan != nan comparison special case)."""
    ar1 = np.arange(3, dtype="f8")
    ar2 = np.arange(1, 4, dtype="f8")
    index = index_hash_float64(1)
    index.update(ar1, 0)
    assert index.map_index(ar1).tolist() == [0, 1, 2]
    assert index.map_index(ar2).tolist() == [1, 2, -1]
    # test when nan is not missing (which triggers a special case comparison)
    ar2[-1] = np.nan
    assert not index.has_nan
    assert index.map_index(ar2).tolist() == [1, 2, -1]
def test_index_non_existing_string():
    """String index: absent keys (here None) map to -1."""
    ar1 = vaex.strings.array(["aap", "noot", "mies"])
    index = index_hash_string(1)
    ar2 = vaex.strings.array(["noot", "mies", None])
    index.update(ar1, 0)
    assert index.map_index(ar1).tolist() == [0, 1, 2]
    assert index.map_index(ar2).tolist() == [1, 2, -1]
@pytest.mark.parametrize("nmaps", [1, 2, 3])
def test_index_multi(nmaps):
    """Duplicate handling for index_hash_string: map_index returns the first
    index per key, map_index_duplicates the remaining (probe, stored) pairs.
    The second half repeats the scenario via merge() instead of update()."""
    strings = vaex.strings.array(['aap', 'noot', 'mies'])
    index = index_hash_string(nmaps)
    index.update(strings, 0)
    assert index.map_index(strings).tolist() == [0, 1, 2]
    assert [k.tolist() for k in index.map_index_duplicates(strings, 0)] == [[], []]
    assert index.has_duplicates is False
    assert len(index) == 3
    # duplicate that is already present
    strings2 = vaex.strings.array(['aap', 'aap', 'kees', 'mies'])
    index.update(strings2, 3)
    assert index.map_index(strings2).tolist() == [0, 0, 5, 2]
    assert [k.tolist() for k in index.map_index_duplicates(strings2, 3)] == [[3, 3, 4, 4, 6], [3, 4, 3, 4, 6]]
    assert index.has_duplicates is True
    assert index.extract() == {'noot': 1, 'aap': [0, 3, 4], 'mies': [2, 6], 'kees': 5}
    assert len(index) == 7
    # duplicate that is not present, and a single one that is already in index
    strings3 = vaex.strings.array(['foo', 'foo', 'mies'])
    index.update(strings3, 7)
    assert index.map_index(strings3).tolist() == [7, 7, 2]
    assert [k.tolist() for k in index.map_index_duplicates(strings3, 7)] == [[7, 8, 9, 9], [8, 8, 6, 9]]
    assert index.has_duplicates is True
    assert len(index) == 10
    # same, but now use merge
    strings = vaex.strings.array(['aap', 'noot', 'mies'])
    index = index_hash_string(nmaps)
    index.update(strings, 0)
    assert index.map_index(strings).tolist() == [0, 1, 2]
    assert [k.tolist() for k in index.map_index_duplicates(strings, 0)] == [[], []]
    assert index.has_duplicates is False
    assert len(index) == 3
    strings2 = vaex.strings.array(['aap', 'aap', 'kees', 'mies'])
    index2 = index_hash_string(nmaps)
    index2.update(strings2, 3)
    index.merge(index2)
    assert index.map_index(strings2).tolist() == [0, 0, 5, 2]
    assert [k.tolist() for k in index.map_index_duplicates(strings2, 3)] == [[3, 3, 4, 4, 6], [3, 4, 3, 4, 6]]
    assert index.has_duplicates is True
    assert len(index) == 7
    strings3 = vaex.strings.array(['foo', 'foo', 'mies'])
    index3 = index_hash_string(nmaps)
    index3.update(strings3, 7)
    index.merge(index3)
    assert index.map_index(strings3).tolist() == [7, 7, 2]
    assert [k.tolist() for k in index.map_index_duplicates(strings3, 7)] == [[7, 8, 9, 9], [8, 8, 6, 9]]
    assert index.has_duplicates is True
    assert len(index) == 10
@pytest.mark.parametrize("nmaps", [1, 2, 3])
def test_index_multi_float64(nmaps):
    """Float64 twin of test_index_multi: same duplicate/merge semantics
    expected from index_hash_float64."""
    floats = np.array([1.0, 2.0, 3.0])
    index = index_hash_float64(nmaps)
    index.update(floats, 0)
    assert index.map_index(floats).tolist() == [0, 1, 2]
    assert [k.tolist() for k in index.map_index_duplicates(floats, 0)] == [[], []]
    assert index.has_duplicates is False
    assert len(index) == 3
    # duplicate that is already present
    floats2 = np.array([1.0, 1.0, 10.0, 3.0])
    index.update(floats2, 3)
    assert index.map_index(floats2).tolist() == [0, 0, 5, 2]
    assert [k.tolist() for k in index.map_index_duplicates(floats2, 3)] == [[3, 3, 4, 4, 6], [3, 4, 3, 4, 6]]
    assert index.has_duplicates is True
    assert len(index) == 7
    # duplicate that is not present, and a single one that is already in index
    floats3 = np.array([99.9, 99.9, 3.0])
    index.update(floats3, 7)
    assert index.map_index(floats3).tolist() == [7, 7, 2]
    assert [k.tolist() for k in index.map_index_duplicates(floats3, 7)] == [[7, 8, 9, 9], [8, 8, 6, 9]]
    assert index.has_duplicates is True
    assert len(index) == 10
    # same, but now use merge
    floats = np.array([1.0, 2.0, 3.0])
    index = index_hash_float64(nmaps)
    index.update(floats, 0)
    assert index.map_index(floats).tolist() == [0, 1, 2]
    assert [k.tolist() for k in index.map_index_duplicates(floats, 0)] == [[], []]
    assert index.has_duplicates is False
    assert len(index) == 3
    floats2 = np.array([1.0, 1.0, 10.0, 3.0])
    index2 = index_hash_float64(nmaps)
    index2.update(floats2, 3)
    index.merge(index2)
    assert index.map_index(floats2).tolist() == [0, 0, 5, 2]
    assert [k.tolist() for k in index.map_index_duplicates(floats2, 3)] == [[3, 3, 4, 4, 6], [3, 4, 3, 4, 6]]
    assert index.has_duplicates is True
    assert len(index) == 7
    floats3 = np.array([99.9, 99.9, 3.0])
    index3 = index_hash_float64(nmaps)
    index3.update(floats3, 7)
    index.merge(index3)
    assert index.map_index(floats3).tolist() == [7, 7, 2]
    assert [k.tolist() for k in index.map_index_duplicates(floats3, 7)] == [[7, 8, 9, 9], [8, 8, 6, 9]]
    assert index.has_duplicates is True
    assert len(index) == 10
@pytest.mark.parametrize("nmaps", [1, 2, 3])
def test_index_write(nmaps):
    """map_index / map_index_masked can write into a caller-provided output
    array instead of allocating one."""
    ints = np.array([1, 2, 3], dtype=np.int32)
    index = index_hash_int32(nmaps)
    index.update(ints, 0)
    assert index.map_index(ints).tolist() == [0, 1, 2]
    indices = np.full(3, -1, dtype=np.int32)
    index.map_index(ints, indices)
    assert indices.tolist() == [0, 1, 2]
    # masked variant: an all-False mask means every value gets mapped
    indices = np.full(3, -1, dtype=np.int32)
    mask = np.zeros(3, dtype=bool)
    index.map_index_masked(ints, mask, indices)
    assert indices.tolist() == [0, 1, 2]
def test_set_max_unique(buffer_size):
    """_set must raise RowLimitException when the number of unique values
    exceeds the requested limit."""
    df = vaex.from_arrays(x=np.arange(1000))
    with buffer_size(df):
        with pytest.raises(vaex.RowLimitException, match='.* >= 2 .*'):
            df._set('x', limit=2)
        # TODO: this does not happen any more if we have a single set/hashmap
        # with pytest.raises(vaex.RowLimitException, match='.*larger than.*'):
        #     df._set('x', unique_limit=len(df)-1)
@pytest.mark.parametrize("nmaps", [1])#, 2, 3])
def test_string_refs(nmaps):
    """Constructing an ordered_set_string from an existing key array must
    take exactly one extra reference to that array (no copies, no leaks)."""
    strings = vaex.strings.array(['aap', 'noot', 'mies'])
    oset = ordered_set_string(nmaps)
    oset.update(strings, 0)
    strings = oset.key_array()
    refs = sys.getrefcount(strings)
    assert refs == 2
    assert set(strings.tolist()) == {'aap', 'noot', 'mies'}
    set_copy = ordered_set_string(strings, 0, 0, 0, 'fingerprint')
    assert sys.getrefcount(strings) == refs + 1
    strings_copy = oset.key_array()
    # assert sys.getrefcount(strings) == 2
    assert set(strings_copy.tolist()) == {'aap', 'noot', 'mies'}
    # assert index.map_index(strings).tolist() == [0, 1, 2]
    # assert [k.tolist() for k in index.map_index_duplicates(strings, 0)] == [[], []]
    # assert index.has_duplicates is False
    # assert len(index) == 3
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@tests@internal@hash_test.py@.PATH_END.py
|
{
"filename": "writer.py",
"repo_name": "jan-rybizki/Chempy",
"repo_path": "Chempy_extracted/Chempy-master/Chempy/input/yields/West17/fortranfile/writer.py",
"type": "Python"
}
|
"""
Classes for writing UNIX unformatted FORTRAN files.
"""
# TODO
# * for general reading, load needs to be able to specify record size?
# * fortranfile needs "backspace" and "truncate" functions.
import os
import sys
import gzip
import bz2
import lzma
import numpy as np
from .utils import prod
from .errors import RecordBeginningError, WriteError
from .common import _np_types, _set_method
from .defaults import FortranSpecs
#=======================================================================
# WRITER
#=======================================================================
class DataOutputBuffer(object):
    """
    Interface for writing buffered output data.

    Data is accumulated into a growable ``bytearray`` at position
    ``self.pos``; a subclass supplies ``write()`` to flush one finished
    record to its destination.
    """
    default_byte_order = ">"
    sys_is_le = sys.byteorder == "little"
    native_byteorder = "<" if sys_is_le else ">"
    initial_buf_size = 2**24
    buf_grow_factor = 2
    buf_grow_limit = 2**28
    # as of 2011, even my best solid state drive will take 0.5 s to write that much

    def __init__(self,
                 byteorder = default_byte_order,
                 **kwargs):
        """Set up byte order handling and an empty buffer."""
        self._set_byteorder(byteorder = byteorder)
        self._init()

    def _init(self):
        # pos is the current write position == size of the pending record
        self.pos = 0
        self.buffer = bytearray(self.initial_buf_size)
        self.buf_size = self.initial_buf_size

    def _set_byteorder(self,
                       byteorder = default_byte_order):
        """
        Set up all data types for desired byte order ("=" means native).
        """
        if byteorder == "=":
            byteorder = self.native_byteorder
        self.swapbyteorder = byteorder != self.native_byteorder
        self.byteorder = byteorder

    def bor(self):
        """Return whether position is beginning of record."""
        return self.pos == 0

    def assert_bor(self):
        """
        Throw exception if current position is not beginning of record.
        This can be used to determine whether all previous data has been written,
        i.e., as a consistency check of previous writes.
        """
        if not self.bor():
            # BUG FIX: name was misspelled ``RecordBeginnigError`` which
            # raised NameError instead of the intended exception
            raise RecordBeginningError(self.pos)

    def _extend_buf(self):
        """
        Grow write buffer: double it, but add at most buf_grow_limit bytes.
        """
        self.buf_size += min(self.buf_size, self.buf_grow_limit)
        new_buffer = bytearray(self.buf_size)
        new_buffer[0:self.pos] = self.buffer[0:self.pos]
        del self.buffer
        self.buffer = new_buffer

    def _check_buf_size(self, size, offset = None):
        # Compute the end position the pending write will reach; a negative
        # offset addresses from the buffer start, a positive one from pos.
        if offset is not None:
            if offset <= 0:
                p = size - offset
            else:
                p = self.pos + size + offset
        else:
            p = self.pos + size
        while p > self.buf_size:
            self._extend_buf()

    def skip_bytes(self, nbytes, fill = None):
        """Skip a number of empty bytes, optionally initializing with `fill`."""
        self._check_buf_size(nbytes)
        if fill is not None:
            if isinstance(fill, bytes):
                # repeat the fill pattern and truncate to the requested length
                self.buffer[self.pos:self.pos+nbytes] = (fill * (nbytes // len(fill) + 1))[0:nbytes]
            else:
                # non-bytes fill: zero the region
                self.buffer[self.pos:self.pos+nbytes] = bytes(nbytes)
        self.pos += nbytes

    def put_data(self, data):
        """
        Just put all the data into record (must be at beginning of record).
        """
        self.assert_bor()
        size = len(data)
        self._check_buf_size(size)
        self.buffer[0:size] = data
        self.pos = size

    def put_n(self, data, dtype = None, order = 'F', offset = None):
        """
        Write numpy object to buffer.

        KEYWORDS:
        order - output order of array
                `None` - use data default
                default is `F`
        dtype - output data type (default: keep the data's dtype)
        offset - write position: relative to pos if positive, absolute
                 from buffer start if negative, advance pos if None
        """
        if not isinstance(data, np.ndarray):
            data = np.array(data)
        if dtype is None:
            dtype = data.dtype
        else:
            dtype = np.dtype(dtype)
        if order is None:
            if data.flags.fnc:
                order = 'F'
            else:
                order = 'C'
        assert order in ('F', 'C')
        # copy so that a possible in-place byteswap never touches caller data
        new_data = np.ndarray(data.shape,
                              dtype = dtype,
                              order = order)
        new_data[()] = data[()]
        data = new_data
        if not data.flags.c_contiguous:
            # reinterpret the same buffer as C-contiguous for serialization
            data = np.ndarray(data.shape,
                              dtype = data.dtype,
                              buffer = data.data,
                              order = 'C')
        if self.swapbyteorder:
            data.byteswap(True)
        nbytes = data.nbytes
        self._check_buf_size(nbytes, offset)
        if offset is not None:
            if offset > 0:
                p = self.pos + offset
            else:
                p = -offset
        else:
            p = self.pos
        self.buffer[p:p+nbytes] = data.data.tobytes()
        if offset is None:
            self.pos += nbytes

    def put_1d(self, data, dtype=np.float64, lead=0, tail=0, offset = None):
        """Write a 1D np array padded with 0 on either side as specified.
        Do not write padding (`lead`/`tail` elements are dropped).
        """
        if not isinstance(data, np.ndarray):
            data = np.array(data)
        if dtype is None:
            dtype = data.dtype
        else:
            dtype = np.dtype(dtype)
        items = prod(data.shape) - lead - tail
        new_data = np.ndarray(items, dtype = dtype)
        new_data[0:items] = data.flat[lead:lead+items]
        data = new_data
        if self.swapbyteorder:
            data.byteswap(True)
        nbytes = data.nbytes
        self._check_buf_size(nbytes, offset)
        if offset is not None:
            if offset > 0:
                p = self.pos + offset
            else:
                p = -offset
        else:
            p = self.pos
        self.buffer[p:p+nbytes] = data.data.tobytes()
        if offset is None:
            self.pos += nbytes

    @staticmethod
    def get_s_len(s, codec = 'cp437', strip = False):
        """
        Return length of string after encoding.

        If parameter is an array, return array of same shape.
        If parameter is not an np.ndarray, return (nested) list.

        PARAMETERS
        codec - default: 'cp437' used to encode
        strip - strip whitespace before measuring
        """
        t = type(s)
        if not isinstance(s, np.ndarray):
            # BUG FIX: np.object / np.int aliases were removed in numpy >= 1.24
            s = np.array(s, dtype = object)
        l = np.ndarray(s.shape, dtype = np.int_)
        sflat = s.flat
        lflat = l.flat
        if strip:
            for i in range(len(sflat)):
                lflat[i] = len(sflat[i].strip().encode(codec))
        else:
            for i in range(len(sflat)):
                lflat[i] = len(sflat[i].encode(codec))
        if not issubclass(t, np.ndarray):
            l = l.tolist()
        else:
            l = l[()]
        return l

    def put_s(self, s, length = None, fill = b'\x00', codec = 'cp437', order = 'F', strip = False, offset = None):
        """write string (array) to buffer

        KWARGS
        length - >0: length of string - fill/truncate
                 -1: find and max length
                 None: write actual length of each string
                 np.ndarray: length of strings if match shape
                 (TODO - extend in missing dimesions?)
        fill - pattern (not encoded), memory data if None
        codec - default 'cp437'
        order - of data to write to buffer, default is 'F'
        offset - relative to current location if positive
                 relative to beginning of buffer if negative (abs value)
                 `None` - no offset, advnace buffer
        """
        if order is None:
            # BUG FIX: previously tested the undefined name ``data``;
            # decide the default order from ``s`` itself
            if isinstance(s, np.ndarray) and s.flags.fnc:
                order = 'F'
            else:
                order = 'C'
        assert order in ('F', 'C')
        if not isinstance(s, np.ndarray):
            s = np.array(s, dtype = object, order = order)
        # create length array; comparing an ndarray length to -1 raises
        # ValueError, which falls through to "use length as given"
        try:
            if length is None or length == -1:
                l = self.get_s_len(s)
                if length == -1:
                    l = np.max(l)
            else:
                l = length
        except ValueError:
            l = length
        if not isinstance(l, np.ndarray):
            l = np.array(l, dtype = np.int_)
        if prod(l.shape) == 1:
            l = np.array(l.flat[0])
        if l.shape == ():
            l = np.tile(l, s.shape)
        # BUG FIX: ``is`` identity comparison with a string literal replaced
        # by equality (identity of small strings is an implementation detail)
        if order == 'F' and not s.flags.f_contiguous:
            s = s.copy(order = 'F')
        if order == 'F' and not l.flags.f_contiguous:
            l = l.copy(order = 'F')
        if not s.flags.c_contiguous:
            s = np.ndarray(s.shape,
                           dtype = s.dtype,
                           buffer = s.data,
                           order = 'C')
        if not l.flags.c_contiguous:
            l = np.ndarray(l.shape,
                           dtype = l.dtype,
                           buffer = l.data,
                           order = 'C')
        nbytes = np.sum(l)
        self._check_buf_size(nbytes, offset)
        if offset is not None:
            if offset > 0:
                p = self.pos + offset
            else:
                p = -offset
        else:
            p = self.pos
        if prod(l.shape) > 0:
            lmax = np.max(l)
        else:
            lmax = 0
        f = (fill * (lmax // len(fill) + 1))[:lmax]
        for si, li in zip(s.flat, l.flat):
            d = si.encode(codec)
            n = min(len(d), li)
            self.buffer[p:p+n] = d[:n]
            if fill is not None and n < li:
                self.buffer[p+n:p+li] = f[:li-n]
            p += li
        if offset is None:
            self.pos += nbytes
            assert p == self.pos, "inconsistency in written data"

    # binary data
    def put_buf(self, data, length = None, order = 'F', fill = b'\x00', offset = None):
        """Write array/list of raw data pieces of equal length to buffer.

        ARGS:
        data - array/scalar to be written

        KEYWORDS:
        length - of data pieces, truncate/fill, if `None` use max value
        order - of junks in written array, default is 'F'
        fill - default is \\x00
        """
        if fill is not None:
            assert isinstance(fill, bytes) , \
                "Only bytes-type fill allowed."
        if length is None:
            dtype = np.dtype(np.bytes_)
        else:
            dtype = np.dtype((np.bytes_, length))
        data = np.array(data,
                        dtype = dtype,
                        order = order)
        if not fill in (None, b'\x00'):
            if length is None:
                length = data.dtype.itemsize
            f = (fill * (length // len(fill) + 1)) [:length]
            # array operations for concatenation do not work in numpy 1.11
            d = data.flat
            for i in range(prod(d.shape)):
                d[i] += f
        if not data.flags.c_contiguous:
            data = np.ndarray(data.shape,
                              dtype = data.dtype,
                              buffer = data,
                              order = 'C')
        nbytes = data.nbytes
        self._check_buf_size(nbytes, offset)
        if offset is not None:
            if offset > 0:
                p = self.pos + offset
            else:
                p = -offset
        else:
            p = self.pos
        self.buffer[p:p+nbytes] = data.tobytes()
        if offset is None:
            self.pos += nbytes

    # ========================================
    # application-specific routines
    # ========================================
    def put_kep_parm(self, data):
        """Write a kepler parameter binary list with 32 bit integers.

        Integers are stored in the second 4 bytes of each 8-byte slot
        (via a strided int32 view with offset 4), floats use the full slot.
        """
        count = len(data)
        if count == 0:
            # CONSISTENCY FIX: early-out like put_kep_parm64; also avoids
            # constructing an offset/strided view into an empty buffer
            return
        value = np.zeros(
            count,
            dtype=np.float64)
        ivalue = np.ndarray(
            count,
            buffer=value.data.cast('b'),
            offset=4,
            dtype=np.int32,
            strides=8)
        for i,d in enumerate(data):
            if d.dtype == np.int32:
                ivalue[i] = d
            else:
                value[i] = d
        if self.swapbyteorder:
            value.byteswap(True)
        p = self.pos
        nbytes = value.nbytes
        self._check_buf_size(nbytes)
        self.buffer[p:p+nbytes] = value.data.tobytes()
        self.pos += nbytes

    def put_kep_parm64(self, data):
        """Write a kepler parameter binary list with 64 bit integers."""
        count = len(data)
        if count == 0:
            return
        value = np.zeros(
            count,
            dtype=np.float64)
        # int64 values alias the same 8-byte slots as the float64 array
        ivalue = np.ndarray(
            count,
            buffer=value.data.cast('b'),
            dtype=np.int64)
        for i,d in enumerate(data):
            if d.dtype == np.int64:
                ivalue[i] = d
            else:
                value[i] = d
        if self.swapbyteorder:
            value.byteswap(True)
        p = self.pos
        nbytes = value.nbytes
        self._check_buf_size(nbytes)
        self.buffer[p:p+nbytes] = value.data.tobytes()
        self.pos += nbytes

    def put_f8_kep_i4(self, data):
        """Write i4 in f8 array for kepler.

        Pass the f8 dimension.

        Half the space seems wasted the way KEPLER treats this, the
        entire second half of each array is empty.

        Here we shall just fill up the 2nd part of the array and
        write the passed dimension.

        Byteswap is only needed on i4 level (see read routine).
        """
        self.put_n(data)
        self.skip_bytes(data.nbytes, fill=b'\x00')

    # dummy routine to allow data IO code below
    def write(self):
        """
        Provide interface for writing data to whereever.
        """
        raise NotImplementedError("Writing Data not implemented.")

    # =======================================================================
    # data IO
    # =======================================================================
    def write_bytes(self, *args, **kwargs):
        """Write numpy empty bytes to file"""
        self.assert_bor()
        self.skip_bytes(*args, **kwargs)
        self.write()

    def write_data(self, *args, **kwargs):
        """Write plain buffer to file"""
        self.assert_bor()
        self.put_data(*args, **kwargs)
        self.write()

    def write_n(self, *args, **kwargs):
        """Write numpy scalar/array to file"""
        self.assert_bor()
        kwargs['offset'] = None
        self.put_n(*args, **kwargs)
        self.write()

    def write_1d(self, *args, **kwargs):
        """Write 1d padded numpy scalar/array to file"""
        self.assert_bor()
        kwargs['offset'] = None
        self.put_1d(*args, **kwargs)
        self.write()

    def write_s(self, *args, **kwargs):
        """Write (numpy) string (array) to file"""
        self.assert_bor()
        kwargs['offset'] = None
        self.put_s(*args, **kwargs)
        self.write()

    def write_buf(self, *args, **kwargs):
        """Write array/list of raw data pieces to file"""
        self.assert_bor()
        kwargs['offset'] = None
        self.put_buf(*args, **kwargs)
        self.write()

    # application-specific routines
    def write_kep_parm(self, data):
        """
        write kepler parm array to file
        """
        self.assert_bor()
        self.put_kep_parm(data)
        self.write()

    def write_f8_kep_i4(self, data):
        """Write i4 in f8 array for kepler.

        Pass the f8 dimension.

        Half the space seems wasted the way KEPLER treats this, the
        entire second half of each array is empty.

        Here we shall just fill up the 2nd part of the array and
        write the passed dimension.

        Byteswap is only needed on i4 level (see read routine).
        """
        self.assert_bor()
        self.put_f8_kep_i4(data)
        self.write()
def _f_store_n(p = None, dt = None, **_kwargs):
def _f(self, *args, **kwargs):
kwargs['dtype'] = dt
kwargs.setdefault('order', 'F')
p(self, *args, **kwargs)
_f.dt = dt
_f.p = p
return _f
def _f_store_n1d(p = None, dt = None, **_kwargs):
def _f(self, data, *args, **kwargs):
kwargs['dtype'] = dt
p(self, data, *args, **kwargs)
_f.p = p
_f.dt = dt
return _f
def _f_store_n1d_(p = None, dt = None, lead = 0, tail = 0, **_kwargs):
def _f(self, data, *args, **kwargs):
kwargs['dtype'] = dt
kwargs['lead'] = lead
kwargs['tail'] = tail
p(self, data, *args, **kwargs)
_f.p = p
_f.dt = dt
_f.lead = lead
_f.tail = tail
return _f
# Auto-generate typed convenience wrappers on DataOutputBuffer for every
# supported numpy type: put_<t>/write_<t> (whole arrays) and the
# put_<t>_1d* / write_<t>_1d* family (1D arrays with lead/tail padding
# stripped).  ``_set_method`` attaches the closure built by the _f_store_*
# factories under the formatted name with the formatted docstring.
for t in _np_types:
    kw = dict(
        cls = DataOutputBuffer,
        t = t)
    _set_method(
        fn = _f_store_n,
        parent = 'put_n',
        name = 'put_{t}',
        doc = """Write numpy {dn} to buffer at offset relative to
        current position.\n
        Does not advance buffer pointer.""",
        **kw)
    _set_method(
        fn = _f_store_n,
        parent = 'write_n',
        name = 'write_{t}',
        doc = "Write numpy {dn} array to file as record.",
        **kw)
    _set_method(
        fn = _f_store_n1d,
        parent = 'put_1d',
        name = 'put_{t}_1d',
        doc = """Write a 1D numpy {dn} array padded with 0 as specified to buffer. Padding is not written.""",
        **kw)
    _set_method(
        fn = _f_store_n1d,
        parent = 'write_1d',
        name = 'write_{t}_1d',
        doc = """Write a 1D numpy {dn} array padded with 0 as specified to file as record. Padding is not written.""",
        **kw)
    _set_method(
        fn = _f_store_n1d_,
        parent = 'put_1d',
        name = 'put_{t}_1d_0',
        doc = """Write a 1D numpy {dn} array padded with one element at beginning. Padding is not written.""",
        extra_kw = dict(lead=1, tail=0),
        **kw)
    _set_method(
        fn = _f_store_n1d_,
        parent = 'write_1d',
        name = 'write_{t}_1d_0',
        doc = """Write a 1D numpy {dn} array padded with one element at beginning to file as record. Padding is not written.""",
        extra_kw = dict(lead=1, tail=0),
        **kw)
    _set_method(
        fn = _f_store_n1d_,
        parent = 'put_1d',
        name = 'put_{t}_1d_n',
        doc = """Write a 1D numpy {dn} array padded with one element at end. Padding is not written.""",
        extra_kw = dict(lead=0, tail=1),
        **kw)
    _set_method(
        fn = _f_store_n1d_,
        parent = 'write_1d',
        name = 'write_{t}_1d_n',
        doc = """Write a 1D numpy {dn} array padded with one element at end to file as record. Padding is not written.""",
        extra_kw = dict(lead=0, tail=1),
        **kw)
    _set_method(
        fn = _f_store_n1d_,
        parent = 'put_1d',
        name = 'put_{t}_1d_0n',
        doc = """Write a 1D numpy {dn} array padded with one element at begiining and end each. Padding is not written.""",
        extra_kw = dict(lead=1, tail=1),
        **kw)
    _set_method(
        fn = _f_store_n1d_,
        parent = 'write_1d',
        name = 'write_{t}_1d_0n',
        doc = """Write a 1D numpy {dn} array padded with one element at beginning and end each to file as record. Padding is not written.""",
        extra_kw = dict(lead=1, tail=1),
        **kw)
#=======================================================================
class DataWriter(DataOutputBuffer):
    """
    Class for writing 'unformatted' binary files.

    File names ending with .gz, .xz, .bz2 will be automatically
    compressed.

    For .gz this may fail, however, if the file is bigger than 2GB or
    4GB.
    """
    # TODO: add (file) truncate functionality
    def __init__(self,
                 filename, *args, **kwargs):
        """
        Initialize data fields and open file.

        Optionally the byte order can be specified.
        The default is big endian.
        """
        # TODO: Add append mode.
        # not sure how/whether this will work with compressed files
        super().__init__(*args, **kwargs)
        self.open(filename)

    def open(self, filename):
        """
        Open the file for writing; compression is chosen by file suffix.
        """
        # expand ~ and environment variables once, then use the expanded
        # path consistently (previously only the .xz branch did so)
        self.filename = os.path.expandvars(os.path.expanduser(filename))
        if self.filename.endswith('.gz'):
            self.compressed = True
            self.compress_mode = 'gz'
            self.file = gzip.open(self.filename, 'wb')
        elif self.filename.endswith('.bz2'):
            self.compressed = True
            self.compress_mode = 'bz2'
            self.file = bz2.BZ2File(self.filename, 'wb', 2**16)
        elif self.filename.endswith('.xz'):
            self.compressed = True
            self.compress_mode = 'xz'
            # BUG FIX: ``LZMAFile`` was an undefined bare name (NameError);
            # the module imports ``lzma`` at the top of the file
            self.file = lzma.LZMAFile(self.filename, 'wb')
        else:
            self.file = open(self.filename, 'wb', -1)
            self.compressed = False
            self.compress_mode = None
        self._init()

    def _init(self):
        """Initialize the file position and data to empty."""
        super()._init()
        # fpos tracks total bytes written to the file so far
        self.fpos = 0

    def close(self):
        """Close the file, flushing any pending (non-empty) record first."""
        if self.pos != 0:
            self.write()
        self.file.close()

    def rewind(self):
        """Rewind the file and reset the buffer state."""
        self.file.seek(0, os.SEEK_SET)
        self._init()

    def write(self):
        """
        Write the data record to file.
        """
        self._write()

    def _write(self):
        """
        Write a data record to file.
        """
        self.file.write(self.buffer[0:self.pos])
        self.fpos += self.pos
        self.pos = 0

    # context manager interface
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # only close on a clean exit; never suppress exceptions
        if exc_type is None:
            self.close()
        return False
#=======================================================================
class FortranWriter(DataWriter, FortranSpecs):
    """
    Class for writing 'unformatted' Fortran binary files.

    Based on DataWriter, automatic compression support.

    Each record is framed by a leading and trailing record-length marker,
    as written by typical Fortran runtimes.
    """
    # I still need to find out how to efficiently work with a buffer
    # for writing internally - how to extend it, etc.  The plan is to
    # always write an entire record at once.  Currently it would
    # appear the largest FORTRAN files I have come with records for
    # ppnb in KEPLER, 5000 isotopes * 2000 zones * 8 bytes ... 80 MB
    # if pushing it ... usually < 16 MB.  So we could start with that,
    # then extend if ever needed.

    # TODO - add functionality for record-based backward skipping
    # (potentially need to truncate file on close)
    def __init__(self, *args, reclen = 4, **kwargs):
        # reclen: byte width of the record-length marker (set up by FortranSpecs)
        self._set_reclen(reclen)
        super().__init__(*args, **kwargs)

    def _init(self):
        """Initialize the file position and data to empty."""
        super()._init()
        # rpos counts the records written so far
        self.rpos = 0

    def _set_byteorder(self, *args, **kwargs):
        # also adjust the record-length marker dtype to the chosen byte order
        super()._set_byteorder(*args, **kwargs)
        self.reclen_dtype = self.reclen_dtype.newbyteorder(self.byteorder)

    def _write(self):
        """
        Write a data record to file, framed by record-length markers.
        """
        self._write_reclen()
        self.file.write(self.buffer[0:self.pos])
        self._write_reclen()
        self.fpos += self.pos + 2 * self.fortran_reclen
        self.rpos += 1
        self.pos = 0

    def _write_reclen(self):
        """Write the record length marker in the configured byte order."""
        self.file.write(np.array(
            self.pos,
            dtype = self.reclen_dtype).data.tobytes())
|
jan-rybizkiREPO_NAMEChempyPATH_START.@Chempy_extracted@Chempy-master@Chempy@input@yields@West17@fortranfile@writer.py@.PATH_END.py
|
{
"filename": "generate_model.py",
"repo_name": "ebachelet/pyLIMA",
"repo_path": "pyLIMA_extracted/pyLIMA-master/pyLIMA/models/generate_model.py",
"type": "Python"
}
|
import importlib
def create_model(model_type, event, parallax=None, double_source=None,
                 orbital_motion=None, origin=None,
                 blend_flux_parameter='fblend',
                 fancy_parameters=None):
    """
    Load a model according to the supplied model_type. Models are expected to be named
    Model<model_type> e.g. ModelPSPL

    :param string model_type: Model type e.g. PSPL
    :param event: the event the model is attached to
    :param parallax: [mode, t0_par] pair; defaults to ['None', 0.0]
    :param double_source: [mode, value] pair; defaults to ['None', 0]
    :param orbital_motion: [mode, t0_om] pair; defaults to ['None', 0.0]
    :param origin: [name, [x, y]] pair; defaults to ['center_of_mass', [0, 0]]
    :param blend_flux_parameter: name of the blend-flux parameterization
    :param fancy_parameters: dict of parameter transformations; defaults to {}
    :return: Model object for given model_type, or None if no such model exists
    """
    # Avoid shared mutable default arguments: build fresh defaults per call.
    if parallax is None:
        parallax = ['None', 0.0]
    if double_source is None:
        double_source = ['None', 0]
    if orbital_motion is None:
        orbital_motion = ['None', 0.0]
    if origin is None:
        origin = ['center_of_mass', [0, 0]]
    if fancy_parameters is None:
        fancy_parameters = {}
    try:
        model_module = importlib.import_module('pyLIMA.models.' + model_type + '_model')
    except (ImportError, ValueError):
        # BUG FIX: import_module raises ImportError/ModuleNotFoundError for an
        # unknown model type; the previous ``except ValueError`` never caught
        # it, so an unknown model crashed instead of returning None.
        return None
    new_model = getattr(model_module, '{}model'.format(model_type))
    return new_model(event, parallax=parallax, double_source=double_source,
                     orbital_motion=orbital_motion,
                     blend_flux_parameter=blend_flux_parameter, origin=origin,
                     fancy_parameters=fancy_parameters)
|
ebacheletREPO_NAMEpyLIMAPATH_START.@pyLIMA_extracted@pyLIMA-master@pyLIMA@models@generate_model.py@.PATH_END.py
|
{
"filename": "run_lavatmos_example1.py",
"repo_name": "cvbuchem/LavAtmos",
"repo_path": "LavAtmos_extracted/LavAtmos-master/scripts/run_lavatmos_example1.py",
"type": "Python"
}
|
# Example script: vaporise the Visscher & Fegley (2013) BSE melt composition
# with LavAtmos at two temperatures and save the resulting table to CSV.
# NOTE(review): uses hardcoded absolute paths for a JupyterHub deployment
# (/home/jovyan/...) — adjust mod_dir and the CSV path for other machines.

# Standard python packages
import os
import numpy as np
import pandas as pd
import sys

# LavAtmos
mod_dir = '/home/jovyan/ThermoEngine/LavAtmos'
sys.path.append(mod_dir)
import lavatmos

# Import compositions (one column per composition, oxide wt fractions per row)
os.chdir(mod_dir)
vf13_comps_df = pd.read_csv('/home/jovyan/ThermoEngine/LavAtmos/data/input/vf2013_comps.csv',index_col=0)
vf13_comps = {}
for name in vf13_comps_df.columns:
    vf13_comps[name] = vf13_comps_df[name].to_dict()
print(vf13_comps['BSE'])

# Initiate instance of LavAtmos
system = lavatmos.melt_vapor_system()

# Temperature values (K); full scan kept for reference
# T = np.arange(1500,4050,50)
T = np.array([2000,3000])

# Run the vaporisation calculations
lavatmos_bse = system.vaporise(T, vf13_comps['BSE'])

# Save results (relative to mod_dir because of the os.chdir above)
output_dir = 'output/'
name = 'script_example1_output.csv'
print(f'Saving results to: {output_dir+name}')
lavatmos_bse.to_csv(output_dir+name)
|
cvbuchemREPO_NAMELavAtmosPATH_START.@LavAtmos_extracted@LavAtmos-master@scripts@run_lavatmos_example1.py@.PATH_END.py
|
{
"filename": "_margin.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/_margin.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MarginValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``layout.margin`` property."""

    def __init__(self, plotly_name="margin", parent_name="layout", **kwargs):
        # Pop the defaults out of kwargs first so caller-supplied
        # overrides win, then delegate to the compound-validator base.
        data_class_str = kwargs.pop("data_class_str", "Margin")
        data_docs = kwargs.pop(
            "data_docs",
            """
            autoexpand
                Turns on/off margin expansion computations.
                Legends, colorbars, updatemenus, sliders, axis
                rangeselector and rangeslider are allowed to
                push the margins by defaults.
            b
                Sets the bottom margin (in px).
            l
                Sets the left margin (in px).
            pad
                Sets the amount of padding (in px) between the
                plotting area and the axis lines
            r
                Sets the right margin (in px).
            t
                Sets the top margin (in px).
""",
        )
        super(MarginValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@_margin.py@.PATH_END.py
|
{
"filename": "argilla_callback.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/callbacks/argilla_callback.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
# Import only for static type checking: at runtime the attribute is
# resolved lazily via __getattr__ below so that langchain_community
# remains an optional dependency.
if TYPE_CHECKING:
    from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "ArgillaCallbackHandler": "langchain_community.callbacks.argilla_callback"
}
# Module-level importer; called by __getattr__ on any missing attribute.
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Delegates to the importer created above, which handles the
    deprecated re-export from ``langchain_community``.
    """
    return _import_attribute(name)
__all__ = [
    "ArgillaCallbackHandler",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@callbacks@argilla_callback.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "keras-team/keras-tuner",
"repo_path": "keras-tuner_extracted/keras-tuner-master/keras_tuner/applications/__init__.py",
"type": "Python"
}
|
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_tuner.applications.augment import HyperImageAugment
from keras_tuner.applications.efficientnet import HyperEfficientNet
from keras_tuner.applications.resnet import HyperResNet
from keras_tuner.applications.xception import HyperXception
|
keras-teamREPO_NAMEkeras-tunerPATH_START.@keras-tuner_extracted@keras-tuner-master@keras_tuner@applications@__init__.py@.PATH_END.py
|
{
"filename": "_legendgroup.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/_legendgroup.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``scattermap.legendgroup`` property."""

    def __init__(self, plotly_name="legendgroup", parent_name="scattermap", **kwargs):
        # Resolve the default edit type up front; callers may override it.
        edit_type = kwargs.pop("edit_type", "style")
        super(LegendgroupValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermap@_legendgroup.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/python/openai/openai_frontend/__init__.py",
"type": "Python"
}
|
# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@python@openai@openai_frontend@__init__.py@.PATH_END.py
|
{
"filename": "test_disjoint_set.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/cluster/tests/test_disjoint_set.py",
"type": "Python"
}
|
import pytest
from pytest import raises as assert_raises
import numpy as np
from scipy.cluster.hierarchy import DisjointSet
import string
def generate_random_token():
k = len(string.ascii_letters)
tokens = list(np.arange(k, dtype=int))
tokens += list(np.arange(k, dtype=float))
tokens += list(string.ascii_letters)
tokens += [None for i in range(k)]
tokens = np.array(tokens, dtype=object)
rng = np.random.RandomState(seed=0)
while 1:
size = rng.randint(1, 3)
element = rng.choice(tokens, size)
if size == 1:
yield element[0]
else:
yield tuple(element)
def get_elements(n):
    """Return the first *n* distinct tokens from the random token stream.

    A dict is used (rather than a set) so insertion order is preserved
    deterministically without comparing numpy ints directly.
    """
    seen = {}
    for token in generate_random_token():
        if token not in seen:
            seen[token] = len(seen)
        if len(seen) >= n:
            break
    return list(seen.keys())
def test_init():
    """Constructing from an iterable makes every element its own subset."""
    n = 10
    elements = get_elements(n)
    dis = DisjointSet(elements)
    assert dis.n_subsets == n
    assert list(dis) == elements
def test_len():
    """len() counts all elements, including ones added after construction."""
    n = 10
    elements = get_elements(n)
    dis = DisjointSet(elements)
    assert len(dis) == n
    dis.add("dummy")
    assert len(dis) == n + 1
@pytest.mark.parametrize("n", [10, 100])
def test_contains(n):
    """Membership testing matches the elements used at construction."""
    elements = get_elements(n)
    dis = DisjointSet(elements)
    for x in elements:
        assert x in dis
    assert "dummy" not in dis
@pytest.mark.parametrize("n", [10, 100])
def test_add(n):
    """Incremental add() builds the same structure; add() is idempotent."""
    elements = get_elements(n)
    dis1 = DisjointSet(elements)
    dis2 = DisjointSet()
    for i, x in enumerate(elements):
        dis2.add(x)
        assert len(dis2) == i + 1
        # test idempotency by adding element again
        dis2.add(x)
        assert len(dis2) == i + 1
    assert list(dis1) == list(dis2)
def test_element_not_present():
    """Operations on unknown elements raise KeyError."""
    elements = get_elements(n=10)
    dis = DisjointSet(elements)
    with assert_raises(KeyError):
        dis["dummy"]
    with assert_raises(KeyError):
        dis.merge(elements[0], "dummy")
    with assert_raises(KeyError):
        dis.connected(elements[0], "dummy")
@pytest.mark.parametrize("direction", ["forwards", "backwards"])
@pytest.mark.parametrize("n", [10, 100])
def test_linear_union_sequence(n, direction):
    """Chained merges collapse a path into one subset; check roots/counts."""
    elements = get_elements(n)
    dis = DisjointSet(elements)
    assert elements == list(dis)
    indices = list(range(n - 1))
    if direction == "backwards":
        indices = indices[::-1]
    for it, i in enumerate(indices):
        assert not dis.connected(elements[i], elements[i + 1])
        assert dis.merge(elements[i], elements[i + 1])
        assert dis.connected(elements[i], elements[i + 1])
        assert dis.n_subsets == n - 1 - it
    roots = [dis[i] for i in elements]
    if direction == "forwards":
        assert all(elements[0] == r for r in roots)
    else:
        # merging backwards leaves the second-to-last element as the root
        assert all(elements[-2] == r for r in roots)
    assert not dis.merge(elements[0], elements[-1])
@pytest.mark.parametrize("n", [10, 100])
def test_self_unions(n):
    """Merging an element with itself is a no-op and leaves roots unchanged."""
    elements = get_elements(n)
    dis = DisjointSet(elements)
    for x in elements:
        assert dis.connected(x, x)
        assert not dis.merge(x, x)
        assert dis.connected(x, x)
    assert dis.n_subsets == len(elements)
    assert elements == list(dis)
    roots = [dis[x] for x in elements]
    assert elements == roots
@pytest.mark.parametrize("order", ["ab", "ba"])
@pytest.mark.parametrize("n", [10, 100])
def test_equal_size_ordering(n, order):
    """When subset sizes tie, the earlier-inserted element becomes the root."""
    elements = get_elements(n)
    dis = DisjointSet(elements)
    rng = np.random.RandomState(seed=0)
    indices = np.arange(n)
    rng.shuffle(indices)
    for i in range(0, len(indices), 2):
        a, b = elements[indices[i]], elements[indices[i + 1]]
        if order == "ab":
            assert dis.merge(a, b)
        else:
            assert dis.merge(b, a)
        expected = elements[min(indices[i], indices[i + 1])]
        assert dis[a] == expected
        assert dis[b] == expected
@pytest.mark.parametrize("kmax", [5, 10])
def test_binary_tree(kmax):
    """Merge blocks pairwise into a binary tree; roots follow block starts."""
    n = 2**kmax
    elements = get_elements(n)
    dis = DisjointSet(elements)
    rng = np.random.RandomState(seed=0)
    for k in 2**np.arange(kmax):
        for i in range(0, n, 2 * k):
            r1, r2 = rng.randint(0, k, size=2)
            a, b = elements[i + r1], elements[i + k + r2]
            assert not dis.connected(a, b)
            assert dis.merge(a, b)
            assert dis.connected(a, b)
        assert elements == list(dis)
        roots = [dis[i] for i in elements]
        # after level k, each element's root is the first element of its block
        expected_indices = np.arange(n) - np.arange(n) % (2 * k)
        expected = [elements[i] for i in expected_indices]
        assert roots == expected
@pytest.mark.parametrize("n", [10, 100])
def test_subsets(n):
    """subset(), subset_size() and subsets() stay consistent across merges."""
    elements = get_elements(n)
    dis = DisjointSet(elements)
    rng = np.random.RandomState(seed=0)
    for i, j in rng.randint(0, n, (n, 2)):
        x = elements[i]
        y = elements[j]
        expected = {element for element in dis if {dis[element]} == {dis[x]}}
        assert dis.subset_size(x) == len(dis.subset(x))
        assert expected == dis.subset(x)
        expected = {dis[element]: set() for element in dis}
        for element in dis:
            expected[dis[element]].add(element)
        expected = list(expected.values())
        assert expected == dis.subsets()
        dis.merge(x, y)
        assert dis.subset(x) == dis.subset(y)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@cluster@tests@test_disjoint_set.py@.PATH_END.py
|
{
"filename": "test_tree.py",
"repo_name": "shaoshanglqy/shap-shapley",
"repo_path": "shap-shapley_extracted/shap-shapley-master/tests/explainers/test_tree.py",
"type": "Python"
}
|
import matplotlib
import numpy as np
matplotlib.use('Agg')
import shap
def test_front_page_xgboost():
    """Smoke-test the README front-page XGBoost example end to end."""
    try:
        import xgboost
    except Exception as e:
        print("Skipping test_front_page_xgboost!")
        return
    import shap
    # load JS visualization code to notebook
    shap.initjs()
    # train XGBoost model
    X, y = shap.datasets.boston()
    model = xgboost.train({"learning_rate": 0.01, "silent": 1}, xgboost.DMatrix(X, label=y), 100)
    # explain the model's predictions using SHAP values
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X)
    # visualize the first prediction's explaination
    shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])
    # visualize the training set predictions
    shap.force_plot(explainer.expected_value, shap_values, X)
    # create a SHAP dependence plot to show the effect of a single feature across the whole dataset
    shap.dependence_plot(5, shap_values, X, show=False)
    shap.dependence_plot("RM", shap_values, X, show=False)
    # summarize the effects of all the features
    shap.summary_plot(shap_values, X, show=False)
def test_front_page_sklearn():
    """Smoke-test the front-page example with sklearn tree ensembles."""
    import sklearn.ensemble
    import shap
    # load JS visualization code to notebook
    shap.initjs()
    # train model
    X, y = shap.datasets.boston()
    models = [
        sklearn.ensemble.RandomForestRegressor(n_estimators=100),
        sklearn.ensemble.ExtraTreesRegressor(n_estimators=100),
    ]
    for model in models:
        model.fit(X, y)
        # explain the model's predictions using SHAP values
        explainer = shap.TreeExplainer(model)
        shap_values = explainer.shap_values(X)
        # visualize the first prediction's explaination
        shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])
        # visualize the training set predictions
        shap.force_plot(explainer.expected_value, shap_values, X)
        # create a SHAP dependence plot to show the effect of a single feature across the whole dataset
        shap.dependence_plot(5, shap_values, X, show=False)
        shap.dependence_plot("RM", shap_values, X, show=False)
        # summarize the effects of all the features
        shap.summary_plot(shap_values, X, show=False)
def test_xgboost_multiclass():
    """TreeExplainer on an XGBClassifier trained on iris; dependence plot works."""
    try:
        import xgboost
    except Exception as e:
        print("Skipping test_xgboost_multiclass!")
        return
    import shap
    # train XGBoost model
    X, Y = shap.datasets.iris()
    # NOTE(review): iris has 3 classes but the objective is binary:logistic —
    # presumably intentional for this test; verify before reusing.
    model = xgboost.XGBClassifier(objective="binary:logistic", max_depth=4)
    model.fit(X, Y)
    # explain the model's predictions using SHAP values (use pred_contrib in LightGBM)
    shap_values = shap.TreeExplainer(model).shap_values(X)
    # ensure plot works for first class
    shap.dependence_plot(0, shap_values[0], X, show=False)
def test_xgboost_mixed_types():
    """XGBoost + TreeExplainer on a frame with mixed dtypes (float/int/bool)."""
    try:
        import xgboost
    except Exception:
        print("Skipping test_xgboost_mixed_types!")
        return
    import shap
    import numpy as np
    X,y = shap.datasets.boston()
    X["LSTAT"] = X["LSTAT"].astype(np.int64)
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the documented replacement and behaves identically here.
    X["B"] = X["B"].astype(bool)
    bst = xgboost.train({"learning_rate": 0.01, "silent": 1}, xgboost.DMatrix(X, label=y), 1000)
    shap_values = shap.TreeExplainer(bst).shap_values(X)
    shap.dependence_plot(0, shap_values, X, show=False)
def test_sklearn_random_forest_multiclass():
    """Check RandomForest SHAP values against known reference magnitudes."""
    import shap
    from sklearn.ensemble import RandomForestClassifier
    X, y = shap.datasets.iris()
    # collapse class 2 into class 1 to make the problem binary
    y[y == 2] = 1
    model = RandomForestClassifier(n_estimators=100, max_depth=None, min_samples_split=2, random_state=0)
    model.fit(X, y)
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X)
    assert np.abs(shap_values[0][0,0] - 0.05) < 1e-3
    assert np.abs(shap_values[1][0,0] + 0.05) < 1e-3
def create_binary_newsgroups_data():
    """Fetch a two-category (atheism vs christian) 20-newsgroups split."""
    from sklearn.datasets import fetch_20newsgroups
    categories = ['alt.atheism', 'soc.religion.christian']
    newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
    newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)
    class_names = ['atheism', 'christian']
    return newsgroups_train, newsgroups_test, class_names
def create_random_forest_vectorizer():
    """Build a CountVectorizer -> dense -> RandomForest sklearn pipeline."""
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.pipeline import Pipeline
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.base import TransformerMixin
    vectorizer = CountVectorizer(lowercase=False, min_df=0.0, binary=True)
    # adapter: the forest needs a dense array but CountVectorizer emits sparse
    class DenseTransformer(TransformerMixin):
        def fit(self, X, y=None, **fit_params):
            return self
        def transform(self, X, y=None, **fit_params):
            return X.toarray()
    rf = RandomForestClassifier(n_estimators=500, random_state=777)
    return Pipeline([('vectorizer', vectorizer), ('to_dense', DenseTransformer()), ('rf', rf)])
def test_sklearn_random_forest_newsgroups():
    """Regression test: explaining text-vectorized data used to crash natively."""
    import shap
    from sklearn.ensemble import RandomForestClassifier
    # note: this test used to fail in native TreeExplainer code due to memory corruption
    newsgroups_train, newsgroups_test, classes = create_binary_newsgroups_data()
    pipeline = create_random_forest_vectorizer()
    pipeline.fit(newsgroups_train.data, newsgroups_train.target)
    rf = pipeline.named_steps['rf']
    vectorizer = pipeline.named_steps['vectorizer']
    densifier = pipeline.named_steps['to_dense']
    test_row = newsgroups_test.data[83:84]
    explainer = shap.TreeExplainer(rf)
    vec_row = vectorizer.transform(test_row)
    dense_row = densifier.transform(vec_row)
    explainer.shap_values(dense_row)
def test_sklearn_decision_tree_multiclass():
    """Check DecisionTree SHAP values against loose reference magnitudes."""
    import shap
    from sklearn.tree import DecisionTreeClassifier
    X, y = shap.datasets.iris()
    # collapse class 2 into class 1 to make the problem binary
    y[y == 2] = 1
    model = DecisionTreeClassifier(max_depth=None, min_samples_split=2, random_state=0)
    model.fit(X, y)
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X)
    assert np.abs(shap_values[0][0,0] - 0.05) < 1e-1
    assert np.abs(shap_values[1][0,0] + 0.05) < 1e-1
def test_lightgbm():
    """Smoke-test TreeExplainer on an LGBMRegressor."""
    try:
        import lightgbm
    except:
        print("Skipping test_lightgbm!")
        return
    import shap
    # train XGBoost model
    X, y = shap.datasets.boston()
    model = lightgbm.sklearn.LGBMRegressor(categorical_feature=[8])
    model.fit(X, y)
    # explain the model's predictions using SHAP values
    shap_values = shap.TreeExplainer(model).shap_values(X)
def test_lightgbm_multiclass():
    """Smoke-test TreeExplainer on a multiclass LGBMClassifier."""
    try:
        import lightgbm
    except:
        print("Skipping test_lightgbm_multiclass!")
        return
    import shap
    # train XGBoost model
    X, Y = shap.datasets.iris()
    model = lightgbm.sklearn.LGBMClassifier()
    model.fit(X, Y)
    # explain the model's predictions using SHAP values
    shap_values = shap.TreeExplainer(model).shap_values(X)
    # ensure plot works for first class
    shap.dependence_plot(0, shap_values[0], X, show=False)
# TODO: Test tree_limit argument
def test_sklearn_interaction():
    """Verify SHAP interaction values from a sklearn RF are symmetric."""
    import sklearn
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import RandomForestClassifier
    # train a simple sklean RF model on the iris dataset
    X, y = shap.datasets.iris()
    X_train,X_test,Y_train,Y_test = train_test_split(*shap.datasets.iris(), test_size=0.2, random_state=0)
    rforest = RandomForestClassifier(n_estimators=100, max_depth=None, min_samples_split=2, random_state=0)
    model = rforest.fit(X_train, Y_train)
    # verify symmetry of the interaction values (this typically breaks if anything is wrong)
    interaction_vals = shap.TreeExplainer(model).shap_interaction_values(X)
    for i in range(len(interaction_vals)):
        for j in range(len(interaction_vals[i])):
            for k in range(len(interaction_vals[i][j])):
                for l in range(len(interaction_vals[i][j][k])):
                    assert abs(interaction_vals[i][j][k][l] - interaction_vals[i][j][l][k]) < 1e-6
    # ensure the interaction plot works
    shap.summary_plot(interaction_vals[0], X, show=False)
def test_lightgbm_interaction():
    """Verify SHAP interaction values from LightGBM are symmetric."""
    try:
        import lightgbm
    except Exception as e:
        print("Skipping test_lightgbm_interaction!")
        return
    import shap
    # train XGBoost model
    X, y = shap.datasets.boston()
    model = lightgbm.sklearn.LGBMRegressor()
    model.fit(X, y)
    # verify symmetry of the interaction values (this typically breaks if anything is wrong)
    interaction_vals = shap.TreeExplainer(model).shap_interaction_values(X)
    for j in range(len(interaction_vals)):
        for k in range(len(interaction_vals[j])):
            for l in range(len(interaction_vals[j][k])):
                assert abs(interaction_vals[j][k][l] - interaction_vals[j][l][k]) < 1e-6
def test_sum_match_random_forest():
    """SHAP values + expected_value must reproduce RF class-0 probabilities."""
    import shap
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import RandomForestClassifier
    import sklearn
    X_train,X_test,Y_train,Y_test = train_test_split(*shap.datasets.adult(), test_size=0.2, random_state=0)
    clf = RandomForestClassifier(random_state=202, n_estimators=10, max_depth=10)
    clf.fit(X_train, Y_train)
    predicted = clf.predict_proba(X_test)
    ex = shap.TreeExplainer(clf)
    shap_values = ex.shap_values(X_test)
    assert np.abs(shap_values[0].sum(1) + ex.expected_value[0] - predicted[:,0]).max() < 1e-6, \
        "SHAP values don't sum to model output!"
def test_sum_match_extra_trees():
    """SHAP values + expected_value must reproduce ExtraTrees predictions."""
    import shap
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import ExtraTreesRegressor
    import sklearn
    X_train,X_test,Y_train,Y_test = train_test_split(*shap.datasets.adult(), test_size=0.2, random_state=0)
    clf = ExtraTreesRegressor(random_state=202, n_estimators=10, max_depth=10)
    clf.fit(X_train, Y_train)
    predicted = clf.predict(X_test)
    ex = shap.TreeExplainer(clf)
    shap_values = ex.shap_values(X_test)
    assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-6, \
        "SHAP values don't sum to model output!"
def test_single_row_random_forest():
    """Explaining a single row must still sum to that row's prediction."""
    import shap
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import RandomForestClassifier
    import sklearn
    X_train,X_test,Y_train,Y_test = train_test_split(*shap.datasets.adult(), test_size=0.2, random_state=0)
    clf = RandomForestClassifier(random_state=202, n_estimators=10, max_depth=10)
    clf.fit(X_train, Y_train)
    predicted = clf.predict_proba(X_test)
    ex = shap.TreeExplainer(clf)
    shap_values = ex.shap_values(X_test.iloc[0,:])
    assert np.abs(shap_values[0].sum() + ex.expected_value[0] - predicted[0,0]) < 1e-6, \
        "SHAP values don't sum to model output!"
def test_sum_match_gradient_boosting_classifier():
    """GBM classifier: SHAP sums must match the decision_function margin."""
    import shap
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import GradientBoostingClassifier
    import sklearn
    X_train,X_test,Y_train,Y_test = train_test_split(*shap.datasets.adult(), test_size=0.2, random_state=0)
    clf = GradientBoostingClassifier(random_state=202, n_estimators=10, max_depth=10)
    clf.fit(X_train, Y_train)
    # Use decision function to get prediction before it is mapped to a probability
    predicted = clf.decision_function(X_test)
    ex = shap.TreeExplainer(clf)
    shap_values = ex.shap_values(X_test)
    assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-6, \
        "SHAP values don't sum to model output!"
def test_single_row_gradient_boosting_classifier():
    """GBM classifier, single row: SHAP sums must match its margin."""
    import shap
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import GradientBoostingClassifier
    import sklearn
    X_train,X_test,Y_train,Y_test = train_test_split(*shap.datasets.adult(), test_size=0.2, random_state=0)
    clf = GradientBoostingClassifier(random_state=202, n_estimators=10, max_depth=10)
    clf.fit(X_train, Y_train)
    predicted = clf.decision_function(X_test)
    ex = shap.TreeExplainer(clf)
    shap_values = ex.shap_values(X_test.iloc[0,:])
    assert np.abs(shap_values.sum() + ex.expected_value - predicted[0]) < 1e-6, \
        "SHAP values don't sum to model output!"
def test_sum_match_gradient_boosting_regressor():
    """GBM regressor: SHAP sums must match the predictions."""
    import shap
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import GradientBoostingRegressor
    import sklearn
    X_train,X_test,Y_train,Y_test = train_test_split(*shap.datasets.adult(), test_size=0.2, random_state=0)
    clf = GradientBoostingRegressor(random_state=202, n_estimators=10, max_depth=10)
    clf.fit(X_train, Y_train)
    predicted = clf.predict(X_test)
    ex = shap.TreeExplainer(clf)
    shap_values = ex.shap_values(X_test)
    assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-6, \
        "SHAP values don't sum to model output!"
def test_single_row_gradient_boosting_regressor():
    """GBM regressor, single row: SHAP sums must match its prediction."""
    import shap
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import GradientBoostingRegressor
    import sklearn
    X_train,X_test,Y_train,Y_test = train_test_split(*shap.datasets.adult(), test_size=0.2, random_state=0)
    clf = GradientBoostingRegressor(random_state=202, n_estimators=10, max_depth=10)
    clf.fit(X_train, Y_train)
    predicted = clf.predict(X_test)
    ex = shap.TreeExplainer(clf)
    shap_values = ex.shap_values(X_test.iloc[0,:])
    assert np.abs(shap_values.sum() + ex.expected_value - predicted[0]) < 1e-6, \
        "SHAP values don't sum to model output!"
def test_provided_background_tree_path_dependent():
    """Background data + feature_dependence='tree_path_dependent' sums to margin."""
    try:
        import xgboost
    except:
        print("Skipping test_provided_background_tree_path_dependent!")
        return
    from sklearn.model_selection import train_test_split
    import numpy as np
    import shap
    np.random.seed(10)
    # first 100 iris rows: classes 0 and 1 only, so binary:logistic applies
    X,y = shap.datasets.iris()
    X = X[:100]
    y = y[:100]
    train_x, test_x, train_y, test_y = train_test_split(X, y, random_state=1)
    feature_names = ["a", "b", "c", "d"]
    dtrain = xgboost.DMatrix(train_x, label=train_y, feature_names=feature_names)
    dtest = xgboost.DMatrix(test_x, feature_names=feature_names)
    params = {
        'booster': 'gbtree',
        'objective': 'binary:logistic',
        'max_depth': 4,
        'eta': 0.1,
        'nthread': -1,
        'silent': 1
    }
    bst = xgboost.train(params=params, dtrain=dtrain, num_boost_round=100)
    explainer = shap.TreeExplainer(bst, train_x, feature_dependence="tree_path_dependent")
    # SHAP values + expected value must reproduce the margin output exactly
    diffs = explainer.expected_value + explainer.shap_values(test_x).sum(1) - bst.predict(dtest, output_margin=True)
    assert np.max(np.abs(diffs)) < 1e-6, "SHAP values don't sum to model output!"
    assert np.abs(explainer.expected_value - bst.predict(dtrain, output_margin=True).mean()) < 1e-6, "Bad expected_value!"
def test_provided_background_independent():
    """Background data + feature_dependence='independent' sums to margin."""
    try:
        import xgboost
    except:
        print("Skipping test_provided_background_independent!")
        return
    from sklearn.model_selection import train_test_split
    import numpy as np
    import shap
    np.random.seed(10)
    # first 100 iris rows: classes 0 and 1 only, so binary:logistic applies
    X,y = shap.datasets.iris()
    X = X[:100]
    y = y[:100]
    train_x, test_x, train_y, test_y = train_test_split(X, y, random_state=1)
    feature_names = ["a", "b", "c", "d"]
    dtrain = xgboost.DMatrix(train_x, label=train_y, feature_names=feature_names)
    dtest = xgboost.DMatrix(test_x, feature_names=feature_names)
    params = {
        'booster': 'gbtree',
        'objective': 'binary:logistic',
        'max_depth': 4,
        'eta': 0.1,
        'nthread': -1,
        'silent': 1
    }
    bst = xgboost.train(params=params, dtrain=dtrain, num_boost_round=100)
    explainer = shap.TreeExplainer(bst, train_x, feature_dependence="independent")
    # SHAP values + expected value must reproduce the margin output exactly
    diffs = explainer.expected_value + explainer.shap_values(test_x).sum(1) - bst.predict(dtest, output_margin=True)
    assert np.max(np.abs(diffs)) < 1e-6, "SHAP values don't sum to model output!"
    assert np.abs(explainer.expected_value - bst.predict(dtrain, output_margin=True).mean()) < 1e-6, "Bad expected_value!"
def test_provided_background_independent_prob_output():
    """model_output='probability' makes SHAP sums match probability predictions."""
    try:
        import xgboost
    except:
        print("Skipping test_provided_background_independent_prob_output!")
        return
    from sklearn.model_selection import train_test_split
    import numpy as np
    import shap
    np.random.seed(10)
    # first 100 iris rows: classes 0 and 1 only, so binary:logistic applies
    X,y = shap.datasets.iris()
    X = X[:100]
    y = y[:100]
    train_x, test_x, train_y, test_y = train_test_split(X, y, random_state=1)
    feature_names = ["a", "b", "c", "d"]
    dtrain = xgboost.DMatrix(train_x, label=train_y, feature_names=feature_names)
    dtest = xgboost.DMatrix(test_x, feature_names=feature_names)
    params = {
        'booster': 'gbtree',
        'objective': 'binary:logistic',
        'max_depth': 4,
        'eta': 0.1,
        'nthread': -1,
        'silent': 1
    }
    bst = xgboost.train(params=params, dtrain=dtrain, num_boost_round=100)
    explainer = shap.TreeExplainer(bst, train_x, feature_dependence="independent", model_output="probability")
    # here predict() is in probability space, matching model_output above
    diffs = explainer.expected_value + explainer.shap_values(test_x).sum(1) - bst.predict(dtest)
    assert np.max(np.abs(diffs)) < 1e-6, "SHAP values don't sum to model output!"
    assert np.abs(explainer.expected_value - bst.predict(dtrain).mean()) < 1e-6, "Bad expected_value!"
def test_single_tree_compare_with_kernel_shap():
    """ Compare with Kernel SHAP, which makes the same independence assumptions
    as Independent Tree SHAP. Namely, they both assume independence between the
    set being conditioned on, and the remainder set.
    """
    try:
        import xgboost
    except:
        print("Skipping test_single_tree_compare_with_kernel_shap!")
        return
    np.random.seed(10)
    n = 1000
    # linear target so a single deep tree can approximate it well
    X = np.random.normal(size=(n,7))
    b = np.array([-2,1,3,5,2,20,-5])
    y = np.matmul(X,b)
    max_depth = 6
    # train a model with single tree
    Xd = xgboost.DMatrix(X, label=y)
    model = xgboost.train({'eta':1,
                          'max_depth':max_depth,
                          'base_score': 0,
                          "lambda": 0},
                         Xd, 1)
    ypred = model.predict(Xd)
    # Compare for five random samples
    # NOTE(review): np.random.choice(X.shape[1]) draws the *row* index from
    # range(n_features)=7, so only rows 0-6 are ever explained — looks like
    # X.shape[0] was intended; verify before changing.
    for i in range(5):
        x_ind = np.random.choice(X.shape[1]); x = X[x_ind:x_ind+1,:]
        expl = shap.TreeExplainer(model, X, feature_dependence="independent")
        f = lambda inp : model.predict(xgboost.DMatrix(inp))
        expl_kern = shap.KernelExplainer(f, X)
        itshap = expl.shap_values(x)
        kshap = expl_kern.shap_values(x, nsamples=150)
        assert np.allclose(itshap,kshap), \
        "Kernel SHAP doesn't match Independent Tree SHAP!"
        assert np.allclose(itshap.sum() + expl.expected_value, ypred[x_ind]), \
        "SHAP values don't sum to model output!"
def test_several_trees():
    """ Make sure Independent Tree SHAP sums up to the correct value for
    larger models (20 trees).
    """
    try:
        import xgboost
    except:
        print("Skipping test_several_trees!")
        return
    np.random.seed(10)
    n = 1000
    # linear target as above, but boosted over 20 trees
    X = np.random.normal(size=(n,7))
    b = np.array([-2,1,3,5,2,20,-5])
    y = np.matmul(X,b)
    max_depth = 6
    # train a model with single tree
    Xd = xgboost.DMatrix(X, label=y)
    model = xgboost.train({'eta':1,
                          'max_depth':max_depth,
                          'base_score': 0,
                          "lambda": 0},
                         Xd, 20)
    ypred = model.predict(Xd)
    # Compare for five random samples
    # NOTE(review): same X.shape[1] row-index draw as above — verify intent.
    for i in range(5):
        x_ind = np.random.choice(X.shape[1]); x = X[x_ind:x_ind+1,:]
        expl = shap.TreeExplainer(model, X, feature_dependence="independent")
        itshap = expl.shap_values(x)
        assert np.allclose(itshap.sum() + expl.expected_value, ypred[x_ind]), \
        "SHAP values don't sum to model output!"
def test_single_tree_nonlinear_transformations():
    """ Make sure Independent Tree SHAP single trees with non-linear
    transformations.

    Explains both the margin (log-odds) output — cross-checked against
    KernelExplainer — and the probability output of a one-tree logistic
    XGBoost model.
    """
    # Supported non-linear transforms
    # (log_loss and mse are only referenced by the commented-out logloss
    # check at the bottom; kept for when that check is re-enabled)
    def sigmoid(x):
        return(1/(1+np.exp(-x)))
    def log_loss(yt,yp):
        return(-(yt*np.log(yp) + (1 - yt)*np.log(1 - yp)))
    def mse(yt,yp):
        return(np.square(yt-yp))
    try:
        import xgboost
    except:
        # was a stale copy-paste of "Skipping test_several_trees!"
        print("Skipping test_single_tree_nonlinear_transformations!")
        return
    np.random.seed(10)
    n = 1000
    X = np.random.normal(size=(n,7))
    b = np.array([-2,1,3,5,2,20,-5])
    y = np.matmul(X,b)
    # rescale the linear score into [0, 1] and sample binary labels from it
    y = y + abs(min(y))
    y = np.random.binomial(n=1,p=y/max(y))
    max_depth = 6
    # train a model with single tree
    Xd = xgboost.DMatrix(X, label=y)
    model = xgboost.train({'eta':1,
                          'max_depth':max_depth,
                          'base_score': y.mean(),
                          "lambda": 0,
                          "objective": "binary:logistic"},
                         Xd, 1)
    pred = model.predict(Xd,output_margin=True) # In margin space (log odds)
    trans_pred = model.predict(Xd) # In probability space
    expl = shap.TreeExplainer(model, X, feature_dependence="independent")
    f = lambda inp : model.predict(xgboost.DMatrix(inp), output_margin=True)
    expl_kern = shap.KernelExplainer(f, X)
    x_ind = 0; x = X[x_ind:x_ind+1,:]
    itshap = expl.shap_values(x)
    kshap = expl_kern.shap_values(x, nsamples=300)
    assert np.allclose(itshap.sum() + expl.expected_value, pred[x_ind]), \
        "SHAP values don't sum to model output on explaining margin!"
    assert np.allclose(itshap, kshap), \
        "Independent Tree SHAP doesn't match Kernel SHAP on explaining margin!"
    model.set_attr(objective="binary:logistic")
    expl = shap.TreeExplainer(model, X, feature_dependence="independent", model_output="probability")
    itshap = expl.shap_values(x)
    assert np.allclose(itshap.sum() + expl.expected_value, trans_pred[x_ind]), \
        "SHAP values don't sum to model output on explaining logistic!"
    # expl = shap.TreeExplainer(model, X, feature_dependence="independent", model_output="logloss")
    # itshap = expl.shap_values(x,y=y[x_ind])
    # margin_pred = model.predict(xgb.DMatrix(x),output_margin=True)
    # currpred = log_loss(y[x_ind],sigmoid(margin_pred))
    # assert np.allclose(itshap.sum(), currpred - expl.expected_value), \
    #     "SHAP values don't sum to model output on explaining logloss!"
def test_xgboost_classifier_independent_margin():
    """XGBClassifier + independent feature_dependence: SHAP sums match margin."""
    try:
        import xgboost
    except:
        # was a stale copy-paste of "Skipping test_several_trees!"
        print("Skipping test_xgboost_classifier_independent_margin!")
        return
    # train XGBoost model
    np.random.seed(10)
    n = 1000
    X = np.random.normal(size=(n,7))
    b = np.array([-2,1,3,5,2,20,-5])
    y = np.matmul(X,b)
    # rescale the linear score into [0, 1] and sample binary labels from it
    y = y + abs(min(y))
    y = np.random.binomial(n=1,p=y/max(y))
    model = xgboost.XGBClassifier(n_estimators=10, max_depth=5)
    model.fit(X, y)
    # explain the model's predictions using SHAP values
    e = shap.TreeExplainer(model, X, feature_dependence="independent", model_output="margin")
    shap_values = e.shap_values(X)
    assert np.allclose(shap_values.sum(1) + e.expected_value, model.predict(X, output_margin=True))
def test_xgboost_classifier_independent_probability():
    """XGBClassifier SHAP values (independent features, probability output) must
    sum to the predicted positive-class probability for every row."""
    try:
        import xgboost
    except ImportError:  # was a bare except:, which would also swallow unrelated errors
        # Fixed copy-pasted message that referred to test_several_trees.
        print("Skipping test_xgboost_classifier_independent_probability!")
        return

    # train XGBoost model on a synthetic linear-logit problem
    np.random.seed(10)
    n = 1000
    X = np.random.normal(size=(n, 7))
    b = np.array([-2, 1, 3, 5, 2, 20, -5])
    y = np.matmul(X, b)
    y = y + abs(min(y))
    y = np.random.binomial(n=1, p=y / max(y))
    model = xgboost.XGBClassifier(n_estimators=10, max_depth=5)
    model.fit(X, y)

    # explain the model's predictions using SHAP values
    e = shap.TreeExplainer(model, X, feature_dependence="independent", model_output="probability")
    shap_values = e.shap_values(X)
    # additivity in probability space against predict_proba's positive column
    assert np.allclose(shap_values.sum(1) + e.expected_value, model.predict_proba(X)[:, 1])
def test_front_page_xgboost_global_path_dependent():
    """Front-page XGBoost regression example using the global_path_dependent
    feature perturbation; checks SHAP additivity."""
    try:
        import xgboost
    except ImportError:  # was a bare except:, which would also swallow unrelated errors
        # Message updated to name this test rather than the front-page one.
        print("Skipping test_front_page_xgboost_global_path_dependent!")
        return

    # train XGBoost model
    X, y = shap.datasets.boston()
    model = xgboost.XGBRegressor()
    model.fit(X, y)

    # explain the model's predictions using SHAP values
    explainer = shap.TreeExplainer(model, X, feature_dependence="global_path_dependent")
    shap_values = explainer.shap_values(X)
    # additivity: per-row SHAP sum + expected value == model prediction
    assert np.allclose(shap_values.sum(1) + explainer.expected_value, model.predict(X))
def test_skopt_rf_et():
    """SHAP additivity for skopt's ExtraTrees ("ET") and RandomForest ("RF")
    surrogate models produced by forest_minimize."""
    try:
        import skopt
        import pandas as pd
    except ImportError:  # was a bare except:, which would also swallow unrelated errors
        print("Skipping test_skopt_rf_et!")
        return

    # Define an objective function for skopt to optimise.
    def objective_function(x):
        return x[0] ** 2 - x[1] ** 2 + x[1] * x[0]

    # Uneven bounds to prevent "objective has been evaluated" warnings.
    problem_bounds = [(-1e6, 3e6), (-1e6, 3e6)]

    # Don't worry about "objective has been evaluated" warnings.
    result_et = skopt.forest_minimize(objective_function, problem_bounds, n_calls=100, base_estimator="ET")
    result_rf = skopt.forest_minimize(objective_function, problem_bounds, n_calls=100, base_estimator="RF")

    et_df = pd.DataFrame(result_et.x_iters, columns=["X0", "X1"])
    # Explain the model's predictions (ExtraTrees surrogate).
    explainer_et = shap.TreeExplainer(result_et.models[-1], et_df)
    shap_values_et = explainer_et.shap_values(et_df)

    rf_df = pd.DataFrame(result_rf.x_iters, columns=["X0", "X1"])
    # Explain the model's predictions (Random forest).
    explainer_rf = shap.TreeExplainer(result_rf.models[-1], rf_df)
    shap_values_rf = explainer_rf.shap_values(rf_df)

    # additivity for both surrogate model types
    assert np.allclose(shap_values_et.sum(1) + explainer_et.expected_value, result_et.models[-1].predict(et_df))
    assert np.allclose(shap_values_rf.sum(1) + explainer_rf.expected_value, result_rf.models[-1].predict(rf_df))
|
shaoshanglqyREPO_NAMEshap-shapleyPATH_START.@shap-shapley_extracted@shap-shapley-master@tests@explainers@test_tree.py@.PATH_END.py
|
{
"filename": "setup_package.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/image_cutouts/first/tests/setup_package.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
def get_package_data():
    """Declare the test-data files to bundle with this subpackage."""
    # Ship every FITS fixture found under the tests' ``data`` directory.
    data_globs = [os.path.join('data', '*.fits')]
    return {'astroquery.image_cutouts.first.tests': data_globs}
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@image_cutouts@first@tests@setup_package.py@.PATH_END.py
|
{
"filename": "get_fof_halo_shapes.py",
"repo_name": "LSSTDESC/lsstdesc-diffsky",
"repo_path": "lsstdesc-diffsky_extracted/lsstdesc-diffsky-main/lsstdesc_diffsky/halo_information/get_fof_halo_shapes.py",
"type": "Python"
}
|
import os
import h5py
import numpy as np
from astropy.table import Table
# Filename template for per-snapshot halo-shape files (filled with the snapshot number).
shape_file_template = "shapes_{}_l.hdf5"
# HDF5 dataset names for the simple-inertia-tensor (SIT) eigen-decomposition
# computed about the center of mass (COM).
evalues = "eigenvalues_SIT_COM"
evectors = "eigenvectors_SIT_COM"
def get_halo_shapes(snapshot, hpx_fof_tags, hpx_reps, shape_dir, debug=True):
    """
    read file with halo shapes and return shape data for matches

    Parameters
    ----------
    snapshot: current snapshot (string)
    hpx_fof_tags: fof halo tags in current healpix snapshot
    hpx_reps: replication values aligned with hpx_fof_tags
    shape_dir: directory containing halo shape files
    debug: if True, report when the shape file is missing

    Returns
    -------
    shapes: output dict containing shape information for matching halo tags
    """
    shapes = {}
    # check for missing steps: substitute the nearest available snapshot
    if snapshot == "347":
        fsnap = "338"
    else:
        fsnap = "253" if snapshot == "259" else snapshot
    fn = os.path.join(shape_dir, shape_file_template.format(fsnap))
    if os.path.isfile(fn):
        with h5py.File(fn) as fh:
            fof_tags = fh["fof_halo_tag"]
            mask = np.in1d(fof_tags, hpx_fof_tags)  # duplicates possible
            nfof = np.count_nonzero(mask)
            if nfof > 0:
                reps = fh["replication"]
                mask &= np.in1d(reps, hpx_reps)  # duplicates possible
                # check foftag/replication pairs to verify they are matched
                # BUGFIX: the original used np.where(mask is True); ``is True``
                # compares the ndarray object to the True singleton (always
                # False), so no pair was ever verified. Use the mask directly.
                mask_locations = np.where(mask)[0]
                for mloc, foftag, rep in zip(
                    mask_locations, fof_tags[mask], reps[mask]
                ):
                    locs = np.where(hpx_fof_tags == foftag)[0]
                    found = False
                    n = 0
                    # accept the pair only if some healpix entry has both the
                    # same fof tag and the same replication value
                    while not found and n < len(locs):
                        found = hpx_reps[locs[n]] == rep
                        n += 1
                    mask[mloc] = found
                msg = "...Matched {} fof & replictn tags (/{} fof tags) for snapshot {}"
                print(msg.format(np.count_nonzero(mask), nfof, snapshot))
                for k, v in fh.items():
                    # skip raw inertia-tensor datasets; keep derived quantities
                    if "RIT" not in k and k[-3:] != "SIT":
                        shapes[k] = v[mask]
    else:
        if debug:
            print("...Skipping {} (not found)".format(fn))
    return shapes
def get_locations(shapes, fof_halo_tags, replications):
    """
    find position of fof_halo_tags in target_halo array

    Parameters
    ----------
    shapes: dict with aligned "fof_halo_tag" and "replication" arrays
    fof_halo_tags: array of target-halo fof tags to search in
    replications: array of replication values aligned with fof_halo_tags

    Returns
    -------
    np.ndarray of matched index positions; unmatched entries are skipped
    (so the result can be shorter than shapes["fof_halo_tag"])

    Notes
    -----
    searchsorted returns location of first occurrence and fails for multiple
    occurrences, hence the explicit per-entry search:
        orig_indices = target_halos['fof_halo_id'].argsort()
        insertions = np.searchsorted(target_halos['fof_halo_id'][orig_indices],
                                     shapes['fof_halo_tag'])
        locations = orig_indices[insertions]
    """
    locations = []
    for foftag, rep in zip(shapes["fof_halo_tag"], shapes["replication"]):
        loc = np.where(fof_halo_tags == foftag)[0]
        if len(loc) > 1:
            idx = np.where(replications[loc] == rep)[0]
            if len(idx) == 0:
                # BUGFIX: previously this fell into the duplicate branch and
                # crashed on loc[idx[0]] with an IndexError; report and skip.
                print("Error: entry not found for fof_tag {}".format(foftag))
                continue
            if len(idx) > 1:  # duplicate halo
                print(
                    "Warning: duplicate entries for fof_tag {} rep {}".format(
                        foftag, rep
                    )
                )
            locations.append(loc[idx[0]])
        elif len(loc) == 1:
            locations.append(loc[0])
        else:
            print("Error: entry not found for fof_tag {}".format(foftag))
    return np.asarray(locations)
def get_matched_shapes(
    shapes, target_halos, rep_key="rep", check_positions=False, Lbox=3000.0
):
    """
    modify array of target halo shape information to include
    host halo shape information if available

    Parameters
    ----------
    shapes: dict of available shape information
    target_halos: astropy table of target halo information
    rep_key: column name holding the replication value
    check_positions: if True, print min/max position offsets between catalogs
    Lbox: periodic box side length used for wrapping offsets
          # NOTE(review): units presumably Mpc/h -- confirm against the simulation

    Returns
    -------
    target_halos: modified table (updated in place and also returned)
    """
    locations = get_locations(
        shapes, target_halos["fof_halo_id"], target_halos[rep_key]
    )
    # sanity checks: the located rows must align one-to-one with the shape arrays
    assert np.array_equal(
        target_halos["fof_halo_id"][locations], shapes["fof_halo_tag"]
    ), "Fof tag arrays don't match"
    assert np.array_equal(
        target_halos[rep_key][locations], shapes["replication"]
    ), "Replication mismatch"
    # get axis lengths, convert to ratios and compute ellipticity and prolaticity
    # see code in triaxial_satellite_distributions/axis_ratio_model.py for definitions
    # reorder eigenvalues (ascending per halo)
    reorder = shapes[evalues].argsort()
    nvals = len(shapes[evalues])
    ordered_evals = np.asarray([shapes[evalues][i][reorder[i]] for i in range(nvals)])
    # axis lengths are sqrt of the sorted eigenvalues: a >= b >= c
    a = np.sqrt(ordered_evals[:, 2])
    b = np.sqrt(ordered_evals[:, 1])
    c = np.sqrt(ordered_evals[:, 0])
    b_to_a = b / a
    c_to_a = c / a
    # normalization for ellipticity/prolaticity
    s = 1.0 + b_to_a**2 + c_to_a**2
    e = (1.0 - c_to_a**2) / 2.0 / s
    p = (1.0 - 2 * b_to_a**2 + c_to_a**2) / 2.0 / s
    target_halos["axis_A_length"][locations] = a
    target_halos["axis_B_length"][locations] = b
    target_halos["axis_C_length"][locations] = c
    target_halos["halo_ellipticity"][locations] = e
    target_halos["halo_prolaticity"][locations] = p
    # save direction of major axis; note we transpose the evectors so that xyz
    # components are in rows
    ordered_evecs = np.asarray(
        [shapes[evectors][i].T[reorder[i]] for i in range(nvals)]
    )
    # select evector corresponding to largest evalue
    major_axis_evectors = ordered_evecs[:, 2]
    # check that normalization is correct (unit vectors)
    norms = np.asarray(
        [np.dot(major_axis_evectors[i], major_axis_evectors[i].T) for i in range(nvals)]
    )
    assert all(np.isclose(norms, 1)), "Major-axis eigenvector has incorrect norm"
    # save axis vector; z direction is already flipped to cosmoDC2 coordinates
    target_halos["axis_A_x"][locations] = major_axis_evectors[:, 0]
    target_halos["axis_A_y"][locations] = major_axis_evectors[:, 1]
    target_halos["axis_A_z"][locations] = major_axis_evectors[:, 2]
    # check positions: compare table coordinates with shape-file centers,
    # wrapping periodic offsets into [-Lbox/2, Lbox/2)
    if check_positions:
        for q in ["x", "y", "z"]:
            if "z" in q:  # flip sign of z component since positions were not flipped
                dq = np.mod(
                    target_halos[q][locations] + shapes["c" + q].flatten(), Lbox
                )
            else:
                dq = np.mod(
                    target_halos[q][locations] - shapes["c" + q].flatten(), Lbox
                )
            mask = abs(dq) > Lbox / 2
            dq[mask] = dq[mask] - Lbox
            print(
                "...Min/max for |d{}| = {:.2g}/{:.2g}:".format(
                    q, np.min(dq), np.max(dq)
                )
            )
    return target_halos
# Columns that get_matched_shapes fills in on the target-halo table.
new_col_names = (
    "axis_A_length",
    "axis_B_length",
    "axis_C_length",
    "halo_ellipticity",
    "halo_prolaticity",
    "axis_A_x",
    "axis_A_y",
    "axis_A_z",
)
def get_halo_table(file_handle):
    """Read one healpix group into an astropy Table (testing helper)."""
    table = Table()
    for key in file_handle.keys():
        table[key] = file_handle[key]
    table.rename_column("id", "fof_halo_id")
    # add zero-filled columns that get_matched_shapes will populate
    n_rows = len(table["fof_halo_id"])
    for col in new_col_names:
        table[col] = np.zeros(n_rows)
    return table
def run_shapes(h5, shape_dir):  # for testing
    """Collect matched shape dicts, one entry per snapshot group in ``h5``."""
    # match on fof tags and replication values for each snapshot
    return {
        snapshot: get_halo_shapes(
            snapshot, group["id"], group["rep"], shape_dir, debug=True
        )
        for snapshot, group in h5.items()
    }
def run_check(healpix_file, shape_dir):  # for testing
    """End-to-end check: match shapes and splice them into each snapshot's table."""
    fh = h5py.File(healpix_file)
    shapes = run_shapes(fh, shape_dir)
    for snapshot in fh.keys():
        target_halos = get_halo_table(fh[snapshot])
        # guard clause: nothing matched for this snapshot
        if not shapes[snapshot]:
            print("Skipping: no shape information for {}".format(snapshot))
            continue
        print("Processing {}".format(snapshot))
        target_halos = get_matched_shapes(
            shapes[snapshot], target_halos, check_positions=True
        )
|
LSSTDESCREPO_NAMElsstdesc-diffskyPATH_START.@lsstdesc-diffsky_extracted@lsstdesc-diffsky-main@lsstdesc_diffsky@halo_information@get_fof_halo_shapes.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "kboone/avocado",
"repo_path": "avocado_extracted/avocado-master/avocado/__init__.py",
"type": "Python"
}
|
from .settings import settings
from .utils import *
from .augment import *
from .astronomical_object import *
from .classifier import *
from .dataset import *
from .features import *
from .instruments import *
from . import plasticc

# Expose the load method of Dataset
load = Dataset.load

# Expose the load method of Classifier
load_classifier = Classifier.load

# NOTE: __all__ only limits ``from avocado import *``; the names imported
# above remain accessible as explicit attributes of the package.
__all__ = ["Dataset", "AstronomicalObject"]
|
kbooneREPO_NAMEavocadoPATH_START.@avocado_extracted@avocado-master@avocado@__init__.py@.PATH_END.py
|
{
"filename": "timing_utils.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/common/timing_utils.py",
"type": "Python"
}
|
#
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from contextlib import contextmanager
# Helper function for timing blocks of code.
@contextmanager
def timed(name):
    """
    For timing blocks of code.

    Parameters
    ----------
    name : str
        Label printed next to the elapsed time (seconds).

    Examples
    --------
    >>> with timed("Print Call"):
    ...     print("Hello, World") # doctest: +SKIP
    Hello, World
    ..Print Call : 0.0005
    """
    # perf_counter() is a monotonic, high-resolution clock; time.time() is
    # wall-clock and can jump backwards on system clock adjustments, which
    # would produce negative or wrong intervals.
    t0 = time.perf_counter()
    yield
    t1 = time.perf_counter()
    print("..%-24s: %8.4f" % (name, t1 - t0))
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@common@timing_utils.py@.PATH_END.py
|
{
"filename": "_minexponent.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/marker/colorbar/_minexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``bar.marker.colorbar.minexponent`` (min 0)."""

    def __init__(
        self, plotly_name="minexponent", parent_name="bar.marker.colorbar", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating upward.
        edit_type = kwargs.pop("edit_type", "colorbars")
        min_value = kwargs.pop("min", 0)
        super(MinexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=min_value,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@marker@colorbar@_minexponent.py@.PATH_END.py
|
{
"filename": "_hooks.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pluggy/py3/pluggy/_hooks.py",
"type": "Python"
}
|
"""
Internal hook annotation, representation and calling machinery.
"""
from __future__ import annotations
import inspect
import sys
from types import ModuleType
from typing import AbstractSet
from typing import Any
from typing import Callable
from typing import Final
from typing import final
from typing import Generator
from typing import List
from typing import Mapping
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypedDict
from typing import TypeVar
from typing import Union
import warnings
from ._result import Result
_T = TypeVar("_T")
_F = TypeVar("_F", bound=Callable[..., object])
# A namespace from which hookspecs/hookimpls are collected.
_Namespace = Union[ModuleType, type]
# Any object registered as a plugin.
_Plugin = object
# Signature of the routine that executes a hook call:
# (hook_name, hookimpls, caller_kwargs, firstresult) -> result or list of results.
_HookExec = Callable[
    [str, Sequence["HookImpl"], Mapping[str, object], bool],
    Union[object, List[object]],
]
# A hookimpl is either a plain function or an (old-style) wrapper generator.
_HookImplFunction = Callable[..., Union[_T, Generator[None, Result[_T], None]]]
class HookspecOpts(TypedDict):
    """Options for a hook specification."""

    # At runtime this is a plain dict; the TypedDict exists for static typing.

    #: Whether the hook is :ref:`first result only <firstresult>`.
    firstresult: bool
    #: Whether the hook is :ref:`historic <historic>`.
    historic: bool
    #: Whether the hook :ref:`warns when implemented <warn_on_impl>`.
    warn_on_impl: Warning | None
    #: Whether the hook warns when :ref:`certain arguments are requested
    #: <warn_on_impl>`.
    #:
    #: .. versionadded:: 1.5
    warn_on_impl_args: Mapping[str, Warning] | None
class HookimplOpts(TypedDict):
    """Options for a hook implementation."""

    # At runtime this is a plain dict; the TypedDict exists for static typing.

    #: Whether the hook implementation is a :ref:`wrapper <hookwrapper>`.
    wrapper: bool
    #: Whether the hook implementation is an :ref:`old-style wrapper
    #: <old_style_hookwrappers>`.
    hookwrapper: bool
    #: Whether validation against a hook specification is :ref:`optional
    #: <optionalhook>`.
    optionalhook: bool
    #: Whether to try to order this hook implementation :ref:`first
    #: <callorder>`.
    tryfirst: bool
    #: Whether to try to order this hook implementation :ref:`last
    #: <callorder>`.
    trylast: bool
    #: The name of the hook specification to match, see :ref:`specname`.
    specname: str | None
@final
class HookspecMarker:
    """Decorator for marking functions as hook specifications.

    Instantiate it with a project_name to get a decorator.
    Calling :meth:`PluginManager.add_hookspecs` later will discover all marked
    functions if the :class:`PluginManager` uses the same project name.
    """

    __slots__ = ("project_name",)

    def __init__(self, project_name: str) -> None:
        self.project_name: Final = project_name

    # The two overloads cover decorator-with-arguments and bare-decorator use.
    @overload
    def __call__(
        self,
        function: _F,
        firstresult: bool = False,
        historic: bool = False,
        warn_on_impl: Warning | None = None,
        warn_on_impl_args: Mapping[str, Warning] | None = None,
    ) -> _F: ...

    @overload  # noqa: F811
    def __call__(  # noqa: F811
        self,
        function: None = ...,
        firstresult: bool = ...,
        historic: bool = ...,
        warn_on_impl: Warning | None = ...,
        warn_on_impl_args: Mapping[str, Warning] | None = ...,
    ) -> Callable[[_F], _F]: ...

    def __call__(  # noqa: F811
        self,
        function: _F | None = None,
        firstresult: bool = False,
        historic: bool = False,
        warn_on_impl: Warning | None = None,
        warn_on_impl_args: Mapping[str, Warning] | None = None,
    ) -> _F | Callable[[_F], _F]:
        """If passed a function, directly sets attributes on the function
        which will make it discoverable to :meth:`PluginManager.add_hookspecs`.

        If passed no function, returns a decorator which can be applied to a
        function later using the attributes supplied.

        :param firstresult:
            If ``True``, the 1:N hook call (N being the number of registered
            hook implementation functions) will stop at I<=N when the I'th
            function returns a non-``None`` result. See :ref:`firstresult`.
        :param historic:
            If ``True``, every call to the hook will be memorized and replayed
            on plugins registered after the call was made. See :ref:`historic`.
        :param warn_on_impl:
            If given, every implementation of this hook will trigger the given
            warning. See :ref:`warn_on_impl`.
        :param warn_on_impl_args:
            If given, every implementation of this hook which requests one of
            the arguments in the dict will trigger the corresponding warning.
            See :ref:`warn_on_impl`.

            .. versionadded:: 1.5
        """

        def setattr_hookspec_opts(func: _F) -> _F:
            # historic hooks replay all results, so firstresult cannot apply
            if historic and firstresult:
                raise ValueError("cannot have a historic firstresult hook")
            opts: HookspecOpts = {
                "firstresult": firstresult,
                "historic": historic,
                "warn_on_impl": warn_on_impl,
                "warn_on_impl_args": warn_on_impl_args,
            }
            # Mark the function with "<project_name>_spec" so add_hookspecs finds it.
            setattr(func, self.project_name + "_spec", opts)
            return func

        if function is not None:
            return setattr_hookspec_opts(function)
        else:
            return setattr_hookspec_opts
@final
class HookimplMarker:
    """Decorator for marking functions as hook implementations.

    Instantiate it with a ``project_name`` to get a decorator.
    Calling :meth:`PluginManager.register` later will discover all marked
    functions if the :class:`PluginManager` uses the same project name.
    """

    __slots__ = ("project_name",)

    def __init__(self, project_name: str) -> None:
        self.project_name: Final = project_name

    # The two overloads cover decorator-with-arguments and bare-decorator use.
    @overload
    def __call__(
        self,
        function: _F,
        hookwrapper: bool = ...,
        optionalhook: bool = ...,
        tryfirst: bool = ...,
        trylast: bool = ...,
        specname: str | None = ...,
        wrapper: bool = ...,
    ) -> _F: ...

    @overload  # noqa: F811
    def __call__(  # noqa: F811
        self,
        function: None = ...,
        hookwrapper: bool = ...,
        optionalhook: bool = ...,
        tryfirst: bool = ...,
        trylast: bool = ...,
        specname: str | None = ...,
        wrapper: bool = ...,
    ) -> Callable[[_F], _F]: ...

    def __call__(  # noqa: F811
        self,
        function: _F | None = None,
        hookwrapper: bool = False,
        optionalhook: bool = False,
        tryfirst: bool = False,
        trylast: bool = False,
        specname: str | None = None,
        wrapper: bool = False,
    ) -> _F | Callable[[_F], _F]:
        """If passed a function, directly sets attributes on the function
        which will make it discoverable to :meth:`PluginManager.register`.

        If passed no function, returns a decorator which can be applied to a
        function later using the attributes supplied.

        :param optionalhook:
            If ``True``, a missing matching hook specification will not result
            in an error (by default it is an error if no matching spec is
            found). See :ref:`optionalhook`.
        :param tryfirst:
            If ``True``, this hook implementation will run as early as possible
            in the chain of N hook implementations for a specification. See
            :ref:`callorder`.
        :param trylast:
            If ``True``, this hook implementation will run as late as possible
            in the chain of N hook implementations for a specification. See
            :ref:`callorder`.
        :param wrapper:
            If ``True`` ("new-style hook wrapper"), the hook implementation
            needs to execute exactly one ``yield``. The code before the
            ``yield`` is run early before any non-hook-wrapper function is run.
            The code after the ``yield`` is run after all non-hook-wrapper
            functions have run. The ``yield`` receives the result value of the
            inner calls, or raises the exception of inner calls (including
            earlier hook wrapper calls). The return value of the function
            becomes the return value of the hook, and a raised exception becomes
            the exception of the hook. See :ref:`hookwrapper`.
        :param hookwrapper:
            If ``True`` ("old-style hook wrapper"), the hook implementation
            needs to execute exactly one ``yield``. The code before the
            ``yield`` is run early before any non-hook-wrapper function is run.
            The code after the ``yield`` is run after all non-hook-wrapper
            function have run The ``yield`` receives a :class:`Result` object
            representing the exception or result outcome of the inner calls
            (including earlier hook wrapper calls). This option is mutually
            exclusive with ``wrapper``. See :ref:`old_style_hookwrapper`.
        :param specname:
            If provided, the given name will be used instead of the function
            name when matching this hook implementation to a hook specification
            during registration. See :ref:`specname`.

        .. versionadded:: 1.2.0
            The ``wrapper`` parameter.
        """

        def setattr_hookimpl_opts(func: _F) -> _F:
            opts: HookimplOpts = {
                "wrapper": wrapper,
                "hookwrapper": hookwrapper,
                "optionalhook": optionalhook,
                "tryfirst": tryfirst,
                "trylast": trylast,
                "specname": specname,
            }
            # Mark the function with "<project_name>_impl" so register() finds it.
            setattr(func, self.project_name + "_impl", opts)
            return func

        if function is None:
            return setattr_hookimpl_opts
        else:
            return setattr_hookimpl_opts(function)
def normalize_hookimpl_opts(opts: HookimplOpts) -> None:
    """Fill in any missing hookimpl options with their defaults (in place)."""
    defaults = (
        ("tryfirst", False),
        ("trylast", False),
        ("wrapper", False),
        ("hookwrapper", False),
        ("optionalhook", False),
        ("specname", None),
    )
    for key, value in defaults:
        opts.setdefault(key, value)
# True when running under PyPy (affects implicit first-argument naming below).
_PYPY = hasattr(sys, "pypy_version_info")


def varnames(func: object) -> tuple[tuple[str, ...], tuple[str, ...]]:
    """Return tuple of positional and keyword argument names for a function,
    method, class or callable.

    In case of a class, its ``__init__`` method is considered.
    For methods the ``self`` parameter is not included.
    """
    if inspect.isclass(func):
        try:
            func = func.__init__
        except AttributeError:
            return (), ()
    elif not inspect.isroutine(func):  # callable object?
        try:
            func = getattr(func, "__call__", func)
        except Exception:
            return (), ()

    try:
        # func MUST be a function or method here or we won't parse any args.
        sig = inspect.signature(
            func.__func__ if inspect.ismethod(func) else func  # type:ignore[arg-type]
        )
    except TypeError:
        return (), ()

    # Only positional-capable parameters count; *args/**kwargs/kw-only are dropped.
    _valid_param_kinds = (
        inspect.Parameter.POSITIONAL_ONLY,
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
    )
    _valid_params = {
        name: param
        for name, param in sig.parameters.items()
        if param.kind in _valid_param_kinds
    }
    args = tuple(_valid_params)
    defaults = (
        tuple(
            param.default
            for param in _valid_params.values()
            if param.default is not param.empty
        )
        or None
    )

    if defaults:
        # Parameters with defaults are returned as the "kwargnames" tuple.
        index = -len(defaults)
        args, kwargs = args[:index], tuple(args[index:])
    else:
        kwargs = ()

    # strip any implicit instance arg
    # pypy3 uses "obj" instead of "self" for default dunder methods
    if not _PYPY:
        implicit_names: tuple[str, ...] = ("self",)
    else:
        implicit_names = ("self", "obj")
    if args:
        qualname: str = getattr(func, "__qualname__", "")
        # unbound functions defined inside a class still need self stripped,
        # hence the qualname dot check in addition to ismethod()
        if inspect.ismethod(func) or ("." in qualname and args[0] in implicit_names):
            args = args[1:]

    return args, kwargs
@final
class HookRelay:
    """Hook holder object for performing 1:N hook calls where N is the number
    of registered plugins."""

    # Hooks are stored as instance attributes, hence the explicit __dict__ slot.
    __slots__ = ("__dict__",)

    def __init__(self) -> None:
        """:meta private:"""

    if TYPE_CHECKING:
        # Tells type checkers that attribute access yields HookCaller objects.
        def __getattr__(self, name: str) -> HookCaller: ...


# Historical name (pluggy<=1.2), kept for backward compatibility.
_HookRelay = HookRelay

# Each history entry pairs the call kwargs with an optional per-result callback.
_CallHistory = List[Tuple[Mapping[str, object], Optional[Callable[[Any], None]]]]
class HookCaller:
    """A caller of all registered implementations of a hook specification."""

    __slots__ = (
        "name",
        "spec",
        "_hookexec",
        "_hookimpls",
        "_call_history",
    )

    def __init__(
        self,
        name: str,
        hook_execute: _HookExec,
        specmodule_or_class: _Namespace | None = None,
        spec_opts: HookspecOpts | None = None,
    ) -> None:
        """:meta private:"""
        #: Name of the hook getting called.
        self.name: Final = name
        self._hookexec: Final = hook_execute
        # The hookimpls list. The caller iterates it *in reverse*. Format:
        # 1. trylast nonwrappers
        # 2. nonwrappers
        # 3. tryfirst nonwrappers
        # 4. trylast wrappers
        # 5. wrappers
        # 6. tryfirst wrappers
        self._hookimpls: Final[list[HookImpl]] = []
        self._call_history: _CallHistory | None = None
        # TODO: Document, or make private.
        self.spec: HookSpec | None = None
        if specmodule_or_class is not None:
            assert spec_opts is not None
            self.set_specification(specmodule_or_class, spec_opts)

    # TODO: Document, or make private.
    def has_spec(self) -> bool:
        return self.spec is not None

    # TODO: Document, or make private.
    def set_specification(
        self,
        specmodule_or_class: _Namespace,
        spec_opts: HookspecOpts,
    ) -> None:
        # A caller can be bound to at most one specification.
        if self.spec is not None:
            raise ValueError(
                f"Hook {self.spec.name!r} is already registered "
                f"within namespace {self.spec.namespace}"
            )
        self.spec = HookSpec(specmodule_or_class, self.name, spec_opts)
        if spec_opts.get("historic"):
            self._call_history = []

    def is_historic(self) -> bool:
        """Whether this caller is :ref:`historic <historic>`."""
        return self._call_history is not None

    def _remove_plugin(self, plugin: _Plugin) -> None:
        # Remove the first (and only) hookimpl registered by ``plugin``.
        for i, method in enumerate(self._hookimpls):
            if method.plugin == plugin:
                del self._hookimpls[i]
                return
        raise ValueError(f"plugin {plugin!r} not found")

    def get_hookimpls(self) -> list[HookImpl]:
        """Get all registered hook implementations for this hook."""
        return self._hookimpls.copy()

    def _add_hookimpl(self, hookimpl: HookImpl) -> None:
        """Add an implementation to the callback chain."""
        # splitpoint is the boundary between nonwrappers and wrappers.
        for i, method in enumerate(self._hookimpls):
            if method.hookwrapper or method.wrapper:
                splitpoint = i
                break
        else:
            splitpoint = len(self._hookimpls)
        if hookimpl.hookwrapper or hookimpl.wrapper:
            start, end = splitpoint, len(self._hookimpls)
        else:
            start, end = 0, splitpoint

        if hookimpl.trylast:
            self._hookimpls.insert(start, hookimpl)
        elif hookimpl.tryfirst:
            self._hookimpls.insert(end, hookimpl)
        else:
            # find last non-tryfirst method
            i = end - 1
            while i >= start and self._hookimpls[i].tryfirst:
                i -= 1
            self._hookimpls.insert(i + 1, hookimpl)

    def __repr__(self) -> str:
        return f"<HookCaller {self.name!r}>"

    def _verify_all_args_are_provided(self, kwargs: Mapping[str, object]) -> None:
        # This is written to avoid expensive operations when not needed.
        if self.spec:
            for argname in self.spec.argnames:
                if argname not in kwargs:
                    notincall = ", ".join(
                        repr(argname)
                        for argname in self.spec.argnames
                        # Avoid self.spec.argnames - kwargs.keys() - doesn't preserve order.
                        if argname not in kwargs.keys()
                    )
                    warnings.warn(
                        "Argument(s) {} which are declared in the hookspec "
                        "cannot be found in this hook call".format(notincall),
                        stacklevel=2,
                    )
                    break

    def __call__(self, **kwargs: object) -> Any:
        """Call the hook.

        Only accepts keyword arguments, which should match the hook
        specification.

        Returns the result(s) of calling all registered plugins, see
        :ref:`calling`.
        """
        assert (
            not self.is_historic()
        ), "Cannot directly call a historic hook - use call_historic instead."
        self._verify_all_args_are_provided(kwargs)
        firstresult = self.spec.opts.get("firstresult", False) if self.spec else False
        # Copy because plugins may register other plugins during iteration (#438).
        return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)

    def call_historic(
        self,
        result_callback: Callable[[Any], None] | None = None,
        kwargs: Mapping[str, object] | None = None,
    ) -> None:
        """Call the hook with given ``kwargs`` for all registered plugins and
        for all plugins which will be registered afterwards, see
        :ref:`historic`.

        :param result_callback:
            If provided, will be called for each non-``None`` result obtained
            from a hook implementation.
        """
        assert self._call_history is not None
        kwargs = kwargs or {}
        self._verify_all_args_are_provided(kwargs)
        # Record the call so later registrations can replay it.
        self._call_history.append((kwargs, result_callback))
        # Historizing hooks don't return results.
        # Remember firstresult isn't compatible with historic.
        # Copy because plugins may register other plugins during iteration (#438).
        res = self._hookexec(self.name, self._hookimpls.copy(), kwargs, False)
        if result_callback is None:
            return
        if isinstance(res, list):
            for x in res:
                result_callback(x)

    def call_extra(
        self, methods: Sequence[Callable[..., object]], kwargs: Mapping[str, object]
    ) -> Any:
        """Call the hook with some additional temporarily participating
        methods using the specified ``kwargs`` as call parameters, see
        :ref:`call_extra`."""
        assert (
            not self.is_historic()
        ), "Cannot directly call a historic hook - use call_historic instead."
        self._verify_all_args_are_provided(kwargs)
        opts: HookimplOpts = {
            "wrapper": False,
            "hookwrapper": False,
            "optionalhook": False,
            "trylast": False,
            "tryfirst": False,
            "specname": None,
        }
        # Work on a copy so the permanent registration order is untouched.
        hookimpls = self._hookimpls.copy()
        for method in methods:
            hookimpl = HookImpl(None, "<temp>", method, opts)
            # Find last non-tryfirst nonwrapper method.
            i = len(hookimpls) - 1
            while i >= 0 and (
                # Skip wrappers.
                (hookimpls[i].hookwrapper or hookimpls[i].wrapper)
                # Skip tryfirst nonwrappers.
                or hookimpls[i].tryfirst
            ):
                i -= 1
            hookimpls.insert(i + 1, hookimpl)
        firstresult = self.spec.opts.get("firstresult", False) if self.spec else False
        return self._hookexec(self.name, hookimpls, kwargs, firstresult)

    def _maybe_apply_history(self, method: HookImpl) -> None:
        """Apply call history to a new hookimpl if it is marked as historic."""
        if self.is_historic():
            assert self._call_history is not None
            for kwargs, result_callback in self._call_history:
                res = self._hookexec(self.name, [method], kwargs, False)
                if res and result_callback is not None:
                    # XXX: remember firstresult isn't compat with historic
                    assert isinstance(res, list)
                    result_callback(res[0])
# Historical name (pluggy<=1.2), kept for backward compatibility.
_HookCaller = HookCaller


class _SubsetHookCaller(HookCaller):
    """A proxy to another HookCaller which manages calls to all registered
    plugins except the ones from remove_plugins."""

    # This class is unusual: it inherits from `HookCaller` so all of
    # the *code* runs in the class, but it delegates all underlying *data*
    # to the original HookCaller.
    # `subset_hook_caller` used to be implemented by creating a full-fledged
    # HookCaller, copying all hookimpls from the original. This had problems
    # with memory leaks (#346) and historic calls (#347), which make a proxy
    # approach better.
    # An alternative implementation is to use a `_getattr__`/`__getattribute__`
    # proxy, however that adds more overhead and is more tricky to implement.

    __slots__ = (
        "_orig",
        "_remove_plugins",
    )

    def __init__(self, orig: HookCaller, remove_plugins: AbstractSet[_Plugin]) -> None:
        self._orig = orig
        self._remove_plugins = remove_plugins
        # type: ignore because the parent declares these as read-only Finals.
        self.name = orig.name  # type: ignore[misc]
        self._hookexec = orig._hookexec  # type: ignore[misc]

    @property  # type: ignore[misc]
    def _hookimpls(self) -> list[HookImpl]:
        # Recomputed on each access so later (un)registrations are reflected.
        return [
            impl
            for impl in self._orig._hookimpls
            if impl.plugin not in self._remove_plugins
        ]

    @property
    def spec(self) -> HookSpec | None:  # type: ignore[override]
        return self._orig.spec

    @property
    def _call_history(self) -> _CallHistory | None:  # type: ignore[override]
        return self._orig._call_history

    def __repr__(self) -> str:
        return f"<_SubsetHookCaller {self.name!r}>"
@final
class HookImpl:
    """A hook implementation in a :class:`HookCaller`."""

    __slots__ = (
        "function",
        "argnames",
        "kwargnames",
        "plugin",
        "opts",
        "plugin_name",
        "wrapper",
        "hookwrapper",
        "optionalhook",
        "tryfirst",
        "trylast",
    )

    def __init__(
        self,
        plugin: _Plugin,
        plugin_name: str,
        function: _HookImplFunction[object],
        hook_impl_opts: HookimplOpts,
    ) -> None:
        """:meta private:"""
        #: The hook implementation function.
        self.function: Final = function
        # Introspect the function once; results are used for call-time binding.
        argnames, kwargnames = varnames(self.function)
        #: The positional parameter names of ``function```.
        self.argnames: Final = argnames
        #: The keyword parameter names of ``function```.
        self.kwargnames: Final = kwargnames
        #: The plugin which defined this hook implementation.
        self.plugin: Final = plugin
        #: The :class:`HookimplOpts` used to configure this hook implementation.
        self.opts: Final = hook_impl_opts
        #: The name of the plugin which defined this hook implementation.
        self.plugin_name: Final = plugin_name
        #: Whether the hook implementation is a :ref:`wrapper <hookwrapper>`.
        self.wrapper: Final = hook_impl_opts["wrapper"]
        #: Whether the hook implementation is an :ref:`old-style wrapper
        #: <old_style_hookwrappers>`.
        self.hookwrapper: Final = hook_impl_opts["hookwrapper"]
        #: Whether validation against a hook specification is :ref:`optional
        #: <optionalhook>`.
        self.optionalhook: Final = hook_impl_opts["optionalhook"]
        #: Whether to try to order this hook implementation :ref:`first
        #: <callorder>`.
        self.tryfirst: Final = hook_impl_opts["tryfirst"]
        #: Whether to try to order this hook implementation :ref:`last
        #: <callorder>`.
        self.trylast: Final = hook_impl_opts["trylast"]

    def __repr__(self) -> str:
        return f"<HookImpl plugin_name={self.plugin_name!r}, plugin={self.plugin!r}>"
@final
class HookSpec:
    """Parsed specification of a hook, taken from attribute *name* of *namespace*."""

    __slots__ = (
        "namespace",
        "function",
        "name",
        "argnames",
        "kwargnames",
        "opts",
        "warn_on_impl",
        "warn_on_impl_args",
    )

    def __init__(self, namespace: _Namespace, name: str, opts: HookspecOpts) -> None:
        # Resolve the spec function and split its parameters up front.
        spec_function: Callable[..., object] = getattr(namespace, name)
        positional, keyword = varnames(spec_function)
        self.namespace = namespace
        self.function = spec_function
        self.name = name
        self.argnames = positional
        self.kwargnames = keyword
        self.opts = opts
        # Optional warnings configured on the hookspec decorator.
        self.warn_on_impl = opts.get("warn_on_impl")
        self.warn_on_impl_args = opts.get("warn_on_impl_args")
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pluggy@py3@pluggy@_hooks.py@.PATH_END.py
|
{
"filename": "leastsq_rv_fit.py",
"repo_name": "mikecokina/elisa",
"repo_path": "elisa_extracted/elisa-master/scripts/analytics/leastsq_rv_fit.py",
"type": "Python"
}
|
import json
import os.path as op
import numpy as np
from elisa import units
from elisa.analytics import RVData, RVBinaryAnalyticsTask
from elisa.analytics.params.parameters import BinaryInitialParameters
# Fix the RNG seed so the simulated noise below is reproducible.
np.random.seed(1)
# Directory holding the reference RV data (rv.json) shipped beside this script.
DATA = op.join(op.abspath(op.dirname(__file__)), "data")
def get_rv():
    """Load the reference radial-velocity curves from ``data/rv.json``.

    Returns the parsed JSON object — a mapping of binary component name to
    its RV values (presumably m/s, matching the units used in ``main``).
    """
    fpath = op.join(DATA, "rv.json")
    with open(fpath, "r") as f:
        # json.load reads from the file object directly instead of the
        # original json.loads(f.read()) round-trip through a string.
        return json.load(f)
def main():
    """Generate noisy synthetic RV curves and fit them with least squares.

    Loads the reference radial velocities, perturbs them with gaussian
    noise of fixed sigma, wraps each component in an ``RVData`` container,
    and runs a least-squares ``RVBinaryAnalyticsTask`` fit, finally
    plotting the fitted model.
    """
    phases = np.arange(-0.6, 0.62, 0.02)
    rv = get_rv()

    # Perturb each component's curve with gaussian noise (sigma in m/s).
    # NOTE: a module-level max of the raw curves was computed here before
    # but never used; that dead statement has been removed.
    draw = np.random.normal
    n_points = len(phases)
    sigma = 2000
    rv = {comp: draw(val, sigma, n_points) for comp, val in rv.items()}
    rv_err = {comp: sigma * np.ones(val.shape) for comp, val in rv.items()}

    data = {comp: RVData(
        x_data=phases,
        y_data=rv[comp],
        y_err=rv_err[comp],
        x_unit=units.dimensionless_unscaled,
        y_unit=units.m / units.s,
    ) for comp in rv}

    # Initial values and bounds of the fitted parameters; entries marked
    # ``fixed`` are held constant during the fit.
    rv_initial = {
        "system": {
            "eccentricity": {
                "value": 0.2,
                "fixed": False,
                "min": 0.0,
                "max": 0.5
            },
            "asini": {
                "value": 15.0,
                "fixed": False,
                "min": 10.0,
                "max": 20.0
            },
            "mass_ratio": {
                "value": 3,
                "fixed": False,
                "min": 0.1,
                "max": 10
            },
            "argument_of_periastron": {
                "value": 0.0,
                "fixed": True
            },
            "gamma": {
                "value": 30000.0,
                "fixed": False,
                "min": 10000.0,
                "max": 50000.0
            },
            "period": {
                "value": 4.5,
                "fixed": True
            }
        }
    }
    rv_initial = BinaryInitialParameters(**rv_initial)
    task = RVBinaryAnalyticsTask(data=data, method='least_squares')
    task.fit(x0=rv_initial)
    task.plot.model()
# Run the demo fit only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
mikecokinaREPO_NAMEelisaPATH_START.@elisa_extracted@elisa-master@scripts@analytics@leastsq_rv_fit.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/document_compressors/__init__.py",
"type": "Python"
}
|
"""Test document compressor integrations."""
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@document_compressors@__init__.py@.PATH_END.py
|
{
"filename": "_dtick.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/carpet/aaxis/_dtick.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="dtick", parent_name="carpet.aaxis", **kwargs):
super(DtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@carpet@aaxis@_dtick.py@.PATH_END.py
|
{
"filename": "io.py",
"repo_name": "mj-will/nessai",
"repo_path": "nessai_extracted/nessai-main/src/nessai/utils/io.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Utilities related to loading files, saving files etc.
"""
import json
import os
import shutil
import numpy as np
from ..livepoint import live_points_to_dict
def is_jsonable(x):
    """Return True if an object is JSON serialisable, False otherwise.

    Based on: https://stackoverflow.com/a/53112659

    Parameters
    ----------
    x : obj
        Object to check.

    Returns
    -------
    bool
        Whether the object could be serialised by :code:`json.dumps`.
    """
    try:
        json.dumps(x)
    except (TypeError, OverflowError):
        # json.dumps rejects the object, so it is not serialisable.
        return False
    return True
class NessaiJSONEncoder(json.JSONEncoder):
    """JSON encoder that handles numpy types and other awkward objects.

    Based on: https://stackoverflow.com/a/57915246.

    Examples
    --------
    This class should be used in the ``cls`` argument::

        with open(filename, 'w') as wf:
             json.dump(d, wf, indent=4, cls=NessaiJSONEncoder)
    """

    def default(self, obj):
        """Return a serialisable representation of ``obj``."""
        # Unwrap numpy scalars and arrays into plain Python equivalents.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else json cannot handle falls back to its string form.
        if is_jsonable(obj):
            return super().default(obj)
        return str(obj)
def save_to_json(d, filename, **kwargs):
    """Save a dictionary to a JSON file.

    Kwargs are passed to :code:`json.dump`. Uses :code:`NessaiJSONEncoder` by
    default.

    Parameters
    ----------
    d : dict
        Dictionary to save.
    filename : str
        Filename (with the extension) to save the dictionary to. Should include
        the complete path.
    kwargs : Any
        Keyword arguments passed to :code:`json.dump`.
    """
    # Caller-supplied kwargs take precedence over these defaults.
    kwargs.setdefault("indent", 4)
    kwargs.setdefault("cls", NessaiJSONEncoder)
    with open(filename, "w") as fp:
        json.dump(d, fp, **kwargs)
def safe_file_dump(data, filename, module, save_existing=False):
    """Safely dump data to a .pickle file via a temporary file.

    See Bilby for the original implementation.

    Parameters
    ----------
    data :
        Data to dump.
    filename : str
        The file to dump to.
    module : {pickle, dill}
        The python module to use.
    save_existing : bool, optional
        If true move the existing file to <file>.old.
    """
    if save_existing and os.path.exists(filename):
        # Keep the previous version around before it is replaced.
        shutil.move(filename, filename + ".old")
    # Write to a temporary file first so an interrupted dump cannot leave
    # a half-written file at the final path, then move it into place.
    temp_filename = filename + ".temp"
    with open(temp_filename, "wb") as fp:
        module.dump(data, fp)
    shutil.move(temp_filename, filename)
def save_live_points(live_points, filename):
    """Save live points to a file using JSON.

    Live points are converted to a dictionary and then saved.

    Parameters
    ----------
    live_points : ndarray
        Live points to save.
    filename : str
        File to save to.
    """
    converted = live_points_to_dict(live_points)
    with open(filename, "w") as fh:
        json.dump(converted, fh, indent=4, cls=NessaiJSONEncoder)
def encode_for_hdf5(value):
    """Encode a value for the HDF5 file format.

    Parameters
    ----------
    value : Any
        Value to encode.

    Returns
    -------
    Any
        The value unchanged, or the sentinel string ``"__none__"`` when the
        value is None.
    """
    return "__none__" if value is None else value
def add_dict_to_hdf5_file(hdf5_file, path, d):
    """Recursively write a dictionary into an HDF5 file.

    Based on :code:`recursively_save_dict_contents_to_group` in bilby.

    Parameters
    ----------
    hdf5_file : h5py.File
        HDF5 file.
    path : str
        Path added to the keys of the dictionary.
    d : dict
        The dictionary to save.
    """
    for key, value in d.items():
        full_path = path + key
        if isinstance(value, dict):
            # Nested dictionaries become nested groups.
            add_dict_to_hdf5_file(hdf5_file, full_path + "/", value)
        else:
            hdf5_file[full_path] = encode_for_hdf5(value)
def save_dict_to_hdf5(d, filename):
    """Save a dictionary to a HDF5 file.

    Parameters
    ----------
    d : dict
        Dictionary to save.
    filename : str
        Filename (with the extension) to save the dictionary to. Should include
        the complete path.
    """
    # Imported here so h5py is only required when HDF5 output is actually used.
    import h5py

    with h5py.File(filename, "w") as f:
        add_dict_to_hdf5_file(f, "/", d)
|
mj-willREPO_NAMEnessaiPATH_START.@nessai_extracted@nessai-main@src@nessai@utils@io.py@.PATH_END.py
|
{
"filename": "_data.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/frame/_data.py",
"type": "Python"
}
|
import plotly.validators
class DataValidator(plotly.validators.DataValidator):
def __init__(self, plotly_name="data", parent_name="frame", **kwargs):
super(DataValidator, self).__init__(
plotly_name=plotly_name, parent_name=parent_name, **kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@frame@_data.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "Astro-Sean/autophot",
"repo_path": "autophot_extracted/autophot-master/main.py",
"type": "Python"
}
|
Astro-SeanREPO_NAMEautophotPATH_START.@autophot_extracted@autophot-master@main.py@.PATH_END.py
|
|
{
"filename": "_nticks.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/colorbar/_nticks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="nticks", parent_name="isosurface.colorbar", **kwargs
):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@colorbar@_nticks.py@.PATH_END.py
|
{
"filename": "12b. RFC Comparison HIDDEN REBALANCED (binary).ipynb",
"repo_name": "sidchaini/LightCurveDistanceClassification",
"repo_path": "LightCurveDistanceClassification_extracted/LightCurveDistanceClassification-main/notebooks/12. RFC Comparison HIDDEN REBALANCED/12b. RFC Comparison HIDDEN REBALANCED (binary).ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from mlxtend.feature_selection import (
SequentialFeatureSelector,
)
from mlxtend.evaluate import feature_importance_permutation
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
from sklearn.utils.estimator_checks import check_estimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_predict, train_test_split
from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef
import matplotlib.ticker as ticker
import os
os.chdir("../../")
from pathlib import Path
import json
import sys
sys.path.append("scripts")
import utils
import distclassipy as dcpy
cd = dcpy.Distance()
```
```python
with open("settings.txt") as f:
settings_dict = json.load(f)
np.random.seed(settings_dict["seed_choice"])
classification_letter = "b"
classification_problem = settings_dict["classification_problem"][classification_letter]
classes_to_keep = settings_dict["classes_to_keep"][classification_letter]
results_subfolder = f"{classification_letter}. {classification_problem}"
sns_dict = settings_dict["sns_dict"]
sns.set_theme(**sns_dict)
```
```python
# Load Data
X_df_FULL = pd.read_csv("data/X_df.csv", index_col=0)
y_df_FULL = pd.read_csv("data/y_df.csv", index_col=0)
```
```python
# Remove features to be dropped from previous notebook
with open(os.path.join("results", results_subfolder, "drop_features.txt")) as f:
bad_features = json.load(f) # manually selected
X_df_FULL = X_df_FULL.drop(bad_features, axis=1)
print(X_df_FULL.shape[1])
```
36
```python
# Keep only current classes
cl_keep_str = "_".join(classes_to_keep)
y_df = y_df_FULL[y_df_FULL["class"].isin(classes_to_keep)]
X_df = X_df_FULL.loc[y_df.index]
X = X_df.to_numpy()
y = y_df.to_numpy().ravel()
```
```python
locpath = os.path.join("results", results_subfolder, "random forest")
Path(locpath).mkdir(parents=True, exist_ok=True)
```
```python
# Make sure we're not overfitting
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, stratify=y, random_state=settings_dict["seed_choice"]
)
rfc = RandomForestClassifier(
random_state=settings_dict["seed_choice"], max_depth=3, n_jobs=-1
)
# Important to make sure it's not overfitting.
rfc.fit(X_train, y_train)
print(rfc.score(X_train, y_train))
print(rfc.score(X_test, y_test))
```
0.7710843373493976
0.7073170731707317
```python
y_pred = cross_val_predict(rfc, X, y, cv=5, n_jobs=-1)
```
```python
acc_train = accuracy_score(y, y_pred)
f1score_train = f1_score(y, y_pred, average="macro")
matthew_coef_train = matthews_corrcoef(y, y_pred)
print(
f"F1 = {f1score_train*100:.2f} %\nAccuracy = {acc_train*100:.2f} %\nMatthew's Coefficient = {matthew_coef_train*100:.2f} %"
)
ax = utils.plot_cm(y_true=y, y_pred=y_pred)
plt.title("Random Forest")
# plt.savefig(os.path.join(locpath, "confusion_matrix.pdf"), bbox_inches="tight")
plt.show()
```
F1 = 69.78 %
Accuracy = 69.98 %
Matthew's Coefficient = 40.51 %

```python
table1 = {"RSCVN": 81393, "BYDra": 84697}
total = sum(table1.values())
table1_ratio = {key: value / total for key, value in table1.items()}
max_ratio = max(table1_ratio.values())
sample_sizes = {
key: int(500 * value / max_ratio) for key, value in table1_ratio.items()
}
sample_sizes
```
{'RSCVN': 480, 'BYDra': 500}
```python
HIDDENX_df = pd.read_csv("data/HIDDENX_df_multiclass.csv", index_col=0)
HIDDENX_df = HIDDENX_df.loc[:, X_df.columns]
HIDDENX_df = HIDDENX_df.dropna()
```
```python
HIDDENX_df = pd.read_csv("data/HIDDENX_df_binary.csv", index_col=0)
HIDDENX_df = HIDDENX_df.loc[:, X_df.columns]
HIDDENX_df = HIDDENX_df.dropna()
HIDDENy_df = pd.read_csv("data/HIDDENy_df_binary.csv", index_col=0)
HIDDENy_df = HIDDENy_df.loc[HIDDENX_df.index]
# RESAMPLE TO BE OF RATIO OF REAL WORLD
HIDDENy_df_resampled = []
for cl, subdf in HIDDENy_df.groupby("class"):
HIDDENy_df_resampled.append(
subdf.sample(n=sample_sizes[cl], random_state=settings_dict["seed_choice"])
)
HIDDENy_df = pd.concat(HIDDENy_df_resampled).sample(
frac=1, random_state=settings_dict["seed_choice"]
)
HIDDENX_df = HIDDENX_df.loc[HIDDENy_df.index]
HIDDENy_df = HIDDENy_df.loc[HIDDENX_df.index]
```
```python
HIDDENX = HIDDENX_df.to_numpy()
HIDDENy = HIDDENy_df.to_numpy().ravel()
```
```python
rfc_pred = rfc.predict(HIDDENX)
```
```python
print("Random Forest")
acc = accuracy_score(y_true=HIDDENy, y_pred=rfc_pred)
f1score = f1_score(y_true=HIDDENy, y_pred=rfc_pred, average="macro")
matthew_coef = matthews_corrcoef(y_true=HIDDENy, y_pred=rfc_pred)
print("\tExpected Score from training:")
print(f"\t\tF1 ≈ {100*f1score_train:.2f} %")
print("\tActual score on hidden set:")
# print(f"\tAcc = {100*acc:.2f} %")
print(f"\t\tF1 = {100*f1score:.2f} %")
ax = utils.plot_cm(y_true=HIDDENy, y_pred=rfc_pred)
plt.title(f"Random forest (hidden set)")
plt.savefig(os.path.join(locpath, "hidden_cm.pdf"), bbox_inches="tight")
# plt.savefig(f"hidden_cm/{metric_str}.pdf",bbox_inches = 'tight')
plt.show()
```
Random Forest
Expected Score from training:
F1 ≈ 69.78 %
Actual score on hidden set:
F1 = 69.84 %

```python
HIDDENy_df.value_counts()
```
class
BYDra 500
RSCVN 480
Name: count, dtype: int64
```python
```
|
sidchainiREPO_NAMELightCurveDistanceClassificationPATH_START.@LightCurveDistanceClassification_extracted@LightCurveDistanceClassification-main@notebooks@12. RFC Comparison HIDDEN REBALANCED@12b. RFC Comparison HIDDEN REBALANCED (binary).ipynb@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/example/fortran/pm_distNorm/getNormRand/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import glob
import sys
linewidth = 2
fontsize = 17

# NOTE(review): `marker` (and `linewidth` above) are not referenced below;
# kept for parity with the sibling example scripts — confirm before removing.
marker =    { "CK" : "-"
            , "IK" : "."
            , "RK" : "-"
            }
xlab =  { "CK" : "Normal Random Number ( real/imaginary components )"
        , "IK" : "Normal Random Number ( integer-valued )"
        , "RK" : "Normal Random Number ( real-valued )"
        }
legends = [ r"$\mu = -5.,~\sigma = 1.0$"
          , r"$\mu = 0.0,~\sigma = 1.0$"
          , r"$\mu = 2.0,~\sigma = 3.0$"
          ]

# One histogram figure per output kind written by the Fortran example:
# integer (IK), complex (CK), and real (RK) normal random numbers.
for kind in ["IK", "CK", "RK"]:
    pattern = "*." + kind + ".txt"
    fileList = glob.glob(pattern)
    if len(fileList) == 1:
        df = pd.read_csv(fileList[0], delimiter = " ", header = None)
        fig = plt.figure(figsize = 1.25 * np.array([6.4, 4.8]), dpi = 200)
        ax = plt.subplot()
        # The original had an if/else on kind == "CK" whose two branches
        # were byte-identical; a single call covers all kinds.
        plt.hist( df.values[:,0:3]
                , histtype = "stepfilled"
                , alpha = 0.5
                , bins = 75
                )
        ax.legend ( legends
                  , fontsize = fontsize
                  )
        plt.xticks(fontsize = fontsize - 2)
        plt.yticks(fontsize = fontsize - 2)
        ax.set_xlabel(xlab[kind], fontsize = 17)
        ax.set_ylabel("Count", fontsize = 17)
        ax.set_title("Histograms of {} Normal random numbers".format(len(df.values[:, 0])), fontsize = 17)
        plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
        ax.tick_params(axis = "y", which = "minor")
        ax.tick_params(axis = "x", which = "minor")
        plt.savefig(fileList[0].replace(".txt",".png"))
    elif len(fileList) > 1:
        sys.exit("Ambiguous file list exists.")
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@example@fortran@pm_distNorm@getNormRand@main.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "pec27/hfof",
"repo_path": "hfof_extracted/hfof-master/setup.py",
"type": "Python"
}
|
from setuptools import setup, Extension
def find_version(path):
    """Extract ``__version__`` from the plain-ASCII Python file at *path*.

    Parameters:
        path: file containing a line like ``__version__ = "x.y.z"``.

    Returns:
        The version string.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    import re
    # Context manager closes the handle promptly; the original
    # open(path).read() left the file to the garbage collector.
    with open(path, 'rt') as fh:
        s = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              s, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Version not found")
# C extension holding the core friends-of-friends implementations.
lib = Extension('build.libhfof',
                sources = ["src/fof.c", "src/fof64.c", "src/testing.c", "src/periodic.c", "src/fof64_2d.c"])

# Package metadata; the version string is read out of hfof/version.py.
setup(name='hfof', version=find_version("hfof/version.py"),
      author="Peter Creasey",
      author_email="pec27",
      description='Friends of Friends with spatial hashing (hfof)',
      url="http://github.com/pec27/hfof",
      package_dir = {'hfof': 'hfof'},
      packages = ['hfof', 'hfof.tests'],
      include_package_data = True,
      license='MIT',
      install_requires=['numpy', 'scipy'],
      ext_modules = [lib],
      setup_requires=['pytest-runner'])
|
pec27REPO_NAMEhfofPATH_START.@hfof_extracted@hfof-master@setup.py@.PATH_END.py
|
{
"filename": "simtools.py",
"repo_name": "agreenbaum/gsgs",
"repo_path": "gsgs_extracted/gsgs-master/simtools.py",
"type": "Python"
}
|
#! /usr/bin/env python
import numpy as np
import sys,os
def mas2rad(mas):
    """Convert an angle in milli-arcseconds to radians."""
    return mas * 1e-3 / (3600 * 180 / np.pi)
def rad2mas(rad):
    """Convert an angle in radians to milli-arcseconds."""
    return rad * (3600 * 180 / np.pi) * 1e3
def makedisk(N, R, ctr=(0,0)):
    """Return an N x N array that is 1 inside a disk of radius R, else 0.

    Parameters
    ----------
    N : int
        Array size in pixels per side.
    R : float
        Disk radius in pixels; strictly interior pixels (r < R) are set to 1.
    ctr : tuple of float, optional
        (x, y) offset of the disk centre from the array centre.
    """
    # Integer division keeps M integral on both Python 2 and 3; the
    # original "/" became float division (and a float M) under Python 3.
    if N % 2 == 1:
        M = (N - 1) // 2
        xx = np.linspace(-M - ctr[0], M - ctr[0], N)
        yy = np.linspace(-M - ctr[1], M - ctr[1], N)
    if N % 2 == 0:
        M = N // 2
        xx = np.linspace(-M - ctr[0], M - ctr[0] - 1, N)
        yy = np.linspace(-M - ctr[1], M - ctr[1] - 1, N)
    (x, y) = np.meshgrid(xx, yy.T)
    r = np.sqrt((x**2) + (y**2))
    array = np.zeros((N, N))
    array[r < R] = 1
    return array
def makeA(nh, verbose=False):
    """
    Writes the "NRM matrix" that gets pseudo-inverted to provide
    (arbitrarily constrained) zero-mean phases of the holes.
    makeA taken verbatim from Anand's pseudoinverse.py

    input: nh - number of holes in NR mask
    input: verbose - True or False
    output: A matrix, nh columns, nh(nh-1)/2 rows (eg 21 for nh=7)

    Ax = b where x are the nh hole phases, b the nh(nh-1)/2 fringe phases,
    and A the NRM matrix

    Solve for the hole phases:
        Apinv = np.linalg.pinv(A)
    Solution for unknown x's:
        x = np.dot(Apinv, b)

    Following Noah Gamper's convention of fringe phases,
    for holes 'a b c d e f g', rows of A are
        (-1 +1  0  0  ...)
        ( 0 -1 +1  0  ...)
    which is implemented in makeA() as:
        matrixA[row,h2] = -1
        matrixA[row,h1] = +1
    To change the convention just reverse the signs of the 'ones'.

    When tested against Alex's NRM_Model.py "piston_phase" text output of fringe phases,
    these signs appear to be correct - anand@stsci.edu 12 Nov 2014
    anand@stsci.edu 29 Aug 2014
    """
    # print() calls work identically on Python 2 and 3; the original used
    # Python-2-only print statements.
    print("\nmakeA(): ")
    # nh(nh-1)/2 fringe-phase rows by nh hole-phase columns.  (The original
    # named these the other way around, which contradicted the docstring.)
    nrows = (nh * (nh - 1)) // 2
    ncols = nh
    matrixA = np.zeros((nrows, ncols))
    if verbose:
        print(matrixA)
    row = 0
    for h2 in range(nh):
        if verbose:
            print("")
        for h1 in range(h2 + 1, nh):
            if h1 >= nh:
                break
            else:
                if verbose:
                    # Matches the original two-statement output "R%2d:  %d-%d".
                    print("R%2d:  %d-%d" % (row, h1, h2))
                matrixA[row, h2] = -1
                matrixA[row, h1] = +1
                row += 1
    if verbose:
        print("")
    return matrixA
def makeK(nh, verbose=False):
    """
    As above, write the "kernel matrix" that converts fringe phases
    to closure phases. This can be pseudo-inverted to provide a
    subset of "calibrated" fringe phases (hole-based noise removed)

    input: nh - number of holes in NR mask
    input: verbose - True or False
    output: K matrix, nh(nh-1)/2 columns, comb(nh, 3) rows (eg 35 for nh=7)

    Kx = b, where:
        - x are the nh(nh-1)/2 calibrated fringe phases
        - b the comb(nh, 3) closure phases,
    and K the kernel matrix

    Solve for the "calibrated" phases:
        Kpinv = np.linalg.pinv(K)
    Solution for unknown x's:
        x = np.dot(Kpinv, b)

    Following the convention of fringe phase ordering above, which should look like:
        h12, h13, h14, ..., h23, h24, ....
    rows of K should look like:
        (+1 -1 0 0 0 0 0 0 +1 ...) e.g., h12 - h13 + h23
    which is implemented in makeK() as:
        matrixK[n_cp, f12] = +1
        matrixK[n_cp, f13] = -1
        matrixK[n_cp, f23] = +1

    agreenba@pha.jhu.edu 22 Aug 2015
    """
    print("\nmakeK(): ")
    # comb(nh, 3) closure-phase rows.  BUG FIX: the original called `comb`,
    # which was never imported (NameError at runtime); compute the binomial
    # coefficient nh-choose-3 directly instead.
    nrow = nh * (nh - 1) * (nh - 2) // 6
    # nh*(nh-1)/2 fringe-phase columns; integer division for Py2/Py3 parity.
    ncol = nh * (nh - 1) // 2

    # Row selectors into the fringe-phase ordering:
    # countk looks like [nh-1, (nh-1)+(nh-2), ...] -- cumulative block ends,
    # because there are nh-1 fringes starting at hole 0, nh-2 at hole 1, ...
    # counti is the same list shifted right with a leading 0 (block starts).
    countk = []
    val = 0
    for q in range(nh - 1):
        val = val + (nh - 1) - q
        countk.append(val)
    counti = [0] + countk[:-1]

    # Fill each closure-phase row with +1, +1, -1 on the three fringes that
    # form the triangle of holes (ii, ii+jj+1, ii+jj+kk+2).
    row = 0
    matrixK = np.zeros((nrow, ncol))
    for ii in range(nh - 2):
        for jj in range(nh - ii - 2):
            for kk in range(nh - ii - jj - 2):
                matrixK[row + kk, counti[ii] + jj] = 1
                matrixK[row + kk, countk[ii + jj] + kk] = 1
                matrixK[row + kk, counti[ii] + jj + kk + 1] = -1
            row = row + kk + 1
    if verbose:
        print("")
    return matrixK
def baselinify(ctrs):
    """Compute all pairwise baselines between hole centres.

    Parameters
    ----------
    ctrs : (N, 2) ndarray
        Hole centre coordinates.

    Returns
    -------
    uvs : (N*(N-1)/2, 2) ndarray
        Baseline vectors ctrs[i] - ctrs[j] for every pair i < j.
    bllengths : ndarray
        Euclidean length of each baseline.
    label : (N*(N-1)/2, 2) ndarray
        The (i, j) hole indices forming each baseline.
    """
    N = len(ctrs)
    npairs = N * (N - 1) // 2
    uvs = np.zeros((npairs, 2))
    label = np.zeros((npairs, 2))
    bllengths = np.zeros(npairs)
    nn = 0
    for ii in range(N - 1):
        for jj in range(N - ii - 1):
            other = ii + jj + 1
            delta = ctrs[ii] - ctrs[other]
            uvs[nn + jj] = delta
            bllengths[nn + jj] = np.sqrt(delta[0] ** 2 + delta[1] ** 2)
            label[nn + jj, :] = np.array([ii, other])
        nn = nn + jj + 1
    return uvs, bllengths, label
#def simulate_zern(fov = 80, mode=5, livepupilrad=None, mask=None):
# """
# Default Astig_1 0.1 radians
# """
# # Define our coordinates for wavefront aberration of choice
# y,x = np.indices((fov,fov)) - fov/2
# if livepupilrad is not None:
# rho = np.sqrt( (x*x) + (y*y) ) / livepupilrad
# else:
# rho = np.sqrt( (x*x) + (y*y) ) / (fov/2)
# theta = np.arctan2(y,x)
# if hasattr(mode, "__iter__"):
# aberr = np.zeros(rho.shape)
# frac = 1/len(mode)
# for md in mode:
# aberr += zern.zernikel(md,rho,theta)
# else:
# aberr = zern.zernikel(mode, rho, theta)
# if mask is not None:
# mask = mask
# else:
# mask = np.ones((fov,fov))
# mask_aberr = mask * aberr
# # print out rms of this aberration over circular pupil? - AS
# print np.var(aberr[rho<1])
# return mask_aberr, aberr
#
#def avg_piston(aberr, positions, mask, R = 20, point=False):
# """
### aberr is the full square array aberration
#
# positions are approximaete coordinates of mask holes recorded by eye
#
# mask is the NRM or other pupil mask for over which to compute average phases
#
# R - radius to enclose mask holes around coordinate points (also by eye for now)
# purpose to choose holes one by one.
#
# point -- should this return average phase values in a single pixel (TRUE)
# or over the whole mask support (FALSE). Recommended to remain FALSE.
# """
# fov = aberr.shape[0]
# avgphase = np.zeros(aberr.shape) #radians
# holemask = avgphase.copy()
# x,y = np.indices(holemask.shape)
# debug = avgphase.copy()
# new_support = avgphase.copy()
# for ii,pos in enumerate(positions):
# holemask = makedisk(fov, R, ctr = (pos[0] - fov/2, pos[1] - fov/2))
# debug+=holemask
# holemask[abs(holemask*mask)>0] = 1
# holemask[abs(holemask*mask)==0] = 0
# if not point:
# avgphase[holemask==1] = aberr[holemask==1].sum() / len(aberr[holemask==1])
# else:
# #int(np.sum(holemask*x)/holemask.sum()),int(np.sum(holemask*y)/holemask.sum())
# avgphase[int(np.sum(holemask*x)/holemask.sum()),\
# int(np.sum(holemask*y)/holemask.sum())] = \
# aberr[holemask==1].sum() / len(aberr[holemask==1])
# #plt.figure()
# #plt.imshow(avgphase)
# #plt.figure()
# #plt.imshow(debug)
# #plt.show()
# new_support[abs(avgphase)>0] = 1
#"""
# return avgphase, new_support
|
agreenbaumREPO_NAMEgsgsPATH_START.@gsgs_extracted@gsgs-master@simtools.py@.PATH_END.py
|
{
"filename": "ELL_map_class.py",
"repo_name": "alphalyncis/doppler-imaging-maxentropy",
"repo_path": "doppler-imaging-maxentropy_extracted/doppler-imaging-maxentropy-main/src/ELL_map_class.py",
"type": "Python"
}
|
"""
# Original author: Ian Crossfield (Python 2.7)
Planetary mapping routines.
phi = 0 faces toward the observer
phi = pi thus faces away from the observer
theta=pi/2 is the z-axis or 'north pole'
theta=-pi/2 is the 'south pole' -- this is in fact not true, theta=(0, pi)
"""
######################################################
# 23-03-2023 Xueqing Chen: added equal area grids
# 18-02-2020 Emma Bubb: change to run on Python 3
######################################################
# 2010-01-15 20:31 IJC: Started . . .
# 2013-08-07 11:04 IJMC: Added mu field for cells and maps
from numpy import pi
import numpy as np
import matplotlib.pyplot as plt
def polyarea(x, y):
    """Compute the area of a polygon whose vertices are at the points (x,y).

    :INPUTS:
      x, y : 1D sequences
        Cartesian coordinates of the (non-intersecting) polygon.

    :REFERENCE:
      http://mathworld.wolfram.com/PolygonArea.html
    """
    # 2013-05-29 12:18 IJMC: Created
    # Shoelace formula: accumulate signed cross products of consecutive
    # vertex pairs, wrapping around at the end.
    npts = max(len(x), len(y))
    total = 0.
    for k in range(npts):
        k_next = (k + 1) % npts
        total += x[k] * y[k_next] - x[k_next] * y[k]
    return np.abs(total * 0.5)
def make_latlon_grid(nphi, ntheta):
    """Make grids of phi and theta values with the specified number of
    points in each direction. Phi ranges from 0 to 2pi, and theta
    ranges from -pi/2 (in fact 0) to pi/2 (in fact pi).

    Returns meshgrid(phi, theta).
    """
    # Endpoints are included, hence nphi+1 / ntheta+1 samples per axis.
    phi_vals = np.linspace(0, 2 * pi, nphi + 1)
    theta_vals = np.linspace(0, pi, ntheta + 1)
    grid_phi, grid_theta = np.meshgrid(phi_vals, theta_vals)
    return grid_phi, grid_theta
def make_eqarea_grid(ncell, verbose=False):
    """Make grids of phi and theta values with the specified number of
    cells of roughly equal area. Phi ranges from 0 to 2pi, and theta
    ranges from -pi/2 (in fact 0) to pi/2 (in fact pi).

    Returns:
        phi: List of 1d arrays of size N_cells_per_row (cell-centre longitudes)
        theta: 1d array of size nlat (number of rows; cell-centre colatitudes)
        height: scalar angular height shared by all rows
        widths: 1d array of per-row angular cell widths
        ncell_true: actual number of cells in the constructed grid
    """
    # 2023-03-25 XQ: make equal area grids
    def find_number_of_rows(Ncell, m0=10):
        # Search over even row counts m for the layout whose total cell
        # count is closest to the requested Ncell; rows hold ~cos(latitude)
        # as many cells so each cell covers roughly equal area.
        diff_old = 1e10
        m = m0
        while True:
            if Ncell < 5:
                raise ValueError("Number of cells is too small. At least 5 cells are needed.")
            # Cells per row for the northern half; mirrored later.
            ncells_per_row = np.array([int(2 * m * np.cos(n*np.pi/m)) for n in range(1, int(m/2))])
            Ncell_new = 2 * np.sum(ncells_per_row)
            diff = Ncell - Ncell_new
            if np.abs(diff) > np.abs(diff_old): # right amount of cells, return the previous one
                return 2*len(ncells_per_row_old), Ncell_old, ncells_per_row_old
            if diff > 0: # need more cells
                m += 2
            else: # need less cells
                m -= 2
            # Remember this candidate; it is returned if the next step overshoots.
            Ncell_old = Ncell_new
            ncells_per_row_old = ncells_per_row
            diff_old = diff
    nlat, ncell_true, ncells_per_row = find_number_of_rows(ncell)
    # Mirror the northern-hemisphere rows onto the southern hemisphere.
    ncells_per_row = np.concatenate([np.flip(ncells_per_row), ncells_per_row])
    # Every row has the same angular height; cell widths vary per row.
    height = np.pi / nlat
    theta = np.array([height/2 + m * height for m in range(0, nlat)])
    widths = np.pi * 2 / ncells_per_row
    phi = [None for m in range(nlat)]
    for m in range(nlat):
        # Cell centres, evenly spaced in longitude within this row.
        phi[m] = np.array([widths[m]/2 + n * widths[m] for n in range(ncells_per_row[m])])
    if verbose:
        print(f"Created equa-area grid of {ncell_true} cells, in {len(theta)} latitude grids with {ncells_per_row} lontitude cells.")
    return phi, theta, height, widths, ncell_true
def makespot(spotlat, spotlon, spotrad, phi, theta):
    """Return a boolean map marking grid cells inside a circular spot.

    :INPUTS:
      spotlat : scalar
        Latitude of spot center, in degrees, from 0 to 180 (actually from -90 to 90)

      spotlon : scalar
        Longitude of spot center, in degrees, from 0 to 360

      spotrad : scalar
        Radius of spot, in degrees.

      phi, theta : 2D NumPy arrays
         output from :func:`makegrid`.  Theta ranges from -pi/2 to +pi/2.

    :EXAMPLE:
      ::

        import maps
        nlat, nlon = 60, 30
        phi, theta = maps.makegrid(nlat, nlon)
        # Make a small spot centered near, but not at, the equator:
        equator_spot = maps.makespot(0, 0, 23, phi, theta)
        # Make a larger spot centered near, but not at, the pole:
        pole_spot = maps.makespot(68, 0, 40, phi, theta)

      ::

        import maps
        nlat, nlon = 60, 30
        map = maps.map(nlat, nlon, i=0., deltaphi=0.)
        phi = map.corners_latlon.mean(2)[:,1].reshape(nlon, nlat)
        theta = map.corners_latlon.mean(2)[:,0].reshape(nlon, nlat) - np.pi/2.
        equator_spot = maps.makespot(0, 0, 23, phi, theta)
        pole_spot = maps.makespot(68, 0, 40, phi, theta)
    """
    # 2013-08-18 16:01 IJMC: Created
    # Degrees -> radians for all spot parameters.
    lat = spotlat * (np.pi / 180)
    lon = spotlon * (np.pi / 180)
    rad = spotrad * (np.pi / 180)
    halfpi = 0.5 * np.pi

    # Cartesian unit vector of every grid cell (theta measured from equator).
    xyz = np.array((
        np.cos(phi) * np.sin(theta + halfpi),
        np.sin(phi) * np.sin(theta + halfpi),
        np.cos(theta + halfpi),
    )).reshape(3, phi.size)

    # First rotate around z, aligning the spot with the sub-observer
    # meridian; then around y, carrying the spot centre to the pole.
    cz, sz = np.cos(np.pi - lon), np.sin(np.pi - lon)
    cy, sy = np.cos(lat + halfpi), np.sin(lat + halfpi)
    zrot = np.array([[cz, -sz, 0.], [sz, cz, 0.], [0., 0., 1.]])
    yrot = np.array([[cy, 0., sy], [0., 1., 0.], [-sy, 0., cy]])
    xyz = np.dot(np.dot(yrot, zrot), xyz)

    # Angular distance from the rotated pole; inside the spot if <= radius.
    ang = np.arccos(xyz[2])
    spotmap = ang.T <= rad
    return spotmap.reshape(phi.shape)
def profile_spotmap(param, *args, **kw):
    """Model line profiles, assuming a simple multi-spot model.

    phi, theta, R = args[0:3]
    startemp, spottemp, spotlat, spotlon, spotrad = param[0:5]
    OR
    startemp, temp1, lat1, lon1, rad1, temp2, lat2, lon2, rad2 = param[0:9]
    """
    # 2013-08-19 09:59 IJMC: Created
    # 2013-08-27 10:45 IJMC: Updated to multi-spot-capable
    phi, theta, R = args[0:3]
    # Four parameters per spot follow the leading stellar temperature.
    nspots = int((len(param) - 1) / 4)
    startemp = param[0]
    map_pixels = np.ones(phi.shape) * startemp
    for k in range(nspots):
        spottemp, spotlat, spotlon, spotrad = param[1 + 4 * k:5 + 4 * k]
        spot_mask = makespot(spotlat, spotlon, spotrad, phi, theta).astype(np.float32)
        # Replace the base temperature by the spot temperature inside the spot.
        map_pixels -= spot_mask * (startemp - spottemp)
    return np.dot(map_pixels.ravel(), R)
class MapCell:
    """One quadrilateral cell of a spherical surface map.

    Holds the cell's corner coordinates (Cartesian xyz and (theta, phi)),
    the projected radial velocities at those corners, and the derived
    projected area and mu, as filled in by :meth:`get_projected_area`
    and :meth:`get_mu`.  The +x axis points toward the observer (corners
    with x <= 0 are on the far side).
    """
    def __init__(self):
        # Cartesian (x, y, z) coordinates of the 4 corners, one per column.
        self.corners = np.zeros((3, 4), dtype=float)
        # (theta, phi) spherical coordinates of the 4 corners.
        self.corners_latlon = np.zeros((2, 4), dtype=float)
        self.vcorners = np.zeros((3, 4), dtype=float)
        # Projected radial-velocity values at the 4 corners.
        self.rvcorners = np.zeros(4, dtype=float)
        # Counterparts of the fields above with hidden corners set to NaN
        # (or moved onto the limb) by get_projected_area().
        self.visible_corners = np.zeros((3, 4), dtype=float)
        self.visible_vcorners = np.zeros((3, 4), dtype=float)
        self.visible_rvcorners = np.zeros(4, dtype=float)
        self.projected_area = 0.
        self.mu = 0.
        return
    def get_mu(self):
        """Set ``self.mu``: the x component of the cell's unit normal,
        i.e. the cosine of the angle to the observer's line of sight."""
        ### Compute mu:
        # Least-squares solution of corners.T @ n = 1 yields a vector
        # normal to the best-fit plane through the four corners.
        normal_vector = np.dot(np.linalg.pinv(self.corners.T), np.ones(4))
        self.mu = normal_vector[0] / np.sqrt(np.dot(normal_vector, normal_vector))
        return
    def get_projected_area(self, inc):
        """Set ``self.projected_area`` and ``self.visible_corners`` for
        inclination ``inc`` (radians).  Fully hidden cells get zero area;
        limb cells have their hidden corners moved onto the limb first."""
        if (self.corners[0] <= 0).all():
            # cell is hidden, on the back side.
            area = 0.
            self.visible_corners = self.corners * np.nan
        elif (self.corners[0] > 0).all():
            # cell is completely visible, on the front side.
            self.visible_corners = self.corners
            y = self.corners[1]
            z = self.corners[2]
            # Order corners by angle around their centroid so the area
            # formula sees a simple (non-self-crossing) polygon.
            inds = np.argsort(np.arctan2(z-z.mean(), y-y.mean()))
            area = polyarea(y[inds], z[inds])
        else:
            # Cell is only partially visible (on the limb). Find the
            # nearest point on the limb, with the same latitude as
            # each vertex.
            visible_corners = self.corners.copy()
            back_indices = (visible_corners[0] < 0).nonzero()[0]
            for ii in back_indices:
                newx = 0. # on the limb!
                newy = np.sin(self.corners_latlon[0,ii]) * \
                    np.sqrt(1. - np.tan(inc)**2 / np.tan(self.corners_latlon[0,ii])**2)
                # Keep the replacement point on the same side (sign of y)
                # as the original corner.
                if visible_corners[1,ii]/newy < 0:
                    newy *= -1
                newz = np.cos(self.corners_latlon[0,ii]) / np.cos(inc)
                visible_corners[:, ii] = newx, newy, newz
            if not (np.isfinite(visible_corners)).all():
                self.visible_corners = self.corners * np.nan
                area = 0
                #print("Non-finite projected corners; need to fix this.") # EB updated print statement
            else:
                self.visible_corners = visible_corners
                y = self.visible_corners[1]
                z = self.visible_corners[2]
                #yz = np.array(zip(y,z)) #2017-01-10 13:04 IJMC: removed: np.unique(zip(y,z))
                #inds = np.argsort(np.arctan2(yz[:,1]-yz[:,1].mean(), yz[:,0]-yz[:,0].mean()))
                #area = polyarea(yz[inds,0], yz[inds,1])
                inds = np.argsort(np.arctan2(z-z.mean(), y-y.mean()))
                area = polyarea(y[inds], z[inds])
                #area = 0.
        self.projected_area = area
        return
class Map:
"""Very handy spherical mapping object.
:INPUTS:
nlon, nlat : scalars
If mod=='latlon', these inputs specify the number of grid cells
across map, in latitude and longitude.
inc : scalar
the inclination, is in units of radians. Zero means we see the
object equator-on; pi/2 means we see it pole-on.
type : str
'latlon' or 'eqarea'.
deltaphi : scalar
Rotation of map, specified in radians.
:OUTPUT:
A map-class object with various useful fields. Most of these
fields refer to the coordinates (either Cartesian or spherical
polar) or the projected radial velocities at the corners of
specified grid cells, or the approximate projected areas of
these grid cells.
:NOTES:
I have *not* been as careful as I should be in this code -- my
original goal was speed rather than exactitude. This means that
some values are returned as 'nan', and the projected areas are
only roughly correct. There's plenty of room for improvement!
"""
# 2013-05-29 09:37 IJMC: Created
# 2013-08-07 11:05 IJMC: Added mu field for maps and cells
# 2014-08-07 15:00 IJMC: Updated documentation -- exactly 1 year later!
    def __init__(self, nlon=20, nlat=10, type='latlon', deltaphi=0, inc=0, verbose=False):
        """Build the grid of MapCell objects and aggregate their per-cell
        quantities into array-valued fields.

        See the class docstring for the meaning of nlon, nlat, type,
        deltaphi (radians), and inc (radians).
        """
        self.type = type
        self.nlon = nlon
        self.nlat = nlat
        self.ncell = nlon*nlat
        if self.type == "eqarea":
            # The equal-area gridder chooses its own cell count, which may
            # differ from the requested nlon*nlat.
            phi, theta, height, widths, ncell_true = make_eqarea_grid(self.ncell, verbose=verbose)
            self.ncell = ncell_true
        self.deltaphi = deltaphi
        self.inc = inc
        self.cells = []
        self.visible_corners = np.zeros((self.ncell, 3, 4), dtype=float)
        self.corners = np.zeros((self.ncell, 3, 4), dtype=float) # corners in xyz
        self.corners_latlon = np.zeros((self.ncell, 2, 4), dtype=float) # must be the latlon before rot to get correct area
        self.rvcorners = np.zeros((self.ncell, 4), dtype=float) # corners in y direction
        # (0-1) proportional to the projected radial velocity at y coord of that corner
        # rvcorners / np.cos(inc) * vsini [km/s] = rv
        self.visible_rvcorners = np.zeros((self.ncell, 4), dtype=float) # replace the non-visible by nan
        self.projected_area = np.zeros(self.ncell, dtype=float)
        self.mu = np.zeros(self.ncell, dtype=float)
        self.phi = np.zeros(self.ncell)
        self.theta = np.zeros(self.ncell)
        # Rotation about the y axis by the inclination angle.
        rot_matrix = np.array([
            [np.cos(inc), 0, -np.sin(inc)],
            [ 0, 1, 0],
            [np.sin(inc), 0, np.cos(inc)]
        ])
        if self.type == 'latlon':
            ### Initialize coordinate system:
            #phi0 = np.arange(0, self.nlon+1) * (2*np.pi/self.nlon)
            #theta0 = np.arange(0, self.nlat+1) * (np.pi/self.nlat)
            #phi, theta = np.meshgrid(phi0, theta0)
            #print(phi.shape, theta.shape)
            phi, theta = make_latlon_grid(nlon, nlat)
            ### Rotate by deltaPhi:
            phi1 = (phi + deltaphi).ravel()
            theta1 = theta.ravel()
            ### Convert to x1, y1, z1:
            xyz1 = np.vstack((np.sin(theta1) * np.cos(phi1), \
                              np.sin(theta1) * np.sin(phi1), \
                              np.cos(theta1)))
            ### Rotate by inclination angle i:
            xyz2 = np.dot(rot_matrix, xyz1)
            xyz3 = xyz2.reshape(3, nlat+1, nlon+1)
            kk = 0
            for ii in range(self.nlat):
                for jj in range(self.nlon):
                    cell = MapCell()
                    # Each cell takes the 2x2 patch of grid corners at (ii, jj).
                    cell.corners = xyz3[:, ii:ii+2, jj:jj+2].reshape(3,4)
                    cell.corners_latlon = np.vstack((theta[ii:ii+2,jj:jj+2].ravel(), phi[ii:ii+2,jj:jj+2].ravel()))
                    cell.rvcorners = xyz3[1,ii:ii+2,jj:jj+2].ravel() * np.cos(inc)
                    cell.get_projected_area(inc)
                    cell.get_mu()
                    cell.visible_rvcorners = cell.visible_corners[1] * np.cos(inc)
                    self.cells.append(cell)
                    self.corners[kk] = cell.corners
                    self.visible_corners[kk] = cell.visible_corners
                    self.projected_area[kk] = cell.projected_area
                    self.mu[kk] = cell.mu
                    self.corners_latlon[kk] = cell.corners_latlon
                    self.rvcorners[kk] = cell.rvcorners
                    self.visible_rvcorners[kk] = cell.visible_rvcorners
                    kk += 1
        elif self.type == 'eqarea':
            # For the equal-area grid the number of longitude cells varies
            # per latitude ring, so nlon becomes an array (one entry per ring).
            self.nlat = len(theta)
            self.nlon = np.array([len(row) for row in phi])
            phi_corners = [np.zeros((4, self.nlon[m])) for m in range(self.nlat)]
            theta_corners = np.zeros((4, self.nlat))
            for m in range(self.nlat):
                for n in range(self.nlon[m]):
                    phi_corners[m][:,n] = np.array([
                        phi[m][n]-widths[m]/2, phi[m][n]+widths[m]/2, # corner 0, 1
                        phi[m][n]-widths[m]/2, phi[m][n]+widths[m]/2 # corner 2, 3
                    ])
                theta_corners[:,m] = np.array([
                    theta[m]-height/2, theta[m]-height/2, # corner 0, 1
                    theta[m]+height/2, theta[m]+height/2 # corner 2, 3
                ])
            ### Rotate by deltaPhi:
            phi_corners_2d = np.concatenate([phi_corners[m] for m in range(self.nlat)], axis=1)
            phi_corners_2d_rot = np.concatenate([phi_corners[m] + deltaphi for m in range(self.nlat)], axis=1)
            theta_corners_2d = np.concatenate([np.tile(theta_corners[:,m], (self.nlon[m], 1)).T for m in range(self.nlat)], axis=1)
            phi_corners_1d = phi_corners_2d_rot.ravel()
            theta_corners_1d = theta_corners_2d.ravel()
            ### Convert to x1, y1, z1:
            xyz_1d = np.stack((
                np.sin(theta_corners_1d) * np.cos(phi_corners_1d),
                np.sin(theta_corners_1d) * np.sin(phi_corners_1d),
                np.cos(theta_corners_1d)
            ))
            ### Rotate by inclination angle i:
            xyz_1d_rot = np.dot(rot_matrix, xyz_1d)
            xyz_2d = xyz_1d_rot.reshape(3, 4, ncell_true)
            # Re-split the flat corner arrays into per-ring lists.
            start = 0
            xyz_3d = []
            theta_corners_3d = []
            phi_corners_3d = []
            for m in range(self.nlat):
                xyz_3d.append(xyz_2d[:, :, start:start+self.nlon[m]])
                theta_corners_3d.append(theta_corners_2d[:, start:start+self.nlon[m]])
                phi_corners_3d.append(phi_corners_2d[:, start:start+self.nlon[m]])
                start = start + self.nlon[m]
            kk=0
            for m in range(self.nlat):
                for n in range(self.nlon[m]):
                    cell = MapCell()
                    cell.corners = xyz_3d[m][:,:,n]
                    cell.corners_latlon = np.vstack([theta_corners_3d[m][:,n], phi_corners_3d[m][:,n]])
                    cell.rvcorners = xyz_3d[m][1,:,n] * np.cos(inc)
                    cell.get_projected_area(inc)
                    cell.get_mu()
                    cell.visible_rvcorners = cell.visible_corners[1] * np.cos(inc)
                    self.cells.append(cell)
                    self.corners[kk] = cell.corners
                    self.visible_corners[kk] = cell.visible_corners
                    self.projected_area[kk] = cell.projected_area
                    self.mu[kk] = cell.mu
                    self.corners_latlon[kk] = cell.corners_latlon
                    self.rvcorners[kk] = cell.rvcorners
                    self.visible_rvcorners[kk] = cell.visible_rvcorners
                    kk += 1
        return None
def get_vprofile(self, v):
"""Compute velocity profile for normalized velocity values v.
:INPUTS:
v : NumPy array
Velocity normalized by the maximum rotation velocity
observed; i.e., to convert v to true velocities, multiply
by 2piR/P.
"""
# 2013-05-29 12:28 IJMC: Created
profile = np.zeros(v.shape, dtype=float)
for ii in range(self.ncell): # EB: xrange to range
vmin, vmax = self.visible_rvcorners[ii].min(), self.visible_rvcorners[ii].max()
profile[(v > vmin) * (v <= vmax)] += 1 #self.projected_area[ii]
return profile
    def plot_map_cells(self):
        '''Plot the map cells on a Mollweide projection.'''
        fig = plt.figure(figsize=(6,5))
        ax = fig.add_subplot(111, projection="mollweide")
        ax.grid(True)
        # Only draw cells with a positive projected area (i.e. visible ones).
        good = (self.projected_area>0)
        for k in range(self.ncell):
            lats = self.corners_latlon[k][0]
            lons = self.corners_latlon[k][1]
            # Reorder corners (0, 1, 3, 2) to walk the quadrilateral's
            # perimeter, and shift into Mollweide's centered axis ranges.
            y = np.array([lats[0], lats[1], lats[3], lats[2]]) - np.pi/2
            x = np.array([lons[0], lons[1], lons[3], lons[2]]) - np.pi
            # Plot the polygon
            if good[k]:
                poly = plt.Polygon(np.column_stack((x, y)), facecolor='gray', edgecolor='black')
                ax.add_patch(poly)
                # Label each cell with its flat index for debugging.
                ax.text(x.mean(), y.mean(), f"{k}", size=5)
        # Set plot parameters
        ax.set_xticklabels([30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330], fontsize=8)
|
alphalyncisREPO_NAMEdoppler-imaging-maxentropyPATH_START.@doppler-imaging-maxentropy_extracted@doppler-imaging-maxentropy-main@src@ELL_map_class.py@.PATH_END.py
|
{
"filename": "plot_mars-coordinate-frame.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/examples/coordinates/plot_mars-coordinate-frame.py",
"type": "Python"
}
|
r"""
============================================
Create a new coordinate frame class for Mars
============================================
This example describes how to subclass and define a custom coordinate frame for a
planetary body which can be described by a geodetic or bodycentric representation,
as discussed in :ref:`astropy:astropy-coordinates-design` and
:ref:`astropy-coordinates-create-geodetic`.
Note that we use the frame here only to store coordinates. To use it to determine, e.g.,
where to point a telescope on Earth to observe Olympus Mons, one would need to add the
frame to the transfer graph, which is beyond the scope of this example.
To do this, first we need to define a subclass of a
`~astropy.coordinates.BaseGeodeticRepresentation` and
`~astropy.coordinates.BaseBodycentricRepresentation`, then a subclass of
`~astropy.coordinates.BaseCoordinateFrame` using the previously defined
representations.
*By: Chiara Marmo, Marten van Kerkwijk*
*License: BSD*
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import matplotlib.pyplot as plt
import numpy as np
from astropy.visualization import astropy_mpl_style, quantity_support
plt.style.use(astropy_mpl_style)
quantity_support()
##############################################################################
# Import the packages necessary for coordinates
import astropy.units as u
from astropy.coordinates.baseframe import BaseCoordinateFrame
from astropy.coordinates.representation import CartesianRepresentation
from astropy.coordinates.representation.geodetic import (
BaseBodycentricRepresentation,
BaseGeodeticRepresentation,
)
##############################################################################
# The first step is to create a new class, and make it a subclass of
# `~astropy.coordinates.BaseGeodeticRepresentation`.
# Geodetic latitudes are used, and longitudes span from 0 to 360 degrees (east positive).
# It represents a best fit of the Mars spheroid to the Martian geoid (areoid):
class MarsBestFitAeroid(BaseGeodeticRepresentation):
    """
    A spheroidal (geodetic) representation of Mars that minimizes deviations
    with respect to the areoid, following
    Ardalan A. A, R. Karimi, and E. W. Grafarend (2010)
    https://doi.org/10.1007/s11038-009-9342-7
    """
    # Best-fit spheroid parameters from Ardalan et al. (2010).
    _equatorial_radius = 3395.4280 * u.km
    _flattening = 0.5227617843759314 * u.percent
#####################################################################################
# Now let's define a new geodetic representation obtained from MarsBestFitAeroid but
# described by planetocentric latitudes.
class MarsBestFitOcentricAeroid(BaseBodycentricRepresentation):
    """
    A spheroidal planetocentric representation of Mars that minimizes
    deviations with respect to the areoid, following
    Ardalan A. A, R. Karimi, and E. W. Grafarend (2010)
    https://doi.org/10.1007/s11038-009-9342-7
    """
    # Same best-fit spheroid constants as above, but the base class makes
    # latitudes planetocentric rather than geodetic.
    _equatorial_radius = 3395.4280 * u.km
    _flattening = 0.5227617843759314 * u.percent
#############################################################################
# As a comparison we define a new spherical frame representation, we could
# have based it on `~astropy.coordinates.BaseBodycentricRepresentation` too.
class MarsSphere(BaseGeodeticRepresentation):
    """
    A spherical representation of Mars (zero flattening), used as the
    reference model in the comparison plots.
    """
    _equatorial_radius = 3395.4280 * u.km
    _flattening = 0.0 * u.percent
#############################################################################
# The new planetary body-fixed reference system will be described using the
# previously defined representations.
class MarsCoordinateFrame(BaseCoordinateFrame):
    """
    A reference system for Mars.

    Used here only to store coordinates; it is not added to the transform
    graph, so no conversions to other frames are available.
    """
    name = "Mars"
#############################################################################
# Now we plot the differences between each component of the cartesian
# representation with respect to the spherical model, assuming the point on the
# surface of the body (``height = 0``)
# All three frames sample the same longitude/latitude grid over the whole
# globe, so build it once instead of repeating the linspace calls.
lon = np.linspace(0, 360, 128) * u.deg
lat = np.linspace(-90, 90, 128) * u.deg
mars_sphere = MarsCoordinateFrame(
    lon=lon,
    lat=lat,
    representation_type=MarsSphere,
)
mars = MarsCoordinateFrame(
    lon=lon,
    lat=lat,
    representation_type=MarsBestFitAeroid,
)
mars_ocentric = MarsCoordinateFrame(
    lon=lon,
    lat=lat,
    representation_type=MarsBestFitOcentricAeroid,
)
# Convert each frame's surface points (height = 0) to Cartesian xyz.
xyz_sphere = mars_sphere.represent_as(CartesianRepresentation)
xyz = mars.represent_as(CartesianRepresentation)
xyz_ocentric = mars_ocentric.represent_as(CartesianRepresentation)
# Plot the difference of each spheroid model from the sphere, in km.
fig, ax = plt.subplots(2, subplot_kw={"projection": "3d"})
ax[0].scatter(*((xyz - xyz_sphere).xyz << u.km))
ax[0].tick_params(labelsize=8)
ax[0].set(xlabel="x [km]", ylabel="y [km]", zlabel="z [km]")
ax[0].set_title("Mars-odetic spheroid difference from sphere")
ax[1].scatter(*((xyz_ocentric - xyz_sphere).xyz << u.km))
ax[1].tick_params(labelsize=8)
ax[1].set(xlabel="x [km]", ylabel="y [km]", zlabel="z [km]")
ax[1].set_title("Mars-ocentric spheroid difference from sphere")
plt.show()
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@examples@coordinates@plot_mars-coordinate-frame.py@.PATH_END.py
|
{
"filename": "component.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/glue/core/component.py",
"type": "Python"
}
|
import logging
import numpy as np
import pandas as pd
import shapely
from glue.core.coordinate_helpers import dependent_axes, pixel2world_single_axis
from glue.utils import shape_to_string, coerce_numeric, categorical_ndarray
try:
import dask.array as da
DASK_INSTALLED = True
except ImportError:
DASK_INSTALLED = False
__all__ = ['Component', 'DerivedComponent', 'CategoricalComponent',
'CoordinateComponent', 'DateTimeComponent', 'ExtendedComponent']
class Component(object):
    """ Stores the actual, numerical information for a particular quantity
    Data objects hold one or more components, accessed via
    ComponentIDs. All Components in a data set must have the same
    shape and number of dimensions
    Parameters
    ----------
    data : :class:`~numpy.ndarray`
        The data to store.
    units : `str`, optional
        Unit label.
    Notes
    -----
    Instead of instantiating Components directly, consider using
    :meth:`Component.autotyped`, which chooses a subclass most appropriate
    for the data type.
    """
    def __init__(self, data, units=None):
        # The physical units of the data
        self.units = units
        # The actual data
        # subclasses may pass non-arrays here as placeholders.
        if isinstance(data, np.ndarray):
            # dtype kind 'M' means numpy.datetime64 -- those need the
            # dedicated DateTimeComponent subclass.
            if data.dtype.kind == 'M':
                raise TypeError('DateTimeComponent should be used instead of Component for np.datetime64 arrays')
            data = coerce_numeric(data)
            data.setflags(write=False) # data is read-only
        self._data = data
    @property
    def units(self):
        # Normalize "no units" (None) to an empty string for display.
        return self._units or ''
    @units.setter
    def units(self, value):
        if value is None:
            self._units = None
        else:
            self._units = str(value)
    @property
    def data(self):
        """The underlying :class:`~numpy.ndarray`"""
        return self._data
    @property
    def shape(self):
        """Tuple of array dimensions"""
        return self._data.shape
    @property
    def ndim(self):
        """The number of dimensions"""
        return len(self._data.shape)
    def __getitem__(self, key):
        logging.debug("Using %s to index data of shape %s", key, self.shape)
        return self._data[key]
    @property
    def numeric(self):
        """
        Whether or not the datatype is numeric.
        """
        # We need to be careful here to not just access self.data since that
        # would force the computation of the whole component in the case of
        # derived components, so instead we specifically only get the first
        # element.
        return np.can_cast(self[(0,) * self.ndim].dtype, complex)
    @property
    def categorical(self):
        """
        Whether or not the datatype is categorical.
        """
        return False
    @property
    def datetime(self):
        """
        Whether or not or not the datatype is a date/time
        """
        return False
    @property
    def extended(self):
        """
        Whether or not or not the datatype represents an extended region
        """
        return False
    def __str__(self):
        return "%s with shape %s" % (self.__class__.__name__, shape_to_string(self.shape))
    def jitter(self, method=None):
        # Only meaningful for categorical data; see CategoricalComponent.
        raise NotImplementedError
    def to_series(self, **kwargs):
        """ Convert into a pandas.Series object.
        Parameters
        ----------
        **kwargs :
            All kwargs are passed to the Series constructor.
        Returns
        -------
        :class:`pandas.Series`
        """
        return pd.Series(self.data.ravel(), **kwargs)
    @classmethod
    def autotyped(cls, data, units=None):
        """
        Automatically choose between Component and CategoricalComponent,
        based on the input data type.
        Parameters
        ----------
        data : array-like
            The data to pack into a Component.
        units : `str`, optional
            Unit description.
        Returns
        -------
        :class:`Component` (or subclass)
        """
        # Dask arrays stay lazy: wrap them without materializing.
        if DASK_INSTALLED and isinstance(data, da.Array):
            return DaskComponent(data, units=units)
        data = np.asarray(data)
        # Object arrays (mixed/unknown content) are treated as categorical.
        if np.issubdtype(data.dtype, np.object_):
            return CategoricalComponent(data, units=units)
        # 'M' is the dtype kind for datetime64 arrays.
        if data.dtype.kind == 'M':
            return DateTimeComponent(data)
        n = coerce_numeric(data)
        # Heuristic: a string array counts as categorical unless more than
        # half of its entries parse to finite numbers.
        thresh = 0.5
        try:
            use_categorical = np.issubdtype(data.dtype, np.character) and \
                np.isfinite(n).mean() <= thresh
        except TypeError:  # isfinite not supported. non-numeric dtype
            use_categorical = True
        if use_categorical:
            return CategoricalComponent(data, units=units)
        else:
            return Component(n, units=units)
class DerivedComponent(Component):
    """
    A component whose values are produced on demand by a link function.

    Parameters
    ----------
    data : :class:`~glue.core.data.Data`
        The data object handed to the link when values are computed.
    link : :class:`~glue.core.component_link.ComponentLink`
        The link that carries out the computation.
    units : `str`, optional
        Unit description.
    """
    def __init__(self, data, link, units=None):
        super(DerivedComponent, self).__init__(data, units=units)
        self._link = link
    def set_parent(self, data):
        """Reassign the Data object that this DerivedComponent operates on."""
        self._data = data
    @property
    def link(self):
        """The :class:`~glue.core.component_link.ComponentLink` doing the work."""
        return self._link
    @property
    def data(self):
        """Evaluate the link on the parent data and return the resulting array."""
        return self._link.compute(self._data)
    def __getitem__(self, key):
        # Let the link restrict computation to the requested subset.
        return self._link.compute(self._data, key)
class CoordinateComponent(Component):
    """
    Components associated with pixel or world coordinates
    The numerical values are computed on the fly.
    """
    def __init__(self, data, axis, world=False):
        # world=True -> values are world coordinates along `axis`;
        # world=False -> values are plain pixel indices along `axis`.
        self.world = world
        self._data = data
        self.axis = axis
    @property
    def data(self):
        """The coordinate values, computed on demand."""
        return self._calculate()
    @property
    def units(self):
        # Pixel axes are dimensionless.  World axes take their unit from the
        # dataset's coords; note the numpy-vs-WCS axis-order reversal.
        if self.world:
            return self._data.coords.world_axis_units[self._data.ndim - 1 - self.axis] or ''
        else:
            return ''
    def _calculate(self, view=None):
        """Compute coordinate values, optionally restricted to ``view``."""
        if self.world:
            # Calculating the world coordinates can be a bottleneck if we aren't
            # careful, so we need to make sure that if not all dimensions depend
            # on each other, we use smart broadcasting.
            # The unoptimized way to do this for an N-dimensional dataset would
            # be to construct N-dimensional arrays of pixel values for each
            # coordinate. However, if we are computing the coordinates for axis
            # i, and axis i is not dependent on any other axis, then the result
            # will be an N-dimensional array where the same 1D array of
            # coordinates will be repeated over and over.
            # To optimize this, we therefore essentially consider only the
            # dependent dimensions and then broacast the result to the full
            # array size at the very end.
            # view=None actually adds a dimension which is never what we really
            # mean, at least in glue.
            if view is None:
                view = Ellipsis
            # If the view is a tuple or list of arrays, we should actually just
            # convert these straight to world coordinates since the indices
            # of the pixel coordinates are the pixel coordinates themselves.
            if isinstance(view, (tuple, list)) and isinstance(view[0], np.ndarray):
                axis = self._data.ndim - 1 - self.axis
                return pixel2world_single_axis(self._data.coords, *view[::-1],
                                               world_axis=axis)
            # For 1D arrays, slice can be given as a single slice but we need
            # to wrap it in a list to make the following code work correctly,
            # as it is then consistent with higher-dimensional cases.
            if isinstance(view, slice) or np.isscalar(view):
                view = [view]
            # Some views, e.g. with lists of integer arrays, can give arbitrarily
            # complex (copied) subsets of arrays, so in this case we don't do any
            # optimization
            if view is Ellipsis:
                optimize_view = False
            else:
                # (for/else: optimize only when every view element is a
                # scalar or a slice)
                for v in view:
                    if not np.isscalar(v) and not isinstance(v, slice):
                        optimize_view = False
                        break
                else:
                    optimize_view = True
            pix_coords = []
            dep_coords = dependent_axes(self._data.coords, self.axis)
            final_slice = []
            final_shape = []
            for i in range(self._data.ndim):
                if optimize_view and i < len(view) and np.isscalar(view[i]):
                    final_slice.append(0)
                else:
                    final_slice.append(slice(None))
                # We set up a 1D pixel axis along that dimension.
                pix_coord = np.arange(self._data.shape[i])
                # If a view was specified, we need to take it into account for
                # that axis.
                if optimize_view and i < len(view):
                    pix_coord = pix_coord[view[i]]
                    if not np.isscalar(view[i]):
                        final_shape.append(len(pix_coord))
                else:
                    final_shape.append(self._data.shape[i])
                if i not in dep_coords:
                    # The axis is not dependent on this instance's axis, so we
                    # just compute the values once and broadcast along this
                    # dimension later.
                    pix_coord = 0
                pix_coords.append(pix_coord)
            # We build the list of N arrays, one for each pixel coordinate
            pix_coords = np.meshgrid(*pix_coords, indexing='ij', copy=False)
            # Finally we convert these to world coordinates
            axis = self._data.ndim - 1 - self.axis
            world_coords = pixel2world_single_axis(self._data.coords,
                                                   *pix_coords[::-1],
                                                   world_axis=axis)
            # We get rid of any dimension for which using the view should get
            # rid of that dimension.
            if optimize_view:
                world_coords = world_coords[tuple(final_slice)]
            # We then broadcast the final array back to what it should be
            world_coords = np.broadcast_to(world_coords, tuple(final_shape))
            # We apply the view if we weren't able to optimize before
            if optimize_view:
                return world_coords
            else:
                return world_coords[view]
        else:
            # Pixel coordinates: broadcast an index grid and pick this axis.
            slices = [slice(0, s, 1) for s in self.shape]
            grids = np.broadcast_arrays(*np.ogrid[slices])
            if view is not None:
                grids = [g[view] for g in grids]
            return grids[self.axis]
    @property
    def shape(self):
        """Tuple of array dimensions."""
        return self._data.shape
    @property
    def ndim(self):
        """Number of dimensions"""
        return len(self._data.shape)
    def __getitem__(self, key):
        return self._calculate(key)
    def __lt__(self, other):
        # World components order before pixel ones; ties break by axis.
        if self.world == other.world:
            return self.axis < other.axis
        return self.world
    def __gluestate__(self, context):
        """Serialize to a plain dict for glue's session persistence."""
        return dict(axis=self.axis, world=self.world)
    @classmethod
    def __setgluestate__(cls, rec, context):
        # Rebuilt with data=None as a placeholder; NOTE(review): presumably
        # the loader reattaches the parent data afterwards -- confirm.
        return cls(None, rec['axis'], rec['world'])
    @property
    def numeric(self):
        return True
    @property
    def categorical(self):
        return False
class CategoricalComponent(Component):
    """
    Container for categorical data.
    Parameters
    ----------
    categorical_data : :class:`~numpy.ndarray`
        The underlying array.
    categories : `iterable`, optional
        List of unique values in the data.
    jitter : `str`, optional
        Strategy for jittering the data.
    units : `str`, optional
        Unit description.
    """
    def __init__(self, categorical_data, categories=None, jitter=None, units=None):
        # TODO: deal with custom categories
        super(CategoricalComponent, self).__init__(None, units)
        # categorical_ndarray wraps the labels and derives integer codes.
        self._data = categorical_ndarray(categorical_data, copy=False, categories=categories)
        if self._data.ndim < 1:
            raise ValueError("Categorical Data must be at least 1-dimensional")
        # Apply the requested jitter strategy up front (None is a no-op).
        self.jitter(method=jitter)
    @property
    def codes(self):
        """
        The index of the category for each value in the array.
        """
        return self._data.codes
    @property
    def labels(self):
        """
        The original categorical data.
        """
        return self._data.view(np.ndarray)
    @property
    def categories(self):
        """
        The categories.
        """
        return self._data.categories
    @property
    def data(self):
        return self._data
    @property
    def numeric(self):
        return False
    @property
    def categorical(self):
        return True
    def jitter(self, method=None):
        """
        Jitter the codes so the density of points can be easily seen in a
        scatter plot for example.
        Parameters
        ----------
        method : {None, 'uniform'}
            If `None`, no jittering is done (or any jittering is undone).
            If ``'uniform'``, the codes are randomized by a uniformly
            distributed random variable.
        """
        self._data.jitter(method=method)
        # Remember the strategy so it can be re-applied or serialized.
        self.jitter_method = method
    def to_series(self, **kwargs):
        """
        Convert into a pandas.Series object.
        This will be converted as a dtype=np.object!
        Parameters
        ----------
        **kwargs :
            All kwargs are passed to the Series constructor.
        Returns
        -------
        :class:`pandas.Series`
        """
        return pd.Series(self.labels, dtype=object, **kwargs)
class DateTimeComponent(Component):
    """
    A component representing a date/time.

    Parameters
    ----------
    data : :class:`~numpy.ndarray`
        The data to store, with `~numpy.datetime64` dtype
    units : `str`, optional
        Unit label.

    Raises
    ------
    TypeError
        If ``data`` is not a NumPy array with datetime64 dtype.
    """
    def __init__(self, data, units=None):
        self.units = units
        # dtype kind 'M' is NumPy's code for datetime64 arrays.
        if not isinstance(data, np.ndarray) or data.dtype.kind != 'M':
            # Fixed typo in the error message: "datetim64" -> "datetime64".
            raise TypeError("DateTimeComponent should be initialized with a datetime64 Numpy array")
        self._data = data
    @property
    def numeric(self):
        # datetime64 values support arithmetic and comparison, so they are
        # treated as numeric.
        return True
    @property
    def datetime(self):
        return True
class DaskComponent(Component):
    """
    A data component powered by a dask array.

    Values are computed lazily: slicing a DaskComponent computes only the
    requested subset.

    Parameters
    ----------
    data : dask.array.Array
        The dask array to wrap.
    units : `str`, optional
        Unit label.
    """
    def __init__(self, data, units=None):
        # Deliberately skip Component.__init__: coercion and the read-only
        # flag are not meaningful for a lazy dask collection.
        self._data = data
        # Uses the units property/setter inherited from Component; the
        # previously duplicated identical property pair was removed.
        self.units = units
    @property
    def data(self):
        return self._data
    @property
    def shape(self):
        return self._data.shape
    @property
    def ndim(self):
        return len(self._data.shape)
    def __getitem__(self, key):
        # Compute only the requested slice and realize it as a NumPy array.
        return np.asarray(self._data[key].compute())
    @property
    def numeric(self):
        return True
    @property
    def categorical(self):
        return False
    @property
    def datetime(self):
        return False
class ExtendedComponent(Component):
    """
    A data component whose values are extended regions or ranges.

    Use this with a `RegionData` object when a dataset describes regions
    (for example the boundaries of geographic areas); `RegionData` provides
    the helper functions needed to display such regions in viewers.

    Data loaders must convert each region to a ``shapely.Geometry`` object
    before building this component.  For instance, a circular region is

    >>> circle = shapely.Point(x, y).buffer(rad)

    and a one-dimensional range is

    >>> range = shapely.LineString([[x0,0],[x1,0]])

    (an odd-looking construct, since a y coordinate must be supplied for
    the line; the convention adopted is y == 0).

    ExtendedComponents are NOT used directly in linking.  Each one is
    instead paired with ComponentID(s) giving the x (and y) coordinates
    over which its regions are defined.  If not specified otherwise, a
    `RegionData` object creates 'representative points' for each region --
    points near the center of, and guaranteed to lie inside, the region --
    and those IDs are what the linking framework uses.

    NOTE: regions in more than two dimensions are not supported (shapely
    has limited support for 3D shapes, but not more).

    Parameters
    ----------
    data : list of `shapely.Geometry`` objects
        The data to store.
    center_comp_ids : list of :class:`glue.core.component_id.ComponentID` objects
        The ComponentIDs of the `center` of the extended region.  These do
        not have to be the literal center of the region, but they must be in
        the x (and y) coordinates of the regions.  These ComponentIDs are
        used in the linking framework so an ExtendedComponent can be linked
        to other components.
    units : `str`, optional
        Unit description.

    Attributes
    ----------
    x : ComponentID
        The ComponentID of the x coordinate at the center of the extended region.
    y : ComponentID
        The ComponentID of the y coordinate at the center of the extended region.

    Raises
    ------
    TypeError
        If data is not a list of ``shapely.Geometry`` objects
    ValueError
        If center_comp_ids is not a list of length 1 or 2
    """
    def __init__(self, data, center_comp_ids, units=None):
        # Validate the geometries first, mirroring the documented raises.
        for region in data:
            if not isinstance(region, shapely.Geometry):
                raise TypeError(
                    "Input data for a ExtendedComponent should be a list of shapely.Geometry objects"
                )
        n_ids = len(center_comp_ids)
        if n_ids not in (1, 2):
            raise ValueError(
                "ExtendedComponent must be initialized with one or two ComponentIDs"
            )
        self.x = center_comp_ids[0]
        # 1D regions carry only an x coordinate.
        self.y = center_comp_ids[1] if n_ids == 2 else None
        self.units = units
        self._data = data
    @property
    def extended(self):
        return True
    @property
    def numeric(self):
        return False
    @property
    def datetime(self):
        return False
    @property
    def categorical(self):
        return False
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@glue@core@component.py@.PATH_END.py
|
{
"filename": "test_inter_rater.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/stats/tests/test_inter_rater.py",
"type": "Python"
}
|
"""
Created on Mon Dec 10 09:18:14 2012
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
from statsmodels.stats.inter_rater import (fleiss_kappa, cohens_kappa,
to_table, aggregate_raters)
from statsmodels.tools.testing import Holder
table0 = np.asarray('''\
1 0 0 0 0 14 1.000
2 0 2 6 4 2 0.253
3 0 0 3 5 6 0.308
4 0 3 9 2 0 0.440
5 2 2 8 1 1 0.330
6 7 7 0 0 0 0.462
7 3 2 6 3 0 0.242
8 2 5 3 2 2 0.176
9 6 5 2 1 0 0.286
10 0 2 2 3 7 0.286'''.split(), float).reshape(10,-1)
table1 = table0[:, 1:-1]
table10 = [[0, 4, 1],
[0, 8, 0],
[0, 1, 5]]
#Fleiss 1971, Fleiss has only the transformed table
diagnoses = np.array( [[4, 4, 4, 4, 4, 4],
[2, 2, 2, 5, 5, 5],
[2, 3, 3, 3, 3, 5],
[5, 5, 5, 5, 5, 5],
[2, 2, 2, 4, 4, 4],
[1, 1, 3, 3, 3, 3],
[3, 3, 3, 3, 5, 5],
[1, 1, 3, 3, 3, 4],
[1, 1, 4, 4, 4, 4],
[5, 5, 5, 5, 5, 5],
[1, 4, 4, 4, 4, 4],
[1, 2, 4, 4, 4, 4],
[2, 2, 2, 3, 3, 3],
[1, 4, 4, 4, 4, 4],
[2, 2, 4, 4, 4, 5],
[3, 3, 3, 3, 3, 5],
[1, 1, 1, 4, 5, 5],
[1, 1, 1, 1, 1, 2],
[2, 2, 4, 4, 4, 4],
[1, 3, 3, 5, 5, 5],
[5, 5, 5, 5, 5, 5],
[2, 4, 4, 4, 4, 4],
[2, 2, 4, 5, 5, 5],
[1, 1, 4, 4, 4, 4],
[1, 4, 4, 4, 4, 5],
[2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 5, 5],
[2, 2, 4, 4, 4, 4],
[1, 3, 3, 3, 3, 3],
[5, 5, 5, 5, 5, 5]])
diagnoses_rownames = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', ]
diagnoses_colnames = ['rater1', 'rater2', 'rater3', 'rater4', 'rater5', 'rater6', ]
def test_fleiss_kappa():
    """Check fleiss_kappa against the worked example on the Wikipedia page."""
    expected = 0.210
    assert_almost_equal(fleiss_kappa(table1), expected, decimal=3)
def test_fleis_randolph():
    """Compare fixed-marginal (Fleiss) and free-marginal (Randolph) kappa
    against Randolph's online calculator,
    http://justusrandolph.net/kappa/#dInfo
    """
    # Both raters always choose category 0: perfect free-marginal agreement.
    tab_perfect = [[7, 0], [7, 0]]
    assert_equal(fleiss_kappa(tab_perfect, method='unif'), 1)

    # Near-perfect raw agreement, yet the fixed-marginal kappa is negative.
    tab_near = [[6.99, 0.01], [6.99, 0.01]]
    # % Overall Agreement 0.996671
    # Fixed Marginal Kappa: -0.166667
    # Free Marginal Kappa: 0.993343
    assert_allclose(fleiss_kappa(tab_near), -0.166667, atol=6e-6)
    assert_allclose(fleiss_kappa(tab_near, method='unif'), 0.993343, atol=6e-6)

    tab_mixed = [[7, 1], [3, 5]]
    # % Overall Agreement 0.607143
    # Fixed Marginal Kappa: 0.161905
    # Free Marginal Kappa: 0.214286
    assert_allclose(fleiss_kappa(tab_mixed, method='fleiss'), 0.161905,
                    atol=6e-6)
    assert_allclose(fleiss_kappa(tab_mixed, method='randolph'), 0.214286,
                    atol=6e-6)

    # Perfect agreement on two distinct categories: both kappas equal 1.
    tab_diag = [[7, 0], [0, 7]]
    assert_allclose(fleiss_kappa(tab_diag), 1)
    assert_allclose(fleiss_kappa(tab_diag, method='uniform'), 1)

    tab3 = [[6, 1, 0], [0, 7, 0]]
    # % Overall Agreement 0.857143
    # Fixed Marginal Kappa: 0.708333
    # Free Marginal Kappa: 0.785714
    assert_allclose(fleiss_kappa(tab3), 0.708333, atol=6e-6)
    assert_allclose(fleiss_kappa(tab3, method='rand'), 0.785714, atol=6e-6)
class CheckCohens:
    """Shared assertions comparing a cohens_kappa result (``self.res``)
    against reference values (``self.res2``) and an expected string
    representation (``self.res_string``).
    """

    def test_results(self):
        result = self.res
        reference = self.res2
        actual = [result.kappa, result.std_kappa, result.kappa_low,
                  result.kappa_upp, result.std_kappa0, result.z_value,
                  result.pvalue_one_sided, result.pvalue_two_sided]
        assert_almost_equal(actual, reference, decimal=4)
        # the printed summary must match the reference layout verbatim
        assert_equal(str(result), self.res_string)
class TestUnweightedCohens(CheckCohens):
    """Unweighted Cohen's kappa compared to a SAS printout."""
    # comparison to printout of a SAS example
    @classmethod
    def setup_class(cls):
        #temporary: res instance is at last position
        cls.res = cohens_kappa(table10)
        # SAS reference: kappa, ASE, 95% CI lower, 95% CI upper
        res10_sas = [0.4842, 0.1380, 0.2137, 0.7547]
        res10_sash0 = [0.1484, 3.2626, 0.0006, 0.0011] #for test H0:kappa=0
        cls.res2 = res10_sas + res10_sash0 #concatenate
        # NOTE(review): the internal alignment of this expected string looks
        # whitespace-stripped -- confirm it matches str(cohens_kappa(...))
        # exactly before relying on the equality assertion.
        cls.res_string = '''\
Simple Kappa Coefficient
--------------------------------
Kappa 0.4842
ASE 0.1380
95% Lower Conf Limit 0.2137
95% Upper Conf Limit 0.7547
Test of H0: Simple Kappa = 0
ASE under H0 0.1484
Z 3.2626
One-sided Pr > Z 0.0006
Two-sided Pr > |Z| 0.0011''' + '\n'

    def test_option(self):
        # return_results=False returns only the scalar kappa
        kappa = cohens_kappa(table10, return_results=False)
        assert_almost_equal(kappa, self.res2[0], decimal=4)
class TestWeightedCohens(CheckCohens):
    """Linearly weighted Cohen's kappa compared to a SAS printout."""
    #comparison to printout of a SAS example
    @classmethod
    def setup_class(cls):
        #temporary: res instance is at last position
        cls.res = cohens_kappa(table10, weights=[0, 1, 2])
        # SAS reference: kappa, ASE, 95% CI lower, 95% CI upper
        res10w_sas = [0.4701, 0.1457, 0.1845, 0.7558]
        res10w_sash0 = [0.1426, 3.2971, 0.0005, 0.0010] #for test H0:kappa=0
        cls.res2 = res10w_sas + res10w_sash0 #concatenate
        # NOTE(review): as in the unweighted case, the expected string's
        # internal alignment looks whitespace-stripped -- verify against the
        # actual __str__ output.
        cls.res_string = '''\
Weighted Kappa Coefficient
--------------------------------
Kappa 0.4701
ASE 0.1457
95% Lower Conf Limit 0.1845
95% Upper Conf Limit 0.7558
Test of H0: Weighted Kappa = 0
ASE under H0 0.1426
Z 3.2971
One-sided Pr > Z 0.0005
Two-sided Pr > |Z| 0.0010''' + '\n'

    def test_option(self):
        # return_results=False returns only the scalar kappa
        kappa = cohens_kappa(table10, weights=[0, 1, 2], return_results=False)
        assert_almost_equal(kappa, self.res2[0], decimal=4)
def test_cohenskappa_weights():
    """Equivalence of different, logically identical weight options."""
    np.random.seed(9743678)
    table = np.random.randint(0, 10, size=(5, 5)) + 5 * np.eye(5)

    # Collapsing the 5 categories into 2 groups must agree with linear
    # weights that encode the same 0/0/0 vs 1/1 grouping.
    grouping = np.array([[1, 1, 1, 0, 0], [0, 0, 0, 1, 1]])
    table_grouped = np.dot(np.dot(grouping, table), grouping.T)
    res_full = cohens_kappa(table, weights=np.arange(5) > 2, wt='linear')
    res_grouped = cohens_kappa(table_grouped, weights=np.arange(2),
                               wt='linear')
    assert_almost_equal(res_full.kappa, res_grouped.kappa, decimal=14)
    assert_almost_equal(res_full.var_kappa, res_grouped.var_kappa, decimal=14)

    # toeplitz weights reduce to linear weights for equally spaced scores
    res_lin = cohens_kappa(table, weights=2 * np.arange(5), wt='linear')
    res_toe = cohens_kappa(table, weights=2 * np.arange(5), wt='toeplitz')
    res_toe2 = cohens_kappa(table, weights=res_lin.weights[0], wt='toeplitz')
    # a full 2-dim weight matrix must also give the same answer
    res_2d = cohens_kappa(table, weights=res_lin.weights)
    for other in (res_toe, res_toe2, res_2d):
        assert_almost_equal(res_lin.kappa, other.kappa, decimal=14)
        assert_almost_equal(res_lin.var_kappa, other.var_kappa, decimal=14)

    # toeplitz with squared distances reduces to quadratic weights
    res_toe_sq = cohens_kappa(table, weights=5 * np.arange(5) ** 2,
                              wt='toeplitz')
    res_quad = cohens_kappa(table, weights=5 * np.arange(5), wt='quadratic')
    assert_almost_equal(res_toe_sq.kappa, res_quad.kappa, decimal=14)
    assert_almost_equal(res_toe_sq.var_kappa, res_quad.var_kappa, decimal=14)
# Anxiety ratings from the R package irr: 20 subjects (rows) by 3 raters
# (columns), rating scale 1-6; Fortran order matches R's column-major layout.
anxiety = np.array([
    3, 3, 3, 4, 5, 5, 2, 3, 5, 2, 2, 6, 1, 5, 2, 2, 1, 2, 4, 3, 3, 6, 4,
    6, 2, 4, 2, 4, 3, 3, 2, 3, 3, 3, 2, 2, 1, 3, 3, 4, 2, 1, 4, 4, 3, 2,
    1, 6, 1, 1, 1, 2, 3, 3, 1, 1, 3, 3, 2, 2
    ]).reshape(20,3, order='F')
# Row/column labels of the corresponding R data set (kept for reference).
anxiety_rownames = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', ]
anxiety_colnames = ['rater1', 'rater2', 'rater3', ]
def test_cohens_kappa_irr():
    """Compare cohens_kappa with the R package irr (kappa2) on the anxiety
    data for several equivalent weighting specifications.
    """
    ck_w3 = Holder()
    ck_w4 = Holder()
    # NOTE(review): the two reference results immediately below (custom
    # weight vectors on anxiety[, 1:2]) are overwritten a few lines further
    # down and are therefore never tested -- confirm whether these cases
    # were meant to be part of ``all_cases``.
    #>r = kappa2(anxiety[,1:2], c(0,0,0,1,1,1))
    #> cat_items(r, pref="ck_w3.")
    ck_w3.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,0,1,1,1)"
    ck_w3.irr_name = 'Kappa'
    ck_w3.value = 0.1891892
    ck_w3.stat_name = 'z'
    ck_w3.statistic = 0.5079002
    ck_w3.p_value = 0.6115233

    #> r = kappa2(anxiety[,1:2], c(0,0,1,1,2,2))
    #> cat_items(r, pref="ck_w4.")
    ck_w4.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,1,1,2,2)"
    ck_w4.irr_name = 'Kappa'
    ck_w4.value = 0.2820513
    ck_w4.stat_name = 'z'
    ck_w4.statistic = 1.257410
    ck_w4.p_value = 0.2086053

    # Reference results actually tested below, all on anxiety[, 2:3].
    ck_w1 = Holder()
    ck_w2 = Holder()
    ck_w3 = Holder()
    ck_w4 = Holder()
    #> r = kappa2(anxiety[,2:3])
    #> cat_items(r, pref="ck_w1.")
    ck_w1.method = "Cohen's Kappa for 2 Raters (Weights: unweighted)"
    ck_w1.irr_name = 'Kappa'
    ck_w1.value = -0.006289308
    ck_w1.stat_name = 'z'
    ck_w1.statistic = -0.0604067
    ck_w1.p_value = 0.9518317

    #> r = kappa2(anxiety[,2:3], "equal")
    #> cat_items(r, pref="ck_w2.")
    ck_w2.method = "Cohen's Kappa for 2 Raters (Weights: equal)"
    ck_w2.irr_name = 'Kappa'
    ck_w2.value = 0.1459075
    ck_w2.stat_name = 'z'
    ck_w2.statistic = 1.282472
    ck_w2.p_value = 0.1996772

    #> r = kappa2(anxiety[,2:3], "squared")
    #> cat_items(r, pref="ck_w3.")
    ck_w3.method = "Cohen's Kappa for 2 Raters (Weights: squared)"
    ck_w3.irr_name = 'Kappa'
    ck_w3.value = 0.2520325
    ck_w3.stat_name = 'z'
    ck_w3.statistic = 1.437451
    ck_w3.p_value = 0.1505898

    #> r = kappa2(anxiety[,2:3], c(0,0,1,1,2))
    #> cat_items(r, pref="ck_w4.")
    ck_w4.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,1,1,2)"
    ck_w4.irr_name = 'Kappa'
    ck_w4.value = 0.2391304
    ck_w4.stat_name = 'z'
    ck_w4.statistic = 1.223734
    ck_w4.p_value = 0.2210526

    # (reference result, weights argument, weight-type argument) triples;
    # several specifications are mathematically equivalent and must
    # reproduce the same reference numbers.
    all_cases = [(ck_w1, None, None),
                 (ck_w2, None, 'linear'),
                 (ck_w2, np.arange(5), None),
                 (ck_w2, np.arange(5), 'toeplitz'),
                 (ck_w3, None, 'quadratic'),
                 (ck_w3, np.arange(5)**2, 'toeplitz'),
                 (ck_w3, 4*np.arange(5)**2, 'toeplitz'),
                 (ck_w4, [0,0,1,1,2], 'toeplitz')]

    #Note R:irr drops the missing category level 4 and uses the reduced matrix
    # build the two-rater contingency table with the same reduced binning
    r = np.histogramdd(anxiety[:,1:], ([1, 2, 3, 4, 6, 7], [1, 2, 3, 4, 6, 7]))
    for res2, w, wt in all_cases:
        msg = repr(w) + repr(wt)
        res1 = cohens_kappa(r[0], weights=w, wt=wt)
        assert_almost_equal(res1.kappa, res2.value, decimal=6, err_msg=msg)
        assert_almost_equal(res1.z_value, res2.statistic, decimal=5, err_msg=msg)
        assert_almost_equal(res1.pvalue_two_sided, res2.p_value, decimal=6, err_msg=msg)
def test_fleiss_kappa_irr():
    """Compare fleiss_kappa with R's irr package, kappam.fleiss(diagnoses)."""
    expected = Holder()
    #> r = kappam.fleiss(diagnoses)
    #> cat_items(r, pref="fleiss.")
    expected.method = "Fleiss' Kappa for m Raters"
    expected.irr_name = 'Kappa'
    expected.value = 0.4302445
    expected.stat_name = 'z'
    expected.statistic = 17.65183
    expected.p_value = 0

    # fleiss_kappa expects per-subject category counts, not raw ratings
    counts, _ = aggregate_raters(diagnoses)
    assert_almost_equal(fleiss_kappa(counts), expected.value, decimal=7)
def test_to_table():
    """to_table must match a brute-force contingency count for 2 and 3 raters."""
    data = diagnoses

    # brute-force 5x5 count of rating pairs for the first two raters
    expected = np.asarray([[(data[:, :2] - 1 == [i, j]).all(1).sum()
                            for j in range(5)]
                           for i in range(5)])

    # integer bin count on 0-based codes
    res_int = to_table(data[:, :2] - 1, 5)
    assert_equal(res_int[0], expected)

    # default binning on the raw (1-based) codes
    res_default = to_table(data[:, :2])
    assert_equal(res_default[0], expected)

    # explicit bin edges straddling each integer code
    bins = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]
    res_bins = to_table(data[:, :2], bins)
    assert_equal(res_bins[0], expected)

    # more than 2 raters: summing out the last axis recovers the 2-rater table
    res3_edges = to_table(data[:, :3] - 1, bins=[-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
    res3_count = to_table(data[:, :3] - 1, bins=5)
    assert_equal(res3_edges[0].sum(-1), expected)
    assert_equal(res3_count[0].sum(-1), expected)
def test_aggregate_raters():
    """aggregate_raters: per-subject category counts plus category labels."""
    counts, categories = aggregate_raters(diagnoses)
    # column sums equal the total number of times each category was assigned
    assert_equal(counts.sum(0), np.array([26, 26, 30, 55, 43]))
    # the category labels are the sorted unique codes in the raw data
    assert_equal(np.unique(diagnoses), categories)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@stats@tests@test_inter_rater.py@.PATH_END.py
|
{
"filename": "_outlinecolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/densitymapbox/colorbar/_outlinecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``densitymapbox.colorbar.outlinecolor``."""

    def __init__(
        self, plotly_name="outlinecolor", parent_name="densitymapbox.colorbar", **kwargs
    ):
        # default edit type unless the caller supplies its own
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(OutlinecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@densitymapbox@colorbar@_outlinecolor.py@.PATH_END.py
|
{
"filename": "parameters.py",
"repo_name": "baptklein/ATMOSPHERIX_DATA_RED",
"repo_path": "ATMOSPHERIX_DATA_RED_extracted/ATMOSPHERIX_DATA_RED-main/parameters.py",
"type": "Python"
}
|
"""Configuration file for the ATMOSPHERIX data-reduction pipeline.

All settings are plain module-level constants, grouped by pipeline stage:
reading raw SPIRou t.fits files, injecting a synthetic planet, reducing the
data (normalisation, telluric masking, PCA detrending) and cross-correlating
against model spectra.  Edit the values in place; the pipeline imports this
module directly.
"""
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): scipy.signal is imported but not used anywhere in this file.
import scipy.signal
# Observation type: "emission" or (presumably) "transmission" -- confirm the
# accepted values against the pipeline code.
type_obs = "emission"
# Global matplotlib font sizes (re-applied verbatim at the end of this file).
SMALL_SIZE = 28
MEDIUM_SIZE = 32
BIGGER_SIZE = 34
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
# Pipeline stage switches.
READ_DATA = True #do you want to read some t.fits files ?
INJ_PLANET = True
REDUCE_DATA = False #do you want to reduce one or several pkl file that has been read beforehand ?
CORREL_DATA = False #do you want to perform correlation from reduced pkl files ?
dir_global = "/home/adminloc/Bureau/Atmospheres/Data/GL15A/"
### Directory to save figures if plot = True
dir_figures = dir_global+"Figures/"
num_obs = 1 #Number of observing nights that will be treated independently
            #before being added up in the correlation
###########################################################################
###########################################################################
###################   PARAMETERS TO READ DATA
###########################################################################
###########################################################################
### Directory where all the "t.fits" files are stores
dir_data = [dir_global+"fits/"]
### Name of the pickle file to store the info in
dir_save_read = dir_global+"read/"
read_name_fin = ["test.pkl"]
### List of SPIRou absolute orders -- Reddest: 31; Bluest: 79
orders = np.arange(31,80)[::-1].tolist()
nord = len(orders)
### Ephemerides (to compute orbital phase)
T0 = 2459130.8962180 #Mid-transit (or cunjunction) time [BJD]
Porb = 2.218577 #Orbital period [d]
T_peri = 2459130.8962180 ## Time of peri astron passage for an elliptical orbit
### Transit parameters -- Compute the transit window
### Using batman python package https://lweb.cfa.harvard.edu/~lkreidberg/batman/
### Get the limb-darkening coefficients in H band from Claret+2011: https://vizier.cds.unistra.fr/viz-bin/VizieR?-source=J/A+A/529/A75
Rp = 3834618350. #Planet radius [m]
Rs = 26112750000. #Stellar radius [m]
Ms = 0.4*1.989*1e30 #Stellar mass [kg]
ip = 90.0 #Transit incl. [deg]
ap = 14.0534030 #Semi-maj axis [R_star]
ep = 0.2 #Eccentricity of Pl. orbit
wp = 0.0 #Arg of periaps [deg]
ld_mod = "quadratic" #Limb-darkening model ["nonlinear", "quadratic", "linear"]
ld_coef = [0.0156,0.313] #Limb-darkening coefficients
T_star = 5000. #stellar temperature [K]. Only necessary for injecting a planet in emission.
### Stellar radial velocity info
Ks = 0.164 #RV semi-amplitude of the star orbital motion due to planet [km/s]
V0 = 11.73 #Stellar systemic velocity [km/s]
### Plots
plot_read = True # If True, plot transit info
###########################################################################
###########################################################################
###################   PARAMETERS TO INJECT PLANET
###########################################################################
###########################################################################
# Model files used to inject a synthetic planetary signal into the data.
planet_wavelength_nm_file = "/home/adminloc/Bureau/Atmospheres/Models/Results/lambdastest-H2O.txt"
planet_radius_m_file = "/home/adminloc/Bureau/Atmospheres/Pipeline_v2/ATMOSPHERIX_DATA_RED/Model/Results/RpGL15A_HD189_onlyH2O-VMR3-T900.txt"
planet_flux_SI_file = "/home/adminloc/Bureau/Atmospheres/Models/Results/Rptest-H2O.txt"
# Injected semi-amplitude [km/s], systemic velocity [km/s] and amplitude factor.
K_inj = 120.
V_inj = 30.
amp_inj = 1
###########################################################################
###########################################################################
###################   PARAMETERS TO REDUCE DATA
###########################################################################
###########################################################################
dir_reduce_in = dir_global+"read/"
dir_reduce_out = dir_global+"reduced/"
reduce_name_in = ['test.pkl']
#output fil
reduce_name_out = ['test.pkl' ]
#information file
reduce_info_file = dir_figures+"info.dat"
### Correction of stellar contamination
### Only used if synthetic spectrum available
corr_star = False
WC_name = "" ### Input wavelength for synthetic stellar spectra
IC_name = "" ### Input flux for synthetic stellar spectra
### Additional Boucher correction. If dep_min >=1, not included.
dep_min = 1.0 # remove all data when telluric relative absorption > 1 - dep_min
thres_up = 0.05 # Remove the line until reaching 1-thres_up
Npt_lim = 800 # If the order contains less than Npt_lim points, it is discarded from the analysis
### Interpolation parameters
pixel = np.linspace(-1.14,1.14,11) ### Sampling a SPIRou pixel in velocity space -- Width ~ 2.28 km/s
sig_g = 2.28 ### STD of one SPIRou px in km/s
N_bor = 15 ### Nb of pts removed at each extremity (twice)
### Normalisation parameters
N_med = 150 ### Nb of points used in the median filter for the inteprolation
sig_out = 5.0 ### Threshold for outliers identification during normalisation process
N_adj = 2 ### Number of adjacent pixel removed with outliers
deg_px = 2 ### Degree of the polynomial fit to the distribution of pixel STDs
### Parameters for detrending with airmass
det_airmass = False
deg_airmass = 2
### Parameters PCA. Auto-tune automatically decides the number of component
#to remove by comparing with white noise map.
mode_pca = "pca" ### "pca" or "autoencoder"
wpca = False #Use weighted pca
auto_tune = True ### Automatic tuning of number of components
factor_pca = 1.1 #factor in the auto tune: every PC above factor*white_noise_mean_eigenvalue is suppressed
min_pca = 0 #minimim number of removed components
mode_norm_pca = "none" #how to remove mean and std in the data before PCA. Four possibilities:
# "none" : data untouched.
# "global" : suppression of mean and division by the std of the whole data set
# 'per_pix': same as global but column by colum (per pixel)
# 'per_obs': same as global but line by line (per observation)
### Nb of removed components if auto tune is false
npca = np.array(1*np.ones(49),dtype=int)
### Plot info
plot_red = True
numb = 46
#If you want to remove some orders, put them here
orders_rem = []
#We can manually decide the where is the transit in the phase direction,
#and exclude it for the calculation of the mean stellar spectrum.
#If set_window = False, the transit window defines n_ini and n_end
set_window = False
n_ini_fix,n_end_fix = 10,20 ### Get transits start and end indices
### Size of the estimation of the std of the order for final metrics
N_px = 200
###########################################################################
###########################################################################
###################   PARAMETERS FOR CORRELATION
###########################################################################
############################################################################
parallel = False
#This is just for the integration over a pixel
pixel_correl = np.linspace(-1.17,1.17,15)
weights= np.ones(15)
#Kp intervals
Kpmin = 60.0
Kpmax =180.0
Nkp = 31
Kp_array = np.linspace(Kpmin,Kpmax,Nkp)
#Vsys intervals
Vmin = 0.
Vmax= 60.
Nv = 61
Vsys_array = np.linspace(Vmin,Vmax,Nv)
#Number of pkl observations files and their names
dir_correl_in = dir_global+"reduced/"
correl_name_in = ["test.pkl",]
#Do we save the correlation file ? If yes, put as much files as there are observations
save_ccf = True
dir_correl_out = dir_global+"correlated/"
correl_name_out = ["test.pkl"]
dir_correl_mod = "/home/adminloc/Bureau/Atmospheres/Pipeline_v2/ATMOSPHERIX_DATA_RED/Model/to-correl/GL15A_HD189_onlyH2O-VMR3-T900/"
#DO we select orders or take them all ? If True, provide your order selection
# for each observation. If an order does not exist in the pkl file, it will
# obivously not be used but will not trigger an error.
select_ord = True
list_ord_correl = np.arange(32,80)
#If false, the calculation is performed over the whole dataset. If
#True, we only select observation that have a transit window > min_window
select_phase = True
min_window = 0.2
#Interpolation factor for the speed array. If you d'ont know what that means, choose something between 1 and 10
int_speed = 8
#Number of pixels to discard at the borders.
nbor_correl = 10
#Do we include the projector from Gibson 2022 ?
use_proj = True
#If we just removed the mean and std of the whole map, we can use a fast verion of the projector
#Else, it will be even longer
proj_fast = True
mode_norm_pca_correl = "none" #if proj_fast is not used, we can choose
#how to remove mean and std in the data before PCA. Four possibilities:
# "none" : data untouched.
# "global" : suppression of mean and division by the std of the whole data set
# 'per_pix': same as global but column by colum (per pixel)
# 'per_obs': same as global but line by line (per observation)
#Do we select only certain orders for the plot ?
#if yes, lili is the list oforders to select
select_plot = False
list_ord_plot_correl = np.array([48,47,46,34,33,32])
#In order to calculate the std of the map,we need to exclude
#a zone of the Kp-Vsys map around the planet. These are the limits
#of this rectangular zone.
Kp_min_std = 80
Kp_max_std = 160
Vsys_min_std = 20
Vsys_max_std = 40
#number of levels in the contour plot
nlevels = 15
#Do we plot the correlation map at each obs ?
plot_ccf_indiv = True
#Do we plot the global correaltion map ?
plot_ccf_tot = True
#Do we add white lines at the planet position ?
white_lines = True
Kp_planet = 120.
Vsys_planet = 30.
###########################################################################
###########################################################################
###################   PARAMETERS FOR PLOTS
###########################################################################
############################################################################
# NOTE(review): this block repeats, verbatim, the rc configuration already
# applied at the top of the file; it is harmless (idempotent) but redundant
# and could be removed.
SMALL_SIZE = 28
MEDIUM_SIZE = 32
BIGGER_SIZE = 34
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
|
baptkleinREPO_NAMEATMOSPHERIX_DATA_REDPATH_START.@ATMOSPHERIX_DATA_RED_extracted@ATMOSPHERIX_DATA_RED-main@parameters.py@.PATH_END.py
|
{
"filename": "_thicknessmode.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/coloraxis/colorbar/_thicknessmode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ThicknessmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.coloraxis.colorbar.thicknessmode``."""

    def __init__(
        self,
        plotly_name="thicknessmode",
        parent_name="layout.coloraxis.colorbar",
        **kwargs,
    ):
        # defaults that callers may override through kwargs
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["fraction", "pixels"])
        super(ThicknessmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@coloraxis@colorbar@_thicknessmode.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/polar/angularaxis/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for ``layout.polar.angularaxis.visible``."""

    def __init__(
        self, plotly_name="visible", parent_name="layout.polar.angularaxis", **kwargs
    ):
        # defaults that callers may override through kwargs
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "info")
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@polar@angularaxis@_visible.py@.PATH_END.py
|
{
"filename": "test_glm_weights.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/genmod/tests/test_glm_weights.py",
"type": "Python"
}
|
"""
Test for weights in GLM, Poisson and OLS/WLS, continuous test_glm.py
Below is a table outlining the test coverage.
================================= ====================== ====== ===================== === ======= ======== ============== ============= ============== ============= ============== ==== =========
Test Compared To params normalized_cov_params bse loglike deviance resid_response resid_pearson resid_deviance resid_working resid_anscombe chi2 optimizer
================================= ====================== ====== ===================== === ======= ======== ============== ============= ============== ============= ============== ==== =========
TestGlmPoissonPlain stata X X X X X X X X X X bfgs
TestGlmPoissonFwNr stata X X X X X X X X X X bfgs
TestGlmPoissonAwNr stata X X X X X X X X X X bfgs
TestGlmPoissonFwHC stata X X X X X
TestGlmPoissonAwHC stata X X X X X
TestGlmPoissonFwClu stata X X X X X
TestGlmTweedieAwNr R X X X X X X X newton
TestGlmGammaAwNr R X X special X X X X X bfgs
TestGlmGaussianAwNr R X X special X X X X X bfgs
TestRepeatedvsAggregated statsmodels.GLM X X bfgs
TestRepeatedvsAverage statsmodels.GLM X X bfgs
TestTweedieRepeatedvsAggregated statsmodels.GLM X X bfgs
TestTweedieRepeatedvsAverage statsmodels.GLM X X bfgs
TestBinomial0RepeatedvsAverage statsmodels.GLM X X
TestBinomial0RepeatedvsDuplicated statsmodels.GLM X X bfgs
TestBinomialVsVarWeights statsmodels.GLM X X X bfgs
TestGlmGaussianWLS statsmodels.WLS X X X bfgs
================================= ====================== ====== ===================== === ======= ======== ============== ============= ============== ============= ============== ==== =========
""" # noqa: E501
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_raises
import pandas as pd
import pytest
import statsmodels.api as sm
# load data into module namespace
from statsmodels.datasets.cpunish import load
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.sm_exceptions import SpecificationWarning
from statsmodels.tools.tools import add_constant
from .results import (
res_R_var_weight as res_r,
results_glm_poisson_weights as res_stata,
)
# Shared fixture: the capital-punishment dataset used by all Poisson cases.
cpunish_data = load()
cpunish_data.exog = np.asarray(cpunish_data.exog)
cpunish_data.endog = np.asarray(cpunish_data.endog)
# the 4th regressor enters the model in logs
cpunish_data.exog[:, 3] = np.log(cpunish_data.exog[:, 3])
# append (not prepend) a constant column
cpunish_data.exog = add_constant(cpunish_data.exog, prepend=False)
class CheckWeight:
    """Mixin with shared assertions comparing a weighted GLM fit
    (``self.res1``) against reference results (``self.res2``) from Stata,
    R, or another statsmodels model.
    """

    def test_basic(self):
        # params, normalized cov, bse, llf and deviance against the reference
        res1 = self.res1
        res2 = self.res2
        assert_allclose(res1.params, res2.params, atol=1e-6, rtol=2e-6)
        # some robust-covariance cases need a small-sample correction factor
        corr_fact = getattr(self, 'corr_fact', 1)
        if hasattr(res2, 'normalized_cov_params'):
            assert_allclose(res1.normalized_cov_params,
                            res2.normalized_cov_params,
                            atol=1e-8, rtol=2e-6)
        if isinstance(self, (TestRepeatedvsAggregated, TestRepeatedvsAverage,
                             TestTweedieRepeatedvsAggregated,
                             TestTweedieRepeatedvsAverage,
                             TestBinomial0RepeatedvsAverage,
                             TestBinomial0RepeatedvsDuplicated)):
            # Loglikelihood, scale, deviance is different between repeated vs.
            # exposure/average
            return None
        assert_allclose(res1.bse, corr_fact * res2.bse, atol=1e-6, rtol=2e-6)
        if isinstance(self, TestBinomialVsVarWeights):
            # Binomial ll and deviance are different for 1d vs. counts...
            return None
        if isinstance(self, TestGlmGaussianWLS):
            # This will not work right now either
            return None
        if not isinstance(self, (TestGlmGaussianAwNr, TestGlmGammaAwNr)):
            # Matching R is hard
            assert_allclose(res1.llf, res2.ll, atol=1e-6, rtol=1e-7)
        assert_allclose(res1.deviance, res2.deviance, atol=1e-6, rtol=1e-7)

    def test_residuals(self):
        # all residual flavors against the reference residual matrix
        if isinstance(self, (TestRepeatedvsAggregated, TestRepeatedvsAverage,
                             TestTweedieRepeatedvsAggregated,
                             TestTweedieRepeatedvsAverage,
                             TestBinomial0RepeatedvsAverage,
                             TestBinomial0RepeatedvsDuplicated)):
            # This will not match as different number of records
            return None
        res1 = self.res1
        res2 = self.res2
        if not hasattr(res2, 'resids'):
            return None  # use SkipError instead
        resid_all = dict(zip(res2.resids_colnames, res2.resids.T))
        assert_allclose(res1.resid_response, resid_all['resid_response'],
                        atol=1e-6, rtol=2e-6)
        assert_allclose(res1.resid_pearson, resid_all['resid_pearson'],
                        atol=1e-6, rtol=2e-6)
        assert_allclose(res1.resid_deviance, resid_all['resid_deviance'],
                        atol=1e-6, rtol=2e-6)
        assert_allclose(res1.resid_working, resid_all['resid_working'],
                        atol=1e-6, rtol=2e-6)
        if resid_all.get('resid_anscombe') is None:
            return None
        # Stata does not use var_weights in anscombe residuals, it seems.
        # Adjust residuals to match our approach.
        resid_a = res1.resid_anscombe
        resid_a1 = resid_all['resid_anscombe'] * np.sqrt(res1._var_weights)
        assert_allclose(resid_a, resid_a1, atol=1e-6, rtol=2e-6)

    def test_compare_optimizers(self):
        # refitting with a gradient optimizer must reproduce the IRLS result
        res1 = self.res1
        if isinstance(res1.model.family, sm.families.Tweedie):
            method = 'newton'
            optim_hessian = 'eim'
        else:
            method = 'bfgs'
            optim_hessian = 'oim'
        if isinstance(self, (TestGlmPoissonFwHC, TestGlmPoissonAwHC,
                             TestGlmPoissonFwClu,
                             TestBinomial0RepeatedvsAverage)):
            return None
        start_params = res1.params
        res2 = self.res1.model.fit(start_params=start_params, method=method,
                                   optim_hessian=optim_hessian)
        assert_allclose(res1.params, res2.params, atol=1e-3, rtol=2e-3)
        # standard errors from the expected-information Hessian
        H = res2.model.hessian(res2.params, observed=False)
        res2_bse = np.sqrt(-np.diag(np.linalg.inv(H)))
        assert_allclose(res1.bse, res2_bse, atol=1e-3, rtol=1e-3)

    def test_pearson_chi2(self):
        # the reference stores Pearson chi2 under ``deviance_p``
        if hasattr(self.res2, 'chi2'):
            assert_allclose(self.res1.pearson_chi2, self.res2.deviance_p,
                            atol=1e-6, rtol=1e-6)

    def test_getprediction(self):
        # smoke test that get_prediction works on weighted fits
        pred = self.res1.get_prediction()
        # NOTE(review): this compares se_mean with itself, so it can never
        # fail -- presumably one side was meant to be a different quantity
        # (e.g. pred.se_mean); confirm the intended comparison upstream.
        assert_allclose(pred.linpred.se_mean, pred.linpred.se_mean, rtol=1e-10)
class TestGlmPoissonPlain(CheckWeight):
    """Unweighted Poisson GLM vs. Stata nonrobust reference results."""

    @classmethod
    def setup_class(cls):
        model = GLM(cpunish_data.endog, cpunish_data.exog,
                    family=sm.families.Poisson())
        cls.res1 = model.fit()
        cls.res2 = res_stata.results_poisson_none_nonrobust
class TestGlmPoissonFwNr(CheckWeight):
    """Poisson GLM with frequency weights vs. Stata fweight results."""

    @classmethod
    def setup_class(cls):
        freq = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3])
        model = GLM(cpunish_data.endog, cpunish_data.exog,
                    family=sm.families.Poisson(), freq_weights=freq)
        cls.res1 = model.fit()
        cls.res2 = res_stata.results_poisson_fweight_nonrobust
class TestGlmPoissonAwNr(CheckWeight):
    """Poisson GLM with variance weights vs. Stata aweight results."""

    @classmethod
    def setup_class(cls):
        freq = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3])
        # emulate Stata aweights: frequency weights rescaled to sum to nobs
        nobs = len(cpunish_data.endog)
        aweights = freq / freq.sum() * nobs
        model = GLM(cpunish_data.endog, cpunish_data.exog,
                    family=sm.families.Poisson(), var_weights=aweights)
        cls.res1 = model.fit()
        # copy the reference holder so the in-place residual adjustment
        # below does not leak into other tests
        from copy import copy
        cls.res2 = copy(res_stata.results_poisson_aweight_nonrobust)
        cls.res2.resids = cls.res2.resids.copy()
        # Stata does not weight pearson/deviance residuals; adjust to match
        cls.res2.resids[:, 3:5] *= np.sqrt(aweights[:, np.newaxis])
# prob_weights fail with HC, not properly implemented yet
class TestGlmPoissonPwNr(CheckWeight):
    """Probability weights with HC1 vs. Stata pweights -- known failures."""

    @classmethod
    def setup_class(cls):
        freq = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3])
        model = GLM(cpunish_data.endog, cpunish_data.exog,
                    family=sm.families.Poisson(), freq_weights=freq)
        cls.res1 = model.fit(cov_type='HC1')
        cls.res2 = res_stata.results_poisson_pweight_nonrobust

    # TODO: find more informative reasons why these fail
    @pytest.mark.xfail(reason='Known to fail', strict=True)
    def test_basic(self):
        super().test_basic()

    @pytest.mark.xfail(reason='Known to fail', strict=True)
    def test_compare_optimizers(self):
        super().test_compare_optimizers()
class TestGlmPoissonFwHC(CheckWeight):
    """Frequency weights with HC0 robust covariance vs. Stata HC1."""

    @classmethod
    def setup_class(cls):
        freq = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3])
        wsum = freq.sum()
        # small-sample factor relating our HC0 to Stata's HC1
        cls.corr_fact = np.sqrt((wsum - 1.) / wsum)
        model = GLM(cpunish_data.endog, cpunish_data.exog,
                    family=sm.families.Poisson(),
                    freq_weights=freq)
        cls.res1 = model.fit(cov_type='HC0')
        # cov_kwds={'use_correction':False})
        cls.res2 = res_stata.results_poisson_fweight_hc1
# var_weights (aweights fail with HC, not properly implemented yet
class TestGlmPoissonAwHC(CheckWeight):
    """Variance weights with HC0 robust covariance vs. Stata HC1."""

    @classmethod
    def setup_class(cls):
        fweights = [1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3]
        # faking aweights by using normalized freq_weights
        fweights = np.array(fweights)
        wsum = fweights.sum()
        nobs = len(cpunish_data.endog)
        aweights = fweights / wsum * nobs
        # This is really close when corr_fact = (wsum - 1.) / wsum, but to
        # avoid having loosen precision of the assert_allclose, I'm doing this
        # manually. Its *possible* lowering the IRLS convergence criterion
        # in stata and here will make this less sketchy.
        cls.corr_fact = np.sqrt((wsum - 1.) / wsum) * 0.98518473599905609
        mod = GLM(cpunish_data.endog, cpunish_data.exog,
                  family=sm.families.Poisson(),
                  var_weights=aweights)
        cls.res1 = mod.fit(cov_type='HC0')
        # cov_kwds={'use_correction':False})
        cls.res2 = res_stata.results_poisson_aweight_hc1
class TestGlmPoissonFwClu(CheckWeight):
    """Frequency weights with cluster-robust covariance vs. Stata."""

    @classmethod
    def setup_class(cls):
        fweights = [1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3]
        # faking aweights by using normalized freq_weights
        fweights = np.array(fweights)
        wsum = fweights.sum()
        nobs = len(cpunish_data.endog)
        aweights = fweights / wsum * nobs
        # pair consecutive observations into clusters
        gid = np.arange(1, 17 + 1) // 2
        n_groups = len(np.unique(gid))
        # no wnobs yet in sandwich covariance calcualtion
        cls.corr_fact = 1 / np.sqrt(n_groups / (n_groups - 1))
        # np.sqrt((wsum - 1.) / wsum)
        cov_kwds = {'groups': gid, 'use_correction': False}
        # combining freq_weights with a cluster cov_type warns
        with pytest.warns(SpecificationWarning):
            mod = GLM(cpunish_data.endog, cpunish_data.exog,
                      family=sm.families.Poisson(),
                      freq_weights=fweights)
            cls.res1 = mod.fit(cov_type='cluster', cov_kwds=cov_kwds)
        cls.res2 = res_stata.results_poisson_fweight_clu1
class TestGlmTweedieAwNr(CheckWeight):
    # Tweedie GLM (var_power=1.55, log link) with analytic weights and
    # nonrobust covariance, checked against R results.
    @classmethod
    def setup_class(cls):
        import statsmodels.formula.api as smf
        data = sm.datasets.fair.load_pandas()
        endog = data.endog
        data = data.exog
        data['fair'] = endog
        # weight pattern: mostly 1, every 5th = 5, every 13th = 3
        aweights = np.repeat(1, len(data.index))
        aweights[::5] = 5
        aweights[::13] = 3
        model = smf.glm(
            'fair ~ age + yrs_married',
            data=data,
            family=sm.families.Tweedie(
                var_power=1.55,
                link=sm.families.links.Log()
            ),
            var_weights=aweights
        )
        # very tight rtol so IRLS converges far enough to match the R fit
        cls.res1 = model.fit(rtol=1e-25, atol=0)
        cls.res2 = res_r.results_tweedie_aweights_nonrobust
class TestGlmGammaAwNr(CheckWeight):
    # Gamma GLM (log link) with analytic weights and nonrobust covariance,
    # checked against R results.
    @classmethod
    def setup_class(cls):
        from .results.results_glm import CancerLog
        res2 = CancerLog()
        endog = res2.endog
        exog = res2.exog[:, :-1]
        exog = sm.add_constant(exog, prepend=True)
        # weight pattern: mostly 1, every 5th = 5, every 13th = 3
        aweights = np.repeat(1, len(endog))
        aweights[::5] = 5
        aweights[::13] = 3
        model = sm.GLM(endog, exog,
                       family=sm.families.Gamma(link=sm.families.links.Log()),
                       var_weights=aweights)
        cls.res1 = model.fit(rtol=1e-25, atol=0)
        cls.res2 = res_r.results_gamma_aweights_nonrobust

    def test_r_llf(self):
        # R computes the loglikelihood with a deviance-based scale and
        # treats var_weights like frequency weights; replicate that here.
        scale = self.res1.deviance / self.res1._iweights.sum()
        ll = self.res1.family.loglike(self.res1.model.endog,
                                      self.res1.mu,
                                      freq_weights=self.res1._var_weights,
                                      scale=scale)
        assert_allclose(ll, self.res2.ll, atol=1e-6, rtol=1e-7)
class TestGlmGaussianAwNr(CheckWeight):
    # Gaussian GLM (log link) with analytic weights and nonrobust
    # covariance, checked against R results.
    @classmethod
    def setup_class(cls):
        import statsmodels.formula.api as smf
        data = sm.datasets.cpunish.load_pandas()
        endog = data.endog
        data = data.exog
        data['EXECUTIONS'] = endog
        data['INCOME'] /= 1000
        aweights = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3, 2,
                             1])
        model = smf.glm(
            'EXECUTIONS ~ INCOME + SOUTH - 1',
            data=data,
            family=sm.families.Gaussian(link=sm.families.links.Log()),
            var_weights=aweights
        )
        # very tight rtol so IRLS converges far enough to match the R fit
        cls.res1 = model.fit(rtol=1e-25, atol=0)
        cls.res2 = res_r.results_gaussian_aweights_nonrobust

    def test_r_llf(self):
        # Reconcile statsmodels' loglikelihood with R's, which differs by
        # a scale convention and two additive terms.
        res1 = self.res1
        res2 = self.res2
        model = self.res1.model
        # Need to make a few adjustments...
        # First, calculate scale using nobs as denominator
        scale = res1.scale * model.df_resid / model.wnobs
        # Calculate llf using adj scale and wts = freq_weights
        wts = model.freq_weights
        llf = model.family.loglike(model.endog, res1.mu,
                                   freq_weights=wts,
                                   scale=scale)
        # SM uses (essentially) stat's loglike formula... first term is
        # (endog - mu) ** 2 / scale
        adj_sm = -1 / 2 * ((model.endog - res1.mu) ** 2).sum() / scale
        # R has these 2 terms that stata/sm do not
        adj_r = -model.wnobs / 2 + np.sum(np.log(model.var_weights)) / 2
        llf_adj = llf - adj_sm + adj_r
        assert_allclose(llf_adj, res2.ll, atol=1e-6, rtol=1e-7)
def gen_endog(lin_pred, family_class, link, binom_version=0):
    """Simulate a response vector for the given GLM family and link.

    Parameters
    ----------
    lin_pred : ndarray
        Linear predictor; the mean is ``link().inverse(lin_pred)``.
    family_class : class
        One of the ``sm.families`` family classes (the class, not an
        instance).
    link : class
        Link class mapping the linear predictor to the mean.
    binom_version : int
        Binomial only: 0 draws 0/1 outcomes; otherwise draws a two-column
        (success, failure) count array with n=10 trials.

    Returns
    -------
    ndarray
        Simulated endog of shape (nobs,), or (nobs, 2) for binomial counts.
    """
    # fixed seed so every caller gets a reproducible sample
    np.random.seed(872)
    fam = sm.families
    mu = link().inverse(lin_pred)
    if family_class == fam.Binomial:
        if binom_version == 0:
            endog = 1*(np.random.uniform(size=len(lin_pred)) < mu)
        else:
            endog = np.empty((len(lin_pred), 2))
            n = 10
            endog[:, 0] = (np.random.uniform(size=(len(lin_pred), n)) <
                           mu[:, None]).sum(1)
            endog[:, 1] = n - endog[:, 0]
    elif family_class == fam.Poisson:
        endog = np.random.poisson(mu)
    elif family_class == fam.Gamma:
        endog = np.random.gamma(2, mu)
    elif family_class == fam.Gaussian:
        endog = mu + np.random.normal(size=len(lin_pred))
    elif family_class == fam.NegativeBinomial:
        from scipy.stats.distributions import nbinom
        # NOTE(review): mu is passed as nbinom's ``n`` parameter, so the
        # simulated mean is not mu itself -- looks intentional for these
        # tests, but confirm before reusing elsewhere.
        endog = nbinom.rvs(mu, 0.5)
    elif family_class == fam.InverseGaussian:
        from scipy.stats.distributions import invgauss
        endog = invgauss.rvs(mu)
    elif family_class == fam.Tweedie:
        rate = 1
        shape = 1.0
        scale = mu / (rate * shape)
        # compound draw: Poisson count times a gamma variate whose shape
        # parameter is shape * scale (gamma scale left at its default)
        endog = (np.random.poisson(rate, size=scale.shape[0]) *
                 np.random.gamma(shape * scale))
    else:
        raise ValueError
    return endog
def test_wtd_gradient_irls():
    """Compare gradient optimization and IRLS fits across families and
    links in the presence of var_weights; params, llf, scale, and bse
    must agree between the two fitting paths."""
    # Compare the results when using gradient optimization and IRLS.
    # TODO: Find working examples for inverse_squared link
    np.random.seed(87342)
    fam = sm.families
    lnk = sm.families.links
    families = [(fam.Binomial, [lnk.Logit, lnk.Probit, lnk.CLogLog, lnk.Log,
                                lnk.Cauchy]),
                (fam.Poisson, [lnk.Log, lnk.Identity, lnk.Sqrt]),
                (fam.Gamma, [lnk.Log, lnk.Identity, lnk.InversePower]),
                (fam.Gaussian, [lnk.Identity, lnk.Log, lnk.InversePower]),
                (fam.InverseGaussian, [lnk.Log, lnk.Identity,
                                       lnk.InversePower,
                                       lnk.InverseSquared]),
                (fam.NegativeBinomial, [lnk.Log, lnk.InversePower,
                                        lnk.InverseSquared, lnk.Identity])]
    n = 100
    p = 3
    exog = np.random.normal(size=(n, p))
    exog[:, 0] = 1
    skip_one = False
    for family_class, family_links in families:
        for link in family_links:
            for binom_version in 0, 1:
                method = 'bfgs'
                # Each (family, link) pair either gets a tailored linear
                # predictor or is skipped where convergence is known to fail.
                if family_class != fam.Binomial and binom_version == 1:
                    continue
                elif family_class == fam.Binomial and link == lnk.CLogLog:
                    # Cannot get gradient to converge with var_weights here
                    continue
                elif family_class == fam.Binomial and link == lnk.Log:
                    # Cannot get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Poisson, lnk.Identity):
                    lin_pred = 20 + exog.sum(1)
                elif (family_class, link) == (fam.Binomial, lnk.Log):
                    lin_pred = -1 + exog.sum(1) / 8
                elif (family_class, link) == (fam.Poisson, lnk.Sqrt):
                    lin_pred = -2 + exog.sum(1)
                elif (family_class, link) == (fam.Gamma, lnk.Log):
                    # Cannot get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Gamma, lnk.Identity):
                    # Cannot get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Gamma, lnk.InversePower):
                    # Cannot get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Gaussian, lnk.Log):
                    # Cannot get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Gaussian, lnk.InversePower):
                    # Cannot get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.InverseGaussian, lnk.Log):
                    # Cannot get gradient to converge with var_weights here
                    lin_pred = -1 + exog.sum(1)
                    continue
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.Identity):
                    # Cannot get gradient to converge with var_weights here
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                    continue
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.InverseSquared):
                    lin_pred = 0.5 + exog.sum(1) / 5
                    continue  # skip due to non-convergence
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.InversePower):
                    lin_pred = 1 + exog.sum(1) / 5
                    method = 'newton'
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.Identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-3, np.inf)
                    method = 'newton'
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.InverseSquared):
                    lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
                    continue  # skip due to non-convergence
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.InversePower):
                    # Cannot get gradient to converge with var_weights here
                    lin_pred = 1 + exog.sum(1) / 5
                    continue
                elif (family_class, link) == (fam.Gaussian, lnk.InversePower):
                    # NOTE(review): unreachable -- the earlier
                    # (Gaussian, InversePower) branch already matched and
                    # continued, so skip_one is never set here.
                    # adding skip because of convergence failure
                    skip_one = True
                else:
                    lin_pred = np.random.uniform(size=exog.shape[0])
                endog = gen_endog(lin_pred, family_class, link, binom_version)
                if binom_version == 0:
                    # upweight observations above the mean with random ints
                    wts = np.ones_like(endog)
                    tmp = np.random.randint(
                        2,
                        5,
                        size=(endog > endog.mean()).sum()
                    )
                    wts[endog > endog.mean()] = tmp
                else:
                    # two-column binomial: weight by success proportion
                    wts = np.ones(shape=endog.shape[0])
                    y = endog[:, 0] / endog.sum(axis=1)
                    tmp = np.random.gamma(2, size=(y > y.mean()).sum())
                    wts[y > y.mean()] = tmp
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    mod_irls = sm.GLM(endog, exog, var_weights=wts,
                                      family=family_class(link=link()))
                    rslt_irls = mod_irls.fit(method="IRLS", atol=1e-10,
                                             tol_criterion='params')
                # Try with and without starting values.
                for max_start_irls, start_params in ((0, rslt_irls.params),
                                                     (3, None)):
                    # TODO: skip convergence failures for now
                    if max_start_irls > 0 and skip_one:
                        continue
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        mod_gradient = sm.GLM(endog, exog, var_weights=wts,
                                              family=family_class(link=link()))
                        rslt_gradient = mod_gradient.fit(
                            max_start_irls=max_start_irls,
                            start_params=start_params,
                            method=method
                        )
                    assert_allclose(rslt_gradient.params,
                                    rslt_irls.params, rtol=1e-6, atol=5e-5)
                    assert_allclose(rslt_gradient.llf, rslt_irls.llf,
                                    rtol=1e-6, atol=1e-6)
                    assert_allclose(rslt_gradient.scale, rslt_irls.scale,
                                    rtol=1e-6, atol=1e-6)
                    # Get the standard errors using expected information.
                    gradient_bse = rslt_gradient.bse
                    ehess = mod_gradient.hessian(rslt_gradient.params,
                                                 observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
                    assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6,
                                    atol=5e-5)
def get_dummies(x):
    """Return indicator (dummy) columns for the sorted unique values of x.

    The first (smallest) level is dropped to avoid collinearity with a
    constant column.

    Parameters
    ----------
    x : ndarray
        1-d array of categorical codes.

    Returns
    -------
    ndarray
        Float array of shape (len(x), n_levels - 1); column j is 1.0 where
        x equals the (j+1)-th smallest unique value, else 0.0.
    """
    values = np.sort(np.unique(x))
    # Vectorized one-hot comparison replaces the original Python loop;
    # broadcasting (n, 1) against (1, k-1) yields the full indicator matrix.
    return (x[:, None] == values[None, 1:]).astype(float)
class TestRepeatedvsAggregated(CheckWeight):
    """Poisson fit on individual observations should match the fit on
    group-aggregated counts with the group size passed as ``exposure``."""

    @classmethod
    def setup_class(cls):
        np.random.seed(4321)
        n = 100
        p = 5
        exog = np.empty((n, p))
        exog[:, 0] = 1
        exog[:, 1] = np.random.randint(low=-5, high=5, size=n)
        # n // 4: np.repeat requires an integer repeat count; true division
        # produces a float, which errors on modern numpy.
        x = np.repeat(np.array([1, 2, 3, 4]), n // 4)
        exog[:, 2:] = get_dummies(x)
        beta = np.array([-1, 0.1, -0.05, .2, 0.35])
        lin_pred = (exog * beta).sum(axis=1)
        family = sm.families.Poisson
        link = sm.families.links.Log
        endog = gen_endog(lin_pred, family, link)
        mod1 = sm.GLM(endog, exog, family=family(link=link()))
        cls.res1 = mod1.fit()
        # aggregate identical covariate rows: endog sums and row counts
        agg = pd.DataFrame(exog)
        agg['endog'] = endog
        agg_endog = agg.groupby([0, 1, 2, 3, 4]).sum()[['endog']]
        agg_wt = agg.groupby([0, 1, 2, 3, 4]).count()[['endog']]
        agg_exog = np.array(agg_endog.index.tolist())
        agg_wt = agg_wt['endog']
        agg_endog = agg_endog['endog']
        mod2 = sm.GLM(agg_endog, agg_exog, family=family(link=link()),
                      exposure=agg_wt)
        cls.res2 = mod2.fit()
class TestRepeatedvsAverage(CheckWeight):
    """Poisson fit on individual observations should match the fit on
    group-averaged responses with the group size as ``var_weights``."""

    @classmethod
    def setup_class(cls):
        np.random.seed(4321)
        n = 10000
        p = 5
        exog = np.empty((n, p))
        exog[:, 0] = 1
        exog[:, 1] = np.random.randint(low=-5, high=5, size=n)
        # n // 4: np.repeat requires an integer repeat count; true division
        # produces a float, which errors on modern numpy.
        x = np.repeat(np.array([1, 2, 3, 4]), n // 4)
        exog[:, 2:] = get_dummies(x)
        beta = np.array([-1, 0.1, -0.05, .2, 0.35])
        lin_pred = (exog * beta).sum(axis=1)
        family = sm.families.Poisson
        link = sm.families.links.Log
        endog = gen_endog(lin_pred, family, link)
        mod1 = sm.GLM(endog, exog, family=family(link=link()))
        cls.res1 = mod1.fit()
        # aggregate identical covariate rows, then fit the group means
        agg = pd.DataFrame(exog)
        agg['endog'] = endog
        agg_endog = agg.groupby([0, 1, 2, 3, 4]).sum()[['endog']]
        agg_wt = agg.groupby([0, 1, 2, 3, 4]).count()[['endog']]
        agg_exog = np.array(agg_endog.index.tolist())
        agg_wt = agg_wt['endog']
        avg_endog = agg_endog['endog'] / agg_wt
        mod2 = sm.GLM(avg_endog, agg_exog, family=family(link=link()),
                      var_weights=agg_wt)
        cls.res2 = mod2.fit()
class TestTweedieRepeatedvsAggregated(CheckWeight):
    """Tweedie fit on individual observations should match the fit on
    group sums with exposure = group size and var_weights = sqrt(size)."""

    @classmethod
    def setup_class(cls):
        np.random.seed(4321)
        n = 10000
        p = 5
        exog = np.empty((n, p))
        exog[:, 0] = 1
        exog[:, 1] = np.random.randint(low=-5, high=5, size=n)
        # n // 4: np.repeat requires an integer repeat count; true division
        # produces a float, which errors on modern numpy.
        x = np.repeat(np.array([1, 2, 3, 4]), n // 4)
        exog[:, 2:] = get_dummies(x)
        beta = np.array([7, 0.1, -0.05, .2, 0.35])
        lin_pred = (exog * beta).sum(axis=1)
        family = sm.families.Tweedie
        link = sm.families.links.Log
        endog = gen_endog(lin_pred, family, link)
        mod1 = sm.GLM(endog, exog, family=family(link=link(), var_power=1.5))
        # very tight rtol so both fits converge to the same params
        cls.res1 = mod1.fit(rtol=1e-20, atol=0, tol_criterion='params')
        agg = pd.DataFrame(exog)
        agg['endog'] = endog
        agg_endog = agg.groupby([0, 1, 2, 3, 4]).sum()[['endog']]
        agg_wt = agg.groupby([0, 1, 2, 3, 4]).count()[['endog']]
        agg_exog = np.array(agg_endog.index.tolist())
        agg_wt = agg_wt['endog']
        agg_endog = agg_endog['endog']
        mod2 = sm.GLM(agg_endog, agg_exog,
                      family=family(link=link(), var_power=1.5),
                      exposure=agg_wt, var_weights=agg_wt ** 0.5)
        cls.res2 = mod2.fit(rtol=1e-20, atol=0, tol_criterion='params')
class TestTweedieRepeatedvsAverage(CheckWeight):
    """Tweedie fit on individual observations should match the fit on
    group means with the group size as ``var_weights``."""

    @classmethod
    def setup_class(cls):
        np.random.seed(4321)
        n = 1000
        p = 5
        exog = np.empty((n, p))
        exog[:, 0] = 1
        exog[:, 1] = np.random.randint(low=-5, high=5, size=n)
        # n // 4: np.repeat requires an integer repeat count; true division
        # produces a float, which errors on modern numpy.
        x = np.repeat(np.array([1, 2, 3, 4]), n // 4)
        exog[:, 2:] = get_dummies(x)
        beta = np.array([7, 0.1, -0.05, .2, 0.35])
        lin_pred = (exog * beta).sum(axis=1)
        family = sm.families.Tweedie
        link = sm.families.links.Log
        endog = gen_endog(lin_pred, family, link)
        mod1 = sm.GLM(endog, exog, family=family(link=link(), var_power=1.5))
        cls.res1 = mod1.fit(rtol=1e-10, atol=0, tol_criterion='params',
                            scaletype='x2')
        agg = pd.DataFrame(exog)
        agg['endog'] = endog
        agg_endog = agg.groupby([0, 1, 2, 3, 4]).sum()[['endog']]
        agg_wt = agg.groupby([0, 1, 2, 3, 4]).count()[['endog']]
        agg_exog = np.array(agg_endog.index.tolist())
        agg_wt = agg_wt['endog']
        avg_endog = agg_endog['endog'] / agg_wt
        mod2 = sm.GLM(avg_endog, agg_exog,
                      family=family(link=link(), var_power=1.5),
                      var_weights=agg_wt)
        cls.res2 = mod2.fit(rtol=1e-10, atol=0, tol_criterion='params')
class TestBinomial0RepeatedvsAverage(CheckWeight):
    """Binomial (0/1) fit on individual observations should match the fit
    on group-averaged proportions with group size as ``var_weights``."""

    @classmethod
    def setup_class(cls):
        np.random.seed(4321)
        n = 20
        p = 5
        exog = np.empty((n, p))
        exog[:, 0] = 1
        exog[:, 1] = np.random.randint(low=-5, high=5, size=n)
        # n // 4: np.repeat requires an integer repeat count; true division
        # produces a float, which errors on modern numpy.
        x = np.repeat(np.array([1, 2, 3, 4]), n // 4)
        exog[:, 2:] = get_dummies(x)
        beta = np.array([-1, 0.1, -0.05, .2, 0.35])
        lin_pred = (exog * beta).sum(axis=1)
        family = sm.families.Binomial
        link = sm.families.links.Logit
        endog = gen_endog(lin_pred, family, link, binom_version=0)
        mod1 = sm.GLM(endog, exog, family=family(link=link()))
        cls.res1 = mod1.fit(rtol=1e-10, atol=0, tol_criterion='params',
                            scaletype='x2')
        agg = pd.DataFrame(exog)
        agg['endog'] = endog
        agg_endog = agg.groupby([0, 1, 2, 3, 4]).sum()[['endog']]
        agg_wt = agg.groupby([0, 1, 2, 3, 4]).count()[['endog']]
        agg_exog = np.array(agg_endog.index.tolist())
        agg_wt = agg_wt['endog']
        avg_endog = agg_endog['endog'] / agg_wt
        mod2 = sm.GLM(avg_endog, agg_exog,
                      family=family(link=link()),
                      var_weights=agg_wt)
        cls.res2 = mod2.fit(rtol=1e-10, atol=0, tol_criterion='params')
class TestBinomial0RepeatedvsDuplicated(CheckWeight):
    """Binomial (0/1) fit with integer freq_weights should match the fit
    on a dataset where each row is physically duplicated that many times."""

    @classmethod
    def setup_class(cls):
        np.random.seed(4321)
        n = 10000
        p = 5
        exog = np.empty((n, p))
        exog[:, 0] = 1
        exog[:, 1] = np.random.randint(low=-5, high=5, size=n)
        # n // 4: np.repeat requires an integer repeat count; true division
        # produces a float, which errors on modern numpy.
        x = np.repeat(np.array([1, 2, 3, 4]), n // 4)
        exog[:, 2:] = get_dummies(x)
        beta = np.array([-1, 0.1, -0.05, .2, 0.35])
        lin_pred = (exog * beta).sum(axis=1)
        family = sm.families.Binomial
        link = sm.families.links.Logit
        endog = gen_endog(lin_pred, family, link, binom_version=0)
        wt = np.random.randint(1, 5, n)
        mod1 = sm.GLM(endog, exog, family=family(link=link()), freq_weights=wt)
        cls.res1 = mod1.fit()
        # physically replicate each observation wt[i] times
        exog_dup = np.repeat(exog, wt, axis=0)
        endog_dup = np.repeat(endog, wt)
        mod2 = sm.GLM(endog_dup, exog_dup, family=family(link=link()))
        cls.res2 = mod2.fit()
def test_warnings_raised():
    """Cluster-robust covariance combined with freq/var weights should
    emit a SpecificationWarning (the combination is not fully supported)."""
    weights = [1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3]
    # faking aweights by using normalized freq_weights
    weights = np.array(weights)
    gid = np.arange(1, 17 + 1) // 2  # two observations per cluster
    cov_kwds = {'groups': gid, 'use_correction': False}
    with pytest.warns(SpecificationWarning):
        res1 = GLM(cpunish_data.endog, cpunish_data.exog,
                   family=sm.families.Poisson(), freq_weights=weights
                   ).fit(cov_type='cluster', cov_kwds=cov_kwds)
        res1.summary()
    with pytest.warns(SpecificationWarning):
        res1 = GLM(cpunish_data.endog, cpunish_data.exog,
                   family=sm.families.Poisson(), var_weights=weights
                   ).fit(cov_type='cluster', cov_kwds=cov_kwds)
        res1.summary()
# shared fixture for the container-format parametrization below
weights = [1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3]


@pytest.mark.parametrize('formatted', [weights, np.asarray(weights),
                                       pd.Series(weights)],
                         ids=['list', 'ndarray', 'Series'])
def test_weights_different_formats(formatted):
    """Weights should be accepted as a list, ndarray, or pandas Series."""
    check_weights_as_formats(formatted)
def check_weights_as_formats(weights):
    """Assert that, whatever the input container, the fitted results store
    freq/var/i-weights internally as ndarrays."""
    res = GLM(cpunish_data.endog, cpunish_data.exog,
              family=sm.families.Poisson(), freq_weights=weights
              ).fit()
    assert isinstance(res._freq_weights, np.ndarray)
    assert isinstance(res._var_weights, np.ndarray)
    assert isinstance(res._iweights, np.ndarray)
    res = GLM(cpunish_data.endog, cpunish_data.exog,
              family=sm.families.Poisson(), var_weights=weights
              ).fit()
    assert isinstance(res._freq_weights, np.ndarray)
    assert isinstance(res._var_weights, np.ndarray)
    assert isinstance(res._iweights, np.ndarray)
class TestBinomialVsVarWeights(CheckWeight):
    # Binomial fit on success proportions with var_weights = number of
    # trials should match the two-column (success, failure) count fit.
    @classmethod
    def setup_class(cls):
        from statsmodels.datasets.star98 import load
        data = load()
        # ensure writeable arrays before in-place normalization
        data.exog = np.require(data.exog, requirements="W")
        data.endog = np.require(data.endog, requirements="W")
        data.exog /= data.exog.std(0)
        data.exog = add_constant(data.exog, prepend=False)
        cls.res1 = GLM(data.endog, data.exog,
                       family=sm.families.Binomial()).fit()
        # proportion of successes, weighted by trials per observation
        weights = data.endog.sum(axis=1)
        endog2 = data.endog[:, 0] / weights
        cls.res2 = GLM(endog2, data.exog,
                       family=sm.families.Binomial(),
                       var_weights=weights).fit()
class TestGlmGaussianWLS(CheckWeight):
    # Gaussian GLM with identity link and var_weights should reproduce
    # the equivalent WLS fit exactly.
    @classmethod
    def setup_class(cls):
        import statsmodels.formula.api as smf
        data = sm.datasets.cpunish.load_pandas()
        endog = data.endog
        data = data.exog
        data['EXECUTIONS'] = endog
        data['INCOME'] /= 1000
        aweights = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3, 2,
                             1])
        model = smf.glm(
            'EXECUTIONS ~ INCOME + SOUTH - 1',
            data=data,
            family=sm.families.Gaussian(link=sm.families.links.Identity()),
            var_weights=aweights
        )
        wlsmodel = smf.wls(
            'EXECUTIONS ~ INCOME + SOUTH - 1',
            data=data,
            weights=aweights)
        # very tight tolerances so IRLS matches the closed-form WLS solution
        cls.res1 = model.fit(rtol=1e-25, atol=1e-25)
        cls.res2 = wlsmodel.fit()
def test_incompatible_input():
    """GLM must reject weights whose length or dimensionality does not
    match endog, for both freq_weights and var_weights."""
    weights = [1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 1, 1, 2, 2, 2, 3, 3]
    exog = cpunish_data.exog
    endog = cpunish_data.endog
    family = sm.families.Poisson()
    bad_weights = (
        weights[:-1],        # too short
        weights + [3],       # too long
        [weights, weights],  # too many dimensions
    )
    for bad in bad_weights:
        assert_raises(ValueError, GLM, endog, exog, family=family,
                      freq_weights=bad)
        assert_raises(ValueError, GLM, endog, exog, family=family,
                      var_weights=bad)
def test_poisson_residuals():
    """Residuals from an exposure-based Poisson fit and the equivalent
    var_weights fit on rates should agree (response residuals agree up to
    the exposure factor)."""
    nobs, k_exog = 100, 5
    np.random.seed(987125)
    x = np.random.randn(nobs, k_exog - 1)
    x = add_constant(x)
    y_true = x.sum(1) / 2
    y = y_true + 2 * np.random.randn(nobs)
    exposure = 1 + np.arange(nobs) // 4
    yp = np.random.poisson(np.exp(y_true) * exposure)
    yp[10:15] += 10  # inject a few outliers
    fam = sm.families.Poisson()
    mod_poi_e = GLM(yp, x, family=fam, exposure=exposure)
    res_poi_e = mod_poi_e.fit()
    mod_poi_w = GLM(yp / exposure, x, family=fam, var_weights=exposure)
    res_poi_w = mod_poi_w.fit()
    assert_allclose(res_poi_e.resid_response / exposure,
                    res_poi_w.resid_response)
    assert_allclose(res_poi_e.resid_pearson, res_poi_w.resid_pearson)
    assert_allclose(res_poi_e.resid_deviance, res_poi_w.resid_deviance)
    assert_allclose(res_poi_e.resid_anscombe, res_poi_w.resid_anscombe)
    # NOTE(review): this compares unscaled (exposure model) against scaled
    # (weights model) anscombe residuals -- verify the asymmetry is intended.
    assert_allclose(res_poi_e.resid_anscombe_unscaled,
                    res_poi_w.resid_anscombe)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@genmod@tests@test_glm_weights.py@.PATH_END.py
|
{
"filename": "data_structures_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/autograph/operators/data_structures_test.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_structures module."""
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class ListTest(test.TestCase):
  """Tests for autograph's polymorphic list operators, which dispatch over
  Python lists, TensorLists (tf.variant), and TensorArrays."""

  def test_new_list_empty(self):
    l = data_structures.new_list()
    # Can't evaluate an empty list.
    # TODO(mdan): sess.run should allow tf.variant maybe?
    self.assertTrue(isinstance(l, tensor.Tensor))

  def test_new_list_tensor(self):
    l = data_structures.new_list([3, 4, 5])
    self.assertAllEqual(l, [3, 4, 5])

  def test_tf_tensor_list_new(self):
    l = data_structures.tf_tensor_list_new([3, 4, 5])
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [3, 4, 5])

  def test_tf_tensor_list_new_empty(self):
    # Empty lists require explicit dtype and element shape.
    l = data_structures.tf_tensor_list_new([],
                                           element_dtype=dtypes.int32,
                                           element_shape=())
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [])

  def test_tf_tensor_list_new_from_tensor(self):
    l = data_structures.tf_tensor_list_new(constant_op.constant([3, 4, 5]))
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [3, 4, 5])

  @test_util.run_deprecated_v1
  def test_tf_tensor_list_new_illegal_input(self):
    # Mixed element dtypes are rejected.
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_list_new([3, 4.0])
    # TODO(mdan): It might make more sense to type cast in this case.
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_list_new([3, 4], element_dtype=dtypes.float32)
    # Tensor lists do support heterogeneous lists.
    self.assertIsNot(data_structures.tf_tensor_list_new([3, [4, 5]]), None)
    # Declared element shape must match the actual elements.
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_list_new([3, 4], element_shape=(2,))
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_list_new(
          constant_op.constant([1, 2, 3]), element_shape=[1])

  def test_tf_tensor_array_new(self):
    l = data_structures.tf_tensor_array_new([3, 4, 5])
    t = l.stack()
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [3, 4, 5])

  def test_tf_tensor_array_new_illegal_input(self):
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([3, 4.0])
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([3, 4], element_dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([3, [4, 5]])
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([3, 4], element_shape=(2,))
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([], element_shape=(2,))
    # TAs can infer the shape.
    self.assertIsNot(
        data_structures.tf_tensor_array_new([], element_dtype=dtypes.float32),
        None)

  def test_append_tensor_list(self):
    l = data_structures.new_list()
    x = constant_op.constant([1, 2, 3])
    l = data_structures.list_append(l, x)
    t = list_ops.tensor_list_stack(l, element_dtype=x.dtype)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [[1, 2, 3]])

  @test_util.run_deprecated_v1
  def test_append_tensorarray(self):
    l = tensor_array_ops.TensorArray(dtypes.int32, size=0, dynamic_size=True)
    l1 = data_structures.list_append(l, 1)
    l2 = data_structures.list_append(l1, 2)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(l1.stack()), [1])
      self.assertAllEqual(self.evaluate(l2.stack()), [1, 2])

  def test_append_python(self):
    # Plain Python lists are mutated in place and returned.
    l = []
    self.assertAllEqual(data_structures.list_append(l, 1), [1])
    self.assertAllEqual(data_structures.list_append(l, 2), [1, 2])

  def test_pop_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
    opts = data_structures.ListPopOpts(
        element_dtype=initial_list.dtype,
        element_shape=(2,))
    # Popping by index is not supported for tensor lists.
    with self.assertRaises(NotImplementedError):
      data_structures.list_pop(l, 0, opts)
    with self.cached_session() as sess:
      l, x = data_structures.list_pop(l, None, opts)
      self.assertAllEqual(self.evaluate(x), [3, 4])
      t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
      self.assertAllEqual(self.evaluate(t), [[1, 2]])

  def test_pop_python(self):
    l = [1, 2, 3]
    opts = data_structures.ListPopOpts(element_dtype=None, element_shape=())
    # Returns (remaining list, popped element).
    self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1, 2], 3))
    self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1], 2))

  def test_stack_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
    opts = data_structures.ListStackOpts(
        element_dtype=initial_list.dtype, original_call=None)
    with self.cached_session() as sess:
      t = data_structures.list_stack(l, opts)
      self.assertAllEqual(self.evaluate(t), self.evaluate(initial_list))

  @test_util.run_deprecated_v1
  def test_stack_tensor_list_empty(self):
    l = list_ops.empty_tensor_list(
        element_shape=None, element_dtype=dtypes.variant)
    opts = data_structures.ListStackOpts(
        element_dtype=dtypes.int32, original_call=None)
    # TODO(mdan): Allow stacking empty lists if the dtype and shape are known.
    with self.assertRaises(ValueError):
      data_structures.list_stack(l, opts)

  def test_stack_fallback(self):

    def dummy_function(l):
      # Lazy person's mock: just transform the argument in a way in which we
      # can check that this function was indeed called.
      return [x * 2 for x in l]

    opts = data_structures.ListStackOpts(
        element_dtype=None, original_call=dummy_function)
    self.assertAllEqual(data_structures.list_stack([1, 2], opts), [2, 4])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@autograph@operators@data_structures_test.py@.PATH_END.py
|
{
"filename": "axislines.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/mpl_toolkits/axes_grid/axislines.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from mpl_toolkits.axisartist.axislines import *
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@mpl_toolkits@axes_grid@axislines.py@.PATH_END.py
|
{
"filename": "keywords.py",
"repo_name": "hpc4cmb/toast",
"repo_path": "toast_extracted/toast-main/src/libtoast/gtest/googlemock/scripts/generator/cpp/keywords.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# C++ keyword categories. Built with set([...]) rather than literals so the
# Python 2.3 ``sets.Set`` fallback above keeps working.
TYPES = set(['bool', 'char', 'int', 'long', 'short', 'double', 'float',
             'void', 'wchar_t', 'unsigned', 'signed'])
TYPE_MODIFIERS = set(['auto', 'register', 'const', 'inline', 'extern',
                      'static', 'virtual', 'volatile', 'mutable'])
ACCESS = set(['public', 'protected', 'private', 'friend'])
CASTS = set(['static_cast', 'const_cast', 'dynamic_cast',
             'reinterpret_cast'])
OTHERS = set(['true', 'false', 'asm', 'class', 'namespace', 'using',
              'explicit', 'this', 'operator', 'sizeof'])
OTHER_TYPES = set(['new', 'delete', 'typedef', 'struct', 'union', 'enum',
                   'typeid', 'typename', 'template'])
CONTROL = set(['case', 'switch', 'default', 'if', 'else', 'return', 'goto'])
EXCEPTION = set(['try', 'catch', 'throw'])
LOOP = set(['while', 'do', 'for', 'break', 'continue'])

# Union of every category: the full keyword set.
ALL = (TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES |
       CONTROL | EXCEPTION | LOOP)


def IsKeyword(token):
    """Return True if *token* is any C++ keyword."""
    return token in ALL


def IsBuiltinType(token):
    """Return True if *token* names a builtin type or a type modifier."""
    if token in ('virtual', 'inline'):
        # These only apply to methods, they can't be types by themselves.
        return False
    return token in TYPES or token in TYPE_MODIFIERS
|
hpc4cmbREPO_NAMEtoastPATH_START.@toast_extracted@toast-main@src@libtoast@gtest@googlemock@scripts@generator@cpp@keywords.py@.PATH_END.py
|
{
"filename": "variance_relations.ipynb",
"repo_name": "astrolamb/pop_synth",
"repo_path": "pop_synth_extracted/pop_synth-main/notebooks/variance_relations.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%load_ext autoreload
%autoreload 2
# retina quality
%config InlineBackend.figure_format = 'retina'
```
```python
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from astropy.cosmology import FlatLambdaCDM
from astropy.constants import G, c
from astropy import units as u
from scipy.stats import rv_histogram
from scipy.stats import skew, kurtosis, moment
```
```python
cd /Users/lambwg/Documents/Vanderbilt/Research/pop_synth//
```
/Users/lambwg/Documents/Vanderbilt/Research/pop_synth
```python
from scripts import pop_synth
```
```python
rc_params = {#"backend": "pdf",
"axes.labelsize": 10, "lines.markersize": 4,
"font.size": 10, "xtick.top": True, "ytick.right": True,
"xtick.major.size": 6, "xtick.minor.size": 3, "ytick.major.size": 6,
"ytick.minor.size": 3, "xtick.major.width": 0.5, "ytick.major.width": 0.5,
"xtick.minor.width": 0.5, "ytick.minor.width": 0.5,
"lines.markeredgewidth": 1, "axes.linewidth": 1.2, "legend.fontsize": 7,
"xtick.labelsize": 10, "xtick.direction": "in", "xtick.minor.visible": True,
"xtick.major.top": True, "xtick.minor.top": True, "ytick.labelsize": 10,
"ytick.direction": "in", "ytick.minor.visible": True,
"ytick.major.right": True, "ytick.minor.right": True, "savefig.dpi": 400,
"path.simplify": True, "font.family": "serif", "font.serif": "Times",
"text.usetex": False, "figure.figsize": [3.5503666805036667,
2.1942472810764047]}
plt.rcParams.update(rc_params)
```
```python
fyr = 1/(365.24 * 86400)
```
Script to compute statistics on the background for various quantities.
```python
# load some precomputed backgrounds!
h2cf = np.load('./data/hc2f_10k_model1.npy')[:99999]
for ii in range(2, 6):
h2cf = np.dstack((h2cf, np.load(f'./data/hc2f_10k_model{ii}.npy')[:99999]))
```
```python
h2cf.shape
```
(99999, 30, 5)
# functions to transform between quantities
```python
def hc(hc2f, freqs):
return np.sqrt(hc2f)
def omega(hc2f, freqs):
H0 = (70 * u.km/u.s/u.Mpc).to(1/u.s).value
return (2*np.pi**2)/(3*H0**2) * freqs[None, :, None]**2 * hc2f
def S_h(hc2f, freqs):
return hc2f / freqs[None, :, None]
def rho2(hc2f, freqs):
return hc2f / (12 * np.pi**2 * freqs[None, :, None]**3 / freqs[0])
def rho(hc2f, freqs):
return np.sqrt(rho2(hc2f, freqs))
def log10_rho(hc2f, freqs):
return 0.5 * np.log10(rho2(hc2f, freqs))
```
```python
Tspan = 20 * 365.24 * 86400
freqs = np.arange(1, 31) / Tspan
```
## compute means and variances for each of these models
```python
fyr = 1 / (365.24 * 86400)
# Map plot label -> transform applied to h_c^2(f); None keeps h_c^2(f)
# itself. (Fixed a stray '}' in the rho(f) label.)
quantities = {'h_c^2(f)': None, 'Omega(f)': omega,
              'S_h(f) [s]': S_h, 'rho(f) [s]': rho,
              'rho^2(f) [s^2]': rho2, 'h_c(f)': hc,
              'log_{10}(rho(f) [s])': log10_rho}
# Reference power-law amplitude grids (one 20x30 grid per quantity) drawn as
# grey guide lines on the variance panels; the exponent of (f/fyr) matches
# each quantity's expected spectral slope.
amplitudes = np.array([
    10.**np.arange(-70, -50)[:, None] * (freqs[None, :]/fyr),
    10.**np.arange(-25, -5)[:, None] * (freqs[None, :]/fyr)**5.,
    10.**np.arange(-55, -35)[:, None] * (freqs[None, :]/fyr)**-1.,
    10.**np.arange(-26, -6)[:, None] * (freqs[None, :]/fyr)**-(11/6),
    10.**np.arange(-45, -25)[:, None] * (freqs[None, :]/fyr)**-5.,
    10.**np.arange(-40, -20)[:, None] * (freqs[None, :]/fyr)**(7/3),
    10.**np.arange(-15, 5)[:, None] * (freqs[None, :]/fyr)**(11/3),
])
```
```python
amplitudes.shape
```
(7, 20, 30)
```python
figsize_params = {"figure.figsize": [3.5503666805036667*2, 2.1942472810764047*7]}
plt.rcParams.update(figsize_params)
fig, axes = plt.subplots(ncols=2, nrows=7, tight_layout=True,)
# plot mean and variance of each model for each quantity
for q, quant in enumerate(quantities):
axs = axes[q]
if quant==r'h_c^2(f)':
quantity = h2cf
else:
quantity = quantities[quant](h2cf, freqs) # compute the quantity
# checking for infs
if np.isneginf(quantity).any():
quantity[np.isneginf(quantity)] = np.nan
# compute the mean and variance across realisations
mean = np.nanmean(quantity, axis=0)
var = np.nanvar(quantity, axis=0)
fyr = 1/(365.24*86400)
for ii in range(h2cf.shape[-1]): # plot models
axs[0].plot(freqs, mean[:, ii], label=f'Model {ii}')
axs[1].plot(freqs, var[:, ii], label=f'Model {ii}')
#ax.plot(f_mid, 5e-30*(f_mid/fyr)**(-4/3), c='r')
axs[0].set_xscale('log')
axs[0].set_yscale('log') if quant != 'log_{10}(rho(f) [s])' else None
axs[0].set_ylabel(quant)
axs[0].set_xlabel(r'GW Frequency (Hz)')
#ax.plot(f_mid, 1.4e-60 * (f_mid/fyr)**(1), c='k', ls='--', label=r'$f^1$')
axs[1].set_xscale('log', base=10)
axs[1].set_yscale('log', base=10)
axs[1].set_ylabel('Var['+quant+']')
axs[1].set_xlabel(r'GW Frequency (Hz)')
[axs[1].plot(freqs, amplitudes[q, jj], c='grey', alpha=0.25, ls='--')
for jj in range(20)]
```
/var/folders/n5/tlyhfd1j5s7gm9fxj02byc0c0000gn/T/ipykernel_9765/416567136.py:18: RuntimeWarning: divide by zero encountered in log10
return 0.5 * np.log10(rho2(hc2f, freqs))

# compute skewness
```python
figsize_params = {"figure.figsize": [3.5503666805036667, 2.1942472810764047]}
plt.rcParams.update(figsize_params)
#fig, axes = plt.subplots(ncols=1, nrows=1, tight_layout=True,)
moment3 = moment(h2cf, axis=0, nan_policy='omit', moment=3)
for ii in range(h2cf.shape[-1]): # plot models
plt.plot(freqs, moment3[:, ii], label=f'Model {ii}', alpha=0.5)
#ax.plot(f_mid, 5e-30*(f_mid/fyr)**(-4/3), c='r')
#axs[0].set_xscale('log')
#axs[0].set_yscale('log') if quant != 'log_{10}(rho(f) [s])' else None
#axs[0].set_ylabel(quant)
#axs[0].set_xlabel(r'GW Frequency (Hz)')
plt.xscale('log', base=10)
plt.yscale('log', base=10)
plt.xlabel(r'GW Frequency (Hz)')
plt.ylabel('3rd Moment [hc2]')
plt.plot(freqs, 1.4e-83 * (freqs/fyr)**(10/3), c='k', ls='--', label=r'$f^{10/3}$')
#[axs[1].plot(freqs, amplitudes[q, jj], c='grey', alpha=0.25, ls='--')
# for jj in range(20)]
```
[<matplotlib.lines.Line2D at 0x15bbd2f80>]

Model 2, frequency 1/T - third moment is negative
```python
figsize_params = {"figure.figsize": [3.5503666805036667, 2.1942472810764047]}
plt.rcParams.update(figsize_params)
#fig, axes = plt.subplots(ncols=1, nrows=1, tight_layout=True,)
skewness = skew(h2cf, axis=0, nan_policy='omit')
for ii in range(h2cf.shape[-1]): # plot models
plt.plot(freqs, skewness[:, ii], label=f'Model {ii}', alpha=0.5)
#ax.plot(f_mid, 5e-30*(f_mid/fyr)**(-4/3), c='r')
#axs[0].set_xscale('log')
#axs[0].set_yscale('log') if quant != 'log_{10}(rho(f) [s])' else None
#axs[0].set_ylabel(quant)
#axs[0].set_xlabel(r'GW Frequency (Hz)')
plt.xscale('log', base=10)
plt.yscale('log', base=10)
plt.ylabel('Skew[hc2]')
plt.xlabel(r'GW Frequency (Hz)')
plt.plot(freqs, 1.4e1 * (freqs/fyr)**(11/6), c='k', ls='--', label=r'$f^{11/6}$')
#[axs[1].plot(freqs, amplitudes[q, jj], c='grey', alpha=0.25, ls='--')
# for jj in range(20)]
```
[<matplotlib.lines.Line2D at 0x15ccb1300>]

```python
figsize_params = {"figure.figsize": [3.5503666805036667, 2.1942472810764047]}
plt.rcParams.update(figsize_params)
#fig, axes = plt.subplots(ncols=1, nrows=1, tight_layout=True,)
moment4 = moment(h2cf, axis=0, nan_policy='omit', moment=4)
for ii in range(h2cf.shape[-1]): # plot models
plt.plot(freqs, moment4[:, ii], label=f'Model {ii}', alpha=0.5)
#ax.plot(f_mid, 5e-30*(f_mid/fyr)**(-4/3), c='r')
#axs[0].set_xscale('log')
#axs[0].set_yscale('log') if quant != 'log_{10}(rho(f) [s])' else None
#axs[0].set_ylabel(quant)
#axs[0].set_xlabel(r'GW Frequency (Hz)')
plt.xscale('log', base=10)
plt.yscale('log', base=10)
plt.xlabel(r'GW Frequency (Hz)')
plt.ylabel('4th Moment [hc2]')
plt.plot(freqs, 1.4e-115 * (freqs/fyr)**(10/3), c='k', ls='--', label=r'$f^{10/3}$')
#[axs[1].plot(freqs, amplitudes[q, jj], c='grey', alpha=0.25, ls='--')
# for jj in range(20)]
```
[<matplotlib.lines.Line2D at 0x15ce95750>]

```python
figsize_params = {"figure.figsize": [3.5503666805036667, 2.1942472810764047]}
plt.rcParams.update(figsize_params)
#fig, axes = plt.subplots(ncols=1, nrows=1, tight_layout=True,)
kurt = kurtosis(h2cf, axis=0, nan_policy='omit')
for ii in range(h2cf.shape[-1]): # plot models
plt.plot(freqs, kurt[:, ii], label=f'Model {ii}', alpha=0.5)
#ax.plot(f_mid, 5e-30*(f_mid/fyr)**(-4/3), c='r')
#axs[0].set_xscale('log')
#axs[0].set_yscale('log') if quant != 'log_{10}(rho(f) [s])' else None
#axs[0].set_ylabel(quant)
#axs[0].set_xlabel(r'GW Frequency (Hz)')
plt.xscale('log', base=10)
plt.yscale('log', base=10)
plt.ylabel('Kurtosis[hc2]')
plt.xlabel(r'GW Frequency (Hz)')
plt.plot(freqs, 1.4e4 * (freqs/fyr)**(11/3), c='k', ls='--', label=r'$f^{11/3}$')
#[axs[1].plot(freqs, amplitudes[q, jj], c='grey', alpha=0.25, ls='--')
# for jj in range(20)]
```
[<matplotlib.lines.Line2D at 0x15d014580>]

```python
```
|
astrolambREPO_NAMEpop_synthPATH_START.@pop_synth_extracted@pop_synth-main@notebooks@variance_relations.ipynb@.PATH_END.py
|
{
"filename": "test_fBM_delvar_vs_idl.py",
"repo_name": "Astroua/TurbuStat",
"repo_path": "TurbuStat_extracted/TurbuStat-master/Examples/paper_plots/test_fBM_delvar_vs_idl.py",
"type": "Python"
}
|
'''
Compare Turbustat's Delta-variance to the original IDL code.
'''
from turbustat.statistics import DeltaVariance
from turbustat.simulator import make_extended
import astropy.io.fits as fits
from astropy.table import Table
import matplotlib.pyplot as plt
import astropy.units as u
import seaborn as sb
# Paper-figure styling: seaborn "paper" context with a fixed figure size.
font_scale = 1.25
width = 4.2
# Keep the default ratio used in seaborn. This can get overwritten.
height = (4.4 / 6.4) * width
figsize = (width, height)
sb.set_context("paper", font_scale=font_scale,
               rc={"figure.figsize": figsize})
sb.set_palette("colorblind")
col_pal = sb.color_palette()
plt.rcParams['axes.unicode_minus'] = False
size = 256
markers = ['D', 'o']
# Make a single figure example to save space in the paper.
fig = plt.figure(figsize=figsize)
# Generate a 256x256 fractional-Brownian-motion image with power-law
# index 3.0 and run the TurbuStat Delta-variance on it.
slope = 3.0
test_img = fits.PrimaryHDU(make_extended(size, powerlaw=slope))
# The power-law behaviour continues up to ~1/4 of the size
delvar = DeltaVariance(test_img).run(xlow=3 * u.pix,
                                     xhigh=0.25 * size * u.pix,
                                     boundary='wrap')
plt.xscale("log")
plt.yscale("log")
plt.errorbar(delvar.lags.value, delvar.delta_var,
             yerr=delvar.delta_var_error,
             fmt=markers[0], label='TurbuStat')
# Now plot the IDL output
# NOTE(review): "deltavar_3.0.txt" must exist in the working directory;
# it is the output of the original IDL Delta-variance implementation.
tab = Table.read("deltavar_{}.txt".format(slope), format='ascii')
# First is pixel scale, second is delvar, then delvar error, and finally
# the fit values
plt.errorbar(tab['col1'], tab['col2'], yerr=tab['col3'],
             fmt=markers[1], label='IDL')
plt.grid()
plt.legend(frameon=True)
plt.ylabel(r"$\Delta$-Variance")
plt.xlabel("Scales (pix)")
plt.tight_layout()
# Write both raster and vector versions for the paper.
plt.savefig("../figures/delvar_vs_idl.png")
plt.savefig("../figures/delvar_vs_idl.pdf")
plt.close()
|
AstrouaREPO_NAMETurbuStatPATH_START.@TurbuStat_extracted@TurbuStat-master@Examples@paper_plots@test_fBM_delvar_vs_idl.py@.PATH_END.py
|
{
"filename": "Code.py",
"repo_name": "mariapetro/LeHaMoC",
"repo_path": "LeHaMoC_extracted/LeHaMoC-main/Fit_emcee/Code.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 11 19:09:51 2023
@author: mapet
"""
# This is the leptohadronic version of a radiative transfer code LeHaMoC.
# Copyright (C) 2023 S. I. Stathopoulos, M. Petropoulou.
# When using this code, make reference to the following
# publication: Stathopoulos et al., 2023, A&A
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation (check licence).
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import numpy as np
import astropy.units as u
from astropy import constants as const
from astropy.modeling.models import BlackBody
import pandas as pd
import LeHaMoC_f as f # imports functions
#######################
#constants#
#######################
# Physical constants and unit conversions, all in cgs.
G = (const.G).cgs.value            # gravitational constant [cm^3 g^-1 s^-2]
c = (const.c).cgs.value            # speed of light [cm s^-1]
Ro = (const.R_sun).cgs.value       # solar radius [cm]
Mo = (const.M_sun).cgs.value       # solar mass [g]
yr = (u.yr).to(u.s)                # year [s]
kpc = (u.kpc).to(u.cm)             # kiloparsec [cm]
pc = (u.pc).to(u.cm)               # parsec [cm]
m_pr = (u.M_p).to(u.g)             # proton mass [g]
m_el = (u.M_e).to(u.g)             # electron mass [g]
kb = (const.k_B).cgs.value         # Boltzmann constant [erg K^-1]
h = (const.h).cgs.value            # Planck constant [erg s]
q = (const.e.gauss).value          # elementary charge [esu]
sigmaT = (const.sigma_T).cgs.value # Thomson cross section [cm^2]
eV = (u.eV).to(u.erg)              # electronvolt [erg]
# critical (Schwinger) magnetic field strength [G]
B_cr = 2*np.pi*m_el**2*c**3/(h*q)
#########################
def LeMoC(params, fileName):
    """Leptonic radiative-transfer module.

    Evolves the electron distribution and the synchrotron/IC photon fields
    in an expanding spherical blob by solving the kinetic equations with an
    implicit (tridiagonal/Thomas) scheme.

    Parameters
    ----------
    params : sequence of 6 floats
        Free model parameters:
        [log10(R0 [cm]), log10(B0 [G]), log10(gamma_PL_min),
         log10(gamma_PL_max), log10(electron compactness), p_el].
    fileName : str
        Path to a text file of "key = value" lines with the frozen
        (non-fitted) parameters (time grid, flags, external photon fields).

    Returns
    -------
    tuple (nu_tot, Spec_temp_tot)
        Comoving-frame frequency grid [Hz] and (nu L_nu)' [erg s^-1].
    """
    # Free model parameters
    R0 = 10**params[0]
    B0 = 10**params[1]
    g_PL_min = params[2]
    g_PL_max = params[3]
    comp_el = params[4]
    p_el = params[5]
    # pad the Lorentz-factor grid by one decade on each side of the power law
    g_min_el = g_PL_min - 1.
    g_max_el = g_PL_max + 1.
    # read frozen parameters from "key = value" lines
    fileObj = open(fileName)
    params_frozen = {}
    for line in fileObj:
        line=line.strip()
        key_value = line.split("=")
        params_frozen[key_value[0].strip()] = float(key_value[1].strip())
    time_init = float(params_frozen['time_init']) #R0/c
    time_end = float(params_frozen['time_end']) #R0/c
    step_alg = float(params_frozen['step_alg']) #R0/c
    grid_g_el = float(params_frozen['grid_g_el'])
    grid_nu = float(params_frozen['grid_nu'])
    Vexp = float(params_frozen['Vexp'])*c #/c
    m = float(params_frozen['m'])
    inj_flag = float(params_frozen['inj_flag'])
    Ad_l_flag = float(params_frozen['Ad_l_flag'])
    Syn_l_flag = float(params_frozen['Syn_l_flag'])
    Syn_emis_flag = float(params_frozen['Syn_emis_flag'])
    IC_l_flag = float(params_frozen['IC_l_flag'])
    IC_emis_flag = float(params_frozen['IC_emis_flag'])
    SSA_l_flag = float(params_frozen['SSA_l_flag'])
    gg_flag = float(params_frozen['gg_flag'])
    esc_flag = float(params_frozen['esc_flag'])
    BB_flag = float(params_frozen['BB_flag'])
    temperature = 10**float(params_frozen['BB_temperature']) #log
    GB_ext = float(params_frozen['GB_ext'])
    PL_flag = float(params_frozen['PL_flag'])
    dE_dV_ph = float(params_frozen['dE_dV_ph'])
    nu_min_ph = float(params_frozen['nu_min_ph'])
    nu_max_ph = float(params_frozen['nu_max_ph'])
    s_ph = float(params_frozen['s_ph'])
    User_ph = float(params_frozen['User_ph'])
    time_real = time_init
    dt = step_alg*R0/c # time step used for solving the PDE
    day_counter = 0.
    Radius = R0
    # initialization of the electron Lorentz factor array
    grid_size = grid_g_el
    g_el = np.logspace(g_min_el,g_max_el,int(grid_size))
    # NOTE(review): the midpoint list starts at im=0, so the first entry
    # uses g_el[-1] via the negative index -- confirm this is intended.
    g_el_mp = np.array([(g_el[im+1]+g_el[im-1])/2. for im in range(0,len(g_el)-1)])
    dg_el = np.array([((g_el[im+1])-(g_el[im-1]))/2. for im in range(1,len(g_el)-1)]) # delta gamma
    dg_l_el = np.log(g_el[1])-np.log(g_el[0]) # logarithmic delta gamma
    # indices bracketing the injected power law inside the padded grid
    if g_PL_max == g_max_el:
        index_PL_max = -1
    else:
        index_PL_max = min(min(np.where(g_el > 10**g_PL_max)))
    if g_PL_min == 0.:
        index_PL_min = 1
    else:
        index_PL_min = max(max(np.where(g_el < 10**g_PL_min)))
    # initialization of photon frequency arrays
    nu_syn = np.logspace(7.5,np.log10(7.*f.nu_c(g_el[-1],B0))+1.4,int(grid_size/2))
    nu_ic = np.logspace(10.,30.,int(grid_size/2))
    nu_tot = np.logspace(np.log10(nu_syn[0]),np.log10(nu_ic[-1]),int(grid_nu))
    a_gg_f = np.zeros(len(nu_ic))
    #External grey body (GB) photon field (if GB_ext = 1 then photon spectrum is BB with the given temperature)
    #Units (nu,dN/dVdnu)
    if BB_flag == 0.:
        dN_dVdnu_BB = np.zeros(2)
        nu_bb = np.array([nu_syn[0], nu_syn[-1]])
    else:
        bb = BlackBody(temperature*u.K)
        nu_bb = np.array(np.logspace(np.log10(5.879*10**10*temperature)-6., np.log10(5.879*10**10*temperature)+1.5,60)*u.Hz)
        photons_bb = np.array(4.*np.pi/c*bb(nu_bb)/(h*nu_bb))
        GB_norm = np.trapz(photons_bb*h*nu_bb**2.,np.log(nu_bb))/(GB_ext)
        dN_dVdnu_BB = photons_bb/GB_norm
    #External power law (PL) photon field
    #Units (nu,dN/dVdnu)
    if PL_flag == 0.:
        dN_dVdnu_pl = np.zeros(len(nu_tot))
    else:
        nu_ph_ext_sp = np.logspace(nu_min_ph,nu_max_ph,100)
        k_ph = (np.trapz(dE_dV_ph*nu_ph_ext_sp**(-s_ph+1.)))**(-1.)
        nu_ph_ext_sp[-1] = 0.
        dN_dVdnu_pl = 10**np.interp(np.log10(nu_tot),np.log10(nu_ph_ext_sp),np.log10(k_ph*nu_ph_ext_sp**(-s_ph)))
    #External user-defined photon field
    #Units (nu,dN/dVdnu)
    if User_ph == 0.:
        dN_dVdnu_user = np.zeros(len(nu_tot))
    else:
        Photons_spec_user = pd.read_csv('Photons_spec_user.txt',names=('logx','logy'),sep=",")
        nu_user = 10**np.array(Photons_spec_user.logx)
        dN_dVdnu_user_temp = 10**np.array(Photons_spec_user.logy)
        dN_dVdnu_user_temp[-1] = 10**(-160.)
        dN_dVdnu_user = 10**np.interp(np.log10(nu_tot),np.log10(nu_user),np.log10(dN_dVdnu_user_temp))
    #Initialize arrays for particles and photons
    # 10**(-260.) is used as a numerical "zero" so logs never blow up
    N_el = np.zeros(len(g_el)) # Number of electrons & positrons
    Q_ee = np.zeros(len(g_el)-1) # Pair production rate
    el_inj = np.ones(len(g_el))*10**(-260.) # Primary electron injection rate
    el_inj[index_PL_min:index_PL_max] = f.Q_el_Lum(f.Lum_e_inj(10**comp_el,Radius),p_el,g_el[index_PL_min],g_el[index_PL_max])*g_el[index_PL_min:index_PL_max]**(-p_el)
    N_el = el_inj.copy()
    N_el[0] = N_el[-1] = 10**(-260.) # boundary conditions
    photons_syn = np.ones(len(nu_syn))*10**(-260.)
    photons_IC = np.ones(len(nu_ic))*10**(-260.)
    # extend the synchrotron grid up to the top of nu_tot
    photons_syn = np.append(photons_syn,10**(-260.))
    dN_dVdnu_BB = np.append(dN_dVdnu_BB,10**(-260.))
    nu_syn = np.append(nu_syn,nu_tot[-1])
    nu_bb = np.append(nu_bb,nu_tot[-1])
    # frequency-grid midpoints and bin widths (same wrap-around caveat as g_el_mp)
    nu_syn_mp = np.array([(nu_syn[im+1]+nu_syn[im-1])/2. for im in range(0,len(nu_syn)-1)])
    nu_ic_mp = np.array([(nu_ic[im+1]+nu_ic[im-1])/2. for im in range(0,len(nu_ic)-1)])
    dnu = np.array([(nu_syn[nu_ind+1]-nu_syn[nu_ind-1])/2. for nu_ind in range(1,len(nu_syn)-1)])
    dnu_ic = np.array([(nu_ic[nu_ind+1]-nu_ic[nu_ind-1])/2. for nu_ind in range(1,len(nu_ic)-1)])
    # Solution of the PDEs
    while time_real < time_end*R0/c:
        time_real += dt
        Radius = f.R(R0,time_real,time_init,Vexp)
        M_F = f.B(B0,R0,Radius,m)
        a_cr_el = 3.*q*M_F/(4.*np.pi*m_el*c)
        # Calculate total dN/dVdnu
        photons = f.photons_tot(nu_syn,nu_bb,photons_syn,nu_ic,photons_IC,nu_tot,dN_dVdnu_BB*f.Volume(Radius),dN_dVdnu_pl*f.Volume(Radius),dN_dVdnu_user*f.Volume(Radius))/f.Volume(Radius)
        # adiabatic-loss rate terms (lower/upper diagonals of the tridiagonal system)
        if Ad_l_flag == 1.:
            b_ad = Vexp/Radius
            dgdt_ad_el_m = b_ad*np.divide(np.power(g_el_mp[0:-1],1.),dg_el)
            dgdt_ad_el_p = b_ad*np.divide(np.power(g_el_mp[1:],1.),dg_el)
            dnudt_ad_syn_m = b_ad*np.divide(nu_syn_mp[0:-1],dnu)
            dnudt_ad_syn_p = b_ad*np.divide(nu_syn_mp[1:],dnu)
            # NOTE(review): these two IC terms are not symmetric with the
            # synchrotron ones above (nu_ic[:-2] subtraction, nu_ic[:1]
            # broadcast of a single element) -- verify against the paper.
            dnudt_ad_IC_m = b_ad*np.divide(nu_ic_mp[0:-1]-nu_ic[:-2],dnu_ic)
            dnudt_ad_IC_p = b_ad*np.divide(nu_ic[:1],dnu_ic)
        else:
            dgdt_ad_el_m = np.zeros(len(g_el)-2)
            dgdt_ad_el_p = np.zeros(len(g_el)-2)
            dnudt_ad_syn_m = np.zeros(len(nu_syn)-2)
            dnudt_ad_syn_p = np.zeros(len(nu_syn)-2)
            dnudt_ad_IC_m = np.zeros(len(nu_ic)-2)
            dnudt_ad_IC_p = np.zeros(len(nu_ic)-2)
        # synchrotron-loss rate terms
        if Syn_l_flag == 1.:
            b_syn_el = (4./3.)*sigmaT/(8.*np.pi*m_el*c)*M_F**2.
            dgdt_Syn_el_m = b_syn_el*np.divide(np.power(g_el_mp[0:-1],2.),dg_el)
            dgdt_Syn_el_p = b_syn_el*np.divide(np.power(g_el_mp[1:],2.),dg_el)
        else :
            dgdt_Syn_el_m = np.zeros(len(g_el)-2)
            dgdt_Syn_el_p = np.zeros(len(g_el)-2)
        # inverse-Compton loss rate terms (Thomson-like, photon-energy-density based)
        if IC_l_flag == 1.:
            U_ph = f.U_ph_f(g_el,nu_tot,photons,Radius)
            b_Com_el = 4./3.*sigmaT*np.multiply(c,U_ph)/(m_el*c**2.)
            dgdt_IC_el_m = b_Com_el[1:-1]*np.divide(np.power(g_el_mp[0:-1],2.),dg_el)
            dgdt_IC_el_p = b_Com_el[2:]*np.divide(np.power(g_el_mp[1:],2.),dg_el)
        else:
            dgdt_IC_el_m = np.zeros(len(g_el)-2)
            dgdt_IC_el_p = np.zeros(len(g_el)-2)
        # implicit update of the electron distribution (Thomas algorithm)
        V1 = np.zeros(len(g_el)-2)
        V2 = 1.+dt*(c/Radius*esc_flag+dgdt_Syn_el_m+dgdt_IC_el_m+dgdt_ad_el_m)
        V3 = -dt*(dgdt_Syn_el_p+dgdt_IC_el_p+dgdt_ad_el_p)
        if inj_flag == 1.:
            S_ij = N_el[1:-1]+np.multiply(el_inj[1:-1],dt)+np.multiply(Q_ee[1:],dt)*f.Volume(Radius)
        if inj_flag == 0.:
            S_ij = N_el[1:-1]+np.multiply(Q_ee[1:],dt)*f.Volume(Radius)
        N_el[1:-1] = f.thomas(V1, V2, V3, S_ij)
        dN_el_dVdg_el = np.array(N_el/f.Volume(Radius))
        # synchrotron and IC emissivities
        if Syn_emis_flag == 1.:
            Q_Syn_el = np.divide([f.Q_syn_space(dN_el_dVdg_el,M_F,nu_syn[nu_ind],a_cr_el,g_el) for nu_ind in range(len(nu_syn)-1)], f.cor_factor_syn_el(g_el,R0,10**4.,p_el,f.Lum_e_inj(comp_el,R0)))
        else:
            Q_Syn_el = np.zeros(len(nu_syn)-1)
        if IC_emis_flag == 1.:
            Q_IC = [f.Q_IC_space_optimized(dN_el_dVdg_el,g_el,nu_ic[nu_ind],photons,nu_tot,len(nu_tot)-1) for nu_ind in range(0,len(nu_ic)-1)]
        else:
            Q_IC = np.zeros(len(nu_ic)-1)
        # synchrotron self-absorption coefficients (negative absolute values)
        # NOTE(review): len(nu_syn-1) equals len(nu_syn) (the -1 is applied
        # element-wise before len), so these arrays have the full grid
        # length; the [1:-1] slices below rely on exactly that.
        if SSA_l_flag == 1.:
            aSSA_space_syn = [-np.absolute(f.aSSA(dN_el_dVdg_el,M_F,nu_syn[nu_ind],g_el,dg_l_el)) for nu_ind in range(0,len(nu_syn-1))]
            aSSA_space_ic = [-np.absolute(f.aSSA(dN_el_dVdg_el,M_F,nu_ic[nu_ind],g_el,dg_l_el)) for nu_ind in range(0,len(nu_ic-1))]
        else:
            aSSA_space_syn = np.zeros(len(nu_syn-1))
            aSSA_space_ic = np.zeros(len(nu_ic-1))
        # implicit update of the synchrotron photon field
        V1 = np.zeros(len(nu_syn)-2)
        V2 = 1.+dt*(c/Radius+dnudt_ad_syn_m-np.multiply(aSSA_space_syn[1:-1],1)*c)
        V3 = -dt*dnudt_ad_syn_p
        S_ij = photons_syn[1:-1]+4.*np.pi*np.multiply(Q_Syn_el,dt)[1:]*f.Volume(Radius)
        photons_syn[1:-1] = f.thomas(V1, V2, V3, S_ij)
        # implicit update of the IC photon field (includes gamma-gamma absorption)
        V1 = np.zeros(len(nu_ic)-2)
        V2 = 1.+dt*(c/Radius+dnudt_ad_IC_m+np.multiply(a_gg_f[1:-1],c)-np.multiply(aSSA_space_ic[1:-1],1)*c)
        V3 = -dt*dnudt_ad_IC_p
        S_ij = photons_IC[1:-1]+np.multiply(Q_IC,dt)[1:]*f.Volume(Radius)
        photons_IC[1:-1] = f.thomas(V1, V2, V3, S_ij )
        # gamma-gamma opacity and pair injection for the next step
        if gg_flag == 0.:
            a_gg_f = np.zeros(len(nu_ic))
        else:
            a_gg_f = f.a_gg(nu_ic,nu_tot,photons)
            Q_ee = f.Q_ee_f(nu_tot,photons,nu_ic,photons_IC/f.Volume(Radius),g_el,Radius)
        if day_counter<time_real:
            day_counter=day_counter+dt
    # final spectrum: convert photon number density to (nu L_nu)'
    photons = f.photons_tot(nu_syn,nu_bb,photons_syn,nu_ic,photons_IC,nu_tot,dN_dVdnu_BB*f.Volume(Radius),dN_dVdnu_pl*f.Volume(Radius),dN_dVdnu_user*f.Volume(Radius))/f.Volume(Radius)
    Spec_temp_tot = np.multiply(photons,h*nu_tot**2.)*4.*np.pi/3.*Radius**2.*c
    #returns v'[Hz] and (vLv)' [erg s^{-1}] in the comoving frame
    return (nu_tot,Spec_temp_tot)
def LeHaMoC(params, fileName):
    """Leptohadronic radiative-transfer module.

    Same structure as :func:`LeMoC`, with an additional relativistic proton
    population whose only radiative channel here is proton synchrotron
    (see CAUTION below); adiabatic losses are not included in this variant.

    Parameters
    ----------
    params : sequence of 10 floats
        Electron parameters as in LeMoC (params[0:6]) followed by
        [log10(gamma_PL_min_pr), log10(gamma_PL_max_pr),
         log10(proton compactness), p_pr].
    fileName : str
        Path to a "key = value" text file with the frozen parameters.

    Returns
    -------
    tuple (nu_tot, Spec_temp_tot)
        Comoving-frame frequency grid [Hz] and (nu L_nu)' [erg s^-1].
    """
    ## CAUTION ##
    # this hadronic module has only proton synchrotron included, no adiabatic losses
    # Free model parameters
    R0 = 10**params[0]
    B0 = 10**params[1]
    g_PL_min = params[2]
    g_PL_max = params[3]
    comp_el = params[4]
    p_el = params[5]
    g_PL_min_pr = params[6]
    g_PL_max_pr = params[7]
    comp_pr = params[8]
    p_pr = params[9]
    # grid padding (narrower than LeMoC's one-decade padding)
    g_min_el = g_PL_min - 0.5
    g_max_el = g_PL_max + 0.5
    g_min_pr = g_PL_min_pr - 0.1
    g_max_pr = g_PL_max_pr + 0.5
    # read frozen parameters from "key = value" lines
    fileObj = open(fileName)
    params_frozen = {}
    for line in fileObj:
        line=line.strip()
        key_value = line.split("=")
        params_frozen[key_value[0].strip()] = float(key_value[1].strip())
    time_init = float(params_frozen['time_init']) #R0/c
    time_end = float(params_frozen['time_end']) #R0/c
    step_alg = float(params_frozen['step_alg']) #R0/c
    grid_g_el = float(params_frozen['grid_g_el'])
    grid_g_pr = float(params_frozen['grid_g_pr'])
    grid_nu = float(params_frozen['grid_nu'])
    Vexp = float(params_frozen['Vexp'])*c #/c
    m = float(params_frozen['m'])
    inj_flag = float(params_frozen['inj_flag'])
    Syn_l_flag = float(params_frozen['Syn_l_flag'])
    Syn_emis_flag = float(params_frozen['Syn_emis_flag'])
    IC_l_flag = float(params_frozen['IC_l_flag'])
    IC_emis_flag = float(params_frozen['IC_emis_flag'])
    SSA_l_flag = float(params_frozen['SSA_l_flag'])
    gg_flag = float(params_frozen['gg_flag'])
    esc_flag = float(params_frozen['esc_flag'])
    BB_flag = float(params_frozen['BB_flag'])
    temperature = 10**float(params_frozen['BB_temperature']) #log
    GB_ext = float(params_frozen['GB_ext'])
    PL_flag = float(params_frozen['PL_flag'])
    dE_dV_ph = float(params_frozen['dE_dV_ph'])
    nu_min_ph = float(params_frozen['nu_min_ph'])
    nu_max_ph = float(params_frozen['nu_max_ph'])
    s_ph = float(params_frozen['s_ph'])
    User_ph = float(params_frozen['User_ph'])
    time_real = time_init
    dt = step_alg*R0/c # time step used for solving the PDE
    day_counter = 0.
    Radius = R0
    # initialization of the electron Lorentz factor array
    grid_size = grid_g_el
    g_el = np.logspace(g_min_el,g_max_el,int(grid_size))
    # NOTE(review): midpoint list starts at im=0, so the first entry uses
    # g_el[-1] via the negative index -- same caveat as in LeMoC.
    g_el_mp = np.array([(g_el[im+1]+g_el[im-1])/2. for im in range(0,len(g_el)-1)])
    dg_el = np.array([((g_el[im+1])-(g_el[im-1]))/2. for im in range(1,len(g_el)-1)]) # delta gamma
    dg_l_el = np.log(g_el[1])-np.log(g_el[0]) # logarithmic delta gamma
    # initialization of the proton Lorentz factor array
    grid_size_pr = grid_g_pr
    g_pr = np.logspace(g_min_pr,g_max_pr,int(grid_size_pr))
    g_pr_mp = np.array([(g_pr[im+1]+g_pr[im-1])/2. for im in range(0,len(g_pr)-1)])
    dg_pr = np.array([((g_pr[im+1])-(g_pr[im-1]))/2. for im in range(1,len(g_pr)-1)])
    # indices bracketing the injected electron power law
    if g_PL_max == g_max_el:
        index_PL_max = -1
    else:
        index_PL_max = min(min(np.where(g_el > 10**g_PL_max)))
    if g_PL_min == 0.:
        index_PL_min = 1
    else:
        index_PL_min = max(max(np.where(g_el < 10**g_PL_min)))
    # indices bracketing the injected proton power law
    if g_PL_max_pr == g_max_pr:
        index_PL_max_pr = -1
    else:
        index_PL_max_pr = min(min(np.where(g_pr > 10**g_PL_max_pr)))
    if g_PL_min_pr == 0.:
        index_PL_min_pr = 1
    else:
        index_PL_min_pr = max(max(np.where(g_pr < 10**g_PL_min_pr)))
    # initialization of photon frequency arrays
    nu_syn = np.logspace(7.5,np.log10(7.*f.nu_c(g_el[-1],B0))+1.4,int(grid_size/2))
    nu_ic = np.logspace(10.,30.,int(grid_size/2))
    nu_tot = np.logspace(np.log10(nu_syn[0]),np.log10(nu_ic[-1]),int(grid_nu))
    a_gg_f = np.zeros(len(nu_ic))
    #External grey body (GB) photon field (if GB_ext = 1 then photon spectrum is BB with the given temperature)
    #Units (nu,dN/dVdnu)
    if BB_flag == 0.:
        dN_dVdnu_BB = np.zeros(2)
        nu_bb = np.array([nu_syn[0], nu_syn[-1]])
    else:
        bb = BlackBody(temperature*u.K)
        nu_bb = np.array(np.logspace(np.log10(5.879*10**10*temperature)-6., np.log10(5.879*10**10*temperature)+1.5,60)*u.Hz)
        photons_bb = np.array(4.*np.pi/c*bb(nu_bb)/(h*nu_bb))
        GB_norm = np.trapz(photons_bb*h*nu_bb**2.,np.log(nu_bb))/(GB_ext)
        dN_dVdnu_BB = photons_bb/GB_norm
    #External power law (PL) photon field
    #Units (nu,dN/dVdnu)
    if PL_flag == 0.:
        dN_dVdnu_pl = np.zeros(len(nu_tot))
    else:
        nu_ph_ext_sp = np.logspace(nu_min_ph,nu_max_ph,100)
        k_ph = (np.trapz(dE_dV_ph*nu_ph_ext_sp**(-s_ph+1.)))**(-1.)
        nu_ph_ext_sp[-1] = 0.
        dN_dVdnu_pl = 10**np.interp(np.log10(nu_tot),np.log10(nu_ph_ext_sp),np.log10(k_ph*nu_ph_ext_sp**(-s_ph)))
    #External user-defined photon field
    #Units (nu,dN/dVdnu)
    if User_ph == 0.:
        dN_dVdnu_user = np.zeros(len(nu_tot))
    else:
        Photons_spec_user = pd.read_csv('Photons_spec_user.txt',names=('logx','logy'),sep=",")
        nu_user = 10**np.array(Photons_spec_user.logx)
        dN_dVdnu_user_temp = 10**np.array(Photons_spec_user.logy)
        dN_dVdnu_user_temp[-1] = 10**(-160.)
        dN_dVdnu_user = 10**np.interp(np.log10(nu_tot),np.log10(nu_user),np.log10(dN_dVdnu_user_temp))
    #Initialize arrays for particles and photons
    # 10**(-260.) is used as a numerical "zero" so logs never blow up
    N_el = np.zeros(len(g_el)) # Number of electrons & positrons
    Q_ee = np.zeros(len(g_el)-1) # Pair production rate
    el_inj = np.ones(len(g_el))*10**(-260.) # Primary electron injection rate
    el_inj[index_PL_min:index_PL_max] = f.Q_el_Lum(f.Lum_e_inj(10**comp_el,Radius),p_el,g_el[index_PL_min],g_el[index_PL_max])*g_el[index_PL_min:index_PL_max]**(-p_el)
    N_el = el_inj.copy()
    N_el[0] = N_el[-1] = 10**(-260.) # boundary conditions
    #Initialize arrays for particles and photons
    N_pr = np.zeros(len(g_pr)) # Number of electrons & positrons
    pr_inj = np.ones(len(g_pr))*10**(-260.) # Primary electron injection rate
    pr_inj[index_PL_min_pr:index_PL_max_pr] = f.Q_pr_Lum(f.Lum_pr_inj(10**comp_pr,Radius),p_pr,g_pr[index_PL_min_pr],g_pr[index_PL_max_pr])*g_pr[index_PL_min_pr:index_PL_max_pr]**(-p_pr)
    N_pr = pr_inj.copy()
    N_pr[0] = N_pr[-1] = 10**(-260.) # boundary conditions
    photons_syn = np.ones(len(nu_syn))*10**(-260.)
    photons_IC = np.ones(len(nu_ic))*10**(-260.)
    # extend the synchrotron grid up to the top of nu_tot
    photons_syn = np.append(photons_syn,10**(-260.))
    dN_dVdnu_BB = np.append(dN_dVdnu_BB,10**(-260.))
    nu_syn = np.append(nu_syn,nu_tot[-1])
    nu_bb = np.append(nu_bb,nu_tot[-1])
    # Solution of the PDEs
    while time_real < time_end*R0/c:
        time_real += dt
        Radius = f.R(R0,time_real,time_init,Vexp)
        M_F = f.B(B0,R0,Radius,m)
        a_cr_el = 3.*q*M_F/(4.*np.pi*m_el*c)
        a_cr_pr = 3.*q*M_F/(4.*np.pi*m_pr*c)
        # Calculate total dN/dVdnu
        photons = f.photons_tot(nu_syn,nu_bb,photons_syn,nu_ic,photons_IC,nu_tot,dN_dVdnu_BB*f.Volume(Radius),dN_dVdnu_pl*f.Volume(Radius),dN_dVdnu_user*f.Volume(Radius))/f.Volume(Radius)
        # synchrotron-loss rate terms; proton rate scales as (m_el/m_pr)^3
        if Syn_l_flag == 1.:
            b_syn_el = (4./3.)*sigmaT/(8.*np.pi*m_el*c)*M_F**2.
            b_syn_pr = b_syn_el*(m_el/m_pr)**3.
            dgdt_Syn_el_m = b_syn_el*np.divide(np.power(g_el_mp[0:-1],2.),dg_el)
            dgdt_Syn_el_p = b_syn_el*np.divide(np.power(g_el_mp[1:],2.),dg_el)
            dgdt_Syn_pr_m = b_syn_pr*np.divide(np.power(g_pr_mp[0:-1],2.),dg_pr)
            dgdt_Syn_pr_p = b_syn_pr*np.divide(np.power(g_pr_mp[1:],2.),dg_pr)
        else :
            dgdt_Syn_el_m = np.zeros(len(g_el)-2)
            dgdt_Syn_el_p = np.zeros(len(g_el)-2)
            dgdt_Syn_pr_m = np.zeros(len(g_pr)-2)
            dgdt_Syn_pr_p = np.zeros(len(g_pr)-2)
        # inverse-Compton loss rate terms (electrons only)
        if IC_l_flag == 1.:
            U_ph = f.U_ph_f(g_el,nu_tot,photons,Radius)
            b_Com_el = 4./3.*sigmaT*np.multiply(c,U_ph)/(m_el*c**2.)
            dgdt_IC_el_m = b_Com_el[1:-1]*np.divide(np.power(g_el_mp[0:-1],2.),dg_el)
            dgdt_IC_el_p = b_Com_el[2:]*np.divide(np.power(g_el_mp[1:],2.),dg_el)
        else:
            dgdt_IC_el_m = np.zeros(len(g_el)-2)
            dgdt_IC_el_p = np.zeros(len(g_el)-2)
        # implicit update of the electron distribution (Thomas algorithm)
        V1 = np.zeros(len(g_el)-2)
        V2 = 1.+dt*(c/Radius*esc_flag+dgdt_Syn_el_m+dgdt_IC_el_m)
        V3 = -dt*(dgdt_Syn_el_p+dgdt_IC_el_p)
        if inj_flag == 1.:
            S_ij = N_el[1:-1]+np.multiply(el_inj[1:-1],dt)+np.multiply(Q_ee[1:],dt)*f.Volume(Radius)
        if inj_flag == 0.:
            S_ij = N_el[1:-1]+np.multiply(Q_ee[1:],dt)*f.Volume(Radius)
        N_el[1:-1] = f.thomas(V1, V2, V3, S_ij)
        dN_el_dVdg_el = np.array(N_el/f.Volume(Radius))
        # implicit update of the proton distribution
        V1 = np.zeros(len(g_pr)-2)
        V2 = 1.+dt*(c/Radius*esc_flag+dgdt_Syn_pr_m)
        V3 = -dt*(dgdt_Syn_pr_p)
        if inj_flag == 1.:
            S_ij = N_pr[1:-1]+np.multiply(pr_inj[1:-1],dt)
        if inj_flag == 0.:
            S_ij = N_pr[1:-1]
        N_pr[1:-1] = f.thomas(V1, V2, V3, S_ij)
        dN_pr_dVdg_pr = np.array(N_pr/f.Volume(Radius))
        # synchrotron (electron + proton) and IC emissivities
        if Syn_emis_flag == 1.:
            Q_Syn_el = np.divide([f.Q_syn_space(dN_el_dVdg_el,M_F,nu_syn[nu_ind],a_cr_el,g_el) for nu_ind in range(len(nu_syn)-1)], f.cor_factor_syn_el(g_el,R0,10**4.,p_el,f.Lum_e_inj(comp_el,R0)))
            Q_Syn_pr = [f.Q_syn_space_pr(dN_pr_dVdg_pr,M_F,nu_syn[nu_ind],a_cr_pr,g_pr) for nu_ind in range(len(nu_syn)-1)]
        else:
            Q_Syn_el = np.zeros(len(nu_syn)-1)
            Q_Syn_pr = np.zeros(len(nu_syn)-1)
        if IC_emis_flag == 1.:
            Q_IC = [f.Q_IC_space_optimized(dN_el_dVdg_el,g_el,nu_ic[nu_ind],photons,nu_tot,len(nu_tot)-1) for nu_ind in range(0,len(nu_ic)-1)]
        else:
            Q_IC = np.zeros(len(nu_ic)-1)
        # synchrotron self-absorption coefficients
        # NOTE(review): len(nu_syn-1) equals len(nu_syn) (the -1 is applied
        # element-wise before len); the [1:-1] slices below rely on that.
        if SSA_l_flag == 1.:
            aSSA_space_syn = [-np.absolute(f.aSSA(dN_el_dVdg_el,M_F,nu_syn[nu_ind],g_el,dg_l_el)) for nu_ind in range(0,len(nu_syn-1))]
            aSSA_space_ic = [-np.absolute(f.aSSA(dN_el_dVdg_el,M_F,nu_ic[nu_ind],g_el,dg_l_el)) for nu_ind in range(0,len(nu_ic-1))]
        else:
            aSSA_space_syn = np.zeros(len(nu_syn-1))
            aSSA_space_ic = np.zeros(len(nu_ic-1))
        # implicit update of the synchrotron photon field (no adiabatic terms here)
        V1 = np.zeros(len(nu_syn)-2)
        V2 = 1.+dt*(c/Radius-np.multiply(aSSA_space_syn[1:-1],1)*c)
        V3 = np.zeros(len(nu_syn)-2)
        S_ij = photons_syn[1:-1]+4.*np.pi*np.multiply(Q_Syn_el,dt)[1:]*f.Volume(Radius)+4.*np.pi*np.multiply(Q_Syn_pr,dt)[1:]*f.Volume(Radius)
        photons_syn[1:-1] = f.thomas(V1, V2, V3, S_ij)
        # implicit update of the IC photon field (includes gamma-gamma absorption)
        V1 = np.zeros(len(nu_ic)-2)
        V2 = 1.+dt*(c/Radius+np.multiply(a_gg_f[1:-1],c)-np.multiply(aSSA_space_ic[1:-1],1)*c)
        V3 = np.zeros(len(nu_ic)-2)
        S_ij = photons_IC[1:-1]+np.multiply(Q_IC,dt)[1:]*f.Volume(Radius)
        photons_IC[1:-1] = f.thomas(V1, V2, V3, S_ij)
        # gamma-gamma opacity and pair injection for the next step
        if gg_flag == 0.:
            a_gg_f = np.zeros(len(nu_ic))
        else:
            a_gg_f = f.a_gg(nu_ic,nu_tot,photons)
            Q_ee = f.Q_ee_f(nu_tot,photons,nu_ic,photons_IC/f.Volume(Radius),g_el,Radius)
        if day_counter<time_real:
            day_counter=day_counter+dt
    # final spectrum: convert photon number density to (nu L_nu)'
    photons = f.photons_tot(nu_syn,nu_bb,photons_syn,nu_ic,photons_IC,nu_tot,dN_dVdnu_BB*f.Volume(Radius),dN_dVdnu_pl*f.Volume(Radius),dN_dVdnu_user*f.Volume(Radius))/f.Volume(Radius)
    Spec_temp_tot = np.multiply(photons,h*nu_tot**2.)*4.*np.pi/3.*Radius**2.*c
    #returns nu'[Hz] and (nuL_nu)' [erg s^{-1}] in the comoving frame
    return (nu_tot,Spec_temp_tot)
|
mariapetroREPO_NAMELeHaMoCPATH_START.@LeHaMoC_extracted@LeHaMoC-main@Fit_emcee@Code.py@.PATH_END.py
|
{
"filename": "exolib.py",
"repo_name": "ExoSim/ExoSimPublic",
"repo_path": "ExoSimPublic_extracted/ExoSimPublic-master/exosim/lib/exolib.py",
"type": "Python"
}
|
import numpy as np
from scipy import signal
from scipy import interpolate
from scipy.integrate import cumtrapz
import scipy.special
import quantities as pq
#import sys, os, pyfits
import sys
import os
import astropy.io.fits as pyfits
def exosim_error(error_msg):
  """Report a fatal ExoSim error on stderr and terminate the process."""
  # NOTE(review): the process exits with status 0 even though this is an
  # error path -- confirm whether a non-zero exit code was intended.
  message = "Error code: {:s}\n".format(error_msg)
  sys.stderr.write(message)
  sys.exit(0)
def exosim_msg(msg, prefix = None):
  """Write a status message to stdout and flush immediately.

  Parameters
  ----------
  msg : str
    Message text. Written verbatim when *prefix* is None.
  prefix : str, optional
    When given, the message is emitted as "[prefix]: msg" with a trailing
    newline appended (no newline is added otherwise).
  """
  # Fix: compare to None with 'is', not '=='; identity is the correct idiom
  # and avoids surprises with objects that override __eq__.
  if prefix is not None:
    msg = "[%s]: %s\n" % (prefix, msg)
  sys.stdout.write(msg)
  sys.stdout.flush()
def logbin(x, a, R, xmin=None, xmax=None):
  """Rebin the samples a(x) onto a logarithmic grid of resolving power R.

  Parameters
  ----------
  x : ndarray
    Sample coordinates (need not be sorted).
  a : ndarray
    Sample values, same length as x.
  R : scalar
    Resolving power: bin edges grow geometrically by (1 + 1/R).
  xmin, xmax : scalar, optional
    Range to bin over; clipped to [x.min(), x.max()].

  Returns
  -------
  bins, mean : ndarray
    Bin centres and the mean of *a* in each bin (last bin dropped).
  """
  if xmin is None or xmin < x.min(): xmin = x.min()
  if xmax is None or xmax > x.max(): xmax = x.max()
  idx = np.argsort(x)
  xp = x[idx]
  yp = a[idx]
  # Oversample the input on a fine linear grid (~20 points per bin width
  # at xmax) before averaging into the logarithmic bins.
  delta_x = xmax/R
  N = 20.0 * (xmax-xmin)/delta_x
  # int(N): modern NumPy requires an integer sample count (a float raised
  # a deprecation warning and now a TypeError).
  _x = np.linspace(xmin, xmax, int(N))
  _y = np.interp(_x, xp, yp)
  # np.int was removed in NumPy 1.24; the builtin int is the replacement.
  nbins = 1+np.round( np.log(xmax/xmin)/np.log(1.0 + 1.0/R) ).astype(int)
  bins = xmin*np.power( (1.0+1.0/R), np.arange(nbins))
  slices = np.searchsorted(_x, bins)
  counts = np.ediff1d(slices)
  mean = np.add.reduceat(_y, slices[:-1])/(counts)
  bins = 0.5*(bins[:-1] + bins[1:])
  return bins[:-1], mean[:-1]
def rebin(x, xp, fp):
  ''' Resample a function fp(xp) over the new grid x, rebinning if necessary,
    otherwise interpolates
    Parameters
    ----------
    x	: 	array like
	New coordinates
    fp 	:	array like
	y-coordinates to be resampled
    xp 	:	array like
	x-coordinates at which fp are sampled
    Returns
    -------
    out	: 	array like
	new samples
  '''
  # Both grids must carry the same physical units (quantities package).
  if (x.units != xp.units):
    print(x.units, xp.units)
    exosim_error('Units mismatch')
  # Restrict the input to a 10%-padded window around the target grid.
  idx = np.where(np.logical_and(xp > 0.9*x.min(), xp < 1.1*x.max()))[0]
  xp = xp[idx]
  fp = fp[idx]
  if np.diff(xp).min() < np.diff(x).min():
    # Binning!
    # Flux-conserving rebin: integrate fp, then difference the cumulative
    # integral across each output bin of width `delta`.
    c = cumtrapz(fp, x=xp)*fp.units*xp.units
    xpc = xp[1:]
    delta = np.gradient(x)
    new_c_1 = np.interp(x-0.5*delta, xpc, c,
                        left=0.0, right=0.0)*c.units
    new_c_2 = np.interp(x+0.5*delta, xpc, c,
                        left=0.0, right=0.0)*c.units
    new_f = (new_c_2 - new_c_1)/delta
  else:
    # Interpolate !
    new_f = np.interp(x, xp, fp, left=0.0, right=0.0)*fp.units
  # NOTE(review): scipy.integrate.cumtrapz is deprecated/removed in recent
  # SciPy in favour of cumulative_trapezoid -- the import at the top of the
  # file should be updated together with this call.
  '''
  import matplotlib.pyplot as plt
  plt.plot(xp, fp, '-')
  plt.plot(x, new_f, '.-')
  plt.show()
  # check
  print np.trapz(new_f, x)
  idx = np.where(np.logical_and(xp>= x.min(), xp <= x.max()))
  print np.trapz(fp[idx], xp[idx])
  '''
  return x, new_f
def fast_convolution(im, delta_im, ker, delta_ker):
  """ fast_convolution.
    Convolve an image with a kernel. Image and kernel can be sampled on different
      grids defined.

    Parameters
    __________
    im : 			array like
				the image to be convolved
    delta_im :			scalar
				image sampling interval
    ker : 			array like
				the convolution kernel
    delta_ker :			scalar
				Kernel sampling interval
    Returns
    -------
    spectrum:			array like
				the image convolved with the kernel.
  """
  fc_debug = False
  # Strategy: FFT both arrays on their own grids, resample the kernel
  # transform onto the image's frequency grid with a spline, multiply,
  # and transform back (convolution theorem on mismatched samplings).
  # Fourier transform the kernel
  kerf = (np.fft.rfft2(ker))
  ker_k = [ np.fft.fftfreq(ker.shape[0], d=delta_ker),
	    np.fft.rfftfreq(ker.shape[1], d=delta_ker) ]
  ker_k[0] = np.fft.fftshift(ker_k[0])
  kerf     = np.fft.fftshift(kerf, axes=0)
  # Fourier transform the image
  imf  = np.fft.rfft2(im)
  im_k = [ np.fft.fftfreq(im.shape[0], d=delta_im),
	   np.fft.rfftfreq(im.shape[1], d=delta_im) ]
  im_k[0] = np.fft.fftshift(im_k[0])
  imf     = np.fft.fftshift(imf, axes=0)
  # Interpolate kernel (real and imaginary parts splined separately)
  kerf_r = interpolate.RectBivariateSpline(ker_k[0], ker_k[1],
					   kerf.real)
  kerf_i = interpolate.RectBivariateSpline(ker_k[0], ker_k[1],
					   kerf.imag)
  # NOTE(review): the debug branches below reference an undefined name
  # `pl` (presumably matplotlib.pylab); they only run when fc_debug is
  # flipped to True by hand.
  if (fc_debug):
    pl.plot(ker_k[0], kerf[:, 0].real,'.r')
    pl.plot(ker_k[0], kerf[:, 0].imag,'.g')
    pl.plot(im_k[0], kerf_r(im_k[0], im_k[1])[:, 0],'-r')
    pl.plot(im_k[0], np.abs(imf[:, 0]),'-b')
  # Convolve
  imf = imf * (kerf_r(im_k[0], im_k[1]) + 1j*kerf_i(im_k[0], im_k[1]))
  if (fc_debug):
    pl.plot(im_k[0], np.abs(imf[:, 0]),'-y')
  imf = np.fft.ifftshift(imf, axes=0)
  # (delta_ker/delta_im)**2 rescales for the different grid areas
  return np.fft.irfft2(imf)*(delta_ker/delta_im)**2
def planck(wl, T):
  """ Planck function.

    Parameters
    __________
      wl : 			array
				wavelength [micron]
      T : 			scalar
				Temperature [K]
				Spot temperature [K]
    Returns
    -------
      spectrum:			array
				The Planck spectrum  [W m^-2 sr^-2 micron^-1]
  """
  # Radiation constants in the units of the returned spectrum
  # (a = 2hc^2, b = hc/k_B expressed for wavelength in micron).
  a = np.float64(1.191042768e8)*pq.um**5 *pq.W/ pq.m**2 /pq.sr/pq.um
  b = np.float64(14387.7516)*1*pq.um * 1*pq.K
  try:
    x = b/(wl*T)
    bb = a/wl**5 / (np.exp(x) - 1.0)
  except ArithmeticError:
    # Overflow/underflow in the exponential: return an all-zero spectrum.
    bb = np.zeros(np.size(wl))
  return bb
def sed_propagation(sed, transmission, emissivity=None, temperature = None):
  """Propagate an SED through an optical element.

  Multiplies the SED by the element transmission and, when both an
  emissivity and a temperature are supplied (truthy), adds the element's
  thermal (Planck) emission. The input *sed* is modified in place and
  also returned.
  """
  attenuated = sed.sed * transmission.sed
  sed.sed = attenuated
  if emissivity and temperature:
    sed.sed = attenuated + emissivity.sed * planck(sed.wl, temperature)
  return sed
def Psf_Interp(zfile, delta_pix, WavRange):
  '''
  PSF Interpolation

  Parameters
  ----------
  zfile : string
    input PSF fits file (one HDU per wavelength, 'WAV' header keyword)
  delta_pix : scalar
    Sampling interval in micron
  WavRange : ndarray
    array of wavelengths in micron

  Returns
  -------
  PSF interpolated data cube. Area normalised to unity.
  '''
  hdulist = pyfits.open(zfile)
  NAXIS1, NAXIS2 = hdulist[0].header['NAXIS1'], hdulist[0].header['NAXIS2']
  # physical extent of the input PSF in micron (CDELT* is the plate scale)
  in_ph_size_x, in_ph_size_y = hdulist[0].header['CDELT1']*NAXIS1, hdulist[0].header['CDELT2']*NAXIS2
  # Fix: np.int was removed in NumPy 1.24; the builtin int keeps the same
  # truncation behaviour after np.trunc.
  num_pix_x, num_pix_y = np.trunc(in_ph_size_x/delta_pix).astype(int), np.trunc(in_ph_size_y/delta_pix).astype(int)
  inwl   = np.zeros(len(hdulist))
  redata = np.zeros((num_pix_y, num_pix_x, len(hdulist)))
  # resample every HDU onto the common (num_pix_y, num_pix_x) grid
  xin  = np.linspace(-1.0, 1.0, NAXIS1)
  yin  = np.linspace(-1.0, 1.0, NAXIS2)
  xout = np.linspace(-1.0, 1.0, num_pix_x)
  yout = np.linspace(-1.0, 1.0, num_pix_y)
  for i, hdu in enumerate(hdulist):
    inwl[i] = np.float64(hdu.header['WAV'])
    f = interpolate.RectBivariateSpline(xin, yin, hdu.data)
    redata[..., i] = f(xout, yout)
    redata[..., i] /= redata[..., i].sum()  # normalise each PSF to unit area
  # interpolate the cube along wavelength onto the requested grid;
  # wavelengths outside the tabulated range are filled with zeros
  return interpolate.interp1d(inwl, redata, axis=2, bounds_error=False, fill_value=0.0, kind='quadratic')(WavRange)
def Psf(wl, fnum, delta, nzero = 4, shape='airy'):
    '''
    Calculates an Airy Point Spread Function arranged as a data-cube. The
    spatial axes are 0 and 1. The wavelength axis is 2. Each PSF area is
    normalised to unity.

    Parameters
    ----------
    wl : ndarray [physical dimension of length]
        array of wavelengths at which to calculate the PSF
    fnum : scalar
        Instrument f/number
    delta : scalar
        the increment to use [physical units of length]
    nzero : scalar
        number of Airy zeros. The PSF kernel will be this big. Calculated at wl.max()
    shape : string
        Set to 'airy' for a Airy function, to 'gauss' for a Gaussian

    Returns
    -------
    Psf : ndarray
        three dimensional array. Each PSF normalised to unity
    '''
    delta = delta.rescale(wl.units)
    # Kernel half-width in pixels: distance to the nzero-th Airy zero at the
    # longest wavelength. np.int was removed in NumPy 1.24 -- use int.
    Nx = np.round(scipy.special.jn_zeros(1, nzero)[-1]/(2.0*np.pi) * fnum*wl.max()/delta).astype(int)
    Ny = Nx = int(Nx)
    if shape=='airy':
        # the tiny additive term guards against division by zero for wl == 0
        d = 1.0/(fnum*(1.0e-30*delta.units+wl))
    elif shape=='gauss':
        sigma = 1.029*fnum*(1.0e-30*delta.units+wl)/np.sqrt(8.0*np.log(2.0))
        d = 0.5/sigma**2
    x = np.linspace(-Nx*delta.item(), Nx*delta.item(), 2*Nx+1)*delta.units
    y = np.linspace(-Ny*delta.item(), Ny*delta.item(), 2*Ny+1)*delta.units
    yy, xx = np.meshgrid(y, x)
    if shape=='airy':
        arg = 1.0e-20+np.pi*np.multiply.outer(np.sqrt(yy**2 + xx**2), d)
        arg = arg.magnitude
        img = (scipy.special.j1(arg)/arg)**2
    elif shape=='gauss':
        arg = np.multiply.outer(yy**2 + xx**2, d)
        img = np.exp(-arg)
    # normalise each monochromatic slice to unit area
    norm = img.sum(axis=0).sum(axis=0)
    img /= norm
    # zero-out slices at non-physical (non-positive) wavelengths.
    # BUGFIX: np.where returns a tuple, which is always truthy; test the
    # index array's size instead of the tuple itself.
    idx = np.where(wl <= 0.0)
    if idx[0].size:
        img[..., idx] *= 0.0
    return img
def PixelResponseFunction(psf_shape, osf, delta, lx = 1.7*pq.um, ipd = 0.0*pq.um):
    '''
    Estimate the detector pixel response function with the prescription of
    Barron et al., PASP, 119, 466-475 (2007).

    Parameters
    ----------
    psf_shape : tuple of scalars
        (ny, nx) defining the PSF size
    osf : scalar
        number of samples in each resolving element. The
        final shape of the response function would be shape*osf
    delta : scalar
        Physical size of the detector pixel in microns
    lx : scalar
        diffusion length in microns
    ipd : scalar
        distance between two adjacent detector pixels
        in microns

    Returns
    -------
    kernel : 2D array
        the kernel image
    kernel_delta : scalar
        the kernel sampling interval in microns
    '''
    # np.int was removed in NumPy 1.24; isinstance also accepts int
    # subclasses, unlike the old type() comparison
    if not isinstance(osf, int):
        osf = int(osf)
    lx += 1e-8*pq.um  # to avoid problems if user pass lx=0
    lx = lx.rescale(delta.units)
    kernel = np.zeros( (psf_shape[0]*osf, psf_shape[1]*osf) )
    kernel_delta = delta/osf
    yc, xc = np.array(kernel.shape) // 2
    # sample coordinates relative to the kernel centre
    yy = (np.arange(kernel.shape[0]) - yc) * kernel_delta
    xx = (np.arange(kernel.shape[1]) - xc) * kernel_delta
    # samples falling in the inter-pixel gap get zero response
    mask_xx = np.where(np.abs(xx) > 0.5*(delta-ipd))
    mask_yy = np.where(np.abs(yy) > 0.5*(delta-ipd))
    xx, yy = np.meshgrid(xx, yy)
    # separable arctan/tanh diffusion profile of the cited prescription
    kernel = np.arctan(np.tanh( 0.5*( 0.5*delta - xx)/lx )) - \
        np.arctan(np.tanh( 0.5*(-0.5*delta - xx)/lx ))
    kernel*= np.arctan(np.tanh( 0.5*( 0.5*delta - yy)/lx )) - \
        np.arctan(np.tanh( 0.5*(-0.5*delta - yy)/lx ))
    kernel[mask_yy, ...] = 0.0
    kernel[..., mask_xx] = 0.0
    # Normalise the kernel such that the pixel has QE=1
    kernel *= osf**2/kernel.sum()
    # shift the kernel centre to element (0, 0) -- presumably for FFT-based
    # convolution downstream; TODO confirm against the caller
    kernel = np.roll(kernel, -xc, axis=1)
    kernel = np.roll(kernel, -yc, axis=0)
    return kernel, kernel_delta
def pointing_add_scan(pointing_timeline, scan_throw_arcsec, frame_time, frame_osf, exposure_time):
    ''' Superimpose saw-tooth scan mode to a pointing jitter timeline.
    The period of a scan is equal exposure time

    Parameters
    ----------
    pointing_timeline: Quantities Array
        Pointing timeline (yaw/pitch) in deg
    scan_throw_arcsec: scalar
        The scan throw in units of arcseconds.
    frame_time: scalar
        detector frame time in units of time
    frame_osf: scalar
        Frame oversampling factor
    exposure_time: scalar
        time for one exposure containing set of NDRs

    Returns
    -------
    pointing_timeline: Quantities Array
        Pointing timeline updated with saw-tooth scan pointing superimposed.
    '''
    n_samples = len(pointing_timeline)
    # sample times for each jitter sample; start at 1 so t=0 is skipped
    times = (np.arange(1, n_samples + 1) * frame_time / frame_osf).magnitude
    period = exposure_time.magnitude
    # fractional phase within the current exposure: ramps linearly 0 -> 1
    phase = times % period / period
    # an exact multiple of the period wraps to 0; map it to the ramp top (1)
    phase = np.where(phase > 0.0, phase, 1.0)
    scan = phase * scan_throw_arcsec.rescale(pq.deg)
    return pointing_timeline + scan
def pointing_jitter(jitter_file, total_observing_time, frame_time, rms=None):
    ''' Estimate pointing jitter timeline

    Parameters
    ----------
    jitter_file: string
        filename containing CSV columns with
        frequency [Hz], Yaw PSD [deg**2/Hz], Pitch [deg**2/Hz]
        If only two columns given, then it is assumed that
        the second column is the PSD of radial displacements
    total_observing_time: scalar
        total observing time in units of time
    frame_time: scalar
        detector frame time in units of time
    rms: scalar
        renormalisation rms in units of angle

    Returns
    -------
    yaw_jit: jitter timeline in units of degrees
    pitch_jit: jitter timeline in units of degrees
    osf: number of additional samples in each frame_time needed to capture
        the jitter spectral information
    '''
    data = np.genfromtxt(jitter_file, delimiter=',')
    psd_freq = data[..., 0]
    if data.shape[1] > 2:
        psd_yaw = data[..., 1]
        psd_pitch = data[..., 2]
    else:
        # single (radial) PSD: split the power evenly between yaw and pitch
        psd_yaw = data[..., 1]/2
        psd_pitch = psd_yaw
    # each frame needs to be split such that jitter is Nyquist sampled
    jitter_sps = 2.0*psd_freq.max()/pq.s
    # np.int was removed in NumPy 1.24 -- use the builtin int instead
    osf = np.ceil(frame_time.rescale(pq.s) * jitter_sps).take(0).astype(int)
    if osf < 1: osf = 1
    number_of_samples_ = int(osf*np.ceil(total_observing_time/frame_time).simplified)-10
    # round up to a power of two for the FFT (rfft of n real samples has
    # n/2 + 1 frequency bins)
    number_of_samples = int(2**np.ceil(np.log2(number_of_samples_))/2+1)
    freq_nyq = osf*0.5/frame_time.rescale(pq.s).magnitude
    freq = np.linspace(0.0, freq_nyq, number_of_samples)
    # Linear interpolation of the PSD onto the FFT grid: preserves RMS
    npsd_yaw = 1.0e-30+interpolate.interp1d(psd_freq, psd_yaw, fill_value=0.0,
                                            kind='linear', bounds_error=False)(freq)
    npsd_pitch = 1.0e-30+interpolate.interp1d(psd_freq, psd_pitch, fill_value=0.0,
                                              kind='linear', bounds_error=False)(freq)
    # convert PSD to per-bin RMS amplitudes
    npsd_yaw = np.sqrt(npsd_yaw * np.gradient(freq))
    npsd_pitch = np.sqrt(npsd_pitch * np.gradient(freq))
    # draw random Fourier coefficients with the prescribed amplitudes
    yaw_jit_re = np.random.normal(scale=npsd_yaw/2.0)
    yaw_jit_im = np.random.normal(scale=npsd_yaw/2.0)
    pitch_jit_re = np.random.normal(scale=npsd_pitch/2.0)
    pitch_jit_im = np.random.normal(scale=npsd_pitch/2.0)
    # DC and Nyquist bins of a real signal's spectrum must be real
    pitch_jit_im[0] = pitch_jit_im[-1] = 0.0
    yaw_jit_im[0] = yaw_jit_im[-1] = 0.0
    norm = 2*(number_of_samples-1)
    yaw_jit = norm*np.fft.irfft(yaw_jit_re + 1j * yaw_jit_im)*pq.deg
    pitch_jit = norm*np.fft.irfft(pitch_jit_re + 1j * pitch_jit_im)*pq.deg
    if rms:
        # renormalise the combined (yaw, pitch) variance to the requested rms.
        # BUGFIX: the yaw term previously indexed a single sample
        # (yaw_jit[number_of_samples_]) instead of slicing the timeline like
        # the pitch term does.
        norm = (rms**2/(yaw_jit[:number_of_samples_].var() +
                        pitch_jit[:number_of_samples_].var())).simplified
        yaw_jit *= np.sqrt(norm)
        pitch_jit *= np.sqrt(norm)
    return yaw_jit, pitch_jit, osf
def jitter__remove(jitter_file, obs_time, ndr_time, rms,mode=2):
    """
    Jitter

    Simulates 2 d jitter (pointing variation) as a timeline of positional offsets
    Uses Herschel jitter timeline as reference.

    Inputs:
    1) jitter_file : reference file with jitter observation timeline
    2) obs_time : total observation time in seconds
    3) ndr_time : time for one non-destructive read
    4) rms : rms of the desired jitter in degrees
    5) mode = 1 : one PSD used to obtain jitter in 2 dimensions
       mode = 2 : two PSDs used - one for each orthogonal dimension

    Output:
    1) RA jitter time series (xt) in degrees
    2) Dec jitter time series (yt) in degrees
    3) time: timegrid of the jitter
    4) ndr_osf : number of oversamples per ndr

    Raises:
    ValueError : if mode is neither 1 nor 2

    Requirements:
    1) jitter_file : file with known jitter timelines (e.g. Herschel data)
    """
    f = pyfits.open(jitter_file)  # open a FITS file
    tbdata = f[1].data  # assume the first extension is a table
    time = tbdata['Time']
    ra_t = tbdata['RA']
    dec_t = tbdata['Dec']
    if len(time)%2 != 0:  # timeline needs to be even number for real fft
        time = time[0:-1]
        ra_t = ra_t[0:-1]
        dec_t = dec_t[0:-1]
    # recentre and project RA displacements onto the tangent plane
    ra_t = (ra_t-np.mean(ra_t))*(np.cos(dec_t*np.pi/180))
    dec_t = dec_t-np.mean(dec_t)
    # np.float/np.int were removed in NumPy 1.24 -- use the builtins instead
    N = float(len(time))  # N = no of samples in reference file
    dt = time[1]-time[0]  # dt = sampling period of reference jitter
    fs = 1.0/dt  # fs = sampling rate = 2B (B = Nyquist frequency)
    df = fs/N
    freq = np.fft.rfftfreq(int(N), d=dt)
    ndr_osf = int(1+ ndr_time/dt)  # minimum integer number of osf to nyquist sample jitter
    new_dt = float(ndr_time/ndr_osf)  # sampling period for ndr_osf (new_dt is always < dt)
    ndr_number = int(obs_time/ndr_time)  # total number of full NDRs in the total obs time
    N0 = ndr_number *ndr_osf  # total samples for this number of NDRs
    x = int(1+np.log(N0)/np.log(2))
    new_N = 2**x  # total samples recalculated to be a power of 2 for fourier transforms
    # therefore final number of NDRs > number to fit in obs_time
    # final timeline length > obs_time
    new_freq = np.fft.rfftfreq(int(new_N), d=new_dt)
    new_fs = 1/new_dt  # new_fs must be > fs to ensure nyquist sampling of all frequencies in original power spectrum
    # one-sided PSDs of the reference timelines
    ra_f = np.fft.rfft(ra_t)/N
    dec_f = np.fft.rfft(dec_t)/N
    ra_psd = 2*abs(ra_f)**2/df
    dec_psd = 2*abs(dec_f)**2/df
    ra_psd[0] = 1e-30
    ra_psd[-1] = ra_psd[-1]/2
    dec_psd[0] = 1e-30
    dec_psd[-1] = dec_psd[-1]/2
    # smooth the psd
    window_size = 10
    window = np.ones(int(window_size))/float(window_size)
    ra_psd = np.convolve(ra_psd, window, 'same')
    dec_psd = np.convolve(dec_psd, window, 'same')
    # resample and 'zero pad' to new frequency grid and N
    f1 = interpolate.interp1d(freq, ra_psd, bounds_error=False, fill_value=1e-30, kind='linear')
    f2 = interpolate.interp1d(freq, dec_psd, bounds_error=False, fill_value=1e-30, kind='linear')
    new_ra_psd = f1(new_freq)
    new_dec_psd = f2(new_freq)
    psd = [new_ra_psd, new_dec_psd]
    N = new_N
    fs = new_fs
    df = fs/N
    if mode == 1:
        # radial displacement + angle representation
        # (note: still needs work on how to scale this)
        comb_t = np.sqrt(ra_t**2 + dec_t**2)
        comb_f = np.fft.rfft(comb_t)/N
        comb_psd = 2*abs(comb_f)**2/df
        comb_psd[0] = 1e-30
        comb_psd[-1] = comb_psd[-1]/2
        phi_t = np.arctan2(dec_t, ra_t)
        phi_f = np.fft.rfft(phi_t)/N
        phi_psd = 2*abs(phi_f)**2/df
        phi_psd[0] = 1e-30
        phi_psd[-1] = phi_psd[-1]/2
        psd1 = psd[0]
        ps1 = psd1*df/2
        amp = np.random.normal(0, np.sqrt(ps1), len(ps1))
        phi = np.random.uniform(0, np.pi, len(ps1))
        zf = amp*np.exp(phi*1j)
        zt = np.fft.irfft(zf)*N
        psd2 = psd[1]
        ps2 = psd2*df/2
        amp = np.random.normal(0, np.sqrt(ps2), len(ps2))
        phi = np.random.uniform(0, np.pi, len(ps2))
        anglef = amp*np.exp(phi*1j)
        anglet = np.fft.irfft(anglef)*N
        xt = zt*np.cos(anglet)
        yt = zt*np.sin(anglet)
    elif mode == 2:
        # independent realisations for each orthogonal axis
        psd1 = psd[0]
        ps = psd1*df/2
        amp = np.random.normal(0, np.sqrt(ps), len(ps))
        phi = np.random.uniform(0, np.pi, len(ps))
        xf = amp*np.exp(phi*1j)
        xt = np.fft.irfft(xf)*N
        psd2 = psd[1]
        ps = psd2*df/2
        amp = np.random.normal(0, np.sqrt(ps), len(ps))
        phi = np.random.uniform(0, np.pi, len(ps))
        yf = amp*np.exp(phi*1j)
        yt = np.fft.irfft(yf)*N
    else:
        # BUGFIX: previously only printed a message and then crashed with a
        # NameError on the undefined xt; fail fast with a clear exception.
        raise ValueError("error: maximum of 2 psds can be used")
    xt = xt[0:N0]  # only need N0 samples to cover the observation
    yt = yt[0:N0]
    # rescale each axis so the combined 2D jitter has the requested rms
    xt = xt*(rms*(np.sqrt(2)/2)/np.std(xt))
    yt = yt*(rms*(np.sqrt(2)/2)/np.std(yt))
    time = np.arange(0, (N0)*new_dt, new_dt)  # timegrid for final jitter timelines
    return xt, yt, time, ndr_osf
def oversample(fp, ad_osf):
xin = np.linspace(0,fp.shape[1]-1,fp.shape[1])
yin = np.linspace(0,fp.shape[0]-1,fp.shape[0])
x_step = abs(xin[1]) - abs(xin[0])
y_step = abs(yin[1]) - abs(yin[0])
# calculates the new step sizes for new grid
x_step_new = np.float(x_step/ad_osf)
y_step_new = np.float(y_step/ad_osf)
# new grid must start with an exact offset to produce correct number of new points
x_start = -x_step_new * np.float((ad_osf-1)/2)
y_start = -y_step_new * np.float((ad_osf-1)/2)
# new grid points- with correct start, end and spacing
xout = np.arange(x_start, x_start + x_step_new*fp.shape[1]*ad_osf, x_step_new)
yout = np.arange(y_start, y_start + y_step_new*fp.shape[0]*ad_osf, y_step_new)
# interpolate fp onto new grid
fn = interpolate.RectBivariateSpline(yin,xin, fp)
new_fp = fn(yout,xout)
return new_fp
def animate(Data):
    """Display a 3D data cube as an animated wireframe, one frame per slice.

    Parameters
    ----------
    Data : 3D array
        image cube; axis 2 is the frame (time) axis
        e.g. Data = data['channel']['SWIR'].timeline

    Notes
    -----
    Blocks while rendering; intended for interactive inspection only.
    """
    from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    wframe = None
    for j in range(Data.shape[2]):
        oldcol = wframe
        X = np.arange(0, Data.shape[1])
        Y = np.arange(0, Data.shape[0])
        X, Y = np.meshgrid(X, Y)
        Z = Data[..., j]
        wframe = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
        # Remove the previous frame's wireframe before drawing the next.
        # BUGFIX: use Artist.remove() -- ax.collections.remove(...) fails on
        # Matplotlib >= 3.7, where the Axes artist lists are immutable views.
        if oldcol is not None:
            oldcol.remove()
        plt.pause(.01)
|
ExoSimREPO_NAMEExoSimPublicPATH_START.@ExoSimPublic_extracted@ExoSimPublic-master@exosim@lib@exolib.py@.PATH_END.py
|
{
"filename": "fsclean.py",
"repo_name": "mrbell/fsclean",
"repo_path": "fsclean_extracted/fsclean-master/fsclean.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
fsclean.py
Faraday synthesis using 3D CLEAN deconvolution
*******************************************************************************
Copyright 2012 Michael Bell
This file is part of fsclean.
fsclean is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
fsclean is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with fsclean. If not, see <http://www.gnu.org/licenses/>.
*******************************************************************************
Software for imaging the Faraday spectrum, i.e. the 3D distribution of
polarized intensity as a function of Faraday depth and position on the sky.
Imaging is performed using the Faraday synthesis technique (see Bell and
Ensslin (2012) for details) and therefore inherently in 3D. Deconvolution is
carried out using a 3D CLEAN algorithm.
Data is read from MeasurementSet files of the type used by CASA. Images are
written to FITS files.
"""
# leave here while testing
#import sys
#sys.path.append('/home/mrbell/Work/code/')
import os
import datetime
import numpy as np
from optparse import OptionParser
from FSCData import FSCData, FSCPolData
from FSCImage import FSCImage
from FSCleanPM import FSCleanPM
import pyrat.Messenger as M
from pyrat.RAImage import GridParams
from pyrat.RAData import read_data_from_ms
from pyrat.Constants import *
VERSION = '0.1.0.0'
class FSCoords(object):
    """
    Container for the image-plane grid definition and the gridding
    parameters, both derived from a parset.

    Attributes:
        grid_def: list of (delta, n) pairs for the (phi, dec, ra) axes.
        grid_params: GridParams instance holding the gridding settings.
    """
    def __init__(self, pm):
        """
        Takes a parset manager class instance and computes all coordinate
        values required.

        Args:
            pm: FSCleanPM parset manager instance with the parset dict
                already loaded.

        Returns:
            Nothing.
        """
        # Requested image plane grid parameters
        dphi = pm.parset['dphi']
        # cell sizes are given in arcsec in the parset; convert to radians
        dra = pm.parset['cellsize'] * ARCSEC_TO_RAD
        ddec = pm.parset['cellsize'] * ARCSEC_TO_RAD
        nphi = pm.parset['nphi']
        nra = pm.parset['nra']
        ndec = pm.parset['ndec']
        self.grid_def = [(dphi, nphi), (ddec, ndec), (dra, nra)]
        # Gridding parameters
        self.grid_params = GridParams(pm.parset['grid_alpha'],
                                      pm.parset['grid_w'])
class FSClean(object):
"""
"""
# CLEAN algorithm types
CLARK = 0
HOGBOM = 1
def __init__(self, pm=None):
"""
Initialize the FSClean imager. Sets common values and inits the
messenger class.
Args:
parset: FSCleanPM class instance, with the parset dict already
loaded
Returns:
Nothing
"""
if pm is None:
self.m = M.Messenger()
return
self.pm = pm
# Internal verbosity level convention
# -1 off
# 0 Warnings, Errors, Headers, Basic information
# 1 Useful diagnostic information for most users
# 2 Detailed diagnostic information for users
# 3 Developer diagnostics
# 4 Temporary print statements
self.m = M.Messenger(self.pm.parset['verbosity'], use_color=True,
use_structure=False, add_timestamp=True)
self.coords = FSCoords(pm)
self.K = 1. # Normalization constant for data to image transform
self.Kinv = 1. # Normalization constant for image to data transform
self._scratch_files = []
self.do_clean = False
if self.pm.parset['niter'] > 0:
self.do_clean = True
def condense_cc_list(self, cc):
"""
Desc.
Args:
Returns:
"""
tcc = list(cc)
cc_redux = []
while len(tcc) > 0:
temp = tcc.pop()
topop = []
for i in range(len(tcc)):
if temp[0] == tcc[i][0] and temp[1] == tcc[i][1] \
and temp[2] == tcc[i][2]:
temp2 = tcc[i]
topop.append(i)
temp[3] += temp2[3]
cc_redux.append(temp)
topop.sort(reverse=True)
for i in range(len(topop)):
tcc.pop(topop[i])
return cc_redux
def run(self, msfn, outfn_base):
"""
The main routine.
Args:
msfn: MeasurementSet file name
outfn_base: base name for the output files
Returns:
"""
clean_funcs = {self.CLARK: self.clark_clean,
self.HOGBOM: self.hogbom_clean}
self.ofnbase = outfn_base
self.sfnbase = os.path.join(self.pm.parset['scratch_dir'],
os.path.basename(outfn_base))
self.m.set_logfile(self.ofnbase + ".log")
imfn = self.ofnbase + '_im.hdf5'
dbfn = self.ofnbase + '_db.hdf5'
self.m.header1("Starting FSCLEAN v." + VERSION)
self.m.message("Requested parameters:", 0)
if self.m.verbosity >= 0:
self.pm.print_parset()
self.m.message("Initializing data objects...", 1)
weights = FSCData(self.sfnbase + '_weights.hdf5',
np.dtype('float64'),
m=self.m)
self.register_scratch_files([weights.fn, weights.coords.fn])
vis = FSCPolData(self.sfnbase + '_vis.hdf5',
coords=weights.coords,
m=self.m)
self.register_scratch_files([vis.Q.fn, vis.U.fn])
im = FSCImage(imfn, np.dtype('complex128'),
self.coords.grid_def, self.coords.grid_params, m=self.m)
self.register_scratch_files([im.osim.fn, im.fourier_grid.fn])
db = FSCImage(dbfn, np.dtype('complex128'),
self.coords.grid_def, self.coords.grid_params, m=self.m,
grid_dtype=np.dtype('float64'))
self.register_scratch_files([db.osim.fn, db.fourier_grid.fn])
read_data_from_ms(msfn, vis, weights, self.pm.parset['ms_column'],
'WEIGHT', mode='pol')
self.m.message("Setting l2min", 3)
l2min = vis.coords.get_min_freq()
l2min = l2min - \
self.coords.grid_params.W * 2. * im.fourier_grid.deltas[0]
# if l2min < 0.:
# l2min = 0.
im.fourier_grid.set_mincoord(0, l2min)
db.fourier_grid.set_mincoord(0, l2min)
self.m.message("l2min set to " + str(l2min) + " m^2", 3)
self.m.message("Setting normalization...", 2)
self.set_normalizations(weights, db)
self.m.message("K is " + str(self.K), 3)
self.m.message("Kinv is " + str(self.Kinv), 3)
# Hand off data to the appropriate CLEAN function
#[cc, resim] = self.clark_clean(vis, weights, im, db)
[cc, resim] = clean_funcs[self.pm.parset['clean_type']](vis, weights,
im, db)
# Write images and CC list to disk
self.m.message("Writing metadata to image files.", 2)
self.write_image_metadata(im, msfn)
self.write_image_metadata(db, msfn)
if resim is not None:
self.write_image_metadata(resim, msfn)
self.write_cclist(self.ofnbase + "_cclist.txt", cc)
self.clean_up()
def register_scratch_files(self, fns):
"""
Desc.
Args:
Returns:
"""
if isinstance(fns, str):
self._scratch_files.append(fns)
elif np.iterable(fns):
self._scratch_files += fns
else:
raise TypeError('Cannot add the requested data type to ' +
'the scratch files list.')
def write_cclist(self, fn, cc):
"""
Desc.
Args:
Returns:
"""
if not np.iterable(cc):
self.m.warn("No clean components to write.")
return
self.m.message("Writing CLEAN component list to file.", 2)
f = open(fn, 'w')
for i in range(len(cc)):
c = cc[i]
line = "%d %d %d %f %f\n" % (c[0], c[1], c[2],
c[3].real, c[3].imag)
f.write(line)
f.close()
def set_normalizations(self, weights, db):
"""
Set normalizations for transform and inverse transforms. Resets the
class attributes K and Kinv.
Args:
weights: An FSCData object containing the weights for each
visibility.
db: An FSCImage object that will be used to store the dirty beam.
Returns:
Nothing.
"""
if self.do_clean and self.pm.parset['clean_type'] == self.CLARK:
self.m.message("Computing Kinv", 3)
temp = FSCData(self.sfnbase + '_tempdata.hdf5',
coords=weights.coords,
dtype=np.dtype('float64'), m=self.m,
template=weights)
self.register_scratch_files(temp.fn)
[nphi, ndec, nra] = db.im.shape
db.multiplywith(0.)
db.im[nphi / 2, ndec / 2, nra / 2] = complex(1., 0.)
db.transform(temp)
nchan = 0.
val = 0.
for i in temp.iterkeys():
freqs = temp.coords.get_freqs(i)
for j in range(len(freqs)):
nchan += 1
val += np.mean(abs(temp.get_records(i, j)))
self.Kinv = 1. / (val / nchan)
self.m.message("Computing K", 3)
weights.transform(db)
self.K = 1. / db.find_max(abs)
def clean_up(self):
"""
Deletes all temp files created during imaging.
Args:
Returns:
"""
if self.pm.parset['clear_scratch'] != 0:
self.m.header2("Removing scratch files...")
for i in range(len(self._scratch_files)):
os.remove(self._scratch_files[i])
def write_image_metadata(self, im, msfn):
"""
Writes important parameters to the header of the image.
Args:
im: FSCImage object pointing to the file to write metadata to.
msfn: Filename of the MeasurementSet containing the visibility data
that has been imaged.
Returns:
Nothing.
"""
from pyrap import tables
if tables.tableexists(os.path.join(msfn, 'SOURCE')):
pt = tables.table(os.path.join(msfn, 'SOURCE'))
crval = pt.getcol('DIRECTION')[0]
source_name = pt.getcol('NAME')[0]
else:
crval = [0., 0.]
source_name = ''
im.f.attrs['origin'] = 'fsclean v. ' + VERSION
im.f.attrs['date'] = str(datetime.date.today())
im.f.attrs['source'] = source_name
im.f.attrs['axis_desc'] = ['Faraday Depth', 'Dec.', 'RA']
im.f.attrs['axis_units'] = ['rad/m/m', 'rad', 'rad']
im.f.attrs['image_units'] = 'Jy/beam'
im.f.attrs['crpix'] = [im.im.shape[0] / 2, im.im.shape[1] / 2,
im.im.shape[2] / 2]
im.f.attrs['cdelt'] = [im.deltas[0], im.deltas[1], im.deltas[2]]
im.f.attrs['crval'] = [0., crval[1], crval[0]]
def hogbom_clean(self, vis, weights, im, db):
"""
The 3D Hogbom CLEAN algorithm.
Args:
vis: An FSCPolData object containing the stokes Q and U visibility
data to be cleaned.
weights: An FSCData object containing the weights for each
visibility.
im: An FSCImage object in which to store the cleaned image.
db: An FSCImage object in which to store the dirty beam image.
Should have 2x the image volume of im.
Returns:
A list of clean components. Each list entry contains a tuple of
model locations (phi, dec, ra) defined in pixels, and the model
flux.
"""
self.m.header2("Started the Hogbom CLEAN routine...")
# Works fine
self.m.message("Computing dirty image...", 1)
vis.multiplywith(weights) # vis now contains the weighted data!
vis.transform(im)
# im will contain the residual image going forward
im.multiplywith(self.K)
if not self.do_clean:
return None, None
# contains the oversized dirty beam (8x larger than normal one by vol)
self.m.message("Computing oversized dirty beam...", 1)
grid_def = self.coords.grid_def
big_grid_def = list()
for i in range(3):
big_grid_def.append((grid_def[i][0], grid_def[i][1] * 2))
bigdb = FSCImage(self.sfnbase + '_bigdb.hdf5', np.dtype('complex128'),
big_grid_def, self.coords.grid_params,
m=self.m, grid_dtype=np.dtype('float64'))
self.register_scratch_files([bigdb.fn, bigdb.osim.fn,
bigdb.fourier_grid.fn])
weights.transform(bigdb)
Kbig = 1. / bigdb.find_max(abs)
bigdb.multiplywith(Kbig)
# object for holding the model point source image
pointim = FSCImage(self.sfnbase + '_pointim.hdf5',
np.dtype('complex128'),
self.coords.grid_def, self.coords.grid_params,
m=self.m)
self.register_scratch_files([pointim.fn, pointim.osim.fn,
pointim.fourier_grid.fn])
[nphi, nm, nl] = im.im.shape
cutoff = self.pm.parset['cutoff']
niter = self.pm.parset['niter']
gain = self.pm.parset['gain']
# will contain the shifted beam image scaled by the residual peak value
tdb = FSCImage(self.sfnbase + '_tdb.hdf5', np.dtype('complex128'),
self.coords.grid_def, self.coords.grid_params,
m=self.m, grid_dtype=np.dtype('float64'))
self.register_scratch_files([tdb.fn, tdb.osim.fn, tdb.fourier_grid.fn])
cclist = list()
N = 0
total_flux = complex(0, 0)
while True:
[pphi, pm, pl] = im.find_argmax(abs)
pval = im.im[pphi, pm, pl]
if abs(pval) < cutoff:
self.m.success("Stopping! Cutoff has been reached.")
break
total_flux = total_flux + pval * gain
N += 1
self.m.message(". Iteration " + str(N), 2)
self.m.message(". CLEAN Component info:", 2)
self.m.message(". . value: " + str(pval * gain), 2)
self.m.message(". . abs. value: " +
str(abs(pval * gain)), 2)
self.m.message(". . phi: " + str(pphi), 2)
self.m.message(". . m: " + str(pm), 2)
self.m.message(". . l: " + str(pl), 2)
self.m.message(". . total pol. flux: " +
str(abs(total_flux)), 2)
cclist.append([pphi, pm, pl, pval * gain])
bigdb.copy_patch_to(tdb, (pphi, pm, pl))
tdb.multiplywith(gain * pval)
im.subtractoff(tdb)
if N >= niter:
self.m.success("Stopping! Maximum iterations reached.")
break
self.m.message("Adding CLEAN model to image...", 1)
cclist = self.condense_cc_list(cclist) # OK
self.make_cclist_image(cclist, pointim) # OK
self.m.message("Convolving with CLEAN beam...", 1)
self.make_beam_image(tdb) # OK
pointim.convolve_with(tdb) # OK
resim = FSCImage(self.sfnbase + '_resim.hdf5',
np.dtype('complex128'),
self.coords.grid_def, self.coords.grid_params,
m=self.m)
im.copy_to(resim)
self.register_scratch_files([resim.osim.fn, resim.fourier_grid.fn])
im.addto(pointim)
return cclist, resim
def clark_clean(self, vis, weights, im, db):
"""
The 3D Clark CLEAN algorithm.
Args:
vis: An FSCPolData object containing the stokes Q and U visibility
data to be cleaned.
weights: An FSCData object containing the weights for each
visibility.
im: An FSCImage object in which to store the cleaned image.
db: An FSCImage object in which to store the dirty beam image.
Returns:
A list of clean components. Each list entry contains a tuple of
model locations (phi, dec, ra) defined in pixels, and the model
flux.
"""
self.m.header2("Started the Clark CLEAN routine...")
# Works fine
self.m.message("Computing dirty beam...", 1)
weights.transform(db)
db.multiplywith(self.K)
# Works fine
self.m.message("Computing dirty image...", 1)
vis.multiplywith(weights) # vis now contains the weighted data!
if not self.do_clean:
# im will contain the residual image going forward
vis.transform(im)
im.multiplywith(self.K)
return None, None
# object for holding the model point source image
pointim = FSCImage(self.sfnbase + '_pointim.hdf5',
np.dtype('complex128'),
self.coords.grid_def, self.coords.grid_params,
m=self.m)
self.register_scratch_files([pointim.fn, pointim.osim.fn,
pointim.fourier_grid.fn])
# object for holding the model visibilities
modelvis = FSCPolData(self.sfnbase + '_modelvis.hdf5',
coords=weights.coords, m=self.m, template=vis)
self.register_scratch_files([modelvis.Q.fn, modelvis.U.fn])
[nphi, nm, nl] = im.im.shape
PFRAC = self.pm.parset['beam_patch_frac']
cutoff = self.pm.parset['cutoff']
niter = self.pm.parset['niter']
gain = self.pm.parset['gain']
# number of pixels along each axis of the beam patch
pnphi = nphi / PFRAC
pnm = nm / PFRAC
pnl = nl / PFRAC
self.m.message("Extracting beam patch and computing highest " +
"external sidelobe...", 2)
tdb = FSCImage(self.sfnbase + '_tdb.hdf5', np.dtype('complex128'),
self.coords.grid_def, self.coords.grid_params,
m=self.m, grid_dtype=np.dtype('float64'))
self.register_scratch_files([tdb.fn, tdb.osim.fn, tdb.fourier_grid.fn])
db.copy_to(tdb)
patch = tdb.im[nphi / 2 - pnphi / 2:nphi / 2 + pnphi / 2,
nm / 2 - pnm / 2:nm / 2 + pnm / 2,
nl / 2 - pnl / 2:nl / 2 + pnl / 2]
# get only the rest of the beam outside of the patch
tdb.im[nphi / 2 - pnphi / 2:nphi / 2 + pnphi / 2,
nm / 2 - pnm / 2:nm / 2 + pnm / 2,
nl / 2 - pnl / 2:nl / 2 + pnl / 2] = np.zeros((pnphi, pnm, pnl),
dtype=np.dtype('complex128'))
# find the largest sidelobe external to the patch
extsl = tdb.find_max(abs)
# for test dataset, extsl should be 0.112
self.m.message("Largest sidelobe level outside beam patch: " +
str(extsl), 3)
cclist = list()
stop = False
N = 1
total_flux = complex(0, 0)
while True:
# Major cycle
self.m.message("Begin Major Cycle", 1)
# im will contain the residual image going forward
vis.transform(im)
im.multiplywith(self.K)
[pphi, pm, pl] = im.find_argmax(abs)
pval = im.im[pphi, pm, pl]
self.m.message("Initial residual map peak: " + str(abs(pval)), 1)
if abs(pval) < cutoff:
self.m.success("Stopping! Cutoff has been reached.")
stop = True
break
slim = extsl * abs(pval)
F = 1. + 1. / N
tcclist = list()
if abs(pval) < slim * F:
slim = abs(pval) / F
self.m.message("Initial minor cycle stop level: " +
str(slim * F), 2)
while abs(pval) >= slim * F:
# Minor cycle
total_flux = total_flux + pval * gain
self.m.message(". Starting minor cycle " + str(N), 2)
self.m.message(". CLEAN Component info:", 2)
self.m.message(". . value: " + str(pval * gain), 2)
self.m.message(". . abs. value: " +
str(abs(pval * gain)), 2)
self.m.message(". . phi: " + str(pphi), 2)
self.m.message(". . m: " + str(pm), 2)
self.m.message(". . l: " + str(pl), 2)
self.m.message(". . total pol. flux: " +
str(abs(total_flux)), 2)
tcclist.append([pphi, pm, pl, pval * gain])
# find phimin/max, lmin/max, mmin/max, accounting for map edges
# crop the patch if necessary (because it runs off the edge)
phimax = pphi + pnphi / 2
phimin = pphi - pnphi / 2
pc_phi_low = 0
pc_phi_high = pnphi
if phimin < 0:
phimin = 0
# lower index of the cropped patch
pc_phi_low = pnphi / 2 - pphi
if phimax > nphi:
phimax = nphi
# upper index of the cropped patch
pc_phi_high = pnphi / 2 + (nphi - pphi)
mmax = pm + pnm / 2
mmin = pm - pnm / 2
pc_m_low = 0
pc_m_high = pnm
if mmin < 0:
mmin = 0
# lower index of the cropped patch
pc_m_low = pnm / 2 - pm
if mmax > nm:
mmax = nm
# upper index of the cropped patch
pc_m_high = pnm / 2 + (nm - pm)
lmax = pl + pnl / 2
lmin = pl - pnl / 2
pc_l_low = 0
pc_l_high = pnl
if lmin < 0:
lmin = 0
# lower index of the cropped patch
pc_l_low = pnl / 2 - pl
if lmax > nl:
lmax = nl
# upper index of the cropped patch
pc_l_high = pnl / 2 + (nl - pl)
tpatch = patch[pc_phi_low:pc_phi_high,
pc_m_low:pc_m_high,
pc_l_low:pc_l_high].copy()
im.im[phimin:phimax, mmin:mmax, lmin:lmax] = \
im.im[phimin:phimax, mmin:mmax, lmin:lmax] - \
gain * pval * tpatch
[pphi, pm, pl] = im.find_argmax(abs)
pval = im.im[pphi, pm, pl]
N += 1
F += 1. / N
if abs(pval) < cutoff or N > niter:
# Is this true? Does the peak value found during the minor
# cycle count for the stop condition? The residual image
# here is kind of meaningless
self.m.success("Stopping! Cutoff or niter " +
"has been reached.")
stop = True
break
if not stop:
self.m.message("Minor cycle stop condition reached.", 1)
self.m.message("Inverting model to vis. space and " +
"subtracting...", 1)
tcclist = self.condense_cc_list(tcclist)
self.make_cclist_image(tcclist, pointim)
pointim.transform(modelvis)
modelvis.multiplywith(self.Kinv)
modelvis.multiplywith(weights)
vis.subtractoff(modelvis)
self.m.message("Done.", 1)
cclist = cclist + tcclist
if stop:
break
self.m.message("Inverting CLEAN model...", 1)
del patch
cclist = self.condense_cc_list(cclist) # OK
self.make_cclist_image(cclist, pointim) # OK
self.m.message("Convolving with CLEAN beam...", 1)
self.make_beam_image(tdb) # OK
pointim.convolve_with(tdb) # OK
# construct residual image
vis.transform(im)
im.multiplywith(self.K)
resim = FSCImage(self.sfnbase + '_resim.hdf5',
np.dtype('complex128'),
self.coords.grid_def, self.coords.grid_params,
m=self.m)
im.copy_to(resim)
self.register_scratch_files([resim.osim.fn, resim.fourier_grid.fn])
im.addto(pointim)
return cclist, resim
def make_beam_image(self, beamim):
    """Fill ``beamim`` with the 3D Gaussian CLEAN beam.

    Beam FWHM parameters 'bmaj' and 'bmin' are read from
    ``self.pm.parset`` and converted from angular units to pixels via
    'cellsize'; 'bphi' is converted via 'dphi'.  The Gaussian is
    evaluated only on a central patch whose extent along each axis is
    the image size divided by 'beam_patch_frac'; the image is zero
    outside that patch.

    Args:
        beamim: image object exposing a 3D ndarray attribute ``im``
            and a ``multiplywith`` method; overwritten in place.

    Returns:
        Nothing.
    """
    beamim.multiplywith(0.)
    ln2 = 0.693147181
    # Convert arcsec to pixels which are used below
    bmaj = self.pm.parset['bmaj'] / self.pm.parset['cellsize']
    bmin = self.pm.parset['bmin'] / self.pm.parset['cellsize']
    bphi = self.pm.parset['bphi'] / self.pm.parset['dphi']
    # FWHM -> 1/sigma^2 (FWHM^2 = 8 ln2 sigma^2)
    invsigmal2 = 8 * ln2 * bmaj ** -2.
    invsigmam2 = 8 * ln2 * bmin ** -2.
    invsigmaphi2 = 8 * ln2 * bphi ** -2.
    # the size of the image over which to compute the gaussian;
    # zero outside.  Use explicit floor division so the patch shape and
    # slice bounds stay integral under Python 3 as well (the original
    # '/' relied on Python-2 integer-division semantics).
    denom = self.pm.parset['beam_patch_frac']
    nphi, nm, nl = beamim.im.shape
    pshape = (nphi // denom, nm // denom, nl // denom)
    phic = pshape[0] // 2
    mc = pshape[1] // 2
    lc = pshape[2] // 2
    philow = nphi // 2 - phic
    phihigh = nphi // 2 + phic
    mlow = nm // 2 - mc
    mhigh = nm // 2 + mc
    llow = nl // 2 - lc
    lhigh = nl // 2 + lc
    # Evaluate the Gaussian on the patch with broadcasting instead of a
    # triple Python loop -- identical values, far fewer interpreter ops.
    i = np.arange(pshape[0]).reshape(-1, 1, 1)
    j = np.arange(pshape[1]).reshape(1, -1, 1)
    k = np.arange(pshape[2]).reshape(1, 1, -1)
    patch = np.exp(-0.5 * (invsigmaphi2 * (i - phic) ** 2 +
                           invsigmam2 * (j - mc) ** 2 +
                           invsigmal2 * (k - lc) ** 2)
                   ).astype(beamim.im.dtype)
    beamim.im[philow:phihigh, mlow:mhigh, llow:lhigh] = patch
def make_cclist_image(self, cclist, im):
    """Render a CLEAN-component list into the image ``im``.

    Args:
        cclist: iterable of ``[phi, m, l, val]`` entries, where
            (phi, m, l) are integer pixel indices and val is the
            component amplitude.
        im: image object exposing an ndarray attribute ``im`` and a
            ``multiplywith`` method; zeroed and then filled in place.

    Returns:
        Nothing.
    """
    im.multiplywith(0.)
    # Accumulate components; repeated pixel indices add together.
    for phi, m, l, val in cclist:
        im.im[phi, m, l] += val
if __name__ == '__main__':
    # Command-line entry point: handle all parsing here, then hand
    # control off to the main imaging routine.
    desc = ("Software for reconstructing the Faraday spectrum, i.e. the 3D "
            "distribution of polarized intensity as a function of Faraday depth"
            " and position on the sky, from full-polarization, multi-frequency "
            "visibility data. Imaging is conducted using the Faraday "
            "synthesis technique (for details see Bell and Ensslin, 2012). "
            "Deconvolution is "
            "carried out using a 3D CLEAN algorithm. "
            "Data is read from MeasurementSet files of the type used by CASA. "
            "Images are written to HDF5 image files.")
    parser = OptionParser(usage="%prog <parset file> <in file> <out file>",
                          description=desc, version="%prog " + VERSION)
    parser.add_option("-p", "--parset_desc", action="store_true",
                      default=False,
                      help="show parameter set file description and exit")
    (options, args) = parser.parse_args()
    pm = FSCleanPM()
    if options.parset_desc:
        # Just describe the parameter-set file format and exit.
        pm.print_help()
    else:
        if len(args) != 3:
            parser.error("Incorrect number of arguments.")
        pm.parse_file(args[0])
        fsc = FSClean(pm)
        fsc.run(args[1], args[2])
|
mrbellREPO_NAMEfscleanPATH_START.@fsclean_extracted@fsclean-master@fsclean.py@.PATH_END.py
|
{
"filename": "amplifiers.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioMC/examples/SignalVisualization/amplifiers.py",
"type": "Python"
}
|
# Choices for the amplifier-selection control of the signal
# visualization example.  Each entry holds a display 'label', an
# internal 'value' identifying the amplifier (None means no amplifier,
# i.e. antenna response only), and a longer 'description' string.
amplifier_options = [
    {
        'label': 'None',
        'value': None,
        'description': ('No amplifier is selected. Only the response of the'
                        ' antenna is displayed')
    },
    {
        'label': 'RNO-G, Iglu',
        'value': 'iglu',
        'description': 'Amplifier used for the downhole channels of the RNO-G experiment'
    },
    {
        'label': 'RNO-G, Surface',
        'value': 'rno_surface',
        'description': 'Amplifier used for the surface channels of the RNO-G experiment'
    },
    {
        'label': 'ARIANNA-100',
        'value': '100',
        'description': 'First generation of Amplifiers of the ARIANNA experiment'
    },
    {
        'label': 'ARIANNA-200',
        'value': '200',
        'description': 'Second generation of Amplifiers of the ARIANNA experiment'
    },
    {
        'label': 'ARIANNA-300',
        'value': '300',
        'description': 'Third generation of Amplifiers of the ARIANNA experiment'
    }
]
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioMC@examples@SignalVisualization@amplifiers.py@.PATH_END.py
|
{
"filename": "trantable.md",
"repo_name": "jbroll/starbase",
"repo_path": "starbase_extracted/starbase-master/docs/trantable.md",
"type": "Markdown"
}
|
### `trantable` - table driven string substitution.
SYNOPSIS
--------
```
`trantable` [-i *ifile*] [-o *ofile*] *table* *fromcol* *tocol*
```
DESCRIPTION
-----------
Use m4 to replace all of the strings in *fromcol* with the
strings in *tocol*.
SEE ALSO
--------
* [grestable](grestable.html) - table driven general regular expression substitution.
{% include starbase-seealso.md %}
|
jbrollREPO_NAMEstarbasePATH_START.@starbase_extracted@starbase-master@docs@trantable.md@.PATH_END.py
|
{
"filename": "_tickmode.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/yaxis/minor/_tickmode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.yaxis.minor.tickmode``.

    Accepts one of "auto", "linear" or "array".
    """

    def __init__(
        self, plotly_name="tickmode", parent_name="layout.yaxis.minor", **kwargs
    ):
        # Pull overridable options out of kwargs, falling back to the
        # schema defaults, then delegate to the enumerated validator.
        edit_type = kwargs.pop("edit_type", "ticks")
        implied_edits = kwargs.pop("implied_edits", {})
        values = kwargs.pop("values", ["auto", "linear", "array"])
        super(TickmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@yaxis@minor@_tickmode.py@.PATH_END.py
|
{
"filename": "elasticc2_demo3.ipynb",
"repo_name": "LSSTDESC/elasticc",
"repo_path": "elasticc_extracted/elasticc-main/jupyter/sprint_week_2024oct/elasticc2_demo3.ipynb",
"type": "Jupyter Notebook"
}
|
### DESC Sprint Week ELAsTiCC Tutorial Demo 3
## Querying the DESC TOM
This will be less efficient than just reading the parquet files if what you want is access to the SNANA simulations. Use this if you're Amanda Wasserman and developing DESC infrastructure for spectroscopic followup, or somebody doing something similar. Also, if you want access to the broker classifications, this is the only way to get them, as they have not all been exported to any flat files.
You will need a username and password to log into https://desc-tom.lbl.gov . Ask Rob for an account if you don't have one. He will need the username you want on the TOM, and your email address (for things like password change links).
You can just hit the web API at https://desc-tom.lbl.gov/elasticc2/... with python requests. However, there are some annoying requirements for request headers. A small library, `tom_client.py`, is used below that handles these annoying requirements. Download it from https://raw.githubusercontent.com/LSSTDESC/tom_desc/refs/heads/main/tom_client.py
```python
%matplotlib inline
import sys
import os
import io
import math
import pathlib
import logging
import time
import json
import numpy
import polars
import pandas
from matplotlib import pyplot
# Add to path the directory where tom_client.py exists. You can copy
# the file from here:
# https://raw.githubusercontent.com/LSSTDESC/tom_desc/refs/heads/main/tom_client.py
# Then just replace the second argument to sys.path.insert below with the
# directory where you saved it.
sys.path.insert( 0, str( pathlib.Path( os.getenv("HOME") ) / "desc/tom_desc" ) )
from tom_client import TomClient
# Get your DESC TOM username and password. tompasswdfile
# is a file with a single line containing your TOM password.
# This should be in a place that's not readable by anybody
# but you. (Use this rather than putting the actual
# password into something that might get committed to a
# git archive!)
tomuser = 'rknop'
tompasswdfile = pathlib.Path( os.getenv("HOME") ) / "secrets/tom_rknop_passwd"
# TomClient is a thin front-end to Python requests that handles
# authentication and some annoying headers that need to be set
# for connections to Django to work.
tomclient = TomClient( url="https://desc-tom.lbl.gov", username=tomuser, passwordfile=tompasswdfile )
# Make a random number generator. If you
# want reproducibility, set the seed
# to something other than None.
# _random_seed = None
_random_seed = 42
rng = numpy.random.default_rng( seed=_random_seed )
# Make a logger so that we can print out timings and things like that
_logger = logging.getLogger("main")
if not _logger.hasHandlers():
_logout = logging.StreamHandler( sys.stderr )
_logger.addHandler( _logout )
_logout.setFormatter( logging.Formatter( f'[%(asctime)s - %(levelname)s] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S' ) )
_logger.setLevel( logging.INFO )
_logger.info( "Testing" )
```
[2024-10-28 14:06:47 - INFO] - Testing
```python
# Make ourselves a convenience function that does some return value checking,
# so we don't have to do that over and over again.
def query_tom( api_endpoint, data=None, verbose=False, return_text=False ):
    """POST ``data`` (as JSON) to the TOM at ``api_endpoint``.

    Raises RuntimeError on a non-200 HTTP status, or when the parsed
    JSON reply is a dict whose 'status' field is not 'ok'.  Returns the
    raw response text if return_text is True, otherwise the parsed
    JSON.  With verbose=True, timing and error details are logged.
    Relies on the module-level ``tomclient`` and ``_logger``.
    """
    if verbose:
        _logger.info( f"Sending {api_endpoint} request to tom..." )
    t0 = time.perf_counter()
    res = tomclient.post( api_endpoint, json=data )
    dt = time.perf_counter() - t0
    if verbose:
        _logger.info( f"...done after {dt:.1f} seconds" )
    if res.status_code != 200:
        # Only include the (possibly large) response body when verbose.
        strio = io.StringIO()
        strio.write( f"Got status code {res.status_code}" )
        if verbose:
            strio.write( f"\n{res.text}" )
        raise RuntimeError( strio.getvalue() )
    if return_text:
        return res.text
    else:
        retval = res.json()
        # Some endpoints return a dict with a 'status' key; anything
        # other than 'ok' there signals a server-side error even with
        # HTTP 200.
        if isinstance(retval, dict) and ( 'status' in retval ) and ( retval['status'] != 'ok' ):
            strio = io.StringIO()
            strio.write( f"Got status {retval['status']}" )
            if verbose and ( 'error' in retval ):
                strio.write( f"\n{retval['error']}" )
            raise RuntimeError (strio.getvalue() )
        return retval
```
```python
# Define a function for plotting lightcurves. We're going to use
# this lots below, and this saves repeated code in cells
#
# This version of plot_ltcv expects sequences (lists or similar) for mjd, band, flux, and fluxerr
def plot_ltcv( mjd, band, flux, fluxerr, snid=None, zcmb=None, mjdoff=0, figsize=None, width=None, multiplots=False ):
    """Plot a multi-band lightcurve with errorbars.

    mjd, band, flux and fluxerr are parallel sequences (lists or
    similar), one entry per photometry point.  snid and zcmb, if
    given, go into the panel title; mjdoff is subtracted from every
    mjd on the x axis.  With multiplots=True each band gets its own
    panel in a two-column grid; otherwise all bands share one set of
    axes.  Returns the matplotlib Figure.
    """
    # Fixed plot color per band; points in any band not listed here
    # are skipped (with a warning).
    plotcolors = { 'u': '#cc0ccc',
                   'g': '#00cc44',
                   'r': '#cc0000',
                   'i': '#ff4400',
                   'z': '#886600',
                   'Y': '#442200' }
    knownbands = set( band )
    if any( b not in plotcolors.keys() for b in knownbands ):
        _logger.warning( f"Unknown bands not plotted: {[b for b in knownbands if b not in plotcolors.keys()]}" )
    bandstoplot = [ b for b in plotcolors.keys() if b in knownbands ]
    if multiplots:
        # One panel per band, two panels per row.
        nrows = math.ceil( len(bandstoplot) / 2 )
        if figsize is None:
            width = 9 if width is None else width
            figsize = ( width, width/3. * nrows )
        fig, axes = pyplot.subplots( nrows, 2, figsize=figsize, tight_layout=True, sharex='all' )
        axes = axes.flatten()
    else:
        # Single panel holding every band.
        if figsize is None:
            width = 9 if width is None else width
            figsize = ( width, width/2. )
        fig, axes = pyplot.subplots( 1, 1, figsize=figsize, tight_layout=True )
        axes = [ axes ]
    axesdex = 0
    for curband in bandstoplot:
        # Boolean mask selecting the points of the current band.
        inband = [ b == curband for b in band ]
        curmjd = [ m - mjdoff for m, i in zip( mjd, inband ) if i ]
        curflux = [ f for f, i in zip( flux, inband ) if i ]
        curfluxerr = [ e for e, i in zip( fluxerr, inband ) if i ]
        axes[axesdex].errorbar( curmjd, curflux, yerr=curfluxerr,
                                color=plotcolors[curband], linestyle='None', marker='o',
                                label=curband )
        if multiplots: axesdex += 1
    for i, axis in enumerate(axes):
        if i >= len(bandstoplot):
            # Hide leftover panels when the grid is bigger than the
            # number of bands actually plotted.
            axis.set_visible( False )
        else:
            title = ""
            if snid is not None: title += f"SN {snid}"
            if zcmb is not None: title += f"{' at ' if snid is not None else ''} z={zcmb:.3f}"
            if len(title) > 0: axis.set_title( title )
            if mjdoff != 0:
                axis.set_xlabel( f"MJD-{mjdoff}" )
            else:
                axis.set_xlabel( r"MJD" )
            axis.set_ylabel( r"Flux" )
            axis.tick_params( axis='both', reset=True )
            axis.legend()
    # In the jupyter script environment, the plot gets shown
    # inline automatically. If you're running this
    # from the command line, you might need to do
    # fig.show(). You might also want to do something
    # like fig.savefig(filename). So, return the Figure
    # to make these things possible.
    # One side-effect of this is that your figure may be
    # shown *twice* in your jupyter notebook; once for
    # the plotting above, and once again if the call
    # to plot_ltcv is the last command in the cell,
    # because jupyter by default displays the value
    # of the last expression in each cell. Add a ;
    # to the end of your plot_ltcv cell to suppress
    # this in jupyter.
    return fig
```
```python
# Ask for "hot" SNe. This is the elasticc2/gethotsne API endpoint
# Documentation: https://github.com/LSSTDESC/tom_desc?tab=readme-ov-file#elasticc2hotsne
# Normally, this looks for supernovae whose latest detection
# was some number of days before now. This is what makes sense for a running
# survey. However, for a simulation, you want "now" to be different from the
# actual time now. So, there's a cheat parameter, mjd_now, that lets you tell
# the server to pretend that the present time is different from the real
# present time.
#
# The ELAsTiCC2 simulation has data from mjd 60796 to 61896, so pick
# something randomly in the middle.
#
# The database is huge, so this query takes some time to return.
# (A couple of minutes.) (If it doesn't succeed in 5 minutes,
# it will time out and you'll get an error return.)
retval = query_tom( "elasticc2/gethottransients",
{ "mjd_now": 61000., "detected_in_last_days": 1. },
verbose=True )
hotsne = retval['diaobject']
print( f"Got {len(hotsne)} hot transients!" )
```
[2024-10-28 14:07:37 - INFO] - Sending elasticc2/gethottransients request to tom...
[2024-10-28 14:08:31 - INFO] - ...done after 53.9 seconds
Got 54149 hot transients!
```python
# hotsne is a list. each element of the list is a dictionary. Let's look at one of them:
#
# (Note that the fields 'redshift' and 'sncode' are both -99 right now. They are there
# intended for future use as we develop the spectrum return information.)
hotsne[0]
```
{'objectid': 1002611,
'ra': 81.41968524696914,
'dec': -49.968970477933674,
'photometry': {'mjd': [60925.3889,
60925.3985,
60953.2899,
60953.3125,
60955.3221,
60955.3225,
60955.3474,
60955.3479,
60956.3486,
60958.3617,
60958.3728,
60969.3496,
60969.3616,
60974.3396,
60974.3515,
60981.2149,
60981.2398,
60992.2305,
60992.2547,
60994.2755,
60996.215,
60996.2392,
60999.3527],
'band': ['Y',
'Y',
'Y',
'Y',
'i',
'i',
'z',
'z',
'i',
'z',
'Y',
'i',
'z',
'r',
'i',
'z',
'Y',
'r',
'i',
'r',
'g',
'r',
'z'],
'flux': [-112.59107,
-555.5123,
5231.281,
4139.281,
6060.394,
5658.994,
5197.9297,
5161.213,
5684.1816,
5602.6343,
4597.311,
5365.39,
5534.702,
4790.2104,
4875.5986,
4021.6533,
4823.289,
2758.784,
3569.2063,
2633.6055,
619.1163,
2358.463,
3801.1348],
'fluxerr': [1315.0219,
1010.5355,
933.1185,
972.47253,
331.25726,
336.48068,
390.87753,
397.71918,
311.80356,
448.82755,
965.4553,
196.01305,
326.09912,
130.8937,
208.14319,
377.6744,
751.50073,
137.06282,
274.5344,
149.06378,
94.26724,
129.35391,
428.28445]},
'zp': 27.5,
'redshift': -99,
'sncode': -99}
```python
# Plot it:
photometry = hotsne[0]['photometry']
plot_ltcv( photometry['mjd'], photometry['band'], photometry['flux'], photometry['fluxerr'] );
```

```python
# The API call can package the data in a few different ways. The default is
# what's shown above. If we set the parameter return_format to 2, we get
# something that's better suited for importing into a data frame.
# (It still returns JSON. Returning a binary serialized data frame is fraught,
# because those don't always restore well between versions.)
retval = query_tom( "elasticc2/gethottransients",
{ "mjd_now": 61000., "detected_in_last_days": 1., "return_format": 2 },
verbose=True )
_logger.info( "Making Pandas data frame..." )
hotsne_pandas = pandas.DataFrame( retval['diaobject'] )
_logger.info( "Making Polars data frame..." )
hotsne_polars = polars.DataFrame( retval['diaobject'] )
_logger.info( "...done making dataframes." )
print( f"Pandas dataframe uses {hotsne_pandas.memory_usage(deep=True).sum()/1024/1024:.1f} MiB" )
display( hotsne_pandas[0:5] )
print( f"Polars dataframe uses ~{hotsne_polars.estimated_size('mb'):.1f} MiB" )
display( hotsne_polars[0:5] )
# I'm going to do further operations on the Polars dataframe. Although the Polars
# syntax is perhaps even more byzantine than Pandas (if you can imagine that),
# it handles list columns better than Pandas does. (For pandas, you probably
# want to "explode" the dataframe and deal with a multi-level index.)
hotsne = hotsne_polars;
```
[2024-10-28 14:10:33 - INFO] - Sending elasticc2/gethottransients request to tom...
[2024-10-28 14:11:25 - INFO] - ...done after 52.1 seconds
[2024-10-28 14:11:27 - INFO] - Making Pandas data frame...
[2024-10-28 14:11:27 - INFO] - Making Polars data frame...
[2024-10-28 14:11:30 - INFO] - ...done making dataframes.
Pandas dataframe uses 95.4 MiB
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>objectid</th>
<th>ra</th>
<th>dec</th>
<th>mjd</th>
<th>band</th>
<th>flux</th>
<th>fluxerr</th>
<th>zp</th>
<th>redshift</th>
<th>sncode</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1002611</td>
<td>81.419685</td>
<td>-49.968970</td>
<td>[60925.3889, 60925.3985, 60953.2899, 60953.312...</td>
<td>[Y, Y, Y, Y, i, i, z, z, i, z, Y, i, z, r, i, ...</td>
<td>[-112.59107, -555.5123, 5231.281, 4139.281, 60...</td>
<td>[1315.0219, 1010.5355, 933.1185, 972.47253, 33...</td>
<td>27.5</td>
<td>-99</td>
<td>-99</td>
</tr>
<tr>
<th>1</th>
<td>1002665</td>
<td>76.010132</td>
<td>-30.257800</td>
<td>[60949.3301, 60949.3539, 60950.3063, 60950.318...</td>
<td>[Y, Y, Y, Y, Y, i, i, z, z, r, i, z, i, i, z, ...</td>
<td>[-825.03766, 62.77219, -1943.0405, -898.07465,...</td>
<td>[1232.0863, 1149.9305, 1047.8407, 1200.26, 135...</td>
<td>27.5</td>
<td>-99</td>
<td>-99</td>
</tr>
<tr>
<th>2</th>
<td>1003516</td>
<td>82.045701</td>
<td>-50.100473</td>
<td>[60925.3926, 60925.4022, 60953.2899, 60953.312...</td>
<td>[Y, Y, Y, Y, i, z, i, z, Y, i, z, i, z, Y, r, ...</td>
<td>[-496.79715, -559.70245, -961.9892, 2602.1426,...</td>
<td>[1153.6647, 911.6201, 937.4305, 984.1008, 322....</td>
<td>27.5</td>
<td>-99</td>
<td>-99</td>
</tr>
<tr>
<th>3</th>
<td>1005817</td>
<td>101.808818</td>
<td>-47.275664</td>
<td>[60954.329, 60954.3516, 60955.3632, 60955.3745...</td>
<td>[Y, Y, z, Y, i, z, i, i, z, r, i]</td>
<td>[-62299.676, -64347.457, 41399.92, 22678.135, ...</td>
<td>[1619.7021, 1574.4094, 1421.069, 1599.6809, 15...</td>
<td>27.5</td>
<td>-99</td>
<td>-99</td>
</tr>
<tr>
<th>4</th>
<td>1007129</td>
<td>102.948256</td>
<td>-61.290785</td>
<td>[60955.3818, 60969.3408, 60969.3527, 60973.351...</td>
<td>[Y, i, z, i, z, Y, z, Y, i, r, i, z]</td>
<td>[1071.8907, 67.256325, 418.2215, -389.7148, -4...</td>
<td>[896.3415, 203.7171, 366.3868, 291.12665, 421....</td>
<td>27.5</td>
<td>-99</td>
<td>-99</td>
</tr>
</tbody>
</table>
</div>
Polars dataframe uses ~62.9 MiB
<div><style>
.dataframe > thead > tr,
.dataframe > tbody > tr {
text-align: right;
white-space: pre-wrap;
}
</style>
<small>shape: (5, 10)</small><table border="1" class="dataframe"><thead><tr><th>objectid</th><th>ra</th><th>dec</th><th>mjd</th><th>band</th><th>flux</th><th>fluxerr</th><th>zp</th><th>redshift</th><th>sncode</th></tr><tr><td>i64</td><td>f64</td><td>f64</td><td>list[f64]</td><td>list[str]</td><td>list[f64]</td><td>list[f64]</td><td>f64</td><td>i64</td><td>i64</td></tr></thead><tbody><tr><td>1002611</td><td>81.419685</td><td>-49.96897</td><td>[60925.3889, 60925.3985, … 60999.3527]</td><td>["Y", "Y", … "z"]</td><td>[-112.59107, -555.5123, … 3801.1348]</td><td>[1315.0219, 1010.5355, … 428.28445]</td><td>27.5</td><td>-99</td><td>-99</td></tr><tr><td>1002665</td><td>76.010132</td><td>-30.2578</td><td>[60949.3301, 60949.3539, … 60999.1766]</td><td>["Y", "Y", … "g"]</td><td>[-825.03766, 62.77219, … 22.669981]</td><td>[1232.0863, 1149.9305, … 96.26641]</td><td>27.5</td><td>-99</td><td>-99</td></tr><tr><td>1003516</td><td>82.045701</td><td>-50.100473</td><td>[60925.3926, 60925.4022, … 60999.3527]</td><td>["Y", "Y", … "z"]</td><td>[-496.79715, -559.70245, … 4218.726]</td><td>[1153.6647, 911.6201, … 435.79462]</td><td>27.5</td><td>-99</td><td>-99</td></tr><tr><td>1005817</td><td>101.808818</td><td>-47.275664</td><td>[60954.329, 60954.3516, … 60999.2775]</td><td>["Y", "Y", … "i"]</td><td>[-62299.676, -64347.457, … -79094.01]</td><td>[1619.7021, 1574.4094, … 1492.498]</td><td>27.5</td><td>-99</td><td>-99</td></tr><tr><td>1007129</td><td>102.948256</td><td>-61.290785</td><td>[60955.3818, 60969.3408, … 60999.3579]</td><td>["Y", "i", … "z"]</td><td>[1071.8907, 67.256325, … -187.3112]</td><td>[896.3415, 203.7171, … 538.5722]</td><td>27.5</td><td>-99</td><td>-99</td></tr></tbody></table></div>
```python
# Let's make a snr column, and then an 'ndets' column with the number of points that have snr >= 5
#
# This requires knowing some Polars syntax, which is nontrivial.
hotsne = hotsne.with_columns( snr = hotsne['flux'] / hotsne['fluxerr'] )
hotsne = hotsne.with_columns( ndets = ( hotsne['snr'].list.eval( polars.element().ge(5) ).list.count_matches(True) ) )
```
```python
# Just for fun, lets find things with truly lots of detections
manydets = hotsne.filter( hotsne['ndets'] > 100 )
# Pick one at random
which = rng.integers( 0, len(manydets) )
# One particularly annoying thing of Polars... you
# can't index it with an numpy.int64, so turn
# "which" into a regular python integer
which = int(which)
manydets[which]
```
<div><style>
.dataframe > thead > tr,
.dataframe > tbody > tr {
text-align: right;
white-space: pre-wrap;
}
</style>
<small>shape: (1, 12)</small><table border="1" class="dataframe"><thead><tr><th>objectid</th><th>ra</th><th>dec</th><th>mjd</th><th>band</th><th>flux</th><th>fluxerr</th><th>zp</th><th>redshift</th><th>sncode</th><th>snr</th><th>ndets</th></tr><tr><td>i64</td><td>f64</td><td>f64</td><td>list[f64]</td><td>list[str]</td><td>list[f64]</td><td>list[f64]</td><td>f64</td><td>i64</td><td>i64</td><td>list[f64]</td><td>u32</td></tr></thead><tbody><tr><td>18597192</td><td>8.059171</td><td>-43.682868</td><td>[60848.2569, 60848.2574, … 60999.102]</td><td>["g", "g", … "z"]</td><td>[-527.07776, -69.010185, … 1632.5796]</td><td>[417.77103, 412.00803, … 376.1189]</td><td>27.5</td><td>-99</td><td>-99</td><td>[-1.261643, -0.167497, … 4.340594]</td><td>130</td></tr></tbody></table></div>
```python
# The version of plot_ltcv in this notebook needs lists, so we have to cast the
# Polars Series objects we get out to regular python lists
# (The [0] is because we really have polars Series, even though it
# only has one element. We want just that element, which itself
# is a list.)
plot_ltcv( manydets[which]['mjd'][0], manydets[which]['band'][0], manydets[which]['flux'][0], manydets[which]['fluxerr'][0],
multiplots=True );
# You can see that this is one that was observed a lot, but only started to explode near the end.
# Remember that we gave "mjd_now=61000" to the call above, so any simulated photometry from after
# that date won't be included.
```

```python
# Some other miscellaneous API endpoints.
#
# The "classids" endpoint will get a dictionary whose keys are broker class IDs. (Because of
# JSON limitations, these keys are strings, even though ideally they would be ints.) Each value
# is itself a dictionary; "description" describes the classID, and "gentype" is a list of SNANA
# gentypes that correspond to this classID. Then there are four booleans that indicate how
# precise the class ID is; from most general to least general, these are generalmatch, broadmatch,
# categorymatch, and exactmatch. The level of match will correspond to which digits aren't 0
# in the class ID. For more information, see the ELAsTiCC2 taxonomy at:
# https://github.com/LSSTDESC/elasticc/blob/elasticc2/taxonomy/taxonomy.ipynb
classidinfo = query_tom( 'elasticc2/classids' )
print( json.dumps( classidinfo, indent=4 ) )
```
{
"0": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Meta",
"gentype": []
},
"100": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Meta/Other",
"gentype": []
},
"200": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Residual",
"gentype": []
},
"300": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "NotClassified",
"gentype": []
},
"1000": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Static",
"gentype": []
},
"1100": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Static/Other",
"gentype": []
},
"2000": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": true,
"generalmatch": false,
"description": "Variable",
"gentype": [
10,
11,
12,
20,
21,
25,
26,
27,
30,
31,
32,
35,
36,
37,
40,
42,
45,
46,
50,
51,
59,
60,
80,
82,
83,
84,
87,
88,
89,
90,
91
]
},
"2100": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Variable/Other",
"gentype": []
},
"2200": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": true,
"generalmatch": true,
"description": "Non-Recurring",
"gentype": [
10,
11,
12,
20,
21,
25,
26,
27,
30,
31,
32,
35,
36,
37,
40,
42,
45,
46,
50,
51,
59,
82,
84,
87,
88,
89
]
},
"2210": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Non-Recurring/Other",
"gentype": []
},
"2220": {
"exactmatch": false,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "SN-like",
"gentype": [
10,
11,
12,
20,
21,
25,
26,
27,
30,
31,
32,
35,
36,
37
]
},
"2221": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "SN-like/Other",
"gentype": []
},
"2222": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Ia",
"gentype": [
10
]
},
"2223": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Ib/c",
"gentype": [
20,
21,
25,
26,
27
]
},
"2224": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "II",
"gentype": [
30,
31,
32,
35,
36,
37
]
},
"2225": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Iax",
"gentype": [
12
]
},
"2226": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "91bg",
"gentype": [
11
]
},
"2230": {
"exactmatch": false,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Fast",
"gentype": [
50,
51,
82,
84,
87,
88,
89
]
},
"2231": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Fast/Other",
"gentype": []
},
"2232": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "KN",
"gentype": [
50,
51
]
},
"2233": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "M-dwarf Flare",
"gentype": [
82
]
},
"2234": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Dwarf Novae",
"gentype": [
84
]
},
"2235": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "uLens",
"gentype": [
87,
88,
89
]
},
"2240": {
"exactmatch": false,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Long",
"gentype": [
40,
42,
45,
46,
59
]
},
"2241": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Long/Other",
"gentype": []
},
"2242": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "SLSN",
"gentype": [
40
]
},
"2243": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "TDE",
"gentype": [
42
]
},
"2244": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "ILOT",
"gentype": [
45
]
},
"2245": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "CART",
"gentype": [
46
]
},
"2246": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "PISN",
"gentype": [
59
]
},
"2300": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": true,
"generalmatch": true,
"description": "Recurring",
"gentype": [
60,
80,
83,
90,
91
]
},
"2310": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Recurring/Other",
"gentype": []
},
"2320": {
"exactmatch": false,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Periodic",
"gentype": [
80,
83,
90,
91
]
},
"2321": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Periodic/Other",
"gentype": []
},
"2322": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Cepheid",
"gentype": [
90
]
},
"2323": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "RR Lyrae",
"gentype": [
80
]
},
"2324": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Delta Scuti",
"gentype": [
91
]
},
"2325": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "EB",
"gentype": [
83
]
},
"2326": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "LPV/Mira",
"gentype": []
},
"2330": {
"exactmatch": false,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "Non-Periodic",
"gentype": [
60
]
},
"2331": {
"exactmatch": false,
"categorymatch": false,
"broadmatch": false,
"generalmatch": false,
"description": "Non-Periodic/Other",
"gentype": []
},
"2332": {
"exactmatch": true,
"categorymatch": true,
"broadmatch": true,
"generalmatch": true,
"description": "AGN",
"gentype": [
60
]
}
}
```python
# You can get information about which classifiers provided classifications
# during the ELAsTiCC2 campaign. The return value is a dictionary, whose
# key is the database's classifier_id. It's supposed to be an integer, but
# because of limitations in JSON they are actually strings. Probably run these
# through int() if you actually use them.
res = query_tom( 'elasticc2/classifiers_json' )
print( json.dumps( res, indent=4 ) )
```
{
"11": {
"brokername": "ANTARES",
"brokerversion": "2.6.8",
"classifiername": "LightGBM_filter",
"classifierparams": "v7"
},
"12": {
"brokername": "ALeRCE",
"brokerversion": "6.0.0",
"classifiername": "BaltoClassifier",
"classifierparams": "6.0.0"
},
"13": {
"brokername": "ALeRCE",
"brokerversion": "1.1.0",
"classifiername": "TinkyWinkyClassifier",
"classifierparams": "1.1.0"
},
"14": {
"brokername": "Pitt-Google Broker",
"brokerversion": "v0.7",
"classifiername": "SuperNNova_v1.3",
"classifierparams": ""
},
"15": {
"brokername": "ALeRCE",
"brokerversion": "3.0.0",
"classifiername": "MessiClassifier",
"classifierparams": "3.0.0"
},
"16": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllInclPost",
"classifierparams": "v230819"
},
"17": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllIncl",
"classifierparams": "v230819"
},
"18": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllInclLtd",
"classifierparams": "v230819"
},
"19": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterPing",
"classifierparams": "v230819"
},
"21": {
"brokername": "ALeRCE",
"brokerversion": "6.0.0",
"classifiername": "MessiClassifier",
"classifierparams": "6.0.0"
},
"22": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterPing",
"classifierparams": "v231123"
},
"23": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllInclPost",
"classifierparams": "v231123"
},
"24": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllInclLtd",
"classifierparams": "v231123"
},
"25": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllIncl",
"classifierparams": "v231123"
},
"26": {
"brokername": "ALeRCE",
"brokerversion": "6.1.0",
"classifiername": "BaltoClassifier",
"classifierparams": "6.1.0"
},
"27": {
"brokername": "ALeRCE",
"brokerversion": "3.1.0",
"classifiername": "MessiClassifier",
"classifierparams": "3.1.0"
},
"28": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterPing",
"classifierparams": "v231203"
},
"29": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllInclPost",
"classifierparams": "v231203"
},
"30": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllInclLtd",
"classifierparams": "v231203"
},
"31": {
"brokername": "AMPEL",
"brokerversion": "v0.8",
"classifiername": "ElasticcMonsterAllIncl",
"classifierparams": "v231203"
}
}
```python
# This one gets you the data you'd need to make your own confusion matrices.
# It's complicated. This is just one quick example. For full documentation
# on this endpoint and more instructions for its use, see
#
# https://github.com/LSSTDESC/elasticc_metrics/blob/main/elasticc2_rest_metric_demo.ipynb
#
# This one doesn't actually query the database (which would
# take a very long time), but reads cached results of queries
# to the database run previously.
#
# brokerclassfortruetype/<str:dataformat>/<str:what>/<int:classifier_id>/<int:classid>/
#
# The slash at the end is crucial; otherwise, django gets annoyed.
#
# "dataformat" can be "dict" or "pickle". If the latter, it gives
# you a pickled Pandas dataframe that may or may not successfully
# unpickle with the version of Pandas you're using. If "dict", you
# get back a dictionary you could feed to
# Pandas.DataFrame.from_dict( ..., orient='tight' )
# to get a data frame. "pickle" will be faster in all ways if it works.
#
# "what" can be one of "objects", "sources", "meanprobabilities", and "maxprobabilities".
#
# "classifier_id" is the numeric classifier id (see previous cell)
#
# "classid" is the ELAsTiCC2 taxonomy of the classid for the *true* class. These needs
# to be one of the most specific classes (i.e. the ones digit is not 0).
#
# Example here just gets some basic info about all objects of type SNIa.
# It doesn't actually get any broker classification info. (See the link
# above for more info.)
#
# set dataformat to json
#
# ask for "objects" just to see what that is
#
# classifier ID 27 = ALeRCE MessiClassifier v3.1.0
#
# classid 2222 = SNIa
res = query_tom( 'elasticc2/brokerclassfortruetype/json/objects/27/2222/', verbose=True )
df = pandas.DataFrame.from_dict( res, orient='tight' )
df
```
[2024-10-28 14:17:49 - INFO] - Sending elasticc2/brokerclassfortruetype/json/objects/27/2222/ request to tom...
[2024-10-28 14:17:50 - INFO] - ...done after 1.5 seconds
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>t.zcmb</th>
<th>t.peakmjd</th>
<th>t.gentype</th>
</tr>
<tr>
<th>s.diaobject_id</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>1000154</th>
<td>0.502343</td>
<td>61504.242</td>
<td>10</td>
</tr>
<tr>
<th>1001012</th>
<td>0.308069</td>
<td>61515.680</td>
<td>10</td>
</tr>
<tr>
<th>1001852</th>
<td>0.566813</td>
<td>61513.750</td>
<td>10</td>
</tr>
<tr>
<th>1004371</th>
<td>0.971284</td>
<td>61526.650</td>
<td>10</td>
</tr>
<tr>
<th>1004448</th>
<td>0.599852</td>
<td>61394.780</td>
<td>10</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>159506706</th>
<td>0.610671</td>
<td>61421.773</td>
<td>10</td>
</tr>
<tr>
<th>159506951</th>
<td>0.508571</td>
<td>61401.190</td>
<td>10</td>
</tr>
<tr>
<th>159507572</th>
<td>0.274884</td>
<td>61552.414</td>
<td>10</td>
</tr>
<tr>
<th>159508765</th>
<td>0.256128</td>
<td>61354.145</td>
<td>10</td>
</tr>
<tr>
<th>159508854</th>
<td>0.441880</td>
<td>61504.773</td>
<td>10</td>
</tr>
</tbody>
</table>
<p>149323 rows × 3 columns</p>
</div>
```python
```
|
LSSTDESCREPO_NAMEelasticcPATH_START.@elasticc_extracted@elasticc-main@jupyter@sprint_week_2024oct@elasticc2_demo3.ipynb@.PATH_END.py
|
{
"filename": "bokeh_plot_khat.py",
"repo_name": "arviz-devs/arviz",
"repo_path": "arviz_extracted/arviz-main/examples/bokeh/bokeh_plot_khat.py",
"type": "Python"
}
|
"""
Pareto Shape Plot
=================
"""
import arviz as az
idata = az.load_arviz_data("radon")
loo = az.loo(idata, pointwise=True)
ax = az.plot_khat(loo, show_bins=True, backend="bokeh")
|
arviz-devsREPO_NAMEarvizPATH_START.@arviz_extracted@arviz-main@examples@bokeh@bokeh_plot_khat.py@.PATH_END.py
|
{
"filename": "test_XYLike.py",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/threeML/test/test_XYLike.py",
"type": "Python"
}
|
from threeML import *
from threeML.plugins.XYLike import XYLike
import os
import numpy as np
def get_signal():
    """Build a Line + Gaussian composite model and evaluate it on the module-level grid ``x``."""
    model = Line() + Gaussian()
    # Configure the Gaussian component (suffix _2) and the line intercept (suffix _1).
    model.mu_2 = 5.0
    model.sigma_2 = 0.32
    model.F_2 = 70.4
    model.a_1 = 40.0
    return model(x)
# A simple x: 50 evenly spaced points on [0, 10], the grid on which the
# fake datasets below were generated.
x = np.linspace(0, 10, 50)

# These datasets have been generated adding noise to the signal
# produced by get_signal() (a Line + Gaussian model).

# Gaussian-noise realization of the signal.
gauss_signal = [
    50.7602612346, 45.9932836018, 32.7977610209, 36.854638754, 39.5900950043,
    41.9882356834, 35.5464965039, 47.7006711308, 51.350490463, 41.3574154897,
    43.5213662377, 39.1763197352, 39.817699622, 30.515504494, 33.5154124187,
    52.2808043531, 37.3822864933, 54.8713758496, 31.501229516, 44.3932720107,
    40.9919050981, 46.6234446307, 65.0001223876, 78.9277394629, 119.888320901,
    144.295807048, 94.2679915254, 68.3366002984, 48.7021725122, 43.8175069547,
    48.4100953701, 53.4893430887, 44.3520922656, 37.3317011115, 48.8614340877,
    45.8279746014, 41.4841202405, 54.4940719287, 34.1601281994, 38.0362535503,
    50.8319092017, 45.0868214795, 50.7982173405, 59.7236796118, 42.8220846239,
    47.978397568, 59.6987029918, 50.8856593966, 55.5325981236, 33.879756494,
]

# 1-sigma errors associated with gauss_signal.
gauss_sigma = [
    6.32455532034, 6.34066886319, 6.35674156037, 6.37277372091, 6.38876565,
    6.40471764899, 6.4206300155, 6.43650304347, 6.45233702322, 6.46813224153,
    6.48388898166, 6.49960752347, 6.51528814342, 6.53093111468, 6.54653670831,
    6.56210525885, 6.57763959273, 6.59320350524, 6.60982256293, 6.63810676386,
    6.74610996782, 7.18007988042, 8.31336897994, 10.0117217614, 11.3276025229,
    11.3366071004, 10.0422516164, 8.37451562539, 7.27888168044, 6.88089633636,
    6.80509804246, 6.80755576825, 6.82140432398, 6.83628044824, 6.85118795893,
    6.86606562443, 6.88091118789, 6.89572479208, 6.9105066414, 6.92525693917,
    6.93997588657, 6.95466368265, 6.96932052437, 6.98394660662, 6.99854212224,
    7.01310726208, 7.027642215, 7.04214716792, 7.05662230584, 7.07106781187,
]

# Poisson-noise (integer counts) realization of the signal.
poiss_sig = [
    44, 43, 38, 25, 51, 37, 46, 47, 55, 36, 40, 32, 46, 37, 44, 42, 50,
    48, 52, 47, 39, 55, 80, 93, 123, 135, 96, 74, 43, 49, 43, 51, 27, 32,
    35, 42, 43, 49, 38, 43, 59, 54, 50, 40, 50, 57, 55, 47, 38, 64,
]
def test_XYLike_chi2():
    """Fit the Gaussian-noise dataset with chi-square statistics and check the best-fit values."""
    # Get fake data with Gaussian noise
    yerr = np.array(gauss_sigma)
    y = np.array(gauss_signal)

    # Fit
    xy = XYLike("test", x, y, yerr)

    fitfun = Line() + Gaussian()
    fitfun.F_2 = 60.0
    fitfun.mu_2 = 4.5

    res = xy.fit(fitfun)

    # Verify that the fit converged where it should have
    # (parameter order: a_1, b_1, F_2, mu_2, sigma_2)
    assert np.allclose(
        res.get_data_frame()["value"].values,
        [40.20269202, 0.82896119, 62.80359114, 5.04080011, 0.27286713],
        rtol=0.05,
    )

    # When yerr is not given, XYLike must default to unit errors
    xy = XYLike("test", x, y)
    assert np.all(xy.yerr == np.ones_like(y))

    fitfun = Line() + Gaussian()
    fitfun.F_2 = 60.0
    fitfun.mu_2 = 4.5

    res = xy.fit(fitfun)
def test_XYLike_poisson():
    """Fit the Poisson dataset with a Poisson likelihood and check the best-fit values."""
    # Now Poisson case
    y = np.array(poiss_sig)

    xy = XYLike("test", x, y, poisson_data=True)

    fitfun = Line() + Gaussian()
    fitfun.F_2 = 60.0
    fitfun.F_2.bounds = (0, 200.0)
    fitfun.mu_2 = 5.0
    fitfun.b_1.bounds = (0.1, 5.0)
    fitfun.a_1.bounds = (0.1, 100.0)

    res = xy.fit(fitfun)

    # Verify that the fit converged where it should have
    # (parameter order: a_1, b_1, F_2, mu_2, sigma_2)
    assert np.allclose(
        res.get_data_frame()["value"],
        [40.344599, 0.783748, 71.560055, 4.989727, 0.330570],
        rtol=0.05,
    )
def test_XYLike_assign_to_source():
    """Check that assigning the plugin to one point source isolates it from the other source's parameters."""
    # Get fake data with Gaussian noise
    yerr = np.array(gauss_sigma)
    y = np.array(gauss_signal)

    # Fit
    xy = XYLike("test", x, y, yerr)
    # Tie this dataset to pts1 only
    xy.assign_to_source("pts1")

    fitfun = Line() + Gaussian()
    fitfun.F_2 = 60.0
    fitfun.mu_2 = 4.5

    fitfun2 = Line() + Gaussian()
    fitfun2.F_2 = 60.0
    fitfun2.mu_2 = 4.5

    pts1 = PointSource("pts1", ra=0.0, dec=0.0, spectral_shape=fitfun)
    pts2 = PointSource("pts2", ra=2.5, dec=3.2, spectral_shape=fitfun2)

    # Freeze the second source so only pts1 parameters are fit
    for parameter in list(fitfun2.parameters.values()):
        parameter.fix = True

    model = Model(pts1, pts2)
    data = DataList(xy)
    jl = JointLikelihood(model, data)
    _ = jl.fit()

    predicted_parameters = np.array(
        [40.20269202, 0.82896119, 62.80359114, 5.04080011, 0.27286713]
    )

    assert np.allclose(
        [
            fitfun.a_1.value,
            fitfun.b_1.value,
            fitfun.F_2.value,
            fitfun.mu_2.value,
            fitfun.sigma_2.value,
        ],
        predicted_parameters,
        rtol=0.05,
    )

    # Test that the likelihood does not change by changing the parameters of the other source
    log_like_before = jl.minus_log_like_profile(*predicted_parameters)
    fitfun2.F_2 = 120.0
    log_like_after = jl.minus_log_like_profile(*predicted_parameters)
    assert log_like_before == log_like_after

    # Now test that if we do not assign a source, then the log likelihood value will change
    xy.assign_to_source(None)

    # Test that the likelihood this time changes by changing the parameters of the other source
    log_like_before = jl.minus_log_like_profile(*predicted_parameters)
    fitfun2.F_2 = 60.0
    log_like_after = jl.minus_log_like_profile(*predicted_parameters)
    assert log_like_before != log_like_after
def test_XYLike_dataframe():
    """Round-trip an XYLike instance through a pandas DataFrame."""
    yerr = np.array(gauss_sigma)
    y = np.array(gauss_signal)

    # chi2 version
    xy = XYLike("test", x, y, yerr)
    df = xy.to_dataframe()

    # Read back from the dataframe and verify the statistic survived the
    # round trip. (Previously this asserted on `xy`, the in-memory source
    # object, which never exercised the reconstructed `new_xy` at all.)
    new_xy = XYLike.from_dataframe("df", df)
    assert not new_xy.is_poisson

    # poisson version
    xy = XYLike("test", x, y, poisson_data=True)
    df = xy.to_dataframe()

    new_xy = XYLike.from_dataframe("df", df, poisson=True)
    assert new_xy.is_poisson
def test_XYLike_txt():
    """Round-trip an XYLike instance through a text file."""
    yerr = np.array(gauss_sigma)
    y = np.array(gauss_signal)

    # chi2 version
    xy = XYLike("test", x, y, yerr)
    fname = "test_txt.txt"
    xy.to_txt(fname)

    # Read back from the file and verify the statistic survived the round
    # trip. (Previously this asserted on `xy`, the in-memory source object,
    # so the file round trip was never actually checked.)
    new_xy = XYLike.from_text_file("txt", fname)
    assert not new_xy.is_poisson

    # poisson version
    xy = XYLike("test", x, y, poisson_data=True)
    fname = "test_txt_poisson.txt"
    xy.to_txt(fname)

    new_xy = XYLike.from_text_file("txt", fname)
    assert new_xy.is_poisson

    # Remove files written above
    os.remove("test_txt.txt")
    os.remove("test_txt_poisson.txt")
def test_xy_plot():
    """Smoke-test plotting both before and after a fit."""
    data_errors = np.array(gauss_sigma)
    data_values = np.array(gauss_signal)

    plugin = XYLike("test", x, data_values, data_errors)

    # Plot the raw data (no model yet)
    plugin.plot()

    model = Line() + Gaussian()
    model.F_2 = 60.0
    model.mu_2 = 4.5

    _ = plugin.fit(model)

    # Plot again, now with a best-fit model available
    plugin.plot()
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@threeML@test@test_XYLike.py@.PATH_END.py
|
{
"filename": "test_deploy.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/cli/test_deploy.py",
"type": "Python"
}
|
import io
import json
import os
import shutil
import subprocess
import sys
import tempfile
from datetime import timedelta
from pathlib import Path
from typing import Optional
from unittest import mock
from uuid import UUID, uuid4
import pendulum
import pytest
import readchar
import yaml
from typer import Exit
import prefect
from prefect.blocks.system import JSON, Secret
from prefect.cli.deploy import (
_check_for_matching_deployment_name_and_entrypoint_in_prefect_file,
_create_deployment_triggers,
_initialize_deployment_triggers,
)
from prefect.client.orchestration import PrefectClient, ServerType
from prefect.client.schemas.actions import WorkPoolCreate
from prefect.client.schemas.objects import Worker, WorkerStatus, WorkPool
from prefect.client.schemas.schedules import (
CronSchedule,
IntervalSchedule,
RRuleSchedule,
)
from prefect.deployments.base import (
_save_deployment_to_prefect_file,
initialize_project,
)
from prefect.deployments.steps.core import StepExecutionError
from prefect.events import (
DeploymentCompoundTrigger,
DeploymentEventTrigger,
EventTrigger,
Posture,
)
from prefect.exceptions import ObjectAlreadyExists, ObjectNotFound
from prefect.server.schemas.actions import (
BlockDocumentCreate,
BlockSchemaCreate,
BlockTypeCreate,
)
from prefect.settings import (
PREFECT_DEFAULT_WORK_POOL_NAME,
PREFECT_UI_URL,
temporary_settings,
)
from prefect.testing.cli import invoke_and_assert
from prefect.testing.utilities import AsyncMock
from prefect.utilities.asyncutils import run_sync_in_worker_thread
from prefect.utilities.filesystem import tmpchdir
TEST_PROJECTS_DIR = prefect.__development_base_path__ / "tests" / "test-projects"
@pytest.fixture
def interactive_console(monkeypatch):
    """Force the deploy CLI into interactive mode and make `readchar` usable under typer's isolated stdin."""
    monkeypatch.setattr("prefect.cli.deploy.is_interactive", lambda: True)

    # `readchar` does not like the fake stdin provided by typer isolation so we provide
    # a version that does not require a fd to be attached
    def readchar():
        sys.stdin.flush()
        position = sys.stdin.tell()
        if not sys.stdin.read():
            # Reading past the scripted input means the test forgot to supply a keystroke
            print("TEST ERROR: CLI is attempting to read input but stdin is empty.")
            raise Exit(-2)
        else:
            sys.stdin.seek(position)
            return sys.stdin.read(1)

    monkeypatch.setattr("readchar._posix_read.readchar", readchar)
@pytest.fixture
def project_dir(tmp_path):
    """Copy the test projects into a temp dir, initialize a Prefect project there, and chdir into it."""
    with tmpchdir(tmp_path):
        shutil.copytree(TEST_PROJECTS_DIR, tmp_path, dirs_exist_ok=True)
        (tmp_path / ".prefect").mkdir(exist_ok=True, mode=0o0700)
        initialize_project()
        yield tmp_path
@pytest.fixture
def project_dir_with_single_deployment_format(tmp_path):
    """Like project_dir, but also writes the first deployment out to a legacy deployment.yaml."""
    with tmpchdir(tmp_path):
        shutil.copytree(TEST_PROJECTS_DIR, tmp_path, dirs_exist_ok=True)
        (tmp_path / ".prefect").mkdir(exist_ok=True, mode=0o0700)
        initialize_project()

        with open("prefect.yaml", "r") as f:
            contents = yaml.safe_load(f)

        # Strip the schedule and persist only the first deployment definition
        first_deployment = contents["deployments"][0]
        first_deployment["schedule"] = None

        with open("deployment.yaml", "w") as f:
            yaml.safe_dump(first_deployment, f)

        yield tmp_path
@pytest.fixture
def uninitialized_project_dir(project_dir):
    """A project dir whose prefect.yaml has been removed."""
    (project_dir / "prefect.yaml").unlink()
    return project_dir
@pytest.fixture
def uninitialized_project_dir_with_git_no_remote(uninitialized_project_dir):
    """An uninitialized project dir that is also a fresh git repo (no remotes)."""
    subprocess.run(["git", "init"], cwd=uninitialized_project_dir)
    assert (uninitialized_project_dir / ".git").exists()
    return uninitialized_project_dir
@pytest.fixture
def uninitialized_project_dir_with_git_with_remote(
    uninitialized_project_dir_with_git_no_remote,
):
    """A git-backed project dir with an `origin` remote pointing at an example URL."""
    repo_dir = uninitialized_project_dir_with_git_no_remote
    subprocess.run(
        ["git", "remote", "add", "origin", "https://example.com/org/repo.git"],
        cwd=repo_dir,
    )
    return repo_dir
@pytest.fixture
async def default_agent_pool(prefect_client):
    """Create the default agent work pool, or fetch it if it already exists."""
    pool_name = "default-agent-pool"
    try:
        return await prefect_client.create_work_pool(
            WorkPoolCreate(name=pool_name, type="prefect-agent")
        )
    except ObjectAlreadyExists:
        return await prefect_client.read_work_pool(pool_name)
@pytest.fixture
async def docker_work_pool(prefect_client: PrefectClient) -> WorkPool:
    """A docker-typed work pool whose job template exposes a single `image` variable."""
    base_job_template = {
        "job_configuration": {"image": "{{ image}}"},
        "variables": {
            "type": "object",
            "properties": {
                "image": {"title": "Image", "type": "string"},
            },
        },
    }
    return await prefect_client.create_work_pool(
        work_pool=WorkPoolCreate(
            name="test-docker-work-pool",
            type="docker",
            base_job_template=base_job_template,
        )
    )
@pytest.fixture
async def mock_prompt(monkeypatch):
    """Patch prefect.cli._prompts.prompt so password prompts return a canned value instead of hanging."""
    # Mock prompts() where password=True to prevent hanging
    def new_prompt(message, password=False, **kwargs):
        if password:
            return "456"
        else:
            return original_prompt(message, password=password, **kwargs)

    # Captured before patching (closure binds it lazily); non-password
    # prompts fall through to the real implementation
    original_prompt = prefect.cli._prompts.prompt
    monkeypatch.setattr("prefect.cli._prompts.prompt", new_prompt)
@pytest.fixture
def mock_provide_password(monkeypatch):
    """Patch the prompt used by prefect.cli.deploy so password prompts return "my-token"."""
    def new_prompt(message, password=False, **kwargs):
        if password:
            return "my-token"
        else:
            return original_prompt(message, password=password, **kwargs)

    # Captured before patching; non-password prompts use the real implementation.
    # NOTE: this patches the name as imported in prefect.cli.deploy (unlike
    # mock_prompt above, which patches prefect.cli._prompts.prompt).
    original_prompt = prefect.cli._prompts.prompt
    monkeypatch.setattr("prefect.cli.deploy.prompt", new_prompt)
@pytest.fixture
def mock_build_docker_image(monkeypatch):
    """Replace deployment build steps with a MagicMock reporting a templated image."""
    fake_build_step = mock.MagicMock()
    fake_build_step.return_value = {"build-image": {"image": "{{ build-image.image }}"}}
    # Every step import resolves to the mock; module imports become no-ops
    monkeypatch.setattr(
        "prefect.deployments.steps.core.import_object",
        lambda x: fake_build_step,
    )
    monkeypatch.setattr(
        "prefect.deployments.steps.core.import_module",
        lambda x: None,
    )
    return fake_build_step
@pytest.fixture
async def aws_credentials(prefect_client):
    """Register a minimal AWS Credentials block type and schema, then create one document."""
    block_type = await prefect_client.create_block_type(
        block_type=BlockTypeCreate(name="AWS Credentials", slug="aws-credentials")
    )
    block_schema = await prefect_client.create_block_schema(
        block_schema=BlockSchemaCreate(
            block_type_id=block_type.id,
            fields={"properties": {"aws_access_key_id": {"type": "string"}}},
        )
    )
    return await prefect_client.create_block_document(
        block_document=BlockDocumentCreate(
            name="bezos-creds",
            block_type_id=block_type.id,
            block_schema_id=block_schema.id,
            data={"aws_access_key_id": "AKIA1234"},
        )
    )
@pytest.fixture
def set_ui_url():
    """Temporarily point PREFECT_UI_URL at a fake UI host for the duration of a test."""
    with temporary_settings({PREFECT_UI_URL: "http://gimmedata.com"}):
        yield
class TestProjectDeploy:
@pytest.fixture
def uninitialized_project_dir(self, project_dir):
    """Class-local override: project dir with its prefect.yaml removed."""
    (project_dir / "prefect.yaml").unlink()
    return project_dir
@pytest.fixture
def uninitialized_project_dir_with_git_no_remote(self, uninitialized_project_dir):
    """Class-local override: uninitialized project dir turned into a fresh git repo."""
    subprocess.run(["git", "init"], cwd=uninitialized_project_dir)
    assert (uninitialized_project_dir / ".git").exists()
    return uninitialized_project_dir
@pytest.fixture
def uninitialized_project_dir_with_git_with_remote(
    self, uninitialized_project_dir_with_git_no_remote
):
    """Class-local override: git-backed project dir with an example `origin` remote."""
    repo_dir = uninitialized_project_dir_with_git_no_remote
    subprocess.run(
        ["git", "remote", "add", "origin", "https://example.com/org/repo.git"],
        cwd=repo_dir,
    )
    return repo_dir
async def test_project_deploy(self, project_dir, prefect_client: PrefectClient):
    """Deploying with explicit CLI options records all of them on the created deployment."""
    await prefect_client.create_work_pool(
        WorkPoolCreate(name="test-pool", type="test")
    )

    deploy_command = (
        "deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
        " 1.0.0 -v env=prod -t foo-bar"
    )
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=deploy_command,
        expected_code=0,
        expected_output_contains=[
            "An important name/test-name",
            "prefect worker start --pool 'test-pool'",
        ],
    )

    created = await prefect_client.read_deployment_by_name(
        "An important name/test-name"
    )
    assert created.name == "test-name"
    assert created.work_pool_name == "test-pool"
    assert created.version == "1.0.0"
    assert created.tags == ["foo-bar"]
    assert created.job_variables == {"env": "prod"}
    assert created.enforce_parameter_schema
async def test_deploy_with_active_workers(
    self, project_dir, work_pool, prefect_client, monkeypatch
):
    """When the pool already has an ONLINE worker, the CLI must not suggest starting one."""
    mock_read_workers_for_work_pool = AsyncMock(
        return_value=[
            Worker(
                name="test-worker",
                work_pool_id=work_pool.id,
                status=WorkerStatus.ONLINE,
            )
        ]
    )
    monkeypatch.setattr(
        "prefect.client.orchestration.PrefectClient.read_workers_for_work_pool",
        mock_read_workers_for_work_pool,
    )
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            f"deploy ./wrapped-flow-project/flow.py:test_flow -n test-name -p {work_pool.name}"
        ),
        expected_code=0,
        # The "start a worker" hint should only be shown when no workers are online
        expected_output_does_not_contain=[
            f"prefect worker start --pool '{work_pool.name}'",
        ],
    )
async def test_deploy_with_wrapped_flow_decorator(
    self, project_dir, work_pool, prefect_client
):
    """A flow wrapped by extra decorators still deploys under its declared flow name."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            f"deploy ./wrapped-flow-project/flow.py:test_flow -n test-name -p {work_pool.name}"
        ),
        expected_code=0,
        # The function name must not leak through; the flow's own name is used
        expected_output_does_not_contain=["test-flow"],
        expected_output_contains=[
            "wrapped-flow/test-name",
            f"prefect worker start --pool '{work_pool.name}'",
        ],
    )
    deployment = await prefect_client.read_deployment_by_name(
        "wrapped-flow/test-name"
    )
    assert deployment.name == "test-name"
    assert deployment.work_pool_name == work_pool.name
async def test_deploy_with_missing_imports(
    self, project_dir, work_pool, prefect_client
):
    """A flow file with unresolvable imports can still be deployed."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            f"deploy ./wrapped-flow-project/missing_imports.py:bloop_flow -n test-name -p {work_pool.name}"
        ),
        expected_code=0,
        expected_output_does_not_contain=["test-flow"],
        # NOTE(review): the deployment is registered under "wrapped-flow" —
        # presumably the flow name declared in the fixture project; confirm
        # against missing_imports.py in tests/test-projects
        expected_output_contains=[
            "wrapped-flow/test-name",
            f"prefect worker start --pool '{work_pool.name}'",
        ],
    )
    deployment = await prefect_client.read_deployment_by_name(
        "wrapped-flow/test-name"
    )
    assert deployment.name == "test-name"
    assert deployment.work_pool_name == work_pool.name
async def test_project_deploy_with_default_work_pool(
    self, project_dir, prefect_client
):
    """Omitting -p falls back to the PREFECT_DEFAULT_WORK_POOL_NAME setting."""
    await prefect_client.create_work_pool(
        WorkPoolCreate(name="test-pool", type="test")
    )
    with temporary_settings(updates={PREFECT_DEFAULT_WORK_POOL_NAME: "test-pool"}):
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                # no -p flag: the default pool from settings must be used
                "deploy ./flows/hello.py:my_flow -n test-name --version"
                " 1.0.0 -v env=prod -t foo-bar"
            ),
            expected_code=0,
            expected_output_contains=[
                "An important name/test-name",
                "prefect worker start --pool 'test-pool'",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.name == "test-name"
        assert deployment.work_pool_name == "test-pool"
        assert deployment.version == "1.0.0"
        assert deployment.tags == ["foo-bar"]
        assert deployment.job_variables == {"env": "prod"}
        assert deployment.enforce_parameter_schema
async def test_project_deploy_with_no_deployment_file(
    self, project_dir, prefect_client: PrefectClient
):
    """Deploying without a deployment file and with --enforce-parameter-schema sets the flag."""
    await prefect_client.create_work_pool(
        WorkPoolCreate(name="test-pool", type="test")
    )
    result = await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
            " 1.0.0 -v env=prod -t foo-bar --enforce-parameter-schema"
        ),
    )
    assert result.exit_code == 0
    assert "An important name/test" in result.output

    deployment = await prefect_client.read_deployment_by_name(
        "An important name/test-name"
    )
    assert deployment.name == "test-name"
    assert deployment.work_pool_name == "test-pool"
    assert deployment.version == "1.0.0"
    assert deployment.tags == ["foo-bar"]
    assert deployment.job_variables == {"env": "prod"}
    assert deployment.enforce_parameter_schema is True
async def test_project_deploy_with_no_prefect_yaml(self, project_dir, work_pool):
    """Without a prefect.yaml, deploy still works and points the user at `prefect init`."""
    (project_dir / "prefect.yaml").unlink()

    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-name -p"
            f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
        ),
        expected_code=0,
        expected_output_contains=[
            "Your Prefect workers will attempt to load your flow from:",
            "To see more options for managing your flow's code, run:",
            "$ prefect init",
        ],
    )
async def test_deploy_does_not_prompt_storage_when_pull_step_exists(
    self, project_dir, work_pool, interactive_console
):
    """An explicit pull step in prefect.yaml suppresses the remote-storage prompt."""
    # write a pull step to the prefect.yaml
    with open("prefect.yaml", "r") as f:
        config = yaml.safe_load(f)
    config["pull"] = [
        {"prefect.deployments.steps.set_working_directory": {"directory": "."}}
    ]
    with open("prefect.yaml", "w") as f:
        yaml.safe_dump(config, f)
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-name -p"
            f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
            " --interval 60"
        ),
        user_input=(
            # don't save the deployment configuration
            "n" + readchar.key.ENTER
        ),
        expected_code=0,
        expected_output_does_not_contain=[
            "Would you like your workers to pull your flow code from a remote"
            " storage location when running this flow?"
        ],
    )
@pytest.mark.parametrize(
    "cli_options,expected_limit,expected_strategy",
    [
        pytest.param("-cl 42", 42, None, id="limit-only"),
        pytest.param(
            "-cl 42 --collision-strategy CANCEL_NEW",
            42,
            "CANCEL_NEW",
            id="limit-and-strategy",
        ),
        pytest.param(
            "--collision-strategy CANCEL_NEW",
            None,
            None,
            id="strategy-only",
        ),
    ],
)
@pytest.mark.usefixtures("interactive_console", "uninitialized_project_dir")
async def test_deploy_with_concurrency_limit_and_options(
    self,
    project_dir,
    prefect_client: PrefectClient,
    cli_options,
    expected_limit,
    expected_strategy,
):
    """Concurrency CLI flags end up both in the saved prefect.yaml and on the deployment."""
    await prefect_client.create_work_pool(
        WorkPoolCreate(name="test-pool", type="test")
    )
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-deploy-concurrency-limit -p test-pool "
            + "--interval 60 "
            + cli_options
        ),
        expected_code=0,
        user_input=(
            # Decline pulling from remote storage
            "n"
            + readchar.key.ENTER
            +
            # Accept saving the deployment configuration
            "y"
            + readchar.key.ENTER
        ),
        expected_output_contains=[
            "prefect deployment run 'An important name/test-deploy-concurrency-limit'"
        ],
    )

    # The saved prefect.yaml mirrors the CLI options
    prefect_file = Path("prefect.yaml")
    assert prefect_file.exists()
    with open(prefect_file, "r") as f:
        config = yaml.safe_load(f)
    if expected_limit is not None:
        if expected_strategy is not None:
            # Both flags: saved as a mapping with limit + collision strategy
            assert config["deployments"][0]["concurrency_limit"] == {
                "limit": expected_limit,
                "collision_strategy": expected_strategy,
            }
        else:
            # Limit only: saved as a bare integer
            assert config["deployments"][0]["concurrency_limit"] == expected_limit
    else:
        # Strategy without a limit: no concurrency limit is saved
        assert config["deployments"][0]["concurrency_limit"] is None

    deployment = await prefect_client.read_deployment_by_name(
        "An important name/test-deploy-concurrency-limit"
    )
    assert deployment.name == "test-deploy-concurrency-limit"
    assert deployment.work_pool_name == "test-pool"
    if expected_limit is not None:
        assert deployment.global_concurrency_limit is not None
        assert deployment.global_concurrency_limit.limit == expected_limit
    else:
        assert deployment.global_concurrency_limit is None
    if expected_strategy is not None:
        assert deployment.concurrency_options is not None
        assert (
            deployment.concurrency_options.collision_strategy == expected_strategy
        )
    else:
        assert deployment.concurrency_options is None
class TestGeneratedPullAction:
async def test_project_deploy_generates_pull_action(
    self, work_pool, prefect_client, uninitialized_project_dir
):
    """With no prefect.yaml and no git repo, the generated pull step is set_working_directory."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy flows/hello.py:my_flow -n test-name -p"
            f" {work_pool.name} --interval 60"
        ),
        expected_code=0,
    )

    created = await prefect_client.read_deployment_by_name(
        "An important name/test-name"
    )
    expected_pull_step = {
        "prefect.deployments.steps.set_working_directory": {
            "directory": str(uninitialized_project_dir)
        }
    }
    assert created.pull_steps == [expected_pull_step]
async def test_project_deploy_with_no_prefect_yaml_git_repo_no_remote(
    self,
    work_pool,
    prefect_client,
    uninitialized_project_dir_with_git_no_remote,
):
    """A git repo without remotes still gets a plain set_working_directory pull step."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-name -p"
            f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
            " --interval 60"
        ),
        expected_code=0,
    )

    created = await prefect_client.read_deployment_by_name(
        "An important name/test-name"
    )
    expected_pull_step = {
        "prefect.deployments.steps.set_working_directory": {
            "directory": str(uninitialized_project_dir_with_git_no_remote)
        }
    }
    assert created.pull_steps == [expected_pull_step]
@pytest.mark.usefixtures("interactive_console")
async def test_project_deploy_with_no_prefect_yaml_git_repo_user_rejects(
    self,
    work_pool,
    prefect_client,
    uninitialized_project_dir_with_git_with_remote,
):
    """Declining the remote-storage offer falls back to a set_working_directory pull step."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-name -p"
            f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
            " --interval 60"
        ),
        # User rejects pulling from the remote repo and rejects saving the
        # deployment configuration
        user_input="n" + readchar.key.ENTER + "n" + readchar.key.ENTER,
        expected_code=0,
    )
    deployment = await prefect_client.read_deployment_by_name(
        "An important name/test-name"
    )
    assert deployment.pull_steps == [
        {
            "prefect.deployments.steps.set_working_directory": {
                "directory": str(uninitialized_project_dir_with_git_with_remote)
            }
        }
    ]
@pytest.mark.usefixtures(
    "interactive_console", "uninitialized_project_dir_with_git_with_remote"
)
async def test_project_deploy_with_no_prefect_yaml_git_repo(
    self, work_pool, prefect_client
):
    """Accepting all discovered git details stores a git_clone pull step and persists it to prefect.yaml."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-name -p"
            f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
            " --interval 60"
        ),
        expected_code=0,
        user_input=(
            # Accept pulling from remote storage
            readchar.key.ENTER
            +
            # Select remote Git repo as storage (first option)
            readchar.key.ENTER
            +
            # Accept discovered URL
            readchar.key.ENTER
            +
            # Accept discovered branch
            readchar.key.ENTER
            +
            # Choose public repo
            "n"
            + readchar.key.ENTER
            # Accept saving the deployment configuration
            + "y"
            + readchar.key.ENTER
        ),
        expected_output_contains=[
            "Would you like your workers to pull your flow code from a remote"
            " storage location when running this flow?"
        ],
    )
    deployment = await prefect_client.read_deployment_by_name(
        "An important name/test-name"
    )
    # The deployment's pull steps reflect the discovered repo and branch...
    assert deployment.pull_steps == [
        {
            "prefect.deployments.steps.git_clone": {
                "repository": "https://example.com/org/repo.git",
                "branch": "main",
            }
        }
    ]
    # ...and the same pull section was persisted to prefect.yaml
    prefect_file_contents = yaml.safe_load(Path("prefect.yaml").read_text())
    assert prefect_file_contents["pull"] == [
        {
            "prefect.deployments.steps.git_clone": {
                "repository": "https://example.com/org/repo.git",
                "branch": "main",
            }
        }
    ]
@pytest.mark.usefixtures(
    "interactive_console", "uninitialized_project_dir_with_git_with_remote"
)
async def test_project_deploy_with_no_prefect_yaml_git_repo_user_overrides(
    self, work_pool, prefect_client
):
    """Rejecting the discovered URL/branch and typing replacements stores the overridden values."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-name -p"
            f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
            " --interval 60"
        ),
        expected_code=0,
        user_input=(
            # Accept pulling from remote storage
            readchar.key.ENTER
            +
            # Select remote Git repo as storage (first option)
            readchar.key.ENTER
            +
            # Reject discovered URL
            "n"
            + readchar.key.ENTER
            +
            # Enter new URL
            "https://example.com/org/repo-override.git"
            + readchar.key.ENTER
            +
            # Reject discovered branch
            "n"
            + readchar.key.ENTER
            +
            # Enter new branch
            "dev"
            + readchar.key.ENTER
            +
            # Choose public repo
            "n"
            + readchar.key.ENTER
            # Decline saving the deployment configuration
            + "n"
            + readchar.key.ENTER
        ),
        expected_output_contains=[
            "Would you like your workers to pull your flow code from a remote"
            " storage location when running this flow?"
        ],
    )
    deployment = await prefect_client.read_deployment_by_name(
        "An important name/test-name"
    )
    # The pull step reflects the user-typed URL and branch, not the discovered ones
    assert deployment.pull_steps == [
        {
            "prefect.deployments.steps.git_clone": {
                "repository": "https://example.com/org/repo-override.git",
                "branch": "dev",
            }
        }
    ]
@pytest.mark.usefixtures(
    "interactive_console",
    "uninitialized_project_dir_with_git_with_remote",
    "mock_provide_password",
)
async def test_project_deploy_with_no_prefect_yaml_git_repo_with_token(
    self,
    work_pool,
    prefect_client,
):
    """Choosing a private repo stores the token in a Secret block referenced by the pull step."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=(
            "deploy ./flows/hello.py:my_flow -n test-name -p"
            f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
            " --interval 60"
        ),
        expected_code=0,
        user_input=(
            # Accept pulling from remote storage
            readchar.key.ENTER
            +
            # Select remote Git repo as storage (first option)
            readchar.key.ENTER
            +
            # Accept discovered URL
            readchar.key.ENTER
            +
            # Accept discovered branch
            readchar.key.ENTER
            +
            # Choose private repo
            "y"
            + readchar.key.ENTER
            # Enter token
            + "my-token"
            + readchar.key.ENTER
            # Decline saving the deployment configuration
            + "n"
            + readchar.key.ENTER
        ),
        expected_output_contains=[
            "Would you like your workers to pull your flow code from a remote"
            " storage location when running this flow?"
        ],
    )
    deployment = await prefect_client.read_deployment_by_name(
        "An important name/test-name"
    )
    # The access token is referenced via a templated Secret block, never stored inline
    assert deployment.pull_steps == [
        {
            "prefect.deployments.steps.git_clone": {
                "repository": "https://example.com/org/repo.git",
                "branch": "main",
                "access_token": (
                    "{{ prefect.blocks.secret.deployment-test-name-an-important-name-repo-token }}"
                ),
            }
        }
    ]
    # The Secret block itself holds the raw token value
    token_block = await Secret.load(
        "deployment-test-name-an-important-name-repo-token"
    )
    assert token_block.get() == "my-token"
    @pytest.mark.usefixtures("interactive_console", "uninitialized_project_dir")
    async def test_deploy_with_blob_storage_select_existing_credentials(
        self,
        work_pool,
        prefect_client,
        aws_credentials,
        monkeypatch,
    ):
        """Interactive deploy choosing S3 storage with an existing credentials
        block: the saved pull step references that block via a placeholder.
        """
        # Stub out step resolution so prefect-aws does not need to be
        # installed for the deploy to succeed.
        mock_step = mock.MagicMock()
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_object", lambda x: mock_step
        )
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_module",
            lambda x: None,
        )
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
                " --interval 60"
            ),
            expected_code=0,
            user_input=(
                # Accept pulling from remote storage
                readchar.key.ENTER
                # Select S3 bucket as storage (second option)
                + readchar.key.DOWN
                + readchar.key.ENTER
                # Provide bucket name
                + "my-bucket"
                + readchar.key.ENTER
                # Accept default folder (root of bucket)
                + readchar.key.ENTER
                # Select existing credentials (first option)
                + readchar.key.ENTER
                # Decline saving the deployment configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                "Would you like your workers to pull your flow code from a remote"
                " storage location when running this flow?"
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.pull_steps == [
            {
                "prefect_aws.deployments.steps.pull_from_s3": {
                    "bucket": "my-bucket",
                    "folder": "",
                    "credentials": (
                        "{{ prefect.blocks.aws-credentials.bezos-creds }}"
                    ),
                }
            }
        ]
    @pytest.mark.usefixtures("interactive_console", "uninitialized_project_dir")
    async def test_deploy_with_blob_storage_create_credentials(
        self,
        work_pool,
        prefect_client,
        aws_credentials,
        set_ui_url,
        monkeypatch,
    ):
        """Interactive deploy choosing S3 storage and creating a new
        credentials block: the default block name is templated into the pull
        step and a UI link for the new block is printed.
        """
        # Stub out step resolution so prefect-aws does not need to be
        # installed for the deploy to succeed.
        mock_step = mock.MagicMock()
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_object", lambda x: mock_step
        )
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_module",
            lambda x: None,
        )
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {work_pool.name} --version 1.0.0 -jv env=prod -t foo-bar"
                " --interval 60"
            ),
            expected_code=0,
            user_input=(
                # Accept pulling from remote storage
                readchar.key.ENTER
                # Select S3 bucket as storage (first option)
                + readchar.key.ENTER
                # Provide bucket name
                + "my-bucket"
                + readchar.key.ENTER
                # Accept default folder (root of bucket)
                + readchar.key.ENTER
                # Create new credentials (second option)
                + readchar.key.DOWN
                + readchar.key.ENTER
                # Enter access key id (only field in this hypothetical)
                + "my-access-key-id"
                + readchar.key.ENTER
                # Accept default name for new credentials block (s3-storage-credentials)
                + readchar.key.ENTER
                # Accept saving the deployment configuration
                + "y"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                (
                    "Would you like your workers to pull your flow code from a"
                    " remote storage location when running this flow?"
                ),
                "View/Edit your new credentials block in the UI:",
                PREFECT_UI_URL.value(),
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.pull_steps == [
            {
                "prefect_aws.deployments.steps.pull_from_s3": {
                    "bucket": "my-bucket",
                    "folder": "",
                    "credentials": (
                        "{{ prefect.blocks.aws-credentials.s3-storage-credentials }}"
                    ),
                }
            }
        ]
    @pytest.mark.usefixtures("interactive_console", "uninitialized_project_dir")
    async def test_build_docker_image_step_auto_build_dockerfile(
        self,
        work_pool,
        prefect_client,
        monkeypatch,
    ):
        """A build_docker_image step with ``dockerfile: auto`` runs (mocked)
        and declining remote storage saves a set_working_directory pull step
        pointing at /opt/prefect/<project dir>.
        """
        # Stub out step resolution so prefect-docker does not need to be
        # installed for the deploy to succeed.
        mock_step = mock.MagicMock()
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_object", lambda x: mock_step
        )
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_module",
            lambda x: None,
        )
        prefect_yaml = {
            "build": [
                {
                    "prefect_docker.deployments.steps.build_docker_image": {
                        "requires": "prefect-docker",
                        "image_name": "repo-name/image-name",
                        "tag": "dev",
                        "dockerfile": "auto",
                    }
                }
            ]
        }
        with open("prefect.yaml", "w") as f:
            yaml.dump(prefect_yaml, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
                " --interval 60"
            ),
            expected_code=0,
            user_input=(
                # Decline pulling from remote storage
                "n"
                + readchar.key.ENTER
                +
                # Accept saving the deployment configuration
                "y"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                "prefect deployment run 'An important name/test-name'"
            ],
        )
        prefect_file = Path("prefect.yaml")
        assert prefect_file.exists()
        with open(prefect_file, "r") as f:
            config = yaml.safe_load(f)
        # The saved pull step targets the project directory inside the image.
        dir_name = os.path.basename(os.getcwd())
        assert config["deployments"][0]["pull"] == [
            {
                "prefect.deployments.steps.set_working_directory": {
                    "directory": f"/opt/prefect/{dir_name}"
                }
            }
        ]
        mock_step.assert_called_once_with(
            image_name="repo-name/image-name",
            tag="dev",
            dockerfile="auto",
        )
        # check to make sure prefect-docker is not installed
        with pytest.raises(ImportError):
            import prefect_docker  # noqa
    @pytest.mark.usefixtures(
        "interactive_console", "uninitialized_project_dir_with_git_with_remote"
    )
    async def test_build_docker_image_step_custom_dockerfile_remote_flow_code_confirm(
        self,
        work_pool,
        prefect_client,
        monkeypatch,
    ):
        """With a custom Dockerfile, accepting remote (git) flow-code storage
        saves a git_clone pull step and still runs the (mocked) build step.
        """
        # Stub out step resolution so prefect-docker does not need to be
        # installed for the deploy to succeed.
        mock_step = mock.MagicMock()
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_object", lambda x: mock_step
        )
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_module",
            lambda x: None,
        )
        with open("Dockerfile", "w") as f:
            f.write("FROM python:3.9-slim\n")
        prefect_yaml = {
            "build": [
                {
                    "prefect_docker.deployments.steps.build_docker_image": {
                        "id": "build-image",
                        "requires": "prefect-docker",
                        "image_name": "repo-name/image-name",
                        "tag": "dev",
                        "dockerfile": "Dockerfile",
                    }
                }
            ]
        }
        with open("prefect.yaml", "w") as f:
            yaml.dump(prefect_yaml, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
                " --interval 60"
            ),
            expected_code=0,
            user_input=(
                # Accept pulling from remote storage
                readchar.key.ENTER
                +
                # Select remote Git repo as storage (first option)
                readchar.key.ENTER
                +
                # Accept discovered URL
                readchar.key.ENTER
                +
                # Accept discovered branch
                readchar.key.ENTER
                +
                # Choose public repo
                "n"
                + readchar.key.ENTER
                # Accept saving the deployment configuration
                + "y"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                (
                    "Would you like your workers to pull your flow code from a"
                    " remote storage location when running this flow?"
                ),
                "Is this a private repository?",
                "prefect deployment run 'An important name/test-name'",
            ],
        )
        prefect_file = Path("prefect.yaml")
        assert prefect_file.exists()
        with open(prefect_file, "r") as f:
            config = yaml.safe_load(f)
        # Public repo: no access_token entry in the saved pull step.
        assert config["deployments"][0]["pull"] == [
            {
                "prefect.deployments.steps.git_clone": {
                    "repository": "https://example.com/org/repo.git",
                    "branch": "main",
                }
            }
        ]
        mock_step.assert_called_once_with(
            image_name="repo-name/image-name",
            tag="dev",
            dockerfile="Dockerfile",
        )
        # check to make sure prefect-docker is not installed
        with pytest.raises(ImportError):
            import prefect_docker  # noqa
    @pytest.mark.usefixtures(
        "interactive_console", "uninitialized_project_dir_with_git_with_remote"
    )
    async def test_build_docker_image_step_custom_dockerfile_remote_flow_code_reject(
        self,
        work_pool,
        prefect_client,
        monkeypatch,
    ):
        """With a custom Dockerfile, declining remote storage but confirming
        that the Dockerfile copies the flow code saves a
        set_working_directory pull step with the user-provided path.
        """
        # Stub out step resolution so prefect-docker does not need to be
        # installed for the deploy to succeed.
        mock_step = mock.MagicMock()
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_object", lambda x: mock_step
        )
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_module",
            lambda x: None,
        )
        with open("Dockerfile", "w") as f:
            f.write("FROM python:3.9-slim\n")
        prefect_yaml = {
            "build": [
                {
                    "prefect_docker.deployments.steps.build_docker_image": {
                        "id": "build-image",
                        "requires": "prefect-docker",
                        "image_name": "repo-name/image-name",
                        "tag": "dev",
                        "dockerfile": "Dockerfile",
                    }
                }
            ]
        }
        with open("prefect.yaml", "w") as f:
            yaml.dump(prefect_yaml, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
                " --interval 60"
            ),
            expected_code=0,
            user_input=(
                # Reject pulling from remote git origin
                "n"
                + readchar.key.ENTER
                +
                # Accept copied flow code into Dockerfile
                "y"
                + readchar.key.ENTER
                +
                # Provide path to flow code
                "/opt/prefect/hello-projects/"
                + readchar.key.ENTER
                # Accept saving the deployment configuration
                + "y"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                (
                    "Would you like your workers to pull your flow code from a"
                    " remote storage location when running this flow?"
                ),
                (
                    "Does your Dockerfile have a line that copies the current"
                    " working directory"
                ),
                "What is the path to your flow code in your Dockerfile?",
                "prefect deployment run 'An important name/test-name'",
            ],
        )
        prefect_file = Path("prefect.yaml")
        assert prefect_file.exists()
        with open(prefect_file, "r") as f:
            config = yaml.safe_load(f)
        # The pull step points at the path the user typed at the prompt.
        assert config["deployments"][0]["pull"] == [
            {
                "prefect.deployments.steps.set_working_directory": {
                    "directory": "/opt/prefect/hello-projects/"
                }
            }
        ]
        mock_step.assert_called_once_with(
            image_name="repo-name/image-name",
            tag="dev",
            dockerfile="Dockerfile",
        )
        # check to make sure prefect-docker is not installed
        with pytest.raises(ImportError):
            import prefect_docker  # noqa
    @pytest.mark.usefixtures(
        "interactive_console", "uninitialized_project_dir_with_git_with_remote"
    )
    async def test_build_docker_image_step_custom_dockerfile_reject_copy_confirm(
        self,
        work_pool,
        prefect_client,
        monkeypatch,
    ):
        """With a custom Dockerfile, declining both remote storage and the
        "Dockerfile copies flow code" prompt aborts the deploy (exit code 1)
        with an explanatory message.
        """
        # Stub out step resolution so prefect-docker does not need to be
        # installed for the deploy to run.
        mock_step = mock.MagicMock()
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_object", lambda x: mock_step
        )
        monkeypatch.setattr(
            "prefect.deployments.steps.core.import_module",
            lambda x: None,
        )
        with open("Dockerfile", "w") as f:
            f.write("FROM python:3.9-slim\n")
        prefect_yaml = {
            "build": [
                {
                    "prefect_docker.deployments.steps.build_docker_image": {
                        "id": "build-image",
                        "requires": "prefect-docker",
                        "image_name": "repo-name/image-name",
                        "tag": "dev",
                        "dockerfile": "Dockerfile",
                    }
                }
            ]
        }
        with open("prefect.yaml", "w") as f:
            yaml.dump(prefect_yaml, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
                " --interval 60"
            ),
            expected_code=1,
            user_input=(
                # Reject pulling from remote git origin
                "n"
                + readchar.key.ENTER
                +
                # Reject copied flow code into Dockerfile
                "n"
            ),
            expected_output_contains=[
                (
                    "Would you like your workers to pull your flow code from a"
                    " remote storage location when running this flow?"
                ),
                (
                    "Does your Dockerfile have a line that copies the current"
                    " working directory"
                ),
                (
                    "Your flow code must be copied into your Docker image"
                    " to run your deployment."
                ),
            ],
        )
        # check to make sure prefect-docker is not installed
        with pytest.raises(ImportError):
            import prefect_docker  # noqa
    class TestGeneratedPushAction:
        """Tests for the push action generated when picking remote blob
        storage during an interactive `prefect deploy`."""

        @pytest.mark.usefixtures(
            "interactive_console", "uninitialized_project_dir_with_git_with_remote"
        )
        async def test_deploy_select_blob_storage_configures_push_step(
            self,
            work_pool,
            prefect_client,
            aws_credentials,
            monkeypatch,
        ):
            """Selecting S3 storage runs the (mocked) push step once and saves
            matching push and pull steps referencing the credentials block.
            """
            # Stub out step resolution so prefect-aws does not need to be
            # installed for the deploy to succeed.
            mock_step = mock.MagicMock()
            monkeypatch.setattr(
                "prefect.deployments.steps.core.import_object", lambda x: mock_step
            )
            monkeypatch.setattr(
                "prefect.deployments.steps.core.import_module",
                lambda x: None,
            )
            await run_sync_in_worker_thread(
                invoke_and_assert,
                command=(
                    "deploy ./flows/hello.py:my_flow -n test-name"
                    f" -p {work_pool.name} --interval 60"
                ),
                expected_code=0,
                user_input=(
                    # Accept pulling from remote storage
                    "y"
                    + readchar.key.ENTER
                    # Select S3 bucket as storage (second option)
                    + readchar.key.DOWN
                    + readchar.key.ENTER
                    # Provide bucket name
                    + "my-bucket"
                    + readchar.key.ENTER
                    # Accept default folder (root of bucket)
                    + readchar.key.ENTER
                    # Select existing credentials (first option)
                    + readchar.key.ENTER
                    # Accept saving the deployment configuration
                    + "y"
                    + readchar.key.ENTER
                ),
                expected_output_contains=[
                    "Would you like your workers to pull your flow code from a remote"
                    " storage location when running this flow?"
                ],
            )
            # The mocked push step was invoked once with concrete values.
            mock_step.assert_called_once_with(
                bucket="my-bucket",
                folder="",
                credentials={"aws_access_key_id": "AKIA1234"},
            )
            prefect_file = Path("prefect.yaml")
            assert prefect_file.exists()
            with open(prefect_file, "r") as f:
                config = yaml.safe_load(f)
            # Saved config references the credentials block by placeholder.
            assert config["push"] == [
                {
                    "prefect_aws.deployments.steps.push_to_s3": {
                        "bucket": "my-bucket",
                        "folder": "",
                        "credentials": (
                            "{{ prefect.blocks.aws-credentials.bezos-creds }}"
                        ),
                    }
                }
            ]
            assert config["pull"] == [
                {
                    "prefect_aws.deployments.steps.pull_from_s3": {
                        "bucket": "my-bucket",
                        "folder": "",
                        "credentials": (
                            "{{ prefect.blocks.aws-credentials.bezos-creds }}"
                        ),
                    }
                }
            ]
async def test_project_deploy_with_empty_dep_file(
self, project_dir, prefect_client, work_pool
):
deployment_file = project_dir / "deployment.yaml"
with deployment_file.open(mode="w") as f:
f.write("{}")
deployment_name = f"test-name-{uuid4()}"
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n {deployment_name} -p {work_pool.name}",
expected_code=0,
expected_output_contains=["An important name/test"],
)
deployment = await prefect_client.read_deployment_by_name(
f"An important name/{deployment_name}"
)
assert deployment.name == deployment_name
assert deployment.work_pool_name == work_pool.name
    @pytest.mark.usefixtures("project_dir")
    async def test_project_deploy_templates_values(self, work_pool, prefect_client):
        """Outputs of a build step ({{ input }}, {{ output1 }}, {{ output2 }})
        are templated into the deployment's version, description, and tags.
        """
        # prepare a templated deployment
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"][0]["name"] = "test-name"
        contents["deployments"][0]["version"] = "{{ input }}"
        contents["deployments"][0]["tags"] = "{{ output2 }}"
        contents["deployments"][0]["description"] = "{{ output1 }}"
        # save it back
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # update prefect.yaml to include a new build step
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        # test step that returns a dictionary of inputs and output1, output2
        prefect_config["build"] = [
            {"prefect.testing.utilities.a_test_step": {"input": "foo"}}
        ]
        # save it back
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(prefect_config, f)
        deployment_name = f"test-name-{uuid4()}"
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy ./flows/hello.py:my_flow -n {deployment_name} -p {work_pool.name}",
        )
        assert result.exit_code == 0
        assert "An important name/test" in result.output
        deployment = await prefect_client.read_deployment_by_name(
            f"An important name/{deployment_name}"
        )
        assert deployment.name == deployment_name
        assert deployment.work_pool_name == work_pool.name
        # Values produced by a_test_step replaced the placeholders above.
        assert deployment.version == "foo"
        assert deployment.tags == ["b", "2", "3"]
        assert deployment.description == "1"
    @pytest.mark.usefixtures("project_dir")
    async def test_project_deploy_templates_env_var_values(
        self, prefect_client, work_pool, monkeypatch
    ):
        """``{{ $VAR }}`` placeholders in deployment fields and build steps are
        resolved from environment variables at deploy time.
        """
        # prepare a templated deployment
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        deployment_name = f"test-name-{uuid4()}"
        contents["deployments"][0]["name"] = deployment_name
        contents["deployments"][0]["version"] = "{{ $MY_VERSION }}"
        contents["deployments"][0]["tags"] = "{{ $MY_TAGS }}"
        contents["deployments"][0]["description"] = "{{ $MY_DESCRIPTION }}"
        # save it back
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # update prefect.yaml to include some new build steps
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        monkeypatch.setenv("MY_DIRECTORY", "bar")
        monkeypatch.setenv("MY_FILE", "foo.txt")
        prefect_config["build"] = [
            {
                "prefect.deployments.steps.run_shell_script": {
                    "id": "get-dir",
                    "script": "echo '{{ $MY_DIRECTORY }}'",
                    "stream_output": True,
                }
            },
        ]
        # save it back
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(prefect_config, f)
        monkeypatch.setenv("MY_VERSION", "foo")
        monkeypatch.setenv("MY_TAGS", "b,2,3")
        monkeypatch.setenv("MY_DESCRIPTION", "1")
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy ./flows/hello.py:my_flow -n {deployment_name} -p {work_pool.name}",
            expected_output_contains=["bar"],
        )
        assert result.exit_code == 0
        assert "An important name/test" in result.output
        deployment = await prefect_client.read_deployment_by_name(
            f"An important name/{deployment_name}"
        )
        assert deployment.name == deployment_name
        assert deployment.work_pool_name == work_pool.name
        assert deployment.version == "foo"
        # The env var string is split character-wise around the commas, so
        # the commas themselves appear as tags.
        assert deployment.tags == ["b", ",", "2", ",", "3"]
        assert deployment.description == "1"
    @pytest.mark.usefixtures("project_dir")
    async def test_project_deploy_with_default_parameters(
        self, prefect_client, work_pool
    ):
        """Parameter defaults declared in prefect.yaml are stored on the
        created deployment.
        """
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            deploy_config = yaml.safe_load(f)
        deploy_config["deployments"][0]["parameters"] = {
            "number": 1,
            "message": "hello",
        }
        deploy_config["deployments"][0]["name"] = "test-name"
        deploy_config["deployments"][0]["entrypoint"] = "flows/hello.py:my_flow"
        deploy_config["deployments"][0]["work_pool"]["name"] = work_pool.name
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(deploy_config, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n test-name",
            expected_code=0,
            expected_output_contains="An important name/test-name",
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.parameters == {"number": 1, "message": "hello"}
    @pytest.mark.parametrize(
        "option", ["--param number=2", "--params '{\"number\": 2}'"]
    )
    async def test_project_deploy_with_default_parameters_from_cli(
        self, project_dir, prefect_client, work_pool, option
    ):
        """CLI ``--param``/``--params`` values override matching parameter
        defaults from prefect.yaml while leaving other defaults intact.
        """
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        config["deployments"][0]["parameters"] = {
            "number": 1,
            "message": "hello",
        }
        config["deployments"][0]["name"] = "test-name"
        config["deployments"][0]["entrypoint"] = "flows/hello.py:my_flow"
        config["deployments"][0]["work_pool"]["name"] = work_pool.name
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(config, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy -n test-name {option}",
            expected_code=0,
            expected_output_contains="An important name/test-name",
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        # "number" is overridden by the CLI; "message" keeps its yaml default.
        assert deployment.parameters == {"number": 2, "message": "hello"}
    @pytest.mark.usefixtures("project_dir")
    async def test_project_deploy_templates_pull_step_safely(
        self, prefect_client, work_pool
    ):
        """
        We want step outputs to get templated, but block references to only be
        retrieved at runtime.
        Unresolved placeholders should be left as-is, and not be resolved
        to allow templating between steps in the pull action.
        """
        # A real Secret block exists, but its placeholder must NOT be
        # resolved at deploy time (only at runtime by the worker).
        await Secret(value="super-secret-name").save(name="test-secret")
        # update prefect.yaml to include a new build step
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        # test step that returns a dictionary of inputs and output1, output2
        prefect_config["build"] = [
            {"prefect.testing.utilities.a_test_step": {"input": "foo"}}
        ]
        prefect_config["pull"] = [
            {
                "prefect.testing.utilities.b_test_step": {
                    "id": "b-test-step",
                    "input": "{{ output1 }}",
                    "secret-input": "{{ prefect.blocks.secret.test-secret }}",
                },
            },
            {
                "prefect.testing.utilities.b_test_step": {
                    "input": "foo-{{ b-test-step.output1 }}",
                    "secret-input": "{{ b-test-step.output1 }}",
                },
            },
        ]
        # save it back
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(prefect_config, f)
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy ./flows/hello.py:my_flow -n test-name -p {work_pool.name}",
        )
        assert result.exit_code == 0
        assert "An important name/test" in result.output
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        # Build-step output was templated ("input": 1); block and
        # step-to-step placeholders remain untouched.
        assert deployment.pull_steps == [
            {
                "prefect.testing.utilities.b_test_step": {
                    "id": "b-test-step",
                    "input": 1,
                    "secret-input": "{{ prefect.blocks.secret.test-secret }}",
                }
            },
            {
                "prefect.testing.utilities.b_test_step": {
                    "input": "foo-{{ b-test-step.output1 }}",
                    "secret-input": "{{ b-test-step.output1 }}",
                }
            },
        ]
    @pytest.mark.usefixtures("project_dir")
    async def test_project_deploy_templates_pull_step_in_deployments_section_safely(
        self, prefect_client, work_pool
    ):
        """
        We want step outputs to get templated, but block references to only be
        retrieved at runtime.
        Unresolved placeholders should be left as-is, and not be resolved
        to allow templating between steps in the pull action.
        """
        # Same as the test above, but the pull section lives under the
        # deployment entry rather than at the top level of prefect.yaml.
        await Secret(value="super-secret-name").save(name="test-secret")
        # update prefect.yaml to include a new build step
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        # test step that returns a dictionary of inputs and output1, output2
        prefect_config["build"] = [
            {"prefect.testing.utilities.a_test_step": {"input": "foo"}}
        ]
        prefect_config["deployments"][0]["pull"] = [
            {
                "prefect.testing.utilities.b_test_step": {
                    "id": "b-test-step",
                    "input": "{{ output1 }}",
                    "secret-input": "{{ prefect.blocks.secret.test-secret }}",
                },
            },
            {
                "prefect.testing.utilities.b_test_step": {
                    "input": "foo-{{ b-test-step.output1 }}",
                    "secret-input": "{{ b-test-step.output1 }}",
                },
            },
        ]
        # save it back
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(prefect_config, f)
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy ./flows/hello.py:my_flow -n test-name -p {work_pool.name}",
        )
        assert result.exit_code == 0
        assert "An important name/test" in result.output
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        # Build-step output was templated ("input": 1); block and
        # step-to-step placeholders remain untouched.
        assert deployment.pull_steps == [
            {
                "prefect.testing.utilities.b_test_step": {
                    "id": "b-test-step",
                    "input": 1,
                    "secret-input": "{{ prefect.blocks.secret.test-secret }}",
                }
            },
            {
                "prefect.testing.utilities.b_test_step": {
                    "input": "foo-{{ b-test-step.output1 }}",
                    "secret-input": "{{ b-test-step.output1 }}",
                }
            },
        ]
    @pytest.mark.usefixtures("project_dir")
    async def test_project_deploy_reads_entrypoint_from_prefect_yaml(self, work_pool):
        """`prefect deploy -n <name>` succeeds with no entrypoint argument when
        the entrypoint is configured in prefect.yaml.
        """
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            deploy_config = yaml.safe_load(f)
        deploy_config["deployments"][0]["name"] = "test-name"
        deploy_config["deployments"][0]["entrypoint"] = "flows/hello.py:my_flow"
        deploy_config["deployments"][0]["work_pool"]["name"] = work_pool.name
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(deploy_config, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n test-name",
            expected_code=0,
            expected_output_contains="An important name/test-name",
        )
@pytest.mark.usefixtures("project_dir")
async def test_project_deploy_exits_with_no_entrypoint_configured(self, work_pool):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["work_pool"]["name"] = work_pool.name
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy -n test-name",
expected_code=1,
expected_output_contains="An entrypoint must be provided:",
)
    @pytest.mark.usefixtures("interactive_console", "project_dir")
    async def test_deploy_without_name_interactive(self, work_pool, prefect_client):
        """Omitting ``-n`` in interactive mode prompts for a deployment name
        and uses the typed value.
        """
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                f"deploy ./flows/hello.py:my_flow -p {work_pool.name} --interval 3600"
            ),
            expected_code=0,
            user_input=(
                # Provide a deployment name
                "test-prompt-name"
                + readchar.key.ENTER
                # Decline remote storage
                + "n"
                + readchar.key.ENTER
                # Decline saving the deployment configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                "Deployment name",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-prompt-name"
        )
        assert deployment.name == "test-prompt-name"
        assert deployment.work_pool_name == work_pool.name
        assert deployment.entrypoint == "./flows/hello.py:my_flow"
@pytest.mark.usefixtures("project_dir")
async def test_deploy_without_work_pool_non_interactive(self):
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy ./flows/hello.py:my_flow -n test-name",
expected_code=1,
expected_output_contains=[
"A work pool is required to deploy this flow. Please specify a"
" work pool name via the '--pool' flag or in your prefect.yaml file."
],
)
    @pytest.mark.usefixtures("interactive_console", "project_dir")
    async def test_deploy_without_work_pool_interactive(
        self, work_pool, prefect_client
    ):
        """Omitting ``-p`` in interactive mode prompts for a work pool and the
        selected pool is stored on the deployment.
        """
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy ./flows/hello.py:my_flow -n test-name --interval 3600",
            expected_code=0,
            user_input=(
                # Select only existing work pool
                readchar.key.ENTER
                # Decline remote storage
                + "n"
                + readchar.key.ENTER
                # Decline saving the deployment configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                "Which work pool would you like to deploy this flow to?",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.name == "test-name"
        assert deployment.work_pool_name == work_pool.name
        assert deployment.entrypoint == "./flows/hello.py:my_flow"
    @pytest.mark.usefixtures("project_dir")
    async def test_deploy_with_prefect_agent_work_pool_non_interactive(
        self, default_agent_pool
    ):
        """Targeting a 'prefect-agent' pool non-interactively is rejected with
        a pointer to `prefect deployment build`.
        """
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {default_agent_pool.name}"
            ),
            expected_code=1,
            expected_output_contains=(
                "Cannot create a project-style deployment with work pool of type"
                " 'prefect-agent'. If you wish to use an agent with your deployment,"
                " please use the `prefect deployment build` command."
            ),
        )
    @pytest.mark.usefixtures("interactive_console", "project_dir")
    async def test_deploy_with_prefect_agent_work_pool_interactive(
        self, work_pool, prefect_client, default_agent_pool
    ):
        """Targeting a 'prefect-agent' pool interactively re-prompts for a
        compatible pool instead of failing outright.
        """
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {default_agent_pool.name} --interval 3600"
            ),
            expected_code=0,
            user_input=(
                # Accept only existing work pool
                readchar.key.ENTER
                # Decline remote storage
                + "n"
                + readchar.key.ENTER
                # Decline saving the deployment configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                (
                    "You've chosen a work pool with type 'prefect-agent' which cannot"
                    " be used for project-style deployments. Let's pick another work"
                    " pool to deploy to."
                ),
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.name == "test-name"
        # The re-prompted selection landed on the non-agent work pool.
        assert deployment.work_pool_name == work_pool.name
        assert deployment.entrypoint == "./flows/hello.py:my_flow"
    @pytest.mark.usefixtures("interactive_console", "project_dir")
    async def test_deploy_with_push_pool_no_worker_start_message(self, push_work_pool):
        """Deploying to a push pool does not print the `prefect worker start`
        hint, since push pools need no worker.
        """
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {push_work_pool.name} --interval 3600"
            ),
            expected_code=0,
            user_input=(
                # Decline remote storage
                "n"
                + readchar.key.ENTER
                # Decline saving the deployment configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_output_does_not_contain=[
                f"$ prefect worker start --pool {push_work_pool.name!r}",
            ],
        )
    @pytest.mark.usefixtures("interactive_console", "project_dir")
    async def test_deploy_with_no_available_work_pool_interactive(self, prefect_client):
        """With no usable work pools, interactive deploy walks through creating
        one and deploys to it.
        """
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy ./flows/hello.py:my_flow -n test-name --interval 3600",
            expected_code=0,
            user_input=(
                # Accept creating a new work pool
                readchar.key.ENTER
                # Select the first work pool type
                + readchar.key.ENTER
                # Enter a name for the new work pool
                + "test-created-via-deploy"
                + readchar.key.ENTER
                # Decline remote storage
                + "n"
                + readchar.key.ENTER
                # Decline save the deployment configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                (
                    "Looks like you don't have any work pools this flow can be deployed"
                    " to. Would you like to create one?"
                ),
                (
                    "What infrastructure type would you like to use for your new work"
                    " pool?"
                ),
                "Work pool name",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.name == "test-name"
        # The deployment targets the pool created during the prompts.
        assert deployment.work_pool_name == "test-created-via-deploy"
        assert deployment.entrypoint == "./flows/hello.py:my_flow"
    @pytest.mark.usefixtures("project_dir")
    async def test_deploy_with_entrypoint_does_not_fail_with_missing_prefect_folder(
        self, work_pool
    ):
        """Deploy succeeds even if the local .prefect directory is absent."""
        # Remove the (empty) .prefect directory before deploying.
        Path(".prefect").rmdir()
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy ./flows/hello.py:my_flow -n test-name -p {work_pool.name}",
            expected_code=0,
            expected_output_contains=[
                "Deployment 'An important name/test-name' successfully created"
            ],
        )
    @pytest.mark.parametrize("schedule_value", [None, {}])
    @pytest.mark.usefixtures("project_dir", "interactive_console")
    async def test_deploy_does_not_prompt_schedule_when_empty_schedule_prefect_yaml(
        self, schedule_value, work_pool, prefect_client
    ):
        """An explicitly empty schedule (None or {}) in prefect.yaml skips the
        schedule prompt and results in a deployment with no schedules.
        """
        prefect_yaml_file = Path("prefect.yaml")
        with prefect_yaml_file.open(mode="r") as f:
            deploy_config = yaml.safe_load(f)
        deploy_config["deployments"] = [
            {
                "name": "test-name",
                "entrypoint": "flows/hello.py:my_flow",
                "work_pool": {
                    "name": work_pool.name,
                },
                "schedule": schedule_value,
            }
        ]
        with prefect_yaml_file.open(mode="w") as f:
            yaml.safe_dump(deploy_config, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n test-name",
            user_input=(
                # Decline remote storage
                "n"
                + readchar.key.ENTER
                # reject saving configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_code=0,
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert len(deployment.schedules) == 0
    @pytest.mark.parametrize("build_value", [None, {}])
    @pytest.mark.usefixtures("project_dir", "interactive_console")
    async def test_deploy_does_not_prompt_build_docker_image_when_empty_build_action_prefect_yaml(
        self, build_value, work_pool, prefect_client
    ):
        """An explicitly empty build action (None or {}) in prefect.yaml skips
        the "build a Docker image?" prompt.
        """
        prefect_yaml_file = Path("prefect.yaml")
        with prefect_yaml_file.open(mode="r") as f:
            deploy_config = yaml.safe_load(f)
        deploy_config["deployments"] = [
            {
                "name": "test-name",
                "entrypoint": "flows/hello.py:my_flow",
                "work_pool": {
                    "name": work_pool.name,
                },
                "build": build_value,
                "schedule": {},
            }
        ]
        with prefect_yaml_file.open(mode="w") as f:
            yaml.safe_dump(deploy_config, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n test-name",
            user_input=(
                # Decline remote storage
                "n"
                + readchar.key.ENTER
                # reject saving configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_does_not_contain="Would you like to build a Docker image?",
        )
        assert await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
    async def test_deploy_with_bad_run_shell_script_raises(
        self, project_dir, work_pool
    ):
        """
        Regression test for a bug where deployment steps would continue even when
        a `run_shell_script` step failed.
        """
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        # `cat nothing` fails (no such file), so the build step must raise.
        config["build"] = [
            {
                "prefect.deployments.steps.run_shell_script": {
                    "id": "test",
                    "script": "cat nothing",
                    "stream_output": True,
                }
            }
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(config, f)

        # The failing shell step must surface as a StepExecutionError instead
        # of being swallowed.
        with pytest.raises(StepExecutionError):
            await run_sync_in_worker_thread(
                invoke_and_assert,
                command=(
                    "deploy ./flows/hello.py:my_flow -n test-name --pool"
                    f" {work_pool.name}"
                ),
            )
    @pytest.mark.usefixtures("project_dir")
    async def test_deploy_templates_env_vars(
        self, prefect_client, monkeypatch, work_pool
    ):
        """``{{ $VAR }}`` placeholders resolve for the created deployments but
        the saved prefect.yaml keeps the raw placeholders.
        """
        # set up environment variables
        monkeypatch.setenv("WORK_POOL", work_pool.name)
        monkeypatch.setenv("MY_VAR", "my-value")
        # set up prefect.yaml that has env var placeholders for the work pool name
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        prefect_config["deployments"] = [
            {
                "name": "test-deployment",
                "entrypoint": "flows/hello.py:my_flow",
                "work_pool": {"name": "{{ $WORK_POOL }}"},
            },
            {
                "name": "test-deployment2",
                "entrypoint": "flows/hello.py:my_flow",
                "work_pool": {"name": "{{ $WORK_POOL }}"},
            },
        ]
        prefect_config["build"] = [
            {"prefect.testing.utilities.a_test_step": {"input": "{{ $MY_VAR }}"}}
        ]
        # save config to prefect.yaml
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(prefect_config, f)
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy --all",
            expected_code=0,
            expected_output_does_not_contain=(
                "This deployment configuration references work pool",
                (
                    "This means no worker will be able to pick up its runs. You can"
                    " create a work pool in the Prefect UI."
                ),
            ),
        )
        assert result.exit_code == 0
        deployments = await prefect_client.read_deployments()
        assert len(deployments) == 2
        # Both deployments resolved {{ $WORK_POOL }} to the real pool name.
        assert deployments[0].name == "test-deployment"
        assert deployments[0].work_pool_name == work_pool.name
        assert deployments[1].name == "test-deployment2"
        assert deployments[1].work_pool_name == work_pool.name
        # The file on disk still contains the unexpanded placeholders.
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert (
            config["build"][0]["prefect.testing.utilities.a_test_step"]["input"]
            == "{{ $MY_VAR }}"
        )
        assert config["deployments"][0]["work_pool"]["name"] == "{{ $WORK_POOL }}"
        assert config["deployments"][1]["work_pool"]["name"] == "{{ $WORK_POOL }}"
@pytest.mark.usefixtures("interactive_console")
class TestRemoteStoragePicklist:
@pytest.mark.usefixtures("uninitialized_project_dir_with_git_no_remote")
async def test_no_git_option_when_no_remote_url(
self, docker_work_pool, aws_credentials, monkeypatch
):
mock_step = mock.MagicMock()
monkeypatch.setattr(
"prefect.deployments.steps.core.import_object", lambda x: mock_step
)
monkeypatch.setattr(
"prefect.deployments.steps.core.import_module",
lambda x: None,
)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --cron '0 4 * * *' -p"
" test-docker-work-pool"
),
expected_code=0,
expected_output_contains="s3",
expected_output_does_not_contain="Git Repo",
user_input=(
# no custom image
"n"
+ readchar.key.ENTER
# Accept remote storage
+ "y"
+ readchar.key.ENTER
# Select S3
+ readchar.key.ENTER
# Enter bucket name
+ "test-bucket"
+ readchar.key.ENTER
# Enter bucket prefix
+ readchar.key.ENTER
# Select existing credentials
+ readchar.key.ENTER
# Decline saving the deployment configuration
+ "n"
+ readchar.key.ENTER
),
)
@pytest.mark.usefixtures("uninitialized_project_dir_with_git_with_remote")
async def test_git_option_present_when_remote_url(
self, docker_work_pool, monkeypatch
):
mock_step = mock.MagicMock()
monkeypatch.setattr(
"prefect.deployments.steps.core.import_object", lambda x: mock_step
)
monkeypatch.setattr(
"prefect.deployments.steps.core.import_module",
lambda x: None,
)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --cron '0 4 * * *' -p"
" test-docker-work-pool"
),
expected_code=0,
expected_output_contains="Git Repo",
expected_output_does_not_contain="s3",
user_input=(
# no custom image
"n"
+ readchar.key.ENTER
# Accept remote storage
+ "y"
+ readchar.key.ENTER
# Select Git (first option)
+ readchar.key.ENTER
# Confirm git url
+ readchar.key.ENTER
# Confirm git branch
+ readchar.key.ENTER
# Not a private repo
+ "n"
+ readchar.key.ENTER
# Decline saving the deployment configuration
+ "n"
+ readchar.key.ENTER
),
)
class TestSchedules:
    """Tests for schedule configuration via `prefect deploy` flags and prefect.yaml."""
    @pytest.mark.usefixtures("project_dir")
    async def test_passing_cron_schedules_to_deploy(self, work_pool, prefect_client):
        """--cron plus --timezone creates a cron schedule with that timezone."""
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name --cron '0 4 * * *'"
                f" --timezone 'Europe/Berlin' --pool {work_pool.name}"
            ),
        )
        assert result.exit_code == 0
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        schedule = deployment.schedules[0].schedule
        assert schedule.cron == "0 4 * * *"
        assert schedule.timezone == "Europe/Berlin"
@pytest.mark.usefixtures("project_dir")
async def test_deployment_yaml_cron_schedule(self, work_pool, prefect_client):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["cron"] = "0 4 * * *"
deploy_config["deployments"][0]["schedule"]["timezone"] = "America/Chicago"
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule = deployment.schedules[0].schedule
assert schedule.cron == "0 4 * * *"
assert schedule.timezone == "America/Chicago"
@pytest.mark.usefixtures("project_dir")
async def test_deployment_yaml_cron_schedule_timezone_cli(
self, work_pool, prefect_client
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["cron"] = "0 4 * * *"
deploy_config["deployments"][0]["schedule"]["timezone"] = "America/Chicago"
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name "
f"--timezone 'Europe/Berlin' --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 1
schedule = deployment.schedules[0].schedule
assert schedule.cron == "0 4 * * *"
assert schedule.timezone == "Europe/Berlin"
@pytest.mark.usefixtures("project_dir")
async def test_passing_interval_schedules_to_deploy(
self, work_pool, prefect_client
):
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 42"
" --anchor-date 2040-02-02 --timezone 'America/New_York' --pool"
f" {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 1
schedule = deployment.schedules[0].schedule
assert schedule.interval == timedelta(seconds=42)
assert schedule.anchor_date == pendulum.parse("2040-02-02")
assert schedule.timezone == "America/New_York"
@pytest.mark.usefixtures("project_dir")
async def test_interval_schedule_deployment_yaml(self, prefect_client, work_pool):
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["interval"] = 42
deploy_config["deployments"][0]["schedule"]["anchor_date"] = "2040-02-02"
deploy_config["deployments"][0]["schedule"]["timezone"] = "America/Chicago"
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 1
schedule = deployment.schedules[0].schedule
assert schedule.interval == timedelta(seconds=42)
assert schedule.anchor_date == pendulum.parse("2040-02-02")
assert schedule.timezone == "America/Chicago"
@pytest.mark.usefixtures("project_dir")
async def test_parsing_rrule_schedule_string_literal(
self, prefect_client, work_pool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --rrule"
" 'DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17'"
f" --pool {work_pool.name}"
),
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule = deployment.schedules[0].schedule
assert (
schedule.rrule
== "DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17"
)
@pytest.mark.usefixtures("project_dir")
async def test_rrule_deployment_yaml(self, work_pool, prefect_client):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["schedule"][
"rrule"
] = "DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17"
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --rrule"
" 'DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17'"
f" --pool {work_pool.name}"
),
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule = deployment.schedules[0].schedule
assert (
schedule.rrule
== "DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17"
)
@pytest.mark.usefixtures("project_dir")
async def test_can_provide_multiple_schedules_via_command(
self, prefect_client, work_pool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --cron '* * * * *' --interval 42 --rrule 'FREQ=HOURLY' --pool {work_pool.name}",
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/test-name' successfully created"
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule_config = {}
for deployment_schedule in deployment.schedules:
schedule = deployment_schedule.schedule
if isinstance(schedule, IntervalSchedule):
schedule_config["interval"] = schedule.interval
elif isinstance(schedule, CronSchedule):
schedule_config["cron"] = schedule.cron
elif isinstance(schedule, RRuleSchedule):
schedule_config["rrule"] = schedule.rrule
else:
raise AssertionError("Unknown schedule type received")
assert schedule_config == {
"interval": timedelta(seconds=42),
"cron": "* * * * *",
"rrule": "FREQ=HOURLY",
}
@pytest.mark.usefixtures("project_dir")
async def test_can_provide_multiple_schedules_via_yaml(
self, prefect_client, work_pool
):
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedules"] = [
{"interval": 42},
{"cron": "* * * * *"},
{"rrule": "FREQ=HOURLY"},
]
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}",
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/test-name' successfully created"
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule_config = {}
for deployment_schedule in deployment.schedules:
schedule = deployment_schedule.schedule
if isinstance(schedule, IntervalSchedule):
schedule_config["interval"] = schedule.interval
elif isinstance(schedule, CronSchedule):
schedule_config["cron"] = schedule.cron
elif isinstance(schedule, RRuleSchedule):
schedule_config["rrule"] = schedule.rrule
else:
raise AssertionError("Unknown schedule type received")
assert schedule_config == {
"interval": timedelta(seconds=42),
"cron": "* * * * *",
"rrule": "FREQ=HOURLY",
}
@pytest.mark.usefixtures("project_dir")
async def test_yaml_with_schedule_and_schedules_raises_error(self, work_pool):
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["interval"] = 42
deploy_config["deployments"][0]["schedules"] = [{"interval": 42}]
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
expected_code=1,
expected_output_contains="Both 'schedule' and 'schedules' keys are present in the deployment configuration. Please use only use `schedules`.",
)
@pytest.mark.usefixtures("project_dir")
async def test_yaml_with_schedule_prints_deprecation_warning(self, work_pool):
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["interval"] = 42
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
expected_code=0,
expected_output_contains="Defining a schedule via the `schedule` key in the deployment",
)
@pytest.mark.usefixtures("project_dir")
async def test_can_provide_multiple_schedules_of_the_same_type_via_command(
self, prefect_client, work_pool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --cron '* * * * *' --cron '0 * * * *' --pool {work_pool.name}",
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/test-name' successfully created"
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedules = set()
for deployment_schedule in deployment.schedules:
schedule = deployment_schedule.schedule
assert isinstance(schedule, CronSchedule)
schedules.add(schedule.cron)
assert schedules == {
"* * * * *",
"0 * * * *",
}
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_interval_schedule_interactive(
self, prefect_client, work_pool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Confirm schedule creation
readchar.key.ENTER
# Select interval schedule
+ readchar.key.ENTER
# Enter invalid interval
+ "bad interval"
+ readchar.key.ENTER
# Enter another invalid interval
+ "0"
+ readchar.key.ENTER
# Enter valid interval
+ "42"
+ readchar.key.ENTER
# accept schedule being active
+ readchar.key.ENTER
# decline adding another schedule
+ readchar.key.ENTER
# decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"? Seconds between scheduled runs",
"Please enter a valid interval denoted in seconds",
"Interval must be greater than 0",
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules[0].schedule.interval == timedelta(seconds=42)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_default_interval_schedule_interactive(
self, prefect_client, work_pool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Confirm schedule creation
readchar.key.ENTER
# Select interval schedule
+ readchar.key.ENTER
# Enter default interval
+ readchar.key.ENTER
# accept schedule being active
+ readchar.key.ENTER
# decline adding another schedule
+ readchar.key.ENTER
# decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"Seconds between scheduled runs (3600)",
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules[0].schedule.interval == timedelta(seconds=3600)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_cron_schedule_interactive(self, prefect_client, work_pool):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Confirm schedule creation
readchar.key.ENTER
# Select cron schedule
+ readchar.key.DOWN
+ readchar.key.ENTER
# Enter invalid cron string
+ "bad cron string"
+ readchar.key.ENTER
# Enter cron
+ "* * * * *"
+ readchar.key.ENTER
# Enter invalid timezone
+ "bad timezone"
+ readchar.key.ENTER
# Select default timezone
+ readchar.key.ENTER
# accept schedule being active
+ readchar.key.ENTER
# decline adding another schedule
+ readchar.key.ENTER
# decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"? Cron string",
"Please enter a valid cron string",
"? Timezone",
"Please enter a valid timezone",
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules[0].schedule.cron == "* * * * *"
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_rrule_schedule_interactive(self, prefect_client, work_pool):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Confirm schedule creation
readchar.key.ENTER
# Select rrule schedule
+ readchar.key.DOWN
+ readchar.key.DOWN
+ readchar.key.ENTER
# Enter invalid rrule string
+ "bad rrule string"
+ readchar.key.ENTER
# Enter valid rrule string
+ "FREQ=WEEKLY;BYDAY=MO,WE,FR;UNTIL=20240730T040000Z"
+ readchar.key.ENTER
# Enter invalid timezone
+ "bad timezone"
+ readchar.key.ENTER
# Select default timezone
+ readchar.key.ENTER
# accept schedule being active
+ readchar.key.ENTER
# decline adding another schedule
+ readchar.key.ENTER
# decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert (
deployment.schedules[0].schedule.rrule
== "FREQ=WEEKLY;BYDAY=MO,WE,FR;UNTIL=20240730T040000Z"
)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_no_schedule_interactive(self, prefect_client, work_pool):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Decline schedule creation
"n"
+ readchar.key.ENTER
# Decline remote storage
+ "n"
+ readchar.key.ENTER
# Decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 0
@pytest.mark.usefixtures("project_dir")
async def test_deploy_with_inactive_schedule(self, work_pool, prefect_client):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["cron"] = "0 4 * * *"
deploy_config["deployments"][0]["schedule"]["timezone"] = "America/Chicago"
deploy_config["deployments"][0]["schedule"]["active"] = False
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
deployment_schedule = deployment.schedules[0]
assert deployment_schedule.active is False
assert deployment_schedule.schedule.cron == "0 4 * * *"
assert deployment_schedule.schedule.timezone == "America/Chicago"
@pytest.mark.usefixtures("project_dir")
async def test_yaml_null_schedules(self, prefect_client, work_pool):
prefect_yaml_content = f"""
deployments:
- name: test-name
entrypoint: flows/hello.py:my_flow
work_pool:
name: {work_pool.name}
schedules: null
"""
with open("prefect.yaml", "w") as f:
f.write(prefect_yaml_content)
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules == []
class TestMultiDeploy:
    """Tests for deploying several deployments from one prefect.yaml (`--all`, multiple -n)."""
    @pytest.mark.usefixtures("project_dir")
    async def test_deploy_all(self, prefect_client, work_pool):
        """`deploy --all` creates every deployment listed in prefect.yaml."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        # Create multiple deployments
        contents["deployments"] = [
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-1",
                "work_pool": {"name": work_pool.name},
            },
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-2",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy all
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy --all",
            expected_code=0,
            expected_output_contains=[
                "An important name/test-name-1",
                "An important name/test-name-2",
            ],
            expected_output_does_not_contain=[
                "You have passed options to the deploy command, but you are"
                " creating or updating multiple deployments. These options"
                " will be ignored."
            ],
        )
        # Check if deployments were created correctly
        deployment1 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        deployment2 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-2"
        )
        assert deployment1.name == "test-name-1"
        assert deployment1.work_pool_name == work_pool.name
        assert deployment2.name == "test-name-2"
        assert deployment2.work_pool_name == work_pool.name
@pytest.mark.usefixtures("project_dir")
async def test_deploy_all_schedules_remain_inactive(
self, prefect_client, work_pool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"schedule": {"interval": 60.0, "active": True},
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-2",
"schedule": {"interval": 60.0, "active": False},
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
expected_output_contains=[
"An important name/test-name-1",
"An important name/test-name-2",
],
expected_output_does_not_contain=[
"You have passed options to the deploy command, but you are"
" creating or updating multiple deployments. These options"
" will be ignored."
],
)
deployment1 = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
deployment2 = await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
assert deployment1.name == "test-name-1"
assert deployment1.schedules[0].active is True
assert deployment2.name == "test-name-2"
assert deployment2.schedules[0].active is False
    async def test_deploy_selected_deployments(
        self, project_dir, prefect_client, work_pool
    ):
        """`deploy --name A --name B` deploys only the named deployments."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-1",
                "work_pool": {"name": work_pool.name},
                "enforce_parameter_schema": True,
            },
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-2",
                "work_pool": {"name": work_pool.name},
            },
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-3",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy only two deployments by name
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy --name test-name-1 --name test-name-2",
            expected_code=0,
            expected_output_contains=[
                (
                    "Deployment 'An important name/test-name-1' successfully created"
                    " with id"
                ),
                (
                    "Deployment 'An important name/test-name-2' successfully created"
                    " with id"
                ),
            ],
            expected_output_does_not_contain=[
                (
                    "Deployment 'An important name/test-name-3' successfully created"
                    " with id"
                ),
                (
                    "You have passed options to the deploy command, but you are"
                    " creating or updating multiple deployments. These options"
                    " will be ignored."
                ),
            ],
        )
        # Check if the two deployments were created correctly
        deployment1 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        deployment2 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-2"
        )
        assert deployment1.name == "test-name-1"
        assert deployment1.work_pool_name == work_pool.name
        assert deployment1.enforce_parameter_schema is True
        assert deployment2.name == "test-name-2"
        assert deployment2.work_pool_name == work_pool.name
        # NOTE(review): deployment2 never sets enforce_parameter_schema in the yaml;
        # this truthiness check presumably relies on the server default — confirm.
        assert deployment2.enforce_parameter_schema
        # Check if the third deployment was not created
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/test-name-3"
            )
async def test_deploy_single_with_cron_schedule(
self, project_dir, prefect_client, work_pool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-2",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy a single deployment with a cron schedule
cron_schedule = "0 * * * *"
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy --name test-name-1 --cron '{cron_schedule}'",
expected_code=0,
expected_output_contains=[
(
"Deployment 'An important name/test-name-1' successfully created"
" with id"
),
],
)
# Check if the deployment was created correctly
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
assert deployment.name == "test-name-1"
assert deployment.work_pool_name == work_pool.name
assert len(deployment.schedules) == 1
assert deployment.schedules[0].schedule == CronSchedule(cron="0 * * * *")
# Check if the second deployment was not created
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
    @pytest.mark.parametrize(
        "deployment_selector_options", ["--all", "-n test-name-1 -n test-name-2"]
    )
    async def test_deploy_multiple_with_cli_options(
        self, project_dir, prefect_client, work_pool, deployment_selector_options
    ):
        """CLI schedule options are warned about and ignored when deploying multiple deployments."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        # Create multiple deployments
        contents["deployments"] = [
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-1",
                "work_pool": {"name": work_pool.name},
            },
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-2",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy multiple deployments with CLI options
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy {deployment_selector_options} --cron '0 * * * *'",
            expected_code=0,
            expected_output_contains=[
                "An important name/test-name-1",
                "An important name/test-name-2",
                (
                    "You have passed options to the deploy command, but you are"
                    " creating or updating multiple deployments. These options will be"
                    " ignored."
                ),
            ],
        )
        # Check if deployments were created correctly and without the provided CLI options
        deployment1 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        deployment2 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-2"
        )
        assert deployment1.name == "test-name-1"
        assert deployment1.work_pool_name == work_pool.name
        assert len(deployment1.schedules) == 0
        assert deployment2.name == "test-name-2"
        assert deployment2.work_pool_name == work_pool.name
        assert len(deployment2.schedules) == 0
async def test_deploy_with_cli_option_name(
self, project_dir, prefect_client, work_pool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
}
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy --name from-cli-name --pool"
f" {work_pool.name} ./flows/hello.py:my_flow"
),
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/from-cli-name' successfully created"
" with id"
],
)
# Check name from deployment.yaml was not used
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/from-cli-name"
)
deployment.name = "from-cli-name"
@pytest.mark.usefixtures("project_dir")
async def test_deploy_without_name_in_prefect_yaml(self, prefect_client, work_pool):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
{
"entrypoint": "./flows/hello.py:my_flow",
# Missing name
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Attempt to deploy all
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
expected_output_contains=["Discovered unnamed deployment. Skipping..."],
)
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_without_name_in_prefect_yaml_interactive(
self, prefect_client, work_pool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
{
"entrypoint": "./flows/hello.py:my_flow",
# Missing name
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Attempt to deploy all
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
user_input=(
# reject saving configuration
"n"
+ readchar.key.ENTER
# accept naming deployment
+ "y"
+ readchar.key.ENTER
# enter deployment name
+ "test-name-2"
+ readchar.key.ENTER
# decline remote storage
+ "n"
+ readchar.key.ENTER
# reject saving configuration
+ "n"
+ readchar.key.ENTER
),
expected_output_contains=[
"Discovered unnamed deployment.",
"Would you like to give this deployment a name and deploy it?",
"Deployment name",
],
)
assert await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
    @pytest.mark.usefixtures("interactive_console", "project_dir")
    async def test_deploy_without_name_in_prefect_yaml_interactive_user_skips(
        self, prefect_client: PrefectClient, work_pool
    ):
        """If the user declines to name an unnamed deployment interactively,
        it is skipped and only the named deployment is created."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        # Create multiple deployments: one named, one deliberately unnamed.
        contents["deployments"] = [
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-1",
                "work_pool": {"name": work_pool.name},
                "schedule": {"interval": 3600},
            },
            {
                "entrypoint": "./flows/hello.py:my_flow",
                # Missing name
                "work_pool": {"name": work_pool.name},
                "schedule": {"interval": 3600},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Attempt to deploy all
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy --all",
            expected_code=0,
            user_input=(
                # decline remote storage
                "n"
                + readchar.key.ENTER
                # reject saving configuration
                + "n"
                + readchar.key.ENTER
                # reject naming deployment
                + "n"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                "Discovered unnamed deployment.",
                "Would you like to give this deployment a name and deploy it?",
                "Skipping unnamed deployment.",
            ],
        )
        # Only the named deployment should have been created.
        assert len(await prefect_client.read_deployments()) == 1
    async def test_deploy_with_name_not_in_prefect_yaml(
        self, project_dir, prefect_client, work_pool
    ):
        """`deploy -n` with one known and one unknown name warns about the
        unknown name and deploys only the known one."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-1",
                "work_pool": {"name": work_pool.name},
            },
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-2",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy one existing name plus one that isn't in prefect.yaml.
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n test-name-2 -n test-name-3",
            expected_code=0,
            expected_output_contains=[
                (
                    "The following deployment(s) could not be found and will not be"
                    " deployed: test-name-3"
                ),
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name-2"
        )
        assert deployment.name == "test-name-2"
        assert deployment.work_pool_name == work_pool.name
        # The unknown name must not have produced a deployment.
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/test-name-3"
            )
    async def test_deploy_with_single_deployment_with_name_in_file(
        self, project_dir, prefect_client, work_pool
    ):
        """`deploy -n <name>` deploys the single matching config from
        prefect.yaml."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "entrypoint": "./flows/hello.py:my_flow",
                "name": "test-name-1",
                "work_pool": {"name": work_pool.name},
            }
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy the deployment with a name
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n test-name-1",
            expected_code=0,
            expected_output_contains=[
                "An important name/test-name-1",
            ],
        )
        # Check if the deployment was created correctly
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        assert deployment.name == "test-name-1"
        assert deployment.work_pool_name == work_pool.name
async def test_deploy_errors_with_empty_deployments_list_and_no_cli_options(
self, project_dir
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = []
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy the deployment with a name
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy",
expected_code=1,
expected_output_contains=[
"An entrypoint must be provided:",
],
)
    async def test_deploy_single_allows_options_override(
        self, project_dir, prefect_client, work_pool
    ):
        """CLI flags (-p, --version, -v, -t) override/augment a minimal
        prefect.yaml deployment entry."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        # Minimal config: name only; everything else supplied on the CLI.
        contents["deployments"] = [
            {
                "name": "test-name-1",
            }
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy with entrypoint, pool, version, job variable, and tag flags.
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p"
                f" {work_pool.name} --version 1.0.0 -v env=prod -t foo-bar"
            ),
            expected_code=0,
            expected_output_contains=[
                "Deployment 'An important name/test-name' successfully created with id"
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.name == "test-name"
        assert deployment.work_pool_name == work_pool.name
        assert deployment.version == "1.0.0"
        assert deployment.tags == ["foo-bar"]
        assert deployment.job_variables == {"env": "prod"}
    async def test_deploy_single_deployment_with_name_in_cli(
        self, project_dir, prefect_client, work_pool
    ):
        """`deploy -n <name>` selects exactly one of several configured
        deployments."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-2",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy the deployment with a name
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n test-name-1",
            expected_code=0,
            expected_output_contains=[
                "An important name/test-name-1",
            ],
        )
        # Check if the deployment was created correctly
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        assert deployment.name == "test-name-1"
        assert deployment.work_pool_name == work_pool.name
    @pytest.mark.parametrize(
        "deploy_names",
        [
            ("my-flow/test-name-1", "test-name-3"),
            ("my-flow/test-name-1", "my-flow/test-name-3"),
            ("test-name-1", "my-flow/test-name-3"),
            ("test-name-1", "test-name-3"),
        ],
    )
    async def test_deploy_existing_deployment_and_nonexistent_deployment_deploys_former(
        self, deploy_names, project_dir, prefect_client, work_pool
    ):
        """Mixing an existing and a nonexistent name (bare or flow-qualified)
        deploys the existing one and warns about the other."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-2",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy the deployment with a name
        deploy_command = f"deploy -n '{deploy_names[0]}' -n '{deploy_names[1]}'"
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=deploy_command,
            expected_code=0,
            expected_output_contains=[
                (
                    "The following deployment(s) could not be found and will not be"
                    # The warning uses the bare deployment name, not flow/name.
                    f" deployed: {deploy_names[1].split('/')[-1]}"
                ),
                "An important name/test-name-1",
            ],
            expected_output_does_not_contain=[
                "An important name/test-name-3",
            ],
        )
        # Check if the deployment was created correctly
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        assert deployment.name == "test-name-1"
        assert deployment.work_pool_name == work_pool.name
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/test-name-3"
            )
class TestDeployPattern:
    """Tests for glob-pattern matching of deployment names in `deploy -n`."""
    @pytest.mark.parametrize(
        "deploy_name",
        [
            ("my-flow/test-name-*", "my-flow-test-name-2"),
            ("my-f*/test-name-1", "my-f*/test-name-2"),
            "*-name-*",
            ("my-*ow/test-name-1", "test-*-2"),
            ("*-flow/*-name-1", "*-name-2"),
            "my-flow/t*",
            ("*/test-name-1", "*/test-name-2"),
            "*/t*",
        ],
    )
    async def test_pattern_deploy_multiple_existing_deployments(
        self, deploy_name, project_dir, prefect_client, work_pool
    ):
        """Wildcard name patterns select every matching deployment config
        while leaving non-matching configs undeployed."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-2",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "dont-deploy-me",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # A tuple case means several -n flags; a string case means one.
        if isinstance(deploy_name, tuple):
            deploy_command = "deploy " + " ".join(
                [f"-n '{name}'" for name in deploy_name]
            )
        else:
            deploy_command = f"deploy -n '{deploy_name}'"
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=deploy_command,
            expected_code=0,
            expected_output_contains=[
                "Deploying flows with selected deployment configurations...",
                "An important name/test-name-1",
                "An important name/test-name-2",
            ],
            expected_output_does_not_contain=[
                "An important name/dont-deploy-me",
            ],
        )
        # Check if the deployment was created correctly
        deployment1 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        assert deployment1.name == "test-name-1"
        assert deployment1.work_pool_name == work_pool.name
        deployment2 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-2"
        )
        assert deployment2.name == "test-name-2"
        assert deployment2.work_pool_name == work_pool.name
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/dont-deploy-me"
            )
    @pytest.mark.parametrize(
        "deploy_name",
        [
            "*/nonexistent-deployment-name",
            "my-f*/nonexistent-deployment-name",
            "nonexistent-deployment-name",
            "nonexistent-*-name",
            "nonexistent-flow/*",
            "nonexistent-*/nonexistent-*",
        ],
    )
    async def test_pattern_deploy_nonexistent_deployments_no_existing_deployments(
        self, deploy_name, project_dir, prefect_client, work_pool
    ):
        """With no deployment configs at all, a non-matching pattern falls
        through to the missing-entrypoint error."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy -n '{deploy_name}'",
            expected_code=1,
            expected_output_contains=[
                "An entrypoint must be provided",
            ],
        )
    @pytest.mark.parametrize(
        "deploy_name",
        [
            "*/nonexistent-deployment-name",
            "my-f*/nonexistent-deployment-name",
            "nonexistent-*-name",
            "nonexistent-flow/*",
            "nonexistent-*/nonexistent-*",
        ],
    )
    async def test_pattern_deploy_nonexistent_deployments_with_existing_deployments(
        self, deploy_name, project_dir, prefect_client, work_pool
    ):
        """A pattern matching none of the configured deployments errors out
        without deploying anything."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-2",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"deploy -n '{deploy_name}'",
            expected_code=1,
            expected_output_contains=[
                (
                    "Discovered one or more deployment configurations, but no name was"
                    " given. Please specify the name of at least one deployment to"
                    " create or update."
                ),
            ],
            expected_output_does_not_contain=[
                "An important name/test-name-1",
                "An important name/test-name-2",
            ],
        )
        # Check if the deployments were not created
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/test-name-1"
            )
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/test-name-2"
            )
    @pytest.mark.parametrize(
        "deploy_name",
        [
            ("my-flow/test-name-*", "nonexistent-deployment"),
            ("my-f*/test-name-1", "my-f*/test-name-2", "my-f*/nonexistent-deployment"),
            ("*-name-4", "*-name-*"),
            ("my-flow/t*", "nonexistent-flow/*"),
        ],
    )
    async def test_pattern_deploy_one_existing_deployment_one_nonexistent_deployment(
        self, project_dir, prefect_client, work_pool, deploy_name
    ):
        """Mixing matching and non-matching patterns deploys the matches and
        does not abort on the misses."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-2",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                # No work_pool: should never be selected by the patterns above.
                "name": "dont-deploy-me",
                "entrypoint": "./flows/hello.py:my_flow",
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        if isinstance(deploy_name, tuple):
            deploy_command = "deploy " + " ".join(
                [f"-n '{name}'" for name in deploy_name]
            )
        else:
            deploy_command = f"deploy -n '{deploy_name}'"
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=deploy_command,
            expected_code=0,
            expected_output_contains=[
                "Deploying flows with selected deployment configurations...",
                "An important name/test-name-1",
                "An important name/test-name-2",
            ],
            expected_output_does_not_contain=[
                (
                    "Discovered one or more deployment configurations, but no name was"
                    " given. Please specify the name of at least one deployment to"
                    " create or update."
                ),
                "An important name/dont-deploy-me",
            ],
        )
        # Check if the deployment was created correctly
        deployment1 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        assert deployment1.name == "test-name-1"
        assert deployment1.work_pool_name == work_pool.name
        deployment2 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-2"
        )
        assert deployment2.name == "test-name-2"
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/dont-deploy-me"
            )
    @pytest.mark.parametrize(
        "deploy_names",
        [
            ("my-flow/test-name-3", "test-name-4"),
            ("test-name-3", "my-flow/test-name-4"),
            ("test-name-3", "test-name-4"),
            ("my-flow/test-name-3", "my-flow/test-name-4"),
        ],
    )
    @pytest.mark.usefixtures("project_dir")
    async def test_deploy_multiple_nonexistent_deployments_raises(
        self, deploy_names, work_pool, prefect_client
    ):
        """When every given name is unknown, deploy exits non-zero and lists
        both the bare names and the full (possibly flow-qualified) names."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-2",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy two names that match nothing in prefect.yaml.
        deploy_command = f"deploy -n '{deploy_names[0]}' -n '{deploy_names[1]}'"
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=deploy_command,
            expected_code=1,
            expected_output_contains=[
                (
                    # The not-found warning strips any "flow/" prefix...
                    "The following deployment(s) could not be found and will not be"
                    f" deployed: {deploy_names[0].split('/')[-1]},"
                    f" {deploy_names[1].split('/')[-1]}"
                ),
                (
                    # ...while the could-not-find message uses the full names.
                    "Could not find any deployment configurations with the given"
                    f" name(s): {deploy_names[0]}, {deploy_names[1]}. Your flow will be"
                    " deployed with a new deployment configuration."
                ),
            ],
        )
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/test-name-3"
            )
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/test-name-4"
            )
    @pytest.mark.parametrize(
        "deploy_names",
        [
            ("my-flow/test-name-1", "my-flow/test-name-2"),
            ("test-name-1", "test-name-2"),
            ("my-flow/test-name-1", "test-name-2"),
        ],
    )
    async def test_deploy_multiple_existing_deployments_deploys_both(
        self, deploy_names, project_dir, prefect_client, work_pool
    ):
        """Two existing names (bare or flow-qualified) given via repeated -n
        flags are both deployed."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-2",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Deploy the deployment with a name
        deploy_command = f"deploy -n '{deploy_names[0]}' -n '{deploy_names[1]}'"
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=deploy_command,
            expected_code=0,
            expected_output_contains=[
                "Deploying flows with selected deployment configurations...",
                "An important name/test-name-1",
                "An important name/test-name-2",
            ],
        )
        # Check if the deployment was created correctly
        deployment1 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        assert deployment1.name == "test-name-1"
        assert deployment1.work_pool_name == work_pool.name
        deployment2 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-2"
        )
        assert deployment2.name == "test-name-2"
        assert deployment2.work_pool_name == work_pool.name
    async def test_deploy_exits_with_multiple_deployments_with_no_name(
        self, project_dir
    ):
        """Bare `deploy` with several configured deployments and no -n flag
        exits non-zero and asks for a name."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
            },
            {
                "name": "test-name-2",
                "entrypoint": "./flows/hello.py:my_flow",
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        # Run deploy without selecting any deployment by name.
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy",
            expected_code=1,
            expected_output_contains=[
                (
                    "Discovered one or more deployment configurations, but"
                    " no name was given. Please specify the name of at least one"
                    " deployment to create or update."
                ),
            ],
        )
@pytest.mark.parametrize(
"deploy_names",
[
"test-name-1",
"my-flow/test-name-1",
],
)
async def test_deploy_with_single_deployment_with_no_name(
self, deploy_names, project_dir, work_pool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy the deployment with a name
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy -n '{deploy_names[0]}'",
expected_code=1,
expected_output_contains=[
(
"Could not find any deployment configurations with the given"
f" name(s): {deploy_names[0]}. Your flow will be deployed with a"
" new deployment configuration."
),
],
)
    @pytest.mark.usefixtures("interactive_console", "project_dir")
    async def test_deploy_with_two_deployments_with_same_name_interactive_prompts_select(
        self, work_pool, prefect_client
    ):
        """When two configs share a name, interactive mode prompts the user
        to pick one flow; only the selected flow is deployed."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        # Same deployment name, two different flows.
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow2",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n 'test-name-1'",
            user_input=(
                # select 2nd flow named my_flow2
                readchar.key.DOWN
                + readchar.key.ENTER
                # reject scheduling when flow runs
                + "n"
                + readchar.key.ENTER
                # reject saving configuration
                + "n"
                + readchar.key.ENTER
                # Decline remote storage
                + "n"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                "Found multiple deployment configurations with the name test-name-1",
                "'Second important name/test-name-1' successfully created",
            ],
        )
        # Check if the deployment was created correctly
        deployment = await prefect_client.read_deployment_by_name(
            "Second important name/test-name-1"
        )
        assert deployment.name == "test-name-1"
        assert deployment.work_pool_name == work_pool.name
        # The unselected flow's deployment must not exist.
        with pytest.raises(ObjectNotFound):
            await prefect_client.read_deployment_by_name(
                "An important name/test-name-1"
            )
    @pytest.mark.usefixtures("project_dir")
    async def test_deploy_with_two_deployments_with_same_name_noninteractive_deploys_both(
        self, work_pool, prefect_client
    ):
        """Without an interactive console, both same-named configs are
        deployed (one per flow)."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        # Same deployment name, two different flows.
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
            },
            {
                "name": "test-name-1",
                "entrypoint": "./flows/hello.py:my_flow2",
                "work_pool": {"name": work_pool.name},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy -n 'test-name-1'",
            expected_code=0,
            expected_output_contains=[
                "Deploying flows with selected deployment configurations...",
                "'An important name/test-name-1' successfully created",
                "'Second important name/test-name-1' successfully created",
            ],
        )
        # Check if the deployments were created correctly
        deployment1 = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        assert deployment1.name == "test-name-1"
        assert deployment1.work_pool_name == work_pool.name
        deployment2 = await prefect_client.read_deployment_by_name(
            "Second important name/test-name-1"
        )
        assert deployment2.name == "test-name-1"
        assert deployment2.work_pool_name == work_pool.name
async def test_deploy_warns_with_single_deployment_and_multiple_names(
self, project_dir, work_pool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"name": "test-name-1",
"entrypoint": "./flows/hello.py:my_flow",
"work_pool": {"name": work_pool.name},
}
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy the deployment with a name
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy -n test-name-1 -n test-name-2",
expected_code=0,
expected_output_contains=[
(
"The following deployment(s) could not be found and will not be"
" deployed: test-name-2"
),
],
)
    @pytest.mark.usefixtures("project_dir")
    async def test_concurrency_limit_config_deployment_yaml(
        self, work_pool, prefect_client: PrefectClient
    ):
        """A `concurrency_limit` mapping in prefect.yaml is applied to the
        created deployment (limit + collision strategy)."""
        concurrency_limit_config = {"limit": 42, "collision_strategy": "CANCEL_NEW"}
        prefect_yaml = Path("prefect.yaml")
        with prefect_yaml.open(mode="r") as f:
            deploy_config = yaml.safe_load(f)
        deploy_config["deployments"][0]["name"] = "test-name"
        deploy_config["deployments"][0]["concurrency_limit"] = concurrency_limit_config
        with prefect_yaml.open(mode="w") as f:
            yaml.safe_dump(deploy_config, f)
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(f"deploy ./flows/hello.py:my_flow --pool {work_pool.name}"),
        )
        assert result.exit_code == 0
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.global_concurrency_limit is not None
        assert (
            deployment.global_concurrency_limit.limit
            == concurrency_limit_config["limit"]
        )
        assert deployment.concurrency_options is not None
        assert (
            deployment.concurrency_options.collision_strategy
            == concurrency_limit_config["collision_strategy"]
        )
    @pytest.mark.usefixtures("interactive_console", "project_dir")
    async def test_deploy_select_from_existing_deployments(
        self, work_pool, prefect_client
    ):
        """Bare interactive `deploy` offers a picker of existing configs
        (showing names and descriptions) and deploys the selection."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        contents["deployments"] = [
            {
                "name": "test-name-1",
                "description": "test-description-1",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
                "schedule": {"interval": 3600},
            },
            {
                "name": "test-name-2",
                "description": "test-description-2",
                "entrypoint": "./flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name},
                "schedule": {"interval": 3600},
            },
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(contents, f)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy",
            expected_code=0,
            user_input=(
                # accept the first (highlighted) deployment configuration
                readchar.key.ENTER
                # decline remote storage
                + "n"
                + readchar.key.ENTER
                # reject saving configuration
                + "n"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                "Would you like to use an existing deployment configuration?",
                "test-name-1",
                "test-name-2",
                "test-description-1",
                "test-description-2",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name-1"
        )
        assert deployment.name == "test-name-1"
@pytest.mark.usefixtures("interactive_console", "project_dir")
class TestSaveUserInputs:
    """Tests for saving interactively-collected deployment configuration
    back to prefect.yaml."""
    def test_save_user_inputs_no_existing_prefect_file(self):
        """Saving inputs with no prefect.yaml present creates the file with
        a single deployment entry."""
        prefect_file = Path("prefect.yaml")
        # Remove the fixture-provided file so deploy must create it.
        prefect_file.unlink()
        assert not prefect_file.exists()
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            user_input=(
                # Accept default deployment name
                readchar.key.ENTER
                +
                # decline schedule
                "n"
                + readchar.key.ENTER
                +
                # accept create work pool
                readchar.key.ENTER
                +
                # choose process work pool
                readchar.key.ENTER
                +
                # enter work pool name
                "inflatable"
                + readchar.key.ENTER
                # Decline remote storage
                + "n"
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        # The saved file should contain exactly the entered configuration.
        assert prefect_file.exists()
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 1
        assert config["deployments"][0]["name"] == "default"
        assert config["deployments"][0]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][0]["schedules"] == []
        assert config["deployments"][0]["work_pool"]["name"] == "inflatable"
    def test_save_user_inputs_existing_prefect_file(self):
        """Saving inputs with an existing prefect.yaml appends a new
        deployment entry rather than overwriting the file."""
        prefect_file = Path("prefect.yaml")
        assert prefect_file.exists()
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            user_input=(
                # Accept default deployment name
                readchar.key.ENTER
                +
                # decline schedule
                "n"
                + readchar.key.ENTER
                +
                # accept create work pool
                readchar.key.ENTER
                +
                # choose process work pool
                readchar.key.ENTER
                +
                # enter work pool name
                "inflatable"
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        # Entry appended at index 1; the fixture's original entry is index 0.
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "default"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["schedules"] == []
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
    def test_save_user_inputs_with_interval_schedule(self):
        """An interactively-configured interval schedule is saved to
        prefect.yaml with interval, timezone, anchor date, and active flag."""
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            prompts_and_responses=[
                ("? Deployment name (default)", ""),
                ("Would you like to configure schedules for this deployment?", ""),
                ("What type of schedule would you like to use?", "", "Interval"),
                ("Seconds between scheduled runs", "3600"),
                ("Would you like to activate this schedule?", "y"),
                ("Would you like to add another schedule?", "n"),
                ("you don't have any work pools", "y"),
                ("What infrastructure type", "", "process"),
                ("Work pool name", "inflatable"),
                ("Would you like to save configuration", "y"),
            ],
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        with open("prefect.yaml", mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "default"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        schedule = config["deployments"][1]["schedules"][0]
        assert schedule["interval"] == 3600
        assert schedule["timezone"] == "UTC"
        # anchor_date is generated at save time, so only check presence.
        assert schedule["anchor_date"] is not None
        assert schedule["active"]
    def test_save_user_inputs_with_cron_schedule(self):
        """An interactively-configured cron schedule is saved to prefect.yaml
        with cron string, day_or, timezone, and active flag."""
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            prompts_and_responses=[
                ("? Deployment name (default)", ""),
                ("Would you like to configure schedules for this deployment?", ""),
                ("What type of schedule would you like to use?", "↓", "Cron"),
                ("Cron string (0 0 * * *)", "* * * * *"),
                ("Timezone (UTC)", ""),
                ("Would you like to activate this schedule?", "y"),
                ("Would you like to add another schedule?", "n"),
                ("you don't have any work pools", "y"),
                ("What infrastructure type", "", "process"),
                ("Work pool name", "inflatable"),
                ("Would you like to save configuration", "y"),
            ],
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        with open("prefect.yaml", mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "default"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        schedule = config["deployments"][1]["schedules"][0]
        assert schedule == {
            "cron": "* * * * *",
            "day_or": True,
            "timezone": "UTC",
            "active": True,
        }
    def test_deploy_existing_deployment_with_no_changes_does_not_prompt_save(self):
        """Re-deploying a saved deployment with identical settings should not
        re-trigger the save flow; the prefect.yaml entry stays unchanged."""
        # Set up initial deployment deployment
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            prompts_and_responses=[
                ("? Deployment name (default)", "existing-deployment"),
                ("Would you like to configure schedules for this deployment?", ""),
                ("What type of schedule would you like to use?", "↓", "Cron"),
                ("Cron string (0 0 * * *)", "* * * * *"),
                ("Timezone (UTC)", ""),
                ("Would you like to activate this schedule?", "y"),
                ("Would you like to add another schedule?", "n"),
                ("you don't have any work pools", "y"),
                ("What infrastructure type", "", "process"),
                ("Work pool name", "inflatable"),
                ("Would you like to save configuration", "y"),
            ],
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "existing-deployment"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        assert config["deployments"][1]["schedules"][0] == {
            "cron": "* * * * *",
            "day_or": True,
            "timezone": "UTC",
            "active": True,
        }
        invoke_and_assert(
            command="deploy -n existing-deployment --cron '* * * * *'",
            prompts_and_responses=[
                ("Would you like to save configuration", "y"),
                ("Would you like to overwrite that entry?", "y"),
            ],
            expected_code=0,
            # NOTE(review): the two strings below are glued together by
            # implicit concatenation (missing comma), producing one string
            # that can never appear in output — this check is vacuous. It
            # also contradicts the prompts_and_responses above, which DO
            # answer the save prompt; the test's intent needs clarifying
            # before a mechanical comma fix is safe.
            expected_output_does_not_contain=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                    "Deployment configuration saved to prefect.yaml"
                ),
            ],
        )
        # assert that the deployment was updated in the prefect.yaml
        with open("prefect.yaml", mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "existing-deployment"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        assert config["deployments"][1]["schedules"][0] == {
            "cron": "* * * * *",
            "day_or": True,
            "timezone": "UTC",
            "active": True,
        }
    def test_deploy_existing_deployment_with_changes_prompts_save(self):
        """Re-deploying a saved deployment with a new cron flag prompts to
        save again and updates the prefect.yaml entry."""
        # Set up initial deployment deployment
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            user_input=(
                # enter deployment name
                "existing-deployment"
                + readchar.key.ENTER
                # reject create schedule
                + "n"
                + readchar.key.ENTER
                +
                # accept create work pool
                readchar.key.ENTER
                +
                # choose process work pool
                readchar.key.ENTER
                +
                # enter work pool name
                "inflatable"
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "existing-deployment"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        assert config["deployments"][1]["schedules"] == []
        # Redeploy with a cron schedule added on the CLI — a config change.
        invoke_and_assert(
            command="deploy -n existing-deployment --cron '* * * * *'",
            user_input=(
                # accept create work pool
                readchar.key.ENTER
                # choose process work pool
                + readchar.key.ENTER
                +
                # enter work pool name
                "inflatable"
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
                # accept found existing deployment
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        # assert that the deployment was updated in the prefect.yaml
        with open("prefect.yaml", mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "existing-deployment"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        assert config["deployments"][1]["schedules"][0]["cron"] == "* * * * *"
    def test_save_user_inputs_with_rrule_schedule(self):
        """An interactively entered rrule schedule is persisted to
        prefect.yaml (with UTC timezone and active=True) when the user opts
        to save the deployment configuration."""
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            user_input=(
                # Accept default deployment name
                readchar.key.ENTER
                +
                # accept schedule
                readchar.key.ENTER
                +
                # select rrule schedule (third option in the schedule menu)
                readchar.key.DOWN
                + readchar.key.DOWN
                + readchar.key.ENTER
                +
                # enter rrule schedule
                "FREQ=MINUTELY"
                + readchar.key.ENTER
                # accept schedule being active
                + readchar.key.ENTER
                # decline adding another schedule
                + readchar.key.ENTER
                # accept create work pool
                + readchar.key.ENTER
                +
                # NOTE(review): the original labeled this ENTER "accept create
                # work pool" as well -- one of these two ENTERs likely answers a
                # different wizard prompt; verify against the prompt order.
                readchar.key.ENTER
                +
                # choose process work pool
                readchar.key.ENTER
                +
                # enter work pool name
                "inflatable"
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        # The saved entry is appended after the pre-existing deployment from
        # the project fixture, hence index 1.
        with open("prefect.yaml", mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "default"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        schedule = config["deployments"][1]["schedules"][0]
        assert schedule == {
            "rrule": "FREQ=MINUTELY",
            "timezone": "UTC",
            "active": True,
        }
    async def test_save_user_inputs_with_actions(self):
        """`_save_deployment_to_prefect_file` persists build/push/pull step
        lists alongside the deployment entry in prefect.yaml.

        NOTE(review): declared ``async`` but contains no ``await``; presumably
        kept async for the test-runner's event-loop fixture -- confirm.
        """
        new_deployment_to_save = {
            "name": "new_deployment",
            "entrypoint": "flows/new_flow.py:my_flow",
            "schedule": None,
            "work_pool": {"name": "new_pool"},
            "parameter_openapi_schema": None,
        }
        # One representative step per action section.
        build_steps = [
            {
                "prefect.steps.set_working_directory": {
                    "directory": "/path/to/working/directory"
                }
            },
        ]
        push_steps = [
            {
                "prefect_aws.deployments.steps.push_to_s3": {
                    "requires": "prefect-aws>=0.3.0",
                    "bucket": "my-bucket",
                    "folder": "project-name",
                    "credentials": None,
                }
            },
        ]
        pull_steps = [
            {
                "prefect_aws.deployments.steps.pull_from_s3": {
                    "requires": "prefect-aws>=0.3.0",
                    "bucket": "my-bucket",
                    "folder": "{{ push-code.folder }}",
                    "credentials": None,
                }
            },
        ]
        _save_deployment_to_prefect_file(
            new_deployment_to_save,
            build_steps=build_steps,
            push_steps=push_steps,
            pull_steps=pull_steps,
        )
        # The new entry is appended after the fixture's existing deployment.
        prefect_file = Path("prefect.yaml")
        assert prefect_file.exists()
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == new_deployment_to_save["name"]
        assert (
            config["deployments"][1]["entrypoint"]
            == new_deployment_to_save["entrypoint"]
        )
        assert (
            config["deployments"][1]["work_pool"]["name"]
            == new_deployment_to_save["work_pool"]["name"]
        )
        assert (
            config["deployments"][1]["schedule"] == new_deployment_to_save["schedule"]
        )
        assert config["deployments"][1]["build"] == build_steps
        assert config["deployments"][1]["push"] == push_steps
        assert config["deployments"][1]["pull"] == pull_steps
def test_save_new_deployment_with_same_name_as_existing_deployment_overwrites(self):
# Set up initial 'prefect.yaml' file with a deployment
initial_deployment = {
"name": "existing_deployment",
"entrypoint": "flows/existing_flow.py:my_flow",
"schedule": None,
"work_pool": {"name": "existing_pool"},
"parameter_openapi_schema": None,
}
_save_deployment_to_prefect_file(initial_deployment)
prefect_file = Path("prefect.yaml")
assert prefect_file.exists()
with prefect_file.open(mode="r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 2
assert config["deployments"][1]["name"] == initial_deployment["name"]
# Overwrite the existing deployment
new_deployment = {
"name": "existing_deployment",
"entrypoint": "flows/existing_flow.py:my_flow",
"schedule": None,
"concurrency_limit": 42,
"work_pool": {"name": "new_pool"},
"parameter_openapi_schema": None,
}
_save_deployment_to_prefect_file(new_deployment)
# Check that the new deployment has overwritten the old one
with prefect_file.open(mode="r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 2
assert config["deployments"][1]["name"] == new_deployment["name"]
assert config["deployments"][1]["entrypoint"] == new_deployment["entrypoint"]
assert (
config["deployments"][1]["concurrency_limit"]
== new_deployment["concurrency_limit"]
)
assert (
config["deployments"][1]["work_pool"]["name"]
== new_deployment["work_pool"]["name"]
)
    def test_save_user_inputs_overwrite_confirmed(self):
        """Re-running `deploy` for the same flow and confirming the overwrite
        prompt replaces the previously saved entry (schedule changes from
        none to a 3600s interval)."""
        # First run: save a deployment with no schedule.
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            user_input=(
                # Accept default deployment name
                readchar.key.ENTER
                # decline schedule
                + "n"
                + readchar.key.ENTER
                # accept create work pool
                + readchar.key.ENTER
                # choose process work pool
                + readchar.key.ENTER
                # enter work pool name
                + "inflatable"
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "default"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["schedules"] == []
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        # Second run: add an interval schedule and confirm overwriting the
        # existing saved configuration.
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            user_input=(
                # Configure new deployment
                "n"
                + readchar.key.ENTER
                +
                # accept schedule
                readchar.key.ENTER
                +
                # select interval schedule
                readchar.key.ENTER
                +
                # enter interval schedule
                "3600"
                + readchar.key.ENTER
                +
                # accept create work pool
                readchar.key.ENTER
                +
                # choose process work pool
                readchar.key.ENTER
                +
                # enter work pool name
                "inflatable"
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
                # accept overwriting existing deployment that is found
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                "Found existing deployment configuration",
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        # Same entry count: the existing entry was overwritten, not appended.
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "default"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["schedules"][0]["interval"] == 3600
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
    def test_save_user_inputs_overwrite_rejected_saving_cancelled(self):
        """Rejecting the overwrite prompt cancels saving, leaving the
        previously saved entry (no schedules) untouched in prefect.yaml."""
        # First run: save a deployment with no schedule.
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            user_input=(
                # accept default deployment name
                readchar.key.ENTER
                +
                # decline schedule
                "n"
                + readchar.key.ENTER
                +
                # accept create work pool
                readchar.key.ENTER
                +
                # choose process work pool
                readchar.key.ENTER
                +
                # enter work pool name
                "inflatable"
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Deployment configuration saved to prefect.yaml",
            ],
        )
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "default"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["schedules"] == []
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
        # Second run: configure an interval schedule but answer "n" to the
        # overwrite prompt, which should cancel saving.
        invoke_and_assert(
            command="deploy flows/hello.py:my_flow",
            user_input=(
                # configure new deployment
                "n"
                + readchar.key.ENTER
                # configure schedule
                + readchar.key.ENTER
                # select interval schedule
                + readchar.key.ENTER
                # enter interval schedule
                + "3600"
                + readchar.key.ENTER
                # accept schedule being active
                + readchar.key.ENTER
                # decline adding another schedule
                + readchar.key.ENTER
                # accept existing work pool
                + readchar.key.ENTER
                # accept save user inputs
                + "y"
                + readchar.key.ENTER
                # reject overwriting existing deployment that is found
                + "n"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                "Found existing deployment configuration",
                "Cancelled saving deployment configuration",
            ],
        )
        # prefect.yaml is unchanged: still no schedules on the saved entry.
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "default"
        assert config["deployments"][1]["entrypoint"] == "flows/hello.py:my_flow"
        assert config["deployments"][1]["schedules"] == []
        assert config["deployments"][1]["work_pool"]["name"] == "inflatable"
    @pytest.mark.usefixtures("project_dir", "interactive_console")
    async def test_deploy_resolves_block_references_in_deployments_section(
        self, prefect_client, work_pool, ignore_prefect_deprecation_warnings
    ):
        """
        Ensure block references are resolved in deployments section of prefect.yaml

        Writes a ``{{ prefect.blocks.json... }}`` placeholder as the work pool
        name, runs `deploy`, and verifies both the created deployment and the
        re-written prefect.yaml contain the resolved value.
        """
        # TODO: Remove this test when `JSON` block is removed
        await JSON(value={"work_pool_name": work_pool.name}).save(
            name="test-json-block"
        )
        # add block reference to prefect.yaml
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        prefect_config["deployments"] = [
            {
                "name": "test-name",
                "entrypoint": "flows/hello.py:my_flow",
                "work_pool": {
                    "name": (
                        "{{ prefect.blocks.json.test-json-block.value.work_pool_name }}"
                    ),
                },
            }
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(prefect_config, f)
        # ensure block reference was added
        assert (
            prefect_config["deployments"][0]["work_pool"]["name"]
            == "{{ prefect.blocks.json.test-json-block.value.work_pool_name }}"
        )
        # run deploy
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy flows/hello.py:my_flow -n test-name",
            user_input=(
                # reject schedule
                "n"
                + readchar.key.ENTER
                # accept saving configuration
                + "y"
                + readchar.key.ENTER
                # accept overwrite config
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                "Deployment 'An important name/test-name' successfully created",
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Would you like",
                "to overwrite that entry?",
                "Deployment configuration saved to prefect.yaml!",
            ],
        )
        assert result.exit_code == 0
        assert "An important name/test" in result.output
        # The server-side deployment got the resolved work pool name.
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.name == "test-name"
        assert deployment.work_pool_name == work_pool.name
        # ensure block reference was resolved
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        assert prefect_config["deployments"][0]["work_pool"]["name"] == work_pool.name
    @pytest.mark.usefixtures("project_dir", "interactive_console")
    async def test_deploy_resolves_variables_in_deployments_section(
        self, prefect_client, work_pool
    ):
        """
        Ensure deployments section of prefect.yaml placeholders are resolved

        Same shape as the block-reference test above, but with a
        ``{{ prefect.variables... }}`` placeholder.
        """
        # create variable
        await prefect_client._client.post(
            "/variables/", json={"name": "my_work_pool", "value": work_pool.name}
        )
        # add variable to deployments section of prefect.yaml
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        prefect_config["deployments"] = [
            {
                "name": "test-name",
                "entrypoint": "flows/hello.py:my_flow",
                "work_pool": {
                    "name": "{{ prefect.variables.my_work_pool }}",
                },
            }
        ]
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(prefect_config, f)
        # ensure it is there!
        assert (
            prefect_config["deployments"][0]["work_pool"]["name"]
            == "{{ prefect.variables.my_work_pool }}"
        )
        # run deploy
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy flows/hello.py:my_flow -n test-name",
            user_input=(
                # reject schedule
                "n"
                + readchar.key.ENTER
                # accept saving configuration
                + "y"
                + readchar.key.ENTER
                # accept overwrite config
                + "y"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                "Deployment 'An important name/test-name' successfully created",
                (
                    "Would you like to save configuration for this deployment for"
                    " faster deployments in the future?"
                ),
                "Would you like",
                "to overwrite that entry?",
                "Deployment configuration saved to prefect.yaml!",
            ],
        )
        assert result.exit_code == 0
        assert "An important name/test" in result.output
        # The server-side deployment got the resolved work pool name.
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.name == "test-name"
        assert deployment.work_pool_name == work_pool.name
        # ensure variable is resolved in prefect.yaml
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        assert prefect_config["deployments"][0]["work_pool"]["name"] == work_pool.name
@pytest.mark.usefixtures("project_dir", "interactive_console", "work_pool")
class TestDeployWithoutEntrypoint:
    """Tests for `prefect deploy` invoked with no entrypoint argument: the
    wizard offers discovered flows to pick from, or a manual-entry fallback."""

    async def test_deploy_without_entrypoint(self, prefect_client: PrefectClient):
        """Accepting the first discovered flow completes a deploy; the flow
        picker lists flows discovered across the project's directories."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy",
            user_input=(
                # Accept first flow
                readchar.key.ENTER
                +
                # Accept default deployment name
                readchar.key.ENTER
                +
                # decline schedule
                "n"
                + readchar.key.ENTER
                +
                # accept first work pool
                readchar.key.ENTER
                +
                # Decline remote storage
                "n"
                + readchar.key.ENTER
                +
                # decline save user inputs
                "n"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                "Select a flow to deploy",
                "test_flow",
                "import-project/my_module/flow.py",
                "prod_flow",
                "import-project/my_module/flow.py",
                "foobar",
                "nested-project/implicit_relative.py",
                "nested-project/explicit_relative.py",
                "my_flow",
                "flows/hello.py",
                "successfully created",
            ],
        )

    async def test_deploy_without_entrypoint_manually_enter(
        self, prefect_client: PrefectClient
    ):
        """Declining the flow picker lets the user type an entrypoint, which
        ends up on the created deployment."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy",
            user_input=(
                # Decline selecting from list
                # (single-key prompt: no ENTER follows the "n")
                "n"
                +
                # Enter entrypoint
                "flows/hello.py:my_flow"
                + readchar.key.ENTER
                +
                # Accept default deployment name
                readchar.key.ENTER
                +
                # decline schedule
                "n"
                + readchar.key.ENTER
                +
                # accept first work pool
                readchar.key.ENTER
                +
                # Decline remote storage
                "n"
                + readchar.key.ENTER
                +
                # decline save user inputs
                "n"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                "Select a flow to deploy",
                "Flow entrypoint (expected format path/to/file.py:function_name)",
                "Deployment 'An important name/default' successfully created",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            name="An important name/default"
        )
        assert deployment.entrypoint == "flows/hello.py:my_flow"

    async def test_deploy_validates_manually_entered_entrypoints(
        self, prefect_client: PrefectClient
    ):
        """Malformed, missing-file, and missing-function entrypoints are each
        rejected with a re-prompt before a valid entry succeeds."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy",
            user_input=(
                # Decline selecting from list
                "n"
                +
                # Enter syntactically invalid entrypoint
                "flows/hello.py"
                + readchar.key.ENTER
                +
                # Enter entrypoint with non-existent file
                "flows/does_not_exist.py:my_flow"
                + readchar.key.ENTER
                +
                # Enter entrypoint with non-existent function
                "flows/hello.py:does_not_exist"
                + readchar.key.ENTER
                +
                # Enter valid entrypoint
                "flows/hello.py:my_flow"
                + readchar.key.ENTER
                +
                # Accept default deployment name
                readchar.key.ENTER
                +
                # decline schedule
                "n"
                + readchar.key.ENTER
                +
                # accept first work pool
                readchar.key.ENTER
                +
                # Decline remote storage
                "n"
                + readchar.key.ENTER
                +
                # decline save user inputs
                "n"
                + readchar.key.ENTER
            ),
            expected_code=0,
            expected_output_contains=[
                "Select a flow to deploy",
                "Please enter a valid flow entrypoint.",
                "Failed to load flow from entrypoint 'flows/does_not_exist.py:my_flow'",
                "Failed to load flow from entrypoint 'flows/hello.py:does_not_exist'",
                "Deployment 'An important name/default' successfully created",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            name="An important name/default"
        )
        assert deployment.entrypoint == "flows/hello.py:my_flow"
class TestCheckForMatchingDeployment:
    """Unit tests for
    `_check_for_matching_deployment_name_and_entrypoint_in_prefect_file`:
    a match requires BOTH name and entrypoint to agree."""

    @pytest.fixture(autouse=True)
    def in_temporary_directory(self, tmp_path: Path):
        # Run each test in an isolated cwd so prefect.yaml writes don't leak.
        with tmpchdir(tmp_path):
            yield

    async def test_matching_deployment_in_prefect_file_returns_true(self):
        """Same name and entrypoint as the saved entry -> True."""
        deployment = {
            "name": "existing_deployment",
            "entrypoint": "flows/existing_flow.py:my_flow",
            "schedule": None,
            "work_pool": {"name": "existing_pool"},
            "parameter_openapi_schema": None,
        }
        _save_deployment_to_prefect_file(deployment)
        # Sanity-check the fixture: the entry really is in prefect.yaml.
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        matching_deployment_exists = any(
            d["name"] == deployment["name"]
            and d["entrypoint"] == deployment["entrypoint"]
            for d in config["deployments"]
        )
        assert matching_deployment_exists, "No matching deployment found in the file."
        new_deployment = {
            "name": "existing_deployment",
            "entrypoint": "flows/existing_flow.py:my_flow",
        }
        matching_deployment_exists = (
            _check_for_matching_deployment_name_and_entrypoint_in_prefect_file(
                new_deployment
            )
        )
        assert matching_deployment_exists is True

    async def test_no_matching_deployment_in_prefect_file_returns_false(self):
        """A match on only one of name/entrypoint is not a match."""
        deployment = {
            "name": "existing_deployment",
            "entrypoint": "flows/existing_flow.py:my_flow",
            "schedule": None,
            "work_pool": {"name": "existing_pool"},
            "parameter_openapi_schema": None,
        }
        _save_deployment_to_prefect_file(deployment)
        # Sanity-check the fixture: the entry really is in prefect.yaml.
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            config = yaml.safe_load(f)
        matching_deployment_exists = any(
            d["name"] == deployment["name"]
            and d["entrypoint"] == deployment["entrypoint"]
            for d in config["deployments"]
        )
        assert matching_deployment_exists
        # Same entrypoint, different name -> no match.
        deployment_with_same_entrypoint_but_different_name = {
            "name": "new_deployment",
            "entrypoint": "flows/existing_flow.py:my_flow",
        }
        matching_deployment_exists_1 = (
            _check_for_matching_deployment_name_and_entrypoint_in_prefect_file(
                deployment_with_same_entrypoint_but_different_name
            )
        )
        assert not matching_deployment_exists_1
        # Different name AND different entrypoint -> no match.
        deployment_with_same_name_but_different_entrypoint = {
            "name": "new_deployment",
            "entrypoint": "flows/new_flow.py:my_flow",
        }
        matching_deployment_exists_2 = (
            _check_for_matching_deployment_name_and_entrypoint_in_prefect_file(
                deployment_with_same_name_but_different_entrypoint
            )
        )
        assert not matching_deployment_exists_2
class TestDeploymentTrigger:
    """Tests for deployment automation triggers: initialization from specs,
    creation against the API, and the `--trigger` CLI flag."""

    class TestDeploymentTriggerSyncing:
        """Trigger specs from prefect.yaml are parsed and synced on deploy."""

        async def test_initialize_named_deployment_triggers(self):
            """A named event-trigger spec round-trips into a
            DeploymentEventTrigger with defaulted fields filled in."""
            trigger_spec = {
                "name": "Trigger McTriggerson",
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
                "match_related": {
                    "prefect.resource.name": "seed",
                    "prefect.resource.role": "flow",
                },
                "job_variables": {"foo": "bar"},
            }
            triggers = _initialize_deployment_triggers("my_deployment", [trigger_spec])
            assert triggers == [
                DeploymentEventTrigger(
                    **{
                        "name": "Trigger McTriggerson",
                        "description": "",
                        "enabled": True,
                        "match": {"prefect.resource.id": "prefect.flow-run.*"},
                        "match_related": {
                            "prefect.resource.name": "seed",
                            "prefect.resource.role": "flow",
                        },
                        "after": set(),
                        "expect": {"prefect.flow-run.Completed"},
                        "for_each": set(),
                        "posture": Posture.Reactive,
                        "threshold": 1,
                        "within": timedelta(0),
                        "job_variables": {"foo": "bar"},
                    }
                )
            ]

        async def test_initialize_deployment_triggers_composite(self):
            """A compound spec produces a DeploymentCompoundTrigger whose
            nested specs become EventTriggers."""
            trigger_spec = {
                "name": "Trigger McTriggerson",
                "enabled": True,
                "type": "compound",
                "require": "all",
                "job_variables": {"foo": "bar"},
                "triggers": [
                    {
                        "type": "event",
                        "match": {"prefect.resource.id": "prefect.flow-run.*"},
                        "match_related": {
                            "prefect.resource.name": "seed",
                            "prefect.resource.role": "flow",
                        },
                        "expect": {"prefect.flow-run.Completed"},
                    }
                ],
            }
            triggers = _initialize_deployment_triggers("my_deployment", [trigger_spec])
            assert triggers == [
                DeploymentCompoundTrigger(
                    **{
                        "name": "Trigger McTriggerson",
                        "enabled": True,
                        "require": "all",
                        "job_variables": {"foo": "bar"},
                        "triggers": [
                            EventTrigger(
                                **{
                                    "enabled": True,
                                    "match": {
                                        "prefect.resource.id": "prefect.flow-run.*"
                                    },
                                    "match_related": {
                                        "prefect.resource.name": "seed",
                                        "prefect.resource.role": "flow",
                                    },
                                    "after": set(),
                                    "expect": {"prefect.flow-run.Completed"},
                                    "for_each": set(),
                                    "posture": Posture.Reactive,
                                    "threshold": 1,
                                    "within": timedelta(0),
                                    "job_variables": {"foo": "bar"},
                                }
                            )
                        ],
                    }
                )
            ]

        async def test_initialize_deployment_triggers_implicit_name(self):
            """A spec without a name gets '<deployment>__automation_<n>'."""
            trigger_spec = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
                "match_related": {
                    "prefect.resource.name": "seed",
                    "prefect.resource.role": "flow",
                },
            }
            triggers = _initialize_deployment_triggers("my_deployment", [trigger_spec])
            assert triggers[0].name == "my_deployment__automation_1"

        async def test_deployment_triggers_without_job_variables(self):
            """job_variables stays None (not {}) when omitted from the spec."""
            trigger_spec = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
                "match_related": {
                    "prefect.resource.name": "seed",
                    "prefect.resource.role": "flow",
                },
            }
            triggers = _initialize_deployment_triggers("my_deployment", [trigger_spec])
            assert triggers[0].job_variables is None

        async def test_create_deployment_triggers(self):
            """_create_deployment_triggers binds the deployment id, clears any
            automations owned by the deployment resource, then creates the new
            automation."""
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            trigger_spec = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
                "match_related": {
                    "prefect.resource.name": "seed",
                    "prefect.resource.role": "flow",
                },
                "job_variables": {"nested": {"foo": "bar"}},
            }
            triggers = _initialize_deployment_triggers("my_deployment", [trigger_spec])
            deployment_id = uuid4()
            await _create_deployment_triggers(client, deployment_id, triggers)
            assert triggers[0]._deployment_id == deployment_id
            # Existing automations owned by this deployment are removed first.
            client.delete_resource_owned_automations.assert_called_once_with(
                f"prefect.deployment.{deployment_id}"
            )
            client.create_automation.assert_called_once_with(
                triggers[0].as_automation()
            )

        async def test_triggers_creation_orchestrated(
            self, project_dir, prefect_client, work_pool
        ):
            """A `deploy` run passes triggers parsed from prefect.yaml to
            _create_deployment_triggers with a real client and deployment id."""
            prefect_file = Path("prefect.yaml")
            with prefect_file.open(mode="r") as f:
                contents = yaml.safe_load(f)
            contents["deployments"] = [
                {
                    "name": "test-name-1",
                    "work_pool": {
                        "name": work_pool.name,
                    },
                    "triggers": [
                        {
                            "enabled": True,
                            "match": {"prefect.resource.id": "prefect.flow-run.*"},
                            "expect": ["prefect.flow-run.Completed"],
                            "match_related": {
                                "prefect.resource.name": "seed",
                                "prefect.resource.role": "flow",
                            },
                            "job_variables": {"foo": 123},
                        }
                    ],
                }
            ]
            expected_triggers = _initialize_deployment_triggers(
                "test-name-1", contents["deployments"][0]["triggers"]
            )
            with prefect_file.open(mode="w") as f:
                yaml.safe_dump(contents, f)
            with mock.patch(
                "prefect.cli.deploy._create_deployment_triggers",
                AsyncMock(),
            ) as create_triggers:
                await run_sync_in_worker_thread(
                    invoke_and_assert,
                    command="deploy ./flows/hello.py:my_flow -n test-name-1",
                    expected_code=0,
                )
                assert create_triggers.call_count == 1
                client, deployment_id, triggers = create_triggers.call_args[0]
                assert isinstance(client, PrefectClient)
                assert isinstance(deployment_id, UUID)
                # Bind the id so the equality check below compares like-for-like.
                expected_triggers[0].set_deployment_id(deployment_id)
                assert triggers == expected_triggers

    class TestDeploymentTriggerPassedViaCLI:
        """Trigger specs supplied with `--trigger` as inline JSON or as a
        JSON/YAML file path."""

        @pytest.mark.usefixtures("project_dir")
        async def test_json_string_trigger(self, docker_work_pool):
            """Inline JSON after --trigger is parsed into a trigger."""
            # NOTE(review): this AsyncMock `client` is shadowed by the
            # call_args unpacking below and never used -- likely vestigial.
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            trigger_spec = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
                "job_variables": {"foo": "bar"},
                "within": 60,
                "threshold": 2,
            }
            expected_triggers = _initialize_deployment_triggers(
                "test-name-1", [trigger_spec]
            )
            with mock.patch(
                "prefect.cli.deploy._create_deployment_triggers",
                AsyncMock(),
            ) as create_triggers:
                await run_sync_in_worker_thread(
                    invoke_and_assert,
                    command=(
                        "deploy ./flows/hello.py:my_flow -n test-name-1 --trigger"
                        f" '{json.dumps(trigger_spec)}' -p {docker_work_pool.name}"
                    ),
                    expected_code=0,
                )
                assert create_triggers.call_count == 1
                client, deployment_id, triggers = create_triggers.call_args[0]
                expected_triggers[0].set_deployment_id(deployment_id)
                assert triggers == expected_triggers

        @pytest.mark.usefixtures("project_dir")
        async def test_json_file_trigger(self, docker_work_pool):
            """--trigger accepts a .json file with a top-level 'triggers' list."""
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            trigger_spec = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
                "job_variables": {"foo": "bar"},
            }
            with open("triggers.json", "w") as f:
                json.dump({"triggers": [trigger_spec]}, f)
            expected_triggers = _initialize_deployment_triggers(
                "test-name-1", [trigger_spec]
            )
            with mock.patch(
                "prefect.cli.deploy._create_deployment_triggers",
                AsyncMock(),
            ) as create_triggers:
                await run_sync_in_worker_thread(
                    invoke_and_assert,
                    command=(
                        "deploy ./flows/hello.py:my_flow -n test-name-1"
                        f" --trigger triggers.json -p {docker_work_pool.name}"
                    ),
                    expected_code=0,
                )
                assert create_triggers.call_count == 1
                client, deployment_id, triggers = create_triggers.call_args[0]
                expected_triggers[0].set_deployment_id(deployment_id)
                assert triggers == expected_triggers

        @pytest.mark.usefixtures("project_dir")
        async def test_yaml_file_trigger(self, docker_work_pool):
            """--trigger accepts a .yaml file with a top-level 'triggers' list."""
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            trigger_spec = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
                "job_variables": {"foo": "bar"},
            }
            with open("triggers.yaml", "w") as f:
                yaml.safe_dump({"triggers": [trigger_spec]}, f)
            expected_triggers = _initialize_deployment_triggers(
                "test-name-1", [trigger_spec]
            )
            with mock.patch(
                "prefect.cli.deploy._create_deployment_triggers",
                AsyncMock(),
            ) as create_triggers:
                await run_sync_in_worker_thread(
                    invoke_and_assert,
                    command=(
                        "deploy ./flows/hello.py:my_flow -n test-name-1"
                        f" --trigger triggers.yaml -p {docker_work_pool.name}"
                    ),
                    expected_code=0,
                )
                assert create_triggers.call_count == 1
                client, deployment_id, triggers = create_triggers.call_args[0]
                expected_triggers[0].set_deployment_id(deployment_id)
                assert triggers == expected_triggers

        @pytest.mark.usefixtures("project_dir")
        async def test_nested_yaml_file_trigger(self, docker_work_pool, tmpdir):
            """--trigger resolves a trigger file in a subdirectory."""
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            trigger_spec = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
            }
            triggers_file = tmpdir.mkdir("my_stuff") / "triggers.yaml"
            with open(triggers_file, "w") as f:
                yaml.safe_dump({"triggers": [trigger_spec]}, f)
            expected_triggers = _initialize_deployment_triggers(
                "test-name-1", [trigger_spec]
            )
            with mock.patch(
                "prefect.cli.deploy._create_deployment_triggers",
                AsyncMock(),
            ) as create_triggers:
                await run_sync_in_worker_thread(
                    invoke_and_assert,
                    command=(
                        "deploy ./flows/hello.py:my_flow -n test-name-1"
                        f" --trigger my_stuff/triggers.yaml -p {docker_work_pool.name}"
                    ),
                    expected_code=0,
                )
                assert create_triggers.call_count == 1
                client, deployment_id, triggers = create_triggers.call_args[0]
                expected_triggers[0].set_deployment_id(deployment_id)
                assert triggers == expected_triggers

        @pytest.mark.usefixtures("project_dir")
        async def test_multiple_trigger_flags(self, docker_work_pool):
            """Multiple --trigger flags (inline JSON + file) are combined in
            the order given."""
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            trigger_spec_1 = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
                "job_variables": {"foo": "bar"},
            }
            trigger_spec_2 = {
                "enabled": False,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Failed"],
            }
            with open("triggers.yaml", "w") as f:
                yaml.safe_dump({"triggers": [trigger_spec_2]}, f)
            expected_triggers = _initialize_deployment_triggers(
                "test-name-1", [trigger_spec_1, trigger_spec_2]
            )
            with mock.patch(
                "prefect.cli.deploy._create_deployment_triggers",
                AsyncMock(),
            ) as create_triggers:
                await run_sync_in_worker_thread(
                    invoke_and_assert,
                    command=(
                        "deploy ./flows/hello.py:my_flow -n test-name-1 --trigger"
                        f" '{json.dumps(trigger_spec_1)}' --trigger triggers.yaml -p"
                        f" {docker_work_pool.name}"
                    ),
                    expected_code=0,
                )
                assert create_triggers.call_count == 1
                client, deployment_id, triggers = create_triggers.call_args[0]
                for expected_trigger in expected_triggers:
                    expected_trigger.set_deployment_id(deployment_id)
                assert triggers == expected_triggers

        @pytest.mark.usefixtures("project_dir")
        async def test_override_on_trigger_conflict(self, docker_work_pool):
            """A --trigger flag takes precedence over a conflicting trigger
            defined for the same deployment in prefect.yaml."""
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            cli_trigger_spec = {
                "enabled": True,
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Failed"],
            }
            expected_triggers = _initialize_deployment_triggers(
                "test-name-1", [cli_trigger_spec]
            )
            # Write a conflicting trigger (different `expect`) to prefect.yaml.
            prefect_file = Path("prefect.yaml")
            with prefect_file.open(mode="r") as f:
                contents = yaml.safe_load(f)
            contents["deployments"] = [
                {
                    "name": "test-name-1",
                    "work_pool": {
                        "name": docker_work_pool.name,
                    },
                    "triggers": [
                        {**cli_trigger_spec, "expect": ["prefect.flow-run.Completed"]}
                    ],
                }
            ]
            with prefect_file.open(mode="w") as f:
                yaml.safe_dump(contents, f)
            with mock.patch(
                "prefect.cli.deploy._create_deployment_triggers",
                AsyncMock(),
            ) as create_triggers:
                await run_sync_in_worker_thread(
                    invoke_and_assert,
                    command=(
                        "deploy ./flows/hello.py:my_flow -n test-name-1"
                        f" --trigger '{json.dumps(cli_trigger_spec)}'"
                    ),
                    expected_code=0,
                )
                # Only the CLI-provided trigger survives the conflict.
                _, _, triggers = create_triggers.call_args[0]
                assert len(triggers) == 1
                assert triggers == expected_triggers

        @pytest.mark.usefixtures("project_dir")
        async def test_invalid_trigger_parsing(self, docker_work_pool):
            """Unparseable --trigger values (bad JSON string, non-trigger YAML
            file) fail the command with a parse error."""
            # NOTE(review): this AsyncMock `client` is never used in this
            # test -- likely vestigial from copy/paste.
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            invalid_json_str_trigger = "{enabled: true, match: woodchonk.move.*}"
            invalid_yaml_trigger = "invalid.yaml"
            with open(invalid_yaml_trigger, "w") as f:
                f.write("pretty please, trigger my flow when you see the woodchonk")
            for invalid_trigger in [invalid_json_str_trigger, invalid_yaml_trigger]:
                with mock.patch(
                    "prefect.cli.deploy._create_deployment_triggers",
                    AsyncMock(),
                ):
                    await run_sync_in_worker_thread(
                        invoke_and_assert,
                        command=(
                            "deploy ./flows/hello.py:my_flow -n test-name-1"
                            f" -p {docker_work_pool.name} --trigger '{invalid_trigger}'"
                        ),
                        expected_code=1,
                        expected_output_contains=["Failed to parse trigger"],
                    )

        @pytest.mark.usefixtures("interactive_console", "project_dir")
        async def test_triggers_saved_to_prefect_yaml(self, docker_work_pool):
            """When configuration is saved, CLI-provided triggers are written
            to the deployment's entry in prefect.yaml."""
            # NOTE(review): this AsyncMock `client` is never used in this
            # test -- likely vestigial from copy/paste.
            client = AsyncMock()
            client.server_type = ServerType.CLOUD
            cli_trigger_spec = {
                "name": "Trigger McTriggerson",
                "match": {"prefect.resource.id": "prefect.flow-run.*"},
                "expect": ["prefect.flow-run.Completed"],
            }
            with mock.patch(
                "prefect.cli.deploy._create_deployment_triggers",
                AsyncMock(),
            ):
                await run_sync_in_worker_thread(
                    invoke_and_assert,
                    command=(
                        "deploy ./flows/hello.py:my_flow -n test-name-1 -p"
                        f" {docker_work_pool.name} --trigger"
                        f" '{json.dumps(cli_trigger_spec)}'"
                    ),
                    user_input=(
                        # Decline schedule
                        "n"
                        + readchar.key.ENTER
                        # Decline docker build
                        + "n"
                        + readchar.key.ENTER
                        # Accept save configuration
                        + "y"
                        + readchar.key.ENTER
                    ),
                    expected_code=0,
                )
            # Read the updated prefect.yaml
            prefect_file = Path("prefect.yaml")
            with prefect_file.open(mode="r") as f:
                contents = yaml.safe_load(f)
            assert "deployments" in contents
            assert "triggers" in contents["deployments"][-1]
            assert contents["deployments"][-1]["triggers"] == [cli_trigger_spec]
@pytest.mark.usefixtures("project_dir", "interactive_console", "work_pool")
class TestDeployDockerBuildSteps:
    async def test_docker_build_step_exists_does_not_prompt_build_custom_docker_image(
        self,
        docker_work_pool,
        mock_build_docker_image,
    ):
        """When prefect.yaml already contains a build_docker_image step, the
        wizard must NOT ask whether to build a custom Docker image."""
        prefect_file = Path("prefect.yaml")
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
        # Minimal Dockerfile for the pre-configured build step to reference.
        with open("Dockerfile", "w") as f:
            f.write("FROM python:3.9-slim\n")
        prefect_config["build"] = [
            {
                "prefect_docker.deployments.steps.build_docker_image": {
                    "requires": "prefect-docker",
                    "image_name": "local/repo",
                    "tag": "dev",
                    "id": "build-image",
                    "dockerfile": "Dockerfile",
                }
            }
        ]
        # save it back
        with prefect_file.open(mode="w") as f:
            yaml.safe_dump(prefect_config, f)
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name --interval 3600 -p"
                f" {docker_work_pool.name}"
            ),
            user_input=(
                # Decline remote storage
                "n"
                + readchar.key.ENTER
                +
                # Accept save configuration
                "y"
                + readchar.key.ENTER
            ),
            expected_output_does_not_contain=[
                "Would you like to build a custom Docker image"
            ],
        )
        assert result.exit_code == 0
        assert "An important name/test" in result.output
        # NOTE(review): this trailing re-read is never asserted against --
        # either dead code or a missing assertion; verify intent.
        with prefect_file.open(mode="r") as f:
            prefect_config = yaml.safe_load(f)
async def test_other_build_step_exists_prompts_build_custom_docker_image(
self,
docker_work_pool,
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
prefect_config = yaml.safe_load(f)
prefect_config["build"] = [
{
"prefect.deployments.steps.run_shell_script": {
"id": "sample-bash-cmd",
"script": "echo 'Hello, World!'",
"stream_output": False,
}
}
]
# save it back
with prefect_file.open(mode="w") as f:
yaml.safe_dump(prefect_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Reject build custom docker image
"n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Would you like to save configuration for this deployment",
],
)
assert result.exit_code == 0
assert "An important name/test" in result.output
prefect_file = Path("prefect.yaml")
with open(prefect_file, "r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 2
assert config["deployments"][1]["name"] == "test-name"
assert not config["deployments"][1].get("build")
async def test_no_build_step_exists_prompts_build_custom_docker_image(
self, docker_work_pool
):
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Reject build custom docker image
"n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Would you like to save configuration for this deployment",
],
)
assert result.exit_code == 0
assert "An important name/test" in result.output
prefect_file = Path("prefect.yaml")
with open(prefect_file, "r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 2
assert config["deployments"][1]["name"] == "test-name"
assert not config["deployments"][1].get("build")
async def test_prompt_build_custom_docker_image_accepted_use_existing_dockerfile_accepted(
self, docker_work_pool, mock_build_docker_image
):
with open("Dockerfile", "w") as f:
f.write("FROM python:3.9-slim\n")
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Accept use existing dockerfile
+ "y"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
+
# Default image_name
readchar.key.ENTER
+
# Default tag
readchar.key.ENTER
+
# Reject push to registry
"n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Would you like to use the Dockerfile in the current directory?",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
"Would you like to save configuration for this deployment",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 0
assert "An important name/test" in result.output
with open("prefect.yaml", "r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 2
assert config["deployments"][1]["name"] == "test-name"
assert config["deployments"][1]["build"] == [
{
"prefect_docker.deployments.steps.build_docker_image": {
"id": "build-image",
"requires": "prefect-docker>=0.3.1",
"dockerfile": "Dockerfile",
"image_name": "prefecthq/prefect/test-name",
"tag": "latest",
}
}
]
async def test_prompt_build_custom_docker_image_accepted_use_existing_dockerfile_rejected_rename_accepted(
self, docker_work_pool, monkeypatch, mock_build_docker_image
):
with open("Dockerfile", "w") as f:
f.write("FROM python:3.9-slim\n")
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Reject use existing dockerfile
+ "n"
+ readchar.key.ENTER
# Accept rename dockerfile
+ "y"
+ readchar.key.ENTER
+
# Enter new dockerfile name
"Dockerfile.backup"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
+
# Default image_name
readchar.key.ENTER
+
# Default tag
readchar.key.ENTER
+
# Reject push to registry
"n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Would you like to use the Dockerfile in the current directory?",
"A Dockerfile exists. You chose not to use it.",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
"Would you like to save configuration for this deployment",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 0
with open("prefect.yaml", "r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 2
assert config["deployments"][1]["name"] == "test-name"
assert config["deployments"][1]["build"] == [
{
"prefect_docker.deployments.steps.build_docker_image": {
"id": "build-image",
"requires": "prefect-docker>=0.3.1",
"dockerfile": "auto",
"image_name": "prefecthq/prefect/test-name",
"tag": "latest",
}
}
]
async def test_prompt_build_custom_docker_image_accepted_use_existing_dockerfile_rejected_rename_rejected(
self, docker_work_pool
):
with open("Dockerfile", "w") as f:
f.write("FROM python:3.9-slim\n")
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Reject use existing dockerfile
+ "n"
+ readchar.key.ENTER
# Accept rename dockerfile
+ "n"
+ readchar.key.ENTER
),
expected_code=1,
expected_output_contains=[
"Would you like to build a custom Docker image",
"Would you like to use the Dockerfile in the current directory?",
"A Dockerfile exists. You chose not to use it.",
(
"A Dockerfile already exists. Please remove or rename the existing"
" one."
),
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 1
async def test_prompt_build_custom_docker_image_accepted_no_existing_dockerfile_uses_auto_build(
self, docker_work_pool, monkeypatch, mock_build_docker_image
):
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
# Default image_name
+ readchar.key.ENTER
# Default tag
+ readchar.key.ENTER
# Reject push to registry
+ "n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
"Would you like to save configuration for this deployment",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 0
with open("prefect.yaml", "r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 2
assert config["deployments"][1]["name"] == "test-name"
assert config["deployments"][1]["build"] == [
{
"prefect_docker.deployments.steps.build_docker_image": {
"id": "build-image",
"requires": "prefect-docker>=0.3.1",
"dockerfile": "auto",
"image_name": "prefecthq/prefect/test-name",
"tag": "latest",
}
}
]
async def test_no_existing_work_pool_image_gets_updated_after_adding_build_docker_image_step(
self, docker_work_pool, monkeypatch, mock_build_docker_image
):
prefect_file = Path("prefect.yaml")
if prefect_file.exists():
prefect_file.unlink()
assert not prefect_file.exists()
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
# Default image_name
+ readchar.key.ENTER
# Default tag
+ readchar.key.ENTER
# Reject push to registry
+ "n"
+ readchar.key.ENTER
# Decline remote storage
+ "n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
"Would you like to save configuration for this deployment",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 0
with open("prefect.yaml", "r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 1
assert config["deployments"][0]["name"] == "test-name"
assert config["deployments"][0]["work_pool"]["name"] == docker_work_pool.name
assert (
config["deployments"][0]["work_pool"]["job_variables"]["image"]
== "{{ build-image.image }}"
)
assert config["build"] == [
{
"prefect_docker.deployments.steps.build_docker_image": {
"id": "build-image",
"requires": "prefect-docker>=0.3.1",
"dockerfile": "auto",
"image_name": "prefecthq/prefect/test-name",
"tag": "latest",
}
}
]
async def test_work_pool_image_already_exists_not_updated_after_adding_build_docker_image_step(
self, docker_work_pool, monkeypatch, mock_build_docker_image
):
prefect_file = Path("prefect.yaml")
with open("prefect.yaml", "w") as f:
contents = {
"work_pool": {
"name": docker_work_pool.name,
"job_variables": {"image": "original-image"},
}
}
yaml.dump(contents, f)
assert prefect_file.exists()
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {docker_work_pool.name}"
),
user_input=(
# Accept build custom docker image
"y"
+ readchar.key.ENTER
# Enter repo name
+ "prefecthq/prefect"
+ readchar.key.ENTER
# Default image_name
+ readchar.key.ENTER
# Default tag
+ readchar.key.ENTER
# Reject push to registry
+ "n"
+ readchar.key.ENTER
# Decline remote storage
+ "n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_output_contains=[
"Would you like to build a custom Docker image",
"Image prefecthq/prefect/test-name:latest will be built",
"Would you like to push this image to a remote registry?",
"Would you like to save configuration for this deployment",
],
expected_output_does_not_contain=["Is this a private registry?"],
)
assert result.exit_code == 0
with open("prefect.yaml", "r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 1
assert config["deployments"][0]["name"] == "test-name"
assert config["deployments"][0]["work_pool"]["name"] == docker_work_pool.name
assert (
config["deployments"][0]["work_pool"]["job_variables"]["image"]
== "{{ build-image.image }}"
)
assert config["work_pool"] == {
"name": docker_work_pool.name,
"job_variables": {"image": "original-image"},
}
async def test_deploying_managed_work_pool_does_not_prompt_to_build_image(
self, managed_work_pool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
f" -p {managed_work_pool.name}"
),
user_input=(
# Decline remote storage
"n"
+ readchar.key.ENTER
# Decline save configuration
+ "n"
+ readchar.key.ENTER
),
expected_output_contains=[
"$ prefect deployment run 'An important name/test-name'",
],
expected_output_does_not_contain=[
"Would you like to build a custom Docker image?",
],
)
class TestDeployInfraOverrides:
    """Tests for the ``--job-variable`` (``-v``) CLI option on ``prefect deploy``."""
    @pytest.fixture
    async def work_pool(self, prefect_client):
        # Class-local fixture: a minimal work pool named "test-pool" used by
        # every test below (overrides any module-level work_pool fixture).
        await prefect_client.create_work_pool(
            WorkPoolCreate(name="test-pool", type="test")
        )
    async def test_uses_job_variables(self, project_dir, work_pool, prefect_client):
        """key=value and JSON-object job variables are merged into the deployment."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
                " 1.0.0 -v env=prod -t foo-bar --job-variable"
                ' \'{"resources":{"limits":{"cpu": 1}}}\''
            ),
            expected_code=0,
            expected_output_contains=[
                "An important name/test-name",
                "prefect worker start --pool 'test-pool'",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(
            "An important name/test-name"
        )
        assert deployment.name == "test-name"
        assert deployment.work_pool_name == "test-pool"
        assert deployment.version == "1.0.0"
        assert deployment.tags == ["foo-bar"]
        # Both the key=value pair and the JSON object end up in job_variables.
        assert deployment.job_variables == {
            "env": "prod",
            "resources": {"limits": {"cpu": 1}},
        }
    async def test_rejects_json_strings(self, project_dir, work_pool):
        """A bare JSON string is not a valid job-variable payload."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
                " 1.0.0 -v env=prod -t foo-bar --job-variable 'my-variable'"
            ),
            expected_code=1,
            expected_output_contains=[
                "Could not parse variable",
            ],
        )
    async def test_rejects_json_arrays(self, project_dir, work_pool):
        """A JSON array is not a valid job-variable payload."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
                " 1.0.0 -v env=prod -t foo-bar --job-variable ['my-variable']"
            ),
            expected_code=1,
            expected_output_contains=[
                "Could not parse variable",
            ],
        )
    async def test_rejects_invalid_json(self, project_dir, work_pool):
        """Malformed JSON for --job-variable fails with a parse error."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
                " 1.0.0 -v env=prod -t foo-bar --job-variable "
                ' \'{"resources":{"limits":{"cpu"}\''
            ),
            expected_code=1,
            expected_output_contains=[
                "Could not parse variable",
            ],
        )
@pytest.mark.usefixtures("project_dir", "interactive_console", "work_pool")
class TestDeployDockerPushSteps:
    """Interactive `prefect deploy` tests for the Docker image *push* prompts.

    Like ``TestDeployDockerBuildSteps``, keystrokes in ``user_input`` must match
    the CLI prompt order exactly; the inline comments document each answer.
    """
    async def test_prompt_push_custom_docker_image_rejected(
        self, docker_work_pool, monkeypatch, mock_build_docker_image
    ):
        """Declining the push prompt saves a build step but no push step."""
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
                f" -p {docker_work_pool.name}"
            ),
            user_input=(
                # Accept build custom docker image
                "y"
                + readchar.key.ENTER
                # Enter repo name
                + "prefecthq/prefect"
                + readchar.key.ENTER
                # Default image_name
                + readchar.key.ENTER
                # Default tag
                + readchar.key.ENTER
                # Reject push to registry
                + "n"
                + readchar.key.ENTER
                # Accept save configuration
                + "y"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                "Would you like to build a custom Docker image",
                "Image prefecthq/prefect/test-name:latest will be built",
                "Would you like to push this image to a remote registry?",
                "Would you like to save configuration for this deployment",
            ],
            expected_output_does_not_contain=["Is this a private registry?"],
        )
        assert result.exit_code == 0
        with open("prefect.yaml", "r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "test-name"
        assert config["deployments"][1]["build"] == [
            {
                "prefect_docker.deployments.steps.build_docker_image": {
                    "id": "build-image",
                    "requires": "prefect-docker>=0.3.1",
                    "dockerfile": "auto",
                    "image_name": "prefecthq/prefect/test-name",
                    "tag": "latest",
                }
            }
        ]
        # No push section may be recorded when the push prompt was declined.
        assert not config["deployments"][1].get("push")
    async def test_prompt_push_custom_docker_image_accepted_public_registry(
        self, docker_work_pool, monkeypatch, mock_build_docker_image
    ):
        """Accepting push to a public registry records a templated push step."""
        result = await run_sync_in_worker_thread(
            invoke_and_assert,
            command=(
                "deploy ./flows/hello.py:my_flow -n test-name --interval 3600"
                f" -p {docker_work_pool.name}"
            ),
            user_input=(
                # Accept build custom docker image
                "y"
                + readchar.key.ENTER
                # Enter repo name
                + "prefecthq/prefect"
                + readchar.key.ENTER
                # Default image_name
                + readchar.key.ENTER
                # Default tag
                + readchar.key.ENTER
                # Accept push to registry
                + "y"
                + readchar.key.ENTER
                # Registry URL
                + "https://hub.docker.com"
                + readchar.key.ENTER
                # Reject private registry
                + "n"
                + readchar.key.ENTER
                # Accept save configuration
                + "y"
                + readchar.key.ENTER
            ),
            expected_output_contains=[
                "Would you like to build a custom Docker image",
                "Image prefecthq/prefect/test-name:latest will be built",
                "Would you like to push this image to a remote registry?",
                "Is this a private registry?",
                "Would you like to save configuration for this deployment",
            ],
            expected_output_does_not_contain=[
                "Would you like use prefect-docker to manage Docker registry"
                " credentials?"
            ],
        )
        assert result.exit_code == 0
        with open("prefect.yaml", "r") as f:
            config = yaml.safe_load(f)
        assert len(config["deployments"]) == 2
        assert config["deployments"][1]["name"] == "test-name"
        # The registry URL is prefixed onto the image name in the build step.
        assert config["deployments"][1]["build"] == [
            {
                "prefect_docker.deployments.steps.build_docker_image": {
                    "id": "build-image",
                    "requires": "prefect-docker>=0.3.1",
                    "dockerfile": "auto",
                    "image_name": "https://hub.docker.com/prefecthq/prefect/test-name",
                    "tag": "latest",
                }
            }
        ]
        # The push step consumes the build step's outputs via templating.
        assert config["deployments"][1]["push"] == [
            {
                "prefect_docker.deployments.steps.push_docker_image": {
                    "requires": "prefect-docker>=0.3.1",
                    "image_name": "{{ build-image.image_name }}",
                    "tag": "{{ build-image.tag }}",
                }
            }
        ]
class TestDeployingUsingCustomPrefectFile:
    """Tests for the ``--prefect-file`` option of ``prefect deploy``.

    Fix: the temp-file tests previously wrote content to a
    ``NamedTemporaryFile`` without flushing before the deploy command read the
    file back by name, so short writes could still be sitting in the Python
    I/O buffer and the command would see an empty file. ``fp.flush()`` is now
    called before invoking the CLI.
    """
    def customize_from_existing_prefect_file(
        self,
        existing_file: Path,
        new_file: io.TextIOBase,
        work_pool: Optional[WorkPool],
    ):
        """Copy ``existing_file`` into ``new_file`` with two test deployments added."""
        with existing_file.open(mode="r") as f:
            contents = yaml.safe_load(f)
        # Customize the template
        contents["deployments"] = [
            {
                "name": "test-deployment1",
                "entrypoint": "flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name if work_pool else "some_name"},
            },
            {
                "name": "test-deployment2",
                "entrypoint": "flows/hello.py:my_flow",
                "work_pool": {"name": work_pool.name if work_pool else "some_name"},
            },
        ]
        # Write the customized template
        yaml.dump(contents, new_file)
    @pytest.mark.usefixtures("project_dir")
    async def test_deploying_using_custom_prefect_file(
        self, prefect_client: PrefectClient, work_pool: WorkPool
    ):
        """--prefect-file pointing at a customized config deploys all entries."""
        # Create and use a temporary prefect.yaml file
        with tempfile.NamedTemporaryFile("w+") as fp:
            self.customize_from_existing_prefect_file(
                Path("prefect.yaml"), fp, work_pool
            )
            # Ensure the customized YAML is on disk before the CLI reads fp.name.
            fp.flush()
            await run_sync_in_worker_thread(
                invoke_and_assert,
                command=f"deploy --all --prefect-file {fp.name}",
                expected_code=0,
                user_input=(
                    # decline remote storage
                    "n"
                    + readchar.key.ENTER
                    # reject saving configuration
                    + "n"
                    + readchar.key.ENTER
                    # reject naming deployment
                    + "n"
                    + readchar.key.ENTER
                ),
                expected_output_contains=[
                    (
                        "Deployment 'An important name/test-deployment1' successfully"
                        " created"
                    ),
                    (
                        "Deployment 'An important name/test-deployment2' successfully"
                        " created"
                    ),
                ],
            )
        # Check if deployments were created correctly
        deployment1 = await prefect_client.read_deployment_by_name(
            "An important name/test-deployment1",
        )
        deployment2 = await prefect_client.read_deployment_by_name(
            "An important name/test-deployment2"
        )
        assert deployment1.name == "test-deployment1"
        assert deployment1.work_pool_name == work_pool.name
        assert deployment2.name == "test-deployment2"
        assert deployment2.work_pool_name == work_pool.name
    @pytest.mark.usefixtures("project_dir")
    async def test_deploying_using_missing_prefect_file(self):
        """A nonexistent --prefect-file path fails with a clear error."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy --all --prefect-file THIS_FILE_DOES_NOT_EXIST",
            expected_code=1,
            expected_output_contains=[
                "Unable to read the specified config file. Reason: [Errno 2] "
                "No such file or directory: 'THIS_FILE_DOES_NOT_EXIST'. Skipping"
            ],
        )
    @pytest.mark.usefixtures("project_dir")
    @pytest.mark.parametrize(
        "content", ["{this isn't valid YAML!}", "unbalanced blackets: ]["]
    )
    async def test_deploying_using_malformed_prefect_file(self, content: str):
        """Malformed YAML in --prefect-file fails with a parse error."""
        with tempfile.NamedTemporaryFile("w+") as fp:
            fp.write(content)
            # Flush so the CLI actually sees the malformed content rather than
            # an empty (still-buffered) file.
            fp.flush()
            await run_sync_in_worker_thread(
                invoke_and_assert,
                command=f"deploy --all --prefect-file {fp.name}",
                expected_code=1,
                expected_output_contains=[
                    "Unable to parse the specified config file. Skipping."
                ],
            )
    @pytest.mark.usefixtures("project_dir")
    async def test_deploying_directory_as_prefect_file(self):
        """Passing a directory as --prefect-file fails with EISDIR."""
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command="deploy --all --prefect-file ./",
            expected_code=1,
            expected_output_contains=[
                "Unable to read the specified config file. Reason: [Errno 21] "
                "Is a directory: '.'. Skipping."
            ],
        )
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@cli@test_deploy.py@.PATH_END.py
|
{
"filename": "_thicknessmode.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcats/line/colorbar/_thicknessmode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ThicknessmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``parcats.line.colorbar.thicknessmode`` attribute.

    Accepts one of the enumerated values ``"fraction"`` or ``"pixels"``.
    """

    def __init__(
        self, plotly_name="thicknessmode", parent_name="parcats.line.colorbar", **kwargs
    ):
        # Pop overridable defaults out of kwargs before delegating to the base
        # EnumeratedValidator, so explicit caller values win over the defaults.
        edit_type = kwargs.pop("edit_type", "colorbars")
        allowed = kwargs.pop("values", ["fraction", "pixels"])
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcats@line@colorbar@_thicknessmode.py@.PATH_END.py
|
{
"filename": "polarization.py",
"repo_name": "threeML/astromodels",
"repo_path": "astromodels_extracted/astromodels-master/astromodels/core/polarization.py",
"type": "Python"
}
|
__author__ = "giacomov"
from astromodels.core.tree import Node
from astromodels.core.parameter import Parameter
import numpy as np
class Polarization(Node):
    """Base tree node describing a source's polarization.

    Two parameterizations are supported: "linear" (degree + angle) and
    "stokes" (Stokes components). Subclasses attach the actual parameters.
    """
    def __init__(self, polarization_type="linear"):
        # NOTE: assert-based validation is stripped under `python -O`.
        assert polarization_type in [
            "linear",
            "stokes",
        ], "polarization must be linear or stokes"
        self._polarization_type = polarization_type
        # Register this node in the model tree under the fixed name "polarization".
        Node.__init__(self, "polarization")
    @staticmethod
    def _get_parameter_from_input(
        number_or_parameter, minimum, maximum, what, desc, unit
    ):
        """Normalize a user-supplied value into a `Parameter`.

        Accepts either a plain number (wrapped into a new free `Parameter`
        bounded by [minimum, maximum]) or an existing `Parameter` instance,
        which must already carry exactly those bounds.

        :param number_or_parameter: number or `Parameter` instance
        :param minimum: required lower bound
        :param maximum: required upper bound
        :param what: parameter name (used in messages and as the new name)
        :param desc: human-readable description for a newly created parameter
        :param unit: unit string for a newly created parameter
        :return: a `Parameter` instance
        """
        # Try to transform it to float, if it works than we transform it to a parameter
        try:
            number_or_parameter = float(number_or_parameter)
        except TypeError:
            # float() failed, so this must already be a Parameter instance.
            assert isinstance(number_or_parameter, Parameter), (
                "%s must be either a number or a " "parameter instance" % what
            )
            # So this is a Parameter instance already. Enforce that it has the right maximum and minimum
            parameter = number_or_parameter
            assert parameter.min_value == minimum, "%s must have a minimum of %s" % (
                what,
                minimum,
            )
            assert parameter.max_value == maximum, "%s must have a maximum of %s" % (
                what,
                maximum,
            )
        else:
            # This was a float. Enforce that it has a legal value
            assert (
                minimum <= number_or_parameter <= maximum
            ), "%s cannot have a value of %s, " "it must be %s <= %s <= %s" % (
                what,
                number_or_parameter,
                minimum,
                what,
                maximum,
            )
            # Wrap the plain number into a new, free Parameter with the given bounds.
            parameter = Parameter(
                what,
                number_or_parameter,
                desc=desc,
                min_value=minimum,
                max_value=maximum,
                unit=unit,
                free=True,
            )
        return parameter
# TODO: add transform between polarizations
class LinearPolarization(Polarization):
    def __init__(self, degree, angle):
        """
        Linear parameterization of polarization.

        :param degree: the polarization degree; either a callable (wrapped in a
            `LinearParameter`) or a number/`Parameter` bounded to [0, 100]
            (presumably a percentage — TODO confirm)
        :param angle: the polarization angle; either a callable (wrapped in a
            `LinearParameter`) or a number/`Parameter` bounded to [0, 180] deg
        """
        super(LinearPolarization, self).__init__(polarization_type="linear")
        # Callables become energy-dependent LinearParameter nodes; plain
        # numbers / Parameter instances go through the bounds-checking helper.
        if callable(degree):
            self.degree = LinearParameter('degree', degree)
        else:
            # NOTE(review): a non-callable Parameter is later invoked as
            # self.degree(energies) in __call__ — confirm Parameter supports that.
            self.degree = self._get_parameter_from_input(degree, 0, 100, 'degree', 'Polarization degree', 'dimensionless_unscaled')
        if callable(angle):
            self.angle = LinearParameter('angle', angle)
        else:
            self.angle = self._get_parameter_from_input(angle, 0, 180, 'angle', 'Polarization angle', 'deg')
        self._add_child(self.degree)
        self._add_child(self.angle)
    def __call__(self, energies, stokes):
        # Standard linear-polarization relations: Q = p*cos(2*psi), U = p*sin(2*psi),
        # with the angle converted from degrees to radians.
        if stokes == 'Q':
            return self.degree(energies) * np.cos(2.0 * np.radians(self.angle(energies)))
        elif stokes == 'U':
            return self.degree(energies) * np.sin(2.0 * np.radians(self.angle(energies)))
        # Any other Stokes key (e.g. 'I') falls through to unity.
        return 1
class StokesPolarization(Polarization):
    def __init__(self, I=None, Q=None, U=None, V=None):
        """
        Stokes parameterization of polarization.

        :param I: accepted but currently ignored (only Q and U are stored)
        :param Q: callable giving the Q component as a function of energy
        :param U: callable giving the U component as a function of energy
        :param V: accepted but currently ignored
        """
        super(StokesPolarization, self).__init__(polarization_type='stokes')
        # NOTE(review): only Q and U become child nodes; I and V are dropped.
        self._Q = StokesParameter('Q', Q)
        self._add_child(self._Q)
        self._U = StokesParameter('U', U)
        self._add_child(self._U)
    def __call__(self, energies, stokes):
        # Dispatch on the requested Stokes component; anything else returns unity.
        if stokes == 'Q':
            return self._Q(energies)
        elif stokes == 'U':
            return self._U(energies)
        return 1
# def to_linear_polarization(self):
# # polarization angle
# # psi = 0.5 * np.arctan2(U_bin, Q_bin)
#
# # polarization fraction
# # frac = np.sqrt(Q_bin ** 2 + U_bin ** 2) / I_bin
#
# pass
#
# #angle = 0.5 * np.arctan2(se)
#
#
class StokesParameter(Node):
    """Tree node wrapping a single Stokes component (I, Q, U or V).

    ``value`` is assumed to be callable with an array of energies — TODO
    confirm against callers.
    """
    def __init__(self, name, value):
        assert name in ['I', 'Q', 'U', 'V']
        Node.__init__(self, name)
        # Register the wrapped value in the tree and keep a direct reference.
        self._add_child(value)
        self.value = value
    def __call__(self, energies):
        return self.value(energies)
class LinearParameter(Node):
    """Tree node wrapping a linear-polarization quantity ('degree' or 'angle').

    ``value`` is assumed to be callable with an array of energies — TODO
    confirm against callers.
    """
    def __init__(self, name, value):
        assert name in ['degree', 'angle']
        Node.__init__(self, name)
        # Register the wrapped value in the tree and keep a direct reference.
        self._add_child(value)
        self.value = value
    def __call__(self, energies):
        return self.value(energies)
|
threeMLREPO_NAMEastromodelsPATH_START.@astromodels_extracted@astromodels-master@astromodels@core@polarization.py@.PATH_END.py
|
{
"filename": "test_rt_nbs14_1k.py",
"repo_name": "aewallin/allantools",
"repo_path": "allantools_extracted/allantools-master/tests/realtime/test_rt_nbs14_1k.py",
"type": "Python"
}
|
"""
NBS14 test for allantools (https://github.com/aewallin/allantools)
nbs14 datasets are from http://www.ieee-uffc.org/frequency-control/learning-riley.asp
Stable32 was used to calculate the deviations we compare against.
The small dataset and deviations are from
http://www.ieee-uffc.org/frequency-control/learning-riley.asp
http://www.wriley.com/paper1ht.htm
see also:
NIST Special Publication 1065
Handbook of Frequency Stability Analysis
http://tf.nist.gov/general/pdf/2220.pdf
around page 107
"""
import math
import time
import sys
print(sys.version)
import pytest
import numpy
import allantools as allan
# 1000 point deviations from:
# http://www.ieee-uffc.org/frequency-control/learning-riley.asp Table III
# http://www.wriley.com/paper1ht.htm
# http://tf.nist.gov/general/pdf/2220.pdf page 108
# Reference deviations for the NBS14 1000-point dataset; each row is one
# deviation type (index given in the trailing comment), each column one
# averaging time (tau = 1, 10, 100 unless noted otherwise).
nbs14_1000_devs = [ [2.922319e-01, 9.965736e-02, 3.897804e-02], # 0 ADEV 1, 10, 100
                    [2.922319e-01, 9.159953e-02, 3.241343e-02], # 1 OADEV
                    [2.922319e-01, 6.172376e-02, 2.170921e-02], # 2 MDEV
                    #[2.922319e-01, 9.172131e-02, 3.501795e-02], # TOTDEV, http://www.ieee-uffc.org/frequency-control/learning-riley.asp
                    # "Calculated using bias-corrected reflected method from endpoint-matched phase data"
                    [2.922319e-01, 9.134743e-02, 3.406530e-02], # 3 TOTDEV, http://tf.nist.gov/general/pdf/2220.pdf page 108
                    # "Calculated using doubly reflected TOTVAR method"
                    [2.943883e-01, 1.052754e-01, 3.910860e-02], # 4 HDEV
                    [1.687202e-01, 3.563623e-01, 1.253382e-00], # 5 TDEV
                    [2.943883e-01, 9.581083e-02, 3.237638e-02], # 6 OHDEV
                    [2.884664e-01, 9.296352e-02, 3.206656e-02], # 7 standard deviation, sample (not population)
                    [2.943883e-01, 9.614787e-02, 3.058103e-02], # 8 HTOTDEV
                    #[2.418528e-01, 6.499161e-02, 2.287774e-02], # 9 MTOTDEV (from published table, WITH bias correction)
                    [2.0664e-01, 5.5529e-02, 1.9547e-02], # MTOTDEV (from Stable32 v1.60 decade run, NO bias correction)
                    #[1.396338e-01, 3.752293e-01, 1.320847e-00], # 10 TTOTDEV (from published table, WITH bias correction)
                    [1.1930e-01, 3.2060e-01, 1.1285e+00 ], # 10 TTOTDEV (from Stable 32 v1.60 decade run, NO bias correction)
                    [1.0757e-01, 3.1789e-02, 5.0524e-03 ], ] # 11 THEO1 (tau= 10,100,1000, from Stable32, NO bias correction
# this generates the nbs14 1000 point frequency dataset.
# random number generator described in
# http://www.ieee-uffc.org/frequency-control/learning-riley.asp
# http://tf.nist.gov/general/pdf/2220.pdf page 107
# http://www.wriley.com/tst_suit.dat
def nbs14_1000():
"""
1000-point test dataset.
data is fractional frequency
"""
n = [0]*1000
n[0] = 1234567890
for i in range(999):
n[i+1] = (16807*n[i]) % 2147483647
# the first three numbers are given in the paper, so check them:
assert( n[1] == 395529916 and n[2] == 1209410747 and n[3] == 633705974 )
n = [x/float(2147483647) for x in n] # normalize so that n is in [0, 1]
return n
# Build the shared NBS14 test data once at import time: the fractional
# frequency series and its integrated phase record (tau0 = 1 s).
nbs14_f = nbs14_1000()
nbs14_phase = allan.frequency2phase(nbs14_f, 1.0)
def check_dev(name, tau, a, b):
    """Print a comparison line for *name* at *tau* and assert a ~= b.

    Uses numpy.isclose with its default relative/absolute tolerances.
    """
    print(name," tau=",tau, " ", a ," == ", b)
    assert numpy.isclose(a, b)
def test_oadev_rt_nbs14_1k():
    """Feed NBS14 phase data point-by-point and check realtime OADEV at tau = 1, 10, 100."""
    dev_rt = allan.realtime.oadev_realtime(afs=[1, 10, 100], tau0=1.0)
    for phase_point in nbs14_phase:
        dev_rt.add_phase(phase_point)
    for idx in range(3):
        check_dev('OADEV', dev_rt.taus()[idx], dev_rt.dev[idx], nbs14_1000_devs[1][idx])
def test_ohdev_rt_nbs14_1k():
    """Feed NBS14 phase data point-by-point and check realtime OHDEV at tau = 1, 10, 100."""
    dev_rt = allan.realtime.ohdev_realtime(afs=[1, 10, 100], tau0=1.0)
    for phase_point in nbs14_phase:
        dev_rt.add_phase(phase_point)
    for idx in range(3):
        check_dev('OHDEV', dev_rt.taus()[idx], dev_rt.dev[idx], nbs14_1000_devs[6][idx])
def test_tdev_rt_nbs14_1k():
    """Feed NBS14 phase data point-by-point and check realtime TDEV at tau = 1, 10, 100."""
    dev_rt = allan.realtime.tdev_realtime(afs=[1, 10, 100], tau0=1.0)
    for phase_point in nbs14_phase:
        dev_rt.add_phase(phase_point)
    for idx in range(3):
        check_dev('TDEV', dev_rt.taus()[idx], dev_rt.dev[idx], nbs14_1000_devs[5][idx])
if __name__ == "__main__":
    # Allow running this module directly, outside of pytest.
    test_oadev_rt_nbs14_1k()
    test_ohdev_rt_nbs14_1k()
    test_tdev_rt_nbs14_1k()
|
aewallinREPO_NAMEallantoolsPATH_START.@allantools_extracted@allantools-master@tests@realtime@test_rt_nbs14_1k.py@.PATH_END.py
|
{
"filename": "fista.py",
"repo_name": "miguelcarcamov/csromer",
"repo_path": "csromer_extracted/csromer-master/src/csromer/optimization/methods/fista.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 12:09:14 2019
@author: miguel
"""
import copy
from dataclasses import dataclass
import numpy as np
from ...objectivefunction import Chi2, Fi
from ..optimizer import Optimizer
@dataclass(init=True, repr=True)
class FISTA(Optimizer):
    """FISTA (Fast Iterative Shrinkage-Thresholding Algorithm) optimizer.

    Minimizes F(x) = f(x) + g(x) where `fx` supplies the smooth (chi-squared)
    gradient and `gx` supplies the proximal operator of the non-smooth
    regularizer. The regularization strength is decreased by `noise` after
    each iteration until it would become non-positive.
    """
    # fx: smooth data-fidelity term providing calculate_gradient_fista
    fx: Chi2 = None
    # gx: regularizer providing calc_prox / getLambda / setLambda
    gx: Fi = None
    # noise: per-iteration decrement applied to the regularization parameter
    noise: float = None
    def run(self):
        """Run FISTA from the current guess and return (final objective, parameter).

        The guess parameter object is deep-copied, so the input guess is not
        mutated; only the copy's data array is replaced with the solution.
        """
        ret, x = self.__fista_algorithm(
            self.guess_param.data,
            self.F_obj.evaluate,
            self.fx.calculate_gradient_fista,
            self.gx,
            self.maxiter,
            self.tol,
            self.guess_param.n,
            self.noise,
            self.verbose,
        )
        param = copy.deepcopy(self.guess_param)
        param.data = x
        return ret, param
    @staticmethod
    def __fista_algorithm(
        x=None,
        F=None,
        fx=None,
        g_prox=None,
        max_iter=None,
        tol=np.finfo(np.float32).tiny,
        n=None,
        noise=None,
        verbose=True,
    ):
        """Core FISTA loop.

        :param x: initial iterate; if None, a complex64 zero vector of length n
        :param F: full objective function, used for reporting and final value
        :param fx: gradient-step callable; ``z - fx(z)`` is the smooth update
        :param g_prox: object with calc_prox / getLambda / setLambda
        :param max_iter: iteration count; if None it is derived from
            lambda / noise
        :param tol: currently UNUSED — the tolerance-based early exit below is
            commented out
        :param n: length of the iterate when x is None
        :param noise: per-iteration decrement of the regularization parameter
        :param verbose: print progress every 10 iterations
        :return: (final objective value, solution array)

        NOTE(review): if both max_iter and noise are None, ``range(0, None)``
        below raises TypeError — consider validating up front.
        """
        if x is None and n is not None:
            x = np.zeros(n, dtype=np.complex64)
        # Momentum weight and auxiliary iterate (Beck & Teboulle notation).
        t = 1
        z = x.copy()
        min_cost = 0.0
        if max_iter is None and noise is not None:
            if noise is not np.nan:
                # Choose the iteration count so that lambda reaches ~0 after
                # being decremented by `noise` once per iteration.
                if noise != 0.0:
                    max_iter = int(np.floor(g_prox.getLambda() / noise))
                else:
                    noise = 1e-5
                    max_iter = int(np.floor(g_prox.getLambda() / noise))
            else:
                raise ValueError("Noise must be a number")
            if verbose:
                print("Iterations set to " + str(max_iter))
        if noise is None:
            noise = 1e-5
        if noise >= g_prox.getLambda():
            # Decrementing would immediately make lambda non-positive; bail out.
            if verbose:
                print("Error, noise cannot be greater than lambda")
            return min_cost, x
        for it in range(0, max_iter):
            xold = x.copy()
            # Gradient step on the auxiliary point, then proximal step.
            z = z - fx(z)
            x = g_prox.calc_prox(z)
            # FISTA momentum update: t_{k+1} = (1 + sqrt(1 + 4 t_k^2)) / 2.
            t0 = t
            t = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t**2))
            z = x + ((t0 - 1.0) / t) * (x - xold)
            # e = np.sqrt(np.sum((x-xold)**2)) / np.sqrt(np.sum(xold**2))
            # print(e)
            # Mean absolute change per element (computed but only used by the
            # commented-out early-exit check below).
            e = np.sum(np.abs(x - xold)) / len(x)
            # if e <= tol:
            #    if verbose:
            #        print("Exit due to tolerance: ", e, " < ", tol)
            #        print("Iterations: ", it + 1)
            #    break
            if verbose and it % 10 == 0:
                cost = F(x)
                print("Iteration: ", it, " objective function value: {0:0.5f}".format(cost))
            # Anneal the regularization strength; stop once it would go negative.
            new_lambda = g_prox.getLambda() - noise
            if new_lambda > 0.0:
                g_prox.setLambda(reg=new_lambda)
            else:
                if verbose:
                    print("Exit due to negative regularization parameter")
                break
        min_cost = F(x)
        return min_cost, x
|
miguelcarcamovREPO_NAMEcsromerPATH_START.@csromer_extracted@csromer-master@src@csromer@optimization@methods@fista.py@.PATH_END.py
|
{
"filename": "1_GetStarted.ipynb",
"repo_name": "natashabatalha/picaso",
"repo_path": "picaso_extracted/picaso-master/docs/notebooks/1_GetStarted.ipynb",
"type": "Jupyter Notebook"
}
|
# Getting Started : Basic Inputs and Outputs
If you are here then you have already successfully
1) Installed the code
2) Downloaded the necessary reference data
3) Added ``export picaso_refdata="/path/to/picaso/reference"`` to ~/.bash_profile
If you have not done these things, please return to the [Installation Guide](https://natashabatalha.github.io/picaso/installation.html)
```python
#picaso
from picaso import justdoit as jdi
from picaso import justplotit as jpi
import numpy as np
jpi.output_notebook()
```
```python
#double check that your reference file path has been set
import os
refdata = os.getenv("picaso_refdata")
print(refdata)
#if you are having trouble setting this you can do it right here in the command line
#os.environ["picaso_refdata"]= add your path here AND COPY AND PASTE ABOVE
#IT WILL NEED TO GO ABOVE YOUR PICASO IMPORT
```
## Connect to Opacity Database
There is a full notebook in the tutorials devoted to learning how to make opacity databases in our format. If you cloned from `github`, there should also be an opacity database there called `opacity.db`.
```python
help(jdi.opannection)
```
`opannection` has a few default parameters. A few notes:
1) `wave_range` can be used to select a subset of the full opacity database you are using
2) `filename_db` is a sqlite database that should have been downloaded and stored with your reference data (see [Installation Documentation](https://natashabatalha.github.io/picaso/installation.html))
3) `raman_db` is a small file that should already be in your reference folder from Github.
```python
opacity = jdi.opannection(wave_range=[0.3,1]) #lets just use all defaults
```
## Load blank slate
```python
start_case = jdi.inputs()
```
In order to run the code we need (at the minimum) specific info about the:
- **phase angle**
- **planet** : gravity
- **star** : temperature, metallicity, gravity
- **atmosphere** : P-T profile, chemical composition, cloud properties (discussed later)
Additionally, we have some optional parameters that will be discussed in later notebooks:
- **approx** : approximations to rapidly compute reflected light
- **disco** : number of individual facets you want to compute before integrating to 1D spectrum (think disco ball)
- **opacities** : keeping track of opacity files used
- **test_mode** : used to do benchmark testing
## Set Planet & Star Properties
```python
#phase angle
start_case.phase_angle(0) #radians
#define gravity
start_case.gravity(gravity=25, gravity_unit=jdi.u.Unit('m/(s**2)')) #any astropy units available
#define star: args are the opacity db handle, temperature [K], metallicity [log10 solar], and log g
start_case.star(opacity, 5000,0,4.0) #opacity db, temp, metallicity, logg
```
## Set Atmospheric Composition
There are different options for setting atmospheric composition.
1) Specifying a file path to model run
2) Give arbitrary pressure, temperature and composition directly as a dictionary input
### Option 1) Specify file path
Below, I am loading in a profile path for Jupiter that should be included in your reference data
```python
print(jdi.jupiter_pt()) #should return the path to your reference data
```
```python
start_case.atmosphere(filename=jdi.jupiter_pt(), delim_whitespace=True)
```
### File format
1) Must have **pressure**(bars), **temperature**(Kelvin), and `case sensitive` molecule names (e.g. TiO, Na, H2O, etc) for mixing ratios (in no particular order)
2) Can specify any necessary key word arguments for pd.read_csv at the end
**PICASO will auto-compute mixing ratios, determine what CIA is necessary, and compute the mean molecular weight based on these headers. Take a look at the preloaded example below.**
```python
#to give you an idea
comp_file = jdi.pd.read_csv(jdi.jupiter_pt(), delim_whitespace=True)
#see example below
comp_file.head()
```
## Create 1D Reflected Light Spectrum
Let's create our first spectrum of Jupiter's reflected light at full phase
```python
df = start_case.spectrum(opacity)
```
Checkout out what was returned (Note this is a change in v1.0)
```python
df.keys()
```
## Regrid Opacities to Constant Resolution
```python
wno, alb, fpfs = df['wavenumber'] , df['albedo'] , df['fpfs_reflected']
wno, alb = jdi.mean_regrid(wno, alb , R=150)
```
```python
jpi.show(jpi.spectrum(wno, alb, plot_width=500,x_range=[0.3,1]))
```
FpFs is the relative flux of the planet and star or :
$\frac{f_p}{f_s} = a_\lambda \left( \frac{ r_p}{a} \right) ^2$
where $a$ is the semi-major axis. You may have noticed that **we did not supply a radius or semi-major axis in the above code**. Therefore, if you print out $\frac{f_p}{f_s}$ you will see this:
```python
fpfs
```
Let's add this to the star function so we can get the relative flux as well..
```python
start_case.star(opacity, 5000,0,4.0,semi_major=1, semi_major_unit=jdi.u.Unit('au'))
start_case.gravity(radius=1, radius_unit=jdi.u.Unit('R_jup'),
mass = 1, mass_unit=jdi.u.Unit('M_jup'))
df = start_case.spectrum(opacity)
wno, alb, fpfs = df['wavenumber'] , df['albedo'] , df['fpfs_reflected']
```
```python
fpfs
```
### Option 2) Arbitrary PT and Chemistry
Sometimes for testing (or for atmospheres that we don't fully understand) an isothermal, well-mixed profile is sufficient. If we don't want to load in a full profile, we can give it a simple DataFrame with the info we need.
```python
start_case.atmosphere( df = jdi.pd.DataFrame({'pressure':np.logspace(-6,2,60),
'temperature':np.logspace(-6,2,60)*0+200,
"H2":np.logspace(-6,2,60)*0+0.837,
"He":np.logspace(-6,2,60)*0+0.163,
"CH4":np.logspace(-6,2,60)*0+0.000466})
)
```
```python
df = start_case.spectrum(opacity)
wno_ch4, alb_ch4, fpfs = df['wavenumber'] , df['albedo'] , df['fpfs_reflected']
wno_ch4, alb_ch4 = jdi.mean_regrid(wno_ch4, alb_ch4 , R=150)
```
```python
jpi.show(jpi.spectrum(wno_ch4, alb_ch4, plot_width=500,x_range=[0.3,1]))
```
See how the plot above is much easier to interpret than the one with the full set of molecular input. Here we can clearly see the effects of methane opacity, raman and rayleigh scattering (the next notebook will include a tutorial for more diagnostic plotting)
### Diagnostic help: Sometimes it helps to exclude molecule to see how it is influencing the spectrum
Take a look below
```python
start_case.atmosphere(filename=jdi.jupiter_pt(), exclude_mol='H2O', delim_whitespace=True)
df = start_case.spectrum(opacity)
wno_nowater, alb_nowater, fpfs = df['wavenumber'] , df['albedo'] , df['fpfs_reflected']
# BUGFIX: regrid the no-water spectrum itself. The original cell regridded
# wno_ch4/alb_ch4, so the red "no water" curve was actually the CH4-only
# spectrum from the earlier example.
wno_nowater, alb_nowater = jdi.mean_regrid(wno_nowater, alb_nowater, R=150)
fig = jpi.spectrum(wno, alb, plot_width=500)
fig.line(1e4/wno_nowater, alb_nowater, line_width=2, color='red')
jpi.show(fig)
```
|
natashabatalhaREPO_NAMEpicasoPATH_START.@picaso_extracted@picaso-master@docs@notebooks@1_GetStarted.ipynb@.PATH_END.py
|
{
"filename": "_detail.py",
"repo_name": "bek0s/gbkfit",
"repo_path": "gbkfit_extracted/gbkfit-master/src/gbkfit/dataset/datasets/_detail.py",
"type": "Python"
}
|
import logging
import typing
from gbkfit.dataset.data import data_parser
from gbkfit.utils import iterutils, parseutils
# Public names exported by this module.
# NOTE: this list was previously assigned to ``__init__``, which has no
# special meaning at module level; ``__all__`` is the attribute that
# declares the public API and controls ``from module import *``.
__all__ = [
    'load_dataset_common',
    'dump_dataset_common'
]

# Module-level logger, named after this module.
_log = logging.getLogger(__name__)
def _sanitize_dimensional_option(option, value, lengths, type_):
    """
    Normalize a dimensional configuration option.

    A scalar of the accepted type is broadcast into a list of the longest
    accepted length. A sequence of the accepted type is validated against
    the accepted length(s): too short raises, too long is trimmed with a
    warning. Anything else raises a RuntimeError.
    """
    # ``type_`` may be a union (e.g. ``int | float``); extract its members
    # so the error message can name all of them.
    union_members = typing.get_args(type_)
    if not union_members:
        union_members = (type_,)
    type_name = " | ".join(t.__name__ for t in union_members)
    lengths = iterutils.listify(lengths)
    max_length = max(lengths)
    # Scalars are broadcast to the longest accepted length.
    if isinstance(value, type_):
        return iterutils.make_list(max_length, value)
    # Sequences must be long enough; overly long ones are trimmed.
    if iterutils.is_sequence_of_type(value, type_):
        length = len(value)
        if length < max_length:
            raise RuntimeError(
                f"option '{option}' has a value "
                f"with a length shorter than expected; "
                f"expected length: {' or '.join(map(str, lengths))}, "
                f"current length: {len(value)}")
        if length > max_length:
            new_value = value[:max_length]
            _log.warning(
                f"option '{option}' has a value "
                f"with a length longer than expected; "
                f"current length: {len(value)}; "
                f"expected length: {' or '.join(map(str, lengths))}, "
                f"the value will be trimmed from {value} to {new_value}")
            value = new_value
        return value
    raise RuntimeError(
        f"option '{option}' should be a scalar of type {type_name}, "
        f"or a sequence of type {type_name} and "
        f"length of {' or '.join(map(str, lengths))}")
def load_dataset_common(cls, info, names, ndim, **kwargs):
    """
    Parse and validate the configuration common to all dataset types.

    The dimensional options ('size', 'step', 'rpix', 'rval') are sanitized
    up front rather than relying only on the type-hint validation done by
    parseutils.parse_options_for_callable() or on assertions inside the
    __init__() method. Being tolerant here allows, for example, converting
    an scube configuration into an image configuration by changing only
    its type: the first two dimensions are used and the last one ignored.
    """
    prefix = kwargs.get('prefix', '')
    desc = parseutils.make_typed_desc(cls, 'dataset')
    # Sanitize each dimensional option present in the configuration.
    dimensional_options = (
        ('size', int),
        ('step', int | float),
        ('rpix', int | float),
        ('rval', int | float))
    for option_name, option_type in dimensional_options:
        if option_name not in info:
            continue
        info[option_name] = _sanitize_dimensional_option(
            option_name, info[option_name], ndim, option_type)
    # Global coordinate system options; these act as defaults for any
    # data item in the dataset that does not define its own.
    step = info.pop('step', None)
    rpix = info.pop('rpix', None)
    rval = info.pop('rval', None)
    rota = info.pop('rota', None)
    # Load every named data item present in the configuration.
    for name in names:
        if name in info:
            info[name] = data_parser.load(
                info[name], step, rpix, rval, rota, prefix)
    # Extract and return the options accepted by cls.__init__().
    return parseutils.parse_options_for_callable(info, desc, cls.__init__)
def dump_dataset_common(dataset, **kwargs):
    """
    Serialize the parts common to all dataset types into a plain dict.

    Each data item is dumped into a triplet of fits files (data, mask,
    error) whose names are derived from the item key and optional prefix.
    """
    prefix = kwargs.get('prefix', '')
    dump_path = kwargs.get('dump_path', True)
    overwrite = kwargs.get('overwrite', False)
    # Dataset type followed by its coordinate system description.
    info = dict(
        type=dataset.type(),
        step=dataset.step(),
        rpix=dataset.rpix(),
        rval=dataset.rval(),
        rota=dataset.rota())
    for key, item in dataset.items():
        info[key] = item.dump(
            f'{prefix}{key}_d.fits',
            f'{prefix}{key}_m.fits',
            f'{prefix}{key}_e.fits',
            dump_wcs=False,  # Reduce unnecessary verbosity
            dump_path=dump_path, overwrite=overwrite)
    return info
|
bek0sREPO_NAMEgbkfitPATH_START.@gbkfit_extracted@gbkfit-master@src@gbkfit@dataset@datasets@_detail.py@.PATH_END.py
|
{
"filename": "MF_theory.py",
"repo_name": "franciscovillaescusa/Pylians",
"repo_path": "Pylians_extracted/Pylians-master/Mass_function/MF_theory.py",
"type": "Python"
}
|
import numpy as np
# mass_function_library is a project-local (Pylians) module -- not stdlib.
import mass_function_library as MFL
################################# INPUT ######################################
# input Pk at wanted MF redshift. For neutrinos use CDM+B Pk
f_Pk = 'Pk_m_z=0.dat'
bins_k = 10000 #number of bins to use in the input Pk
# For neutrinos use Omega_{CDM+B} instead of Omega_m
Omega_m = 0.3175
M_min = 1e10 #Msun/h
M_max = 1e16 #Msun/h
bins_MF = 300 #number of bins in the HMF
# halo mass function fit to use -- presumably 'ST' = Sheth-Tormen; see
# MFL.MF_theory for the accepted author strings (TODO confirm).
author = 'ST'
f_out = 'ST_MF_z=0.dat'
# optional arguments
z = 0.0 # only for 'Tinker', 'Tinker10' and Crocce
delta = 200 # only for 'Tinker' and 'Tinker10'
##############################################################################
# read input Pk (assumes a two-column text file: k and P(k) -- TODO confirm)
k, Pk = np.loadtxt(f_Pk, unpack=True)
# compute the masses at which compute the halo mass function
# (log-spaced between M_min and M_max)
M = np.logspace(np.log10(M_min), np.log10(M_max), bins_MF)
# compute the MF
MF = MFL.MF_theory(k, Pk, Omega_m, M, author, bins_k, z, delta)
# save results to file (two columns: mass, mass function)
np.savetxt(f_out, np.transpose([M,MF]))
|
franciscovillaescusaREPO_NAMEPyliansPATH_START.@Pylians_extracted@Pylians-master@Mass_function@MF_theory.py@.PATH_END.py
|
{
"filename": "runfiles_nirps_he.py",
"repo_name": "njcuk9999/apero-drs",
"repo_path": "apero-drs_extracted/apero-drs-main/apero/tools/module/processing/instruments/runfiles_nirps_he.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2022-06-06
@author: cook
"""
from typing import List
from apero.base import base
from apero.core import constants
from apero.tools.module.processing import drs_run_ini
# =============================================================================
# Define variables
# =============================================================================
# Module identification; version/author/date info is inherited from apero.base
__NAME__ = 'instruments.runfiles_nirps_he.ini.py'
__INSTRUMENT__ = 'NIRPS_HE'
__PACKAGE__ = base.__PACKAGE__
__version__ = base.__version__
__author__ = base.__author__
__date__ = base.__date__
__release__ = base.__release__
# get the run file class
RunIniFile = drs_run_ini.RunIniFile
# get parameter dictionary class
ParamDict = constants.ParamDict
# Define the default reference observation directory (looked up per instrument)
DEFAULT_REF_OBSDIR = drs_run_ini.DEFAULT_REF_OBSDIR[__INSTRUMENT__]
# =============================================================================
# Define functions
# =============================================================================
# Preprocessing switches disabled by every "partial" run file below; each
# such run only needs a subset of the preprocessed files.
_PP_SHARED_OFF = ('RUN_PP_HC1HC1', 'RUN_PP_FPFP', 'RUN_PP_FF', 'RUN_PP_DFP',
                  'RUN_PP_SKY', 'RUN_PP_LFC', 'RUN_PP_LFCFP', 'RUN_PP_FPLFC')
# LBL skip switches that must be disabled when a full LBL (re-)run is wanted.
_LBL_SKIPS = ('SKIP_LBLREF', 'SKIP_LBLMASK_SCI', 'SKIP_LBLCOMPUTE_SCI',
              'SKIP_LBLCOMPILE_SCI')


def _set_false(run_file: RunIniFile, keys) -> None:
    """Set every key in `keys` to False in the given run file."""
    for key in keys:
        run_file.modify(key, False)


def get_runfiles(params: ParamDict) -> List[RunIniFile]:
    """
    Defines all possible run files

    :param params: ParamDict, parameter dictionary of constants

    :return: list of RunIniFile instances
    """
    # storage list
    run_files = []
    # -------------------------------------------------------------------------
    # create default runs files for nirps_he
    # -------------------------------------------------------------------------
    # blank run
    blank_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'blank_run')
    blank_run_nirps_he.append_sequence('blank_seq')
    run_files.append(blank_run_nirps_he)
    # mini run
    mini_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'mini_run')
    mini_run_nirps_he.append_sequence('limited_seq')
    # do not skip any steps of the lbl
    _set_false(mini_run_nirps_he, _LBL_SKIPS)
    run_files.append(mini_run_nirps_he)
    # quick run (preprocess science files only)
    quick_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'quick_run')
    quick_run_nirps_he.append_sequence('pp_seq_opt')
    quick_run_nirps_he.append_sequence('quick_seq')
    _set_false(quick_run_nirps_he,
               ('RUN_PP_CAL', 'RUN_PP_TEL') + _PP_SHARED_OFF)
    run_files.append(quick_run_nirps_he)
    # calib run (preprocess calibration files only)
    calib_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'calib_run')
    calib_run_nirps_he.append_sequence('pp_seq_opt')
    calib_run_nirps_he.append_sequence('calib_seq')
    _set_false(calib_run_nirps_he,
               ('RUN_PP_SCI', 'RUN_PP_TEL') + _PP_SHARED_OFF)
    run_files.append(calib_run_nirps_he)
    # complete run
    complete_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'complete_run')
    complete_run_nirps_he.skip_default = False
    complete_run_nirps_he.append_sequence('full_seq')
    run_files.append(complete_run_nirps_he)
    # reference calib run
    mcalib_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'ref_calib_run')
    mcalib_run_nirps_he.append_sequence('pp_seq_opt')
    mcalib_run_nirps_he.append_sequence('ref_seq')
    _set_false(mcalib_run_nirps_he,
               ('RUN_PP_SCI', 'RUN_PP_TEL') + _PP_SHARED_OFF)
    run_files.append(mcalib_run_nirps_he)
    # other run
    other_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'other_run')
    other_run_nirps_he.append_sequence('pp_seq_opt')
    other_run_nirps_he.append_sequence('eng_seq')
    other_run_nirps_he.run_default = False
    run_files.append(other_run_nirps_he)
    # science run
    science_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'science_run')
    science_run_nirps_he.append_sequence('pp_seq_opt')
    science_run_nirps_he.append_sequence('science_seq')
    _set_false(science_run_nirps_he,
               ('RUN_PP_CAL', 'RUN_PP_TEL') + _PP_SHARED_OFF)
    science_run_nirps_he.modify('RECAL_TEMPLATES', False)
    # BUGFIX: the science run file was configured but never registered, so
    # 'science_run' was silently missing from the generated run files.
    run_files.append(science_run_nirps_he)
    # tellu run
    tellu_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'tellu_run')
    tellu_run_nirps_he.append_sequence('pp_seq_opt')
    tellu_run_nirps_he.append_sequence('science_seq')
    _set_false(tellu_run_nirps_he,
               ('RUN_PP_CAL', 'RUN_PP_SCI') + _PP_SHARED_OFF)
    run_files.append(tellu_run_nirps_he)
    # test run
    test_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'test_run')
    test_run_nirps_he.append_sequence('limited_seq')
    test_run_nirps_he.run_default = False
    test_run_nirps_he.modify('TEST_RUN', True)
    run_files.append(test_run_nirps_he)
    # helios run
    helios_nirps_he = RunIniFile(params, 'NIRPS_HE', 'helios_run')
    helios_nirps_he.append_sequence('helios_seq')
    run_files.append(helios_nirps_he)
    # trigger night calib run
    tnc_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'trigger_night_calibrun')
    tnc_run_nirps_he.append_sequence('pp_seq_opt')
    tnc_run_nirps_he.append_sequence('calib_seq')
    _set_false(tnc_run_nirps_he,
               ('RUN_PP_SCI', 'RUN_PP_TEL') + _PP_SHARED_OFF)
    tnc_run_nirps_he.modify('RECAL_TEMPLATES', False)
    tnc_run_nirps_he.modify('TRIGGER_RUN', True)
    tnc_run_nirps_he.modify('USE_ENGINEERING', True)
    run_files.append(tnc_run_nirps_he)
    # trigger night science run
    tns_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'trigger_night_scirun')
    tns_run_nirps_he.append_sequence('pp_seq_opt')
    tns_run_nirps_he.append_sequence('science_seq')
    _set_false(tns_run_nirps_he,
               ('RUN_PP_CAL', 'RUN_PP_TEL') + _PP_SHARED_OFF)
    tns_run_nirps_he.modify('RECAL_TEMPLATES', False)
    tns_run_nirps_he.modify('TRIGGER_RUN', True)
    tns_run_nirps_he.modify('USE_ENGINEERING', True)
    run_files.append(tns_run_nirps_he)
    # lbl run
    lbl_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'lbl_run')
    lbl_run_nirps_he.append_sequence('lbl_seq')
    # do not skip any steps of the lbl
    _set_false(lbl_run_nirps_he, _LBL_SKIPS)
    run_files.append(lbl_run_nirps_he)
    # batch run
    # batch_run_nirps_he = RunIniFile(params, 'NIRPS_HE', 'batch_run')
    # batch_run_nirps_he.add_sequence_as_command('limited_seq')
    # batch_run_nirps_he.modify('RUN_OBS_DIR', DEFAULT_REF_OBSDIR['NIRPS_HE'])
    # run_files.append(batch_run_nirps_he)
    # -------------------------------------------------------------------------
    # return list of RunIniFile instances
    return run_files
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
    # This module is meant to be imported; direct execution is a placeholder.
    # print hello world
    print('Hello World')
# =============================================================================
# End of code
# =============================================================================
|
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@apero@tools@module@processing@instruments@runfiles_nirps_he.py@.PATH_END.py
|
{
"filename": "sparse_embeddings.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/qdrant/langchain_qdrant/sparse_embeddings.py",
"type": "Python"
}
|
from abc import ABC, abstractmethod
from typing import List
from langchain_core.runnables.config import run_in_executor
from pydantic import BaseModel, Field
class SparseVector(BaseModel, extra="forbid"):
    """
    Sparse vector structure

    Represents a vector by the positions (``indices``) and magnitudes
    (``values``) of its non-zero entries only. Extra fields are rejected
    (``extra="forbid"``).
    """
    # Positions of the non-zero entries (must be unique).
    indices: List[int] = Field(..., description="indices must be unique")
    # Magnitudes of the non-zero entries; paired one-to-one with ``indices``.
    values: List[float] = Field(
        ..., description="values and indices must be the same length"
    )
class SparseEmbeddings(ABC):
    """Interface that sparse embedding models must implement for Qdrant.

    Subclasses provide the two synchronous methods; the asynchronous
    variants default to running the synchronous ones in an executor.
    """

    @abstractmethod
    def embed_documents(self, texts: List[str]) -> List[SparseVector]:
        """Embed a batch of documents for indexing/search."""

    @abstractmethod
    def embed_query(self, text: str) -> SparseVector:
        """Embed a single query string."""

    async def aembed_documents(self, texts: List[str]) -> List[SparseVector]:
        """Asynchronously embed a batch of documents.

        Delegates to ``embed_documents`` on an executor thread.
        """
        return await run_in_executor(None, self.embed_documents, texts)

    async def aembed_query(self, text: str) -> SparseVector:
        """Asynchronously embed a single query string.

        Delegates to ``embed_query`` on an executor thread.
        """
        return await run_in_executor(None, self.embed_query, text)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@qdrant@langchain_qdrant@sparse_embeddings.py@.PATH_END.py
|
{
"filename": "parallel_stacking.ipynb",
"repo_name": "ali-beheshti/Astro-Paint",
"repo_path": "Astro-Paint_extracted/Astro-Paint-master/examples/parallel_stacking.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
np.random.seed(0)
import astropaint as ap
from astropaint import Catalog, Canvas, Painter
from astropaint import utils, transform
from astropaint.profiles import NFW
import matplotlib.pyplot as plt
from matplotlib import cm
```
In this notebook we will
1. Paint `NFW.kSZ_T` profiles on top of a random shell catalog with 1000 halos
2. Add Noise to the map to make individual signals undetectable
3. Stack 2x2 sqr deg cutouts around each halo to enhance detection S/N
# Painting kSZ
Let's generate a random shell with 1000 halos.
```python
catalog = Catalog()
```
Building the dataframe and updating all the parameters...
Done!
```python
catalog.generate_random_shell(n_tot=1000)
```
generating random catalog...
Catalog data has been modified...
Building the dataframe and updating all the parameters...
Done!
```python
plt.figure(figsize=(5,5))
plt.scatter(catalog.data.x, catalog.data.y, s=0.5)
```
<matplotlib.collections.PathCollection at 0x12422b898>

Put it on a canvas.
```python
nside = 2048
canvas = Canvas(catalog, nside, R_times=4)
```
```python
canvas.show_halo_centers(s=0.5)
```
0.0 180.0 -180.0 180.0
The interval between parallels is 30 deg -0.00'.
The interval between meridians is 30 deg -0.00'.

Paint `NFW.kSZ`...
```python
painter = Painter(NFW.kSZ_T)
```
The template 'kSZ_T' takes in the following arguments:
['R', 'rho_s', 'R_s', 'v_r']
and the following keyword-only arguments:
['T_cmb']
```python
R = np.linspace(0,5,100)
painter.plot_template(R, catalog)
plt.yscale("log")
plt.xscale("log")
```
No template_kwargs provided
spray_df.columns = Index(['v_r', 'rho_s', 'R_s'], dtype='object')

```python
painter.spray(canvas)
```
Painting the canvas...
No template_kwargs provided
spray_df.columns = Index(['v_r', 'rho_s', 'R_s'], dtype='object')
Spraying in parallel...
canvas memory size [GB]: 0.37500008940696716
n_cpus = 4
Spraying in 4 batches
HBox(children=(IntProgress(value=0, description='painting', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='painting', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='painting', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='painting', max=250, style=ProgressStyle(description_width='in…
/Users/siavashyasini/Dropbox/cosmology/Projects/2020/AstroPaint/astropaint/paint_bucket.py:2524: ComplexWarning: Casting complex values to real discards the imaginary part
template(**spray_dict))
Your artwork is finished. Check it out with Canvas.show_map()
```python
canvas.show_map(min=-1E-6,max=1E-6)
```

```python
canvas.show_map("cartesian", lonra=[0,20], latra=[0,20], min=-1E-6,max=1E-6)
```

# Stack kSZ
## Signal Stack
Since the velocities can be randomly either positive or negative, stacking the kSZ patches will average to zero. To avoid this, we can flip the sign of the cutout around each halo based on the sign of the velocity vector so that all halos are moving towards us. In other words, if the radial velocity is positive, we multiply the patch by -1, and if `v_r` is negative we multiply by +1. Obviously here we are assuming that we know the radial velocity for all the halos. An alternative strategy would be to stack the absolute value of each cutout.
```python
def flip(patch, v_r):
return - patch * np.sign(v_r)
```
```python
v_r_df = {"v_r": catalog.data.v_r}
```
Let's check out the cutouts around the first three halos
```python
lat_range = lon_range = [-1,1] # extent of the cutout around each halo in degrees
xpix= 200 # number of pixels on the side
cutouts = canvas.cutouts(halo_list=np.arange(3),
lon_range=lon_range,
xpix=xpix
)
for cutout in cutouts:
plt.imshow(cutout, cmap=cm.RdBu_r)
#plt.plot(cutout[xpix//2])
plt.show()
```



The central pixels are all positive (hot) as expected. Now stack all 1000 halos on top of each other.
```python
kSZ_stack = canvas.stack_cutouts(halo_list="all",
lon_range=lon_range,
xpix=xpix,
apply_func=[flip], # list of functions to apply to each cutout
func_kwargs=[v_r_df], # kwargs to pass to each function
inplace=False,
parallel=True)
```
Stacking in parallel...
n_cpus = 4
Stacking 4 batches
HBox(children=(IntProgress(value=0, description='stacking', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='stacking', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='stacking', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='stacking', max=250, style=ProgressStyle(description_width='in…
Checkout the result with canvas.stack
```python
fig, ax = plt.subplots(1,2, figsize=(13, 5), dpi=100)
stack = 1E6*kSZ_stack/catalog.size
# plot the stacked BG pile
stack_plot = ax[0].imshow(stack, cmap=cm.RdBu_r)
cbar = plt.colorbar(stack_plot, ax=ax[0])
cbar.set_label(r'$\Theta [\mu K]$', labelpad=-50, y=1.15, rotation=0)
# plot a slice through the middle
ax[1].plot(stack[xpix//2])
ax[1].grid()
```

## Adding Noise
Now, let's add some white noise to the map...
```python
Nl = utils.get_custom_Nl(sigma_n=10, lmax=6000)
```
Custom noise @ [217] GHz:
fwhm [arcmin] = None
sigma_T [uK-arcmin] = [10]
```python
# plot the power spectra for comparison
# calculating canvas.Cl might take a while...
# Use canvas.get_Cl(lmax=500 or 1000) if you're not patient.
plt.figure(figsize=(6,4), dpi=100)
plt.loglog(canvas.Cl, label="kSZ")
plt.loglog(Nl, label="Noise")
plt.ylabel("$C_\ell$")
plt.xlabel("$\ell$")
plt.legend()
```
alms saved in canvas.alm
<matplotlib.legend.Legend at 0x13d8e6ba8>

The noise is dominant over small scales! Great, now let's add it to the map.
```python
canvas.add_noise(Nl)
```
/Users/siavashyasini/anaconda3/lib/python3.7/site-packages/healpy/sphtfunc.py:400: FutureChangeWarning: The order of the input cl's will change in a future release.
Use new=True keyword to start using the new order.
See documentation of healpy.synalm.
category=FutureChangeWarning,
Sigma is 0.000000 arcmin (0.000000 rad)
-> fwhm is 0.000000 arcmin
```python
canvas.show_map()
```

```python
canvas.show_map("cartesian", lonra=[0,20], latra=[0,20], min=-1E-6,max=1E-6)
```

## Noisy Stack
Let's make sure the individual cutouts are dominated by noise.
```python
cutouts = canvas.cutouts(halo_list=np.arange(3),
lon_range=lon_range,
xpix=xpix
)
for cutout in cutouts:
plt.imshow(cutout, cmap=cm.RdBu_r)
plt.show()
```



And now stack everything together...
```python
canvas.stack_cutouts(halo_list="all",
lon_range=lon_range,
xpix=xpix,
apply_func=[flip],
func_kwargs=[v_r_df],
parallel=True)
```
Stacking in parallel...
n_cpus = 4
Stacking 4 batches
HBox(children=(IntProgress(value=0, description='stacking', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='stacking', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='stacking', max=250, style=ProgressStyle(description_width='in…
HBox(children=(IntProgress(value=0, description='stacking', max=250, style=ProgressStyle(description_width='in…
Checkout the result with canvas.stack
```python
fig, ax = plt.subplots(1,2, figsize=(13, 5), dpi=100)
noisy_stack = 1E6*canvas.stack/catalog.size
stack = 1E6*kSZ_stack/catalog.size
# plot the stacked noisy kSZ
stack_plot = ax[0].imshow(noisy_stack, cmap=cm.RdBu_r)
cbar = plt.colorbar(stack_plot, ax=ax[0])
cbar.set_label(r'$\Theta [\mu K]$', labelpad=-50, y=1.15, rotation=0)
# plot a slice through the middle
ax[1].plot(noisy_stack[xpix//2], label="Noisy Stack")
ax[1].plot(stack[xpix//2], label = "Signal Stack")
ax[1].grid()
plt.legend()
```
<matplotlib.legend.Legend at 0x13e6a74e0>

_Voila!_
|
ali-beheshtiREPO_NAMEAstro-PaintPATH_START.@Astro-Paint_extracted@Astro-Paint-master@examples@parallel_stacking.ipynb@.PATH_END.py
|
{
"filename": "_filter_design.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/signal/_filter_design.py",
"type": "Python"
}
|
"""Filter design."""
import math
import operator
import warnings
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, full, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from numpy.polynomial.polynomial import polyvalfromroots
from scipy import special, optimize, fft as sp_fft
from scipy.special import comb
from scipy._lib._util import float_factorial
# Names exported by ``from ... import *``; these are the public entry points
# of this (otherwise private) filter-design module.
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
           'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
           'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
           'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
           'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
           'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
           'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
           'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk',
           'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
           'gammatone', 'iircomb']
class BadCoefficients(UserWarning):
    """Warning emitted when filter coefficients are badly conditioned."""
# Within this module, ``abs`` refers to NumPy's ``absolute`` (shadows the
# builtin of the same name).
abs = absolute
def _is_int_type(x):
"""
Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will
pass, while ``5.0`` and ``array([5])`` will fail.
"""
if np.ndim(x) != 0:
# Older versions of NumPy did not raise for np.array([1]).__index__()
# This is safe to remove when support for those versions is dropped
return False
try:
operator.index(x)
except TypeError:
return False
else:
return True
def findfreqs(num, den, N, kind='ba'):
    """
    Find array of frequencies for computing the response of an analog filter.

    Parameters
    ----------
    num, den : array_like, 1-D
        The polynomial coefficients of the numerator and denominator of the
        transfer function of the filter or LTI system, ordered from highest
        to lowest degree ('ba'), or the roots of numerator and denominator,
        i.e. zeros and poles ('zp').
    N : int
        The length of the array to be computed.
    kind : str {'ba', 'zp'}, optional
        Whether `num`/`den` are polynomial coefficients ('ba') or roots
        ('zp').

    Returns
    -------
    w : (N,) ndarray
        A 1-D array of frequencies, logarithmically spaced.

    Examples
    --------
    Find a set of nine frequencies that span the "interesting part" of the
    frequency response for the filter with the transfer function

        H(s) = s / (s^2 + 8s + 25)

    >>> from scipy import signal
    >>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
    array([  1.00000000e-02,   3.16227766e-02,   1.00000000e-01,
             3.16227766e-01,   1.00000000e+00,   3.16227766e+00,
             1.00000000e+01,   3.16227766e+01,   1.00000000e+02])
    """
    if kind == 'ba':
        poles = np.atleast_1d(np.roots(den)) + 0j
        zeros = np.atleast_1d(np.roots(num)) + 0j
    elif kind == 'zp':
        poles = np.atleast_1d(den) + 0j
        zeros = np.atleast_1d(num) + 0j
    else:
        raise ValueError("input must be one of {'ba', 'zp'}")

    # An all-pole-free system still needs an anchor for the frequency range.
    if len(poles) == 0:
        poles = np.atleast_1d(-1000) + 0j

    # Keep upper-half-plane poles plus upper-half-plane zeros of reasonable
    # magnitude; lower-half-plane conjugates carry no extra information.
    interesting = np.concatenate((
        poles[poles.imag >= 0],
        zeros[(np.abs(zeros) < 1e5) & (zeros.imag >= 0)],
    ))

    # Flag (near-)zero roots so the log10 below never sees zero.
    integ = np.abs(interesting) < 1e-10
    hfreq = np.around(
        np.log10(np.max(3 * np.abs(interesting.real + integ) +
                        1.5 * interesting.imag)) + 0.5)
    lfreq = np.around(
        np.log10(0.1 * np.min(np.abs((interesting + integ).real) +
                              2 * interesting.imag)) - 0.5)
    return np.logspace(lfreq, hfreq, N)
def freqs(b, a, worN=200, plot=None):
    """
    Compute frequency response of analog filter.

    Given the M-order numerator `b` and N-order denominator `a` of an analog
    filter, compute its frequency response::

             b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
     H(w) = ----------------------------------------------
             a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]

    Parameters
    ----------
    b : array_like
        Numerator of a linear filter.
    a : array_like
        Denominator of a linear filter.
    worN : {None, int, array_like}, optional
        If None, compute at 200 frequencies around the interesting parts of
        the response curve (determined by pole-zero locations). If a single
        integer, compute at that many frequencies. Otherwise, compute the
        response at the angular frequencies (e.g., rad/s) given in `worN`.
    plot : callable, optional
        A callable taking two arguments; if given, ``plot(w, h)`` is called
        before returning. Useful for plotting the response inside `freqs`.

    Returns
    -------
    w : ndarray
        The angular frequencies at which `h` was computed.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqz : Compute the frequency response of a digital filter.

    Notes
    -----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, as this plots the real part of the complex transfer
    function, not the magnitude.  Try ``lambda w, h: plot(w, abs(h))``.
    """
    # Resolve the frequency grid: None is a legacy alias for 200 points.
    if worN is None:
        worN = 200
    if _is_int_type(worN):
        w = findfreqs(b, a, worN)
    else:
        w = atleast_1d(worN)

    # Evaluate H(s) along the imaginary axis s = j*w.
    jw = 1j * w
    h = polyval(b, jw) / polyval(a, jw)
    if plot is not None:
        plot(w, h)
    return w, h
def freqs_zpk(z, p, k, worN=200):
    """
    Compute frequency response of analog filter.

    Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
    frequency response::

                (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
     H(w) = k * ----------------------------------------
                (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])

    Parameters
    ----------
    z : array_like
        Zeroes of a linear filter
    p : array_like
        Poles of a linear filter
    k : scalar
        Gain of a linear filter
    worN : {None, int, array_like}, optional
        If None, compute at 200 frequencies around the interesting parts of
        the response curve (determined by pole-zero locations). If a single
        integer, compute at that many frequencies. Otherwise, compute the
        response at the angular frequencies (e.g., rad/s) given in `worN`.

    Returns
    -------
    w : ndarray
        The angular frequencies at which `h` was computed.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqs : Compute the frequency response of an analog filter in TF form
    freqz : Compute the frequency response of a digital filter in TF form
    freqz_zpk : Compute the frequency response of a digital filter in ZPK form

    Notes
    -----
    .. versionadded:: 0.19.0
    """
    k = np.asarray(k)
    if k.size > 1:
        raise ValueError('k must be a single scalar gain')

    # Resolve the frequency grid: None is a legacy alias for 200 points.
    if worN is None:
        worN = 200
    if _is_int_type(worN):
        w = findfreqs(z, p, worN, kind='zp')
    else:
        w = atleast_1d(worN)

    # Evaluate the factored H(s) along the imaginary axis s = j*w.
    s = 1j * w
    h = k * polyvalfromroots(s, z) / polyvalfromroots(s, p)
    return w, h
def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi,
          include_nyquist=False):
    """
    Compute the frequency response of a digital filter.

    Given the M-order numerator `b` and N-order denominator `a` of a digital
    filter, compute its frequency response::

                 jw                 -jw              -jwM
        jw    B(e  )    b[0] + b[1]e    + ... + b[M]e
     H(e  ) = ------ = -----------------------------------
                 jw                 -jw              -jwN
              A(e  )    a[0] + a[1]e    + ... + a[N]e

    Parameters
    ----------
    b : array_like
        Numerator of a linear filter. If `b` has dimension greater than 1,
        it is assumed that the coefficients are stored in the first dimension,
        and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
        array must be compatible for broadcasting.
    a : array_like
        Denominator of a linear filter. If `b` has dimension greater than 1,
        it is assumed that the coefficients are stored in the first dimension,
        and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
        array must be compatible for broadcasting.
    worN : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies (default is
        N=512). This is a convenient alternative to::

            np.linspace(0, fs if whole else fs/2, N, endpoint=include_nyquist)

        Using a number that is fast for FFT computations can result in
        faster computations (see Notes).
        If an array_like, compute the response at the frequencies given.
        These are in the same units as `fs`.
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        fs/2 (upper-half of unit-circle). If `whole` is True, compute
        frequencies from 0 to fs. Ignored if worN is array_like.
    plot : callable
        A callable that takes two arguments. If given, the return parameters
        `w` and `h` are passed to plot. Useful for plotting the frequency
        response inside `freqz`.
    fs : float, optional
        The sampling frequency of the digital system. Defaults to 2*pi
        radians/sample (so w is from 0 to pi).

        .. versionadded:: 1.2.0
    include_nyquist : bool, optional
        If `whole` is False and `worN` is an integer, setting `include_nyquist`
        to True will include the last frequency (Nyquist frequency) and is
        otherwise ignored.

        .. versionadded:: 1.5.0

    Returns
    -------
    w : ndarray
        The frequencies at which `h` was computed, in the same units as `fs`.
        By default, `w` is normalized to the range [0, pi) (radians/sample).
    h : ndarray
        The frequency response, as complex numbers.

    See Also
    --------
    freqz_zpk
    sosfreqz

    Notes
    -----
    Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable
    for `plot` produces unexpected results, as this plots the real part of the
    complex transfer function, not the magnitude.
    Try ``lambda w, h: plot(w, np.abs(h))``.

    A direct computation via (R)FFT is used to compute the frequency response
    when the following conditions are met:

    1. An integer value is given for `worN`.
    2. `worN` is fast to compute via FFT (i.e.,
       `next_fast_len(worN) <scipy.fft.next_fast_len>` equals `worN`).
    3. The denominator coefficients are a single value (``a.shape[0] == 1``).
    4. `worN` is at least as long as the numerator coefficients
       (``worN >= b.shape[0]``).
    5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``.

    For long FIR filters, the FFT approach can have lower error and be much
    faster than the equivalent direct polynomial calculation.

    Examples
    --------
    >>> from scipy import signal
    >>> import numpy as np
    >>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
    >>> w, h = signal.freqz(b)

    >>> import matplotlib.pyplot as plt
    >>> fig, ax1 = plt.subplots()
    >>> ax1.set_title('Digital filter frequency response')

    >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
    >>> ax1.set_ylabel('Amplitude [dB]', color='b')
    >>> ax1.set_xlabel('Frequency [rad/sample]')

    >>> ax2 = ax1.twinx()
    >>> angles = np.unwrap(np.angle(h))
    >>> ax2.plot(w, angles, 'g')
    >>> ax2.set_ylabel('Angle (radians)', color='g')
    >>> ax2.grid(True)
    >>> ax2.axis('tight')
    >>> plt.show()

    Broadcasting Examples

    Suppose we have two FIR filters whose coefficients are stored in the
    rows of an array with shape (2, 25). For this demonstration, we'll
    use random data:

    >>> rng = np.random.default_rng()
    >>> b = rng.random((2, 25))

    To compute the frequency response for these two filters with one call
    to `freqz`, we must pass in ``b.T``, because `freqz` expects the first
    axis to hold the coefficients. We must then extend the shape with a
    trivial dimension of length 1 to allow broadcasting with the array
    of frequencies.  That is, we pass in ``b.T[..., np.newaxis]``, which has
    shape (25, 2, 1):

    >>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024)
    >>> w.shape
    (1024,)
    >>> h.shape
    (2, 1024)

    Now, suppose we have two transfer functions, with the same numerator
    coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators
    are stored in the first dimension of the 2-D array  `a`::

        a = [   1      1  ]
            [ -0.25, -0.5 ]

    >>> b = np.array([0.5, 0.5])
    >>> a = np.array([[1, 1], [-0.25, -0.5]])

    Only `a` is more than 1-D. To make it compatible for
    broadcasting with the frequencies, we extend it with a trivial dimension
    in the call to `freqz`:

    >>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024)
    >>> w.shape
    (1024,)
    >>> h.shape
    (2, 1024)

    """
    b = atleast_1d(b)
    a = atleast_1d(a)

    if worN is None:
        # For backwards compatibility
        worN = 512

    # h stays None unless the FFT fast path below produced the response.
    h = None

    if _is_int_type(worN):
        # Integer worN: build a uniform frequency grid and try the FFT path.
        N = operator.index(worN)
        del worN
        if N < 0:
            raise ValueError(f'worN must be nonnegative, got {N}')
        lastpoint = 2 * pi if whole else pi
        # if include_nyquist is true and whole is false, w should
        # include end point
        w = np.linspace(0, lastpoint, N,
                        endpoint=include_nyquist and not whole)

        # FFT length that makes the DFT bins land exactly on the grid above.
        n_fft = N if whole else 2 * (N - 1) if include_nyquist else 2 * N
        # Fast path: pure-FIR response (scalar denominator) evaluated with a
        # single (R)FFT of the numerator; see conditions in the Notes.
        if (a.size == 1 and (b.ndim == 1 or (b.shape[-1] == 1))
                and n_fft >= b.shape[0]
                and n_fft > 0):  # TODO: review threshold acc. to benchmark?
            if np.isrealobj(b) and np.isrealobj(a):
                fft_func = sp_fft.rfft
            else:
                fft_func = sp_fft.fft
            h = fft_func(b, n=n_fft, axis=0)[:N]
            h /= a
            if fft_func is sp_fft.rfft and whole:
                # exclude DC and maybe Nyquist (no need to use axis_reverse
                # here because we can build reversal with the truncation)
                stop = -1 if n_fft % 2 == 1 else -2
                h_flip = slice(stop, 0, -1)
                h = np.concatenate((h, h[h_flip].conj()))
            if b.ndim > 1:
                # Last axis of h has length 1, so drop it.
                h = h[..., 0]
                # Move the first axis of h to the end.
                h = np.moveaxis(h, 0, -1)
    else:
        # Explicit frequencies: convert from units of fs to radians/sample.
        w = atleast_1d(worN)
        del worN
        w = 2*pi*w/fs

    if h is None:  # still need to compute using freqs w
        # General path: evaluate B(z)/A(z) at z^-1 = exp(-j*w).
        zm1 = exp(-1j * w)
        h = (npp_polyval(zm1, b, tensor=False) /
             npp_polyval(zm1, a, tensor=False))

    # Report frequencies back in the caller's units.
    w = w*fs/(2*pi)

    if plot is not None:
        plot(w, h)

    return w, h
def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi):
    r"""
    Compute the frequency response of a digital filter in ZPK form.

    Given the zeros `z`, poles `p` and gain `k` of a digital filter, compute
    its frequency response

    :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`

    evaluated on the unit circle.

    Parameters
    ----------
    z : array_like
        Zeroes of a linear filter
    p : array_like
        Poles of a linear filter
    k : scalar
        Gain of a linear filter
    worN : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies (default is
        N=512).  If an array_like, compute the response at the frequencies
        given.  These are in the same units as `fs`.
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        fs/2 (upper-half of unit-circle). If `whole` is True, compute
        frequencies from 0 to fs. Ignored if w is array_like.
    fs : float, optional
        The sampling frequency of the digital system. Defaults to 2*pi
        radians/sample (so w is from 0 to pi).

        .. versionadded:: 1.2.0

    Returns
    -------
    w : ndarray
        The frequencies at which `h` was computed, in the same units as `fs`.
        By default, `w` is normalized to the range [0, pi) (radians/sample).
    h : ndarray
        The frequency response, as complex numbers.

    See Also
    --------
    freqs : Compute the frequency response of an analog filter in TF form
    freqs_zpk : Compute the frequency response of an analog filter in ZPK form
    freqz : Compute the frequency response of a digital filter in TF form

    Notes
    -----
    .. versionadded:: 0.19.0
    """
    z = atleast_1d(z)
    p = atleast_1d(p)

    # Uniform grid over the half or full unit circle, or explicit
    # user-supplied frequencies converted from units of fs.
    lastpoint = 2 * pi if whole else pi
    if worN is None:
        # For backwards compatibility
        w = np.linspace(0, lastpoint, 512, endpoint=False)
    elif _is_int_type(worN):
        w = np.linspace(0, lastpoint, worN, endpoint=False)
    else:
        w = 2 * pi * atleast_1d(worN) / fs

    # Evaluate the factored transfer function on the unit circle.
    unit_circle = exp(1j * w)
    h = k * polyvalfromroots(unit_circle, z) / polyvalfromroots(unit_circle, p)

    # Report frequencies back in the caller's units.
    return w * fs / (2 * pi), h
def group_delay(system, w=512, whole=False, fs=2*pi):
    r"""Compute the group delay of a digital filter.

    The group delay measures by how many samples amplitude envelopes of
    various spectral components of a signal are delayed by a filter.
    It is formally defined as the derivative of continuous (unwrapped) phase::

               d        jw
     D(w) = - -- arg H(e)
              dw

    Parameters
    ----------
    system : tuple of array_like (b, a)
        Numerator and denominator coefficients of a filter transfer function.
    w : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies (default is
        N=512).  If an array_like, compute the delay at the frequencies given.
        These are in the same units as `fs`.
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        fs/2 (upper-half of unit-circle). If `whole` is True, compute
        frequencies from 0 to fs. Ignored if w is array_like.
    fs : float, optional
        The sampling frequency of the digital system. Defaults to 2*pi
        radians/sample (so w is from 0 to pi).

        .. versionadded:: 1.2.0

    Returns
    -------
    w : ndarray
        The frequencies at which group delay was computed, in the same units
        as `fs`.  By default, `w` is normalized to the range [0, pi)
        (radians/sample).
    gd : ndarray
        The group delay.

    See Also
    --------
    freqz : Frequency response of a digital filter

    Notes
    -----
    The similar function in MATLAB is called `grpdelay`.

    If the transfer function :math:`H(z)` has zeros or poles on the unit
    circle, the group delay at corresponding frequencies is undefined.
    When such a case arises the warning is raised and the group delay
    is set to 0 at those frequencies.

    For the details of numerical computation of the group delay refer to [1]_.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
           3rd edition", p. 830.
    """
    if w is None:
        # For backwards compatibility
        w = 512

    # Build the frequency grid (radians/sample) from an integer count, or
    # convert explicit user-supplied frequencies from units of `fs`.
    if _is_int_type(w):
        if whole:
            w = np.linspace(0, 2 * pi, w, endpoint=False)
        else:
            w = np.linspace(0, pi, w, endpoint=False)
    else:
        w = np.atleast_1d(w)
        w = 2*pi*w/fs

    b, a = map(np.atleast_1d, system)
    # Group delay of B/A as the ramped-coefficient ratio of
    # C(z) = B(z) * reversed(A)(z), minus the denominator order; this avoids
    # differentiating a quotient (see [1]).
    c = np.convolve(b, a[::-1])
    cr = c * np.arange(c.size)
    z = np.exp(-1j * w)
    num = np.polyval(cr[::-1], z)
    den = np.polyval(c[::-1], z)
    gd = np.real(num / den) - a.size + 1

    singular = ~np.isfinite(gd)
    # Bugfix: the original code referenced a module-level ``EPSILON`` that is
    # never defined in this file, so this check raised NameError at runtime.
    # Use the intended machine-epsilon-scale threshold locally instead.
    epsilon = 2e-16
    near_singular = np.absolute(den) < 10 * epsilon

    if np.any(singular):
        # Undefined delay (root on the unit circle): warn and report 0 there.
        gd[singular] = 0
        warnings.warn(
            "The group delay is singular at frequencies [{}], setting to 0".
            format(", ".join(f"{ws:.3f}" for ws in w[singular])),
            stacklevel=2
        )
    elif np.any(near_singular):
        warnings.warn(
            "The filter's denominator is extremely small at frequencies [{}], \
around which a singularity may be present".
            format(", ".join(f"{ws:.3f}" for ws in w[near_singular])),
            stacklevel=2
        )

    # Report frequencies back in the caller's units.
    w = w*fs/(2*pi)
    return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=512, whole=False, fs=2*pi):
    r"""
    Compute the frequency response of a digital filter in SOS format.

    Given `sos`, an array with shape (n, 6) of second order sections of
    a digital filter, compute the frequency response of the system function::

               B0(z)   B1(z)         B{n-1}(z)
        H(z) = ----- * ----- * ... * ---------
               A0(z)   A1(z)         A{n-1}(z)

    for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
    denominator of the transfer function of the k-th second order section.

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. Each row corresponds to a second-order
        section, with the first three columns providing the numerator
        coefficients and the last three providing the denominator
        coefficients.
    worN : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies (default is
        N=512).  Using a number that is fast for FFT computations can result
        in faster computations (see Notes of `freqz`).
        If an array_like, compute the response at the frequencies given (must
        be 1-D). These are in the same units as `fs`.
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        fs/2 (upper-half of unit-circle). If `whole` is True, compute
        frequencies from 0 to fs.
    fs : float, optional
        The sampling frequency of the digital system. Defaults to 2*pi
        radians/sample (so w is from 0 to pi).

        .. versionadded:: 1.2.0

    Returns
    -------
    w : ndarray
        The frequencies at which `h` was computed, in the same units as `fs`.
        By default, `w` is normalized to the range [0, pi) (radians/sample).
    h : ndarray
        The frequency response, as complex numbers.

    See Also
    --------
    freqz, sosfilt

    Notes
    -----
    .. versionadded:: 0.19.0
    """
    sos, n_sections = _validate_sos(sos)
    if n_sections == 0:
        raise ValueError('Cannot compute frequencies with no sections')

    # The cascade's response is the product of each section's response;
    # all sections share the same frequency grid, so `w` from the last
    # `freqz` call is the grid for the whole system.
    h = 1.
    for section in sos:
        w, h_section = freqz(section[:3], section[3:], worN=worN,
                             whole=whole, fs=fs)
        h *= h_section
    return w, h
def _cplxreal(z, tol=None):
    """
    Split into complex and real parts, combining conjugate pairs.

    The 1-D input vector `z` is split up into its complex (`zc`) and real (`zr`)
    elements. Every complex element must be part of a complex-conjugate pair,
    which are combined into a single number (with positive imaginary part) in
    the output. Two complex numbers are considered a conjugate pair if their
    real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.

    Parameters
    ----------
    z : array_like
        Vector of complex numbers to be sorted and split
    tol : float, optional
        Relative tolerance for testing realness and conjugate equality.
        Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for
        float64)

    Returns
    -------
    zc : ndarray
        Complex elements of `z`, with each pair represented by a single value
        having positive imaginary part, sorted first by real part, and then
        by magnitude of imaginary part. The pairs are averaged when combined
        to reduce error.
    zr : ndarray
        Real elements of `z` (those having imaginary part less than
        `tol` times their magnitude), sorted by value.

    Raises
    ------
    ValueError
        If there are any complex numbers in `z` for which a conjugate
        cannot be found.

    See Also
    --------
    _cplxpair

    Examples
    --------
    >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
    >>> zc, zr = _cplxreal(a)
    >>> print(zc)
    [ 1.+1.j  2.+1.j  2.+1.j  2.+2.j]
    >>> print(zr)
    [ 1.  3.  4.]
    """
    z = atleast_1d(z)
    if z.size == 0:
        # Empty in, empty out (both halves).
        return z, z
    elif z.ndim != 1:
        raise ValueError('_cplxreal only accepts 1-D input')

    if tol is None:
        # Get tolerance from dtype of input; the 1.0* promotes integer input
        # to a float dtype so finfo is valid.
        tol = 100 * np.finfo((1.0 * z).dtype).eps

    # Sort by real part, magnitude of imaginary part (speed up further sorting)
    z = z[np.lexsort((abs(z.imag), z.real))]

    # Split reals from conjugate pairs; "real" means the imaginary part is
    # negligible relative to the element's magnitude.
    real_indices = abs(z.imag) <= tol * abs(z)
    zr = z[real_indices].real

    if len(zr) == len(z):
        # Input is entirely real
        return array([]), zr

    # Split positive and negative halves of conjugates
    z = z[~real_indices]
    zp = z[z.imag > 0]
    zn = z[z.imag < 0]

    # A pairing is only possible if both halves have the same count.
    if len(zp) != len(zn):
        raise ValueError('Array contains complex value with no matching '
                         'conjugate.')

    # Find runs of (approximately) the same real part
    same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
    diffs = numpy.diff(concatenate(([0], same_real, [0])))
    run_starts = numpy.nonzero(diffs > 0)[0]
    run_stops = numpy.nonzero(diffs < 0)[0]

    # Sort each run by their imaginary parts, in-place on both halves so
    # corresponding entries of zp and zn line up as conjugate candidates.
    for i in range(len(run_starts)):
        start = run_starts[i]
        stop = run_stops[i] + 1
        for chunk in (zp[start:stop], zn[start:stop]):
            chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]

    # Check that negatives match positives
    if any(abs(zp - zn.conj()) > tol * abs(zn)):
        raise ValueError('Array contains complex value with no matching '
                         'conjugate.')

    # Average out numerical inaccuracy in real vs imag parts of pairs
    zc = (zp + zn.conj()) / 2

    return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-D input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-D')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
    r"""Return zero, pole, gain (z, p, k) representation from a numerator,
    denominator representation of a linear filter.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    If some values of `b` are too close to 0, they are removed. In that case,
    a BadCoefficients warning is emitted.

    The `b` and `a` arrays are interpreted as coefficients for positive,
    descending powers of the transfer function variable, i.e. the "positive
    powers" form common in controls engineering.  For the "negative powers"
    discrete-time form preferred in DSP, this is equivalent when the
    numerator and denominator orders are equal (as for all filters produced
    by the bilinear transform); otherwise convert to positive-powers form
    before finding poles and zeros.
    """
    b, a = normalize(b, a)
    # Force float arithmetic and make the denominator monic.
    lead = a[0]
    b = (b + 0.0) / lead
    a = (a + 0.0) / lead
    # The overall gain is the leading numerator coefficient after scaling.
    k = b[0]
    z = roots(b / k)
    p = roots(a)
    return z, p, k
def zpk2tf(z, p, k):
    """
    Return polynomial transfer function representation from zeros and poles

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.
    """
    z = atleast_1d(z)
    k = atleast_1d(k)

    if z.ndim > 1:
        # A matrix of zeros: build one numerator row per set of zeros,
        # broadcasting a scalar gain across all rows.
        temp = poly(z[0])
        b = np.empty((z.shape[0], z.shape[1] + 1), temp.dtype.char)
        if len(k) == 1:
            k = [k[0]] * z.shape[0]
        for i in range(z.shape[0]):
            b[i] = k[i] * poly(z[i])
    else:
        b = k * poly(z)
    a = atleast_1d(poly(p))

    def _real_if_conjugates(coeffs, rts):
        # A polynomial whose complex roots all come in conjugate pairs has
        # real coefficients; drop the spurious imaginary part in that case.
        # (Copied from numpy.poly, since we can't depend on a specific
        # version of numpy.)
        if not issubclass(coeffs.dtype.type, numpy.complexfloating):
            return coeffs
        rts = numpy.asarray(rts, complex)
        pos = numpy.compress(rts.imag > 0, rts)
        neg = numpy.conjugate(numpy.compress(rts.imag < 0, rts))
        if len(pos) == len(neg) and numpy.all(
                numpy.sort_complex(neg) == numpy.sort_complex(pos)):
            return coeffs.real.copy()
        return coeffs

    b = _real_if_conjugates(b, z)
    a = _real_if_conjugates(a, p)
    return b, a
def tf2sos(b, a, pairing=None, *, analog=False):
    """
    Return second-order sections from transfer function representation

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
        The method to use to combine pairs of poles and zeros into sections.
        See `zpk2sos` for information and restrictions on `pairing` and
        `analog` arguments.
    analog : bool, optional
        If True, system is analog, otherwise discrete.

        .. versionadded:: 1.8.0

    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    See Also
    --------
    zpk2sos, sosfilt

    Notes
    -----
    It is generally discouraged to convert from TF to SOS format, since doing
    so usually will not improve numerical precision errors.  Instead, consider
    designing filters in ZPK format and converting directly to SOS.

    .. versionadded:: 0.16.0
    """
    # Route through ZPK: factor the transfer function first, then pair the
    # resulting poles and zeros into second-order sections.
    z, p, k = tf2zpk(b, a)
    return zpk2sos(z, p, k, pairing=pairing, analog=analog)
def sos2tf(sos):
    """
    Return a single transfer function from a series of second-order sections

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.

    Notes
    -----
    .. versionadded:: 0.16.0
    """
    sos = np.asarray(sos)
    # Integer/bool input would keep polymul in integer arithmetic; promote
    # the accumulators to float64 instead.
    result_type = sos.dtype
    if result_type.kind in 'bui':
        result_type = np.float64

    b = np.ones(1, dtype=result_type)
    a = np.ones(1, dtype=result_type)
    # Multiplying the section polynomials together collapses the cascade
    # into a single numerator/denominator pair.
    for row in sos:
        b = np.polymul(b, row[:3])
        a = np.polymul(a, row[3:])
    return b, a
def sos2zpk(sos):
    """
    Return zeros, poles, and gain of a series of second-order sections.

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    The number of zeros and poles returned will be ``n_sections * 2``
    even if some of these are (effectively) zero.

    .. versionadded:: 0.16.0
    """
    sos = np.asarray(sos)
    n_sections = sos.shape[0]

    # Pre-fill with zeros: a section may contribute fewer than two
    # zeros/poles, in which case the remaining slots stay at the origin.
    zeros_out = np.zeros(n_sections * 2, np.complex128)
    poles_out = np.zeros(n_sections * 2, np.complex128)
    gain = 1.

    for idx in range(n_sections):
        zsec, psec, ksec = tf2zpk(sos[idx, :3], sos[idx, 3:])
        zeros_out[2 * idx:2 * idx + len(zsec)] = zsec
        poles_out[2 * idx:2 * idx + len(psec)] = psec
        gain *= ksec

    return zeros_out, poles_out, gain
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex', 'any')
order = np.argsort(np.abs(fro - to))
if which == 'any':
return order[0]
else:
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.nonzero(mask)[0][0]]
def _single_zpksos(z, p, k):
    """Build one 6-coefficient SOS row from up to two zeros and poles."""
    row = np.zeros(6)
    num, den = zpk2tf(z, p, k)
    # Right-align the (possibly shorter-than-3) polynomials inside the row:
    # columns 0-2 hold the numerator, columns 3-5 the denominator.
    row[3 - len(num):3] = num
    row[6 - len(den):6] = den
    return row
def zpk2sos(z, p, k, pairing=None, *, analog=False):
    """Return second-order sections from zeros, poles, and gain of a system

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.
    pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
        The method to use to combine pairs of poles and zeros into sections.
        If analog is False and pairing is None, pairing is set to 'nearest';
        if analog is True, pairing must be 'minimal', and is set to that if
        it is None.
    analog : bool, optional
        If True, system is analog, otherwise discrete.

        .. versionadded:: 1.8.0

    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    See Also
    --------
    sosfilt

    Notes
    -----
    The algorithm used to convert ZPK to SOS format is designed to
    minimize errors due to numerical precision issues. The pairing
    algorithm attempts to minimize the peak gain of each biquadratic
    section. This is done by pairing poles with the nearest zeros, starting
    with the poles closest to the unit circle for discrete-time systems, and
    poles closest to the imaginary axis for continuous-time systems.

    ``pairing='minimal'`` outputs may not be suitable for `sosfilt`,
    and ``analog=True`` outputs will never be suitable for `sosfilt`.

    *Algorithms*

    The steps in the ``pairing='nearest'``, ``pairing='keep_odd'``,
    and ``pairing='minimal'`` algorithms are mostly shared. The
    ``'nearest'`` algorithm attempts to minimize the peak gain, while
    ``'keep_odd'`` minimizes peak gain under the constraint that
    odd-order systems should retain one section as first order.
    ``'minimal'`` is similar to ``'keep_odd'``, but no additional
    poles or zeros are introduced

    The algorithm steps are as follows:

    As a pre-processing step for ``pairing='nearest'``,
    ``pairing='keep_odd'``, add poles or zeros to the origin as
    necessary to obtain the same number of poles and zeros for
    pairing. If ``pairing == 'nearest'`` and there are an odd number
    of poles, add an additional pole and a zero at the origin.

    The following steps are then iterated over until no more poles or
    zeros remain:

    1. Take the (next remaining) pole (complex or real) closest to the
       unit circle (or imaginary axis, for ``analog=True``) to
       begin a new filter section.

    2. If the pole is real and there are no other remaining real poles [#]_,
       add the closest real zero to the section and leave it as a first
       order section. Note that after this step we are guaranteed to be
       left with an even number of real poles, complex poles, real zeros,
       and complex zeros for subsequent pairing iterations.

    3. Else:

        1. If the pole is complex and the zero is the only remaining real
           zero*, then pair the pole with the *next* closest zero
           (guaranteed to be complex). This is necessary to ensure that
           there will be a real zero remaining to eventually create a
           first-order section (thus keeping the odd order).

        2. Else pair the pole with the closest remaining zero (complex or
           real).

        3. Proceed to complete the second-order section by adding another
           pole and zero to the current pole and zero in the section:

            1. If the current pole and zero are both complex, add their
               conjugates.

            2. Else if the pole is complex and the zero is real, add the
               conjugate pole and the next closest real zero.

            3. Else if the pole is real and the zero is complex, add the
               conjugate zero and the real pole closest to those zeros.

            4. Else (we must have a real pole and real zero) add the next
               real pole closest to the unit circle, and then add the real
               zero closest to that pole.

    .. [#] This conditional can only be met for specific odd-order inputs
           with the ``pairing = 'keep_odd'`` or ``'minimal'`` methods.

    .. versionadded:: 0.16.0

    Examples
    --------
    Design a 6th order low-pass elliptic digital filter for a system with a
    sampling rate of 8000 Hz that has a pass-band corner frequency of
    1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
    the attenuation in the stop-band should be at least 90 dB.

    In the following call to `ellip`, we could use ``output='sos'``,
    but for this example, we'll use ``output='zpk'``, and then convert
    to SOS format with `zpk2sos`:

    >>> from scipy import signal
    >>> import numpy as np
    >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')

    Now convert to SOS format.

    >>> sos = signal.zpk2sos(z, p, k)

    The coefficients of the numerators of the sections:

    >>> sos[:, :3]
    array([[0.0014152 , 0.00248677, 0.0014152 ],
           [1.        , 0.72976874, 1.        ],
           [1.        , 0.17607852, 1.        ]])

    The symmetry in the coefficients occurs because all the zeros are on the
    unit circle.

    The coefficients of the denominators of the sections:

    >>> sos[:, 3:]
    array([[ 1.        , -1.32544025,  0.46989976],
           [ 1.        , -1.26118294,  0.62625924],
           [ 1.        , -1.2570723 ,  0.8619958 ]])

    The next example shows the effect of the `pairing` option. We have a
    system with three poles and three zeros, so the SOS array will have
    shape (2, 6). The means there is, in effect, an extra pole and an extra
    zero at the origin in the SOS representation.

    >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
    >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])

    With ``pairing='nearest'`` (the default), we obtain

    >>> signal.zpk2sos(z1, p1, 1)
    array([[ 1.  ,  1.  ,  0.5 ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.  ,  1.  , -1.6 ,  0.65]])

    The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles
    {0, 0.75}, and the second section has the zeros {-1, 0} and poles
    {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
    have been assigned to different sections.

    With ``pairing='keep_odd'``, we obtain:

    >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
    array([[ 1.  ,  1.  ,  0.  ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])

    The extra pole and zero at the origin are in the same section.
    The first section is, in effect, a first-order section.

    With ``pairing='minimal'``, the first-order section doesn't have
    the extra pole and zero at the origin:

    >>> signal.zpk2sos(z1, p1, 1, pairing='minimal')
    array([[ 0.  ,  1.  ,  1.  ,  0.  ,  1.  , -0.75],
           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])

    """
    # TODO in the near future:
    # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
    # 2. Make `decimate` use `sosfilt` instead of `lfilter`.
    # 3. Make sosfilt automatically simplify sections to first order
    # when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
    # 4. Further optimizations of the section ordering / pole-zero pairing.
    # See the wiki for other potential issues.

    # Resolve and validate the pairing strategy; analog systems only
    # support 'minimal' (no padding with origin poles/zeros).
    if pairing is None:
        pairing = 'minimal' if analog else 'nearest'
    valid_pairings = ['nearest', 'keep_odd', 'minimal']
    if pairing not in valid_pairings:
        raise ValueError('pairing must be one of %s, not %s'
                         % (valid_pairings, pairing))
    if analog and pairing != 'minimal':
        raise ValueError('for analog zpk2sos conversion, '
                         'pairing must be "minimal"')

    # Degenerate system: pure gain, expressed as a single trivial section.
    if len(z) == len(p) == 0:
        if not analog:
            return np.array([[k, 0., 0., 1., 0., 0.]])
        else:
            return np.array([[0., 0., k, 0., 0., 1.]])

    if pairing != 'minimal':
        # ensure we have the same number of poles and zeros, and make copies
        p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
        z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
        n_sections = (max(len(p), len(z)) + 1) // 2

        if len(p) % 2 == 1 and pairing == 'nearest':
            # Odd order with 'nearest': pad one extra origin pole/zero so
            # every section can be second order.
            p = np.concatenate((p, [0.]))
            z = np.concatenate((z, [0.]))
        assert len(p) == len(z)
    else:
        if len(p) < len(z):
            raise ValueError('for analog zpk2sos conversion, '
                             'must have len(p)>=len(z)')
        n_sections = (len(p) + 1) // 2

    # Ensure we have complex conjugate pairs
    # (note that _cplxreal only gives us one element of each complex pair):
    z = np.concatenate(_cplxreal(z))
    p = np.concatenate(_cplxreal(p))
    if not np.isreal(k):
        raise ValueError('k must be real')
    k = k.real

    if not analog:
        # digital: "worst" is the closest to the unit circle
        def idx_worst(p):
            return np.argmin(np.abs(1 - np.abs(p)))
    else:
        # analog: "worst" is the closest to the imaginary axis
        def idx_worst(p):
            return np.argmin(np.abs(np.real(p)))

    sos = np.zeros((n_sections, 6))

    # Construct the system, reversing order so the "worst" are last
    for si in range(n_sections-1, -1, -1):
        # Select the next "worst" pole
        p1_idx = idx_worst(p)
        p1 = p[p1_idx]
        p = np.delete(p, p1_idx)

        # Pair that pole with a zero

        if np.isreal(p1) and np.isreal(p).sum() == 0:
            # Special case (1): last remaining real pole
            # (see step 2 of the algorithm description in the docstring)
            if pairing != 'minimal':
                z1_idx = _nearest_real_complex_idx(z, p1, 'real')
                z1 = z[z1_idx]
                z = np.delete(z, z1_idx)
                sos[si] = _single_zpksos([z1, 0], [p1, 0], 1)
            elif len(z) > 0:
                z1_idx = _nearest_real_complex_idx(z, p1, 'real')
                z1 = z[z1_idx]
                z = np.delete(z, z1_idx)
                sos[si] = _single_zpksos([z1], [p1], 1)
            else:
                sos[si] = _single_zpksos([], [p1], 1)

        elif (len(p) + 1 == len(z)
              and not np.isreal(p1)
              and np.isreal(p).sum() == 1
              and np.isreal(z).sum() == 1):

            # Special case (2): there's one real pole and one real zero
            # left, and an equal number of poles and zeros to pair up.
            # We *must* pair with a complex zero

            z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            sos[si] = _single_zpksos([z1, z1.conj()], [p1, p1.conj()], 1)

        else:
            # General case (step 3): pick the second pole of the section.
            if np.isreal(p1):
                prealidx = np.flatnonzero(np.isreal(p))
                # Second real pole: the "worst" of the remaining real poles.
                p2_idx = prealidx[idx_worst(p[prealidx])]
                p2 = p[p2_idx]
                p = np.delete(p, p2_idx)
            else:
                p2 = p1.conj()

            # find closest zero
            if len(z) > 0:
                z1_idx = _nearest_real_complex_idx(z, p1, 'any')
                z1 = z[z1_idx]
                z = np.delete(z, z1_idx)

                if not np.isreal(z1):
                    sos[si] = _single_zpksos([z1, z1.conj()], [p1, p2], 1)
                else:
                    if len(z) > 0:
                        z2_idx = _nearest_real_complex_idx(z, p1, 'real')
                        z2 = z[z2_idx]
                        assert np.isreal(z2)
                        z = np.delete(z, z2_idx)
                        sos[si] = _single_zpksos([z1, z2], [p1, p2], 1)
                    else:
                        sos[si] = _single_zpksos([z1], [p1, p2], 1)
            else:
                # no more zeros
                sos[si] = _single_zpksos([], [p1, p2], 1)

    assert len(p) == len(z) == 0  # we've consumed all poles and zeros
    del p, z

    # put gain in first sos
    sos[0][:3] *= k
    return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zero's so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2-D
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
"""
try:
# The statement can throw a ValueError if one
# of the numerators is a single digit and another
# is array-like e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2-D array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1-D.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1-D
array. A 2-D array if the input `num` is a 2-D array.
den: 1-D array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
Examples
--------
>>> from scipy.signal import normalize
Normalize the coefficients of the transfer function
``(3*s^2 - 2*s + 5) / (2*s^2 + 3*s + 1)``:
>>> b = [3, -2, 5]
>>> a = [2, 3, 1]
>>> normalize(b, a)
(array([ 1.5, -1. , 2.5]), array([1. , 1.5, 0.5]))
A warning is generated if, for example, the first coefficient of
`b` is 0. In the following example, the result is as expected:
>>> import warnings
>>> with warnings.catch_warnings(record=True) as w:
... num, den = normalize([0, 3, 6], [2, -5, 4])
>>> num
array([1.5, 3. ])
>>> den
array([ 1. , -2.5, 2. ])
>>> print(w[0].message)
Badly conditioned filter coefficients (numerator): the results may be meaningless
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
def lp2lp(b, a, wo=1.0):
    r"""
    Transform a lowpass filter prototype to a different cutoff frequency.

    Return an analog low-pass filter with cutoff frequency `wo` from an
    analog low-pass filter prototype with unity cutoff frequency, in
    transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    wo : float
        Desired cutoff, as angular frequency (e.g. rad/s).
        Defaults to no change.

    Returns
    -------
    b : array_like
        Numerator polynomial coefficients of the transformed low-pass filter.
    a : array_like
        Denominator polynomial coefficients of the transformed low-pass filter.

    See Also
    --------
    lp2hp, lp2bp, lp2bs, bilinear
    lp2lp_zpk

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s}{\omega_0}

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> lp = signal.lti([1.0], [1.0, 1.0])
    >>> lp2 = signal.lti(*signal.lp2lp(lp.num, lp.den, 2))
    >>> w, mag_lp, p_lp = lp.bode()
    >>> w, mag_lp2, p_lp2 = lp2.bode(w)

    >>> plt.plot(w, mag_lp, label='Lowpass')
    >>> plt.plot(w, mag_lp2, label='Transformed Lowpass')
    >>> plt.semilogx()
    >>> plt.grid(True)
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Magnitude [dB]')
    >>> plt.legend()
    """
    a, b = map(atleast_1d, (a, b))
    # Accept a scalar or a length-1 sequence for the cutoff.
    try:
        wo = float(wo)
    except TypeError:
        wo = float(wo[0])

    na = len(a)
    nb = len(b)
    m = max(na, nb)
    # Powers of wo in descending exponent order, matching the polynomials.
    scale = pow(wo, numpy.arange(m - 1, -1, -1))
    off_b = max(nb - na, 0)
    off_a = max(na - nb, 0)
    # Scale each coefficient by wo**degree (a common factor keeps the
    # leading denominator coefficient normalizable).
    new_b = b * scale[off_b] / scale[off_a:]
    new_a = a * scale[off_b] / scale[off_b:]
    return normalize(new_b, new_a)
def lp2hp(b, a, wo=1.0):
    r"""
    Transform a lowpass filter prototype to a highpass filter.

    Return an analog high-pass filter with cutoff frequency `wo` from an
    analog low-pass filter prototype with unity cutoff frequency, in
    transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    wo : float
        Desired cutoff, as angular frequency (e.g., rad/s).
        Defaults to no change.

    Returns
    -------
    b : array_like
        Numerator polynomial coefficients of the transformed high-pass filter.
    a : array_like
        Denominator polynomial coefficients of the transformed high-pass filter.

    See Also
    --------
    lp2lp, lp2bp, lp2bs, bilinear
    lp2hp_zpk

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{\omega_0}{s}

    This maintains symmetry of the lowpass and highpass responses on a
    logarithmic scale.

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> lp = signal.lti([1.0], [1.0, 1.0])
    >>> hp = signal.lti(*signal.lp2hp(lp.num, lp.den))
    >>> w, mag_lp, p_lp = lp.bode()
    >>> w, mag_hp, p_hp = hp.bode(w)

    >>> plt.plot(w, mag_lp, label='Lowpass')
    >>> plt.plot(w, mag_hp, label='Highpass')
    >>> plt.semilogx()
    >>> plt.grid(True)
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Magnitude [dB]')
    >>> plt.legend()
    """
    a, b = map(atleast_1d, (a, b))
    # Accept a scalar or a length-1 sequence for the cutoff.
    try:
        wo = float(wo)
    except TypeError:
        wo = float(wo[0])

    na = len(a)
    nb = len(b)
    m = max(na, nb)
    # Powers of wo in ascending exponent order; skip the work when wo == 1.
    if wo != 1:
        powers = pow(wo, numpy.arange(m))
    else:
        powers = numpy.ones(m, b.dtype.char)

    # s -> wo/s reverses each polynomial; pad the shorter one with
    # trailing zeros up to the longer length.
    if na >= nb:
        new_a = a[::-1] * powers
        new_b = resize(b, (na,))
        new_b[nb:] = 0.0
        new_b[:nb] = b[::-1] * powers[:nb]
    else:
        new_b = b[::-1] * powers
        new_a = resize(a, (nb,))
        new_a[na:] = 0.0
        new_a[:na] = a[::-1] * powers[:na]

    return normalize(new_b, new_a)
def lp2bp(b, a, wo=1.0, bw=1.0):
    r"""
    Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    wo : float
        Desired passband center, as angular frequency (e.g., rad/s).
        Defaults to no change.
    bw : float
        Desired passband width, as angular frequency (e.g., rad/s).
        Defaults to 1.

    Returns
    -------
    b : array_like
        Numerator polynomial coefficients of the transformed band-pass filter.
    a : array_like
        Denominator polynomial coefficients of the transformed band-pass filter.

    See Also
    --------
    lp2lp, lp2hp, lp2bs, bilinear
    lp2bp_zpk

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}

    This is the "wideband" transformation, producing a passband with
    geometric (log frequency) symmetry about `wo`.

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> lp = signal.lti([1.0], [1.0, 1.0])
    >>> bp = signal.lti(*signal.lp2bp(lp.num, lp.den))
    >>> w, mag_lp, p_lp = lp.bode()
    >>> w, mag_bp, p_bp = bp.bode(w)

    >>> plt.plot(w, mag_lp, label='Lowpass')
    >>> plt.plot(w, mag_bp, label='Bandpass')
    >>> plt.semilogx()
    >>> plt.grid(True)
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Magnitude [dB]')
    >>> plt.legend()
    """
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1
    N = len(b) - 1
    artype = mintypecode((a, b))
    ma = max(N, D)
    wosq = wo * wo

    def _bp_transform(coef, deg):
        # Expand one polynomial of degree `deg` under the substitution
        # s -> (s^2 + wo^2) / (s * bw); the result has degree deg + ma.
        out_deg = deg + ma
        out = numpy.empty(out_deg + 1, artype)
        for j in range(out_deg + 1):
            acc = 0.0
            for i in range(deg + 1):
                for k in range(i + 1):
                    # Collect every binomial term contributing to s**j.
                    if ma - i + 2 * k == j:
                        acc += (comb(i, k) * coef[deg - i]
                                * wosq ** (i - k) / bw ** i)
            out[out_deg - j] = acc
        return out

    return normalize(_bp_transform(b, N), _bp_transform(a, D))
def lp2bs(b, a, wo=1.0, bw=1.0):
    r"""
    Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    wo : float
        Desired stopband center, as angular frequency (e.g., rad/s).
        Defaults to no change.
    bw : float
        Desired stopband width, as angular frequency (e.g., rad/s).
        Defaults to 1.

    Returns
    -------
    b : array_like
        Numerator polynomial coefficients of the transformed band-stop filter.
    a : array_like
        Denominator polynomial coefficients of the transformed band-stop filter.

    See Also
    --------
    lp2lp, lp2hp, lp2bp, bilinear
    lp2bs_zpk

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}

    This is the "wideband" transformation, producing a stopband with
    geometric (log frequency) symmetry about `wo`.

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> lp = signal.lti([1.0], [1.0, 1.5])
    >>> bs = signal.lti(*signal.lp2bs(lp.num, lp.den))
    >>> w, mag_lp, p_lp = lp.bode()
    >>> w, mag_bs, p_bs = bs.bode(w)

    >>> plt.plot(w, mag_lp, label='Lowpass')
    >>> plt.plot(w, mag_bs, label='Bandstop')
    >>> plt.semilogx()
    >>> plt.grid(True)
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Magnitude [dB]')
    >>> plt.legend()
    """
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1
    N = len(b) - 1
    artype = mintypecode((a, b))
    M = max(N, D)
    wosq = wo * wo

    def _bs_transform(coef, deg):
        # Expand one polynomial of degree `deg` under the substitution
        # s -> (s * bw) / (s^2 + wo^2); the result always has degree 2*M.
        out_deg = 2 * M
        out = numpy.empty(out_deg + 1, artype)
        for j in range(out_deg + 1):
            acc = 0.0
            for i in range(deg + 1):
                for k in range(M - i + 1):
                    # Collect every binomial term contributing to s**j.
                    if i + 2 * k == j:
                        acc += (comb(M - i, k) * coef[deg - i]
                                * wosq ** (M - i - k) * bw ** i)
            out[out_deg - j] = acc
        return out

    return normalize(_bs_transform(b, N), _bs_transform(a, D))
def bilinear(b, a, fs=1.0):
    r"""
    Return a digital IIR filter from an analog one using a bilinear transform.

    Transform a set of poles and zeros from the analog s-plane to the digital
    z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for
    ``s``, maintaining the shape of the frequency response.

    Parameters
    ----------
    b : array_like
        Numerator of the analog filter transfer function.
    a : array_like
        Denominator of the analog filter transfer function.
    fs : float
        Sample rate, as ordinary frequency (e.g., hertz). No prewarping is
        done in this function.

    Returns
    -------
    b : ndarray
        Numerator of the transformed digital filter transfer function.
    a : ndarray
        Denominator of the transformed digital filter transfer function.

    See Also
    --------
    lp2lp, lp2hp, lp2bp, lp2bs
    bilinear_zpk

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np

    >>> fs = 100
    >>> bf = 2 * np.pi * np.array([7, 13])
    >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass',
    ...                                   analog=True))
    >>> filtz = signal.lti(*signal.bilinear(filts.num, filts.den, fs))
    >>> wz, hz = signal.freqz(filtz.num, filtz.den)
    >>> ws, hs = signal.freqs(filts.num, filts.den, worN=fs*wz)

    >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)),
    ...              label=r'$|H_z(e^{j \omega})|$')
    >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)),
    ...              label=r'$|H(j \omega)|$')
    >>> plt.legend()
    >>> plt.xlabel('Frequency [Hz]')
    >>> plt.ylabel('Magnitude [dB]')
    >>> plt.grid(True)
    """
    fs = float(fs)
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1  # denominator degree
    N = len(b) - 1  # numerator degree
    artype = float
    M = max([N, D])
    # Both output polynomials have degree M after the substitution.
    Np = M
    Dp = M
    bprime = numpy.empty(Np + 1, artype)
    aprime = numpy.empty(Dp + 1, artype)
    # Expand b(s) term by term under s -> 2*fs*(z-1)/(z+1), multiplying
    # through by (z+1)**M: each s**i term contributes binomial expansions
    # of (z-1)**i (index k, sign (-1)**k) and (z+1)**(M-i) (index l).
    for j in range(Np + 1):
        val = 0.0
        for i in range(N + 1):
            for k in range(i + 1):
                for l in range(M - i + 1):
                    if k + l == j:
                        val += (comb(i, k) * comb(M - i, l) * b[N - i] *
                                pow(2 * fs, i) * (-1) ** k)
        bprime[j] = real(val)
    # Same expansion applied to the denominator a(s).
    for j in range(Dp + 1):
        val = 0.0
        for i in range(D + 1):
            for k in range(i + 1):
                for l in range(M - i + 1):
                    if k + l == j:
                        val += (comb(i, k) * comb(M - i, l) * a[D - i] *
                                pow(2 * fs, i) * (-1) ** k)
        aprime[j] = real(val)

    return normalize(bprime, aprime)
def _validate_gpass_gstop(gpass, gstop):
if gpass <= 0.0:
raise ValueError("gpass should be larger than 0.0")
elif gstop <= 0.0:
raise ValueError("gstop should be larger than 0.0")
elif gpass > gstop:
raise ValueError("gpass should be smaller than gstop")
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba',
              fs=None):
    """Complete IIR digital and analog filter design.

    Given passband and stopband frequencies and gains, construct an analog or
    digital IIR filter of minimum order for a given basic type. Return the
    output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
    sections ('sos') form.

    Parameters
    ----------
    wp, ws : float or array like, shape (2,)
        Passband and stopband edge frequencies. Possible values are scalars
        (for lowpass and highpass filters) or ranges (for bandpass and bandstop
        filters).
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. For example:

            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]

        For analog filters, `wp` and `ws` are angular frequencies
        (e.g., rad/s). Note, that for bandpass and bandstop filters passband
        must lie strictly inside stopband or vice versa.
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design:

            - Butterworth   : 'butter'
            - Chebyshev I   : 'cheby1'
            - Chebyshev II  : 'cheby2'
            - Cauer/elliptic: 'ellip'

    output : {'ba', 'zpk', 'sos'}, optional
        Filter form of the output:

            - second-order sections (recommended): 'sos'
            - numerator/denominator (default)    : 'ba'
            - pole-zero                          : 'zpk'

        In general the second-order sections ('sos') form is
        recommended because inferring the coefficients for the
        numerator/denominator form ('ba') suffers from numerical
        instabilities. For reasons of backward compatibility the default
        form is the numerator/denominator form ('ba'), where the 'b'
        and the 'a' in 'ba' refer to the commonly used names of the
        coefficients used.

        Note: Using the second-order sections form ('sos') is sometimes
        associated with additional computational costs: for
        data-intense use cases it is therefore recommended to also
        investigate the numerator/denominator form ('ba').
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    butter : Filter design using order and critical points
    cheby1, cheby2, ellip, bessel
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies

    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import matplotlib.ticker

    >>> wp = 0.2
    >>> ws = 0.3
    >>> gpass = 1
    >>> gstop = 40

    >>> system = signal.iirdesign(wp, ws, gpass, gstop)
    >>> w, h = signal.freqz(*system)

    >>> fig, ax1 = plt.subplots()
    >>> ax1.set_title('Digital filter frequency response')
    >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
    >>> ax1.set_ylabel('Amplitude [dB]', color='b')
    >>> ax1.set_xlabel('Frequency [rad/sample]')
    >>> ax1.grid(True)
    >>> ax1.set_ylim([-120, 20])
    >>> ax2 = ax1.twinx()
    >>> angles = np.unwrap(np.angle(h))
    >>> ax2.plot(w, angles, 'g')
    >>> ax2.set_ylabel('Angle (radians)', color='g')
    >>> ax2.grid(True)
    >>> ax2.axis('tight')
    >>> ax2.set_ylim([-6, 1])
    >>> nticks = 8
    >>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))
    >>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))

    """
    # Look up the order-selection function for this filter family.
    try:
        ordfunc = filter_dict[ftype][1]
    except KeyError as e:
        raise ValueError("Invalid IIR filter type: %s" % ftype) from e
    except IndexError as e:
        raise ValueError(("%s does not have order selection. Use "
                          "iirfilter function.") % ftype) from e

    _validate_gpass_gstop(gpass, gstop)

    wp = atleast_1d(wp)
    ws = atleast_1d(ws)

    if wp.shape[0] != ws.shape[0] or wp.shape not in [(1,), (2,)]:
        # BUG FIX: the two string fragments previously concatenated to
        # "...each, andthe same shape..." (missing space after "and").
        raise ValueError("wp and ws must have one or two elements each, and "
                         "the same shape, got %s and %s"
                         % (wp.shape, ws.shape))

    if any(wp <= 0) or any(ws <= 0):
        raise ValueError("Values for wp, ws must be greater than 0")

    # Digital frequencies must lie below Nyquist (1 in normalized units,
    # fs/2 when a sampling frequency is given).
    if not analog:
        if fs is None:
            if any(wp >= 1) or any(ws >= 1):
                raise ValueError("Values for wp, ws must be less than 1")
        elif any(wp >= fs/2) or any(ws >= fs/2):
            raise ValueError("Values for wp, ws must be less than fs/2"
                             " (fs={} -> fs/2={})".format(fs, fs/2))

    # Band filters: one band edge pair must nest strictly inside the other.
    if wp.shape[0] == 2:
        if not ((ws[0] < wp[0] and wp[1] < ws[1]) or
                (wp[0] < ws[0] and ws[1] < wp[1])):
            raise ValueError("Passband must lie strictly inside stopband"
                             " or vice versa")

    # Encode the filter kind: 1/2 for scalar specs (low/high), 3/4 for
    # two-element specs (bandstop/bandpass), chosen by edge ordering.
    band_type = 2 * (len(wp) - 1)
    band_type += 1
    if wp[0] >= ws[0]:
        band_type += 1

    btype = {1: 'lowpass', 2: 'highpass',
             3: 'bandstop', 4: 'bandpass'}[band_type]

    # Minimum order and natural frequency for the spec, then design.
    N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs)
    return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
                     ftype=ftype, output=output, fs=fs)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
              ftype='butter', output='ba', fs=None):
    """
    IIR digital and analog filter design given order and critical points.

    Design an Nth-order digital or analog filter and return the filter
    coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.

        For digital filters, `Wn` are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`Wn` is thus in
        half-cycles / sample.)

        For analog filters, `Wn` is an angular frequency (e.g., rad/s).

        When Wn is a length-2 sequence, ``Wn[0]`` must be less than ``Wn[1]``.
    rp : float, optional
        For Chebyshev and elliptic filters, provides the maximum ripple
        in the passband. (dB)
    rs : float, optional
        For Chebyshev and elliptic filters, provides the minimum attenuation
        in the stop band. (dB)
    btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
        The type of filter. Default is 'bandpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design:

        - Butterworth : 'butter'
        - Chebyshev I : 'cheby1'
        - Chebyshev II : 'cheby2'
        - Cauer/elliptic: 'ellip'
        - Bessel/Thomson: 'bessel'
    output : {'ba', 'zpk', 'sos'}, optional
        Filter form of the output:

        - second-order sections (recommended): 'sos'
        - numerator/denominator (default) : 'ba'
        - pole-zero : 'zpk'

        In general the second-order sections ('sos') form is
        recommended because inferring the coefficients for the
        numerator/denominator form ('ba') suffers from numerical
        instabilities. For reasons of backward compatibility the default
        form is the numerator/denominator form ('ba'), where the 'b'
        and the 'a' in 'ba' refer to the commonly used names of the
        coefficients used.

        Note: Using the second-order sections form ('sos') is sometimes
        associated with additional computational costs: for
        data-intense use cases it is therefore recommended to also
        investigate the numerator/denominator form ('ba').
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    butter : Filter design using order and critical points
    cheby1, cheby2, ellip, bessel
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord, ellipord
    iirdesign : General filter design using passband and stopband spec

    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.

    Examples
    --------
    Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to
    200 Hz and plot the frequency response:

    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60,
    ...                         btype='band', analog=True, ftype='cheby2')
    >>> w, h = signal.freqs(b, a, 1000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(1, 1, 1)
    >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5)))
    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
    >>> ax.set_xlabel('Frequency [Hz]')
    >>> ax.set_ylabel('Amplitude [dB]')
    >>> ax.axis((10, 1000, -100, 10))
    >>> ax.grid(which='both', axis='both')
    >>> plt.show()

    Create a digital filter with the same properties, in a system with
    sampling rate of 2000 Hz, and plot the frequency response. (Second-order
    sections implementation is required to ensure stability of a filter of
    this order):

    >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band',
    ...                        analog=False, ftype='cheby2', fs=2000,
    ...                        output='sos')
    >>> w, h = signal.sosfreqz(sos, 2000, fs=2000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(1, 1, 1)
    >>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5)))
    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
    >>> ax.set_xlabel('Frequency [Hz]')
    >>> ax.set_ylabel('Amplitude [dB]')
    >>> ax.axis((10, 1000, -100, 10))
    >>> ax.grid(which='both', axis='both')
    >>> plt.show()
    """
    ftype, btype, output = (x.lower() for x in (ftype, btype, output))
    Wn = asarray(Wn)
    if fs is not None:
        if analog:
            raise ValueError("fs cannot be specified for an analog filter")
        # Normalize from `fs` units to half-cycles/sample (Nyquist == 1),
        # the convention used by the rest of this function.
        Wn = 2*Wn/fs
    if numpy.any(Wn <= 0):
        raise ValueError("filter critical frequencies must be greater than 0")
    if Wn.size > 1 and not Wn[0] < Wn[1]:
        raise ValueError("Wn[0] must be less than Wn[1]")
    # Resolve user-facing aliases (e.g. 'band', 'low') to canonical names.
    try:
        btype = band_dict[btype]
    except KeyError as e:
        raise ValueError("'%s' is an invalid bandtype for filter." % btype) from e
    # First entry of filter_dict[ftype] is the analog lowpass-prototype
    # constructor for this filter family.
    try:
        typefunc = filter_dict[ftype][0]
    except KeyError as e:
        raise ValueError("'%s' is not a valid basic IIR filter." % ftype) from e
    if output not in ['ba', 'zpk', 'sos']:
        raise ValueError("'%s' is not a valid output form." % output)
    if rp is not None and rp < 0:
        raise ValueError("passband ripple (rp) must be positive")
    if rs is not None and rs < 0:
        raise ValueError("stopband attenuation (rs) must be positive")
    # Get analog lowpass prototype (each family needs different ripple args)
    if typefunc == buttap:
        z, p, k = typefunc(N)
    elif typefunc == besselap:
        z, p, k = typefunc(N, norm=bessel_norms[ftype])
    elif typefunc == cheb1ap:
        if rp is None:
            raise ValueError("passband ripple (rp) must be provided to "
                             "design a Chebyshev I filter.")
        z, p, k = typefunc(N, rp)
    elif typefunc == cheb2ap:
        if rs is None:
            raise ValueError("stopband attenuation (rs) must be provided to "
                             "design an Chebyshev II filter.")
        z, p, k = typefunc(N, rs)
    elif typefunc == ellipap:
        if rs is None or rp is None:
            raise ValueError("Both rp and rs must be provided to design an "
                             "elliptic filter.")
        z, p, k = typefunc(N, rp, rs)
    else:
        raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
    # Pre-warp frequencies for digital filter design
    if not analog:
        if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
            if fs is not None:
                raise ValueError("Digital filter critical frequencies must "
                                 f"be 0 < Wn < fs/2 (fs={fs} -> fs/2={fs/2})")
            raise ValueError("Digital filter critical frequencies "
                             "must be 0 < Wn < 1")
        fs = 2.0
        # tan() compensates for the bilinear transform's nonlinear frequency
        # mapping so the critical frequencies land where requested.
        warped = 2 * fs * tan(pi * Wn / fs)
    else:
        warped = Wn
    # transform to lowpass, bandpass, highpass, or bandstop
    if btype in ('lowpass', 'highpass'):
        if numpy.size(Wn) != 1:
            raise ValueError('Must specify a single critical frequency Wn '
                             'for lowpass or highpass filter')
        if btype == 'lowpass':
            z, p, k = lp2lp_zpk(z, p, k, wo=warped)
        elif btype == 'highpass':
            z, p, k = lp2hp_zpk(z, p, k, wo=warped)
    elif btype in ('bandpass', 'bandstop'):
        try:
            # Bandwidth and geometric center frequency of the band edges.
            bw = warped[1] - warped[0]
            wo = sqrt(warped[0] * warped[1])
        except IndexError as e:
            raise ValueError('Wn must specify start and stop frequencies for '
                             'bandpass or bandstop filter') from e
        if btype == 'bandpass':
            z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw)
        elif btype == 'bandstop':
            z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw)
    else:
        raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
    # Find discrete equivalent if necessary
    if not analog:
        z, p, k = bilinear_zpk(z, p, k, fs=fs)
    # Transform to proper out type (pole-zero, state-space, numer-denom)
    if output == 'zpk':
        return z, p, k
    elif output == 'ba':
        return zpk2tf(z, p, k)
    elif output == 'sos':
        return zpk2sos(z, p, k, analog=analog)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
def bilinear_zpk(z, p, k, fs):
    r"""
    Return a digital IIR filter from an analog one using a bilinear transform.

    Map zeros, poles, and gain from the analog s-plane to the digital
    z-plane with Tustin's method, which substitutes
    ``2*fs*(z-1) / (z+1)`` for ``s``, maintaining the shape of the
    frequency response.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    fs : float
        Sample rate, as ordinary frequency (e.g., hertz). No prewarping is
        done in this function.

    Returns
    -------
    z : ndarray
        Zeros of the transformed digital filter transfer function.
    p : ndarray
        Poles of the transformed digital filter transfer function.
    k : float
        System gain of the transformed digital filter.

    See Also
    --------
    lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk
    bilinear

    Notes
    -----
    .. versionadded:: 1.1.0
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    excess = _relative_degree(z, p)

    two_fs = 2.0 * fs

    # Map each finite zero and pole: s -> 2*fs*(z-1)/(z+1) inverts to
    # z = (2*fs + s) / (2*fs - s).
    z_digital = (two_fs + z) / (two_fs - z)
    p_digital = (two_fs + p) / (two_fs - p)

    # Zeros that were at infinity land at z = -1 (the Nyquist frequency).
    z_digital = append(z_digital, -ones(excess))

    # Rescale the gain to compensate for the change of variables.
    k_digital = k * real(prod(two_fs - z) / prod(two_fs - p))

    return z_digital, p_digital, k_digital
def lp2lp_zpk(z, p, k, wo=1.0):
    r"""
    Transform a lowpass filter prototype to a different frequency.

    Return an analog low-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency,
    using zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    wo : float
        Desired cutoff, as angular frequency (e.g., rad/s).
        Defaults to no change.

    Returns
    -------
    z : ndarray
        Zeros of the transformed low-pass filter transfer function.
    p : ndarray
        Poles of the transformed low-pass filter transfer function.
    k : float
        System gain of the transformed low-pass filter.

    See Also
    --------
    lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
    lp2lp

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s}{\omega_0}

    .. versionadded:: 1.1.0
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    cutoff = float(wo)  # avoid int wraparound in the power below
    excess = _relative_degree(z, p)

    # Scale every singularity radially from the origin to move the cutoff.
    z_scaled = cutoff * z
    p_scaled = cutoff * p

    # Each shifted pole divides the gain by wo and each shifted zero
    # multiplies it; k * wo**excess cancels the net change.
    k_scaled = k * cutoff**excess

    return z_scaled, p_scaled, k_scaled
def lp2hp_zpk(z, p, k, wo=1.0):
    r"""
    Transform a lowpass filter prototype to a highpass filter.

    Return an analog high-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency,
    using zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    wo : float
        Desired cutoff, as angular frequency (e.g., rad/s).
        Defaults to no change.

    Returns
    -------
    z : ndarray
        Zeros of the transformed high-pass filter transfer function.
    p : ndarray
        Poles of the transformed high-pass filter transfer function.
    k : float
        System gain of the transformed high-pass filter.

    See Also
    --------
    lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
    lp2hp

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{\omega_0}{s}

    which maintains symmetry of the lowpass and highpass responses on a
    logarithmic scale.

    .. versionadded:: 1.1.0
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    cutoff = float(wo)
    excess = _relative_degree(z, p)

    # s -> wo/s: invert each singularity radially about the unit circle
    # (LPF -> HPF) while scaling to the requested cutoff frequency.
    z_inverted = cutoff / z
    p_inverted = cutoff / p

    # Zeros the lowpass prototype had at infinity end up at the origin.
    z_inverted = append(z_inverted, zeros(excess))

    # Undo the gain change introduced by the inversion.
    k_inverted = k * real(prod(-z) / prod(-p))

    return z_inverted, p_inverted, k_inverted
def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0):
    r"""
    Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, using zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    wo : float
        Desired passband center, as angular frequency (e.g., rad/s).
        Defaults to no change.
    bw : float
        Desired passband width, as angular frequency (e.g., rad/s).
        Defaults to 1.

    Returns
    -------
    z : ndarray
        Zeros of the transformed band-pass filter transfer function.
    p : ndarray
        Poles of the transformed band-pass filter transfer function.
    k : float
        System gain of the transformed band-pass filter.

    See Also
    --------
    lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear
    lp2bp

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}

    This is the "wideband" transformation, producing a passband with
    geometric (log frequency) symmetry about `wo`.

    .. versionadded:: 1.1.0
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    center = float(wo)
    width = float(bw)
    excess = _relative_degree(z, p)

    # Scale the prototype to the desired bandwidth, and cast to complex
    # so the square roots below can leave the real axis instead of NaN.
    z_scaled = (z * width/2).astype(complex)
    p_scaled = (p * width/2).astype(complex)

    # Each baseband singularity splits into a pair shifted to +wo and -wo.
    z_offset = sqrt(z_scaled**2 - center**2)
    p_offset = sqrt(p_scaled**2 - center**2)
    z_bp = concatenate((z_scaled + z_offset, z_scaled - z_offset))
    p_bp = concatenate((p_scaled + p_offset, p_scaled - p_offset))

    # Of the zeros at infinity, `excess` move to the origin; the rest
    # stay at infinity, as required for a bandpass response.
    z_bp = append(z_bp, zeros(excess))

    # Cancel the gain change caused by the frequency scaling.
    k_bp = k * width**excess

    return z_bp, p_bp, k_bp
def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0):
    r"""
    Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter with center frequency `wo` and
    stopband width `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, using zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    wo : float
        Desired stopband center, as angular frequency (e.g., rad/s).
        Defaults to no change.
    bw : float
        Desired stopband width, as angular frequency (e.g., rad/s).
        Defaults to 1.

    Returns
    -------
    z : ndarray
        Zeros of the transformed band-stop filter transfer function.
    p : ndarray
        Poles of the transformed band-stop filter transfer function.
    k : float
        System gain of the transformed band-stop filter.

    See Also
    --------
    lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear
    lp2bs

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}

    This is the "wideband" transformation, producing a stopband with
    geometric (log frequency) symmetry about `wo`.

    .. versionadded:: 1.1.0
    """
    z = atleast_1d(z)
    p = atleast_1d(p)
    center = float(wo)
    width = float(bw)
    excess = _relative_degree(z, p)

    # First invert to a highpass with the desired bandwidth; cast to
    # complex so the square roots below can leave the real axis.
    z_inverted = ((width/2) / z).astype(complex)
    p_inverted = ((width/2) / p).astype(complex)

    # Each singularity splits into a pair shifted to +wo and -wo.
    z_offset = sqrt(z_inverted**2 - center**2)
    p_offset = sqrt(p_inverted**2 - center**2)
    z_bs = concatenate((z_inverted + z_offset, z_inverted - z_offset))
    p_bs = concatenate((p_inverted + p_offset, p_inverted - p_offset))

    # Zeros that were at infinity move to the center of the stopband,
    # as conjugate pairs at +/- j*wo.
    z_bs = append(z_bs, full(excess, +1j*center))
    z_bs = append(z_bs, full(excess, -1j*center))

    # Undo the gain change introduced by the inversion.
    k_bs = k * real(prod(-z) / prod(-p))

    return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba', fs=None):
    """
    Butterworth digital and analog filter design.

    Design an Nth-order digital or analog Butterworth filter and return
    the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter. For 'bandpass' and 'bandstop' filters,
        the resulting order of the final second-order sections ('sos')
        matrix is ``2*N``, with `N` the number of biquad sections
        of the desired system.
    Wn : array_like
        The critical frequency or frequencies: a scalar for lowpass and
        highpass filters, a length-2 sequence for bandpass and bandstop
        filters. For a Butterworth filter, this is the point at which the
        gain drops to 1/sqrt(2) that of the passband (the "-3 dB point").

        For digital filters, if `fs` is not specified, `Wn` units are
        normalized from 0 to 1, where 1 is the Nyquist frequency (`Wn` is
        thus in half cycles / sample and defined as 2*critical frequencies
        / `fs`). If `fs` is specified, `Wn` is in the same units as `fs`.

        For analog filters, `Wn` is an angular frequency (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba' for backwards
        compatibility, but 'sos' should be used for general-purpose
        filtering.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    buttord, buttap

    Notes
    -----
    The Butterworth filter has maximally flat frequency response in the
    passband. The ``'sos'`` output parameter was added in 0.16.0.

    If the transfer function form ``[b, a]`` is requested, numerical
    problems can occur since the conversion between roots and the
    polynomial coefficients is a numerically sensitive operation, even
    for N >= 4.

    .. warning::
        Designing high-order and narrowband IIR filters in TF form can
        result in unstable or incorrect filtering due to floating point
        numerical precision issues. Consider inspecting output filter
        characteristics with `freqz` or designing the filters with
        second-order sections via ``output='sos'``.
    """
    # All of the real work happens in the generic designer, with the
    # Butterworth prototype selected via ftype.
    return iirfilter(N, Wn, ftype='butter', btype=btype,
                     analog=analog, output=output, fs=fs)
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None):
    """
    Chebyshev type I digital and analog filter design.

    Design an Nth-order digital or analog Chebyshev type I filter and
    return the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For Type I filters, this is the point in the transition band at
        which the gain first drops below -`rp`.

        For digital filters, `Wn` are in the same units as `fs`. By
        default, `fs` is 2 half-cycles/sample, so these are normalized
        from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in
        half-cycles / sample.)

        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba' for backwards
        compatibility, but 'sos' should be used for general-purpose
        filtering.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    cheb1ord, cheb1ap

    Notes
    -----
    The Chebyshev type I filter maximizes the rate of cutoff between the
    frequency response's passband and stopband, at the expense of ripple
    in the passband and increased ringing in the step response. Type I
    filters roll off faster than Type II (`cheby2`), but Type II filters
    do not have any ripple in the passband.

    The equiripple passband has N maxima or minima (for example, a
    5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain
    is unity for odd-order filters, or -rp dB for even-order filters.

    The ``'sos'`` output parameter was added in 0.16.0.
    """
    # Delegate to the generic designer with the Chebyshev I prototype;
    # rp is required by that family.
    return iirfilter(N, Wn, rp=rp, ftype='cheby1', btype=btype,
                     analog=analog, output=output, fs=fs)
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None):
    """
    Chebyshev type II digital and analog filter design.

    Design an Nth-order digital or analog Chebyshev type II filter and
    return the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rs : float
        The minimum attenuation required in the stop band.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For Type II filters, this is the point in the transition band at
        which the gain first reaches -`rs`.

        For digital filters, `Wn` are in the same units as `fs`. By
        default, `fs` is 2 half-cycles/sample, so these are normalized
        from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in
        half-cycles / sample.)

        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba' for backwards
        compatibility, but 'sos' should be used for general-purpose
        filtering.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    cheb2ord, cheb2ap

    Notes
    -----
    The Chebyshev type II filter maximizes the rate of cutoff between the
    frequency response's passband and stopband, at the expense of ripple
    in the stopband and increased ringing in the step response. Type II
    filters do not roll off as fast as Type I (`cheby1`).

    The ``'sos'`` output parameter was added in 0.16.0.
    """
    # Delegate to the generic designer with the Chebyshev II prototype;
    # rs is required by that family.
    return iirfilter(N, Wn, rs=rs, ftype='cheby2', btype=btype,
                     analog=analog, output=output, fs=fs)
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None):
    """
    Elliptic (Cauer) digital and analog filter design.

    Design an Nth-order digital or analog elliptic filter and return
    the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband.
        Specified in decibels, as a positive number.
    rs : float
        The minimum attenuation required in the stop band.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For elliptic filters, this is the point in the transition band at
        which the gain first drops below -`rp`.

        For digital filters, `Wn` are in the same units as `fs`. By
        default, `fs` is 2 half-cycles/sample, so these are normalized
        from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in
        half-cycles / sample.)

        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba' for backwards
        compatibility, but 'sos' should be used for general-purpose
        filtering.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    ellipord, ellipap

    Notes
    -----
    Also known as Cauer or Zolotarev filters, the elliptical filter
    maximizes the rate of transition between the frequency response's
    passband and stopband, at the expense of ripple in both, and
    increased ringing in the step response.

    As `rp` approaches 0, the elliptical filter becomes a Chebyshev
    type II filter (`cheby2`). As `rs` approaches 0, it becomes a
    Chebyshev type I filter (`cheby1`). As both approach 0, it becomes a
    Butterworth filter (`butter`).

    The equiripple passband has N maxima or minima (for example, a
    5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain
    is unity for odd-order filters, or -rp dB for even-order filters.

    The ``'sos'`` output parameter was added in 0.16.0.
    """
    # Delegate to the generic designer with the elliptic prototype;
    # both rp and rs are required by that family.
    return iirfilter(N, Wn, rp=rp, rs=rs, ftype='elliptic', btype=btype,
                     analog=analog, output=output, fs=fs)
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase',
           fs=None):
    """
    Bessel/Thomson digital and analog filter design.
    Design an Nth-order digital or analog Bessel filter and return the
    filter coefficients.
    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies (defined
        by the `norm` parameter).
        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
        For digital filters, `Wn` are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`Wn` is thus in
        half-cycles / sample.)
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned. (See Notes.)
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output:  numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba'.
    norm : {'phase', 'delay', 'mag'}, optional
        Critical frequency normalization:
        ``phase``
            The filter is normalized such that the phase response reaches its
            midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
            both low-pass and high-pass filters, so this is the
            "phase-matched" case.
            The magnitude response asymptotes are the same as a Butterworth
            filter of the same order with a cutoff of `Wn`.
            This is the default, and matches MATLAB's implementation.
        ``delay``
            The filter is normalized such that the group delay in the passband
            is 1/`Wn` (e.g., seconds). This is the "natural" type obtained by
            solving Bessel polynomials.
        ``mag``
            The filter is normalized such that the gain magnitude is -3 dB at
            angular frequency `Wn`.
        .. versionadded:: 0.18.0
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.
    Notes
    -----
    Also known as a Thomson filter, the analog Bessel filter has maximally
    flat group delay and maximally linear phase response, with very little
    ringing in the step response. [1]_
    The Bessel is inherently an analog filter. This function generates digital
    Bessel filters using the bilinear transform, which does not preserve the
    phase response of the analog filter. As such, it is only approximately
    correct at frequencies below about fs/4. To get maximally-flat group
    delay at higher frequencies, the analog Bessel filter must be transformed
    using phase-preserving techniques.
    See `besselap` for implementation details and references.
    The ``'sos'`` output parameter was added in 0.16.0.
    References
    ----------
    .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
           Characteristics", Proceedings of the Institution of Electrical
           Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
    Examples
    --------
    Plot the phase-normalized frequency response, showing the relationship
    to the Butterworth's cutoff frequency (green):
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> b, a = signal.butter(4, 100, 'low', analog=True)
    >>> w, h = signal.freqs(b, a)
    >>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
    >>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
    >>> w, h = signal.freqs(b, a)
    >>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
    >>> plt.title('Bessel filter magnitude response (with Butterworth)')
    >>> plt.xlabel('Frequency [radians / second]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.margins(0, 0.1)
    >>> plt.grid(which='both', axis='both')
    >>> plt.axvline(100, color='green')  # cutoff frequency
    >>> plt.show()
    and the phase midpoint:
    >>> plt.figure()
    >>> plt.semilogx(w, np.unwrap(np.angle(h)))
    >>> plt.axvline(100, color='green')  # cutoff frequency
    >>> plt.axhline(-np.pi, color='red')  # phase midpoint
    >>> plt.title('Bessel filter phase response')
    >>> plt.xlabel('Frequency [radians / second]')
    >>> plt.ylabel('Phase [radians]')
    >>> plt.margins(0, 0.1)
    >>> plt.grid(which='both', axis='both')
    >>> plt.show()
    Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
    >>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
    >>> w, h = signal.freqs(b, a)
    >>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
    >>> plt.axhline(-3, color='red')  # -3 dB magnitude
    >>> plt.axvline(10, color='green')  # cutoff frequency
    >>> plt.title('Magnitude-normalized Bessel filter frequency response')
    >>> plt.xlabel('Frequency [radians / second]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.margins(0, 0.1)
    >>> plt.grid(which='both', axis='both')
    >>> plt.show()
    Plot the delay-normalized filter, showing the maximally-flat group delay
    at 0.1 seconds:
    >>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
    >>> w, h = signal.freqs(b, a)
    >>> plt.figure()
    >>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
    >>> plt.axhline(0.1, color='red')  # 0.1 seconds group delay
    >>> plt.title('Bessel filter group delay')
    >>> plt.xlabel('Frequency [radians / second]')
    >>> plt.ylabel('Group delay [seconds]')
    >>> plt.margins(0, 0.1)
    >>> plt.grid(which='both', axis='both')
    >>> plt.show()
    """
    # Delegate to the generic IIR designer; the norm string selects among
    # the 'bessel_phase' / 'bessel_delay' / 'bessel_mag' prototype variants.
    return iirfilter(N, Wn, btype=btype, analog=analog,
                     output=output, ftype='bessel_'+norm, fs=fs)
def maxflat():
    """Placeholder for a maximally-flat filter designer (not implemented)."""
    return None
def yulewalk():
    """Placeholder for a Yule-Walker filter designer (not implemented)."""
    return None
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
    """
    Band Stop Objective Function for order minimization.
    Returns the non-integer order for an analog band stop filter.
    Parameters
    ----------
    wp : scalar
        Edge of passband `passb`.
    ind : int, {0, 1}
        Index specifying which `passb` edge to vary (0 or 1).
    passb : ndarray
        Two element sequence of fixed passband edges.
    stopb : ndarray
        Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in stopband in dB.
    type : {'butter', 'cheby', 'ellip'}
        Type of filter.
    Returns
    -------
    n : scalar
        Filter order (possibly non-integer).
    """
    _validate_gpass_gstop(gpass, gstop)
    # Work on a copy so the caller's passband edges are not mutated when the
    # trial edge `wp` is substituted at index `ind`.
    passbC = passb.copy()
    passbC[ind] = wp
    # Selectivity of the equivalent low-pass prototype for this trial edge;
    # the band-stop transform gives two candidates, keep the tighter one.
    nat = (stopb * (passbC[0] - passbC[1]) /
           (stopb ** 2 - passbC[0] * passbC[1]))
    nat = min(abs(nat))
    if type == 'butter':
        # Butterworth order formula (non-integer): n = log ratio of
        # attenuation factors over 2*log(selectivity).
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
    elif type == 'cheby':
        # Chebyshev order formula uses inverse hyperbolic cosine.
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
    elif type == 'ellip':
        # Elliptic (Cauer) degree equation via complete elliptic integrals
        # of the first kind for modulus and complementary modulus.
        GSTOP = 10 ** (0.1 * gstop)
        GPASS = 10 ** (0.1 * gpass)
        arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
        arg0 = 1.0 / nat
        d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
        d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
        n = (d0[0] * d1[1] / (d0[1] * d1[0]))
    else:
        raise ValueError("Incorrect type: %s" % type)
    return n
def buttord(wp, ws, gpass, gstop, analog=False, fs=None):
    """Butterworth filter order selection.
    Return the order of the lowest order digital or analog Butterworth filter
    that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.
    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
        half-cycles / sample.) For example:
            - Lowpass:   wp = 0.2,          ws = 0.3
            - Highpass:  wp = 0.3,          ws = 0.2
            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    ord : int
        The lowest order for a Butterworth filter which meets specs.
    wn : ndarray or float
        The Butterworth natural frequency (i.e. the "3dB frequency"). Should
        be used with `butter` to give filter results. If `fs` is specified,
        this is in the same units, and `fs` must also be passed to `butter`.
    See Also
    --------
    butter : Filter design using order and critical points
    cheb1ord : Find order and critical points from passband and stopband spec
    cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    Examples
    --------
    Design an analog bandpass filter with passband within 3 dB from 20 to
    50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
    Plot its frequency response, showing the passband and stopband
    constraints in gray.
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
    >>> b, a = signal.butter(N, Wn, 'band', True)
    >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.title('Butterworth bandpass filter fit to constraints')
    >>> plt.xlabel('Frequency [radians / second]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([1,  14,  14,   1], [-40, -40, 99, 99], '0.9', lw=0) # stop
    >>> plt.fill([20, 20,  50,  50], [-99, -3, -3, -99], '0.9', lw=0) # pass
    >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
    >>> plt.axis([10, 100, -60, 3])
    >>> plt.show()
    """
    _validate_gpass_gstop(gpass, gstop)
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    if fs is not None:
        if analog:
            raise ValueError("fs cannot be specified for an analog filter")
        # Normalize edges to half-cycles/sample (Nyquist == 1).
        wp = 2*wp/fs
        ws = 2*ws/fs
    # Encode band type: 1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass.
    # Scalar edges give 1 or 2; length-2 edges give 3 or 4, disambiguated by
    # which edge comes first.
    filter_type = 2 * (len(wp) - 1)
    filter_type += 1
    if wp[0] >= ws[0]:
        filter_type += 1
    # Pre-warp frequencies for digital filter design
    if not analog:
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)
    else:
        passb = wp * 1.0
        stopb = ws * 1.0
    # Map the band spec to the selectivity `nat` of the equivalent low-pass
    # prototype.
    if filter_type == 1:            # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:          # stop
        # Tighten each passband edge toward the stopband to find the worst
        # case; band_stop_obj returns the (non-integer) order for a trial
        # edge, and fminbound minimizes it.  `passb` is updated in place.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                 args=(0, passb, stopb, gpass, gstop,
                                       'butter'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop,
                                       'butter'),
                                 disp=0)
        passb[1] = wp1
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif filter_type == 4:          # pass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    nat = min(abs(nat))
    # Butterworth order formula, rounded up to the next integer.
    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
    # Find the Butterworth natural frequency WN (or the "3dB" frequency")
    # to give exactly gpass at passb.
    try:
        W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
    except ZeroDivisionError:
        W0 = 1.0
        warnings.warn("Order is zero...check input parameters.",
                      RuntimeWarning, 2)
    # now convert this frequency back from lowpass prototype
    # to the original analog filter
    if filter_type == 1:  # low
        WN = W0 * passb
    elif filter_type == 2:  # high
        WN = passb / W0
    elif filter_type == 3:  # stop
        # Invert the band-stop transform: quadratic in WN with two roots.
        WN = numpy.empty(2, float)
        discr = sqrt((passb[1] - passb[0]) ** 2 +
                     4 * W0 ** 2 * passb[0] * passb[1])
        WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
        WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
        WN = numpy.sort(abs(WN))
    elif filter_type == 4:  # pass
        # Invert the band-pass transform for +/- W0.
        W0 = numpy.array([-W0, W0], float)
        WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
              sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
                   passb[0] * passb[1]))
        WN = numpy.sort(abs(WN))
    else:
        raise ValueError("Bad type: %s" % filter_type)
    # Un-warp back to normalized digital frequency if needed.
    if not analog:
        wn = (2.0 / pi) * arctan(WN)
    else:
        wn = WN
    if len(wn) == 1:
        wn = wn[0]
    if fs is not None:
        # Convert back to the caller's frequency units.
        wn = wn*fs/2
    return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None):
    """Chebyshev type I filter order selection.
    Return the order of the lowest order digital or analog Chebyshev Type I
    filter that loses no more than `gpass` dB in the passband and has at
    least `gstop` dB attenuation in the stopband.
    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
        half-cycles / sample.)  For example:
            - Lowpass:   wp = 0.2,          ws = 0.3
            - Highpass:  wp = 0.3,          ws = 0.2
            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type I filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby1` to give filter results. If `fs` is specified,
        this is in the same units, and `fs` must also be passed to `cheby1`.
    See Also
    --------
    cheby1 : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    Examples
    --------
    Design a digital lowpass filter such that the passband is within 3 dB up
    to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
    frequency response, showing the passband and stopband constraints in gray.
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
    >>> b, a = signal.cheby1(N, 3, Wn, 'low')
    >>> w, h = signal.freqz(b, a)
    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
    >>> plt.title('Chebyshev I lowpass filter fit to constraints')
    >>> plt.xlabel('Normalized frequency')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
    >>> plt.fill([0.3, 0.3,   2,   2], [ 9, -40, -40,  9], '0.9', lw=0) # pass
    >>> plt.axis([0.08, 1, -60, 3])
    >>> plt.show()
    """
    _validate_gpass_gstop(gpass, gstop)
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    if fs is not None:
        if analog:
            raise ValueError("fs cannot be specified for an analog filter")
        # Normalize edges to half-cycles/sample (Nyquist == 1).
        wp = 2*wp/fs
        ws = 2*ws/fs
    # Encode band type: 1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass.
    filter_type = 2 * (len(wp) - 1)
    if wp[0] < ws[0]:
        filter_type += 1
    else:
        filter_type += 2
    # Pre-warp frequencies for digital filter design
    if not analog:
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)
    else:
        passb = wp * 1.0
        stopb = ws * 1.0
    # Map the band spec to the selectivity `nat` of the equivalent low-pass
    # prototype.
    if filter_type == 1:           # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:     # stop
        # Tighten each passband edge toward the stopband (worst case) by
        # minimizing the non-integer order; `passb` is updated in place.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                 args=(0, passb, stopb, gpass, gstop, 'cheby'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop, 'cheby'),
                                 disp=0)
        passb[1] = wp1
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif filter_type == 4:  # pass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    nat = min(abs(nat))
    # Chebyshev order formula (arccosh form), rounded up.
    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
                   arccosh(nat)))
    # Natural frequencies are just the passband edges
    if not analog:
        wn = (2.0 / pi) * arctan(passb)
    else:
        wn = passb
    if len(wn) == 1:
        wn = wn[0]
    if fs is not None:
        # Convert back to the caller's frequency units.
        wn = wn*fs/2
    return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None):
    """Chebyshev type II filter order selection.
    Return the order of the lowest order digital or analog Chebyshev Type II
    filter that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.
    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
        half-cycles / sample.)  For example:
            - Lowpass:   wp = 0.2,          ws = 0.3
            - Highpass:  wp = 0.3,          ws = 0.2
            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type II filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby2` to give filter results. If `fs` is specified,
        this is in the same units, and `fs` must also be passed to `cheby2`.
    See Also
    --------
    cheby2 : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    Examples
    --------
    Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to
    0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
    0.6*(fs/2). Plot its frequency response, showing the passband and
    stopband constraints in gray.
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
    >>> b, a = signal.cheby2(N, 60, Wn, 'stop')
    >>> w, h = signal.freqz(b, a)
    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
    >>> plt.title('Chebyshev II bandstop filter fit to constraints')
    >>> plt.xlabel('Normalized frequency')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, .1, .1, .01], [-3,  -3, -99, -99], '0.9', lw=0) # stop
    >>> plt.fill([.2,  .2, .5,  .5], [ 9, -60, -60,   9], '0.9', lw=0) # pass
    >>> plt.fill([.6,  .6,  2,   2], [-99, -3,  -3, -99], '0.9', lw=0) # stop
    >>> plt.axis([0.06, 1, -80, 3])
    >>> plt.show()
    """
    _validate_gpass_gstop(gpass, gstop)
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    if fs is not None:
        if analog:
            raise ValueError("fs cannot be specified for an analog filter")
        # Normalize edges to half-cycles/sample (Nyquist == 1).
        wp = 2*wp/fs
        ws = 2*ws/fs
    # Encode band type: 1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass.
    filter_type = 2 * (len(wp) - 1)
    if wp[0] < ws[0]:
        filter_type += 1
    else:
        filter_type += 2
    # Pre-warp frequencies for digital filter design
    if not analog:
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)
    else:
        passb = wp * 1.0
        stopb = ws * 1.0
    # Map the band spec to the selectivity `nat` of the equivalent low-pass
    # prototype.
    if filter_type == 1:           # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:     # stop
        # Tighten each passband edge toward the stopband (worst case) by
        # minimizing the non-integer order; `passb` is updated in place.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                 args=(0, passb, stopb, gpass, gstop, 'cheby'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop, 'cheby'),
                                 disp=0)
        passb[1] = wp1
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif filter_type == 4:  # pass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    nat = min(abs(nat))
    # Chebyshev order formula (arccosh form), rounded up.
    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
                   arccosh(nat)))
    # Find frequency where analog response is -gpass dB.
    # Then convert back from low-pass prototype to the original filter.
    new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
    new_freq = 1.0 / new_freq
    if filter_type == 1:
        nat = passb / new_freq
    elif filter_type == 2:
        nat = passb * new_freq
    elif filter_type == 3:
        # Invert the band-stop transform; the second root follows from the
        # product constraint nat[0]*nat[1] == passb[0]*passb[1].
        nat = numpy.empty(2, float)
        nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
                  sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
                       passb[1] * passb[0]))
        nat[1] = passb[1] * passb[0] / nat[0]
    elif filter_type == 4:
        # Invert the band-pass transform; same product constraint as above.
        nat = numpy.empty(2, float)
        nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
                  sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
                       passb[1] * passb[0]))
        nat[1] = passb[0] * passb[1] / nat[0]
    # Un-warp back to normalized digital frequency if needed.
    if not analog:
        wn = (2.0 / pi) * arctan(nat)
    else:
        wn = nat
    if len(wn) == 1:
        wn = wn[0]
    if fs is not None:
        # Convert back to the caller's frequency units.
        wn = wn*fs/2
    return ord, wn
_POW10_LOG10 = np.log(10)
def _pow10m1(x):
"""10 ** x - 1 for x near 0"""
return np.expm1(_POW10_LOG10 * x)
def ellipord(wp, ws, gpass, gstop, analog=False, fs=None):
    """Elliptic (Cauer) filter order selection.
    Return the order of the lowest order digital or analog elliptic filter
    that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.
    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
        half-cycles / sample.) For example:
            - Lowpass:   wp = 0.2,          ws = 0.3
            - Highpass:  wp = 0.3,          ws = 0.2
            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    ord : int
        The lowest order for an Elliptic (Cauer) filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `ellip` to give filter results. If `fs` is specified,
        this is in the same units, and `fs` must also be passed to `ellip`.
    See Also
    --------
    ellip : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    Examples
    --------
    Design an analog highpass filter such that the passband is within 3 dB
    above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
    frequency response, showing the passband and stopband constraints in gray.
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
    >>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
    >>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.title('Elliptical highpass filter fit to constraints')
    >>> plt.xlabel('Frequency [radians / second]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([.1, 10,  10,  .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
    >>> plt.fill([30, 30, 1e9, 1e9], [-99,  -3,  -3, -99], '0.9', lw=0) # pass
    >>> plt.axis([1, 300, -80, 3])
    >>> plt.show()
    """
    _validate_gpass_gstop(gpass, gstop)
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    if fs is not None:
        if analog:
            raise ValueError("fs cannot be specified for an analog filter")
        # Normalize edges to half-cycles/sample (Nyquist == 1).
        wp = 2*wp/fs
        ws = 2*ws/fs
    # Encode band type: 1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass.
    filter_type = 2 * (len(wp) - 1)
    filter_type += 1
    if wp[0] >= ws[0]:
        filter_type += 1
    # Pre-warp frequencies for digital filter design
    if not analog:
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)
    else:
        passb = wp * 1.0
        stopb = ws * 1.0
    # Map the band spec to the selectivity `nat` of the equivalent low-pass
    # prototype.
    if filter_type == 1:           # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:     # stop
        # Tighten each passband edge toward the stopband (worst case) by
        # minimizing the non-integer order; `passb` is updated in place.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                 args=(0, passb, stopb, gpass, gstop, 'ellip'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop, 'ellip'),
                                 disp=0)
        passb[1] = wp1
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif filter_type == 4:  # pass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    nat = min(abs(nat))
    # Cauer degree equation: ratio of complete elliptic integrals of the
    # first kind for modulus/complementary modulus; _pow10m1 keeps the
    # ripple terms accurate for small gpass.
    arg1_sq = _pow10m1(0.1 * gpass) / _pow10m1(0.1 * gstop)
    arg0 = 1.0 / nat
    d0 = special.ellipk(arg0 ** 2), special.ellipkm1(arg0 ** 2)
    d1 = special.ellipk(arg1_sq), special.ellipkm1(arg1_sq)
    ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
    # Natural frequencies are just the passband edges (un-warped if digital).
    if not analog:
        wn = arctan(passb) * 2.0 / pi
    else:
        wn = passb
    if len(wn) == 1:
        wn = wn[0]
    if fs is not None:
        # Convert back to the caller's frequency units.
        wn = wn*fs/2
    return ord, wn
def buttap(N):
    """Return (z,p,k) for analog prototype of Nth-order Butterworth filter.

    The filter will have an angular (e.g., rad/s) cutoff frequency of 1.

    See Also
    --------
    butter : Filter design function using this prototype
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    # There are no finite zeros.  The poles sit evenly spaced on the left
    # half of the unit circle; the odd step through `indices` puts the
    # middle value at 0 so odd orders get an exactly real pole.
    indices = numpy.arange(-N + 1, N, 2)
    poles = -numpy.exp(1j * pi * indices / (2 * N))
    return numpy.array([]), poles, 1
def cheb1ap(N, rp):
    """
    Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.

    The returned prototype has `rp` decibels of passband ripple, with its
    angular (e.g. rad/s) cutoff frequency normalized to 1 -- the point at
    which the gain first drops below ``-rp``.

    See Also
    --------
    cheby1 : Filter design function using this prototype
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero error
        # Even order filters have DC gain of -rp dB
        return numpy.array([]), numpy.array([]), 10**(-rp/20)
    zeros = numpy.array([])
    # Ripple factor (epsilon) and the hyperbolic parameter of the pole
    # ellipse derived from it.
    ripple = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
    mu = 1.0 / N * arcsinh(1 / ripple)
    # Arrange poles in an ellipse on the left half of the S-plane
    indices = numpy.arange(-N+1, N, 2)
    angles = pi * indices / (2*N)
    poles = -sinh(mu + 1j*angles)
    gain = numpy.prod(-poles, axis=0).real
    if N % 2 == 0:
        # Even orders peak above unity; scale so the DC gain is -rp dB.
        gain = gain / sqrt(1 + ripple * ripple)
    return zeros, poles, gain
def cheb2ap(N, rs):
    """
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.

    The returned filter prototype has `rs` decibels of ripple in the stopband.
    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first reaches ``-rs``.

    See Also
    --------
    cheby2 : Filter design function using this prototype
    """
    # NOTE: the docstring previously mislabeled this as a "type I" prototype;
    # this function builds the inverse-Chebyshev (type II) prototype.
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        return numpy.array([]), numpy.array([]), 1
    # Ripple factor (epsilon)
    de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
    mu = arcsinh(1.0 / de) / N
    if N % 2:
        # Odd order: skip the index that would put a zero at infinity.
        m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
                               numpy.arange(2, N, 2)))
    else:
        m = numpy.arange(-N+1, N, 2)
    # Zeros lie on the imaginary axis at the Chebyshev nodes' reciprocals.
    z = -conjugate(1j / sin(m * pi / (2.0 * N)))
    # Poles around the unit circle like Butterworth
    p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
    # Warp into Chebyshev II
    p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
    p = 1.0 / p
    # Gain normalizes the response to 1 at DC.
    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
    return z, p, k
EPSILON = 2e-16
# number of terms in solving degree equation
_ELLIPDEG_MMAX = 7
def _ellipdeg(n, m1):
"""Solve degree equation using nomes
Given n, m1, solve
n * K(m) / K'(m) = K1(m1) / K1'(m1)
for m
See [1], Eq. (49)
References
----------
.. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design",
https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
"""
K1 = special.ellipk(m1)
K1p = special.ellipkm1(m1)
q1 = np.exp(-np.pi * K1p / K1)
q = q1 ** (1/n)
mnum = np.arange(_ELLIPDEG_MMAX + 1)
mden = np.arange(1, _ELLIPDEG_MMAX + 2)
num = np.sum(q ** (mnum * (mnum+1)))
den = 1 + 2 * np.sum(q ** (mden**2))
return 16 * q * (num / den) ** 4
# Maximum number of iterations in Landen transformation recursion
# sequence. 10 is conservative; unit tests pass with 4, Orfanidis
# (see _arc_jac_cn [1]) suggests 5.
_ARC_JAC_SN_MAXITER = 10
def _arc_jac_sn(w, m):
"""Inverse Jacobian elliptic sn
Solve for z in w = sn(z, m)
Parameters
----------
w : complex scalar
argument
m : scalar
modulus; in interval [0, 1]
See [1], Eq. (56)
References
----------
.. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design",
https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
"""
def _complement(kx):
# (1-k**2) ** 0.5; the expression below
# works for small kx
return ((1 - kx) * (1 + kx)) ** 0.5
k = m ** 0.5
if k > 1:
return np.nan
elif k == 1:
return np.arctanh(w)
ks = [k]
niter = 0
while ks[-1] != 0:
k_ = ks[-1]
k_p = _complement(k_)
ks.append((1 - k_p) / (1 + k_p))
niter += 1
if niter > _ARC_JAC_SN_MAXITER:
raise ValueError('Landen transformation not converging')
K = np.prod(1 + np.array(ks[1:])) * np.pi/2
wns = [w]
for kn, knext in zip(ks[:-1], ks[1:]):
wn = wns[-1]
wnext = (2 * wn /
((1 + knext) * (1 + _complement(kn * wn))))
wns.append(wnext)
u = 2 / np.pi * np.arcsin(wns[-1])
z = K * u
return z
def _arc_jac_sc1(w, m):
    """Real inverse Jacobian sc, with complementary modulus

    Solve for z in w = sc(z, 1-m)

    w - real scalar
    m - modulus

    From [1], sc(z, m) = -i * sn(i * z, 1 - m)

    References
    ----------
    # noqa: E501
    .. [1] https://functions.wolfram.com/EllipticFunctions/JacobiSC/introductions/JacobiPQs/ShowAll.html,
       "Representations through other Jacobi functions"
    """
    rotated = 1j * w
    result = _arc_jac_sn(rotated, m)
    # For real w the result must be purely imaginary; a significant real
    # residue indicates a numerical breakdown upstream.
    if abs(result.real) > 1e-14:
        raise ValueError
    return result.imag
def ellipap(N, rp, rs):
    """Return (z,p,k) of Nth-order elliptic analog lowpass filter.
    The filter is a normalized prototype that has `rp` decibels of ripple
    in the passband and a stopband `rs` decibels down.
    The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first drops below ``-rp``.
    See Also
    --------
    ellip : Filter design function using this prototype
    References
    ----------
    .. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
           Chapters 5 and 12.
    .. [2] Orfanidis, "Lecture Notes on Elliptic Filter Design",
           https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        # Even order filters have DC gain of -rp dB
        return numpy.array([]), numpy.array([]), 10**(-rp/20)
    elif N == 1:
        # First order reduces to a single real pole; _pow10m1 keeps the
        # ripple term accurate for small rp.
        p = -sqrt(1.0 / _pow10m1(0.1 * rp))
        k = -p
        z = []
        return asarray(z), asarray(p), k
    # Squared ripple factor and the squared "capture" modulus k1 of the
    # degree equation ([2], Eq. (47)).
    eps_sq = _pow10m1(0.1 * rp)
    eps = np.sqrt(eps_sq)
    ck1_sq = eps_sq / _pow10m1(0.1 * rs)
    if ck1_sq == 0:
        raise ValueError("Cannot design a filter with given rp and rs"
                         " specifications.")
    # Complete elliptic integrals K(k1^2) and K'(k1^2).
    val = special.ellipk(ck1_sq), special.ellipkm1(ck1_sq)
    # Solve the degree equation for the modulus m ([2], Eq. (49)).
    m = _ellipdeg(N, ck1_sq)
    capk = special.ellipk(m)
    j = numpy.arange(1 - N % 2, N, 2)
    jj = len(j)
    # Jacobi elliptic functions at the node points; zeros come from sn.
    [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
    # Drop numerically-zero sn values (would place a zero at infinity).
    snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
    z = 1.0 / (sqrt(m) * snew)
    z = 1j * z
    z = numpy.concatenate((z, conjugate(z)))
    # v0 parameter from the inverse sc function ([2], Eq. (65)).
    r = _arc_jac_sc1(1. / eps, ck1_sq)
    v0 = capk * r / (N * val[0])
    [sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
    p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
    if N % 2:
        # Odd order: keep the lone real pole unpaired; conjugate only the
        # genuinely complex poles.
        newp = numpy.compress(abs(p.imag) > EPSILON *
                              numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
                                                   axis=0).real),
                              p, axis=-1)
        p = numpy.concatenate((p, conjugate(newp)))
    else:
        p = numpy.concatenate((p, conjugate(p)))
    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
    if N % 2 == 0:
        # Even orders peak above unity; scale so the DC gain is -rp dB.
        k = k / numpy.sqrt(1 + eps_sq)
    return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
def _bessel_poly(n, reverse=False):
    """
    Return the coefficients of Bessel polynomial of degree `n`.

    If `reverse` is true, a reverse Bessel polynomial is output.

    Output is a list of coefficients:
    [1]                   = 1
    [1,  1]               = 1*s   +  1
    [1,  3,  3]           = 1*s^2 +  3*s   +  3
    [1,  6, 15, 15]       = 1*s^3 +  6*s^2 + 15*s   +  15
    [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
    etc.

    Output is a Python list of arbitrary precision long ints, so n is only
    limited by your hardware's memory.

    Sequence is http://oeis.org/A001498, and output can be confirmed to
    match http://oeis.org/A001498/b001498.txt
    """
    if abs(int(n)) != n:
        raise ValueError("Polynomial order must be a nonnegative integer")
    n = int(n)  # np.int32 doesn't work, for instance

    # Coefficient of s^(n-k) is (2n-k)! / (2^(n-k) * k! * (n-k)!);
    # the falling factorial supplies (2n-k)!/(n-k)! without overflow.
    coeffs = [_falling_factorial(2*n - k, n) // (2**(n - k) * math.factorial(k))
              for k in range(n + 1)]
    return coeffs[::-1] if reverse else coeffs
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
    """
    Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
    modified Bessel function of the second kind
    """
    if N == 0:
        return asarray([])

    # Approximate starting points for the iteration
    x0 = _campos_zeros(N)

    # exp(1/x)*K_{N+0.5}(1/x) has the same zeros as the Nth-order ordinary
    # Bessel polynomial y_N(x); `kve` is the exponentially scaled K, so the
    # exp factor is built in.
    def f(x):
        return special.kve(N+0.5, 1/x)

    # First derivative of above
    def fp(x):
        return (special.kve(N-0.5, 1/x)/(2*x**2) -
                special.kve(N+0.5, 1/x)/(x**2) +
                special.kve(N+1.5, 1/x)/(2*x**2))

    # Refine all starting points simultaneously
    roots = _aberth(f, fp, x0)

    # Polish each root individually with Newton's method
    roots = asarray([optimize.newton(f, root, fp, tol=1e-15)
                     for root in roots])

    # Average with the reversed conjugate set to enforce exact
    # complex-conjugate symmetry
    roots = np.mean((roots, roots[::-1].conj()), 0)

    # The zeros of y_N must sum to -1; use that as an accuracy check
    if abs(np.sum(roots) + 1) > 1e-15:
        raise RuntimeError('Generated zeros are inaccurate')

    return roots
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
    """
    Return (z,p,k) for analog prototype of an Nth-order Bessel filter.

    Parameters
    ----------
    N : int
        The order of the filter.
    norm : {'phase', 'delay', 'mag'}, optional
        Frequency normalization:

        ``phase``
            The filter is normalized such that the phase response reaches its
            midpoint at an angular (e.g., rad/s) cutoff frequency of 1. This
            happens for both low-pass and high-pass filters, so this is the
            "phase-matched" case. [6]_

            The magnitude response asymptotes are the same as a Butterworth
            filter of the same order with a cutoff of `Wn`.

            This is the default, and matches MATLAB's implementation.

        ``delay``
            The filter is normalized such that the group delay in the passband
            is 1 (e.g., 1 second). This is the "natural" type obtained by
            solving Bessel polynomials

        ``mag``
            The filter is normalized such that the gain magnitude is -3 dB at
            angular frequency 1. This is called "frequency normalization" by
            Bond. [1]_

        .. versionadded:: 0.18.0

    Returns
    -------
    z : ndarray
        Zeros of the transfer function. Is always an empty array.
    p : ndarray
        Poles of the transfer function.
    k : scalar
        Gain of the transfer function. For phase-normalized, this is always 1.

    See Also
    --------
    bessel : Filter design function using this prototype

    Notes
    -----
    To find the pole locations, approximate starting points are generated [2]_
    for the zeros of the ordinary Bessel polynomial [3]_, then the
    Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
    calculate more accurate zeros, and these locations are then inverted about
    the unit circle.

    References
    ----------
    .. [1] C.R. Bond, "Bessel Filter Constants",
           http://www.crbond.com/papers/bsf.pdf
    .. [2] Campos and Calderon, "Approximate closed-form formulas for the
           zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
    .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
           Characteristics", Proceedings of the Institution of Electrical
           Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
    .. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
           Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
           April 1973
    .. [5] Ehrlich, "A modified Newton method for polynomials", Communications
           of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
           :DOI:`10.1145/363067.363115`
    .. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
           Others", RaneNote 147, 1998,
           https://www.ranecommercial.com/legacy/note147.html
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    N = int(N)  # calculation below doesn't always fit in np.int64
    if N == 0:
        p = []
        k = 1
    else:
        # Find roots of reverse Bessel polynomial
        p = 1/_bessel_zeros(N)
        # Constant term of the reverse Bessel polynomial, (2N)!/(2^N * N!);
        # this is the DC gain of the delay-normalized filter (exact integer)
        a_last = _falling_factorial(2*N, N) // 2**N
        # Shift them to a different normalization if required
        if norm in ('delay', 'mag'):
            # Normalized for group delay of 1
            k = a_last
            if norm == 'mag':
                # -3 dB magnitude point is at 1 rad/sec
                norm_factor = _norm_factor(p, k)
                p /= norm_factor
                # Rescale the gain so DC gain remains 1 after the pole shift
                k = norm_factor**-N * a_last
        elif norm == 'phase':
            # Phase-matched (1/2 max phase shift at 1 rad/sec)
            # Asymptotes are same as Butterworth filter
            # (equivalent to dividing each pole by a_last**(1/N), computed
            # in log space since a_last can be a huge integer)
            p *= 10**(-math.log10(a_last)/N)
            k = 1
        else:
            raise ValueError('normalization not understood')
    return asarray([]), asarray(p, dtype=complex), float(k)
def iirnotch(w0: float, Q: float, fs: float = 2.0):
    """
    Design second-order IIR notch digital filter.

    A notch filter is a band-stop filter with a narrow bandwidth
    (high quality factor). It rejects a narrow frequency band and
    leaves the rest of the spectrum little changed.

    Parameters
    ----------
    w0 : float
        Frequency to remove from a signal. If `fs` is specified, this is in
        the same units as `fs`. By default, it is a normalized scalar that must
        satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
        sampling frequency.
    Q : float
        Quality factor. Dimensionless parameter that characterizes
        notch filter -3 dB bandwidth ``bw`` relative to its center
        frequency, ``Q = w0/bw``.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (``b``) and denominator (``a``) polynomials
        of the IIR filter.

    See Also
    --------
    iirpeak

    Notes
    -----
    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
           Prentice-Hall, 1996

    Examples
    --------
    Design and plot filter to remove the 60 Hz component from a
    signal sampled at 200 Hz, using a quality factor Q = 30

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np

    >>> fs = 200.0  # Sample frequency (Hz)
    >>> f0 = 60.0  # Frequency to be removed from signal (Hz)
    >>> Q = 30.0  # Quality factor
    >>> # Design notch filter
    >>> b, a = signal.iirnotch(f0, Q, fs)

    >>> # Frequency response
    >>> freq, h = signal.freqz(b, a, fs=fs)
    >>> # Plot
    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
    >>> ax[0].set_title("Frequency Response")
    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
    >>> ax[0].set_xlim([0, 100])
    >>> ax[0].set_ylim([-25, 10])
    >>> ax[0].grid(True)
    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
    >>> ax[1].set_xlabel("Frequency (Hz)")
    >>> ax[1].set_xlim([0, 100])
    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
    >>> ax[1].set_ylim([-90, 90])
    >>> ax[1].grid(True)
    >>> plt.show()
    """
    # The notch is the band-stop special case of the shared designer
    return _design_notch_peak_filter(w0, Q, "notch", fs)
def iirpeak(w0: float, Q: float, fs: float = 2.0):
    """
    Design second-order IIR peak (resonant) digital filter.

    A peak filter is a band-pass filter with a narrow bandwidth
    (high quality factor). It rejects components outside a narrow
    frequency band.

    Parameters
    ----------
    w0 : float
        Frequency to be retained in a signal. If `fs` is specified, this is in
        the same units as `fs`. By default, it is a normalized scalar that must
        satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
        sampling frequency.
    Q : float
        Quality factor. Dimensionless parameter that characterizes
        peak filter -3 dB bandwidth ``bw`` relative to its center
        frequency, ``Q = w0/bw``.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (``b``) and denominator (``a``) polynomials
        of the IIR filter.

    See Also
    --------
    iirnotch

    Notes
    -----
    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
           Prentice-Hall, 1996

    Examples
    --------
    Design and plot filter to remove the frequencies other than the 300 Hz
    component from a signal sampled at 1000 Hz, using a quality factor Q = 30

    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> fs = 1000.0  # Sample frequency (Hz)
    >>> f0 = 300.0  # Frequency to be retained (Hz)
    >>> Q = 30.0  # Quality factor
    >>> # Design peak filter
    >>> b, a = signal.iirpeak(f0, Q, fs)

    >>> # Frequency response
    >>> freq, h = signal.freqz(b, a, fs=fs)
    >>> # Plot
    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue')
    >>> ax[0].set_title("Frequency Response")
    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
    >>> ax[0].set_xlim([0, 500])
    >>> ax[0].set_ylim([-50, 10])
    >>> ax[0].grid(True)
    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
    >>> ax[1].set_xlabel("Frequency (Hz)")
    >>> ax[1].set_xlim([0, 500])
    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
    >>> ax[1].set_ylim([-90, 90])
    >>> ax[1].grid(True)
    >>> plt.show()
    """
    # The peak is the band-pass special case of the shared designer
    return _design_notch_peak_filter(w0, Q, "peak", fs)
def _design_notch_peak_filter(w0, Q, ftype, fs=2.0):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. If `fs` is specified,
this is in the same units as `fs`. By default, it is a normalized
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0:
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
w0 = 2*w0/fs
# Checks if w0 is within the range
if w0 > 1.0 or w0 < 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
# Compute -3dB attenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
def iircomb(w0, Q, ftype='notch', fs=2.0, *, pass_zero=False):
    """
    Design IIR notching or peaking digital comb filter.

    A notching comb filter consists of regularly-spaced band-stop filters
    with a narrow bandwidth (high quality factor); each rejects a narrow
    frequency band and leaves the rest of the spectrum little changed.
    A peaking comb filter is the complementary arrangement of
    regularly-spaced band-pass filters.

    Parameters
    ----------
    w0 : float
        The fundamental frequency of the comb filter (the spacing between
        its peaks). This must evenly divide the sampling frequency. If `fs`
        is specified, this is in the same units as `fs`. By default, it is
        a normalized scalar that must satisfy ``0 < w0 < 1``, with
        ``w0 = 1`` corresponding to half of the sampling frequency.
    Q : float
        Quality factor. Dimensionless parameter that characterizes
        notch filter -3 dB bandwidth ``bw`` relative to its center
        frequency, ``Q = w0/bw``.
    ftype : {'notch', 'peak'}
        The type of comb filter generated by the function. If 'notch', then
        the Q factor applies to the notches. If 'peak', then the Q factor
        applies to the peaks.  Default is 'notch'.
    fs : float, optional
        The sampling frequency of the signal. Default is 2.0.
    pass_zero : bool, optional
        If False (default), the notches (nulls) of the filter are centered on
        frequencies [0, w0, 2*w0, ...], and the peaks are centered on the
        midpoints [w0/2, 3*w0/2, 5*w0/2, ...].  If True, the peaks are centered
        on [0, w0, 2*w0, ...] (passing zero frequency) and vice versa.

        .. versionadded:: 1.9.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (``b``) and denominator (``a``) polynomials
        of the IIR filter.

    Raises
    ------
    ValueError
        If `w0` is less than or equal to 0 or greater than or equal to
        ``fs/2``, if `fs` is not divisible by `w0`, if `ftype`
        is not 'notch' or 'peak'

    See Also
    --------
    iirnotch
    iirpeak

    Notes
    -----
    For implementation details, see [1]_. The TF implementation of the
    comb filter is numerically stable even at higher orders due to the
    use of a single repeated pole, which won't suffer from precision loss.

    References
    ----------
    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
           Prentice-Hall, 1996, ch. 11, "Digital Filter Design"
    """
    # Convert w0, Q, and fs to float
    w0 = float(w0)
    Q = float(Q)
    fs = float(fs)

    # Check for invalid cutoff frequency or filter type
    ftype = ftype.lower()
    if not 0 < w0 < fs / 2:
        raise ValueError("w0 must be between 0 and {}"
                         " (nyquist), but given {}.".format(fs / 2, w0))
    if ftype not in ('notch', 'peak'):
        raise ValueError('ftype must be either notch or peak.')

    # Order of the comb: one null/peak period per fs/w0 samples
    N = round(fs / w0)
    if abs(w0 - fs/N)/fs > 1e-14:
        raise ValueError('fs must be divisible by w0.')

    # Fundamental frequency in rad/sample and filter bandwidth
    # Eq. 11.3.1 (p. 574) from reference [1]
    w0 = (2 * np.pi * w0) / fs
    w_delta = w0 / Q

    # Reference gains for notch vs. peak and the -3 dB level
    # Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1]
    G0, G = (1, 0) if ftype == 'notch' else (0, 1)
    GB = 1 / np.sqrt(2)

    # Compute beta
    # Eq. 11.5.3 (p. 591) from reference [1]
    beta = np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) * np.tan(N * w_delta / 4)

    # Eq 11.5.1 (p. 590) variables a, b, c from reference [1]
    denom = 1 + beta
    coef_a = (1 - beta) / denom
    coef_b = (G0 + G * beta) / denom
    coef_c = (G0 - G * beta) / denom

    # The trailing coefficients are negated to obtain a peaking comb that
    # passes zero frequency, or a notching comb that doesn't.
    trail_sign = -1.0 if (ftype == 'peak') == pass_zero else 1.0

    # Numerator: b -/+ c*z^-N
    # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
    b = np.zeros(N + 1)
    b[0] = coef_b
    b[-1] = trail_sign * coef_c

    # Denominator: 1 -/+ a*z^-N
    # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
    a = np.zeros(N + 1)
    a[0] = 1
    a[-1] = trail_sign * coef_a

    return b, a
def _hz_to_erb(hz):
"""
Utility for converting from frequency (Hz) to the
Equivalent Rectangular Bandwidth (ERB) scale
ERB = frequency / EarQ + minBW
"""
EarQ = 9.26449
minBW = 24.7
return hz / EarQ + minBW
def gammatone(freq, ftype, order=None, numtaps=None, fs=None):
    """
    Gammatone filter design.

    This function computes the coefficients of an FIR or IIR gammatone
    digital filter [1]_.

    Parameters
    ----------
    freq : float
        Center frequency of the filter (expressed in the same units
        as `fs`).
    ftype : {'fir', 'iir'}
        The type of filter the function generates. If 'fir', the function
        will generate an Nth order FIR gammatone filter. If 'iir', the
        function will generate an 8th order digital IIR filter, modeled as
        a 4th order gammatone filter.
    order : int, optional
        The order of the filter. Only used when ``ftype='fir'``.
        Default is 4 to model the human auditory system. Must be between
        0 and 24.
    numtaps : int, optional
        Length of the filter. Only used when ``ftype='fir'``.
        Default is ``fs*0.015`` if `fs` is greater than 1000,
        15 if `fs` is less than or equal to 1000.
    fs : float, optional
        The sampling frequency of the signal. `freq` must be between
        0 and ``fs/2``. Default is 2.

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (``b``) and denominator (``a``) polynomials of the filter.

    Raises
    ------
    ValueError
        If `freq` is less than or equal to 0 or greater than or equal to
        ``fs/2``, if `ftype` is not 'fir' or 'iir', if `order` is less than
        or equal to 0 or greater than 24 when ``ftype='fir'``

    See Also
    --------
    firwin
    iirfilter

    References
    ----------
    .. [1] Slaney, Malcolm, "An Efficient Implementation of the
        Patterson-Holdsworth Auditory Filter Bank", Apple Computer
        Technical Report 35, 1993, pp.3-8, 34-39.

    Examples
    --------
    16-sample 4th order FIR Gammatone filter centered at 440 Hz

    >>> from scipy import signal
    >>> signal.gammatone(440, 'fir', numtaps=16, fs=16000)
    (array([ 0.00000000e+00, 2.22196719e-07, 1.64942101e-06, 4.99298227e-06,
        1.01993969e-05, 1.63125770e-05, 2.14648940e-05, 2.29947263e-05,
        1.76776931e-05, 2.04980537e-06, -2.72062858e-05, -7.28455299e-05,
        -1.36651076e-04, -2.19066855e-04, -3.18905076e-04, -4.33156712e-04]),
        [1.0])

    IIR Gammatone filter centered at 440 Hz

    >>> import matplotlib.pyplot as plt
    >>> import numpy as np

    >>> b, a = signal.gammatone(440, 'iir', fs=16000)
    >>> w, h = signal.freqz(b, a)
    >>> plt.plot(w / ((2 * np.pi) / 16000), 20 * np.log10(abs(h)))
    >>> plt.xscale('log')
    >>> plt.title('Gammatone filter frequency response')
    >>> plt.xlabel('Frequency')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.margins(0, 0.1)
    >>> plt.grid(which='both', axis='both')
    >>> plt.axvline(440, color='green') # cutoff frequency
    >>> plt.show()
    """
    # Converts freq to float
    freq = float(freq)

    # Set sampling rate if not passed
    if fs is None:
        fs = 2
    fs = float(fs)

    # Check for invalid cutoff frequency or filter type
    ftype = ftype.lower()
    filter_types = ['fir', 'iir']
    if not 0 < freq < fs / 2:
        raise ValueError("The frequency must be between 0 and {}"
                         " (nyquist), but given {}.".format(fs / 2, freq))
    if ftype not in filter_types:
        raise ValueError('ftype must be either fir or iir.')

    # Calculate FIR gammatone filter
    if ftype == 'fir':
        # Set order and numtaps if not passed
        if order is None:
            order = 4
        order = operator.index(order)

        if numtaps is None:
            numtaps = max(int(fs * 0.015), 15)
        numtaps = operator.index(numtaps)

        # Check for invalid order
        if not 0 < order <= 24:
            raise ValueError("Invalid order: order must be > 0 and <= 24.")

        # Gammatone impulse response settings
        # bw is the bandwidth scaled per Slaney's ERB model [1]
        t = np.arange(numtaps) / fs
        bw = 1.019 * _hz_to_erb(freq)

        # Calculate the FIR gammatone filter: envelope t^(order-1)*exp(...)
        # modulated by a cosine at the center frequency
        b = (t ** (order - 1)) * np.exp(-2 * np.pi * bw * t)
        b *= np.cos(2 * np.pi * freq * t)

        # Scale the FIR filter so the frequency response is 1 at cutoff
        scale_factor = 2 * (2 * np.pi * bw) ** (order)
        scale_factor /= float_factorial(order - 1)
        scale_factor /= fs
        b *= scale_factor
        # FIR: denominator is trivially 1
        a = [1.0]

    # Calculate IIR gammatone filter
    elif ftype == 'iir':
        # Raise warning if order and/or numtaps is passed
        if order is not None:
            warnings.warn('order is not used for IIR gammatone filter.')
        if numtaps is not None:
            warnings.warn('numtaps is not used for IIR gammatone filter.')

        # Gammatone impulse response settings
        T = 1./fs
        bw = 2 * np.pi * 1.019 * _hz_to_erb(freq)
        fr = 2 * freq * np.pi * T
        bwT = bw * T

        # Calculate the gain to normalize the volume at the center frequency
        # (g1..g5 follow the closed-form magnitude of the 4th-order
        # gammatone response at fr; see reference [1])
        g1 = -2 * np.exp(2j * fr) * T
        g2 = 2 * np.exp(-(bwT) + 1j * fr) * T
        g3 = np.sqrt(3 + 2 ** (3 / 2)) * np.sin(fr)
        g4 = np.sqrt(3 - 2 ** (3 / 2)) * np.sin(fr)
        g5 = np.exp(2j * fr)

        g = g1 + g2 * (np.cos(fr) - g4)
        g *= (g1 + g2 * (np.cos(fr) + g4))
        g *= (g1 + g2 * (np.cos(fr) - g3))
        g *= (g1 + g2 * (np.cos(fr) + g3))
        g /= ((-2 / np.exp(2 * bwT) - 2 * g5 + 2 * (1 + g5) / np.exp(bwT)) ** 4)
        g = np.abs(g)

        # Create empty filter coefficient lists
        b = np.empty(5)
        a = np.empty(9)

        # Calculate the numerator coefficients (4 zeros)
        b[0] = (T ** 4) / g
        b[1] = -4 * T ** 4 * np.cos(fr) / np.exp(bw * T) / g
        b[2] = 6 * T ** 4 * np.cos(2 * fr) / np.exp(2 * bw * T) / g
        b[3] = -4 * T ** 4 * np.cos(3 * fr) / np.exp(3 * bw * T) / g
        b[4] = T ** 4 * np.cos(4 * fr) / np.exp(4 * bw * T) / g

        # Calculate the denominator coefficients (a single pole pair
        # repeated 4 times, expanded to an 8th-order polynomial)
        a[0] = 1
        a[1] = -8 * np.cos(fr) / np.exp(bw * T)
        a[2] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(2 * bw * T)
        a[3] = -8 * (6 * np.cos(fr) + np.cos(3 * fr))
        a[3] /= np.exp(3 * bw * T)
        a[4] = 2 * (18 + 16 * np.cos(2 * fr) + np.cos(4 * fr))
        a[4] /= np.exp(4 * bw * T)
        a[5] = -8 * (6 * np.cos(fr) + np.cos(3 * fr))
        a[5] /= np.exp(5 * bw * T)
        a[6] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(6 * bw * T)
        a[7] = -8 * np.cos(fr) / np.exp(7 * bw * T)
        a[8] = np.exp(-8 * bw * T)

    return b, a
# Map of IIR filter-type aliases to [analog lowpass prototype function,
# minimum-order estimation function].  Bessel variants have no order
# estimator, so they map to a single-element list.
filter_dict = {'butter': [buttap, buttord],
               'butterworth': [buttap, buttord],
               'cauer': [ellipap, ellipord],
               'elliptic': [ellipap, ellipord],
               'ellip': [ellipap, ellipord],
               'bessel': [besselap],
               'bessel_phase': [besselap],
               'bessel_delay': [besselap],
               'bessel_mag': [besselap],
               'cheby1': [cheb1ap, cheb1ord],
               'chebyshev1': [cheb1ap, cheb1ord],
               'chebyshevi': [cheb1ap, cheb1ord],
               'cheby2': [cheb2ap, cheb2ord],
               'chebyshev2': [cheb2ap, cheb2ord],
               'chebyshevii': [cheb2ap, cheb2ord],
               }

# Map of band-type aliases to the canonical names
# 'lowpass' / 'highpass' / 'bandpass' / 'bandstop'.
band_dict = {'band': 'bandpass',
             'bandpass': 'bandpass',
             'pass': 'bandpass',
             'bp': 'bandpass',
             'bs': 'bandstop',
             'bandstop': 'bandstop',
             'bands': 'bandstop',
             'stop': 'bandstop',
             'l': 'lowpass',
             'low': 'lowpass',
             'lowpass': 'lowpass',
             'lp': 'lowpass',
             'high': 'highpass',
             'highpass': 'highpass',
             'h': 'highpass',
             'hp': 'highpass',
             }

# Frequency-normalization mode passed to `besselap` for each Bessel alias.
bessel_norms = {'bessel': 'phase',
                'bessel_phase': 'phase',
                'bessel_delay': 'delay',
                'bessel_mag': 'mag'}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@signal@_filter_design.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/marker/colorbar/title/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``histogram.marker.colorbar.title.font.shadow``."""

    def __init__(
        self,
        plotly_name="shadow",
        parent_name="histogram.marker.colorbar.title.font",
        **kwargs,
    ):
        # Supply the default edit type unless the caller overrides it.
        kwargs.setdefault("edit_type", "colorbars")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@marker@colorbar@title@font@_shadow.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "s-ilic/ECLAIR",
"repo_path": "ECLAIR_extracted/ECLAIR-master/likelihoods/BG/BAO/SDSS/DR12/LRG/__init__.py",
"type": "Python"
}
|
import numpy as np
# BAO BOSS DR12 LRG 0.2<z<0.5 and 0.4<z<0.6
# Based on Alam et al. 2016
# https://arxiv.org/abs/1607.03155
class likelihood:
    """BAO likelihood for BOSS DR12 LRGs, 0.2<z<0.5 and 0.4<z<0.6.

    Based on Alam et al. 2016, https://arxiv.org/abs/1607.03155
    """

    def __init__(self, lkl_input):
        # Effective redshifts of the (DM/rs, DH/rs) measurement pairs
        self.z = np.array([0.38, 0.38, 0.51, 0.51])
        # Data vector: alternating DM_over_rs and DH_over_rs values
        self.data = np.array([
            1.023406e+01,  # DM_over_rs at z=0.38
            2.498058e+01,  # DH_over_rs at z=0.38
            1.336595e+01,  # DM_over_rs at z=0.51
            2.231656e+01,  # DH_over_rs at z=0.51
        ])
        cov_mat = np.array([
            [2.860520e-02, -4.939281e-02, 1.489688e-02, -1.387079e-02],
            [-4.939281e-02, 5.307187e-01, -2.423513e-02, 1.767087e-01],
            [1.489688e-02, -2.423513e-02, 4.147534e-02, -4.873962e-02],
            [-1.387079e-02, 1.767087e-01, -4.873962e-02, 3.268589e-01],
        ])
        # Precompute the inverse covariance once
        self.icov_mat = np.linalg.inv(cov_mat)

    def get_loglike(self, class_input, lkl_input, class_run):
        """Return the Gaussian log-likelihood of the theory vector."""
        rs = class_run.rs_drag()
        theo = np.empty(4)
        for i in (0, 2):
            zi = self.z[i]
            # Comoving angular diameter distance DM = (1+z) * DA, over rs
            theo[i] = class_run.angular_distance(zi) * (1. + zi) / rs
            # Hubble distance DH = 1/H (CLASS units), over rs
            theo[i + 1] = 1. / class_run.Hubble(self.z[i + 1]) / rs
        resid = self.data - theo
        return -0.5 * resid @ self.icov_mat @ resid
|
s-ilicREPO_NAMEECLAIRPATH_START.@ECLAIR_extracted@ECLAIR-master@likelihoods@BG@BAO@SDSS@DR12@LRG@__init__.py@.PATH_END.py
|
{
"filename": "zero_out_grad_2.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/examples/adding_an_op/zero_out_grad_2.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The gradient of the tutorial zero_out op."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("ZeroOut")
def _zero_out_grad(op, grad):
  """The gradients for `zero_out`.

  Args:
    op: The `zero_out` `Operation` being differentiated; provides access to
      the inputs and outputs of the original op.
    grad: Gradient with respect to the output of the `zero_out` op.

  Returns:
    Gradients with respect to the input of `zero_out`.
  """
  input_tensor = op.inputs[0]
  input_shape = array_ops.shape(input_tensor)
  zero_index = array_ops.zeros_like(input_shape)
  # Only the first output element depends on the input, so scatter the
  # first element of `grad` back to position zero; all else is zero.
  first_grad = array_ops.reshape(grad, [-1])[0]
  grad_wrt_input = sparse_ops.sparse_to_dense([zero_index], input_shape,
                                              first_grad, 0)
  return [grad_wrt_input]  # List of one Tensor, since we have one input
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@examples@adding_an_op@zero_out_grad_2.py@.PATH_END.py
|
{
"filename": "multinomial.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/scipy/stats/multinomial.py",
"type": "Python"
}
|
# Copyright 2022 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: import <name> as <name> is required for names to be exported.
# See PEP 484 & https://github.com/jax-ml/jax/issues/7570
from jax._src.scipy.stats.multinomial import (
logpmf as logpmf,
pmf as pmf,
)
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@scipy@stats@multinomial.py@.PATH_END.py
|
{
"filename": "mu_simplecontours.py",
"repo_name": "kapteyn-astro/kapteyn",
"repo_path": "kapteyn_extracted/kapteyn-master/doc/source/EXAMPLES/mu_simplecontours.py",
"type": "Python"
}
|
from kapteyn import maputils
from matplotlib import pyplot as plt

# Load the FITS image and restrict it to the central 200x200 pixel window.
fits_image = maputils.FITSimage("m101.fits")
fits_image.set_limits((200, 400), (200, 400))

# Draw default contour levels on an annotated image and report them.
annotated = fits_image.Annotatedimage()
contours = annotated.Contours()
annotated.plot()
print("Levels=", contours.clevels)
plt.show()
|
kapteyn-astroREPO_NAMEkapteynPATH_START.@kapteyn_extracted@kapteyn-master@doc@source@EXAMPLES@mu_simplecontours.py@.PATH_END.py
|
{
"filename": "spectrum.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/datasets/spectrum.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from gammapy.utils.scripts import make_path
from .map import MapDataset, MapDatasetOnOff
from .utils import get_axes
__all__ = ["SpectrumDatasetOnOff", "SpectrumDataset"]
log = logging.getLogger(__name__)
class PlotMixin:
    """Plot mixin for the spectral datasets."""
    def plot_fit(
        self,
        ax_spectrum=None,
        ax_residuals=None,
        kwargs_spectrum=None,
        kwargs_residuals=None,
    ):
        """Plot spectrum and residuals in two panels.
        Calls `~SpectrumDataset.plot_excess` and `~SpectrumDataset.plot_residuals_spectral`.
        Parameters
        ----------
        ax_spectrum : `~matplotlib.axes.Axes`, optional
            Axes to plot spectrum on. Default is None.
        ax_residuals : `~matplotlib.axes.Axes`, optional
            Axes to plot residuals on. Default is None.
        kwargs_spectrum : dict, optional
            Keyword arguments passed to `~SpectrumDataset.plot_excess`. Default is None.
        kwargs_residuals : dict, optional
            Keyword arguments passed to `~SpectrumDataset.plot_residuals_spectral`. Default is None.
        Returns
        -------
        ax_spectrum, ax_residuals : `~matplotlib.axes.Axes`
            Spectrum and residuals plots.
        Examples
        --------
        >>> #Creating a spectral dataset
        >>> from gammapy.datasets import SpectrumDatasetOnOff
        >>> from gammapy.modeling.models import PowerLawSpectralModel, SkyModel
        >>> filename = "$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs23523.fits"
        >>> dataset = SpectrumDatasetOnOff.read(filename)
        >>> p = PowerLawSpectralModel()
        >>> dataset.models = SkyModel(spectral_model=p)
        >>> # optional configurations
        >>> kwargs_excess = {"color": "blue", "markersize":8, "marker":'s', }
        >>> kwargs_npred_signal = {"color": "black", "ls":"--"}
        >>> kwargs_spectrum = {"kwargs_excess":kwargs_excess, "kwargs_npred_signal":kwargs_npred_signal}
        >>> kwargs_residuals = {"color": "black", "markersize":4, "marker":'s', }
        >>> dataset.plot_fit(kwargs_residuals=kwargs_residuals, kwargs_spectrum=kwargs_spectrum) # doctest: +SKIP
        """
        # 7-row grid: the top 5 rows hold the spectrum, the bottom 2 the residuals.
        gs = GridSpec(7, 1)
        # Only show the spectrum panel's x tick labels when the caller supplied axes.
        bool_visible_xticklabel = not (ax_spectrum is None and ax_residuals is None)
        ax_spectrum, ax_residuals = get_axes(
            ax_spectrum,
            ax_residuals,
            8,
            7,
            [gs[:5, :]],
            [gs[5:, :]],
            kwargs2={"sharex": ax_spectrum},
        )
        kwargs_spectrum = kwargs_spectrum or {}
        kwargs_residuals = kwargs_residuals or {}
        self.plot_excess(ax_spectrum, **kwargs_spectrum)
        self.plot_residuals_spectral(ax_residuals, **kwargs_residuals)
        # Label the residual panel according to the residual method actually used.
        method = kwargs_residuals.get("method", "diff")
        label = self._residuals_labels[method]
        ax_residuals.set_ylabel(f"Residuals\n{label}")
        plt.setp(ax_spectrum.get_xticklabels(), visible=bool_visible_xticklabel)
        self.plot_masks(ax=ax_spectrum)
        self.plot_masks(ax=ax_residuals)
        return ax_spectrum, ax_residuals
    def plot_counts(
        self, ax=None, kwargs_counts=None, kwargs_background=None, **kwargs
    ):
        """Plot counts and background.
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`, optional
            Axes to plot on. Default is None.
        kwargs_counts : dict, optional
            Keyword arguments passed to `~matplotlib.axes.Axes.hist` for the counts. Default is None.
        kwargs_background : dict, optional
            Keyword arguments passed to `~matplotlib.axes.Axes.hist` for the background. Default is None.
        **kwargs : dict, optional
            Keyword arguments passed to both `~matplotlib.axes.Axes.hist`.
        Returns
        -------
        ax : `~matplotlib.axes.Axes`
            Axes object.
        """
        kwargs_counts = kwargs_counts or {}
        kwargs_background = kwargs_background or {}
        # Shared kwargs first; the histogram-specific dict takes precedence.
        plot_kwargs = kwargs.copy()
        plot_kwargs.update(kwargs_counts)
        plot_kwargs.setdefault("label", "Counts")
        ax = self.counts.plot_hist(ax=ax, **plot_kwargs)
        plot_kwargs = kwargs.copy()
        plot_kwargs.update(kwargs_background)
        plot_kwargs.setdefault("label", "Background")
        self.background.plot_hist(ax=ax, **plot_kwargs)
        ax.legend(numpoints=1)
        return ax
    def plot_masks(self, ax=None, kwargs_fit=None, kwargs_safe=None):
        """Plot safe mask and fit mask.
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`, optional
            Axes to plot on. Default is None.
        kwargs_fit : dict, optional
            Keyword arguments passed to `~RegionNDMap.plot_mask()` for mask fit. Default is None.
        kwargs_safe : dict, optional
            Keyword arguments passed to `~RegionNDMap.plot_mask()` for mask safe. Default is None.
        Returns
        -------
        ax : `~matplotlib.axes.Axes`
            Axes object.
        Examples
        --------
        >>> # Reading a spectral dataset
        >>> from gammapy.datasets import SpectrumDatasetOnOff
        >>> filename = "$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs23523.fits"
        >>> dataset = SpectrumDatasetOnOff.read(filename)
        >>> dataset.mask_fit = dataset.mask_safe.copy()
        >>> dataset.mask_fit.data[40:46] = False # setting dummy mask_fit for illustration
        >>> # Plot the masks on top of the counts histogram
        >>> kwargs_safe = {"color":"green", "alpha":0.2} #optinonal arguments to configure
        >>> kwargs_fit = {"color":"pink", "alpha":0.2}
        >>> ax=dataset.plot_counts() # doctest: +SKIP
        >>> dataset.plot_masks(ax=ax, kwargs_fit=kwargs_fit, kwargs_safe=kwargs_safe) # doctest: +SKIP
        """
        kwargs_fit = kwargs_fit or {}
        kwargs_safe = kwargs_safe or {}
        # Default labels/colors; user-provided kwargs win via setdefault.
        kwargs_fit.setdefault("label", "Mask fit")
        kwargs_fit.setdefault("color", "tab:green")
        kwargs_safe.setdefault("label", "Mask safe")
        kwargs_safe.setdefault("color", "black")
        # Both masks are optional; only draw the ones that are defined.
        if self.mask_fit:
            self.mask_fit.plot_mask(ax=ax, **kwargs_fit)
        if self.mask_safe:
            self.mask_safe.plot_mask(ax=ax, **kwargs_safe)
        ax.legend()
        return ax
    def plot_excess(
        self, ax=None, kwargs_excess=None, kwargs_npred_signal=None, **kwargs
    ):
        """Plot excess and predicted signal.
        The error bars are computed with a symmetric assumption on the excess.
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`, optional
            Axes to plot on. Default is None.
        kwargs_excess : dict, optional
            Keyword arguments passed to `~matplotlib.axes.Axes.errorbar` for
            the excess. Default is None.
        kwargs_npred_signal : dict, optional
            Keyword arguments passed to `~matplotlib.axes.Axes.hist` for the
            predicted signal. Default is None.
        **kwargs : dict, optional
            Keyword arguments passed to both plot methods.
        Returns
        -------
        ax : `~matplotlib.axes.Axes`
            Axes object.
        Examples
        --------
        >>> #Creating a spectral dataset
        >>> from gammapy.datasets import SpectrumDatasetOnOff
        >>> from gammapy.modeling.models import PowerLawSpectralModel, SkyModel
        >>> filename = "$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs23523.fits"
        >>> dataset = SpectrumDatasetOnOff.read(filename)
        >>> p = PowerLawSpectralModel()
        >>> dataset.models = SkyModel(spectral_model=p)
        >>> #Plot the excess in blue and the npred in black dotted lines
        >>> kwargs_excess = {"color": "blue", "markersize":8, "marker":'s', }
        >>> kwargs_npred_signal = {"color": "black", "ls":"--"}
        >>> dataset.plot_excess(kwargs_excess=kwargs_excess, kwargs_npred_signal=kwargs_npred_signal) # doctest: +SKIP
        """
        kwargs_excess = kwargs_excess or {}
        kwargs_npred_signal = kwargs_npred_signal or {}
        # Determine the uncertainty on the excess
        yerr = self._counts_statistic.error
        plot_kwargs = kwargs.copy()
        plot_kwargs.update(kwargs_excess)
        plot_kwargs.setdefault("label", "Excess counts")
        ax = self.excess.plot(ax, yerr=yerr, **plot_kwargs)
        plot_kwargs = kwargs.copy()
        plot_kwargs.update(kwargs_npred_signal)
        plot_kwargs.setdefault("label", "Predicted signal counts")
        self.npred_signal().plot_hist(ax, **plot_kwargs)
        ax.legend(numpoints=1)
        return ax
    def peek(self, figsize=(16, 4)):
        """Quick-look summary plots.
        Parameters
        ----------
        figsize : tuple
            Size of the figure. Default is (16, 4).
        """
        # Three side-by-side panels: counts, exposure and energy dispersion.
        fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=figsize)
        ax1.set_title("Counts")
        self.plot_counts(ax1)
        self.plot_masks(ax=ax1)
        ax1.legend()
        ax2.set_title("Exposure")
        self.exposure.plot(ax2, ls="-", markersize=0, xerr=None)
        ax3.set_title("Energy Dispersion")
        # The dispersion matrix is only drawn when an edisp is available.
        if self.edisp is not None:
            kernel = self.edisp.get_edisp_kernel()
            kernel.plot_matrix(ax=ax3, add_cbar=True)
class SpectrumDataset(PlotMixin, MapDataset):
    """Main dataset for spectrum fitting (1D analysis).

    Counts, background and IRFs are bundled as `~gammapy.maps.RegionNDMap`
    objects (maps with a single spatial bin). Optional safe and fit masks
    exclude bins from the analysis, and once models are assigned the
    predicted counts and the Cash fit statistic (see `~gammapy.stats.cash`)
    can be evaluated.
    For more information see :ref:`datasets`.
    """
    stat_type = "cash"
    tag = "SpectrumDataset"

    # Spatial operations make no sense on a one-bin region geometry, so the
    # corresponding MapDataset methods are explicitly disabled here.
    def plot_residuals_spatial(self, *args, **kwargs):
        """Not supported for `SpectrumDataset`"""
        raise NotImplementedError("Method not supported on a spectrum dataset")

    def cutout(self, *args, **kwargs):
        """Not supported for `SpectrumDataset`"""
        raise NotImplementedError("Method not supported on a spectrum dataset")

    def to_spectrum_dataset(self, *args, **kwargs):
        """Not supported for `SpectrumDataset`"""
        raise NotImplementedError("Already a Spectrum Dataset. Method not supported")
class SpectrumDatasetOnOff(PlotMixin, MapDatasetOnOff):
    """Spectrum dataset for 1D on-off likelihood fitting.
    It bundles together the binned on and off counts, the binned IRFs as well as the on and off acceptances.
    A fit mask can be added to exclude bins during the analysis.
    It uses the Wstat statistic (see `~gammapy.stats.wstat`).
    For more information see :ref:`datasets`.
    """
    # Fit statistic and serialisation tag for this dataset type.
    stat_type = "wstat"
    tag = "SpectrumDatasetOnOff"
    def cutout(self, *args, **kwargs):
        """Not supported for `SpectrumDatasetOnOff`."""
        raise NotImplementedError("Method not supported on a spectrum dataset")
    def plot_residuals_spatial(self, *args, **kwargs):
        """Not supported for `SpectrumDatasetOnOff`."""
        raise NotImplementedError("Method not supported on a spectrum dataset")
    @classmethod
    def read(cls, filename, format="ogip", checksum=False, **kwargs):
        """Read from file.
        For OGIP formats, filename is the name of a PHA file. The BKG, ARF, and RMF file names must be
        set in the PHA header and the files must be present in the same folder. For details, see `OGIPDatasetReader.read`.
        For the GADF format, a MapDataset serialisation is used.
        Parameters
        ----------
        filename : `~pathlib.Path` or str
            OGIP PHA file to read.
        format : {"ogip", "ogip-sherpa", "gadf"}
            Format to use. Default is "ogip".
        checksum : bool
            If True checks both DATASUM and CHECKSUM cards in the file headers. Default is False.
        kwargs : dict, optional
            Keyword arguments passed to `MapDataset.read`.
        """
        from .io import OGIPDatasetReader
        if format == "gadf":
            return super().read(filename, format="gadf", checksum=checksum, **kwargs)
        # Both OGIP variants are handled by the dedicated OGIP reader.
        reader = OGIPDatasetReader(filename=filename, checksum=checksum)
        return reader.read()
    def write(self, filename, overwrite=False, format="ogip", checksum=False):
        """Write spectrum dataset on off to file.
        Can be serialised either as a `MapDataset` with a `RegionGeom`
        following the GADF specifications, or as per the OGIP format.
        For OGIP formats specs, see `OGIPDatasetWriter`.
        Parameters
        ----------
        filename : `~pathlib.Path` or str
            Filename to write to.
        overwrite : bool, optional
            Overwrite existing file. Default is False.
        format : {"ogip", "ogip-sherpa", "gadf"}
            Format to use. Default is "ogip".
        checksum : bool
            When True adds both DATASUM and CHECKSUM cards to the headers written to the file.
            Default is False.
        """
        from .io import OGIPDatasetWriter
        if format == "gadf":
            super().write(filename=filename, overwrite=overwrite, checksum=checksum)
        elif format in ["ogip", "ogip-sherpa"]:
            writer = OGIPDatasetWriter(
                filename=filename, format=format, overwrite=overwrite, checksum=checksum
            )
            writer.write(self)
        else:
            raise ValueError(f"{format} is not a valid serialisation format")
    @classmethod
    def from_dict(cls, data, **kwargs):
        """Create spectrum dataset from dict.
        Reads file from the disk as specified in the dict.
        Parameters
        ----------
        data : dict
            Dictionary containing data to create dataset from.
        Returns
        -------
        dataset : `SpectrumDatasetOnOff`
            Spectrum dataset on off.
        """
        filename = make_path(data["filename"])
        dataset = cls.read(filename=filename)
        # Start from a clean fit mask; the file does not carry one.
        dataset.mask_fit = None
        return dataset
    def to_dict(self):
        """Convert to dict for YAML serialization."""
        filename = f"pha_obs{self.name}.fits"
        return {"name": self.name, "type": self.tag, "filename": filename}
    @classmethod
    def from_spectrum_dataset(cls, **kwargs):
        """Create a SpectrumDatasetOnOff from a `SpectrumDataset` dataset.
        Parameters
        ----------
        dataset : `SpectrumDataset`
            Spectrum dataset defining counts, edisp, exposure etc.
        acceptance : `~numpy.array` or float
            Relative background efficiency in the on region.
        acceptance_off : `~numpy.array` or float
            Relative background efficiency in the off region.
        counts_off : `~gammapy.maps.RegionNDMap`
            Off counts spectrum. If the dataset provides a background model,
            and no off counts are defined. The off counts are deferred from
            counts_off / alpha.
        Returns
        -------
        dataset : `SpectrumDatasetOnOff`
            Spectrum dataset on off.
        """
        return cls.from_map_dataset(**kwargs)
    def to_spectrum_dataset(self, name=None):
        """Convert a SpectrumDatasetOnOff to a SpectrumDataset.
        The background model template is taken as alpha*counts_off.
        Parameters
        ----------
        name : str, optional
            Name of the new dataset. Default is None.
        Returns
        -------
        dataset : `SpectrumDataset`
            SpectrumDataset with Cash statistic.
        """
        return self.to_map_dataset(name=name).to_spectrum_dataset(on_region=None)
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@datasets@spectrum.py@.PATH_END.py
|
{
"filename": "prepare_plig.py",
"repo_name": "benabed/clik",
"repo_path": "clik_extracted/clik-main/src/python/tools/prepare_plig.py",
"type": "Python"
}
|
#! PYTHONEXE
import sys
sys.path = ["REPLACEPATH"]+sys.path
import numpy as nm
import numpy.random as ra
import numpy.linalg as la
import clik.parobject as php
import clik
import re
import clik.hpy as h5py
import clik.smicahlp as smh
try:
from astropy.io import fits as pf
except ImportError as e:
# try pyfits then
import pyfits as pf
import os.path as osp
def read_array(fname):
    """Load a numeric array from *fname*.

    The file is first tried as a FITS file, returning the data of the first
    HDU that actually carries data. If that fails for any reason (including
    the FITS reader being unavailable), the file is read as a plain text
    table with `numpy.loadtxt`.

    Parameters
    ----------
    fname : str
        Path of the FITS or ASCII file to read.

    Returns
    -------
    numpy.ndarray
        The array stored in the file.
    """
    try:
        pfits = pf.open(fname)
        try:
            ii = 0
            # Skip leading HDUs without data (e.g. an empty primary HDU).
            # `is None` instead of `== None`: comparing an array to None with
            # `==` is element-wise and not a valid truth value.
            while pfits[ii].data is None:
                ii += 1
            return pfits[ii].data
        finally:
            # Close the FITS handle; the original implementation leaked it.
            # The data array was already accessed above, so it stays valid.
            pfits.close()
    except Exception:
        # Not a (readable) FITS file -- fall back to a plain text table.
        return nm.loadtxt(fname)
def expand_bins(bins, ncl):
    """Replicate the binning matrix *bins* along the diagonal *ncl* times.

    Returns a block-diagonal float matrix with one copy of *bins* per
    spectrum kind and zeros elsewhere.
    """
    # Kronecker product with the identity places `bins` on every diagonal
    # block, which is exactly the explicit fill loop of the original code.
    return nm.kron(nm.eye(ncl), bins)
def test_cov_mat_format(fname):
full = False
try:
hdulist = pf.open(fname)
try:
dump = hdulist[0].header['DMC_PID']
full = True
finally:
hdulist.close()
except Exception:
pass
return full
def ordering_TEB(nt, np, has_cl):
    """Return the (row, col) index pairs of the cross-spectra in TEB order.

    The auto blocks (TT, EE, BB) contribute their upper triangle including
    the diagonal; the cross blocks (TE, TB, EB) contribute the full
    rectangle. Blocks disabled in ``has_cl`` contribute nothing.
    """
    n_t = nt * has_cl[0]
    n_e = np * has_cl[1]
    n_b = np * has_cl[2]
    off_e = n_t
    off_b = n_t + n_e
    pairs = []
    # Auto-spectra: upper triangle of each diagonal block (TT, EE, BB).
    for size, off in ((n_t, 0), (n_e, off_e), (n_b, off_b)):
        pairs.extend((off + a, off + b) for a in range(size) for b in range(a, size))
    # Cross-spectra: full rectangular blocks, TE then TB then EB.
    for s1, o1, s2, o2 in (
        (n_t, 0, n_e, off_e),
        (n_t, 0, n_b, off_b),
        (n_e, off_e, n_b, off_b),
    ):
        pairs.extend((o1 + a, o2 + b) for a in range(s1) for b in range(s2))
    return nm.array(pairs)
def remove_zero_rowcol(matrix_in, mask):
    """Return the submatrix of *matrix_in* restricted to the rows/columns
    whose index appears among the non-zero entries of *mask*.

    The result is always float, matching the ``nm.zeros`` allocation of the
    original implementation.
    """
    # Unique row indices of the non-zero mask entries, ascending.
    keep = sorted(set(nm.nonzero(mask)[0]))
    out = nm.zeros([len(keep), len(keep)])
    if keep:
        # nm.ix_ extracts the kept rows and columns in a single fancy index.
        out[:, :] = matrix_in[nm.ix_(keep, keep)]
    return out
def read_full_cov_mat(fname):
    """Read the "full" covariance-matrix FITS file.

    Returns the LMIN/LMAX header entries, the six binning matrices (the
    TB, EB and BB extensions may be absent and are then returned as None)
    and the inverse covariance matrix rescaled by (1e6)^-4.
    """
    hdulist = pf.open(fname)
    cov_mat = hdulist['ICOV'].data
    bin_TT = hdulist['BIN_TT'].data
    bin_EE = hdulist['BIN_EE'].data
    bin_TE = hdulist['BIN_TE'].data
    def _optional(ext):
        # Polarisation-related extensions are not always present.
        try:
            return hdulist[ext].data
        except:
            return None
    bin_TB = _optional('BIN_TB')
    bin_EB = _optional('BIN_EB')
    bin_BB = _optional('BIN_BB')
    header = hdulist[0].header
    hdulist.close()
    # Keep only the multipole-range keywords from the primary header.
    l_info = {}
    for key in list(header.keys()):
        if 'LMIN_' in key or 'LMAX_' in key:
            l_info[key] = int(header[key])
    # Unit rescaling of the inverse covariance (factor (1e6)^4).
    cov_mat /= (1.0E6)**4
    return l_info, bin_TT, bin_EE, bin_BB, bin_TE, bin_TB, bin_EB, cov_mat
def select_channels(l_info, nr_freq, frequencies):
    """Derive the active channel set from the LMAX header entries.

    A spectrum block is considered present when its ``LMAX_<XY>_<f1>X<f2>``
    header value is positive. Returns the number of temperature and
    polarisation channels, the has_cl flags (TT, EE, BB, TE, TB, EB), the
    active channel frequencies and names, and the full
    3*nr_freq x 3*nr_freq presence mask (T, E, B index blocks).
    """
    mask_TP = nm.zeros([3*nr_freq, 3*nr_freq], dtype='int')
    for i, freq1 in enumerate(frequencies):
        for j, freq2 in enumerate(frequencies):
            if i > j:
                continue
            # TT block and its symmetric counterpart.
            mask_TP[i,j] \
                = (l_info['LMAX_TT_' + freq1 + 'X' + freq2] > 0)
            mask_TP[j,i] = mask_TP[i,j]
            # EE block.
            mask_TP[i+nr_freq,j+nr_freq] \
                = (l_info['LMAX_EE_' + freq1 + 'X' + freq2] > 0)
            mask_TP[j+nr_freq,i+nr_freq] = mask_TP[i+nr_freq,j+nr_freq]
            # BB block.
            mask_TP[i+nr_freq*2,j+nr_freq*2] \
                = (l_info['LMAX_BB_' + freq1 + 'X' + freq2] > 0)
            mask_TP[j+nr_freq*2,i+nr_freq*2] = mask_TP[i+nr_freq*2,j+nr_freq*2]
            # TE block, mirrored into all four off-diagonal quadrants.
            mask_TP[i+nr_freq,j] \
                = (l_info['LMAX_TE_' + freq1 + 'X' + freq2] > 0)
            mask_TP[j+nr_freq,i] = mask_TP[i+nr_freq,j]
            mask_TP[i,j+nr_freq] = mask_TP[i+nr_freq,j]
            mask_TP[j,i+nr_freq] = mask_TP[i+nr_freq,j]
            # TB block.
            mask_TP[i+nr_freq*2,j] \
                = (l_info['LMAX_TB_' + freq1 + 'X' + freq2] > 0)
            mask_TP[j+nr_freq*2,i] = mask_TP[i+nr_freq*2,j]
            mask_TP[i,j+nr_freq*2] = mask_TP[i+nr_freq*2,j]
            mask_TP[j,i+nr_freq*2] = mask_TP[i+nr_freq*2,j]
            # EB block.
            mask_TP[i+nr_freq*2,j+nr_freq] \
                = (l_info['LMAX_EB_' + freq1 + 'X' + freq2] > 0)
            mask_TP[j+nr_freq*2,i+nr_freq] = mask_TP[i+nr_freq*2,j+nr_freq]
            mask_TP[i+nr_freq,j+nr_freq*2] = mask_TP[i+nr_freq*2,j+nr_freq]
            mask_TP[j+nr_freq,i+nr_freq*2] = mask_TP[i+nr_freq*2,j+nr_freq]
    # Per-frequency presence of each spectrum kind (1 where any pairing uses it).
    diag_TT = (sum(mask_TP[:nr_freq,:nr_freq], 0) > 0).astype('int')
    diag_EE = (sum(mask_TP[nr_freq:nr_freq*2,nr_freq:nr_freq*2], 0) > 0).astype('int')
    diag_BB = (sum(mask_TP[nr_freq*2:,nr_freq*2:], 0) > 0).astype('int')
    diag_TE = (sum(mask_TP[:nr_freq,nr_freq:nr_freq*2], 0) > 0).astype('int')
    diag_TB = (sum(mask_TP[:nr_freq,nr_freq*2:], 0) > 0).astype('int')
    diag_EB = (sum(mask_TP[nr_freq:nr_freq*2,nr_freq*2:], 0) > 0).astype('int')
    nT = sum(diag_TT | diag_TE | diag_TB)
    nE = sum(diag_EE | diag_TE | diag_EB)
    nB = sum(diag_BB | diag_TB | diag_EB)
    nP = sum(diag_EE | diag_TE | diag_BB | diag_TB | diag_EB)
    nTE = sum(diag_TE)
    nTB = sum(diag_TB)
    nEB = sum(diag_EB)
    has_cl = [1*(nT > 0), 1*(nE > 0), 1*(nB > 0), 1*(nTE > 0), 1*(nTB > 0), 1*(nEB > 0)]
    # Channel lists: temperature channels first, then polarisation channels.
    frq = []
    channel = []
    for i, freq in enumerate(frequencies):
        if sum(mask_TP[i,:]) > 0:
            frq.append(float(freq))
            channel.append(freq + 'T')
    for i, freq in enumerate(frequencies):
        if sum(mask_TP[i+nr_freq,:]) > 0 or sum(mask_TP[i+nr_freq*2,:]) > 0:
            frq.append(float(freq))
            channel.append(freq + 'P')
    return nT, nP, has_cl, frq, channel, mask_TP
def get_l_range(l_info, mask_TP, nr_freq, frequencies):
    """Collect per-pair multipole ranges and the overall lmin/lmax.

    For every active entry of *mask_TP* the matching ``LMIN_``/``LMAX_``
    header value is copied into the lmin_TP / lmax_TP matrices (-1 marks
    inactive pairs). Per-spectrum extrema and the global lmin/lmax are
    added to *l_info*.
    """
    # -1 marks pairs with no defined multipole range.
    lmin_TP = -1*nm.ones(mask_TP.shape, dtype='int')
    lmax_TP = -1*nm.ones(mask_TP.shape, dtype='int')
    for i, freq1 in enumerate(frequencies):
        for j, freq2 in enumerate(frequencies):
            if i > j:
                continue
            # TT.
            if mask_TP[i,j] != 0:
                lmin_TP[i,j] = l_info['LMIN_TT_' + freq1 + 'X' + freq2]
                lmin_TP[j,i] = lmin_TP[i,j]
                lmax_TP[i,j] = l_info['LMAX_TT_' + freq1 + 'X' + freq2]
                lmax_TP[j,i] = lmax_TP[i,j]
            # EE.
            if mask_TP[i+nr_freq,j+nr_freq] != 0:
                lmin_TP[i+nr_freq,j+nr_freq] = l_info['LMIN_EE_' + freq1 + 'X' + freq2]
                lmin_TP[j+nr_freq,i+nr_freq] = lmin_TP[i+nr_freq,j+nr_freq]
                lmax_TP[i+nr_freq,j+nr_freq] = l_info['LMAX_EE_' + freq1 + 'X' + freq2]
                lmax_TP[j+nr_freq,i+nr_freq] = lmax_TP[i+nr_freq,j+nr_freq]
            # BB.
            if mask_TP[i+nr_freq*2,j+nr_freq*2] != 0:
                lmin_TP[i+nr_freq*2,j+nr_freq*2] = l_info['LMIN_BB_' + freq1 + 'X' + freq2]
                lmin_TP[j+nr_freq*2,i+nr_freq*2] = lmin_TP[i+nr_freq*2,j+nr_freq*2]
                lmax_TP[i+nr_freq*2,j+nr_freq*2] = l_info['LMAX_BB_' + freq1 + 'X' + freq2]
                lmax_TP[j+nr_freq*2,i+nr_freq*2] = lmax_TP[i+nr_freq*2,j+nr_freq*2]
            # TE, mirrored into all four off-diagonal quadrants.
            if mask_TP[i+nr_freq,j] != 0:
                lmin_TP[i+nr_freq,j] = l_info['LMIN_TE_' + freq1 + 'X' + freq2]
                lmin_TP[j+nr_freq,i] = lmin_TP[i+nr_freq,j]
                lmin_TP[i,j+nr_freq] = lmin_TP[i+nr_freq,j]
                lmin_TP[j,i+nr_freq] = lmin_TP[i+nr_freq,j]
                lmax_TP[i+nr_freq,j] = l_info['LMAX_TE_' + freq1 + 'X' + freq2]
                lmax_TP[j+nr_freq,i] = lmax_TP[i+nr_freq,j]
                lmax_TP[i,j+nr_freq] = lmax_TP[i+nr_freq,j]
                lmax_TP[j,i+nr_freq] = lmax_TP[i+nr_freq,j]
            # TB.
            if mask_TP[i+nr_freq*2,j] != 0:
                lmin_TP[i+nr_freq*2,j] = l_info['LMIN_TB_' + freq1 + 'X' + freq2]
                lmin_TP[j+nr_freq*2,i] = lmin_TP[i+nr_freq*2,j]
                lmin_TP[i,j+nr_freq*2] = lmin_TP[i+nr_freq*2,j]
                lmin_TP[j,i+nr_freq*2] = lmin_TP[i+nr_freq*2,j]
                lmax_TP[i+nr_freq*2,j] = l_info['LMAX_TB_' + freq1 + 'X' + freq2]
                lmax_TP[j+nr_freq*2,i] = lmax_TP[i+nr_freq*2,j]
                lmax_TP[i,j+nr_freq*2] = lmax_TP[i+nr_freq*2,j]
                lmax_TP[j,i+nr_freq*2] = lmax_TP[i+nr_freq*2,j]
            # EB.
            if mask_TP[i+nr_freq*2,j+nr_freq] != 0:
                lmin_TP[i+nr_freq*2,j+nr_freq] = l_info['LMIN_EB_' + freq1 + 'X' + freq2]
                lmin_TP[j+nr_freq*2,i+nr_freq] = lmin_TP[i+nr_freq*2,j+nr_freq]
                lmin_TP[i+nr_freq,j+nr_freq*2] = lmin_TP[i+nr_freq*2,j+nr_freq]
                lmin_TP[j+nr_freq,i+nr_freq*2] = lmin_TP[i+nr_freq*2,j+nr_freq]
                lmax_TP[i+nr_freq*2,j+nr_freq] = l_info['LMAX_EB_' + freq1 + 'X' + freq2]
                lmax_TP[j+nr_freq*2,i+nr_freq] = lmax_TP[i+nr_freq*2,j+nr_freq]
                lmax_TP[i+nr_freq,j+nr_freq*2] = lmax_TP[i+nr_freq*2,j+nr_freq]
                lmax_TP[j+nr_freq,i+nr_freq*2] = lmax_TP[i+nr_freq*2,j+nr_freq]
    # Per-spectrum extrema; an empty selection raises ValueError on min()
    # and is mapped to -1. The max side uses hstack with -1 as floor instead.
    try:
        submatrix = lmin_TP[:nr_freq,:nr_freq]
        min_lmin_TT = min(submatrix[submatrix >= 0])
    except ValueError:
        min_lmin_TT = -1
    submatrix = lmax_TP[:nr_freq,:nr_freq]
    max_lmax_TT = max(nm.hstack([-1, submatrix[submatrix >= 0].flatten()]))
    try:
        submatrix = lmin_TP[nr_freq:nr_freq*2,nr_freq:nr_freq*2]
        min_lmin_EE = min(submatrix[submatrix >= 0])
    except ValueError:
        min_lmin_EE = -1
    submatrix = lmax_TP[nr_freq:nr_freq*2,nr_freq:nr_freq*2]
    max_lmax_EE = max(nm.hstack([-1, submatrix[submatrix >= 0].flatten()]))
    try:
        submatrix = lmin_TP[nr_freq*2:,nr_freq*2:]
        min_lmin_BB = min(submatrix[submatrix >= 0])
    except ValueError:
        min_lmin_BB = -1
    submatrix = lmax_TP[nr_freq*2:,nr_freq*2:]
    max_lmax_BB = max(nm.hstack([-1, submatrix[submatrix >= 0].flatten()]))
    try:
        submatrix = lmin_TP[:nr_freq,nr_freq:nr_freq*2]
        min_lmin_TE = min(submatrix[submatrix >= 0])
    except ValueError:
        min_lmin_TE = -1
    submatrix = lmax_TP[:nr_freq,nr_freq:nr_freq*2]
    max_lmax_TE = max(nm.hstack([-1, submatrix[submatrix >= 0].flatten()]))
    try:
        submatrix = lmin_TP[:nr_freq,nr_freq*2:]
        min_lmin_TB = min(submatrix[submatrix >= 0])
    except ValueError:
        min_lmin_TB = -1
    submatrix = lmax_TP[:nr_freq,nr_freq*2:]
    max_lmax_TB = max(nm.hstack([-1, submatrix[submatrix >= 0].flatten()]))
    try:
        submatrix = lmin_TP[nr_freq:nr_freq*2,nr_freq*2:]
        min_lmin_EB = min(submatrix[submatrix >= 0])
    except ValueError:
        min_lmin_EB = -1
    submatrix = lmax_TP[nr_freq:nr_freq*2,nr_freq*2:]
    max_lmax_EB = max(nm.hstack([-1, submatrix[submatrix >= 0].flatten()]))
    # Global multipole range over all active pairs.
    lmin = min(lmin_TP[lmax_TP >= 0])
    lmax = max(lmax_TP[lmax_TP >= 0])
    l_info['min_lmin_TT'] = min_lmin_TT
    l_info['max_lmax_TT'] = max_lmax_TT
    l_info['min_lmin_EE'] = min_lmin_EE
    l_info['max_lmax_EE'] = max_lmax_EE
    l_info['min_lmin_BB'] = min_lmin_BB
    l_info['max_lmax_BB'] = max_lmax_BB
    l_info['min_lmin_TE'] = min_lmin_TE
    l_info['max_lmax_TE'] = max_lmax_TE
    l_info['min_lmin_TB'] = min_lmin_TB
    l_info['max_lmax_TB'] = max_lmax_TB
    l_info['min_lmin_EB'] = min_lmin_EB
    l_info['max_lmax_EB'] = max_lmax_EB
    l_info['lmin'] = lmin
    l_info['lmax'] = lmax
    return lmin, lmax, lmin_TP, lmax_TP, l_info
def get_l_binning(mask_TP, lmin_TP, lmax_TP, l_info, \
    bin_TT, bin_EE, bin_BB, bin_TE, bin_TB, bin_EB):
    """Translate per-pair multipole limits into bin-index limits.

    Uses whichever binning matrix covers the full [lmin, lmax] range to
    reconstruct the bin edges, then maps every pair's lmin/lmax to a bin
    index (qmins/qmaxs). Mixing different binning matrices is not
    implemented and aborts the script.
    """
    qmins = nm.zeros(mask_TP.shape, dtype='int')
    qmaxs = nm.zeros(mask_TP.shape, dtype='int')
    # Pick the binning matrix that spans the full multipole range.
    if (l_info['min_lmin_TT'] == l_info['lmin']) \
        and (l_info['max_lmax_TT'] == l_info['lmax']):
        bins = bin_TT
    elif (l_info['min_lmin_EE'] == l_info['lmin']) \
        and (l_info['max_lmax_EE'] == l_info['lmax']):
        bins = bin_EE
    elif (l_info['min_lmin_BB'] == l_info['lmin']) \
        and (l_info['max_lmax_BB'] == l_info['lmax']):
        bins = bin_BB
    elif (l_info['min_lmin_TE'] == l_info['lmin']) \
        and (l_info['max_lmax_TE'] == l_info['lmax']):
        bins = bin_TE
    elif (l_info['min_lmin_TB'] == l_info['lmin']) \
        and (l_info['max_lmax_TB'] == l_info['lmax']):
        bins = bin_TB
    elif (l_info['min_lmin_EB'] == l_info['lmin']) \
        and (l_info['max_lmax_EB'] == l_info['lmax']):
        bins = bin_EB
    else:
        print("Error: Using combined binning matrices not implemented")
        quit()
    nr_bins = bins.shape[0]
    # Reconstruct the lower edge of each bin from the first non-zero weight.
    lcuts = nm.zeros(nr_bins+1, dtype='int')
    for i in range(nr_bins):
        lcuts[i] = min(nm.where(bins[i,:] > 0)[0]) + l_info['lmin']
    lcuts[-1] = l_info['lmax'] + 1
    # Map each active pair's multipole limits onto bin-edge indices.
    for i in range(mask_TP.shape[0]):
        for j in range(mask_TP.shape[0]):
            if mask_TP[i,j] == 0:
                continue
            qmins[i,j] = nm.where(lcuts == lmin_TP[i,j])[0]
            qmaxs[i,j] = nm.where(lcuts == lmax_TP[i,j]+1)[0]
    # Compress to the active channels only.
    qmins = remove_zero_rowcol(qmins, mask_TP)
    qmaxs = remove_zero_rowcol(qmaxs, mask_TP)
    return qmins, qmaxs, nr_bins, bins
def get_power_spectra(fname, nT, nP, has_cl, nr_freq, mask_TP, l_info, nr_bins, bins):
    """Read the raw spectra from *fname* and bin/compress them.

    Thin convenience wrapper; see `get_power_spectra_` for the actual work.
    """
    return get_power_spectra_(
        read_array(fname), nT, nP, has_cl, nr_freq, mask_TP, l_info, nr_bins, bins
    )
def get_power_spectra_(cl_raw, nT, nP, has_cl, nr_freq, mask_TP, l_info, nr_bins, bins):
    """Reshape, bin and compress the raw cross-spectrum array.

    The raw array is expected to hold 3001 multipoles per frequency pair
    (assumes l = 0..3000 -- TODO confirm against the producing pipeline).
    When no B-mode spectra are requested, a 2*nr_freq layout is accepted
    and zero-padded to the 3-block (T, E, B) layout.
    """
    if has_cl[2] or has_cl[4] or has_cl[5]:
        # B modes requested: the full T/E/B layout is mandatory.
        try:
            cl_raw.shape = [3001, 3*nr_freq, 3*nr_freq]
        except ValueError:
            print ("Error: Power spectrum input file format mismatch")
            quit()
    else:
        try:
            cl_raw.shape = [3001, 3*nr_freq, 3*nr_freq]
        except ValueError:
            # Fall back to a T/E-only layout, zero-padding the B block.
            try:
                cl_raw.shape = [3001, 2*nr_freq, 2*nr_freq]
                cl_raw_good = nm.zeros((3001,3*nr_freq,3*nr_freq))
                cl_raw_good[:,:nr_freq*2,:nr_freq*2] = cl_raw
                cl_raw = cl_raw_good
            except ValueError:
                print ("Error: Power spectrum input file format mismatch")
                quit()
    rqhat = nm.zeros([nr_bins, nT + nP*(has_cl[1]+has_cl[2]), nT + nP*(has_cl[1]+has_cl[2])])
    rqhat_tmp = nm.zeros([nr_bins, mask_TP.shape[0], mask_TP.shape[0]])
    # Bin each cross-spectrum over the global multipole range.
    for i in range(mask_TP.shape[0]):
        for j in range(mask_TP.shape[0]):
            #print i,j
            rqhat_tmp[:,i,j] = nm.dot(bins, cl_raw[l_info['lmin']:l_info['lmax']+1,i,j])
    # Drop the inactive channels from every binned matrix.
    for i in range(nr_bins):
        rqhat[i,:,:] = remove_zero_rowcol(rqhat_tmp[i,:,:], mask_TP)
    # Unit rescaling (factor (1e6)^2), inverse of the covariance scaling.
    rqhat *= (1.0E6)**2
    return rqhat
def dump_colorcorrections(color_corr, mask_TP, pars):
    """Write the dust colour-correction table and patch the parameter set.

    Every '%DUST%' placeholder in ``parametric.color`` is replaced by the
    name of the file written here; the file holds one correction value per
    active channel (non-empty column of *mask_TP*).
    """
    filename = 'colorcorr_dust.dat'
    write_col = False
    color_in = pars.str_array.parametric_dot_color
    # Rebuild the color list, substituting the placeholder by the file name.
    color_out = ''
    for i, color in enumerate(color_in):
        if color == "%DUST%":
            color_out += ' ' + filename
            write_col = True
        else:
            color_out += ' ' + color
    color_out = color_out.strip()
    if write_col:
        pars.pf['parametric.color'] = color_out
        # One correction value per channel that appears in the mask.
        color_corr_values = []
        for i in range(mask_TP.shape[0]):
            if sum(mask_TP[:,i], 0) > 0:
                color_corr_values.append(color_corr[i])
        handle = open(filename, "w")
        nm.savetxt(handle, color_corr_values, fmt=' %15.7E')
        handle.close()
    return pars
def add_calibration(channel, pars):
    """Fill in the default calibration channel list when the config does not
    provide one.

    All temperature channels except the 143 GHz reference get a calibration
    parameter; the reference channel itself (T and, when polarisation is in
    use, P) must be present or the script aborts.
    """
    # An explicit, non-empty 'calib' entry in the config wins.
    if "calib" in pars and pars.calib.strip():
        return pars
    ref = '143'
    missing_ref_T = any('T' in entry for entry in channel) and (ref + 'T') not in channel
    missing_ref_P = any('P' in entry for entry in channel) and (ref + 'P') not in channel
    if missing_ref_T or missing_ref_P:
        print("Error: Need {0:3s} GHz channel for calibration".format(ref))
        quit()
    # Calibrate every temperature channel other than the reference one.
    picked = [freq for freq in channel if ref not in freq and 'P' not in freq]
    calib_channels = " ".join(picked)
    if calib_channels:
        pars.pf['calib'] = calib_channels
    return pars
def input_from_cov_mat(pars):
    """Build all likelihood inputs from a full covariance-matrix FITS file.

    Reads the covariance matrix, derives the active channels and multipole
    binning from its headers, and bins the raw power spectra accordingly.
    Returns the same tuple as `input_from_config_file` plus the (possibly
    patched) parameter object.
    """
    print("Parsing binning information from covariance matrix")
    # Fixed Planck HFI frequency set for this format.
    frequencies = ['100', '143', '217']
    color_corr = [1.06881, 1.05195, 1.13962, 1.0, 1.0, 1.0]
    nr_freq = len(frequencies)
    l_info, bin_TT, bin_EE, bin_BB, bin_TE, bin_TB, bin_EB, cov_mat \
        = read_full_cov_mat(pars.str.mat)
    nT, nP, has_cl, frq, channel, mask_TP \
        = select_channels(l_info, nr_freq, frequencies)
    lmin, lmax, lmin_TP, lmax_TP, l_info \
        = get_l_range(l_info, mask_TP, nr_freq, frequencies)
    qmins, qmaxs, nr_bins, bins \
        = get_l_binning(mask_TP, lmin_TP, lmax_TP, l_info, bin_TT, bin_EE, bin_BB, bin_TE, bin_TB, bin_EB)
    rqhat = get_power_spectra(pars.str.rqhat, nT, nP, has_cl,nr_freq, mask_TP, \
        l_info, nr_bins, bins)
    #pars = dump_colorcorrections(color_corr, mask_TP, pars)
    #pars = add_calibration(channel, pars)
    # Expand the binning to one diagonal block per requested spectrum.
    bins = expand_bins(bins, sum(has_cl))
    Acmb = nm.ones(nT + nP*max(has_cl[1],has_cl[2]))
    return nT, nP, has_cl, frq, channel, lmin, lmax, nr_bins, bins, \
        qmins, qmaxs, Acmb, rqhat, cov_mat, pars
def input_from_config_file(pars):
    """Build all likelihood inputs directly from the config-file entries.

    Optionally reconstructs a binning matrix from ``bins.limit`` /
    ``bins.weights`` and reads the covariance matrix and binned spectra
    from the files named in the config.
    """
    print("Parsing binning information from config file")
    nT = pars.int.nT
    nP = pars.int.nP
    frq = pars.float_array.freq
    channel = pars.str_array.channel
    has_cl = pars.int_array.has_cl
    ncl = nm.sum(has_cl)
    if "bins.limit" in pars:
        # Explicit bin limits and weights: rebuild the binning matrix.
        blims = pars.int_array.bins_dot_limit
        lmin = pars.int(default=blims[0]).lmin
        lmax = pars.int(default=blims[-1]-1).lmax
        nell = lmax+1-lmin
        nq = len(blims)-1
        qwgh = pars.float_array.bins_dot_weights
        bins = nm.zeros((nq,nell),dtype=nm.double)
        bm = blims[0]
        wi = 0
        lmin = pars.int(default=blims[0]).lmin
        lmax = pars.int(default=blims[-1]-1).lmax
        # Fill each bin row with its slice of the weight vector.
        for i,bM in enumerate(blims[1:]):
            nb = bM - bm
            bins[i,bm-lmin:bM-lmin] = qwgh[wi:wi+nb]
            wi+=nb
            bm=bM
        bins = expand_bins(bins,ncl)
    else:
        # Unbinned case: one "bin" per multipole.
        lmin = pars.int.lmin
        lmax = pars.int.lmax
        nell = lmax+1-lmin
        nq = lmax+1-lmin
        bins = None
    if "qmins" in pars:
        qmins = pars.int_array.qmin
        qmaxs = pars.int_array.qmax
    else:
        # Default: every channel pair uses the full bin range.
        qmins = nm.zeros((len(channel),len(channel)))
        qmaxs = nm.ones((len(channel),len(channel)))*nq
    qmins.shape=((len(channel),len(channel)))
    qmaxs.shape=((len(channel),len(channel)))
    cov_mat = read_array(pars.str.mat)
    rqhat = read_array(pars.str.rqhat)
    Acmb = pars.float_array(default=nm.ones(len(frq))).Acmb
    return nT, nP, has_cl, frq, channel, lmin, lmax, nq, bins, qmins, qmaxs, \
        Acmb, rqhat, cov_mat
def main(argv):
    """Build the smica likelihood object described by the parameter file
    given as ``argv[1]`` and write it to ``pars.res_object``.

    The binning/spectra inputs come either from a full covariance-matrix
    FITS file or from explicit config entries; optional parametric, fixed,
    calibration, beam and noise components are then attached.
    """
    pars = clik.miniparse(argv[1])
    # Two input paths: full covariance-matrix FITS file, or plain config.
    if test_cov_mat_format(pars.str.mat):
        nT, nP, has_cl, frq, channel, lmin, lmax, nq, bins, qmins, qmaxs, \
            Acmb, rqhat, cov_mat, pars = input_from_cov_mat(pars)
    else:
        nT, nP, has_cl, frq, channel, lmin, lmax, nq, bins, qmins, qmaxs, \
            Acmb, rqhat, cov_mat = input_from_config_file(pars)
    ordering = ordering_TEB(nT,nP,has_cl)
    mask = smh.create_gauss_mask(nq,qmins,qmaxs,nT,nP,has_cl)
    # Uniform bin weights.
    wq = nm.ones(nq) *1.
    #if "rqhat" in pars:
    #  rqhat = read_array(pars.str.rqhat)
    #else:
    #  ordering_cl = [[int(v) for v in l.split("x")] for l in pars.str_array.cl_file_order]
    #  rqhat = nm.zeros((nq,(len(channel),len(channel))))
    #  cls = [read_cl(clc,qmins[o[0],o[1]],qmaxs[o[0],o[1]]) for o, clc in zip(ordering_cl,pars.str_array.cl_file)]
    #  for cl,o in zip(pars.str_array.cl_file,ordering_cl):
    #    cls = read_cl(cl,qmins[o[0],o[1]],qmaxs[o[0],o[1]])
    #    rqhat[qmins[o[0],o[1]]:qmaxs[o[0],o[1]],o[0],o[1]] = cls
    #    rqhat[qmins[o[0],o[1]]:qmaxs[o[0],o[1]],o[1],o[0]] = cls
    # Create the output object and the base smica likelihood group.
    root_grp,hf = php.baseCreateParobject(pars.res_object)
    lkl_grp = smh.base_smica(root_grp,has_cl,lmin,lmax,nT,nP,wq,rqhat,Acmb,None,bins)
    smh.set_criterion(lkl_grp,"gauss",mat=cov_mat,mask=mask,ordering=ordering)
    lkl_grp.attrs["dnames"] = php.pack256(*channel)
    # parametric components ?
    if "parametric" in pars:
        defaults = {}
        if "parametric.default.parameter" in pars:
            defaults = dict(list(zip(pars.str_array.parametric_dot_default_dot_parameter,pars.str_array.parametric_dot_default_dot_value)))
        rename = {}
        if "parametric.rename.from" in pars:
            rename = dict(list(zip(pars.str_array.parametric_dot_rename_dot_from,pars.str_array.parametric_dot_rename_dot_to)))
        keys = pars.str_array.parametric_dot_parameter
        # Default: no colour correction for any component.
        colors = [None]*1000
        if "parametric.color" in pars:
            colors = []
            for cl in pars.str_array.parametric_dot_color:
                if cl.lower()=="none":
                    colors += [nm.ones(len(frq))]
                else:
                    colors += [read_array(cl)]
        for ip,pname in enumerate(pars.str_array.parametric):
            print(pname)
            smh.add_parametric_component(lkl_grp,str(pname),frq,keys,lmin,lmax,defaults=defaults,color=colors[ip],rename=rename)
    # Some fix contribution (affected by beam and calib) ?
    if "rq_fix" in pars:
        for rqn in pars.str_array.rq_fix:
            smh.add_cst_component(lkl_grp,read_array(rqn))
    # a gcal component ?
    if "calib" in pars and pars.calib.strip():
        names = ["calib_"+v for v in pars.str_array.calib]
        # Default calibration ordering: one letter per T then P channel.
        calib_order = "abcdefghijklmnopqrstuvwxyz"[:nT]+"abcdefghijklmnopqrstuvwxyz"[:nP]
        if "calib.order" in pars:
            calib_order = pars.str.calib_dot_order
        P_track_T = pars.int(default=0).calib_dot_P_track_T
        calib_symetrize = pars.int(default=0).calib_dot_symetrize
        if "calib.gpelike" in pars and pars.int.calib_dot_gpelike!=0:
            smh.add_icalTP_component(lkl_grp,names,calib_order,P_track_T,calib_symetrize)
        else:
            smh.add_calTP_component(lkl_grp,names,calib_order,P_track_T,calib_symetrize)
    #if "beammode.select" in pars:
    #  names = ["beammode_"+v for v in pars.str_array.beammode_dot_select]
    #  tpl = [read_array(v) for v in pars.str_array.beammode_dot_data]
    #  smh.add_gcal2_component(lkl_grp,names,tpl)
    if "beam" in pars and pars.beam.strip():
        print("add beam eigenmodes", end=' ')
        if pars.bool(default=True).beam_dot_ortho:
            print("and ensure orthogonality", end=' ')
        print("")
        names = ["beam_"+v for v in pars.str_array.beam]
        # Total number of channels across the requested T/E/B spectra.
        m = nT*has_cl[0]+nP*has_cl[1]+nP*has_cl[2]
        bdir = pars.str.beam_dot_path.strip()
        modes = pars.str_array.beam_dot_modes
        neigen = pars.int(default=10).beam_dot_neigen
        # A T-only mode table is replicated over the polarisation blocks.
        if len(modes) == nT*nT:
            assert nT == nP or nP==0,"not ready yet"
            rmodes = []
            for i in range(m):
                for j in range(m):
                    rmodes +=[modes[(i%nT)*nT+(j%nT)]]
            modes = rmodes
        tmodes = nm.zeros((nq,m,m,neigen))
        if "beam.lmax_beam" in pars:
            lMb = pars.float_array.beam_dot_lmax_beam
            lMb.shape = (m,m)
        else:
            lMb = nm.ones((m,m))*(lmax)
        if "beam.lmin_beam" in pars:
            lmb = pars.float_array.beam_dot_lmin_beam
            lmb.shape = (m,m)
        else:
            lmb = nm.ones((m,m))*(lmin)
        for i in range(m):
            for j in range(i,m):
                lmo = nm.loadtxt(osp.join(bdir,modes[i*m+j]))
                lmo.shape=(10,-1)
                # Bin each of the 10 raw modes, windowed to [lmb, lMb].
                bmo = nm.array([nm.dot(bins[:nq,:lmax+1-lmin],lmo[t,lmin:lmax+1]*nm.where(nm.arange(lmin,lmax+1)<lMb[i,j]+1,1.,0.)*nm.where(nm.arange(lmin,lmax+1)>lmb[i,j]-1,1.,0.)) for t in range(10)])
                if pars.bool(default=False).beam_dot_ortho:
                    # Orthogonalize the binned modes via SVD.
                    a,b,c = nm.linalg.svd(bmo,False)
                    bmo = b[:,nm.newaxis]*c
                for t in range(neigen):
                    tmodes[:,i,j,t] = bmo[t]
                    tmodes[:,j,i,t] = tmodes[:,i,j,t]
        smh.add_beamTP_component(lkl_grp,names,neigen,tmodes,pars.bool(default=False).beam_dot_p_track_t)
    if "P_calib" in pars:
        smh.add_totcalP_component(lkl_grp,pars.P_calib)
    if "tot_calib" in pars:
        smh.add_totcal_component(lkl_grp,pars.tot_calib)
    if "self_calib" in pars:
        smh.add_totcal_component(lkl_grp,pars.self_calib)
    # Some noise ?
    if "rq_noise" in pars:
        for rqn in pars.str_array.rq_noise:
            smh.add_cst_component(lkl_grp,read_array(rqn))
    hf.close()
import sys
if __name__=="__main__":
main(sys.argv)
|
benabedREPO_NAMEclikPATH_START.@clik_extracted@clik-main@src@python@tools@prepare_plig.py@.PATH_END.py
|
{
"filename": "special.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/TidalPy/utilities/math/special.py",
"type": "Python"
}
|
""" This module provides several special functions that are specifically designed to work with TidalPy and its
dependencies (looking at you, Numba).
"""
from typing import TYPE_CHECKING
import numpy as np
from TidalPy.utilities.performance import njit, use_numba
if TYPE_CHECKING:
from TidalPy.utilities.types import NumArray
def _sqrt_neg_python(z: 'NumArray', is_real: bool = False) -> 'NumArray':
""" Square root - Allows for negative values
Parameters
----------
z : FloatArray
Input value (domain is all positive and negative numbers)
Returns
-------
z_sqrt : FloatArray
Output value (range is all positive values and complex numbers)
"""
if is_real:
# First solve the square root assuming z is positive.
z_sqrt_abs = np.sqrt(np.abs(z))
# Now correct the negatives (this will either be a Boolean or an array of Booleans, depending upon input type)
z_sqrt = (np.real(z) > 0.) * z_sqrt_abs + \
(np.real(z) < 0.) * z_sqrt_abs * 1.0j
else:
# This is a more "complex" process because the input could already be both negative AND complex.
z_r = np.real(z)
z_i = np.imag(z)
quad = np.sqrt(z_r * z_r + z_i * z_i)
real_part = np.sqrt((quad + z_r) / 2.)
imag_part = np.sqrt((quad - z_r) / 2.)
z_sqrt = real_part + \
(z_i != 0.) * imag_part * np.sign(z_i) * 1.0j + \
(z_i == 0.) * imag_part * 1.0j
return z_sqrt
# Imaginary square roots.
# Select the implementation of `sqrt_neg` at import time depending on whether
# Numba acceleration is active.
if use_numba:
    # TODO: Numba currently does not support wrapping np.lib.scimath.sqrt, so we have to define our own function.
    # However, numba.njit of np.sqrt is about 10x faster than np.sqrt with floats and about 2x fast with arrays.
    # So the above method is actually pretty efficient.
    sqrt_neg = njit(cacheable=True)(_sqrt_neg_python)
else:
    # Numpy already has a built in function to handle this. Use it instead
    def sqrt_neg(z, is_real: bool = False):
        """Complex-aware square root via numpy.

        `is_real` is accepted only for signature parity with the Numba
        implementation above; it is ignored here because
        `np.lib.scimath.sqrt` handles negative inputs directly.
        """
        return np.lib.scimath.sqrt(z)
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@TidalPy@utilities@math@special.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "rabrahm/ceres",
"repo_path": "ceres_extracted/ceres-master/utils/OptExtract/setup.py",
"type": "Python"
}
|
from distutils.core import setup, Extension
import numpy
import os
"""
According to GSL documentation (http://www.gnu.org/software/gsl/manual/html_node/Shared-Libraries.html), in order to run the different operations one must include the GSL library, the GSLCBLAS library and the math library. To compile in C one must do:
gcc -Wall -c filename.c
And then:
gcc -static nombredelarchivo.o -lgsl -lgslcblas -lm
The first part is done by Python by this file. The second part (adding "-lgsl -lgslcblas -lm"), obviously isn't. To add any libraries that in C would be called by:
gcc -static nombredelarchivo.o -lname1 -lname2 -lname3...
Is as simple as putting libraries=['name1','name2',...] inside the Extension module. Here we do it with "gsl", "gslcblas" and "m".
"""
# Locate the GSL installation prefix: the top-level build writes the chosen
# prefix into gsl.temp; fall back to /usr/local when that file is absent.
if os.access('../../gsl.temp', os.F_OK):
    # Use a context manager so the file handle is closed promptly (the
    # original left the handle open for the garbage collector to reap).
    with open('../../gsl.temp', 'r') as gsl_file:
        path = gsl_file.readlines()[0]
else:
    path = '/usr/local'

# Build the C extension, linking against GSL, its CBLAS implementation and
# libm — the "-lgsl -lgslcblas -lm" link line described in the docstring
# above — and picking up headers from numpy and the GSL prefix.
module = Extension('Marsh', sources=['Marsh.c'],
                   libraries=['gsl', 'gslcblas', 'm'],
                   library_dirs=[path + '/lib'],
                   include_dirs=[numpy.get_include(), path + '/include'])

setup(name='Marsh Algorithm: C/Python Extension ', version='1.0',
      ext_modules=[module])
|
rabrahmREPO_NAMEceresPATH_START.@ceres_extracted@ceres-master@utils@OptExtract@setup.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "NiallJeffrey/DeepMass",
"repo_path": "DeepMass_extracted/DeepMass-main/DES_mass_maps_demo/original_run_scripts/training_data/__init__.py",
"type": "Python"
}
|
NiallJeffreyREPO_NAMEDeepMassPATH_START.@DeepMass_extracted@DeepMass-main@DES_mass_maps_demo@original_run_scripts@training_data@__init__.py@.PATH_END.py
|
|
{
"filename": "_opacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/marker/_opacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `scattercarpet.marker.opacity` property.

    Accepts a number (or, since `array_ok=True`, an array of numbers)
    constrained to the closed interval [0, 1].
    """

    def __init__(
        self, plotly_name="opacity", parent_name="scattercarpet.marker", **kwargs
    ):
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Each option below is a default that the caller may override by
            # passing the same key in `kwargs` (popped so it is not sent twice).
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "style"),
            max=kwargs.pop("max", 1),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@marker@_opacity.py@.PATH_END.py
|
{
"filename": "dtype_policy.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/dtype_policies/dtype_policy.py",
"type": "Python"
}
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
QUANTIZATION_MODES = ("int8", "float8")
@keras_export(
    [
        "keras.DTypePolicy",
        "keras.dtype_policies.DTypePolicy",
        "keras.mixed_precision.DTypePolicy",  # Legacy
        "keras.mixed_precision.Policy",  # Legacy
    ]
)
class DTypePolicy:
    """A dtype policy for a Keras layer.

    A dtype policy determines a layer's computation and variable dtypes. Each
    layer has a policy. Policies can be passed to the `dtype` argument of layer
    constructors, or a global policy can be set with
    `keras.config.set_dtype_policy`.

    Args:
        name: The policy name, which determines the compute and variable dtypes.
            Can be any dtype name, such as `"float32"` or `"float64"`,
            which causes both the compute and variable dtypes
            to be that dtype.
            Can also be the string `"mixed_float16"` or `"mixed_bfloat16"`,
            which causes the compute dtype to be `float16` or `bfloat16`
            and the variable dtype to be `float32`.

    Typically you only need to interact with dtype policies when using mixed
    precision, which is the use of float16 or bfloat16 for computations and
    float32 for variables. This is why the term `mixed_precision` appears in the
    API name. Mixed precision can be enabled by passing `"mixed_float16"` or
    `"mixed_bfloat16"` to `keras.mixed_precision.set_dtype_policy()`.

    >>> keras.config.set_dtype_policy("mixed_float16")
    >>> layer1 = keras.layers.Dense(10)
    >>> layer1.dtype_policy  # layer1 will automatically use mixed precision
    <DTypePolicy "mixed_float16">
    >>> # Can optionally override layer to use float32
    >>> # instead of mixed precision.
    >>> layer2 = keras.layers.Dense(10, dtype="float32")
    >>> layer2.dtype_policy
    <DTypePolicy "float32">
    >>> # Set policy back to initial float32.
    >>> keras.config.set_dtype_policy('float32')

    In the example above, passing `dtype="float32"` to the layer is
    equivalent to passing
    `dtype=keras.config.DTypePolicy("float32")`.

    In general, passing a dtype policy name to a layer is equivalent
    to passing the corresponding policy, so it is never necessary
    to explicitly construct a `DTypePolicy` object.
    """

    def __init__(self, name=None):
        # Use the global dtype policy if `name` is not specified
        if name is None:
            name = dtype_policy().name
        self._name = name
        self._compute_dtype, self._variable_dtype = self._parse_name(name)
        # Plain (non-quantized) policies carry no quantization mode; the
        # quantized subclasses overwrite this in their own constructors.
        self._quantization_mode = None

    def _parse_name(self, name):
        """Parses a `DTypePolicy` name into a compute and variable dtype.

        Args:
            name: The name of the policy.

        Returns:
            The `(compute_dtype, variable_dtype)` pair.

        Raises:
            TypeError: If `name` is not a string.
            ValueError: If `name` is not a recognized policy or dtype name.
        """
        if not isinstance(name, str):
            raise TypeError(
                "'name' must be a string, such as 'mixed_float16'. "
                f"Received: name={name} (of type {type(name)})"
            )
        # Mixed policies compute in reduced precision but keep float32
        # variables.
        if name == "mixed_float16":
            return "float16", "float32"
        elif name == "mixed_bfloat16":
            return "bfloat16", "float32"
        # Any other valid dtype name is used for both compute and variables.
        try:
            dtype = backend.standardize_dtype(name)
            return dtype, dtype
        except ValueError:
            raise ValueError(
                f"Cannot convert '{name}' to a mixed precision "
                "DTypePolicy. Valid policies include 'mixed_float16', "
                "'mixed_bfloat16', and the name of any float dtype such as "
                "'float32'."
            )

    @property
    def variable_dtype(self):
        """The variable dtype of this policy.

        This is the dtype layers will create their variables in, unless a layer
        explicitly chooses a different dtype. If this is different than
        `DTypePolicy.compute_dtype`, Layers will cast variables to
        the compute dtype to avoid type errors.

        Variable regularizers are run in the variable dtype, not the compute
        dtype.

        Returns:
            The variable dtype of this policy, as a string.
        """
        return self._variable_dtype

    @property
    def compute_dtype(self):
        """The compute dtype of this policy.

        This is the dtype layers will do their computations in. Typically layers
        output tensors with the compute dtype as well.

        Note that even if the compute dtype is float16 or bfloat16, hardware
        devices may not do individual adds, multiplies, and other fundamental
        operations in float16 or bfloat16, but instead may do some of them in
        float32 for numeric stability. The compute dtype is the dtype of the
        inputs and outputs of the ops that the layer executes.
        Internally, many ops will do certain internal calculations in
        float32 or some other device-internal intermediate format with higher
        precision than float16/bfloat16, to increase numeric stability.

        Returns:
            The compute dtype of this policy, as a string.
        """
        return self._compute_dtype

    @property
    def name(self):
        """Returns the name of this policy."""
        return self._name

    @property
    def quantization_mode(self):
        """The quantization mode of this policy.

        Returns:
            The quantization mode of this policy, as a string. If this policy is
            not quantized, it will return `None`.
        """
        return self._quantization_mode

    def convert_input(self, x, autocast, dtype):
        """Converts the input dtype based on `autocast` and `dtype`.

        Note that `x` can be a tensor, symbolic tensor or numpy array, and this
        method will keep integer inputs untouched and only apply casting to
        floats (see `_should_cast`).
        """
        dtype = backend.standardize_dtype(dtype)
        if backend.is_tensor(x):
            if self._should_cast(x, autocast, dtype):
                x = backend.cast(x, dtype=dtype)
            return x
        elif backend.is_keras_tensor(x):
            if self._should_cast(x, autocast, dtype):
                # Symbolic tensors are cast through the ops layer.
                x = ops.cast(x, dtype=dtype)
            return x
        elif hasattr(x, "__array__"):
            try:
                x = backend.convert_to_tensor(x)
            except TypeError:
                # Some array-likes cannot be converted with an inferred dtype;
                # retry with the target dtype made explicit.
                x = backend.convert_to_tensor(x, dtype=dtype)
            if self._should_cast(x, autocast, dtype):
                x = backend.cast(x, dtype=dtype)
            return x
        # Anything else (e.g. python scalars) passes through unchanged.
        return x

    def get_config(self):
        # Serialization: the policy is fully described by its name.
        return {"name": self.name}

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def __repr__(self):
        class_name = self.__class__.__name__
        # `FloatDTypePolicy` is a pure alias, so report it as `DTypePolicy`.
        if class_name == "FloatDTypePolicy":
            class_name = "DTypePolicy"
        return f'<{class_name} "{self._name}">'

    def __eq__(self, other):
        # `DTypePolicy` and its alias `FloatDTypePolicy` compare as
        # interchangeable; any other subclass requires an exact type match.
        if self.__class__ in (DTypePolicy, FloatDTypePolicy):
            if type(other) not in (DTypePolicy, FloatDTypePolicy):
                return False
        else:
            if type(other) is not self.__class__:
                return False
        return self._name == other._name

    def _should_cast(self, x, autocast, dtype):
        # Cast only when autocasting is requested, `x` is a float tensor, and
        # its dtype differs from the target — integers are never touched.
        x_dtype = backend.standardize_dtype(x.dtype)
        if autocast and backend.is_float_dtype(x_dtype) and x_dtype != dtype:
            return True
        else:
            return False
@keras_export(
    ["keras.FloatDTypePolicy", "keras.dtype_policies.FloatDTypePolicy"]
)
class FloatDTypePolicy(DTypePolicy):
    # An alias for `DTypePolicy`, kept so existing code that references or
    # subclasses `FloatDTypePolicy` keeps working; `DTypePolicy.__repr__`
    # and `__eq__` explicitly treat the two classes as interchangeable.
    pass
@keras_export("keras.dtype_policies.QuantizedDTypePolicy")
class QuantizedDTypePolicy(DTypePolicy):
    """Dtype policy for quantized computation.

    The policy name takes the form ``"{mode}_from_{source_name}"`` where
    `mode` is one of `QUANTIZATION_MODES` and `source_name` is the name of
    the (float) policy the compute/variable dtypes are derived from.
    """

    def __init__(self, mode, source_name=None):
        # Use the global dtype policy if `source_name` is not specified
        if source_name is None:
            source_name = dtype_policy().name
        name = f"{mode}_from_{source_name}"
        # NOTE: `DTypePolicy.__init__` is deliberately not called; all state
        # is set directly here, with dtypes parsed from the *source* name.
        self._compute_dtype, self._variable_dtype = self._parse_name(
            source_name
        )
        # Validate the mode before committing any naming state.
        self._check_quantization_mode(mode, self._compute_dtype)
        self._name = name
        self._source_name = source_name
        self._quantization_mode = mode

    def __eq__(self, other):
        # Base comparison already enforces an exact type match and equal
        # names; additionally require matching mode and source policy.
        if super().__eq__(other) is False:
            return False
        return (
            self._quantization_mode == other._quantization_mode
            and self._source_name == other._source_name
        )

    def get_config(self):
        # `name` is reconstructed from these two fields, so only they are
        # serialized.
        return {
            "mode": self._quantization_mode,
            "source_name": self._source_name,
        }

    def _check_quantization_mode(self, mode, compute_dtype):
        """Raise if `mode` is unknown or incompatible with `compute_dtype`."""
        if mode not in QUANTIZATION_MODES:
            raise ValueError(
                "Invalid quantization mode. "
                f"Expected one of {QUANTIZATION_MODES}. "
                f"Received: mode={mode}"
            )
        # int8 quantization is rejected for float16 computation.
        if compute_dtype == "float16" and mode == "int8":
            raise ValueError(
                f"Quantization mode='{mode}' doesn't work well with "
                "compute_dtype='float16'."
            )
@keras_export("keras.dtype_policies.QuantizedFloat8DTypePolicy")
class QuantizedFloat8DTypePolicy(QuantizedDTypePolicy):
    """Quantized dtype policy for float8.

    In addition to the mode/source handling in `QuantizedDTypePolicy`, this
    policy carries the length of the amax history window used for scaling
    factor computation in float8 training.
    """

    # Single source of truth for the default window length. The constructor
    # default below references this attribute so the two values can never
    # drift apart (previously 1024 was duplicated as a literal).
    default_amax_history_length = 1024

    def __init__(
        self,
        mode,
        source_name=None,
        amax_history_length=default_amax_history_length,
    ):
        super().__init__(mode=mode, source_name=source_name)
        if not isinstance(amax_history_length, int):
            raise TypeError(
                "`amax_history_length` must be an integer. "
                f"Received: amax_history_length={amax_history_length}"
            )
        self._amax_history_length = amax_history_length

    @property
    def amax_history_length(self):
        """The length of the amax history window.

        This property is used for scaling factor computation in float8 training.
        """
        return self._amax_history_length

    def __eq__(self, other):
        # Equal only if the base (mode/source/name) matches AND the history
        # window lengths agree.
        if super().__eq__(other) is False:
            return False
        return self._amax_history_length == other._amax_history_length

    def get_config(self):
        config = super().get_config()
        config.update({"amax_history_length": self.amax_history_length})
        return config
@keras_export(
    [
        "keras.config.set_dtype_policy",
        "keras.mixed_precision.set_dtype_policy",  # Legacy
        "keras.mixed_precision.set_global_policy",  # Legacy
    ]
)
def set_dtype_policy(policy):
    """Sets the default dtype policy globally.

    Example:

    >>> keras.config.set_dtype_policy("mixed_float16")
    """
    if isinstance(policy, str):
        # String names prefixed with a quantization mode (e.g.
        # "int8_from_float32") describe quantized policies; any other
        # string is treated as a plain dtype policy name.
        if policy.startswith(QUANTIZATION_MODES):
            policy = _get_quantized_dtype_policy_by_str(policy)
        else:
            policy = DTypePolicy(policy)
    elif not isinstance(policy, DTypePolicy):
        raise ValueError(
            "Invalid `policy` argument. "
            "Expected the string name of a policy "
            "(such as 'mixed_float16') or a `DTypePolicy` "
            f"instance. Received: policy={policy} "
            f"(of type {type(policy)})"
        )
    global_state.set_global_attribute("dtype_policy", policy)
@keras_export(
    [
        "keras.config.dtype_policy",
        "keras.mixed_precision.dtype_policy",  # Legacy
        "keras.mixed_precision.global_policy",  # Legacy
    ]
)
def dtype_policy():
    """Returns the current default dtype policy object."""
    current = global_state.get_global_attribute("dtype_policy", None)
    if current is None:
        # Lazily initialize from the backend's floatx and store it globally
        # so subsequent calls return the same object.
        current = DTypePolicy(backend.floatx())
        set_dtype_policy(current)
    return current
def _get_quantized_dtype_policy_by_str(policy):
    """Instantiate a quantized policy from a "<mode>_from_<source>" string."""
    if not isinstance(policy, str):
        raise TypeError(f"`policy` must be a string. Received: policy={policy}")
    if not policy.startswith(QUANTIZATION_MODES):
        raise ValueError(
            "`policy` is incompatible with the current supported quantization."
        )
    # The name must split into exactly (mode, source_name) around "_from_".
    parts = policy.split("_from_")
    if len(parts) != 2:
        raise ValueError(
            "Cannot convert `policy` into a valid pair (`mode`, `source_name`) "
            "to instantiate `QuantizedDTypePolicy`. "
            f"Received: policy={policy}"
        )
    mode, source_name = parts
    if policy.startswith("int8"):
        return QuantizedDTypePolicy(mode, source_name)
    if policy.startswith("float8"):
        return QuantizedFloat8DTypePolicy(mode, source_name)
    raise NotImplementedError
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@dtype_policies@dtype_policy.py@.PATH_END.py
|
{
"filename": "io_utils.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/utils/io_utils.py",
"type": "Python"
}
|
import sys
from absl import logging
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export(
    [
        "keras.config.enable_interactive_logging",
        "keras.utils.enable_interactive_logging",
    ]
)
def enable_interactive_logging():
    """Turn on interactive logging.

    When interactive logging is enabled, Keras displays logs via stdout.
    This provides the best experience when using Keras in an interactive
    environment such as a shell or a notebook.
    """
    # The flag is consumed by `print_msg` below when routing messages.
    global_state.set_global_attribute("interactive_logging", True)
@keras_export(
    [
        "keras.config.disable_interactive_logging",
        "keras.utils.disable_interactive_logging",
    ]
)
def disable_interactive_logging():
    """Turn off interactive logging.

    When interactive logging is disabled, Keras sends logs to `absl.logging`.
    This is the best option when using Keras in a non-interactive
    way, such as running a training or inference job on a server.
    """
    # The flag is consumed by `print_msg` below when routing messages.
    global_state.set_global_attribute("interactive_logging", False)
@keras_export(
    [
        "keras.config.is_interactive_logging_enabled",
        "keras.utils.is_interactive_logging_enabled",
    ]
)
def is_interactive_logging_enabled():
    """Check if interactive logging is enabled.

    To switch between writing logs to stdout and `absl.logging`, you may use
    `keras.config.enable_interactive_logging()` and
    `keras.config.disable_interactive_logging()`.

    Returns:
        Boolean, `True` if interactive logging is enabled,
        and `False` otherwise.
    """
    # Interactive logging defaults to True when the attribute was never set.
    return global_state.get_global_attribute("interactive_logging", True)
def set_logging_verbosity(level):
    """Sets the verbosity level for logging.

    Supported log levels are as follows:

    - `"FATAL"` (least verbose)
    - `"ERROR"`
    - `"WARNING"`
    - `"INFO"`
    - `"DEBUG"` (most verbose)

    Args:
        level: A string corresponding to the level of verbosity for logging.

    Raises:
        ValueError: If `level` is not one of the supported names.
    """
    valid_levels = {
        "FATAL": logging.FATAL,
        "ERROR": logging.ERROR,
        "WARNING": logging.WARNING,
        "INFO": logging.INFO,
        "DEBUG": logging.DEBUG,
    }
    if valid_levels.get(level) is None:
        raise ValueError(
            "Please pass a valid level for logging verbosity. "
            f"Expected one of: {set(valid_levels.keys())}. "
            f"Received: {level}"
        )
    logging.set_verbosity(valid_levels[level])
def print_msg(message, line_break=True):
    """Print the message to absl logging or stdout.

    Args:
        message: Object to print; coerced to `str`.
        line_break: Whether to append a newline in interactive mode.
    """
    message = str(message)
    if is_interactive_logging_enabled():
        message = message + "\n" if line_break else message
        try:
            sys.stdout.write(message)
        except UnicodeEncodeError:
            # If the encoding differs from UTF-8, `sys.stdout.write` may fail.
            # To address this, replace special unicode characters in the
            # message, and then encode and decode using the target encoding.
            message = _replace_special_unicode_character(message)
            message_bytes = message.encode(sys.stdout.encoding, errors="ignore")
            message = message_bytes.decode(sys.stdout.encoding)
            sys.stdout.write(message)
        # Flush so progress output appears immediately.
        sys.stdout.flush()
    else:
        # Non-interactive mode: route through absl logging instead of stdout.
        logging.info(message)
def ask_to_proceed_with_overwrite(filepath):
    """Produces a prompt asking about overwriting a file.

    Args:
        filepath: the path to the file to be overwritten.

    Returns:
        True if we can proceed with overwrite, False otherwise.
    """
    answer = (
        input(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
        .strip()
        .lower()
    )
    # Keep prompting until an unambiguous answer is given.
    while answer not in ("y", "n"):
        answer = (
            input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
        )
    if answer == "n":
        return False
    print_msg("[TIP] Next time specify overwrite=True!")
    return True
def _replace_special_unicode_character(message):
message = str(message).replace("━", "=") # Fall back to Keras2 behavior.
return message
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@utils@io_utils.py@.PATH_END.py
|
{
"filename": "test_hypergeometric.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/special/tests/test_hypergeometric.py",
"type": "Python"
}
|
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import scipy.special as sc
class TestHyperu:
    """Tests for `scipy.special.hyperu`, the confluent hypergeometric
    function of the second kind U(a, b, x)."""

    def test_negative_x(self):
        # The implementation returns NaN for all negative x, across a grid
        # of (a, b) values.
        a, b, x = np.meshgrid(
            [-1, -0.5, 0, 0.5, 1],
            [-1, -0.5, 0, 0.5, 1],
            np.linspace(-100, -1, 10),
        )
        assert np.all(np.isnan(sc.hyperu(a, b, x)))

    def test_special_cases(self):
        # U(0, b, x) is identically 1.
        assert sc.hyperu(0, 1, 1) == 1.0

    @pytest.mark.parametrize('a', [0.5, 1, np.nan])
    @pytest.mark.parametrize('b', [1, 2, np.nan])
    @pytest.mark.parametrize('x', [0.25, 3, np.nan])
    def test_nan_inputs(self, a, b, x):
        # The result is NaN exactly when at least one argument is NaN.
        assert np.isnan(sc.hyperu(a, b, x)) == np.any(np.isnan([a, b, x]))
class TestHyp1f1:
    """Tests for `scipy.special.hyp1f1`, the confluent hypergeometric
    function of the first kind 1F1(a; b; x)."""

    @pytest.mark.parametrize('a, b, x', [
        (np.nan, 1, 1),
        (1, np.nan, 1),
        (1, 1, np.nan)
    ])
    def test_nan_inputs(self, a, b, x):
        # NaN in any argument propagates to the result.
        assert np.isnan(sc.hyp1f1(a, b, x))

    def test_poles(self):
        # 1F1 has poles at nonpositive integer b (for a not cancelling them).
        assert_equal(sc.hyp1f1(1, [0, -1, -2, -3, -4], 0.5), np.inf)

    @pytest.mark.parametrize('a, b, x, result', [
        (-1, 1, 0.5, 0.5),
        (1, 1, 0.5, 1.6487212707001281468),
        (2, 1, 0.5, 2.4730819060501922203),
        (1, 2, 0.5, 1.2974425414002562937),
        (-10, 1, 0.5, -0.38937441413785204475)
    ])
    def test_special_cases(self, a, b, x, result):
        # Hit all the special case branches at the beginning of the
        # function. Desired answers computed using Mpmath.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)

    @pytest.mark.parametrize('a, b, x, result', [
        (1, 1, 0.44, 1.5527072185113360455),
        (-1, 1, 0.44, 0.55999999999999999778),
        (100, 100, 0.89, 2.4351296512898745592),
        (-100, 100, 0.89, 0.40739062490768104667),
        (1.5, 100, 59.99, 3.8073513625965598107),
        (-1.5, 100, 59.99, 0.25099240047125826943)
    ])
    def test_geometric_convergence(self, a, b, x, result):
        # Test the region where we are relying on the ratio of
        #
        # (|a| + 1) * |x| / |b|
        #
        # being small. Desired answers computed using Mpmath
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)

    @pytest.mark.parametrize('a, b, x, result', [
        (-1, 1, 1.5, -0.5),
        (-10, 1, 1.5, 0.41801777430943080357),
        (-25, 1, 1.5, 0.25114491646037839809),
        (-50, 1, 1.5, -0.25683643975194756115),
        (-80, 1, 1.5, -0.24554329325751503601),
        (-150, 1, 1.5, -0.173364795515420454496),
    ])
    def test_a_negative_integer(self, a, b, x, result):
        # Negative integer a makes 1F1 a polynomial in x.
        # Desired answers computed using Mpmath.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1.5e-14)

    @pytest.mark.parametrize('a, b, x, expected', [
        (0.01, 150, -4, 0.99973683897677527773),        # gh-3492
        (1, 5, 0.01, 1.0020033381011970966),            # gh-3593
        (50, 100, 0.01, 1.0050126452421463411),         # gh-3593
        (1, 0.3, -1e3, -7.011932249442947651455e-04),   # gh-14149
        (1, 0.3, -1e4, -7.001190321418937164734e-05),   # gh-14149
        (9, 8.5, -350, -5.224090831922378361082e-20),   # gh-17120
        (9, 8.5, -355, -4.595407159813368193322e-20),   # gh-17120
        (75, -123.5, 15, 3.425753920814889017493e+06),
    ])
    def test_assorted_cases(self, a, b, x, expected):
        # Expected values were computed with mpmath.hyp1f1(a, b, x).
        assert_allclose(sc.hyp1f1(a, b, x), expected, atol=0, rtol=1e-14)

    def test_a_neg_int_and_b_equal_x(self):
        # This is a case where the Boost wrapper will call hypergeometric_pFq
        # instead of hypergeometric_1F1. When we use a version of Boost in
        # which https://github.com/boostorg/math/issues/833 is fixed, this
        # test case can probably be moved into test_assorted_cases.
        # The expected value was computed with mpmath.hyp1f1(a, b, x).
        a = -10.0
        b = 2.5
        x = 2.5
        expected = 0.0365323664364104338721
        computed = sc.hyp1f1(a, b, x)
        assert_allclose(computed, expected, atol=0, rtol=1e-13)

    @pytest.mark.parametrize('a, b, x, desired', [
        (-1, -2, 2, 2),
        (-1, -4, 10, 3.5),
        (-2, -2, 1, 2.5)
    ])
    def test_gh_11099(self, a, b, x, desired):
        # Both a and b are nonpositive integers with a > b, so the pole in b
        # is cancelled and a finite polynomial value is expected.
        # All desired results computed using Mpmath
        assert sc.hyp1f1(a, b, x) == desired

    @pytest.mark.parametrize('a', [-3, -2])
    def test_x_zero_a_and_b_neg_ints_and_a_ge_b(self, a):
        # At x == 0 the series reduces to its leading term, 1.
        assert sc.hyp1f1(a, -3, 0) == 1

    # The "legacy edge cases" mentioned in the comments in the following
    # tests refers to the behavior of hyp1f1(a, b, x) when b is a nonpositive
    # integer. In some subcases, the behavior of SciPy does not match that
    # of Boost (1.81+), mpmath and Mathematica (via Wolfram Alpha online).
    # If the handling of these edges cases is changed to agree with those
    # libraries, these test will have to be updated.

    @pytest.mark.parametrize('b', [0, -1, -5])
    def test_legacy_case1(self, b):
        # Test results of hyp1f1(0, n, x) for n <= 0.
        # This is a legacy edge case.
        # Boost (versions greater than 1.80), Mathematica (via Wolfram Alpha
        # online) and mpmath all return 1 in this case, but SciPy's hyp1f1
        # returns inf.
        assert_equal(sc.hyp1f1(0, b, [-1.5, 0, 1.5]), [np.inf, np.inf, np.inf])

    def test_legacy_case2(self):
        # This is a legacy edge case.
        # In software such as boost (1.81+), mpmath and Mathematica,
        # the value is 1.
        assert sc.hyp1f1(-4, -3, 0) == np.inf
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@special@tests@test_hypergeometric.py@.PATH_END.py
|
{
"filename": "test_chained_assignment_deprecation.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/copy_view/test_chained_assignment_deprecation.py",
"type": "Python"
}
|
import numpy as np
import pytest
from pandas.errors import ChainedAssignmentError
from pandas import DataFrame
import pandas._testing as tm
@pytest.mark.parametrize(
    "indexer", [0, [0, 1], slice(0, 2), np.array([True, False, True])]
)
def test_series_setitem(indexer):
    # ensure we only get a single warning for those typical cases of chained
    # assignment
    df = DataFrame({"a": [1, 2, 3], "b": 1})

    # using custom check instead of tm.assert_produces_warning because that doesn't
    # fail if multiple warnings are raised
    with pytest.warns() as record:  # noqa: TID251
        df["a"][indexer] = 0
    # Exactly one warning, and it must be the chained-assignment category.
    assert len(record) == 1
    assert record[0].category == ChainedAssignmentError
@pytest.mark.parametrize(
    "indexer", ["a", ["a", "b"], slice(0, 2), np.array([True, False, True])]
)
def test_frame_setitem(indexer):
    # Setting through a row-sliced intermediate frame must raise the
    # chained-assignment warning for each typical indexer kind.
    df = DataFrame({"a": [1, 2, 3, 4, 5], "b": 1})

    with tm.raises_chained_assignment_error():
        df[0:3][indexer] = 10
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@copy_view@test_chained_assignment_deprecation.py@.PATH_END.py
|
{
"filename": "parameters_spect.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/core/parameters_spect.py",
"type": "Python"
}
|
# This parameter file contains the parameters related to the primitives located
# in the primitives_spect.py file, in alphabetical order.
from astropy import table, units as u
from astropy.io import registry
from astrodata import AstroData
from geminidr.core import parameters_generic
from gempy.library import config, astrotools as at
from . import parameters_preprocess
def list_of_ints_check(value):
    """Config check: `value` must be a comma-separated list of integers.

    Raises ValueError (from int()) on any non-integer token; returns True
    when every token parses.
    """
    for token in str(value).split(','):
        int(token)
    return True
def table_writing_formats():
    """Return a dict keyed by the non-deprecated astropy Table writer formats.

    Values are empty strings (only the keys are meaningful to callers).
    """
    known = registry.get_formats(table.Table, readwrite="Write")
    return {
        fmt: ""
        for fmt, deprecated in known["Format", "Deprecated"]
        if deprecated != "Yes"
    }
def validate_regions_float(value):
    # Config check: the parser raises if `value` is not a valid float-valued
    # region string (step syntax not permitted); reaching `return` means valid.
    at.parse_user_regions(value, dtype=float, allow_step=False)
    return True
def validate_regions_int(value, multiple=False):
    # Config check: parse `value` as integer region(s) (step syntax allowed).
    # When `multiple` is False, exactly one region is required.
    ranges = at.parse_user_regions(value, dtype=int, allow_step=True)
    return multiple or len(ranges) == 1
# Parameters for the adjustWavelengthZeroPoint primitive.
class adjustWavelengthZeroPointConfig(config.Config):
    suffix = config.Field("Filename suffix", str, "_wavelengthZeroPointAdjusted",
                          optional=True)
    center = config.RangeField("Central row/column to extract", int, None,
                               min=1, optional=True)
    # shift=None triggers automatic determination (bounded by debug_max_shift).
    shift = config.RangeField("Shift to apply in pixels (None: determine automatically)",
                              float, 0, min=-2048, max=2048, optional=True)
    verbose = config.Field("Print extra information", bool, False,
                           optional=True)
    debug_max_shift = config.RangeField("Maximum shift to allow (in pixels)",
                                        float, 5, min=0)
# Parameters for the adjustWCSToReference primitive.
class adjustWCSToReferenceConfig(config.Config):
    suffix = config.Field("Filename suffix",
                          str, "_wcsCorrected", optional=True)
    # Primary alignment method; `fallback` is tried if this one fails.
    method = config.ChoiceField("Alignment method", str,
                                allowed={"sources_wcs": "Match sources using WCS",
                                         "sources_offsets": "Match sources using telescope offsets",
                                         "offsets": "Use telescope offsets only"},
                                default="sources_wcs", optional=False)
    fallback = config.ChoiceField("Fallback method", str,
                                  allowed={"sources_offsets": "Match sources using telescope offsets",
                                           "offsets": "Use telescope offsets only"},
                                  default="offsets", optional=True)
    region = config.Field("Pixel section for measuring the spatial profile",
                          str, None, optional=True, check=validate_regions_int)
    tolerance = config.RangeField("Maximum distance from the header offset, "
                                  "for the correlation method (arcsec)",
                                  float, 1, min=0., optional=True)
    debug_block_resampling = config.Field("Block resampling in the spatial direction?", bool, False)
    debug_plots = config.Field("Plot the cross-correlation function?", bool, False)
# Parameters for the attachPinholeModel primitive; inherits the calibration
# requirement options from calRequirementConfig.
class attachPinholeModelConfig(parameters_generic.calRequirementConfig):
    suffix = config.Field("Filename suffix", str, "_pinholeModelAttached", optional=True)
    # Explicit pinhole calibration frame (filename or AstroData object);
    # None lets the calibration system choose.
    pinhole = config.Field("Pinhole frame", (str, AstroData), None, optional=True)
# Parameters for the attachWavelengthSolution primitive.
class attachWavelengthSolutionConfig(config.Config):
    suffix = config.Field("Filename suffix", str, "_wavelengthSolutionAttached", optional=True)
    # single=True: a single arc may be given rather than a list.
    arc = config.ListField("Arc(s) with distortion map", (AstroData, str), None,
                           optional=True, single=True)
# Parameters for the calculateSensitivity primitive; extends the shared
# 1D-fitting configuration.
class calculateSensitivityConfig(config.core_1Dfitting_config):
    suffix = config.Field("Filename suffix", str, "_sensitivityCalculated", optional=True)
    filename = config.Field("Name of spectrophotometric data file", str, None, optional=True)
    in_vacuo = config.Field("Are spectrophotometric data wavelengths measured "
                            "in vacuo?", bool, None, optional=True)
    bandpass = config.RangeField("Bandpass width (nm) if not supplied",
                                 float, 0.001, min=0.001, max=10.)
    resampling = config.RangeField("Resampling interval (nm) for spectrophotometric data file",
                                   float, None, min=0, inclusiveMin=False, optional=True)
    debug_airmass0 = config.Field("Calculate sensitivity curve at zero airmass?",
                                  bool, False)
    regions = config.Field("Wavelength sample regions (nm)", str, None, optional=True,
                           check=validate_regions_float)
    debug_plot = config.Field("Plot sensitivity curve?", bool, False)
    interactive = config.Field("Display interactive fitter?", bool, False)

    def setDefaults(self):
        # The inherited `grow` fitting parameter is not applicable to
        # sensitivity calculation, so remove it from this config.
        del self.grow
class createNewApertureConfig(config.Config):
    """Configuration for the createNewAperture() primitive.

    Either both of ``aper_lower``/``aper_upper`` must be given (defining a
    custom aperture size) or neither (the new aperture presumably inherits
    the size of the base aperture -- confirm in the primitive).
    """
    aperture = config.Field("Base aperture to offset from", int, None, optional=False)
    shift = config.Field("Shift (in pixels) to new aperture", float, None, optional=False)
    aper_upper = config.RangeField("Offset to new upper edge", float, None,
                                   optional=True, min=0., inclusiveMin=False)
    aper_lower = config.RangeField("Offset to new lower edge", float, None,
                                   optional=True, max=0., inclusiveMax=False)
    suffix = config.Field("Filename suffix", str, "_newApertureCreated", optional=True)

    def validate(self):
        config.Config.validate(self)
        # Compare against None explicitly. The original truthiness test
        # ("self.aper_lower and self.aper_upper") only worked because the
        # range limits happen to exclude 0; an explicit None count states
        # the intent ("exactly one edge given is an error") directly.
        if [self.aper_lower, self.aper_upper].count(None) == 1:
            raise ValueError("Both aper_lower and aper_upper must either be "
                             "specified, or left as None.")
class cutSlitsConfig(config.Config):
    """Configuration for the cutSlits() primitive."""
    suffix = config.Field("Filename suffix", str, "_slitsCut", optional=True)
class determineDistortionConfig(config.Config):
    """Configuration for the determineDistortion() primitive."""
    suffix = config.Field("Filename suffix", str, "_distortionDetermined", optional=True)
    spatial_order = config.RangeField("Fitting order in spatial direction", int, 3, min=1)
    spectral_order = config.RangeField("Fitting order in spectral direction", int, 4, min=0)
    id_only = config.Field("Use only lines identified for wavelength calibration?", bool, False)
    min_snr = config.RangeField("Minimum SNR for peak detection", float, 5., min=3.)
    # None => presumably determined from the data when reidentifying (confirm).
    fwidth = config.RangeField("Feature width in pixels if reidentifying",
                               float, None, min=1., optional=True)
    nsum = config.RangeField("Number of lines to sum", int, 10, min=1)
    step = config.RangeField("Step in rows/columns for tracing", int, 10, min=1)
    max_shift = config.RangeField("Maximum shift per pixel in line position",
                                  float, 0.05, min=0.001, max=0.1)
    max_missed = config.RangeField("Maximum number of steps to miss before a line is lost", int, 5, min=0)
    min_line_length = config.RangeField("Exclude line traces shorter than this fraction of spatial dimension",
                                        float, 0., min=0., max=1.)
    debug_reject_bad = config.Field("Reject lines with suspiciously high SNR (e.g. bad columns)?", bool, True)
    debug = config.Field("Display line traces on image display?", bool, False)
class determineSlitEdgesConfig(config.Config):
    """Configuration for the determineSlitEdges() primitive.

    ``edge1``/``edge2`` must be given together or not at all; when given,
    ``edge2`` (right/upper) must be strictly greater than ``edge1``
    (left/lower).
    """
    suffix = config.Field("Filename suffix", str, "_slitEdgesDetermined", optional=True)
    spectral_order = config.RangeField("Fitting order in spectral direction",
                                       int, 3, min=1)
    edge1 = config.RangeField("Left/lower edge of illuminated region",
                              float, None, min=1)
    edge2 = config.RangeField("Right/upper edge of illuminated region",
                              float, None, min=1)
    search_radius = config.RangeField("Radius (in pixels) to search for edges",
                                      float, 30, min=5)
    debug_plots = config.Field("Plot fits of edges and print extra information",
                               bool, False)
    debug_max_missed = config.RangeField("Maximum missed steps when tracing edges",
                                         int, 8, min=1)
    debug_max_shift = config.RangeField("Maximum perpendicular shift (in pixels) per pixel",
                                        float, 0.08, min=0.)
    debug_step = config.RangeField("Step size (in pixels) for fitting edges",
                                   int, 20, min=5)
    debug_nsum = config.RangeField("Columns/rows to sum each step when fitting edges",
                                   int, 10, min=5)

    def validate(self):
        config.Config.validate(self)
        # The hasattr guard allows subclasses that delete these fields.
        if hasattr(self, 'edge1'):
            if [self.edge1, self.edge2].count(None) == 1:
                raise ValueError("Both edges or neither edges must be specified")
            # Fixed: the original "self.edge1 is not self.edge2 <= self.edge1"
            # was a chained comparison testing *identity* against edge2, so it
            # never performed the intended not-None check and could miss the
            # edge2 <= edge1 error. After the count(None) check above, edge1
            # not None implies edge2 is not None.
            if self.edge1 is not None and self.edge2 <= self.edge1:
                raise ValueError("Right/upper edge must be greater than left/lower edge")
class determineWavelengthSolutionConfig(config.core_1Dfitting_config):
    """Configuration for the determineWavelengthSolution() primitive."""
    suffix = config.Field("Filename suffix", str, "_wavelengthSolutionDetermined", optional=True)
    center = config.RangeField("Central row/column to extract", int, None, min=1, optional=True)
    nsum = config.RangeField("Number of lines to sum", int, 10, min=1)
    min_snr = config.RangeField("Minimum SNR for peak detection", float, 10., min=1.)
    min_sep = config.RangeField("Minimum feature separation (pixels)", float, 2., min=1.)
    weighting = config.ChoiceField("Weighting of identified peaks", str,
                                   allowed={"uniform": "uniform weighting",
                                            "global": "weighted by strength",
                                            "local": "weighted by strength relative to local peaks"},
                                   default="global")
    fwidth = config.RangeField("Feature width in pixels", float, None, min=2., optional=True)
    central_wavelength = config.RangeField("Estimated central wavelength (nm)", float, None,
                                           min=300., max=6000., optional=True)
    dispersion = config.RangeField("Estimated dispersion (nm/pixel)", float, None,
                                   min=-2, max=2, inclusiveMax=True, optional=True)
    linelist = config.Field("Filename of arc line list", str, None, optional=True)
    in_vacuo = config.Field("Use vacuum wavelength scale (rather than air)?", bool, False)
    absorption = config.Field("Is feature type absorption?", bool, False)
    debug_min_lines = config.Field("Minimum number of lines to fit each segment", (str, int), '15,20',
                                   check=list_of_ints_check)
    debug_alternative_centers = config.Field("Try alternative wavelength centers?", bool, False)
    interactive = config.Field("Display interactive fitter?", bool, False)
    # Fixed: the default must be an int literal (was the float "50.") to
    # match this field's declared dtype of int.
    num_atran_lines = config.RangeField("Number of lines in ATRAN line list", int, 50,
                                        min=10, max=300, inclusiveMax=True)
    wv_band = config.ChoiceField("Water Vapor constraint", str,
                                 allowed={"20": "20%-ile",
                                          "50": "50%-ile",
                                          "80": "80%-ile",
                                          "100": "Any",
                                          "header": "header value"},
                                 default="header", optional=False)
    resolution = config.RangeField("Resolution of the observation", int, None, min=10, max=100000,
                                   optional=True)
    combine_method = config.ChoiceField("Combine method to use in 1D spectrum extraction", str,
                                        allowed={"mean": "mean",
                                                 "median": "median"},
                                        default="mean", optional=False)
    verbose = config.Field("Print additional fitting information?", bool, False)

    def setDefaults(self):
        # The fitting function and rejection growth are not user-selectable
        # for wavelength solutions; only the iteration count is retuned.
        del self.function
        del self.grow
        self.niter = 3
class distortionCorrectConfig(parameters_generic.calRequirementConfig):
    """Configuration for the distortionCorrect() primitive."""
    suffix = config.Field("Filename suffix", str, "_distortionCorrected", optional=True)
    interpolant = config.ChoiceField("Type of interpolant", str,
                                     allowed={"nearest": "Nearest neighbour",
                                              "linear": "Linear interpolation",
                                              "poly3": "Cubic polynomial interpolation",
                                              "poly5": "Quintic polynomial interpolation",
                                              "spline3": "Cubic spline interpolation",
                                              "spline5": "Quintic spline interpolation"},
                                     default="poly3", optional=False)
    subsample = config.RangeField("Subsampling", int, 1, min=1)
    dq_threshold = config.RangeField("Fraction from DQ-flagged pixel to count as 'bad'",
                                     float, 0.001, min=0.)
class extractSpectraConfig(config.Config):
    """Configuration for the extractSpectra() primitive."""
    suffix = config.Field("Filename suffix", str, "_extracted", optional=True)
    method = config.ChoiceField("Extraction method", str,
                                allowed={"aperture": "no weighting",
                                         "optimal": "optimal extraction",
                                         "default": "use 'optimal' for STANDARDs, and 'aperture' otherwise"},
                                default="aperture")
    # None => width presumably taken from the traced aperture (confirm).
    width = config.RangeField("Width of extraction aperture (pixels)", float, None, min=1, optional=True)
    grow = config.RangeField("Source aperture avoidance region (pixels)", float, 10, min=0, optional=True)
    subtract_sky = config.Field("Subtract sky spectra if the data have not been sky corrected?", bool, True)
    debug = config.Field("Draw extraction apertures on image display? (not used with interactive)", bool, False)
def check_section(value):
    """Validity check for a comma-separated list of "start:end" sections.

    An empty string is valid (no sections). The lower limit may be omitted
    only in the first section, the upper limit only in the last one. A
    section whose end does not exceed its start raises a ValueError so the
    user gets an explanatory message rather than a generic failure.
    """
    sections = value.split(',')
    if sections == ['']:
        # Empty string: no sections at all, which is acceptable.
        return True
    last = len(sections) - 1
    for idx, sec in enumerate(sections):
        lo, hi = sec.split(':')
        try:
            lo = int(lo)
        except ValueError:
            # A missing lower limit is only permitted in the first section.
            if idx > 0 or lo != '':
                return False
            lo = 0
        try:
            hi = int(hi)
        except ValueError:
            # A missing upper limit is only permitted in the last section.
            if idx < last or hi != '':
                return False
        else:
            if hi <= lo:
                raise ValueError("Section(s) do not have end pixel number "
                                 "greater than start pixel number")
    return True
class findAperturesConfig(config.Config):
    """Configuration for the findApertures() primitive."""
    suffix = config.Field("Filename suffix", str, "_aperturesFound", optional=True)
    max_apertures = config.RangeField("Maximum number of sources to find",
                                      int, None, min=1, optional=True)
    percentile = config.RangeField("Percentile to determine signal for each spatial pixel",
                                   int, 80, min=1, max=100, optional=True, inclusiveMax=True)
    # Validated by check_section() defined earlier in this module.
    section = config.Field("Pixel section(s) for measuring the spatial profile",
                           str, "", optional=False, check=check_section)
    min_sky_region = config.RangeField("Minimum number of contiguous pixels "
                                       "between sky lines", int, 50, min=1)
    min_snr = config.RangeField("Signal-to-noise ratio threshold for peak detection",
                                float, 5.0, min=0.1)
    use_snr = config.Field("Use signal-to-noise ratio rather than data in "
                           "collapsed profile?", bool, True)
    threshold = config.RangeField("Threshold for automatic width determination",
                                  float, 0.1, min=0, max=1, fix_end_to_max=True)
    interactive = config.Field("Use interactive interface", bool, False)
    max_separation = config.RangeField("Maximum separation from target location (arcsec)",
                                       int, None, min=1, inclusiveMax=True, optional=True)
class transferDistortionModelConfig(config.Config):
    """Configuration for the transferDistortionModel() primitive."""
    suffix = config.Field("Filename suffix", str, "_distortionModelTransferred", optional=True)
    source = config.Field("Stream to transfer from", str, None)
class flagCosmicRaysConfig(config.Config):
    """Configuration for the flagCosmicRays() primitive.

    Combines background-fitting parameters (used to remove object continuum
    and sky lines before detection) with the parameters passed through to
    astroscrappy's LA Cosmic implementation (detect_cosmics).
    """
    suffix = config.Field(
        doc="Filename suffix",
        dtype=str,
        default="_CRMasked",
        optional=True,
    )
    bitmask = config.Field(
        doc="Bits in the input data quality `flags` that are to be used to "
        "exclude bad pixels from cosmic ray detection and cleaning. Default "
        "65535 (all non-zero bits, up to 16 planes).",
        dtype=int,
        optional=True,
        default=65535,
    )
    debug = config.Field(
        doc="Make diagnostic plots?",
        dtype=bool,
        default=False
    )

    # Fit parameters --------------------------------------------------------
    spectral_order = config.Field(
        doc="Order for fitting and subtracting object continuum and sky line "
        "models, prior to running the main cosmic ray detection algorithm. "
        "To control which fits are performed, use the bkgmodel parameter.",
        dtype=int,
        optional=True,
        default=9,
    )
    spatial_order = config.Field(
        doc="Order for fitting and subtracting object continuum and sky line "
        "models, prior to running the main cosmic ray detection algorithm. "
        "To control which fits are performed, use the bkgmodel parameter.",
        dtype=int,
        optional=True,
        default=5,
    )
    bkgmodel = config.ChoiceField(
        doc="Set which background model(s) to use, between 'object', "
        "'skyline','both', or 'none'. Different data may get better results "
        "with different background models.",
        allowed={
            'both': 'Use both object and sky line models.',
            'object': 'Use object model only.',
            'skyline': 'Use sky line model only.',
            'none': "Don't use a background model.",
        },
        dtype=str,
        optional=True,
        default='skyline',
    )
    bkgfit_niter = config.Field(
        doc="Maximum number of iterations for the objects and sky fits.",
        dtype=int,
        optional=True,
        default=3,
    )
    bkgfit_lsigma = config.Field(
        doc="Rejection threshold in standard deviations below the mean, "
        "for the objects and sky fits.",
        dtype=float,
        optional=True,
        default=4.0,
    )
    bkgfit_hsigma = config.Field(
        doc="Rejection threshold in standard deviations above the mean, "
        "for the objects and sky fits.",
        dtype=float,
        optional=True,
        default=4.0,
    )

    # Astroscrappy's detect_cosmics parameters ------------------------------
    sigclip = config.Field(
        doc="Laplacian-to-noise limit for cosmic ray detection. Lower "
        "values will flag more pixels as cosmic rays.",
        dtype=float,
        optional=True,
        default=4.5,
    )
    sigfrac = config.Field(
        doc="Fractional detection limit for neighboring pixels. For cosmic "
        "ray neighbor pixels, a lapacian-to-noise detection limit of"
        "sigfrac * sigclip will be used.",
        dtype=float,
        optional=True,
        default=0.3,
    )
    objlim = config.Field(
        doc="Minimum contrast between Laplacian image and the fine structure "
        "image. Increase this value if cores of bright stars are flagged as "
        "cosmic rays.",
        dtype=float,
        optional=True,
        default=5.0,
    )
    niter = config.Field(
        doc="Number of iterations of the LA Cosmic algorithm to perform",
        dtype=int,
        optional=True,
        default=4,
    )
    sepmed = config.Field(
        doc="Use the separable median filter instead of the full median "
        "filter. The separable median is not identical to the full median "
        "filter, but they are approximately the same and the separable median "
        "filter is significantly faster and still detects cosmic rays well.",
        dtype=bool,
        optional=True,
        default=True,
    )
    cleantype = config.ChoiceField(
        doc="Set which clean algorithm is used.",
        allowed={
            'median': 'An umasked 5x5 median filter',
            'medmask': 'A masked 5x5 median filter',
            'meanmask': 'A masked 5x5 mean filter',
            'idw': 'A masked 5x5 inverse distance weighted interpolation',
        },
        dtype=str,
        optional=True,
        default="meanmask",
    )
    fsmode = config.ChoiceField(
        doc="Method to build the fine structure image.",
        allowed={
            'median': 'Use the median filter in the standard LA Cosmic '
            'algorithm',
            'convolve': 'Convolve the image with the psf kernel to calculate '
            'the fine structure image.',
        },
        dtype=str,
        optional=True,
        default='median',
    )
    psfmodel = config.ChoiceField(
        doc="Model to use to generate the psf kernel if fsmode == 'convolve' "
        "and psfk is None. The current choices are Gaussian and Moffat "
        "profiles.",
        allowed={
            'gauss': 'Circular Gaussian kernel',
            'moffat': 'Circular Moffat kernel',
            'gaussx': 'Gaussian kernel in the x direction',
            'gaussy': 'Gaussian kernel in the y direction',
        },
        dtype=str,
        optional=True,
        default="gauss",
    )
    psffwhm = config.Field(
        doc="Full Width Half Maximum of the PSF to use for the kernel.",
        dtype=float,
        optional=True,
        default=2.5,
    )
    psfsize = config.Field(
        doc="Size of the kernel to calculate. Returned kernel will have size "
        "psfsize x psfsize. psfsize should be odd.",
        dtype=int,
        optional=True,
        default=7,
    )
    psfbeta = config.Field(
        doc="Moffat beta parameter. Only used if psfmodel=='moffat'.",
        dtype=float,
        optional=True,
        default=4.765,
    )
    verbose = config.Field(
        doc="Print to the screen or not.",
        dtype=bool,
        optional=True,
        default=False,
    )
def flux_units_check(value):
    """Check that `value` names units convertible to a spectral flux density.

    Returns True if valid; raises ValueError otherwise (the convention for
    `check=` callables in this module's config fields).
    """
    try:
        unit = u.Unit(value)
    except Exception:
        # Narrowed from a bare "except:", which would also have swallowed
        # KeyboardInterrupt and SystemExit.
        raise ValueError(f"{value} is not a recognized unit")
    try:
        # W m^-3 is flux density per unit wavelength; spectral_density
        # equivalencies also admit per-frequency units (e.g. Jy).
        unit.to(u.W / u.m ** 3, equivalencies=u.spectral_density(1. * u.m))
    except u.UnitConversionError:
        raise ValueError(f"Cannot convert {value} to a flux density")
    return True
class fluxCalibrateConfig(parameters_generic.calRequirementConfig):
    """Configuration for the fluxCalibrate() primitive."""
    suffix = config.Field("Filename suffix", str, "_fluxCalibrated", optional=True)
    standard = config.ListField("Standard(s) with sensitivity function", (AstroData, str),
                                None, optional=True, single=True)
    # Validated by flux_units_check() defined earlier in this module.
    units = config.Field("Units for output spectrum", str, "W m-2 nm-1",
                         check=flux_units_check)
class linearizeSpectraConfig(config.Config):
    """Configuration for the linearizeSpectra() primitive.

    The output wavelength grid is over-determined by (w1, w2, dw, npix), so
    validate() requires exactly 0 or 3 of them to be specified.
    """
    suffix = config.Field("Filename suffix", str, "_linearized", optional=True)
    w1 = config.RangeField("Starting wavelength (nm)", float, None, min=0., optional=True)
    w2 = config.RangeField("Ending wavelength (nm)", float, None, min=0., optional=True)
    dw = config.RangeField("Dispersion (nm/pixel)", float, None, min=0.01, optional=True)
    npix = config.RangeField("Number of pixels in spectrum", int, None, min=2, optional=True)
    conserve = config.Field("Conserve flux?", bool, None, optional=True)
    interpolant = config.ChoiceField("Type of interpolant", str,
                                     allowed={"nearest": "Nearest neighbour",
                                              "linear": "Linear interpolation",
                                              "poly3": "Cubic polynomial interpolation",
                                              "poly5": "Quintic polynomial interpolation",
                                              "spline3": "Cubic spline interpolation",
                                              "spline5": "Quintic spline interpolation"},
                                     default="poly3", optional=False)

    def validate(self):
        config.Config.validate(self)
        # count(None) == 4 => nothing specified; == 1 => exactly 3 specified.
        if [self.w1, self.w2, self.dw, self.npix].count(None) not in (1, 4):
            raise ValueError("Exactly 0 or 3 of w1, w2, dw, npix must be specified")
        if self.w1 is not None and self.w2 is not None and self.w2 <= self.w1:
            raise ValueError("Ending wavelength must be greater than starting wavelength")
class maskBeyondSlitConfig(config.Config):
    """Configuration for the maskBeyondSlit() primitive."""
    suffix = config.Field("Filename suffix", str, "_maskedBeyondSlit",
                          optional=True)
class normalizeFlatConfig(config.core_1Dfitting_config):
    """Configuration for the normalizeFlat() primitive."""
    suffix = config.Field("Filename suffix", str, "_normalized", optional=True)
    center = config.RangeField("Central (spatial axis) row/column for 1D extraction (None => use middle)", int, None, min=1, optional=True)
    offset_from_center = config.Field("Offset in pixels from center of slit",
                                      int, None, optional=True)
    nsum = config.RangeField('Number of rows/columns to average (about "center")', int, 10, min=1)
    threshold = config.RangeField("Threshold for flagging unilluminated pixels",
                                  float, 0.01, min=0.0001, max=1.0)
    interactive = config.Field("Interactive fitting?", bool, False)

    def setDefaults(self):
        # A high-order fit is needed to follow the flat-field lamp spectrum.
        self.order = 20
class resampleToCommonFrameConfig(config.Config):
    """Configuration for the resampleToCommonFrame() primitive.

    Unlike linearizeSpectraConfig, any subset of (w1, w2, dw, npix) up to
    three of them may be given; the rest are derived from the inputs.
    """
    suffix = config.Field("Filename suffix", str, "_align", optional=True)
    w1 = config.RangeField("Starting wavelength (nm)", float, None, min=0., optional=True)
    w2 = config.RangeField("Ending wavelength (nm)", float, None, min=0., optional=True)
    dw = config.RangeField("Dispersion (nm/pixel)", float, None, min=0.01, optional=True)
    npix = config.RangeField("Number of pixels in spectrum", int, None, min=2, optional=True)
    conserve = config.Field("Conserve flux?", bool, None, optional=True)
    interpolant = config.ChoiceField("Type of interpolant", str,
                                     allowed={"nearest": "Nearest neighbour",
                                              "linear": "Linear interpolation",
                                              "poly3": "Cubic polynomial interpolation",
                                              "poly5": "Quintic polynomial interpolation",
                                              "spline3": "Cubic spline interpolation",
                                              "spline5": "Quintic spline interpolation"},
                                     default="poly3", optional=False)
    trim_spatial = config.Field("Trim spatial range to fully-covered region?", bool, True)
    trim_spectral = config.Field("Trim wavelength range to fully-covered region?", bool, False)
    force_linear = config.Field("Force linear wavelength solution?", bool, True)
    dq_threshold = config.RangeField("Fraction from DQ-flagged pixel to count as 'bad'",
                                     float, 0.001, min=0.)

    def validate(self):
        config.Config.validate(self)
        # All four together would over-determine the output grid.
        if [self.w1, self.w2, self.dw, self.npix].count(None) == 0:
            raise ValueError("Maximum 3 of w1, w2, dw, npix must be specified")
        if self.w1 is not None and self.w2 is not None and self.w2 <= self.w1:
            raise ValueError("Ending wavelength must be greater than starting wavelength")
class separateSkyConfig(parameters_preprocess.separateSkyConfig):
    """Spectroscopy-specific additions to the generic separateSky() config."""
    debug_allowable_perpendicular_offset = config.RangeField(
        "Maximum allowable offset perpendicular to the slit (arcsec)",
        float, None, min=0, inclusiveMin=False, optional=True)
class skyCorrectFromSlitConfig(config.core_1Dfitting_config):
    """Configuration for the skyCorrectFromSlit() primitive."""
    suffix = config.Field("Filename suffix", str, "_skyCorrected", optional=True)
    regions = config.Field("Sample regions. (eg. 100:150,251:264)", str, None, optional=True)
    aperture_growth = config.RangeField("Aperture avoidance distance (pixels)", float, 2, min=0)
    debug_plot = config.Field("Show diagnostic plots?", bool, False)
    interactive = config.Field("Run primitive interactively?", bool, False)

    def setDefaults(self):
        # Retune the inherited 1D-fitting defaults for sky modelling.
        self.order = 5
        self.niter = 3
        self.grow = 2
class traceAperturesConfig(config.core_1Dfitting_config):
    """
    Configuration for the traceApertures() primitive.
    """
    suffix = config.Field("Filename suffix",
                          str, "_aperturesTraced", optional=True)
    max_missed = config.RangeField("Maximum number of steps to miss before a line is lost",
                                   int, 5, min=0)
    max_shift = config.RangeField("Maximum shift per pixel in line position",
                                  float, 0.05, min=0.001, max=0.1, inclusiveMax=True,
                                  fix_end_to_max=True)
    nsum = config.RangeField("Number of lines to sum",
                             int, 10, min=1)
    step = config.RangeField("Step in rows/columns for tracing",
                             int, 10, min=1)
    interactive = config.Field("Run primitive interactively?",
                               bool, False)
    debug = config.Field("Draw aperture traces on image display?",
                         bool, False)

    def setDefaults(self):
        # The fitting function is not user-selectable; a low-order fit
        # suffices for smooth aperture traces.
        del self.function
        self.order = 2
class tracePinholeAperturesConfig(config.Config):
    """
    Configuration for the tracePinholeApertures() primitive.
    While the primitive itself should be useable with various modes, it has
    only been tested with cross-dispersed so far (September 2023). Parameters
    are therefore left unspecified here, and should be defined in parameter
    files more specific to the mode.
    """
    suffix = config.Field("Filename suffix",
                          str, "_pinholeAperturesTraced", optional=True)
    start_pos = config.RangeField("Row or column to start tracing at (default: halfway)",
                                  int, None, min=0, inclusiveMin=True, optional=True)
    max_missed = config.RangeField("Maximum number of steps to miss before a line is lost",
                                   int, 5, min=0)
    max_shift = config.RangeField("Maximum shift per pixel in line position",
                                  float, 0.05, min=0.001, max=0.3,
                                  inclusiveMax=True)
    min_line_length = config.RangeField("Minimum line length as a fraction of array",
                                        float, 0, min=0, max=1, inclusiveMin=True,
                                        inclusiveMax=True)
    min_snr = config.RangeField("Minimum SNR for apertures", float, 10., min=0.)
    nsum = config.RangeField("Number of lines to sum", int, 10, min=1)
    step = config.RangeField("Step in rows/columns for tracing", int, 10, min=1)
    spectral_order = config.RangeField("Order of fit in spectral direction",
                                       int, 3, min=1)
    # These exist in case excluding some of the pinhole traces is desired. This
    # is important for GNIRS but may not be the case for other instruments/
    # modes, so the defaults here are to use all traces found.
    debug_min_trace_pos = config.RangeField("First pinhole trace to use",
                                            dtype=int, default=None, min=1, optional=True)
    debug_max_trace_pos = config.RangeField("Last pinhole trace to use",
                                            dtype=int, default=None, min=1, optional=True)
    debug_plots = config.Field("Create diagnostic plots of traces", bool, False)

    def validate(self):
        # If both trace-position limits are given, they must define a
        # non-empty range.
        if (self.debug_max_trace_pos is not None and
            self.debug_min_trace_pos is not None and
            self.debug_max_trace_pos < self.debug_min_trace_pos):
            raise ValueError("debug_max_trace_pos cannot be less than debug_min_trace_pos")
def wavelength_units_check(value):
    """Check that `value` names units convertible to a wavelength.

    Returns True if valid; raises ValueError otherwise (the convention for
    `check=` callables in this module's config fields).

    Note: the original file contained two identical definitions of this
    function back-to-back; the duplicate has been removed (the second
    definition simply rebound the same name).
    """
    try:
        unit = u.Unit(value)
    except Exception:
        # Narrowed from a bare "except:", which would also have swallowed
        # KeyboardInterrupt and SystemExit.
        raise ValueError(f"{value} is not a recognized unit")
    try:
        unit.to(u.m)
    except u.UnitConversionError:
        raise ValueError(f"{value} is not a wavelength unit")
    return True
class write1DSpectraConfig(config.Config):
    """Configuration for the write1DSpectra() primitive."""
    format = config.ChoiceField("Format for writing", str,
                                allowed=table_writing_formats(),
                                default="ascii", optional=False)
    header = config.Field("Write full FITS header?", bool, False)
    extension = config.Field("Filename extension", str, "dat")
    apertures = config.Field("Apertures to write", (str, int), None,
                             optional=True, check=list_of_ints_check)
    dq = config.Field("Write Data Quality values?", bool, False)
    var = config.Field("Write Variance values?", bool, False)
    overwrite = config.Field("Overwrite existing files?", bool, False)
    wave_units = config.Field("Output wavelength units", str, None,
                              check=wavelength_units_check, optional=True)
    # Cannot check as we don't know what the input units are
    data_units = config.Field("Output data units", str, None, optional=True)

    def validate(self):
        config.Config.validate(self)
        # A full FITS header only makes sense for the ascii* writers.
        if self.header and not self.format.startswith("ascii"):
            raise ValueError("FITS header can only be written with ASCII formats")
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@core@parameters_spect.py@.PATH_END.py
|
{
"filename": "pooling_ops_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/compiler/tests/pooling_ops_test.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def NHWCToNCHW(input_tensor):
  """Convert the input from NHWC format to NCHW.

  Args:
    input_tensor: a 4-D tensor, or a 4-element array representing the same.

  Returns:
    the converted tensor or a shape array
  """
  if isinstance(input_tensor, tensor.Tensor):
    return array_ops.transpose(input_tensor, [0, 3, 1, 2])
  # Plain Python sequence: permute the four elements directly.
  return [input_tensor[axis] for axis in (0, 3, 1, 2)]
def NCHWToNHWC(input_tensor):
  """Convert the input from NCHW format to NHWC.

  Args:
    input_tensor: a 4-D tensor, or a 4-element array representing the same.

  Returns:
    the converted tensor or a shape array
  """
  if isinstance(input_tensor, tensor.Tensor):
    return array_ops.transpose(input_tensor, [0, 2, 3, 1])
  # Plain Python sequence: permute the four elements directly.
  return [input_tensor[axis] for axis in (0, 2, 3, 1)]
def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs
  """
  return ["NHWC", "NCHW"]
class PoolingTest(xla_test.XLATestCase):
  """Forward max/average pooling tests, run in both NHWC and NCHW layouts."""

  def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
                     data_format, expected):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called, currently only co.MaxPool.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      expected: An array containing the expected operation outputs.
    """
    total_size = np.prod(input_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)
    x = x.reshape(input_sizes)
    with self.session() as sess:
      with self.test_scope():
        inputs = array_ops.placeholder(dtypes.float32)
        t = inputs
        # Inputs/expected values are authored in NHWC; convert the tensor
        # and the kernel/stride vectors when testing the NCHW path.
        if data_format == "NCHW":
          t = NHWCToNCHW(t)
          ksize = NHWCToNCHW(ksize)
          strides = NHWCToNCHW(strides)
        t = pool_func(t,
                      ksize=ksize,
                      strides=strides,
                      padding=padding,
                      data_format=data_format)
        # Convert the result back so it can be compared against the
        # NHWC-ordered expected values.
        if data_format == "NCHW":
          t = NCHWToNHWC(t)
      actual = sess.run(t, {inputs: x})
      self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)

  def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
                    expected):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool,
        or the Lua version.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    # Run the same check for every supported data format.
    for data_format in GetTestConfigs():
      self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
                          data_format, expected)

  def testMaxPoolValidPadding(self):
    expected_output = [13.0, 14.0, 15.0]
    self._VerifyValues(nn_ops.max_pool,
                       input_sizes=[1, 3, 3, 3],
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding="VALID",
                       expected=expected_output)

  def testMaxPoolSamePadding(self):
    expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
    self._VerifyValues(nn_ops.max_pool,
                       input_sizes=[1, 2, 3, 3],
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output)

  def testMaxPoolSamePaddingNonSquareWindow(self):
    # input is:
    # [1.0, 2.0
    #  3.0  4.0]
    #
    # Window of [x, x] should do:
    #
    #  [max(1.0, 2.0), max(2.0, padded0),
    #   max(3.0, 4.0), max(4.0, padded0)]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        expected=[2.0, 2.0, 4.0, 4.0])

  def testMaxPoolValidPaddingUnevenStride(self):
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 1, 2, 1],
        padding="VALID",
        expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 1, 1],
        padding="VALID",
        expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])

  def testMaxPoolSamePaddingFilter4(self):
    expected_output = [
        21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
        61.0, 62.0, 63.0, 64.0
    ]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)

  def testMaxPoolSamePaddingFilter8(self):
    expected_output = [
        145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
        163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
        181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
        191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
        289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
        307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
        317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
        407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
        433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
        443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
        469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
        487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
        505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
    ]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)

  # Tests for DepthwiseMaxPooling on CPU only.
  def testDepthwiseMaxPool1x1DepthWindow1(self):
    # input is:
    # [1.0, ..., 10.0] along depth,
    #
    # We maxpool by depth in patches of 2.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 1, 1, 10],
        ksize=[1, 1, 1, 2],
        strides=[1, 1, 1, 2],
        padding="SAME",
        expected=[2.0, 4.0, 6.0, 8.0, 10.0])

  def testDepthwiseMaxPool2x2DepthWindow3(self):
    # input is:
    #
    # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
    # output.  Each node has contiguous values, so the depthwise max
    # should be multiples of 3.0.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 2, 6],
        ksize=[1, 1, 1, 3],
        strides=[1, 1, 1, 3],
        padding="SAME",
        expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])

  def testKernelSmallerThanStrideValid(self):
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 7, 7, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 3, 3, 1],
        padding="VALID",
        expected=[9, 12, 30, 33])

  def testKernelSmallerThanStrideSame(self):
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 3, 3, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=[1, 3, 7, 9])
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=[1, 3, 9, 11])

  # Average pooling
  def testAvgPoolValidPadding(self):
    expected_output = [7, 8, 9]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="VALID",
        expected=expected_output)

  def testAvgPoolSamePadding(self):
    expected_output = [7., 8., 9., 11.5, 12.5, 13.5]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 2, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)
class PoolGradTest(xla_test.XLATestCase):
  """Compares XLA pooling gradients against the TensorFlow CPU kernels."""

  # Reference (expected) gradients are always computed on the host CPU.
  CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"

  def _VerifyOneTest(self,
                     pool_func,
                     pool_grad_func,
                     input_sizes,
                     ksize,
                     strides,
                     padding,
                     data_format,
                     pool_grad_grad_func=None):
    """Verifies the output values of the pooling gradient function.

    Runs pool_grad_func (and optionally pool_grad_grad_func) once on the
    CPU and once under the XLA test scope, and asserts the results match.

    Args:
      pool_func: Forward pooling function.
      pool_grad_func: Pooling gradient function corresponding to pool_func.
      input_sizes: Input tensor dimensions (NHWC).
      ksize: The kernel size dimensions.
      strides: The stride dimensions.
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      pool_grad_grad_func: Second-order gradient function, if available.
    """
    total_size = np.prod(input_sizes)
    # TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally
    # maximal at 16 bits. Switch to np.random.randn when resolved.
    x = np.arange(1, total_size + 1, dtype=np.float32)
    x *= (np.random.randint(2, size=total_size) * 2 - 1)  # Flip signs randomly
    # Verify some specifically interesting values...
    x[np.random.choice(total_size)] = np.inf
    x[np.random.choice(total_size)] = -np.inf
    # TODO(b/74222344): Fix nan handling for max pool grad.
    # x[np.random.choice(total_size)] = np.nan
    x = x.reshape(input_sizes)
    with self.session() as sess:
      # Use the forward pool function to compute some corresponding outputs
      # (needed for the CPU device, and we need the shape in both cases).
      with ops.device(self.CPU_DEVICE):
        inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
        outputs = pool_func(
            inputs,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format="NHWC")
      output_vals = np.array(sess.run(outputs, {inputs: x}))
      # Deterministic incoming gradients: 1, 2, 3, ... in output shape.
      output_gradient_vals = np.arange(
          1, output_vals.size + 1, dtype=np.float32)
      output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
      output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
      output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
      # Use the Tensorflow CPU pooling gradient to compute the expected input
      # gradients.
      with ops.device(self.CPU_DEVICE):
        output_gradients = array_ops.placeholder(
            dtypes.float32, shape=output_vals.shape)
        expected_input_gradients = pool_grad_func(
            inputs,
            outputs,
            output_gradients,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format="NHWC")
        expected_input_gradient_vals = sess.run(
            expected_input_gradients,
            {inputs: x,
             output_gradients: output_gradient_vals})
        output_grad_gradients = array_ops.placeholder(
            dtypes.float32, shape=expected_input_gradient_vals.shape)
        if pool_grad_grad_func is not None:
          # Second-order reference, also on the CPU.
          expected_grad_gradients = pool_grad_grad_func(
              inputs,
              outputs,
              output_grad_gradients,
              ksize=ksize,
              strides=strides,
              padding=padding,
              data_format="NHWC")
          expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
              inputs: x,
              output_grad_gradients: output_grad_grad_vals
          })
      # Run the gradient op on the XLA device
      with self.test_scope():
        outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
        xla_inputs = inputs
        xla_outputs = outputs
        xla_output_gradients = output_gradients
        xla_output_grad_gradients = output_grad_gradients
        xla_ksize = ksize
        xla_strides = strides
        if data_format == "NCHW":
          # Transpose tensors (and permute the ksize/strides lists) so the
          # same logical test runs in NCHW layout.
          xla_inputs = NHWCToNCHW(inputs)
          xla_outputs = NHWCToNCHW(outputs)
          xla_output_gradients = NHWCToNCHW(output_gradients)
          xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)
          xla_ksize = NHWCToNCHW(ksize)
          xla_strides = NHWCToNCHW(strides)
        actual_input_gradients = pool_grad_func(
            xla_inputs,
            xla_outputs,
            xla_output_gradients,
            ksize=xla_ksize,
            strides=xla_strides,
            padding=padding,
            data_format=data_format)
        if data_format == "NCHW":
          actual_input_gradients = NCHWToNHWC(actual_input_gradients)
        if pool_grad_grad_func is not None:
          actual_grad_gradients = pool_grad_grad_func(
              xla_inputs,
              xla_outputs,
              xla_output_grad_gradients,
              ksize=xla_ksize,
              strides=xla_strides,
              padding=padding,
              data_format=data_format)
          if data_format == "NCHW":
            actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)
      actual_input_gradients_vals = sess.run(actual_input_gradients, {
          inputs: x,
          outputs: output_vals,
          output_gradients: output_gradient_vals
      })
      # Compare the Tensorflow and XLA results.
      self.assertAllClose(
          expected_input_gradient_vals,
          actual_input_gradients_vals,
          rtol=1e-4,
          atol=1e-6)
      self.assertShapeEqual(actual_input_gradients_vals, inputs)
      if pool_grad_grad_func is not None:
        actual_grad_gradients_vals = sess.run(
            actual_grad_gradients, {
                inputs: x,
                outputs: output_vals,
                output_grad_gradients: output_grad_grad_vals
            })
        # Compare the Tensorflow and XLA results.
        self.assertAllClose(
            expected_grad_gradients_vals,
            actual_grad_gradients_vals,
            rtol=1e-4,
            atol=1e-6)
        self.assertShapeEqual(actual_grad_gradients_vals, outputs)

  def _VerifyValues(self,
                    pool_func,
                    pool_grad_func,
                    input_sizes,
                    ksize,
                    strides,
                    padding,
                    pool_grad_grad_func=None):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Pooling function to be called, e.g., tf.nn.max_pool2d
      pool_grad_func: Corresponding pooling gradient function.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions.
      strides: The stride dimensions.
      padding: Padding type.
      pool_grad_grad_func: Second-order gradient function, if available.
    """
    # Repeat the check for every configured data format; _VerifyOneTest
    # special-cases "NCHW", so the configs presumably yield layout strings
    # like "NHWC"/"NCHW" (GetTestConfigs is defined elsewhere in this file).
    for data_format in GetTestConfigs():
      self._VerifyOneTest(
          pool_func,
          pool_grad_func,
          input_sizes,
          ksize,
          strides,
          padding,
          data_format,
          pool_grad_grad_func=pool_grad_grad_func)

  def _TestPooling(self, forward_op, backward_op, pool_grad_grad_func=None):
    """Runs a standard battery of shapes/strides/paddings through _VerifyValues."""
    # VALID padding
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="VALID",
        pool_grad_grad_func=pool_grad_grad_func)
    # SAME padding
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 2, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        pool_grad_grad_func=pool_grad_grad_func)
    # SAME padding, non square window
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        pool_grad_grad_func=pool_grad_grad_func)
    # VALID padding, uneven stride
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 1, 2, 1],
        padding="VALID",
        pool_grad_grad_func=pool_grad_grad_func)
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 1, 1],
        padding="VALID",
        pool_grad_grad_func=pool_grad_grad_func)
    # SAME padding, size 4 input
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        pool_grad_grad_func=pool_grad_grad_func)
    # SAME padding, size 8 input
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        pool_grad_grad_func=pool_grad_grad_func)

  def testMaxPool(self):
    """Max-pooling gradients, including the second-order gradient op."""
    self._TestPooling(
        nn_ops.max_pool,
        gen_nn_ops.max_pool_grad,
        pool_grad_grad_func=gen_nn_ops.max_pool_grad_grad)

  def testAvgPool(self):
    """Average-pooling gradients (no second-order gradient function passed)."""
    # Wrapper around AvgPoolGrad that ignores extra arguments needed by
    # MaxPoolGrad.
    def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
                    data_format):
      del outputs  # Unused by average-pooling gradients.
      return gen_nn_ops.avg_pool_grad(
          inputs.get_shape().as_list(),
          output_gradients,
          ksize=ksize,
          strides=strides,
          padding=padding,
          data_format=data_format)
    self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)

  @test_util.disable_mlir_bridge(
      "TODO(b/266613412): investigate FPE in AvgPoolGrad for TPU"
  )
  def testAvgPoolGradSamePaddingZeroStrideZeroSize(self):
    """Zero-sized ksize/strides must raise InvalidArgumentError, not crash."""
    output_gradient_vals = np.array([0.39117979], dtype=np.float32)
    output_gradient_vals = output_gradient_vals.reshape([1, 1, 1, 1])
    with self.session() as sess:
      with self.test_scope():
        output_gradients = array_ops.placeholder(
            dtypes.float32, shape=output_gradient_vals.shape
        )
        t = gen_nn_ops.avg_pool_grad(
            orig_input_shape=[1, 0, 0, 0],
            grad=output_gradients,
            ksize=[1, 0, 0, 0],
            strides=[1, 0, 0, 0],
            padding="SAME",
            data_format="NCHW",
        )
        with self.assertRaisesRegex(
            errors.InvalidArgumentError,
            (
                "Sliding window ksize field for dimension 1 must be positive but"
                " is 0"
            ),
        ):
          sess.run(t, {output_gradients: output_gradient_vals})

  # The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
  # the stride size, so we only run the following tests on MaxPoolGrad.
  def testMaxPoolKernelSmallerThanStrideValid(self):
    """MaxPoolGrad with a 2x2 kernel smaller than the stride of 3 (VALID)."""
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops.max_pool_grad,
        input_sizes=[1, 7, 7, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 3, 3, 1],
        padding="VALID")

  def testMaxPoolKernelSmallerThanStrideSame(self):
    """MaxPoolGrad with a 1x1 kernel and stride 2 (SAME padding)."""
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops.max_pool_grad,
        input_sizes=[1, 3, 3, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops.max_pool_grad,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
  googletest.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@compiler@tests@pooling_ops_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/py/LSS/DESI_ke/__init__.py",
"type": "Python"
}
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@py@LSS@DESI_ke@__init__.py@.PATH_END.py
|
|
{
"filename": "11158_rval_100015.py",
"repo_name": "shreeyesh-biswal/Rvalue_3D",
"repo_path": "Rvalue_3D_extracted/Rvalue_3D-main/Codes/X-class/AR_11158/11158_rval_100015.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 26 20:36:28 2022

@author: shreeyeshbiswal

Plots the time evolution of the log R-value of active region AR 11158 at
ten altitude slices (0.36 Mm apart, per the labels below) as a stack of
scatter strips sharing one colour bar.  Reads one
``PF_ext_rvals_100015_<time>.dat`` file per timestamp sub-directory.
"""
import os
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
# Active-region identifier and data-set directory layout.
AR = "11158"
core_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/"
base_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR
dir_list = sorted(os.listdir(base_dir))  # one sub-directory per timestamp
n = len(dir_list)   # number of timestamps
m = 10 # values per file
# Threshold/separation labels used only in the plot annotation text.
d = '15'
th = '100'
rval_matrix = np.zeros(shape=(n,m))  # rows: timestamps, cols: altitude slices
index = np.arange(0,n)               # NOTE(review): unused below
height = np.arange(0,m)*0.36         # NOTE(review): unused; heights recomputed in the label loop
P4 = 'Log of R-value (Mx); AR ' + AR
colorbarticks = [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]  # NOTE(review): unused
cbar_min = 15
cbar_max = 23
flare_time = 97.73  # hours after the first timestamp; marked with a dashed line
for i in range(0,n):
    Time_tag = dir_list[i]
    Time = Time_tag[0:19]   # timestamp string, e.g. YYYY-MM-DD?HH?MM?SS — format per directory names
    Hour = Time[11:13]
    print(Time)
    dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR + "/" + Time_tag
    os.chdir(dir)
    # the if-else statement takes care of missing data
    if len(os.listdir(dir)) != 0:
        rval = np.loadtxt("PF_ext_rvals_100015_" + Time + ".dat")
        rval = rval + 15.1172 # LOG FACTOR FOR 1.3141 x 10^15
        print(rval)
        print(np.shape(rval))
        rval_matrix[i,:] = rval   # assumes the file holds exactly m values — TODO confirm
        print(Hour)
    else:
        # Missing timestamp: leave a gap in the plot instead of a bogus value.
        rval_matrix[i,:] = np.nan
        print("Empty directory")
os.chdir(core_dir)
x = np.arange(0,n)
# NOTE(review): dpi=100000 looks unintentionally large, and this figure is
# immediately superseded by plt.subplots below — confirm this call can go.
figure(figsize=(10,10), dpi=100000)
# Rebinds the name `figure` from the imported function to the Figure object.
figure, axs = plt.subplots(10)
figure.set_figheight(15)
figure.set_figwidth(9)
cm = plt.cm.get_cmap('afmhot')
mpl.rc('xtick', labelsize=13)
# Plot
# First strip drawn separately so `sc` can feed the shared colour bar.
sc = axs[0].scatter(x, rval_matrix[:,9], c = rval_matrix[:,9], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
# Top strip is the highest altitude slice (column 9), bottom the lowest.
for i in range(0,m):
    axs[i].scatter(x, rval_matrix[:,9-i], c = rval_matrix[:,9-i], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
for i in range(0,m):
    axs[i].set_ylim([cbar_min, cbar_max])
plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]);
# Only the bottom strip keeps its time-axis ticks (every 24 samples).
axs[9].tick_params(axis='x', labelsize=16)
axs[9].set_xticks(np.arange(0,n,24))
# Hide the ylims of individual boxes
for i in range(0,m):
    axs[i].set_yticks([])
# Show heights in the altitude
heightfont = 16
for i in range(0,m):
    max_alt = (m-1)*0.36
    altitude = max_alt-(i*0.36)
    alt_str = "{:.2f}".format(altitude)
    axs[i].set_ylabel(alt_str + ' ', fontsize = heightfont, rotation = 0)
# Mark the flare occurrence with a dashed vertical line on every strip.
for i in range(0,m):
    axs[i].axvline(x = flare_time, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.40)
# Orient the text
st = dir_list[0]
start_time = st[0:4] + '/' + st[5:7] + '/' + st[8:10] + '/' + st[11:13] + ':' + st[14:16]
axs[0].text(12, (cbar_max + (0.35*(cbar_max - cbar_min))), P4, fontsize=23)
axs[5].text(-39, cbar_min + 0.5*(cbar_max - cbar_min), 'Height (Mm)', rotation = 90, fontsize=18)
axs[9].text(-15, (cbar_min - (0.65*(cbar_max - cbar_min))), 'Time after ' + start_time + ' (hrs)' + '; ($B_{th}$, $D_{sep}$) = ' + '(' + th + ',' + d + ')', rotation = 0, fontsize=18)
# Shared colour bar on the right-hand side.
figure.subplots_adjust(right=0.8)
cbar_ax = figure.add_axes([0.85, 0.15, 0.05, 0.7])
cbar_ax.tick_params(labelsize=16)
figure.colorbar(sc, cax=cbar_ax, ticks=range(cbar_min,cbar_max+1,1))
plt.subplots_adjust(wspace=0.5, hspace=0)
plt.show()
# Undo the mpl.rc tweaks so later imports of this module see defaults.
mpl.rcParams.update(mpl.rcParamsDefault)
|
shreeyesh-biswalREPO_NAMERvalue_3DPATH_START.@Rvalue_3D_extracted@Rvalue_3D-main@Codes@X-class@AR_11158@11158_rval_100015.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "juanep97/iop4",
"repo_path": "iop4_extracted/iop4-main/iop4lib/db/__init__.py",
"type": "Python"
}
|
from ..enums import *
from .epoch import Epoch
from .rawfit import RawFit
from .astrosource import AstroSource
from .masterbias import MasterBias
from .masterflat import MasterFlat
from .masterdark import MasterDark
from .reducedfit import ReducedFit
from .photopolresult import PhotoPolResult, PhotoPolResultReducedFitRelation
from .aperphotresult import AperPhotResult
|
juanep97REPO_NAMEiop4PATH_START.@iop4_extracted@iop4-main@iop4lib@db@__init__.py@.PATH_END.py
|
{
"filename": "Box1d.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/PYB11/Geometry/Box1d.py",
"type": "Python"
}
|
#-------------------------------------------------------------------------------
# Box1d
#-------------------------------------------------------------------------------
from PYB11Generator import *
# PYB11 binding declaration for the C++ Box1d geometry type.  Each method
# below is a *declaration* consumed by PYB11Generator: the string-valued
# defaults are C++ parameter types, the bare string after the signature is
# the generated docstring, and the returned string is the C++ return type.
class Box1d:
    PYB11typedefs = """
    typedef Box1d::Vector Vector;
"""
    #...........................................................................
    # Constructors
    def pyinit(self):
        "Default constructor"
    def pyinit1(self,
                points = "const std::vector<Vector>&"):
        "Construct as a convex hull"
    def pyinit2(self,
                points = "const std::vector<Vector>&",
                facetIndices = "const std::vector<std::vector<unsigned> >&"):
        "Construct with explicit vertices and facets"
    def pyinit3(self,
                center = "const Dim<1>::Vector&",
                extent = "const double"):
        "Construct with the given center and width"
    #...........................................................................
    # Methods
    @PYB11const
    def contains(self,
                 point = "const Vector&",
                 countBoundary = ("const bool", "true"),
                 tol = ("const double", "1.0e-8")):
        "Test if the given point is internal to the box."
        return "bool"
    @PYB11const
    def convexContains(self,
                       point = "const Vector&",
                       countBoundary = ("const bool", "true"),
                       tol = ("const double", "1.0e-8")):
        "Test if the given point is internal to the box (assumes convexity)."
        return "bool"
    # Three C++ overloads of intersect, disambiguated via PYB11pycppname.
    @PYB11const
    @PYB11pycppname("intersect")
    def intersect1(self,
                   rhs = "const Box1d&"):
        "Test if we intersect another box."
        return "bool"
    @PYB11const
    def convexIntersect(self,
                        rhs = "const Box1d&"):
        "Test if we intersect another box (assumes convexity)"
        return "bool"
    @PYB11const
    @PYB11pycppname("intersect")
    def intersect2(self,
                   rhs = "const std::pair<Vector, Vector>&"):
        "Test if we intersect another box represented by a min/max pair of coordinates."
        return "bool"
    @PYB11const
    @PYB11pycppname("intersect")
    def intersect3(self,
                   s0 = "const Vector&",
                   s1 = "const Vector&"):
        "Test if we intersect a line segment (interior counts as intersection)."
        return "bool"
    @PYB11const
    def distance(self, p="const Vector&"):
        "Compute the minimum distance to a point."
        return "double"
    @PYB11const
    def closestPoint(self, p="const Vector&"):
        "Find the point in the box closest to the given point."
        return "Vector"
    # facetArea / facetAreaNormal carry no docstring in the C++ binding.
    @PYB11const
    def facetArea(self, facetID="const unsigned"):
        return "double"
    @PYB11const
    def facetAreaNormal(self, facetID="const unsigned"):
        return "Vector"
    #...........................................................................
    # Operators (bodies intentionally empty: PYB11 maps these onto the
    # corresponding C++ operators).
    def __iadd__(self, rhs="Vector()"):
        return
    def __isub__(self, rhs="Vector()"):
        return
    def __add__(self, rhs="Vector()"):
        return
    def __sub__(self, rhs="Vector()"):
        return
    def __imul__(self, rhs="double()"):
        return
    def __itruediv__(self, rhs="double()"):
        return
    def __mul__(self, rhs="double()"):
        return
    def __truediv__(self, rhs="double()"):
        return
    def __eq__(self):
        return
    def __ne__(self):
        return
    #...........................................................................
    # Properties (getter/setter names inferred by PYB11 unless given).
    center = PYB11property("const Vector&", "center", "center")
    extent = PYB11property("double", "extent", "extent")
    xmin = PYB11property(returnpolicy="reference_internal")
    xmax = PYB11property(returnpolicy="reference_internal")
    centroid = PYB11property()
    vertices = PYB11property(returnpolicy="reference_internal")
    facetVertices = PYB11property()
    volume = PYB11property()
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@PYB11@Geometry@Box1d.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "JulianBMunoz/Zeus21",
"repo_path": "Zeus21_extracted/Zeus21-main/zeus21/__init__.py",
"type": "Python"
}
|
from .inputs import Cosmo_Parameters_Input, Cosmo_Parameters, Astro_Parameters
from .constants import *
from .cosmology import *
from .correlations import *
from .sfrd import get_T21_coefficients
from .xrays import Xray_class
from .UVLFs import UVLF_binned
from .maps import CoevalMaps
import warnings
# NOTE(review): this suppresses *all* UserWarnings process-wide (no module
# filter), not only the mcfit one the comment refers to.
warnings.filterwarnings("ignore", category=UserWarning) #to silence unnecessary warning in mcfit
|
JulianBMunozREPO_NAMEZeus21PATH_START.@Zeus21_extracted@Zeus21-main@zeus21@__init__.py@.PATH_END.py
|
{
"filename": "_bgcolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/image/hoverlabel/_bgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``image.hoverlabel.bgcolor`` property."""

    def __init__(self, plotly_name="bgcolor", parent_name="image.hoverlabel", **kwargs):
        # Honor caller-supplied overrides, falling back to the defaults
        # used throughout the generated validator modules.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        super(BgcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@image@hoverlabel@_bgcolor.py@.PATH_END.py
|
{
"filename": "1_fitting_wd_spectra.ipynb",
"repo_name": "vedantchandra/wdtools",
"repo_path": "wdtools_extracted/wdtools-master/docs/examples/1_fitting_wd_spectra.ipynb",
"type": "Jupyter Notebook"
}
|
# Tutorial: Fitting a DA Spectrum
For this demonstration, we use a sample spectrum from the Sloan Digital Sky Survey (SDSS) named SDSS J082600.58+282346.2.
Tremblay et al. (2019) assigned this star an effective temperature of $13917$ Kelvin and a surface gravity of $8.06$ log[cm/s^2] using their latest atmospheric models and this SDSS spectrum. In this notebook we'll demonstrate how you can use `wdtools` to infer these parameters from any white dwarf spectrum.
```python
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
speed_light = 299792458 #m/s
import sys
sys.path.append('../../') # Your path should contain the parent GitHub repository
import wdtools
```
```python
with fits.open('sample_spectrum.fits') as f:
flux = f[1].data['flux']
wl = 10**f[1].data['loglam']
ivar = f[1].data['ivar']
```
```python
plt.figure(figsize = (10,5))
plt.plot(wl, flux, 'k')
plt.xlabel('Wavelength ($\mathrm{\AA}$)');
plt.ylabel('Flux ($\mathrm{10^{-17} erg\ cm^{-2}\ s^{-1}\ \AA^{-1}}$)')
plt.title('SDSS J082600.58+282346.2');
```

## Generative Fitting Pipeline
We normalize the hydrogen Balmer lines and fit atmospheric models from Koester (2010) to this spectrum using our MCMC algorithm.
```python
gfp = wdtools.GFP(resolution = 2, specclass = 'DA')
```
We pass the instrumental resolution in Angstroms when we initialize `GFP`. The theoretical models are convolved with a Gaussian kernel with this resolution prior to each fitting step. We also select the `'DA'` spectral class to fit pure-hydrogen models. At this time, these are the only models available. In future we hope to include helium (DB) models when those models become publicly available.
```python
labels, e_labels, redchi = gfp.fit_spectrum(wl, flux, ivar,
mcmc = True, nwalkers = 25, burn = 50, ndraws = 50,
plot_init = True, make_plot = True, plot_corner = True, savename = 'example',
verbose = True,
lines = ['beta', 'gamma', 'delta', 'eps', 'h8'])
```
fitting radial velocity...
fitting continuum...

Radial Velocity = 21 ± 18 km/s
final optimization...
initializing at teff = 8000 K
initializing at teff = 21500 K
initializing at teff = 35000 K
[13179.736764779153, 8.223222899991825]
[37.87348137233288, 0.01883644503570162]
burning in chains...
100%|██████████| 50/50 [00:41<00:00, 1.19it/s]
sampling posterior...
100%|██████████| 50/50 [00:42<00:00, 1.19it/s]


```python
print('Teff = %i ± %i Kelvin' % (labels[0], e_labels[0]))
print('logg = %.2f ± %.2f log[cm/s^2]' % (labels[1], e_labels[1]))
print('RV = %.1f ± %.1f km/s' % (labels[2], e_labels[2]))
print('reduced chi^2 = %.2f' % (redchi))
```
Teff = 13242 ± 78 Kelvin
logg = 8.22 ± 0.01 log[cm/s^2]
RV = 21.2 ± 18.2 km/s
reduced chi^2 = 0.35
All parameters passed to `gfp.fit_spectrum` other than the normalized wavelength, flux, and inverse variance array are optional.
The MCMC hyperparameter selection involves a trade-off between accuracy and speed. Unless you have a particular need for more samples or quicker sampling, you can leave them at the default settings, which should enable you to fit a single spectrum in under a minute on a regular laptop computer. We recommend keeping `burn` at 100 steps or greater, so that walkers can spread out from the high-probability starting point and explore the likelihood. Otherwise, the errors on fitted parameters can be grossly underestimated. You can also pass the `mcmc = False` keyword to `fit_spectrum` to skip the MCMC sampling and simply return the optimized solution.
## Fitting RVs using a Single Absorption Line
You can use a single absorption line to find the RV of a star, assuming you know the rest frame wavelength of that absorption line. This function first fits the absorption line with a sum of concentric Voigt profiles (the number is governed by `nmodel`) to obtain a model-independent template profile. It then fixes that composite profile as a template and performs a cross-correlation to determine the RV. The cross-correlation is repeated for 100 realizations of the observed spectrum (using the provided `ivar` array) to propagate RV uncertainties. The mean and standard deviation of the RV realizations is reported by the function.
The `distance` keyword governs how far away from `centroid` (in Angstroms) we consider in the fit. The `edge` keyword governs how many pixels on each side of the line are used to define a simple linear continuum before the fitting procedure. By tweaking these parameters, this function can be used to fit any absorption line. To fit an emission line, simply take the negative of the flux before running the function.
We demonstrate here with H-alpha:
```python
sp = wdtools.SpecTools()
h_alpha = 6564.61
```
```python
rv, e_rv = sp.get_line_rv(wl, flux, ivar, centroid = h_alpha,
distance = 100, edge = 15,
nmodel = 2, plot = True)
```

## Parametric Random Forest
Rather than fitting ab initio models directly to observed spectra, the parametric random forest in `wdtools` forms a regression relation between the hydrogen Balmer lines on the spectrum and stellar labels derived by previous studies. Whilst this method is less interpretable than the full statistical approach described above, it is much faster and therefore suitable for statistical analyses on larger samples. Additionally, fitting an ab initio model directly requires some care regarding multi-modal posteriors, photometric temperature priors, and so on.
We train our model with a pre-existing catalog of stars that have been carefully fitted for their stellar labels (keeping all the confounding effects in mind) by Tremblay et al. (2019). For more details, refer to our paper or software documentation.
```python
lp = wdtools.LineProfiles(lines = ['alpha', 'beta', 'gamma', 'delta'])
```
The training SDSS data only permitted high-quality line profile fits to the first 4 Balmer lines, so you can only pass some subset of those for this method. The first time you call `LineProfiles` on your machine, it will perform a self-initialization to train its regression model for the provided combination of lines and save it in the `models/` folder. This should only take a few seconds and will create a ~60 MB pickled random forest model that will be used every time you subsequently call `LineProfiles`.
```python
results = lp.labels_from_spectrum(wl, flux, make_plot = True)
```




```python
print('Teff = %i ± %i Kelvin' % (results[0], results[1]))
print('logg = %.2f ± %.2f log[cm/s^2]' % (results[2], results[3]))
```
Teff = 13912 ± 295 Kelvin
logg = 8.10 ± 0.06 log[cm/s^2]
The `labels_from_spectrum` method fits all selected Balmer lines with a Voigt profile to compute their FWHM and height, and then runs the fitted parameters through the ensemble of random forest regression models to produce stellar label estimates with uncertainties.
Both our methods produce self-consistent stellar labels that are also consistent with the prior result from Tremblay et al. (2019).
For any more details, questions, or comments please don't hesitate to contact the authors via email. You can also [raise an issue](https://github.com/vedantchandra/wdtools/issues) on GitHub for specific software bugs or feature requests. We hope you enjoy using wdtools!
|
vedantchandraREPO_NAMEwdtoolsPATH_START.@wdtools_extracted@wdtools-master@docs@examples@1_fitting_wd_spectra.ipynb@.PATH_END.py
|
{
"filename": "test_index.py",
"repo_name": "facebookresearch/faiss",
"repo_path": "faiss_extracted/faiss-main/tests/test_index.py",
"type": "Python"
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""this is a basic test script for simple indices work"""
from __future__ import absolute_import, division, print_function
# no unicode_literals because it messes up in py2
import numpy as np
import unittest
import faiss
import tempfile
import os
import re
import warnings
from common_faiss_tests import get_dataset, get_dataset_2
from faiss.contrib.evaluation import check_ref_knn_with_draws
class TestModuleInterface(unittest.TestCase):
    """Basic sanity checks on the faiss module surface."""

    def test_version_attribute(self):
        # The package must advertise a "major.minor.patch" version string.
        assert hasattr(faiss, '__version__')
        semver_pattern = r'^\d+\.\d+\.\d+$'
        assert re.match(semver_pattern, faiss.__version__)
class TestIndexFlat(unittest.TestCase):
    """Brute-force IndexFlat k-NN and range search vs. a numpy reference."""

    def do_test(self, nq, metric_type=faiss.METRIC_L2, k=10):
        """Compare IndexFlat results against exhaustive numpy distances.

        Args:
            nq: number of queries (together with set_blas_blocks this
                selects the BLAS-blocked vs. direct code path).
            metric_type: faiss.METRIC_L2 or faiss.METRIC_INNER_PRODUCT.
            k: number of neighbors requested per query.
        """
        d = 32
        nb = 1000
        nt = 0
        (xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
        index = faiss.IndexFlat(d, metric_type)
        ### k-NN search
        index.add(xb)
        D1, I1 = index.search(xq, k)
        # Exhaustive reference distances via numpy broadcasting.
        if metric_type == faiss.METRIC_L2:
            all_dis = ((xq.reshape(nq, 1, d) - xb.reshape(1, nb, d)) ** 2).sum(2)
            Iref = all_dis.argsort(axis=1)[:, :k]
        else:
            # Inner product: larger is better, hence the reversed argsort.
            all_dis = np.dot(xq, xb.T)
            Iref = all_dis.argsort(axis=1)[:, ::-1][:, :k]
        Dref = all_dis[np.arange(nq)[:, None], Iref]
        # not too many elements are off (ties may order differently).
        self.assertLessEqual((Iref != I1).sum(), Iref.size * 0.0002)
        # np.testing.assert_equal(Iref, I1)
        np.testing.assert_almost_equal(Dref, D1, decimal=5)
        ### Range search
        # Median of the k-th best distance: each query returns ~k results.
        radius = float(np.median(Dref[:, -1]))
        lims, D2, I2 = index.range_search(xq, radius)
        for i in range(nq):
            l0, l1 = lims[i:i + 2]  # result slice for query i
            _, Il = D2[l0:l1], I2[l0:l1]
            if metric_type == faiss.METRIC_L2:
                Ilref, = np.where(all_dis[i] < radius)
            else:
                Ilref, = np.where(all_dis[i] > radius)
            # Range-search results are unordered; sort ids before comparing.
            Il.sort()
            Ilref.sort()
            np.testing.assert_equal(Il, Ilref)
            np.testing.assert_almost_equal(
                all_dis[i, Ilref], D2[l0:l1],
                decimal=5
            )

    def set_blas_blocks(self, small):
        # Tweak the faiss-global BLAS block sizes so that modest query
        # batches still exercise the blocked (BLAS) distance path.
        if small:
            faiss.cvar.distance_compute_blas_query_bs = 16
            faiss.cvar.distance_compute_blas_database_bs = 12
        else:
            # Restore larger (presumably the default) block sizes —
            # TODO confirm against faiss defaults.
            faiss.cvar.distance_compute_blas_query_bs = 4096
            faiss.cvar.distance_compute_blas_database_bs = 1024

    def test_with_blas(self):
        # nq=200 with small blocks forces the BLAS-blocked path.
        self.set_blas_blocks(small=True)
        self.do_test(200)
        self.set_blas_blocks(small=False)

    def test_noblas(self):
        # Per the test name, nq=10 stays on the direct (non-BLAS) path.
        self.do_test(10)

    def test_with_blas_ip(self):
        self.set_blas_blocks(small=True)
        self.do_test(200, faiss.METRIC_INNER_PRODUCT)
        self.set_blas_blocks(small=False)

    def test_noblas_ip(self):
        self.do_test(10, faiss.METRIC_INNER_PRODUCT)

    def test_noblas_reservoir(self):
        # k=150: per the test name this exercises the reservoir result
        # handler used for large k.
        self.do_test(10, k=150)

    def test_with_blas_reservoir(self):
        self.do_test(200, k=150)

    def test_noblas_reservoir_ip(self):
        self.do_test(10, faiss.METRIC_INNER_PRODUCT, k=150)

    def test_with_blas_reservoir_ip(self):
        self.do_test(200, faiss.METRIC_INNER_PRODUCT, k=150)
class TestIndexFlatL2(unittest.TestCase):
    """sync_l2norms/clear_l2norms on the flat storage of an HNSW index."""

    def test_indexflat_l2_sync_norms_1(self):
        d = 32
        nb = 10000
        nt = 0
        nq = 16
        k = 10
        (xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
        # instantiate IndexHNSWFlat
        index = faiss.IndexHNSWFlat(d, 32)
        index.hnsw.efConstruction = 40
        index.add(xb)
        # Baseline search, no cached L2 norms.
        D1, I1 = index.search(xq, k)
        index_l2 = faiss.downcast_index(index.storage)
        # Search again with the norms precomputed on the flat storage...
        index_l2.sync_l2norms()
        D2, I2 = index.search(xq, k)
        # ...and once more after dropping the cache again.
        index_l2.clear_l2norms()
        D3, I3 = index.search(xq, k)
        # not too many elements are off (cached norms may perturb ties).
        self.assertLessEqual((I2 != I1).sum(), 1)
        # np.testing.assert_equal(Iref, I1)
        np.testing.assert_almost_equal(D2, D1, decimal=5)
        # After clearing, results must match the baseline exactly.
        self.assertLessEqual((I3 != I1).sum(), 0)
        # np.testing.assert_equal(Iref, I1)
        np.testing.assert_equal(D3, D1)
class EvalIVFPQAccuracy(unittest.TestCase):
    """Accuracy smoke tests for IVFPQ with flat and multi-index quantizers."""

    def test_IndexIVFPQ(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
        # Exact top-1 neighbors serve as ground truth.
        gt_index = faiss.IndexFlatL2(d)
        gt_index.add(xb)
        D, gt_nns = gt_index.search(xq, 1)
        coarse_quantizer = faiss.IndexFlatL2(d)
        index = faiss.IndexIVFPQ(coarse_quantizer, d, 32, 8, 8)
        index.cp.min_points_per_centroid = 5  # quiet warning
        index.train(xt)
        index.add(xb)
        index.nprobe = 4
        D, nns = index.search(xq, 10)
        # gt_nns (nq x 1) broadcasts against the 10 result columns: counts
        # how often the true neighbor appears in the top-10.
        n_ok = (nns == gt_nns).sum()
        nq = xq.shape[0]
        self.assertGreater(n_ok, nq * 0.66)
        # check that an Index2Layer gives the same reconstruction.
        # this is a bit fragile: it assumes 2 runs of training give
        # the exact same result.
        index2 = faiss.Index2Layer(coarse_quantizer, 32, 8)
        if True:
            index2.train(xt)
        else:
            # Alternative path kept for reference: reuse the trained PQ.
            index2.pq = index.pq
            index2.is_trained = True
        index2.add(xb)
        ref_recons = index.reconstruct_n(0, nb)
        new_recons = index2.reconstruct_n(0, nb)
        self.assertTrue(np.all(ref_recons == new_recons))

    def test_IMI(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
        d = xt.shape[1]
        gt_index = faiss.IndexFlatL2(d)
        gt_index.add(xb)
        D, gt_nns = gt_index.search(xq, 1)
        # Multi-index quantizer: 2 sub-quantizers of nbits each, hence
        # (1 << nbits) ** 2 inverted lists.
        nbits = 5
        coarse_quantizer = faiss.MultiIndexQuantizer(d, 2, nbits)
        index = faiss.IndexIVFPQ(coarse_quantizer, d, (1 << nbits) ** 2, 8, 8)
        index.quantizer_trains_alone = 1
        index.train(xt)
        index.add(xb)
        index.nprobe = 100
        D, nns = index.search(xq, 10)
        n_ok = (nns == gt_nns).sum()
        # Should return 166 on mac, and 170 on linux.
        self.assertGreater(n_ok, 165)
        ############# replace with explicit assignment indexes
        # Rebuild the coarse quantizer as a MultiIndexQuantizer2 whose two
        # assignment indexes are flat indexes over the trained PQ centroids;
        # results must be unchanged.
        nbits = 5
        pq = coarse_quantizer.pq
        centroids = faiss.vector_to_array(pq.centroids)
        centroids = centroids.reshape(pq.M, pq.ksub, pq.dsub)
        ai0 = faiss.IndexFlatL2(pq.dsub)
        ai0.add(centroids[0])
        ai1 = faiss.IndexFlatL2(pq.dsub)
        ai1.add(centroids[1])
        coarse_quantizer_2 = faiss.MultiIndexQuantizer2(d, nbits, ai0, ai1)
        coarse_quantizer_2.pq = pq
        coarse_quantizer_2.is_trained = True
        index.quantizer = coarse_quantizer_2
        index.reset()
        index.add(xb)
        D, nns = index.search(xq, 10)
        n_ok = (nns == gt_nns).sum()
        # should return the same result
        self.assertGreater(n_ok, 165)

    def test_IMI_2(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
        d = xt.shape[1]
        gt_index = faiss.IndexFlatL2(d)
        gt_index.add(xb)
        D, gt_nns = gt_index.search(xq, 1)
        ############# redo including training
        # Same as test_IMI but the assignment indexes are trained as part
        # of the MultiIndexQuantizer2 rather than filled in afterwards.
        nbits = 5
        ai0 = faiss.IndexFlatL2(int(d / 2))
        ai1 = faiss.IndexFlatL2(int(d / 2))
        coarse_quantizer = faiss.MultiIndexQuantizer2(d, nbits, ai0, ai1)
        index = faiss.IndexIVFPQ(coarse_quantizer, d, (1 << nbits) ** 2, 8, 8)
        index.quantizer_trains_alone = 1
        index.train(xt)
        index.add(xb)
        index.nprobe = 100
        D, nns = index.search(xq, 10)
        n_ok = (nns == gt_nns).sum()
        # should return the same result
        self.assertGreater(n_ok, 165)
class TestMultiIndexQuantizer(unittest.TestCase):
    """Checks specific to the MultiIndexQuantizer search code paths."""

    def test_search_k1(self):
        # verify codepath for k = 1 and k > 1
        d = 64
        nb = 0
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        miq = faiss.MultiIndexQuantizer(d, 2, 6)
        miq.train(xt)
        D1, I1 = miq.search(xq, 1)
        D5, I5 = miq.search(xq, 5)
        # the top-1 result must be identical regardless of k
        self.assertEqual(np.abs(I1[:, :1] - I5[:, :1]).max(), 0)
        self.assertEqual(np.abs(D1[:, :1] - D5[:, :1]).max(), 0)
class TestScalarQuantizer(unittest.TestCase):
    """Accuracy ordering checks for the scalar-quantizer variants,
    both inside an IVF index and as a flat index."""

    def test_4variants_ivf(self):
        """Compare recall@1 of IVF + scalar quantizer variants against
        IVFFlat; coarser quantizers should not beat finer ones."""
        d = 32
        nt = 2500
        nq = 400
        nb = 5000
        (xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
        # common quantizer
        quantizer = faiss.IndexFlatL2(d)
        ncent = 64
        # exact ground truth for recall computation
        index_gt = faiss.IndexFlatL2(d)
        index_gt.add(xb)
        D, I_ref = index_gt.search(xq, 10)
        nok = {}
        index = faiss.IndexIVFFlat(quantizer, d, ncent,
                                   faiss.METRIC_L2)
        index.cp.min_points_per_centroid = 5    # quiet warning
        index.nprobe = 4
        index.train(xt)
        index.add(xb)
        D, I = index.search(xq, 10)
        nok['flat'] = (I[:, 0] == I_ref[:, 0]).sum()
        for qname in "QT_4bit QT_4bit_uniform QT_8bit QT_8bit_uniform QT_fp16 QT_bf16".split():
            qtype = getattr(faiss.ScalarQuantizer, qname)
            index = faiss.IndexIVFScalarQuantizer(quantizer, d, ncent,
                                                  qtype, faiss.METRIC_L2)
            index.nprobe = 4
            index.train(xt)
            index.add(xb)
            D, I = index.search(xq, 10)
            nok[qname] = (I[:, 0] == I_ref[:, 0]).sum()
        self.assertGreaterEqual(nok['flat'], nq * 0.6)
        # The tests below are a bit fragile, it happens that the
        # ordering between uniform and non-uniform are reverted,
        # probably because the dataset is small, which introduces
        # jitter
        self.assertGreaterEqual(nok['flat'], nok['QT_8bit'])
        self.assertGreaterEqual(nok['QT_8bit'], nok['QT_4bit'])
        self.assertGreaterEqual(nok['QT_8bit'], nok['QT_8bit_uniform'])
        self.assertGreaterEqual(nok['QT_4bit'], nok['QT_4bit_uniform'])
        self.assertGreaterEqual(nok['QT_fp16'], nok['QT_8bit'])
        self.assertGreaterEqual(nok['QT_bf16'], nok['QT_8bit'])

    def test_4variants(self):
        """Same ordering checks for flat (non-IVF) scalar quantizers."""
        d = 32
        nt = 2500
        nq = 400
        nb = 5000
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        index_gt = faiss.IndexFlatL2(d)
        index_gt.add(xb)
        D_ref, I_ref = index_gt.search(xq, 10)
        nok = {}
        for qname in "QT_4bit QT_4bit_uniform QT_8bit QT_8bit_uniform QT_fp16 QT_bf16".split():
            qtype = getattr(faiss.ScalarQuantizer, qname)
            index = faiss.IndexScalarQuantizer(d, qtype, faiss.METRIC_L2)
            index.train(xt)
            index.add(xb)
            D, I = index.search(xq, 10)
            nok[qname] = (I[:, 0] == I_ref[:, 0]).sum()
        self.assertGreaterEqual(nok['QT_8bit'], nq * 0.9)
        self.assertGreaterEqual(nok['QT_8bit'], nok['QT_4bit'])
        self.assertGreaterEqual(nok['QT_8bit'], nok['QT_8bit_uniform'])
        self.assertGreaterEqual(nok['QT_4bit'], nok['QT_4bit_uniform'])
        self.assertGreaterEqual(nok['QT_fp16'], nok['QT_8bit'])
        self.assertGreaterEqual(nok['QT_bf16'], nq * 0.9)
class TestRangeSearch(unittest.TestCase):
    """Cross-check range_search against a plain k-NN search."""

    def test_range_search(self):
        d = 4
        nt = 100
        nq = 10
        nb = 50
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        index = faiss.IndexFlatL2(d)
        index.add(xb)
        # reference: top-5 exact neighbors per query
        Dref, Iref = index.search(xq, 5)
        thresh = 0.1    # *squared* distance
        lims, D, I = index.range_search(xq, thresh)
        for i in range(nq):
            # results for query i live in the half-open slice [lims[i], lims[i+1])
            Iline = I[lims[i]:lims[i + 1]]
            Dline = D[lims[i]:lims[i + 1]]
            for j, dis in zip(Iref[i], Dref[i]):
                if dis < thresh:
                    # every k-NN hit under the threshold must appear
                    # exactly once in the range result, same distance
                    li, = np.where(Iline == j)
                    self.assertTrue(li.size == 1)
                    idx = li[0]
                    self.assertGreaterEqual(1e-4, abs(Dline[idx] - dis))
class TestSearchAndReconstruct(unittest.TestCase):
    """Verify search_and_reconstruct against separate search +
    reconstruct_n calls for a variety of index types."""

    def run_search_and_reconstruct(self, index, xb, xq, k=10, eps=None):
        """Run the combined call and check distances, labels, and the
        reconstructed vectors.

        eps, if given, bounds the mean reconstruction error against the
        original database vectors (0.0 for lossless indexes).
        """
        n, d = xb.shape
        assert xq.shape[1] == d
        assert index.d == d
        # references computed with the separate API calls
        D_ref, I_ref = index.search(xq, k)
        R_ref = index.reconstruct_n(0, n)
        D, I, R = index.search_and_reconstruct(xq, k)
        np.testing.assert_almost_equal(D, D_ref, decimal=5)
        # allow ties/draws in the neighbor ordering
        check_ref_knn_with_draws(D_ref, I_ref, D, I)
        self.assertEqual(R.shape[:2], I.shape)
        self.assertEqual(R.shape[2], d)
        # (n, k, ..) -> (n * k, ..)
        I_flat = I.reshape(-1)
        R_flat = R.reshape(-1, d)
        # Filter out -1s when not enough results
        R_flat = R_flat[I_flat >= 0]
        I_flat = I_flat[I_flat >= 0]
        # reconstructions must match reconstruct_n almost exactly
        recons_ref_err = np.mean(np.linalg.norm(R_flat - R_ref[I_flat]))
        self.assertLessEqual(recons_ref_err, 1e-6)

        def norm1(x):
            # per-row Euclidean norm
            return np.sqrt((x ** 2).sum(axis=1))

        recons_err = np.mean(norm1(R_flat - xb[I_flat]))
        if eps is not None:
            self.assertLessEqual(recons_err, eps)
        return D, I, R

    def test_IndexFlat(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        index = faiss.IndexFlatL2(d)
        index.add(xb)
        # flat index is lossless -> eps = 0
        self.run_search_and_reconstruct(index, xb, xq, eps=0.0)

    def test_IndexIVFFlat(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        quantizer = faiss.IndexFlatL2(d)
        index = faiss.IndexIVFFlat(quantizer, d, 32, faiss.METRIC_L2)
        index.cp.min_points_per_centroid = 5    # quiet warning
        index.nprobe = 4
        index.train(xt)
        index.add(xb)
        # IVFFlat stores exact vectors -> eps = 0
        self.run_search_and_reconstruct(index, xb, xq, eps=0.0)

    def test_IndexIVFPQ(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        quantizer = faiss.IndexFlatL2(d)
        index = faiss.IndexIVFPQ(quantizer, d, 32, 8, 8)
        index.cp.min_points_per_centroid = 5    # quiet warning
        index.nprobe = 4
        index.train(xt)
        index.add(xb)
        # PQ is lossy -> loose reconstruction bound
        self.run_search_and_reconstruct(index, xb, xq, eps=1.0)

    def test_IndexIVFRQ(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        quantizer = faiss.IndexFlatL2(d)
        index = faiss.IndexIVFResidualQuantizer(quantizer, d, 32, 8, 8)
        index.cp.min_points_per_centroid = 5    # quiet warning
        index.nprobe = 4
        index.train(xt)
        index.add(xb)
        # residual quantizer is lossy -> loose reconstruction bound
        self.run_search_and_reconstruct(index, xb, xq, eps=1.0)

    def test_MultiIndex(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        index = faiss.index_factory(d, "IMI2x5,PQ8np")
        faiss.ParameterSpace().set_index_parameter(index, "nprobe", 4)
        index.train(xt)
        index.add(xb)
        self.run_search_and_reconstruct(index, xb, xq, eps=1.0)

    def test_IndexTransform(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset(d, nb, nt, nq)
        # pretransforms (L2norm + PCA) in front of the IVFPQ index
        index = faiss.index_factory(d, "L2norm,PCA8,IVF32,PQ8np")
        faiss.ParameterSpace().set_index_parameter(index, "nprobe", 4)
        index.train(xt)
        index.add(xb)
        # no eps: transformed space makes the raw-vector error unbounded
        self.run_search_and_reconstruct(index, xb, xq)
class TestDistancesPositive(unittest.TestCase):
    """Regression check that L2 distances are never reported negative."""

    def test_l2_pos(self):
        """
        roundoff errors occur only with the L2 decomposition used
        with BLAS, ie. in IndexFlatL2 and with
        n > distance_compute_blas_threshold = 20
        """
        d = 128
        n = 100
        rs = np.random.RandomState(1234)
        x = rs.rand(n, d).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)
        D, I = index.search(x, 10)
        # use a unittest assertion instead of a bare `assert`: bare
        # asserts are stripped when tests run under `python -O`
        self.assertTrue(np.all(D >= 0))
class TestShardReplicas(unittest.TestCase):
    """Check is_trained / ntotal propagation through IndexShards and
    IndexReplicas wrappers."""

    def test_shard_flag_propagation(self):
        d = 64    # dimension
        nb = 1000
        rs = np.random.RandomState(1234)
        xb = rs.rand(nb, d).astype('float32')
        nlist = 10
        quantizer1 = faiss.IndexFlatL2(d)
        quantizer2 = faiss.IndexFlatL2(d)
        index1 = faiss.IndexIVFFlat(quantizer1, d, nlist)
        index2 = faiss.IndexIVFFlat(quantizer2, d, nlist)
        # second arg True: threaded sharding
        index = faiss.IndexShards(d, True)
        index.add_shard(index1)
        index.add_shard(index2)
        # untrained sub-indexes -> wrapper reports untrained
        self.assertFalse(index.is_trained)
        index.train(xb)
        self.assertTrue(index.is_trained)
        self.assertEqual(index.ntotal, 0)
        index.add(xb)
        self.assertEqual(index.ntotal, nb)
        # shards split the added vectors, so dropping one shard halves ntotal
        index.remove_shard(index2)
        self.assertEqual(index.ntotal, nb / 2)
        index.remove_shard(index1)
        self.assertEqual(index.ntotal, 0)

    def test_replica_flag_propagation(self):
        d = 64    # dimension
        nb = 1000
        rs = np.random.RandomState(1234)
        xb = rs.rand(nb, d).astype('float32')
        nlist = 10
        quantizer1 = faiss.IndexFlatL2(d)
        quantizer2 = faiss.IndexFlatL2(d)
        index1 = faiss.IndexIVFFlat(quantizer1, d, nlist)
        index2 = faiss.IndexIVFFlat(quantizer2, d, nlist)
        index = faiss.IndexReplicas(d, True)
        index.add_replica(index1)
        index.add_replica(index2)
        self.assertFalse(index.is_trained)
        index.train(xb)
        self.assertTrue(index.is_trained)
        self.assertEqual(index.ntotal, 0)
        index.add(xb)
        self.assertEqual(index.ntotal, nb)
        # each replica holds a full copy, so removing one keeps ntotal
        index.remove_replica(index2)
        self.assertEqual(index.ntotal, nb)
        index.remove_replica(index1)
        self.assertEqual(index.ntotal, 0)
class TestReconsException(unittest.TestCase):
    """Behavior of reconstruct() with and without a valid direct map."""

    def test_recons_exception(self):
        d = 64    # dimension
        nb = 1000
        rs = np.random.RandomState(1234)
        xb = rs.rand(nb, d).astype('float32')
        nlist = 10
        quantizer = faiss.IndexFlatL2(d)    # the other index
        index = faiss.IndexIVFFlat(quantizer, d, nlist)
        index.train(xb)
        index.add(xb)
        index.make_direct_map()
        # valid id reconstructs fine...
        index.reconstruct(9)
        # ...an out-of-range id must raise
        self.assertRaises(
            RuntimeError,
            index.reconstruct, 100001
        )

    # NOTE(review): method name has a typo ("reconstuct"); kept as-is
    # since renaming would change the discovered test id.
    def test_reconstuct_after_add(self):
        index = faiss.index_factory(10, 'IVF5,SQfp16')
        index.train(faiss.randn((100, 10), 123))
        index.add(faiss.randn((100, 10), 345))
        index.make_direct_map()
        # adding after make_direct_map must keep the map consistent
        index.add(faiss.randn((100, 10), 678))
        # should not raise an exception
        index.reconstruct(5)
        index.reconstruct(150)
class TestReconsHash(unittest.TestCase):
    """Reconstruction via the Hashtable direct map: add_with_ids,
    serialization round-trip, and removal."""

    def do_test(self, index_key):
        """Run the full scenario on the index described by index_key."""
        d = 32
        index = faiss.index_factory(d, index_key)
        index.train(faiss.randn((100, d), 123))
        # reference reconstruction
        index.add(faiss.randn((100, d), 345))
        index.add(faiss.randn((100, d), 678))
        ref_recons = index.reconstruct_n(0, 200)
        # with lookup
        index.reset()
        rs = np.random.RandomState(123)
        # arbitrary non-sequential ids to force hashtable lookups
        ids = rs.choice(10000, size=200, replace=False).astype(np.int64)
        index.add_with_ids(faiss.randn((100, d), 345), ids[:100])
        # switching map type mid-way must re-index existing entries
        index.set_direct_map_type(faiss.DirectMap.Hashtable)
        index.add_with_ids(faiss.randn((100, d), 678), ids[100:])
        # compare
        for i in range(0, 200, 13):
            recons = index.reconstruct(int(ids[i]))
            self.assertTrue(np.all(recons == ref_recons[i]))
        # test I/O
        buf = faiss.serialize_index(index)
        index2 = faiss.deserialize_index(buf)
        # compare
        for i in range(0, 200, 13):
            recons = index2.reconstruct(int(ids[i]))
            self.assertTrue(np.all(recons == ref_recons[i]))
        # remove
        toremove = np.ascontiguousarray(ids[0:200:3])
        sel = faiss.IDSelectorArray(50, faiss.swig_ptr(toremove[:50]))
        # test both ways of removing elements
        nremove = index2.remove_ids(sel)
        nremove += index2.remove_ids(toremove[50:])
        self.assertEqual(nremove, len(toremove))
        for i in range(0, 200, 13):
            if i % 3 == 0:
                # removed id: reconstruct must fail
                self.assertRaises(
                    RuntimeError,
                    index2.reconstruct, int(ids[i])
                )
            else:
                recons = index2.reconstruct(int(ids[i]))
                self.assertTrue(np.all(recons == ref_recons[i]))
        # index error should raise
        self.assertRaises(
            RuntimeError,
            index.reconstruct, 20000
        )

    def test_IVFFlat(self):
        self.do_test("IVF5,Flat")

    def test_IVFSQ(self):
        self.do_test("IVF5,SQfp16")

    def test_IVFPQ(self):
        self.do_test("IVF5,PQ4x4np")
class TestValidIndexParams(unittest.TestCase):
    """Parameter validation: invalid nprobe / k must raise, valid
    parameters must return correctly shaped results."""

    def test_IndexIVFPQ(self):
        d = 32
        nb = 1000
        nt = 1500
        nq = 200
        (xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
        coarse_quantizer = faiss.IndexFlatL2(d)
        index = faiss.IndexIVFPQ(coarse_quantizer, d, 32, 8, 8)
        index.cp.min_points_per_centroid = 5    # quiet warning
        index.train(xt)
        index.add(xb)
        # invalid nprobe
        index.nprobe = 0
        k = 10
        self.assertRaises(RuntimeError, index.search, xq, k)
        # invalid k
        index.nprobe = 4
        k = -10
        # negative k is rejected by the Python wrapper (AssertionError)
        self.assertRaises(AssertionError, index.search, xq, k)
        # valid params
        index.nprobe = 4
        k = 10
        D, nns = index.search(xq, k)
        self.assertEqual(D.shape[0], nq)
        self.assertEqual(D.shape[1], k)

    def test_IndexFlat(self):
        d = 32
        nb = 1000
        nt = 0
        nq = 200
        (xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
        index = faiss.IndexFlat(d, faiss.METRIC_L2)
        index.add(xb)
        # invalid k
        k = -5
        self.assertRaises(AssertionError, index.search, xq, k)
        # valid k
        k = 5
        D, I = index.search(xq, k)
        self.assertEqual(D.shape[0], nq)
        self.assertEqual(D.shape[1], k)
class TestLargeRangeSearch(unittest.TestCase):
    """Regression test for range_search result buffers overflowing on
    large result sets."""

    def test_range_search(self):
        # test for https://github.com/facebookresearch/faiss/issues/1889
        d = 256
        nq = 16
        nb = 1000000
        # faiss.cvar.distance_compute_blas_threshold = 10
        faiss.omp_set_num_threads(1)
        index = faiss.IndexFlatL2(d)
        # all-zero vectors: every database point matches every query
        xb = np.zeros((nb, d), dtype="float32")
        index.add(xb)
        xq = np.zeros((nq, d), dtype="float32")
        lims, D, I = index.range_search(xq, 1.0)
        # use a unittest assertion instead of a bare `assert`: bare
        # asserts are stripped when tests run under `python -O`
        self.assertEqual(len(D), len(xb) * len(xq))
class TestRandomIndex(unittest.TestCase):
    """Determinism check for the synthetic IndexRandom."""

    def test_random(self):
        """ just check if several runs of search retrieve the
        same results """
        index = faiss.IndexRandom(32, 1000000000)
        (xt, xb, xq) = get_dataset_2(32, 0, 0, 10)
        Dref, Iref = index.search(xq, 10)
        # distances must be sorted in ascending order
        self.assertTrue(np.all(Dref[:, 1:] >= Dref[:, :-1]))
        # a second identical search must return identical results
        Dnew, Inew = index.search(xq, 10)
        np.testing.assert_array_equal(Dref, Dnew)
        np.testing.assert_array_equal(Iref, Inew)
|
facebookresearchREPO_NAMEfaissPATH_START.@faiss_extracted@faiss-main@tests@test_index.py@.PATH_END.py
|
{
"filename": "RELEASE_NOTES.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/libs/tbb/RELEASE_NOTES.md",
"type": "Markdown"
}
|
<!--
******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************-->
# Release Notes <!-- omit in toc -->
This document contains changes of oneTBB compared to the last release.
## Table of Contents <!-- omit in toc -->
- [New Features](#new-features)
- [Known Limitations](#known-limitations)
- [Fixed Issues](#fixed-issues)
- [Open-source Contributions Integrated](#open-source-contributions-integrated)
## :white_check_mark: New Features
- Improved support and use of the latest C++ standards for parallel_sort that allows using this algorithm with user-defined and standard library-defined objects with modern semantics.
- The following features are now fully functional: task_arena extensions, collaborative_call_once, adaptive mutexes, heterogeneous overloads for concurrent_hash_map, and task_scheduler_handle.
- Added support for Windows* Server 2022 and Python 3.10.
## :rotating_light: Known Limitations
- An application using Parallel STL algorithms in libstdc++ versions 9 and 10 may fail to compile due to incompatible interface changes between earlier versions of Threading Building Blocks (TBB) and oneAPI Threading Building Blocks (oneTBB). Disable support for Parallel STL algorithms by defining PSTL_USE_PARALLEL_POLICIES (in libstdc++ 9) or _GLIBCXX_USE_TBB_PAR_BACKEND (in libstdc++ 10) macro to zero before inclusion of the first standard header file in each translation unit.
- On Linux* OS, if oneAPI Threading Building Blocks (oneTBB) or Threading Building Blocks (TBB) are installed in a system folder like /usr/lib64, the application may fail to link due to the order in which the linker searches for libraries. Use the -L linker option to specify the correct location of oneTBB library. This issue does not affect the program execution.
- The oneapi::tbb::info namespace interfaces might unexpectedly change the process affinity mask on Windows* OS systems (see https://github.com/open-mpi/hwloc/issues/366 for details) when using hwloc version lower than 2.5.
- Using a hwloc version other than 1.11, 2.0, or 2.5 may cause an undefined behavior on Windows OS. See https://github.com/open-mpi/hwloc/issues/477 for details.
- The NUMA topology may be detected incorrectly on Windows OS machines where the number of NUMA node threads exceeds the size of 1 processor group.
- On Windows OS on ARM64*, when compiling an application using oneTBB with the Microsoft* Compiler, the compiler issues a warning C4324 that a structure was padded due to the alignment specifier. Consider suppressing the warning by specifying /wd4324 to the compiler command line.
- oneTBB does not support fork(), to work-around the issue, consider using task_scheduler_handle to join oneTBB worker threads before using fork().
- C++ exception handling mechanism on Windows* OS on ARM64* might corrupt memory if an exception is thrown from any oneTBB parallel algorithm (see Windows* OS on ARM64* compiler issue: https://developercommunity.visualstudio.com/t/ARM64-incorrect-stack-unwinding-for-alig/1544293).
## :hammer: Fixed Issues
- Memory allocator crash on a system with an incomplete /proc/meminfo (GitHub* [#584](https://github.com/oneapi-src/oneTBB/issues/584)).
- Incorrect blocking of task stealing (GitHub* #[478](https://github.com/oneapi-src/oneTBB/issues/478)).
- Hang due to incorrect decrement of a limiter_node (GitHub* [#634](https://github.com/oneapi-src/oneTBB/issues/634)).
- Memory corruption in some rare cases when passing big messages in a flow graph (GitHub* [#639](https://github.com/oneapi-src/oneTBB/issues/639)).
- Possible deadlock in a throwable flow graph node with a lightweight policy. The lightweight policy is now ignored for functors that can throw exceptions (GitHub* [#420](https://github.com/oneapi-src/oneTBB/issues/420)).
- Crash when obtaining a range from empty ordered and unordered containers (GitHub* [#641](https://github.com/oneapi-src/oneTBB/issues/641)).
- Deadlock in a concurrent_vector resize() that could happen when the new size is less than the previous size (GitHub* [#733](https://github.com/oneapi-src/oneTBB/issues/733)).
## :octocat: Open-source Contributions Integrated
- Improved aligned memory allocation. Contributed by Andrey Semashev (https://github.com/oneapi-src/oneTBB/pull/671).
- Optimized usage of atomic_fence on IA-32 and Intel(R) 64 architectures. Contributed by Andrey Semashev (https://github.com/oneapi-src/oneTBB/pull/328).
- Fixed incorrect definition of the assignment operator in containers. Contributed by Andrey Semashev (https://github.com/oneapi-src/oneTBB/issues/372).
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@libs@tbb@RELEASE_NOTES.md@.PATH_END.py
|
{
"filename": "_stream.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/choropleth/_stream.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
    """Stream configuration for a choropleth trace (generated code:
    thin property wrappers over the plotly.js schema)."""

    # class properties
    # --------------------
    _parent_path_str = "choropleth"
    _path_str = "choropleth.stream"
    _valid_props = {"maxpoints", "token"}

    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.

        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]

        Returns
        -------
        int|float
        """
        return self["maxpoints"]

    @maxpoints.setter
    def maxpoints(self, val):
        self["maxpoints"] = val

    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://chart-studio.plotly.com/settings for more
        details.

        The 'token' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["token"]

    @token.setter
    def token(self, val):
        self["token"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        """

    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.choropleth.Stream`
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__("stream")

        # internal construction path: parent passes itself in kwargs
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.choropleth.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.Stream`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # explicit keyword arguments take precedence over `arg` entries
        _v = arg.pop("maxpoints", None)
        _v = maxpoints if maxpoints is not None else _v
        if _v is not None:
            self["maxpoints"] = _v
        _v = arg.pop("token", None)
        _v = token if token is not None else _v
        if _v is not None:
            self["token"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@choropleth@_stream.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/langchain_core/outputs/__init__.py",
"type": "Python"
}
|
"""**Output** classes are used to represent the output of a language model call
and the output of a chat.
The top container for information is the `LLMResult` object. `LLMResult` is used by
both chat models and LLMs. This object contains the output of the language
model and any additional information that the model provider wants to return.
When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
- Chat models will return `AIMessage` objects.
- LLMs will return regular text strings.
In addition, users can access the raw output of either LLMs or chat models via
callbacks. The on_chat_model_end and on_llm_end callbacks will return an
LLMResult object containing the generated outputs and any additional information
returned by the model provider.
In general, if information is already available
in the AIMessage object, it is recommended to access it from there rather than
from the `LLMResult` object.
"""
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.chat_result import ChatResult
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.llm_result import LLMResult
from langchain_core.outputs.run_info import RunInfo
__all__ = [
"ChatGeneration",
"ChatGenerationChunk",
"ChatResult",
"Generation",
"GenerationChunk",
"LLMResult",
"RunInfo",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@langchain_core@outputs@__init__.py@.PATH_END.py
|
{
"filename": "_variantsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattersmith/textfont/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `variantsrc` property of
    `scattersmith.textfont` (generated code)."""

    def __init__(
        self, plotly_name="variantsrc", parent_name="scattersmith.textfont", **kwargs
    ):
        super(VariantsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "none" edit type: changing this source does not trigger a redraw
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattersmith@textfont@_variantsrc.py@.PATH_END.py
|
{
"filename": "validate_decoder.py",
"repo_name": "changhoonhahn/provabgs",
"repo_path": "provabgs_extracted/provabgs-main/bin/validate_decoder.py",
"type": "Python"
}
|
'''
validate the trained decoder
'''
import os, sys
import numpy as np
from datetime import date
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
#-------------------------------------------------------
# params
#-------------------------------------------------------
name = 'nmfburst'
nbatch = 500
layers = [1280, 1024, 512, 256, 64]
#-------------------------------------------------------
dat_dir = '/tigress/chhahn/provabgs/'
wave = np.load(os.path.join(dat_dir, 'wave_fsps.npy'))
nwave = len(wave)
class Decoder(nn.Module):
    """Fully-connected decoder that maps an `ncode`-dimensional
    parameter vector to an `nfeat`-dimensional (whitened ln-)spectrum.

    Architecture: five Linear layers (ncode -> nhidden4 -> nhidden3 ->
    nhidden2 -> nhidden1 -> nhidden0), each followed by leaky-ReLU and
    dropout, then a final linear output layer to nfeat.
    """

    def __init__(self, nfeat=1000, ncode=5, nhidden0=128, nhidden1=128,
                 nhidden2=35, nhidden3=35, nhidden4=35, dropout=0.2):
        super(Decoder, self).__init__()
        self.ncode = int(ncode)
        # hidden stack, ordered from the code side towards the output;
        # layer creation order matters for reproducible initialization
        self.dec0 = nn.Linear(ncode, nhidden4)
        self.d1 = nn.Dropout(p=dropout)
        self.dec1 = nn.Linear(nhidden4, nhidden3)
        self.d2 = nn.Dropout(p=dropout)
        self.dec2 = nn.Linear(nhidden3, nhidden2)
        self.d3 = nn.Dropout(p=dropout)
        self.dec3 = nn.Linear(nhidden2, nhidden1)
        self.d4 = nn.Dropout(p=dropout)
        self.dec4 = nn.Linear(nhidden1, nhidden0)
        self.d5 = nn.Dropout(p=dropout)
        self.outp = nn.Linear(nhidden0, nfeat)

    def decode(self, x):
        """Apply the hidden stack then the linear output layer."""
        out = x
        for linear, drop in ((self.dec0, self.d1),
                             (self.dec1, self.d2),
                             (self.dec2, self.d3),
                             (self.dec3, self.d4),
                             (self.dec4, self.d5)):
            out = drop(F.leaky_relu(linear(out)))
        return self.outp(out)

    def forward(self, x):
        return self.decode(x)

    def loss(self, x, y):
        """Half sum-of-squares reconstruction error between decode(x) and y."""
        reconstruction = self.forward(x)
        return torch.sum(0.5 * (y - reconstruction).pow(2))
# load in test data
theta_test = np.load(os.path.join(dat_dir, 'fsps.%s.theta.test.npy' % name))
lnspec_test = np.load(os.path.join(dat_dir, 'fsps.%s.lnspectrum.test.npy' % name))

# reconstruct the test ln-spectra per wavelength bin, then stitch the
# three bins back together along the wavelength axis
lnspec_recon = []
for iw in range(3):  # wave bins
    # bin 0: < 4500A, bin 1: 4500-6500A, bin 2: >= 6500A
    wave_bin = [(wave < 4500), ((wave >= 4500) & (wave < 6500)), (wave >= 6500)][iw]
    # shift and scale of ln(spectra)
    shift_lnspec = np.load(os.path.join(dat_dir, 'fsps.%s.w%i.shift_lnspectrum.%i.npy' % (name, iw, nbatch)))
    scale_lnspec = np.load(os.path.join(dat_dir, 'fsps.%s.w%i.scale_lnspectrum.%i.npy' % (name, iw, nbatch)))
    # whitened reference spectra for this bin (debug prints below)
    lnspec_white_test = (lnspec_test[:,wave_bin] - shift_lnspec) / scale_lnspec
    n_test = theta_test.shape[0]
    n_theta = theta_test.shape[1]
    n_lnspec = len(shift_lnspec)
    # trained decoder for this wavelength bin
    model = torch.load(os.path.join(dat_dir, 'decoder.fsps.%s.w%i.%ibatches.%s.pth' % (name, iw, nbatch, '_'.join([str(l) for l in layers]))))
    lnspec_white_recon_iw = model.forward(torch.tensor(theta_test, dtype=torch.float32))
    # un-whiten the decoder output back to ln(spectrum) units
    lnspec_recon_iw = scale_lnspec * lnspec_white_recon_iw.detach().numpy() + shift_lnspec
    lnspec_recon.append(lnspec_recon_iw)
    # quick sanity print: whitened truth vs whitened reconstruction
    print(lnspec_white_test[:5,:5])
    print(lnspec_white_recon_iw.detach().numpy()[:5,:5])
lnspec_recon = np.concatenate(lnspec_recon, axis=1)
print(lnspec_test[:5,:5])
print(lnspec_recon[:5,:5])
# plot a handful of SEDs
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
# 5 random test spectra: solid = reconstruction, dashed = FSPS truth
for ii, i in enumerate(np.random.choice(n_test, size=5, replace=False)):
    sub.plot(wave, np.exp(lnspec_recon[i]), c='C%i' % ii)
    sub.plot(wave, np.exp(lnspec_test[i]), c='C%i' % ii, ls='--')
sub.set_xlim(wave.min(), wave.max())
fig.savefig('fsps.%s.%i.%s.valid_decoder.sed.png' % (name, nbatch, str(date.today().isoformat())), bbox_inches='tight')

# plot fractional reconstruction error
frac_dspectrum = 1. - np.exp(lnspec_recon - lnspec_test)
# percentile bands: 99.9%, 99%, 95%, 68% plus the median
frac_dspectrum_quantiles = np.nanquantile(frac_dspectrum,
        [0.0005, 0.005, 0.025, 0.16, 0.5, 0.84, 0.975, 0.995, 0.9995], axis=0)
fig = plt.figure(figsize=(15,5))
sub = fig.add_subplot(111)
sub.fill_between(wave, frac_dspectrum_quantiles[0],
        frac_dspectrum_quantiles[-1], fc='C0', ec='none', alpha=0.1, label='99.9%')
sub.fill_between(wave, frac_dspectrum_quantiles[1],
        frac_dspectrum_quantiles[-2], fc='C0', ec='none', alpha=0.2, label='99%')
sub.fill_between(wave, frac_dspectrum_quantiles[2],
        frac_dspectrum_quantiles[-3], fc='C0', ec='none', alpha=0.3, label='95%')
sub.fill_between(wave, frac_dspectrum_quantiles[3],
        frac_dspectrum_quantiles[-4], fc='C0', ec='none', alpha=0.5, label='68%')
sub.plot(wave, frac_dspectrum_quantiles[4], c='C0', ls='-')
sub.plot(wave, np.zeros(len(wave)), c='k', ls=':')
# mark +/- 1%
sub.plot(wave, 0.01 * np.ones(len(wave)), c='k', ls='--', lw=0.5)
sub.plot(wave, -0.01 * np.ones(len(wave)), c='k', ls='--', lw=0.5)
sub.set_xlim(wave.min(), wave.max())
sub.set_ylabel(r'$(f_{\rm emu} - f_{\rm fsps})/f_{\rm fsps}$', fontsize=25)
sub.set_ylim(-0.03, 0.03)
fig.savefig('fsps.%s.%i.%s.valid_decoder.png' % (name, nbatch, str(date.today().isoformat())), bbox_inches='tight')

# plot CDF of fractional reconstruction error
mean_frac_dspectrum = np.mean(np.abs(1. - np.exp(lnspec_recon - lnspec_test)), axis=1)
quant = np.quantile(mean_frac_dspectrum, [0.68, 0.95, 0.99, 0.999])
fig = plt.figure(figsize=(8,6))
sub = fig.add_subplot(111)
# shaded boxes mark the 68/95/99/99.9% quantiles of the mean error
for q, a in zip(quant[::-1], [0.1, 0.2, 0.3, 0.5]):
    sub.fill_between([0., q], [0., 0.], [1., 1.], alpha=a, color='C0')
_ = sub.hist(mean_frac_dspectrum, 40, density=True, histtype='step', cumulative=True, color='k')
sub.set_xlabel(r'${\rm mean}_\lambda \langle (f_{\rm speculator} - f_{\rm fsps}) / f_{\rm fsps} \rangle$', fontsize=20)
sub.set_xlim(0., 0.03)
sub.set_ylabel('cumulative distribution', fontsize=20)
sub.set_ylim(0., 1.)
fig.savefig('fsps.%s.%i.%s.valid_decoder.cdf.png' % (name, nbatch, str(date.today().isoformat())), bbox_inches='tight')
|
changhoonhahnREPO_NAMEprovabgsPATH_START.@provabgs_extracted@provabgs-main@bin@validate_decoder.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PlasmaPy/PlasmaPy",
"repo_path": "PlasmaPy_extracted/PlasmaPy-main/tests/dispersion/numerical/__init__.py",
"type": "Python"
}
|
PlasmaPyREPO_NAMEPlasmaPyPATH_START.@PlasmaPy_extracted@PlasmaPy-main@tests@dispersion@numerical@__init__.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.