metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/barpolar/legendgrouptitle/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound-property validator for ``barpolar.legendgrouptitle.font``.

    Validation is delegated to the generated ``Font`` data class; this
    subclass only wires up the property name, its parent path, and the
    default sub-property documentation.
    """

    def __init__(
        self, plotly_name="font", parent_name="barpolar.legendgrouptitle", **kwargs
    ):
        # Callers may override the data class or its documentation through
        # kwargs; otherwise the generated defaults below are used.
        data_class_str = kwargs.pop("data_class_str", "Font")
        data_docs = kwargs.pop(
            "data_docs",
            """
            color
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
        )
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@barpolar@legendgrouptitle@_font.py@.PATH_END.py
|
{
"filename": "pad_expander.py",
"repo_name": "CU-NESS/pylinex",
"repo_path": "pylinex_extracted/pylinex-master/examples/expander/pad_expander.py",
"type": "Python"
}
|
"""
File: examples/expander/pad_expander.py
Author: Keith Tauscher
Date: 10 Sep 2017
Description: Example showing how to use PadExpander to have multiplicative or
additive padding regions and any pad value.
"""
import os
import numpy as np
from pylinex import PadExpander, load_expander_from_hdf5_file
array = np.arange(100)
error = np.tile(np.linspace(1, 2, 20), (5,))
expander = PadExpander('1+', '3+')
expanded_array = expander(array)
assert np.all(expanded_array[:1] == 0)
assert np.all(expanded_array[1:-3] == array)
assert np.all(expanded_array[-3:] == 0)
assert np.all(expander.contract_error(error) == error[1:-3])
expander = PadExpander('2+', '3*')
expanded_array = expander(array)
assert np.all(expanded_array[:2] == 0)
assert np.all(expanded_array[2:-300] == array)
assert np.all(expanded_array[-300:] == 0)
assert np.all(expander.contract_error(\
np.concatenate([[0, 0], error, [0] * 300])) == error)
expander = PadExpander('3*', '1*', pad_value=1)
expanded_array = expander(array)
assert np.all(expanded_array[:300] == 1)
assert np.all(expanded_array[300:-100] == array)
assert np.all(expanded_array[-100:] == 1)
assert np.all(expander.contract_error(error) == error[:20])
file_name = 'test_pad_expander_TEMP.hdf5'
expander.save(file_name)
try:
assert expander == load_expander_from_hdf5_file(file_name)
except:
os.remove(file_name)
raise
os.remove(file_name)
|
CU-NESSREPO_NAMEpylinexPATH_START.@pylinex_extracted@pylinex-master@examples@expander@pad_expander.py@.PATH_END.py
|
{
"filename": "Plot_benchmark1.py",
"repo_name": "pw31/GGchem",
"repo_path": "GGchem_extracted/GGchem-master/tools/Plot_benchmark1.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.backends.backend_pdf import PdfPages

plt.rcParams['axes.linewidth'] = 2
pp = PdfPages('ggchem.pdf')
single_figures = 0               # 0: all pages into ggchem.pdf, 1: pdf per element, 2: png per element

# ---------- read the GGchem output file ----------
file = 'Static_Conc.dat'
data = open(file)
dummy = data.readline()          # skip the title line
dimens = data.readline()         # second line: array dimensions
dimens = np.array(dimens.split())
NELEM = int(dimens[0])           # number of elements
NMOLE = int(dimens[1])           # number of molecules
NDUST = int(dimens[2])           # number of condensates
NPOINT = int(dimens[3])          # number of (T,p) points
header = data.readline()         # third line: column names
data.close()
dat = np.loadtxt(file,skiprows=3)
keyword = np.array(header.split())
bar = 1.E+6                      # 1 bar in dyn/cm2
bk = 1.380662E-16                # Boltzmann constant [erg/K] (FIX: was "bk = bk=1.380662E-16")
Tg = dat[:,0]                    # T [K]
nHtot = dat[:,1]                 # n<H> [cm-3]
ntot = 0.0*nHtot
for i in range(4,4+NELEM+NMOLE): # without el, but including ions and cations
ntot = ntot + 10**dat[:,i]
#print keyword[i]
lognn = np.log10(ntot)
press = dat[:,2]/bar # p [dyn/cm2] -> [bar]
pmin = np.min(press)
pmax = np.max(press)
pmin = pmin/1.2
pmax = pmax*1.2
nHmin = np.min(nHtot)
nHmax = np.max(nHtot)
nHmin = nHmin*0.9
nHmax = nHmax*1.1
Tmin = np.min(Tg)
Tmax = np.max(Tg)
Tmin = 100.0
Tmax = 6000.0
TEAmin = 400.0
#if (Tmax>4*Tmin): Tmax=4*Tmin
#if (Tmin<Tmax/3): Tmin=Tmax/3
sep = 20
if (Tmax-Tmin>1500): sep=100
Tmin = Tmin*0.85
Tmax = Tmax*1.05
file = 'results/TEAoutBench1/results/TEAoutBench1.tea'
data = open(file)
dum = data.readline()
dum = data.readline()
dum = data.readline()
dum = data.readline()
dum = data.readline()
dum = data.readline()
dum = data.readline()
header = data.readline()
lines = data.readlines()
data.close()
sp_tea = np.array(header.split())
print "TEA has ",sp_tea
Npoint = len(lines)-1
Nsp = len(sp_tea)
#dat2 = np.loadtxt(file,skiprows=8)
dat2 = np.zeros((Npoint,Nsp),dtype='float')
for i in range(0,Npoint):
lval = lines[i].split()
for isp in range(0,Nsp):
dat2[i,isp] = np.float(lval[isp])
p_tea = dat2[:,0] # TEA's pressure [bar]
T_tea = dat2[:,1] # TEA's temperature
ntot_tea = p_tea*bar/bk/T_tea # TEA's ntot [cm-3]
logn_tea = np.log10(ntot)
nH_tea = dat2[:,np.where(sp_tea == 'H_g')[0]]
nHe_tea = dat2[:,np.where(sp_tea == 'He_ref')[0]]
nH2_tea = dat2[:,np.where(sp_tea == 'H2_ref')[0]]
nH2O_tea = dat2[:,np.where(sp_tea == 'H2O_g')[0]]
nO2_tea = dat2[:,np.where(sp_tea == 'O2_ref')[0]]
nCO_tea = dat2[:,np.where(sp_tea == 'CO_g')[0]]
nCO2_tea = dat2[:,np.where(sp_tea == 'CO2_g')[0]]
nCH4_tea = dat2[:,np.where(sp_tea == 'CH4_g')[0]]
nC2H2_tea = dat2[:,np.where(sp_tea == 'C2H2_g')[0]]
nC2H4_tea = dat2[:,np.where(sp_tea == 'C2H4_g')[0]]
nN2_tea = dat2[:,np.where(sp_tea == 'N2_ref')[0]]
nNH3_tea = dat2[:,np.where(sp_tea == 'NH3_g')[0]]
nOH_tea = dat2[:,np.where(sp_tea == 'OH_g')[0]]
Tind1 = np.where((Tg<Tmax) & (Tg>Tmin))[0]
Tind2 = np.where((T_tea<Tmax) & (T_tea>TEAmin))[0]
colo = ['blue','cornflowerblue','green','red','cyan','darkmagenta','gold','black','chocolate']
#'blue','silver','darkgoldenrod','darkgreen','darkmagenta','red','darkorange','gold','darkorchid','aqua','cadetblue','darkolivegreen','burlywood','chartreuse','chocolate','coral','cornflowerblue','black','darkkhaki','pink','moccasin','limegreen'
Ncolor = len(colo)
colo = colo*10
styl = ['-']*Ncolor + ['--']*Ncolor + [':']*Ncolor + ['-.']*Ncolor*7
widt = [2]*Ncolor*10
#================== temperature-pressure structure ====================
# Page 1: GGchem's T-p structure (thick line) with TEA's overplotted
# in thin light gray for comparison.
fig,ax = plt.subplots()
plt.plot(Tg,press,lw=4)
plt.plot(T_tea,p_tea,c='lightgray',lw=1)
plt.xlabel(r'$T\ \mathrm{[K]}$',fontsize=20)
plt.ylabel(r'$p\ \mathrm{[bar]}$',fontsize=20)
plt.xlim(Tmin,Tmax)
plt.ylim(pmin,pmax)
#plt.xscale('log')
#plt.yscale('log')
plt.tick_params(axis='both', labelsize=15)
plt.tick_params('both', length=6, width=1.5, which='major')
plt.tick_params('both', length=3, width=1, which='minor')
minorLocator = MultipleLocator(sep)
ax.xaxis.set_minor_locator(minorLocator)
plt.tight_layout()
plt.savefig(pp,format='pdf')
plt.clf()
#================== temperature-density structure ====================
# Page 2: hydrogen-nuclei density vs temperature.
fig,ax = plt.subplots()
plt.plot(Tg,nHtot,lw=4)
plt.xlabel(r'$T\ \mathrm{[K]}$',fontsize=20)
plt.ylabel(r'$n_\mathrm{\langle H\rangle}\ \mathrm{[cm^{-3}]}$',fontsize=20)
plt.xlim(Tmin,Tmax)
plt.ylim(nHmin,nHmax)
# switch to a log axis only when the density spans a wide range
if (nHmax>nHmin*5): plt.yscale('log')
plt.tick_params(axis='both', labelsize=15)
plt.tick_params('both', length=6, width=1.5, which='major')
plt.tick_params('both', length=3, width=1, which='minor')
minorLocator = MultipleLocator(sep)
ax.xaxis.set_minor_locator(minorLocator)
#fmt=ScalarFormatter(useOffset=False)
#fmt.set_scientific(False)
#ax.yaxis.set_major_formatter(fmt)
plt.tight_layout()
plt.savefig(pp,format='pdf')
plt.clf()
#================== some important molecules ====================
# Page 3: log10(n_mol/n_tot) for a hand-picked list of abundant molecules,
# with the corresponding TEA curves overplotted in light gray.
fig,ax = plt.subplots()
mols = ['H2','H','N2','H2O','O2','CO','CO2','CH4','NH3','He','SI(CH3)4']
nmax = np.float(0)
# global maximum of log10(n/ntot) over all species; sets the y-range below
for mol in range(4,4+NELEM+NMOLE):
    yy = dat[:,mol]                      # log10 nmol [cm-3]
    yy = yy - lognn                      # log10 nmol/ntot
    nmax = np.max([nmax,np.max(yy)])
count = 0
for mol in mols:
    ind = np.where(keyword == mol)[0]
    if (np.size(ind) == 0): continue     # species not present in this run
    ind = ind[0]
    print mol,ind
    yy = dat[:,ind]                      # log10 nmol [cm-3]
    yy = yy - lognn                      # log10 nmol/ntot
    #if (np.max(yy)>nmax-6):
    plt.plot(Tg,yy,ls=styl[count],lw=4,label=mol)
    count = count + 1
# overplot the same molecules from TEA (already n/ntot, so only log10 needed)
plt.plot(T_tea[Tind2],np.log10(nH_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nH2_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nHe_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nCO_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nH2O_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nO2_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nCO2_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nCH4_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nN2_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nNH3_tea[Tind2]) ,c='lightgray',lw=1)
plt.plot(T_tea[Tind2],np.log10(nC2H2_tea[Tind2]),c='lightgray',lw=1)
plt.title('important molecules',fontsize=20)
plt.xlabel(r'$T\ \mathrm{[K]}$',fontsize=20)
plt.ylabel(r'$\mathrm{log}_{10}\ n_\mathrm{mol}/n_\mathrm{tot}$',fontsize=20)
plt.xlim(Tmin,Tmax)
plt.ylim(nmax-8,nmax+0.5)
plt.xscale('log')
plt.tick_params(axis='both', labelsize=16)
plt.tick_params('both', length=7, width=2, which='major')
plt.tick_params('both', length=4, width=1.5, which='minor')
#minorLocator = MultipleLocator(sep)
#ax.xaxis.set_minor_locator(minorLocator)
plt.legend(loc='lower left',fontsize=10,fancybox=True)
plt.tight_layout()
plt.savefig(pp,format='pdf')
plt.clf()
if (single_figures>0): pp.close()
#================== where are the elements? ================
# One page per element: plot every molecule containing that element whose
# abundance comes within `limit` dex of the element's total abundance.
ellist = ['H','C','O','N','SI','S','NA','CL','CA','TI','K','AL','MG','FE','LI','F','P','NI','MN','CR','ZN','ZR','RB','CU','B','BR','V','SR','W','el']
allist = [' ',' ',' ',' ','Si',' ','Na','Cl','Ca','Ti',' ','Al','Mg','Fe','Li',' ',' ','Ni','Mn','Cr','Zn','Zr','Rb','Cu',' ','Br',' ','Sr',' ','+']
exlist = [' He ',' Cl CL Ca CA Cr CR Co Cu CU ',' ',' Na NA Ni NI ',' ',' Si SI Sr SR ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' Fe FE ',' ',' ',' ',' ',' ',' ',' ',' ',' Br BR ',' ',' ',' ',' ',' ']
titels = ['hydrogen','carbon','oxygen','nitrogen','silicon','sulphur','sodium','chlorine','calcium','titanium','potassium','aluminum','magnesium','iron','lithium','fluorine','phosphorus','nickel','manganese','chromium','zinc','zirconium','rubidium','copper','boron','bromine','vanadium','strontium','tungsten','charge carriers']
#          H C   O   N SiS Na ClCa Ti
limits = [2,4.5,3.5,4,7,5,10,3,10,5,10,6,6,12,6,3.5,5,10,10,10,10,10,10,10,10,10,10,10,10,3] #Orich
#limits = [2,5,3.5,6,6,5,6,4,6,6,6,6,6,6,6.5,3] #Crich
for i in range(0,30):
    el = ellist[i]           # element symbol as spelled in GGchem names
    al = allist[i]           # alternative mixed-case spelling (' ' if none)
    ex = exlist[i]           # two-letter substrings that are false matches
    limit = limits[i]        # dex window below the total element abundance
    titel = titels[i]
    print
    print titel+" ..."
    print 'ggchem ...'
    nmax = np.float(-100)
    nmin = np.float(0)
    mollist = []
    abulist = []
    maxy = 0.0*dat[:,0]
    # pass 1: collect all GGchem species that contain this element
    for mol in range(3,4+NELEM+NMOLE,1):
        molname = keyword[mol]
        ind = str.find(molname,el)
        if (ind < 0):
            ind = str.find(molname,al)
        if (ind < 0 and el=='el'):
            ind = str.find(molname,'-')   # anions count as charge carriers
        if (ind >= 0):
            next = molname[ind:ind+2]     # the match plus its following char
            #print mol,keyword[mol],next,str.find(ex,next),len(next)
            plotit = 0
            if (len(next)==1): plotit=1
            if (str.find(ex,next)==-1 or molname=='SIS'): plotit=1
            if (el=='N' and molname=='MNH'): plotit=0   # MN=manganese, not N
            if (plotit==1):
                yy = dat[:,mol]              # log10 nmol [cm-3]
                yy = yy - lognn              # log10 nmol/ntot
                nmax = np.max([nmax,np.max(yy[Tind1])])
                maxy = maxy + 10**yy         # running sum = total element abundance
                mollist.append(mol)
                abulist.append(np.mean(yy[Tind1]))
    if (len(abulist)<=0): continue           # element absent from this run
    if (single_figures==1): pp = PdfPages('benchmark_'+titel+'.pdf')
    if (single_figures>0): fig,ax = plt.subplots(figsize=(7,6))
    indices = np.argsort(abulist)
    count = 0
    maxy = np.log10(maxy)
    nmin = np.min([nmin,np.min(maxy)-limit,nmax-14])
    if (el=='el'): nmin=-30
    # pass 2: plot the collected species, most abundant first
    for ind in reversed(indices):
        mol = mollist[ind]
        abu = abulist[ind]
        molname = keyword[mol]
        yy = dat[:,mol]                      # log10 nmol [cm-3]
        yy = yy - lognn                      # log10 nmol/ntot
        if (np.max(yy[Tind1]-maxy[Tind1])>-limit):
            print molname,np.max(yy[Tind1]-maxy[Tind1])
            plt.plot(Tg,yy,c=colo[count],ls=styl[count],lw=4,label=molname)
            count = count + 1
    # TEA species use mixed-case element symbols, so switch spelling
    if (al<>' '): el=al
    print 'TEA ...'
    NTEA = len(sp_tea)
    maxy = 0.0*dat2[:,0]
    # TEA pass 1: total abundance of the element over all matching TEA species
    for mol in range(2,NTEA):
        molname = sp_tea[mol]
        ind = str.find(molname,el)
        if (ind >= 0):
            next = molname[ind:ind+2]
            if (len(next)==1 or str.find(ex,next)==-1 or molname=='SiS_g'):
                yy = np.log10(dat2[:,mol])   # log10 nmol/ntot
                maxy = maxy + 10**yy
    maxy = np.log10(maxy)
    # TEA pass 2: overplot matching TEA species in light gray
    for mol in range(2,NTEA):
        molname = sp_tea[mol]
        ind = str.find(molname,el)
        if (ind >= 0):
            next = molname[ind:ind+2]
            if (len(next)==1 or str.find(ex,next)==-1 or molname=='SiS_g'):
                yy = np.log10(dat2[:,mol])   # log10 nmol/ntot
                plotit = 0
                if (np.max(yy[Tind2]-maxy[Tind2])>-limit): plotit=1
                # a few species are always shown for comparison
                if (molname.find('ZrF4_g')>=0): plotit=1
                if (molname.find('ZrCl4_g')>=0): plotit=1
                if (molname.find('TiCl3_g')>=0): plotit=1
                if ((el<>'O') and (molname.find('TiOCl2_g')>=0)): plotit=1
                #print el,molname,plotit
                if (plotit==1):
                    print sp_tea[mol],np.max(yy[Tind2]-maxy[Tind2])
                    plt.plot(T_tea[Tind2],yy[Tind2],c='lightgray',lw=1.5)
    plt.title(titel,fontsize=20)
    plt.xlabel(r'$T\ \mathrm{[K]}$',fontsize=22)
    plt.ylabel(r'$\mathrm{log}_{10}\ n_\mathrm{mol}/n_\mathrm{tot}$',fontsize=20)
    plt.xscale('log')
    plt.xlim(100,Tmax)
    plt.ylim(nmin,nmax+1)
    plt.tick_params(axis='both', labelsize=18)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
    plt.tick_params('both', length=11, width=2, which='major')
    plt.tick_params('both', length=8, width=1.5, which='minor')
    #minorLocator = MultipleLocator(sep)
    #ax.xaxis.set_minor_locator(minorLocator)
    leg = plt.legend(loc='lower left',fontsize=10,fancybox=True)
    leg.get_frame().set_alpha(0.7)
    plt.tight_layout()
    if (single_figures<2): plt.savefig(pp,format='pdf')
    if (single_figures==2): plt.savefig('benchmark_'+titel+'.png')
    if (single_figures==0): plt.clf()
    if (single_figures==1): pp.close()
if (single_figures==0): pp.close()
print '... written output to ggchem.pdf.'
|
pw31REPO_NAMEGGchemPATH_START.@GGchem_extracted@GGchem-master@tools@Plot_benchmark1.py@.PATH_END.py
|
{
"filename": "types.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/wsgiref/types.py",
"type": "Python"
}
|
"""WSGI-related types for static type checking"""
from collections.abc import Callable, Iterable, Iterator
from types import TracebackType
from typing import Any, Protocol, TypeAlias
# Public typing surface of this module (PEP 3333 protocols and aliases).
__all__ = [
    "StartResponse",
    "WSGIEnvironment",
    "WSGIApplication",
    "InputStream",
    "ErrorStream",
    "FileWrapper",
]
# (exc_type, exc_value, traceback) as produced by sys.exc_info();
# the all-None triple signals "no exception".
_ExcInfo: TypeAlias = tuple[type[BaseException], BaseException, TracebackType]
_OptExcInfo: TypeAlias = _ExcInfo | tuple[None, None, None]
class StartResponse(Protocol):
    """start_response() callable as defined in PEP 3333

    Called with the response status line and header list; returns the
    legacy ``write(body_data)`` callable.  ``exc_info`` is supplied only
    when the application is reporting an error.
    """

    def __call__(
        self,
        status: str,                      # e.g. "200 OK"
        headers: list[tuple[str, str]],   # (header-name, header-value) pairs
        exc_info: _OptExcInfo | None = ...,
        /,  # all parameters are positional-only
    ) -> Callable[[bytes], object]: ...
# The WSGI environ is a plain dict of CGI-style and wsgi.* variables.
WSGIEnvironment: TypeAlias = dict[str, Any]
# An application is called with (environ, start_response) and yields body bytes.
WSGIApplication: TypeAlias = Callable[[WSGIEnvironment, StartResponse],
                                      Iterable[bytes]]
class InputStream(Protocol):
    """WSGI input stream as defined in PEP 3333

    A readable, line-iterable byte stream (the request body,
    ``environ["wsgi.input"]``).
    """

    def read(self, size: int = ..., /) -> bytes: ...
    def readline(self, size: int = ..., /) -> bytes: ...
    def readlines(self, hint: int = ..., /) -> list[bytes]: ...
    def __iter__(self) -> Iterator[bytes]: ...
class ErrorStream(Protocol):
    """WSGI error stream as defined in PEP 3333

    A flushable text stream for error output (``environ["wsgi.errors"]``).
    """

    def flush(self) -> object: ...
    def write(self, s: str, /) -> object: ...
    def writelines(self, seq: list[str], /) -> object: ...
class _Readable(Protocol):
    """Minimal readable-bytes object accepted by ``FileWrapper``."""

    def read(self, size: int = ..., /) -> bytes: ...
    # Optional: def close(self) -> object: ...
class FileWrapper(Protocol):
    """WSGI file wrapper as defined in PEP 3333

    Wraps a readable byte stream into an iterable of byte chunks, letting
    servers substitute platform-specific file-transmission optimizations.
    """

    def __call__(
        self, file: _Readable, block_size: int = ..., /,
    ) -> Iterable[bytes]: ...
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@wsgiref@types.py@.PATH_END.py
|
{
"filename": "test_version.py",
"repo_name": "simonsobs/nextline-schedule",
"repo_path": "nextline-schedule_extracted/nextline-schedule-main/tests/test_version.py",
"type": "Python"
}
|
import nextline_schedule
def test_version() -> None:
    '''Confirm that the version string is attached to the module'''
    # Assert explicitly instead of a bare attribute access, which reads as a
    # useless expression (flake8 B018) even though it would raise if missing.
    assert hasattr(nextline_schedule, '__version__')
    # Presumably __version__ is a non-empty string -- confirm against the
    # package's version machinery if this ever fails.
    assert isinstance(nextline_schedule.__version__, str)
    assert nextline_schedule.__version__ != ''
|
simonsobsREPO_NAMEnextline-schedulePATH_START.@nextline-schedule_extracted@nextline-schedule-main@tests@test_version.py@.PATH_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/bar/marker/line/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``colorscale`` property of ``bar.marker.line``."""

    def __init__(
        self, plotly_name="colorscale", parent_name="bar.marker.line", **kwargs
    ):
        # Pull defaults out of kwargs first so explicit caller overrides win.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"autocolorscale": False})
        role = kwargs.pop("role", "style")
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@bar@marker@line@_colorscale.py@.PATH_END.py
|
{
"filename": "test_mark_flares.py",
"repo_name": "afeinstein20/stella",
"repo_path": "stella_extracted/stella-main/stella/tests/test_mark_flares.py",
"type": "Python"
}
|
import numpy as np
from stella import ConvNN
from stella import FitFlares
from lightkurve.search import search_lightcurve
from numpy.testing import assert_almost_equal

# Module-level fixture: download one TESS 2-minute SPOC light curve for
# TIC 62124646 (sector 13), drop NaNs and normalize it.
# NOTE(review): this runs at import time and needs network access --
# consider moving it into a pytest fixture.
lk = search_lightcurve(target='tic62124646', mission='TESS',
                       exptime=120, sector=13, author='SPOC')
lk = lk.download(download_dir='.')
lk = lk.remove_nans().normalize()
# Pre-trained ensemble CNN weights file used by the tests below.
modelname = 'ensemble_s0002_i0010_b0.73.h5'
cnn = ConvNN(output_dir='.')
def test_predictions():
    """Score the light curve with the pre-trained CNN and verify that no
    cadence gets a flare probability above 0.99."""
    cnn.predict(
        modelname=modelname,
        times=lk.time.value,
        fluxes=lk.flux.value,
        errs=lk.flux_err.value,
    )
    confident = [p for p in cnn.predictions[0] if p > 0.99]
    assert len(confident) == 0
def find_flares():
    """Fit flares from the CNN predictions and check that none are found.

    NOTE(review): this function is not collected by pytest because its name
    lacks the ``test_`` prefix -- rename it if it is meant to run.
    """
    flares = FitFlares(id=[lk.targetid],
                       time=[lk.time.value],
                       flux=[lk.flux.value],
                       flux_err=[lk.flux_err.value],
                       # FIX: was `cn.predictions`, an undefined name (NameError)
                       predictions=[cnn.predictions[0]])
    flares.identify_flare_peaks()
    assert(len(flares.flare_table) == 0)
|
afeinstein20REPO_NAMEstellaPATH_START.@stella_extracted@stella-main@stella@tests@test_mark_flares.py@.PATH_END.py
|
{
"filename": "mpi_pool.py",
"repo_name": "igomezv/simplemc_tests",
"repo_path": "simplemc_tests_extracted/simplemc_tests-main/simplemc/analyzers/emcee/mpi_pool.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
try:
    # Use the real implementation when schwimmbad is installed.
    from schwimmbad import MPIPool
except ImportError:

    class MPIPool(object):
        """Stand-in that fails on construction with a pointer to schwimmbad."""

        def __init__(self, *args, **kwargs):
            message = (
                "The MPIPool from emcee has been forked to "
                "https://github.com/adrn/schwimmbad, "
                "please install that package to continue using the MPIPool"
            )
            raise ImportError(message)


__all__ = ["MPIPool"]
|
igomezvREPO_NAMEsimplemc_testsPATH_START.@simplemc_tests_extracted@simplemc_tests-main@simplemc@analyzers@emcee@mpi_pool.py@.PATH_END.py
|
{
"filename": "faq.md",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/docs/user_guide/faq.md",
"type": "Markdown"
}
|
<!--
# Copyright 2019-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-->
# FAQ
## What are the advantages of running a model with Triton Inference Server compared to running directly using the model's framework API?
When using Triton Inference Server the inference result will be the
same as when using the model's framework directly. However, with
Triton you get benefits like [concurrent model
execution](architecture.md#concurrent-model-execution) (the ability to
run multiple models at the same time on the same GPU) and [dynamic
batching](model_configuration.md#dynamic-batcher) to get better
throughput. You can also [replace or upgrade models while Triton and
client application are running](model_management.md). Another benefit
is that Triton can be deployed as a Docker container, anywhere – on
premises and on public clouds. Triton Inference Server also [supports
multiple
frameworks](https://github.com/triton-inference-server/backend) such
as TensorRT, TensorFlow, PyTorch, and ONNX on both GPUs and CPUs
leading to a streamlined deployment.
## Can Triton Inference Server run on systems that don't have GPUs?
Yes, the QuickStart guide describes how to [run Triton on a CPU-Only
System](../getting_started/quickstart.md#run-on-cpu-only-system).
## Can Triton Inference Server be used in non-Docker environments?
Yes. Triton Inference Server can also be [built from
source](../customization_guide/build.md#building-without-docker) on your "bare metal"
system.
## Do you provide client libraries for languages other than C++ and Python?
We provide C++ and Python client libraries to make it easy for users
to write client applications that communicate with Triton. We chose
those languages because they were likely to be popular and performant
in the ML inference space, but in the future we can possibly add other
languages if there is a need.
We provide the GRPC API as a way to generate your own client library
for a large number of languages. By following the official GRPC
documentation and using
[grpc_service.proto](https://github.com/triton-inference-server/common/blob/main/protobuf/grpc_service.proto)
you can generate language bindings for all the languages supported by
GRPC. We provide three examples of this for
[Go](https://github.com/triton-inference-server/client/blob/main/src/grpc_generated/go),
[Python](https://github.com/triton-inference-server/client/blob/main/src/python/examples/grpc_client.py) and
[Java](https://github.com/triton-inference-server/client/blob/main/src/grpc_generated/java).
In general the client libraries (and client examples) are meant to be
just that, examples. We feel the client libraries are well written and
well tested, but they are not meant to serve every possible use
case. In some cases you may want to develop your own customized
library to suit your specific needs.
## How would you use Triton Inference Server within the AWS environment?
In an AWS environment, the Triton Inference Server docker container
can run on [CPU-only instances or GPU compute
instances](../getting_started/quickstart.md#launch-triton). Triton can run directly on the
compute instance or inside Elastic Kubernetes Service (EKS). In
addition, other AWS services such as Elastic Load Balancer (ELB) can
be used for load balancing traffic among multiple Triton
instances. Elastic Block Store (EBS) or S3 can be used for storing
deep-learning models loaded by the inference server.
## How do I measure the performance of my model running in the Triton Inference Server?
The Triton Inference Server exposes performance information in two
ways: by [Prometheus metrics](metrics.md) and by the statistics
available through the [HTTP/REST, GRPC, and C
APIs](../customization_guide/inference_protocols.md).
A client application,
[perf_analyzer](https://github.com/triton-inference-server/perf_analyzer/blob/main/README.md),
allows you to measure the performance of an individual model using a synthetic
load. The perf_analyzer application is designed to show you the tradeoff of
latency vs. throughput.
## How can I fully utilize the GPU with Triton Inference Server?
Triton Inference Server has several features designed to increase
GPU utilization:
* Triton can [simultaneously perform inference for multiple
models](architecture.md#concurrent-model-execution) (using either
the same or different frameworks) using the same GPU.
* Triton can increase inference throughput by using [multiple
instances of the same
model](architecture.md#concurrent-model-execution) to handle multiple
simultaneous inference requests to that model. Triton chooses
reasonable defaults but [you can also control the exact level of
concurrency](model_configuration.md#instance-groups) on a
model-by-model basis.
* Triton can [batch together multiple inference requests into a single
inference execution](model_configuration.md#dynamic-batcher). Typically,
batching inference requests leads to much higher throughput with only
a relatively small increase in latency.
As a general rule, batching is the most beneficial way to increase GPU
utilization. So you should always try enabling the [dynamic
batcher](model_configuration.md#dynamic-batcher) with your models. Using
multiple instances of a model can also provide some benefit but is
typically most useful for models that have small compute
requirements. Most models will benefit from using two instances but
more than that is often not useful.
## If I have a server with multiple GPUs should I use one Triton Inference Server to manage all GPUs or should I use multiple inference servers, one for each GPU?
Triton Inference Server will take advantage of all GPUs that it has
access to on the server. You can limit the GPUs available to Triton by
using the CUDA_VISIBLE_DEVICES environment variable (or with Docker
you can also use NVIDIA_VISIBLE_DEVICES or --gpus flag when launching
the container). When using multiple GPUs, Triton will distribute
inference requests across the GPUs to keep them all equally
utilized. You can also [control more explicitly which models are
running on which GPUs](model_configuration.md#instance-groups).
In some deployment and orchestration environments (for example,
Kubernetes) it may be more desirable to partition a single multi-GPU
server into multiple *nodes*, each with one GPU. In this case the
orchestration environment will run a different Triton for each GPU and
a load balancer will be used to divide inference requests across the
available Triton instances.
## If the server segfaults, how can I debug it?
The NGC build is a Release build and does not contain Debug symbols.
The build.py as well defaults to a Release build. Refer to the instructions
in [build.md](../customization_guide/build.md#building-with-debug-symbols) to create a Debug build
of Triton. This will help find the cause of the segmentation fault when
looking at the gdb trace for the segfault.
When opening a GitHub issue for the segfault with Triton, please include
the backtrace to better help us resolve the problem.
## What are the benefits of using [Triton Inference Server](https://developer.nvidia.com/triton-inference-server) as part of the [NVIDIA AI Enterprise Software Suite](https://www.nvidia.com/en-us/data-center/products/ai-enterprise/)?
NVIDIA AI Enterprise enables enterprises to implement full AI workflows by
delivering an entire end-to-end AI platform. Four key benefits:
### Enterprise-Grade Support, Security & API Stability:
Business-critical AI projects stay on track with NVIDIA Enterprise Support,
available globally to assist both IT teams with deploying and managing the
lifecycle of AI applications and the developer teams with building AI
applications. Support includes maintenance updates, dependable SLAs and
response times. Regular security reviews and priority notifications mitigate
potential risk of unmanaged open-source software and ensure compliance with corporate
standards. Finally, long term support and regression testing ensures API
stability between releases.
### Speed time to production with AI Workflows & Pretrained Models:
To reduce the complexity of developing common AI applications, NVIDIA AI
Enterprise includes
[AI workflows](https://www.nvidia.com/en-us/launchpad/ai/workflows/) which are
reference applications for specific business outcomes such as Intelligent
Virtual Assistants and Digital Fingerprinting for real-time cybersecurity threat
detection. AI workflow reference applications may include
[AI frameworks](https://docs.nvidia.com/deeplearning/frameworks/index.html) and
[pretrained models](https://developer.nvidia.com/ai-models),
[Helm Charts](https://catalog.ngc.nvidia.com/helm-charts),
[Jupyter Notebooks](https://developer.nvidia.com/run-jupyter-notebooks) and
[documentation](https://docs.nvidia.com/ai-enterprise/index.html#overview).
### Performance for Efficiency and Cost Savings:
Using accelerated compute for AI workloads such as data processing with
[NVIDIA RAPIDS Accelerator](https://developer.nvidia.com/rapids) for Apache
Spark and inference with Triton Inference Server delivers better performance
which also improves efficiency and reduces operation and infrastructure costs,
including savings from reduced time and energy consumption.
### Optimized and Certified to Deploy Everywhere:
Cloud, Data Center, Edge Optimized and certified to ensure reliable performance
whether it’s running your AI in the public cloud, virtualized data centers, or
on DGX systems.
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@docs@user_guide@faq.md@.PATH_END.py
|
{
"filename": "rws.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/pyro/infer/rws.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
import pyro
import pyro.poutine as poutine
from pyro.infer.elbo import ELBO
from pyro.infer.enum import get_importance_trace
from pyro.infer.util import is_validation_enabled
from pyro.poutine.util import prune_subsample_sites
from pyro.util import check_if_enumerated, check_model_guide_match, warn_if_nan
class ReweightedWakeSleep(ELBO):
    r"""
    An implementation of Reweighted Wake Sleep following reference [1].

    .. note:: Sampling and log_prob evaluation asymptotic complexity:

        1) Using wake-theta and/or wake-phi
            O(`num_particles`) samples from guide,
            O(`num_particles`) `log_prob` evaluations of model and guide

        2) Using sleep-phi
            O(`num_sleep_particles`) samples from model,
            O(`num_sleep_particles`) `log_prob` evaluations of guide

        if 1) and 2) are combined,
            O(`num_particles`) samples from the guide,
            O(`num_sleep_particles`) from the model,
            O(`num_particles` + `num_sleep_particles`) `log_prob` evaluations of the guide, and
            O(`num_particles`) evaluations of the model

    .. note:: This is particularly useful for models with stochastic branching,
        as described in [2].

    .. note:: This returns _two_ losses, one each for (a) the model parameters (`theta`), computed using the
        `iwae` objective, and (b) the guide parameters (`phi`), computed using (a combination of) the `csis`
        objective and a self-normalized importance-sampled version of the `csis` objective.

    .. note:: In order to enable computing the sleep-phi terms, the guide program must have its observations
        explicitly passed in through the keyworded argument `observations`. Where the value of the observations
        is unknown during definition, such as for amortized variational inference, it may be given a default
        argument as `observations=None`, and the correct value supplied during learning through
        `svi.step(observations=...)`.

    .. warning:: Mini-batch training is not supported yet.

    :param int num_particles: The number of particles/samples used to form the objective
        (gradient) estimator. Default is 2.
    :param insomnia: The scaling between the wake-phi and sleep-phi terms. Default is 1.0 [wake-phi]
    :param bool model_has_params: Indicate if model has learnable params. Useful in avoiding extra
        computation when running in pure sleep mode [csis]. Default is True.
    :param int num_sleep_particles: The number of particles used to form the sleep-phi estimator.
        Matches `num_particles` by default.
    :param bool vectorize_particles: Whether the traces should be vectorised
        across `num_particles`. Default is True.
    :param int max_plate_nesting: Bound on max number of nested
        :func:`pyro.plate` contexts. Default is infinity.
    :param bool strict_enumeration_warning: Whether to warn about possible
        misuse of enumeration, i.e. that
        :class:`~pyro.infer.traceenum_elbo.TraceEnum_ELBO` is used iff there
        are enumerated sample sites.

    References:

    [1] `Reweighted Wake-Sleep`,
        Jörg Bornschein, Yoshua Bengio

    [2] `Revisiting Reweighted Wake-Sleep for Models with Stochastic Control Flow`,
        Tuan Anh Le, Adam R. Kosiorek, N. Siddharth, Yee Whye Teh, Frank Wood
    """

    def __init__(
        self,
        num_particles=2,
        insomnia=1.0,
        model_has_params=True,
        num_sleep_particles=None,
        vectorize_particles=True,
        max_plate_nesting=float("inf"),
        strict_enumeration_warning=True,
    ):
        # force K > 1 otherwise SNIS not possible
        assert (
            num_particles > 1
        ), "Reweighted Wake Sleep needs to be run with more than one particle"
        super().__init__(
            num_particles=num_particles,
            max_plate_nesting=max_plate_nesting,
            vectorize_particles=vectorize_particles,
            strict_enumeration_warning=strict_enumeration_warning,
        )
        # insomnia interpolates the guide loss between pure sleep-phi (0.0)
        # and pure wake-phi (1.0); see _loss below.
        self.insomnia = insomnia
        self.model_has_params = model_has_params
        # Sleep-phi may use its own particle count; default to num_particles.
        self.num_sleep_particles = (
            num_particles if num_sleep_particles is None else num_sleep_particles
        )
        assert insomnia >= 0 and insomnia <= 1, "insomnia should be in [0, 1]"

    def _get_trace(self, model, guide, args, kwargs):
        """
        Returns a single trace from the guide, and the model that is run against it.
        """
        # detach=True: guide samples are treated as constants when replayed
        # through the model, as required by the RWS estimators.
        model_trace, guide_trace = get_importance_trace(
            "flat", self.max_plate_nesting, model, guide, args, kwargs, detach=True
        )
        if is_validation_enabled():
            check_if_enumerated(guide_trace)
        return model_trace, guide_trace

    def _loss(self, model, guide, args, kwargs):
        """
        :returns: returns model loss and guide loss
        :rtype: float, float

        Computes the re-weighted wake-sleep estimators for the model (wake-theta) and the
        guide (insomnia * wake-phi + (1 - insomnia) * sleep-phi).
        Performs backward as appropriate on both, over the specified number of particles.
        """
        # Placeholder; only returned unchanged in pure sleep mode
        # (model_has_params=False and insomnia=0), where it is unused.
        wake_theta_loss = torch.tensor(100.0)
        if self.model_has_params or self.insomnia > 0.0:
            # compute quantities for wake theta and wake phi
            log_joints = []
            log_qs = []
            for model_trace, guide_trace in self._get_traces(
                model, guide, args, kwargs
            ):
                log_joint = 0.0
                log_q = 0.0
                for _, site in model_trace.nodes.items():
                    if site["type"] == "sample":
                        if self.vectorize_particles:
                            # Keep the particle axis; sum out everything else.
                            log_p_site = (
                                site["log_prob"].reshape(self.num_particles, -1).sum(-1)
                            )
                        else:
                            log_p_site = site["log_prob_sum"]
                        log_joint = log_joint + log_p_site
                for _, site in guide_trace.nodes.items():
                    if site["type"] == "sample":
                        if self.vectorize_particles:
                            log_q_site = (
                                site["log_prob"].reshape(self.num_particles, -1).sum(-1)
                            )
                        else:
                            log_q_site = site["log_prob_sum"]
                        log_q = log_q + log_q_site
                log_joints.append(log_joint)
                log_qs.append(log_q)
            # Vectorized runs yield a single trace carrying all particles;
            # otherwise stack the per-trace totals along a particle axis.
            log_joints = (
                log_joints[0] if self.vectorize_particles else torch.stack(log_joints)
            )
            log_qs = log_qs[0] if self.vectorize_particles else torch.stack(log_qs)
            # Importance weights; log_qs is detached here so phi gradients do
            # not flow through the weights themselves.
            log_weights = log_joints - log_qs.detach()
            # compute wake theta loss (negative IWAE bound over particles)
            log_sum_weight = torch.logsumexp(log_weights, dim=0)
            wake_theta_loss = -(log_sum_weight - math.log(self.num_particles)).sum()
            warn_if_nan(wake_theta_loss, "wake theta loss")
        if self.insomnia > 0:
            # compute wake phi loss: self-normalized importance-weighted
            # negative guide log-likelihood of the wake samples.
            normalised_weights = (log_weights - log_sum_weight).exp().detach()
            wake_phi_loss = -(normalised_weights * log_qs).sum()
            warn_if_nan(wake_phi_loss, "wake phi loss")
        if self.insomnia < 1:
            # compute sleep phi loss: score the guide on synthetic data drawn
            # from the unconditioned model.
            _model = pyro.poutine.uncondition(model)
            _guide = guide
            _log_q = 0.0
            if self.vectorize_particles:
                if self.max_plate_nesting == float("inf"):
                    self._guess_max_plate_nesting(_model, _guide, args, kwargs)
                _model = self._vectorized_num_sleep_particles(_model)
                _guide = self._vectorized_num_sleep_particles(guide)
            # One pass when vectorized (all particles in a single trace);
            # otherwise one pass per sleep particle.
            for _ in range(1 if self.vectorize_particles else self.num_sleep_particles):
                _model_trace = poutine.trace(_model).get_trace(*args, **kwargs)
                _model_trace.detach_()
                _guide_trace = self._get_matched_trace(
                    _model_trace, _guide, args, kwargs
                )
                _log_q += _guide_trace.log_prob_sum()
            sleep_phi_loss = -_log_q / self.num_sleep_particles
            warn_if_nan(sleep_phi_loss, "sleep phi loss")
        # compute phi loss: convex combination of wake-phi and sleep-phi,
        # selecting only the branch(es) actually computed above.
        phi_loss = (
            sleep_phi_loss
            if self.insomnia == 0
            else (
                wake_phi_loss
                if self.insomnia == 1
                else self.insomnia * wake_phi_loss
                + (1.0 - self.insomnia) * sleep_phi_loss
            )
        )
        return wake_theta_loss, phi_loss

    def loss(self, model, guide, *args, **kwargs):
        """
        :returns: returns model loss and guide loss
        :rtype: float, float

        Computes the re-weighted wake-sleep estimators for the model (wake-theta) and the
        guide (insomnia * wake-phi + (1 - insomnia) * sleep-phi).
        """
        # Evaluation only: no gradients are tracked.
        with torch.no_grad():
            wake_theta_loss, phi_loss = self._loss(model, guide, args, kwargs)
        return wake_theta_loss, phi_loss

    def loss_and_grads(self, model, guide, *args, **kwargs):
        """
        :returns: returns model loss and guide loss
        :rtype: float

        Computes the RWS estimators for the model (wake-theta) and the guide (wake-phi).
        Performs backward as appropriate on both, using num_particle many samples/particles.
        """
        wake_theta_loss, phi_loss = self._loss(model, guide, args, kwargs)
        # convenience addition to ensure easier gradients without requiring `retain_graph=True`
        (wake_theta_loss + phi_loss).backward()
        return wake_theta_loss.detach().item(), phi_loss.detach().item()

    def _vectorized_num_sleep_particles(self, fn):
        """
        Copy of `_vectorised_num_particles` that uses `num_sleep_particles`.
        """

        def wrapped_fn(*args, **kwargs):
            if self.num_sleep_particles == 1:
                return fn(*args, **kwargs)
            # Wrap fn in an outermost plate so all sleep particles are drawn
            # in one vectorized execution.
            with pyro.plate(
                "num_sleep_particles_vectorized",
                self.num_sleep_particles,
                dim=-self.max_plate_nesting,
            ):
                return fn(*args, **kwargs)

        return wrapped_fn

    @staticmethod
    def _get_matched_trace(model_trace, guide, args, kwargs):
        """Run the guide against a model trace, passing the model's observed
        values in via the `observations` keyword (sleep-phi training data)."""
        kwargs["observations"] = {}
        for node in model_trace.stochastic_nodes + model_trace.observation_nodes:
            if "was_observed" in model_trace.nodes[node]["infer"]:
                model_trace.nodes[node]["is_observed"] = True
                kwargs["observations"][node] = model_trace.nodes[node]["value"]
        # Replay the guide against the model's sampled values.
        guide_trace = poutine.trace(poutine.replay(guide, model_trace)).get_trace(
            *args, **kwargs
        )
        check_model_guide_match(model_trace, guide_trace)
        guide_trace = prune_subsample_sites(guide_trace)
        return guide_trace
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@pyro@infer@rws.py@.PATH_END.py
|
{
"filename": "rename_pulsar.py",
"repo_name": "plazar/TOASTER",
"repo_path": "TOASTER_extracted/TOASTER-master/toolkit/pulsars/rename_pulsar.py",
"type": "Python"
}
|
#!/usr/bin/env python
import utils
import errors
import database
# Sub-command identifier used by the toolkit's command dispatcher.
SHORTNAME = 'rename'
# One-line description shown in the tool's help output.
DESCRIPTION = "Change the name of a pulsar entry. " \
              "The old name will remain a valid alias."
def add_arguments(parser):
    """Register this tool's command-line options on the given parser.

        Inputs:
            parser: An argparse-style parser object.

        Outputs:
            None
    """
    parser.add_argument(
        '-n', '--name', dest='newname', type=str,
        help="The new name of the pulsar entry.")
    parser.add_argument(
        '-p', '--psr', dest='psrname', type=str,
        help="The pulsar to rename.")
def check_new_name(pulsar_id, newname):
    """Verify that the proposed name may be applied to the given pulsar.

        The name is invalid if it is already associated with a different
        pulsar_id entry; in that case an error is raised.

        Inputs:
            pulsar_id: The DB ID number of the pulsar to rename.
            newname: The proposed new name.

        Outputs:
            None
    """
    name_to_id = utils.get_pulsarid_cache()
    # A name already mapped to this same pulsar is fine; only a mapping to
    # a *different* pulsar is a conflict.
    if newname in name_to_id and name_to_id[newname] != pulsar_id:
        conflict_id = name_to_id[newname]
        msg = ("The proposed pulsar name, '%s', "
               "is already in use with a different "
               "pulsar (%s, ID: %d). Pulsar names and "
               "aliases must refer to a single "
               "pulsar only." %
               (newname, utils.get_pulsarname(conflict_id), conflict_id))
        raise errors.BadInputError(msg)
def rename_pulsar(oldname, newname, existdb=None):
    """Rename a pulsar DB entry. An error is raised if the
        renaming is invalid.

        Inputs:
            oldname: The old name of the pulsar entry.
            newname: The proposed new name of the entry.
            existdb: A (optional) existing database connection object.
                (Default: Establish a db connection)

        Outputs:
            None
    """
    # Reuse the caller's connection when given; otherwise open our own.
    db = existdb or database.Database()
    db.connect()
    # Get the pulsar_id of the entry to rename
    pulsar_id = utils.get_pulsarid(oldname)
    # Rename and (possibly) alias-insert inside one transaction.
    trans = db.begin()
    try:
        # Check if the new name is valid
        check_new_name(pulsar_id, newname)
        # Rename the pulsar entry
        values = {'pulsar_name': newname}
        update = db.pulsars.update().\
                    where(db.pulsars.c.pulsar_id == pulsar_id)
        results = db.execute(update, values)
        results.close()
        # NOTE(review): the name->id cache is consulted again here;
        # presumably it reflects the pre-rename state mid-transaction —
        # confirm against utils.get_pulsarid_cache's caching behavior.
        if newname not in utils.get_pulsarid_cache().keys():
            # Add newname to pulsar_aliases table
            ins = db.pulsar_aliases.insert()
            values = {'pulsar_id': pulsar_id, \
                      'pulsar_alias': newname}
            result = db.execute(ins, values)
            result.close()
    except:
        # Roll back the partial rename on any failure, then re-raise.
        db.rollback()
        raise
    else:
        db.commit()
    finally:
        # Only close connections this function opened itself.
        if not existdb:
            db.close()
def main(args):
    """Entry point: validate the arguments and rename the pulsar.

        Inputs:
            args: Parsed command-line arguments with 'psrname' and
                'newname' attributes.

        Outputs:
            None
    """
    # Connect to the database
    db = database.Database()
    db.connect()
    try:
        if args.newname is None:
            raise errors.BadInputError("A new name must be provided.")
        # Both options are optional at the parser level, so check the
        # pulsar name too (previously only the new name was validated).
        if args.psrname is None:
            raise errors.BadInputError("The pulsar to rename must be "
                                       "provided.")
        # Rename the pulsar
        rename_pulsar(args.psrname, args.newname, db)
    finally:
        # Close DB connection
        db.close()
# Script entry point: build the toolkit's standard argument parser, add this
# tool's options, and run.
if __name__=='__main__':
    parser = utils.DefaultArguments(description=DESCRIPTION)
    add_arguments(parser)
    args = parser.parse_args()
    main(args)
|
plazarREPO_NAMETOASTERPATH_START.@TOASTER_extracted@TOASTER-master@toolkit@pulsars@rename_pulsar.py@.PATH_END.py
|
{
"filename": "anchored_artists.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/mpl_toolkits/axes_grid/anchored_artists.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.offsetbox import AnchoredOffsetbox, AuxTransformBox, VPacker,\
TextArea, AnchoredText, DrawingArea, AnnotationBbox
from mpl_toolkits.axes_grid1.anchored_artists import \
AnchoredDrawingArea, AnchoredAuxTransformBox, \
AnchoredEllipse, AnchoredSizeBar
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@mpl_toolkits@axes_grid@anchored_artists.py@.PATH_END.py
|
{
"filename": "sn_sl_metric.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/metrics/sn_sl_metric.py",
"type": "Python"
}
|
__all__ = ("SNSLMetric",)
import healpy as hp
import numpy as np
from rubin_scheduler.utils import calc_season
import rubin_sim.maf.metrics as metrics
from rubin_sim.maf.utils import collapse_night
from rubin_sim.phot_utils import DustValues
class SNSLMetric(metrics.BaseMetric):
    """Calculate the number of expected well-measured strongly lensed SN
    (per data_slice).

    Parameters
    ----------
    metric_name : `str`, optional
        metric name
        Default : SNSLMetric
    mjd_col : `str`, optional
        mjd column name
        Default : observationStartMJD
    filter_col : `str`, optional
        filter column name
        Default: filter
    night_col : `str`, optional
        night column name
        Default : night
    m5_col : `str`, optional
        individual visit five-sigma limiting magnitude (m5) column name
        Default : fiveSigmaDepth
    season : `list` [`int`] or None, optional
        season to process (default: None: all seasons)
        A list with [-1] processes all seasons, as does None.
    nfilters_min : `int`, optional
        The number of filters to demand in a season
        Default: 4.
    min_season_obs : `int`, optional
        Minimum number of observations per season. Default 5.
    m5mins : `dict`, optional
        Minimum individual image depth for visit to 'count'.
        Default None uses
        {'u': 22.7, 'g': 24.1, 'r': 23.7, 'i': 23.1, 'z': 22.2, 'y': 21.4}.
    maps : `list` or None, optional
        List of maps to use.
        Default None uses the dustmap, to reduce m5 limiting mags accordingly.

    Returns
    -------
    n_slsn : `float`
        Number of expected well-measured strongly lensed SN

    Notes
    -----
    The number of expected strongly lensed SN detections with a
    well-measured time delay is given by:

    N (lensed SNe Ia with well measured time delay) = 45.7 *
    survey_area / (20000 deg^2) *
    cumulative_season_length / (2.5 years) /
    (2.15 * exp(0.37 * gap_median_all_filter))

    where:
    survey_area: survey area (in deg2)
    cumulative_season_length: cumulative season length (in years)
    gap_median_all_filter: median gap (all filters) (in days)

    (reference? metric originated from Simon Huber and Phillipe Gris)
    """

    def __init__(
        self,
        metric_name="SNSLMetric",
        mjd_col="observationStartMJD",
        filter_col="filter",
        night_col="night",
        m5_col="fiveSigmaDepth",
        season=None,
        nfilters_min=4,
        min_season_obs=5,
        m5mins=None,
        maps=None,
        **kwargs,
    ):
        self.mjd_col = mjd_col
        self.filter_col = filter_col
        self.night_col = night_col
        self.m5_col = m5_col
        # Avoid a mutable default argument; None selects the dust map
        # (equivalent to the previous maps=["DustMap"] default).
        self.maps = ["DustMap"] if maps is None else maps
        cols = [self.night_col, self.filter_col, self.mjd_col, self.m5_col]
        super().__init__(col=cols, metric_name=metric_name, maps=self.maps, units="N SL", **kwargs)
        # Value returned when a data_slice cannot yield a measurement.
        self.bad_val = 0
        if season is None:
            self.season = [-1]
        else:
            self.season = season
        self.bands = "ugrizy"
        if m5mins is None:
            # Per-band minimum (dust-corrected) single-visit depth for a
            # visit to count toward the metric.
            self.m5mins = {
                "u": 22.7,
                "g": 24.1,
                "r": 23.7,
                "i": 23.1,
                "z": 22.2,
                "y": 21.4,
            }
        else:
            self.m5mins = m5mins
        self.min_season_obs = min_season_obs
        self.nfilters_min = nfilters_min
        # Set up dust-extinction values to use to interpret the dust map.
        self.phot_properties = DustValues()

    def n_lensed(self, area, cadence, season_length):
        """Estimate the number of lensed supernovae.

        Parameters
        ----------
        area : `float`
            Area in square degrees related to this data_slice (sq deg)
        cadence : `float`
            Median gap between nights with visits (days) - any filter
        season_length : `float`
            Length of the season or period of consideration (years)

        Returns
        -------
        n_lensed_s_ne__ia : `float`
            Number of strongly lensed SN expected in this area
        """
        # Scaling from the formula in the class notes: 45.7 SN per
        # 20000 deg^2 per 2.5 years, suppressed by the median gap.
        n_lensed_s_ne__ia = 45.7 * area / 20000.0 * season_length / 2.5 / (2.15 * np.exp(0.37 * cadence))
        return n_lensed_s_ne__ia

    def run(self, data_slice, slice_point=None):
        """
        Runs the metric for each data_slice

        Parameters
        ---------------
        data_slice : simulation data
        slice_point: slice_point(default None)

        Returns
        -----------
        number of SL time delay supernovae
        """
        # If we had no incoming data - just return with badVal.
        if len(data_slice) == 0:
            return self.bad_val
        # Crop it down so things are coadded per night per
        # filter at the median MJD time
        night_slice = collapse_night(
            data_slice,
            night_col=self.night_col,
            filter_col=self.filter_col,
            m5_col=self.m5_col,
            mjd_col=self.mjd_col,
        )
        # Calculate the dust extinction-corrected m5 values
        # and cut visits which don't meet self.m5mins
        for f in np.unique(night_slice[self.filter_col]):
            in_filt = np.where(night_slice[self.filter_col] == f)[0]
            a_x = self.phot_properties.ax1[f] * slice_point["ebv"]
            night_slice[self.m5_col][in_filt] = night_slice[self.m5_col][in_filt] - a_x
            # Set the visits which fall below the minimum
            # to an obvious non-valid value
            night_slice[self.m5_col][in_filt] = np.where(
                night_slice[self.m5_col][in_filt] > self.m5mins[f],
                night_slice[self.m5_col][in_filt],
                -999,
            )
        idxs = np.where(night_slice[self.m5_col] > -998)
        # If nothing survived these cuts, just return with badVal.
        # (Previously returned `self.badval` here, inconsistent with the
        # `self.bad_val` used for the empty-slice case above.)
        if len(idxs[0]) == 0:
            return self.bad_val
        # Reset, with coadded per-night/per-filter values,
        # skipping any too-shallow visits.
        night_slice = np.sort(night_slice[idxs], order=self.mjd_col)
        # get the pixel area
        area = hp.nside2pixarea(slice_point["nside"], degrees=True)
        # Note that 'seasons' is the same length as night_slice,
        # and contains integer (season) + float (day)
        seasons = calc_season(np.degrees(slice_point["ra"]), night_slice[self.mjd_col])
        season_ints = np.floor(seasons)
        if self.season == [-1]:
            season_loop = np.unique(season_ints)
        else:
            season_loop = self.season
        n_lensed_s_ne__ia = 0
        for s in season_loop:
            s_idx = np.where(season_ints == s)[0]
            u_filters = np.unique(night_slice[s_idx][self.filter_col])
            if (len(s_idx) < self.min_season_obs) | (np.size(u_filters) < self.nfilters_min):
                # Skip seasons with too few observations or filters.
                continue
            # Find the cadence (days) between visits within the season.
            # (Use the configured MJD column instead of a hard-coded name.)
            cadence = np.diff(night_slice[self.mjd_col][s_idx])
            # But only the values between nights, not within nights
            cadence = np.median(cadence[np.where(cadence > 0.4)])
            # Season length in years
            season_length = seasons[s_idx][-1] - seasons[s_idx][0]
            n_lensed_s_ne__ia += self.n_lensed(area, cadence, season_length)
        return n_lensed_s_ne__ia
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@metrics@sn_sl_metric.py@.PATH_END.py
|
{
"filename": "wx_gradient_editor.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/tvtk/util/wx_gradient_editor.py",
"type": "Python"
}
|
""" A wxPython based color gradient editor for vtkLookupTables and
color transfer functions.
This code is distributed under the conditions of the BSD license.
Based on a Tk version of this widget by Gerald Knizia <cgk.d@gmx.net>
Ported to wxPython by Pete Schmitt <schmitt@colorado.edu>
Cleaned up and enhanced for use with MayaVi2 by Prabhu Ramachandran
Copyright (c) 2005-2020, Gerald Knizia, Pete Schmitt and Prabhu Ramachandran
"""
# Third-party imports
import wx
# Local imports
from tvtk.util.gradient_editor import (
ColorControlPoint, ChannelBase, FunctionControl, GradientEditorWidget
)
##########################################################################
# `wxGradientControl` class.
##########################################################################
class wxGradientControl(wx.Panel):
    """Widget which displays the gradient represented by an GradientTable
    object (and does nothing beyond that)"""

    def __init__(self, masterPanel, gradient_table, width, height):
        """master: panel in which to place the control. GradientTable is the
        Table to which to attach."""
        wx.Panel.__init__(self, masterPanel, size=wx.Size(width, height),
                          style=wx.RAISED_BORDER,
                          name="Colormap Panel")
        self.SetBackgroundColour(wx.Colour(255,255,255))
        self.width = width
        self.height = height
        self.gradient_table = gradient_table
        assert( gradient_table.size == width )
        # ^- currently only able to use gradient tables in the same size as the canvas width
        # bind paint event to redraw when resizing/creating window...
        wx.EVT_PAINT(self, self.OnPaint)

    def OnPaint(self, event):
        """ Paint event handler for when the window is resized and
        whatnot."""
        # A PaintDC must exist during EVT_PAINT even though the actual
        # drawing is delegated to update().
        dc = wx.PaintDC(self)
        self.update()

    def update(self):
        """Repaint the control."""
        dc = wx.ClientDC(self)
        dc.SetBackground(wx.Brush(wx.Colour(0,0,0), wx.SOLID))
        dc.Clear()
        width, height = self.GetSize()
        # The gradient strip is painted as one vertical line per x column
        # (line objects were faster than per-pixel images in the old tk
        # version, see http://wiki.tcl.tk/11868; same approach kept here).
        xform = self.gradient_table.scaling_function
        start_y = 0
        end_y = height
        if xform:
            # if a scaling transformation is provided, paint the original
            # gradient under the scaled gradient.
            start_y = height/2
        # paint the original gradient as it stands in the table.
        dc.BeginDrawing()
        for x in range(width):
            (r,g,b,a) = self.gradient_table.get_pos_rgba_color_lerped(float(x)/(width-1))
            dc.SetPen(wx.Pen(wx.Colour(int(255*r),int(255*g),int(255*b))))
            dc.SetBrush(wx.Brush((int(255*r),int(255*g),int(255*b)), wx.SOLID))
            dc.DrawLine(x, start_y, x, end_y)
        if xform:
            # paint the scaled gradient below
            end_y = start_y
            start_y = 0
            for x in range(width):
                f = float(x)/(width-1)
                (r,g,b,a) = self.gradient_table.get_pos_rgba_color_lerped(xform(f))
                # BUGFIX: DrawLine draws with the current *pen*; previously
                # only the brush was set here, so every column of the scaled
                # gradient was drawn in the stale pen color left over from
                # the first loop.
                dc.SetPen(wx.Pen(wx.Colour(int(255*r),int(255*g),int(255*b))))
                dc.SetBrush(wx.Brush((int(255*r),int(255*g),int(255*b)), wx.SOLID))
                dc.DrawLine(x, start_y, x, end_y)
        dc.EndDrawing()
##########################################################################
# `Channel` class.
##########################################################################
class Channel(ChannelBase):
    def paint(self, deviceContext):
        """Paint current channel into Canvas (a canvas of a function control
        object).

        Contents of the canvas are not deleted prior to painting,
        so more than one channel can be painted into the same canvas."""
        dc = deviceContext
        table = self.control.table
        # only control points which are active for the current channel
        # are to be painted. filter them out.
        relevant_control_points = [
            x for x in table.control_points if self.name in x.active_channels
        ]
        dc.BeginDrawing()
        # lines between control points, in this channel's color
        dc.SetPen(wx.Pen(self.rgb_color, 1))
        dc.SetBrush(wx.Brush((255, 255, 255), wx.SOLID))
        for k in range(len(relevant_control_points) - 1):
            cur_point = relevant_control_points[k]
            next_point = relevant_control_points[1 + k]
            dc.DrawLine(self.get_pos_index(cur_point.pos),
                        self.get_value_index(cur_point.color),
                        self.get_pos_index(next_point.pos),
                        self.get_value_index(next_point.color))
        # the control points themselves: small white squares centered on
        # each point, outlined in black.
        dc.SetPen(wx.Pen("BLACK", 1))
        dc.SetBrush(wx.Brush((255, 255, 255), wx.SOLID))
        radius = 6  # square side length; loop-invariant, so hoisted
        for control_point in relevant_control_points:
            x = self.get_pos_index(control_point.pos)
            y = self.get_value_index(control_point.color)
            dc.DrawRectangle(x - (radius / 2.0), y - (radius / 2.0), radius, radius)
        # BUGFIX: removed leftover debug artifacts — a stray
        # dc.DrawRectangle(100, 80, 6, 6) that painted a phantom marker on
        # every repaint, plus commented-out print/SetBrush debug lines.
        dc.EndDrawing()
##########################################################################
# `wxFunctionControl` class.
##########################################################################
class wxFunctionControl(wx.Panel, FunctionControl):
    """Widget which displays a rectangular regions on which hue, sat, val
    or rgb values can be modified. An function control can have one or more
    attached color channels."""

    # Radius around a control point center in which we'd still count a
    # click as "clicked the control point"
    control_pt_click_tolerance = 4

    # Factory used by FunctionControl to create this control's channels.
    ChannelFactory = Channel

    def __init__(self, master, gradient_table, color_space, width, height):
        """Initialize a function control widget on tkframe master.

        Parameters:
        -----------
        master: The master widget. Note that this widget *must* have
            the methods specified in the `AbstractGradientEditorWidget`
            interface.
        on_table_changed: Callback function taking a bool argument of meaning
            'FinalUpdate'. FinalUpdate is true if a control point is dropped,
            created or removed and false if the update is due to a control point
            currently beeing dragged (but not yet dropped)
        color_space: String which specifies the channels painted on this control.
            May be any combination of h,s,v,r,g,b,a in which each channel
            occurs only once.
        set_status_text: a callback used to set the status text
            when using the editor.
        """
        FunctionControl.__init__(self, master, gradient_table, color_space,
                                 width, height)
        wx.Panel.__init__(self, master, size=wx.Size(width, height),
                          name="RGBHSVA Editor")
        self.update()
        # wx "classic" (wxPython 2.x) style event bindings.
        wx.EVT_LEFT_DOWN(self, self.on_left_button_down)
        wx.EVT_LEFT_UP(self, self.on_left_button_up)
        wx.EVT_RIGHT_DOWN(self, self.on_right_button_down)
        wx.EVT_RIGHT_UP(self, self.on_right_button_up)
        wx.EVT_MOTION(self, self.on_mouse_move)
        wx.EVT_PAINT(self, self.on_paint)
        wx.EVT_LEAVE_WINDOW(self, self.on_leave_window)

    ######################################################################
    # wxPython event methods.
    ######################################################################
    def update(self, event = None):
        """Repaint the control."""
        dc = wx.ClientDC(self)
        #if we have a custom background, we *must* set the background brush *BEFORE* clearing...
        dc.SetBackground(wx.Brush(wx.Colour(255,255,255), wx.SOLID))
        dc.Clear()
        # Each channel paints additively into the same DC.
        for channel in self.channels:
            channel.paint(dc)

    def on_paint(self, event=None):
        # A PaintDC must exist during EVT_PAINT; drawing happens in update().
        dc = wx.PaintDC(self)
        self.update()

    def on_left_button_down(self, event):
        # Remember which (channel, control point) pair — if any — the drag
        # started on; consumed by on_mouse_move.
        self.cur_drag = self.find_control_point( event.GetX(), event.GetY() )

    def on_left_button_up(self, event):
        if self.cur_drag:
            # Dropping a point counts as a "final" table change.
            self.table_config_changed( final_update = True )
            self.cur_drag = None

    def on_leave_window(self, event):
        # Leaving the window is treated as dropping any dragged point.
        self.on_left_button_up(event)

    def on_right_button_down(self, event):
        pass

    def on_right_button_up(self, event):
        # toggle control point. check if there is a control point
        # under the mouse. If yes, delete it, if not, create one
        # at that point.
        cur_control_point = self.find_control_point(event.GetX(), None)
        if cur_control_point:
            # found a marker at the click position. delete it and return,
            # unless it is a fixed marker (at pos 0 or 1)..
            if ( cur_control_point[1].fixed ):
                # in this case do nothing. Fixed markers cannot be deleted.
                return
            self.table.control_points.remove(cur_control_point[1])
            self.table_config_changed(final_update=True)
        else:
            # since there was no marker to remove at the point, we assume
            # that we should place one there
            new_control_point = ColorControlPoint(active_channels = self.active_channels_string)
            new_control_point.set_pos(self.channels[0].get_index_pos(event.GetX()))
            # set new control point color to the color currently present
            # at its designated position
            new_control_point.color = self.table.get_pos_color(new_control_point.pos)
            self.table.insert_control_point( new_control_point )
            self.table_config_changed( final_update = True )

    def on_mouse_move(self, event):
        # currently dragging a control point?
        channel = None
        point = None
        if self.cur_drag:
            channel = self.cur_drag[0]
            point = self.cur_drag[1]
            if ( not point.fixed ):
                point.set_pos( channel.get_index_pos(event.GetX()) )
                point.activate_channels( self.active_channels_string )
                self.table.sort_control_points()
            channel.set_value_index( point.color, event.GetY() )
            # Dragging yields non-final updates (point not yet dropped).
            self.table_config_changed( final_update = False )
        # Update the status text: either the dragged point's mapped
        # (position, value), or the raw cursor position.
        screenX = event.GetX()
        screenY = event.GetY()
        width, height = self.GetSize()
        master = self.master
        s1, s2 = master.get_table_range()
        if channel is not None:
            name = self.text_map[channel.name]
            pos = s1 + (s2 - s1)*point.pos
            val = channel.get_value(point.color)
            txt = '%s: (%.3f, %.3f)'%(name, pos, val)
        else:
            x = s1 + (s2 - s1)*float(screenX)/(width-1)
            y = 1.0 - float(screenY)/(height-1)
            txt = "position: (%.3f, %.3f)"%(x, y)
        self.master.set_status_text(txt)
##########################################################################
# `wxGradientEditorWidget` class.
##########################################################################
class wxGradientEditorWidget(wx.Panel, GradientEditorWidget):
    """A Gradient Editor widget that can be used anywhere.
    """

    def __init__(self, master, vtk_table, on_change_color_table=None,
                 colors=None):
        """
        Parameters:
        -----------
        vtk_table : the `tvtk.LookupTable` or `tvtk.VolumeProperty` object
            to set.
        on_change_color_table : A callback called when the color table
            changes.
        colors : list of 'rgb', 'hsv', 'h', 's', 'v', 'a'
            (Default : ['rgb', 'hsv', 'a'])

            'rgb' creates one panel to edit Red, Green and Blue
            colors.

            'hsv' creates one panel to edit Hue, Saturation and
            Value.

            'h', 's', 'v', 'r', 'g', 'b', 'a' separately
            specified creates different panels for each.
        """
        GradientEditorWidget.__init__(self, master, vtk_table,
                                      on_change_color_table, colors)
        wx.Panel.__init__(self, master)
        gradient_preview_width = self.gradient_preview_width
        gradient_preview_height = self.gradient_preview_height
        channel_function_width = self.channel_function_width
        channel_function_height = self.channel_function_height

        # set up all the panels in a gridbagsizer (i.e. a big grid)
        # 6x2 size: 6 rows, 2 columns...
        sizer = wx.GridBagSizer(2, 2)
        # "Gradient Viewer" panel, in position (0,1) for sizer
        self.gradient_control = wxGradientControl(self,
                                                 self.gradient_table,
                                                 gradient_preview_width,
                                                 gradient_preview_height)
        tt = wx.ToolTip('Right click for menu')
        self.gradient_control.Bind(wx.EVT_CONTEXT_MENU, self.on_gradient_menu)
        self.gradient_control.SetToolTip(tt)
        sizer.Add(self.gradient_control, pos=(0,1))

        # Add the function controls: one editor row per requested color set.
        function_controls = self.function_controls
        editor_data = self.editor_data
        row = 1
        for color in self.colors:
            data = editor_data[color]
            control = wxFunctionControl(self, self.gradient_table, color,
                                        channel_function_width,
                                        channel_function_height)
            txt = data[0] + self.tooltip_text
            control.SetToolTip(wx.ToolTip(txt))
            # Add name of editor (to left side of editor)
            sizer.Add(wx.StaticText(self, -1, data[1]), pos=(row, 0),
                      flag=wx.ALIGN_CENTER|wx.ALL)
            # Add the "RGB" control point editor
            sizer.Add(control, pos=(row, 1))
            function_controls.append(control)
            row += 1

        # The status text.
        self.text = wx.StaticText(self, -1, 'status')
        # NOTE(review): the third argument to GridBagSizer.Add is the
        # (rowspan, colspan) span; (row, 2) makes the row span grow with the
        # number of editors — presumably (1, 2) was intended. Confirm before
        # changing.
        sizer.Add(self.text, (row,0), (row,2))
        row += 1

        # set the appropriate sizer.
        sizer.SetSizeHints(self)
        self.SetSizerAndFit(sizer)

    ######################################################################
    # `wxGradientEditorWidget` interface.
    ######################################################################
    def set_status_text(self, msg):
        # Update and force-redraw the status label.
        t = self.text
        t.SetLabel(msg)
        t.Refresh()
        t.Update()

    ######################################################################
    # wxPython event methods.
    ######################################################################
    def on_gradient_menu(self, event):
        if not hasattr(self, 'save_menuid'):
            # Do this only the first time.
            self.save_menuid = wx.NewId()
            self.load_menuid = wx.NewId()
            self.Bind(wx.EVT_MENU, self.on_save, id=self.save_menuid)
            self.Bind(wx.EVT_MENU, self.on_load, id=self.load_menuid)
        # Build the popup menu fresh on every right-click.
        menu = wx.Menu()
        menu.Append(self.save_menuid, "Save as")
        menu.Append(self.load_menuid, "Load")
        self.PopupMenu(menu)
        menu.Destroy()

    def on_save(self, event):
        """
        Open "Save" dialog, write lookuptable to 3 files: ``*.lut``
        (lookuptable) ``*.grad`` (gradient table for use with this program),
        and ``*.jpg`` (image of the gradient)
        """
        dlg = wx.FileDialog(self, "Save LUT to...", style=wx.FD_SAVE)
        wildcard = "Gradient Files (.grad)|*.grad|" \
                   "All files (*.*)|*.*"
        dlg.SetWildcard(wildcard)
        if (dlg.ShowModal() == wx.ID_OK):
            file_name = dlg.GetPath()
            if file_name:
                # Actual writing is handled by GradientEditorWidget.save.
                self.save(file_name)

    def on_load(self, event):
        """
        Load a ``*.grad`` lookuptable file using wxpython dialog
        """
        style = wx.FD_OPEN
        dlg = wx.FileDialog(self, "Open a file", style=style)
        wildcard = "Gradient Files (.grad)|*.grad|" \
                   "All files (*.*)|*.*"
        dlg.SetWildcard(wildcard)
        if (dlg.ShowModal() == wx.ID_OK):
            file_name = dlg.GetPath()
            if file_name:
                # Actual reading is handled by GradientEditorWidget.load.
                self.load(file_name)
##########################################################################
# `wxGradientEditor` class.
##########################################################################
class wxGradientEditor(wx.Frame):
    """ wxPython frame that displays the gradient editor window,
    i.e. the thing that contains the gradient display, the function
    controls and the buttons.
    """
    def __init__(self, vtk_table, on_change_color_table = None, colors=None):
        """Initialize the gradient editor window.

        Parameters
        ----------
        vtk_table: Instance of vtkLookupTable, designating the table which is
            to be edited.
        on_change_color_table: Callback function taking no arguments. Called
            when the color table was changed and rendering is
            requested.
        colors: Optional list of channel identifiers, passed through to the
            embedded `wxGradientEditorWidget`.
        """
        wx.Frame.__init__(self, None, -1, "Color Gradient Editor",
                          wx.DefaultPosition, [350, 400])

        self.widget = wxGradientEditorWidget(self, vtk_table,
                                             on_change_color_table,
                                             colors)

        # Draw the rest of the GUI (i.e. statusbar, menubar, etc.)
        self.SetupMenuBar()
        self.CreateStatusBar()

    def SetupMenuBar(self):
        """
        Create menus (i.e. Create Filemenu and submenus, help menu, ...)
        """
        MenuBar = wx.MenuBar()

        # FILE menu.
        file_menu = wx.Menu()
        item = file_menu.Append(-1, "&Save", "Save CTF")
        self.Bind(wx.EVT_MENU, self.widget.on_save, item)
        item = file_menu.Append(-1, "&Load", "Load CTF")
        self.Bind(wx.EVT_MENU, self.widget.on_load, item)
        item = file_menu.Append(-1, "&Close", "Close this frame")
        self.Bind(wx.EVT_MENU, self.OnQuit, item)
        MenuBar.Append(file_menu, "&File")

        # HELP menu.
        help_menu = wx.Menu()
        item = help_menu.Append(-1, "&Help", "Help")
        self.Bind(wx.EVT_MENU, self.OnHelp, item)
        item = help_menu.Append(-1, "&About", "About")
        self.Bind(wx.EVT_MENU, self.OnAbout, item)
        MenuBar.Append(help_menu, "&Help")

        self.SetMenuBar(MenuBar)

    def OnQuit(self, event):
        """Close the editor frame."""
        self.Close()

    def OnHelp(self, event):
        """ Help defining the mouse interactions """
        message = "Right click to add control points. Left click to move control points"
        # Fixed: this dialog previously reused the 'About wxGradientEditor'
        # title by copy-paste.
        dlg = wx.MessageDialog(self, message,
                               'Help',
                               wx.OK | wx.ICON_INFORMATION
                               )
        dlg.ShowModal()
        dlg.Destroy()

    def OnAbout(self, event):
        """ Who wrote the program?"""
        message = 'tk Gradient Editor for MayaVi1: Gerald Knizia (cgk.d@gmx.net)\n'\
                  'wxPython port: Pete Schmitt (schmitt@colorado.edu)\n'\
                  'Enhanced for MayaVi2: Prabhu Ramachandran'
        dlg = wx.MessageDialog(self, message,
                               'About wxGradientEditor',
                               wx.OK | wx.ICON_INFORMATION
                               )
        dlg.ShowModal()
        dlg.Destroy()
##########################################################################
# Test application.
##########################################################################
def main():
    """Run a small standalone demo of the gradient editor."""
    from tvtk.util.traitsui_gradient_editor import make_test_table
    table, ctf, otf = make_test_table(lut=False)

    def on_color_table_changed():
        """If we had a vtk window running, update it here"""
        print("Update Render Window")

    # The actual gradient editor code.
    app = wx.App(False)
    frame = wxGradientEditor(table,
                             on_color_table_changed,
                             colors=['rgb', 'a', 'h', 's', 'v'],
                             )
    frame.Show()
    app.MainLoop()


if __name__ == "__main__":
    main()
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@tvtk@util@wx_gradient_editor.py@.PATH_END.py
|
{
"filename": "schedule.py",
"repo_name": "BRML/climin",
"repo_path": "climin_extracted/climin-master/climin/schedule.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""This module holds various schedules for parameters such as the step
rate or momentum for gradient descent.
A schedule is implemented as an iterator. This allows schedules of
infinite length. It also makes it possible to manipulate schedules with
the ``itertools`` Python module, e.g. for chaining iterators.
"""
import itertools
import math
import numpy as np
def decaying(start, decay):
    """Return an iterator of exponentially decaying values.

    The first value is ``start``; the ``i``-th value is
    ``start * decay ** i``.

    Examples
    --------

    >>> from climin.schedule import decaying
    >>> s = decaying(10, .9)
    >>> [next(s) for i in range(5)]
    [10.0, 9.0, 8.100000000000001, 7.290000000000001, 6.561]
    """
    def _powers():
        for exponent in itertools.count():
            yield start * decay ** exponent
    return _powers()
def linear_annealing(start, stop, n_steps):
    """Return an iterator that anneals linearly from ``start`` to ``stop``.

    The first value is ``start``; values step linearly toward ``stop``
    over ``n_steps`` iterations, after which ``stop`` is yielded forever.

    Examples
    --------

    >>> from climin.schedule import linear_annealing
    >>> s = linear_annealing(1, 0, 4)
    >>> [next(s) for i in range(10)]
    [1.0, 0.75, 0.5, 0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    """
    begin = float(start)
    end = float(stop)
    step = (end - begin) / n_steps
    i = 0
    while i < n_steps:
        yield begin + i * step
        i += 1
    while True:
        yield end
def repeater(iter, n):
    """Return an iterator that repeats each element of `iter` exactly
    `n` times before moving on to the next element.

    (The parameter name `iter` shadows the builtin but is kept for
    backward compatibility with keyword callers.)

    Examples
    --------

    >>> from climin.schedule import repeater
    >>> s = repeater([1, 2, 3], 2)
    >>> [next(s) for i in range(6)]
    [1, 1, 2, 2, 3, 3]
    """
    for element in iter:
        yield from itertools.repeat(element, n)
class SutskeverBlend(object):
    """Schedule that increases step-wise from zero toward a maximum value,
    as described in [sutskever2013importance]_.

    Each plateau lasts ``stretch`` iterations; the value is capped at
    ``max_momentum``.

    Examples
    --------

    >>> from climin.schedule import SutskeverBlend
    >>> s = iter(SutskeverBlend(0.9, 2))
    >>> [next(s) for i in range(10)]
    [0.5, 0.75, 0.75, 0.8333333333333333, 0.8333333333333333, 0.875, 0.875, 0.9, 0.9, 0.9]


    .. [sutskever2013importance] On the importance of initialization and
       momentum in deep learning, Sutskever et al (ICML 2013)
    """

    def __init__(self, max_momentum, stretch=250):
        self.max_momentum = max_momentum
        self.stretch = stretch

    def __iter__(self):
        for step in itertools.count(1):
            plateau = np.floor_divide(step, self.stretch) + 1
            candidate = 1 - (2 ** (-1 - math.log(plateau, 2)))
            yield min(candidate, self.max_momentum)
|
BRMLREPO_NAMEcliminPATH_START.@climin_extracted@climin-master@climin@schedule.py@.PATH_END.py
|
{
"filename": "g3_sim.py",
"repo_name": "simonsobs/sotodlib",
"repo_path": "sotodlib_extracted/sotodlib-master/sotodlib/g3_sim.py",
"type": "Python"
}
|
# Copyright (c) 2018-2020 Simons Observatory.
# Full license can be found in the top level "LICENSE" file.
"""Data simulation.
This module contains code for simulating data.
"""
import numpy as np
from spt3g import core
from .core.g3_core import DataG3Module
class PipelineSeeder(list):
    """
    A way to introduce statically generated frames into a pipeline.

    Instantiate this as a list of seed Frames, then add it as the
    first Pipeline element.
    """
    def __call__(self, frame_in):
        # Pass through the incoming frame (if any), then inject the next
        # queued seed frame until the list is exhausted.
        frames = [] if frame_in is None else [frame_in]
        if self:
            frames.append(self.pop(0))
        return frames
def enumerate_det_id(n_dets, freq=39, band='LF1'):
    """
    Generator for looping through detector names. Not very smart, only for
    basic testing.

    Args:
        n_dets (int): number of detector names to make
        freq (int): detector freqency
        band (str): detector band

    Returns:
        SO formatted detector keys
    """
    # Detectors come in A/B polarization pairs sharing a pair number.
    pols = ('A', 'B')
    for idx in range(n_dets):
        pair, parity = divmod(idx, 2)
        yield '{}_{:03}_{}_{}'.format(freq, pair, band, pols[parity])
def noise_scan_frames(n_frames=3, n_dets=20, input='signal', n_samps=200,
                      samp_rate=0.005*core.G3Units.second,
                      t_start=core.G3Time('2020-1-1T00:00:00')):
    """
    Generate a list of frames filled with noise data and nothing else.

    Args:
        n_frames (int): number of frames to make
        n_dets (int): number of detectors per frame
        input (str): name of G3TimestreamMap for detectors, should be some
            form of 'signal'
        n_samps (int): number of samples per detector timestream
        samp_rate (G3Unit.second): detector sampling rate
        t_start (G3Time): start time of the set of frames

    Returns:
        list of Scan frames, each carrying a noise-filled G3TimestreamMap
        under the ``input`` key.
    """
    frames = []
    span = n_samps * samp_rate
    current = t_start
    for _ in range(n_frames):
        frame = core.G3Frame()
        frame.type = core.G3FrameType.Scan

        # Build a zero-filled timestream map spanning [current, current+span].
        tsm = core.G3TimestreamMap()
        blank = np.zeros((n_samps,))
        for det in enumerate_det_id(n_dets):
            tsm[det] = core.G3Timestream(blank)
        tsm.start = current
        tsm.stop = current + span

        # Replace the zeros with a noise realization.
        frame[input] = MakeNoiseData().apply(tsm)

        current += span
        frames.append(frame)
    return frames
class MakeNoiseData(DataG3Module):
    """
    Writes a signal with just noise. To be used where an observation
    has already been set up but there's no data (such as the output of
    so3g.python.quicksim.py) mostly just an easy way to get numbers in G3Timestreams

    The noise is a basic white noise plus a 1/f component described by a
    knee frequency and index

    Args:
        input (str): the key to a G3Timestream map in the G3Frame
            to replace with noise data
        output (str or None): key of G3Timestream map of output data
            if None: input will be overwritten with output
        white_noise (float): white noise level
        f_knee (float): knee frequency
        f_knee_index (float): index of 1/f spectrum, should be negative

    Returns:
        None
    """
    def __init__(self, input='signal', output=None, white_noise = 0.005,
                 f_knee = 0.01, f_knee_index=-2):
        # Noise model: amplitude ~ white_noise * (1 + (f/f_knee)**f_knee_index).
        self.white_noise = white_noise
        self.f_knee = f_knee
        self.f_knee_index = f_knee_index
        super().__init__(input, output)

    def process(self, data, k):
        """Return a noise realization with the same length as *data*,
        built in the frequency domain with random phases."""
        # Frequency grid; the spacing argument converts the G3 sample
        # rate into Hz.
        freqs = np.fft.fftfreq(data.n_samples, core.G3Units.Hz/data.sample_rate)
        # Colored amplitude for the non-zero frequencies; freqs[0] == 0 is
        # skipped here to avoid dividing by zero.
        noise = self.white_noise*(1 + (freqs[1:]/self.f_knee)**self.f_knee_index)
        # Randomize the phase of each bin.
        noise = noise*np.exp( 1.0j * np.random.uniform(0, 2*np.pi, size=(data.n_samples-1),))
        ## prevent divide by zero error and assume 1/f doesn't go to infinity
        noise = np.append(noise[0], noise)
        # NOTE(review): the forward fft (not ifft) is used to return to the
        # time domain. For a random-phase spectrum this still produces
        # colored noise, but confirm the transform direction and its
        # scaling are intended.
        return np.real(np.fft.fft(noise)).astype('float64')
class MakeJumps(DataG3Module):
    """
    G3Module that takes a G3Timestream map and adds randomly
    distributed jumps

    Args:
        input (str): the key to a G3TimestreamMap that is the data source
        output (str or None): the key for a G3TimestreamMap that will have data
            plus glitches. If None: jumps are added to Input
        info (str): a G3Timestream map will be made with this name that
            includes just the jumps.
        max_jumps (int): number of jumps in each G3Timestream is
            np.random.randint(max_jumps)
        height_std_sigma (float): height of each jump is a draw from a normal
            distribution with standard deviation of
            height_std_sigma*np.std(timestream)
    """
    def __init__(self, input='signal', output=None, info='flags_encoded_jumps',
                 max_jumps=3, height_std_sigma=10):
        self.info = info
        self.max_jumps = max_jumps
        self.height_std_sigma = height_std_sigma
        super().__init__(input, output)

    def __call__(self, f):
        # For Scan frames: create a fresh map to collect the injected
        # jumps, let the base class run process() over every detector
        # (which fills self.jump_map), then attach the map to the frame
        # under self.info.
        if f.type == core.G3FrameType.Scan:
            self.jump_map = core.G3TimestreamMap()
        super().__call__(f)
        if f.type == core.G3FrameType.Scan:
            self.jump_map.start = f[self.input].start
            self.jump_map.stop = f[self.input].stop
            f[self.info] = self.jump_map

    def process(self, data, det_name):
        # Random number of jumps (0 .. max_jumps-1) at random sample indices.
        locs = np.random.randint(data.n_samples, size=(np.random.randint(self.max_jumps),) )
        heights = np.random.randn( len(locs) )*self.height_std_sigma*np.std(data)
        jumps = np.zeros( (data.n_samples,) )
        # Each jump is a step: every sample from the jump location onward
        # is offset by the jump height.
        for i in range(len(locs)):
            jumps[locs[i]:] += heights[i]
        # Record the jump-only timestream for this detector.
        self.jump_map[det_name] = core.G3Timestream( jumps )
        return data + jumps
class MakeGlitches(DataG3Module):
    """
    G3Module that takes the G3Timestream map and adds randomly
    distributed glitches

    Args:
        input (str): the key to a G3TimestreamMap that is the data source
        output (str or None): the key for a G3TimestreamMap that will have data
            plus glitches. If None: Glitches are added to Input
        info (str): a G3Timestream map will be made with this name that
            includes just the glitches.
        max_glitches (int): number of glitches in each G3Timestream is
            np.random.randint(max_glitches)
        height_std_sigma (float): height of each glitch is a draw from a normal
            distribution with standard deviation of
            height_std_sigma*np.std(timestream)
    """
    def __init__(self, input='signal', output=None, info='flags_encoded_glitches', max_glitches=3, height_std_sigma=20):
        self.info = info
        self.max_glitches = max_glitches
        self.height_std_sigma = height_std_sigma
        super().__init__(input, output)

    def __call__(self, f):
        # For Scan frames: create a fresh map to collect the injected
        # glitches, let the base class run process() over every detector
        # (which fills self.glitch_map), then attach the map to the frame
        # under self.info.
        if f.type == core.G3FrameType.Scan:
            self.glitch_map = core.G3TimestreamMap()
        super().__call__(f)
        if f.type == core.G3FrameType.Scan:
            self.glitch_map.start = f[self.input].start
            self.glitch_map.stop = f[self.input].stop
            f[self.info] = self.glitch_map

    def process(self, data, det_name):
        # Random number of glitches (0 .. max_glitches-1) at random indices.
        locs = np.random.randint(data.n_samples, size=(np.random.randint(self.max_glitches),) )
        heights = np.random.randn( len(locs) )*self.height_std_sigma*np.std(data)
        glitches = np.zeros( (data.n_samples,) )
        # Unlike MakeJumps, a glitch affects only the single sample it
        # lands on.
        glitches[locs] += heights
        # Record the glitch-only timestream for this detector.
        self.glitch_map[det_name] = core.G3Timestream( glitches )
        return core.G3Timestream( data + glitches )
|
simonsobsREPO_NAMEsotodlibPATH_START.@sotodlib_extracted@sotodlib-master@sotodlib@g3_sim.py@.PATH_END.py
|
{
"filename": "perspective-effect.ipynb",
"repo_name": "smoh/kinesis",
"repo_path": "kinesis_extracted/kinesis-master/notebooks/perspective-effect.ipynb",
"type": "Jupyter Notebook"
}
|
This notebook verifies math in Appendix A. Perspective effect in Oh & Evans 2020.
```python
from sympy import symbols, simplify, latex
from sympy import cos, sin, Matrix, diff, N
import numpy as np
ra, dec = symbols('alpha, delta')
vra,vdec,vr = symbols(r'v_\alpha, v_\delta, v_r')
vx,vy,vz = symbols('v_x v_y v_z')
delta_ra, delta_dec= symbols(r'\Delta\alpha \Delta\delta')
R = Matrix([
[-sin(ra), cos(ra), 0.],
[-sin(dec)*cos(ra), -sin(dec)*sin(ra), cos(dec)],
[cos(dec)*cos(ra), cos(dec)*sin(ra), sin(dec)]
])
```
```python
R
```
$\displaystyle \left[\begin{matrix}- \sin{\left(\alpha \right)} & \cos{\left(\alpha \right)} & 0.0\\- \sin{\left(\delta \right)} \cos{\left(\alpha \right)} & - \sin{\left(\alpha \right)} \sin{\left(\delta \right)} & \cos{\left(\delta \right)}\\\cos{\left(\alpha \right)} \cos{\left(\delta \right)} & \sin{\left(\alpha \right)} \cos{\left(\delta \right)} & \sin{\left(\delta \right)}\end{matrix}\right]$
```python
simplify(R.inv()) == R.T
```
True
```python
diff(R, ra)
```
$\displaystyle \left[\begin{matrix}- \cos{\left(\alpha \right)} & - \sin{\left(\alpha \right)} & 0\\\sin{\left(\alpha \right)} \sin{\left(\delta \right)} & - \sin{\left(\delta \right)} \cos{\left(\alpha \right)} & 0\\- \sin{\left(\alpha \right)} \cos{\left(\delta \right)} & \cos{\left(\alpha \right)} \cos{\left(\delta \right)} & 0\end{matrix}\right]$
```python
diff(R, dec)
```
$\displaystyle \left[\begin{matrix}0 & 0 & 0\\- \cos{\left(\alpha \right)} \cos{\left(\delta \right)} & - \sin{\left(\alpha \right)} \cos{\left(\delta \right)} & - \sin{\left(\delta \right)}\\- \sin{\left(\delta \right)} \cos{\left(\alpha \right)} & - \sin{\left(\alpha \right)} \sin{\left(\delta \right)} & \cos{\left(\delta \right)}\end{matrix}\right]$
## General $\Delta v_\mathrm{sphere}$ to the first order
```python
vvec = Matrix([
[vx],
[vy],
[vz]
])
```
```python
delta_v_sphere = diff(R, ra)*vvec*delta_ra + diff(R, dec)*vvec*delta_dec
delta_v_sphere
```
$\displaystyle \left[\begin{matrix}\Delta\alpha \left(- v_{x} \cos{\left(\alpha \right)} - v_{y} \sin{\left(\alpha \right)}\right)\\\Delta\alpha \left(v_{x} \sin{\left(\alpha \right)} \sin{\left(\delta \right)} - v_{y} \sin{\left(\delta \right)} \cos{\left(\alpha \right)}\right) + \Delta\delta \left(- v_{x} \cos{\left(\alpha \right)} \cos{\left(\delta \right)} - v_{y} \sin{\left(\alpha \right)} \cos{\left(\delta \right)} - v_{z} \sin{\left(\delta \right)}\right)\\\Delta\alpha \left(- v_{x} \sin{\left(\alpha \right)} \cos{\left(\delta \right)} + v_{y} \cos{\left(\alpha \right)} \cos{\left(\delta \right)}\right) + \Delta\delta \left(- v_{x} \sin{\left(\delta \right)} \cos{\left(\alpha \right)} - v_{y} \sin{\left(\alpha \right)} \sin{\left(\delta \right)} + v_{z} \cos{\left(\delta \right)}\right)\end{matrix}\right]$
We can express this with $v_\mathrm{sphere} = [v_\alpha,\,v_\delta,\,v_r]^T$ **at** $(\alpha,\,\delta)$.
Such first-order correction has been applied in e.g., Kuhn et al. 2019.
The limits of this is:
1. the mean velocity is estimated in the projected space, where the perspective effect is **baked in** already
2. it is correct to only first-order in $\Delta \alpha$ and $\Delta \delta$
3. it assumes an absolute center at $(\alpha,\,\delta)$
```python
vvec = R.T @ Matrix([[vra],[vdec],[vr]])
delta_v_sphere = diff(R, ra)*vvec*delta_ra + diff(R, dec)*vvec*delta_dec
```
```python
simplify(delta_v_sphere)
```
$\displaystyle \left[\begin{matrix}\Delta\alpha \left(v_\delta \sin{\left(\delta \right)} - v_{r} \cos{\left(\delta \right)}\right)\\- \Delta\alpha v_\alpha \sin{\left(\delta \right)} - \Delta\delta v_{r}\\\Delta\alpha v_\alpha \cos{\left(\delta \right)} + \Delta\delta v_\delta\end{matrix}\right]$
```python
print(latex(simplify(delta_v_sphere)))
```
\left[\begin{matrix}\Delta\alpha \left(v_\delta \sin{\left(\delta \right)} - v_{r} \cos{\left(\delta \right)}\right)\\- \Delta\alpha v_\alpha \sin{\left(\delta \right)} - \Delta\delta v_{r}\\\Delta\alpha v_\alpha \cos{\left(\delta \right)} + \Delta\delta v_\delta\end{matrix}\right]
## A special case: $\vec{v}_0$ is radial: perspective expansion/contraction
When $\vec{v}_0$ is exactly radial at $(\alpha,\,\delta)$:
```python
v_radial = Matrix([
[0],
[0],
[vr]
])
v0 = R.T * v_radial
```
```python
dMdrav0 = simplify(diff(R, ra) * v0)
dMddecv0 = simplify(diff(R, dec)*v0)
dMdrav0*delta_ra + dMddecv0*delta_dec
```
$\displaystyle \left[\begin{matrix}- \Delta\alpha v_{r} \cos{\left(\delta \right)}\\- \Delta\delta v_{r}\\0\end{matrix}\right]$
$$ \left[\begin{matrix} \Delta v_\alpha \\ \Delta v_\delta \end{matrix} \right] =
- \left[\begin{matrix} \cos\delta & 0 \\ 0 & 1 \end{matrix}\right] v_r
\left[ \begin{matrix} \Delta \alpha \\ \Delta \delta \end{matrix} \right] $$
Since $\cos\delta>0$ always, and noting that there is no cross-term, the signs of the projected velocity gradients $\delta v_\alpha$ and $\delta v_\delta$ depend only on the sign of $v_r$: when $v_r>0$ (receding), the projected velocities decrease outward, i.e., we see an apparent contraction, and vice versa.
## Second-order terms
One can expand to second-order as well. There will always be a higher-order correction as $\sin$ and $\cos$ expand forever.
The next order term will dominate the residual pattern.
```python
delta_v_sphere2 = simplify(diff(R, ra, 2) *v0 * delta_ra**2 + diff(R, dec, 2) *v0 * delta_dec**2 + 2*diff(R, ra,dec) * v0 * delta_ra * delta_dec)
```
```python
delta_v_sphere2.subs({ra:np.deg2rad(45), dec:np.deg2rad(45)})
```
$\displaystyle \left[\begin{matrix}0\\0.5 \Delta\alpha^{2} v_{r}\\- v_{r} \left(0.5 \Delta\alpha^{2} + \Delta\delta^{2}\right)\end{matrix}\right]$
```python
delta_v_sphere2.subs({ra:np.deg2rad(135), dec:np.deg2rad(135)})
```
$\displaystyle \left[\begin{matrix}0\\- 0.5 \Delta\alpha^{2} v_{r}\\- v_{r} \left(0.5 \Delta\alpha^{2} + \Delta\delta^{2}\right)\end{matrix}\right]$
```python
N(delta_v_sphere.subs({ra:np.deg2rad(45),dec:np.deg2rad(45), vx:5.0, vy:5.0, vz:7.07106781}), 3)
```
$\displaystyle \left[\begin{matrix}\Delta\alpha \left(0.707 v_\delta - 0.707 v_{r}\right)\\- 0.707 \Delta\alpha v_\alpha - 1.0 \Delta\delta v_{r}\\0.707 \Delta\alpha v_\alpha + 1.0 \Delta\delta v_\delta\end{matrix}\right]$
```python
pos2 = N(delta_v_sphere.subs({ra:np.deg2rad(300),dec:np.deg2rad(45), vx:5.0, vy:5.0, vz:7.07106781}), 3, )
```
```python
N(delta_v_sphere.subs({ra:np.deg2rad(340),dec:np.deg2rad(-65), vx:5.0, vy:5.0, vz:7.07106781}), 3)
```
$\displaystyle \left[\begin{matrix}- 2.99 \Delta\alpha\\5.81 \Delta\alpha + 5.15 \Delta\delta\\2.71 \Delta\alpha + 5.7 \Delta\delta\end{matrix}\right]$
|
smohREPO_NAMEkinesisPATH_START.@kinesis_extracted@kinesis-master@notebooks@perspective-effect.ipynb@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/mesh3d/_hoverlabel.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "mesh3d"
_path_str = "mesh3d.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
    # align
    # -----
    # NOTE(review): this module looks auto-generated (plotly graph_objs
    # codegen style); confirm before hand-editing, as changes may be
    # overwritten on regeneration.
    @property
    def align(self):
        """
        Sets the horizontal alignment of the text content within hover
        label box. Has an effect only if the hover label text spans
        two or more lines.

        The 'align' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'right', 'auto']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["align"]

    @align.setter
    def align(self, val):
        self["align"] = val
    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `align`.

        The 'alignsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["alignsrc"]

    @alignsrc.setter
    def alignsrc(self, val):
        self["alignsrc"] = val
    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Sets the background color of the hover labels for this trace.

        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (e.g. 'red', 'steelblue'; any name from
            the CSS color keyword list is accepted)
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val
    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `bgcolor`.

        The 'bgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["bgcolorsrc"]

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val
    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Sets the border color of the hover labels for this trace.

        The 'bordercolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (e.g. 'red', 'steelblue'; any name from
            the CSS color keyword list is accepted)
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bordercolor"]

    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val
    # bordercolorsrc
    # --------------
    @property
    def bordercolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `bordercolor`.

        The 'bordercolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["bordercolorsrc"]

    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self["bordercolorsrc"] = val
    # font
    # ----
    @property
    def font(self):
        """
        Sets the font used in hover labels.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.mesh3d.hoverlabel.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties: color, colorsrc, family,
            familysrc, lineposition, linepositionsrc, shadow,
            shadowsrc, size, sizesrc, style, stylesrc, textcase,
            textcasesrc, variant, variantsrc, weight, weightsrc.
            See :class:`plotly.graph_objs.mesh3d.hoverlabel.Font`
            for the meaning of each property.

        Returns
        -------
        plotly.graph_objs.mesh3d.hoverlabel.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val
    # namelength
    # ----------
    @property
    def namelength(self):
        """
        Sets the default length (in number of characters) of the trace
        name in the hover labels for all traces. -1 shows the whole
        name regardless of length. 0-3 shows the first 0-3 characters,
        and an integer >3 will show the whole name if it is less than
        that many characters, but if it is longer, will truncate to
        `namelength - 3` characters and add an ellipsis.

        The 'namelength' property is an integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [-1, 9223372036854775807]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|numpy.ndarray
        """
        return self["namelength"]

    @namelength.setter
    def namelength(self, val):
        self["namelength"] = val
    # namelengthsrc
    # -------------
    @property
    def namelengthsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `namelength`.

        The 'namelengthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["namelengthsrc"]

    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self["namelengthsrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        """Plain-text description of every supported property.

        NOTE(review): the returned text is a runtime value (presumably
        consumed by the plotly base class for help/error output — confirm);
        it must not be reworded casually.
        """
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.
        """
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.mesh3d.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@mesh3d@_hoverlabel.py@.PATH_END.py
|
{
"filename": "_xaxis.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnel/_xaxis.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XaxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the `funnel.xaxis` subplot-id property."""

    def __init__(self, plotly_name="xaxis", parent_name="funnel", **kwargs):
        # Hoist the overridable defaults out of kwargs before forwarding:
        # the default subplot id is "x", and edits to this property trigger
        # a recalculation that also clears the axis types.
        default_subplot = kwargs.pop("dflt", "x")
        edit_type = kwargs.pop("edit_type", "calc+clearAxisTypes")
        super(XaxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=default_subplot,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnel@_xaxis.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "davidwhogg/NoDataInterpolation",
"repo_path": "NoDataInterpolation_extracted/NoDataInterpolation-main/python/ndi/__init__.py",
"type": "Python"
}
|
import numpy as np
import warnings
from collections import OrderedDict
from typing import List, Tuple, Optional
from itertools import cycle
def resample_spectrum(
    resample_wavelength: np.array,
    wavelength: np.array,
    flux: np.array,
    ivar: Optional[np.array] = None,
    flags: Optional[np.array] = None,
    mask: Optional[np.array] = None,
    L: Optional[float] = None,
    P: Optional[int] = None,
    X_star: Optional[np.array] = None,
    min_resampled_flag_value: Optional[float] = 0.1,
    Lambda: Optional[float] = 0.0,
    rcond: Optional[float] = 1e-15,
    full_output: Optional[bool] = False,
) -> Tuple[np.array, ...]:
    r"""
    Sample a spectrum on a wavelength array given a set of pixels recorded from one or many visits.

    :param resample_wavelength:
        A ``M_star``-length array of wavelengths to sample the spectrum on. In the paper, this is equivalent
        to the output-spectrum pixel grid $x_\star$.

    :param wavelength:
        A ``M``-length array of wavelengths from individual visits. If you have $N$ spectra where
        the $i$th spectrum has $m_i$ pixels, then $M = \sum_{i=1}^N m_i$, and this array represents a
        flattened 1D array of all wavelength positions. In the paper, this is equivalent to the
        input-spectrum pixel grid $x_i$.

    :param flux:
        A ``M``-length array of flux values from individual visits. In the paper, this is equivalent to
        the observations $y_i$.

    :param ivar: [optional]
        A ``M``-length array of inverse variance values from individual visits. In the paper, this is
        equivalent to the individual inverse variance matrices $C_i^{-1}$.

    :param flags: [optional]
        A ``M``-length array of bitmask flags from individual visits.

    :param mask: [optional]
        A ``M``-length array of boolean values indicating whether a pixel should be used or not in
        the resampling (`True` means mask the pixel, `False` means use the pixel). If `None` is
        given then all pixels will be used. The `mask` is only relevant for sampling the flux and
        inverse variance values, and not the flags.

    :param X_star: [optional]
        The design matrix to use when solving for the combined spectrum. If you are resampling
        many spectra to the same wavelength array then you will see performance improvements by
        pre-computing this design matrix and supplying it. To pre-compute it:

        ```python
        X_star = construct_design_matrix(resample_wavelength, L, P)
        ```

        Then supply `X_star` to this function, and optionally `L` and `P` to ensure consistency.

    :param L: [optional]
        The length scale for the Fourier modes.
        If `None` is given, this will default to the span of the portion of
        `resample_wavelength` that is covered by the visit spectra.

    :param P: [optional]
        The number of Fourier modes to use when solving for the resampled spectrum.
        If `None` is given, this will default to the number of pixels of
        `resample_wavelength` that are covered by the visit spectra.

    :param min_resampled_flag_value: [optional]
        The minimum value of a flag to be considered "set" in the resampled spectrum. This is
        used to reconstruct the flags in the resampled spectrum. The default is 0.1, but a
        sensible choice could be 1/N, where N is the number of visits.

    :param Lambda: [optional]
        An optional regularization strength.

    :param rcond: [optional]
        Cutoff for small singular values. Singular values less than or equal to
        ``rcond * largest_singular_value`` are set to zero (default: 1e-15).

    :param full_output: [optional]
        If `True`, a number of additional outputs will be returned. These are:

        - `sampled_separate_flags`: A dictionary of flags, where each key is a bit and each value
          is an array of 0s and 1s.
        - `X_star`: The design matrix used to solve for the resampled spectrum.
        - `L`: The length scale used to solve for the resampled spectrum.
        - `P`: The number of Fourier modes used to solve for the resampled spectrum.

    :returns:
        A three-length tuple of ``(flux, ivar, flags)`` where ``flux`` is the resampled flux values
        and ``ivar`` is the variance of the resampled fluxes, and ``flags`` are the resampled flags.
        All three arrays are length $M_\star$ (the same as ``resample_wavelength``). If ``full_output``
        is `True`, then the tuple will be length 7 with the additional outputs specified above.
    """
    linalg_kwds = dict(Lambda=Lambda, rcond=rcond)
    # Coerce and validate inputs; fills in unit ivar / all-False mask defaults.
    wavelength, flux, ivar, mask = _check_shapes(wavelength, flux, ivar, mask)
    resample_wavelength = np.array(resample_wavelength)
    # Restrict the resampled wavelength to the range of the visit spectra.
    sampled_wavelengths = wavelength[(ivar > 0) & (~mask)]
    min_sampled_wavelength, max_sampled_wavelength = (np.min(sampled_wavelengths), np.max(sampled_wavelengths))
    is_sampled = (max_sampled_wavelength >= resample_wavelength) * (resample_wavelength >= min_sampled_wavelength)
    x_star = resample_wavelength[is_sampled]
    # NOTE(review): taking endpoints as the range assumes `resample_wavelength`
    # is sorted ascending -- TODO confirm with callers.
    min_wavelength, max_wavelength = x_star[[0, -1]]
    L = L or (max_wavelength - min_wavelength)
    P = P or len(x_star)
    # We need to construct the design matrices to only be restricted by wavelength.
    # Then for flux values we will use the `mask` to restrict the flux values.
    # For flags, we will not use any masking.
    use_pixels = (
        (max_wavelength >= wavelength)
        * (wavelength >= min_wavelength)
        & (~mask)
    )
    X = construct_design_matrix(wavelength, L, P) # M x P
    Y = flux
    Cinv = ivar
    # Weighted least-squares solve restricted to usable (in-range, unmasked) pixels.
    XTCinvX_inv, XTC_invX_invXTCinv = _XTCinvX_invXTCinv(
        X[use_pixels],
        Cinv[use_pixels],
        **linalg_kwds
    )
    if X_star is None:
        X_star = construct_design_matrix(x_star, L, P)
    # Linear operator mapping visit fluxes onto the output grid.
    A_star_masked = X_star @ XTC_invX_invXTCinv
    y_star_masked = A_star_masked @ Y[use_pixels]
    # Output inverse variances from the diagonal of the propagated covariance.
    Cinv_star_masked = 1/np.diag(X_star @ XTCinvX_inv @ X_star.T)
    if np.any(Cinv_star_masked < 0):
        warnings.warn(
            "Clipping negative inverse variances to zero. It is likely that the "
            "requested wavelength range to resample is wider than the visit spectra."
        )
        Cinv_star_masked = np.clip(Cinv_star_masked, 0, None)
    separate_flags = OrderedDict()
    flags_star_masked = np.zeros(x_star.size, dtype=np.uint64)
    if flags is not None:
        # Flags are propagated WITHOUT the pixel mask: every visit pixel votes,
        # using a solve over all pixels rather than only `use_pixels`.
        _, XTC_invX_invXTCinv = _XTCinvX_invXTCinv(X, Cinv, **linalg_kwds)
        A_star = X_star @ XTC_invX_invXTCinv
        separated_flags = _separate_flags(flags)
        for bit, flag in separated_flags.items():
            separate_flags[bit] = A_star @ flag
        # Reconstruct flags: a bit is set where its resampled weight exceeds
        # `min_resampled_flag_value`.
        for k, values in separate_flags.items():
            flag = (values > min_resampled_flag_value).astype(int)
            flags_star_masked += (flag * (2**k)).astype(flags_star_masked.dtype)
    # De-mask: expand back onto the full `resample_wavelength` grid.
    # TODO: Should we return 0 fluxes as default, or NaNs? I think NaNs is better and 0 ivar.
    y_star = _un_mask(y_star_masked, is_sampled, default=np.nan)
    ivar_star = _un_mask(Cinv_star_masked, is_sampled, default=0)
    flags_star = _un_mask(flags_star_masked, is_sampled, default=0, dtype=np.uint64)
    if full_output:
        return (y_star, ivar_star, flags_star, separate_flags, X_star, L, P)
    else:
        return (y_star, ivar_star, flags_star)
def _un_mask(values, mask, default, dtype=float):
v = default * np.ones(mask.shape, dtype=dtype)
v[mask] = values
return v
def _separate_flags(flags: np.array):
"""
Separate flags into a dictionary of flags for each bit.
:param flags:
An ``M``-length array of flag values.
:returns:
A dictionary of flags, where each key is a bit and each value is an array of 0s and 1s.
"""
separated = OrderedDict()
for q in range(1 + int(np.log2(np.max(flags)))):
is_set = (flags & np.uint64(2**q)) > 0
separated[q] = np.clip(is_set, 0, 1)
return separated
def construct_design_matrix(wavelength: np.array, L: float, P: int):
    """
    Take in a set of wavelengths and return the Fourier design matrix.

    :param wavelength:
        An ``M``-length array of wavelength values.

    :param L:
        The length scale, usually taken as ``max(wavelength) - min(wavelength)``.

    :param P:
        The number of Fourier modes to use.

    :returns:
        A design matrix of shape (M, P).
    """
    # TODO: This could be replaced with something that makes use of finufft.
    phase = (np.pi * wavelength) / L
    design = np.ones((wavelength.size, P), dtype=float)
    # Columns 1..P-1 alternate sin/cos pairs at even harmonics:
    # sin(2 phase), cos(2 phase), sin(4 phase), cos(4 phase), ...
    for column, trig in zip(range(1, P), cycle((np.sin, np.cos))):
        harmonic = column + (column % 2)
        design[:, column] = trig(phase * harmonic)
    return design
def _XTCinvX_invXTCinv(X, Cinv, Lambda=0, rcond=1e-15):
N, P = X.shape
XTCinv = X.T * Cinv
XTCinvX = XTCinv @ X
XTCinvX += Lambda
XTCinvX_inv = np.linalg.pinv(XTCinvX, rcond=rcond)
XTCinvX_invXTCinv = XTCinvX_inv @ XTCinv
return (XTCinvX_inv, XTCinvX_invXTCinv)
def _check_shape(name, a, P):
a = np.array(a)
if a.size != P:
raise ValueError(f"{name} must be the same size as wavelength")
return a
def _check_shapes(wavelength, flux, ivar, mask):
    """
    Validate the per-visit arrays against the wavelength grid, filling in
    defaults (unit inverse variances, all-False mask) where not supplied.
    """
    wavelength = np.array(wavelength)
    n_pixels = wavelength.size
    flux = _check_shape("flux", flux, n_pixels)
    if ivar is None:
        # No uncertainties supplied: weight every pixel equally.
        ivar = np.ones_like(flux)
    else:
        ivar = _check_shape("ivar", ivar, n_pixels)
    if mask is None:
        # No mask supplied: use every pixel.
        mask = np.zeros(flux.shape, dtype=bool)
    else:
        mask = _check_shape("mask", mask, n_pixels).astype(bool)
    return (wavelength, flux, ivar, mask)
|
davidwhoggREPO_NAMENoDataInterpolationPATH_START.@NoDataInterpolation_extracted@NoDataInterpolation-main@python@ndi@__init__.py@.PATH_END.py
|
{
"filename": "_intensity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/mesh3d/_intensity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IntensityValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `mesh3d.intensity` data-array property."""

    def __init__(self, plotly_name="intensity", parent_name="mesh3d", **kwargs):
        # Edits to the intensity array require a full recalculation.
        edit_type = kwargs.pop("edit_type", "calc")
        super(IntensityValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@mesh3d@_intensity.py@.PATH_END.py
|
{
"filename": "conditional_spherical_isolation.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/mock_observables/isolation_functions/conditional_spherical_isolation.py",
"type": "Python"
}
|
r"""
Module containing the `~halotools.mock_observables.conditional_spherical_isolation` function
used to apply a a variety of 3d isolation criteria to a set of points in a periodic box.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from functools import partial
import multiprocessing
from .spherical_isolation import _spherical_isolation_process_args
from .isolation_functions_helpers import _conditional_isolation_process_marks
from .engines import marked_spherical_isolation_engine
from ..pair_counters.rectangular_mesh import RectangularDoubleMesh
from ..pair_counters.mesh_helpers import _set_approximate_cell_sizes, _cell1_parallelization_indices
__all__ = ('conditional_spherical_isolation', )
__author__ = ['Duncan Campbell', 'Andrew Hearin']
np.seterr(divide='ignore', invalid='ignore') # ignore divide by zero
def conditional_spherical_isolation(sample1, sample2, r_max,
        marks1=None, marks2=None, cond_func=0, period=None,
        num_threads=1, approx_cell1_size=None, approx_cell2_size=None):
    r"""
    Determine whether a set of points, ``sample1``, is isolated, i.e. does not have a
    neighbor in ``sample2`` within an user specified spherical volume centered at each
    point in ``sample1``, where various additional conditions may be applied to judge
    whether a matching point is considered to be a neighbor.

    For example, `conditional_spherical_isolation` can be used to identify galaxies as
    isolated if no other galaxy with a greater stellar mass lies within 500 kpc.
    Different additional criteria can be built up from different combinations of
    input ``marks1``, ``marks2`` and ``cond_func``.

    See the Examples section for further details, and also
    :ref:`galaxy_catalog_intermediate_analysis_tutorial1` for a tutorial on usage
    with a mock galaxy catalog.

    Parameters
    ----------
    sample1 : array_like
        *Npts1 x 3* numpy array containing 3-D positions of points.
        See the :ref:`mock_obs_pos_formatting` documentation page, or the
        Examples section below, for instructions on how to transform
        your coordinate position arrays into the
        format accepted by the ``sample1`` and ``sample2`` arguments.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    sample2 : array_like
        *Npts2 x 3* numpy array containing 3-D positions of points.

    r_max : array_like
        radius of spheres to search for neighbors around galaxies in ``sample1``.
        If a single float is given, r_max is assumed to be the same for each galaxy in
        ``sample1``. You may optionally pass in an array of length *Npts1*, in which case
        each point in ``sample1`` will have its own individual neighbor-search radius.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    marks1 : array_like, optional
        *Npts1 x N_marks* array of marks. The supplied marks array must have the
        appropriate shape for the chosen ``cond_func`` (see Notes for requirements).
        If this parameter is not specified, all marks will be set to unity.

    marks2 : array_like, optional
        *Npts2 x N_marks* array of marks. The supplied marks array must have the
        appropriate shape for the chosen ``cond_func`` (see Notes for requirements).
        If this parameter is not specified, all marks will be set to unity.

    cond_func : int, optional
        Integer ID indicating which function should be used to apply an additional
        condition on whether a nearby point should be considered as a candidate neighbor.
        This allows, for example, stellar mass-dependent isolation criteria on a
        galaxy-by-galaxy basis.
        Default is 0 for an unconditioned calculation, in which case
        points will be considered neighbor candidates regardless of the
        value of their marks.
        See Notes for a list of options for the conditional functions.

    period : array_like, optional
        Length-3 sequence defining the periodic boundary conditions
        in each dimension. If you instead provide a single scalar, Lbox,
        period is assumed to be the same in all Cartesian directions.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    num_threads : int, optional
        Number of threads to use in calculation, where parallelization is performed
        using the python ``multiprocessing`` module. Default is 1 for a purely serial
        calculation, in which case a multiprocessing Pool object will
        never be instantiated. A string 'max' may be used to indicate that
        the pair counters should use all available cores on the machine.

    approx_cell1_size : array_like, optional
        Length-3 array serving as a guess for the optimal manner by how points
        will be apportioned into subvolumes of the simulation box.
        The optimum choice unavoidably depends on the specs of your machine.
        Default choice is to use ``r_max``/10 in each dimension,
        which will return reasonable result performance for most use-cases.
        Performance can vary sensitively with this parameter, so it is highly
        recommended that you experiment with this parameter when carrying out
        performance-critical calculations.

    approx_cell2_size : array_like, optional
        Analogous to ``approx_cell1_size``, but for ``sample2``. See comments for
        ``approx_cell1_size`` for details.

    Returns
    -------
    is_isolated : numpy.array
        array of booleans indicating if each point in `sample1` is isolated.

    Notes
    -----
    The `~halotools.mock_observables.conditional_spherical_isolation` function only differs
    from the `~halotools.mock_observables.spherical_isolation` function in the treatment of
    the input marks. In order for a point *p2* in ``sample2`` with mark :math:`w_{2}`
    to be considered a neighbor of a point *p1* in ``sample1`` with mark :math:`w_{1}`,
    two following conditions must be met:

    #. *p2* must lie within a distance ``r_max`` of *p1*, and

    #. the input conditional marking function :math:`f(w_{1}, w_{2})` must return *True*.

    There are multiple conditional functions available. In general, each requires a
    different number of marks per point, N_marks.
    All conditional functions return a boolean and get passed
    two arrays per pair, *w1* and *w2*, each of length N_marks.
    You can pass in more than one piece of information about each point by choosing a
    the input ``marks`` arrays to be multi-dimensional of shape (N_points, N_marks).

    The available marking functions, ``cond_func``, and the associated integer
    ID numbers are:

    0. trivial (N_marks = 1)
        .. math::
            f(w_1,w_2) = True

    1. greater than (N_marks = 1)
        .. math::
            f(w_1,w_2) =
                \left \{
                \begin{array}{ll}
                    True & : w_1[0] > w_2[0] \\
                    False & : w_1[0] \leq w_2[0] \\
                \end{array}
                \right.

    2. less than (N_marks = 1)
        .. math::
            f(w_1,w_2) =
                \left \{
                \begin{array}{ll}
                    True & : w_1[0] < w_2[0] \\
                    False & : w_1[0] \geq w_2[0] \\
                \end{array}
                \right.

    3. equality (N_marks = 1)
        .. math::
            f(w_1,w_2) =
                \left \{
                \begin{array}{ll}
                    True & : w_1[0] = w_2[0] \\
                    False & : w_1[0] \neq w_2[0] \\
                \end{array}
                \right.

    4. inequality (N_marks = 1)
        .. math::
            f(w_1,w_2) =
                \left \{
                \begin{array}{ll}
                    True & : w_1[0] \neq w_2[0] \\
                    False & : w_1[0] = w_2[0] \\
                \end{array}
                \right.

    5. tolerance greater than (N_marks = 2)
        .. math::
            f(w_1,w_2) =
                \left \{
                \begin{array}{ll}
                    True & : w_1[0] > (w_2[0]+w_1[1]) \\
                    False & : w_1[0] \leq (w_2[0]+w_1[1]) \\
                \end{array}
                \right.

    6. tolerance less than (N_marks = 2)
        .. math::
            f(w_1,w_2) =
                \left \{
                \begin{array}{ll}
                    True & : w_1[0] < (w_2[0]+w_1[1]) \\
                    False & : w_1[0] \geq (w_2[0]+w_1[1]) \\
                \end{array}
                \right.

    Examples
    --------
    In this first example, we will show how to calculate the following notion of
    galaxy isolation. A galaxy is isolated if there are zero other *more massive*
    galaxies within 5 Mpc.

    First we create a random distribution of points inside the box:

    >>> Npts = 1000
    >>> Lbox = 250.
    >>> x = np.random.uniform(0, Lbox, Npts)
    >>> y = np.random.uniform(0, Lbox, Npts)
    >>> z = np.random.uniform(0, Lbox, Npts)

    We transform our *x, y, z* points into the array shape used by the pair-counter by
    taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
    is used throughout the `~halotools.mock_observables` sub-package:

    >>> sample1 = np.vstack((x,y,z)).T

    Now we will choose random stellar masses for our galaxies:

    >>> stellar_mass = np.random.uniform(1e10, 1e12, Npts)

    Since we are interested in whether a point in ``sample1`` is isolated from other points
    in ``sample1``, we set ``sample2`` to ``sample1`` and both ``marks1`` and ``marks2``
    equal to ``stellar_mass``.

    >>> sample2 = sample1
    >>> marks1 = stellar_mass
    >>> marks2 = stellar_mass

    Referring to the Notes above for the definitions of the conditional marking functions,
    we see that for this particular isolation criteria the appropriate ``cond_func`` is 2.
    The reason is that this function only evaluates to *True* for those points in ``sample2``
    that are more massive than the ``sample1`` point under consideration. Thus the only
    relevant points to consider as candidate neighbors are the more massive ones; all other
    ``sample2`` points will be disregarded irrespective of their distance from the
    ``sample1`` point under consideration.

    >>> r_max = 5.0
    >>> cond_func = 2
    >>> is_isolated = conditional_spherical_isolation(sample1, sample2, r_max, marks1, marks2, cond_func, period=Lbox)
    """
    # Process the inputs with the helper function
    result = _spherical_isolation_process_args(sample1, sample2, r_max, period,
        num_threads, approx_cell1_size, approx_cell2_size)
    x1in, y1in, z1in, x2in, y2in, z2in = result[0:6]
    r_max, max_r_max, period, num_threads, PBCs, approx_cell1_size, approx_cell2_size = result[6:]
    xperiod, yperiod, zperiod = period

    # The search length in each dimension is the largest requested radius.
    search_xlength, search_ylength, search_zlength = max_r_max, max_r_max, max_r_max

    # Compute the estimates for the cell sizes
    approx_cell1_size, approx_cell2_size = (
        _set_approximate_cell_sizes(approx_cell1_size, approx_cell2_size, period)
    )
    approx_x1cell_size, approx_y1cell_size, approx_z1cell_size = approx_cell1_size
    approx_x2cell_size, approx_y2cell_size, approx_z2cell_size = approx_cell2_size

    # Build the rectangular mesh once (a second identical construction here
    # was redundant and has been removed).
    double_mesh = RectangularDoubleMesh(x1in, y1in, z1in, x2in, y2in, z2in,
        approx_x1cell_size, approx_y1cell_size, approx_z1cell_size,
        approx_x2cell_size, approx_y2cell_size, approx_z2cell_size,
        search_xlength, search_ylength, search_zlength, xperiod, yperiod, zperiod, PBCs)

    # Process the input marks and with the helper function
    marks1, marks2 = _conditional_isolation_process_marks(sample1, sample2, marks1, marks2, cond_func)

    # Create a function object that has a single argument, for parallelization purposes
    engine = partial(marked_spherical_isolation_engine,
        double_mesh, x1in, y1in, z1in, x2in, y2in, z2in,
        marks1, marks2, cond_func, r_max)

    # Calculate the cell1 indices that will be looped over by the engine
    num_threads, cell1_tuples = _cell1_parallelization_indices(
        double_mesh.mesh1.ncells, num_threads)

    if num_threads > 1:
        # Fan the cell loop out over a process pool and sum the per-chunk counts.
        pool = multiprocessing.Pool(num_threads)
        result = pool.map(engine, cell1_tuples)
        counts = np.sum(np.array(result), axis=0)
        pool.close()
    else:
        counts = engine(cell1_tuples[0])

    # Non-zero neighbor counts mark non-isolated points; the engine returns
    # the isolation indicator directly as 0/1 values.
    is_isolated = np.array(counts, dtype=bool)

    return is_isolated
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@mock_observables@isolation_functions@conditional_spherical_isolation.py@.PATH_END.py
|
{
"filename": "Untitled-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaseline_noMesSmear/.ipynb_checkpoints/Untitled-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spec
import pandas as pd
from astropy.io import ascii
from astropy.table import Table, vstack
import pickle
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from ipywidgets import FloatProgress
from IPython.display import display
```
```python
dr25CleanStellarGKIso = pd.read_csv("../stellarCatalogs/dr25_stellar_supp_gaia_clean_GK.txt")
dr25CleanStellarAllIso = pd.read_csv("../stellarCatalogs/dr25_stellar_supp_gaia_logg.txt")
dr25CleanStellarFeh = pd.read_csv("../stellarCatalogs/dr25_stellar_updated_feh.txt")
dr25CleanStellarFehAll = pd.read_csv("../stellarCatalogs/dr25_stellar_updated_feh_all.txt")
dr25CleanStellarSuppGaia = pd.read_csv("../stellarCatalogs/dr25_stellar_supp_gaia.txt")
base_kois = pd.read_csv("koiCatalogs/dr25_GK_PCs.csv")
# base_kois = pd.read_csv("koiCatalogs/dr25_GK_burke_catalog_PCs.csv")
```
```python
# ChrisStars = pd.read_csv("Chris/DR25_GKdwarf_Clean.txt", sep=" ")
ChrisStars = pd.read_csv("Chris/DR25_GKdwarf_GAIA_Clean.txt", sep=" ")
# One-off preprocessing: derive a 9-character KOI name from Chris's kepoi
# string and cache the result to CSV. Flip the guard to True to regenerate
# the cached file; otherwise load the cached copy.
if False:
    ChrisPCs = pd.read_csv("Chris/DR25_GKdwarf_PC_GAIA_Scr2Clean.txt", sep="|")
    ChrisPCs["kepoi_name"] = ""
    f = FloatProgress(min=0, max=len(ChrisPCs))
    display(f)
    for i in range(len(ChrisPCs)):
        # NOTE(review): chained assignment (ChrisPCs.kepoi_name[i] = ...)
        # relies on pandas returning a view here; .loc[i, "kepoi_name"]
        # would be the safe form -- confirm before regenerating.
        ChrisPCs.kepoi_name[i] = ChrisPCs.chris_kepoi_string[i][0:9]
        f.value += 1
    ChrisPCs.to_csv("Chris/DR25_GKdwarf_PC_GAIA_Scr2Clean_renamed.csv", index=False)
else:
    ChrisPCs = pd.read_csv("Chris/DR25_GKdwarf_PC_GAIA_Scr2Clean_renamed.csv")
```
```python
mergedDr25Stellar = pd.merge(dr25CleanStellarGKIso, ChrisStars, on="kepid", how="inner")
mergedPCs = pd.merge(base_kois, ChrisPCs, on="kepoi_name", how="inner")
chrisPCNotInMerge = ChrisPCs[~ChrisPCs.kepoi_name.isin(mergedPCs.kepoi_name)]
```
```python
ChrisPCs.kepoi_name[0]
```
```python
base_kois
```
```python
period_rng = (50, 200)
rp_rng = (1., 2.)
chrisPCNotInMergeInBox = chrisPCNotInMerge[(chrisPCNotInMerge.chris_period>=period_rng[0])&(chrisPCNotInMerge.chris_period<=period_rng[1])&(chrisPCNotInMerge.chris_prad>=rp_rng[0])&(chrisPCNotInMerge.chris_prad<=rp_rng[1])]
print(len(chrisPCNotInMergeInBox))
chrisPCNotInMergeInBox
```
```python
print("There are " + str(len(dr25CleanStellarGKIso)) + " Berger2019 GK stars")
```
```python
ChrisStars[ChrisStars.kepid == 5888187]
```
```python
dr25CleanStellarFeh[dr25CleanStellarFeh.kepid == 5888187]
```
```python
dr25CleanStellarFehAll[dr25CleanStellarFehAll.kepid == 5888187]
```
```python
dr25CleanStellarSuppGaia[dr25CleanStellarSuppGaia.kepid == 5888187]
```
```python
dr25CleanStellarAllIso[dr25CleanStellarAllIso.kepid == 5888187]
```
```python
dr25CleanStellarGKIso[dr25CleanStellarGKIso.kepid == 5888187]
```
```python
# NOTE(review): this compares KOI name strings (kepoi_name) against integer
# stellar kepids, so isin() presumably never matches and the count always
# equals len(chrisPCNotInMergeInBox). Later cells use .chris_kepid for this
# comparison -- confirm which column was intended here.
print(str(len(chrisPCNotInMergeInBox[~chrisPCNotInMergeInBox.kepoi_name.isin(dr25CleanStellarAllIso.kepid)]))
      + " of " + str(len(chrisPCNotInMergeInBox)) + " in-box Chris PCs not in the merge are not in the Berger 2019 catalog")
```
```python
len(ChrisPCs)
```
```python
len(mergedPCs)
```
```python
len(base_kois)
```
```python
plt.figure(figsize=(5,5));
plt.plot(mergedPCs.corrected_prad, mergedPCs.chris_prad, "k.");
plt.title("PCs GK dwarfs")
plt.xlabel("iso-fitted corrected radius")
plt.ylabel("Burke corrected radius")
dd = mergedPCs.corrected_prad/mergedPCs.chris_prad
plt.figure(figsize=(5,5));
plt.hist(dd, 100);
plt.title("PCs GK dwarfs ratio of iso-fitted corrected radius to Burke corrected radius")
plt.xlabel("ratio")
```
```python
steveInBoxPcs = mergedPCs[(mergedPCs.koi_period>=period_rng[0])&(mergedPCs.koi_period<=period_rng[1])&(mergedPCs.corrected_prad>=rp_rng[0])&(mergedPCs.corrected_prad<=rp_rng[1])]
chrisInBoxPcs = mergedPCs[(mergedPCs.chris_period>=period_rng[0])&(mergedPCs.chris_period<=period_rng[1])&(mergedPCs.chris_prad>=rp_rng[0])&(mergedPCs.chris_prad<=rp_rng[1])]
chrisOrigInBoxPcs = ChrisPCs[(ChrisPCs.chris_period>=period_rng[0])&(ChrisPCs.chris_period<=period_rng[1])&(ChrisPCs.chris_prad>=rp_rng[0])&(ChrisPCs.chris_prad<=rp_rng[1])]
print("There are " + str(len(steveInBoxPcs)) + " steve PCs in the box")
print("There are " + str(len(chrisInBoxPcs)) + " chris PCs in the box")
print("There are " + str(len(chrisOrigInBoxPcs)) + " original chris PCs in the box")
```
```python
plt.figure(figsize=(5,5));
plt.plot(mergedDr25Stellar.radius_x, mergedDr25Stellar.radius_y, "k.");
plt.title("GK dwarfs")
plt.xlabel("Berger original radius")
plt.ylabel("Burke radius")
```
```python
plt.figure(figsize=(5,5));
plt.plot(mergedDr25Stellar.iso_rad, mergedDr25Stellar.radius_y, "k.");
plt.title("GK dwarfs")
plt.xlabel("Berger isochrone-fitted radius")
plt.ylabel("Burke radius")
```
```python
dd = mergedDr25Stellar.iso_rad/mergedDr25Stellar.radius_y
plt.figure(figsize=(5,5));
plt.hist(dd, 100);
plt.title("ratio of Berger isochrone-fitted radius to Burke radius")
plt.xlabel("ratio")
```
```python
dr25CleanStellarFeh[dr25CleanStellarFeh.kepid.isin(chrisPCNotInMergeInBox.chris_kepid)]["kepid"]
```
```python
dr25CleanStellarFeh[dr25CleanStellarFeh.kepid.isin(chrisPCNotInMergeInBox.chris_kepid)]["kepmag"]
```
```python
68284 - 63631
```
```python
# Look up the in-box Chris PCs' host stars in the updated-FeH stellar catalog
# and print a plain-text kepid/kepmag table for them.
missing = dr25CleanStellarFeh[dr25CleanStellarFeh.kepid.isin(chrisPCNotInMergeInBox.chris_kepid)]
print("kepid kepmag")
for i in range(len(missing)):
    print(str(missing.iloc[i]["kepid"]) + " " + str(missing.iloc[i]["kepmag"]))
```
```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaseline_noMesSmear@.ipynb_checkpoints@Untitled-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "_config.py",
"repo_name": "SBU-COSMOLIKE/CAMBLateDE",
"repo_path": "CAMBLateDE_extracted/CAMBLateDE-main/build/lib/camb/_config.py",
"type": "Python"
}
|
import os
from .baseconfig import import_property, CAMBError
from ctypes import c_char, c_int, c_bool, c_double
# Integer codes accepted by config.lensing_method, selecting the CMB lensing
# algorithm implemented in the Fortran lensing module.
lensing_method_curv_corr = 1
lensing_method_flat_corr = 2
lensing_method_harmonic = 3
class _config:
    """ctypes view of global option/state variables in the compiled CAMB library.

    Each class attribute is an import_property bound to a variable in the
    named Fortran module ("config", "lensing", ...), so getting or setting
    the attribute reads/writes the shared-library global directly. A single
    instance is exported below as the module-level ``config`` singleton.
    """
    # print feedback if > 0 (note in Jupyter notebook this will appear in the terminal, not the notebook)
    FeedbackLevel = import_property(c_int, "config", "FeedbackLevel")
    # print additional timing and progress (when FeedbackLevel>0)
    DebugMsgs = import_property(c_bool, "config", "DebugMsgs")
    # non-zero when an error has been raised inside the Fortran code
    global_error_flag = import_property(c_int, "config", "global_error_flag")
    ThreadNum = import_property(c_int, "config", "threadnum")
    DoTensorNeutrinos = import_property(c_bool, "gaugeinterface", "dotensorneutrinos")
    DebugParam = import_property(c_double, "config", "debugparam")
    # one of the lensing_method_* constants defined above
    lensing_method = import_property(c_int, "lensing", "lensing_method")
    lensing_sanity_check_amplitude = import_property(c_double, "lensing", "lensing_sanity_check_amplitude")
    # lensing_sanity_check_amplitude.value = 1e-7 by default, will error if (2*L+1)L(L+1)/4pi C_phi_phi > lensing_
    # sanity_check_amplitude at L=10
    # increase to large number to prevent sanity check (but lensing requires realistic amplitude as non-linear)
    lensing_includes_tensors = import_property(c_bool, "lensing", "lensing_includes_tensors")
    transfer_power_var = import_property(c_int, "transfer", "transfer_power_var")
    # raw fixed-size character buffer holding the Fortran error message text
    _global_error_message = import_property(c_char * 1024, "config", "global_error_message")
    def global_error_message(self) -> str:
        """Return the current Fortran error message, ASCII-decoded and stripped."""
        return bytearray(self._global_error_message).decode('ascii').strip()
    def check_global_error(self, reference=''):
        """Raise CAMBError if the Fortran global error flag is set, then clear it.

        :param reference: optional name of the calling context, prepended to
            the raised error text.
        """
        code = self.global_error_flag
        if code:
            # reads via the module-level singleton; equivalent to self here
            err = config.global_error_message()
            self.global_error_flag = 0
            if reference:
                reference = 'Error in Fortran called from %s:\n' % reference
            else:
                reference = ''
            if err:
                raise CAMBError(reference + '%s' % err)
            else:
                raise CAMBError(reference + 'Error code: %s' % code)
    def __repr__(self):
        """List every public, non-callable attribute with its current value."""
        s = ''
        for x in dir(self):
            if x[0] != '_':
                value = getattr(self, x)
                if not callable(value):
                    s += '%s = %s\n' % (x, value)
        return s
# Module-level singleton through which the rest of camb accesses the globals.
config = _config()
if os.environ.get('BINDER_LAUNCH_HOST'):
    config.ThreadNum = 1  # binder is very slow with more than 1 CPU, force 1 by default
|
SBU-COSMOLIKEREPO_NAMECAMBLateDEPATH_START.@CAMBLateDE_extracted@CAMBLateDE-main@build@lib@camb@_config.py@.PATH_END.py
|
{
"filename": "fitting.ipynb",
"repo_name": "PetroFit/petrofit",
"repo_path": "petrofit_extracted/petrofit-main/docs/fitting.ipynb",
"type": "Jupyter Notebook"
}
|
# Image Fitting
Most galaxy light profiles can be well described by PSF-convolved models like the Sérsic profile. PetroFit uses the `astropy` `modeling` sub-module to provide tools to perform two-dimensional fits of galaxy light profiles. To this end, we use the PetroFit ` PSFConvolvedModel2D` class, which applies PSF convolution to and handles oversampling for `astropy` based models.
In this section, we demonstrate the basics of light profile modeling on a galaxy using a single component Sérsic profile.
To start with PetroFit, simply import it as follows:
```python
import petrofit as pf
```
## Loading Example Data
The dataset we're using is a synthetic image of a galaxy, created using astropy's `Sersic2D` model.
This galaxy representation is convolved with a PSF for the F105W filter using petrofit's PSFConvolvedModel2D to simulate observational data.
We also added noise to the data and provide a corresponding RMS map.
Key features of the synthetic galaxy:
- Sérsic index of 1 (exponential profile).
- Effective radius of 15 pixels.
- Positioned at (100, 75) pixels.
- Rotated by $\frac{\pi}{4}$.
- With ellip=0.1
### Loading Data and RMS Images
We first use `astropy`'s ``CCDData`` to load the example data and visualize it through `matplotlib`. The RMS image is loaded using `astropy`'s ``fits`` sub-module.
```python
from astropy.nddata import CCDData
from astropy.io import fits
image = CCDData.read('data/example_sersic.fits.gz', unit='electron s-1')
rms = fits.getdata('data/example_rms.fits.gz')
```
```python
# Hidden cell
%matplotlib inline
# Stop Fit Model to Data section warnings
import warnings
warnings.filterwarnings('ignore', append=True)
```
```python
import numpy as np
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = [6, 6]
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['font.size'] = 12
vmax = 0.005 # Use the image std as max and min of all plots
vmin = - vmax
fig, axs = plt.subplots(1,2, figsize=[12, 6])
plt.sca(axs[0])
plt.imshow(image.data, vmin=vmin, vmax=vmax)
plt.title("Mock Galaxy")
plt.xlabel("Pixels")
plt.ylabel("Pixels")
plt.sca(axs[1])
plt.imshow(rms)
plt.title("RMS Image")
plt.xlabel("Pixels")
plt.ylabel("Pixels")
plt.show()
```
## PSF
A Point Spread Function (PSF) describes how light from a point source is distributed on the detector due to optical effects such as diffraction. Images or cutouts of stars are good approximations of PSFs because stars are single-point sources and their images describe how their light is distributed on the detector. To make cutouts of stars in an image, use the ` astropy.nddata.Cutout2D` function.
The following PSF is a cutout of a star in the Hubble Frontier Fields image of Abell 2744 (same dataset as the example image). Since we will be using the PSF image as a convolution kernel, it is **very important** that the following requirements are satisfied:
- The image of the PSF should be at the same resolution as the data.
- The star or PSF is centered in the image.
- The PSF image does not contain other sources.
- The image is normalized so that the sum of the PSF image is near or equal to 1.0.
- The PSF image should have odd dimensions on each side (because it is a convolution kernel).
```python
from astropy.io import fits
# Load PSF image (2D array)
PSF = fits.getdata('data/f105w_psf.fits.gz')
# Normalize PSF
PSF = PSF / PSF.sum()
# Note that the PSF shape is odd on all sides
print("PSF Shape = {}".format(PSF.shape))
# Plot PSF and use vmax and vmin to show diffraction spikes
plt.imshow(PSF, vmin=0, vmax=PSF.std()/10)
plt.show()
```
## Sérsic Model
### Sérsic Parameters
The `amplitude`, `r_eff`, `n`, `x_0`, `y_0`, `ellip`, and `theta` represent the galaxy's brightness,
effective radius, Sérsic index, position, ellipticity, and orientation, respectively. Here we make rough estimates of the parameters:
```python
amplitude=0.2
r_eff=20
n=1
x_0=107
y_0=70
ellip=0.1
theta=0.1
```
### AstroPy Sérsic Model
Here, we are setting up a 2D galaxy light profile model using astropy's Sersic2D model.
The Sersic2D model is a widely-used representation of the light distribution of elliptical galaxies.
We also define a set of `bounds`, a dictionary of lower and upper bounds of parameters.
Keys are parameter names. The values are a list or a tuple of length 2 giving the desired range for
the parameter and a value of `None` means no bounds. The default bounds can be provided using the
PetroFit `get_default_sersic_bounds` function. For example, we restrain the fitter from exploring
half-light radii that are negative by adding `'r_eff': (0, None)`.
We also apply a custom restriction for the center of the model to be within a range (`center_slack`) from the initial guess.
```python
from astropy.modeling import models
center_slack = 20
sersic_model = models.Sersic2D(
amplitude=amplitude,
r_eff=r_eff,
n=n,
x_0=x_0,
y_0=y_0,
ellip=ellip,
theta=theta,
bounds = pf.get_default_sersic_bounds({
'x_0': (x_0 - center_slack/2, x_0 + center_slack/2),
'y_0': (y_0 - center_slack/2, y_0 + center_slack/2),
}),
)
sersic_model
```
## PSFConvolvedModel2D
The `petrofit` `PSFConvolvedModel2D` is a `Fittable2DModel` that adds PSF convolution and model to image sampling to `astropy` core models. `PSFConvolvedModel2D` makes an image of the underlying model and samples it onto a grid. The model image is then convolved with a PSF if one is provided. Since `PSFConvolvedModel2D` is a `Fittable2DModel`, it can be used to fit model images to data images. For example, we wrap an `astropy` `Sersic2D` model in this doc with `PSFConvolvedModel2D`, which produces an oversampled and PSF convolved version of the Sérsic profile at each iteration of the fitting algorithm. **Note that `PSFModel` is deprecated and replaced by `PSFConvolvedModel2D`.**
<div class="admonition note">
<p class="admonition-title">Note</p>
<p><code class="docutils literal notranslate"><span class="pre">PSFConvolvedModel2D</span></code> is agnostic to the models it wraps and can handle complex multi-component <code class="docutils literal notranslate"><span class="pre">astropy</span></code> models.</p>
</div>
### Pixel Centering in PSFConvolvedModel2D
PSFConvolvedModel2D adopts the DS9 coordinate system, where the pixel index corresponds to its center. Thus, an index of 0 designates the center of the first pixel. This is distinct from the GALFIT convention, and users should note this difference when comparing results between tools.
### Oversampling
One of the advantages of using `PSFConvolvedModel2D` is its ability to sample models onto model images. Sometimes the models have regions that have to be oversampled to produce better estimates of the data. `PSFConvolvedModel2D` can oversample the entire model image or a specific pixel region of the image. The oversampling factor and region can be specified in the `oversample` keyword argument when wrapping an `astropy` model or during run time by setting the `PSFConvolvedModel2D.oversample` attribute.
**Disable Oversampling (Default)**
To disable oversampling, set the `oversampling` argument or attribute to `None`
```python
# Disable Oversampling
oversample = None
```
**Oversample Entire Model Image**
To oversample the image by a factor, you can pass a single integer value. For example:
```python
# Oversample the entire image by a factor of 4
oversample = 4
```
**Oversample a Fixed Region**
To oversample a fixed region of finite size, specify the center pixel, the length of the square region and the oversampling factor. This means passing a tuple of `(center_x, center_y, box_length, oversample_factor)`. For example:
```python
# Replace the pixel values in a box of
# length 20 centered at (x=50, y=60) with a box of
# the same size that has been oversampled by a factor of 5
# i.e (x=50 y=60, box_length=20, oversample_factor=5)
oversample = (50, 60, 20, 5)
```
**Oversample a Moving Region**
If the model is being fit, the center of the model is likely to move around. To account for this, we can specify the names of the model parameters that define the center of the box that we are interested in oversampling as strings. This means passing a tuple of `(model_param_x, model_param_y, box_length, oversample_factor)`. For example:
```python
# Replace the pixel values in a box of
# length 20 centered at (x=model.x_0, y=model.y_0) with a box of
# the same size that has been oversampled by a factor of 5
# i.e (model.x_0, model.y_0, box_length=20, oversample_factor=5)
oversample = ('x_0', 'y_0', 20, 5)
```
### Oversampled PSF
The PSF can have intricate details and variations that are not well-captured if we simply sample at the same rate as the data image.
This is where the concept of an oversampled PSF comes into play.
An oversampled PSF is essentially a higher-resolution representation of the PSF, capturing its subtle variations with more detail.
This is beneficial because, during convolution, these details interact with the underlying data, ensuring a more accurate representation of the light distribution.
`PSFConvolvedModel2D` facilitates this by allowing users to specify an oversampled PSF alongside the model.
The `psf_oversample` keyword argument, or attribute, controls the oversampling factor of the PSF.
It's essential to remember that when working with both oversampled models and PSFs, compatibility is key.
The `PSFConvolvedModel2D` class ensures that the model's oversampling rate (oversample) is always an integer multiple of the PSF's oversampling rate (`psf_oversample`).
```python
# The star image PSF is at the
# same resolution as the data
psf_oversample = 1
```
### Create PetroFit Model
Now that we have an `astropy` model, PSF and oversampling rule, we can create a `PSFConvolvedModel2D` model as follows:
```python
psf_sersic_model = pf.PSFConvolvedModel2D(sersic_model, psf=PSF, oversample=4, psf_oversample=1)
```
The `PSFConvolvedModel2D` inherits all of the parameters, fixed-parameter rules and parameter bounds from the input `astropy` model. Notice that a new parameter, `psf_pa` is added to enable PSF rotation.
```python
print(psf_sersic_model.param_names)
```
```python
print(psf_sersic_model.bounds)
```
### PSF Rotation
`PSFConvolvedModel2D` can rotate the PSF image until an optimal rotation angle is found. This is useful when the PSF comes from a dataset where the orientation of the diffraction spikes is not the same as the image being fit. `psf_pa` is in degrees.
To restrict the bounds of the rotation or disable the PSF rotation, you can set the psf_pa to fixed:
```python
# Limit PSF rotation to -5 to 5 degrees
psf_sersic_model.bounds['psf_pa'] = (-5, 5)
# To disable the PSF rotation,
# you can set the psf_pa to fixed.
psf_sersic_model.fixed['psf_pa'] = True
```
### Accessing the Underlying Model
At any point, a copy of the input model with the same parameter values as the corresponding `PSFConvolvedModel2D` can be accessed using the `PSFConvolvedModel2D.model` attribute:
```python
psf_sersic_model.model
```
### Visualize Initial Guess Model
Here we visualize the initial guess model using the `plot_fit` function:
```python
pf.plot_fit(psf_sersic_model, image.data, vmax=vmax, vmin=vmin, figsize=[3*6, 6])
plt.show()
```
Looks like we'd better fit this model to optimize its parameters...
## Fitting Models
PetroFit uses the Levenberg-Marquardt, Trust Region Reflective algorithm, and linear least-squares algorithms to fit parametrized models. To achieve this, it uses `astropy` fitting and provides wrappers to fit models to images. One such function is `fit_model`, which takes any `Fittable2DModel` model and an image to fit, and returns a fitted copy of the model and the `fit_info` dictionary. If the image to be fit contains pixels that are set to `np.nan`, those pixels are ignored by the fitter. The `fit_model` function also allows us to define parameters, such as ` maxiter`, for the `astropy` fitter.
Before we fit the image, we compute the weights of each pixel using rms data as follows (please note that weights are optional and set to `None` by default):
```python
fitting_weights = 1 / rms
```
To fit the galaxy we prepared with the `PSFConvolvedModel2D` we constructed, we call the `fit_model` as follows:
```python
%%time
fitted_model, fit_info = pf.fit_model(
image.data, psf_sersic_model,
weights=fitting_weights,
calc_uncertainties=True,
maxiter=10000,
epsilon=1.4901161193847656e-08,
acc=1e-09,
)
```
That’s it! The returned `fitted_model` is a copy of the input model (`psf_sersic_model`) but with the optimized parameter values. We can inspect the parameters of any `astropy` model using the `print_model_params`:
```python
pf.print_model_params(fitted_model)
```
### Parameter Errors
When `calc_uncertainties` is enabled in the `fit_model` function, Astropy's fitter calculates the parameter uncertainties using the covariance matrix.
To extract the standard deviation of the parameters, given that the covariance matrix is available:
```python
# covariance matrix dict:
fitted_model.cov_matrix
```
```python
param_stds = fitted_model.stds
for param, std in zip(param_stds.param_names, param_stds.stds):
print("{:<10} {}".format(param, std))
```
## Generate Model Image
To generate a model image we use the `plot_fit` function. The function, given a 2D model and fitted image, converts the model into a model-image we can visualize and manipulate.
```python
pf.plot_fit(fitted_model, image.data, vmax=vmax, vmin=vmin, figsize=[3*6, 6])
plt.show()
```
|
PetroFitREPO_NAMEpetrofitPATH_START.@petrofit_extracted@petrofit-main@docs@fitting.ipynb@.PATH_END.py
|
{
"filename": "test_svmlight_format.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/datasets/tests/test_svmlight_format.py",
"type": "Python"
}
|
import gzip
import os
import shutil
from bz2 import BZ2File
from importlib import resources
from io import BytesIO
from tempfile import NamedTemporaryFile
import numpy as np
import pytest
import scipy.sparse as sp
import sklearn
from sklearn.datasets import dump_svmlight_file, load_svmlight_file, load_svmlight_files
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
create_memmap_backed_data,
)
from sklearn.utils.fixes import CSR_CONTAINERS
# Package containing the on-disk svmlight test fixtures used below.
TEST_DATA_MODULE = "sklearn.datasets.tests.data"
# well-formed classification file
datafile = "svmlight_classification.txt"
# well-formed multilabel file
multifile = "svmlight_multilabel.txt"
# malformed file (used to exercise parser error paths)
invalidfile = "svmlight_invalid.txt"
# file whose feature indices are out of column order
invalidfile2 = "svmlight_invalid_order.txt"
def _svmlight_local_test_file_path(filename):
    """Return a Traversable for test-data resource `filename`."""
    return resources.files(TEST_DATA_MODULE).joinpath(filename)
def _load_svmlight_local_test_file(filename, **kwargs):
    """Load test-data resource `filename` via `importlib.resources`.

    Extra keyword arguments are forwarded to `load_svmlight_file`.
    """
    data_path = _svmlight_local_test_file_path(filename)
    with data_path.open("rb") as f:
        return load_svmlight_file(f, **kwargs)
def test_load_svmlight_file():
    """Check shape, stored values, mutability and labels of the fixture file."""
    X, y = _load_svmlight_local_test_file(datafile)
    # test X's shape
    assert X.indptr.shape[0] == 7
    assert X.shape[0] == 6
    assert X.shape[1] == 21
    assert y.shape[0] == 6
    # test X's non-zero values
    for i, j, val in (
        (0, 2, 2.5),
        (0, 10, -5.2),
        (0, 15, 1.5),
        (1, 5, 1.0),
        (1, 12, -3),
        (2, 20, 27),
    ):
        assert X[i, j] == val
    # tests X's zero values
    assert X[0, 3] == 0
    assert X[0, 5] == 0
    assert X[1, 8] == 0
    assert X[1, 16] == 0
    assert X[2, 18] == 0
    # test can change X's values
    X[0, 2] *= 2
    assert X[0, 2] == 5
    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    """Loading from an OS file descriptor matches loading from a path string.

    Non-regression test for GH20081.
    """
    data_path = resources.files(TEST_DATA_MODULE) / datafile
    data_path = str(data_path)
    X1, y1 = load_svmlight_file(data_path)
    fd = os.open(data_path, os.O_RDONLY)
    try:
        X2, y2 = load_svmlight_file(fd)
        assert_array_almost_equal(X1.data, X2.data)
        assert_array_almost_equal(y1, y2)
    finally:
        # always release the raw descriptor we opened ourselves
        os.close(fd)
def test_load_svmlight_pathlib():
    """Loading via a path-like object must match loading via a string path."""
    path = _svmlight_local_test_file_path(datafile)
    X_from_str, y_from_str = load_svmlight_file(str(path))
    X_from_path, y_from_path = load_svmlight_file(path)
    assert_allclose(X_from_str.data, X_from_path.data)
    assert_allclose(y_from_str, y_from_path)
def test_load_svmlight_file_multilabel():
    """Multilabel targets are returned as one tuple of labels per sample."""
    expected_labels = [(0, 1), (2,), (), (1, 2)]
    X, y = _load_svmlight_local_test_file(multifile, multilabel=True)
    assert y == expected_labels
def test_load_svmlight_files():
    """load_svmlight_files returns consistent matrices with the requested dtype."""
    data_path = _svmlight_local_test_file_path(datafile)
    X_train, y_train, X_test, y_test = load_svmlight_files(
        [str(data_path)] * 2, dtype=np.float32
    )
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_almost_equal(y_train, y_test)
    assert X_train.dtype == np.float32
    assert X_test.dtype == np.float32
    X1, y1, X2, y2, X3, y3 = load_svmlight_files([str(data_path)] * 3, dtype=np.float64)
    assert X1.dtype == X2.dtype
    assert X2.dtype == X3.dtype
    assert X3.dtype == np.float64
def test_load_svmlight_file_n_features():
    """n_features may widen the matrix but must not be below the file's max index."""
    X, y = _load_svmlight_local_test_file(datafile, n_features=22)
    # test X's shape
    assert X.indptr.shape[0] == 7
    assert X.shape[0] == 6
    assert X.shape[1] == 22
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (1, 5, 1.0), (1, 12, -3)):
        assert X[i, j] == val
    # 21 features in file, so asking for fewer must fail
    with pytest.raises(ValueError):
        _load_svmlight_local_test_file(datafile, n_features=20)
def test_load_compressed():
    """gzip- and bzip2-compressed copies load identically to the plain file."""
    X, y = _load_svmlight_local_test_file(datafile)
    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        with _svmlight_local_test_file_path(datafile).open("rb") as f:
            with gzip.open(tmp.name, "wb") as fh_out:
                shutil.copyfileobj(f, fh_out)
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_almost_equal(X.toarray(), Xgz.toarray())
    assert_array_almost_equal(y, ygz)
    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with _svmlight_local_test_file_path(datafile).open("rb") as f:
            with BZ2File(tmp.name, "wb") as fh_out:
                shutil.copyfileobj(f, fh_out)
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_almost_equal(X.toarray(), Xbz.toarray())
    assert_array_almost_equal(y, ybz)
def test_load_invalid_file():
    """A malformed svmlight file must raise ValueError."""
    with pytest.raises(ValueError):
        _load_svmlight_local_test_file(invalidfile)
def test_load_invalid_order_file():
    """A file with out-of-order feature indices must raise ValueError."""
    with pytest.raises(ValueError):
        _load_svmlight_local_test_file(invalidfile2)
def test_load_zero_based():
    """zero_based=False must reject a file that contains a 0 feature index."""
    stream = BytesIO(b"-1 4:1.\n1 0:1\n")
    with pytest.raises(ValueError):
        load_svmlight_file(stream, zero_based=False)
def test_load_zero_based_auto():
    """zero_based="auto" infers the indexing convention from the indices seen."""
    data1 = b"-1 1:1 2:2 3:3\n"
    data2 = b"-1 0:0 1:1\n"
    f1 = BytesIO(data1)
    # alone, data1 is treated as one-based: 3 features
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert X.shape == (1, 3)
    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    # together with data2 (which has a 0 index), both become zero-based: 4 features
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert X1.shape == (1, 4)
    assert X2.shape == (1, 4)
def test_load_with_qid():
    """qid fields can be ignored (query_id=False) or returned (query_id=True)."""
    # load svmfile with qid attribute
    data = b"""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12"""
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]])
@pytest.mark.skip(
    "testing the overflow of 32 bit sparse indexing requires a large amount of memory"
)
def test_load_large_qid():
    """
    load large libsvm / svmlight file with qid attribute. Tests 64-bit query ID
    """
    data = b"\n".join(
        (
            "3 qid:{0} 1:0.53 2:0.12\n2 qid:{0} 1:0.13 2:0.1".format(i).encode()
            for i in range(1, 40 * 1000 * 1000)
        )
    )
    X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
    assert_array_equal(y[-4:], [3, 2, 3, 2])
    assert_array_equal(np.unique(qid), np.arange(1, 40 * 1000 * 1000))
def test_load_invalid_file2():
    """An invalid file anywhere in the list given to load_svmlight_files raises.

    The paths are built outside the `pytest.raises` block so that only the
    actual load call can produce the expected ValueError (otherwise an
    unrelated error in path construction would make the test pass vacuously).
    """
    data_path = _svmlight_local_test_file_path(datafile)
    invalid_path = _svmlight_local_test_file_path(invalidfile)
    with pytest.raises(ValueError):
        load_svmlight_files([str(data_path), str(invalid_path), str(data_path)])
def test_not_a_filename():
    """A float is neither a path nor a file descriptor, so TypeError is raised."""
    # (in Python 3 plain integers are accepted as unix file descriptors,
    # which is why a float is used here instead)
    not_a_file = 0.42
    with pytest.raises(TypeError):
        load_svmlight_file(not_a_file)
def test_invalid_filename():
    """A nonexistent path must raise OSError."""
    missing_path = "trou pic nic douille"
    with pytest.raises(OSError):
        load_svmlight_file(missing_path)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_dump(csr_container):
    """Round-trip dump/load over dense, sparse and sliced X/y for several dtypes."""
    X_sparse, y_dense = _load_svmlight_local_test_file(datafile)
    X_dense = X_sparse.toarray()
    y_sparse = csr_container(np.atleast_2d(y_dense))
    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
    y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
    for X in (X_sparse, X_dense, X_sliced):
        for y in (y_sparse, y_dense, y_sliced):
            for zero_based in (True, False):
                for dtype in [np.float32, np.float64, np.int32, np.int64]:
                    f = BytesIO()
                    # we need to pass a comment to get the version info in;
                    # LibSVM doesn't grok comments so they're not put in by
                    # default anymore.
                    if sp.issparse(y) and y.shape[0] == 1:
                        # make sure y's shape is: (n_samples, n_labels)
                        # when it is sparse
                        y = y.T
                    # Note: with dtype=np.int32 we are performing unsafe casts,
                    # where X.astype(dtype) overflows. The result is
                    # then platform dependent and X_dense.astype(dtype) may be
                    # different from X_sparse.astype(dtype).asarray().
                    X_input = X.astype(dtype)
                    dump_svmlight_file(
                        X_input, y, f, comment="test", zero_based=zero_based
                    )
                    f.seek(0)
                    comment = f.readline()
                    comment = str(comment, "utf-8")
                    assert "scikit-learn %s" % sklearn.__version__ in comment
                    comment = f.readline()
                    comment = str(comment, "utf-8")
                    assert ["one", "zero"][zero_based] + "-based" in comment
                    X2, y2 = load_svmlight_file(f, dtype=dtype, zero_based=zero_based)
                    assert X2.dtype == dtype
                    assert_array_equal(X2.sorted_indices().indices, X2.indices)
                    X2_dense = X2.toarray()
                    if sp.issparse(X_input):
                        X_input_dense = X_input.toarray()
                    else:
                        X_input_dense = X_input
                    if dtype == np.float32:
                        # allow a rounding error at the last decimal place
                        assert_array_almost_equal(X_input_dense, X2_dense, 4)
                        assert_array_almost_equal(
                            y_dense.astype(dtype, copy=False), y2, 4
                        )
                    else:
                        # allow a rounding error at the last decimal place
                        assert_array_almost_equal(X_input_dense, X2_dense, 15)
                        assert_array_almost_equal(
                            y_dense.astype(dtype, copy=False), y2, 15
                        )
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_dump_multilabel(csr_container):
    """Multilabel y (dense or sparse) is dumped as comma-separated label lists."""
    X = [[1, 0, 3, 0, 5], [0, 0, 0, 0, 0], [0, 5, 0, 1, 0]]
    y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
    y_sparse = csr_container(y_dense)
    for y in [y_dense, y_sparse]:
        f = BytesIO()
        dump_svmlight_file(X, y, f, multilabel=True)
        f.seek(0)
        # make sure it dumps multilabel correctly
        assert f.readline() == b"1 0:1 2:3 4:5\n"
        assert f.readline() == b"0,2 \n"
        assert f.readline() == b"0,1 1:5 3:1\n"
def test_dump_concise():
    """Values are dumped with the most concise representation that round-trips."""
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [
        [one, two, three, exact, almost],
        [1e9, 2e18, 3e27, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
    ]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert f.readline() == b"1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"
    assert f.readline() == b"2.1 0:1000000000 1:2e+18 2:3e+27\n"
    assert f.readline() == b"3.01 \n"
    assert f.readline() == b"1.000000000000001 \n"
    assert f.readline() == b"1 \n"
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_almost_equal(y, y2)
def test_dump_comment():
    """Comments round-trip; non-ASCII bytes comments and NUL bytes are rejected."""
    X, y = _load_svmlight_local_test_file(datafile)
    X = X.toarray()
    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_almost_equal(y, y2)
    # a bytes comment that is not ASCII-decodable must be rejected
    utf8_comment = b"It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc"
    f = BytesIO()
    with pytest.raises(UnicodeDecodeError):
        dump_svmlight_file(X, y, f, comment=utf8_comment)
    # ...but the same text passed as str is accepted
    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_almost_equal(y, y2)
    f = BytesIO()
    with pytest.raises(ValueError):
        dump_svmlight_file(X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    """Mis-shaped or mis-sized y must raise ValueError when dumping."""
    X, y = _load_svmlight_local_test_file(datafile)
    f = BytesIO()
    y2d = [y]
    with pytest.raises(ValueError):
        dump_svmlight_file(X, y2d, f)
    f = BytesIO()
    with pytest.raises(ValueError):
        dump_svmlight_file(X, y[:-1], f)
def test_dump_query_id():
    """query_id values survive a dump/load round-trip."""
    X, y = _load_svmlight_local_test_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
    f.seek(0)
    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
    """64-bit (including negative) qid values load and round-trip correctly."""
    data = b"""
    1 qid:0 0:1 1:2 2:3
    0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
    0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
    3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985"""
    X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
    true_X = [
        [1, 2, 3],
        [1440446648, 72048431380967004, 236784985],
        [1440446648, 72048431380967004, 236784985],
        [1440446648, 72048431380967004, 236784985],
    ]
    true_y = [1, 0, 0, 3]
    trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
    assert_array_equal(qid, trueQID)
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
    f.seek(0)
    X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
    assert_array_equal(qid, trueQID)
    f.seek(0)
    X, y = load_svmlight_file(f, query_id=False, zero_based=True)
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_load_zeros(csr_container):
    """An all-zero matrix round-trips under every zero_based mode."""
    f = BytesIO()
    true_X = csr_container(np.zeros(shape=(3, 4)))
    true_y = np.array([0, 1, 0])
    dump_svmlight_file(true_X, true_y, f)
    for zero_based in ["auto", True, False]:
        f.seek(0)
        X, y = load_svmlight_file(f, n_features=4, zero_based=zero_based)
        assert_array_almost_equal(y, true_y)
        assert_array_almost_equal(X.toarray(), true_X.toarray())
@pytest.mark.parametrize("sparsity", [0, 0.1, 0.5, 0.99, 1])
@pytest.mark.parametrize("n_samples", [13, 101])
@pytest.mark.parametrize("n_features", [2, 7, 41])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_load_with_offsets(sparsity, n_samples, n_features, csr_container):
    """Loading a file in byte-offset chunks reassembles to the full matrix."""
    rng = np.random.RandomState(0)
    X = rng.uniform(low=0.0, high=1.0, size=(n_samples, n_features))
    if sparsity:
        X[X < sparsity] = 0.0
    X = csr_container(X)
    y = rng.randint(low=0, high=2, size=n_samples)
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    size = len(f.getvalue())
    # put some marks that are likely to happen anywhere in a row
    mark_0 = 0
    mark_1 = size // 3
    length_0 = mark_1 - mark_0
    mark_2 = 4 * size // 5
    length_1 = mark_2 - mark_1
    # load the original sparse matrix into 3 independent CSR matrices
    X_0, y_0 = load_svmlight_file(
        f, n_features=n_features, offset=mark_0, length=length_0
    )
    X_1, y_1 = load_svmlight_file(
        f, n_features=n_features, offset=mark_1, length=length_1
    )
    X_2, y_2 = load_svmlight_file(f, n_features=n_features, offset=mark_2)
    y_concat = np.concatenate([y_0, y_1, y_2])
    X_concat = sp.vstack([X_0, X_1, X_2])
    assert_array_almost_equal(y, y_concat)
    assert_array_almost_equal(X.toarray(), X_concat.toarray())
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_load_offset_exhaustive_splits(csr_container):
    """Every possible byte-offset split of the file reassembles to the data."""
    rng = np.random.RandomState(0)
    X = np.array(
        [
            [0, 0, 0, 0, 0, 0],
            [1, 2, 3, 4, 0, 6],
            [1, 2, 3, 4, 0, 6],
            [0, 0, 0, 0, 0, 0],
            [1, 0, 3, 0, 0, 0],
            [0, 0, 0, 0, 0, 1],
            [1, 0, 0, 0, 0, 0],
        ]
    )
    X = csr_container(X)
    n_samples, n_features = X.shape
    y = rng.randint(low=0, high=2, size=n_samples)
    query_id = np.arange(n_samples) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id)
    f.seek(0)
    size = len(f.getvalue())
    # load the same data in 2 parts with all the possible byte offsets to
    # locate the split so has to test for particular boundary cases
    for mark in range(size):
        f.seek(0)
        X_0, y_0, q_0 = load_svmlight_file(
            f, n_features=n_features, query_id=True, offset=0, length=mark
        )
        X_1, y_1, q_1 = load_svmlight_file(
            f, n_features=n_features, query_id=True, offset=mark, length=-1
        )
        q_concat = np.concatenate([q_0, q_1])
        y_concat = np.concatenate([y_0, y_1])
        X_concat = sp.vstack([X_0, X_1])
        assert_array_almost_equal(y, y_concat)
        assert_array_equal(query_id, q_concat)
        assert_array_almost_equal(X.toarray(), X_concat.toarray())
def test_load_with_offsets_error():
    """Requesting an offset without n_features raises an informative error."""
    expected_message = "n_features is required"
    with pytest.raises(ValueError, match=expected_message):
        _load_svmlight_local_test_file(datafile, offset=3, length=3)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_multilabel_y_explicit_zeros(tmp_path, csr_container):
    """
    Ensure that if y contains explicit zeros (i.e. elements of y.data equal to
    0) then those explicit zeros are not encoded.
    """
    save_path = str(tmp_path / "svm_explicit_zero")
    rng = np.random.RandomState(42)
    X = rng.randn(3, 5).astype(np.float64)
    indptr = np.array([0, 2, 3, 6])
    indices = np.array([0, 2, 2, 0, 1, 2])
    # The first and last element are explicit zeros.
    data = np.array([0, 1, 1, 1, 1, 0])
    y = csr_container((data, indices, indptr), shape=(3, 3))
    # y as a dense array would look like
    # [[0, 0, 1],
    #  [0, 0, 1],
    #  [1, 1, 0]]
    dump_svmlight_file(X, y, save_path, multilabel=True)
    _, y_load = load_svmlight_file(save_path, multilabel=True)
    # the explicit zeros at (0, 0) and (2, 2) must not appear as labels
    y_true = [(2.0,), (2.0,), (0.0, 1.0)]
    assert y_load == y_true
def test_dump_read_only(tmp_path):
    """Ensure that there is no ValueError when dumping a read-only `X`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/28026
    """
    rng = np.random.RandomState(42)
    X = rng.randn(5, 2)
    y = rng.randn(5)
    # Convert to memmap-backed arrays, which are read-only
    X, y = create_memmap_backed_data([X, y])
    save_path = str(tmp_path / "svm_read_only")
    dump_svmlight_file(X, y, save_path)
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@datasets@tests@test_svmlight_format.py@.PATH_END.py
|
{
"filename": "update_det_cal.py",
"repo_name": "simonsobs/sotodlib",
"repo_path": "sotodlib_extracted/sotodlib-master/sotodlib/site_pipeline/update_det_cal.py",
"type": "Python"
}
|
"""
This module is used to compute detector calibration parameters from sodetlib
data products.
The naive computation is described in the `sodetlib documentation. <https://sodetlib.readthedocs.io/en/latest/operations/bias_steps.html#in-transition>`_
Details about the RP and loopgain correction `can be found on our confluence. <https://simonsobs.atlassian.net/wiki/spaces/~5570586d07625a6be74c8780e4b96f6156f5e6/blog/2024/02/02/286228683/Nonlinear+TES+model+using+RP+curve>`_
"""
import traceback
import os
import yaml
from dataclasses import dataclass, astuple, fields
import numpy as np
from tqdm.auto import tqdm
import logging
from typing import Optional, Union, Dict, List, Any, Tuple, Literal
from queue import Queue
import argparse
from sotodlib import core
from sotodlib.io.metadata import write_dataset, ResultSet
from sotodlib.io.load_book import get_cal_obsids
import sotodlib.site_pipeline.util as sp_util
import multiprocessing as mp
import sodetlib.tes_param_correction as tpc
from sodetlib.operations.iv import IVAnalysis
from sodetlib.operations.bias_steps import BiasStepAnalysis
# stolen from pysmurf, max bias volt / num_bits
DEFAULT_RTM_BIT_TO_VOLT = 10 / 2**19
# Default pA-per-phi0 conversion, used when metadata does not provide one.
DEFAULT_pA_per_phi0 = 9e6
TES_BIAS_COUNT = 12  # per detset / primary file group
logger = logging.getLogger("det_cal")
# Only initialize handlers once, even if this module is re-imported.
if not logger.hasHandlers():
    sp_util.init_logger("det_cal")
def get_data_root(ctx: core.Context) -> str:
    """Infer the root data directory from the context's obsfiledb.

    Takes an arbitrary file path from the ``files`` table and strips the
    trailing ``<obs-or-oper>/<timecode>/<obsid>/<fname>`` components.
    """
    cursor = ctx.obsfiledb.conn.execute("select name from files limit 1")
    sample_path = [row[0] for row in cursor][0]
    # split out <data_root>/obs/<timecode>/<obsid>/fname
    parent = sample_path
    for _level in range(4):
        parent = os.path.dirname(parent)
    return parent
# NOTE(review): @dataclass generates nothing useful here since all attributes
# are set in the explicit __init__; kept for backward compatibility.
@dataclass
class DetCalCfg:
    """
    Class for configuring the behavior of the det-cal update script.

    Args
    -------------
    root_dir: str
        Path to the root of the results directory.
    context_path: str
        Path to the context file to use.
    data_root: Optional[str]
        Root path of L3 data. If this is not specified, will automatically
        determine it based on the context.
    raise_exceptions: bool
        If Exceptions should be raised in the get_cal_resset function.
        Defaults to False.
    apply_cal_correction: bool
        If True, apply the RP calibration correction, and use corrected results
        for Rtes, Si, Pj, and loopgain when successful. Defaults to True.
    index_path: str
        Path to the index file to use for the det_cal database. Defaults to
        "det_cal.sqlite".
    h5_path: str
        Path to the HDF5 file to use for the det_cal database. Defaults to
        "det_cal.h5".
    cache_failed_obsids: bool
        If True, will cache failed obs-ids to avoid re-running them. Defaults to
        True.
    failed_cache_file: str
        Path to the yaml file that will store failed obsids. Defaults to
        "failed_obsids.yaml".
    show_pb: bool
        If True, show progress bar in the run_update function. Defaults to True.
    param_correction_config: dict
        Configuration for the TES param correction. If None, default values are used.
    run_method: str
        Must be "site" or "nersc". If "site", this function will not parallelize
        SQLite access, and will only parallelize the TES parameter correction.
        If "nersc", this will parallelize both SQLite access and the TES param
        correction, using ``nprocs_obs_info`` and ``nprocs_result_set``
        processes respectively.
    nprocs_obs_info: int
        Number of processes to use to acquire observation info from the file system.
        Defaults to 1.
    nprocs_result_set: int
        Number of parallel processes that should to compute the TES parameters,
        and to run the TES parameter correction.
    num_obs: Optional[int]
        Max number of observations to process per run_update call. If not set,
        will run on all available observations.
    log_level: str
        Logging level for the logger.
    multiprocess_start_method: str
        Method to use to start child processes. Can be "spawn" or "fork".
    """

    def __init__(
        self,
        root_dir: str,
        context_path: str,
        *,
        data_root: Optional[str] = None,
        raise_exceptions: bool = False,
        apply_cal_correction: bool = True,
        index_path: str = "det_cal.sqlite",
        h5_path: str = "det_cal.h5",
        cache_failed_obsids: bool = True,
        failed_cache_file: str = "failed_obsids.yaml",
        show_pb: bool = True,
        param_correction_config: Union[Dict[str, Any], None, tpc.AnalysisCfg] = None,
        run_method: str = "site",
        nprocs_obs_info: int = 1,
        nprocs_result_set: int = 10,
        num_obs: Optional[int] = None,
        log_level: str = "DEBUG",
        multiprocess_start_method: Literal["spawn", "fork"] = "spawn"
    ) -> None:
        self.root_dir = root_dir
        self.context_path = os.path.expandvars(context_path)
        if data_root is None:
            # Infer the L3 data root from the context's obsfiledb.
            ctx = core.Context(self.context_path)
            self.data_root = get_data_root(ctx)
        else:
            # BUG FIX: previously self.data_root was never assigned when
            # data_root was passed explicitly, causing an AttributeError later.
            self.data_root = data_root
        self.raise_exceptions = raise_exceptions
        self.apply_cal_correction = apply_cal_correction
        self.cache_failed_obsids = cache_failed_obsids
        self.show_pb = show_pb
        self.run_method = run_method
        if self.run_method not in ["site", "nersc"]:
            raise ValueError("run_method must be in: ['site', 'nersc']")
        self.nprocs_obs_info = nprocs_obs_info
        self.nprocs_result_set = nprocs_result_set
        self.num_obs = num_obs
        self.log_level = log_level
        self.multiprocess_start_method = multiprocess_start_method
        self.root_dir = os.path.expandvars(self.root_dir)
        if not os.path.exists(self.root_dir):
            raise ValueError(f"Root dir does not exist: {self.root_dir}")

        def parse_path(path: str) -> str:
            "Expand vars and make path absolute (relative paths live in root_dir)"
            p = os.path.expandvars(path)
            if not os.path.isabs(p):
                p = os.path.join(self.root_dir, p)
            return p

        self.index_path = parse_path(index_path)
        self.h5_path = parse_path(h5_path)
        self.failed_cache_file = parse_path(failed_cache_file)
        # Correction config: accept None (defaults), a kwargs dict, or a
        # ready-made AnalysisCfg instance.
        kw = {"show_pb": False, "default_nprocs": self.nprocs_result_set}
        if param_correction_config is None:
            self.param_correction_config = tpc.AnalysisCfg(**kw)  # type: ignore
        elif isinstance(param_correction_config, dict):
            kw.update(param_correction_config)
            self.param_correction_config = tpc.AnalysisCfg(**kw)  # type: ignore
        else:
            self.param_correction_config = param_correction_config
        self.setup_files()

    @classmethod
    def from_yaml(cls, path) -> "DetCalCfg":
        """Build a DetCalCfg from a yaml file containing constructor kwargs."""
        with open(path, "r") as f:
            d = yaml.safe_load(f)
        return cls(**d)

    def setup_files(self) -> None:
        """Create directories and databases if they don't exist"""
        if not os.path.exists(self.failed_cache_file):
            # If file doesn't exist yet, just create an empty one
            with open(self.failed_cache_file, "w") as f:
                yaml.dump({}, f)
        if not os.path.exists(self.index_path):
            scheme = core.metadata.ManifestScheme()
            scheme.add_exact_match("obs:obs_id")
            scheme.add_data_field("dataset")
            db = core.metadata.ManifestDb(scheme=scheme)
            db.to_file(self.index_path)
@dataclass
class CalInfo:
    """Per-detector calibration quantities destined for the caldb.

    Bias-step quantities (r_tes, r_frac, p_bias, s_i, loopgain) hold the
    RP-corrected values when the correction succeeds; the ``naive_*`` fields
    always hold the uncorrected bias-step estimates. IV-derived fields are
    bg, polarity, r_n and p_sat.
    """

    readout_id: str = ""                        # detector readout ID
    r_tes: float = np.nan                       # TES resistance at bias point [Ohms]
    r_frac: float = np.nan                      # fractional resistance r_tes / r_n
    p_bias: float = np.nan                      # bias power at bias point [W]
    s_i: float = np.nan                         # current responsivity [1/V]
    phase_to_pW: float = np.nan                 # phase-to-power factor [pW/rad]
    v_bias: float = np.nan                      # commanded bias voltage [V]
    tau_eff: float = np.nan                     # effective time constant [sec]
    loopgain: float = np.nan                    # TES loopgain
    tes_param_correction_success: bool = False  # True if RP correction applied
    bg: int = -1                                # bias group; -1 if unassigned
    polarity: int = 1                           # detector response polarity
    r_n: float = np.nan                         # normal resistance [Ohms] (from IV)
    p_sat: float = np.nan                       # power at 90% R_n [W] (from IV)
    naive_r_tes: float = np.nan                 # uncorrected r_tes
    naive_r_frac: float = np.nan                # uncorrected r_frac
    naive_p_bias: float = np.nan                # uncorrected p_bias
    naive_s_i: float = np.nan                   # uncorrected s_i

    @classmethod
    def dtype(cls) -> List[Tuple[str, Any]]:
        """Return the structured-array dtype corresponding to this class,
        renaming ``readout_id`` to the metadata key ``dets:readout_id``."""
        return [
            ("dets:readout_id", "<U40") if f.name == "readout_id" else (f.name, f.type)
            for f in fields(cls)
        ]
@dataclass
class ObsInfo:
    """
    Class containing observation info gathered from obsdbs and the file system
    that is required to compute calibration results.

    Attributes
    ------------
    obs_id: str
        Obs id.
    am: AxisManager
        AxisManager containing metadata for the given observation.
    iv_obsids: dict
        Dict mapping detset to iv obs-id.
    bs_obsids: dict
        Dict mapping detset to bias step obs-id.
    iva_files: dict
        Dict mapping detset to IV analysis file path.
    bsa_files: dict
        Dict mapping detset to bias step analysis file path.
    """

    obs_id: str
    am: core.AxisManager
    iv_obsids: Dict[str, str]
    bs_obsids: Dict[str, str]
    iva_files: Dict[str, str]
    bsa_files: Dict[str, str]
@dataclass
class ObsInfoResult:
    """Result wrapper for get_obs_info: carries the success flag, a formatted
    traceback on failure, and the gathered ObsInfo on success."""

    obs_id: str
    success: bool = False
    traceback: str = ""
    obs_info: Optional[ObsInfo] = None
def _find_smurf_cal_file(data_root: str, oid: str, tag: str, label: str) -> str:
    """Return the path of the sodetlib analysis file for cal obs ``oid``
    whose filename contains ``tag`` (e.g. "iv" or "bias_step").

    Paths are constructed directly from the obs-id instead of querying
    obsfiledb because obsfiledb queries are slow on nersc.

    Raises ValueError if no matching file exists in the Z_smurf directory.
    """
    timecode = oid.split("_")[1][:5]
    zsmurf_dir = os.path.join(data_root, "oper", timecode, oid, "Z_smurf")
    for fname in os.listdir(zsmurf_dir):
        if tag in fname:
            return os.path.join(zsmurf_dir, fname)
    raise ValueError(f"{label} data not found in cal obs {oid}")


def get_obs_info(cfg: DetCalCfg, obs_id: str) -> ObsInfoResult:
    """Gather the metadata and cal-file paths needed to compute det-cal
    results for one observation.

    On any failure the exception traceback is recorded in the result instead
    of propagating (unless cfg.raise_exceptions is set).

    Args
    ------
    cfg: DetCalCfg
        DetCal configuration object.
    obs_id: str
        Obs-id of the observation to gather info for.
    """
    res = ObsInfoResult(obs_id)
    try:
        ctx = core.Context(cfg.context_path)
        am = ctx.get_obs(
            obs_id,
            samples=(0, 1),
            ignore_missing=True,
            no_signal=True,
            on_missing={"det_cal": "skip"},
        )
        if "smurf" not in am.det_info:
            raise ValueError(f"Missing smurf info for {obs_id}")
        logger.debug(f"Getting cal obsids ({obs_id})")
        iv_obsids = get_cal_obsids(ctx, obs_id, "iv")
        logger.debug(f"Loading Bias step and IV data ({obs_id})")
        iva_files = {}
        bsa_files = {}
        for dset, oid in iv_obsids.items():
            if oid is None:
                logger.debug("missing IV data for %s", dset)
                continue
            iva_files[dset] = _find_smurf_cal_file(cfg.data_root, oid, "iv", "IV")
        if len(iva_files) == 0:
            raise ValueError(f"No IV data found for {obs_id}")
        # Load in bias steps
        bias_step_obsids = get_cal_obsids(ctx, obs_id, "bias_steps")
        for dset, oid in bias_step_obsids.items():
            if oid is None:
                logger.debug("missing bias step data for %s", dset)
                continue
            bsa_files[dset] = _find_smurf_cal_file(
                cfg.data_root, oid, "bias_step", "Bias step"
            )
        res.obs_info = ObsInfo(
            obs_id=obs_id,
            am=am,
            iv_obsids=iv_obsids,
            bs_obsids=bias_step_obsids,
            iva_files=iva_files,
            bsa_files=bsa_files,
        )
        res.success = True
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # propagate instead of being recorded as an obs failure.
        res.traceback = traceback.format_exc()
        if cfg.raise_exceptions:
            raise
    return res
@dataclass
class CalRessetResult:
    """
    Results object for the get_cal_resset function.
    """

    obs_info: ObsInfo
    success: bool = False
    traceback: Optional[str] = None  # formatted traceback on failure
    fail_msg: Optional[str] = None  # message stored in the failed-obsid cache
    # Per-detset TES param correction results (only when correction enabled).
    correction_results: Optional[Dict[str, List[tpc.CorrectionResults]]] = None
    # Structured array of CalInfo rows (dtype from CalInfo.dtype()).
    result_set: Optional[np.ndarray] = None
def get_cal_resset(cfg: DetCalCfg, obs_info: ObsInfo, pool=None) -> CalRessetResult:
    """
    Returns calibration ResultSet for a given ObsId. This pulls IV and bias step
    data for each detset in the observation, and uses that to compute CalInfo
    for each detector in the observation.

    Args
    ------
    cfg: DetCalCfg
        DetCal configuration object.
    obs_info: ObsInfo
        ObsInfo object.
    pool: Optional[multiprocessing.Pool]
        If specified, will run TES param correction in parallel using processes
        from this pool.
    """
    obs_id = obs_info.obs_id
    res = CalRessetResult(obs_info)
    logger.debug("Computing Result set for %s", obs_info.obs_id)
    # Need to reset logger here because this may be created new for spawned process
    logger.setLevel(getattr(logging, cfg.log_level.upper()))
    for ch in logger.handlers:
        ch.setLevel(getattr(logging, cfg.log_level.upper()))
    try:
        am = obs_info.am
        ivas = {
            dset: IVAnalysis.load(iva_file)
            for dset, iva_file in obs_info.iva_files.items()
        }
        bsas = {
            dset: BiasStepAnalysis.load(bsa_file)
            for dset, bsa_file in obs_info.bsa_files.items()
        }
        if cfg.apply_cal_correction:
            for iva in ivas.values():
                # Run R_L correction if analysis version is old...
                if getattr(iva, "analysis_version", 0) == 0:
                    # This will edit IVA dicts in place
                    logger.debug("Recomputing IV analysis for %s", obs_id)
                    tpc.recompute_ivpars(iva, cfg.param_correction_config)
        # Conversion factors shared by all detsets, taken from the first IV.
        iva = list(ivas.values())[0]
        rtm_bit_to_volt = iva.meta["rtm_bit_to_volt"]
        pA_per_phi0 = iva.meta["pA_per_phi0"]
        cals = [CalInfo(rid) for rid in am.det_info.readout_id]
        if len(cals) == 0:
            raise ValueError(f"No detectors found for {obs_id}")
        # Add IV info
        for i, cal in enumerate(cals):
            band = am.det_info.smurf.band[i]
            chan = am.det_info.smurf.channel[i]
            detset = am.det_info.detset[i]
            iva = ivas[detset]
            if iva is None:  # No IV analysis for this detset
                continue
            ridx = np.where((iva.bands == band) & (iva.channels == chan))[0]
            # BUG FIX: was `if not ridx:` -- for a match at row 0 np.where
            # returns array([0]), which is falsy, so that channel was wrongly
            # skipped. Test explicitly for an empty match instead.
            if len(ridx) == 0:  # Channel doesn't exist in IV analysis
                continue
            ridx = ridx[0]
            cal.bg = iva.bgmap[ridx]
            cal.polarity = iva.polarity[ridx]
            cal.r_n = iva.R_n[ridx]  # type: ignore
            cal.p_sat = iva.p_sat[ridx]  # type: ignore
        obs_biases = dict(
            zip(am.bias_lines.vals, am.biases[:, 0] * 2 * rtm_bit_to_volt)
        )
        bias_line_is_valid = {k: True for k in obs_biases.keys()}
        # check to see if biases have changed between bias steps and obs
        for bsa in bsas.values():
            if bsa is None:
                continue
            for bg, vb_bsa in enumerate(bsa.Vbias):
                bl_label = f"{bsa.meta['stream_id']}_b{bg:0>2}"
                # Usually we can count on bias voltages of bias lines >= 12 to be
                # Nan, however we have seen cases where they're not, so we also
                # restrict by count.
                if np.isnan(vb_bsa) or bg >= TES_BIAS_COUNT:
                    bias_line_is_valid[bl_label] = False
                    continue
                if np.abs(vb_bsa - obs_biases[bl_label]) > 0.1:
                    logger.debug(
                        "bias step and obs biases don't match for %s", bl_label
                    )
                    bias_line_is_valid[bl_label] = False
        # Add TES corrected params
        correction_results: Dict[str, List[tpc.CorrectionResults]] = {}
        if cfg.apply_cal_correction:
            logger.debug("Applying TES param corrections (%s)", obs_id)
            for dset in bsas:
                rs = []
                if pool is None:
                    for b, c in zip(ivas[dset].bands, ivas[dset].channels):
                        chdata = tpc.RpFitChanData.from_data(
                            ivas[dset], bsas[dset], b, c
                        )
                        rs.append(
                            tpc.run_correction(chdata, cfg.param_correction_config)
                        )
                else:
                    rs = tpc.run_corrections_parallel(
                        ivas[dset], bsas[dset], cfg.param_correction_config, pool=pool
                    )
                correction_results[dset] = rs
        res.correction_results = correction_results

        def find_correction_results(band, chan, dset):
            """Return the correction result for (band, chan) in dset, or None."""
            for r in correction_results[dset]:
                if r.chdata.band == band and r.chdata.channel == chan:
                    return r
            return None

        for i, cal in enumerate(cals):
            band = am.det_info.smurf.band[i]
            chan = am.det_info.smurf.channel[i]
            detset = am.det_info.detset[i]
            stream_id = am.det_info.stream_id[i]
            bg = cal.bg
            bsa = bsas[detset]
            if bsa is None or bg == -1:
                continue
            bl_label = f"{stream_id}_b{bg:0>2}"
            if not bias_line_is_valid[bl_label]:
                continue
            ridx = np.where((bsa.bands == band) & (bsa.channels == chan))[0]
            # BUG FIX: same empty-match test as above (`if not ridx:` skipped
            # valid matches at row 0).
            if len(ridx) == 0:  # Channel doesn't exist in bias step analysis
                continue
            if cfg.apply_cal_correction:
                correction = find_correction_results(band, chan, detset)
                if correction is None:
                    # logger.warn is deprecated; use logger.warning.
                    logger.warning(
                        "Unable to find correction result for %s %s %s (%s)",
                        band,
                        chan,
                        detset,
                        obs_id,
                    )
                    use_correction = False
                    cal.tes_param_correction_success = False
                else:
                    use_correction = correction.success
                    cal.tes_param_correction_success = correction.success
            else:
                use_correction = False
            ridx = ridx[0]
            cal.tau_eff = bsa.tau_eff[ridx]
            if bg != -1:  # always true here; bg == -1 handled above
                cal.v_bias = bsa.Vbias[bg]
            if use_correction and correction.corrected_params is not None:
                cpars = correction.corrected_params
                cal.r_tes = cpars.corrected_R0
                cal.r_frac = cpars.corrected_R0 / cal.r_n
                cal.s_i = cpars.corrected_Si * 1e6
                cal.p_bias = cpars.corrected_Pj * 1e-12
                cal.loopgain = cpars.loopgain
            else:
                cal.r_tes = bsa.R0[ridx]
                cal.r_frac = bsa.Rfrac[ridx]
                cal.p_bias = bsa.Pj[ridx]
                cal.s_i = bsa.Si[ridx]
            # Save naive parameters even if we're using corrected version
            cal.naive_r_tes = bsa.R0[ridx]
            cal.naive_r_frac = bsa.Rfrac[ridx]
            cal.naive_s_i = bsa.Si[ridx]
            cal.naive_p_bias = bsa.Pj[ridx]
            # Guard against a zero responsivity before dividing.
            if cal.s_i == 0:
                cal.phase_to_pW = np.nan
            else:
                cal.phase_to_pW = pA_per_phi0 / (2 * np.pi) / cal.s_i * cal.polarity
        res.result_set = np.array([astuple(c) for c in cals], dtype=CalInfo.dtype())
        res.success = True
    except Exception:
        res.traceback = traceback.format_exc()
        res.fail_msg = res.traceback
        if cfg.raise_exceptions:
            raise
    return res
def get_obsids_to_run(cfg: DetCalCfg) -> List[str]:
    """
    Returns list of obs-ids to process, based on the configuration object.
    This will include non-processed obs-ids that are not found in the fail
    cache, and will be limited to cfg.num_obs entries.
    """
    ctx = core.Context(cfg.context_path)
    # Find all obs_ids that have not been processed
    with open(cfg.failed_cache_file, "r") as f:
        failed_cache = yaml.safe_load(f)
    # An empty yaml file loads as None.
    if failed_cache is not None:
        failed_obsids = set(failed_cache.keys())
    else:
        failed_obsids = set()
    db = core.metadata.ManifestDb(cfg.index_path)
    obs_ids_all = set(ctx.obsdb.query('type=="obs"')["obs_id"])
    processed_obsids = set(db.get_entries(["dataset"])["dataset"])
    # Newest observations first (obs-ids sort by timestamp).
    obs_ids = sorted(list(obs_ids_all - processed_obsids - failed_obsids), reverse=True)
    if cfg.num_obs is not None:
        obs_ids = obs_ids[: cfg.num_obs]
    return obs_ids
def add_to_failed_cache(cfg: DetCalCfg, obs_id: str, msg: str) -> None:
    """Record a failed obs-id and its error message in the yaml fail cache,
    so later runs skip it. No-op when caching is disabled or the failure was
    a KeyboardInterrupt."""
    if "KeyboardInterrupt" in msg:
        # An interrupted run is not a real failure; don't blacklist the obs.
        return
    if not cfg.cache_failed_obsids:
        return
    logger.info(f"Adding {obs_id} to failed_file_cache")
    with open(cfg.failed_cache_file, "r") as f:
        cache = yaml.safe_load(f)
    if cache is None:  # empty yaml file loads as None
        cache = {}
    cache[str(obs_id)] = msg
    with open(cfg.failed_cache_file, "w") as f:
        yaml.dump(cache, f)
def handle_result(result: CalRessetResult, cfg: DetCalCfg) -> None:
    """
    Handles a CalRessetResult. If successful, this will add to the manifestdb,
    if not this will add to the failed cache if cfg.cache_failed_obsids is True.
    """
    obs_id = str(result.obs_info.obs_id)
    if not result.success:
        logger.error(f"Failed on obs_id: {obs_id}")
        logger.error(result.traceback)
        msg = result.fail_msg
        if msg is None:
            msg = "unknown error"
        add_to_failed_cache(cfg, obs_id, msg)
        return
    logger.info(f"Adding obs_id {obs_id} to dataset")
    rset = ResultSet.from_friend(result.result_set)
    # Dataset is named after the obs_id; overwrite makes reprocessing idempotent.
    write_dataset(rset, cfg.h5_path, obs_id, overwrite=True)
    db = core.metadata.ManifestDb(cfg.index_path)
    # Store the h5 path relative to the index so the results dir is relocatable.
    relpath = os.path.relpath(cfg.h5_path, start=os.path.dirname(cfg.index_path))
    db.add_entry(
        {"obs:obs_id": obs_id, "dataset": obs_id}, filename=relpath, replace=True
    )
def run_update_site(cfg: DetCalCfg) -> None:
    """
    Main run script for computing det-cal results at the site. This will
    loop over obs-ids and serially gather the ObsInfo from the filesystem and
    sqlite dbs, and then compute the calibration results. A processing pool
    of cfg.nprocs_result_set processes will be used to parallelize the TES
    correction computation. If you have lots of compute power, and are limited
    by filesystem or sqlite access, consider using the 'nersc' update function.

    Args:
    ------
    cfg: DetCalCfg
        DetCalCfg object.
    """
    logger.setLevel(getattr(logging, cfg.log_level.upper()))
    for ch in logger.handlers:
        ch.setLevel(getattr(logging, cfg.log_level.upper()))
    obs_ids = get_obsids_to_run(cfg)
    logger.info(f"Processing {len(obs_ids)} obsids...")
    mp.set_start_method(cfg.multiprocess_start_method)
    # The pool only parallelizes the per-channel TES param correction inside
    # get_cal_resset; obs-info gathering and db writes stay in this process.
    with mp.Pool(cfg.nprocs_result_set) as pool:
        for oid in tqdm(obs_ids, disable=(not cfg.show_pb)):
            res = get_obs_info(cfg, oid)
            if not res.success:
                logger.info(f"Could not get obs info for obs id: {oid}")
                logger.error(res.traceback)
            if res.obs_info is None:
                continue
            result_set = get_cal_resset(cfg, res.obs_info, pool=pool)
            handle_result(result_set, cfg)
def run_update_nersc(cfg: DetCalCfg) -> None:
    """
    Main run script for computing det-cal results. This does the same thing as
    ``run_update_site`` however instantiates two separate pools for gathering
    ObsInfo and computing the ResultSets. This is useful in situations where
    sqlite/filesystem access are bottlenecks (such as nersc) so that ObsInfo can
    be gathered in parallel, and this can be done while ResultSet computation is
    ongoing. Because concurrent sqlite access can be limited, it is recommended
    to keep cfg.nprocs_obs_info low (<10), while ``cfg.nprocs_result_set`` can be
    set arbitrarily large as to use remaining available resources.

    Args:
    ------
    cfg: DetCalCfg
        DetCalCfg object.
    """
    logger.setLevel(getattr(logging, cfg.log_level.upper()))
    for ch in logger.handlers:
        ch.setLevel(getattr(logging, cfg.log_level.upper()))
    obs_ids = get_obsids_to_run(cfg)
    logger.info(f"Processing {len(obs_ids)} obsids...")
    pb = tqdm(total=len(obs_ids), disable=(not cfg.show_pb))

    def callback(result: CalRessetResult):
        # Runs in the main process when a result-set task finishes.
        pb.update()
        handle_result(result, cfg)

    def errback(e):
        logger.info(e)
        raise e

    # We split into multiple pools because:
    # - we don't want to overload sqlite files with too much concurrent access
    # - we want to be able to continue getting the next obs_info data while ressets are being computed
    mp.set_start_method(cfg.multiprocess_start_method)
    pool1 = mp.Pool(cfg.nprocs_obs_info)
    pool2 = mp.Pool(cfg.nprocs_result_set)
    # Queues collect AsyncResults so we can wait for all work before exiting.
    resset_async_results: Queue = Queue()
    obsinfo_async_results: Queue = Queue()

    def get_obs_info_callback(result: ObsInfoResult):
        # Chain: each completed obs-info task immediately schedules its
        # result-set computation on the second pool.
        if result.success:
            r = pool2.apply_async(
                get_cal_resset,
                args=(cfg, result.obs_info),
                callback=callback,
                error_callback=errback,
            )
            resset_async_results.put(r)
        else:
            pb.update()
            add_to_failed_cache(cfg, result.obs_id, result.traceback)
            logger.error(
                f"Failed to get obs_info for {result.obs_id}:\n{result.traceback}"
            )

    try:
        for obs_id in obs_ids:
            a = pool1.apply_async(
                get_obs_info,
                args=(cfg, obs_id),
                callback=get_obs_info_callback,
                error_callback=errback,
            )
            obsinfo_async_results.put(a)
        # Drain obs-info tasks first (they enqueue resset tasks), then ressets.
        while not obsinfo_async_results.empty():
            obsinfo_async_results.get().wait()
        while not resset_async_results.empty():
            resset_async_results.get().wait()
    finally:
        pool1.terminate()
        pool1.join()
        pool2.terminate()
        pool2.join()
        pb.close()
    logger.info("Finished updates")
def main(config_file: str) -> None:
    """
    Run update function. This will chose the correct method to run based on
    ``cfg.run_method``.
    """
    cfg = DetCalCfg.from_yaml(config_file)
    runners = {"site": run_update_site, "nersc": run_update_nersc}
    runner = runners.get(cfg.run_method)
    if runner is None:
        raise ValueError(f"Unknown run_method: {cfg.run_method}")
    runner(cfg)
def get_parser(
    parser: Optional[argparse.ArgumentParser] = None,
) -> argparse.ArgumentParser:
    """Return the CLI argument parser, creating one if *parser* is None."""
    p = argparse.ArgumentParser() if parser is None else parser
    p.add_argument(
        "config_file",
        type=str,
        help="yaml file with configuration for update script.",
    )
    return p
if __name__ == "__main__":
    # CLI entry point: parse the config-file path and run the update.
    parser = get_parser()
    args = parser.parse_args()
    main(config_file=args.config_file)
|
simonsobsREPO_NAMEsotodlibPATH_START.@sotodlib_extracted@sotodlib-master@sotodlib@site_pipeline@update_det_cal.py@.PATH_END.py
|
{
"filename": "crawler.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/chains/natbot/crawler.py",
"type": "Python"
}
|
# Backwards-compatibility shim: re-export the natbot crawler symbols from
# `langchain` so existing `langchain_community` import paths keep working.
from langchain.chains.natbot.crawler import (
    Crawler,
    ElementInViewPort,
    black_listed_elements,
)

__all__ = ["ElementInViewPort", "Crawler", "black_listed_elements"]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@chains@natbot@crawler.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sunburst/hoverlabel/font/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for the `sunburst.hoverlabel.font.size`
    property: a number >= 1, optionally provided as an array."""

    def __init__(
        self, plotly_name="size", parent_name="sunburst.hoverlabel.font", **kwargs
    ):
        # kwargs.pop(...) lets callers override the generated defaults.
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            min=kwargs.pop("min", 1),
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sunburst@hoverlabel@font@_size.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pathlib2/py3/pathlib2/__init__.py",
"type": "Python"
}
|
# Copyright (c) 2014-2021 Matthias C. M. Troffaes and contributors
# Copyright (c) 2012-2014 Antoine Pitrou and contributors
# Distributed under the terms of the MIT License.
import ctypes
import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
from typing import (
TypeVar, Type, Union, Text, Tuple, List, Any, Callable, Iterable, Optional
)
import six
import sys
from errno import EINVAL, ENOENT, ENOTDIR, EBADF
from errno import EEXIST, EPERM, EACCES
from operator import attrgetter
from stat import (
S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO)
# Sequence moved to collections.abc in Python 3.3.
if six.PY2:
    from collections import Sequence
else:
    from collections.abc import Sequence
# Percent-encoding of raw bytes lives in different modules on py2/py3.
if six.PY2:
    import urllib
    urlquote_from_bytes = urllib.quote  # type: Callable[[bytes], str]
else:
    import urllib.parse
    urlquote_from_bytes = urllib.parse.quote_from_bytes
# `intern` is a builtin on py2 but moved to sys.intern on py3.
try:
    intern = intern  # type: ignore
except NameError:
    intern = sys.intern  # type: ignore
supports_symlinks = True
if os.name == 'nt':
    import nt  # type: ignore
    # _getfinalpathname (needed for symlink resolution) requires Windows
    # Vista+ (major >= 6) and Python 3.2+.
    if sys.getwindowsversion().major >= 6 \
            and sys.version_info >= (3, 2):  # type: ignore
        from nt import _getfinalpathname as _gfpn
        _getfinalpathname = _gfpn  # type: Optional[Callable[[str], str]]
    else:
        supports_symlinks = False
        _getfinalpathname = None
else:
    nt = None
# os.scandir is stdlib on 3.5+; otherwise fall back to the scandir backport.
try:
    from os import scandir as os_scandir  # type: ignore
except ImportError:
    from scandir import scandir as os_scandir  # type: ignore
__all__ = [
    "PurePath", "PurePosixPath", "PureWindowsPath",
    "Path", "PosixPath", "WindowsPath",
]
#
# Internals
#
# EBADF - guard against macOS `stat` throwing EBADF
_IGNORED_ERROS = (ENOENT, ENOTDIR, EBADF)
_IGNORED_WINERRORS = (
    21,  # ERROR_NOT_READY - drive exists but is not accessible
)
def _ignore_error(exception):
    # type: (BaseException) -> bool
    """Return True if *exception* is an OS error that should be treated as
    "path does not exist" rather than propagated."""
    errno_value = getattr(exception, 'errno', None)
    winerror_value = getattr(exception, 'winerror', None)
    return errno_value in _IGNORED_ERROS or winerror_value in _IGNORED_WINERRORS
def _py2_fsencode(part):
    # type: (Text) -> str
    """On Python 2, encode a unicode path component to the filesystem
    encoding; on Python 3 (or for byte strings) assert it is already str."""
    if six.PY2 and isinstance(part, six.text_type):
        # py2 => minimal unicode support
        # note: in rare circumstances, on Python < 3.2,
        # getfilesystemencoding can return None, in that
        # case fall back to ascii
        return part.encode(sys.getfilesystemencoding() or 'ascii')
    else:
        assert isinstance(part, str)
        return part
def _try_except_fileexistserror(
    try_func,  # type: Callable[[], None]
    except_func,  # type: Callable[[BaseException], None]
    else_func=None,  # type: Callable[[], None]
):
    # type: (...) -> None
    """Run *try_func*, dispatching a FileExistsError (or the py2 equivalent:
    EnvironmentError with errno EEXIST) to *except_func*. *else_func*, if
    given, runs only when no exception occurred. Other errors propagate.
    """
    if sys.version_info >= (3, 3):
        try:
            try_func()
        except FileExistsError as exc:  # noqa: F821
            except_func(exc)
        else:
            if else_func is not None:
                else_func()
    else:
        # Python < 3.3 has no FileExistsError; emulate it via errno.
        try:
            try_func()
        except EnvironmentError as exc:
            if exc.errno != EEXIST:
                raise
            else:
                except_func(exc)
        else:
            if else_func is not None:
                else_func()
def _try_except_filenotfounderror(
    try_func,  # type: Callable[[], None]
    except_func,  # type: Callable[[BaseException], None]
):
    # type: (...) -> None
    """Run *try_func*, dispatching a FileNotFoundError (or its py2
    equivalents) to *except_func*. Other errors propagate unchanged.
    """
    if sys.version_info >= (3, 3):
        try:
            try_func()
        except FileNotFoundError as exc:  # noqa: F821
            except_func(exc)
    elif os.name != 'nt':
        # Python 2 on POSIX: emulate FileNotFoundError via errno.
        try:
            try_func()
        except EnvironmentError as exc:
            if exc.errno != ENOENT:
                raise
            else:
                except_func(exc)
    else:
        # Python 2 on Windows: WindowsError.errno carries the winerror code,
        # so it must be checked separately from plain EnvironmentError.
        try:
            try_func()
        except WindowsError as exc:
            # errno contains winerror
            # 2 = file not found
            # 3 = path not found
            if exc.errno not in (2, 3):
                raise
            else:
                except_func(exc)
        except EnvironmentError as exc:
            if exc.errno != ENOENT:
                raise
            else:
                except_func(exc)
# Generic element type for the iterator helpers below.
_T = TypeVar("_T")


def _try_except_permissionerror_iter(
    try_iter,  # type: Callable[[], Iterable[_T]]
    except_iter,  # type: Callable[[BaseException], Iterable[_T]]
):
    # type: (...) -> Iterable[_T]
    """Yield items from *try_iter*; if a PermissionError (or py2 equivalent,
    EnvironmentError with errno EPERM/EACCES) is raised, switch to yielding
    from *except_iter* instead. Other errors propagate.
    """
    if sys.version_info >= (3, 3):
        try:
            for x in try_iter():
                yield x
        except PermissionError as exc:  # noqa: F821
            for x in except_iter(exc):
                yield x
    else:
        # Python < 3.3 has no PermissionError; emulate it via errno.
        try:
            for x in try_iter():
                yield x
        except EnvironmentError as exc:
            if exc.errno not in (EPERM, EACCES):
                raise
            else:
                for x in except_iter(exc):
                    yield x
def _win32_get_unique_path_id(path):
    # type: (Text) -> Tuple[int, int, int]
    """Return (volume serial, index_hi, index_lo) uniquely identifying *path*.

    Used to implement samefile on older Python versions on Windows.
    See http://timgolden.me.uk/python/win32_how_do_i/
    see_if_two_files_are_the_same_file.html

    Raises FileNotFoundError (OSError with ENOENT on py2) if the path cannot
    be opened, or WinError if the file information query fails.
    """
    from ctypes import POINTER, Structure, WinError
    from ctypes.wintypes import DWORD, HANDLE, BOOL

    class FILETIME(Structure):
        _fields_ = [("datetime_lo", DWORD),
                    ("datetime_hi", DWORD),
                    ]

    class BY_HANDLE_FILE_INFORMATION(Structure):
        _fields_ = [("attributes", DWORD),
                    ("created_at", FILETIME),
                    ("accessed_at", FILETIME),
                    ("written_at", FILETIME),
                    ("volume", DWORD),
                    ("file_hi", DWORD),
                    ("file_lo", DWORD),
                    ("n_links", DWORD),
                    ("index_hi", DWORD),
                    ("index_lo", DWORD),
                    ]

    CreateFile = ctypes.windll.kernel32.CreateFileW
    CreateFile.argtypes = [ctypes.c_wchar_p, DWORD, DWORD, ctypes.c_void_p,
                           DWORD, DWORD, HANDLE]
    CreateFile.restype = HANDLE
    GetFileInformationByHandle = (
        ctypes.windll.kernel32.GetFileInformationByHandle)
    GetFileInformationByHandle.argtypes = [
        HANDLE, POINTER(BY_HANDLE_FILE_INFORMATION)]
    GetFileInformationByHandle.restype = BOOL
    CloseHandle = ctypes.windll.kernel32.CloseHandle
    CloseHandle.argtypes = [HANDLE]
    CloseHandle.restype = BOOL
    GENERIC_READ = 0x80000000
    FILE_SHARE_READ = 0x00000001
    FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
    OPEN_EXISTING = 3
    # Directories can only be opened with CreateFile when the
    # backup-semantics flag is set.
    if os.path.isdir(path):
        flags = FILE_FLAG_BACKUP_SEMANTICS
    else:
        flags = 0
    hfile = CreateFile(path, GENERIC_READ, FILE_SHARE_READ,
                       None, OPEN_EXISTING, flags, None)
    # INVALID_HANDLE_VALUE (-1) shows up as all-ones in 32- or 64-bit HANDLEs.
    if hfile in [0xffffffff, 0xffffffffffffffff]:
        if sys.version_info >= (3, 3):
            raise FileNotFoundError(path)  # noqa: F821
        else:
            # BUG FIX: the message previously contained the literal word
            # "path" instead of the actual path value.
            exc = OSError("file not found: %s" % (path,))
            exc.errno = ENOENT
            raise exc
    info = BY_HANDLE_FILE_INFORMATION()
    success = GetFileInformationByHandle(hfile, info)
    CloseHandle(hfile)
    if success == 0:
        raise WinError()
    return info.volume, info.index_hi, info.index_lo
def _is_wildcard_pattern(pat):
# type: (Text) -> bool
# Whether this pattern needs actual matching using fnmatch, or can
# be looked up directly as a file.
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
    """A flavour implements a particular (platform-specific) set of path
    semantics."""

    # Overridden by _WindowsFlavour / _PosixFlavour.
    sep = None  # type: str
    altsep = None  # type: str
    is_supported = False  # type: bool

    def __init__(self):
        # Bound join over this flavour's separator, e.g. '\\'.join / '/'.join.
        self.join = self.sep.join

    def casefold(self, s):
        # type: (str) -> str
        """Return *s* normalized for case-insensitive comparison (abstract)."""
        raise NotImplementedError

    def casefold_parts(self, parts):
        # type: (List[str]) -> List[str]
        """Return *parts* normalized for case-insensitive comparison (abstract)."""
        raise NotImplementedError

    def gethomedir(self, username):
        # type: (Optional[Text]) -> Text
        """Return the home directory for *username* (abstract)."""
        raise NotImplementedError

    def splitroot(self, part, sep=sep):
        # type: (str, str) -> Tuple[str, str, str]
        """Split *part* into (drive, root, rest) (abstract)."""
        raise NotImplementedError

    def parse_parts(self, parts):
        # type: (Sequence[Text]) -> Tuple[str, str, List[str]]
        """Canonicalize constructor arguments into (drv, root, parsed_parts).

        Parts are scanned right-to-left; the scan stops at the first
        anchored (drive/root) part, which shadows everything before it.
        """
        parts2 = list(map(_py2_fsencode, parts))  # type: List[str]
        parsed = []  # type: List[str]
        sep = self.sep
        altsep = self.altsep
        drv = root = ''
        it = reversed(parts2)
        for part in it:
            if not part:
                continue
            if altsep:
                part = part.replace(altsep, sep)
            drv, root, rel = self.splitroot(part)
            if sep in rel:
                # Append components right-to-left, dropping empties and '.'.
                for x in reversed(rel.split(sep)):
                    if x and x != '.':
                        parsed.append(intern(x))
            else:
                if rel and rel != '.':
                    parsed.append(intern(rel))
            if drv or root:
                if not drv:
                    # If no drive is present, try to find one in the previous
                    # parts. This makes the result of parsing e.g.
                    # ("C:", "/", "a") reasonably intuitive.
                    # NOTE: this inner loop continues consuming the SAME
                    # reversed iterator `it` started above.
                    for part2 in it:
                        if not part2:
                            continue
                        if altsep:
                            part2 = part2.replace(altsep, sep)
                        drv = self.splitroot(part2)[0]
                        if drv:
                            break
                break
        if drv or root:
            # Anchored paths keep the anchor itself as parts[0].
            parsed.append(drv + root)
        parsed.reverse()
        return drv, root, parsed

    def join_parsed_parts(
        self,
        drv,  # type: str
        root,  # type: str
        parts,  # type: List[str]
        drv2,  # type: str
        root2,  # type: str
        parts2,  # type: List[str]
    ):
        # type: (...) -> Tuple[str, str, List[str]]
        """
        Join the two paths represented by the respective
        (drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
        """
        if root2:
            if not drv2 and drv:
                # Rooted-but-driveless second path inherits the first drive.
                return drv, root2, [drv + root2] + parts2[1:]
        elif drv2:
            if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
                # Same drive => second path is relative to the first
                return drv, root, parts + parts2[1:]
        else:
            # Second path is non-anchored (common case)
            return drv, root, parts + parts2
        return drv2, root2, parts2
class _WindowsFlavour(_Flavour):
    """Windows path semantics: backslash separator, drives, UNC shares,
    case-insensitive comparison, reserved device names."""
    # Reference for Windows paths can be found at
    # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx

    sep = '\\'
    altsep = '/'
    has_drv = True
    pathmod = ntpath

    is_supported = (os.name == 'nt')

    drive_letters = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
    ext_namespace_prefix = '\\\\?\\'

    reserved_names = (
        set(['CON', 'PRN', 'AUX', 'NUL']) |
        set(['COM%d' % i for i in range(1, 10)]) |
        set(['LPT%d' % i for i in range(1, 10)])
    )

    # Interesting findings about extended paths:
    # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
    #   but '\\?\c:/a' is not
    # - extended paths are always absolute; "relative" extended paths will
    #   fail.

    def splitroot(self, part, sep=sep):
        # type: (str, str) -> Tuple[str, str, str]
        """Split *part* into (drive-or-UNC-prefix, root, rest)."""
        first = part[0:1]
        second = part[1:2]
        if second == sep and first == sep:
            # XXX extended paths should also disable the collapsing of "."
            # components (according to MSDN docs).
            prefix, part = self._split_extended_path(part)
            first = part[0:1]
            second = part[1:2]
        else:
            prefix = ''
        third = part[2:3]
        if second == sep and first == sep and third != sep:
            # is a UNC path:
            # vvvvvvvvvvvvvvvvvvvvv root
            # \\machine\mountpoint\directory\etc\...
            #            directory ^^^^^^^^^^^^^^
            index = part.find(sep, 2)
            if index != -1:
                index2 = part.find(sep, index + 1)
                # a UNC path can't have two slashes in a row
                # (after the initial two)
                if index2 != index + 1:
                    if index2 == -1:
                        index2 = len(part)
                    if prefix:
                        return prefix + part[1:index2], sep, part[index2 + 1:]
                    else:
                        return part[:index2], sep, part[index2 + 1:]
        drv = root = ''
        if second == ':' and first in self.drive_letters:
            drv = part[:2]
            part = part[2:]
            first = third
        if first == sep:
            root = first
            part = part.lstrip(sep)
        return prefix + drv, root, part

    def casefold(self, s):
        # Windows path comparison is case-insensitive.
        return s.lower()

    def casefold_parts(self, parts):
        return [p.lower() for p in parts]

    def resolve(self, path, strict=False):
        """Resolve *path* with nt._getfinalpathname when available.

        In non-strict mode, walk up the path until an existing ancestor can
        be resolved, then re-append the non-existing tail components.
        Returns None when _getfinalpathname is unavailable, signalling the
        caller to fall back to an absolute (unresolved) path.
        """
        s = str(path)
        if not s:
            return os.getcwd()
        if _getfinalpathname is not None:
            if strict:
                return self._ext_to_normal(_getfinalpathname(s))
            else:
                # End of the path after the first one not found
                tail_parts = []  # type: List[str]

                def _try_func():
                    # result is rebound in the enclosing loop on each pass.
                    result[0] = self._ext_to_normal(_getfinalpathname(s))
                    # if there was no exception, set flag to 0
                    result[1] = 0

                def _exc_func(exc):
                    pass

                while True:
                    result = ['', 1]
                    _try_except_filenotfounderror(_try_func, _exc_func)
                    if result[1] == 1:  # file not found exception raised
                        previous_s = s
                        s, tail = os.path.split(s)  # type: str
                        tail_parts.append(tail)
                        if previous_s == s:
                            # Reached the top without finding anything.
                            return path
                    else:
                        s = result[0]
                        return os.path.join(s, *reversed(tail_parts))
        # Means fallback on absolute
        return None

    def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
        # type: (str, str) -> Tuple[str, str]
        # Strip a '\\?\' (or '\\?\UNC\') extended-length prefix, returning
        # (prefix, remainder-as-DOS-path).
        prefix = ''
        if s.startswith(ext_prefix):
            prefix = s[:4]
            s = s[4:]
            if s.startswith('UNC\\'):
                prefix += s[:3]
                s = '\\' + s[3:]
        return prefix, s

    def _ext_to_normal(self, s):
        # type: (str) -> str
        # Turn back an extended path into a normal DOS-like path
        return self._split_extended_path(s)[1]

    def is_reserved(self, parts):
        # NOTE: the rules for reserved names seem somewhat complicated
        # (e.g. r"..\NUL" is reserved but not r"foo\NUL").
        # We err on the side of caution and return True for paths which are
        # not considered reserved by Windows.
        if not parts:
            return False
        if parts[0].startswith('\\\\'):
            # UNC paths are never reserved
            return False
        return parts[-1].partition('.')[0].upper() in self.reserved_names

    def make_uri(self, path):
        # Under Windows, file URIs use the UTF-8 encoding.
        drive = path.drive
        if len(drive) == 2 and drive[1] == ':':
            # It's a path on a local drive => 'file:///c:/a/b'
            rest = path.as_posix()[2:].lstrip('/')
            return 'file:///%s/%s' % (
                drive, urlquote_from_bytes(rest.encode('utf-8')))
        else:
            # It's a path on a network drive => 'file://host/share/a/b'
            return 'file:' + urlquote_from_bytes(
                path.as_posix().encode('utf-8'))

    def gethomedir(self, username):
        """Best-effort lookup of a user's home directory from the environment.

        Raises RuntimeError when no suitable environment variable is set or
        when *username*'s directory cannot be guessed.
        """
        if 'HOME' in os.environ:
            userhome = os.environ['HOME']
        elif 'USERPROFILE' in os.environ:
            userhome = os.environ['USERPROFILE']
        elif 'HOMEPATH' in os.environ:
            try:
                drv = os.environ['HOMEDRIVE']
            except KeyError:
                drv = ''
            userhome = drv + os.environ['HOMEPATH']
        else:
            raise RuntimeError("Can't determine home directory")
        if username:
            # Try to guess user home directory. By default all users
            # directories are located in the same place and are named by
            # corresponding usernames. If current user home directory points
            # to nonstandard place, this guess is likely wrong.
            if os.environ['USERNAME'] != username:
                drv, root, parts = self.parse_parts((userhome,))
                if parts[-1] != os.environ['USERNAME']:
                    raise RuntimeError("Can't determine home directory "
                                       "for %r" % username)
                parts[-1] = username
                if drv or root:
                    userhome = drv + root + self.join(parts[1:])
                else:
                    userhome = self.join(parts)
        return userhome
class _PosixFlavour(_Flavour):
    """POSIX path semantics: '/' separator, no drives, case-sensitive."""

    sep = '/'
    altsep = ''
    has_drv = False
    pathmod = posixpath

    is_supported = (os.name != 'nt')

    def splitroot(self, part, sep=sep):
        # type: (str, str) -> Tuple[str, str, str]
        """Split *part* into ('', root, rest); POSIX paths have no drive."""
        if part and part[0] == sep:
            stripped_part = part.lstrip(sep)
            # According to POSIX path resolution:
            # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/
            # xbd_chap04.html#tag_04_11
            # "A pathname that begins with two successive slashes may be
            # interpreted in an implementation-defined manner, although more
            # than two leading slashes shall be treated as a single slash".
            if len(part) - len(stripped_part) == 2:
                return '', sep * 2, stripped_part
            else:
                return '', sep, stripped_part
        else:
            return '', '', part

    def casefold(self, s):
        # POSIX comparison is case-sensitive: identity.
        return s

    def casefold_parts(self, parts):
        return parts

    def resolve(self, path, strict=False):
        """Resolve symlinks in *path* and return an absolute string path.

        Raises RuntimeError on a symlink loop. In strict mode, OSErrors from
        readlink other than EINVAL (not a symlink) propagate.
        """
        sep = self.sep
        accessor = path._accessor
        # Maps already-visited paths to their resolution; None marks a
        # symlink whose resolution is still in progress (loop detection).
        seen = {}

        def _resolve(path, rest):
            if rest.startswith(sep):
                path = ''

            for name in rest.split(sep):
                if not name or name == '.':
                    # current dir
                    continue
                if name == '..':
                    # parent dir
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + sep + name
                if newpath in seen:
                    # Already seen this path
                    path = seen[newpath]
                    if path is not None:
                        # use cached value
                        continue
                    # The symlink is not resolved, so we must have a symlink
                    # loop.
                    raise RuntimeError("Symlink loop from %r" % newpath)
                # Resolve the symbolic link
                try:
                    target = accessor.readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL and strict:
                        raise
                    # Not a symlink, or non-strict mode. We just leave the path
                    # untouched.
                    path = newpath
                else:
                    seen[newpath] = None  # not resolved symlink
                    path = _resolve(path, target)
                    seen[newpath] = path  # resolved symlink
            return path
        # NOTE: according to POSIX, getcwd() cannot contain path components
        # which are symlinks.
        base = '' if path.is_absolute() else os.getcwd()
        return _resolve(base, str(path)) or sep

    def is_reserved(self, parts):
        # POSIX has no reserved file names.
        return False

    def make_uri(self, path):
        # We represent the path using the local filesystem encoding,
        # for portability to other applications.
        bpath = bytes(path)
        return 'file://' + urlquote_from_bytes(bpath)

    def gethomedir(self, username):
        """Return the home directory for *username* (current user if falsy),
        consulting $HOME first and falling back to the pwd database."""
        if not username:
            try:
                return os.environ['HOME']
            except KeyError:
                import pwd
                return pwd.getpwuid(os.getuid()).pw_dir
        else:
            import pwd
            try:
                return pwd.getpwnam(username).pw_dir
            except KeyError:
                raise RuntimeError("Can't determine home directory "
                                   "for %r" % username)
# Singleton flavour instances shared by the pure path classes below.
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
    """An accessor implements a particular (system-specific or not) way of
    accessing paths on the filesystem.

    This base class carries no behaviour of its own; concrete accessors
    (see _NormalAccessor below) supply the actual filesystem calls.
    """
def _wrap_strfunc(strfunc):
@functools.wraps(strfunc)
def wrapped(pathobj, *args):
return strfunc(str(pathobj), *args)
return staticmethod(wrapped)
def _wrap_binary_strfunc(strfunc):
@functools.wraps(strfunc)
def wrapped(pathobjA, pathobjB, *args):
return strfunc(str(pathobjA), str(pathobjB), *args)
return staticmethod(wrapped)
class _NormalAccessor(_Accessor):
    """Default accessor: thin staticmethod wrappers around os functions,
    converting path objects to strings before each call.

    NOTE(review): `os_scandir`, `nt` and `supports_symlinks` are module
    globals defined earlier in this file (outside this excerpt).
    """

    stat = _wrap_strfunc(os.stat)

    lstat = _wrap_strfunc(os.lstat)

    open = _wrap_strfunc(os.open)

    listdir = _wrap_strfunc(os.listdir)

    scandir = _wrap_strfunc(os_scandir)

    chmod = _wrap_strfunc(os.chmod)

    if hasattr(os, "lchmod"):
        lchmod = _wrap_strfunc(os.lchmod)  # type: ignore
    else:
        def lchmod(self, pathobj, mode):
            # os.lchmod only exists on some platforms (e.g. BSD/macOS).
            raise NotImplementedError("lchmod() not available on this system")

    mkdir = _wrap_strfunc(os.mkdir)

    unlink = _wrap_strfunc(os.unlink)

    rmdir = _wrap_strfunc(os.rmdir)

    rename = _wrap_binary_strfunc(os.rename)

    if sys.version_info >= (3, 3):
        # os.replace (atomic overwrite) was added in Python 3.3.
        replace = _wrap_binary_strfunc(os.replace)

    if nt:
        if supports_symlinks:
            symlink = _wrap_binary_strfunc(os.symlink)
        else:
            @staticmethod
            def symlink(a, b, target_is_directory):
                raise NotImplementedError(
                    "symlink() not available on this system")
    else:
        # Under POSIX, os.symlink() takes two args
        @staticmethod
        def symlink(a, b, target_is_directory):
            return os.symlink(str(a), str(b))

    utime = _wrap_strfunc(os.utime)

    # Helper for resolve()
    def readlink(self, path):
        return os.readlink(path)
_normal_accessor = _NormalAccessor()
#
# Globbing helpers
#
def _make_selector(pattern_parts):
pat = pattern_parts[0]
child_parts = pattern_parts[1:]
if pat == '**':
cls = _RecursiveWildcardSelector
elif '**' in pat:
raise ValueError(
"Invalid pattern: '**' can only be an entire path component")
elif _is_wildcard_pattern(pat):
cls = _WildcardSelector
else:
cls = _PreciseSelector
return cls(pat, child_parts)
# Memoize selector construction: pattern tuples are hashable and reused
# heavily by glob()/rglob(). lru_cache is absent on very old Pythons.
if hasattr(functools, "lru_cache"):
    _make_selector = functools.lru_cache()(_make_selector)  # type: ignore
class _Selector:
    """A selector matches a specific glob pattern part against the children
    of a given path."""

    def __init__(self, child_parts):
        self.child_parts = child_parts
        if not child_parts:
            # Last pattern component: terminate the selector chain here.
            self.successor = _TerminatingSelector()
            self.dironly = False
        else:
            # More components follow, so only directories can match here.
            self.successor = _make_selector(child_parts)
            self.dironly = True

    def select_from(self, parent_path):
        """Iterate over all child paths of `parent_path` matched by this
        selector. This can contain parent_path itself."""
        path_cls = type(parent_path)
        accessor_scandir = parent_path._accessor.scandir
        if not path_cls.is_dir(parent_path):
            return iter([])
        return self._select_from(
            parent_path, path_cls.is_dir, path_cls.exists, accessor_scandir)
class _TerminatingSelector:
def _select_from(self, parent_path, is_dir, exists, scandir):
yield parent_path
class _PreciseSelector(_Selector):
    """Matches one literal (non-wildcard) name component."""

    def __init__(self, name, child_parts):
        self.name = name
        _Selector.__init__(self, child_parts)

    def _select_from(self, parent_path, is_dir, exists, scandir):
        def attempt():
            child = parent_path._make_child_relpath(self.name)
            # Intermediate components must be directories; the final one
            # merely has to exist.
            check = is_dir if self.dironly else exists
            if check(child):
                for found in self.successor._select_from(
                        child, is_dir, exists, scandir):
                    yield found

        def on_denied(exc):
            # Permission errors simply produce no matches.
            return iter([])

        for item in _try_except_permissionerror_iter(attempt, on_denied):
            yield item
class _WildcardSelector(_Selector):
    """Matches one component against an fnmatch-style wildcard pattern."""

    def __init__(self, pat, child_parts):
        self.pat = re.compile(fnmatch.translate(pat))
        _Selector.__init__(self, child_parts)

    def _select_from(self, parent_path, is_dir, exists, scandir):
        def attempt():
            casefold = parent_path._flavour.casefold
            for entry in list(scandir(parent_path)):
                if self.dironly and not entry.is_dir():
                    continue
                name = entry.name
                if self.pat.match(casefold(name)):
                    child = parent_path._make_child_relpath(name)
                    for found in self.successor._select_from(
                            child, is_dir, exists, scandir):
                        yield found

        def on_denied(exc):
            # Unreadable directories contribute no matches.
            return iter([])

        for item in _try_except_permissionerror_iter(attempt, on_denied):
            yield item
class _RecursiveWildcardSelector(_Selector):
    """Handles a '**' component: matches this directory and, recursively,
    every directory below it (symlinked directories are not descended)."""

    def __init__(self, pat, child_parts):
        # The '**' pattern text itself is not needed; only the chain matters.
        _Selector.__init__(self, child_parts)

    def _iterate_directories(self, parent_path, is_dir, scandir):
        # Depth-first pre-order walk: yield parent_path, then subdirectories.
        yield parent_path

        def try_iter():
            entries = list(scandir(parent_path))
            for entry in entries:
                entry_is_dir = False
                try:
                    entry_is_dir = entry.is_dir()
                except OSError as e:
                    # Benign stat failures are treated as "not a directory";
                    # _ignore_error is defined earlier in this file.
                    if not _ignore_error(e):
                        raise
                if entry_is_dir and not entry.is_symlink():
                    path = parent_path._make_child_relpath(entry.name)
                    for p in self._iterate_directories(path, is_dir, scandir):
                        yield p

        def except_iter(exc):
            return iter([])

        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x

    def _select_from(self, parent_path, is_dir, exists, scandir):
        def try_iter():
            # The same path can be reached from several starting points,
            # so de-duplicate results.
            yielded = set()
            try:
                successor_select = self.successor._select_from
                for starting_point in self._iterate_directories(
                        parent_path, is_dir, scandir):
                    for p in successor_select(
                            starting_point, is_dir, exists, scandir):
                        if p not in yielded:
                            yield p
                            yielded.add(p)
            finally:
                yielded.clear()

        def except_iter(exc):
            return iter([])

        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x
#
# Public API
#
class _PathParents(Sequence):
"""This object provides sequence-like access to the logical ancestors
of a path. Don't try to construct it yourself."""
__slots__ = ('_pathcls', '_drv', '_root', '_parts')
def __init__(self, path):
# We don't store the instance to avoid reference cycles
self._pathcls = type(path)
self._drv = path._drv
self._root = path._root
self._parts = path._parts
def __len__(self):
if self._drv or self._root:
return len(self._parts) - 1
else:
return len(self._parts)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self):
raise IndexError(idx)
return self._pathcls._from_parsed_parts(self._drv, self._root,
self._parts[:-idx - 1])
def __repr__(self):
return "<{0}.parents>".format(self._pathcls.__name__)
_P = TypeVar("_P", bound="PurePath")
class PurePath(object):
    """PurePath represents a filesystem path and offers operations which
    don't imply any actual filesystem I/O. Depending on your system,
    instantiating a PurePath will return either a PurePosixPath or a
    PureWindowsPath object. You can also instantiate either of these classes
    directly, regardless of your system.
    """
    __slots__ = (
        '_drv', '_root', '_parts',
        '_str', '_hash', '_pparts', '_cached_cparts',
    )
    # Set by the platform-specific subclasses below.
    _flavour = None  # type: _Flavour

    def __type_hints__(self, drv, root, parts, str_, hash_):
        # type: (str, str, List[str], str, int) -> None
        # Never called at runtime; exists only to declare slot types for
        # static type checkers.
        self._drv = drv
        self._root = root
        self._parts = parts
        self._str = str_
        self._hash = hash_

    def __new__(cls, *args):
        # type: (Type[PurePath], *Union[Text, PurePath]) -> PurePath
        """Construct a PurePath from one or several strings and or existing
        PurePath objects. The strings and path objects are combined so as
        to yield a canonicalized path, which is incorporated into the
        new PurePath object.
        """
        if cls is PurePath:
            cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
        return cls._from_parts(args)

    def __reduce__(self):
        # Using the parts tuple helps share interned path parts
        # when pickling related paths.
        return self.__class__, tuple(self._parts)

    @classmethod
    def _parse_args(
        cls,  # type: Type[_P]
        args,  # type: Sequence[Union[Text, PurePath]]
    ):
        # type: (...) -> Tuple[str, str, List[str]]
        # This is useful when you don't want to create an instance, just
        # canonicalize some constructor arguments.
        parts = []  # type: List[str]
        for a in args:
            if isinstance(a, PurePath):
                parts += a._parts
            else:
                if sys.version_info >= (3, 6):
                    a = os.fspath(a)
                else:
                    # duck typing for older Python versions
                    a = getattr(a, "__fspath__", lambda: a)()
                if isinstance(a, str):
                    # Force-cast str subclasses to str (issue #21127)
                    parts.append(str(a))
                # also handle unicode for PY2 (six.text_type = unicode)
                elif six.PY2 and isinstance(a, six.text_type):
                    # cast to str using filesystem encoding
                    parts.append(_py2_fsencode(a))
                else:
                    raise TypeError(
                        "argument should be a str object or an os.PathLike "
                        "object returning str, not %r"
                        % type(a))
        return cls._flavour.parse_parts(parts)

    @classmethod
    def _from_parts(cls, args, init=True):
        # type: (Type[_P], Sequence[Union[Text, PurePath]], bool) -> _P
        # We need to call _parse_args on the instance, so as to get the
        # right flavour.
        self = object.__new__(cls)
        drv, root, parts = self._parse_args(args)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self

    @classmethod
    def _from_parsed_parts(cls, drv, root, parts, init=True):
        # type: (str, str, List[str], bool) -> _P
        # Fast constructor used internally when drv/root/parts are already
        # canonicalized (skips _parse_args entirely).
        self = object.__new__(cls)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self

    @classmethod
    def _format_parsed_parts(cls, drv, root, parts):
        # type: (str, str, List[str]) -> str
        # For anchored paths parts[0] already duplicates drv+root, so skip it.
        if drv or root:
            return drv + root + cls._flavour.join(parts[1:])
        else:
            return cls._flavour.join(parts)

    def _init(self):
        # Overridden in concrete Path
        pass

    def _make_child(self, args):
        # type: (Sequence[Union[Text, PurePath]]) -> PurePath
        # Join *args* onto this path, letting anchored (drive/root) args
        # replace the current anchor. (Type comment fixed: this returns a
        # path object, not str.)
        drv, root, parts = self._parse_args(args)
        drv, root, parts = self._flavour.join_parsed_parts(
            self._drv, self._root, self._parts, drv, root, parts)
        return self._from_parsed_parts(drv, root, parts)

    def __str__(self):
        # type: () -> str
        """Return the string representation of the path, suitable for
        passing to system calls."""
        try:
            return self._str
        except AttributeError:
            # Computed lazily and cached; empty parts render as '.'.
            self._str = self._format_parsed_parts(self._drv, self._root,
                                                  self._parts) or '.'
            return self._str

    def __fspath__(self):
        # type: () -> str
        # os.PathLike protocol support (PEP 519).
        return str(self)

    def as_posix(self):
        """Return the string representation of the path with forward (/)
        slashes."""
        f = self._flavour
        return str(self).replace(f.sep, '/')

    def __bytes__(self):
        """Return the bytes representation of the path. This is only
        recommended to use under Unix."""
        if sys.version_info < (3, 2):
            raise NotImplementedError("needs Python 3.2 or later")
        return os.fsencode(str(self))

    def __repr__(self):
        return "{0}({1!r})".format(self.__class__.__name__, self.as_posix())

    def as_uri(self):
        """Return the path as a 'file' URI."""
        if not self.is_absolute():
            raise ValueError("relative path can't be expressed as a file URI")
        return self._flavour.make_uri(self)

    @property
    def _cparts(self):
        # Cached casefolded parts, for hashing and comparison
        try:
            return self._cached_cparts
        except AttributeError:
            self._cached_cparts = self._flavour.casefold_parts(self._parts)
            return self._cached_cparts

    def __eq__(self, other):
        # Paths only compare equal within the same flavour (posix vs windows).
        if not isinstance(other, PurePath):
            return NotImplemented
        return (
            self._cparts == other._cparts
            and self._flavour is other._flavour)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # type: () -> int
        try:
            return self._hash
        except AttributeError:
            self._hash = hash(tuple(self._cparts))
            return self._hash

    def __lt__(self, other):
        if (not isinstance(other, PurePath)
                or self._flavour is not other._flavour):
            return NotImplemented
        return self._cparts < other._cparts

    def __le__(self, other):
        if (not isinstance(other, PurePath)
                or self._flavour is not other._flavour):
            return NotImplemented
        return self._cparts <= other._cparts

    def __gt__(self, other):
        if (not isinstance(other, PurePath)
                or self._flavour is not other._flavour):
            return NotImplemented
        return self._cparts > other._cparts

    def __ge__(self, other):
        if (not isinstance(other, PurePath)
                or self._flavour is not other._flavour):
            return NotImplemented
        return self._cparts >= other._cparts

    drive = property(attrgetter('_drv'),
                     doc="""The drive prefix (letter or UNC path), if any.""")

    root = property(attrgetter('_root'),
                    doc="""The root of the path, if any.""")

    @property
    def anchor(self):
        """The concatenation of the drive and root, or ''."""
        anchor = self._drv + self._root
        return anchor

    @property
    def name(self):
        """The final path component, if any."""
        parts = self._parts
        # Anchored paths store the anchor itself as parts[0], which is
        # not a name.
        if len(parts) == (1 if (self._drv or self._root) else 0):
            return ''
        return parts[-1]

    @property
    def suffix(self):
        """The final component's last suffix, if any."""
        name = self.name
        i = name.rfind('.')
        # A leading dot (hidden file) or a trailing dot is not a suffix.
        if 0 < i < len(name) - 1:
            return name[i:]
        else:
            return ''

    @property
    def suffixes(self):
        """A list of the final component's suffixes, if any."""
        name = self.name
        if name.endswith('.'):
            return []
        name = name.lstrip('.')
        return ['.' + suffix for suffix in name.split('.')[1:]]

    @property
    def stem(self):
        """The final path component, minus its last suffix."""
        name = self.name
        i = name.rfind('.')
        if 0 < i < len(name) - 1:
            return name[:i]
        else:
            return name

    def with_name(self, name):
        # type: (Text) -> _P
        """Return a new path with the file name changed.

        Raises ValueError when this path has no name, or when *name* is
        empty, anchored, or contains a separator.
        """
        if not self.name:
            raise ValueError("%r has an empty name" % (self,))
        drv, root, parts = self._flavour.parse_parts((name,))
        if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
                or drv or root or len(parts) != 1):
            raise ValueError("Invalid name %r" % name)
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + parts[-1:])

    def with_suffix(self, suffix):
        # type: (Text) -> _P
        """Return a new path with the file suffix changed. If the path
        has no suffix, add given suffix. If the given suffix is an empty
        string, remove the suffix from the path.
        """
        # XXX if suffix is None, should the current suffix be removed?
        f = self._flavour
        if f.sep in suffix or f.altsep and f.altsep in suffix:
            raise ValueError("Invalid suffix %r" % suffix)
        # A non-empty suffix must start with '.', and a lone '.' is invalid.
        if suffix and not suffix.startswith('.') or suffix == '.':
            raise ValueError("Invalid suffix %r" % suffix)
        suffix = _py2_fsencode(suffix)
        name = self.name
        if not name:
            raise ValueError("%r has an empty name" % (self,))
        old_suffix = self.suffix
        if not old_suffix:
            name = name + suffix
        else:
            name = name[:-len(old_suffix)] + suffix
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])

    def relative_to(self, *other):
        """Return the relative path to another path identified by the passed
        arguments. If the operation is not possible (because this is not
        a subpath of the other path), raise ValueError.
        """
        # For the purpose of this method, drive and root are considered
        # separate parts, i.e.:
        # Path('c:/').relative_to('c:') gives Path('/')
        # Path('c:/').relative_to('/') raise ValueError
        if not other:
            raise TypeError("need at least one argument")
        parts = self._parts
        drv = self._drv
        root = self._root
        if root:
            abs_parts = [drv, root] + parts[1:]
        else:
            abs_parts = parts
        to_drv, to_root, to_parts = self._parse_args(other)
        if to_root:
            to_abs_parts = [to_drv, to_root] + to_parts[1:]
        else:
            to_abs_parts = to_parts
        n = len(to_abs_parts)
        cf = self._flavour.casefold_parts
        # Mismatch when the target is not a (casefolded) prefix of this path;
        # an empty target only matches a fully relative path.
        if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
            formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
            raise ValueError("{0!r} does not start with {1!r}"
                             .format(str(self), str(formatted)))
        return self._from_parsed_parts('', root if n == 1 else '',
                                       abs_parts[n:])

    @property
    def parts(self):
        """An object providing sequence-like access to the
        components in the filesystem path."""
        # We cache the tuple to avoid building a new one each time .parts
        # is accessed. XXX is this necessary?
        try:
            return self._pparts
        except AttributeError:
            self._pparts = tuple(self._parts)
            return self._pparts

    def joinpath(self, *args):
        """Combine this path with one or several arguments, and return a
        new path representing either a subpath (if all arguments are relative
        paths) or a totally different path (if one of the arguments is
        anchored).
        """
        return self._make_child(args)

    def __truediv__(self, key):
        return self._make_child((key,))

    def __rtruediv__(self, key):
        return self._from_parts([key] + self._parts)

    if six.PY2:
        # Python 2 spells the true-division operator differently.
        __div__ = __truediv__
        __rdiv__ = __rtruediv__

    @property
    def parent(self):
        """The logical parent of the path."""
        drv = self._drv
        root = self._root
        parts = self._parts
        # An anchored path reduced to its anchor is its own parent.
        if len(parts) == 1 and (drv or root):
            return self
        return self._from_parsed_parts(drv, root, parts[:-1])

    @property
    def parents(self):
        """A sequence of this path's logical parents."""
        return _PathParents(self)

    def is_absolute(self):
        """True if the path is absolute (has both a root and, if applicable,
        a drive)."""
        if not self._root:
            return False
        return not self._flavour.has_drv or bool(self._drv)

    def is_reserved(self):
        """Return True if the path contains one of the special names reserved
        by the system, if any."""
        return self._flavour.is_reserved(self._parts)

    def match(self, path_pattern):
        """
        Return True if this path matches the given pattern.

        Anchored patterns must match the whole path; relative patterns are
        matched against the right-most components.
        """
        cf = self._flavour.casefold
        path_pattern = cf(path_pattern)
        drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
        if not pat_parts:
            raise ValueError("empty pattern")
        if drv and drv != cf(self._drv):
            return False
        if root and root != cf(self._root):
            return False
        parts = self._cparts
        if drv or root:
            if len(pat_parts) != len(parts):
                return False
            pat_parts = pat_parts[1:]
        elif len(pat_parts) > len(parts):
            return False
        for part, pat in zip(reversed(parts), reversed(pat_parts)):
            if not fnmatch.fnmatchcase(part, pat):
                return False
        return True
# Can't subclass os.PathLike from PurePath and keep the constructor
# optimizations in PurePath._parse_args(), so register it as a virtual
# subclass instead (os.PathLike exists from Python 3.6).
if sys.version_info >= (3, 6):
    os.PathLike.register(PurePath)
class PurePosixPath(PurePath):
    """PurePath subclass for non-Windows systems.

    On a POSIX system, instantiating a PurePath should return this object.
    However, you can also instantiate it directly on any system.
    """
    _flavour = _posix_flavour
    __slots__ = ()
class PureWindowsPath(PurePath):
    """PurePath subclass for Windows systems.

    On a Windows system, instantiating a PurePath should return this object.
    However, you can also instantiate it directly on any system.
    """
    _flavour = _windows_flavour
    __slots__ = ()
# Filesystem-accessing classes
class Path(PurePath):
"""PurePath subclass that can make system calls.
Path represents a filesystem path but unlike PurePath, also offers
methods to do system calls on path objects. Depending on your system,
instantiating a Path will return either a PosixPath or a WindowsPath
object. You can also instantiate a PosixPath or WindowsPath directly,
but cannot instantiate a WindowsPath on a POSIX system or vice versa.
"""
__slots__ = (
'_accessor',
'_closed',
)
    def __new__(cls, *args, **kwargs):
        # type: (Type[Path], *Union[Text, PurePath], **Any) -> Path
        """Construct a concrete Path (PosixPath or WindowsPath).

        Raises NotImplementedError when the selected flavour cannot make
        system calls on the current platform.
        """
        if cls is Path:
            cls = WindowsPath if os.name == 'nt' else PosixPath
        self = cls._from_parts(args, init=False)
        if not self._flavour.is_supported:
            raise NotImplementedError("cannot instantiate %r on your system"
                                      % (cls.__name__,))
        self._init()
        return self
    def _init(self,
              # Private non-constructor arguments
              template=None,
              ):
        """Finish construction; reuse *template*'s accessor when given,
        otherwise fall back to the module-level default accessor."""
        self._closed = False
        if template is not None:
            self._accessor = template._accessor
        else:
            self._accessor = _normal_accessor
    def _make_child_relpath(self, part):
        # This is an optimization used for dir walking. `part` must be
        # a single part relative to this path (no separators, no anchor).
        parts = self._parts + [part]
        return self._from_parsed_parts(self._drv, self._root, parts)
    def __enter__(self):
        # Context-manager support: a path closed by __exit__ refuses reuse.
        if self._closed:
            self._raise_closed()
        return self
    def __exit__(self, t, v, tb):
        # Mark the path closed; subsequent I/O methods raise ValueError.
        self._closed = True
    def _raise_closed(self):
        # Shared error for I/O attempted after the context manager exited.
        raise ValueError("I/O operation on closed path")
    def _opener(self, name, flags, mode=0o666):
        # A stub for the opener argument to built-in open()
        return self._accessor.open(self, flags, mode)
    def _raw_open(self, flags, mode=0o777):
        """
        Open the file pointed by this path and return a file descriptor,
        as os.open() does.
        """
        if self._closed:
            self._raise_closed()
        return self._accessor.open(self, flags, mode)
# Public API
    @classmethod
    def cwd(cls):
        """Return a new path pointing to the current working directory
        (as returned by os.getcwd()).
        """
        return cls(os.getcwd())
    @classmethod
    def home(cls):
        """Return a new path pointing to the user's home directory (as
        returned by os.path.expanduser('~')).
        """
        # cls() is the '.' path; it only serves to reach the flavour.
        return cls(cls()._flavour.gethomedir(None))
    def samefile(self, other_path):
        """Return whether other_path is the same or not as this file
        (as returned by os.path.samefile()).
        """
        if hasattr(os.path, "samestat"):
            st = self.stat()
            try:
                other_st = other_path.stat()
            except AttributeError:
                # other_path is a plain string / path-like, not a Path.
                other_st = os.stat(other_path)
            return os.path.samestat(st, other_st)
        else:
            # Presumably Python 2 on Windows, where samestat is missing:
            # compare (volume, file-index) triples instead.
            filename1 = six.text_type(self)
            filename2 = six.text_type(other_path)
            st1 = _win32_get_unique_path_id(filename1)
            st2 = _win32_get_unique_path_id(filename2)
            return st1 == st2
    def iterdir(self):
        """Iterate over the files in this directory. Does not yield any
        result for the special paths '.' and '..'.
        """
        if self._closed:
            self._raise_closed()
        for name in self._accessor.listdir(self):
            if name in ('.', '..'):
                # Yielding a path object for these makes little sense
                continue
            yield self._make_child_relpath(name)
            # Re-check after each yield: the context may close mid-iteration.
            if self._closed:
                self._raise_closed()
    def glob(self, pattern):
        """Iterate over this subtree and yield all existing files (of any
        kind, including directories) matching the given relative pattern.

        Raises ValueError for an empty pattern and NotImplementedError for
        anchored (drive/root) patterns.
        """
        if not pattern:
            raise ValueError("Unacceptable pattern: {0!r}".format(pattern))
        pattern = self._flavour.casefold(pattern)
        drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
        if drv or root:
            raise NotImplementedError("Non-relative patterns are unsupported")
        selector = _make_selector(tuple(pattern_parts))
        for p in selector.select_from(self):
            yield p
def rglob(self, pattern):
    """Yield every existing file or directory (of any kind) anywhere
    beneath this path whose name matches the relative ``pattern``.
    """
    folded = self._flavour.casefold(pattern)
    drive, root, parts = self._flavour.parse_parts((folded,))
    if drive or root:
        raise NotImplementedError("Non-relative patterns are unsupported")
    # Prepending '**' makes the selector recurse through the subtree.
    selector = _make_selector(("**",) + tuple(parts))
    for match in selector.select_from(self):
        yield match
def absolute(self):
    """Return an absolute version of this path.  This function works
    even if the path doesn't point to anything.

    No normalization is done, i.e. all '.' and '..' will be kept along.
    Use resolve() to get the canonical path to a file.
    """
    # XXX untested yet!
    if self._closed:
        self._raise_closed()
    if self.is_absolute():
        # Already anchored: nothing to prepend.
        return self
    # FIXME this must defer to the specific flavour (and, under Windows,
    # use nt._getfullpathname())
    obj = self._from_parts([os.getcwd()] + self._parts, init=False)
    # init=False defers setup so the flavour/accessor can be copied
    # from this instance via the template argument.
    obj._init(template=self)
    return obj
def resolve(self, strict=False):
    """
    Make the path absolute, resolving all symlinks on the way and also
    normalizing it (for example turning slashes into backslashes under
    Windows).  ``strict`` is forwarded to the flavour's resolve().
    """
    if self._closed:
        self._raise_closed()
    s = self._flavour.resolve(self, strict=strict)
    if s is None:
        # No symlink resolution => for consistency, raise an error if
        # the path is forbidden
        # but not raise error if file does not exist (see issue #54).
        def _try_func():
            self.stat()

        def _exc_func(exc):
            pass

        _try_except_filenotfounderror(_try_func, _exc_func)
        s = str(self.absolute())
    else:
        # ensure s is a string (normpath requires this on older python)
        s = str(s)
    # Now we have no symlinks in the path, it's safe to normalize it.
    normed = self._flavour.pathmod.normpath(s)
    obj = self._from_parts((normed,), init=False)
    obj._init(template=self)
    return obj
def stat(self):
    """
    Return the result of the stat() system call on this path, like
    os.stat() does.  Follows symlinks; see lstat() for the non-following
    variant.
    """
    return self._accessor.stat(self)
def owner(self):
    """
    Return the login name of the file owner.

    POSIX-only: relies on the pwd module (WindowsPath overrides this).
    """
    import pwd
    return pwd.getpwuid(self.stat().st_uid).pw_name
def group(self):
    """
    Return the group name of the file gid.

    POSIX-only: relies on the grp module (WindowsPath overrides this).
    """
    import grp
    return grp.getgrgid(self.stat().st_gid).gr_name
def open(self, mode='r', buffering=-1, encoding=None,
         errors=None, newline=None):
    """
    Open the file pointed by this path and return a file object, as
    the built-in open() function does.
    """
    if self._closed:
        self._raise_closed()
    if sys.version_info >= (3, 3):
        # Python 3.3+ supports the opener callback, which lets the
        # low-level open go through this path's accessor.
        return io.open(
            str(self), mode, buffering, encoding, errors, newline,
            opener=self._opener)
    else:
        return io.open(str(self), mode, buffering,
                       encoding, errors, newline)
def read_bytes(self):
    """Return the complete binary contents of the file at this path."""
    with self.open(mode='rb') as stream:
        return stream.read()
def read_text(self, encoding=None, errors=None):
    """Return the complete decoded text contents of the file at this
    path, using the given ``encoding`` and ``errors`` policy.
    """
    with self.open(mode='r', encoding=encoding, errors=errors) as stream:
        return stream.read()
def write_bytes(self, data):
    """Replace the file's contents with ``data`` and return the number
    of bytes written.  ``data`` must be a bytes-like value.
    """
    # Reject text explicitly rather than letting the file object fail.
    if not isinstance(data, six.binary_type):
        raise TypeError(
            'data must be %s, not %s' %
            (six.binary_type.__name__, data.__class__.__name__))
    with self.open(mode='wb') as stream:
        return stream.write(data)
def write_text(self, data, encoding=None, errors=None, newline=None):
    """Replace the file's contents with the text ``data`` and return
    the number of characters written.
    """
    # Reject bytes explicitly rather than letting the file object fail.
    if not isinstance(data, six.text_type):
        raise TypeError(
            'data must be %s, not %s' %
            (six.text_type.__name__, data.__class__.__name__))
    with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as stream:
        return stream.write(data)
def touch(self, mode=0o666, exist_ok=True):
    """
    Create this file with the given access mode, if it doesn't exist.

    With exist_ok=True an existing file simply has its modification
    time bumped; with exist_ok=False an existing file is an error.
    """
    if self._closed:
        self._raise_closed()
    if exist_ok:
        # First try to bump modification time
        # Implementation note: GNU touch uses the UTIME_NOW option of
        # the utimensat() / futimens() functions.
        try:
            self._accessor.utime(self, None)
        except OSError:
            # Avoid exception chaining
            pass
        else:
            # File existed and was touched; nothing left to do.
            return
    flags = os.O_CREAT | os.O_WRONLY
    if not exist_ok:
        # O_EXCL makes the open fail if the file already exists.
        flags |= os.O_EXCL
    fd = self._raw_open(flags, mode)
    os.close(fd)
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
    """
    Create a new directory at this given path.

    With parents=True, missing ancestor directories are created first;
    with exist_ok=True an already-existing directory is not an error.
    """
    if self._closed:
        self._raise_closed()

    def _try_func():
        self._accessor.mkdir(self, mode)

    def _exc_func(exc):
        # File-not-found here means a parent is missing: create the
        # ancestors, then retry this level exactly once.
        if not parents or self.parent == self:
            raise exc
        self.parent.mkdir(parents=True, exist_ok=True)
        self.mkdir(mode, parents=False, exist_ok=exist_ok)

    try:
        _try_except_filenotfounderror(_try_func, _exc_func)
    except OSError:
        # Cannot rely on checking for EEXIST, since the operating system
        # could give priority to other errors like EACCES or EROFS
        if not exist_ok or not self.is_dir():
            raise
def chmod(self, mode):
    """
    Change the permissions of the path, like os.chmod().
    """
    if self._closed:
        self._raise_closed()
    self._accessor.chmod(self, mode)
def lchmod(self, mode):
    """
    Like chmod(), except if the path points to a symlink, the symlink's
    permissions are changed, rather than its target's.
    """
    if self._closed:
        self._raise_closed()
    self._accessor.lchmod(self, mode)
def unlink(self):
    """
    Remove this file or link.
    If the path is a directory, use rmdir() instead.
    """
    if self._closed:
        self._raise_closed()
    self._accessor.unlink(self)
def rmdir(self):
    """
    Remove this directory.  The directory must be empty.
    """
    if self._closed:
        self._raise_closed()
    self._accessor.rmdir(self)
def lstat(self):
    """
    Like stat(), except if the path points to a symlink, the symlink's
    status information is returned, rather than its target's.
    """
    if self._closed:
        self._raise_closed()
    return self._accessor.lstat(self)
def rename(self, target):
    """
    Rename this path to the given path.
    """
    if self._closed:
        self._raise_closed()
    self._accessor.rename(self, target)
def replace(self, target):
    """
    Rename this path to the given path, clobbering the existing
    destination if it exists.
    """
    # os.replace() only exists from Python 3.3 on; fail loudly rather
    # than silently degrading to rename semantics.
    if sys.version_info < (3, 3):
        raise NotImplementedError("replace() is only available "
                                  "with Python 3.3 and later")
    if self._closed:
        self._raise_closed()
    self._accessor.replace(self, target)
def symlink_to(self, target, target_is_directory=False):
    """
    Make this path a symlink pointing to the given path.
    Note the order of arguments (self, target) is the reverse of
    os.symlink's.

    ``target_is_directory`` is forwarded to the accessor (it matters
    on Windows, where directory symlinks are distinct).
    """
    if self._closed:
        self._raise_closed()
    self._accessor.symlink(target, self, target_is_directory)
# Convenience functions for querying the stat results
def exists(self):
    """Report whether something exists at this path."""
    try:
        self.stat()
        return True
    except OSError as err:
        if _ignore_error(err):
            # Absent path or dangling symlink.
            return False
        raise
    except ValueError:
        # Path cannot be encoded for the filesystem.
        return False
def is_dir(self):
    """Report whether this path refers to a directory."""
    try:
        mode = self.stat().st_mode
    except OSError as err:
        if _ignore_error(err):
            # Absent path or dangling symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        raise
    except ValueError:
        # Path cannot be encoded for the filesystem
        return False
    return S_ISDIR(mode)
def is_file(self):
    """Report whether this path refers to a regular file (symlinks to
    regular files also count, since stat() follows links).
    """
    try:
        mode = self.stat().st_mode
    except OSError as err:
        if _ignore_error(err):
            # Absent path or dangling symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        raise
    except ValueError:
        # Path cannot be encoded for the filesystem
        return False
    return S_ISREG(mode)
def is_mount(self):
    """
    Check if this path is a POSIX mount point
    """
    # A mount point must at least exist and be a directory.
    if not self.exists() or not self.is_dir():
        return False
    parent = Path(self.parent)
    try:
        parent_dev = parent.stat().st_dev
    except OSError:
        return False
    if self.stat().st_dev != parent_dev:
        # Crossing onto a different device: definitely a mount point.
        return True
    # Same device: only the filesystem root has itself as parent inode.
    return self.stat().st_ino == parent.stat().st_ino
def is_symlink(self):
    """Report whether this path is a symbolic link (uses lstat(), so
    the link itself is examined, not its target).
    """
    try:
        mode = self.lstat().st_mode
    except OSError as err:
        if _ignore_error(err):
            # Path doesn't exist
            return False
        raise
    except ValueError:
        # Path cannot be encoded for the filesystem
        return False
    return S_ISLNK(mode)
def is_block_device(self):
    """Report whether this path refers to a block device."""
    try:
        mode = self.stat().st_mode
    except OSError as err:
        if _ignore_error(err):
            # Absent path or dangling symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        raise
    except ValueError:
        # Path cannot be encoded for the filesystem
        return False
    return S_ISBLK(mode)
def is_char_device(self):
    """Report whether this path refers to a character device."""
    try:
        mode = self.stat().st_mode
    except OSError as err:
        if _ignore_error(err):
            # Absent path or dangling symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        raise
    except ValueError:
        # Path cannot be encoded for the filesystem
        return False
    return S_ISCHR(mode)
def is_fifo(self):
    """Report whether this path refers to a FIFO (named pipe)."""
    try:
        mode = self.stat().st_mode
    except OSError as err:
        if _ignore_error(err):
            # Absent path or dangling symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        raise
    except ValueError:
        # Path cannot be encoded for the filesystem
        return False
    return S_ISFIFO(mode)
def is_socket(self):
    """Report whether this path refers to a Unix domain socket."""
    try:
        mode = self.stat().st_mode
    except OSError as err:
        if _ignore_error(err):
            # Absent path or dangling symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        raise
    except ValueError:
        # Path cannot be encoded for the filesystem
        return False
    return S_ISSOCK(mode)
def expanduser(self):
    """ Return a new path with expanded ~ and ~user constructs
    (as returned by os.path.expanduser)
    """
    # Only a relative path whose first component starts with '~'
    # is eligible for expansion.
    expandable = (not (self._drv or self._root)
                  and self._parts and self._parts[0][:1] == '~')
    if not expandable:
        return self
    homedir = self._flavour.gethomedir(self._parts[0][1:])
    return self._from_parts([homedir] + self._parts[1:])
class PosixPath(Path, PurePosixPath):
    """Concrete Path flavour for every non-Windows platform.

    Instantiating Path on a POSIX system yields one of these.
    """
    __slots__ = ()
class WindowsPath(Path, PureWindowsPath):
    """Concrete Path flavour for Windows.

    Instantiating Path on a Windows system yields one of these.
    The POSIX-only operations below are overridden to fail loudly.
    """
    __slots__ = ()

    def owner(self):
        raise NotImplementedError("Path.owner() is unsupported on this system")

    def group(self):
        raise NotImplementedError("Path.group() is unsupported on this system")

    def is_mount(self):
        raise NotImplementedError(
            "Path.is_mount() is unsupported on this system")
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pathlib2@py3@pathlib2@__init__.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/marker/line/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``scattercarpet.marker.line.color`` attribute."""

    def __init__(
        self, plotly_name="color", parent_name="scattercarpet.marker.line", **kwargs
    ):
        # Pull the defaultable options out of kwargs before delegating,
        # so caller-supplied values win over the defaults below.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "style")
        colorscale_path = kwargs.pop(
            "colorscale_path", "scattercarpet.marker.line.colorscale"
        )
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            colorscale_path=colorscale_path,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattercarpet@marker@line@_color.py@.PATH_END.py
|
{
"filename": "SlideSurfaceInst.cc.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/FSISPH/SlideSurfaceInst.cc.py",
"type": "Python"
}
|
# Template for the per-dimension explicit instantiation of SlideSurface.
# The %(ndim)s placeholder is filled in via %-formatting by the code
# generation step that produces the concrete .cc file.
text = """
//------------------------------------------------------------------------------
// Explict instantiation.
//------------------------------------------------------------------------------
#include "FSISPH/SlideSurface.cc"
#include "Geometry/Dimension.hh"
namespace Spheral {
template class SlideSurface< Dim< %(ndim)s > >;
}
"""
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@FSISPH@SlideSurfaceInst.cc.py@.PATH_END.py
|
{
"filename": "test_pixsim.py",
"repo_name": "desihub/desisim",
"repo_path": "desisim_extracted/desisim-main/py/desisim/test/test_pixsim.py",
"type": "Python"
}
|
import unittest, os, sys
import tempfile
from uuid import uuid1
from shutil import rmtree
import numpy as np
from astropy.io import fits
import desimodel.io
import desispec.io
from desisim import io
from desisim import obs
from desisim import pixsim
import desisim.scripts.pixsim
from desiutil.log import get_logger
log = get_logger()
# Both flags gate tests on $DESI_ROOT being defined in the environment;
# they are kept separate because skip messages reference them by name.
desi_templates_available = 'DESI_ROOT' in os.environ
desi_root_available = 'DESI_ROOT' in os.environ
class TestPixsim(unittest.TestCase):
    """End-to-end tests of desisim.pixsim.

    setUpClass redirects the $DESI_* environment variables into a
    temporary directory so the tests never touch a production tree;
    tearDownClass restores the original environment.
    """
    #- Create test subdirectory
    @classmethod
    def setUpClass(cls):
        global desi_templates_available
        cls.testfile = 'test-{uuid}/test-{uuid}.fits'.format(uuid=uuid1())
        cls.testdir = tempfile.mkdtemp()
        cls.origEnv = dict(
            PIXPROD = None,
            SPECPROD = None,
            DESI_SPECTRO_SIM = None,
            DESI_SPECTRO_DATA = None,
            DESI_SPECTRO_REDUX = None,
        )
        cls.testEnv = dict(
            PIXPROD = 'test',
            SPECPROD = 'test',
            DESI_SPECTRO_SIM = os.path.join(cls.testdir,'spectro','sim'),
            DESI_SPECTRO_DATA = os.path.join(cls.testdir,'spectro','sim', 'test'),
            DESI_SPECTRO_REDUX = os.path.join(cls.testdir,'spectro','redux'),
        )
        # Remember pre-existing values so tearDownClass can restore them.
        for e in cls.origEnv:
            if e in os.environ:
                cls.origEnv[e] = os.environ[e]
            os.environ[e] = cls.testEnv[e]
        if desi_templates_available:
            cls.cosmics = (os.environ['DESI_ROOT'] +
                '/spectro/templates/cosmics/v0.3/cosmics-bias-r.fits')
        else:
            cls.cosmics = None
        #- to save memory while testing
        cls.ccdshape = (2000,2000)

    #- Cleanup test files if they exist
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.testfile):
            os.remove(cls.testfile)
            testpath = os.path.normpath(os.path.dirname(cls.testfile))
            if testpath != '.':
                os.removedirs(testpath)
        # Restore (or unset) the environment variables we overrode.
        for e in cls.origEnv:
            if cls.origEnv[e] is None:
                del os.environ[e]
            else:
                os.environ[e] = cls.origEnv[e]
        if os.path.exists(cls.testdir):
            rmtree(cls.testdir)

    def setUp(self):
        self.night = '20150105'
        self.expid = 124
        # Ensure preproc output dirs exist for both exposure ids the
        # tests use (self.expid and self.expid+1).
        for expid in (self.expid, self.expid+1):
            pixfile = desispec.io.findfile('preproc', self.night, expid, camera='b0')
            pixdir = os.path.dirname(pixfile)
            if not os.path.isdir(pixdir):
                os.makedirs(pixdir)

    def tearDown(self):
        # Remove every per-exposure output a test may have produced.
        rawfile = desispec.io.findfile('raw', self.night, self.expid)
        if os.path.exists(rawfile):
            os.remove(rawfile)
        fibermap = desispec.io.findfile('fibermap', self.night, self.expid)
        if os.path.exists(fibermap):
            os.remove(fibermap)
        simspecfile = io.findfile('simspec', self.night, self.expid)
        if os.path.exists(simspecfile):
            os.remove(simspecfile)
        simpixfile = io.findfile('simpix', self.night, self.expid)
        if os.path.exists(simpixfile):
            os.remove(simpixfile)
        for camera in ('b0', 'r0', 'z0'):
            pixfile = desispec.io.findfile('preproc', self.night, self.expid, camera=camera)
            if os.path.exists(pixfile):
                os.remove(pixfile)

    @unittest.skipUnless(desi_root_available, '$DESI_ROOT not set')
    def test_pixsim(self):
        night = self.night
        expid = self.expid
        cameras = ['r0']
        obs.new_exposure('arc', night=night, expid=expid, nspec=3)
        self.assertTrue(os.path.exists(io.findfile('simspec', night, expid)))
        simspecfile = io.findfile('simspec', night, expid)
        rawfile = desispec.io.findfile('desi', night, expid)
        simpixfile = io.findfile('simpix', night, expid)
        self.assertFalse(os.path.exists(simpixfile))
        self.assertFalse(os.path.exists(rawfile))
        pixsim.simulate_exposure(simspecfile, rawfile, cameras,
                                 ccdshape=self.ccdshape,
                                 addcosmics=False, simpixfile=simpixfile)
        self.assertTrue(os.path.exists(simpixfile))
        self.assertTrue(os.path.exists(rawfile))

    @unittest.skipUnless(desi_templates_available, 'The DESI templates directory ($DESI_ROOT/spectro/templates) was not detected.')
    def test_pixsim_cosmics(self):
        night = self.night
        expid = self.expid
        cameras = ['r0']
        obs.new_exposure('arc', night=night, expid=expid, nspec=3)
        simspecfile = io.findfile('simspec', night, expid)
        rawfile = desispec.io.findfile('desi', night, expid)
        simpixfile = io.findfile('simpix', night, expid, cameras)
        self.assertFalse(os.path.exists(simpixfile))
        self.assertFalse(os.path.exists(rawfile))
        pixsim.simulate_exposure(simspecfile, rawfile, cameras,
                                 addcosmics=True, ccdshape=self.ccdshape)
        self.assertTrue(os.path.exists(rawfile))
        #- No simpixfile option, shouldn't exist
        self.assertFalse(os.path.exists(simpixfile))

    def test_simulate(self):
        import desispec.image
        night = self.night
        expid = self.expid
        camera = 'r0'
        nspec = 3
        obs.new_exposure('arc', night=night, expid=expid, nspec=nspec)
        simspec = io.read_simspec(io.findfile('simspec', night, expid))
        psf = desimodel.io.load_psf(camera[0])
        # Shrink the PSF's CCD footprint to keep memory use down.
        psf.npix_y, psf.npix_x = self.ccdshape
        image, rawpix, truepix = pixsim.simulate(camera, simspec, psf,
                                                 nspec=nspec, preproc=False)
        self.assertTrue(isinstance(image, desispec.image.Image))
        self.assertTrue(isinstance(rawpix, np.ndarray))
        self.assertTrue(isinstance(truepix, np.ndarray))
        self.assertEqual(image.pix.shape, truepix.shape)
        self.assertEqual(image.pix.shape[0], rawpix.shape[0])
        self.assertLess(image.pix.shape[1], rawpix.shape[1]) #- raw has overscan

    def test_get_nodes_per_exp(self):
        # nodes_per_comm_exp = get_nodes_per_exp(nnodes, nexposures, ncameras)
        self.assertEqual(pixsim.get_nodes_per_exp(6,2,30), 6)
        self.assertEqual(pixsim.get_nodes_per_exp(30,2,30), 30)
        self.assertEqual(pixsim.get_nodes_per_exp(9,3,21), 3)
        self.assertEqual(pixsim.get_nodes_per_exp(17,3,17), 17)
        self.assertEqual(pixsim.get_nodes_per_exp(12,12,6), 6)
        #- Now prints warning but isn't an error
        # with self.assertRaises(ValueError):
        #     pixsim.get_nodes_per_exp(34,3,17)  #- 3*17 % 34 != 0
        #- TODO: add more failure cases

    #- Travis tests hang when writing coverage when both test_main* were
    #- called, though the tests work on other systems.
    #- Disabling multiprocessing also "fixed" this for unknown reasons.
    @unittest.skipIf(False, 'Skip test that is causing coverage tests to hang.')
    def test_main_defaults(self):
        night = self.night
        expid = self.expid
        camera = 'r0'
        nspec = 3
        ncpu = 3
        obs.new_exposure('arc', night=night, expid=expid, nspec=nspec)
        #- run pixsim
        simspec = io.findfile('simspec', night, expid)
        simpixfile = io.findfile('simpix', night, expid)
        rawfile = desispec.io.findfile('raw', night, expid)
        opts = ['--simspec', simspec,'--simpixfile', simpixfile, '--rawfile', rawfile]
        if ncpu is not None:
            opts.extend( ['--ncpu', ncpu] )
        log.debug('testing pixsim.main({})'.format(opts))
        pixsimargs = desisim.scripts.pixsim.parse(opts)
        desisim.scripts.pixsim.main(pixsimargs)
        #- verify outputs
        self.assertTrue(os.path.exists(simpixfile))
        self.assertTrue(os.path.exists(rawfile))
        fx = fits.open(rawfile)
        self.assertTrue('B0' in fx)
        self.assertTrue('R0' in fx)
        self.assertTrue('Z0' in fx)
        fx.close()
        #- cleanup as we go
        os.remove(simpixfile)
        os.remove(rawfile)

    def test_main_override(self):
        night = self.night
        expid = self.expid
        camera = 'r0'
        nspec = 3
        ncpu = 3
        obs.new_exposure('arc', night=night, expid=expid, nspec=nspec)
        #- derive night from simspec input while overriding expid
        #- Include wavelengths covering z, but only ask for b and r
        simspecfile = io.findfile('simspec', night, expid)
        altexpid = expid+1
        altrawfile = desispec.io.findfile('raw', night, altexpid) + '.blat'
        opts = [
            '--simspec', simspecfile,
            '--keywords', f'EXPID={expid}',
            '--rawfile', altrawfile,
            '--cameras', 'b0,r0',
            '--wavemin', 5500, '--wavemax', 7000.0,
            '--ccd_npix_x', 2000,
        ]
        if ncpu is not None:
            opts.extend( ['--ncpu', ncpu] )
        dirname = os.path.dirname(altrawfile)
        if not os.path.isdir(dirname) :
            os.makedirs(dirname)
        log.debug('testing pixsim.main({})'.format(opts))
        pixsimargs = desisim.scripts.pixsim.parse(opts)
        desisim.scripts.pixsim.main(pixsimargs)
        # Only the requested b0/r0 cameras should be in the output.
        self.assertTrue(os.path.exists(altrawfile))
        fx = fits.open(altrawfile)
        self.assertTrue('B0' in fx)
        self.assertTrue('R0' in fx)
        self.assertTrue('Z0' not in fx)
        fx.close()
        #- cleanup as we go
        os.remove(altrawfile)

    def test_project(self):
        psf = desimodel.io.load_psf('z')
        wave = np.arange(8000, 8010)
        phot = np.ones((2, len(wave)))
        specmin = 12
        args = psf, wave, phot, specmin
        xyrange, pix = pixsim._project(args)
        # A 3-dimensional phot array is invalid and should raise.
        with self.assertRaises(ValueError):
            phot = np.ones((2,3,4))
            args = psf, wave, phot, specmin
            os.environ['UNITTEST_SILENT'] = 'TRUE'
            xyrange, pix = pixsim._project(args)
        del os.environ['UNITTEST_SILENT']

    def test_parse(self):
        night = self.night
        expid = self.expid
        simspec = io.findfile('simspec', night, expid)
        simpixfile = io.findfile('simpix', night, expid)
        rawfile = desispec.io.findfile('raw', night, expid)
        opts = ['--simspec', simspec,'--simpixfile', simpixfile, '--rawfile', rawfile]
        opts.extend(['--cameras', 'b0,r1'])
        args = desisim.scripts.pixsim.parse(opts)
        self.assertEqual(args.rawfile, rawfile)
        self.assertEqual(args.simspec, simspec)
        self.assertEqual(args.cameras, ['b0','r1'])
#- This runs all test* functions in any TestCase class in this file
if __name__ == '__main__':
    # unittest.main() exits the interpreter when the run finishes.
    unittest.main()
def test_suite():
    """Allows testing of only this module with the command::

        python setup.py test -m <modulename>
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
|
desihubREPO_NAMEdesisimPATH_START.@desisim_extracted@desisim-main@py@desisim@test@test_pixsim.py@.PATH_END.py
|
{
"filename": "demo_parasite_axes2.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/axisartist/demo_parasite_axes2.py",
"type": "Python"
}
|
"""
==================
Parasite axis demo
==================
This example demonstrates the use of parasite axis to plot multiple datasets
onto one single plot.
Notice how in this example, *par1* and *par2* are both obtained by calling
``twinx()``, which ties their x-limits with the host's x-axis. From there, each
of those two axis behave separately from each other: different datasets can be
plotted, and the y-limits are adjusted separately.
This approach uses `mpl_toolkits.axes_grid1.parasite_axes.host_subplot` and
`mpl_toolkits.axisartist.axislines.Axes`.
The standard and recommended approach is to use instead standard Matplotlib
axes, as shown in the :doc:`/gallery/spines/multiple_yaxis_with_spines`
example.
An alternative approach using `mpl_toolkits.axes_grid1.parasite_axes.HostAxes`
and `mpl_toolkits.axes_grid1.parasite_axes.ParasiteAxes` is shown in the
:doc:`/gallery/axisartist/demo_parasite_axes` example.
"""
import matplotlib.pyplot as plt

from mpl_toolkits import axisartist
from mpl_toolkits.axes_grid1 import host_subplot

# Host axes plus two parasite y-axes created with twinx(), which ties
# their x-limits to the host's x-axis.
host = host_subplot(111, axes_class=axisartist.Axes)
plt.subplots_adjust(right=0.75)  # leave room for the offset right spine

par1 = host.twinx()
par2 = host.twinx()

# Shift par2's right axis outward so the two right spines don't overlap.
par2.axis["right"] = par2.new_fixed_axis(loc="right", offset=(60, 0))

par1.axis["right"].toggle(all=True)
par2.axis["right"].toggle(all=True)

p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], label="Velocity")

# y-limits are set per axis; only the x-limits are shared.
host.set(xlim=(0, 2), ylim=(0, 2), xlabel="Distance", ylabel="Density")
par1.set(ylim=(0, 4), ylabel="Temperature")
par2.set(ylim=(1, 65), ylabel="Velocity")

host.legend()

# Color each axis label to match its data line.
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())

plt.show()
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@axisartist@demo_parasite_axes2.py@.PATH_END.py
|
{
"filename": "analysis.py",
"repo_name": "samuelyeewl/specmatch-emp",
"repo_path": "specmatch-emp_extracted/specmatch-emp-master/specmatchemp/analysis.py",
"type": "Python"
}
|
"""
@filename analysis.py
Helper functions for analysis of results
"""
import numpy as np
from specmatchemp.library import Library
def generate_sm_values(params, results, method='lincomb', suffix='_sm',
                       cscol='chi_squared', refcol='ref_idxs', coeffcol='coeffs'):
    """Generate the derived values and add it to the parameters table

    Args:
        params (pd.DataFrame): Parameter table
        results (pd.DataFrame): results table
        method (str): Specify the method used
            - 'lincomb' uses the weighted average of the parameters of a list
              of spectra where the coefficients were generated from the
              MatchLincomb procedure results table must contain ref_idxs,
              coeffs columns
            - 'best_match' uses the parameters of the best matched spectrum
              results table must contain ref_idx column
        suffix (str): suffix to append to column name
        cscol (str): column name holding chi-squared values
        refcol (str): column name holding reference-spectrum indices
        coeffcol (str): column name holding lincomb coefficients

    Returns:
        params (pd.DataFrame): parameter table
    """
    # Use linear combination results as sm values
    if method == 'lincomb':
        results = results.set_index('targ_idx')
        for p in Library.STAR_PROPS:
            psm = p+suffix
            # Coefficient-weighted average of the reference stars' values.
            params.loc[:, psm] = params.lib_index.apply(
                lambda i: lincomb_props(params, p, results.loc[i, refcol],
                                        results.loc[i, coeffcol]))
    elif method == 'best_match':
        grouped_results = results.groupby('targ_idx')
        # Reference index of the lowest-chi-squared match for each target.
        params.loc[:, 'best_match'+suffix] = params.lib_index.apply(
            lambda i: grouped_results.get_group(i).sort_values(by=cscol)
            .iloc[0].ref_idx)
        for p in Library.STAR_PROPS:
            psm = p+suffix
            # Copy the best-matching star's catalog parameters verbatim.
            params.loc[:, psm] = params['best_match'+suffix].apply(
                lambda i: params.loc[i, p])
        params.loc[:, 'best_chi_squared' + suffix] = params.lib_index.apply(
            lambda i: grouped_results.get_group(i).sort_values(by=cscol)
            .iloc[0][cscol])
    return params
def lincomb_props(params, prop, idxs, coeffs):
    """Compute the coefficient-weighted average of a library property.

    Args:
        params (pd.DataFrame): Parameter table
        prop (str): Name of property column
        idxs (np.array): List of indices of reference spectra
        coeffs (np.array): Coefficients for weighted average

    Returns:
        sm_prop (np.float): Weighted average
    """
    assert np.isclose(np.sum(coeffs), 1, rtol=1e-3, atol=1e-3),\
        'Coefficients must sum to 1'
    assert np.all(np.isfinite(coeffs)), 'Coefficients must be finite'
    # assert np.all(np.isfinite(library_params.loc[idxs, prop])),\
    #     'Parameters must be finite'
    return sum(params.loc[idx, prop] * c for idx, c in zip(idxs, coeffs))
def generate_residuals(params, suffix='_sm', props=Library.STAR_PROPS):
    """Append (derived - true) residual columns to the parameter table.

    Args:
        params (pd.Dataframe): parameter table
        suffix (str): suffix of derived values
        props (list): property columns to compute residuals for

    Returns:
        params (pd.DataFrame): parameter table
    """
    for prop in props:
        resid_col = prop + suffix + '_resid'
        params.loc[:, resid_col] = params[prop + suffix] - params[prop]
    # Fractional radius residual, delta R / R.
    dr_col = 'dr_r' + suffix + '_resid'
    params.loc[:, dr_col] = ((params['radius' + suffix] - params['radius']) /
                             params['radius'])
    return params
def detrend_params(params, suffix='_sm'):
    """Detrend the parameters

    Args:
        params (pd.Dataframe): parameter table
        suffix (str): suffix of derived values

    Returns:
        params (pd.DataFrame): parameter table
        polycoeffs (dict of ndarrays): Fit coeffs
            - 'Teff_cool': Teff fit for cool stars (Teff < 4500)
            - 'Teff_hot': Teff fit for hot stars (Teff > 4500)
            - 'feh': feh fit
    """
    polycoeffs = {}
    # Fit a linear trend to cool stars
    T_derived = 'Teff'+suffix
    T_detrend = 'Teff'+suffix+'_detrend'
    cool = params.query('Teff < 4500')
    p = np.polyfit(cool['Teff'], cool['Teff'+suffix+'_resid'], 1)
    # correction in coefficients for derived values
    # (maps the trend fitted against true Teff onto derived Teff)
    p0 = p[0]/(p[0]+1)
    p1 = p[1]/(p[0]+1)
    params[T_detrend] = params[T_derived]
    # Only correct rows whose derived Teff falls in the cool regime.
    params.loc[:, T_detrend] = params.apply(lambda row:
        (row[T_derived] - p0*row[T_derived] - p1)
        if (row[T_derived] < 4500*(p[0]+1) + p[1])
        else row[T_detrend], axis=1)
    polycoeffs['Teff_cool'] = p

    # Fit a separate linear trend to hot stars
    hot = params.query('Teff >= 4500')
    p = np.polyfit(hot['Teff'], hot['Teff'+suffix+'_resid'], 1)
    p0 = p[0]/(p[0]+1)
    p1 = p[1]/(p[0]+1)
    params.loc[:, T_detrend] = params.apply(lambda row:
        (row[T_derived] - p0*row[T_derived] - p1)
        if (row[T_derived] >= 4500*(p[0]+1) + p[1])
        else row[T_detrend], axis=1)
    polycoeffs['Teff_hot'] = p

    # Fit a trend to giant stars (R > 1.2 Rsun)
    # NOTE(review): the query actually selects 1.1 < R < 2.5 -- the
    # comment above and the code disagree slightly; code kept as-is.
    giants = params.query('1.1 < radius < 2.5')
    giants = giants[np.logical_not(np.isnan(giants['radius'+suffix]))]
    # Fractional residual fitted against log(radius).
    p = np.polyfit(np.log(giants['radius']),
                   giants['radius'+suffix+'_resid'] / giants['radius'], 1)
    params.loc[:, 'radius'+suffix+'_detrend'] = params.apply(lambda row:
        row['radius'+suffix] - row['radius'+suffix] *
        (p[0]*np.log(row['radius'+suffix]) + p[1])
        if row['radius'+suffix] > 1.1 and row['radius'+suffix] < 2.5
        else row['radius'+suffix], axis=1)
    polycoeffs['radius_giants'] = p

    # Fit a linear trend to feh
    p = np.polyfit(params['feh'], params['feh'+suffix+'_resid'], 1)
    p0 = p[0]/(p[0]+1)
    p1 = p[1]/(p[0]+1)
    params.loc[:, 'feh'+suffix+'_detrend'] = (params['feh'+suffix] -
                                              p0*params['feh'+suffix] - p1)
    polycoeffs['feh'] = p

    # Recompute residuals for the detrended columns.
    params = generate_residuals(params, suffix+'_detrend',
                                props=['Teff', 'radius', 'feh'])
    return (params, polycoeffs)
def dist(star1, star2):
    """Squared distance between two stars in parameter space.

    Differences are normalized by 100 K in Teff, 0.2 dex in fractional
    radius, and 0.1 dex in [Fe/H].

    Args:
        star1 (pd.DataFrame): Row of star 1
        star2 (pd.DataFrame): Row of star 2

    Returns:
        dist**2: Square of distance between the two stars
    """
    d_teff = ((star1.Teff - star2.Teff) / 100) ** 2
    # d_logg = ((star1.logg - star2.logg)/0.1)**2
    mean_radius = np.average([star1.radius, star2.radius])
    d_radius = ((star1.radius - star2.radius) / mean_radius / 0.2) ** 2
    d_feh = ((star1.feh - star2.feh) / 0.1) ** 2
    return d_teff + d_radius + d_feh
def find_closest_star(row, lib):
    """Return the library index of the star nearest to ``row``.

    Index [1] skips the first entry of the sorted distances, which is
    the star itself at distance 0.
    """
    distances = lib.library_params.apply(dist, args=(row,), axis=1)
    return distances.sort_values().index[1]
|
samuelyeewlREPO_NAMEspecmatch-empPATH_START.@specmatch-emp_extracted@specmatch-emp-master@specmatchemp@analysis.py@.PATH_END.py
|
{
"filename": "_showticksuffix.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergeo/marker/colorbar/_showticksuffix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``scattergeo.marker.colorbar.showticksuffix``."""

    def __init__(
        self,
        plotly_name="showticksuffix",
        parent_name="scattergeo.marker.colorbar",
        **kwargs,
    ):
        # Pull defaultable options out of kwargs before delegating, so
        # caller-supplied values win over the defaults below.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowticksuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergeo@marker@colorbar@_showticksuffix.py@.PATH_END.py
|
{
"filename": "p2d.py",
"repo_name": "EmmanuelSchaan/HaloGen",
"repo_path": "HaloGen_extracted/HaloGen-master/p2d.py",
"type": "Python"
}
|
from headers import *
##################################################################################
class P2dAuto(object):
def __init__(self, U, P3dAuto, Weight, name="", pNoise=lambda l: 0., save=False, nProc=1):
    """Angular (2d) auto power spectrum built from a 3d power spectrum
    and a projection kernel.

    U: cosmology/universe object; P3dAuto: 3d power spectrum object;
    Weight: projection kernel; pNoise: noise power as a function of
    ell; save: recompute and write the tables before loading them.
    """
    # copy classes
    self.U = U
    self.Pn = P3dAuto
    self.Weight = Weight
    self.name = str(Weight) + name
    self.pNoise = pNoise
    self.nProc = nProc
    # bounds for z integrals
    self.aMin = self.Weight.aMin
    self.aMax = self.Weight.aMax
    # values of ell to evaluate
    self.L = np.genfromtxt("./input/Lc.txt")  # center of the bins for l
    # create directory if needed
    self.pathOut = "./output/p2d/p2dauto_"+self.name+"/"
    if not os.path.exists(self.pathOut):
        os.makedirs(self.pathOut)
    self.pathFig = "./figures/p2d/p2dauto_"+self.name+"/"
    if not os.path.exists(self.pathFig):
        os.makedirs(self.pathFig)
    # power spectrum
    if save:
        self.saveP()
    self.loadP()
##################################################################################
def computeP(self, fp3d):
    '''Compute P2d for all self.L at once,
    given the 3d power spectrum fp3d.

    Integrates the Limber-style projection over the scale factor a.
    NOTE(review): Python 2 code -- np.array(map(f, self.L)) relies on
    map() returning a list; under Python 3 this would need list(map(...)).
    '''
    z = self.Pn.Z.copy()
    # Drop z=0, where chi=0 would make the 1/chi^2 factor blow up.
    if z[0]==0:
        z = z[1:]
    a = 1. / (1. + z)
    chi = self.U.bg.comoving_distance(z)
    # 3.e5 is presumably c in km/s, giving the c/(H a^2) Jacobian of
    # the da integral -- TODO confirm units against self.U.hubble().
    integrand = 3.e5/( self.U.hubble(z) * a**2 )
    integrand *= self.Weight.f(a)**2 / chi**2
    fp3dVect = np.vectorize(fp3d)
    # Evaluate P3d at the Limber wavenumber k = (l + 0.5)/chi.
    f = lambda l: fp3dVect((l + 0.5)/chi, z)
    integrand = integrand[None,:] * np.array(map(f, self.L))
    integrand *= -1.  # because a is in decreasing order
    result = np.trapz(integrand, a, axis=-1)
    return result
def saveP(self):
    '''Tabulate the 1-halo, 2-halo and noise terms (and their bare vs
    counter-term splits) at every self.L, then write them to disk in
    the column layout loadP() expects.
    '''
    print "precomputing p2d "+self.name
    data = np.zeros((len(self.L), 9))
    data[:,0] = self.L.copy()
    #pool = Pool(ncpus=self.nProc)
    data[:,1] = self.computeP(self.Pn.p1hInt)
    data[:,2] = self.computeP(self.Pn.p2hInt)
    data[:,3] = np.array(map(self.pNoise, self.L))
    print "precomputing bare vs counter terms"
    data[:,4] = self.computeP(self.Pn.p1hBareInt)
    data[:,5] = self.computeP(self.Pn.p1hCounterTermInt)
    data[:,6] = self.computeP(self.Pn.p2hBareInt)
    data[:,7] = self.computeP(self.Pn.p2hCounterTermInt)
    data[:,8] = self.computeP(self.Pn.p2hCorrectedInt)
    np.savetxt(self.pathOut+"p2d_"+self.name+".txt", data)
def loadP(self):
data = np.genfromtxt(self.pathOut+"p2d_"+self.name+".txt")
self.L = data[:,0]
self.P1h = data[:,1]
self.P2h = data[:,2]
self.Pnoise = data[:,3]
self.P = self.P1h + self.P2h
self.Ptot = self.P1h + self.P2h + self.Pnoise
#
self.P1hBare = data[:,4]
self.P1hCounterTerm = data[:,5]
self.P2hBare = data[:,6]
self.P2hCounterTerm = data[:,7]
self.P2hCorrected = data[:,8]
# interpolate power spectra
forP1h = UnivariateSpline(self.L,self.P1h,k=1,s=0)
self.fP1hInt = lambda l: forP1h(l)*(l>=min(self.L))*(l<=max(self.L))
forP2h = UnivariateSpline(self.L,self.P2h,k=1,s=0)
self.fP2hInt = lambda l: forP2h(l)*(l>=min(self.L))*(l<=max(self.L))
forP = UnivariateSpline(self.L,self.P,k=1,s=0)
self.fPInt = lambda l: forP(l)*(l>=min(self.L))*(l<=max(self.L))
forPtot = UnivariateSpline(self.L,self.Ptot,k=1,s=0)
self.fPtotInt = lambda l: forPtot(l)*(l>=min(self.L))*(l<=max(self.L))
#
forP1hBare = UnivariateSpline(self.L,self.P1hBare,k=1,s=0)
self.fP1hBareInt = lambda l: forP1hBare(l)*(l>=min(self.L))*(l<=max(self.L))
forP1hCounterTerm = UnivariateSpline(self.L,self.P1hCounterTerm,k=1,s=0)
self.fP1hCounterTermInt = lambda l: forP1hCounterTerm(l)*(l>=min(self.L))*(l<=max(self.L))
forP2hBare = UnivariateSpline(self.L,self.P2hBare,k=1,s=0)
self.fP2hBareInt = lambda l: forP2hBare(l)*(l>=min(self.L))*(l<=max(self.L))
forP2hCounterTerm = UnivariateSpline(self.L,self.P2hCounterTerm,k=1,s=0)
self.fP2hCounterTermInt = lambda l: forP2hCounterTerm(l)*(l>=min(self.L))*(l<=max(self.L))
forP2hCorrected = UnivariateSpline(self.L,self.P2hCorrected,k=1,s=0)
self.fP2hCorrectedInt = lambda l: forP2hCorrected(l)*(l>=min(self.L))*(l<=max(self.L))
##################################################################################
# power spectrum
def integrandP(self, a, fP, l):
'''dP2d/da, to be integrated wrt a
'''
z = 1./a-1.
chi = self.U.bg.comoving_distance(z)
#
result = 3.e5/( self.U.hubble(z) * a**2 )
result *= self.Weight.f(a)**2
result /= chi**2
result *= fP(l/chi, z)
return result
def p1h(self, l):
f = lambda a: self.integrandP(a, self.Pn.p1hInt, l)
result = integrate.quad(f, self.aMin, self.aMax, epsabs=0, epsrel=1.e-2)[0]
print "done ell=",l
return result
def p2h(self, l):
f = lambda a: self.integrandP(a, self.Pn.p2hInt, l)
result = integrate.quad(f, self.aMin, self.aMax, epsabs=0, epsrel=1.e-2)[0]
print "done ell=",l
return result
def p(self, l):
result = self.p1h(l) + self.p2h(l)
return result
def pTot(self, l):
result = self.p1h(l) + self.p2h(l) + self.pNoise(l)
return result
##################################################################################
def dPdz1h(self, l, z):
a = 1./(1.+z)
result = self.integrandP(a, self.Pn.p1hInt, l)
result *= a**2
return result
def dPdz2h(self, l, z):
a = 1./(1.+z)
result = self.integrandP(a, self.Pn.p2hInt, l)
result *= a**2
return result
def dPdz(self, l, z):
result = self.dPdz1h(l, z) + self.dPdz2h(l, z)
return result
def dPnoisedz(self, l, z):
a = 1./(1.+z)
result = self.Weight.fdPshotNoise_da(a, l)
result *= a**2
return result
##################################################################################
# trispectrum
# def integrandT(self, a, fP, l):
# z = 1./a-1.
# chi = self.U.ComovDist(a, self.U.a_obs)
# #
# result = 3.e5/( self.U.Hubble(a) * a**2 )
# result *= self.Weight.f(a)**4
# result /= chi**6
# result *= fP(l/chi, z)
# return result
#
# def fT_1h(self, l):
# f = lambda a: self.integrandT(a, self.Pn.fT1hinterp, l)
# result = integrate.quad(f, self.aMin, self.aMax, epsabs=0, epsrel=1.e-2)[0]
# print "done ell=",l
# return result
#
# def fT_2h(self, l):
# f = lambda a: self.integrandT(a, self.Pn.fT2hinterp, l)
# result = integrate.quad(f, self.aMin, self.aMax, epsabs=0, epsrel=1.e-2)[0]
# print "done ell=",l
# return result
#
# def fT_4h(self, l):
# f = lambda a: self.integrandT(a, self.Pn.fT4hinterp, l)
# result = integrate.quad(f, self.aMin, self.aMax, epsabs=0, epsrel=1.e-2)[0]
# print "done ell=",l
# return result
#
# def fT(self, l):
# result = self.fT_1h(l)
# return result
#
#
# def fT_ssv(self, l, L=1.e2):
# """This is the difference between the almost-squeezed
# and the exactly squeezed trispectra:
# T(l, -l+L, l, -l-L) = T(l,-l,l,-l) + T_ssv,
# where L << l.
# """
# g = lambda k,z: self.Pn.fT_ssv(k, k, k*L/l, z)
# f = lambda a: self.integrandT(a, g, l)
# result = integrate.quad(f, self.aMin, self.aMax, epsabs=0, epsrel=1.e-2)[0]
# print "done ell=",l
# return result
#
#
#
# ##################################################################################
# # trispectrum non-diagonal
#
# def integrandTNonDiag(self, a, fP, l1, l2):
# z = 1./a-1.
# chi = self.U.ComovDist(a, self.U.a_obs)
# #
# result = 3.e5/( self.U.Hubble(a) * a**2 )
# result *= self.Weight.f(a)**4
# result /= chi**6
# result *= fP(l1/chi, l2/chi, z)
# return result
#
# def fTnondiag(self, l1, l2):
# f = lambda a: self.integrandTNonDiag(a, self.Pn.fTnondiag, l1, l2)
# result = integrate.quad(f, self.aMin, self.aMax, epsabs=0, epsrel=1.e-2)[0]
# print "done ell=",l1, l2
# return result
##################################################################################
def plotP(self):
# P
fig = plt.figure(0)
ax = plt.subplot(111)
#
ax.loglog(self.L, self.P1h+self.P2h+self.Pnoise, 'k', lw=4, label=r'$P_\text{total}$')
ax.loglog(self.L, self.P2h, 'b-', lw=2, label=r'$P_\text{2h}$')
ax.loglog(self.L, self.P1h, 'r-', lw=2, label=r'$P_\text{1h}$')
ax.loglog(self.L, self.Pnoise, 'g-', lw=2, label=r'$P_\text{noise}$')
#
ax.grid()
ax.legend(loc=3)
ax.set_xlabel(r'$\ell$')
ax.set_ylabel(r'$P(\ell)$')
#
path = self.pathFig+"p_"+self.name+".pdf"
fig.savefig(path, bbox_inches='tight')
fig.clf()
# l*(l+1)*P
fig = plt.figure(1)
ax = plt.subplot(111)
#
factor = self.L*(self.L+1.)/(2.*np.pi)
ax.loglog(self.L, factor*(self.P1h+self.P2h+self.Pnoise), 'k', lw=4, label=r'$P_\text{total}$')
ax.loglog(self.L, factor*self.P2h, 'b-', lw=2, label=r'$P_\text{2h}$')
ax.loglog(self.L, factor*self.P1h, 'r-', lw=2, label=r'$P_\text{1h}$')
ax.loglog(self.L, factor*self.Pnoise, 'g-', lw=2, label=r'$P_\text{noise}$')
ax.grid()
ax.legend(loc=3)
ax.set_xlabel(r'$\ell$')
ax.set_ylabel(r'$\ell(\ell+1)P(\ell) / (2\pi)$')
#
path = self.pathFig+"l2p_"+self.name+".pdf"
fig.savefig(path, bbox_inches='tight')
fig.clf()
#plt.show()
def plotPCounterTerms(self):
# fraction of total signal from bare vs counter term
fig=plt.figure(0)
ax=fig.add_subplot(111)
#
ax.axhline(1., c='gray', label=r'Bare+ Counter term')
ax.plot([], [], c='gray', ls='--', label=r'Bare')
ax.plot([], [], c='gray', ls='-.', label=r'Counter term')
ax.plot([], [], c='gray', ls=':', label=r'Corrected')
#
# 1-halo term
plot=ax.plot(self.L, self.P1hBare / self.P1h, 'b', ls='--')
ax.plot(self.L, self.P1hCounterTerm / self.P1h, c=plot[0].get_color(), ls='-.')
ax.plot([], [], c=plot[0].get_color(), ls='-', label=r'1-halo')
#
# 2-halo term
plot=ax.plot(self.L, self.P2hBare / self.P2h, 'r', ls='--')
ax.plot(self.L, self.P2hCounterTerm / self.P2h, c=plot[0].get_color(), ls='-.')
ax.plot(self.L, self.P2hCorrected / self.P2h, c=plot[0].get_color(), ls=':')
ax.plot([], [], c=plot[0].get_color(), ls='-', label=r'2-halo')
#
ax.legend(loc=4, fontsize='x-small', labelspacing=0.1)
ax.set_xscale('log', nonposx='clip')
#ax.set_yscale('log', nonposy='clip')
ax.set_xlabel(r'$\ell$')
ax.set_ylabel(r'Fraction')
#
path = self.pathFig+"fraction_counterterms_p2d_"+self.name+".pdf"
fig.savefig(path, bbox_inches='tight')
fig.clf()
#plt.show()
# def plotdP(self):
#
# # P
# fig = plt.figure(0)
# ax = plt.subplot(111)
# #
# factor = self.L*(self.L+1.)/(2.*np.pi)
# ax.loglog(L, factor*self.dP1h, '--', label=r'1h')
# ax.loglog(L, factor*self.dP2h, '--', label=r'2h')
# ax.loglog(L, factor*(self.dP1h+self.dP2h), 'k', label=r'tot')
# ax.grid()
# ax.legend(loc=3)
# ax.set_xlabel(r'l')
# ax.set_ylabel(r'$\ell(\ell+1)\frac{dP}{d\delta}(\ell)$')
# ax.set_title(r'Power spectrum response')
# path = "./figures/pn2d/dp_"+self.name+".pdf"
# # fig.savefig(path, bbox_inches='tight')
#
# # P
# fig = plt.figure(1)
# ax = plt.subplot(111)
# #
# ax.semilogx(self.L, self.dP / self.P, 'b-')
# ax.grid()
# ax.set_xlabel(r'l')
# ax.set_ylabel(r'$\frac{dlnP}{d\delta}(\ell)$')
# ax.set_title(r'Power spectrum response')
#
# plt.show()
# def plotdPdz(self, l=1.e3):
# A = np.linspace(self.aMin, self.aMax, 201)
# Z = 1./A-1.
# print Z
# Chi = np.array(map(lambda a: self.U.ComovDist(a, 1.), A))
# H = np.array(map(lambda a: self.U.Hubble(a), A))
# W = np.array(map(self.Weight.f, A))
# dChidA = 3.e5 / (H*A**2)
# dChidZ = 3.e5 / H
#
# # redshift contributions for P1h and P2h
# f = lambda a: self.integrandP(a, self.Pn.fP_1h, l)
# dP1h_da = np.array(map(f, A))
# f = lambda a: self.integrandP(a, self.Pn.fP_2h, l)
# dP2h_da = np.array(map(f, A))
# #
# dP1h_dz = dP1h_da * A**2
# dP2h_dz = dP2h_da * A**2
#
# # redshift contributions for Pshot
# if hasattr(self.Weight, 'fdPshotNoise_da'):
# f = lambda a: self.Weight.fdPshotNoise_da(a, l)
# dPshot_da = np.array(map(f, A))
# dPshot_dz = dPshot_da * A**2
#
# '''
# def f(a):
# z = 1./a-1.
# chi = self.U.ComovDist(a, 1.)
# return self.Pn.fP_1h(l/chi, z)
# P3d_1h = np.array(map(f, A))
#
# def f(a):
# z = 1./a-1.
# chi = self.U.ComovDist(a, 1.)
# return self.Pn.fP_2h(l/chi, z)
# P3d_2h = np.array(map(f, A))
#
#
# fig=plt.figure(0)
# ax=fig.add_subplot(111)
# #
# ax.plot(A, A* dChidA * W**2/Chi**2 / np.max(A* dChidA * W**2/Chi**2), 'k', lw=2, label=r'kernel')
# #
# ax.plot(A, P3d_1h / np.max(P3d_1h), 'b--', lw=2, label=r'$P_\text{3d}^\text{1h}$')
# ax.plot(A, A*dP1h_da/np.max(A*dP1h_da), 'b', lw=2, label=r'integrand 1h')
# #
# ax.plot(A, P3d_2h / np.max(P3d_2h), 'g--', lw=2, label=r'$P_\text{3d}^\text{2h}$')
# ax.plot(A, A*dP2h_da/np.max(A*dP2h_da), 'g', lw=2, label=r'integrand 2h')
# #
# ax.legend(loc=3)
# ax.set_xscale('log', nonposx='clip')
# #ax.set_yscale('log', nonposy='clip')
# ax.set_xlabel(r'scale factor $a$')
# ax.set_ylabel(r'$d C_{\ell='+str(int(l))+'} / d\ln a$')
# #
# path = "./figures/pn2d/dp2d_dlna_"+self.name+".pdf"
# #fig.savefig(path, bbox_inches='tight')
# '''
#
# fig=plt.figure(1)
# ax=fig.add_subplot(111)
# #
# # factors in the integrands
# #ax.plot(Z, dChidZ * W**2/Chi**2 / np.max(dChidZ * W**2/Chi**2), 'k', lw=2, label=r'kernel')
# #ax.plot(Z, P3d_1h / np.max(P3d_1h), 'b--', lw=2, label=r'$P_\text{3d}^\text{1h}$')
# #ax.plot(Z, P3d_2h / np.max(P3d_2h), 'g--', lw=2, label=r'$P_\text{3d}^\text{2h}$')
# #
# # normalized integrands
## ax.plot(Z, dP1h_dz / np.max(dP1h_dz), 'b', lw=2, label=r'1h')
## ax.plot(Z, dP2h_dz / np.max(dP2h_dz), 'g', lw=2, label=r'2h')
## if hasattr(self.Weight, 'fdPshotNoise_da'):
## ax.plot(Z, dPshot_dz / np.max(dPshot_dz), 'r', lw=2, label=r'shot')
## ax.plot(Z, (dP1h_dz+dP2h_dz+dPshot_dz) / np.max(dP1h_dz+dP2h_dz+dPshot_dz), 'k', lw=2, label=r'1h+2h+shot')
## else:
## ax.plot(Z, (dP1h_dz+dP2h_dz) / np.max(dP1h_dz+dP2h_dz), 'k', lw=2, label=r'1h+2h')
# #
# # non-normalized ingredients
# ax.plot(Z, dP1h_dz, 'b', lw=2, label=r'1h')
# ax.plot(Z, dP2h_dz, 'g', lw=2, label=r'2h')
# if hasattr(self.Weight, 'fdPshotNoise_da'):
# ax.plot(Z, dPshot_dz, 'r', lw=2, label=r'shot')
# ax.plot(Z, dP1h_dz+dP2h_dz+dPshot_dz, 'k', lw=2, label=r'1h+2h+shot')
# else:
# ax.plot(Z, dP1h_dz+dP2h_dz, 'k', lw=2, label=r'1h+2h')
# #
# ax.legend(loc=4)
# #ax.set_xscale('log', nonposx='clip')
# ax.set_yscale('log', nonposy='clip')
# ax.set_xlabel(r'redshift $z$')
# ax.set_ylabel(r'$d C_{\ell='+str(int(l))+'} / dz$')
# #
# path = "./figures/pn2d/dp2d_dz"+self.name+".pdf"
# #fig.savefig(path, bbox_inches='tight')
#
# '''
# fig=plt.figure(2)
# ax=fig.add_subplot(111)
# #
# ax.plot(Z, dP1h_dz / np.max(dP1h_dz), 'b', lw=2, label=r'1h')
# #
# ax.plot(Z, dP2h_dz / np.max(dP2h_dz), 'g', lw=2, label=r'2h')
# #
# ax.legend(loc=4)
# #ax.set_xscale('log', nonposx='clip')
# #ax.set_yscale('log', nonposy='clip')
# ax.set_xlabel(r'redshift $z$')
# ax.set_ylabel(r'$d C_{\ell='+str(int(l))+'} / dz$ [arbitrary unit]')
# #
# path = "./figures/pn2d/dp2d_dz_summary"+self.name+".pdf"
# #fig.savefig(path, bbox_inches='tight')
# '''
#
# plt.show()
def plotdPdz_color(self):
# redshifts to evaluate
nZ = 51
zMin = 1./self.aMax-1.
zMax = 5. #1./self.Weight.aMin-1.
dZ = (zMax-zMin)/nZ
Z = np.linspace(zMin, zMax, nZ)
zEdges = np.linspace(zMin-0.5*dZ, zMax+0.5*dZ, nZ+1)
A = 1./(1.+Z)
Chi = np.array(map(lambda a: self.U.ComovDist(a, 1.), A))
H = np.array(map(lambda a: self.U.Hubble(a), A))
W = np.array(map(self.Weight.f, A))
dChidA = 3.e5 / (H*A**2)
dChidZ = 3.e5 / H
# multipoles to evaluate
nL = 51 #51
lnlMin = np.log10(10.)
lnlMax = np.log10(1.e4)
dlnl = (lnlMax-lnlMin)/nL
lnL = np.linspace(lnlMin, lnlMax, nL)
lnlEdges = np.linspace(lnlMin-0.5*dlnl, lnlMax+0.5*dlnl, nL+1)
L = 10.**lnL
lEdges = 10.**lnlEdges
'''
# 1h
dP1hdz = np.zeros((nZ, nL))
for iL in range(nL):
l = L[iL]
f = lambda a: self.integrandP(a, self.Pn.fP_1h, l)
dP1hdz[:,iL] = np.array(map(f, A))
dP1hdz[:,iL] *= A**2
#dP1hdz[:,iL] /= np.trapz(Z, dP1hdz[:,iL])
print "done 1h"
# 2h
dP2hdz = np.zeros((nZ, nL))
for iL in range(nL):
l = L[iL]
f = lambda a: self.integrandP(a, self.Pn.fP_2h, l)
dP2hdz[:,iL] = np.array(map(f, A))
dP2hdz[:,iL] *= A**2
#dP2hdz[:,iL] /= np.trapz(Z, dP2hdz[:,iL])
print "done 2h"
# shot noise
if hasattr(self.Weight, 'fdPshotNoise_da'):
dPshotdz = np.zeros((nZ, nL))
for iL in range(nL):
l = L[iL]
f = lambda a: self.integrandP(a, self.Weight.fdPshotNoise_da, l)
dPshotdz[:,iL] = np.array(map(f, A))
dPshotdz[:,iL] *= A**2
#dPshotdz[:,iL] /= np.trapz(Z, dPshotdz[:,iL])
print "done shot noise"
'''
# total
dPdz = np.zeros((nZ, nL))
for iL in range(nL):
l = L[iL]
f = lambda a: self.integrandP(a, self.Pn.fP, l)
dPdz[:,iL] = np.array(map(f, A))
dPdz[:,iL] *= A**2
# # normalize so int dz dP/dz = 1 for all ell
# dPdz[:,iL] /= np.trapz(Z, dPdz[:,iL])
dPdz = np.abs(dPdz)
print "done total"
# show the 2d color plot
zz,ll = np.meshgrid(zEdges, lEdges, indexing='ij')
fig=plt.figure(0)
ax=fig.add_subplot(111)
#
cp=ax.pcolormesh(zz, ll, np.log(dPdz), linewidth=0, rasterized=True, cmap=plt.cm.YlOrRd_r)
#
cp.set_clim(0., 13.)
cb=fig.colorbar(cp)
cb.ax.set_title(r'$\text{ln}\left(\frac{dC^0_\ell}{dz}\right)$')
ax.set_yscale('log')
ax.set_xlabel(r'$z$')
ax.set_ylabel(r'$\ell$')
#ax.set_title(r'$\frac{dC^0_\ell}{dz}$, normalized to $\int dz\; \frac{dC^0_\ell}{dz} = 1$', fontsize=18)
plt.show()
def plotPlanckCIB(self):
# load Planck data points (Planck 13 XXX)
nu = self.Weight.nu
name = str(int(nu/1.e9))
p13 = Planck13CIBData()
L = p13.PlanckPCIB['ell']
P = p13.PlanckPCIB[name]
sP = p13.PlanckPCIB[name+'_error']
Pshot = p13.PlanckPCIBShot[name]
# sensitivity in Jy/rad
# beam in arcmin
def fdetectorNoise(l, sensitivity, beam):
beam *= np.pi/180./60. # convert arcmin to rad
sigma_beam = beam / np.sqrt(8.*np.log(2.)) # convert fwhm to sigma
return sensitivity**2 * np.exp(l**2 * sigma_beam**2)
# compare to some noise levels
# !!!!! these are only relevant for \sim 545GHz, for Planck and CCAT
f = lambda l: fdetectorNoise(l, sensitivity=13.5, beam=4.8)
noisePlanck = np.array(map(f, self.L))
f = lambda l: fdetectorNoise(l, sensitivity=1.2, beam=0.5)
noiseCCAT = np.array(map(f, self.L))
# P
fig = plt.figure(0)
ax = plt.subplot(111)
#
# halo model
# ax.plot(self.L, self.P2h, c=plt.cm.autumn(0.4), lw=1.5, label=r'2-halo')
# ax.plot(self.L, self.P1h, c=plt.cm.autumn(0.7), lw=1.5, label=r'1-halo')
# ax.plot(self.L, self.Pnoise, c=plt.cm.autumn(0.9), lw=1.5, label=r'1-galaxy')
# ax.plot(self.L, (self.P1h+self.P2h+self.Pnoise), 'r', lw=3, label=r'total')
#
ax.plot(self.L, self.P2h, c='r', lw=1.5, label=r'2-halo')
ax.plot(self.L, self.P1h, c='g', lw=1.5, label=r'1-halo')
ax.plot(self.L, self.Pnoise, c='y', lw=1.5, label=r'1-galaxy')
ax.plot(self.L, (self.P1h+self.P2h+self.Pnoise), 'b', lw=3, label=r'total')
#
# Planck data points
ax.errorbar(L, P, yerr=sP, fmt='.', c='k', label='Planck13 XXX')
#
# noise levels
ax.plot(self.L, noisePlanck, c='gray', ls='--', lw=1, label=r'Planck noise')
ax.plot(self.L, noiseCCAT, c='grey', ls='-.', lw=1, label=r'CCAT noise')
#
ax.set_xscale('log', nonposx='clip')
ax.set_yscale('log', nonposy='clip')
ax.set_xlim((10., 5.e4))
ax.set_ylim((1.e-1, 1.e6))
ax.legend(loc=3, numpoints=1, fontsize=14, framealpha=1)
ax.set_xlabel(r'$\ell$')
ax.set_ylabel(r'$C_\ell^{545\text{GHz}}$ [Jy$^2$/sr]')
#
#path="./figures/pn2d/p_"+str(self.Weight)+".pdf"
#path="./figures/cib_penin12/planck13model1_vs_planck"+name+".pdf"
path="./figures/cib_penin12/penin1214_"+name+"GHz.pdf"
#path="./figures/cib_planck13/planck13_"+name+"GHz.pdf"
fig.savefig(path, bbox_inches='tight')
plt.show()
##################################################################################
def plotT(self, func=None):
factor = 1.#self.L*(self.L+1.)/(2.*np.pi)
# T
fig=plt.figure(0)
ax=plt.subplot(111)
#
ax.plot(self.L, factor*self.Ptot**2, 'k--', lw=3, label=r'$C^2$')
#
ax.plot(self.L, factor*self.Ttot, 'k', lw=3, label=r'$T^\text{total}$')
ax.plot(self.L, factor*self.T1h, 'r', lw=1, label=r'$T^{1h}$')
ax.plot(self.L, factor*self.T2h, 'orange', lw=1, label=r'$T^{2h}$')
ax.plot(self.L, factor*self.T4h, 'gold', lw=1, label=r'$T^{4h}$')
ax.plot(self.L, factor*self.Tssv, 'm', lw=1, label=r'$T^\text{SSV}$')
ax.plot(self.L, factor*self.Tnoise, 'fuchsia', lw=1, label=r'$T^\text{shot}$')
#
if func is not None:
F = np.array(map(func, self.L))
ax.plot(self.L, factor*F**2, 'b', lw=2)
#
ax.legend(loc=1, fontsize='x-small', labelspacing=0.1)
ax.set_xscale('log', nonposx='clip')
ax.set_yscale('log', nonposy='clip')
ax.set_xlabel(r'\ell')
#ax.set_xlim((50., 5.e4))
#ax.set_ylabel(r'$T(\ell)$')
ax.set_ylabel(r'$\mathcal{T}^0_{\ell, L-\ell, \ell, -L-\ell}$')
path = "./figures/pn2d/t_"+self.name+"_test.pdf"
fig.savefig(path, bbox_inches='tight')
plt.show()
def plotIntegrandT(self, l=5.e2):
A = np.linspace(self.aMin, self.aMax, 101)
Z = 1./A-1.
Chi = np.array(map(lambda a: self.U.ComovDist(a, 1.), A))
H = np.array(map(lambda a: self.U.Hubble(a), A))
W = np.array(map(self.Weight.f, A))
dChidA = 3.e5 / (H*A**2)
dChidZ = 3.e5 / H
def f(a):
z = 1./a-1.
chi = self.U.ComovDist(a, 1.)
return self.Pn.fT1hinterp(l/chi, z)
T3d_1h = np.array(map(f, A))
# print T3d_1h
# integrand
f = lambda a: self.integrand(a, self.Pn.fT1hinterp, l)
dT1h_da = np.array(map(f, A))
dT1h_dz = dT1h_da * A**2
# print dT1h_dz
fig=plt.figure(0)
ax=fig.add_subplot(111)
#
ax.plot(A, A* dChidA * W**4/Chi**6 / np.max(A* dChidA * W**4/Chi**6), 'k', lw=2, label=r'kernel')
ax.plot(A, T3d_1h / np.max(T3d_1h), 'b--', lw=2, label=r'$T_\text{1h}^\text{3d}$')
ax.plot(A, A*dT1h_da/np.max(A*dT1h_da), 'b', lw=2, label=r'integrand for T1h')
#
ax.legend(loc=2)
ax.set_xscale('log', nonposx='clip')
#ax.set_yscale('log', nonposy='clip')
ax.set_xlabel(r'scale factor $a$')
ax.set_ylabel(r'$d T_{\ell='+str(int(l))+'} / d\ln a$')
#
path = "./figures/pn2d/dt2d_dlna_"+self.name+".pdf"
#fig.savefig(path, bbox_inches='tight')
fig=plt.figure(1)
ax=fig.add_subplot(111)
#
ax.plot(Z, dChidZ * W**4/Chi**6 / np.max(dChidZ * W**4/Chi**6), 'k', lw=2, label=r'kernel')
ax.plot(Z, T3d_1h / np.max(T3d_1h), 'b--', lw=2, label=r'$T_\text{1h}^\text{3d}$')
ax.plot(Z, dT1h_dz/np.max(dT1h_dz), 'b', lw=2, label=r'integrand for T1h')
#
ax.legend(loc=2)
#ax.set_xscale('log', nonposx='clip')
#ax.set_yscale('log', nonposy='clip')
ax.set_xlabel(r'redshift $z$')
ax.set_ylabel(r'$d T_{\ell='+str(int(l))+'} / dz$')
#
path = "./figures/pn2d/dt2d_dz_"+self.name+".pdf"
#fig.savefig(path, bbox_inches='tight')
plt.show()
##################################################################################
##################################################################################
class P2dCross(P2dAuto):
    """Projected cross-power spectrum between two tracers.

    Same machinery as P2dAuto, except that the projection kernel is the
    product of the two tracers' weights, the scale-factor integration
    bounds are the overlap of the two kernels' ranges, and the cached
    tables/figures live in the p2dcross_* directories.
    """

    def __init__(self, U, P3dCross, Weight1, Weight2, name="", pNoise=lambda l: 0., save=False, nProc=1):
        """Same arguments as P2dAuto.__init__, with the single Weight
        replaced by the pair (Weight1, Weight2)."""
        self.U = U
        self.Pn = P3dCross
        self.Weight1 = Weight1
        self.Weight2 = Weight2
        self.name = str(self.Weight1) + str(self.Weight2) + name
        self.pNoise = pNoise
        self.nProc = nProc
        # z-integrals run over the overlap of the two projection kernels
        self.aMin = max(self.Weight1.aMin, self.Weight2.aMin)
        self.aMax = min(self.Weight1.aMax, self.Weight2.aMax)
        # multipole bin centers
        self.L = np.genfromtxt("./input/Lc.txt")
        # output and figure directories, created on demand
        self.pathOut = "./output/p2d/p2dcross_" + self.name + "/"
        self.pathFig = "./figures/p2d/p2dcross_" + self.name + "/"
        for directory in (self.pathOut, self.pathFig):
            if not os.path.exists(directory):
                os.makedirs(directory)
        # precompute if requested, then load the cached table
        if save:
            self.saveP()
        self.loadP()

    ##################################################################################

    def computeP(self, fp3d):
        '''Compute P2d for all self.L at once,
        given the 3d power spectrum fp3d.
        Trapezoid rule in scale factor, sampling fp3d at k = (l + 0.5)/chi.
        '''
        zGrid = self.Pn.Z.copy()
        if zGrid[0] == 0:
            # drop z=0, where chi vanishes and the kernel diverges
            zGrid = zGrid[1:]
        aGrid = 1. / (1. + zGrid)
        chi = self.U.bg.comoving_distance(zGrid)
        # kernel: dchi/da * W1(a) * W2(a) / chi^2, with c = 3.e5 km/s
        kernel = 3.e5 / (self.U.hubble(zGrid) * aGrid**2)
        kernel *= self.Weight1.f(aGrid)
        kernel *= self.Weight2.f(aGrid)
        kernel /= chi**2
        fp3dVect = np.vectorize(fp3d)
        rows = [fp3dVect((l + 0.5) / chi, zGrid) for l in self.L]
        integrand = kernel[None, :] * np.array(rows)
        # aGrid decreases along the integration axis, hence the sign flip
        return np.trapz(-integrand, aGrid, axis=-1)

    ##################################################################################

    def integrandP(self, a, fP, l):
        """dC_ell/da for the cross spectrum: kernel W1*W2/chi^2 times the
        3d spectrum fP evaluated at k = l/chi."""
        z = 1. / a - 1.
        chi = self.U.ComovDist(a, self.U.a_obs)
        out = 3.e5 / (self.U.Hubble(a) * a**2)
        out *= self.Weight1.f(a) * self.Weight2.f(a)
        out /= chi**2
        out *= fP(l / chi, z)
        return out

    def integrandT(self, a, fP, l):
        """dT_ell/da for the cross trispectrum: kernel W1^2*W2^2/chi^6
        times fP evaluated at k = l/chi."""
        z = 1. / a - 1.
        chi = self.U.ComovDist(a, self.U.a_obs)
        out = 3.e5 / (self.U.Hubble(a) * a**2)
        out *= self.Weight1.f(a)**2 * self.Weight2.f(a)**2
        out /= chi**6
        out *= fP(l / chi, z)
        return out
##################################################################################
##################################################################################
class Planck13CIBData(object):
    """Measured CIB power spectra from Planck13 XXX.

    Holds the shot-noise levels (table 9) and the band-power measurements
    with their errors (table D2) from the Planck 2013 XXX CIB paper, keyed
    by frequency in GHz (e.g. '545') or by frequency pair for cross spectra
    (e.g. '857_545'). All spectra are in Jy^2/sr.
    """
    # here name has to be the frequency of the CIB map
    def __init__(self):
        # Shot noise on CIB power spectra
        # table 9 from Planck13 XXX on CIB
        # unit is Jy^2 / sr
        self.PlanckPCIBShot = {}
        self.PlanckPCIBShot['857'] = 5364.
        self.PlanckPCIBShot['857_545'] = 2701.
        self.PlanckPCIBShot['857_353'] = 953.
        self.PlanckPCIBShot['857_217'] = 181.
        self.PlanckPCIBShot['545'] = 1690.
        self.PlanckPCIBShot['545_353'] = 626.
        self.PlanckPCIBShot['545_217'] = 121.
        self.PlanckPCIBShot['353'] = 262.
        self.PlanckPCIBShot['353_217'] = 54.
        self.PlanckPCIBShot['217'] = 21.
        # Measured CIB power spectra
        # table D2 from Planck13 XXX on CIB
        # unit is Jy^2/sr
        self.PlanckPCIB = {}
        # ell values (shared by all the band powers below)
        self.PlanckPCIB['ell'] = np.array([53., 114., 187., 320., 502., 684., 890., 1158., 1505., 1956., 2649.])
        # Auto and cross spectra of the CIB: these maps are CMB-free, Galactic dust-free, corrected for SZ contamination and CIB contamination from CMB template
        # The values of the spectra at the first two ell values are only upper limits
        # (zeros mark bins without a published measurement)
        self.PlanckPCIB['857'] = np.array([0., 0., 2.87e5, 1.34e5, 7.20e4, 4.38e4, 3.23e4, 2.40e4, 1.83e4, 1.46e4, 1.16e4])
        self.PlanckPCIB['857_545'] = np.array([0., 0., 1.30e5, 6.36e4, 3.53e4, 2.21e4, 1.63e4, 1.22e4, 9.31e3, 7.38e3, 5.91e3])
        self.PlanckPCIB['857_353'] = np.array([0., 0., 4.30e4, 2.20e4, 1.25e4, 7.99e3, 5.88e3, 4.25e3, 3.24e3, 2.54e3, 0.])
        self.PlanckPCIB['857_217'] = np.array([0., 0., 9.70e3, 5.26e3, 3.03e3, 1.88e3, 1.31e3, 9.18e2, 7.00e2, 5.38e2, 0.])
        self.PlanckPCIB['857_143'] = np.array([0., 0., 1.84e3, 1.06e3, 6.52e2, 3.86e2, 2.55e2, 1.76e2, 1.23e2, 1.03e2, 0.])
        #
        self.PlanckPCIB['545'] = np.array([0., 0., 6.63e4, 3.34e4, 1.91e4, 1.25e4, 9.17e3, 6.83e3, 5.34e3, 4.24e3, 3.42e3])
        self.PlanckPCIB['545_353'] = np.array([0., 0., 2.22e4, 1.19e4, 6.93e3, 4.61e3, 3.39e3, 2.50e3, 1.93e3, 1.52e3, 0.])
        self.PlanckPCIB['545_217'] = np.array([0., 0., 4.97e3, 2.79e3, 1.65e3, 1.06e3, 7.41e2, 5.38e2, 4.30e2, 3.30e2, 0.])
        self.PlanckPCIB['545_143'] = np.array([0., 0., 1.01e3, 5.98e2, 3.77e2, 2.29e2, 1.54e2, 1.03e2, 7.09e1, 5.89e1, 0.])
        #
        self.PlanckPCIB['353'] = np.array([0., 0., 7.88e3, 4.35e3, 2.60e3, 1.74e3, 1.29e3, 9.35e2, 7.45e2, 6.08e2, 0.])
        self.PlanckPCIB['353_217'] = np.array([0., 0., 1.75e3, 1.02e3, 6.21e2, 3.97e2, 2.87e2, 1.99e2, 1.59e2, 1.35e2, 0.])
        self.PlanckPCIB['353_143'] = np.array([0., 0., 3.61e2, 2.32e2, 1.48e2, 9.42e1, 6.33e1, 4.56e1, 2.77e1, 3.53e1, 0.])
        #
        self.PlanckPCIB['217'] = np.array([0., 0., 4.17e2, 2.62e2, 1.75e2, 1.17e2, 8.82e1, 6.42e1, 3.34e1, 4.74e1, 0.])
        self.PlanckPCIB['217_143'] = np.array([0., 0., 1.04e2, 7.49e1, 5.87e1, 3.93e1, 2.64e1, 2.21e1, 1.07e1, 1.45e1, 0.])
        #
        self.PlanckPCIB['143'] = np.array([0., 0., 3.64e1, 3.23e1, 2.81e1, 2.27e1, 1.84e1, 1.58e1, 1.25e1, 0., 0.])
        # Error bars on the power spectra:
        self.PlanckPCIB['857_error'] = np.array([2.76e6, 7.99e5, 0.37e5, 0.08e5, 0.26e4, 0.18e4, 0.09e4, 0.05e4, 0.03e4, 0.02e4, 0.01e4])
        self.PlanckPCIB['857_545_error'] = np.array([9.73e5, 3.23e5, 0.13e5, 0.30e4, 0.10e4, 0.07e4, 0.04e4, 0.02e4, 0.11e3, 0.07e3, 0.06e3])
        self.PlanckPCIB['857_353_error'] = np.array([2.91e5, 1.05e5, 0.41e4, 0.11e4, 0.06e4, 0.39e3, 0.27e3, 0.17e3, 0.10e3, 0.07e3, 0.])
        self.PlanckPCIB['857_217_error'] = np.array([6.43e4, 2.49e4, 1.22e3, 0.53e3, 0.32e3, 0.22e3, 0.16e3, 0.87e2, 0.23e2, 0.12e2, 0.])
        self.PlanckPCIB['857_143_error'] = np.array([1.81e4, 5.12e3, 0.45e3, 0.12e3, 0.59e2, 0.41e2, 0.30e2, 0.23e2, 0.16e2, 0.15e2, 0.])
        #
        self.PlanckPCIB['545_error'] = np.array([3.74e5, 1.45e5, 0.51e4, 0.12e4, 0.04e4, 0.03e4, 0.17e3, 0.10e3, 0.06e3, 0.04e3, 0.04e3])
        self.PlanckPCIB['545_353_error'] = np.array([1.15e5, 4.84e4, 0.16e4, 0.05e4, 0.23e3, 0.16e3, 0.11e3, 0.07e3, 0.04e3, 0.03e3, 0.])
        self.PlanckPCIB['545_217_error'] = np.array([2.58e4, 1.16e4, 0.48e3, 0.21e3, 0.12e3, 0.09e3, 0.63e2, 0.35e2, 0.12e2, 0.07e2, 0.])
        self.PlanckPCIB['545_143_error'] = np.array([7.04e3, 2.53e3, 0.19e3, 0.67e2, 0.39e2, 0.27e2, 0.19e2, 0.14e2, 1.80e1, 1.73e1, 0.])
        #
        self.PlanckPCIB['353_error'] = np.array([3.68e4, 1.66e4, 0.53e3, 0.18e3, 0.10e3, 0.07e3, 0.05e3, 0.33e2, 0.22e2, 0.16e2, 0.])
        self.PlanckPCIB['353_217_error'] = np.array([8.01e3, 3.82e3, 0.15e3, 0.06e3, 0.38e2, 0.27e2, 0.20e2, 0.14e2, 0.10e2, 0.05e2, 0.])
        self.PlanckPCIB['353_143_error'] = np.array([2.05e3, 8.26e2, 0.62e2, 0.24e2, 0.14e2, 1.06e1, 0.83e1, 0.91e1, 1.11e1, 0.69e1, 0.])
        #
        self.PlanckPCIB['217_error'] = np.array([1.78e3, 8.47e2, 0.47e2, 0.20e2, 0.13e2, 0.10e2, 0.89e1, 1.61e1, 2.15e1, 0.65e1, 0.])
        self.PlanckPCIB['217_143_error'] = np.array([4.74e2, 1.89e2, 0.19e2, 0.81e1, 0.58e1, 0.50e1, 0.52e1, 1.19e1, 1.65e1, 0.54e1, 0.])
        #
        self.PlanckPCIB['143_error'] = np.array([1.55e2, 6.41e1, 0.73e1, 0.35e1, 0.30e1, 0.29e1, 0.35e1, 0.91e1, 1.28e1, 0., 0.])

    ##################################################################################

    def plotPCIB(self, name='353'):
        """Plot the measured band powers (with errors) and the quoted
        shot-noise level for the given frequency / frequency-pair key."""
        L = self.PlanckPCIB['ell']
        P = self.PlanckPCIB[name]
        sP = self.PlanckPCIB[name+'_error']
        Pshot = self.PlanckPCIBShot[name]
        # P
        fig = plt.figure(0)
        ax = plt.subplot(111)
        #
        ax.errorbar(L, P, yerr=sP, fmt='.', c='k', label=name)
        ax.plot(L, Pshot*np.ones_like(L), 'b--', label=r'quoted shot noise')
        #
        ax.set_xscale('log', nonposx='clip')
        ax.set_yscale('log', nonposy='clip')
        ax.legend(loc=1, numpoints=1)
        ax.set_xlabel(r'\ell')
        ax.set_ylabel(r'$C_\ell^\text{CIB}$')
        path = "./figures/pn2d/planck15_cib_"+name+".pdf"
        #fig.savefig(path)
        plt.show()
|
EmmanuelSchaanREPO_NAMEHaloGenPATH_START.@HaloGen_extracted@HaloGen-master@p2d.py@.PATH_END.py
|
{
"filename": "plot_Quintom_DR12.py",
"repo_name": "ja-vazquez/SimpleMC",
"repo_path": "SimpleMC_extracted/SimpleMC-master/simplemc/plots/plot_Quintom_DR12.py",
"type": "Python"
}
|
#!/usr/bin/env python
#TODO Add Omega_de to the plots
from simplemc.plots.plot_Quintom_variables import *
from simplemc.models.QuintomCosmology import QuintomCosmology
from simplemc.models.LCDMCosmology import LCDMCosmology
from simplemc.cosmo.paramDefs import *
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker
import numpy as np
import pylab
# --- scan configuration ---
steps = 9                   # number of parameter values scanned along the colorbar
coupling = 0                # default coupling; overridden in some cases below
zl = np.arange(0, 3, 0.05)  # redshift grid on which all curves are evaluated
fname = 'Quintessence'      # which model/case to plot (selects a branch below)

# ---
# Select the cosmology object, the fixed companion parameters, the figure
# title ('name') and the colorbar label ('mlabel') for the chosen case.
if fname == 'Quintessence':
    T = QuintomCosmology(vary_mquin=True)
    name = fname
    mlabel = '$m_\phi$'
if fname == 'Phantom':
    T = QuintomCosmology(vary_mphan=True)
    name = fname
    mlabel = '$m_\psi$'
if fname == 'Quintom_mquin':
    T = QuintomCosmology(vary_mquin=True, vary_mphan=True)
    mphan = 1.2
    name = 'Quintom, $m_{\psi}$=%0.1f'%mphan
    mlabel = '$m_\phi$'
if fname == 'Quintom_mphan':
    T = QuintomCosmology(vary_mquin=False, vary_mphan=True)
    mphi = 1.2
    name = 'Quintom, $m_{\phi}$=%0.1f'%mphi
    mlabel = '$m_\psi$'
if fname == 'Quintom_coupling_mquin':
    T = QuintomCosmology(vary_mquin=True, vary_coupling=True)
    mphan = 1.2
    coupling = 4.0
    name = 'Quintom, $m_{\psi}$=%0.1f, $\\beta=%0.1f$'%(mphan, coupling)
    mlabel = '$m_\phi$'
if fname == 'Quintom_coupling_mphan':
    T = QuintomCosmology(vary_mphan=True, vary_coupling=True)
    mphi = 1.0 #1.2
    coupling = 10 #6.0
    name = 'Quintom, $m_{\phi}$=%0.1f, $\\beta=%0.1f$'%(mphi, coupling)
    mlabel = '$m_\psi$'
if fname == 'Quintom_coupling_both':
    T = QuintomCosmology(vary_mquin=True, vary_mphan=True, vary_coupling=True)
    mphi = 2.0
    mphan = 1.0
    coupling = -1
    name = 'Quintom, $m_{\phi}$=%0.1f, $m_{\psi}$=%0.1f'%(mphi, mphan)
    mlabel = '$\\beta$'

# Range of the scanned parameter.
# NOTE(review): 'min' and 'max' shadow the Python builtins for the rest of
# this script (only tolerable because builtins min/max are not used below).
if fname == 'Quintom_coupling_both':
    min, max = (4., 8.)
else:
    min, max = (0.1, 2.5)
if coupling < 0:
    min, max = (-10, -1.)
step = (max-min)/steps

# Parameter objects re-used (via setValue) at each step of the scan
mquin_ = mquin_par
mphan_ = mphan_par
coupling_ = coupling_par

# Accumulators: one entry per scanned parameter value
hh = []   # H(z) curves
ww = []   # w(z) curves
da = []   # D_A/r_d curves
dh = []   # z*D_H/(r_d*sqrt(z)) curves
zz = []   # redshift grids (one copy of zl per scanned value)
PP = []   # the scanned parameter values themselves
# Scan the selected parameter: for each value, update the cosmology and
# record w(z), H(z), and the BAO distance ratios on the grid zl.
for i in np.arange(min, max, step):
    # set the scanned parameter (and the fixed companions) for this case
    if fname == 'Quintessence':
        mquin_.setValue(i)
        T.updateParams([mquin_])
    if fname == 'Phantom':
        mphan_.setValue(i)
        T.updateParams([mphan_])
    if fname == 'Quintom_mquin':
        mquin_.setValue(i)
        mphan_.setValue(mphan)
        T.updateParams([mquin_, mphan_])
    if fname == 'Quintom_mphan':
        mphan_.setValue(i)
        mquin_.setValue(mphi)
        T.updateParams([mquin_, mphan_])
    if fname == 'Quintom_coupling_mquin':
        mquin_.setValue(i)
        mphan_.setValue(mphan)
        coupling_.setValue(coupling)
        T.updateParams([mquin_, mphan_, coupling_])
    if fname == 'Quintom_coupling_mphan':
        mquin_.setValue(mphi)
        mphan_.setValue(i)
        coupling_.setValue(coupling)
        T.updateParams([mquin_, mphan_, coupling_])
    if fname == 'Quintom_coupling_both':
        mquin_.setValue(mphi)
        mphan_.setValue(mphan)
        coupling_.setValue(i)
        T.updateParams([mquin_, mphan_, coupling_])
    # re-solve the background for the new parameters
    # (presumably integrates the field equations -- see QuintomCosmology)
    T.call_functions()
    ww.append([T.w_de(1./(1+z)) for z in zl])
    hh.append([T.Hubble_a(1./(1+z)) for z in zl])
    dh.append([T.HIOverrd(z)*z/fixer(z) for z in zl])
    da.append([T.DaOverrd(z)/fixer(z) for z in zl])
    PP.append(i)
    zz.append(zl)
#Planck best fit cosmology
# Reference LCDM curves (dashed black in the panels below)
T2 = LCDMCosmology()
x1 = [67.4*np.sqrt(T2.RHSquared_a(1./(1+z))) for z in zl]  # H(z) = H0*E(z), H0=67.4
x2 = [T2.HIOverrd(z)*z/fixer(z) for z in zl]
x3 = [T2.DaOverrd(z)/fixer(z) for z in zl]
#PLK-15
#T=LCDMCosmology(Obh2=0.02225,Om=0.3156,h=0.6727)

# Global matplotlib style for the figure
params1 = {'backend': 'pdf',
               'axes.labelsize': 18,
               'xtick.labelsize': 18,
               'ytick.labelsize': 18,
               'legend.fontsize': 16,
               'lines.markersize': 6,
               'font.size': 20,
               'text.usetex': True}
pylab.rcParams.update(params1)
## --- Plotting --
fig, (ax1, ax2, ax3, ax4)= plt.subplots(4, sharex=True, gridspec_kw={'hspace': 0}, figsize=(7,10))
fig.suptitle(name, fontsize=17, y=0.95)
## -- Plot 1
for x, w, z in zip(zz, ww, PP):
g = (float(z) - min)/(max - min)
b, r = 0, 1 - g
ax1.plot(x, w, color=(r, g, b))
if (fname == 'Quintessence') or (fname == 'Quintomcopphi'):
ax1.set_ylabel('$w(z)$', fontsize=20)
ax1.axhline(y=-1.0, color='k', linestyle='--')
if coupling < 0:
ax1.set_ylim(-3, 0.)
## -- Plot 2
for x, w, z in zip(zz, hh, PP):
g = (float(z)-min)/(max-min)
b, r = 0, 1-g
(l2,) = ax2.plot(x, w, color=(r, g, b))
dataHz = np.loadtxt('simplemc/data/Hz_all.dat')
redshifts, obs, errors = [dataHz[:,i] for i in [0,1,2]]
ax2.errorbar(redshifts, obs, errors, xerr=None, color='blue', marker='o', ls='None',
elinewidth =2, capsize=3, capthick = 1, alpha=1, markersize=4)
ax2.plot(zl, x1 , color='k', linestyle='--')
if (fname == 'Quintessence') or (fname =='Quintomcopphi'):
ax2.set_ylabel('$H(z)$', fontsize=20)
Z = [[0,0] , [0,0]]
mymap = mpl.colors.LinearSegmentedColormap.from_list('mycolors',['red','green'])
levels = np.arange(min, max+step, step)
CS3 = plt.contourf(Z, levels, cmap=mymap)
cbaxes = fig.add_axes([0.91, 0.1, 0.02, 0.78])
cbar = pylab.colorbar(CS3, cax=cbaxes)
cbar.set_label(mlabel, rotation=0, fontsize=18, labelpad=-10)
cbar.ax.tick_params(labelsize=12)
## -- Plot 3
for x, w, z in zip(zz, dh, PP):
g = (float(z)-min)/(max-min)
b, r = 0, 1-g
(l3,) = ax3.plot(x, w, color=(r ,g, b))
ax3.plot(zl, x2 , color='k', linestyle='--')
if (fname == 'Quintessence') or (fname == 'Quintomcopphi'):
ax3.set_ylabel("${\\rm zD_H(z)}/r_d\\sqrt{z}$")
## -- Plot 4
for x, w, z in zip(zz, da, PP):
g = (float(z)-min)/(max-min)
b, r = 0, 1-g
(l4,) = ax4.plot(x, w, color=(r, g, b))
ax4.plot(zl, x3, color='k', linestyle='--')
ax4.set_xlim(0.05, 3)
if (fname == 'Quintessence') or (fname == 'Quintomcopphi'):
ax4.set_ylabel("${\\rm D_M(z)}/r_d\\sqrt{z}$")
plot_errorbar(zCombBAO1, 1512.4/rd_fid_DR12, yerr=ersys(22.5, 11.0)/rd_fid_DR12,
color ='red', fmt='o', markersize=4, empty=empty2,
label="$\\rm{BOSS\ Galaxy\ DR12}$", ax=ax4)
plot_errorbar(zCombBAO2, 1975.2/rd_fid_DR12, yerr=ersys(26.6, 14.1)/rd_fid_DR12,
color ='red', fmt='o', markersize=4, empty=empty2, ax=ax4)
plot_errorbar(zCombBAO3, 2306.7/rd_fid_DR12, yerr=ersys(33.2, 16.7)/rd_fid_DR12,
color ='red', fmt='o', markersize=4, empty=empty2, ax=ax4)
plot_errorbar(zCombBAO1, fact*zCombBAO1/81.21, yerr=fact*zCombBAO1*ersys(2.17, 0.97)/(81.21)**2,
color ='green', fmt='o', markersize=4, empty=empty2, ax=ax3)
plot_errorbar(zCombBAO2, fact*zCombBAO2/90.90, yerr=fact*zCombBAO2*ersys(2.07, 1.08)/(90.90)**2,
color ='green', fmt='o', markersize=4, empty=empty2, ax=ax3)
plot_errorbar(zCombBAO3, fact*zCombBAO3/98.96, yerr=fact*zCombBAO3*ersys(2.21, 1.18)/(98.96)**2,
color ='green', fmt='o', markersize=4, empty=empty2, ax=ax3)
plot_errorbar(zLyaA, 11.28*(1+zLyaA), yerr=0.65*(1+ zLyaA), color ='red', fmt='o',
markersize=4, label="$\\rm{BOSS}\ \\mathrm{Ly}\\alpha\\mbox{-}\\rm{auto}\ \\rm{DR11}$",
empty=empty2, ax=ax4)
plot_errorbar(zLyaA, 9.18*zLyaA, yerr=0.28*zLyaA, color ='green', fmt='o',
markersize=4,empty=empty2, ax=ax3)
plot_errorbar(zLyaC, 10.8*(1+zLyaC), yerr=0.4*(1+zLyaC), color ='red', fmt='o',
markersize=4, label="$\\rm{BOSS}\ \\mathrm{Ly}\\alpha\\mbox{-}\\rm{cross}\ \\rm{DR11}$",
empty=empty2, ax=ax4)
plot_errorbar(zLyaC, 9.0*zLyaC, yerr=0.3*zLyaC, color ='green', fmt='o',
markersize=4, empty=empty2, ax=ax3)
#Axis
ax4.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax4.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
#ax4.xaxis.set_minor_formatter(plt.ScalarFormatter())
#ax4.xaxis.set_major_locator(plt.FixedLocator([0.1,1.0]))
#ax4.xaxis.set_minor_locator(plt.FixedLocator([0.2,0.5,2]))
ax4.set_xlabel("$z$")
#pylab.savefig("Fig1_"+fname+".pdf", bbox_inches='tight')
pylab.show()
|
ja-vazquezREPO_NAMESimpleMCPATH_START.@SimpleMC_extracted@SimpleMC-master@simplemc@plots@plot_Quintom_DR12.py@.PATH_END.py
|
{
"filename": "thin.py",
"repo_name": "rometsch/fargocpt",
"repo_path": "fargocpt_extracted/fargocpt-master/Tools/thin.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""Thin out the output of a fargocpt simulation. E.g. keep only every 100th snapshot.
"""
import os
import argparse
import numpy as np
from pathlib import Path
import shutil
from typing import List
def main():
    """Entry point: dispatch to inspect/extract/delete based on the CLI options."""
    opts = parse_args()
    # The chosen subcommand is identified by which attributes argparse set:
    # no Nptrn -> inspect; a destination -> extract; otherwise -> delete.
    if not hasattr(opts, "Nptrn"):
        inspect(opts.outputdir)
    elif hasattr(opts, "destination"):
        thin(opts.source, opts.Nptrn, force=opts.y,
             new_outputdir=opts.destination)
    else:
        thin(opts.outputdir, opts.Nptrn, force=opts.y)
def thin(outputdir: Path, Nptrn: slice, force: bool = False, new_outputdir: Path = None):
    """Thin out an output directory or copy a reduced version.

    Delete all but every Nth snapshot, or copy only scalar data and every Nth
    snapshot if new_outputdir is defined.
    Nptrn can be a single integer, a fancy index like 1:10 or 1:10:2 as in numpy
    (syntax Nstart:Nstop:step) or it can be 'keep15' where the number indicates
    the number of snapshots to keep.

    Args:
        outputdir (Path): Path to the output dir.
        Nptrn (slice): Keep every Nth snapshot.
        force (bool, optional): Skip asking for confirmation. Defaults to False.
        new_outputdir (Path, optional): Path to the new directory where to copy data. Defaults to None.

    Raises:
        FileExistsError: if new_outputdir is not None, exists and is not empty.
    """
    outputdir = Path(outputdir)
    if new_outputdir is None:
        new_outputdir = outputdir
    else:
        new_outputdir = Path(new_outputdir)
        if new_outputdir.exists():
            if len(os.listdir(new_outputdir)) > 0:
                raise FileExistsError(
                    f"New outputdir {new_outputdir} already exists and is not empty!")
        else:
            os.makedirs(new_outputdir)
    inds = get_inds(outputdir)
    print(f"Output at path {outputdir}")

    tmpfile = new_outputdir/"thin_newinds.tmp"
    if tmpfile.exists():
        print(f"Found temporary file {tmpfile}")
        second_tmpfile = new_outputdir/"thin_todel.tmp"
        if not second_tmpfile.exists():
            # BUGFIX: the message used to refer to 'thin_todel.txt', a file
            # that is never created; the companion file is 'thin_todel.tmp'.
            print("However, did not find second temp file 'thin_todel.tmp'. Exiting!")
            exit(1)
        print("Continuing with last settings...")
        # BUGFIX: the temp files are written to new_outputdir (see savetxt
        # calls below), so on resume they must be read back from there too —
        # not from outputdir, which is a different directory in extract mode.
        new_inds = np.genfromtxt(tmpfile, dtype="int")
        inds_to_del = np.genfromtxt(second_tmpfile, dtype="int")
    else:
        if Nptrn.startswith("keep"):
            # 'keepN' mode: retain N snapshots evenly spread over the run.
            Nkeep = int(Nptrn[4:])
            if Nkeep < 2:
                raise ValueError("Nkeep must be larger equal 2!")
            sel = np.linspace(0, len(inds)-1, num=Nkeep, endpoint=True, dtype=int)
            new_inds = [int(inds[s]) for s in sel]
        elif not ":" in Nptrn:
            # A single integer: keep exactly that snapshot.
            new_inds = [int(Nptrn)]
        else:
            # Fancy-index pattern 'start:stop[:step]'.
            try:
                istart, istop, istep = [
                    int(s) if s != "" else None for s in Nptrn.split(":")]
            except ValueError:
                istart, istop = [
                    int(s) if s != "" else None for s in Nptrn.split(":")]
                istep = 1
            sl = slice(istart, istop, istep)
            new_inds = inds[sl]
            # With an open right end, always keep the very last snapshot.
            if istop is None and inds[-1] not in new_inds:
                new_inds = np.append(new_inds, inds[-1])
        inds_to_del = [n for n in inds if not n in new_inds]

    print(f"Thinning down to {len(new_inds)} snapshots.")
    print("The following snapshots will be retained:")
    print(new_inds)
    print_size(outputdir, inds[0], len(new_inds))

    if not force:
        if outputdir == new_outputdir:
            query_str = "Do you want to proceed, delete data, and adjust the list files?\n[type 'y' if yes]: "
        else:
            query_str = "Do you want to proceed to copy the data?\n[type 'y' if yes]: "
        answer = input(query_str)
        if answer != "y":
            print("Abort.")
            exit(0)

    # Persist the plan so an interrupted run can be resumed.
    np.savetxt(new_outputdir/"thin_newinds.tmp", new_inds, fmt="%d")
    np.savetxt(new_outputdir/"thin_todel.tmp", inds_to_del, fmt="%d")

    if outputdir == new_outputdir:
        print("Deleting snapshots...")
        delete_snapshots(outputdir, inds_to_del)
    else:
        copy_output(outputdir, new_outputdir, new_inds)
    modify_time_files(outputdir, new_inds, new_outputdir=new_outputdir)

    os.remove(new_outputdir/"thin_newinds.tmp")
    os.remove(new_outputdir/"thin_todel.tmp")
    print("Done")
def copy_output(outputdir: Path, new_outputdir: Path, inds: List[int]):
    """Copy the content of an output dir to a new directory only keeping some snapshots.

    Args:
        outputdir (Path): Source output dir.
        new_outputdir (Path): Destination directory.
        inds (List[int]): Indices of snapshots to copy.
    """
    # First mirror everything except the bulky per-snapshot data.
    for entry in os.listdir(outputdir):
        if entry == "snapshots":
            continue
        source = outputdir / entry
        target = new_outputdir / entry
        copier = shutil.copytree if source.is_dir() else shutil.copy
        copier(source, target)
    # Then bring over only the requested snapshots.
    os.makedirs(new_outputdir / "snapshots")
    for snapshot_id in inds:
        shutil.copytree(outputdir / "snapshots" / f"{snapshot_id}",
                        new_outputdir / "snapshots" / f"{snapshot_id}")
def delete_snapshots(outputdir: Path, inds_to_del: List[int], print_progress: bool = True):
    """Delete the given snapshot directories from an output dir.

    Args:
        outputdir (Path): Path of the outputdir containing the directory 'snapshots'.
        inds_to_del (List[int]): List of indices to be deleted.
        print_progress (bool) [True]: Print progress of deletion.
    """
    # Hoisted out of the loop: the total never changes while deleting.
    # (The original recomputed len(inds_to_del) on every iteration.)
    Ntodel = len(inds_to_del)
    for k, n in enumerate(inds_to_del):
        if print_progress:
            print(
                f"\r{n} ({k+1} / {Ntodel}, {k/Ntodel*100:.1f}%)", end="", flush=True)
        p = outputdir / "snapshots" / f"{n}"
        if not p.exists():
            # Tolerate missing snapshots, e.g. when resuming an interrupted run.
            continue
        shutil.rmtree(p)
def modify_time_files(outputdir: Path, new_inds: List[int], new_outputdir: Path = None):
    """Modify the snapshot list and time file to only include the retained snapshots.

    Args:
        outputdir (Path): Path of the outputdir containing the directory 'snapshots'.
        new_inds (List[int]): List of indices to keep.
        new_outputdir (Path, optional): Path to new outputdir. Defaults to None.
    """
    if new_outputdir is None:
        new_outputdir = outputdir
    print("\nWriting new snapshot list...")
    np.savetxt(new_outputdir/"snapshots"/"list.txt", new_inds, fmt="%d")
    print("Writing new time file...")
    # Set for O(1) membership tests (new_inds may be a long list/array).
    keep = {int(i) for i in new_inds}
    lines = []
    with open(outputdir/"snapshots"/"timeSnapshot.dat", "r") as infile:
        for line in infile:
            stripped = line.strip()
            if not stripped:
                # BUGFIX: a blank line used to raise IndexError on strip()[0].
                continue
            if stripped[0] == "#":
                # Preserve comment/header lines verbatim.
                lines.append(line)
                continue
            ind = int(stripped.split()[0])
            if ind in keep:
                lines.append(line)
    with open(new_outputdir/"snapshots"/"timeSnapshot.dat", "w") as outfile:
        outfile.writelines(lines)
def inspect(outputdir):
    """Report how many snapshots the output directory contains.

    Args:
        outputdir (str): Path to the output directory.
    """
    snapshot_inds = get_inds(outputdir)
    n_snapshots = len(snapshot_inds)
    print(f"Output at path {outputdir}")
    print(f"contains {n_snapshots} snapshots.")
    print_size(outputdir, snapshot_inds[0], n_snapshots)
def print_size(outputdir, Nref, Nsnapshots):
    """Print a disk-usage estimate, using snapshot ``Nref`` as the reference size."""
    base = Path(outputdir)
    per_snapshot = size_of_dir(base / "snapshots" / f"{Nref}")
    # Everything except the snapshots directory counts as "rest".
    rest = sum(size_of_dir(p) for p in base.glob("*") if p.name != "snapshots")
    all_snapshots = Nsnapshots * per_snapshot
    print(f"Size of one snapshot: {sizeof_fmt(per_snapshot)}")
    print(f"Size of all snapshot: {sizeof_fmt(all_snapshots)}")
    print(f"Size of rest: {sizeof_fmt(rest)}")
    print(f"Size total: {sizeof_fmt(rest+all_snapshots)}")
def size_of_dir(path):
    """Return the total size in bytes of all files below ``path`` (recursively)."""
    total = 0
    for entry in Path(path).glob("**/*"):
        if entry.is_file():
            total += entry.stat().st_size
    return total
def get_inds(outputdir):
    """Look up the inds of snapshots in the directory.

    Args:
        outputdir (str): Path to the output directory.

    Returns:
        numpy.ndarray of int: Indices of snapshots (always 1-d).
    """
    list_file = os.path.join(outputdir, "snapshots", "list.txt")
    inds = np.genfromtxt(list_file, dtype=int)
    # BUGFIX: np.genfromtxt returns a 0-d array for a single-line file, which
    # breaks len()/indexing in the callers. Always return a 1-d array.
    return np.atleast_1d(inds)
def parse_args():
    """Define and parse the command line interface.

    Three subcommands are supported: ``inspect``, ``delete`` and ``extract``.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    sub_inspect = subparsers.add_parser("inspect")
    sub_inspect.add_argument("outputdir", type=str)

    sub_delete = subparsers.add_parser("delete")
    sub_delete.add_argument("outputdir", type=str)
    sub_delete.add_argument(
        "Nptrn", type=str, help="Pattern for inds. Can be 'keep15' to retain 15 snapshots or 1:10:3 for inds from 1 to 10 in steps of 3, or ::5 for every 5th. Same pattern as in fancy indexing.")
    sub_delete.add_argument("-y", action="store_true",
                            help="Answer yes to all questions.")

    sub_extract = subparsers.add_parser("extract")
    sub_extract.add_argument("source", type=str)
    sub_extract.add_argument("destination", type=str)
    sub_extract.add_argument(
        "Nptrn", type=str, help="Pattern for inds. E.g. 1:10:3 for inds from 1 to 10 in steps of 3, or ::5 for every 5th. Same pattern as in fancy indexing.")
    sub_extract.add_argument(
        "-y", action="store_true", help="Answer yes to all questions.")

    return parser.parse_args()
def sizeof_fmt(num, suffix="B"):
    """Convert a number of bytes to human readable string.

    https://stackoverflow.com/a/1094933
    CC BY-SA 4.0

    Args:
        num (int): Size in bytes.
        suffix (str, optional): unit suffix

    Returns:
        str: Human readable size.
    """
    prefixes = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]
    value = num
    for prefix in prefixes:
        if abs(value) < 1024.0:
            return f"{value:3.1f} {prefix}{suffix}"
        value /= 1024.0
    # Anything past zebi falls through to yobi.
    return f"{value:.1f} Yi{suffix}"
# Run the CLI when the file is executed as a script.
if __name__ == "__main__":
    main()
|
rometschREPO_NAMEfargocptPATH_START.@fargocpt_extracted@fargocpt-master@Tools@thin.py@.PATH_END.py
|
{
"filename": "flowers102.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/torchvision/datasets/flowers102.py",
"type": "Python"
}
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import PIL.Image
from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class Flowers102(VisionDataset):
    """`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.

    .. warning::

        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
    flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
    between 40 and 258 images.

    The images have large scale, pose and light variations. In addition, there are categories that
    have large variations within the category, and several very similar categories.

    Args:
        root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a
            transformed version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    _download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
    # Artifact kind -> (filename, md5), used by download() and _check_integrity().
    _file_dict = {  # filename, md5
        "image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
        "label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
        "setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
    }
    # Split name -> key in setid.mat holding the image ids of that split.
    _splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        self._base_folder = Path(self.root) / "flowers-102"
        self._images_folder = self._base_folder / "jpg"

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # Deferred import: scipy is only required to read the .mat annotations.
        from scipy.io import loadmat

        set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
        # Image ids for the requested split, as stored in setid.mat (1-based).
        image_ids = set_ids[self._splits_map[self._split]].tolist()

        labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
        # Map 1-based image id -> 0-based class label.
        image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))

        self._labels = []
        self._image_files = []
        for image_id in image_ids:
            self._labels.append(image_id_to_label[image_id])
            # Image files are named image_XXXXX.jpg, zero-padded to 5 digits.
            self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")

    def __len__(self) -> int:
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        image_file, label = self._image_files[idx], self._labels[idx]
        # Always decode to RGB so downstream transforms get a consistent mode.
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def extra_repr(self) -> str:
        return f"split={self._split}"

    def _check_integrity(self):
        # The image archive is only checked by presence of the extracted
        # folder; the two .mat annotation files are verified by md5.
        if not (self._images_folder.exists() and self._images_folder.is_dir()):
            return False

        for id in ["label", "setid"]:
            filename, md5 = self._file_dict[id]
            if not check_integrity(str(self._base_folder / filename), md5):
                return False
        return True

    def download(self):
        # Skip the download entirely if everything is already in place.
        if self._check_integrity():
            return
        download_and_extract_archive(
            f"{self._download_url_prefix}{self._file_dict['image'][0]}",
            str(self._base_folder),
            md5=self._file_dict["image"][1],
        )
        for id in ["label", "setid"]:
            filename, md5 = self._file_dict[id]
            download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@torchvision@datasets@flowers102.py@.PATH_END.py
|
{
"filename": "io.py",
"repo_name": "StingraySoftware/HENDRICS",
"repo_path": "HENDRICS_extracted/HENDRICS-main/hendrics/io.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to perform input/output operations."""
from __future__ import annotations
import copy
import glob
import importlib
import logging
import os
import os.path
import pickle
import shutil
import sys
import warnings
from collections.abc import Iterable
import numpy as np
from stingray.base import StingrayObject, StingrayTimeseries
from stingray.crossspectrum import AveragedCrossspectrum, Crossspectrum
from stingray.events import EventList
from stingray.lightcurve import Lightcurve
from stingray.powerspectrum import AveragedPowerspectrum, Powerspectrum
from stingray.pulse.modeling import SincSquareModel
from stingray.pulse.search import search_best_peaks
from stingray.utils import assign_value_if_none
from astropy import log
from astropy.logger import AstropyUserWarning
from astropy.modeling.core import Model
from astropy.table import Table
from hendrics.base import get_file_format, splitext_improved
from .base import find_peaks_in_image, hen_root, is_string
try:
import netCDF4 as nc
HEN_FILE_EXTENSION = ".nc"
HAS_NETCDF = True
except ImportError:
msg = "Warning! NetCDF is not available. Using pickle format."
warnings.warn(msg)
HEN_FILE_EXTENSION = ".p"
HAS_NETCDF = False
HAS_H5PY = importlib.util.find_spec("h5py") is not None
try:
_ = np.complex256
HAS_C256 = True
except Exception:
HAS_C256 = False
cpl128 = np.dtype([("real", np.double), ("imag", np.double)])
if HAS_C256:
cpl256 = np.dtype([("real", np.longdouble), ("imag", np.longdouble)])
class EFPeriodogram:
    """Container for the results of a periodogram search (epoch folding or Z^2_n).

    Stores the search grid (``freq``, and ``fdots``/``fddots`` for
    accelerated searches), the detection statistic ``stat`` evaluated on that
    grid, and metadata about how the search was performed (number of bins,
    harmonics, averaged spectra, energy range, etc.).
    """

    def __init__(
        self,
        freq=None,
        stat=None,
        kind=None,
        nbin=None,
        N=None,
        oversample=None,
        M=None,
        pepoch=None,
        mjdref=None,
        peaks=None,
        peak_stat=None,
        best_fits=None,
        fdots=0,
        fddots=0,
        segment_size=1e32,
        filename="",
        parfile=None,
        emin=None,
        emax=None,
        ncounts=None,
        upperlim=None,
    ):
        # Trial frequencies: 1-d array, or 2-d grid when fdots is a grid
        # (see the 2-d branch of find_peaks, which indexes freq[i, j]).
        self.freq = freq
        # Detection statistic on the same grid as ``freq``.
        self.stat = stat
        # Kind of statistic, e.g. "Z2n" (used to pick the detection level).
        self.kind = kind
        self.nbin = nbin
        self.oversample = oversample
        self.N = N
        self.peaks = peaks
        self.peak_stat = peak_stat
        self.best_fits = best_fits
        self.fdots = fdots
        self.fddots = fddots
        self.M = M
        self.segment_size = segment_size
        self.filename = filename
        self.parfile = parfile
        self.emin = emin
        self.emax = emax
        self.pepoch = pepoch
        self.mjdref = mjdref
        self.upperlim = upperlim
        self.ncounts = ncounts

    def find_peaks(self, conflevel=99.0):
        """Find peaks above the ``conflevel``% detection level.

        Stores the result in ``self.peaks``/``self.peak_stat`` (if any peak
        is found) and returns the peak positions and their statistics.
        """
        from .base import fold_detection_level, z2_n_detection_level

        # The number of independent trials is the number of grid points,
        # corrected for oversampling (oversampled bins are not independent).
        ntrial = self.stat.size
        if hasattr(self, "oversample") and self.oversample is not None:
            ntrial /= self.oversample
            ntrial = int(ntrial)
        epsilon = 1 - conflevel / 100
        # Detection threshold depends on the statistic used for the search.
        if self.kind == "Z2n":
            threshold = z2_n_detection_level(
                epsilon=epsilon,
                n=self.N,
                ntrial=ntrial,
                n_summed_spectra=int(self.M),
            )
        else:
            threshold = fold_detection_level(nbin=int(self.nbin), epsilon=epsilon, ntrial=ntrial)

        if len(self.stat.shape) == 1:
            # 1-d search: peaks along the frequency axis only.
            best_peaks, best_stat = search_best_peaks(self.freq, self.stat, threshold)
        else:
            # 2-d (f, fdot) search: find up to 10 local maxima in the image.
            best_cands = find_peaks_in_image(self.stat, n=10, threshold_abs=threshold)
            best_peaks = []
            best_stat = []
            for i, idx in enumerate(best_cands):
                f, fdot = (
                    self.freq[idx[0], idx[1]],
                    self.fdots[idx[0], idx[1]],
                )
                best_peaks.append([f, fdot])
                best_stat.append(self.stat[idx[0], idx[1]])
        best_peaks = np.asarray(best_peaks)
        best_stat = np.asarray(best_stat)
        if len(best_peaks) > 0:
            self.peaks = best_peaks
            self.peak_stat = best_stat
        return best_peaks, best_stat
def get_energy_from_events(ev):
    """Return an (elabel, energy) pair from an event list.

    Prefers the ``energy`` attribute; falls back to ``pi`` (copying it into
    ``ev.energy`` as a side effect); when neither is available, returns an
    empty label and an array of ones matching ``ev.time``.
    """
    energy = getattr(ev, "energy", None)
    if energy is not None:
        return "Energy", energy
    pi = getattr(ev, "pi", None)
    if pi is not None:
        # Mirror PI into the energy attribute so later code can rely on it.
        ev.energy = pi
        return "PI", pi
    return "", np.ones_like(ev.time)
def filter_energy(ev: EventList, emin: float, emax: float) -> tuple[EventList, str]:
    """Filter event list by energy (or PI)

    If an ``energy`` attribute is present, uses it. Otherwise, it switches
    automatically to ``pi``

    Examples
    --------
    >>> import doctest
    >>> from contextlib import redirect_stderr
    >>> import sys
    >>> time = np.arange(5)
    >>> energy = np.array([0, 0, 30, 4, 1])
    >>> events = EventList(time=time, energy=energy)
    >>> ev_out, elabel = filter_energy(events, 3, None)
    >>> assert np.all(ev_out.time == [2, 3])
    >>> assert elabel == 'Energy'
    >>> events = EventList(time=time, pi=energy)
    >>> with warnings.catch_warnings(record=True) as w:
    ...     ev_out, elabel = filter_energy(events, None, 20)  # doctest: +ELLIPSIS
    >>> assert "No energy information in event list" in str(w[-1].message)
    >>> assert np.all(ev_out.time == [0, 1, 3, 4])
    >>> assert elabel == 'PI'
    >>> events = EventList(time=time, pi=energy)
    >>> ev_out, elabel = filter_energy(events, None, None)  # doctest: +ELLIPSIS
    >>> assert np.all(ev_out.time == time)
    >>> assert elabel == 'PI'
    >>> events = EventList(time=time)
    >>> with redirect_stderr(sys.stdout):
    ...     ev_out, elabel = filter_energy(events, 3, None)  # doctest: +ELLIPSIS
    ERROR:...No Energy or PI...
    >>> assert np.all(ev_out.time == time)
    >>> assert elabel == ''
    """
    # NOTE(review): `times` is unused; it only serves the commented-out
    # manual filtering at the bottom of this function.
    times = ev.time
    elabel, energy = get_energy_from_events(ev)
    # For some reason the doctest doesn't work if I don't do this instead
    # of using warnings.warn
    if elabel == "":
        # No energy-like information at all: return the list unchanged.
        log.error("No Energy or PI information available. " "No energy filter applied to events")
        return ev, ""

    if emax is None and emin is None:
        # No boundaries requested: nothing to filter.
        return ev, elabel
    # For some reason the doctest doesn't work if I don't do this instead
    # of using warnings.warn
    if elabel.lower() == "pi" and (emax is not None or emin is not None):
        # Filtering on PI channels while the caller asked for energies.
        warnings.warn(
            f"No energy information in event list "
            f"while filtering between {emin} and {emax}. "
            f"Definition of events.energy is now based on PI."
        )
    # Open boundaries default to just outside the observed range.
    if emin is None:
        emin = np.min(energy) - 1
    if emax is None:
        emax = np.max(energy) + 1

    good = (energy >= emin) & (energy <= emax)
    ev.apply_mask(good, inplace=True)
    # ev.time = times[good]
    # ev.energy = energy[good]
    return ev, elabel
def _get_key(dict_like, key):
"""
Examples
--------
>>> a = dict(b=1)
>>> assert _get_key(a, 'b') == 1
>>> _get_key(a, 'c') == ""
True
"""
try:
return dict_like[key]
except KeyError:
return ""
def high_precision_keyword_read(hdr, keyword):
    """Read FITS header keywords, also if split in two.

    In the case where the keyword is split in two, like

        MJDREF = MJDREFI + MJDREFF

    in some missions, this function returns the summed value. Otherwise, the
    content of the single keyword

    Parameters
    ----------
    hdr : dict_like
        The header structure, or a dictionary
    keyword : str
        The key to read in the header

    Returns
    -------
    value : long double
        The value of the key, or None if keyword not present

    Examples
    --------
    >>> hdr = dict(keywordS=1.25)
    >>> assert high_precision_keyword_read(hdr, 'keywordS') == 1.25
    >>> hdr = dict(keywordI=1, keywordF=0.25)
    >>> assert high_precision_keyword_read(hdr, 'keywordS') == 1.25
    """
    if keyword in hdr:
        return np.longdouble(hdr[keyword])

    # FITS keywords have at most 8 characters: the split I/F variants
    # replace the trailing character with the suffix.
    base = keyword[:7] if len(keyword) == 8 else keyword
    int_key, frac_key = base + "I", base + "F"
    if int_key in hdr and frac_key in hdr:
        return np.longdouble(hdr[int_key]) + np.longdouble(hdr[frac_key])
    return None
def read_header_key(fits_file, key, hdu=1):
    """Read the header key ``key`` from HDU ``hdu`` of a fits file.

    Parameters
    ----------
    fits_file: str
    key: str
        The keyword to be read

    Other Parameters
    ----------------
    hdu : int

    Returns
    -------
    value : object
        The header value, or "" if the keyword is missing.
    """
    from astropy.io import fits as pf

    # BUGFIX: use a context manager so the file is also closed when header
    # access raises something other than KeyError (the original leaked the
    # open HDU list in that case).
    with pf.open(fits_file) as hdulist:
        try:
            value = hdulist[hdu].header[key]
        except KeyError:  # pragma: no cover
            value = ""
    return value
def ref_mjd(fits_file, hdu=1):
    """Read MJDREFF+ MJDREFI or, if failed, MJDREF, from the FITS header.

    Parameters
    ----------
    fits_file : str

    Returns
    -------
    mjdref : numpy.longdouble
        the reference MJD

    Other Parameters
    ----------------
    hdu : int
    """
    from astropy.io import fits as pf

    # When a list of files is passed, the reference MJD comes from the first.
    if isinstance(fits_file, Iterable) and not is_string(fits_file):
        fits_file = fits_file[0]
    log.info("opening %s", fits_file)

    with pf.open(fits_file) as hdul:
        header = hdul[hdu].header
        return high_precision_keyword_read(header, "MJDREF")
# ---- Base function to save NetCDF4 files
def save_as_netcdf(vars, varnames, formats, fname):
    """Save variables in a NetCDF4 file.

    Parameters
    ----------
    vars : list
        Values to store, one per variable.
    varnames : list of str
        Variable names, parallel to ``vars``.
    formats : list
        NetCDF type codes, parallel to ``vars`` (e.g. 'c16', 'c32', str).
    fname : str
        Output file name.
    """
    rootgrp = nc.Dataset(fname, "w", format="NETCDF4")

    for iv, v in enumerate(vars):
        dims = {}
        dimname = varnames[iv] + "dim"
        dimspec = (varnames[iv] + "dim",)
        if formats[iv] == "c32":
            # Too complicated. Let's decrease precision
            warnings.warn("complex256 yet unsupported", AstropyUserWarning)
            formats[iv] = "c16"
        if formats[iv] == "c16":
            # netCDF has no native complex type: store (real, imag) pairs
            # through a compound type, registered once per file.
            v = np.asarray(v)
            # unicode_literals breaks something, I need to specify str.
            if "cpl128" not in rootgrp.cmptypes.keys():
                complex128_t = rootgrp.createCompoundType(cpl128, "cpl128")
            vcomp = np.empty(v.shape, dtype=cpl128)
            vcomp["real"] = v.real.astype(np.float64)
            vcomp["imag"] = v.imag.astype(np.float64)
            v = vcomp
            formats[iv] = complex128_t

        # Detect scalars that don't support len() (e.g. plain numbers).
        unsized = False
        try:
            len(v)
        except TypeError:
            unsized = True

        if isinstance(v, Iterable) and formats[iv] != str and not unsized:
            dim = len(v)
            dims[dimname] = dim

            if isinstance(v[0], Iterable):
                # 2-d data: add a second dimension sized on the first row.
                dim = len(v[0])
                dims[dimname + "_2"] = dim
                dimspec = (dimname, dimname + "_2")
        else:
            # Scalars and strings are stored as length-1 variables.
            dims[dimname] = 1

        for dimname in dims.keys():
            rootgrp.createDimension(dimname, dims[dimname])
        vnc = rootgrp.createVariable(varnames[iv], formats[iv], dimspec)
        try:
            if formats[iv] == str:
                vnc[0] = v
            else:
                vnc[:] = v
        except Exception:
            log.error("Bad variable:", varnames[iv], formats[iv], dimspec, v)
            raise
    rootgrp.close()
def read_from_netcdf(fname):
    """Read from a netCDF4 file.

    Returns
    -------
    out : dict
        Mapping of variable name to value, with compound (real, imag)
        variables rebuilt into numpy complex arrays and boolean markers
        decoded (inverse of the encoding used when saving).
    """
    rootgrp = nc.Dataset(fname)
    out = {}
    for k in rootgrp.variables.keys():
        dum = rootgrp.variables[k]
        values = dum.__array__()
        # Handle special case of complex: reassemble (real, imag) compound
        # values into a complex128 array.
        if dum.dtype == cpl128:
            arr = np.empty(values.shape, dtype=np.complex128)
            arr.real = values["real"]
            arr.imag = values["imag"]
            values = arr

        # Handle special case of complex (extended precision, when the
        # platform supports complex256).
        if HAS_C256 and dum.dtype == cpl256:
            arr = np.empty(values.shape, dtype=np.complex256)
            arr.real = values["real"]
            arr.imag = values["imag"]
            values = arr

        # Scalars were stored as length-1 variables; unwrap them.
        if dum.dtype == str or dum.size == 1:
            to_save = values[0]
        else:
            to_save = values
        # NOTE(review): eval() on file content assumes trusted files; values
        # are presumably '__bool__True'/'__bool__False' (prefix check uses
        # '__bool_' but the replace targets '__bool__') — verify the writer.
        if isinstance(to_save, (str, bytes)) and to_save.startswith("__bool_"):
            # Boolean single value
            to_save = eval(to_save.replace("__bool__", ""))
        # Boolean array
        elif k.startswith("__bool__"):
            to_save = to_save.astype(bool)
            k = k.replace("__bool__", "")
        out[k] = to_save

    rootgrp.close()

    return out
def _dum(x):
return x
def recognize_stingray_table(obj):
    """
    Examples
    --------
    >>> obj = AveragedCrossspectrum()
    >>> obj.freq = np.arange(10)
    >>> obj.power = np.random.random(10)
    >>> recognize_stingray_table(obj.to_astropy_table())
    'AveragedPowerspectrum'
    >>> obj.pds1 = obj.power
    >>> recognize_stingray_table(obj.to_astropy_table())
    'AveragedCrossspectrum'
    >>> obj = EventList(np.arange(10))
    >>> recognize_stingray_table(obj.to_astropy_table())
    'EventList'
    >>> obj = Lightcurve(np.arange(10), np.arange(10))
    >>> recognize_stingray_table(obj.to_astropy_table())
    'Lightcurve'
    >>> obj = Table()
    >>> recognize_stingray_table(obj)
    Traceback (most recent call last):
    ...
    ValueError: Object not recognized...
    """
    # Classify by the characteristic columns, from most to least specific.
    cols = obj.colnames
    if "hue" in cols:
        return "Powercolors"
    if "power" in cols:
        # A complex power column or an auxiliary pds1 column marks a
        # cross spectrum; a real-valued power column a power spectrum.
        is_cross = np.iscomplex(obj["power"][1]) or "pds1" in cols
        return "AveragedCrossspectrum" if is_cross else "AveragedPowerspectrum"
    if "counts" in cols:
        return "Lightcurve"
    if "time" in cols:
        return "EventList"

    raise ValueError(f"Object not recognized:\n{obj}")
# ----- Functions to handle file types
def get_file_type(fname, raw_data=False):
    """Return the file type and its contents.

    Only works for hendrics-format pickle or netcdf files, or stingray
    outputs.

    Parameters
    ----------
    fname : str
        Input file name.
    raw_data : bool, default False
        If True, return the raw dict/column data instead of reloading the
        file through the matching ``load_*`` function.

    Returns
    -------
    ftype : str
        Short type tag ('lc', 'powercolor', 'color', 'events', 'cpds',
        'pds', 'gti', 'folding'), or the raw Python type name for
        unrecognized payloads.
    contents : object
        The loaded contents.
    """
    contents_raw = load_data(fname)
    # Stingray tables (astropy-format outputs) carry no hendrics class
    # marker; infer the type from the table columns instead.
    if isinstance(contents_raw, Table):
        ftype_raw = recognize_stingray_table(contents_raw)
        if raw_data:
            contents = {col: contents_raw[col] for col in contents_raw.colnames}
            contents.update(contents_raw.meta)
    elif "__sr__class__type__" in contents_raw:
        # hendrics pickle/netcdf files store the class name explicitly.
        ftype_raw = contents_raw["__sr__class__type__"]
        contents = contents_raw
    else:
        # Unknown payload: report its Python type and return it unchanged.
        ftype_raw = type(contents_raw).__name__
        return ftype_raw, contents_raw

    # Map the class name to a short tag and the matching loader.
    if "Lightcurve" in ftype_raw:
        ftype = "lc"
        fun = load_lcurve
    elif ("Powercolor" in ftype_raw) or ("StingrayTimeseries" in ftype_raw and "hue" in contents):
        ftype = "powercolor"
        fun = load_timeseries
    elif "StingrayTimeseries" in ftype_raw or "Color" in ftype_raw:
        ftype = "color"
        fun = load_lcurve
    elif "EventList" in ftype_raw:
        ftype = "events"
        fun = load_events
    elif "Crossspectrum" in ftype_raw:
        ftype = "cpds"
        fun = load_pds
    elif "Powerspectrum" in ftype_raw:
        ftype = "pds"
        fun = load_pds
    elif "gti" in ftype_raw:
        ftype = "gti"
        fun = _dum
    elif "EFPeriodogram" in ftype_raw:
        ftype = "folding"
        fun = load_folding
    else:
        raise ValueError("File format not understood")

    if not raw_data:
        # Re-load through the dedicated loader to get a full object.
        contents = fun(fname)

    return ftype, contents
# ----- functions to save and load EVENT data
def save_events(eventlist, fname):
    """Save events in a file.

    The storage format (pickle, netCDF, ...) is chosen by ``save_data``
    from the file extension.

    Parameters
    ----------
    eventlist: :class:`stingray.EventList` object
        Event list to be saved
    fname: str
        Name of output file
    """
    save_data(eventlist, fname)
def save_timeseries(timeseries, fname):
    """Save a time series in a file.

    The storage format (pickle, netCDF, ...) is chosen by ``save_data``
    from the file extension.

    Parameters
    ----------
    timeseries: :class:`stingray.StingrayTimeseries` object
        Time series to be saved
    fname: str
        Name of output file
    """
    save_data(timeseries, fname)
def load_events(fname):
    """Load events from a file.

    hendrics pickle/netCDF files are unpacked into an ``EventList``;
    any other extension is delegated to ``EventList.read``.
    """
    fmt = get_file_format(fname)
    if fmt == "pickle":
        out = _load_data_pickle(fname)
    elif fmt == "nc":
        out = _load_data_nc(fname)
    else:
        # Try one of the known files from Astropy
        return EventList.read(fname, fmt=fmt)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="Unrecognized keywords:.*")
        eventlist = EventList(**out)

    # Re-attach any extra attributes the EventList constructor ignored.
    for key in out.keys():
        if hasattr(eventlist, key) and getattr(eventlist, key) is not None:
            continue
        setattr(eventlist, key, out[key])
    # Guarantee that mission/instr always exist, even if empty.
    for attr in ["mission", "instr"]:
        if attr not in list(out.keys()):
            setattr(eventlist, attr, "")
    return eventlist
def load_timeseries(fname):
    """Load a StingrayTimeseries from a file.

    hendrics pickle/netCDF files are unpacked into a ``StingrayTimeseries``;
    any other extension is delegated to ``StingrayTimeseries.read``.
    """
    fmt = get_file_format(fname)
    if fmt == "pickle":
        out = _load_data_pickle(fname)
    elif fmt == "nc":
        out = _load_data_nc(fname)
    else:
        # Try one of the known files from Astropy
        return StingrayTimeseries.read(fname, fmt=fmt)

    # Fix issue when reading a single-element time array from the nc file:
    # scalars come back 0-d and must be promoted to 1-d arrays.
    for attr in ["time", "_time"]:
        if attr in out and out[attr] is not None and len(np.shape(out[attr])) == 0:
            out[attr] = np.array([out[attr]])

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="Unrecognized keywords:.*")
        eventlist = StingrayTimeseries(**out)

    return eventlist
# ----- functions to save and load LCURVE data
def save_lcurve(lcurve, fname, lctype="Lightcurve"):
    """Save a light curve to file.

    Parameters
    ----------
    lcurve: :class:`stingray.Lightcurve` object
        Light curve to be saved
    fname: str
        Name of output file

    Other Parameters
    ----------------
    lctype : str
        Class-type label stored in the output under
        ``__sr__class__type__`` (pickle/netCDF formats only).
    """
    fmt = get_file_format(fname)
    # A partially-masked light curve cannot be stored as-is: apply the mask
    # to a copy first, then drop it.
    if hasattr(lcurve, "_mask") and lcurve._mask is not None and np.any(~lcurve._mask):
        logging.info("The light curve has a mask. Applying it before saving.")
        lcurve = lcurve.apply_mask(lcurve._mask, inplace=False)
        lcurve._mask = None
    if fmt not in ["nc", "pickle"]:
        # Delegate non-native formats to stingray's writer.
        return lcurve.write(fname)
    lcdict = lcurve.dict()
    lcdict["__sr__class__type__"] = str(lctype)
    save_data(lcdict, fname)
def load_lcurve(fname):
    """Load a light curve from a file.

    Pickle/netCDF files are rebuilt attribute-by-attribute into a bare
    :class:`stingray.Lightcurve`; other formats go through
    ``Lightcurve.read``.
    """
    fmt = get_file_format(fname)
    if fmt == "pickle":
        data = _load_data_pickle(fname)
    elif fmt == "nc":
        data = _load_data_nc(fname)
    else:
        # Try one of the known files from Lightcurve
        return Lightcurve.read(fname, fmt=fmt, skip_checks=True)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="Unrecognized keywords:.*")
        # Build an empty Lightcurve and set time first, so that subsequent
        # attribute assignments find a consistent object.
        time = data["time"]
        data.pop("time")
        lcurve = Lightcurve()
        lcurve.time = time
        for key in data.keys():
            vals = data[key]
            # The mask is stored under the public name but lives in `_mask`.
            if key == "mask":
                key = "_mask"
            setattr(lcurve, key, vals)
    if "mission" not in list(data.keys()):
        lcurve.mission = ""
    return lcurve
# ---- Functions to save epoch folding results
def save_folding(efperiodogram, fname):
    """Save an epoch-folding periodogram (``EFPeriodogram``) to file.

    Best-fit models, which cannot be serialized to netCDF, are written to
    separate pickle files named ``<root>__mod<i>__.p`` next to the output.

    Parameters
    ----------
    efperiodogram : :class:`EFPeriodogram`
        The periodogram to save.
    fname : str
        Output file name; its extension selects pickle or netCDF.

    Raises
    ------
    ValueError
        If the file format is neither pickle nor netCDF (previously this
        silently returned ``None`` without writing anything).
    """
    outdata = copy.copy(efperiodogram.__dict__)
    outdata["__sr__class__type__"] = "EFPeriodogram"
    if "best_fits" in outdata and efperiodogram.best_fits is not None:
        # Dump each model to its own pickle file and drop them from the
        # main payload.
        for i, b in enumerate(efperiodogram.best_fits):
            mfile = fname.replace(HEN_FILE_EXTENSION, f"__mod{i}__.p")
            save_model(b, mfile)
        outdata.pop("best_fits")

    fmt = get_file_format(fname)
    if fmt == "pickle":
        return _save_data_pickle(outdata, fname)
    elif fmt == "nc":
        return _save_data_nc(outdata, fname)
    raise ValueError(f"Unrecognized file format for {fname}")
def load_folding(fname):
    """Load an epoch-folding periodogram (``EFPeriodogram``) from file.

    Best-fit models saved by :func:`save_folding` in companion
    ``__mod<i>__.p`` pickle files are reloaded into ``ef.best_fits``.

    Raises
    ------
    ValueError
        If the file format is neither pickle nor netCDF (previously this
        fell through and crashed with a ``NameError`` on ``data``).
    """
    fmt = get_file_format(fname)
    if fmt == "pickle":
        data = _load_data_pickle(fname)
    elif fmt == "nc":
        data = _load_data_nc(fname)
    else:
        raise ValueError(f"Unrecognized file format for {fname}")

    data.pop("__sr__class__type__")
    ef = EFPeriodogram()
    for key in data.keys():
        setattr(ef, key, data[key])

    # Reload the companion best-fit model files, if any were saved.
    modelfiles = glob.glob(fname.replace(HEN_FILE_EXTENSION, "__mod*__.p"))
    if len(modelfiles) >= 1:
        bmodels = []
        for mfile in modelfiles:
            if os.path.exists(mfile):
                bmodels.append(load_model(mfile)[0])
        ef.best_fits = bmodels

    # Normalize a scalar peak to a one-element list for uniform handling.
    if ef.peaks is not None and len(np.asarray(ef.peaks).shape) == 0:
        ef.peaks = [ef.peaks]
    return ef
# ---- Functions to save PDSs
def save_pds(cpds, fname, save_all=False, save_dyn=False, no_auxil=False, save_lcs=False):
    """Save a power/cross spectrum to file.

    Auxiliary products (the single-channel PDSs, the input light curves and
    the dynamical spectra) are saved recursively to separate files inside a
    directory named after ``fname`` without extension.

    Parameters
    ----------
    cpds : stingray (Averaged)Powerspectrum / (Averaged)Crossspectrum
        The spectrum to save; a deep copy is made, the input is untouched.
    fname : str
        Output file name; the extension selects the format.

    Other Parameters
    ----------------
    save_all : bool
        Shortcut for ``save_dyn=True, no_auxil=False, save_lcs=True``.
    save_dyn : bool
        Save the dynamical spectra (``cs_all``/``unnorm_cs_all``).
    no_auxil : bool
        Discard the auxiliary ``pds1``/``pds2`` instead of saving them.
    save_lcs : bool
        Save the input light curves (``lc1``/``lc2``).
    """
    from .base import mkdir_p

    if os.path.exists(fname):
        os.unlink(fname)

    # Work on a copy: attributes are deleted below before serialization.
    cpds = copy.deepcopy(cpds)

    if save_all:
        save_dyn = True
        no_auxil = False
        save_lcs = True

    basename, ext = splitext_improved(fname)
    # Auxiliary files go into a directory named after the output file.
    outdir = basename
    if save_dyn or not no_auxil or save_lcs:
        mkdir_p(outdir)

    fmt = get_file_format(fname)
    # Subspectra are never serialized with the main object.
    if hasattr(cpds, "subcs"):
        del cpds.subcs
    if hasattr(cpds, "unnorm_subcs"):
        del cpds.unnorm_subcs

    if no_auxil:
        for attr in ["pds1", "pds2"]:
            if hasattr(cpds, attr):
                delattr(cpds, attr)

    # Save each single-channel PDS to its own file, then drop it.
    for attr in ["pds1", "pds2"]:
        if hasattr(cpds, attr):
            value = getattr(cpds, attr)
            outf = f"__{attr}__" + ext
            if "pds" in attr and isinstance(value, Crossspectrum):
                outfile = os.path.join(outdir, outf)
                save_pds(value, outfile, no_auxil=True)
            if hasattr(cpds, attr):
                delattr(cpds, attr)

    # Optionally save the input light curves to their own files.
    for lcattr in ("lc1", "lc2"):
        if hasattr(cpds, lcattr) and save_lcs:
            lc_name = os.path.join(outdir, f"__{lcattr}__" + ext)
            lc = getattr(cpds, lcattr)
            if isinstance(lc, Iterable):
                if len(lc) > 1:
                    warnings.warn("Saving multiple light curves is not supported. Saving only one")
                lc = lc[0]
            if isinstance(lc, Lightcurve):
                save_lcurve(lc, lc_name)
            delattr(cpds, lcattr)

    # Dynamical spectra: save each sub-spectrum, or drop them entirely.
    for attr in ["cs_all", "unnorm_cs_all"]:
        if not hasattr(cpds, attr):
            continue
        if not save_dyn:
            delattr(cpds, attr)
            continue
        saved_outside = False
        for i, c in enumerate(getattr(cpds, attr)):
            label = attr.replace("_all", "")
            if not hasattr(c, "freq"):
                # Not spectrum-like objects: keep them attached instead.
                break
            save_pds(
                c,
                os.path.join(outdir, f"__{label}__{i}__" + ext),
                no_auxil=True,
            )
            saved_outside = True
        if saved_outside:
            delattr(cpds, attr)

    # Light curves that were not explicitly saved are never serialized.
    if hasattr(cpds, "lc1"):
        del cpds.lc1
    if hasattr(cpds, "lc2"):
        del cpds.lc2
    if not hasattr(cpds, "instr"):
        cpds.instr = "unknown"

    # Best-fit models go to separate pickle files (cannot go to netCDF).
    if hasattr(cpds, "best_fits") and cpds.best_fits is not None:
        model_files = []
        for i, b in enumerate(cpds.best_fits):
            mfile = os.path.join(
                outdir,
                basename + f"__mod{i}__.p",
            )
            save_model(b, mfile)
            model_files.append(mfile)
        del cpds.best_fits

    if fmt not in ["nc", "pickle"]:
        # Delegate non-native formats to stingray's writer.
        return cpds.write(fname, fmt=fmt)
    outdata = copy.copy(cpds.__dict__)
    outdata["__sr__class__type__"] = str(type(cpds))
    if fmt == "pickle":
        return _save_data_pickle(outdata, fname)
    elif fmt == "nc":
        return _save_data_nc(outdata, fname)
def remove_pds(fname):
    """Remove the pds file and the directory with auxiliary information.

    Deletes, in order: the companion ``__mod*__.p`` best-fit model files,
    the auxiliary-products directory created by :func:`save_pds`, and
    finally the main file itself.
    """
    outdir, _ = splitext_improved(fname)
    # Same path construction used by save_pds/load_pds for model files.
    modelfiles = glob.glob(os.path.join(outdir, fname.replace(HEN_FILE_EXTENSION, "__mod*__.p")))
    for mfile in modelfiles:
        os.unlink(mfile)
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.unlink(fname)
def load_pds(fname, nosub=False):
    """Load a power/cross spectrum from a file.

    The class of the returned object is reconstructed from the stored
    ``__sr__class__type__`` string (pickle/netCDF) or guessed from the
    table columns (other formats). Auxiliary products saved by
    :func:`save_pds` (light curves, single-channel PDSs, dynamical
    spectra, best-fit models) are reloaded from the companion directory.

    Other Parameters
    ----------------
    nosub : bool
        If True, skip reloading the auxiliary sub-products.
    """
    rootname, ext = splitext_improved(fname)
    fmt = get_file_format(fname)
    if fmt not in ["pickle", "nc"]:
        # Guess cross- vs power-spectrum from the available columns.
        dummy = Table.read(fname, format=fmt)
        if "pds1" in dummy.colnames or "power.real" in dummy.colnames:
            cpds = AveragedCrossspectrum.read(fname, fmt=fmt)
        else:
            cpds = AveragedPowerspectrum.read(fname, fmt=fmt)
    else:
        if fmt == "pickle":
            data = _load_data_pickle(fname)
        elif fmt == "nc":
            data = _load_data_nc(fname)
        type_string = data["__sr__class__type__"]
        # Order matters: "AveragedPowerspectrum" also contains
        # "Powerspectrum", so the more specific names are tested first.
        if "AveragedPowerspectrum" in type_string:
            cpds = AveragedPowerspectrum()
        elif "Powerspectrum" in type_string:
            cpds = Powerspectrum()
        elif "AveragedCrossspectrum" in type_string:
            cpds = AveragedCrossspectrum()
        elif "Crossspectrum" in type_string:
            cpds = Crossspectrum()
        else:
            raise ValueError("Unrecognized data type in file")
        data.pop("__sr__class__type__")
        for key in data.keys():
            setattr(cpds, key, data[key])

    outdir = rootname
    # Companion best-fit model files, written by save_pds.
    modelfiles = glob.glob(os.path.join(outdir, rootname + "__mod*__.p"))
    cpds.best_fits = None
    if len(modelfiles) >= 1:
        bmodels = []
        for mfile in modelfiles:
            if os.path.exists(mfile):
                bmodels.append(load_model(mfile)[0])
        cpds.best_fits = bmodels

    if nosub:
        return cpds

    # Reload auxiliary products saved next to the main file by save_pds.
    lc1_name = os.path.join(outdir, "__lc1__" + ext)
    lc2_name = os.path.join(outdir, "__lc2__" + ext)
    pds1_name = os.path.join(outdir, "__pds1__" + ext)
    pds2_name = os.path.join(outdir, "__pds2__" + ext)
    cs_all_names = glob.glob(os.path.join(outdir, "__cs__[0-9]*__" + ext))
    unnorm_cs_all_names = glob.glob(os.path.join(outdir, "__unnorm_cs__[0-9]*__" + ext))

    if os.path.exists(lc1_name):
        cpds.lc1 = load_lcurve(lc1_name)
    if os.path.exists(lc2_name):
        cpds.lc2 = load_lcurve(lc2_name)
    if os.path.exists(pds1_name):
        cpds.pds1 = load_pds(pds1_name)
    if os.path.exists(pds2_name):
        cpds.pds2 = load_pds(pds2_name)
    if len(cs_all_names) > 0:
        cs_all = []
        # Sorted so that sub-spectra are restored in saved order.
        for c in sorted(cs_all_names):
            cs_all.append(load_pds(c))
        cpds.cs_all = cs_all
    if len(unnorm_cs_all_names) > 0:
        unnorm_cs_all = []
        for c in sorted(unnorm_cs_all_names):
            unnorm_cs_all.append(load_pds(c))
        cpds.unnorm_cs_all = unnorm_cs_all
    return cpds
# ---- GENERIC function to save stuff.
def _load_data_pickle(fname, kind="data"):
    """Read a pickled object back from ``fname``.

    ``kind`` is purely cosmetic: it only appears in the log message.
    """
    log.info(f"Loading {kind} and info from {fname}")
    with open(fname, "rb") as stream:
        return pickle.load(stream)
def _save_data_pickle(struct, fname, kind="data"):
    """Pickle ``struct`` to ``fname``.

    ``kind`` is purely cosmetic: it only appears in the log message.
    """
    log.info(f"Saving {kind} and info to {fname}")
    with open(fname, "wb") as stream:
        pickle.dump(struct, stream)
def _load_data_nc(fname):
    """Load generic data in netcdf format.

    High-precision numbers are stored by :func:`_save_data_nc` as four
    companion variables ``<name>_I`` (integer part), ``<name>_F``
    (fractional part), ``<name>_L`` (decimal exponent) and ``<name>_k``
    (dtype kind string). This function recombines them into a single
    value ``(I + F) * 10**L`` and removes the companion keys.
    """
    contents = read_from_netcdf(fname)
    keys = list(contents.keys())

    keys_to_delete = []
    for k in keys:
        if k in keys_to_delete:
            continue

        # Sentinel string used by _save_data_nc to encode None values.
        if str(contents[k]) == "__hen__None__type__":
            contents[k] = None

        if k[-2:] in ["_I", "_L", "_F", "_k"]:
            kcorr = k[:-2]

            integer_key = kcorr + "_I"
            float_key = kcorr + "_F"
            kind_key = kcorr + "_k"
            log10_key = kcorr + "_L"

            if not (integer_key in keys and float_key in keys):
                continue
            # Maintain compatibility with old-style files (no kind/log10
            # companions): assume longdouble and exponent 0.
            if not (kind_key in keys and log10_key in keys):
                contents[kind_key] = "longdouble"
                contents[log10_key] = 0

            keys_to_delete.extend([integer_key, float_key])
            keys_to_delete.extend([kind_key, log10_key])

            if contents[kind_key] == "longdouble":
                dtype = np.longdouble
            elif contents[kind_key] == "double":
                dtype = np.double
            else:
                raise ValueError(contents[kind_key] + ": unrecognized kind string")

            log10_part = contents[log10_key]
            if isinstance(contents[integer_key], Iterable):
                integer_part = np.array(contents[integer_key], dtype=dtype)
                float_part = np.array(contents[float_key], dtype=dtype)
            else:
                integer_part = dtype(contents[integer_key])
                float_part = dtype(contents[float_key])

            # Recombine into the original high-precision value.
            contents[kcorr] = (integer_part + float_part) * 10.0**log10_part

    # Delete the companion keys only after the scan, so the guard at the
    # top of the loop can skip keys already scheduled for removal.
    for k in keys_to_delete:
        del contents[k]

    return contents
def _split_high_precision_number(varname, var, probesize):
    """Split a high-precision float into integer + fractional parts.

    The value is decomposed as ``(var_I + var_F) * 10**var_log10`` so that
    it can be stored losslessly in formats without long-double support
    (see :func:`_save_data_nc` / :func:`_load_data_nc`).

    Parameters
    ----------
    varname : str
        Name of the variable (kept for signature compatibility; unused).
    var : float or iterable of float
        The value(s) to split. NaNs are preserved in the fractional part.
    probesize : int
        Item size in bytes: 8 (double) or 16 (longdouble).

    Returns
    -------
    var_I : int or ndarray of int
        Integer part.
    var_F : double or ndarray of double
        Fractional part.
    var_log10 : int or float
        Decimal exponent applied before the split (non-zero only for
        values with absolute value below 1, to avoid losing precision).
    kind_str : str
        Either "double" or "longdouble".

    Raises
    ------
    ValueError
        If ``probesize`` is neither 8 nor 16 (previously this fell
        through and crashed with a ``NameError`` on ``kind_str``).
    """
    if probesize == 8:
        kind_str = "double"
    elif probesize == 16:
        kind_str = "longdouble"
    else:
        raise ValueError(f"Unsupported probesize: {probesize}")

    var_log10 = 0
    if isinstance(var, Iterable):
        var = np.asarray(var)
        bad = np.isnan(var)
        # Rescale only when the smallest finite magnitude is below 1.
        dum = np.min(np.abs(var[~bad]))
        if dum < 1 and dum > 0.0:
            var_log10 = np.floor(np.log10(dum))
        var = np.asarray(var) / (10.0**var_log10)
        var[bad] = 0
        var_I = np.floor(var).astype(int)
        var_F = np.array(var - var_I, dtype=np.double)
        var_F[bad] = np.nan
    else:
        if np.abs(var) < 1 and np.abs(var) > 0.0:
            var_log10 = np.floor(np.log10(np.abs(var)))
        if np.isnan(var):
            var_I = np.asarray(0).astype(int)
            var_F = np.asarray(np.nan)
        else:
            var = np.asarray(var) / 10.0**var_log10
            var_I = int(np.floor(var))
            var_F = np.double(var - var_I)
    return var_I, var_F, var_log10, kind_str
def _save_data_nc(struct, fname, kind="data"):
    """Save generic data in netcdf format.

    Each key of ``struct`` becomes a netCDF variable. Types without a
    native representation are encoded specially:

    * booleans -> string ``"__bool__True"``/``"__bool__False"``;
    * ``None`` -> sentinel string ``"__hen__None__type__"``;
    * (long)doubles -> four companion variables ``_I``/``_L``/``_F``/``_k``
      (see :func:`_split_high_precision_number` / :func:`_load_data_nc`);
    * boolean arrays -> ``u1`` arrays with a ``__bool__`` name prefix.
    """
    log.info(f"Saving {kind} and info to {fname}")
    varnames = []
    values = []
    formats = []

    for k in struct.keys():
        var = struct[k]
        if isinstance(var, bool):
            var = f"__bool__{var}"

        # Probe the first element of iterables to determine the dtype.
        probe = var
        if isinstance(var, Iterable) and len(var) >= 1:
            probe = var[0]

        if is_string(var):
            probekind = str
            probesize = -1
        elif var is None:
            probekind = None
        else:
            probekind = np.result_type(probe).kind
            probesize = np.result_type(probe).itemsize

        if probekind == "f" and probesize >= 8:
            # If a (long)double, split it in integer + floating part.
            # If the number is below zero, also use a logarithm of 10 before
            # that, so that we don't lose precision
            var_I, var_F, var_log10, kind_str = _split_high_precision_number(k, var, probesize)
            values.extend([var_I, var_log10, var_F, kind_str])
            formats.extend(["i8", "i8", "f8", str])
            varnames.extend([k + "_I", k + "_L", k + "_F", k + "_k"])
        elif probekind == str:
            values.append(var)
            formats.append(probekind)
            varnames.append(k)
        elif probekind == "b":
            # Boolean arrays: store as unsigned bytes, flag via name prefix.
            values.append(var.astype("u1"))
            formats.append("u1")
            varnames.append("__bool__" + k)
        elif probekind is None:
            values.append("__hen__None__type__")
            formats.append(str)
            varnames.append(k)
        else:
            values.append(var)
            formats.append(probekind + f"{probesize}")
            varnames.append(k)

    save_as_netcdf(values, varnames, formats, fname)
def save_data(struct, fname, ftype="data"):
    """Save generic data in hendrics format.

    Pickle and netCDF outputs get a ``__sr__class__type__`` tag recording
    the original class; any other extension is delegated to the object's
    own ``write`` method.
    """
    fmt = get_file_format(fname)
    struct_dict = struct.dict() if isinstance(struct, StingrayObject) else struct

    if fmt in ("pickle", "nc"):
        if "__sr__class__type__" not in struct_dict:
            struct_dict["__sr__class__type__"] = str(type(struct))
        writer = _save_data_pickle if fmt == "pickle" else _save_data_nc
        return writer(struct_dict, fname, kind=ftype)

    if not hasattr(struct, "write"):
        raise ValueError("Unrecognized data format or file format")
    struct.write(fname)
def load_data(fname):
    """Load generic data in hendrics format.

    Pickle and netCDF files use the native HENDRICS readers; anything
    else is tried through ``astropy.table.Table.read``.

    Raises
    ------
    TypeError
        If the file cannot be read by any known reader. The original
        reader exception is chained as ``__cause__`` (previously it was
        silently discarded).
    """
    fmt = get_file_format(fname)
    if fmt == "pickle":
        return _load_data_pickle(fname)
    elif fmt == "nc":
        return _load_data_nc(fname)

    try:
        return Table.read(fname, format=fmt)
    except Exception as e:
        # Chain the original exception so the root cause is not lost.
        raise TypeError(
            "The file type is not recognized. Did you convert the"
            " original files into HENDRICS format (e.g. with "
            "HENreadevents or HENlcurve)?"
        ) from e
# QDP format is often used in FTOOLS
def save_as_qdp(arrays, errors=None, filename="out.qdp", mode="w"):
    """Save arrays in a QDP file.

    Saves an array of variables, and possibly their errors, to a QDP file.

    Parameters
    ----------
    arrays: [array1, array2]
        List of variables. All variables must be arrays and of the same length.
    errors: [array1, array2]
        List of errors. The order has to be the same of arrays; the value can
        be:
        - None if no error is assigned
        - an array of same length of variable for symmetric errors
        - an array of len-2 lists for non-symmetric errors (e.g.
          [[errm1, errp1], [errm2, errp2], [errm3, errp3], ...])

    Other Parameters
    ----------------
    filename : str
        Name of the output QDP file
    mode : str
        the file access mode, to be passed to the open() function. Can be 'w'
        or 'a'

    Raises
    ------
    ValueError
        If an error column does not have the same length as its array.
    """
    errors = assign_value_if_none(errors, [None for i in arrays])

    data_to_write = []
    list_of_errs = []
    for ia, ar in enumerate(arrays):
        data_to_write.append(ar)
        if errors[ia] is None:
            continue
        shape = np.shape(errors[ia])
        if shape[0] != len(ar):
            raise ValueError("Errors and arrays must have same length")
        if len(shape) == 1:
            # Symmetric errors: one extra column.
            list_of_errs.append([ia, "S"])
            data_to_write.append(errors[ia])
        elif shape[1] == 2:
            # Two-sided errors: two extra columns (minus, plus).
            list_of_errs.append([ia, "T"])
            mine = [k[0] for k in errors[ia]]
            maxe = [k[1] for k in errors[ia]]
            data_to_write.append(mine)
            data_to_write.append(maxe)

    # Print the READ SERR/TERR header only when not appending to an
    # existing file (QDP headers must appear once, at the top).
    print_header = not (os.path.exists(filename) and mode == "a")

    # Context manager guarantees the file is closed even on error
    # (previously the handle leaked if a print raised).
    with open(filename, mode) as outfile:
        if print_header:
            for i, kind in list_of_errs:
                print(f"READ {kind}" + f"ERR {i + 1}", file=outfile)

        length = len(data_to_write[0])
        for i in range(length):
            for d in data_to_write:
                print(d[i], file=outfile, end=" ")
            print(file=outfile)
def save_as_ascii(cols, filename="out.txt", colnames=None, append=False):
    """Save arrays as TXT file with respective errors.

    Parameters
    ----------
    cols : array-like
        A single column (1-D) or a list of columns (2-D) to write.

    Other Parameters
    ----------------
    filename : str
        Name of the output text file.
    colnames : list of str
        Optional column names, written as a ``#``-prefixed header line.
    append : bool
        If True, append to an existing file instead of overwriting.

    Returns
    -------
    int
        0 on success, -1 if the input dimensionality is invalid.
    """
    shape = np.shape(cols)
    ndim = len(shape)
    if ndim == 1:
        # A single column: promote to a one-column table.
        cols = [cols]
    elif ndim >= 3 or ndim == 0:
        log.error("Only one- or two-dim arrays accepted")
        return -1
    lcol = len(cols[0])
    log.debug(f"{repr(cols)} {repr(np.shape(cols))}")

    # Context manager guarantees the file is closed even on error
    # (previously the handle leaked if a print raised).
    mode = "a" if append else "w"
    with open(filename, mode) as txtfile:
        if colnames is not None:
            print("#", file=txtfile, end=" ")
            for i_c, c in enumerate(cols):
                print(colnames[i_c], file=txtfile, end=" ")
            print(file=txtfile)
        for i in range(lcol):
            for c in cols:
                print(c[i], file=txtfile, end=" ")
            print(file=txtfile)
    return 0
def print_fits_info(fits_file, hdu=1):
    """Print general info about an observation.

    Reads telescope, instrument, target and timing keywords from the
    header of the requested HDU, prints a human-readable summary and
    returns the collected values.

    Parameters
    ----------
    fits_file : str
        Path to the FITS event file.

    Other Parameters
    ----------------
    hdu : int
        Index of the HDU holding the data (default 1).

    Returns
    -------
    dict
        The header values keyed by human-readable labels.
    """
    from astropy.io import fits as pf
    from astropy.time import Time
    from astropy.units import Unit

    lchdulist = pf.open(fits_file)
    datahdu = lchdulist[hdu]
    header = datahdu.header

    info = {}
    info["N. events"] = _get_key(header, "NAXIS2")
    info["Telescope"] = _get_key(header, "TELESCOP")
    info["Instrument"] = _get_key(header, "INSTRUME")
    info["OBS_ID"] = _get_key(header, "OBS_ID")
    info["Target"] = _get_key(header, "OBJECT")
    info["Start"] = _get_key(header, "DATE-OBS")
    info["Stop"] = _get_key(header, "DATE-END")

    # Give time in MJD: TSTART/TSTOP are offsets from MJDREF in TIMEUNIT.
    mjdref = high_precision_keyword_read(header, "MJDREF")
    tstart = high_precision_keyword_read(header, "TSTART")
    tstop = high_precision_keyword_read(header, "TSTOP")
    tunit = _get_key(header, "TIMEUNIT")
    start_mjd = Time(mjdref, format="mjd") + tstart * Unit(tunit)
    stop_mjd = Time(mjdref, format="mjd") + tstop * Unit(tunit)

    print(f"ObsID: {info['OBS_ID']}\n")
    print(f"Date: {info['Start']} -- {info['Stop']}\n")
    print(f"Date (MJD): {start_mjd} -- {stop_mjd}\n")
    print(f"Instrument: {info['Telescope']}/{info['Instrument']}\n")
    print(f"Target: {info['Target']}\n")
    print(f"N. Events: {info['N. events']}\n")

    lchdulist.close()
    return info
def main(args=None):
    """Main function called by the `HENreadfile` command line script.

    Prints a summary of each input file: FITS/event files go through
    :func:`print_fits_info`, everything else through
    :func:`get_file_type`.
    """
    import argparse

    description = "Print the content of HENDRICS files"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("files", help="List of files", nargs="+")
    # NOTE(review): --print-header is parsed but never acted upon in this
    # function — confirm whether header printing was meant to be wired in.
    parser.add_argument(
        "--print-header",
        help="Print the full FITS header if present in the " "meta data.",
        default=False,
        action="store_true",
    )
    args = parser.parse_args(args)

    for fname in args.files:
        print()
        print("-" * len(fname))
        print(f"{fname}")
        print("-" * len(fname))
        if fname.endswith((".fits", ".evt")):
            # Raw FITS/event files: show header-derived info only.
            print("This FITS file contains:", end="\n\n")
            print_fits_info(fname)
            print("-" * len(fname))
            continue
        ftype, contents = get_file_type(fname, raw_data=False)
        print(contents)
        print("-" * len(fname))
def sort_files(files):
    """Sort a list of HENDRICS files, looking at `Tstart` in each.

    Parameters
    ----------
    files : list of str
        File names to sort. All files must contain the same kind of data
        (all light curves, all PDSs, ...).

    Returns
    -------
    dict
        Maps each instrument name to its file names, sorted by the
        earliest GTI time.

    Raises
    ------
    ValueError
        If the files are not all of the same kind (previously an
        ``assert``, which disappears under ``python -O``).
    """
    allfiles = {}
    ftypes = []
    for f in files:
        log.info("Loading file " + f)
        ftype, contents = get_file_type(f)
        instr = contents.instr
        ftypes.append(ftype)
        if instr not in allfiles:
            allfiles[instr] = []
        # Add file name to the dictionary
        contents.__sort__filename__ = f
        allfiles[instr].append(contents)

    # Check if files are all of the same kind (lcs, PDSs, ...)
    if len(set(ftypes)) > 1:
        raise ValueError("Files are not all of the same kind.")

    for instr in list(allfiles.keys()):
        contents = list(allfiles[instr])
        tstarts = [np.min(c.gti) for c in contents]
        fnames = [c.__sort__filename__ for c in contents]
        fnames = [x for (y, x) in sorted(zip(tstarts, fnames))]
        # Substitute dictionaries with the sorted list of files
        allfiles[instr] = fnames

    return allfiles
def save_model(model, fname="model.p", constraints=None):
    """Save best-fit models to data.

    Parameters
    ----------
    model : func or `astropy.modeling.core.Model` object
        The model to be saved
    fname : str, default 'models.p'
        The output file name

    Other Parameters
    ----------------
    constraints: dict
        Additional model constraints. Ignored for astropy models.

    Raises
    ------
    TypeError
        If the model is neither an Astropy model nor a callable with
        exactly one non-keyword argument.
    """
    modeldata = {"model": model, "constraints": None}
    if isinstance(model, (Model, SincSquareModel)):
        modeldata["kind"] = "Astropy"
    elif callable(model):
        nargs = model.__code__.co_argcount
        # __defaults__ is None (not an empty tuple) for functions with no
        # keyword arguments: guard against a TypeError in len().
        nkwargs = len(model.__defaults__ or ())
        if not nargs - nkwargs == 1:
            raise TypeError("Accepted callable models have only one " "non-keyword argument")
        modeldata["kind"] = "callable"
        modeldata["constraints"] = constraints
    else:
        raise TypeError(
            "The model has to be an Astropy model or a callable"
            " with only one non-keyword argument"
        )

    with open(fname, "wb") as fobj:
        pickle.dump(modeldata, fobj)
def load_model(modelstring):
    """Load a best-fit model from a pickle or Python source file.

    Parameters
    ----------
    modelstring : str
        Path to a ``.p`` pickle file written by :func:`save_model`, or a
        ``.py`` module defining ``model`` (and optionally ``constraints``).

    Returns
    -------
    model : callable or `astropy.modeling.core.Model`
    kind : str
        Either "Astropy" or "callable".
    constraints : dict or None

    Raises
    ------
    TypeError
        If ``modelstring`` is not a string, has an unknown extension, or
        the loaded model is not acceptable.
    FileNotFoundError
        If the file does not exist.
    """
    if not is_string(modelstring):
        raise TypeError("modelstring has to be an existing file name")
    if not os.path.exists(modelstring):
        raise FileNotFoundError("Model file not found")

    # modelstring is a pickle file
    if modelstring.endswith(".p"):
        log.debug("Loading model from pickle file")
        with open(modelstring, "rb") as fobj:
            modeldata = pickle.load(fobj)
        return modeldata["model"], modeldata["kind"], modeldata["constraints"]
    # modelstring is a python file
    elif modelstring.endswith(".py"):
        log.debug("Loading model from Python source")
        modulename = modelstring.replace(".py", "")
        sys.path.append(os.getcwd())
        # If a module with the same name was already imported, unload it!
        # This is because the user might be using the same file name but
        # different models inside, just like we do in test_io.py
        if modulename in sys.modules:
            del sys.modules[modulename]
        # This invalidate_caches() is called to account for the case when
        # the model file does not exist the first time we call
        # importlib.import_module(). In this case, the second time we call it,
        # even if the file exists it will not exist for importlib.
        importlib.invalidate_caches()
        _model = importlib.import_module(modulename)
        model = _model.model
        constraints = None
        if hasattr(_model, "constraints"):
            constraints = _model.constraints
    else:
        raise TypeError("Unknown file type")

    if isinstance(model, Model):
        return model, "Astropy", constraints
    elif callable(model):
        nargs = model.__code__.co_argcount
        # __defaults__ is None (not an empty tuple) for functions with no
        # keyword arguments: guard against a TypeError in len().
        nkwargs = len(model.__defaults__ or ())
        if not nargs - nkwargs == 1:
            raise TypeError("Accepted callable models have only one " "non-keyword argument")
        return model, "callable", constraints
def find_file_in_allowed_paths(fname, other_paths=None):
    """Check if file exists at its own relative/absolute path, or elsewhere.

    Parameters
    ----------
    fname : str
        The name of the file, with or without a path.

    Other Parameters
    ----------------
    other_paths : list of str
        list of other possible paths

    Returns
    -------
    str or bool
        The path where the file was found, or ``False`` if it was not.
    """
    if fname is None:
        return False
    if os.path.exists(fname):
        return fname

    # Not found at the given location: look for its base name under each
    # of the alternative directories.
    bname = os.path.basename(fname)
    for p in other_paths or []:
        fullpath = os.path.join(p, bname)
        if os.path.exists(fullpath):
            log.info(f"Parfile found at different path: {fullpath}")
            return fullpath
    return False
def main_filter_events(args=None):
    """Command-line entry point to filter event files by energy.

    For each input file, loads the events, filters them to the
    ``[emin, emax]`` energy (or PI) range, and saves the result to a new
    file with an energy-range suffix.
    """
    import argparse
    from .base import _add_default_args, check_negative_numbers_in_args

    description = "Filter events"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("files", help="Input event files", type=str, nargs="+")
    parser.add_argument(
        "--emin",
        default=None,
        type=float,
        help="Minimum energy (or PI if uncalibrated) to plot",
    )
    parser.add_argument(
        "--emax",
        default=None,
        type=float,
        help="Maximum energy (or PI if uncalibrated) to plot",
    )
    _add_default_args(
        parser,
        [
            "loglevel",
            "debug",
            "test",
        ],
    )
    args = check_negative_numbers_in_args(args)
    args = parser.parse_args(args)
    if args.debug:
        args.loglevel = "DEBUG"

    for fname in args.files:
        events = load_events(fname)
        events, _ = filter_energy(events, args.emin, args.emax)
        # NOTE(review): the ":g" format below raises if --emin/--emax are
        # left at their None defaults — confirm whether they should be
        # required arguments.
        save_events(
            events,
            hen_root(fname) + f"_{args.emin:g}-{args.emax:g}keV" + HEN_FILE_EXTENSION,
        )
|
StingraySoftwareREPO_NAMEHENDRICSPATH_START.@HENDRICS_extracted@HENDRICS-main@hendrics@io.py@.PATH_END.py
|
{
"filename": "lsq.py",
"repo_name": "ricardoclandim/NIRVANA",
"repo_path": "NIRVANA_extracted/NIRVANA-master/nirvana/tests/lsq.py",
"type": "Python"
}
|
from IPython import embed
import numpy
from nirvana.data import manga
from nirvana.tests.util import remote_data_file
from nirvana.models.oned import HyperbolicTangent
from nirvana.models.axisym import AxisymmetricDisk
# Benchmarking test for the least-squares fit
def test_lsq_nopsf():
    """Benchmark repeated least-squares disk fits without PSF convolution."""
    # Read the data to fit
    data_root = remote_data_file()
    kin = manga.MaNGAGasKinematics.from_plateifu(
        8078, 12703, cube_path=data_root, maps_path=data_root
    )

    nrun = 100
    for irun in range(1, nrun + 1):
        print('{0}/{1}'.format(irun, nrun), end='\r')
        # Build a fresh disk model (tanh rotation curve) for every run and
        # fit it with a non-linear least-squares optimizer.
        disk = AxisymmetricDisk(HyperbolicTangent())
        disk.lsq_fit(kin)
    print('{0}/{0}'.format(nrun))
# Run the benchmark directly when this file is executed as a script.
if __name__ == '__main__':
    test_lsq_nopsf()
|
ricardoclandimREPO_NAMENIRVANAPATH_START.@NIRVANA_extracted@NIRVANA-master@nirvana@tests@lsq.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/surface/contours/y/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``size`` attribute of ``surface.contours.y``."""

    def __init__(self, plotly_name="size", parent_name="surface.contours.y", **kwargs):
        # Generated callers may override these defaults through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@surface@contours@y@_size.py@.PATH_END.py
|
{
"filename": "distribute.py",
"repo_name": "litebird/litebird_sim",
"repo_path": "litebird_sim_extracted/litebird_sim-master/litebird_sim/distribute.py",
"type": "Python"
}
|
# -*- encoding: utf-8 -*-
from collections import namedtuple
from typing import List
Span = namedtuple("Span", ["start_idx", "num_of_elements"])
"""A sub-range in a sequence of elements.
It has two fields: `start_idx` and `num_of_elements`.
"""
def distribute_evenly(num_of_elements, num_of_groups):
    """Evenly distribute a set of equal elements between groups.

    Assuming that we have `num_of_elements` objects that we want to
    assign to `num_of_groups` separate groups, this function proposes
    a distribution that tries to assign the most uniform number of
    elements to each group.

    This function works even if ``num_of_elements < num_of_groups``.

    .. doctest::

        >>> distribute_evenly(5, 2)
        [Span(start_idx=0, num_of_elements=3), Span(start_idx=3, num_of_elements=2)]
        >>> distribute_evenly(1, 2)
        [Span(start_idx=0, num_of_elements=1), Span(start_idx=1, num_of_elements=0)]

    Args:
        num_of_elements (int): The number of elements to distribute
        num_of_groups (int): The number of groups to use in the result

    Returns:
        a list of 2-element tuples containing `num_of_groups`
        elements, each of them being a 2-element tuple containing (1)
        the index of the element in the group and (2) the number of
        elements in the group. Being named tuples, you can access the
        index using the field name ``start_idx``, and the number of
        elements using the name ``num_of_elements``.

    Raises:
        ValueError: if either argument is not positive. (These were
            plain ``assert`` statements before, which silently disappear
            under ``python -O``.)
    """
    if num_of_elements <= 0:
        raise ValueError("num_of_elements must be a positive integer")
    if num_of_groups <= 0:
        raise ValueError("num_of_groups must be a positive integer")

    base_length = num_of_elements // num_of_groups
    leftovers = num_of_elements % num_of_groups

    # If leftovers == 0, then the number of elements is divided evenly
    # by num_of_groups, and the solution is trivial. If it's not, then
    # each of the "leftovers" is placed in one of the first groups.
    #
    # Example: let's split 8 elements in 3 groups. In this case,
    # base_length=2 and leftovers=2 (elements #7 and #8):
    #
    # +----+----+----+   +----+----+----+   +----+----+
    # | #1 | #2 | #3 |   | #4 | #5 | #6 |   | #7 | #8 |
    # +----+----+----+   +----+----+----+   +----+----+

    result = []
    for i in range(num_of_groups):
        if i < leftovers:
            # Make place for one of the leftovers
            cur_length = base_length + 1
            cur_pos = cur_length * i
        else:
            # No need to accomodate for leftovers, but consider their
            # presence in fixing the starting position for this group
            cur_length = base_length
            cur_pos = base_length * i + leftovers

        result.append(Span(start_idx=cur_pos, num_of_elements=cur_length))

    # Internal sanity checks (postconditions, not input validation).
    assert len(result) == num_of_groups, (
        f"wrong result(len(result)={len(result)}) in "
        + f"distribute_evenly(num_of_elements={num_of_elements}, "
        + f"num_of_groups={num_of_groups})"
    )
    assert sum([pair.num_of_elements for pair in result]) == num_of_elements

    return result
# The following implementation of the painter's partition problem is
# heavily inspired by the code at
# https://www.geeksforgeeks.org/painters-partition-problem-set-2/?ref=rp
def _num_of_workers(arr, n, maxLen, weight_fn):
    """Count how many workers are needed so that none exceeds ``maxLen``.

    Elements are assigned greedily, in order, to the current worker; a new
    worker is started whenever the running weight would exceed ``maxLen``.
    ``n`` is kept for signature compatibility and is not used.
    """
    workers = 1
    running_weight = 0
    for element in arr:
        weight = weight_fn(element)
        running_weight += weight
        if running_weight > maxLen:
            # Current worker is full: hand this element to a fresh one.
            workers += 1
            running_weight = weight
    return workers
def _find_max_and_sum(arr, weight_fn):
    """Return ``(max, sum)`` of the weights of all elements in ``arr``."""
    weights = [weight_fn(item) for item in arr]
    return max(weights), sum(weights)
def _partition(arr, n, k, weight_fn):
    """Binary-search the smallest per-worker weight bound for ``k`` workers.

    Searches over [max single weight, total weight] for the smallest
    bound ``maxLen`` such that :func:`_num_of_workers` needs at most
    ``k`` workers. Note the midpoint uses float division, so the loop
    converges by floating-point halving rather than integer bisection.
    """
    lo, hi = _find_max_and_sum(arr, weight_fn)

    while lo < hi:
        mid = lo + (hi - lo) / 2
        required_workers = _num_of_workers(arr, n, mid, weight_fn)

        # find better optimum in lower half
        # here mid is included because we
        # may not get anything better
        if required_workers <= k:
            hi = mid

        # find better optimum in upper half
        # here mid is excluded because it gives
        # required Painters > k, which is invalid
        else:
            lo = mid + 1

    # lo is now the smallest feasible per-worker weight bound.
    return lo
def __identity_fn(x):
    """Return ``x`` unchanged (default weight function)."""
    return x
def distribute_optimally(elements, num_of_groups, weight_fn=None) -> List[Span]:
    """Evenly distribute a set of equal elements between groups.

    Assuming that we have a set of elements, each having its own
    weight that is computed using `weight_fn` (i.e., for any element
    ``x`` in ``elements``, its weight is ``weight_fn(x)``), this
    function proposes a distribution that tries to assign the elements
    to each group so that the weight in each group is more or less the
    same.

    This function works even if ``num_of_elements < num_of_groups``.

    .. doctest::

        >>> distribute_optimally([10, 10, 10, 10], 2)
        [Span(start_idx=0, num_of_elements=2), Span(start_idx=2, num_of_elements=2)]
        >>> distribute_optimally([10, 10, 10, 20], 2)
        [Span(start_idx=0, num_of_elements=3), Span(start_idx=3, num_of_elements=1)]
        >>> distribute_optimally([40, 10, 10, 10], 2)
        [Span(start_idx=0, num_of_elements=1), Span(start_idx=1, num_of_elements=3)]
        >>> distribute_optimally([('a', 10), ('b', 10), ('c', 10)], 2, lambda x: x[1])
        [Span(start_idx=0, num_of_elements=2), Span(start_idx=2, num_of_elements=1)]

    This function is a generalization of :meth:`.distribute_evenly`;
    in that case, the function assumes that each element weights
    equally.

    Args:
        elements (list): A list of objects to be split in groups
        num_of_groups (int): The number of groups to use in the result
        weight_fn: A function-like object that computes the weight
            given one of the objects in `elements`. If unspecified, the
            identity function will be used.

    Returns:
        a list of 2-element tuples containing `num_of_groups`
        elements, each of them being a 2-element tuple containing (1)
        the index of the element in the group and (2) the number of
        elements in the group. Being named tuples, you can access the
        index using the field name ``start_idx``, and the number of
        elements using the name ``num_of_elements``.
    """
    if not weight_fn:
        weight_fn = __identity_fn

    # Smallest per-group weight bound achievable with num_of_groups groups
    # (painter's partition problem, solved by binary search).
    max_weight = _partition(elements, len(elements), num_of_groups, weight_fn)

    result = []  # type: List[Span]
    start_idx = 0
    weight = 0
    cur_num = 0
    # Greedily pack consecutive elements until the bound would be exceeded,
    # then start a new span.
    for cur_idx, cur_element in enumerate(elements):
        cur_weight = weight_fn(cur_element)
        if weight + cur_weight > max_weight:
            result.append(Span(start_idx=start_idx, num_of_elements=cur_num))
            cur_num = 1
            weight = cur_weight
            start_idx = cur_idx
        else:
            weight += cur_weight
            cur_num += 1

    result.append(Span(start_idx=start_idx, num_of_elements=cur_num))

    # The way we implemented this implies the possibility that not every processor
    # is being used. We just fill with empty elements the end of `result`
    result += [Span(start_idx=0, num_of_elements=0)] * (num_of_groups - len(result))

    assert len(result) == num_of_groups, (
        f"wrong result(len(result)={len(result)}) in "
        + f"distribute_optimally(len(elements)={len(elements)}, "
        + f"num_of_groups={num_of_groups})"
    )
    assert sum([r.num_of_elements for r in result]) == len(elements)

    return result
|
litebirdREPO_NAMElitebird_simPATH_START.@litebird_sim_extracted@litebird_sim-master@litebird_sim@distribute.py@.PATH_END.py
|
{
"filename": "aperture.py",
"repo_name": "HiPERCAM/hipercam",
"repo_path": "hipercam_extracted/hipercam-master/hipercam/aperture.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Defines classes to represent photometric apertures.
The classes support JSON-style serialisation to allow apertures to be
saved to disk in a fairly easily read and editable format.
"""
import numpy as np
import json
from collections import OrderedDict
from .core import *
from .group import *
__all__ = ("Aperture", "CcdAper", "MccdAper")
class Aperture:
    """Represents an aperture for astronomical photometry.

    Essentially this consists of 3 circles representing the object,
    radius `rtarg`, and the sky annulus between radii `rsky1` and `rsky2`,
    centered at a specific position, plus several extra characteristics:

    Reference apertures: indicate (typically) bright stars that should be
    easy to locate. When re-positioning apertures these are done first so
    the overall x,y shift is known before attempting fainter targets.
    This status is indicated with a logical flag.

    Linked apertures: targets that are nearly impossible to register, or
    that may fade below detectability, can be "linked" to others in the
    sense that they are offset from them.

    COMPO apertures: comparison stars injected onto the science image by
    COMPO have a different world coordinate system to the main science
    image; their motion on the CCD is flipped, so an (x, y) shift in the
    science stars produces a (-x, -y) shift in the COMPO stars. This
    status is indicated with a logical flag.

    Sky masks ('mask'): fixed circles offset from the aperture indicating
    pixels to ignore when estimating the sky background (e.g. to exclude
    stars).

    Star masks ('extra'): circles offset from the aperture indicating
    pixels to include when summing the object flux, to combat blending.
    These *also* act as sky masks, so there should be no need to also
    mask such objects.

    Parameters
    ----------
    x : float
        X position of centre of aperture, or the X offset to apply
        if the aperture is linked from another.
    y : float
        Y position of centre of aperture, or the Y offset to apply
        if the aperture is linked from another.
    rtarg : float
        Radius (unbinned pixels) of target aperture
    rsky1 : float
        Inner radius (unbinned pixels) of sky annulus
    rsky2 : float
        Outer radius (unbinned pixels) of sky annulus
    ref : bool
        True/False to indicate whether this is a reference aperture,
        meaning that its position will be re-determined before
        non-reference apertures to provide a shift.
    compo : bool
        True/False to indicate whether this is a COMPO aperture, meaning
        that its shifts are inverted w.r.t. non-COMPO apertures.
    mask : list of 3 element tuples, optional
        Each tuple consists of an x,y offset and a radius in unbinned
        pixels, used to mask nearby areas when determining the sky value.
        Defaults to an empty list.
    extra : list of 2 element tuples, optional
        Each tuple consists only of an x,y offset, used as centres of
        additional target apertures (same radius as the target) to allow
        blended stars to be included in the total flux. They are also
        used to exclude sky pixels. Defaults to an empty list.
    link : str
        If != '', a string label for another :class:`Aperture` that
        *this* :class:`Aperture` is linked from; the label can be used
        to look the other :class:`Aperture` up.

    .. note::
       Normal practice would be to set link, mask, extra later, having
       created the Aperture. Attributes of the same name as all the
       arguments are defined. The mask and extra lists are copied to
       avoid propagating references.
    """

    def __init__(
        self, x, y, rtarg, rsky1, rsky2, ref, compo=False, mask=None, extra=None, link=""
    ):
        self.x = x
        self.y = y
        self.rtarg = rtarg
        self.rsky1 = rsky1
        self.rsky2 = rsky2
        self.ref = ref
        self.compo = compo
        # None sentinels avoid the shared mutable-default pitfall; the
        # lists are copied so later in-place edits do not leak back to
        # the caller.
        self.mask = [] if mask is None else mask.copy()
        self.extra = [] if extra is None else extra.copy()
        self.link = link

    def __repr__(self):
        return "Aperture(x={!r}, y={!r}, rtarg={!r}, rsky1={!r}, rsky2={!r}, ref={!r}, compo={!r}, mask={!r}, extra={!r}, link={!r})".format(
            self.x,
            self.y,
            self.rtarg,
            self.rsky1,
            self.rsky2,
            self.ref,
            self.compo,
            self.mask,
            self.extra,
            self.link,
        )

    def copy(self, memo=None):
        """Returns a copy of the Aperture (mask/extra lists copied too)."""
        return Aperture(
            self.x,
            self.y,
            self.rtarg,
            self.rsky1,
            self.rsky2,
            self.ref,
            self.compo,
            self.mask.copy(),
            self.extra.copy(),
            self.link,
        )

    def add_mask(self, xoff, yoff, radius):
        """Adds a sky mask circle (offset + radius) to the :class:`Aperture`"""
        self.mask.append((xoff, yoff, radius))

    def add_extra(self, xoff, yoff):
        """Adds an extra target aperture (offset) to the :class:`Aperture`"""
        self.extra.append((xoff, yoff))

    def set_link(self, aplabel):
        """Links this :class:`Aperture` to a lookup label for another"""
        self.link = aplabel

    def break_link(self):
        """Cancels any link to another :class:`Aperture`"""
        self.link = ""

    @property
    def linked(self):
        """Returns True if the :class:`Aperture` is linked to another"""
        return self.link != ""

    def check(self):
        """Run a few checks on an :class:`Aperture`.

        Raises a ValueError if the target radius is non-positive, the sky
        annulus radii are inverted, or link/mask/ref/compo have the wrong
        types.
        """
        if self.rtarg <= 0:
            raise ValueError(
                "Aperture = {!r}\nTarget aperture radius = "
                "{:.2f} <= 0".format(self, self.rtarg)
            )
        elif self.rsky1 > self.rsky2:
            raise ValueError(
                "Aperture = {!r}\nInner sky aperture radius "
                "(={:.2f}) > outer radius (={:.2f})".format(
                    self, self.rsky1, self.rsky2
                )
            )
        elif (
            not isinstance(self.link, str)
            or not isinstance(self.mask, list)
            or not isinstance(self.ref, bool)
            or not isinstance(self.compo, bool)
        ):
            raise ValueError(
                "Aperture = {!r}\nOne or more of link, mask, ref or compo "
                "has the wrong type".format(self)
            )

    def write(self, fname):
        """Dumps Aperture in JSON format to a file called fname"""
        with open(fname, "w") as fp:
            # bug fix: json.dump requires the file object as its second
            # argument; it was previously omitted, raising a TypeError.
            json.dump(self, fp, cls=_Encoder, indent=2)

    def toString(self):
        """Returns Aperture as a JSON-type string"""
        return json.dumps(self, cls=_Encoder, indent=2)

    @classmethod
    def read(cls, fname):
        """Read an Aperture from JSON-format file fname, with checks."""
        with open(fname) as fp:
            aper = json.load(fp, cls=_Decoder)
        aper.check()
        return aper
class CcdAper(Group):
    """Class representing all the :class:`Aperture`\\ s for a single CCD.

    Normal usage is to create an empty one and then add apertures via
    the usual mechanism for updating dictionaries, i.e.
    ccdap[label] = aperture.
    """

    def __init__(self, aps=None):
        """Constructs a :class:`CcdAper`.

        Arguments::

           aps : (Group)
              Group of :class:`Aperture` objects. Defaults to an empty
              Group, created fresh per call (the old ``aps=Group(Aperture)``
              default was evaluated once and shared between instances).
        """
        if aps is None:
            aps = Group(Aperture)
        super().__init__(Aperture, aps)

    def __repr__(self):
        return "{:s}(aps={:s})".format(self.__class__.__name__, super().__repr__())

    def check(self):
        """Checks for problems with links.

        Raises a ValueError if an aperture links to a non-existent
        aperture, or to an aperture that is itself linked.
        """
        for apnam, aper in self.items():
            if aper.linked:
                if aper.link not in self:
                    # bug fix: report the offending Aperture (was
                    # formatting the whole CcdAper), and fix the
                    # "anon-existent" typo
                    raise ValueError(
                        "Aperture = {!r} links to a non-existent aperture".format(aper)
                    )
                elif self[aper.link].linked:
                    raise ValueError(
                        "Aperture = {!r} is linked to an aperture which is itself linked".format(
                            aper
                        )
                    )

    def write(self, fname):
        """Dumps CcdAper in JSON format to a file called fname"""
        # dumps as list to retain order through default iterator encoding
        # that buggers things otherwise.
        # bug fix: `items` is a method and must be called; `list(self.items)`
        # raised a TypeError.
        listify = ["hipercam.CcdAper"] + list(self.items())
        with open(fname, "w") as fp:
            json.dump(listify, fp, cls=_Encoder, indent=2)

    def copy(self, memo=None):
        """Returns a copy of the CcdAper"""
        return CcdAper(super().copy(memo))
class MccdAper(Group):
    """Class representing all the :class:`Aperture`\\ s for multiple CCDs.

    Normal usage is to create an empty one and then add apertures via
    the usual mechanism for updating dictionaries, e.g.

      >> mccdap = MccdAper()
      >> mccdap['ccd1'] = CcdAper()
      >> mccdap['ccd2'] = CcdAper()
      >> mccdap['ccd1']['ap1'] = Aperture(100,200,10,15,125,False)

    etc.
    """

    def __init__(self, aps=None):
        """Constructs an :class:`MccdAper`.

        Arguments::

           aps : (Group)
              Group of :class:`CcdAper` objects. Defaults to an empty
              Group, created fresh per call (the old ``aps=Group(CcdAper)``
              default was evaluated once and shared between instances).
        """
        if aps is None:
            aps = Group(CcdAper)
        super().__init__(CcdAper, aps)

    def __repr__(self):
        return "{:s}(aps={:s})".format(self.__class__.__name__, super().__repr__())

    def _listify(self):
        # Helper shared by write/toString: convert to a nested list
        # structure that retains ordering when serialised (the default
        # iterator encoding buggers things otherwise).
        return ["hipercam.MccdAper"] + [
            (key, ["hipercam.CcdAper"] + list(val.items()))
            for key, val in self.items()
        ]

    def write(self, fname):
        """Dumps MccdAper in JSON format to a file called fname"""
        with open(fname, "w") as fp:
            json.dump(self._listify(), fp, cls=_Encoder, indent=2)

    def toString(self):
        """Returns MccdAper in JSON format as a string"""
        return json.dumps(self._listify(), cls=_Encoder, indent=2)

    @classmethod
    def read(cls, fname):
        """Read from JSON-format file fname.

        Since such files can fairly easily be corrupted by injudicious
        editing, consistency checks are run on every CcdAper. File
        loading does not happen often, so this should not be a serious
        overhead.

        Returns an MccdAper object.
        """
        with open(fname) as fp:
            obj = json.load(fp, cls=_Decoder)
        # obj[0] is the "hipercam.MccdAper" tag; each value starts with
        # a "hipercam.CcdAper" tag followed by (label, Aperture) pairs.
        listify = [(v1, CcdAper(v2[1:])) for v1, v2 in obj[1:]]
        mccdaper = cls(listify)
        for cnam, ccdaper in mccdaper.items():
            ccdaper.check()
        return mccdaper
# classes to support JSON serialisation of Aperture objects
class _Encoder(json.JSONEncoder):
    """JSON encoder that knows how to serialise :class:`Aperture` objects."""

    def default(self, obj):
        # Anything that is not an Aperture falls back to the stock
        # encoder (which raises TypeError for unknown types).
        if not isinstance(obj, Aperture):
            return super().default(obj)
        # Field order is fixed so the JSON files are stable and readable;
        # the leading "Comment" tags the dict as a hipercam Aperture.
        fields = [
            ("Comment", "hipercam.Aperture"),
            ("x", obj.x),
            ("y", obj.y),
            ("rtarg", obj.rtarg),
            ("rsky1", obj.rsky1),
            ("rsky2", obj.rsky2),
            ("ref", obj.ref),
            ("compo", obj.compo),
            ("mask", obj.mask),
            ("extra", obj.extra),
            ("link", obj.link),
        ]
        return OrderedDict(fields)
class _Decoder(json.JSONDecoder):
    """JSON decoder that rebuilds :class:`Aperture` objects from dicts."""

    def __init__(self, *args, **kwargs):
        super().__init__(object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        # Only dicts carrying the characteristic Aperture keys are
        # converted; every other dict passes through unchanged.
        if all(key in obj for key in ("rtarg", "rsky1", "rsky2", "link")):
            return Aperture(
                obj["x"],
                obj["y"],
                obj["rtarg"],
                obj["rsky1"],
                obj["rsky2"],
                obj["ref"],
                # "compo" is absent in files written before COMPO support
                obj.get("compo", False),
                obj["mask"],
                obj["extra"],
                obj["link"],
            )
        return obj
|
HiPERCAMREPO_NAMEhipercamPATH_START.@hipercam_extracted@hipercam-master@hipercam@aperture.py@.PATH_END.py
|
{
"filename": "find_inventories.py",
"repo_name": "AishwaryaC26/RIS-Vis",
"repo_path": "RIS-Vis_extracted/RIS-Vis-main/app/find_inventories.py",
"type": "Python"
}
|
import os
from dotenv import load_dotenv
import ast
from obspy.clients.fdsn import Client

# Finds and saves the response inventory for every configured seismic
# station as a StationXML file in the current directory.
load_dotenv()

# SEISMIC_STATIONS holds a dict literal in the environment, presumably of
# the form {code: {"net": ..., "chan": ...}} — keys are station codes.
stations = ast.literal_eval(os.environ["SEISMIC_STATIONS"])

# create list of station codes from the "stations" dict
stationsoptions = list(stations.keys())

# establish client to access the IRIS FDSN web services
client = Client("IRIS")

for sta in stationsoptions:
    cfg = stations[sta]
    inventory = client.get_stations(
        network=cfg["net"],
        station=sta,
        channel=cfg["chan"],
        location="--",
        level="response",
    )
    inventory.write(f"{sta}.xml", format="STATIONXML")
|
AishwaryaC26REPO_NAMERIS-VisPATH_START.@RIS-Vis_extracted@RIS-Vis-main@app@find_inventories.py@.PATH_END.py
|
{
"filename": "test_pyintegrators.py",
"repo_name": "adrn/gala",
"repo_path": "gala_extracted/gala-main/gala/integrate/tests/test_pyintegrators.py",
"type": "Python"
}
|
"""
Test the integrators.
"""
import os
# Third-party
import pytest
import numpy as np
from astropy.utils.exceptions import AstropyDeprecationWarning
# Project
from .. import (
LeapfrogIntegrator,
RK5Integrator,
DOPRI853Integrator,
Ruth4Integrator,
)
from gala.tests.optional_deps import HAS_TQDM
# Integrators to test
integrator_list = [
RK5Integrator,
DOPRI853Integrator,
LeapfrogIntegrator,
Ruth4Integrator,
]
# Gradient functions:
def sho_F(t, w, T):  # noqa
    """Phase-space derivative of a simple harmonic oscillator of period T."""
    q, p = w
    omega_sq = (2 * np.pi / T) ** 2  # squared angular frequency
    wdot = np.zeros_like(w)
    wdot[0] = p               # dq/dt = p
    wdot[1] = -omega_sq * q   # dp/dt = -omega^2 q
    return wdot
def forced_sho_F(t, w, A, omega_d):
    """Derivative of a periodically driven pendulum in first-order form:
    q'' = -sin(q) + A*cos(omega_d * t)."""
    q, p = w
    drive = A * np.cos(omega_d * t)  # external periodic forcing
    wdot = np.zeros_like(w)
    wdot[0] = p
    wdot[1] = drive - np.sin(q)
    return wdot
def lorenz_F(t, w, sigma, rho, beta):
    """Lorenz-system derivative; only the first three components of w
    evolve, any remaining components keep a zero derivative."""
    x, y, z = w[0], w[1], w[2]
    wdot = np.zeros_like(w)
    wdot[0] = sigma * (y - x)
    wdot[1] = x * (rho - z) - y
    wdot[2] = x * y - beta * z
    return wdot
def ptmass_F(t, w):
    """Planar point-mass (Kepler) gradient for unit GM:
    (x, y, px, py) -> (px, py, ax, ay) with a = -r / |r|^3."""
    x, y, px, py = w
    r2 = x * x + y * y
    a = -1.0 / r2 ** 1.5  # acceleration divided by radius
    wdot = np.zeros_like(w)
    wdot[0] = px
    wdot[1] = py
    wdot[2] = a * x
    wdot[3] = a * y
    return wdot
@pytest.mark.parametrize("Integrator", integrator_list)
def test_sho_forward_backward(Integrator):
    """Integrate the SHO with +dt and with -dt from the same initial state
    and require the final states to agree (time-reversal consistency)."""
    integrator = Integrator(sho_F, func_args=(1.0,))
    dt = 1e-4
    n_steps = 10_000
    forw = integrator([0.0, 1.0], dt=dt, n_steps=n_steps)
    back = integrator([0.0, 1.0], dt=-dt, n_steps=n_steps)
    assert np.allclose(forw.w()[:, -1], back.w()[:, -1], atol=1e-6)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_deprecated_run_method(Integrator):
    """Test the deprecated run method.

    ``.run()`` must emit an AstropyDeprecationWarning but still produce
    the same result as calling the integrator directly.
    """
    integrator = Integrator(sho_F, func_args=(1.0,))
    dt = 1e-4
    n_steps = 10_000
    with pytest.warns(AstropyDeprecationWarning):
        run = integrator.run([0.0, 1.0], dt=dt, n_steps=n_steps)
    call = integrator([0.0, 1.0], dt=dt, n_steps=n_steps)
    assert np.allclose(run.w()[:, -1], call.w()[:, -1], atol=1e-6)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_point_mass(Integrator):
    """A circular Kepler orbit (r=1, v=1) must return to its initial state
    after one full period of 2*pi."""
    q0 = np.array([1.0, 0.0])
    p0 = np.array([0.0, 1.0])
    integrator = Integrator(ptmass_F)
    orbit = integrator(np.append(q0, p0), t1=0.0, t2=2 * np.pi, n_steps=1e4)
    assert np.allclose(orbit.w()[:, 0], orbit.w()[:, -1], atol=1e-6)
@pytest.mark.skipif(not HAS_TQDM, reason="requires tqdm to run this test")
@pytest.mark.parametrize("Integrator", integrator_list)
def test_progress(Integrator):
    """Smoke test: integration with progress=True runs without error
    (requires tqdm)."""
    q0 = np.array([1.0, 0.0])
    p0 = np.array([0.0, 1.0])
    integrator = Integrator(ptmass_F, progress=True)
    _ = integrator(np.append(q0, p0), t1=0.0, t2=2 * np.pi, n_steps=1e2)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_point_mass_multiple(Integrator):
    """Smoke test: integrate three point-mass orbits at once
    (w0 has shape (4, 3) after the transpose)."""
    w0 = np.array(
        [[1.0, 0.0, 0.0, 1.0], [0.8, 0.0, 0.0, 1.1], [2.0, 1.0, -1.0, 1.1]]
    ).T
    integrator = Integrator(ptmass_F)
    _ = integrator(w0, dt=1e-3, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_driven_pendulum(Integrator):
    """Smoke test: integrate the periodically driven pendulum
    (A=0.07, omega_d=0.75)."""
    integrator = Integrator(forced_sho_F, func_args=(0.07, 0.75))
    _ = integrator([3.0, 0.0], dt=1e-2, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_lorenz(Integrator):
    """Smoke test: integrate the Lorenz system with the classic
    (sigma, rho, beta) = (10, 28, 8/3) parameters."""
    sigma, rho, beta = 10.0, 28.0, 8 / 3.0
    integrator = Integrator(lorenz_F, func_args=(sigma, rho, beta))
    _ = integrator([0.5, 0.5, 0.5, 0, 0, 0], dt=1e-2, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_memmap(tmpdir, Integrator):
    """Smoke test: integration output can be written into a numpy memmap."""
    dt = 0.1
    n_steps = 1000
    nw0 = 10000
    filename = os.path.join(str(tmpdir), "test_memmap.npy")
    # memmap shaped (2, n_steps + 1, nw0) — presumably
    # (phase-space dim, time steps incl. t0, number of orbits)
    mmap = np.memmap(filename, mode="w+", shape=(2, n_steps + 1, nw0))
    w0 = np.random.uniform(-1, 1, size=(2, nw0))
    integrator = Integrator(sho_F, func_args=(1.0,))
    _ = integrator(w0, dt=dt, n_steps=n_steps, mmap=mmap)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_py_store_all(Integrator):
    """With store_all=False the returned orbit holds a single state, which
    must equal the final state of an equivalent store_all=True run."""
    integrator_all = Integrator(sho_F, func_args=(1.3,), store_all=True)
    integrator_final = Integrator(sho_F, func_args=(1.3,), store_all=False)
    dt = 1e-4
    n_steps = 10_000
    out_all = integrator_all([0.0, 1.0], dt=dt, n_steps=n_steps)
    out_final = integrator_final([0.0, 1.0], dt=dt, n_steps=n_steps)
    assert np.allclose(out_all.w()[:, -1], out_final.w()[:, 0])
|
adrnREPO_NAMEgalaPATH_START.@gala_extracted@gala-main@gala@integrate@tests@test_pyintegrators.py@.PATH_END.py
|
{
"filename": "test_quantity_annotations.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/units/tests/test_quantity_annotations.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# ruff: noqa: FA100, FA102
import pytest
from astropy import units as u
from astropy.units import Quantity
def test_ignore_generic_type_annotations():
    """Test annotations that are not unit related are ignored.
    This test passes if the function works.
    """
    # one unit, one not (should be ignored)
    @u.quantity_input
    def func(x: u.m, y: str):
        return x, y
    # the str annotation on y must not trigger any unit checking
    i_q, i_str = 2 * u.m, "cool string"
    o_q, o_str = func(i_q, i_str)  # if this doesn't fail, it worked.
    assert i_q == o_q
    assert i_str == o_str
class TestQuantityUnitAnnotations:
    """Test Quantity[Unit] type annotation."""
    def test_simple_annotation(self):
        """Quantity[u.m] is unit-checked; the plain str annotation is not."""
        @u.quantity_input
        def func(x: Quantity[u.m], y: str):
            return x, y
        i_q, i_str = 2 * u.m, "cool string"
        o_q, o_str = func(i_q, i_str)
        assert i_q == o_q
        assert i_str == o_str
        # checks the input on the 1st arg
        with pytest.raises(u.UnitsError):
            func(1 * u.s, i_str)
        # but not the second
        o_q, o_str = func(i_q, {"not": "a string"})
        assert i_q == o_q
        assert i_str != o_str
    def test_multiple_annotation(self):
        """A Quantity[u.m] return annotation converts the km result to m."""
        @u.quantity_input
        def multi_func(a: Quantity[u.km]) -> Quantity[u.m]:
            return a
        i_q = 2 * u.km
        o_q = multi_func(i_q)
        assert o_q == i_q
        assert o_q.unit == u.m
    def test_optional_and_annotated(self):
        """An optional (``Quantity[...] | None``) parameter accepts both a
        Quantity and None."""
        @u.quantity_input
        def opt_func(x: Quantity[u.m] | None = None) -> Quantity[u.km]:
            if x is None:
                return 1 * u.km
            return x
        i_q = 250 * u.m
        o_q = opt_func(i_q)
        assert o_q.unit == u.km
        assert o_q == i_q
        i_q = None
        o_q = opt_func(i_q)
        assert o_q == 1 * u.km
    def test_union_and_annotated(self):
        """A union of two Quantity annotations and None accepts all three."""
        # Union and Annotated
        @u.quantity_input
        def union_func(x: Quantity[u.m] | (Quantity[u.s] | None)):
            if x is None:
                return None
            else:
                return 2 * x
        i_q = 1 * u.m
        o_q = union_func(i_q)
        assert o_q == 2 * i_q
        i_q = 1 * u.s
        o_q = union_func(i_q)
        assert o_q == 2 * i_q
        i_q = None
        o_q = union_func(i_q)
        assert o_q is None
    def test_not_unit_or_ptype(self):
        """Quantity[<non-unit>] raises a TypeError."""
        with pytest.raises(TypeError, match="unit annotation is not"):
            Quantity["definitely not a unit"]
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args3(solarx_unit, solary_unit):
    """Both annotated positional args are unit-checked; the annotation may
    be a unit object or a physical-type string."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary
    solarx, solary = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
    assert isinstance(solarx, Quantity)
    assert isinstance(solary, Quantity)
    assert solarx.unit == u.arcsec
    assert solary.unit == u.arcsec
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args_noconvert3(solarx_unit, solary_unit):
    """quantity_input used as a called decorator: compatible inputs keep
    their original units rather than being converted."""
    @u.quantity_input()
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary
    solarx, solary = myfunc_args(1 * u.deg, 1 * u.arcmin)
    assert isinstance(solarx, Quantity)
    assert isinstance(solary, Quantity)
    assert solarx.unit == u.deg
    assert solary.unit == u.arcmin
@pytest.mark.parametrize("solarx_unit", [u.arcsec, "angle"])
def test_args_nonquantity3(solarx_unit):
    """An unannotated argument is passed through untouched (plain int)."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary):
        return solarx, solary
    solarx, solary = myfunc_args(1 * u.arcsec, 100)
    assert isinstance(solarx, Quantity)
    assert isinstance(solary, int)
    assert solarx.unit == u.arcsec
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.eV), ("angle", "energy")]
)
def test_arg_equivalencies3(solarx_unit, solary_unit):
    """With equivalencies=u.mass_energy(), a mass argument satisfies an
    energy annotation."""
    @u.quantity_input(equivalencies=u.mass_energy())
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary + (10 * u.J)  # Add an energy to check equiv is working
    solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.gram)
    assert isinstance(solarx, Quantity)
    assert isinstance(solary, Quantity)
    assert solarx.unit == u.arcsec
    assert solary.unit == u.gram
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_wrong_unit3(solarx_unit, solary_unit):
    """An incompatible unit raises UnitsError with an informative message."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary
    with pytest.raises(
        u.UnitsError,
        match=(
            "Argument 'solary' to function 'myfunc_args' must be in units "
            f"convertible to '{str(solary_unit)}'."
        ),
    ):
        solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.km)
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_not_quantity3(solarx_unit, solary_unit):
    """A unit-less value for an annotated argument raises TypeError."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary
    with pytest.raises(
        TypeError,
        match=(
            "Argument 'solary' to function 'myfunc_args' has no 'unit' "
            "attribute. You should pass in an astropy Quantity instead."
        ),
    ):
        solarx, solary = myfunc_args(1 * u.arcsec, 100)
def test_decorator_override():
    """An explicit keyword given to quantity_input overrides the
    parameter's own annotation (u.arcsec wins over u.km)."""
    @u.quantity_input(solarx=u.arcsec)
    def myfunc_args(solarx: u.km, solary: u.arcsec):
        return solarx, solary
    solarx, solary = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
    assert isinstance(solarx, Quantity)
    assert isinstance(solary, Quantity)
    assert solarx.unit == u.arcsec
    assert solary.unit == u.arcsec
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwargs3(solarx_unit, solary_unit):
    """Annotated keyword arguments are unit-checked like positional ones;
    the unannotated argument passes through untouched."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec):
        return solarx, solary, myk
    solarx, solary, myk = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg)
    assert isinstance(solarx, Quantity)
    assert isinstance(solary, int)
    assert isinstance(myk, Quantity)
    assert myk.unit == u.deg
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_unused_kwargs3(solarx_unit, solary_unit):
    """Unannotated keyword arguments (myk2) are left untouched."""
    @u.quantity_input
    def myfunc_args(
        solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec, myk2=1000
    ):
        return solarx, solary, myk, myk2
    solarx, solary, myk, myk2 = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg, myk2=10)
    assert isinstance(solarx, Quantity)
    assert isinstance(solary, int)
    assert isinstance(myk, Quantity)
    assert isinstance(myk2, int)
    assert myk.unit == u.deg
    assert myk2 == 10
@pytest.mark.parametrize("solarx_unit,energy", [(u.arcsec, u.eV), ("angle", "energy")])
def test_kwarg_equivalencies3(solarx_unit, energy):
    """Equivalencies apply to annotated keyword arguments as well."""
    @u.quantity_input(equivalencies=u.mass_energy())
    def myfunc_args(solarx: solarx_unit, energy: energy = 10 * u.eV):
        return solarx, energy + (10 * u.J)  # Add an energy to check equiv is working
    solarx, energy = myfunc_args(1 * u.arcsec, 100 * u.gram)
    assert isinstance(solarx, Quantity)
    assert isinstance(energy, Quantity)
    assert solarx.unit == u.arcsec
    assert energy.unit == u.gram
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_wrong_unit3(solarx_unit, solary_unit):
    """An incompatible unit passed via keyword raises UnitsError."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
        return solarx, solary
    with pytest.raises(
        u.UnitsError,
        match=(
            "Argument 'solary' to function 'myfunc_args' must be in "
            f"units convertible to '{str(solary_unit)}'."
        ),
    ):
        solarx, solary = myfunc_args(1 * u.arcsec, solary=100 * u.km)
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_not_quantity3(solarx_unit, solary_unit):
    """A unit-less keyword value for an annotated argument raises TypeError."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
        return solarx, solary
    with pytest.raises(
        TypeError,
        match=(
            "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. "
            "You should pass in an astropy Quantity instead."
        ),
    ):
        solarx, solary = myfunc_args(1 * u.arcsec, solary=100)
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_default3(solarx_unit, solary_unit):
    """Calling without the annotated keyword succeeds using its default."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
        return solarx, solary
    solarx, solary = myfunc_args(1 * u.arcsec)
def test_return_annotation():
    """A unit return annotation converts the result to that unit."""
    @u.quantity_input
    def myfunc_args(solarx: u.arcsec) -> u.deg:
        return solarx
    solarx = myfunc_args(1 * u.arcsec)
    assert solarx.unit is u.deg
def test_return_annotation_none():
    """A ``-> None`` return annotation is accepted; the result stays None."""
    @u.quantity_input
    def myfunc_args(solarx: u.arcsec) -> None:
        pass
    solarx = myfunc_args(1 * u.arcsec)
    assert solarx is None
def test_return_annotation_notUnit():
    """A non-unit return annotation (int) is ignored by quantity_input."""
    @u.quantity_input
    def myfunc_args(solarx: u.arcsec) -> int:
        return 0
    solarx = myfunc_args(1 * u.arcsec)
    assert solarx == 0
def test_enum_annotation():
    """Enum-annotated arguments are ignored by quantity_input."""
    # Regression test for gh-9932
    from enum import Enum, auto
    class BasicEnum(Enum):
        AnOption = auto()
    @u.quantity_input
    def myfunc_args(a: BasicEnum, b: u.arcsec) -> None:
        pass
    myfunc_args(BasicEnum.AnOption, 1 * u.arcsec)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@units@tests@test_quantity_annotations.py@.PATH_END.py
|
{
"filename": "_showwhiskers.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/box/_showwhiskers.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowwhiskersValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``box.showwhiskers`` attribute."""

    def __init__(self, plotly_name="showwhiskers", parent_name="box", **kwargs):
        # Default the edit type to "calc" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "calc")
        super(ShowwhiskersValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@box@_showwhiskers.py@.PATH_END.py
|
{
"filename": "latex_symbols.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py3/IPython/core/latex_symbols.py",
"type": "Python"
}
|
# encoding: utf-8
# DO NOT EDIT THIS FILE BY HAND.
# To update this file, run the script /tools/gen_latex_symbols.py using Python 3
# This file is autogenerated from the file:
# https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
# This original list is filtered to remove any unicode characters that are not valid
# Python identifiers.
latex_symbols = {
"\\euler" : "ℯ",
"\\^a" : "ᵃ",
"\\^b" : "ᵇ",
"\\^c" : "ᶜ",
"\\^d" : "ᵈ",
"\\^e" : "ᵉ",
"\\^f" : "ᶠ",
"\\^g" : "ᵍ",
"\\^h" : "ʰ",
"\\^i" : "ⁱ",
"\\^j" : "ʲ",
"\\^k" : "ᵏ",
"\\^l" : "ˡ",
"\\^m" : "ᵐ",
"\\^n" : "ⁿ",
"\\^o" : "ᵒ",
"\\^p" : "ᵖ",
"\\^r" : "ʳ",
"\\^s" : "ˢ",
"\\^t" : "ᵗ",
"\\^u" : "ᵘ",
"\\^v" : "ᵛ",
"\\^w" : "ʷ",
"\\^x" : "ˣ",
"\\^y" : "ʸ",
"\\^z" : "ᶻ",
"\\^A" : "ᴬ",
"\\^B" : "ᴮ",
"\\^D" : "ᴰ",
"\\^E" : "ᴱ",
"\\^G" : "ᴳ",
"\\^H" : "ᴴ",
"\\^I" : "ᴵ",
"\\^J" : "ᴶ",
"\\^K" : "ᴷ",
"\\^L" : "ᴸ",
"\\^M" : "ᴹ",
"\\^N" : "ᴺ",
"\\^O" : "ᴼ",
"\\^P" : "ᴾ",
"\\^R" : "ᴿ",
"\\^T" : "ᵀ",
"\\^U" : "ᵁ",
"\\^V" : "ⱽ",
"\\^W" : "ᵂ",
"\\^alpha" : "ᵅ",
"\\^beta" : "ᵝ",
"\\^gamma" : "ᵞ",
"\\^delta" : "ᵟ",
"\\^epsilon" : "ᵋ",
"\\^theta" : "ᶿ",
"\\^iota" : "ᶥ",
"\\^phi" : "ᵠ",
"\\^chi" : "ᵡ",
"\\^Phi" : "ᶲ",
"\\_a" : "ₐ",
"\\_e" : "ₑ",
"\\_h" : "ₕ",
"\\_i" : "ᵢ",
"\\_j" : "ⱼ",
"\\_k" : "ₖ",
"\\_l" : "ₗ",
"\\_m" : "ₘ",
"\\_n" : "ₙ",
"\\_o" : "ₒ",
"\\_p" : "ₚ",
"\\_r" : "ᵣ",
"\\_s" : "ₛ",
"\\_t" : "ₜ",
"\\_u" : "ᵤ",
"\\_v" : "ᵥ",
"\\_x" : "ₓ",
"\\_schwa" : "ₔ",
"\\_beta" : "ᵦ",
"\\_gamma" : "ᵧ",
"\\_rho" : "ᵨ",
"\\_phi" : "ᵩ",
"\\_chi" : "ᵪ",
"\\hbar" : "ħ",
"\\sout" : "̶",
"\\ordfeminine" : "ª",
"\\cdotp" : "·",
"\\ordmasculine" : "º",
"\\AA" : "Å",
"\\AE" : "Æ",
"\\DH" : "Ð",
"\\O" : "Ø",
"\\TH" : "Þ",
"\\ss" : "ß",
"\\aa" : "å",
"\\ae" : "æ",
"\\eth" : "ð",
"\\dh" : "ð",
"\\o" : "ø",
"\\th" : "þ",
"\\DJ" : "Đ",
"\\dj" : "đ",
"\\imath" : "ı",
"\\jmath" : "ȷ",
"\\L" : "Ł",
"\\l" : "ł",
"\\NG" : "Ŋ",
"\\ng" : "ŋ",
"\\OE" : "Œ",
"\\oe" : "œ",
"\\hvlig" : "ƕ",
"\\nrleg" : "ƞ",
"\\doublepipe" : "ǂ",
"\\trna" : "ɐ",
"\\trnsa" : "ɒ",
"\\openo" : "ɔ",
"\\rtld" : "ɖ",
"\\schwa" : "ə",
"\\varepsilon" : "ε",
"\\pgamma" : "ɣ",
"\\pbgam" : "ɤ",
"\\trnh" : "ɥ",
"\\btdl" : "ɬ",
"\\rtll" : "ɭ",
"\\trnm" : "ɯ",
"\\trnmlr" : "ɰ",
"\\ltlmr" : "ɱ",
"\\ltln" : "ɲ",
"\\rtln" : "ɳ",
"\\clomeg" : "ɷ",
"\\ltphi" : "ɸ",
"\\trnr" : "ɹ",
"\\trnrl" : "ɺ",
"\\rttrnr" : "ɻ",
"\\rl" : "ɼ",
"\\rtlr" : "ɽ",
"\\fhr" : "ɾ",
"\\rtls" : "ʂ",
"\\esh" : "ʃ",
"\\trnt" : "ʇ",
"\\rtlt" : "ʈ",
"\\pupsil" : "ʊ",
"\\pscrv" : "ʋ",
"\\invv" : "ʌ",
"\\invw" : "ʍ",
"\\trny" : "ʎ",
"\\rtlz" : "ʐ",
"\\yogh" : "ʒ",
"\\glst" : "ʔ",
"\\reglst" : "ʕ",
"\\inglst" : "ʖ",
"\\turnk" : "ʞ",
"\\dyogh" : "ʤ",
"\\tesh" : "ʧ",
"\\rasp" : "ʼ",
"\\verts" : "ˈ",
"\\verti" : "ˌ",
"\\lmrk" : "ː",
"\\hlmrk" : "ˑ",
"\\grave" : "̀",
"\\acute" : "́",
"\\hat" : "̂",
"\\tilde" : "̃",
"\\bar" : "̄",
"\\breve" : "̆",
"\\dot" : "̇",
"\\ddot" : "̈",
"\\ocirc" : "̊",
"\\H" : "̋",
"\\check" : "̌",
"\\palh" : "̡",
"\\rh" : "̢",
"\\c" : "̧",
"\\k" : "̨",
"\\sbbrg" : "̪",
"\\strike" : "̶",
"\\Alpha" : "Α",
"\\Beta" : "Β",
"\\Gamma" : "Γ",
"\\Delta" : "Δ",
"\\Epsilon" : "Ε",
"\\Zeta" : "Ζ",
"\\Eta" : "Η",
"\\Theta" : "Θ",
"\\Iota" : "Ι",
"\\Kappa" : "Κ",
"\\Lambda" : "Λ",
"\\Xi" : "Ξ",
"\\Pi" : "Π",
"\\Rho" : "Ρ",
"\\Sigma" : "Σ",
"\\Tau" : "Τ",
"\\Upsilon" : "Υ",
"\\Phi" : "Φ",
"\\Chi" : "Χ",
"\\Psi" : "Ψ",
"\\Omega" : "Ω",
"\\alpha" : "α",
"\\beta" : "β",
"\\gamma" : "γ",
"\\delta" : "δ",
"\\zeta" : "ζ",
"\\eta" : "η",
"\\theta" : "θ",
"\\iota" : "ι",
"\\kappa" : "κ",
"\\lambda" : "λ",
"\\mu" : "μ",
"\\nu" : "ν",
"\\xi" : "ξ",
"\\pi" : "π",
"\\rho" : "ρ",
"\\varsigma" : "ς",
"\\sigma" : "σ",
"\\tau" : "τ",
"\\upsilon" : "υ",
"\\varphi" : "φ",
"\\chi" : "χ",
"\\psi" : "ψ",
"\\omega" : "ω",
"\\vartheta" : "ϑ",
"\\phi" : "ϕ",
"\\varpi" : "ϖ",
"\\Stigma" : "Ϛ",
"\\Digamma" : "Ϝ",
"\\digamma" : "ϝ",
"\\Koppa" : "Ϟ",
"\\Sampi" : "Ϡ",
"\\varkappa" : "ϰ",
"\\varrho" : "ϱ",
"\\varTheta" : "ϴ",
"\\epsilon" : "ϵ",
"\\dddot" : "⃛",
"\\ddddot" : "⃜",
"\\hslash" : "ℏ",
"\\Im" : "ℑ",
"\\ell" : "ℓ",
"\\wp" : "℘",
"\\Re" : "ℜ",
"\\aleph" : "ℵ",
"\\beth" : "ℶ",
"\\gimel" : "ℷ",
"\\daleth" : "ℸ",
"\\bbPi" : "ℿ",
"\\Zbar" : "Ƶ",
"\\overbar" : "̅",
"\\ovhook" : "̉",
"\\candra" : "̐",
"\\oturnedcomma" : "̒",
"\\ocommatopright" : "̕",
"\\droang" : "̚",
"\\wideutilde" : "̰",
"\\not" : "̸",
"\\upMu" : "Μ",
"\\upNu" : "Ν",
"\\upOmicron" : "Ο",
"\\upepsilon" : "ε",
"\\upomicron" : "ο",
"\\upvarbeta" : "ϐ",
"\\upoldKoppa" : "Ϙ",
"\\upoldkoppa" : "ϙ",
"\\upstigma" : "ϛ",
"\\upkoppa" : "ϟ",
"\\upsampi" : "ϡ",
"\\tieconcat" : "⁀",
"\\leftharpoonaccent" : "⃐",
"\\rightharpoonaccent" : "⃑",
"\\vertoverlay" : "⃒",
"\\overleftarrow" : "⃖",
"\\vec" : "⃗",
"\\overleftrightarrow" : "⃡",
"\\annuity" : "⃧",
"\\threeunderdot" : "⃨",
"\\widebridgeabove" : "⃩",
"\\bbC" : "ℂ",
"\\eulermascheroni" : "ℇ",
"\\scrg" : "ℊ",
"\\scrH" : "ℋ",
"\\frakH" : "ℌ",
"\\bbH" : "ℍ",
"\\planck" : "ℎ",
"\\scrI" : "ℐ",
"\\scrL" : "ℒ",
"\\bbN" : "ℕ",
"\\bbP" : "ℙ",
"\\bbQ" : "ℚ",
"\\scrR" : "ℛ",
"\\bbR" : "ℝ",
"\\bbZ" : "ℤ",
"\\frakZ" : "ℨ",
"\\Angstrom" : "Å",
"\\scrB" : "ℬ",
"\\frakC" : "ℭ",
"\\scre" : "ℯ",
"\\scrE" : "ℰ",
"\\scrF" : "ℱ",
"\\Finv" : "Ⅎ",
"\\scrM" : "ℳ",
"\\scro" : "ℴ",
"\\bbgamma" : "ℽ",
"\\bbGamma" : "ℾ",
"\\bbiD" : "ⅅ",
"\\bbid" : "ⅆ",
"\\bbie" : "ⅇ",
"\\bbii" : "ⅈ",
"\\bbij" : "ⅉ",
"\\bfA" : "𝐀",
"\\bfB" : "𝐁",
"\\bfC" : "𝐂",
"\\bfD" : "𝐃",
"\\bfE" : "𝐄",
"\\bfF" : "𝐅",
"\\bfG" : "𝐆",
"\\bfH" : "𝐇",
"\\bfI" : "𝐈",
"\\bfJ" : "𝐉",
"\\bfK" : "𝐊",
"\\bfL" : "𝐋",
"\\bfM" : "𝐌",
"\\bfN" : "𝐍",
"\\bfO" : "𝐎",
"\\bfP" : "𝐏",
"\\bfQ" : "𝐐",
"\\bfR" : "𝐑",
"\\bfS" : "𝐒",
"\\bfT" : "𝐓",
"\\bfU" : "𝐔",
"\\bfV" : "𝐕",
"\\bfW" : "𝐖",
"\\bfX" : "𝐗",
"\\bfY" : "𝐘",
"\\bfZ" : "𝐙",
"\\bfa" : "𝐚",
"\\bfb" : "𝐛",
"\\bfc" : "𝐜",
"\\bfd" : "𝐝",
"\\bfe" : "𝐞",
"\\bff" : "𝐟",
"\\bfg" : "𝐠",
"\\bfh" : "𝐡",
"\\bfi" : "𝐢",
"\\bfj" : "𝐣",
"\\bfk" : "𝐤",
"\\bfl" : "𝐥",
"\\bfm" : "𝐦",
"\\bfn" : "𝐧",
"\\bfo" : "𝐨",
"\\bfp" : "𝐩",
"\\bfq" : "𝐪",
"\\bfr" : "𝐫",
"\\bfs" : "𝐬",
"\\bft" : "𝐭",
"\\bfu" : "𝐮",
"\\bfv" : "𝐯",
"\\bfw" : "𝐰",
"\\bfx" : "𝐱",
"\\bfy" : "𝐲",
"\\bfz" : "𝐳",
"\\itA" : "𝐴",
"\\itB" : "𝐵",
"\\itC" : "𝐶",
"\\itD" : "𝐷",
"\\itE" : "𝐸",
"\\itF" : "𝐹",
"\\itG" : "𝐺",
"\\itH" : "𝐻",
"\\itI" : "𝐼",
"\\itJ" : "𝐽",
"\\itK" : "𝐾",
"\\itL" : "𝐿",
"\\itM" : "𝑀",
"\\itN" : "𝑁",
"\\itO" : "𝑂",
"\\itP" : "𝑃",
"\\itQ" : "𝑄",
"\\itR" : "𝑅",
"\\itS" : "𝑆",
"\\itT" : "𝑇",
"\\itU" : "𝑈",
"\\itV" : "𝑉",
"\\itW" : "𝑊",
"\\itX" : "𝑋",
"\\itY" : "𝑌",
"\\itZ" : "𝑍",
"\\ita" : "𝑎",
"\\itb" : "𝑏",
"\\itc" : "𝑐",
"\\itd" : "𝑑",
"\\ite" : "𝑒",
"\\itf" : "𝑓",
"\\itg" : "𝑔",
"\\iti" : "𝑖",
"\\itj" : "𝑗",
"\\itk" : "𝑘",
"\\itl" : "𝑙",
"\\itm" : "𝑚",
"\\itn" : "𝑛",
"\\ito" : "𝑜",
"\\itp" : "𝑝",
"\\itq" : "𝑞",
"\\itr" : "𝑟",
"\\its" : "𝑠",
"\\itt" : "𝑡",
"\\itu" : "𝑢",
"\\itv" : "𝑣",
"\\itw" : "𝑤",
"\\itx" : "𝑥",
"\\ity" : "𝑦",
"\\itz" : "𝑧",
"\\biA" : "𝑨",
"\\biB" : "𝑩",
"\\biC" : "𝑪",
"\\biD" : "𝑫",
"\\biE" : "𝑬",
"\\biF" : "𝑭",
"\\biG" : "𝑮",
"\\biH" : "𝑯",
"\\biI" : "𝑰",
"\\biJ" : "𝑱",
"\\biK" : "𝑲",
"\\biL" : "𝑳",
"\\biM" : "𝑴",
"\\biN" : "𝑵",
"\\biO" : "𝑶",
"\\biP" : "𝑷",
"\\biQ" : "𝑸",
"\\biR" : "𝑹",
"\\biS" : "𝑺",
"\\biT" : "𝑻",
"\\biU" : "𝑼",
"\\biV" : "𝑽",
"\\biW" : "𝑾",
"\\biX" : "𝑿",
"\\biY" : "𝒀",
"\\biZ" : "𝒁",
"\\bia" : "𝒂",
"\\bib" : "𝒃",
"\\bic" : "𝒄",
"\\bid" : "𝒅",
"\\bie" : "𝒆",
"\\bif" : "𝒇",
"\\big" : "𝒈",
"\\bih" : "𝒉",
"\\bii" : "𝒊",
"\\bij" : "𝒋",
"\\bik" : "𝒌",
"\\bil" : "𝒍",
"\\bim" : "𝒎",
"\\bin" : "𝒏",
"\\bio" : "𝒐",
"\\bip" : "𝒑",
"\\biq" : "𝒒",
"\\bir" : "𝒓",
"\\bis" : "𝒔",
"\\bit" : "𝒕",
"\\biu" : "𝒖",
"\\biv" : "𝒗",
"\\biw" : "𝒘",
"\\bix" : "𝒙",
"\\biy" : "𝒚",
"\\biz" : "𝒛",
"\\scrA" : "𝒜",
"\\scrC" : "𝒞",
"\\scrD" : "𝒟",
"\\scrG" : "𝒢",
"\\scrJ" : "𝒥",
"\\scrK" : "𝒦",
"\\scrN" : "𝒩",
"\\scrO" : "𝒪",
"\\scrP" : "𝒫",
"\\scrQ" : "𝒬",
"\\scrS" : "𝒮",
"\\scrT" : "𝒯",
"\\scrU" : "𝒰",
"\\scrV" : "𝒱",
"\\scrW" : "𝒲",
"\\scrX" : "𝒳",
"\\scrY" : "𝒴",
"\\scrZ" : "𝒵",
"\\scra" : "𝒶",
"\\scrb" : "𝒷",
"\\scrc" : "𝒸",
"\\scrd" : "𝒹",
"\\scrf" : "𝒻",
"\\scrh" : "𝒽",
"\\scri" : "𝒾",
"\\scrj" : "𝒿",
"\\scrk" : "𝓀",
"\\scrm" : "𝓂",
"\\scrn" : "𝓃",
"\\scrp" : "𝓅",
"\\scrq" : "𝓆",
"\\scrr" : "𝓇",
"\\scrs" : "𝓈",
"\\scrt" : "𝓉",
"\\scru" : "𝓊",
"\\scrv" : "𝓋",
"\\scrw" : "𝓌",
"\\scrx" : "𝓍",
"\\scry" : "𝓎",
"\\scrz" : "𝓏",
"\\bscrA" : "𝓐",
"\\bscrB" : "𝓑",
"\\bscrC" : "𝓒",
"\\bscrD" : "𝓓",
"\\bscrE" : "𝓔",
"\\bscrF" : "𝓕",
"\\bscrG" : "𝓖",
"\\bscrH" : "𝓗",
"\\bscrI" : "𝓘",
"\\bscrJ" : "𝓙",
"\\bscrK" : "𝓚",
"\\bscrL" : "𝓛",
"\\bscrM" : "𝓜",
"\\bscrN" : "𝓝",
"\\bscrO" : "𝓞",
"\\bscrP" : "𝓟",
"\\bscrQ" : "𝓠",
"\\bscrR" : "𝓡",
"\\bscrS" : "𝓢",
"\\bscrT" : "𝓣",
"\\bscrU" : "𝓤",
"\\bscrV" : "𝓥",
"\\bscrW" : "𝓦",
"\\bscrX" : "𝓧",
"\\bscrY" : "𝓨",
"\\bscrZ" : "𝓩",
"\\bscra" : "𝓪",
"\\bscrb" : "𝓫",
"\\bscrc" : "𝓬",
"\\bscrd" : "𝓭",
"\\bscre" : "𝓮",
"\\bscrf" : "𝓯",
"\\bscrg" : "𝓰",
"\\bscrh" : "𝓱",
"\\bscri" : "𝓲",
"\\bscrj" : "𝓳",
"\\bscrk" : "𝓴",
"\\bscrl" : "𝓵",
"\\bscrm" : "𝓶",
"\\bscrn" : "𝓷",
"\\bscro" : "𝓸",
"\\bscrp" : "𝓹",
"\\bscrq" : "𝓺",
"\\bscrr" : "𝓻",
"\\bscrs" : "𝓼",
"\\bscrt" : "𝓽",
"\\bscru" : "𝓾",
"\\bscrv" : "𝓿",
"\\bscrw" : "𝔀",
"\\bscrx" : "𝔁",
"\\bscry" : "𝔂",
"\\bscrz" : "𝔃",
"\\frakA" : "𝔄",
"\\frakB" : "𝔅",
"\\frakD" : "𝔇",
"\\frakE" : "𝔈",
"\\frakF" : "𝔉",
"\\frakG" : "𝔊",
"\\frakJ" : "𝔍",
"\\frakK" : "𝔎",
"\\frakL" : "𝔏",
"\\frakM" : "𝔐",
"\\frakN" : "𝔑",
"\\frakO" : "𝔒",
"\\frakP" : "𝔓",
"\\frakQ" : "𝔔",
"\\frakS" : "𝔖",
"\\frakT" : "𝔗",
"\\frakU" : "𝔘",
"\\frakV" : "𝔙",
"\\frakW" : "𝔚",
"\\frakX" : "𝔛",
"\\frakY" : "𝔜",
"\\fraka" : "𝔞",
"\\frakb" : "𝔟",
"\\frakc" : "𝔠",
"\\frakd" : "𝔡",
"\\frake" : "𝔢",
"\\frakf" : "𝔣",
"\\frakg" : "𝔤",
"\\frakh" : "𝔥",
"\\fraki" : "𝔦",
"\\frakj" : "𝔧",
"\\frakk" : "𝔨",
"\\frakl" : "𝔩",
"\\frakm" : "𝔪",
"\\frakn" : "𝔫",
"\\frako" : "𝔬",
"\\frakp" : "𝔭",
"\\frakq" : "𝔮",
"\\frakr" : "𝔯",
"\\fraks" : "𝔰",
"\\frakt" : "𝔱",
"\\fraku" : "𝔲",
"\\frakv" : "𝔳",
"\\frakw" : "𝔴",
"\\frakx" : "𝔵",
"\\fraky" : "𝔶",
"\\frakz" : "𝔷",
"\\bbA" : "𝔸",
"\\bbB" : "𝔹",
"\\bbD" : "𝔻",
"\\bbE" : "𝔼",
"\\bbF" : "𝔽",
"\\bbG" : "𝔾",
"\\bbI" : "𝕀",
"\\bbJ" : "𝕁",
"\\bbK" : "𝕂",
"\\bbL" : "𝕃",
"\\bbM" : "𝕄",
"\\bbO" : "𝕆",
"\\bbS" : "𝕊",
"\\bbT" : "𝕋",
"\\bbU" : "𝕌",
"\\bbV" : "𝕍",
"\\bbW" : "𝕎",
"\\bbX" : "𝕏",
"\\bbY" : "𝕐",
"\\bba" : "𝕒",
"\\bbb" : "𝕓",
"\\bbc" : "𝕔",
"\\bbd" : "𝕕",
"\\bbe" : "𝕖",
"\\bbf" : "𝕗",
"\\bbg" : "𝕘",
"\\bbh" : "𝕙",
"\\bbi" : "𝕚",
"\\bbj" : "𝕛",
"\\bbk" : "𝕜",
"\\bbl" : "𝕝",
"\\bbm" : "𝕞",
"\\bbn" : "𝕟",
"\\bbo" : "𝕠",
"\\bbp" : "𝕡",
"\\bbq" : "𝕢",
"\\bbr" : "𝕣",
"\\bbs" : "𝕤",
"\\bbt" : "𝕥",
"\\bbu" : "𝕦",
"\\bbv" : "𝕧",
"\\bbw" : "𝕨",
"\\bbx" : "𝕩",
"\\bby" : "𝕪",
"\\bbz" : "𝕫",
"\\bfrakA" : "𝕬",
"\\bfrakB" : "𝕭",
"\\bfrakC" : "𝕮",
"\\bfrakD" : "𝕯",
"\\bfrakE" : "𝕰",
"\\bfrakF" : "𝕱",
"\\bfrakG" : "𝕲",
"\\bfrakH" : "𝕳",
"\\bfrakI" : "𝕴",
"\\bfrakJ" : "𝕵",
"\\bfrakK" : "𝕶",
"\\bfrakL" : "𝕷",
"\\bfrakM" : "𝕸",
"\\bfrakN" : "𝕹",
"\\bfrakO" : "𝕺",
"\\bfrakP" : "𝕻",
"\\bfrakQ" : "𝕼",
"\\bfrakR" : "𝕽",
"\\bfrakS" : "𝕾",
"\\bfrakT" : "𝕿",
"\\bfrakU" : "𝖀",
"\\bfrakV" : "𝖁",
"\\bfrakW" : "𝖂",
"\\bfrakX" : "𝖃",
"\\bfrakY" : "𝖄",
"\\bfrakZ" : "𝖅",
"\\bfraka" : "𝖆",
"\\bfrakb" : "𝖇",
"\\bfrakc" : "𝖈",
"\\bfrakd" : "𝖉",
"\\bfrake" : "𝖊",
"\\bfrakf" : "𝖋",
"\\bfrakg" : "𝖌",
"\\bfrakh" : "𝖍",
"\\bfraki" : "𝖎",
"\\bfrakj" : "𝖏",
"\\bfrakk" : "𝖐",
"\\bfrakl" : "𝖑",
"\\bfrakm" : "𝖒",
"\\bfrakn" : "𝖓",
"\\bfrako" : "𝖔",
"\\bfrakp" : "𝖕",
"\\bfrakq" : "𝖖",
"\\bfrakr" : "𝖗",
"\\bfraks" : "𝖘",
"\\bfrakt" : "𝖙",
"\\bfraku" : "𝖚",
"\\bfrakv" : "𝖛",
"\\bfrakw" : "𝖜",
"\\bfrakx" : "𝖝",
"\\bfraky" : "𝖞",
"\\bfrakz" : "𝖟",
"\\sansA" : "𝖠",
"\\sansB" : "𝖡",
"\\sansC" : "𝖢",
"\\sansD" : "𝖣",
"\\sansE" : "𝖤",
"\\sansF" : "𝖥",
"\\sansG" : "𝖦",
"\\sansH" : "𝖧",
"\\sansI" : "𝖨",
"\\sansJ" : "𝖩",
"\\sansK" : "𝖪",
"\\sansL" : "𝖫",
"\\sansM" : "𝖬",
"\\sansN" : "𝖭",
"\\sansO" : "𝖮",
"\\sansP" : "𝖯",
"\\sansQ" : "𝖰",
"\\sansR" : "𝖱",
"\\sansS" : "𝖲",
"\\sansT" : "𝖳",
"\\sansU" : "𝖴",
"\\sansV" : "𝖵",
"\\sansW" : "𝖶",
"\\sansX" : "𝖷",
"\\sansY" : "𝖸",
"\\sansZ" : "𝖹",
"\\sansa" : "𝖺",
"\\sansb" : "𝖻",
"\\sansc" : "𝖼",
"\\sansd" : "𝖽",
"\\sanse" : "𝖾",
"\\sansf" : "𝖿",
"\\sansg" : "𝗀",
"\\sansh" : "𝗁",
"\\sansi" : "𝗂",
"\\sansj" : "𝗃",
"\\sansk" : "𝗄",
"\\sansl" : "𝗅",
"\\sansm" : "𝗆",
"\\sansn" : "𝗇",
"\\sanso" : "𝗈",
"\\sansp" : "𝗉",
"\\sansq" : "𝗊",
"\\sansr" : "𝗋",
"\\sanss" : "𝗌",
"\\sanst" : "𝗍",
"\\sansu" : "𝗎",
"\\sansv" : "𝗏",
"\\sansw" : "𝗐",
"\\sansx" : "𝗑",
"\\sansy" : "𝗒",
"\\sansz" : "𝗓",
"\\bsansA" : "𝗔",
"\\bsansB" : "𝗕",
"\\bsansC" : "𝗖",
"\\bsansD" : "𝗗",
"\\bsansE" : "𝗘",
"\\bsansF" : "𝗙",
"\\bsansG" : "𝗚",
"\\bsansH" : "𝗛",
"\\bsansI" : "𝗜",
"\\bsansJ" : "𝗝",
"\\bsansK" : "𝗞",
"\\bsansL" : "𝗟",
"\\bsansM" : "𝗠",
"\\bsansN" : "𝗡",
"\\bsansO" : "𝗢",
"\\bsansP" : "𝗣",
"\\bsansQ" : "𝗤",
"\\bsansR" : "𝗥",
"\\bsansS" : "𝗦",
"\\bsansT" : "𝗧",
"\\bsansU" : "𝗨",
"\\bsansV" : "𝗩",
"\\bsansW" : "𝗪",
"\\bsansX" : "𝗫",
"\\bsansY" : "𝗬",
"\\bsansZ" : "𝗭",
"\\bsansa" : "𝗮",
"\\bsansb" : "𝗯",
"\\bsansc" : "𝗰",
"\\bsansd" : "𝗱",
"\\bsanse" : "𝗲",
"\\bsansf" : "𝗳",
"\\bsansg" : "𝗴",
"\\bsansh" : "𝗵",
"\\bsansi" : "𝗶",
"\\bsansj" : "𝗷",
"\\bsansk" : "𝗸",
"\\bsansl" : "𝗹",
"\\bsansm" : "𝗺",
"\\bsansn" : "𝗻",
"\\bsanso" : "𝗼",
"\\bsansp" : "𝗽",
"\\bsansq" : "𝗾",
"\\bsansr" : "𝗿",
"\\bsanss" : "𝘀",
"\\bsanst" : "𝘁",
"\\bsansu" : "𝘂",
"\\bsansv" : "𝘃",
"\\bsansw" : "𝘄",
"\\bsansx" : "𝘅",
"\\bsansy" : "𝘆",
"\\bsansz" : "𝘇",
"\\isansA" : "𝘈",
"\\isansB" : "𝘉",
"\\isansC" : "𝘊",
"\\isansD" : "𝘋",
"\\isansE" : "𝘌",
"\\isansF" : "𝘍",
"\\isansG" : "𝘎",
"\\isansH" : "𝘏",
"\\isansI" : "𝘐",
"\\isansJ" : "𝘑",
"\\isansK" : "𝘒",
"\\isansL" : "𝘓",
"\\isansM" : "𝘔",
"\\isansN" : "𝘕",
"\\isansO" : "𝘖",
"\\isansP" : "𝘗",
"\\isansQ" : "𝘘",
"\\isansR" : "𝘙",
"\\isansS" : "𝘚",
"\\isansT" : "𝘛",
"\\isansU" : "𝘜",
"\\isansV" : "𝘝",
"\\isansW" : "𝘞",
"\\isansX" : "𝘟",
"\\isansY" : "𝘠",
"\\isansZ" : "𝘡",
"\\isansa" : "𝘢",
"\\isansb" : "𝘣",
"\\isansc" : "𝘤",
"\\isansd" : "𝘥",
"\\isanse" : "𝘦",
"\\isansf" : "𝘧",
"\\isansg" : "𝘨",
"\\isansh" : "𝘩",
"\\isansi" : "𝘪",
"\\isansj" : "𝘫",
"\\isansk" : "𝘬",
"\\isansl" : "𝘭",
"\\isansm" : "𝘮",
"\\isansn" : "𝘯",
"\\isanso" : "𝘰",
"\\isansp" : "𝘱",
"\\isansq" : "𝘲",
"\\isansr" : "𝘳",
"\\isanss" : "𝘴",
"\\isanst" : "𝘵",
"\\isansu" : "𝘶",
"\\isansv" : "𝘷",
"\\isansw" : "𝘸",
"\\isansx" : "𝘹",
"\\isansy" : "𝘺",
"\\isansz" : "𝘻",
"\\bisansA" : "𝘼",
"\\bisansB" : "𝘽",
"\\bisansC" : "𝘾",
"\\bisansD" : "𝘿",
"\\bisansE" : "𝙀",
"\\bisansF" : "𝙁",
"\\bisansG" : "𝙂",
"\\bisansH" : "𝙃",
"\\bisansI" : "𝙄",
"\\bisansJ" : "𝙅",
"\\bisansK" : "𝙆",
"\\bisansL" : "𝙇",
"\\bisansM" : "𝙈",
"\\bisansN" : "𝙉",
"\\bisansO" : "𝙊",
"\\bisansP" : "𝙋",
"\\bisansQ" : "𝙌",
"\\bisansR" : "𝙍",
"\\bisansS" : "𝙎",
"\\bisansT" : "𝙏",
"\\bisansU" : "𝙐",
"\\bisansV" : "𝙑",
"\\bisansW" : "𝙒",
"\\bisansX" : "𝙓",
"\\bisansY" : "𝙔",
"\\bisansZ" : "𝙕",
"\\bisansa" : "𝙖",
"\\bisansb" : "𝙗",
"\\bisansc" : "𝙘",
"\\bisansd" : "𝙙",
"\\bisanse" : "𝙚",
"\\bisansf" : "𝙛",
"\\bisansg" : "𝙜",
"\\bisansh" : "𝙝",
"\\bisansi" : "𝙞",
"\\bisansj" : "𝙟",
"\\bisansk" : "𝙠",
"\\bisansl" : "𝙡",
"\\bisansm" : "𝙢",
"\\bisansn" : "𝙣",
"\\bisanso" : "𝙤",
"\\bisansp" : "𝙥",
"\\bisansq" : "𝙦",
"\\bisansr" : "𝙧",
"\\bisanss" : "𝙨",
"\\bisanst" : "𝙩",
"\\bisansu" : "𝙪",
"\\bisansv" : "𝙫",
"\\bisansw" : "𝙬",
"\\bisansx" : "𝙭",
"\\bisansy" : "𝙮",
"\\bisansz" : "𝙯",
"\\ttA" : "𝙰",
"\\ttB" : "𝙱",
"\\ttC" : "𝙲",
"\\ttD" : "𝙳",
"\\ttE" : "𝙴",
"\\ttF" : "𝙵",
"\\ttG" : "𝙶",
"\\ttH" : "𝙷",
"\\ttI" : "𝙸",
"\\ttJ" : "𝙹",
"\\ttK" : "𝙺",
"\\ttL" : "𝙻",
"\\ttM" : "𝙼",
"\\ttN" : "𝙽",
"\\ttO" : "𝙾",
"\\ttP" : "𝙿",
"\\ttQ" : "𝚀",
"\\ttR" : "𝚁",
"\\ttS" : "𝚂",
"\\ttT" : "𝚃",
"\\ttU" : "𝚄",
"\\ttV" : "𝚅",
"\\ttW" : "𝚆",
"\\ttX" : "𝚇",
"\\ttY" : "𝚈",
"\\ttZ" : "𝚉",
"\\tta" : "𝚊",
"\\ttb" : "𝚋",
"\\ttc" : "𝚌",
"\\ttd" : "𝚍",
"\\tte" : "𝚎",
"\\ttf" : "𝚏",
"\\ttg" : "𝚐",
"\\tth" : "𝚑",
"\\tti" : "𝚒",
"\\ttj" : "𝚓",
"\\ttk" : "𝚔",
"\\ttl" : "𝚕",
"\\ttm" : "𝚖",
"\\ttn" : "𝚗",
"\\tto" : "𝚘",
"\\ttp" : "𝚙",
"\\ttq" : "𝚚",
"\\ttr" : "𝚛",
"\\tts" : "𝚜",
"\\ttt" : "𝚝",
"\\ttu" : "𝚞",
"\\ttv" : "𝚟",
"\\ttw" : "𝚠",
"\\ttx" : "𝚡",
"\\tty" : "𝚢",
"\\ttz" : "𝚣",
"\\bfAlpha" : "𝚨",
"\\bfBeta" : "𝚩",
"\\bfGamma" : "𝚪",
"\\bfDelta" : "𝚫",
"\\bfEpsilon" : "𝚬",
"\\bfZeta" : "𝚭",
"\\bfEta" : "𝚮",
"\\bfTheta" : "𝚯",
"\\bfIota" : "𝚰",
"\\bfKappa" : "𝚱",
"\\bfLambda" : "𝚲",
"\\bfMu" : "𝚳",
"\\bfNu" : "𝚴",
"\\bfXi" : "𝚵",
"\\bfOmicron" : "𝚶",
"\\bfPi" : "𝚷",
"\\bfRho" : "𝚸",
"\\bfvarTheta" : "𝚹",
"\\bfSigma" : "𝚺",
"\\bfTau" : "𝚻",
"\\bfUpsilon" : "𝚼",
"\\bfPhi" : "𝚽",
"\\bfChi" : "𝚾",
"\\bfPsi" : "𝚿",
"\\bfOmega" : "𝛀",
"\\bfalpha" : "𝛂",
"\\bfbeta" : "𝛃",
"\\bfgamma" : "𝛄",
"\\bfdelta" : "𝛅",
"\\bfepsilon" : "𝛆",
"\\bfzeta" : "𝛇",
"\\bfeta" : "𝛈",
"\\bftheta" : "𝛉",
"\\bfiota" : "𝛊",
"\\bfkappa" : "𝛋",
"\\bflambda" : "𝛌",
"\\bfmu" : "𝛍",
"\\bfnu" : "𝛎",
"\\bfxi" : "𝛏",
"\\bfomicron" : "𝛐",
"\\bfpi" : "𝛑",
"\\bfrho" : "𝛒",
"\\bfvarsigma" : "𝛓",
"\\bfsigma" : "𝛔",
"\\bftau" : "𝛕",
"\\bfupsilon" : "𝛖",
"\\bfvarphi" : "𝛗",
"\\bfchi" : "𝛘",
"\\bfpsi" : "𝛙",
"\\bfomega" : "𝛚",
"\\bfvarepsilon" : "𝛜",
"\\bfvartheta" : "𝛝",
"\\bfvarkappa" : "𝛞",
"\\bfphi" : "𝛟",
"\\bfvarrho" : "𝛠",
"\\bfvarpi" : "𝛡",
"\\itAlpha" : "𝛢",
"\\itBeta" : "𝛣",
"\\itGamma" : "𝛤",
"\\itDelta" : "𝛥",
"\\itEpsilon" : "𝛦",
"\\itZeta" : "𝛧",
"\\itEta" : "𝛨",
"\\itTheta" : "𝛩",
"\\itIota" : "𝛪",
"\\itKappa" : "𝛫",
"\\itLambda" : "𝛬",
"\\itMu" : "𝛭",
"\\itNu" : "𝛮",
"\\itXi" : "𝛯",
"\\itOmicron" : "𝛰",
"\\itPi" : "𝛱",
"\\itRho" : "𝛲",
"\\itvarTheta" : "𝛳",
"\\itSigma" : "𝛴",
"\\itTau" : "𝛵",
"\\itUpsilon" : "𝛶",
"\\itPhi" : "𝛷",
"\\itChi" : "𝛸",
"\\itPsi" : "𝛹",
"\\itOmega" : "𝛺",
"\\italpha" : "𝛼",
"\\itbeta" : "𝛽",
"\\itgamma" : "𝛾",
"\\itdelta" : "𝛿",
"\\itepsilon" : "𝜀",
"\\itzeta" : "𝜁",
"\\iteta" : "𝜂",
"\\ittheta" : "𝜃",
"\\itiota" : "𝜄",
"\\itkappa" : "𝜅",
"\\itlambda" : "𝜆",
"\\itmu" : "𝜇",
"\\itnu" : "𝜈",
"\\itxi" : "𝜉",
"\\itomicron" : "𝜊",
"\\itpi" : "𝜋",
"\\itrho" : "𝜌",
"\\itvarsigma" : "𝜍",
"\\itsigma" : "𝜎",
"\\ittau" : "𝜏",
"\\itupsilon" : "𝜐",
"\\itphi" : "𝜑",
"\\itchi" : "𝜒",
"\\itpsi" : "𝜓",
"\\itomega" : "𝜔",
"\\itvarepsilon" : "𝜖",
"\\itvartheta" : "𝜗",
"\\itvarkappa" : "𝜘",
"\\itvarphi" : "𝜙",
"\\itvarrho" : "𝜚",
"\\itvarpi" : "𝜛",
"\\biAlpha" : "𝜜",
"\\biBeta" : "𝜝",
"\\biGamma" : "𝜞",
"\\biDelta" : "𝜟",
"\\biEpsilon" : "𝜠",
"\\biZeta" : "𝜡",
"\\biEta" : "𝜢",
"\\biTheta" : "𝜣",
"\\biIota" : "𝜤",
"\\biKappa" : "𝜥",
"\\biLambda" : "𝜦",
"\\biMu" : "𝜧",
"\\biNu" : "𝜨",
"\\biXi" : "𝜩",
"\\biOmicron" : "𝜪",
"\\biPi" : "𝜫",
"\\biRho" : "𝜬",
"\\bivarTheta" : "𝜭",
"\\biSigma" : "𝜮",
"\\biTau" : "𝜯",
"\\biUpsilon" : "𝜰",
"\\biPhi" : "𝜱",
"\\biChi" : "𝜲",
"\\biPsi" : "𝜳",
"\\biOmega" : "𝜴",
"\\bialpha" : "𝜶",
"\\bibeta" : "𝜷",
"\\bigamma" : "𝜸",
"\\bidelta" : "𝜹",
"\\biepsilon" : "𝜺",
"\\bizeta" : "𝜻",
"\\bieta" : "𝜼",
"\\bitheta" : "𝜽",
"\\biiota" : "𝜾",
"\\bikappa" : "𝜿",
"\\bilambda" : "𝝀",
"\\bimu" : "𝝁",
"\\binu" : "𝝂",
"\\bixi" : "𝝃",
"\\biomicron" : "𝝄",
"\\bipi" : "𝝅",
"\\birho" : "𝝆",
"\\bivarsigma" : "𝝇",
"\\bisigma" : "𝝈",
"\\bitau" : "𝝉",
"\\biupsilon" : "𝝊",
"\\biphi" : "𝝋",
"\\bichi" : "𝝌",
"\\bipsi" : "𝝍",
"\\biomega" : "𝝎",
"\\bivarepsilon" : "𝝐",
"\\bivartheta" : "𝝑",
"\\bivarkappa" : "𝝒",
"\\bivarphi" : "𝝓",
"\\bivarrho" : "𝝔",
"\\bivarpi" : "𝝕",
"\\bsansAlpha" : "𝝖",
"\\bsansBeta" : "𝝗",
"\\bsansGamma" : "𝝘",
"\\bsansDelta" : "𝝙",
"\\bsansEpsilon" : "𝝚",
"\\bsansZeta" : "𝝛",
"\\bsansEta" : "𝝜",
"\\bsansTheta" : "𝝝",
"\\bsansIota" : "𝝞",
"\\bsansKappa" : "𝝟",
"\\bsansLambda" : "𝝠",
"\\bsansMu" : "𝝡",
"\\bsansNu" : "𝝢",
"\\bsansXi" : "𝝣",
"\\bsansOmicron" : "𝝤",
"\\bsansPi" : "𝝥",
"\\bsansRho" : "𝝦",
"\\bsansvarTheta" : "𝝧",
"\\bsansSigma" : "𝝨",
"\\bsansTau" : "𝝩",
"\\bsansUpsilon" : "𝝪",
"\\bsansPhi" : "𝝫",
"\\bsansChi" : "𝝬",
"\\bsansPsi" : "𝝭",
"\\bsansOmega" : "𝝮",
"\\bsansalpha" : "𝝰",
"\\bsansbeta" : "𝝱",
"\\bsansgamma" : "𝝲",
"\\bsansdelta" : "𝝳",
"\\bsansepsilon" : "𝝴",
"\\bsanszeta" : "𝝵",
"\\bsanseta" : "𝝶",
"\\bsanstheta" : "𝝷",
"\\bsansiota" : "𝝸",
"\\bsanskappa" : "𝝹",
"\\bsanslambda" : "𝝺",
"\\bsansmu" : "𝝻",
"\\bsansnu" : "𝝼",
"\\bsansxi" : "𝝽",
"\\bsansomicron" : "𝝾",
"\\bsanspi" : "𝝿",
"\\bsansrho" : "𝞀",
"\\bsansvarsigma" : "𝞁",
"\\bsanssigma" : "𝞂",
"\\bsanstau" : "𝞃",
"\\bsansupsilon" : "𝞄",
"\\bsansphi" : "𝞅",
"\\bsanschi" : "𝞆",
"\\bsanspsi" : "𝞇",
"\\bsansomega" : "𝞈",
"\\bsansvarepsilon" : "𝞊",
"\\bsansvartheta" : "𝞋",
"\\bsansvarkappa" : "𝞌",
"\\bsansvarphi" : "𝞍",
"\\bsansvarrho" : "𝞎",
"\\bsansvarpi" : "𝞏",
"\\bisansAlpha" : "𝞐",
"\\bisansBeta" : "𝞑",
"\\bisansGamma" : "𝞒",
"\\bisansDelta" : "𝞓",
"\\bisansEpsilon" : "𝞔",
"\\bisansZeta" : "𝞕",
"\\bisansEta" : "𝞖",
"\\bisansTheta" : "𝞗",
"\\bisansIota" : "𝞘",
"\\bisansKappa" : "𝞙",
"\\bisansLambda" : "𝞚",
"\\bisansMu" : "𝞛",
"\\bisansNu" : "𝞜",
"\\bisansXi" : "𝞝",
"\\bisansOmicron" : "𝞞",
"\\bisansPi" : "𝞟",
"\\bisansRho" : "𝞠",
"\\bisansvarTheta" : "𝞡",
"\\bisansSigma" : "𝞢",
"\\bisansTau" : "𝞣",
"\\bisansUpsilon" : "𝞤",
"\\bisansPhi" : "𝞥",
"\\bisansChi" : "𝞦",
"\\bisansPsi" : "𝞧",
"\\bisansOmega" : "𝞨",
"\\bisansalpha" : "𝞪",
"\\bisansbeta" : "𝞫",
"\\bisansgamma" : "𝞬",
"\\bisansdelta" : "𝞭",
"\\bisansepsilon" : "𝞮",
"\\bisanszeta" : "𝞯",
"\\bisanseta" : "𝞰",
"\\bisanstheta" : "𝞱",
"\\bisansiota" : "𝞲",
"\\bisanskappa" : "𝞳",
"\\bisanslambda" : "𝞴",
"\\bisansmu" : "𝞵",
"\\bisansnu" : "𝞶",
"\\bisansxi" : "𝞷",
"\\bisansomicron" : "𝞸",
"\\bisanspi" : "𝞹",
"\\bisansrho" : "𝞺",
"\\bisansvarsigma" : "𝞻",
"\\bisanssigma" : "𝞼",
"\\bisanstau" : "𝞽",
"\\bisansupsilon" : "𝞾",
"\\bisansphi" : "𝞿",
"\\bisanschi" : "𝟀",
"\\bisanspsi" : "𝟁",
"\\bisansomega" : "𝟂",
"\\bisansvarepsilon" : "𝟄",
"\\bisansvartheta" : "𝟅",
"\\bisansvarkappa" : "𝟆",
"\\bisansvarphi" : "𝟇",
"\\bisansvarrho" : "𝟈",
"\\bisansvarpi" : "𝟉",
"\\bfzero" : "𝟎",
"\\bfone" : "𝟏",
"\\bftwo" : "𝟐",
"\\bfthree" : "𝟑",
"\\bffour" : "𝟒",
"\\bffive" : "𝟓",
"\\bfsix" : "𝟔",
"\\bfseven" : "𝟕",
"\\bfeight" : "𝟖",
"\\bfnine" : "𝟗",
"\\bbzero" : "𝟘",
"\\bbone" : "𝟙",
"\\bbtwo" : "𝟚",
"\\bbthree" : "𝟛",
"\\bbfour" : "𝟜",
"\\bbfive" : "𝟝",
"\\bbsix" : "𝟞",
"\\bbseven" : "𝟟",
"\\bbeight" : "𝟠",
"\\bbnine" : "𝟡",
"\\sanszero" : "𝟢",
"\\sansone" : "𝟣",
"\\sanstwo" : "𝟤",
"\\sansthree" : "𝟥",
"\\sansfour" : "𝟦",
"\\sansfive" : "𝟧",
"\\sanssix" : "𝟨",
"\\sansseven" : "𝟩",
"\\sanseight" : "𝟪",
"\\sansnine" : "𝟫",
"\\bsanszero" : "𝟬",
"\\bsansone" : "𝟭",
"\\bsanstwo" : "𝟮",
"\\bsansthree" : "𝟯",
"\\bsansfour" : "𝟰",
"\\bsansfive" : "𝟱",
"\\bsanssix" : "𝟲",
"\\bsansseven" : "𝟳",
"\\bsanseight" : "𝟴",
"\\bsansnine" : "𝟵",
"\\ttzero" : "𝟶",
"\\ttone" : "𝟷",
"\\tttwo" : "𝟸",
"\\ttthree" : "𝟹",
"\\ttfour" : "𝟺",
"\\ttfive" : "𝟻",
"\\ttsix" : "𝟼",
"\\ttseven" : "𝟽",
"\\tteight" : "𝟾",
"\\ttnine" : "𝟿",
"\\underbar" : "̲",
"\\underleftrightarrow" : "͍",
}
# Inverse mapping: unicode symbol -> latex command name.
reverse_latex_symbol = {symbol: command for (command, symbol) in latex_symbols.items()}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py3@IPython@core@latex_symbols.py@.PATH_END.py
|
{
"filename": "test.py",
"repo_name": "bolverk/huji-rich",
"repo_path": "huji-rich_extracted/huji-rich-master/tests/newtonian/two_dimensional/conservation_lagrangian/test.py",
"type": "Python"
}
|
#! /usr/bin/python
def all_equal(ar):
    """
    Checks that all terms in the array are equal
    Input:
    ar - Numerical array

    Returns True for an empty or single-element array (vacuously equal),
    matching the behaviour of the original explicit loop.
    """
    # Compare every later entry against the first; the generator body never
    # evaluates ar[0] when ar is empty, so an empty input safely yields True.
    return all(item == ar[0] for item in ar[1:])
def main():
    """Check conservation: each quantity read from 'res.txt' must be constant.

    Reads the five time series (mass, x-momentum, y-momentum, energy,
    tracer) from 'res.txt', writes a per-quantity pass/fail line to
    'gradesheet.txt', and returns True only if every series is constant.
    """
    import numpy

    names = ['mass', 'xmom', 'ymom', 'enr', 'tracer']
    data = numpy.loadtxt('res.txt', unpack=True)
    results = {name: all_equal(column)
               for (name, column) in zip(names, data)}
    # Context manager guarantees the file is closed even if a write fails
    # (the original used an explicit close() that a raised exception skips).
    with open('gradesheet.txt', 'w') as f:
        for name in names:
            f.write(name + ' ' + str(results[name]) + '\n')
    return all(results.values())
if __name__ == '__main__':
    # Create a marker file that the test harness looks for. Creating the
    # file directly is portable and avoids spawning a shell just to run
    # 'touch', which is what os.system('touch ...') did before.
    marker = 'test_passed.res' if main() else 'test_failed.res'
    with open(marker, 'a'):
        pass
|
bolverkREPO_NAMEhuji-richPATH_START.@huji-rich_extracted@huji-rich-master@tests@newtonian@two_dimensional@conservation_lagrangian@test.py@.PATH_END.py
|
{
"filename": "_cauto.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/_cauto.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CautoValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean ``cauto`` property of ``isosurface`` traces."""

    def __init__(self, plotly_name="cauto", parent_name="isosurface", **kwargs):
        # Pull the validator-specific options out of kwargs first so that
        # caller-supplied values take precedence over the defaults here.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        super(CautoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@_cauto.py@.PATH_END.py
|
{
"filename": "event_selection.py",
"repo_name": "icecube/skyllh",
"repo_path": "skyllh_extracted/skyllh-master/skyllh/core/event_selection.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import abc
import inspect
import numpy as np
import scipy.sparse
from skyllh.core.py import (
classname,
float_cast,
issequenceof,
)
from skyllh.core.source_hypo_grouping import (
SourceHypoGroupManager,
)
from skyllh.core.source_model import (
SourceModel,
)
from skyllh.core.timing import (
TaskTimer,
)
from skyllh.core.utils.coords import (
angular_separation,
)
class EventSelectionMethod(
        object,
        metaclass=abc.ABCMeta):
    """This is the abstract base class for all event selection method classes.
    The idea is to pre-select only events that contribute to the likelihood
    function, i.e. are more signal than background like. The different methods
    are implemented through derived classes of this base class.
    """

    def __init__(
            self,
            shg_mgr,
            **kwargs):
        """Creates a new event selection method instance.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager | None
            The instance of SourceHypoGroupManager that defines the list of
            sources, i.e. the list of SourceModel instances.
            It can be ``None`` if the event selection method does not depend on
            the sources.
        """
        super().__init__(
            **kwargs)

        # _set_shg_mgr performs the type validation and (re)creates the
        # internal source record array. It is shared with change_shg_mgr so
        # the validation logic exists only once.
        self._set_shg_mgr(shg_mgr)

    def _set_shg_mgr(self, shg_mgr):
        """Validates and stores the given SourceHypoGroupManager instance and
        recreates the internal source numpy record array.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager | None
            The instance of SourceHypoGroupManager, or ``None`` if the event
            selection method does not depend on the sources.

        Raises
        ------
        TypeError
            If ``shg_mgr`` is neither ``None`` nor an instance of
            SourceHypoGroupManager.
        """
        if (shg_mgr is not None) and\
           (not isinstance(shg_mgr, SourceHypoGroupManager)):
            raise TypeError(
                'The shg_mgr argument must be None or an instance of '
                'SourceHypoGroupManager! '
                f'Its current type is {classname(shg_mgr)}.')

        self._shg_mgr = shg_mgr

        # The _src_arr variable holds a numpy record array with the necessary
        # source information needed for the event selection method. It is
        # None when no source hypo group manager is set.
        self._src_arr = None
        if self._shg_mgr is not None:
            self._src_arr = self.sources_to_array(
                sources=self._shg_mgr.source_list)

    @property
    def shg_mgr(self):
        """(read-only) The instance of SourceHypoGroupManager, which defines the
        list of sources.
        """
        return self._shg_mgr

    def __and__(self, other):
        """Implements the AND operator (&) for creating an event selection
        method, which is the intersection of this event selection method and
        another one using the expression ``intersection = self & other``.

        Parameters
        ----------
        other : instance of EventSelectionMethod
            The instance of EventSelectionMethod that is the other event
            selection method.

        Returns
        -------
        intersection : instance of IntersectionEventSelectionMethod
            The instance of IntersectionEventSelectionMethod that creates the
            intersection of this event selection method and the other.
        """
        return IntersectionEventSelectionMethod(self, other)

    def change_shg_mgr(self, shg_mgr):
        """Changes the SourceHypoGroupManager instance of the event selection
        method. This will also recreate the internal source numpy record array.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager | None
            The new SourceHypoGroupManager instance, that should be used for
            this event selection method.
            It can be ``None`` if the event selection method does not depend on
            the sources.
        """
        self._set_shg_mgr(shg_mgr)

    def sources_to_array(self, sources):
        """This method is supposed to convert a sequence of SourceModel
        instances into a structured numpy ndarray with the source information
        in a format that is best understood by the actual event selection
        method.

        Parameters
        ----------
        sources : sequence of SourceModel
            The sequence of source models containing the necessary information
            of the source.

        Returns
        -------
        arr : numpy record ndarray | None
            The generated numpy record ndarray holding the necessary information
            for each source.
            By default ``None`` is returned.
        """
        return None

    @abc.abstractmethod
    def select_events(
            self,
            events,
            src_evt_idxs=None,
            ret_original_evt_idxs=False,
            tl=None):
        """This method selects the events, which will contribute to the
        log-likelihood ratio function.

        Parameters
        ----------
        events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray of length N_events, holding the
            events.
        src_evt_idxs : 2-tuple of 1d ndarrays of ints | None
            The 2-element tuple holding the two 1d ndarrays of int of length
            N_values, specifying to which sources the given events belong to.
        ret_original_evt_idxs : bool
            Flag if the original indices of the selected events should get
            returned as well.
        tl : instance of TimeLord | None
            The optional instance of TimeLord that should be used to collect
            timing information about this method.

        Returns
        -------
        selected_events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray of length N_selected_events,
            holding the selected events, i.e. a subset of the ``events``
            argument.
        (src_idxs, evt_idxs) : 1d ndarrays of ints
            The two 1d ndarrays of int of length N_values, holding the indices
            of the sources and the selected events.
        original_evt_idxs : 1d ndarray of ints
            The (N_selected_events,)-shaped numpy ndarray holding the original
            indices of the selected events, if ``ret_original_evt_idxs`` is set
            to ``True``.
        """
        pass
class IntersectionEventSelectionMethod(
        EventSelectionMethod):
    """This class provides an event selection method for the intersection of two
    event selection methods. It can be created using the ``&`` operator:
    ``evt_sel_method1 & evt_sel_method2``.
    """

    def __init__(
            self,
            evt_sel_method1,
            evt_sel_method2,
            **kwargs):
        """Creates a compounded event selection method of two given event
        selection methods.

        Parameters
        ----------
        evt_sel_method1 : instance of EventSelectionMethod
            The instance of EventSelectionMethod for the first event selection
            method.
        evt_sel_method2 : instance of EventSelectionMethod
            The instance of EventSelectionMethod for the second event selection
            method.
        """
        super().__init__(
            shg_mgr=None,
            **kwargs)

        self.evt_sel_method1 = evt_sel_method1
        self.evt_sel_method2 = evt_sel_method2

    @staticmethod
    def _validated_method(method, prop_name):
        """Returns ``method`` if it is an instance of EventSelectionMethod,
        otherwise raises a TypeError naming the property being assigned.
        Shared by both property setters to avoid duplicated validation code.

        Parameters
        ----------
        method : object
            The value to validate.
        prop_name : str
            The name of the property being set, used in the error message.

        Returns
        -------
        method : instance of EventSelectionMethod
            The validated event selection method.

        Raises
        ------
        TypeError
            If ``method`` is not an instance of EventSelectionMethod.
        """
        if not isinstance(method, EventSelectionMethod):
            raise TypeError(
                f'The {prop_name} property must be an instance of '
                'EventSelectionMethod! '
                f'Its current type is {classname(method)}.')
        return method

    @property
    def evt_sel_method1(self):
        """The instance of EventSelectionMethod for the first event selection
        method.
        """
        return self._evt_sel_method1

    @evt_sel_method1.setter
    def evt_sel_method1(self, method):
        self._evt_sel_method1 = self._validated_method(
            method, 'evt_sel_method1')

    @property
    def evt_sel_method2(self):
        """The instance of EventSelectionMethod for the second event selection
        method.
        """
        return self._evt_sel_method2

    @evt_sel_method2.setter
    def evt_sel_method2(self, method):
        self._evt_sel_method2 = self._validated_method(
            method, 'evt_sel_method2')

    def change_shg_mgr(self, shg_mgr):
        """Changes the SourceHypoGroupManager instance of the event selection
        method. This will call the ``change_shg_mgr`` of the individual event
        selection methods.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager | None
            The new SourceHypoGroupManager instance, that should be used for
            this event selection method.
            It can be ``None`` if the event selection method does not depend on
            the sources.
        """
        self._evt_sel_method1.change_shg_mgr(shg_mgr=shg_mgr)
        self._evt_sel_method2.change_shg_mgr(shg_mgr=shg_mgr)

    def select_events(
            self,
            events,
            src_evt_idxs=None,
            ret_original_evt_idxs=False,
            tl=None):
        """Selects events by calling the ``select_events`` methods of the
        individual event selection methods in sequence, i.e. yielding the
        intersection of both selections.

        Parameters
        ----------
        events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray holding the events.
        src_evt_idxs : 2-tuple of 1d ndarrays of ints | None
            The 2-element tuple holding the two 1d ndarrays of int of length
            N_values, specifying to which sources the given events belong to.
        ret_original_evt_idxs : bool
            Flag if the original indices of the selected events should get
            returned as well.
        tl : instance of TimeLord | None
            The optional instance of TimeLord that should be used to collect
            timing information about this method.

        Returns
        -------
        selected_events : DataFieldRecordArray
            The instance of DataFieldRecordArray holding the selected events,
            i.e. a subset of the `events` argument.
        (src_idxs, evt_idxs) : 1d ndarrays of ints
            The indices of the sources and the selected events.
        original_evt_idxs : 1d ndarray of ints
            The (N_selected_events,)-shaped numpy ndarray holding the original
            indices of the selected events, if ``ret_original_evt_idxs`` is set
            to ``True``.
        """
        if ret_original_evt_idxs:
            (events, src_evt_idxs, org_evt_idxs1) =\
                self._evt_sel_method1.select_events(
                    events=events,
                    src_evt_idxs=src_evt_idxs,
                    ret_original_evt_idxs=True)
            (events, src_evt_idxs, org_evt_idxs2) =\
                self._evt_sel_method2.select_events(
                    events=events,
                    src_evt_idxs=src_evt_idxs,
                    ret_original_evt_idxs=True)
            # The second method's indices refer to the output of the first
            # method, so map them back to the original event indices.
            org_evt_idxs = np.take(org_evt_idxs1, org_evt_idxs2)

            return (events, src_evt_idxs, org_evt_idxs)

        (events, src_evt_idxs) = self._evt_sel_method1.select_events(
            events=events,
            src_evt_idxs=src_evt_idxs)
        (events, src_evt_idxs) = self._evt_sel_method2.select_events(
            events=events,
            src_evt_idxs=src_evt_idxs)

        return (events, src_evt_idxs)
class AllEventSelectionMethod(
        EventSelectionMethod):
    """Event selection method that selects every given event.
    """

    def __init__(self, shg_mgr):
        """Creates a new event selection method instance.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager
            The instance of SourceHypoGroupManager that defines the list of
            sources, i.e. the list of SourceModel instances. For this
            particular event selection method it has no meaning, but it is an
            interface parameter.
        """
        super().__init__(
            shg_mgr=shg_mgr)

    def sources_to_array(self, sources):
        """Creates the source array from the given list of sources. Since this
        event selection method does not depend on the sources, no source array
        is needed and ``None`` is returned.

        Returns
        -------
        arr : None
            Always ``None``, because this event selection method does not
            depend on any source.
        """
        return None

    def select_events(
            self,
            events,
            src_evt_idxs=None,
            ret_original_evt_idxs=False,
            tl=None):
        """Selects all of the given events, i.e. the returned event array is
        identical to the given one.

        Parameters
        ----------
        events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray holding the events, for which
            the selection method should get applied.
        src_evt_idxs : 2-tuple of 1d ndarrays of ints | None
            The 2-element tuple holding the two 1d ndarrays of int of length
            N_values, specifying to which sources the given events belong to.
        ret_original_evt_idxs : bool
            Flag if the original indices of the selected events should get
            returned as well.
        tl : instance of TimeLord | None
            The optional instance of TimeLord that should be used to collect
            timing information about this method.

        Returns
        -------
        selected_events : DataFieldRecordArray
            The instance of DataFieldRecordArray holding the selected events,
            i.e. a subset of the `events` argument.
        (src_idxs, evt_idxs) : 1d ndarrays of ints
            The indices of sources and the selected events.
        original_evt_idxs : 1d ndarray of ints
            The (N_selected_events,)-shaped numpy ndarray holding the original
            indices of the selected events, if ``ret_original_evt_idxs`` is set
            to ``True``.
        """
        with TaskTimer(tl, 'ESM: Calculate indices of selected events.'):
            if src_evt_idxs is not None:
                (src_idxs, evt_idxs) = src_evt_idxs
            else:
                # No mapping given: pair every source with every event.
                n_src = self.shg_mgr.n_sources
                src_idxs = np.repeat(np.arange(n_src), len(events))
                evt_idxs = np.tile(events.indices, n_src)

        if ret_original_evt_idxs:
            return (events, (src_idxs, evt_idxs), events.indices)

        return (events, (src_idxs, evt_idxs))
class SpatialEventSelectionMethod(
        EventSelectionMethod,
        metaclass=abc.ABCMeta):
    """Abstract base class for all event selection methods that select events
    based on spatial source information.
    """

    def __init__(
            self,
            shg_mgr,
            **kwargs):
        """Creates a new spatial event selection method instance.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager
            The instance of SourceHypoGroupManager that defines the list of
            sources, i.e. the list of SourceModel instances.
        """
        super().__init__(
            shg_mgr=shg_mgr,
            **kwargs)

    def sources_to_array(self, sources):
        """Converts the given sequence of SourceModel instances into a
        structured numpy ndarray holding each source's equatorial coordinates,
        as needed by spatial event selection methods.

        Parameters
        ----------
        sources : sequence of SourceModel
            The sequence of source models containing the necessary information
            of the source.

        Returns
        -------
        arr : numpy record ndarray
            The (N_sources,)-shaped structured ndarray with the data fields
            'ra' and 'dec'.

        Raises
        ------
        TypeError
            If ``sources`` is not a sequence of SourceModel instances.
        """
        if not issequenceof(sources, SourceModel):
            raise TypeError(
                'The sources argument must be a sequence of SourceModel '
                'instances! '
                f'Its current type is {classname(sources)}.')

        arr = np.empty(
            (len(sources),),
            dtype=[
                ('ra', np.float64),
                ('dec', np.float64)
            ],
            order='F')
        # Fill each coordinate column in one bulk assignment rather than
        # looping over the sources row by row.
        arr['ra'] = [src.ra for src in sources]
        arr['dec'] = [src.dec for src in sources]

        return arr
class DecBandEventSectionMethod(
SpatialEventSelectionMethod):
"""This event selection method selects events within a declination band
around a list of point-like source positions.
"""
def __init__(
        self,
        shg_mgr,
        delta_angle):
    """Creates and configures a spatial declination band event selection
    method object.

    Parameters
    ----------
    shg_mgr : instance of SourceHypoGroupManager
        The instance of SourceHypoGroupManager that defines the list of
        sources, i.e. the list of SourceModel instances.
    delta_angle : float
        The half-opening angle around the source in declination for which
        events should get selected. Presumably in radians, since the band
        edges get clipped to +/- pi/2 in ``select_events`` — confirm with
        callers.
    """
    super().__init__(
        shg_mgr=shg_mgr)
    # The property setter performs the float cast / validation.
    self.delta_angle = delta_angle
@property
def delta_angle(self):
    """The half-opening angle around the source in declination and
    right-ascension for which events should get selected.
    """
    return self._delta_angle

@delta_angle.setter
def delta_angle(self, angle):
    # float_cast raises with the given message if the value cannot be
    # converted to a float.
    angle = float_cast(
        angle,
        'The delta_angle property must be castable to type float!')
    self._delta_angle = angle
def select_events(
self,
events,
src_evt_idxs=None,
ret_original_evt_idxs=False,
tl=None):
"""Selects the events within the declination band.
Parameters
----------
events : instance of DataFieldRecordArray
The instance of DataFieldRecordArray that holds the event data.
The following data fields must exist:
``'dec'`` : float
The declination of the event.
src_evt_idxs : 2-tuple of 1d ndarrays of ints | None
The 2-element tuple holding the two 1d ndarrays of int of length
N_values, specifying to which sources the given events belong to.
ret_original_evt_idxs : bool
Flag if the original indices of the selected events should get
returned as well.
tl : instance of TimeLord | None
The optional instance of TimeLord that should be used to collect
timing information about this method.
Returns
-------
selected_events : instance of DataFieldRecordArray
The instance of DataFieldRecordArray holding only the selected
events.
(src_idxs, evt_idxs) : 1d ndarrays of ints
The indices of sources and the selected events.
original_evt_idxs : 1d ndarray of ints
The (N_selected_events,)-shaped numpy ndarray holding the original
indices of the selected events, if ``ret_original_evt_idxs`` is set
to ``True``.
"""
delta_angle = self._delta_angle
src_arr = self._src_arr
# Calculates the minus and plus declination around each source and
# bound it to -90deg and +90deg, respectively.
src_dec_minus = np.maximum(-np.pi/2, src_arr['dec'] - delta_angle)
src_dec_plus = np.minimum(src_arr['dec'] + delta_angle, np.pi/2)
# Determine the mask for the events which fall inside the declination
# window.
# mask_dec is a (N_sources,N_events)-shaped ndarray.
with TaskTimer(tl, 'ESM-DecBand: Calculate mask_dec.'):
mask_dec = (
(events['dec'] > src_dec_minus[:, np.newaxis]) &
(events['dec'] < src_dec_plus[:, np.newaxis])
)
# Determine the mask for the events that fall inside at least one
# source declination band.
# mask is a (N_events,)-shaped ndarray.
with TaskTimer(tl, 'ESM-DecBand: Calculate mask.'):
mask = np.any(mask_dec, axis=0)
# Reduce the events according to the mask.
with TaskTimer(tl, 'ESM-DecBand: Create selected_events.'):
# Using an integer indices array for data selection is several
# factors faster than using a boolean array.
selected_events_idxs = events.indices[mask]
selected_events = events[selected_events_idxs]
# Get selected events indices.
idxs = np.argwhere(mask_dec[:, mask])
src_idxs = idxs[:, 0]
evt_idxs = idxs[:, 1]
if ret_original_evt_idxs:
return (selected_events, (src_idxs, evt_idxs), selected_events_idxs)
return (selected_events, (src_idxs, evt_idxs))
class RABandEventSectionMethod(
        SpatialEventSelectionMethod):
    """This event selection method selects events within a right-ascension band
    around a list of point-like source positions.
    """

    def __init__(
            self,
            shg_mgr,
            delta_angle):
        """Creates and configures a right-ascension band event selection
        method object.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager
            The instance of SourceHypoGroupManager that defines the list of
            sources, i.e. the list of SourceModel instances.
        delta_angle : float
            The half-opening angle around the source in right-ascension for
            which events should get selected.
        """
        super().__init__(
            shg_mgr=shg_mgr)

        self.delta_angle = delta_angle

    @property
    def delta_angle(self):
        """The half-opening angle around the source in right-ascension for
        which events should get selected.
        """
        return self._delta_angle

    @delta_angle.setter
    def delta_angle(self, angle):
        # float_cast raises with the given message if the cast fails.
        angle = float_cast(
            angle,
            'The delta_angle property must be castable to type float!')
        self._delta_angle = angle

    def select_events(
            self,
            events,
            src_evt_idxs=None,
            ret_original_evt_idxs=False,
            tl=None):
        """Selects the events within the right-ascension band.

        The solid angle dOmega = dRA * dSinDec = dRA * dDec * cos(dec) is a
        function of declination, i.e. for a constant dOmega, the right-ascension
        value has to change with declination.

        Parameters
        ----------
        events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray that holds the event data.
            The following data fields must exist:

            ``'ra'`` : float
                The right-ascension of the event.
            ``'dec'`` : float
                The declination of the event.
        src_evt_idxs : 2-tuple of 1d ndarrays of ints | None
            The 2-element tuple holding the two 1d ndarrays of int of length
            N_values, specifying to which sources the given events belong to.
            NOTE(review): this argument is accepted for interface
            compatibility but is not used by this method.
        ret_original_evt_idxs : bool
            Flag if the original indices of the selected events should get
            returned as well.
        tl : instance of TimeLord | None
            The optional instance of TimeLord that should be used to collect
            timing information about this method.

        Returns
        -------
        selected_events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray holding only the selected
            events.
        (src_idxs, evt_idxs) : 1d ndarrays of ints
            The indices of the sources and the selected events.
        original_evt_idxs : 1d ndarray of ints
            The (N_selected_events,)-shaped numpy ndarray holding the original
            indices of the selected events, if ``ret_original_evt_idxs`` is set
            to ``True``.
        """
        delta_angle = self._delta_angle
        src_arr = self._src_arr

        # Get the minus and plus declination around the sources, clipped to
        # the poles at -90deg and +90deg.
        src_dec_minus = np.maximum(-np.pi/2, src_arr['dec'] - delta_angle)
        src_dec_plus = np.minimum(src_arr['dec'] + delta_angle, np.pi/2)

        # Calculate the cosine factor for the largest declination distance from
        # the source. We use np.amin here because smaller cosine values are
        # larger angles.
        # cosfact is a (N_sources,)-shaped ndarray.
        cosfact = np.amin(np.cos([src_dec_minus, src_dec_plus]), axis=0)

        # Calculate delta RA, which is a function of declination.
        # dRA is a (N_sources,)-shaped ndarray.
        # The half-width is capped at 2*pi; since ra_dist below is at most pi,
        # any cap >= pi effectively selects all events in RA for that source.
        dRA_half = np.amin(
            [np.repeat(2*np.pi, len(src_arr['ra'])),
             np.fabs(delta_angle / cosfact)], axis=0)

        # Calculate the right-ascension distance of the events w.r.t. the
        # source. We make sure to use the smaller distance on the circle, thus
        # the maximal distance is 180deg, i.e. pi.
        # ra_dist is a (N_sources,N_events)-shaped 2D ndarray.
        with TaskTimer(tl, 'ESM-RaBand: Calculate ra_dist.'):
            ra_dist = np.fabs(
                np.mod(
                    events['ra'] - src_arr['ra'][:, np.newaxis] + np.pi,
                    2*np.pi) - np.pi)

        # Determine the mask for the events which fall inside the
        # right-ascension window.
        # mask_ra is a (N_sources,N_events)-shaped ndarray.
        with TaskTimer(tl, 'ESM-RaBand: Calculate mask_ra.'):
            mask_ra = ra_dist < dRA_half[:, np.newaxis]

        # Determine the mask for the events that fall inside at least one
        # source sky window.
        # mask is a (N_events,)-shaped ndarray.
        with TaskTimer(tl, 'ESM-RaBand: Calculate mask.'):
            mask = np.any(mask_ra, axis=0)

        # Reduce the events according to the mask.
        with TaskTimer(tl, 'ESM-RaBand: Create selected_events.'):
            # Using an integer indices array for data selection is several
            # factors faster than using a boolean array.
            selected_events_idxs = events.indices[mask]
            selected_events = events[selected_events_idxs]
            # Get selected events indices.
            idxs = np.argwhere(mask_ra[:, mask])
            src_idxs = idxs[:, 0]
            evt_idxs = idxs[:, 1]

        if ret_original_evt_idxs:
            return (selected_events, (src_idxs, evt_idxs), selected_events_idxs)

        return (selected_events, (src_idxs, evt_idxs))
class SpatialBoxEventSelectionMethod(
        SpatialEventSelectionMethod):
    """This event selection method selects events within a spatial box in
    right-ascension and declination around a list of point-like source
    positions.
    """

    def __init__(
            self,
            shg_mgr,
            delta_angle):
        """Creates and configures a spatial box event selection method object.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager
            The instance of SourceHypoGroupManager that defines the list of
            sources, i.e. the list of SourceModel instances.
        delta_angle : float
            The half-opening angle around the source for which events should
            get selected.
        """
        super().__init__(
            shg_mgr=shg_mgr)

        self.delta_angle = delta_angle

    @property
    def delta_angle(self):
        """The half-opening angle around the source in declination and
        right-ascension for which events should get selected.
        """
        return self._delta_angle

    @delta_angle.setter
    def delta_angle(self, angle):
        # float_cast raises with the given message if the cast fails.
        angle = float_cast(
            angle,
            'The delta_angle property must be castable to type float!')
        self._delta_angle = angle

    def select_events(
            self,
            events,
            src_evt_idxs=None,
            ret_original_evt_idxs=False,
            tl=None):
        """Selects the events within the spatial box in right-ascension and
        declination.

        The solid angle dOmega = dRA * dSinDec = dRA * dDec * cos(dec) is a
        function of declination, i.e. for a constant dOmega, the right-ascension
        value has to change with declination.

        Parameters
        ----------
        events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray that holds the event data.
            The following data fields must exist:

            ``'ra'`` : float
                The right-ascension of the event.
            ``'dec'`` : float
                The declination of the event.
        src_evt_idxs : 2-tuple of 1d ndarrays of ints | None
            The 2-element tuple holding the two 1d ndarrays of int of length
            N_values, specifying to which sources the given events belong to.
            NOTE(review): this argument is accepted for interface
            compatibility but is not used by this method.
        ret_original_evt_idxs : bool
            Flag if the original indices of the selected events should get
            returned as well.
        tl : instance of TimeLord | None
            The optional instance of TimeLord that should be used to collect
            timing information about this method.

        Returns
        -------
        selected_events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray holding only the selected
            events.
        (src_idxs, evt_idxs) : 1d ndarrays of ints | None
            The indices of sources and the selected events.
        original_evt_idxs : 1d ndarray of ints
            The (N_selected_events,)-shaped numpy ndarray holding the original
            indices of the selected events, if ``ret_original_evt_idxs`` is set
            to ``True``.
        """
        delta_angle = self._delta_angle
        src_arr = self._src_arr
        n_sources = len(src_arr)

        srcs_ra = src_arr['ra']
        srcs_dec = src_arr['dec']

        # Get the minus and plus declination around the sources, clipped to
        # the poles at -90deg and +90deg.
        src_dec_minus = np.maximum(-np.pi/2, srcs_dec - delta_angle)
        src_dec_plus = np.minimum(srcs_dec + delta_angle, np.pi/2)

        # Calculate the cosine factor for the largest declination distance from
        # the source. We use np.amin here because smaller cosine values are
        # larger angles.
        # cosfact is a (N_sources,)-shaped ndarray.
        cosfact = np.amin(np.cos([src_dec_minus, src_dec_plus]), axis=0)

        # Calculate delta RA, which is a function of declination.
        # dRA is a (N_sources,)-shaped ndarray.
        # The half-width is capped at 2*pi; since the wrapped RA distance
        # below is at most pi, any cap >= pi selects all events in RA.
        dRA_half = np.amin(
            [np.repeat(2*np.pi, n_sources),
             np.fabs(delta_angle / cosfact)], axis=0)

        # Determine the mask for the events which fall inside the
        # right-ascension window.
        # mask_ra is a (N_sources,N_events)-shaped ndarray.
        with TaskTimer(tl, 'ESM: Calculate mask_ra.'):
            evts_ra = events['ra']

            # Fill in batch sizes of 128 maximum to save memory.
            batch_size = 128
            if n_sources > batch_size:
                mask_ra = np.zeros((n_sources, len(evts_ra)), dtype=bool)
                n_batches = int(np.ceil(n_sources / float(batch_size)))
                for bi in range(n_batches):
                    if bi == n_batches-1:
                        # We got the last batch of sources; it may be smaller
                        # than batch_size, so slice to the end.
                        srcs_slice = slice(bi*batch_size, None)
                    else:
                        srcs_slice = slice(bi*batch_size, (bi+1)*batch_size)

                    # Wrap the RA difference onto [0, pi] (shortest distance
                    # on the circle).
                    ra_diff = np.fabs(
                        evts_ra - srcs_ra[srcs_slice][:, np.newaxis])
                    ra_mod = np.where(
                        ra_diff >= np.pi, 2*np.pi - ra_diff, ra_diff)
                    mask_ra[srcs_slice, :] = (
                        ra_mod < dRA_half[srcs_slice][:, np.newaxis]
                    )
            else:
                ra_diff = np.fabs(evts_ra - srcs_ra[:, np.newaxis])
                ra_mod = np.where(ra_diff >= np.pi, 2*np.pi-ra_diff, ra_diff)
                mask_ra = ra_mod < dRA_half[:, np.newaxis]

        # Determine the mask for the events which fall inside the declination
        # window.
        # mask_dec is a (N_sources,N_events)-shaped ndarray.
        with TaskTimer(tl, 'ESM: Calculate mask_dec.'):
            mask_dec = (
                (events['dec'] > src_dec_minus[:, np.newaxis]) &
                (events['dec'] < src_dec_plus[:, np.newaxis])
            )

        # Determine the mask for the events which fall inside the
        # right-ascension and declination window.
        # mask_sky is a (N_sources,N_events)-shaped ndarray.
        with TaskTimer(tl, 'ESM: Calculate mask_sky.'):
            mask_sky = mask_ra & mask_dec
            # Release the per-dimension masks early to lower peak memory.
            del mask_ra
            del mask_dec

        # Determine the mask for the events that fall inside at least one
        # source sky window.
        # mask is a (N_events,)-shaped ndarray.
        with TaskTimer(tl, 'ESM: Calculate mask.'):
            mask = np.any(mask_sky, axis=0)

        # Reduce the events according to the mask.
        with TaskTimer(tl, 'ESM: Create selected_events.'):
            # Using an integer indices array for data selection is several
            # factors faster than using a boolean array.
            selected_events_idxs = events.indices[mask]
            selected_events = events[selected_events_idxs]
            # Get selected events indices.
            idxs = np.argwhere(mask_sky[:, mask])
            src_idxs = idxs[:, 0]
            evt_idxs = idxs[:, 1]

        if ret_original_evt_idxs:
            return (selected_events, (src_idxs, evt_idxs), selected_events_idxs)

        return (selected_events, (src_idxs, evt_idxs))
class PsiFuncEventSelectionMethod(
        EventSelectionMethod):
    """This event selection method selects events whose psi value, i.e. the
    great circle distance of the event to the source, is smaller than the value
    of the provided function.
    """

    def __init__(
            self,
            shg_mgr,
            psi_name,
            func,
            axis_name_list):
        """Creates a new PsiFuncEventSelectionMethod instance.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager
            The instance of SourceHypoGroupManager that defines the list of
            sources, i.e. the list of SourceModel instances.
        psi_name : str
            The name of the data field that provides the psi value of the event.
        func : callable
            The function that should get evaluated for each event. The call
            signature must be

                ``func(*axis_data)``,

            where ``*axis_data`` is the event data of each required axis. The
            number of axes must match the provided axis names through the
            ``axis_name_list``.
        axis_name_list : list of str
            The list of data field names for each axis of the function ``func``.
            All field names must be valid field names of the trial data's
            DataFieldRecordArray instance.

        Raises
        ------
        TypeError
            If ``func`` does not accept at least one argument per axis name.
        ValueError
            If the SourceHypoGroupManager defines more than one source.
        """
        super().__init__(
            shg_mgr=shg_mgr)

        self.psi_name = psi_name
        self.func = func
        self.axis_name_list = axis_name_list

        # The function must accept (at least) one argument per axis name.
        n_func_args = len(inspect.signature(self._func).parameters)
        if n_func_args < len(self._axis_name_list):
            raise TypeError(
                'The func argument must be a callable instance with at least '
                f'{len(self._axis_name_list)} arguments! Its current number '
                f'of arguments is {n_func_args}.')

        # select_events relies on the single-source assumption (see the
        # np.atleast_2d promotion there), so enforce it here.
        n_sources = self.shg_mgr.n_sources
        if n_sources != 1:
            raise ValueError(
                'The `PsiFuncEventSelectionMethod.select_events` currently '
                'supports only a single source. It was called with '
                f'{n_sources} sources.')

    @property
    def psi_name(self):
        """The name of the data field that provides the psi value of the event.
        """
        return self._psi_name

    @psi_name.setter
    def psi_name(self, name):
        if not isinstance(name, str):
            raise TypeError(
                'The psi_name property must be an instance of type str! '
                f'Its current type is {classname(name)}.')
        self._psi_name = name

    @property
    def func(self):
        """The function that should get evaluated for each event. The call
        signature must be ``func(*axis_data)``, where ``*axis_data`` is the
        event data of each required axis. The number of axes must match the
        provided axis names through the ``axis_name_list`` property.
        """
        return self._func

    @func.setter
    def func(self, f):
        if not callable(f):
            raise TypeError(
                'The func property must be a callable instance! '
                f'Its current type is {classname(f)}.')
        self._func = f

    @property
    def axis_name_list(self):
        """The list of data field names for each axis of the function defined
        through the ``func`` property.
        """
        return self._axis_name_list

    @axis_name_list.setter
    def axis_name_list(self, names):
        if not issequenceof(names, str):
            raise TypeError(
                'The axis_name_list property must be a sequence of str '
                'instances! '
                f'Its current type is {classname(names)}.')
        self._axis_name_list = list(names)

    def select_events(
            self,
            events,
            src_evt_idxs=None,
            ret_original_evt_idxs=False,
            tl=None):
        """Selects the events whose psi value is smaller than the value of the
        predefined function.

        Parameters
        ----------
        events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray that holds the event data.
            The following data fields must exist:

            <psi_name> : float
                The great circle distance of the event with the source.
            <axis_name(s)> : float
                The name of the axis required for the function ``func`` to be
                evaluated.
        src_evt_idxs : 2-tuple of 1d ndarrays of ints | None
            The 2-element tuple holding the two 1d ndarrays of int of length
            N_values, specifying to which sources the given events belong to.
            NOTE(review): this argument is accepted for interface
            compatibility but is not used by this method.
        ret_original_evt_idxs : bool
            Flag if the original indices of the selected events should get
            returned as well.
        tl : instance of TimeLord | None
            The optional instance of TimeLord that should be used to collect
            timing information about this method.

        Returns
        -------
        selected_events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray holding only the selected
            events.
        (src_idxs, evt_idxs) : 1d ndarrays of ints
            The indices of the sources and the selected events.
        original_evt_idxs : 1d ndarray of ints
            The (N_selected_events,)-shaped numpy ndarray holding the original
            indices of the selected events, if ``ret_original_evt_idxs`` is set
            to ``True``.
        """
        cls_name = classname(self)

        with TaskTimer(tl, f'{cls_name}: Get psi values.'):
            psi = events[self._psi_name]

        with TaskTimer(tl, f'{cls_name}: Get axis data values.'):
            func_args = [events[axis] for axis in self._axis_name_list]

        with TaskTimer(tl, f'{cls_name}: Creating mask.'):
            mask = psi < self._func(*func_args)

        with TaskTimer(tl, f'{cls_name}: Create selected_events.'):
            # Using an integer indices array for data selection is several
            # factors faster than using a boolean array.
            selected_events_idxs = events.indices[mask]
            selected_events = events[selected_events_idxs]
            # Get selected events indices. Only a single source is supported
            # (enforced in __init__), hence the 1d mask is promoted to a
            # (1, N_events) array to derive the (src_idxs, evt_idxs) pair.
            idxs = np.argwhere(np.atleast_2d(mask))
            src_idxs = idxs[:, 0]
            evt_idxs = idxs[:, 1]

        if ret_original_evt_idxs:
            return (selected_events, (src_idxs, evt_idxs), selected_events_idxs)

        return (selected_events, (src_idxs, evt_idxs))
class AngErrOfPsiEventSelectionMethod(
        SpatialEventSelectionMethod):
    """This event selection method selects events whose ``ang_err`` value is
    larger than the value of the provided function evaluated at the event's
    psi value (the opening angle between event and source). Events with a psi
    value below ``psi_floor`` are always kept.

    NOTE(review): despite earlier wording, no spatial-box selection is
    performed inside this class itself; a spatial pre-selection can be
    supplied through the ``src_evt_idxs`` argument of ``select_events``.
    """

    def __init__(
            self,
            shg_mgr,
            func,
            psi_floor=None,
            **kwargs):
        """Creates and configures an angular-error-of-psi event selection
        method object.

        Parameters
        ----------
        shg_mgr : instance of SourceHypoGroupManager
            The instance of SourceHypoGroupManager that defines the list of
            sources, i.e. the list of SourceModel instances.
        func : callable
            The function that should get evaluated for each event. The call
            signature must be

                ``func(psi)``,

            where ``psi`` is the opening angle between the source and the
            event.
        psi_floor : float | None
            The psi func event selection is excluded for events having psi
            value below the ``psi_floor``. If None, set it to default 5
            degrees.
        **kwargs
            Additional keyword arguments are passed on to the constructor of
            the SpatialEventSelectionMethod base class.
        """
        super().__init__(
            shg_mgr=shg_mgr,
            **kwargs)

        self.func = func

        if psi_floor is None:
            psi_floor = np.deg2rad(5)
        self.psi_floor = psi_floor

    @property
    def func(self):
        """The function that should get evaluated for each event. The call
        signature must be ``func(psi)``, where ``psi`` is the opening angle
        between the source and the event.
        """
        return self._func

    @func.setter
    def func(self, f):
        if not callable(f):
            raise TypeError(
                'The func property must be a callable instance! '
                f'Its current type is {classname(f)}.')
        self._func = f

    @property
    def psi_floor(self):
        """The psi func event selection is excluded for events having psi value
        below the `psi_floor`.
        """
        return self._psi_floor

    @psi_floor.setter
    def psi_floor(self, psi):
        # float_cast raises with the given message if the cast fails.
        psi = float_cast(
            psi,
            'The psi_floor property must be castable to type float!')
        self._psi_floor = psi

    def select_events(
            self,
            events,
            src_evt_idxs=None,
            ret_original_evt_idxs=False,
            tl=None):
        """Selects the events whose ``ang_err`` value is larger than the value
        of the provided function at the event's psi value, or whose psi value
        lies below ``psi_floor``.

        Parameters
        ----------
        events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray that holds the event data.
            The following data fields must exist:

            ``'ra'`` : float
                The right-ascension of the event.
            ``'dec'`` : float
                The declination of the event.
            ``'ang_err'`` : float
                The angular error of the event.
        src_evt_idxs : 2-tuple of 1d ndarrays of ints | None
            The 2-element tuple holding the two 1d ndarrays of int of length
            N_values, specifying to which sources the given events belong to.
            If set to ``None`` all given events will be considered for all
            sources.
        ret_original_evt_idxs : bool
            Flag if the original indices of the selected events should get
            returned as well.
        tl : instance of TimeLord | None
            The optional instance of TimeLord that should be used to collect
            timing information about this method.

        Returns
        -------
        selected_events : instance of DataFieldRecordArray
            The instance of DataFieldRecordArray holding only the selected
            events.
        (src_idxs, evt_idxs) : 1d ndarrays of ints
            The indices of the sources and the selected events.
        original_evt_idxs : 1d ndarray of ints
            The (N_selected_events,)-shaped numpy ndarray holding the original
            indices of the selected events, if ``ret_original_evt_idxs`` is set
            to ``True``.
        """
        if src_evt_idxs is None:
            # Consider every (source, event) combination.
            n_sources = len(self._src_arr)
            n_events = len(events)
            src_idxs = np.repeat(np.arange(n_sources), n_events)
            evt_idxs = np.tile(np.arange(n_events), n_sources)
        else:
            (src_idxs, evt_idxs) = src_evt_idxs

        # Perform selection based on psi values.
        with TaskTimer(tl, 'ESM: Calculate psi values.'):
            psi = angular_separation(
                ra1=np.take(self._src_arr['ra'], src_idxs),
                dec1=np.take(self._src_arr['dec'], src_idxs),
                ra2=np.take(events['ra'], evt_idxs),
                dec2=np.take(events['dec'], evt_idxs),
            )

        with TaskTimer(tl, 'ESM: Create mask_psi.'):
            # Keep events whose angular error is at least func(psi); events
            # closer to the source than psi_floor are always kept.
            mask_psi = (
                (events['ang_err'][evt_idxs] >= self._func(psi)) |
                (psi < self.psi_floor)
            )

        with TaskTimer(tl, 'ESM: Create selected_events.'):
            # Have to define the shape argument in order to not truncate
            # the mask in case last events are not selected.
            mask_sky = scipy.sparse.csr_matrix(
                (mask_psi, (src_idxs, evt_idxs)),
                shape=(len(self._src_arr), len(events))
            ).toarray()
            mask = np.any(mask_sky, axis=0)
            # Using an integer indices array for data selection is several
            # factors faster than using a boolean array.
            selected_events_idxs = events.indices[mask]
            selected_events = events[selected_events_idxs]
            # Get final selected events indices.
            idxs = np.argwhere(mask_sky[:, mask])
            src_idxs = idxs[:, 0]
            evt_idxs = idxs[:, 1]

        if ret_original_evt_idxs:
            return (selected_events, (src_idxs, evt_idxs), selected_events_idxs)

        return (selected_events, (src_idxs, evt_idxs))
|
icecubeREPO_NAMEskyllhPATH_START.@skyllh_extracted@skyllh-master@skyllh@core@event_selection.py@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/bar/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `text` attribute of `bar` traces."""

    def __init__(self, plotly_name="text", parent_name="bar", **kwargs):
        # Resolve the standard defaults, letting callers override any of
        # them through kwargs before forwarding to the base validator.
        params = dict(
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
        )
        params.update(kwargs)
        super(TextValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **params
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@bar@_text.py@.PATH_END.py
|
{
"filename": "_side.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/_side.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the `side` attribute of `layout.xaxis`."""

    def __init__(self, plotly_name="side", parent_name="layout.xaxis", **kwargs):
        # Resolve the standard defaults, letting callers override any of
        # them through kwargs before forwarding to the base validator.
        params = dict(
            edit_type=kwargs.pop("edit_type", "plot"),
            role=kwargs.pop("role", "info"),
            values=kwargs.pop("values", ["top", "bottom", "left", "right"]),
        )
        params.update(kwargs)
        super(SideValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **params
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@_side.py@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/histogram/unselected/_textfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
    # NOTE(review): this module follows plotly's generated graph-object
    # layout; keep any edits consistent with the sibling `_textfont.py`
    # modules.

    # class properties
    # --------------------
    _parent_path_str = "histogram.unselected"
    _path_str = "histogram.unselected.textfont"
    _valid_props = {"color"}

    # color
    # -----
    @property
    def color(self):
        """
        Sets the text font color of unselected points, applied only
        when a selection exists.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the text font color of unselected points, applied
            only when a selection exists.
        """

    def __init__(self, arg=None, color=None, **kwargs):
        """
        Construct a new Textfont object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.histogram.unse
            lected.Textfont`
        color
            Sets the text font color of unselected points, applied
            only when a selection exists.

        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")

        # Internal fast path: when constructed by a parent object, only
        # record the parent and skip validation/population below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.histogram.unselected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.unselected.Textfont`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@histogram@unselected@_textfont.py@.PATH_END.py
|
{
"filename": "CITATION.md",
"repo_name": "Huang-CL/Magrathea",
"repo_path": "Magrathea_extracted/Magrathea-master/CITATION.md",
"type": "Markdown"
}
|
# Citation
If you use MAGRATHEA, please cite the following article, published in MNRAS:
```
Huang, C., Rice, D.R., Steffen, J.H. 2022. MAGRATHEA: an open-source spherical symmetric planet interior structure code. MNRAS 513, 5256–5269. doi:10.1093/mnras/stac1133.
```
BibTeX:
```
@ARTICLE{Huang2022,
author = {{Huang}, Chenliang and {Rice}, David R. and {Steffen}, Jason H.},
title = "{MAGRATHEA: an open-source spherical symmetric planet interior structure code}",
journal = {\mnras},
keywords = {equation of state, software: public release, planets and satellites: composition, planets and satellites: general, planets and satellites: interiors, Astrophysics - Earth and Planetary Astrophysics, Astrophysics - Instrumentation and Methods for Astrophysics},
year = 2022,
month = jul,
volume = {513},
number = {4},
pages = {5256-5269},
doi = {10.1093/mnras/stac1133},
archivePrefix = {arXiv},
eprint = {2201.03094},
primaryClass = {astro-ph.EP},
adsurl = {https://ui.adsabs.harvard.edu/abs/2022MNRAS.513.5256H},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
|
Huang-CLREPO_NAMEMagratheaPATH_START.@Magrathea_extracted@Magrathea-master@CITATION.md@.PATH_END.py
|
{
"filename": "obsset_ql.py",
"repo_name": "igrins/plp",
"repo_path": "plp_extracted/plp-master/igrins/quicklook/obsset_ql.py",
"type": "Python"
}
|
import os
import numpy as np
import pandas as pd
import hashlib
import json
import inspect
from collections import OrderedDict
from ..storage_interface.db_file import load_key, save_key
from ..pipeline.driver import get_obsset as _get_obsset
from ..pipeline.argh_helper import argh, arg, wrap_multi
from ..igrins_libs.logger import info, set_level
from .ql_slit_profile import plot_stacked_profile, plot_per_order_stat
from .ql_flat import plot_flat
from ..igrins_recipes.argh_helper import get_default_values
from ..procedures.readout_pattern_guard import get_guard_column_pattern
def _hash(recipe, band, groupid, basename_postfix, params):
d = dict(recipe=recipe, band=band, groupid=groupid,
basename_postfix=basename_postfix,
params=params)
h = hashlib.new("sha1")
h.update(json.dumps(d, sort_keys=True).encode("utf8"))
return h.hexdigest(), d
class IndexDB(object):
    """Tracks which quick-look products are up to date.

    For each product, the sha1 digest of its (recipe, band, groupid,
    postfix, params) tuple is stored in the 'index' db of the given
    storage, so that re-processing can be skipped when nothing changed.
    """

    def __init__(self, storage):
        self.storage = storage
        # self.storage = self.storage.new_sectioned_storage("OUTDATA")

    def check_hexdigest(self, recipe, band, groupid, basename_postfix, params):
        """Return True if the stored digest matches the freshly computed one."""
        key = "{}/{:04d}".format(band, groupid)
        stored = load_key(self.storage, "index", recipe, key)
        if stored is None:
            return False
        stored_hexdigest, _stored_value = stored
        current_hexdigest, _ = _hash(recipe, band, groupid,
                                     basename_postfix, params)
        return stored_hexdigest == current_hexdigest

    def save_hexdigest(self, recipe, band, groupid, basename_postfix, params):
        """Compute and persist the digest for the given product."""
        hexdigest, payload = _hash(recipe, band, groupid,
                                   basename_postfix, params)
        key = "{}/{:04d}".format(band, groupid)
        # k = (groupid, basename_postfix)
        save_key(self.storage, "index", recipe, key, [hexdigest, payload])

    def save_dtlog(self, band, obsid, param):
        """Persist a datetime-log entry under the 'dtlog' section."""
        key = "{}/{:04d}".format(band, obsid)
        save_key(self.storage, "index", "dtlog", key, param)
def get_obsset(obsdate, recipe_name, band,
               obsids, frametypes,
               groupname=None, recipe_entry=None,
               config_file=None, saved_context_name=None,
               basename_postfix=""):
    """Build an observation set via the pipeline driver.

    Pure pass-through to ``_get_obsset``; kept so the quicklook code has
    a single local entry point.
    """
    return _get_obsset(obsdate, recipe_name, band,
                       obsids, frametypes,
                       groupname=groupname, recipe_entry=recipe_entry,
                       config_file=config_file,
                       saved_context_name=saved_context_name,
                       basename_postfix=basename_postfix)
# Command-line options shared by all quicklook drivers; wrapped onto the
# worker functions via argh's wrap_multi.
driver_args = [arg("-o", "--obsids", default=None),
               arg("-t", "--objtypes", default=None),
               arg("-f", "--frametypes", default=None),
               arg("-b", "--bands", default="HK"),
               arg("-c", "--config-file", default=None),
               arg("--log-level", default="INFO",
                   choices=["CRITICAL", "ERROR", "WARNING",
                            "INFO", "DEBUG", "NOTSET"]),
               arg("-ns", "--no-skip", default=False),
               # arg("--resume-from-context-file", default=None),
               # arg("--save-context-on-exception", default=False),
               arg("-d", "--debug", default=False)]
# def _get_default_values(driver_args):
#     default_map = OrderedDict()
#     for a in driver_args:
#         if "default" not in a:
#             continue
#         for k in a["option_strings"]:
#             if k.startswith("--"):
#                 default_map[k[2:].replace("-", "_")] = a["default"]
#     return default_map
# Mapping of option name -> default value, used by quicklook_decorator to
# populate kwargs when a driver is called directly (not via argh).
driver_args_map = get_default_values(driver_args)
def _get_obsid_obstype_frametype_list(config, obsdate,
                                      obsids, objtypes, frametypes):
    """Return an iterator of (obsid, objtype, frametype, dtlog_row) tuples.

    Values not given explicitly are filled in from the data-taking log
    found under the night's INDATA_PATH.  ``objtypes``/``frametypes`` may
    only be supplied when ``obsids`` is also supplied.

    Raises
    ------
    ValueError
        If `objtypes` or `frametypes` is given while `obsids` is None.
    """
    from ..igrins_libs import dt_logs
    if obsids is None and ((objtypes is not None) or (frametypes is not None)):
        # Corrected message: the old one ("should not be None") stated
        # the opposite of the condition being checked.
        raise ValueError("objtypes and frametypes must not be specified "
                         "when obsids is None")
    fn0 = os.path.join(config.root_dir,
                       config.get_value('INDATA_PATH', obsdate))
    # fn0 = config.get_value('INDATA_PATH', obsdate)
    # The data-taking log is always loaded so the per-obsid dt_row can be
    # returned: the previous early-return for fully-specified inputs
    # produced 3-tuples that the callers (which unpack 4 values) could
    # not consume.
    df = dt_logs.load_from_dir(obsdate, fn0)
    keys = ["OBSID", "OBJNAME", "FRAMETYPE", "OBJTYPE", "EXPTIME", "ROTPA"]
    m = df[keys].set_index("OBSID").to_dict(orient="index")
    if obsids is None:
        obsids = sorted(m.keys())
    if objtypes is None:
        objtypes = [m[o]["OBJTYPE"] for o in obsids]
    if frametypes is None:
        frametypes = [m[o]["FRAMETYPE"] for o in obsids]
    dt_rows = [m[o] for o in obsids]
    return zip(obsids, objtypes, frametypes, dt_rows)
def do_ql_flat(obsset):
    """Run the flat-field quicklook on every frame in `obsset`.

    Returns
    -------
    (jo_list, jo_raw_list)
        Lists of (obsid, json-serializable-result) pairs; the "raw" list
        carries an empty payload for flats.
    """
    from ..quicklook import ql_flat
    hdus = obsset.get_hdus()
    jo_raw_list = []
    jo_list = []
    for hdu, oi, ft in zip(hdus, obsset.obsids, obsset.frametypes):
        # Process each frame's own hdu.  The original code always passed
        # hdus[0], which only worked because quicklook obssets are built
        # with a single obsid.
        jo = ql_flat.do_ql_flat(hdu, ft)
        jo_list.append((oi, jo))
        jo_raw_list.append((oi, dict()))
    return jo_list, jo_raw_list
def do_ql_std(obsset, band):
    """Run the slit-profile quicklook on every frame in `obsset`.

    Returns
    -------
    (jo_list, jo_raw_list)
        Lists of (obsid, result) pairs from ql_slit_profile.
    """
    from ..quicklook import ql_slit_profile
    hdus = obsset.get_hdus()
    jo_list = []
    jo_raw_list = []
    for hdu, oi, ft in zip(hdus, obsset.obsids, obsset.frametypes):
        # Use each frame's own hdu; the original hdus[0] relied on the
        # obsset containing exactly one obsid.
        jo, jo_raw = ql_slit_profile.do_ql_slit_profile(hdu, band, ft)
        jo_list.append((oi, jo))
        jo_raw_list.append((oi, jo_raw))
    return jo_list, jo_raw_list
# TAR (science target) frames use the same slit-profile quicklook as STD.
do_ql_tar = do_ql_std
def save_jo_list(obsset, jo_list, jo_raw_list):
    """Persist the quicklook JSON results and their raw counterparts."""
    desc_main = ("QL_PATH", "{basename}{postfix}.quicklook.json")
    desc_raw = ("QL_PATH", "{basename}{postfix}.quicklook_raw.json")
    for item_desc, pairs in [(desc_main, jo_list), (desc_raw, jo_raw_list)]:
        for oi, jo in pairs:
            obsset.rs.store(str(oi), item_desc, jo)
def save_fig_list(obsset, oi, fig_list):
    """Render `fig_list` to PNGs and store one item per figure.

    Items are named "{basename}{postfix}.figNN.png".  The previous
    template contained a stray literal "i" ("{basename}{postfix}i")
    which produced misnamed output files.
    """
    from .qa_helper import figlist_to_pngs
    pngs = figlist_to_pngs(fig_list)
    for i, png in enumerate(pngs):
        item_desc = ("QL_PATH",
                     "{basename}{postfix}"
                     + ".fig{:02d}.png".format(i))
        obsset.rs.store(str(oi), item_desc, png)
def oi_ot_ft_generator(recipe_name,
                       obsdate, obsids=None, objtypes=None, frametypes=None,
                       bands="HK", **kwargs):
    """Generator driving per-(band, obsid) quicklook processing.

    Yields (band, obsid, objtype, frametype, dtlog_row, obsset) for each
    observation not already processed under `recipe_name`.  The consumer
    reports success with ``gen.send(True)``; only then is the parameter
    hash saved so the observation is skipped on the next run.  Pass
    no_skip=True in kwargs to force reprocessing.
    """
    from ..igrins_libs.igrins_config import IGRINSConfig
    config_file = kwargs.pop("config_file", None)
    if config_file is not None:
        config = IGRINSConfig(config_file)
    else:
        # Fall back to recipe.config in the current working directory.
        config = IGRINSConfig("recipe.config")
    fn0 = os.path.join(config.root_dir,
                       config.get_value('INDATA_PATH', obsdate))
    if not os.path.exists(fn0):
        raise RuntimeError("directory {} does not exist.".format(fn0))
    if isinstance(obsids, str):
        # Allow a comma-separated obsid string from the command line.
        obsids = [int(_) for _ in obsids.split(",")]
    oi_ot_ft_list = _get_obsid_obstype_frametype_list(config, obsdate,
                                                      obsids, objtypes,
                                                      frametypes)
    # Materialize so the sequence can be re-iterated once per band.
    oi_ot_ft_list = list(oi_ot_ft_list)
    no_skip = kwargs.pop("no_skip", False)
    for b in bands:
        for oi, ot, ft, dt_row in oi_ot_ft_list:
            obsset = get_obsset(obsdate, "quicklook", b,
                                obsids=[oi], frametypes=[ft],
                                config_file=config)
            storage = obsset.rs.storage.new_sectioned_storage("OUTDATA_PATH")
            index_db = IndexDB(storage)
            index_db.save_dtlog(b, oi, dt_row)
            if (not no_skip and
                index_db.check_hexdigest(recipe_name, b, oi, "",
                                         dict(obstype=ot, frametype=ft))):
                info("{band}/{obsid:04d} - skipping. already processed"
                     .format(band=b, obsid=oi, objtype=ot))
                continue
            stat = (yield b, oi, ot, ft, dt_row, obsset)
            # print("send:", stat)
            if stat:
                # Consumer confirmed success: record the hash so this
                # observation is skipped next time.
                index_db.save_hexdigest(recipe_name, b, oi, "",
                                        dict(obstype=ot, frametype=ft))
def quicklook_decorator(recipe_name):
    """Wrap a per-observation worker into a command-line driver.

    The returned decorator turns ``fun(b, oi, ot, ft, dt_row, obsset,
    kwargs)`` into a function with the (obsdate, obsids, ...) signature
    expected by the argh command wrappers, driving it through
    oi_ot_ft_generator and acknowledging success via send(True).
    """
    def _decorated(fun):
        def _f(obsdate, obsids=None, objtypes=None, frametypes=None,
               bands="HK", **kwargs):
            # this is very hacky way of populating the default values.
            # Better update the function documentation also.
            positional_args = inspect.getfullargspec(_f).args
            for k, v in driver_args_map.items():
                if (k not in positional_args):
                    kwargs.setdefault(k, v)
            log_level = kwargs.get("log_level")
            set_level(log_level)
            cgen = oi_ot_ft_generator(recipe_name, obsdate,
                                      obsids, objtypes,
                                      frametypes, bands, **kwargs)
            # Generator protocol: first send must be None; afterwards
            # True tells the generator the observation succeeded.
            stat = None
            while True:
                try:
                    _ = cgen.send(stat)
                except StopIteration:
                    break
                (b, oi, ot, ft, dt_row, obsset) = _
                info("entering {}".format(_))
                fun(b, oi, ot, ft, dt_row, obsset, kwargs)
                stat = True
        return _f
    return _decorated
@quicklook_decorator("quicklook")
def quicklook_func(b, oi, ot, ft, dt_row, obsset, kwargs):
    """Quicklook processing for a single (band, obsid) observation.

    FLAT frames get the flat-field quicklook; STD and TAR frames get the
    slit-profile quicklook.  Any other OBJTYPE is logged and skipped.
    """
    if ot == "FLAT":
        jo_list, jo_raw_list = do_ql_flat(obsset)
        # print(len(jo_list), jo_list[0][1]["stat_profile"])
        # df = pd.DataFrame(jo_list[0][1]["stat_profile"])
        save_jo_list(obsset, jo_list, jo_raw_list)
        jo = jo_list[0][1]
        fig1 = plot_flat(jo)
        save_fig_list(obsset, oi, [fig1])
    elif ot in ["STD", "TAR"]:
        # STD and TAR share the same slit-profile quicklook.  (The old
        # code duplicated this branch verbatim and mis-logged both types
        # as "unsupported" before processing them anyway.)
        jo_list, jo_raw_list = do_ql_std(obsset, b)
        save_jo_list(obsset, jo_list, jo_raw_list)
        jo = jo_list[0][1]
        jo_raw = jo_raw_list[0][1]
        fig1 = plot_stacked_profile(jo)
        fig2 = plot_per_order_stat(jo_raw, jo)
        save_fig_list(obsset, oi, [fig1, fig2])
    else:
        info("{band}/{obsid:04d} - unsupported OBJTYPE:{objtype}"
             .format(band=b, obsid=oi, objtype=ot))
# def get_column_percentile(guards, percentiles=None):
# if percentiles is None:
# percentiles = [10, 90]
# # guards = d[:, [0, 1, 2, 3, -4, -3, -2, -1]]
# r = OrderedDict(zip(percentiles, np.percentile(guards, percentiles)))
# r["std"] = np.std(guards[(r[10] < guards) & (guards < r[90])])
# return r
# def get_guard_column_pattern(d, pattern_noise_recipes=None):
# from igrins.procedures.readout_pattern import pipes
# if pattern_noise_recipes is None:
# pipenames_dark1 = ['amp_wise_bias_r2', 'p64_0th_order']
# else:
# pipenames_dark1 = pattern_noise_recipes
# guards = d[:, [0, 1, 2, 3, -4, -3, -2, -1]]
# pp = OrderedDict()
# for k in pipenames_dark1:
# p = pipes[k]
# _ = p.get(guards)
# guards = guards - p.broadcast(guards, _)
# guards = guards - np.median(guards)
# s = get_column_percentile(guards)
# pp[k] = dict(pattern=_, stat=s)
# return guards, pp
@quicklook_decorator("noise_guard")
def noise_guard_func(b, oi, ot, ft, dt_row, obsset, kwargs):
    """Measure readout-pattern noise in the guard columns of one frame.

    Stores the removed pattern components and their statistics as a
    "...noise_guard.json" item.  kwargs["pattern_noise_recipes"] may be
    a comma-separated string selecting the pattern-removal recipes.

    (The original wrapped this body in a dead ``if True:`` with an
    unreachable else branch; both are removed.)
    """
    pattern_noise_recipes = kwargs.get("pattern_noise_recipes", None)
    if pattern_noise_recipes is not None:
        pattern_noise_recipes = [s.strip() for s
                                 in pattern_noise_recipes.split(",")]
    hdus = obsset.get_hdus()
    assert len(hdus) == 1
    d = hdus[0].data
    guard, pp = get_guard_column_pattern(d, pattern_noise_recipes)
    # percent = get_column_percentile(guard)
    item_desc = ("QL_PATH", "{basename}{postfix}.noise_guard.json")
    obsset.rs.store(str(oi), item_desc,
                    dict(pattern_noise_recipes=pattern_noise_recipes,
                         pattern_noise=pp))
def quicklook_func_deprecated(obsdate, obsids=None, objtypes=None, frametypes=None,
                              bands="HK", **kwargs):
    """Deprecated monolithic quicklook driver.

    Superseded by quicklook_func + quicklook_decorator; kept for
    reference only.

    NOTE(review): ``oi_ot_ft_list`` below is a zip iterator that is
    never materialized, so with bands="HK" the second band iterates an
    exhausted iterator and processes nothing; the replacement generator
    calls list() on it.
    """
    import os
    from ..igrins_libs.igrins_config import IGRINSConfig
    config_file = kwargs.pop("config_file", None)
    if config_file is not None:
        config = IGRINSConfig(config_file)
    else:
        config = IGRINSConfig("recipe.config")
    # fn0 = config.get_value('INDATA_PATH', obsdate)
    # if not os.path.exists(fn0):
    #     raise RuntimeError("directory {} does not exist.".format(fn0))
    if isinstance(obsids, str):
        obsids = [int(_) for _ in obsids.split(",")]
    oi_ot_ft_list = _get_obsid_obstype_frametype_list(config, obsdate,
                                                      obsids, objtypes,
                                                      frametypes)
    no_skip = kwargs.pop("no_skip", False)
    for b in bands:
        for oi, ot, ft, dt_row in oi_ot_ft_list:
            obsset = get_obsset(obsdate, "quicklook", b,
                                obsids=[oi], frametypes=[ft],
                                config_file=config_file)
            storage = obsset.rs.storage.new_sectioned_storage("OUTDATA_PATH")
            index_db = IndexDB(storage)
            index_db.save_dtlog(b, oi, dt_row)
            if (not no_skip and
                index_db.check_hexdigest("quicklook", b, oi, "",
                                         dict(obstype=ot, frametype=ft))):
                info("{band}/{obsid:04d} - skipping. already processed"
                     .format(band=b, obsid=oi, objtype=ot))
                continue
            if ot == "FLAT":
                jo_list, jo_raw_list = do_ql_flat(obsset)
                # print(len(jo_list), jo_list[0][1]["stat_profile"])
                df = pd.DataFrame(jo_list[0][1]["stat_profile"])
                save_jo_list(obsset, jo_list, jo_raw_list)
                jo = jo_list[0][1]
                fig1 = plot_flat(jo)
                save_fig_list(obsset, oi, [fig1])
            elif ot in ["STD"]:
                info("{band}/{obsid:04d} - unsupported OBJTYPE:{objtype}"
                     .format(band=b, obsid=oi, objtype=ot))
                jo_list, jo_raw_list = do_ql_std(obsset, b)
                # df = pd.DataFrame(jo_list[0][1]["stat_profile"])
                # print(df[["y", "t_down_10", "t_up_90"]])
                save_jo_list(obsset, jo_list, jo_raw_list)
                jo = jo_list[0][1]
                jo_raw = jo_raw_list[0][1]
                fig1 = plot_stacked_profile(jo)
                fig2 = plot_per_order_stat(jo_raw, jo)
                save_fig_list(obsset, oi, [fig1, fig2])
            elif ot in ["TAR"]:
                info("{band}/{obsid:04d} - unsupported OBJTYPE:{objtype}"
                     .format(band=b, obsid=oi, objtype=ot))
                jo_list, jo_raw_list = do_ql_std(obsset, b)
                # df = pd.DataFrame(jo_list[0][1]["stat_profile"])
                save_jo_list(obsset, jo_list, jo_raw_list)
                jo = jo_list[0][1]
                jo_raw = jo_raw_list[0][1]
                fig1 = plot_stacked_profile(jo)
                fig2 = plot_per_order_stat(jo_raw, jo)
                save_fig_list(obsset, oi, [fig1, fig2])
            else:
                info("{band}/{obsid:04d} - unsupported OBJTYPE:{objtype}"
                     .format(band=b, obsid=oi, objtype=ot))
                continue
            index_db.save_hexdigest("quicklook", b, oi, "",
                                    dict(obstype=ot, frametype=ft))
def create_argh_command_quicklook():
    """Return the argh-wrapped "quicklook" command."""
    return argh.decorators.named("quicklook")(
        wrap_multi(quicklook_func, driver_args))
def create_argh_command_noise_guard():
    """Return the argh-wrapped "noise-guard" command."""
    extra_args = [arg("--pattern-noise-recipes",
                      default="amp_wise_bias_r2,p64_0th_order")]
    return argh.decorators.named("noise-guard")(
        wrap_multi(noise_guard_func, extra_args))
|
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@igrins@quicklook@obsset_ql.py@.PATH_END.py
|
{
"filename": "_autocolorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contourcarpet/_autocolorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Auto-generated boolean validator for the plotly
    `contourcarpet.autocolorscale` attribute (default edit_type "calc")."""
    def __init__(
        self, plotly_name="autocolorscale", parent_name="contourcarpet", **kwargs
    ):
        super(AutocolorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override these defaults through kwargs.
            edit_type=kwargs.pop("edit_type", "calc"),
            implied_edits=kwargs.pop("implied_edits", {}),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contourcarpet@_autocolorscale.py@.PATH_END.py
|
{
"filename": "test_fringing.py",
"repo_name": "LSSTDESC/Imsim",
"repo_path": "Imsim_extracted/Imsim-main/tests/test_fringing.py",
"type": "Python"
}
|
import numpy as np
import hashlib
import pytest
from imsim import make_batoid_wcs, CCD_Fringing, get_camera
import galsim
def test_fringing():
    """
    Test the fringing model: spatially varying amplitude statistics,
    agreement with the config-built sky image, the missing-boresight
    error, and location-independence when spatial_vary is False.
    """
    # Set a random center ra/dec
    cra = 54.9348753510528
    cdec = -35.8385705255579
    world_center = galsim.CelestialCoord(cra*galsim.degrees, cdec*galsim.degrees)
    mjd = 60232.3635999295
    rottelpos = 350.946271812373
    band = 'y'
    camera = get_camera()
    det_name = 'R22_S11'
    serial_num = camera[det_name].getSerial()
    # Seed the fringing model deterministically from the sensor serial number.
    seed = int(hashlib.sha256(serial_num.encode('UTF-8')).hexdigest(), 16) & 0xFFFFFFFF
    xarr, yarr = np.meshgrid(range(4096), range(4004))
    # Testing a CCD with an arbitrary location on the focal plane.
    ra = 54.86
    dec = -35.76
    wcs = make_batoid_wcs(ra, dec, rottelpos, mjd, band, 'LsstCam')
    config = {
        'image': {
            'type': 'LSST_Image',
            'xsize': 4096,
            'ysize': 4004,
            'wcs': wcs,
            'nobjects': 0,
            'det_name': 'R22_S11',
        },
    }
    image = galsim.config.BuildImage(config)
    ccd_fringing = CCD_Fringing(true_center=image.wcs.toWorld(image.true_center),
                                boresight=world_center,
                                seed=seed, spatial_vary=True)
    # Test zero value error
    with pytest.raises(ValueError):
        ccd_fringing.calculate_fringe_amplitude(xarr, yarr, amplitude=0)
    # Test when spatial vary is True.
    fringe_map = ccd_fringing.calculate_fringe_amplitude(xarr,yarr)
    # Check std of the diagonal of fringe map.
    np.testing.assert_approx_equal(np.std(np.diag(fringe_map)), 0.0014, significant=2)
    # Check the min/max of fringing variation for the current offset.
    np.testing.assert_approx_equal(fringe_map.max(), 1.00205, significant=4)
    np.testing.assert_approx_equal(fringe_map.min(), 0.99794, significant=4)
    # Actually make a fringing map with this:
    sky_level = 1000
    sky_image = galsim.Image(bounds=image.bounds, wcs=image.wcs, init_value=sky_level)
    sky_image *= fringe_map
    # Check that this is the same image that the config processing makes
    config = {
        'image': {
            'type': 'LSST_Image',
            'xsize': 4096,
            'ysize': 4004,
            'wcs': wcs,
            'nobjects': 0,
            'sky_level_pixel': sky_level,
            'apply_fringing': True,
            'boresight': world_center,
            'det_name': 'R22_S11',
        },
        'det_name': 'R22_S11'
    }
    config_sky_image = galsim.config.BuildImage(config)
    np.testing.assert_allclose(config_sky_image.array, sky_image.array)
    # If boresight is missing, it raises an exception
    config = galsim.config.CleanConfig(config)
    del config['image']['boresight']
    with np.testing.assert_raises(galsim.GalSimConfigError):
        galsim.config.BuildImage(config)
    # Test when spatial vary is False. The fringe amplitude should be the same for
    # sensors at different locations
    ccd_fringing_1 = CCD_Fringing(true_center=image.wcs.toWorld(image.true_center),
                                  boresight=world_center,
                                  seed=seed, spatial_vary=False)
    fringe_map1 = ccd_fringing_1.calculate_fringe_amplitude(xarr,yarr)
    # Try another random location on the focal plane.
    ra = 58.86
    dec = -38.76
    wcs = make_batoid_wcs(ra, dec, rottelpos, mjd, band, 'LsstCam')
    # NOTE(review): ccd_fringing_2 reuses image.wcs (the first pointing),
    # so the wcs built just above is unused here -- presumably harmless
    # because spatial_vary=False, but worth confirming.
    ccd_fringing_2 = CCD_Fringing(true_center=image.wcs.toWorld(image.true_center),
                                  boresight=world_center,
                                  seed=seed, spatial_vary=False)
    fringe_map2 = ccd_fringing_2.calculate_fringe_amplitude(xarr,yarr)
    # Check if the two fringing maps are identical.
    if np.array_equal(fringe_map1,fringe_map2) != True:
        raise ValueError("Fringe amplitude should be the same for sensors when spatial vary is False.")
def test_fringing_variation_level():
    """Regression test for the pkl => fits conversion: the fringing
    variation level at several focal-plane positions must stay fixed."""
    boresight = galsim.CelestialCoord(0*galsim.degrees, 0*galsim.degrees)
    cases = [
        (0, 0.1, 1.056503042318907),
        (0, 0.2, 1.1207294877266138),
        (0.2, -0.1, 1.0044602251026102),
        (1.1, 0.2, 1.0166040509448886),
        (-1.2, 0.5, 1.0389039410245318),
        (1.2, -0.4, 1.0204232685215646),
    ]
    for ra, dec, expected in cases:
        center = galsim.CelestialCoord(ra*galsim.degrees, dec*galsim.degrees)
        model = CCD_Fringing(
            true_center=center,
            boresight=boresight,
            seed=0,
            spatial_vary=True,
        )
        np.testing.assert_allclose(
            model.fringe_variation_level(),
            expected,
            atol=1e-10, rtol=1e-10,
        )
# Allow running this test module directly, outside of pytest.
if __name__ == '__main__':
    test_fringing()
    test_fringing_variation_level()
|
LSSTDESCREPO_NAMEImsimPATH_START.@Imsim_extracted@Imsim-main@tests@test_fringing.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/datarich/__init__.py",
"type": "Python"
}
|
'''
Econometrics for a Datarich Environment
=======================================
Introduction
------------
In many cases we are performing statistical analysis when many observed variables are
available, when we are in a data rich environment. Machine learning has a wide variety
of tools for dimension reduction and penalization when there are many varibles compared
to the number of observation. Chemometrics has a long tradition of using Partial Least
Squares, NIPALS and similar in these cases. In econometrics the same problem shows up
when there are either many possible regressors, many (weak) instruments or when there are
a large number of moment conditions in GMM.
This section is intended to collect some models and tools in this area that are relevant
for the statistical analysis and econometrics.
Covariance Matrices
===================
Several methods are available to reduce the small sample noise in estimated covariance
matrices with many variables.
Some applications:
weighting matrix with many moments,
covariance matrix for portfolio choice
Dimension Reduction
===================
Principal Component and Partial Least Squares try to extract the important low dimensional
factors from the data with many variables.
Regression with many regressors
===============================
Factor models, selection of regressors and shrinkage and penalization are used to improve
the statistical properties, when the presence of too many regressors leads to over-fitting
and too noisy small sample estimators and statistics.
Regression with many moments or many instruments
================================================
The same tools apply and can be used in these two cases.
e.g. Tychonov regularization of weighting matrix in GMM, similar to Ridge regression, the
weighting matrix can be shrunk towards the identity matrix.
Simplest case will be part of GMM. I do not know how much will be standalone
functions.
Intended Content
================
PLS
---
what should be available in class?
Factormodel and supporting helper functions
-------------------------------------------
PCA based
~~~~~~~~~
First version based PCA on Stock/Watson and Bai/Ng, and recent papers on the
selection of the number of factors. Not sure about Forni et al. in approach.
Basic support of this needs additional results for PCA, error covariance matrix
of data on reduced factors, required for criteria in Bai/Ng.
Selection criteria based on eigenvalue cutoffs.
Paper on PCA and structural breaks. Could add additional results during
find_nfact to test for parameter stability. I have not read the paper yet.
Idea: for forecasting, use up to h-step ahead endogenous variables to directly
get the forecasts.
Asymptotic results and distribution: not too much idea yet.
Standard OLS results are conditional on factors, paper by Haerdle (abstract
seems to suggest that this is ok, Park 2009).
Simulation: add function to simulate DGP of Bai/Ng and recent extension.
Sensitivity of selection criteria to heteroscedasticity and autocorrelation.
Bai, J. & Ng, S., 2002. Determining the Number of Factors in
Approximate Factor Models. Econometrica, 70(1), pp.191-221.
Kapetanios, G., 2010. A Testing Procedure for Determining the Number
of Factors in Approximate Factor Models With Large Datasets. Journal
of Business and Economic Statistics, 28(3), pp.397-409.
Onatski, A., 2010. Determining the Number of Factors from Empirical
Distribution of Eigenvalues. Review of Economics and Statistics,
92(4), pp.1004-1016.
Alessi, L., Barigozzi, M. & Capasso, M., 2010. Improved penalization
for determining the number of factors in approximate factor models.
Statistics & Probability Letters, 80(23-24), pp.1806-1813.
Breitung, J. & Eickmeier, S., Testing for structural breaks in dynamic
factor models. Journal of Econometrics, In Press, Accepted Manuscript.
Available at:
http://www.sciencedirect.com/science/article/B6VC0-51G3W92-1/2/f45ce2332443374fd770e42e5a68ddb4
[Accessed November 15, 2010].
Croux, C., Renault, E. & Werker, B., 2004. Dynamic factor models.
Journal of Econometrics, 119(2), pp.223-230.
Forni, M. et al., 2009. Opening the Black Box: Structural Factor
Models with Large Cross Sections. Econometric Theory, 25(05),
pp.1319-1347.
Forni, M. et al., 2000. The Generalized Dynamic-Factor Model:
Identification and Estimation. Review of Economics and Statistics,
82(4), pp.540-554.
Forni, M. & Lippi, M., The general dynamic factor model: One-sided
representation results. Journal of Econometrics, In Press, Accepted
Manuscript. Available at:
http://www.sciencedirect.com/science/article/B6VC0-51FNPJN-1/2/4fcdd0cfb66e3050ff5d19bf2752ed19
[Accessed November 15, 2010].
Kapetanios, G., 2010. A Testing Procedure for Determining the Number
of Factors in Approximate Factor Models With Large Datasets. Journal
of Business and Economic Statistics, 28(3), pp.397-409.
Onatski, A., 2010. Determining the Number of Factors from Empirical
Distribution of Eigenvalues. Review of Economics and Statistics,
92(4), pp.1004-1016.
Park, B.U. et al., 2009. Time Series Modelling With Semiparametric
Factor Dynamics. Journal of the American Statistical Association,
104(485), pp.284-298.
other factor algorithm
~~~~~~~~~~~~~~~~~~~~~~
PLS should fit in reasonably well.
Bai/Ng have a recent paper, where they compare LASSO, PCA, and similar, individual
and in combination.
Check how much we can use scikits.learn for this.
miscellaneous
~~~~~~~~~~~~~
Time series modeling of factors for prediction, ARMA, VARMA.
SUR and correlation structure
What about sandwich estimation, robust covariance matrices?
Similarity to Factor-Garch and Go-Garch
Updating: incremental PCA, ...?
TODO next
=========
MVOLS : OLS with multivariate endogenous and identical exogenous variables.
rewrite and expand current varma_process.VAR
PCA : write a class after all, and/or adjust the current donated class
and keep adding required statistics, e.g.
residual variance, projection of X on k-factors, ... updating ?
FactorModelUnivariate : started, does basic principal component regression,
based on standard information criteria, not Bai/Ng adjusted
FactorModelMultivariate : follow pattern for univariate version and use
MVOLS
'''
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@sandbox@datarich@__init__.py@.PATH_END.py
|
{
"filename": "logoSingularity.py",
"repo_name": "mhardcastle/ddf-pipeline",
"repo_path": "ddf-pipeline_extracted/ddf-pipeline-master/misc/logoSingularity.py",
"type": "Python"
}
|
import os
# Colored-string helper: use DDFacet's ModColor when available, otherwise
# fall back to a no-op that returns the text unchanged.
try:
    import DDFacet.Other.ModColor as MC
    def Str(*args,**kwargs):
        # Delegate to DDFacet's ANSI-color formatter.
        return MC.Str(*args,**kwargs)
except:
    def Str(ss,**kwargs):
        # DDFacet not installed: ignore the styling kwargs.
        return ss
import shutil
from subprocess import check_output
from subprocess import Popen, PIPE
import subprocess
def getVersion(Name):
    """Return the version string for tool `Name`, or a status string.

    Failures in the underlying probe are reported as a string rather
    than propagated.  Previously the except branch fell through and
    returned None implicitly, which crashed the downstream string
    concatenation in pLine().
    """
    try:
        r = getVersionSubprocess(Name)
        if isinstance(r, list):
            r = " ".join(r)
        if "ModuleNotFoundError" in r:
            return "ModuleNotFound"
        return r.strip()
    except Exception as e:
        print('%s: failed with: %s' % (Name, str(e)))
        return "Unknown (probe failed)"
def OS(s):
    """Run shell command `s` (string or list of tokens) and return its
    combined stdout+stderr as a list of lines (newlines retained).

    Uses subprocess with an in-memory pipe instead of the original
    ``os.system(... > /tmp/OS.log)`` round-trip, which leaked a file
    handle and raced when two processes shared /tmp/OS.log.
    """
    if isinstance(s, list):
        s = " ".join(s)
    proc = subprocess.run(s, shell=True, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, text=True)
    return proc.stdout.splitlines(keepends=True)
def getVersionSubprocess(Name):
    """Probe tool `Name` and return its version.

    Returns a str for most branches (or, for a few "Package" branches, a
    list of output lines that getVersion() later joins).  "Not installed"
    is returned when the executable is missing.
    """
    if Name=="losoto":
        command=["losoto", "--version"]
        #out = check_output(command)
        out=OS(command)
        if "not found" in out[0]: return "Not installed"
        return out[0].strip()
    elif Name=="wsclean":
        command=["wsclean","--version"]
        out=OS(command)
        #result = subprocess.run(command, capture_output=True, text=True)
        #stderr,stdout=result.stderr,result.stdout
        out="".join(out)
        # NOTE(review): `out` is a str here, so out[0] is a single
        # character and this "not found" check can never match;
        # `in out` was probably intended.
        if "not found" in out[0]: return "Not installed"
        v=(out.replace("\n"," ").split(" WSClean version ")[1].split("This software package")[0]).replace(" ","")
        return v
    elif Name=="DP3" or Name=="aoflagger":
        command=[Name,"--version"]
        #result = subprocess.run(command, capture_output=True, text=True)
        #stderr,stdout=result.stderr,result.stdout
        out=OS(command)
        if "not found" in out[0]: return "Not installed"
        out="".join(out)
        # Version is whatever follows the tool's own name in its banner.
        v=(out.strip().lower().split(Name.lower())[1].replace(" ",""))
        return v
    elif Name=="DDF":
        command=["%s.py"%Name,"--version"]
        #result = subprocess.run(command, capture_output=True, text=True)
        #stderr,stdout=result.stderr,result.stdout
        r=OS(command)
        if "not found" in r[0]: return "Not installed"
        # print("rr",r)
        # #v= stdout.split("\n")[0].split("DDFacet version is ")[1]
        # print(r[-1].split("DDFacet version is "))
        rr="Not installed"
        for l in r:
            if 'DDFacet version is' in l:
                rr=l.split("DDFacet version is ")[1].strip()
                break
        return rr
    elif Name=="kMS":
        command=["%s.py"%Name,"--version"]
        # result = subprocess.run(command, capture_output=True, text=True)
        # stderr,stdout=result.stderr,result.stdout
        # return stdout.split("\n")[-2]
        r=OS(command)
        if "not found" in r[0]: return "Not installed"
        #print("rr",r)
        #v= stdout.split("\n")[0].split("DDFacet version is ")[1]
        return r[-1].strip()
    elif Name=="DynSpecMS":
        command=["ms2dynspec.py","--version"]
        result = subprocess.run(command, capture_output=True, text=True)
        stderr,stdout=result.stderr,result.stdout
        return stdout.split("\n")[-2].split("version ")[-1]
    elif Name=="LOFARBeam":
        command=["python","-c",'"import lofar.stationresponse"']
        # print(command)
        # result = subprocess.run(command, capture_output=True, text=True)
        # stderr,stdout=result.stderr,result.stdout
        s=(" ".join(command))
        r=OS(s)
        # Empty output means the import succeeded (no version exposed).
        if len(r)==0:
            r="Installed, no version available"
        return r
    elif Name=="nenupy":
        command=["python","-c",'"import nenupy,sys; print(str(nenupy.__version__),file=sys.stdout)"']
        s=(" ".join(command))
        r=OS(s)
        return r
        #o=os.popen(s).read().strip()
        # return o
    elif Name=="lsmtool" or Name=="LofarStMan":
        command=["python","-c",'"import %s,sys"'%Name]
        s=(" ".join(command))
        #o=os.popen(s).read().strip()
        r=OS(s)
        if len(r)==0:
            r="Installed, no version available"
        return r
    elif Name=="drawMS":
        command=["drawMS.py","--version"]
        r=OS(command)
        if "not found" in r[0]: return "Not installed"
        # print("rr",r)
        # #v= stdout.split("\n")[0].split("DDFacet version is ")[1]
        # print(r[-1].split("DDFacet version is "))
        rr="Not installed"
        for l in r:
            if 'drawMS.py version' in l:
                rr=l.split("drawMS.py version ")[1].strip()
                break
        return rr
    elif Name=="lotss-query":
        command=["python","-c",'"import surveys_db"']
        s=(" ".join(command))
        r=OS(s)
        if len(r)==0:
            r="Installed, no version available"
        return r
    elif Name=="ddf-pipeline":
        command=["python","-c",'"from pipeline_version import version; print(version())"']
        s=(" ".join(command))
        r=OS(s)
        if len(r)==0:
            r="Installed, no version available"
        return r
    else:
        # Unknown tool name: just echo it (returns None implicitly).
        print(Name)
# Registry of external tools to report.  Type "exe" means a command-line
# executable (Name "self" => the dict key itself is the command); type
# "Package" means an importable Python module named by "Name".
DicoExe={"losoto":{"Type":"exe",
                   "Name":"self"},
         "DP3":{"Type":"exe",
                "Name":"self"},
         "wsclean":{"Type":"exe",
                    "Name":"self"},
         "aoflagger":{"Type":"exe",
                      "Name":"self"},
         "DDF":{"Type":"exe",
                "Name":"DDF.py"},
         "kMS":{"Type":"exe",
                "Name":"kMS.py"},
         "DynSpecMS":{"Type":"exe",
                      "Name":"ms2dynspec.py"},
         "LOFARBeam":{"Type":"Package",
                      "Name":"lofar.stationresponse"},
         "nenupy":{"Type":"Package",
                   "Name":"nenupy"},
         "lsmtool":{"Type":"exe",
                    "Name":"self"},
         "LofarStMan":{"Type":"Package",
                       "Name":"LofarStMan"},
         "drawMS":{"Type":"exe",
                   "Name":"self"},
         "lotss-query":{"Type":"Package",
                        "Name":"surveys_db"},
         "ddf-pipeline":{"Type":"Package",
                         "Name":"run_full_field_reprocessing_pipeline"}
         }
# Width used to right-justify tool names in the version table.
SHIFT=30
def Print_v():
    """Print a "name : version" framed line for every tool in DicoExe."""
    for Name in DicoExe.keys():
        D=DicoExe[Name]
        version=getVersion(Name)#.rjust(30," ")
        NameS=Name#.rjust(20," ")
        #print(NameS,version)
        pLine([NameS,version],SHIFT)
# if D["Type"]=="exe":
# exeName=D["Name"]
# if exeName=="self":
# exeName=Name
# Path=shutil.which(exeName)
# if Path is None:
# Status=("Not installed")
# else:
# Status=("Installed ")
# else:
# try:
# exec("import %s"%D["Name"])
# Status=("Installed ")
# except:
# Status=("Not installed")
# ss="%20s : %s"%(Name,Status)
# Overall banner width and printf-style templates for a framed line.
W=90
l="%"+"%is"%(W-5)
ll=" |%s|"%(l)
def pLine(s,justify="center",length=None):
    """Print one line of the framed banner.

    With justify="center" (the default) `s` is a string centered in the
    frame; `length` overrides len(s) when `s` carries invisible ANSI
    color codes.  Otherwise, when `s` is a [label, value] pair, `justify`
    is (re)used as the integer width the label is right-justified to --
    callers pass SHIFT here.
    """
    if justify=="center":
        #print(ll%(str(s).center(W-5," ")))
        ls=len(s)
        if length is not None:
            ls=length
        S=" "*((W-ls-5)//2)+s+" "*((W-ls-5)//2)
        S0=" "*((W-ls-5)//2)+" "*ls+" "*((W-ls-5)//2)
        # Pad one space when the visible width is even so the frame lines up.
        if len(S0)%2==0: S+=" "
        print(" |%s|"%S)
        #print(ll%(str(s).center(W-5," ")))
    elif isinstance(s,list):
        s0,s1=s
        #print(s0,s1)
        #print(s0.rjust(justify," "))
        ss=(s0.rjust(justify," ")+" : "+s1)
        #print(ss,len(ss))
        F=" "*(W-len(ss)-5)
        print(" |"+ss+F+"|")
# --- Banner printed at container startup ---------------------------------
Sep="="*(W-5)
print()
pLine(Sep)
s="Radio soft singularity image"
pLine(Str(s.upper(),Bold=1,col="green"),length=len(s))
#pLine("Radio soft singularity image".upper())
pLine("")
pLine(["To source an external dev dir"," source setDev.sh <DirName>"],SHIFT)
pLine(["for example"," source setDev.sh /data/$USER/DEV"],SHIFT)
pLine(Sep)
#pLine(["<Software>","<Versions>"],SHIFT)
#Print_v()
#pLine(Sep)
print()
print()
|
mhardcastleREPO_NAMEddf-pipelinePATH_START.@ddf-pipeline_extracted@ddf-pipeline-master@misc@logoSingularity.py@.PATH_END.py
|
{
"filename": "bitmask.py",
"repo_name": "desihub/desiutil",
"repo_path": "desiutil_extracted/desiutil-main/py/desiutil/bitmask.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
================
desiutil.bitmask
================
Mask bits for the spectro pipeline.
Individual packages will define their own mask bits and use this as a utility
access wrapper. Typical users will get their bitmasks pre-made from those
packages, not from here.
Stephen Bailey, Lawrence Berkeley National Lab
Fall 2015
Examples
--------
desispec_ could create a ccdmask like this:
>>> from desiutil.bitmask import BitMask
>>> import yaml
>>> _bitdefs = yaml.safe_load('''
... ccdmask:
... - [BAD, 0, "Pre-determined bad pixel (any reason)"]
... - [HOT, 1, "Hot pixel"]
... - [DEAD, 2, "Dead pixel"]
... - [SATURATED, 3, "Saturated pixel from object"]
... - [COSMIC, 4, "Cosmic ray"]
... ''')
...
>>> ccdmask = BitMask('ccdmask', _bitdefs)
Users would then access this mask with:
>>> from desispec.bitmasks import ccdmask
>>> ccdmask.COSMIC | ccdmask.SATURATED #- 2**4 + 2**3
24
>>> ccdmask.mask('COSMIC') # 2**4, same as ccdmask.COSMIC
16
>>> ccdmask.mask(4) # 2**4, same as ccdmask.COSMIC
16
>>> ccdmask.COSMIC # 2**4, same as ccdmask.mask('COSMIC')
16
>>> ccdmask.bitnum('COSMIC')
4
>>> ccdmask.bitname(4)
'COSMIC'
>>> ccdmask.names()
['BAD', 'HOT', 'DEAD', 'SATURATED', 'COSMIC']
>>> ccdmask.names(3)
['BAD', 'HOT']
>>> ccdmask.comment(0)
'Pre-determined bad pixel (any reason)'
>>> ccdmask.comment('COSMIC')
'Cosmic ray'
.. _desispec: http://desispec.readthedocs.io
"""
class _MaskBit(int):
    """A single mask bit.

    Subclasses :class:`int` to act like an :class:`int`, but allows the
    ability to extend with blat.name, blat.comment, blat.mask, blat.bitnum.

    Attributes
    ----------
    name : :class:`str`
        The name of the bit.
    bitnum : :class:`int`
        The number of the bit.  The value of the bit is ``2**bitnum``.
    mask : :class:`int`
        The value of the bit, ``2**bitnum``.
    comment : :class:`str`
        A comment explaining the meaning of the bit.
    """
    def __new__(cls, name, bitnum, comment, extra=None):
        # ``extra=None`` replaces the previous mutable default
        # ``extra=dict()``, which was shared across all calls.
        if extra is None:
            extra = dict()
        self = super(_MaskBit, cls).__new__(cls, 2**bitnum)
        self.name = name
        self.bitnum = bitnum
        self.mask = 2**bitnum
        self.comment = comment
        self._extra = extra
        for key, value in extra.items():
            # Refuse extra keys that would shadow int (or our own) attributes.
            if hasattr(self, key):
                raise AttributeError(
                    "Bit {0} extra key '{1}' is already in use by int objects.".format(name, key))
            self.__dict__[key] = value
        return self
    def __str__(self):
        return ('{0.name:16s} bit {0.bitnum} mask 0x{0.mask:X} - ' +
                '{0.comment}').format(self)
# Class to provide mask bit utility functions
class BitMask(object):
    """BitMask object to represent bit names, masks, and comments.

    Typical users are not expected to create BitMask objects directly;
    other packages like desispec and desitarget will have used this
    to pre-create the bitmasks for them using definition files in those
    packages.

    Parameters
    ----------
    name : :class:`str`
        Name of this mask, must be key in `bitdefs`.
    bitdefs : :class:`dict`
        Dictionary of different mask bit definitions;
        each value is a list of ``[bitname, bitnum, comment]``.
        A 4th entry is optional, which must be a dictionary.
    """

    def __init__(self, name, bitdefs):
        """Init.
        """
        self._bits = dict()
        self._name = name
        for x in bitdefs[name]:
            bitname, bitnum, comment = x[0:3]
            if len(x) == 4:
                extra = x[3]
                if not isinstance(extra, dict):
                    raise ValueError(
                        '{} extra values should be a dict'.format(bitname))
            else:
                extra = dict()
            # Index each bit under both its name and its number so either
            # kind of key works in all the lookup methods below.
            self._bits[bitname] = _MaskBit(bitname, bitnum, comment, extra)
            self._bits[bitnum] = self._bits[bitname]

    def __getitem__(self, bitname):
        """Return mask for individual bitname.
        """
        return self._bits[bitname]

    def bitnum(self, bitname):
        """Return bit number (int) for this `bitname` (string).

        Parameters
        ----------
        bitname : :class:`str`
            The bit name.

        Returns
        -------
        :class:`int`
            The bit value.
        """
        return self._bits[bitname].bitnum

    def bitname(self, bitnum):
        """Return bit name (string) for this `bitnum` (integer).

        Parameters
        ----------
        bitnum : :class:`int`
            The number of the bit.

        Returns
        -------
        :class:`str`
            The name of the bit.
        """
        return self._bits[bitnum].name

    def comment(self, bitname_or_num):
        """Return comment for this bit name or bit number.

        Parameters
        ----------
        bitname_or_num : :class:`int` or :class:`str`
            Name of number of the mask.

        Returns
        -------
        :class:`str`
            The comment string.
        """
        return self._bits[bitname_or_num].comment

    def mask(self, name_or_num):
        """Return mask value.

        Parameters
        ----------
        name_or_num : :class:`int` or :class:`str`
            Name of number of the mask.  A string may combine several
            names with ``|``, e.g. ``'BLAT|FOO'``.

        Returns
        -------
        :class:`int`
            The value of the mask.

        Examples
        --------
        >>> bitmask.mask(3)         #- 2**3
        8
        >>> bitmask.mask('BLAT')
        >>> bitmask.mask('BLAT|FOO')
        """
        if isinstance(name_or_num, int):
            return self._bits[name_or_num].mask
        else:
            mask = 0
            for name in name_or_num.split('|'):
                mask |= self._bits[name].mask
            return mask

    def names(self, mask=None):
        """Return list of names of masked bits.

        Parameters
        ----------
        mask : :class:`int`, optional
            The mask integer to convert to names. If not supplied,
            return names of all known bits.

        Returns
        -------
        :class:`list`
            The list of names contained in the mask.  Bits set in `mask`
            that are not defined in this BitMask appear as ``'UNKNOWN<n>'``.
        """
        names = list()
        if mask is None:
            # return names in sorted order of bitnum
            bitnums = [x for x in self._bits.keys() if isinstance(x, int)]
            for bitnum in sorted(bitnums):
                names.append(self._bits[bitnum].name)
        else:
            mask = int(mask)  # workaround numpy issue #2955 for uint64
            bitnum = 0
            while 2**bitnum <= mask:
                if (2**bitnum & mask):
                    if bitnum in self._bits.keys():
                        names.append(self._bits[bitnum].name)
                    else:
                        names.append('UNKNOWN' + str(bitnum))
                bitnum += 1
        return names

    def __getattr__(self, name):
        """Enable ``mask.BITNAME`` equivalent to ``mask['BITNAME']``.
        """
        # Guard against infinite recursion: __getattr__ is only invoked
        # when normal attribute lookup fails, but copy/pickle can probe
        # attributes (including '_bits' itself) on an instance before
        # __init__ has run.  Without this check the ``self._bits`` access
        # below would re-enter __getattr__ until RecursionError.
        if name == '_bits':
            raise AttributeError(name)
        if name in self._bits:
            return self._bits[name]
        else:
            raise AttributeError('Unknown mask bit name ' + name)

    def __repr__(self):
        '''Return yaml representation defining the bits of this bitmask.
        '''
        result = list()
        result.append(self._name + ':')
        # return names in sorted order of bitnum
        bitnums = [x for x in self._bits.keys() if isinstance(x, int)]
        for bitnum in sorted(bitnums):
            bit = self._bits[bitnum]
            # format the line for single bit, with or without extra keys
            line = '  - [{:16s} {:2d}, "{}"'.format(
                bit.name+',', bit.bitnum, bit.comment)
            if len(bit._extra) > 0:
                line = line + ', '+str(bit._extra)+']'
            else:
                line = line + ']'
            result.append(line)
        return "\n".join(result)
|
desihubREPO_NAMEdesiutilPATH_START.@desiutil_extracted@desiutil-main@py@desiutil@bitmask.py@.PATH_END.py
|
{
"filename": "check_drx2drxi_result.py",
"repo_name": "lwa-project/pulsar_archive_pipeline",
"repo_path": "pulsar_archive_pipeline_extracted/pulsar_archive_pipeline-main/check_drx2drxi_result.py",
"type": "Python"
}
|
from __future__ import print_function
import io
import sys

# Scan the log file named on the command line for progress reports and exit
# with status 1 if the final reported percentage is below 99%.
#
# The log uses carriage returns ('\r') for in-place progress updates, so a
# single '\n'-terminated line contains many '\r'-separated chunks.  Open the
# file with newline='\n' so that only '\n' terminates lines and the '\r'
# characters are returned untranslated: with the default (universal
# newlines) mode, Python 3 would translate every '\r' to '\n' and the
# "'\r' in line" test below would never match.  io.open keeps this working
# on Python 2 as well (matching the print_function import above).
with io.open(sys.argv[1], 'r', newline='\n') as fh:
    print("file opened")
    for line in fh.readlines():
        if '\r' in line:
            pars = line.split('\r')
            print("got pars")
            # Second-to-last chunk is the last complete progress report
            # (the final chunk after the trailing '\r' may be empty).
            res = pars[-2].split()
            print(res)
            # Completion percentage is the second-to-last token, e.g. '99.95%'.
            drx2drxi2_res = float(res[-2].strip('%'))
            if drx2drxi2_res < 99.0:
                sys.exit(1)
|
lwa-projectREPO_NAMEpulsar_archive_pipelinePATH_START.@pulsar_archive_pipeline_extracted@pulsar_archive_pipeline-main@check_drx2drxi_result.py@.PATH_END.py
|
{
"filename": "results_varmax.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/tsa/statespace/tests/results/results_varmax.py",
"type": "Python"
}
|
"""
Results for VARMAX tests
Results from Stata using script `test_varmax_stata.do`.
See also Stata time series documentation, in particular `dfactor`.
Data from:
http://www.jmulti.de/download/datasets/e1.dat
Author: Chad Fulton
License: Simplified-BSD
"""
# See http://www.jmulti.de/download/datasets/e1.dat
# 1960:Q1 - 1982Q4
lutkepohl_data = [
[180, 451, 415], [179, 465, 421], [185, 485, 434], [192, 493, 448],
[211, 509, 459], [202, 520, 458], [207, 521, 479], [214, 540, 487],
[231, 548, 497], [229, 558, 510], [234, 574, 516], [237, 583, 525],
[206, 591, 529], [250, 599, 538], [259, 610, 546], [263, 627, 555],
[264, 642, 574], [280, 653, 574], [282, 660, 586], [292, 694, 602],
[286, 709, 617], [302, 734, 639], [304, 751, 653], [307, 763, 668],
[317, 766, 679], [314, 779, 686], [306, 808, 697], [304, 785, 688],
[292, 794, 704], [275, 799, 699], [273, 799, 709], [301, 812, 715],
[280, 837, 724], [289, 853, 746], [303, 876, 758], [322, 897, 779],
[315, 922, 798], [339, 949, 816], [364, 979, 837], [371, 988, 858],
[375, 1025, 881], [432, 1063, 905], [453, 1104, 934], [460, 1131, 968],
[475, 1137, 983], [496, 1178, 1013], [494, 1211, 1034], [498, 1256, 1064],
[526, 1290, 1101], [519, 1314, 1102], [516, 1346, 1145], [531, 1385, 1173],
[573, 1416, 1216], [551, 1436, 1229], [538, 1462, 1242], [532, 1493, 1267],
[558, 1516, 1295], [524, 1557, 1317], [525, 1613, 1355], [519, 1642, 1371],
[526, 1690, 1402], [510, 1759, 1452], [519, 1756, 1485], [538, 1780, 1516],
[549, 1807, 1549], [570, 1831, 1567], [559, 1873, 1588], [584, 1897, 1631],
[611, 1910, 1650], [597, 1943, 1685], [603, 1976, 1722], [619, 2018, 1752],
[635, 2040, 1774], [658, 2070, 1807], [675, 2121, 1831], [700, 2132, 1842],
[692, 2199, 1890], [759, 2253, 1958], [782, 2276, 1948], [816, 2318, 1994],
[844, 2369, 2061], [830, 2423, 2056], [853, 2457, 2102], [852, 2470, 2121],
[833, 2521, 2145], [860, 2545, 2164], [870, 2580, 2206], [830, 2620, 2225],
[801, 2639, 2235], [824, 2618, 2237], [831, 2628, 2250], [830, 2651, 2271],
]
lutkepohl_var1 = {
'params': [
-0.25034303, 0.28759168, 0.81626475, # Phi, row 1
0.023383, 0.19048278, 0.66502259, # Phi, row 2
-0.01492992, 0.53796097, 0.28114733, # Phi, row 3
# .00199294, # Covariance, lower triangle
# .00006096, .00012986,
# .00018523, .00011695, .00016188,
# Note: the following are the Cholesky of the covariance
# matrix defined just above
0.04464236, # Cholesky, lower triangle
0.00136552, 0.01354125,
0.0029089, 0.00834324, 0.00915471
],
'var_oim': [
.01319669, .19413864, .2386643,
.0012437, .01829378, .02234399,
.00107749, .01584584, .01938099,
1.061e-07,
4.981e-09, 4.549e-09,
9.211e-10, 5.825e-10, 7.016e-10],
'loglike': 587.8481018831948,
'aic': -1145.696,
'bic': -1110.934,
}
lutkepohl_var1_diag = {
'params': [
-0.24817904, 0.29283012, 0.80794938, # Phi, row 1
0.02282985, 0.19672157, 0.66329776, # Phi, row 2
-0.01522531, 0.53500874, 0.28859213, # Phi, row 3
0.00199106, 0.00018529, 0.00016179 # Variances, diagonal
],
'var_oim': [
.01314245, .1902972, .23400828,
.00124336, .01840132, .02229946,
.00107322, .01558391, .01909303,
1.057e-07, 9.233e-10, 7.011e-10
],
'loglike': 562.8168476509002,
'aic': -1101.634,
'bic': -1073.824
}
lutkepohl_var1_diag_meas = {
'params': [
-0.24817904, 0.29283012, 0.80794938, # Phi, row 1
0.02282985, 0.19672157, 0.66329776, # Phi, row 2
-0.01522531, 0.53500874, 0.28859213, # Phi, row 3
0.00199106, 0.00018529, 0.00016179, # Variances, diagonal
0, 0, 0 # Measurement error variances
],
'var_oim': [
.01314245, .1902972, .23400828,
.00124336, .01840132, .02229946,
.00107322, .01558391, .01909303,
1.057e-07, 9.233e-10, 7.011e-10,
None, None, None
],
'loglike': 562.8168476509002,
'aic': None,
'bic': None
}
lutkepohl_var1_obs_intercept = {
'params': [
-.24762, .25961003, .75992623, # Phi, row 1
.03186854, -.07271862, .23697765, # Phi, row 2
-.0053055, .2362571, -.19438311, # Phi, row 3
.00199116, .00013515, .00009937 # Variances, diagonal
],
'obs_intercept': [.01799302, .02065458, .01987525], # Intercepts
'var_oim': [
.01317874, .2311403, .33481866,
.00090084, .0157839, .0229119,
.00065737, .01149729, .01661236,
# .00001802, 1.818e-06, 1.086e-06, # Intercept parameters
1.057e-07, 4.869e-10, 2.630e-10],
'loglike': 593.5252693885262,
'aic': -1101.634,
'bic': -1073.824
}
lutkepohl_var1_exog = {
'params': [
-.25549409, .31149462, .92674046, # Phi, row 1
.02935715, .13699757, .5059042, # Phi, row 2
-.00540021, .4598014, .06065565, # Phi, row 3
-.00007533, .00012771, .00018224, # exog
# .00200617, # Covariance, lower triangle
# .00007053, .00017216,
# .00013934, .00010021, .00013833
# Note: the following are the Cholesky of the covariance
# matrix defined just above
.04479029, # Cholesky, lower triangle
.00157467, .01302614,
.00311094, .00731692, .00866687
],
'var_oim': [
.01350243, .20429977, .29684366, # Phi, row 1
.00115871, .01753203, .02547371, # Phi, row 2
.000931, .01408662, .02046759, # Phi, row 3
3.720e-08, 3.192e-09, 2.565e-09 # exog
],
'loglike': 587.4157014188437,
'aic': None,
'bic': None
}
lutkepohl_var1_exog2 = {
'params': [
-.2552236, .21722691, .81525457, # Phi, row 1
.02998355, -.08130972, .24772266, # Phi, row 2
-.00476998, .24016112, -.19910237, # Phi, row 3
.00811096, -.00015244, # exog, y1
.01878355, -.00005086, # exog, y2
.01889825, 2.577e-06, # exog, y3
# .00199918, # Covariance, lower triangle
# .00005435, .00013469,
# .00012306, .00006251, .00010039
# Note: the following are the Cholesky of the covariance
# matrix defined just above
.04471219, # Cholesky, lower triangle
.00121555, .01102644,
.00275227, .00536569, .00800152
],
'var_oim': None,
# 'loglike': 600.9801664685759, # From Stata
'loglike': 600.65449034396283, # From VARMAX (regression test)
'aic': None,
'bic': None
}
lutkepohl_var2 = {
'params': [
-.25244981, .62528114, # Phi_1, row 1
-.13011679, .58173748, # Phi_1, row 2
.05369178, .35716349, # Phi_2, row 1
.03861472, .43812606, # Phi_2, row 2
# .00197786, # Covariance, lower triangle
# .00008091, .00018269
0.04447314, # Covariance cholesky, lower triangle
0.0018193, 0.01339329
],
'var_oim': [
.01315844, .11805816, # Phi_1, row 1
.01321036, .11300702, # Phi_1, row 2
.00122666, .01064478, # Phi_2, row 1
.0012571, .0106738, # Phi_2, row 2
1.048e-07, # Covariance, lower triangle
4.994e-09, 8.940e-10
],
'loglike': 343.3149718445623,
'aic': -664.6299,
'bic': -639.1376
}
fred_varma11 = {
'params': [
.80580312, 0, # Phi_1, row 1
.17348681, -.48093755, # Phi_1, row 2
-.51890703, 0, # Theta_1, row 1
0, 0, # Theta_1, row 2
.0000582, .00003815, # Variances
],
'var_oim': [
.00272999, 0, # Phi_1, row 1
.00164152, .00248576, # Phi_1, row 2
.0049259, 0, # Theta_1, row 1
0, 0, # Theta_1, row 2
1.529e-11, 6.572e-12, # Variances
],
'loglike': 3156.056423235071,
'aic': -6300.113,
'bic': -6275.551
}
fred_vma1 = {
'params': [
.24803941, 0, # Theta_1, row 1
0, 0, # Theta_1, row 2
.00006514, .00004621, # Variances
],
'var_oim': [
.00154773, 0, # Theta_1, row 1
0, 0, # Theta_1, row 2
1.916e-11, 9.639e-12, # Variances
],
'loglike': 3088.909619417645,
'aic': -6171.819,
'bic': -6159.539
}
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@tsa@statespace@tests@results@results_varmax.py@.PATH_END.py
|
{
"filename": "comp_zstats_specrels.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/comp_zstats_specrels.py",
"type": "Python"
}
|
import numpy as np
#!pip install astropy
#!pip install fitsio
from scipy import stats
from scipy.stats import norm
import fitsio
import glob
import os
import sys
import matplotlib.pyplot as plt
import statistics
import argparse
import astropy
from astropy.table import Table,join
from astropy.io import fits
import LSS.common_tools as common
# Compare redshift-fit quality between a fiducial spectroscopic reduction
# (--verspec) and a new one (--verspec_new), tracer by tracer: per-tracer
# success rates are printed and comparison plots are written next to the
# new reduction's LSS catalogs.
parser = argparse.ArgumentParser()
#parser.add_argument("--type", help="tracer type to be selected")
basedir='/global/cfs/cdirs/desi/survey/catalogs'
parser.add_argument("--basedir", help="base directory for input/output",default=basedir)
parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02')
parser.add_argument("--verspec",help="version for redshifts",default='guadalupe')
parser.add_argument("--verspec_new",help="version for redshifts",default='newQSOtemp_tagged')
parser.add_argument("--tracer",help="tracer type(s) (e.g., LRG)",default='all')
parser.add_argument("--mbit5",help="whether to screen against zwarn mask bit 5",default='n')
parser.add_argument("--mbit510",help="whether to screen against zwarn mask bits 5 and 10",default='n')
parser.add_argument("--zwarn0",help="only count as success if zwarn == 0",default='n')
args = parser.parse_args()
basedir = args.basedir
survey = args.survey
specver = args.verspec
#tp = args.tracer
#ff = fitsio.read(filepathLF)
#hdul = fits.open(filepathLF)
#ff2 = fitsio.read(filepathBGS)
#hdul = fits.open(filepathBGS)
if args.tracer == 'all':
    tracers = ['QSO','LRG','ELG','BGS_ANY']
else:
    tracers = [args.tracer]
# Loop over tracers: for each one, build a table joining the fiducial and
# new redshift fits on (TARGETID, TILEID, LOCATION), then compare success
# criteria between the two reductions.
for tp in tracers:
    notqso = ''
    if survey == 'DA02':
        # DESI_TARGET / BGS_TARGET bit used to select each tracer.
        if tp == 'LRG':
            bit = 1 #for selecting LRG
        if tp == 'ELG':
            bit = 2
            notqso = 'notqso'
        if tp == 'QSO':
            bit = 4
        if tp == 'BGS_ANY':
            # Bright-time program for BGS; dark-time for the others.
            zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_zdone.fits'
            zf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/datcomb_bright_spec_zdone.fits'
            dz = Table(fitsio.read(zf))
            desitarg = 'BGS_TARGET'
            wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0)
        else:
            zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_zdone.fits'
            zf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/datcomb_dark_spec_zdone.fits'
            dz = Table(fitsio.read(zf))
            desitarg = 'DESI_TARGET'
            wtype = ((dz[desitarg] & bit) > 0)
            if tp == 'ELG':
                wtype &= ((dz[desitarg] & 4) == 0) #remove QSO
        print(len(dz[wtype]))
        #dz = dz[wtype&wg]
        dz = dz[wtype]
        # Cut to good hardware/observations, then join fiducial and new
        # fits per (TARGETID, TILEID, LOCATION); suffixes _fid and _new.
        dz = common.cut_specdat(dz)
        dz_new = Table(fitsio.read(zf_new))
        dz_new.keep_columns(['Z','ZWARN','DELTACHI2','TARGETID','TILEID','LOCATION'])
        print(len(dz))
        dz = join(dz,dz_new,keys=['TARGETID','TILEID','LOCATION'],table_names=['fid','new'])
        print(str(len(dz))+' should agree with above')
        from LSS.globals import main
        pars = main(tp,args.verspec)
    elif survey == 'main':
        # Not implemented: everything below the sys.exit is unreachable.
        sys.exit(survey+' not supported yet')
        zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_'+tp+'_tarspecwdup_zdone.fits'
        dz = Table(fitsio.read(zf))
        if tp == 'ELG':
            wtype = ((dz['DESI_TARGET'] & 4) == 0) #remove QSO
            dz = dz[wtype]
        dz = common.cut_specdat(dz)
        from LSS.globals import main
        pars = main(tp,args.verspec)
    elif survey == 'SV3':
        # Not implemented: everything below the sys.exit is unreachable.
        sys.exit('not written for SV3 yet')
        zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_Alltiles.fits'
        dz = Table(fitsio.read(zf))
        desitarg = 'SV3_DESI_TARGET'
        bit = 1 #for selecting LRG
        wtype = ((dz[desitarg] & bit) > 0)
        print(len(dz[wtype]))
        #dz = dz[wtype&wg]
        dz = dz[wtype]
        wz = dz['ZWARN'] != 999999 #this is what the null column becomes
        wz &= dz['ZWARN']*0 == 0 #just in case of nans
        wz &= dz['COADD_FIBERSTATUS'] == 0
        ff = dz[wz]
        zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_Alltiles.fits'
        dz = Table(fitsio.read(zf))
        desitarg = 'SV3_BGS_TARGET'
        wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0)
        print(len(dz[wtype]))
        #dz = dz[wtype&wg]
        dz = dz[wtype]
        wz = dz['ZWARN'] != 999999 #this is what the null column becomes
        wz &= dz['ZWARN']*0 == 0 #just in case of nans
        wz &= dz['COADD_FIBERSTATUS'] == 0
        ff2 = dz[wz]
    # Rows with a valid (non-null, non-NaN) ZWARN in each reduction.
    z_tot = dz['ZWARN_fid'] != 999999
    z_tot &= dz['ZWARN_fid']*0 == 0
    z_new = dz['ZWARN_new'] != 999999
    z_new &= dz['ZWARN_new']*0 == 0
    print('number with z to consider fid,new')
    print(len(dz[z_tot]),len(dz[z_new]))
    # Tracer-specific success criteria (z_suc for fiducial, z_sucnew for
    # new) and histogram range (zmin, zmax).
    if tp == 'LRG':
        z_suc= dz['ZWARN_fid']==0
        z_suc &= dz['DELTACHI2_fid']>15
        z_suc &= dz['Z_fid']<1.5
        z_sucnew= dz['ZWARN_new']==0
        z_sucnew &= dz['DELTACHI2_new']>15
        z_sucnew &= dz['Z_new']<1.5
        zmin = 0.4
        zmax = 1.1
    if tp == 'ELG':
        # ELG success uses the OII flux signal-to-noise / deltachi2
        # combination o2c from the emission-line catalogs.
        o2f = fitsio.read(pars.elgzf,columns=['TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR'])
        dz = join(dz,o2f,keys=['TARGETID','TILEID','LOCATION'])
        o2c = np.log10(dz['OII_FLUX'] * np.sqrt(dz['OII_FLUX_IVAR']))+0.2*np.log10(dz['DELTACHI2_fid'])
        z_suc = o2c > 0.9
        o2f_new = fitsio.read(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/emlin_catalog.fits' ,columns=['TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR'])
        dz = join(dz,o2f_new,keys=['TARGETID','TILEID','LOCATION'],table_names=['fid','new'])
        o2c_new = np.log10(dz['OII_FLUX_new'] * np.sqrt(dz['OII_FLUX_IVAR_new']))+0.2*np.log10(dz['DELTACHI2_new'])
        z_sucnew = o2c_new > 0.9
        zmin = 0.6
        zmax = 1.6
    if tp == 'QSO':
        # QSO success comes from the external quasar catalogs (left join:
        # an unmasked Z means the target made the QSO catalog).
        qsozf = pars.qsozf
        if specver == 'guadalupe':
            qsozf = '/global/cfs/cdirs/desi/users/edmondc/QSO_catalog/guadalupe/QSO_cat_guadalupe_cumulative.fits'
        arz = Table(fitsio.read(qsozf))
        arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN'])
        arz['TILEID'] = arz['TILEID'].astype(int)
        #arz = fitsio.read(qsozf,columns=['TARGETID','LOCATION','TILEID','Z','Z_QN'])
        #arz['TILEID'] = arz['TILEID'].astype(int)
        dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF'])
        #dz['Z'].name = 'Z_RR' #rename the original redrock redshifts
        #dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead
        z_suc = dz['Z'].mask == False #previous Z column should have become Z_fid
        if args.mbit5 == 'y':
            z_suc &= dz['ZWARN_fid'] & 2**5 == 0
        qsozf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/QSO_catalog.fits'
        arz = Table(fitsio.read(qsozf_new))
        arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN'])
        arz['TILEID'] = arz['TILEID'].astype(int)
        dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF_new'])
        #print(dz.dtype.names)
        z_sucnew = dz['Z_QF_new'].mask == False
        # Optional extra screening of the new reduction via ZWARN bits.
        if args.mbit5 == 'y':
            z_sucnew &= dz['ZWARN_new'] & 2**5 == 0
        if args.mbit510 == 'y':
            z_sucnew &= dz['ZWARN_new'] & 2**5 == 0
            z_sucnew &= dz['ZWARN_new'] & 2**10 == 0
        if args.zwarn0 == 'y':
            z_sucnew &= dz['ZWARN_new'] == 0
        zmin = 0.8
        zmax = 3.5
    if tp == 'BGS_ANY':
        z_suc = dz['ZWARN_fid']==0
        z_suc &= dz['DELTACHI2_fid']>40
        z_sucnew = dz['ZWARN_new']==0
        z_sucnew &= dz['DELTACHI2_new']>40
        zmin = 0.01
        zmax = 0.6
    #print(len(ff[z_suc]),len(ff[z_tot]))
    print("fiducial zsuccess rate for "+tp,len(dz[z_suc&z_tot])/len(dz[z_tot]))
    print("new zsuccess rate for "+tp,len(dz[z_sucnew&z_new])/len(dz[z_new]))
    print("fraction with zsuccess in both "+tp,len(dz[z_sucnew&z_new&z_suc])/len(dz[z_new]))
    # Comparison plots; the QSO branch uses the quasar-catalog redshift
    # columns (Z / Z_QF_new) instead of the redrock Z_fid / Z_new.
    if tp != 'QSO':
        plt.hist(dz['Z_fid'][z_suc&z_tot],histtype='step',label='fiducial',range=(zmin,zmax),bins=50)
        plt.hist(dz['Z_new'][z_sucnew&z_new],histtype='step',label='new',range=(zmin,zmax),bins=50)
        plt.legend()
        plt.xlabel('redshift')
        plt.ylabel('# of good z in bin')
        plt.title(tp+notqso)
        plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zhistcompGuad.png')
        plt.show()
        plt.plot(dz['Z_fid'][z_suc&z_tot&z_sucnew],dz['Z_new'][z_suc&z_tot&z_sucnew],'k,')
        plt.xlabel('Guadalupe redshift')
        plt.ylabel('new redshift')
        plt.title(tp+notqso)
        plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuad.png')
        plt.show()
    else:
        plt.hist(dz['Z'][z_suc&z_tot],histtype='step',label='fiducial',range=(zmin,zmax),bins=50)
        plt.hist(dz['Z_QF_new'][z_sucnew&z_new],histtype='step',label='new',range=(zmin,zmax),bins=50)
        plt.legend()
        plt.xlabel('redshift')
        plt.ylabel('# of good z in bin')
        plt.title(tp+notqso)
        # Filename suffix records which optional screening was applied.
        fn_app = ''
        if args.mbit5 == 'y':
            fn_app = '_maskbit5'
        if args.mbit510 == 'y':
            fn_app = '_maskbits510'
        if args.zwarn0 == 'y':
            fn_app = '_zwarn0'
        plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zhistcompGuad'+fn_app+'.png')
        plt.show()
        plt.plot(dz['Z'][z_suc&z_tot&z_sucnew],dz['Z_QF_new'][z_suc&z_tot&z_sucnew],'k,')
        plt.xlabel('Guadalupe redshift')
        plt.ylabel('new redshift')
        plt.title(tp+notqso)
        plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuad'+fn_app+'.png')
        plt.show()
        plt.plot(dz['Z_QF_new'][z_suc&z_tot&z_sucnew],(dz['Z_QF_new'][z_suc&z_tot&z_sucnew]-dz['Z'][z_suc&z_tot&z_sucnew])/(1+dz['Z_QF_new'][z_suc&z_tot&z_sucnew]),'k,')
        plt.xlabel('new redshift')
        plt.ylabel('(new z-Guadalupe z)/(1+new z)')
        plt.ylim(-0.02,0.02)
        plt.title(tp+notqso)
        plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zdiffGuad'+fn_app+'.png')
        plt.show()
        plt.plot(dz['Z'][z_suc&z_tot&z_sucnew],dz['Z_QF_new'][z_suc&z_tot&z_sucnew],'k,')
        plt.xlabel('Guadalupe redshift')
        plt.ylabel('new redshift')
        plt.title(tp+notqso)
        plt.xlim(1.3,1.6)
        plt.ylim(1.3,1.6)
        plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuadzoom'+fn_app+'.png')
        plt.show()
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@comp_zstats_specrels.py@.PATH_END.py
|
{
"filename": "make_allowed_ngrids.py",
"repo_name": "johnh2o2/ggadt",
"repo_path": "ggadt_extracted/ggadt-master/make_allowed_ngrids.py",
"type": "Python"
}
|
import numpy as np
from math import *

# Largest allowed grid dimension.
max_gridsize = 10000
# Output file listing the allowed grid sizes.
fname = "allowed_ngrid_values.txt"


def allowed_sizes(limit):
    """Return every integer of the form ``2**i * 3**j * 5**k <= limit``, sorted.

    (5-smooth numbers; presumably the grid sizes the FFT backend accepts —
    confirm against the consumer of the output file.)

    Parameters
    ----------
    limit : int
        Largest size to include.

    Returns
    -------
    list of int
        Sorted allowed sizes.  Always includes 1 for ``limit >= 1``.
    """
    # Multiply up through the powers directly instead of the original
    # ``range(max_gridsize/2)`` loops: ``max_gridsize/2`` is a float under
    # Python 3, so ``range(n)`` raised TypeError there.  Each product of
    # distinct prime powers is unique, so no duplicates are produced.
    sizes = []
    q = 1  # powers of 2
    while q <= limit:
        p = q  # q times powers of 3
        while p <= limit:
            r = p  # p times powers of 5
            while r <= limit:
                sizes.append(r)
                r *= 5
            p *= 3
        q *= 2
    return sorted(sizes)


def main():
    """Write the allowed sizes to `fname`: the count, then one size per line."""
    sizes = allowed_sizes(max_gridsize)
    with open(fname, 'w') as f:
        f.write("%d\n" % (len(sizes)))
        for s in sizes:
            f.write("%d\n" % (s))


if __name__ == '__main__':
    main()
|
johnh2o2REPO_NAMEggadtPATH_START.@ggadt_extracted@ggadt-master@make_allowed_ngrids.py@.PATH_END.py
|
{
"filename": "_parents.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/icicle/_parents.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ParentsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the ``parents`` attribute of icicle traces."""

    def __init__(self, plotly_name="parents", parent_name="icicle", **kwargs):
        # Pop the edit type first so an explicit kwarg overrides the
        # "calc" default rather than being passed twice.
        edit_type = kwargs.pop("edit_type", "calc")
        super(ParentsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@icicle@_parents.py@.PATH_END.py
|
{
"filename": "_widthsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/treemap/marker/line/_widthsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for ``treemap.marker.line.widthsrc``."""

    def __init__(
        self, plotly_name="widthsrc", parent_name="treemap.marker.line", **kwargs
    ):
        # Pop the edit type first so an explicit kwarg overrides the
        # "none" default rather than being passed twice.
        edit_type = kwargs.pop("edit_type", "none")
        super(WidthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@treemap@marker@line@_widthsrc.py@.PATH_END.py
|
{
"filename": "particles.md",
"repo_name": "hannorein/REBOUND",
"repo_path": "REBOUND_extracted/REBOUND-main/docs/particles.md",
"type": "Markdown"
}
|
# Particle structure
A particle is represented by the `reb_particle` structure in C.
The python class `Particle` is an abstraction of the `reb_particle` structure in C.
We will refer to both the C structure and the python object interchangeably as the *particle structure* and *particle object*.
The particle object contains the following variables which can be directly manipulated:
`#!c double m`
: mass
`#!c double r`
: physical radius of the particle
`#!c double x, y, z, vx, vy, vz`
: position and velocity coordinates
`#!c uint32_t hash`
: integer or hash value used to identify the particle
You can create a particle object which is not part of a REBOUND simulation.
=== "C"
```c
    struct reb_particle p = {.m=1., .x=0., .vy=0.};
```
=== "Python"
```python
p = rebound.Particle(m=1., x=0., vy=0.)
```
However, in most cases you will work with particles which have been added to a REBOUND simulation.
You then access the particle using the simulation's `particles` array:
=== "C"
```c
struct reb_simulation* r = reb_simulation_create();
// ... setup simulation, add particles ...
r->particles[0].x = 1;
```
=== "Python"
```python
sim = rebound.Simulation()
# ... setup simulation, add particles ...
sim.particles[0].x = 1
```
Alternatively you can assign a hash value to particles and access them using the following syntax:
=== "C"
```c
struct reb_simulation* r = reb_simulation_create();
reb_simulation_add_fmt(r, "m", 1.);
r->particles[0].hash = reb_hash("star");
reb_simulation_add_fmt(r, "a", 1.);
r->particles[1].hash = reb_hash("planet1");
struct reb_particle* p = reb_simulation_particle_by_hash(r, reb_hash("planet1"));
```
=== "Python"
```python
sim = rebound.Simulation()
sim.add(m=1., hash="star")
sim.add(a=1., hash="planet1")
p = sim.particles["planet1"]
```
|
hannoreinREPO_NAMEREBOUNDPATH_START.@REBOUND_extracted@REBOUND-main@docs@particles.md@.PATH_END.py
|
{
"filename": "unitsystem.py",
"repo_name": "sbird/fake_spectra",
"repo_path": "fake_spectra_extracted/fake_spectra-master/fake_spectra/unitsystem.py",
"type": "Python"
}
|
"""Unit system for the spectral code."""
import math
import numpy as np
class UnitSystem(object):
    """Physical constants and unit conversions used by the spectral code.

    Factored out of Spectra so the unit system can be shared.
    """

    def __init__(self, UnitMass_in_g=1.98892e43, UnitLength_in_cm=3.085678e21, UnitVelocity_in_cm_per_s=1e5):
        # Gadget internal units: mass 1e10 M_sun/h (in g/h), length
        # 1 kpc/h (in cm/h), velocity 1 km/s (in cm/s).
        self.UnitMass_in_g = UnitMass_in_g
        self.UnitLength_in_cm = UnitLength_in_cm
        # Derived density unit in cgs.
        self.UnitDensity_in_cgs = self.UnitMass_in_g/self.UnitLength_in_cm**3
        self.UnitVelocity_in_cm_per_s = UnitVelocity_in_cm_per_s
        # Internal energy per unit mass: 1 (km/s)^2 in (cm/s)^2.
        self.UnitInternalEnergy_in_cgs = self.UnitVelocity_in_cm_per_s**2
        # Speed of light in cm/s.
        # NOTE(review): 2.99e10 is ~0.3% below the CODATA value 2.9979e10;
        # confirm this coarse value is intentional before changing it, as
        # any change alters downstream results.
        self.light = 2.99e10
        # Proton mass in g.
        self.protonmass = 1.67262178e-24
        # Boltzmann constant in cgs (erg/K).
        self.boltzmann = 1.38066e-16
        # Newton's gravitational constant in cm^3/g/s^2.
        self.gravcgs = 6.674e-8
        # 100 km/s/Mpc expressed in 1/s.
        self.h100 = 3.2407789e-18
        # Adiabatic index for a monatomic ideal gas.
        self.gamma = 5./3

    def absorption_distance(self, speclen, red):
        """
        Compute X(z), the absorption distance per sightline (dimensionless).

        X(z) = int (1+z)^2 H_0 / H(z) dz; for small dz, dz ~ H(z)/c dL,
        so X(z) ~ (1+z)^2 H_0/c dL.

        Arguments:
            speclen - spectral length (usually box size in comoving kpc/h)
            red - redshift
        """
        # Units cancel as: (h/s) * (s/cm) * (kpc/h) * (cm/kpc).
        conv = self.h100/self.light
        return conv*speclen*self.UnitLength_in_cm*(1+red)**2

    def redshift_distance(self, speclen, red, omegam0):
        """Compute dz over the box, dz = H(z)/c dL.

        Arguments:
            speclen - spectral length (usually box size in comoving kpc/h)
            red - redshift
            omegam0 - matter density parameter at z=0
        """
        # Units cancel as: (h/s) * (s/cm) * (kpc/h) * (cm/kpc).
        hz = self.hubble(red, omegam0)
        return hz/self.light*speclen*self.UnitLength_in_cm

    def hubble(self, z, omegam0):
        """Hubble parameter H(z) in 1/s for a flat LCDM cosmology."""
        matter = omegam0*(1+z)**3
        lam = 1-omegam0
        return self.h100*np.sqrt(matter + lam)

    def rho_crit(self, hubble):
        """Get the critical density at z=0 in units of g cm^-3.

        Arguments:
            hubble - dimensionless Hubble parameter h (H0 = 100 h km/s/Mpc)
        """
        # Convert h to H0 in 1/s, then rho_c = 3 H0^2 / (8 pi G).
        hz = self.h100*hubble
        return 3*hz**2/(8*math.pi*self.gravcgs)
|
sbirdREPO_NAMEfake_spectraPATH_START.@fake_spectra_extracted@fake_spectra-master@fake_spectra@unitsystem.py@.PATH_END.py
|
{
"filename": "descriptors.py",
"repo_name": "facebookresearch/faiss",
"repo_path": "faiss_extracted/faiss-main/benchs/bench_fw/descriptors.py",
"type": "Python"
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
import faiss # @manual=//faiss/python:pyfaiss
from .benchmark_io import BenchmarkIO
from .utils import timer
logger = logging.getLogger(__name__)
# Important: filenames end with . without extension (npy, codec, index),
# when writing files, you are required to filename + "npy" etc.
@dataclass
class IndexDescriptorClassic:
    """Legacy (classic) description of a single index/codec to benchmark.

    Exactly one of `path` (pre-built artifact) or `factory` (faiss factory
    string) should be set; the remaining fields carry optional build- and
    search-time parameters plus range-search metric definitions.
    """
    bucket: Optional[str] = None
    # either path or factory should be set,
    # but not both at the same time.
    path: Optional[str] = None
    factory: Optional[str] = None
    codec_alias: Optional[str] = None
    construction_params: Optional[List[Dict[str, int]]] = None
    search_params: Optional[Dict[str, int]] = None
    # range metric definitions
    # key: name
    # value: one of the following:
    #
    # radius
    #    [0..radius) -> 1
    #    [radius..inf) -> 0
    #
    # [[radius1, score1], ...]
    #    [0..radius1) -> score1
    #    [radius1..radius2) -> score2
    #
    # [[radius1_from, radius1_to, score1], ...]
    #    [radius1_from, radius1_to) -> score1,
    #    [radius2_from, radius2_to) -> score2
    range_metrics: Optional[Dict[str, Any]] = None
    radius: Optional[float] = None
    training_size: Optional[int] = None

    def __hash__(self):
        """Hash on the string form so descriptors can be used as dict/set keys.

        Relies on the dataclass-generated ``__repr__`` (via ``str``)
        covering all fields.
        """
        return hash(str(self))
@dataclass
class DatasetDescriptor:
    """Description of a vector dataset (training, database or query set)."""
    # namespace possible values:
    # 1. a hive namespace
    # 2. 'std_t', 'std_d', 'std_q' for the standard datasets
    #    via faiss.contrib.datasets.dataset_from_name()
    #    t - training, d - database, q - queries
    #    eg. "std_t"
    # 3. 'syn' for synthetic data
    # 4. None for local files
    namespace: Optional[str] = None

    # tablename possible values, corresponding to the
    # namespace value above:
    # 1. a hive table name
    # 2. name of the standard dataset as recognized
    #    by faiss.contrib.datasets.dataset_from_name()
    #    eg. "bigann1M"
    # 3. d_seed, eg. 128_1234 for 128 dimensional vectors
    #    with seed 1234
    # 4. a local file name (relative to benchmark_io.path)
    tablename: Optional[str] = None

    # partition names and values for hive
    # eg. ["ds=2021-09-01"]
    partitions: Optional[List[str]] = None

    # number of vectors to load from the dataset
    num_vectors: Optional[int] = None

    embedding_column: Optional[str] = None
    embedding_id_column: Optional[str] = None

    # unused in open-source
    splits_distribution: Optional[List[List[bytes]]] = None

    # unused in open-source
    splits: Optional[List[bytes]] = None

    # unused in open-source
    serialized_df: Optional[str] = None

    sampling_rate: Optional[float] = None

    # sampling column for xdb
    sampling_column: Optional[str] = None

    # blob store
    bucket: Optional[str] = None
    path: Optional[str] = None

    # desc_name
    desc_name: Optional[str] = None

    def __hash__(self):
        # Hash on the canonical filename, which encodes all the
        # identity-relevant fields.
        return hash(self.get_filename())

    def get_filename(
        self,
        prefix: Optional[str] = None,
    ) -> str:
        """Build (and cache in ``desc_name``) the canonical filename stem.

        Concatenates prefix, namespace, tablename, sanitized partitions and
        vector count with '_' separators, ending with '.' per the file
        naming convention noted at the top of this module.  Requires
        ``tablename`` to be set.
        """
        if self.desc_name is not None:
            return self.desc_name
        filename = ""
        if prefix is not None:
            filename += prefix + "_"
        if self.namespace is not None:
            filename += self.namespace + "_"
        assert self.tablename is not None
        filename += self.tablename
        if self.partitions is not None:
            # '=' and '/' are not filename-safe; flatten them to '_'.
            filename += "_" + "_".join(
                self.partitions
            ).replace("=", "_").replace("/", "_")
        if self.num_vectors is not None:
            filename += f"_{self.num_vectors}"
        filename += "."
        self.desc_name = filename
        return self.desc_name

    def get_kmeans_filename(self, k):
        """Filename stem for the cached k-means centroids of this dataset."""
        return f"{self.get_filename()}kmeans_{k}."

    def k_means(self, io, k, dry_run):
        """Cluster this dataset into `k` centroids, caching the result.

        Returns a tuple ``(centroids_descriptor, train_time, filename)``:
        on a cache hit or after training the first two are populated and
        the filename is None; with ``dry_run`` and no cached result,
        returns ``(None, None, kmeans_filename)`` to signal pending work.
        ``io`` is a BenchmarkIO-like object used for file existence checks,
        dataset loading and result persistence.
        """
        logger.info(f"k_means {k} {self}")
        kmeans_vectors = DatasetDescriptor(
            tablename=f"{self.get_filename()}kmeans_{k}"
        )
        kmeans_filename = kmeans_vectors.get_filename() + "npy"
        meta_filename = kmeans_vectors.get_filename() + "json"
        if not io.file_exist(kmeans_filename) or not io.file_exist(
            meta_filename
        ):
            if dry_run:
                return None, None, kmeans_filename
            x = io.get_dataset(self)
            # NOTE(review): gpu=True assumes a GPU-capable faiss build is
            # available here — confirm for CPU-only environments.
            kmeans = faiss.Kmeans(d=x.shape[1], k=k, gpu=True)
            _, t, _ = timer("k_means", lambda: kmeans.train(x))
            io.write_nparray(kmeans.centroids, kmeans_filename)
            io.write_json({"k_means_time": t}, meta_filename)
        else:
            t = io.read_json(meta_filename)["k_means_time"]
        return kmeans_vectors, t, None
@dataclass
class IndexBaseDescriptor:
    """Shared base for codec/index/knn descriptors: dimensionality, metric,
    cached names, blob-store location and thread count, plus helpers to
    encode parameter dicts into name fragments."""

    d: int
    metric: str
    desc_name: Optional[str] = None
    flat_desc_name: Optional[str] = None
    bucket: Optional[str] = None
    path: Optional[str] = None
    num_threads: int = 1

    def get_name(self) -> str:
        """Subclasses must derive and return the descriptor's unique name."""
        raise NotImplementedError()

    def get_path(self, benchmark_io: BenchmarkIO) -> Optional[str]:
        """Return the remote filepath for this descriptor, resolving and
        caching it on first use."""
        if self.path is None:
            self.path = benchmark_io.get_remote_filepath(self.desc_name)
        return self.path

    @staticmethod
    def param_dict_list_to_name(param_dict_list):
        """Concatenate the encodings of all construction-parameter dicts,
        prefixed "cp0", "cp1", ... in order."""
        if not param_dict_list:
            return ""
        return "".join(
            IndexBaseDescriptor.param_dict_to_name(params, f"cp{i}")
            for i, params in enumerate(param_dict_list)
        )

    @staticmethod
    def param_dict_to_name(param_dict, prefix="sp"):
        """Encode one parameter dict as "<prefix>_key_value..._" + ".".

        The "snap" key and zero-valued "lsq_gpu"/"use_beam_LUT" entries are
        skipped; if nothing remains, return "" (no trailing dot).
        """
        if not param_dict:
            return ""
        parts = []
        for key, value in param_dict.items():
            if key == "snap":
                continue
            if key in ("lsq_gpu", "use_beam_LUT") and value == 0:
                continue
            parts.append(f"_{key}_{value}")
        if not parts:
            return ""
        return prefix + "".join(parts) + "."
@dataclass
class CodecDescriptor(IndexBaseDescriptor):
    """Describes a vector codec: either a faiss factory string plus training
    vectors (untrained), or a path to an already-trained codec."""

    # either path or factory should be set,
    # but not both at the same time.
    factory: Optional[str] = None
    construction_params: Optional[List[Dict[str, int]]] = None
    training_vectors: Optional[DatasetDescriptor] = None
    # NOTE(review): being annotated, this is itself a dataclass *field*
    # (overridable via __init__), not a plain class constant — confirm
    # that is intended.
    FILENAME_PREFIX: str = "xt"

    def __post_init__(self):
        # eagerly compute and cache desc_name
        self.get_name()

    def is_trained(self):
        """True when this descriptor points at an already-trained codec."""
        return self.factory is None and self.path is not None

    def is_valid(self):
        """True when at least one of factory / path is set."""
        return self.factory is not None or self.path is not None

    def get_name(self) -> str:
        """Return the cached unique name, deriving it from the factory
        string or the path on first use."""
        if self.desc_name is not None:
            return self.desc_name
        if self.factory is not None:
            self.desc_name = self.name_from_factory()
            return self.desc_name
        if self.path is not None:
            self.desc_name = self.name_from_path()
            return self.desc_name
        raise ValueError("name, factory or path must be set")

    def flat_name(self) -> str:
        """Name of the equivalent flat (uncompressed) codec; cached."""
        if self.flat_desc_name is not None:
            return self.flat_desc_name
        self.flat_desc_name = f"Flat.d_{self.d}.{self.metric.upper()}."
        return self.flat_desc_name

    def path(self, benchmark_io) -> str:
        # NOTE(review): this method shares its name with the inherited
        # ``path`` dataclass field. When the field is set, the instance
        # attribute shadows this method; when it is not set, ``self.path``
        # below resolves to the bound method (never None), so the first
        # branch always fires and returns the method object — this looks
        # like a bug; the base class's get_path() appears to be the
        # intended accessor. Confirm against callers before relying on it.
        if self.path is not None:
            return self.path
        return benchmark_io.get_remote_filepath(self.get_name())

    def name_from_factory(self) -> str:
        """Derive the unique name from the factory string, dimensions,
        metric, training vectors and construction params."""
        assert self.factory is not None
        name = f"{self.factory.replace(',', '_')}."
        assert self.d is not None
        assert self.metric is not None
        name += f"d_{self.d}.{self.metric.upper()}."
        if self.factory != "Flat":
            # everything except Flat requires training data
            assert self.training_vectors is not None
            name += self.training_vectors.get_filename(CodecDescriptor.FILENAME_PREFIX)
        name += IndexBaseDescriptor.param_dict_list_to_name(self.construction_params)
        return name

    def name_from_path(self):
        """Derive the name from the basename of ``path`` (extension dropped,
        trailing dot kept)."""
        assert self.path is not None
        filename = os.path.basename(self.path)
        ext = filename.split(".")[-1]
        # NOTE(review): ext is the last dot-separated component, so
        # endswith() is always true and the else branch is unreachable.
        if filename.endswith(ext):
            name = filename[:-len(ext)]
        else:  # should never hit this rather raise value error
            name = filename
        return name

    def alias(self, benchmark_io: BenchmarkIO):
        """Return a lightweight copy referring to this codec by name (and by
        bucket/path when the IO backend has a blob-store bucket)."""
        if hasattr(benchmark_io, "bucket"):
            return CodecDescriptor(desc_name=self.get_name(), bucket=benchmark_io.bucket, path=self.get_path(benchmark_io), d=self.d, metric=self.metric)
        return CodecDescriptor(desc_name=self.get_name(), d=self.d, metric=self.metric)
@dataclass
class IndexDescriptor(IndexBaseDescriptor):
    """Descriptor of a built index: a codec populated with a database
    dataset, or a reference to an already-built index by name."""

    codec_desc: Optional[CodecDescriptor] = None
    database_desc: Optional[DatasetDescriptor] = None
    FILENAME_PREFIX: str = "xb"

    def __hash__(self):
        # dataclass eq suppresses inherited __hash__; restore one from repr
        return hash(str(self))

    def __post_init__(self):
        # eagerly derive and cache desc_name
        self.get_name()

    def is_built(self):
        """True when this refers to an already-built index (no codec or
        database descriptor needed to construct it)."""
        return self.codec_desc is None and self.database_desc is None

    def get_name(self) -> str:
        """Return the cached unique name, composed from the codec name and
        the database filename on first use."""
        if self.desc_name is None:
            codec_part = self.codec_desc.get_name()
            db_part = self.database_desc.get_filename(
                prefix=IndexDescriptor.FILENAME_PREFIX
            )
            self.desc_name = codec_part + db_part
        return self.desc_name

    def flat_name(self):
        """Name of the equivalent flat (ground-truth) index; cached."""
        if self.flat_desc_name is None:
            flat_codec = self.codec_desc.flat_name()
            db_part = self.database_desc.get_filename(
                prefix=IndexDescriptor.FILENAME_PREFIX
            )
            self.flat_desc_name = flat_codec + db_part
        return self.flat_desc_name

    # An alias refers to the index after it has been uploaded to the blob
    # store, so later runs can reference it by name (and bucket/path) only.
    def alias(self, benchmark_io: BenchmarkIO):
        kwargs = {
            "desc_name": self.get_name(),
            "d": self.d,
            "metric": self.metric,
        }
        if hasattr(benchmark_io, "bucket"):
            kwargs["bucket"] = benchmark_io.bucket
            kwargs["path"] = self.get_path(benchmark_io)
        return IndexDescriptor(**kwargs)
@dataclass
class KnnDescriptor(IndexBaseDescriptor):
    """Describes one k-NN search (or reconstruction) experiment: an index,
    a query dataset, search parameters and k."""

    index_desc: Optional[IndexDescriptor] = None
    gt_index_desc: Optional[IndexDescriptor] = None
    query_dataset: Optional[DatasetDescriptor] = None
    search_params: Optional[Dict[str, int]] = None
    reconstruct: bool = False
    FILENAME_PREFIX: str = "q"

    # Range-metric definitions, keyed by metric name. Each value is one of:
    #
    #   radius
    #     [0..radius) -> 1, [radius..inf) -> 0
    #
    #   [[radius1, score1], ...]
    #     [0..radius1) -> score1, [radius1..radius2) -> score2, ...
    #
    #   [[radius1_from, radius1_to, score1], ...]
    #     [radius1_from..radius1_to) -> score1,
    #     [radius2_from..radius2_to) -> score2, ...
    range_metrics: Optional[Dict[str, Any]] = None
    radius: Optional[float] = None
    k: int = 1
    range_ref_index_desc: Optional[str] = None

    def __hash__(self):
        # dataclass eq suppresses inherited __hash__; restore one from repr
        return hash(str(self))

    def get_name(self):
        """Unique name for this run: index + search params + queries + k +
        threads, suffixed "rec."/"knn." by mode."""
        mode = "rec." if self.reconstruct else "knn."
        return "".join(
            (
                self.index_desc.get_name(),
                IndexBaseDescriptor.param_dict_to_name(self.search_params),
                self.query_dataset.get_filename(KnnDescriptor.FILENAME_PREFIX),
                f"k_{self.k}.",
                f"t_{self.num_threads}.",
                mode,
            )
        )

    def flat_name(self):
        """Like get_name() but against the flat (ground-truth) index and
        without search params; cached in flat_desc_name."""
        if self.flat_desc_name is None:
            mode = "rec." if self.reconstruct else "knn."
            self.flat_desc_name = "".join(
                (
                    self.index_desc.flat_name(),
                    self.query_dataset.get_filename(
                        KnnDescriptor.FILENAME_PREFIX
                    ),
                    f"k_{self.k}.",
                    f"t_{self.num_threads}.",
                    mode,
                )
            )
        return self.flat_desc_name
|
facebookresearchREPO_NAMEfaissPATH_START.@faiss_extracted@faiss-main@benchs@bench_fw@descriptors.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "astro-datalab/notebooks-latest",
"repo_path": "notebooks-latest_extracted/notebooks-latest-master/06_EPO/e-TeenAstronomyCafe_Spanish/08_Breaking_the_Solar_System/README.md",
"type": "Markdown"
}
|
**08 Breaking the Solar System**
For information about the program, please visit: http://www.teenastronomycafe.org/
If you want to test this notebook you can:
[](https://colab.research.google.com/github/astro-datalab/notebooks-latest/blob/master/06_EPO/e-TeenAstronomyCafe/08_Breaking_the_Solar_System/Breaking_the_Solar_System.ipynb)
GET IN TOUCH: If you have suggestions to make the notebooks better, or if you encounter problems, please get in touch with the Astro Data Lab team at datalab@noirlab.edu
|
astro-datalabREPO_NAMEnotebooks-latestPATH_START.@notebooks-latest_extracted@notebooks-latest-master@06_EPO@e-TeenAstronomyCafe_Spanish@08_Breaking_the_Solar_System@README.md@.PATH_END.py
|
{
"filename": "alpbetMpi.py",
"repo_name": "jronayne/PyTransport",
"repo_path": "PyTransport_extracted/PyTransport-master/Examples/QuartAx/alpbetMpi.py",
"type": "Python"
}
|
#################### generate alpha beta bispectrum using PyTransAxQrt ############################################################
# MPI script: evolves a quartic-axion inflation background, then computes the
# bispectrum over an (alpha, beta) grid in parallel and saves the arrays on
# rank 0.  Fixed to run under Python 3 as well as Python 2: the print
# statement is now the print() function and the grid size uses integer
# division (np.linspace's `num` must be an int).
from matplotlib import pyplot as plt
from pylab import *
import sys
import math
import numpy as np
from mpi4py import MPI

location = "/Users/david/Dropbox/PyTransportDist/PyTransport/"  # this should be the location of the PyTransport folder
sys.path.append(location)  # sets up python path to give access to PyTransSetup

import PyTransSetup

PyTransSetup.pathSet()  # this sets the other paths that PyTransport uses

import PyTransAxQrt as PyT  # import module
import PyTransScripts as PyS

comm = MPI.COMM_WORLD

########################### set initial field values and parameters for a simple example run ###################################################
nF = PyT.nF()  # gets number of fields (useful check)
nP = PyT.nP()  # gets number of parameters needed (useful check)
fields = np.array([23.5, .5 - 0.001])
params = np.zeros(nP)
params[0] = 1. * pow(10., -10); params[1] = 1.; params[2] = 25.0**2.0 * params[0] / 4.0 / math.pi**2;

V = PyT.V(fields, params)  # calculate potential from some initial conditions
dV = PyT.dV(fields, params)  # calculate derivatives of potential (changes dV to derivatives)

initial = np.concatenate((fields, np.array([0., 0.])))  # set initial conditions using slow roll expression
############################################################################################################################################

tols = np.array([10**-8, 10**-8])

################################## run the background fiducial run #########################################################################
Nstart = 0.0
Nend = 70.0
t = np.linspace(Nstart, Nend, 1000)  # array at which output is returned
back = PyT.backEvolve(t, initial, params, tols, False)  # The output is read into the back numpy array
############################################################################################################################################

rank = comm.Get_rank()

side = 140
nsnaps = 150
Nbefore = 4.5
NExit = 14.0
kt = PyS.kexitN(NExit, back, params, PyT)
kt = 3. * kt
alpha = np.linspace(-1, 1, side)
# integer division: np.linspace requires an integer sample count
beta = np.linspace(0, 1, side // 2)

Bztot, Pz1tot, Pz2tot, Pz3tot, times, snaps = PyS.alpBetSpecMpi(kt, alpha, beta, back, params, Nbefore, nsnaps, tols, PyT)

if rank == 0:
    # only the root rank writes the gathered results to disk
    bet, alp = np.meshgrid(beta, alpha)
    np.save('data/alp', alp); np.save('data/bet', bet); np.save('data/alBetBi', Bztot); np.save('data/times', times)
    np.save('data/alBetPz1', Pz1tot); np.save('data/alBetPz2.npy', Pz2tot); np.save('data/alBetPz3', Pz3tot)
    np.save('data/snaps', snaps)

print("\n\n process", rank, "done \n\n")
|
jronayneREPO_NAMEPyTransportPATH_START.@PyTransport_extracted@PyTransport-master@Examples@QuartAx@alpbetMpi.py@.PATH_END.py
|
{
"filename": "test_frames.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/coordinates/tests/test_frames.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from copy import deepcopy
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
EarthLocation,
SkyCoord,
galactocentric_frame_defaults,
)
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import (
Attribute,
CoordinateAttribute,
DifferentialAttribute,
EarthLocationAttribute,
QuantityAttribute,
TimeAttribute,
)
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.builtin_frames import (
FK4,
FK5,
GCRS,
HCRS,
ICRS,
ITRS,
AltAz,
Galactic,
Galactocentric,
HADec,
)
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES,
CartesianDifferential,
)
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.tests.helper import PYTEST_LT_8_0
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
from .test_representation import unitphysics # this fixture is used below # noqa: F401
def setup_function(func):
    """Save a copy of 'REPRESENTATION_CLASSES' as an attribute on *func*,
    so teardown_function can restore it after the test runs."""
    func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
    """Reset REPRESENTATION_CLASSES to the original value saved on *func*,
    undoing any representation classes a test registered."""
    REPRESENTATION_CLASSES.clear()
    REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
def test_frame_attribute_descriptor():
    """Unit tests of the Attribute descriptor."""

    class TestAttributes:
        attr_none = Attribute()
        attr_2 = Attribute(default=2)
        attr_3_attr2 = Attribute(default=3, secondary_attribute="attr_2")
        attr_none_attr2 = Attribute(default=None, secondary_attribute="attr_2")
        attr_none_nonexist = Attribute(default=None, secondary_attribute="nonexist")

    t = TestAttributes()

    # Defaults: the secondary_attribute is only consulted when default is None
    assert t.attr_none is None
    assert t.attr_2 == 2
    assert t.attr_3_attr2 == 3
    assert t.attr_none_attr2 == t.attr_2
    assert t.attr_none_nonexist is None  # No default and non-existent secondary attr

    # Setting values via '_'-prefixed internal vars
    # (as would normally done in __init__)
    t._attr_none = 10
    assert t.attr_none == 10
    t._attr_2 = 20
    assert t.attr_2 == 20
    assert t.attr_3_attr2 == 3
    assert t.attr_none_attr2 == t.attr_2
    t._attr_none_attr2 = 40
    assert t.attr_none_attr2 == 40

    # Make sure setting values via public attribute fails
    with pytest.raises(AttributeError) as err:
        t.attr_none = 5
    assert "Cannot set frame attribute" in str(err.value)
def test_frame_subclass_attribute_descriptor():
    """Unit test of the attribute descriptors in subclasses."""
    _EQUINOX_B1980 = Time("B1980", scale="tai")

    class MyFK4(FK4):
        # equinox inherited from FK4, obstime overridden, and newattr is new
        obstime = TimeAttribute(default=_EQUINOX_B1980)
        newattr = Attribute(default="newattr")

    mfk4 = MyFK4()
    assert mfk4.equinox.value == "B1950.000"
    assert mfk4.obstime.value == "B1980.000"
    assert mfk4.newattr == "newattr"

    # inherited, overridden and newly-defined attributes are all reported
    assert set(mfk4.get_frame_attr_defaults()) == {"equinox", "obstime", "newattr"}

    # explicit keyword values override all defaults
    mfk4 = MyFK4(equinox="J1980.0", obstime="J1990.0", newattr="world")
    assert mfk4.equinox.value == "J1980.000"
    assert mfk4.obstime.value == "J1990.000"
    assert mfk4.newattr == "world"
def test_frame_multiple_inheritance_attribute_descriptor():
    """
    Ensure that all attributes are accumulated in case of inheritance from
    multiple BaseCoordinateFrames. See
    https://github.com/astropy/astropy/pull/11099#issuecomment-735829157
    """

    class Frame1(BaseCoordinateFrame):
        attr1 = Attribute()

    class Frame2(BaseCoordinateFrame):
        attr2 = Attribute()

    class Frame3(Frame1, Frame2):
        pass

    # Frame3 must see attributes from both parents
    assert len(Frame3.frame_attributes) == 2
    assert "attr1" in Frame3.frame_attributes
    assert "attr2" in Frame3.frame_attributes

    # In case the same attribute exists in both frames, the one from the
    # left-most class in the MRO should take precedence
    class Frame4(BaseCoordinateFrame):
        attr1 = Attribute()
        attr2 = Attribute()

    class Frame5(Frame1, Frame4):
        pass

    assert Frame5.frame_attributes["attr1"] is Frame1.frame_attributes["attr1"]
    assert Frame5.frame_attributes["attr2"] is Frame4.frame_attributes["attr2"]
def test_differentialattribute():
    """DifferentialAttribute coerces raw input into its allowed class, but
    only when exactly one allowed class is given."""
    # Test logic of passing input through to allowed class
    vel = [1, 2, 3] * u.km / u.s
    dif = r.CartesianDifferential(vel)

    class TestFrame(BaseCoordinateFrame):
        attrtest = DifferentialAttribute(
            default=dif, allowed_classes=[r.CartesianDifferential]
        )

    frame1 = TestFrame()
    frame2 = TestFrame(attrtest=dif)
    frame3 = TestFrame(attrtest=vel)  # raw Quantity is coerced to the class

    assert np.all(frame1.attrtest.d_xyz == frame2.attrtest.d_xyz)
    assert np.all(frame1.attrtest.d_xyz == frame3.attrtest.d_xyz)

    # This shouldn't work if there is more than one allowed class:
    class TestFrame2(BaseCoordinateFrame):
        attrtest = DifferentialAttribute(
            default=dif,
            allowed_classes=[r.CartesianDifferential, r.CylindricalDifferential],
        )

    frame1 = TestFrame2()
    frame2 = TestFrame2(attrtest=dif)
    with pytest.raises(TypeError):
        TestFrame2(attrtest=vel)
def test_create_data_frames():
    """Frames built from representation objects and from preferred-name
    keywords must carry identical data, exposed read-only via properties."""
    # from repr
    i1 = ICRS(r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc))
    i2 = ICRS(r.UnitSphericalRepresentation(lon=1 * u.deg, lat=2 * u.deg))

    # from preferred name
    i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
    i4 = ICRS(ra=1 * u.deg, dec=2 * u.deg)

    assert i1.data.lat == i3.data.lat
    assert i1.data.lon == i3.data.lon
    assert i1.data.distance == i3.data.distance

    assert i2.data.lat == i4.data.lat
    assert i2.data.lon == i4.data.lon

    # now make sure the preferred names work as properties
    assert_allclose(i1.ra, i3.ra)
    assert_allclose(i2.ra, i4.ra)
    assert_allclose(i1.distance, i3.distance)

    # frame data is read-only through the preferred-name property
    with pytest.raises(AttributeError):
        i1.ra = [11.0] * u.deg
def test_create_orderered_data():
    """Positional (ordered) arguments map onto each frame's preferred
    component names; excess or duplicated positions raise TypeError."""
    TOL = 1e-10 * u.deg

    i = ICRS(1 * u.deg, 2 * u.deg)
    assert (i.ra - 1 * u.deg) < TOL
    assert (i.dec - 2 * u.deg) < TOL

    g = Galactic(1 * u.deg, 2 * u.deg)
    assert (g.l - 1 * u.deg) < TOL
    assert (g.b - 2 * u.deg) < TOL

    a = AltAz(1 * u.deg, 2 * u.deg)
    assert (a.az - 1 * u.deg) < TOL
    assert (a.alt - 2 * u.deg) < TOL

    # too many positional components
    with pytest.raises(TypeError):
        ICRS(1 * u.deg, 2 * u.deg, 1 * u.deg, 2 * u.deg)

    # a representation plus extra positional components
    with pytest.raises(TypeError):
        sph = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
        ICRS(sph, 1 * u.deg, 2 * u.deg)
def test_create_nodata_frames():
    """Frames can be created without data; frame attributes then take
    their declared defaults."""
    i = ICRS()
    assert len(i.frame_attributes) == 0

    f5 = FK5()
    assert f5.equinox == FK5.get_frame_attr_defaults()["equinox"]

    f4 = FK4()
    assert f4.equinox == FK4.get_frame_attr_defaults()["equinox"]

    # obstime is special because it's a property that uses equinox if obstime is not set
    assert f4.obstime in (
        FK4.get_frame_attr_defaults()["obstime"],
        FK4.get_frame_attr_defaults()["equinox"],
    )
def test_no_data_nonscalar_frames():
    """Array-valued frame attributes broadcast against each other to set the
    frame shape; non-broadcastable shapes raise ValueError."""
    a1 = AltAz(
        obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
        temperature=np.ones((3, 1)) * u.deg_C,
    )
    # (3, 1) broadcasts against (10,) -> (3, 10)
    assert a1.obstime.shape == (3, 10)
    assert a1.temperature.shape == (3, 10)
    assert a1.shape == (3, 10)

    match = r".*inconsistent shapes.*"
    if PYTEST_LT_8_0:
        # Exception.__notes__ are ignored in matching,
        # so we'll match manually and post-mortem instead
        direct_match = None
    else:
        direct_match = match
    with pytest.raises(ValueError, match=direct_match) as exc:
        AltAz(
            obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
            temperature=np.ones((3,)) * u.deg_C,
        )
    if direct_match is None:
        assert re.match(match, "\n".join(exc.value.__notes__))
def test_frame_repr():
    """Check repr() output for frames with no data, scalar data (with and
    without distance), and array data."""
    i = ICRS()
    assert repr(i) == "<ICRS Frame>"

    f5 = FK5()
    assert repr(f5).startswith("<FK5 Frame (equinox=")

    i2 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
    i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)

    assert repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n (1., 2.)>"
    assert (
        repr(i3)
        == "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n (1., 2., 3.)>"
    )

    # try with arrays
    i2 = ICRS(ra=[1.1, 2.1] * u.deg, dec=[2.1, 3.1] * u.deg)
    i3 = ICRS(
        ra=[1.1, 2.1] * u.deg, dec=[-15.6, 17.1] * u.deg, distance=[11.0, 21.0] * u.kpc
    )

    assert (
        repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n [(1.1, 2.1), (2.1, 3.1)]>"
    )

    assert (
        repr(i3) == "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n"
        " [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>"
    )
def test_frame_repr_vels():
    """repr() of a frame with proper motions shows the velocity components
    in the frame's preferred units."""
    i = ICRS(
        ra=1 * u.deg,
        dec=2 * u.deg,
        pm_ra_cosdec=1 * u.marcsec / u.yr,
        pm_dec=2 * u.marcsec / u.yr,
    )

    # unit comes out as mas/yr because of the preferred units defined in the
    # frame RepresentationMapping
    assert (
        repr(i) == "<ICRS Coordinate: (ra, dec) in deg\n"
        " (1., 2.)\n"
        " (pm_ra_cosdec, pm_dec) in mas / yr\n"
        " (1., 2.)>"
    )
def test_converting_units():
    """Round-tripping through another frame changes the internal
    representation but output still uses the frame's preferred units,
    unless the frame's RepresentationMapping overrides/omits units."""
    # this is a regular expression that with split (see below) removes what's
    # the decimal point to fix rounding problems
    rexrepr = re.compile(r"(.*?=\d\.).*?( .*?=\d\.).*?( .*)")

    # Use values that aren't subject to rounding down to X.9999...
    i2 = ICRS(ra=2.0 * u.deg, dec=2.0 * u.deg)
    i2_many = ICRS(ra=[2.0, 4.0] * u.deg, dec=[2.0, -8.1] * u.deg)

    # converting from FK5 to ICRS and back changes the *internal* representation,
    # but it should still come out in the preferred form
    i4 = i2.transform_to(FK5()).transform_to(ICRS())
    i4_many = i2_many.transform_to(FK5()).transform_to(ICRS())

    ri2 = "".join(rexrepr.split(repr(i2)))
    ri4 = "".join(rexrepr.split(repr(i4)))
    assert ri2 == ri4
    assert i2.data.lon.unit != i4.data.lon.unit  # Internal repr changed

    ri2_many = "".join(rexrepr.split(repr(i2_many)))
    ri4_many = "".join(rexrepr.split(repr(i4_many)))

    assert ri2_many == ri4_many
    assert i2_many.data.lon.unit != i4_many.data.lon.unit  # Internal repr changed

    # but that *shouldn't* hold if we turn off units for the representation
    class FakeICRS(ICRS):
        frame_specific_representation_info = {
            "spherical": [
                RepresentationMapping("lon", "ra", u.hourangle),
                RepresentationMapping("lat", "dec", None),
                RepresentationMapping("distance", "distance"),
            ]  # should fall back to default of None unit
        }

    fi = FakeICRS(i4.data)
    ri2 = "".join(rexrepr.split(repr(i2)))
    rfi = "".join(rexrepr.split(repr(fi)))
    rfi = re.sub("FakeICRS", "ICRS", rfi)  # Force frame name to match
    assert ri2 != rfi

    # the attributes should also get the right units
    assert i2.dec.unit == i4.dec.unit

    # unless no/explicitly given units
    assert i2.dec.unit != fi.dec.unit
    assert i2.ra.unit != fi.ra.unit
    assert fi.ra.unit == u.hourangle
def test_representation_info():
    """frame_specific_representation_info renames components (and sets
    preferred units) for positional and differential data, keyed either by
    representation class or by its string name."""

    class NewICRS1(ICRS):
        frame_specific_representation_info = {
            r.SphericalRepresentation: [
                RepresentationMapping("lon", "rara", u.hourangle),
                RepresentationMapping("lat", "decdec", u.degree),
                RepresentationMapping("distance", "distance", u.kpc),
            ]
        }

    i1 = NewICRS1(
        rara=10 * u.degree,
        decdec=-12 * u.deg,
        distance=1000 * u.pc,
        pm_rara_cosdecdec=100 * u.mas / u.yr,
        pm_decdec=17 * u.mas / u.yr,
        radial_velocity=10 * u.km / u.s,
    )
    assert allclose(i1.rara, 10 * u.deg)
    assert i1.rara.unit == u.hourangle
    assert allclose(i1.decdec, -12 * u.deg)
    assert allclose(i1.distance, 1000 * u.pc)
    assert i1.distance.unit == u.kpc
    assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
    assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)

    # this should auto-set the names of UnitSpherical:
    i1.set_representation_cls(
        r.UnitSphericalRepresentation, s=r.UnitSphericalCosLatDifferential
    )
    assert allclose(i1.rara, 10 * u.deg)
    assert allclose(i1.decdec, -12 * u.deg)
    assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
    assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)

    # For backwards compatibility, we also support the string name in the
    # representation info dictionary:
    class NewICRS2(ICRS):
        frame_specific_representation_info = {
            "spherical": [
                RepresentationMapping("lon", "ang1", u.hourangle),
                RepresentationMapping("lat", "ang2", u.degree),
                RepresentationMapping("distance", "howfar", u.kpc),
            ]
        }

    i2 = NewICRS2(ang1=10 * u.degree, ang2=-12 * u.deg, howfar=1000 * u.pc)
    assert allclose(i2.ang1, 10 * u.deg)
    assert i2.ang1.unit == u.hourangle
    assert allclose(i2.ang2, -12 * u.deg)
    assert allclose(i2.howfar, 1000 * u.pc)
    assert i2.howfar.unit == u.kpc

    # Test that the differential kwargs get overridden
    class NewICRS3(ICRS):
        frame_specific_representation_info = {
            r.SphericalCosLatDifferential: [
                RepresentationMapping("d_lon_coslat", "pm_ang1", u.hourangle / u.year),
                RepresentationMapping("d_lat", "pm_ang2"),
                RepresentationMapping("d_distance", "vlos", u.kpc / u.Myr),
            ]
        }

    i3 = NewICRS3(
        lon=10 * u.degree,
        lat=-12 * u.deg,
        distance=1000 * u.pc,
        pm_ang1=1 * u.mas / u.yr,
        pm_ang2=2 * u.mas / u.yr,
        vlos=100 * u.km / u.s,
    )
    assert allclose(i3.pm_ang1, 1 * u.mas / u.yr)
    assert i3.pm_ang1.unit == u.hourangle / u.year
    assert allclose(i3.pm_ang2, 2 * u.mas / u.yr)
    assert allclose(i3.vlos, 100 * u.km / u.s)
    assert i3.vlos.unit == u.kpc / u.Myr
def test_realizing():
    """realize_frame attaches data to a data-less frame, preserving frame
    attributes; passing a class instead of a representation raises."""
    rep = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)

    i = ICRS()
    i2 = i.realize_frame(rep)

    assert not i.has_data
    assert i2.has_data

    f = FK5(equinox=Time("J2001"))
    f2 = f.realize_frame(rep)

    assert not f.has_data
    assert f2.has_data

    # the non-default equinox is carried over to the realized frame
    assert f2.equinox == f.equinox
    assert f2.equinox != FK5.get_frame_attr_defaults()["equinox"]

    # Check that a nicer error message is returned:
    with pytest.raises(
        TypeError, match="Class passed as data instead of a representation"
    ):
        f.realize_frame(f.representation_type)
def test_replicating():
    """replicate(copy=...) shares or copies underlying data;
    replicate_without_data drops the data (and may override attributes)."""
    i = ICRS(ra=[1] * u.deg, dec=[2] * u.deg)

    icopy = i.replicate(copy=True)
    irepl = i.replicate(copy=False)
    # mutate the original in place: the no-copy replica must follow,
    # the copy must not
    i.data._lat[:] = 0 * u.deg
    assert np.all(i.data.lat == irepl.data.lat)
    assert np.all(i.data.lat != icopy.data.lat)

    iclone = i.replicate_without_data()
    assert i.has_data
    assert not i.isscalar
    assert i.shape == (1,)
    assert len(i) == 1
    assert not iclone.has_data
    assert iclone.isscalar
    assert iclone.shape == ()
    with pytest.raises(TypeError, match="no len()"):
        len(iclone)

    aa = AltAz(alt=1 * u.deg, az=2 * u.deg, obstime=Time("J2000"))
    # overriding obstime with an array reshapes the data-less clone
    aaclone = aa.replicate_without_data(obstime=Time(["J2001"]))
    assert aa.has_data
    assert aa.isscalar
    assert aa.shape == ()
    assert not aaclone.has_data
    assert not aaclone.isscalar
    assert aaclone.shape == (1,)
    assert len(aaclone) == 1
    assert not np.any(aa.obstime == aaclone.obstime)
    # attributes that were not overridden are preserved
    assert aa.pressure == aaclone.pressure
    assert aa.obswl == aaclone.obswl
def test_getitem():
    """Indexing a frame slices its data; a scalar index yields scalar data."""
    rep = r.SphericalRepresentation(
        [1, 2, 3] * u.deg, [4, 5, 6] * u.deg, [7, 8, 9] * u.kpc
    )

    i = ICRS(rep)
    assert len(i.ra) == 3

    iidx = i[1:]
    assert len(iidx.ra) == 2

    iidx2 = i[0]
    assert iidx2.ra.isscalar
def test_transform():
    """
    This test just makes sure the transform architecture works, but does *not*
    actually test all the builtin transforms themselves are accurate.
    """
    i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
    f = i.transform_to(FK5())
    i2 = f.transform_to(ICRS())

    # unit-spherical (no distance) data stays unit-spherical after round-trip
    assert i2.data.__class__ == r.UnitSphericalRepresentation

    assert_allclose(i.ra, i2.ra)
    assert_allclose(i.dec, i2.dec)

    i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[5, 6] * u.kpc)
    f = i.transform_to(FK5())
    i2 = f.transform_to(ICRS())

    # with a distance, the round-tripped data is no longer unit-spherical
    assert i2.data.__class__ != r.UnitSphericalRepresentation

    f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
    f4 = f.transform_to(FK4())
    f4_2 = f.transform_to(FK4(equinox=f.equinox))

    # make sure attributes are copied over correctly
    assert f4.equinox == FK4().equinox
    assert f4_2.equinox == f.equinox

    # make sure self-transforms also work
    i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
    i2 = i.transform_to(ICRS())
    assert_allclose(i.ra, i2.ra)
    assert_allclose(i.dec, i2.dec)

    f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
    f2 = f.transform_to(FK5())  # default equinox, so should be *different*
    assert f2.equinox == FK5().equinox
    with pytest.raises(AssertionError):
        assert_allclose(f.ra, f2.ra)
    with pytest.raises(AssertionError):
        assert_allclose(f.dec, f2.dec)

    # finally, check Galactic round-tripping
    i1 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
    i2 = i1.transform_to(Galactic()).transform_to(ICRS())
    assert_allclose(i1.ra, i2.ra)
    assert_allclose(i1.dec, i2.dec)
def test_transform_to_nonscalar_nodata_frame():
    """Transforming to a data-less frame with array attributes broadcasts
    the coordinate shape against the attribute shape."""
    # https://github.com/astropy/astropy/pull/5254#issuecomment-241592353
    # Also checks that shape and length of all make sense.
    times = Time("2016-08-23") + np.linspace(0, 10, 12) * u.day
    coo1 = ICRS(
        ra=[[0.0], [10.0], [20.0]] * u.deg, dec=[[-30.0], [30.0], [60.0]] * u.deg
    )
    assert coo1.shape == (3, 1)
    assert len(coo1) == 3
    fk5 = FK5(equinox=times)
    assert fk5.shape == (12,)
    assert len(fk5) == 12
    coo2 = coo1.transform_to(fk5)
    # (3, 1) coordinates x (12,) equinoxes -> (3, 12)
    assert coo2.shape == (3, 12)
    assert len(coo2) == 3
def test_setitem_no_velocity():
    """Test different flavors of item setting for a Frame without a velocity."""
    obstime = "B1955"
    sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
    sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)

    sc1 = sc0.copy()
    sc1_repr = repr(sc1)
    assert "representation" in sc1.cache
    sc1[1] = sc2[0]
    # item assignment must invalidate the cached representation/repr
    assert sc1.cache == {}
    assert repr(sc2) != sc1_repr

    assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
    assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
    assert sc1.obstime == sc2.obstime
    assert sc1.name == "fk4"

    # broadcast a scalar item to the full slice
    sc1 = sc0.copy()
    sc1[:] = sc2[0]
    assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
    assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])

    sc1 = sc0.copy()
    sc1[:] = sc2[:]
    assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
    assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])

    # fancy (reordering) index assignment
    sc1 = sc0.copy()
    sc1[[1, 0]] = sc2[:]
    assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
    assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])

    # Works for array-valued obstime so long as they are considered equivalent
    sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, obstime])
    sc1[0] = sc2[0]

    # Multidimensional coordinates
    sc1 = FK4([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
    sc2 = FK4([[10, 20], [30, 40]] * u.deg, [[50, 60], [70, 80]] * u.deg)
    sc1[0] = sc2[0]
    assert np.allclose(sc1.ra.to_value(u.deg), [[10, 20], [3, 4]])
    assert np.allclose(sc1.dec.to_value(u.deg), [[50, 60], [7, 8]])
def test_setitem_velocities():
    """Test different flavors of item setting for a Frame with a velocity."""
    sc0 = FK4(
        [1, 2] * u.deg,
        [3, 4] * u.deg,
        radial_velocity=[1, 2] * u.km / u.s,
        obstime="B1950",
    )
    sc2 = FK4(
        [10, 20] * u.deg,
        [30, 40] * u.deg,
        radial_velocity=[10, 20] * u.km / u.s,
        obstime="B1950",
    )

    # single-item assignment updates position and velocity together
    sc1 = sc0.copy()
    sc1[1] = sc2[0]
    assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
    assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
    assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
    assert sc1.obstime == sc2.obstime
    assert sc1.name == "fk4"

    # broadcast a scalar item to the full slice
    sc1 = sc0.copy()
    sc1[:] = sc2[0]
    assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
    assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
    assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])

    sc1 = sc0.copy()
    sc1[:] = sc2[:]
    assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
    assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
    assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])

    # fancy (reordering) index assignment
    sc1 = sc0.copy()
    sc1[[1, 0]] = sc2[:]
    assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
    assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
    assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
    """Item assignment must reject mismatched classes, inequivalent frame
    attributes, scalar targets, data-less targets and shape mismatches."""
    obstime = "B1950"
    sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg)
    sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)

    # different frame class
    sc1 = Galactic(sc0.ra, sc0.dec)
    with pytest.raises(
        TypeError, match="can only set from object of same class: Galactic vs. FK4"
    ):
        sc1[0] = sc2[0]

    # same class but different obstime -> not an equivalent frame
    sc1 = FK4(sc0.ra, sc0.dec, obstime="B2001")
    with pytest.raises(
        ValueError, match="can only set frame item from an equivalent frame"
    ):
        sc1[0] = sc2[0]

    # scalar target does not support item assignment
    sc1 = FK4(sc0.ra[0], sc0.dec[0], obstime=obstime)
    with pytest.raises(
        TypeError, match="scalar 'FK4' frame object does not support item assignment"
    ):
        sc1[0] = sc2[0]

    # target without data
    sc1 = FK4(obstime=obstime)
    with pytest.raises(ValueError, match="cannot set frame which has no data"):
        sc1[0] = sc2[0]

    # array obstime with differing values -> not equivalent
    sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, "B1980"])
    with pytest.raises(
        ValueError, match="can only set frame item from an equivalent frame"
    ):
        sc1[0] = sc2[0]

    # Wrong shape
    sc1 = FK4([sc0.ra], [sc0.dec], obstime=[obstime, "B1980"])
    with pytest.raises(
        ValueError, match="can only set frame item from an equivalent frame"
    ):
        sc1[0] = sc2[0]
def test_time_inputs():
    """
    Test validation and conversion of inputs for equinox and obstime attributes.
    """
    c = FK4(1 * u.deg, 2 * u.deg, equinox="J2001.5", obstime="2000-01-01 12:00:00")
    assert c.equinox == Time("J2001.5")
    assert c.obstime == Time("2000-01-01 12:00:00")

    # values Time() cannot parse are rejected
    with pytest.raises(ValueError) as err:
        c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5)
    assert "Invalid time input" in str(err.value)

    with pytest.raises(ValueError) as err:
        c = FK4(1 * u.deg, 2 * u.deg, obstime="hello")
    assert "Invalid time input" in str(err.value)

    # A vector time should work if the shapes match, and we automatically
    # broadcast the basic data.
    c = FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=["J2000", "J2001"])
    assert c.shape == (2,)
    c = FK4(1 * u.deg, 2 * u.deg, obstime=["J2000", "J2001"])
    assert c.shape == (2,)

    # If the shapes are not broadcastable, then we should raise an exception.
    match = r".*inconsistent shapes.*"
    if PYTEST_LT_8_0:
        # Exception.__notes__ are ignored in matching,
        # so we'll match manually and post-mortem instead
        direct_match = None
    else:
        direct_match = match
    with pytest.raises(ValueError, match=direct_match) as exc:
        FK4([1, 2, 3] * u.deg, [4, 5, 6] * u.deg, obstime=["J2000", "J2001"])
    if direct_match is None:
        assert re.match(match, "\n".join(exc.value.__notes__))
def test_is_frame_attr_default():
    """
    Check that the `is_frame_attr_default` machinery works as expected
    """
    c1 = FK5(ra=1 * u.deg, dec=1 * u.deg)
    c2 = FK5(
        ra=1 * u.deg, dec=1 * u.deg, equinox=FK5.get_frame_attr_defaults()["equinox"]
    )
    c3 = FK5(ra=1 * u.deg, dec=1 * u.deg, equinox=Time("J2001.5"))
    # c2's equinox *equals* the default value, but it was passed explicitly,
    # so it must not be reported as "default".
    assert c1.equinox == c2.equinox
    assert c1.equinox != c3.equinox
    assert c1.is_frame_attr_default("equinox")
    assert not c2.is_frame_attr_default("equinox")
    assert not c3.is_frame_attr_default("equinox")
    # realize_frame must preserve the default/non-default bookkeeping.
    c4 = c1.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
    c5 = c2.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
    assert c4.is_frame_attr_default("equinox")
    assert not c5.is_frame_attr_default("equinox")
def test_altaz_attributes():
    """Check default and explicitly-set obstime/location attributes of AltAz."""
    # With no frame attributes given, both optional attributes default to None.
    bare = AltAz(1 * u.deg, 2 * u.deg)
    assert bare.obstime is None
    assert bare.location is None
    # A string obstime is converted to a Time instance.
    timed = AltAz(1 * u.deg, 2 * u.deg, obstime="J2000")
    assert timed.obstime == Time("J2000")
    # A location attribute is kept as an EarthLocation.
    site = EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
    located = AltAz(1 * u.deg, 2 * u.deg, location=site)
    assert isinstance(located.location, EarthLocation)
def test_hadec_attributes():
    """Check HADec component access, attribute defaults, and hour-angle wrap."""
    hd = HADec(1 * u.hourangle, 2 * u.deg)
    assert hd.ha == 1.0 * u.hourangle
    assert hd.dec == 2 * u.deg
    assert hd.obstime is None
    assert hd.location is None
    hd2 = HADec(
        23 * u.hourangle,
        -2 * u.deg,
        obstime="J2000",
        location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m),
    )
    # 23h wraps to -1h: the hour angle is wrapped into the +/-12h range.
    assert_allclose(hd2.ha, -1 * u.hourangle)
    assert hd2.dec == -2 * u.deg
    assert hd2.obstime == Time("J2000")
    assert isinstance(hd2.location, EarthLocation)
    # The same wrap must apply to the spherical-representation longitude.
    sr = hd2.represent_as(r.SphericalRepresentation)
    assert_allclose(sr.lon, -1 * u.hourangle)
def test_itrs_earth_location():
    """Round-trip EarthLocation <-> ITRS, including topocentric ITRS frames."""
    loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
    sat = EarthLocation(
        lat=-24.6609379 * u.deg, lon=160.34199789 * u.deg, height=420.17927591 * u.km
    )
    # Geocentric ITRS -> EarthLocation must round-trip to the same geodetic values.
    itrs_geo = sat.get_itrs()
    eloc = itrs_geo.earth_location
    assert_allclose(sat.lon, eloc.lon)
    assert_allclose(sat.lat, eloc.lat)
    assert_allclose(sat.height, eloc.height)
    # A topocentric ITRS frame (with a `location` attribute) must also round-trip.
    topo_itrs_repr = itrs_geo.cartesian - loc.get_itrs().cartesian
    itrs_topo = ITRS(topo_itrs_repr, location=loc)
    eloc = itrs_topo.earth_location
    assert_allclose(sat.lon, eloc.lon)
    assert_allclose(sat.lat, eloc.lat)
    assert_allclose(sat.height, eloc.height)
    obstime = Time("J2010")  # Anything different from default
    topo_itrs_repr2 = sat.get_itrs(obstime).cartesian - loc.get_itrs(obstime).cartesian
    itrs_topo2 = ITRS(topo_itrs_repr2, location=loc, obstime=obstime)
    eloc2 = itrs_topo2.earth_location
    assert_allclose(sat.lon, eloc2.lon)
    assert_allclose(sat.lat, eloc2.lat)
    assert_allclose(sat.height, eloc2.height)
    # WGS84 geodetic representation input defaults to zero height.
    wgs84 = ITRS(325 * u.deg, 2 * u.deg, representation_type="wgs84geodetic")
    assert wgs84.lon == 325 * u.deg
    assert wgs84.lat == 2 * u.deg
    assert wgs84.height == 0.0 * u.m
def test_representation():
    """
    Test the getter and setter properties for `representation`
    """
    # Create the frame object.
    icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
    data = icrs.data
    # Create some representation objects.
    icrs_cart = icrs.cartesian
    icrs_spher = icrs.spherical
    icrs_cyl = icrs.cylindrical
    # Testing when `_representation` set to `CartesianRepresentation`.
    icrs.representation_type = r.CartesianRepresentation
    assert icrs.representation_type == r.CartesianRepresentation
    assert icrs_cart.x == icrs.x
    assert icrs_cart.y == icrs.y
    assert icrs_cart.z == icrs.z
    # Changing the representation type must never alter the underlying data.
    assert icrs.data == data
    # Testing that an ICRS object in CartesianRepresentation must not have spherical attributes.
    for attr in ("ra", "dec", "distance"):
        with pytest.raises(AttributeError) as err:
            getattr(icrs, attr)
        assert "object has no attribute" in str(err.value)
    # Testing when `_representation` set to `CylindricalRepresentation`.
    icrs.representation_type = r.CylindricalRepresentation
    assert icrs.representation_type == r.CylindricalRepresentation
    assert icrs.data == data
    # Testing setter input using text argument for spherical.
    icrs.representation_type = "spherical"
    assert icrs.representation_type is r.SphericalRepresentation
    assert icrs_spher.lat == icrs.dec
    assert icrs_spher.lon == icrs.ra
    assert icrs_spher.distance == icrs.distance
    assert icrs.data == data
    # Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes.
    for attr in ("x", "y", "z"):
        with pytest.raises(AttributeError) as err:
            getattr(icrs, attr)
        assert "object has no attribute" in str(err.value)
    # Testing setter input using text argument for cylindrical.
    icrs.representation_type = "cylindrical"
    assert icrs.representation_type is r.CylindricalRepresentation
    assert icrs_cyl.rho == icrs.rho
    assert icrs_cyl.phi == icrs.phi
    assert icrs_cyl.z == icrs.z
    assert icrs.data == data
    # Testing that an ICRS object in CylindricalRepresentation must not have spherical attributes.
    for attr in ("ra", "dec", "distance"):
        with pytest.raises(AttributeError) as err:
            getattr(icrs, attr)
        assert "object has no attribute" in str(err.value)
    # Setting an invalid representation (unknown string or a non-representation
    # class) must raise with a helpful message.
    with pytest.raises(ValueError) as err:
        icrs.representation_type = "WRONG"
    assert "but must be a BaseRepresentation class" in str(err.value)
    with pytest.raises(ValueError) as err:
        icrs.representation_type = ICRS
    assert "but must be a BaseRepresentation class" in str(err.value)
def test_represent_as():
    """represent_as accepts both string names and representation classes."""
    icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
    cart1 = icrs.represent_as("cartesian")
    cart2 = icrs.represent_as(r.CartesianRepresentation)
    assert cart1.x == cart2.x
    assert cart1.y == cart2.y
    assert cart1.z == cart2.z
    # now try with velocities
    icrs = ICRS(
        ra=0 * u.deg,
        dec=0 * u.deg,
        distance=10 * u.kpc,
        pm_ra_cosdec=0 * u.mas / u.yr,
        pm_dec=0 * u.mas / u.yr,
        radial_velocity=1 * u.km / u.s,
    )
    # single string
    rep2 = icrs.represent_as("cylindrical")
    assert isinstance(rep2, r.CylindricalRepresentation)
    assert isinstance(rep2.differentials["s"], r.CylindricalDifferential)
    # TODO: this should probably fail in the future once we figure out a better
    # workaround for dealing with UnitSphericalRepresentation's with
    # RadialDifferential's
    # two classes
    # rep2 = icrs.represent_as(r.CartesianRepresentation,
    #                          r.SphericalCosLatDifferential)
    # assert isinstance(rep2, r.CartesianRepresentation)
    # assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
    # Unknown representation names are rejected.
    with pytest.raises(ValueError):
        icrs.represent_as("odaigahara")
def test_shorthand_representations():
    """The .cartesian/.spherical/.sphericalcoslat shortcuts carry differentials."""
    rep = r.CartesianRepresentation([1, 2, 3] * u.pc)
    dif = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
    rep = rep.with_differentials(dif)
    icrs = ICRS(rep)
    cyl = icrs.cylindrical
    assert isinstance(cyl, r.CylindricalRepresentation)
    assert isinstance(cyl.differentials["s"], r.CylindricalDifferential)
    sph = icrs.spherical
    assert isinstance(sph, r.SphericalRepresentation)
    assert isinstance(sph.differentials["s"], r.SphericalDifferential)
    # sphericalcoslat differs only in the differential class attached.
    sph = icrs.sphericalcoslat
    assert isinstance(sph, r.SphericalRepresentation)
    assert isinstance(sph.differentials["s"], r.SphericalCosLatDifferential)
def test_equal():
    """Element-wise == / != between frames, including velocity-only differences."""
    obstime = "B1955"
    sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
    sc2 = FK4([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
    # Compare arrays and scalars
    eq = sc1 == sc2
    ne = sc1 != sc2
    assert np.all(eq == [True, False])
    assert np.all(ne == [False, True])
    # Scalar comparisons must return a plain (numpy) bool, not an array.
    v = sc1[0] == sc2[0]
    assert isinstance(v, (bool, np.bool_))
    assert v
    v = sc1[0] != sc2[0]
    assert isinstance(v, (bool, np.bool_))
    assert not v
    # Broadcasting
    eq = sc1[0] == sc2
    ne = sc1[0] != sc2
    assert np.all(eq == [True, False])
    assert np.all(ne == [False, True])
    # With diff only in velocity
    sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
    sc2 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
    eq = sc1 == sc2
    ne = sc1 != sc2
    assert np.all(eq == [True, False])
    assert np.all(ne == [False, True])
    v = sc1[0] == sc2[0]
    assert isinstance(v, (bool, np.bool_))
    assert v
    v = sc1[0] != sc2[0]
    assert isinstance(v, (bool, np.bool_))
    assert not v
    # Data-less frames of different class or differing attributes compare False.
    assert (FK4() == ICRS()) is False
    assert (FK4() == FK4(obstime="J1999")) is False
def test_equal_exceptions():
    """Check the error cases of frame equality comparisons."""
    # Shape mismatch
    sc1 = FK4([1, 2, 3] * u.deg, [3, 4, 5] * u.deg)
    with pytest.raises(ValueError, match="cannot compare: shape mismatch"):
        sc1 == sc1[:2]  # noqa: B015
    # Different representation_type
    sc1 = FK4(1, 2, 3, representation_type="cartesian")
    sc2 = FK4(1 * u.deg, 2 * u.deg, 2, representation_type="spherical")
    with pytest.raises(
        TypeError,
        match=(
            "cannot compare: objects must have same "
            "class: CartesianRepresentation vs. SphericalRepresentation"
        ),
    ):
        sc1 == sc2  # noqa: B015
    # Different differential type
    sc1 = FK4(1 * u.deg, 2 * u.deg, radial_velocity=1 * u.km / u.s)
    sc2 = FK4(
        1 * u.deg, 2 * u.deg, pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=1 * u.mas / u.yr
    )
    with pytest.raises(
        TypeError,
        match=(
            "cannot compare: objects must have same "
            "class: RadialDifferential vs. UnitSphericalCosLatDifferential"
        ),
    ):
        sc1 == sc2  # noqa: B015
    # Different frame attribute
    sc1 = FK5(1 * u.deg, 2 * u.deg)
    sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J1999")
    with pytest.raises(
        TypeError,
        match=r"cannot compare: objects must have equivalent "
        r"frames: <FK5 Frame \(equinox=J2000.000\)> "
        r"vs. <FK5 Frame \(equinox=J1999.000\)>",
    ):
        sc1 == sc2  # noqa: B015
    # Different frame
    sc1 = FK4(1 * u.deg, 2 * u.deg)
    sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
    with pytest.raises(
        TypeError,
        match="cannot compare: objects must have equivalent "
        r"frames: <FK4 Frame \(equinox=B1950.000, obstime=B1950.000\)> "
        r"vs. <FK5 Frame \(equinox=J2000.000\)>",
    ):
        sc1 == sc2  # noqa: B015
    # One frame with data, one without: raises in either comparison order.
    sc1 = FK4(1 * u.deg, 2 * u.deg)
    sc2 = FK4()
    with pytest.raises(
        ValueError, match="cannot compare: one frame has data and the other does not"
    ):
        sc1 == sc2  # noqa: B015
    with pytest.raises(
        ValueError, match="cannot compare: one frame has data and the other does not"
    ):
        sc2 == sc1  # noqa: B015
def test_dynamic_attrs():
    """Component names are dynamic attributes; frame components are read-only."""
    c = ICRS(1 * u.deg, 2 * u.deg)
    assert "ra" in dir(c)
    assert "dec" in dir(c)
    with pytest.raises(AttributeError) as err:
        c.blahblah
    assert "object has no attribute 'blahblah'" in str(err.value)
    # Frame components cannot be assigned after construction.
    with pytest.raises(AttributeError) as err:
        c.ra = 1
    assert "Cannot set any frame attribute" in str(err.value)
    # Non-component attributes can still be set freely.
    c.blahblah = 1
    assert c.blahblah == 1
def test_nodata_error():
    """Accessing .data on a data-less frame raises an informative ValueError."""
    frame = ICRS()
    with pytest.raises(ValueError) as err:
        frame.data
    assert "does not have associated data" in str(err.value)
def test_nodata_len_shape():
    """A data-less frame is scalar: shape () and no len()."""
    frame = ICRS()
    assert frame.shape == ()
    with pytest.raises(TypeError, match="Scalar.*has no len()"):
        len(frame)
def test_len0_data():
    """A frame built from empty arrays has data, length 0, and shape (0,)."""
    empty = ICRS([] * u.deg, [] * u.deg)
    assert empty.has_data
    repr(empty)  # must not raise for a length-0 frame
    assert len(empty) == 0
    assert empty.shape == (0,)
def test_len0_nodata():
    """A data-less frame with a length-0 vector attribute also has length 0."""
    frame = FK5(equinox=Time([], format="jyear"))
    assert len(frame) == 0
    assert frame.shape == (0,)
def test_quantity_attributes():
    """Validate unit and shape checking of GCRS quantity frame attributes."""
    # make sure we can create a GCRS frame with valid inputs
    GCRS(obstime="J2002", obsgeoloc=[1, 2, 3] * u.km, obsgeovel=[4, 5, 6] * u.km / u.s)
    # make sure it fails for invalid locs or vels
    with pytest.raises(TypeError):
        GCRS(obsgeoloc=[1, 2, 3])  # no unit
    with pytest.raises(u.UnitsError):
        GCRS(obsgeoloc=[1, 2, 3] * u.km / u.s)  # incorrect unit
    with pytest.raises(ValueError):
        GCRS(obsgeoloc=[1, 3] * u.km)  # incorrect shape
def test_quantity_attribute_default():
    """Check defaults, unit conversion, and shape enforcement of QuantityAttribute."""
    # The default default (yes) is None:
    class MyCoord(BaseCoordinateFrame):
        someval = QuantityAttribute(unit=u.deg)
    frame = MyCoord()
    assert frame.someval is None
    frame = MyCoord(someval=15 * u.deg)
    assert u.isclose(frame.someval, 15 * u.deg)
    # This should work if we don't explicitly pass in a unit, but we pass in a
    # default value with a unit
    class MyCoord2(BaseCoordinateFrame):
        someval = QuantityAttribute(15 * u.deg)
    frame = MyCoord2()
    assert u.isclose(frame.someval, 15 * u.deg)
    # Since here no shape was given, we can set to any shape we like.
    frame = MyCoord2(someval=np.ones(3) * u.deg)
    assert frame.someval.shape == (3,)
    assert np.all(frame.someval == 1 * u.deg)
    # We should also be able to insist on a given shape.
    class MyCoord3(BaseCoordinateFrame):
        someval = QuantityAttribute(unit=u.arcsec, shape=(3,))
    # Input in deg is converted to the attribute's arcsec unit.
    frame = MyCoord3(someval=np.ones(3) * u.deg)
    assert frame.someval.shape == (3,)
    assert frame.someval.unit == u.arcsec
    assert u.allclose(frame.someval.value, 3600.0)
    # The wrong shape raises.
    with pytest.raises(ValueError, match="shape"):
        MyCoord3(someval=1.0 * u.deg)
    # As does the wrong unit.
    with pytest.raises(u.UnitsError):
        MyCoord3(someval=np.ones(3) * u.m)
    # We are allowed a short-cut for zero.
    frame0 = MyCoord3(someval=0)
    assert frame0.someval.shape == (3,)
    assert frame0.someval.unit == u.arcsec
    assert np.all(frame0.someval.value == 0.0)
    # But not if it has the wrong shape.
    with pytest.raises(ValueError, match="shape"):
        MyCoord3(someval=np.zeros(2))
    # This should fail, if we don't pass in a default or a unit
    with pytest.raises(ValueError):
        class MyCoord(BaseCoordinateFrame):
            someval = QuantityAttribute()
def test_eloc_attributes():
    """Check conversion of location attribute inputs to EarthLocation."""
    el = EarthLocation(lon=12.3 * u.deg, lat=45.6 * u.deg, height=1 * u.km)
    it = ITRS(
        r.SphericalRepresentation(lon=12.3 * u.deg, lat=45.6 * u.deg, distance=1 * u.km)
    )
    gc = GCRS(ra=12.3 * u.deg, dec=45.6 * u.deg, distance=6375 * u.km)
    el1 = AltAz(location=el).location
    assert isinstance(el1, EarthLocation)
    # these should match *exactly* because the EarthLocation
    assert el1.lat == el.lat
    assert el1.lon == el.lon
    assert el1.height == el.height
    el2 = AltAz(location=it).location
    assert isinstance(el2, EarthLocation)
    # these should *not* match because giving something in Spherical ITRS is
    # *not* the same as giving it as an EarthLocation: EarthLocation is on an
    # elliptical geoid. So the longitude should match (because flattening is
    # only along the z-axis), but latitude should not. Also, height is relative
    # to the *surface* in EarthLocation, but the ITRS distance is relative to
    # the center of the Earth
    assert not allclose(el2.lat, it.spherical.lat)
    assert allclose(el2.lon, it.spherical.lon)
    assert el2.height < -6000 * u.km
    el3 = AltAz(location=gc).location
    # GCRS inputs implicitly get transformed to ITRS and then onto
    # EarthLocation's elliptical geoid. So both lat and lon shouldn't match
    assert isinstance(el3, EarthLocation)
    assert not allclose(el3.lat, gc.dec)
    assert not allclose(el3.lon, gc.ra)
    assert np.abs(el3.height) < 500 * u.km
def test_equivalent_frames():
    """is_equivalent_frame: same class and same frame attributes, data ignored."""
    i = ICRS()
    i2 = ICRS(1 * u.deg, 2 * u.deg)
    assert i.is_equivalent_frame(i)
    assert i.is_equivalent_frame(i2)
    # Non-frame inputs (including SkyCoord) are rejected with TypeError.
    with pytest.raises(TypeError):
        assert i.is_equivalent_frame(10)
    with pytest.raises(TypeError):
        assert i2.is_equivalent_frame(SkyCoord(i2))
    f0 = FK5()  # this J2000 is TT
    f1 = FK5(equinox="J2000")
    f2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
    f3 = FK5(equinox="J2010")
    f4 = FK4(equinox="J2010")
    assert f1.is_equivalent_frame(f1)
    assert not i.is_equivalent_frame(f1)
    assert f0.is_equivalent_frame(f1)
    assert f1.is_equivalent_frame(f2)
    assert not f1.is_equivalent_frame(f3)
    assert not f3.is_equivalent_frame(f4)
    # Default (None) obstime is not equivalent to an explicit obstime.
    aa1 = AltAz()
    aa2 = AltAz(obstime="J2010")
    assert aa2.is_equivalent_frame(aa2)
    assert not aa1.is_equivalent_frame(i)
    assert not aa1.is_equivalent_frame(aa2)
def test_equivalent_frame_coordinateattribute():
    """Frames differing in a CoordinateAttribute value are not equivalent."""
    class FrameWithCoordinateAttribute(BaseCoordinateFrame):
        coord_attr = CoordinateAttribute(HCRS)
    # These frames should not be considered equivalent
    f0 = FrameWithCoordinateAttribute()
    f1 = FrameWithCoordinateAttribute(
        coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2000")
    )
    f2 = FrameWithCoordinateAttribute(
        coord_attr=HCRS(3 * u.deg, 4 * u.deg, obstime="J2000")
    )
    f3 = FrameWithCoordinateAttribute(
        coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2001")
    )
    assert not f0.is_equivalent_frame(f1)
    assert not f1.is_equivalent_frame(f0)
    assert not f1.is_equivalent_frame(f2)
    assert not f1.is_equivalent_frame(f3)
    assert not f2.is_equivalent_frame(f3)
    # They each should still be equivalent to a deep copy of themselves
    assert f0.is_equivalent_frame(deepcopy(f0))
    assert f1.is_equivalent_frame(deepcopy(f1))
    assert f2.is_equivalent_frame(deepcopy(f2))
    assert f3.is_equivalent_frame(deepcopy(f3))
def test_equivalent_frame_locationattribute():
    """Frames differing only in an EarthLocationAttribute are not equivalent."""

    class FrameWithLocationAttribute(BaseCoordinateFrame):
        loc_attr = EarthLocationAttribute()

    # Default (None) location vs. an explicit location: not equivalent either way.
    default_frame = FrameWithLocationAttribute()
    site = EarthLocation(lat=-34, lon=19, height=300)
    located_frame = FrameWithLocationAttribute(loc_attr=site)
    assert not default_frame.is_equivalent_frame(located_frame)
    assert not located_frame.is_equivalent_frame(default_frame)
    # Each frame is still equivalent to a deep copy of itself.
    assert default_frame.is_equivalent_frame(deepcopy(default_frame))
    assert located_frame.is_equivalent_frame(deepcopy(located_frame))
def test_representation_subclass():
    """Frames must accept (and repr) custom representation subclasses."""
    # Regression test for #3354
    # Normally when instantiating a frame without a distance the frame will try
    # and use UnitSphericalRepresentation internally instead of
    # SphericalRepresentation.
    frame = FK5(
        representation_type=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg
    )
    # Use `is` for exact-type checks: classes are singletons, so identity is
    # the correct comparison (avoids the `type(x) == T` anti-pattern, E721).
    assert type(frame._data) is r.UnitSphericalRepresentation
    assert frame.representation_type == r.SphericalRepresentation
    # If using a SphericalRepresentation class this used to not work, so we
    # test here that this is now fixed.
    class NewSphericalRepresentation(r.SphericalRepresentation):
        attr_classes = r.SphericalRepresentation.attr_classes
    frame = FK5(
        representation_type=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg
    )
    assert type(frame._data) is r.UnitSphericalRepresentation
    assert frame.representation_type == NewSphericalRepresentation
    # A similar issue then happened in __repr__ with subclasses of
    # SphericalRepresentation.
    assert (
        repr(frame)
        == "<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n    (32., 20.)>"
    )
    # A more subtle issue is when specifying a custom
    # UnitSphericalRepresentation subclass for the data and
    # SphericalRepresentation or a subclass for the representation.
    class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
        attr_classes = r.UnitSphericalRepresentation.attr_classes
        def __repr__(self):
            return "<NewUnitSphericalRepresentation spam spam spam>"
    frame = FK5(
        NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
        representation_type=NewSphericalRepresentation,
    )
    assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>"
def test_getitem_representation():
    """
    Make sure current representation survives __getitem__ even if different
    from data representation.
    """
    coords = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
    coords.representation_type = "cartesian"
    # Slicing must keep the explicitly-set representation type.
    assert coords[0].representation_type is r.CartesianRepresentation
def test_component_error_useful():
    """
    Check that a data-less frame gives useful error messages about not having
    data when the attributes asked for are possible coordinate components
    """
    i = ICRS()
    # A valid component name on a data-less frame: "no data" ValueError.
    with pytest.raises(ValueError) as excinfo:
        i.ra
    assert "does not have associated data" in str(excinfo.value)
    # A non-component name: plain AttributeError.
    with pytest.raises(AttributeError) as excinfo1:
        i.foobar
    with pytest.raises(AttributeError) as excinfo2:
        i.lon  # lon is *not* the component name despite being the underlying representation's name
    assert "object has no attribute 'foobar'" in str(excinfo1.value)
    assert "object has no attribute 'lon'" in str(excinfo2.value)
def test_cache_clear():
    """cache.clear() must empty the representation cache populated by repr()."""
    frame = ICRS(1 * u.deg, 2 * u.deg)
    # repr() adds an in-frame-units version of the representation to the cache.
    repr(frame)
    assert len(frame.cache["representation"]) == 2
    frame.cache.clear()
    assert len(frame.cache["representation"]) == 0
def test_inplace_array():
    """In-place edits of array data are visible after clearing the cache."""
    i = ICRS([[1, 2], [3, 4]] * u.deg, [[10, 20], [30, 40]] * u.deg)
    # Add an in frame units version of the rep to the cache.
    repr(i)
    # Check that repr() has added a rep to the cache
    assert len(i.cache["representation"]) == 2
    # Modify the data
    i.data.lon[:, 0] = [100, 200] * u.deg
    # Clear the cache
    i.cache.clear()
    # This will use a second (potentially cached rep)
    assert_allclose(i.ra, [[100, 2], [200, 4]] * u.deg)
    assert_allclose(i.dec, [[10, 20], [30, 40]] * u.deg)
def test_inplace_change():
    """In-place edits of scalar data are visible after clearing the cache."""
    i = ICRS(1 * u.deg, 2 * u.deg)
    # Add an in frame units version of the rep to the cache.
    repr(i)
    # Check that repr() has added a rep to the cache
    assert len(i.cache["representation"]) == 2
    # Modify the data
    i.data.lon[()] = 10 * u.deg
    # Clear the cache
    i.cache.clear()
    # This will use a second (potentially cached rep)
    assert i.ra == 10 * u.deg
    assert i.dec == 2 * u.deg
def test_representation_with_multiple_differentials():
    """Frames reject representations carrying more than one differential."""
    dif1 = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
    dif2 = r.CartesianDifferential([1, 2, 3] * u.km / u.s**2)
    rep = r.CartesianRepresentation(
        [1, 2, 3] * u.pc, differentials={"s": dif1, "s2": dif2}
    )
    # check that an error is raised: multiple differentials are not supported
    with pytest.raises(ValueError):
        ICRS(rep)
def test_missing_component_error_names():
    """
    This test checks that the component names are frame component names, not
    representation or differential names, when referenced in an exception raised
    when not passing in enough data. For example:
    ICRS(ra=10*u.deg)
    should state:
    TypeError: __init__() missing 1 required positional argument: 'dec'
    """
    with pytest.raises(TypeError) as e:
        ICRS(ra=150 * u.deg)
    assert "missing 1 required positional argument: 'dec'" in str(e.value)
    # Velocity components must also be referred to by frame names
    # (pm_ra_cosdec), not representation names (pm_ra).
    with pytest.raises(TypeError) as e:
        ICRS(
            ra=150 * u.deg,
            dec=-11 * u.deg,
            pm_ra=100 * u.mas / u.yr,
            pm_dec=10 * u.mas / u.yr,
        )
    assert "pm_ra_cosdec" in str(e.value)
def test_non_spherical_representation_unit_creation(unitphysics):  # noqa: F811
    """Distance-less data in a physics-spherical frame uses the unit variant."""
    class PhysicsICRS(ICRS):
        default_representation = r.PhysicsSphericalRepresentation
    pic = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg, r=1 * u.kpc)
    assert isinstance(pic.data, r.PhysicsSphericalRepresentation)
    # Without `r`, the data collapses to the unit-sphere representation class
    # provided by the `unitphysics` fixture.
    picu = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg)
    assert isinstance(picu.data, unitphysics)
def test_attribute_repr():
    """Attribute values can customize their repr via _astropy_repr_in_frame."""
    class Spam:
        def _astropy_repr_in_frame(self):
            return "TEST REPR"
    class TestFrame(BaseCoordinateFrame):
        attrtest = Attribute(default=Spam())
    # The hook's output must appear verbatim in the frame repr.
    assert "TEST REPR" in repr(TestFrame())
def test_component_names_repr():
    """Renamed (and swapped) component names must appear correctly in repr."""
    # Frame class with new component names that includes a name swap
    class NameChangeFrame(BaseCoordinateFrame):
        default_representation = r.PhysicsSphericalRepresentation
        frame_specific_representation_info = {
            r.PhysicsSphericalRepresentation: [
                RepresentationMapping("phi", "theta", u.deg),
                RepresentationMapping("theta", "phi", u.arcsec),
                RepresentationMapping("r", "JUSTONCE", u.AU),
            ]
        }
    frame = NameChangeFrame(0 * u.deg, 0 * u.arcsec, 0 * u.AU)
    # Check for the new names in the Frame repr
    assert "(theta, phi, JUSTONCE)" in repr(frame)
    # Check that the letter "r" has not been replaced more than once in the Frame repr
    assert repr(frame).count("JUSTONCE") == 1
def test_galactocentric_defaults():
    """Check the galactocentric_frame_defaults science-state machinery."""
    with galactocentric_frame_defaults.set("pre-v4.0"):
        galcen_pre40 = Galactocentric()
    with galactocentric_frame_defaults.set("v4.0"):
        galcen_40 = Galactocentric()
    with galactocentric_frame_defaults.set("latest"):
        galcen_latest = Galactocentric()
    # parameters that changed
    assert not u.allclose(galcen_pre40.galcen_distance, galcen_40.galcen_distance)
    assert not u.allclose(galcen_pre40.z_sun, galcen_40.z_sun)
    # "latest" currently aliases v4.0, so all attributes must agree.
    for k in galcen_40.frame_attributes:
        if isinstance(getattr(galcen_40, k), BaseCoordinateFrame):
            continue  # skip coordinate comparison...
        elif isinstance(getattr(galcen_40, k), CartesianDifferential):
            assert u.allclose(
                getattr(galcen_40, k).d_xyz, getattr(galcen_latest, k).d_xyz
            )
        else:
            assert getattr(galcen_40, k) == getattr(galcen_latest, k)
    # test validate Galactocentric
    with galactocentric_frame_defaults.set("latest"):
        params = galactocentric_frame_defaults.validate(galcen_latest)
        references = galcen_latest.frame_attribute_references
        state = {"parameters": params, "references": references}
        assert galactocentric_frame_defaults.parameters == params
        assert galactocentric_frame_defaults.references == references
        assert galactocentric_frame_defaults._state == state
    # Test not one of accepted parameter types
    with pytest.raises(ValueError):
        galactocentric_frame_defaults.validate(ValueError)
    # test parameters property
    assert (
        galactocentric_frame_defaults.parameters
        == galactocentric_frame_defaults.parameters
    )
def test_galactocentric_references():
    """Default Galactocentric attributes carry literature references; custom ones don't."""
    # references in the "scientific paper"-sense
    with galactocentric_frame_defaults.set("pre-v4.0"):
        galcen_pre40 = Galactocentric()
        for k in galcen_pre40.frame_attributes:
            if k == "roll":  # no reference for this parameter
                continue
            assert k in galcen_pre40.frame_attribute_references
    with galactocentric_frame_defaults.set("v4.0"):
        galcen_40 = Galactocentric()
        for k in galcen_40.frame_attributes:
            if k == "roll":  # no reference for this parameter
                continue
            assert k in galcen_40.frame_attribute_references
    with galactocentric_frame_defaults.set("v4.0"):
        # Overriding an attribute must drop its reference entry.
        galcen_custom = Galactocentric(z_sun=15 * u.pc)
        for k in galcen_custom.frame_attributes:
            if k == "roll":  # no reference for this parameter
                continue
            if k == "z_sun":
                assert k not in galcen_custom.frame_attribute_references
            else:
                assert k in galcen_custom.frame_attribute_references
def test_coordinateattribute_transformation():
    """CoordinateAttribute converts frame/SkyCoord inputs to its declared frame."""
    class FrameWithCoordinateAttribute(BaseCoordinateFrame):
        coord_attr = CoordinateAttribute(HCRS)
    hcrs = HCRS(1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-02-03")
    f1_frame = FrameWithCoordinateAttribute(coord_attr=hcrs)
    f1_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(hcrs))
    # The input is already HCRS, so the frame attribute should not change it
    assert f1_frame.coord_attr == hcrs
    # The output should not be different if a SkyCoord is provided
    assert f1_skycoord.coord_attr == f1_frame.coord_attr
    gcrs = GCRS(4 * u.deg, 5 * u.deg, 6 * u.AU, obstime="2004-05-06")
    f2_frame = FrameWithCoordinateAttribute(coord_attr=gcrs)
    f2_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(gcrs))
    # The input needs to be converted from GCRS to HCRS
    assert isinstance(f2_frame.coord_attr, HCRS)
    # The `obstime` frame attribute should have been "merged" in a SkyCoord-style transformation
    assert f2_frame.coord_attr.obstime == gcrs.obstime
    # The output should not be different if a SkyCoord is provided
    assert f2_skycoord.coord_attr == f2_frame.coord_attr
def test_realize_frame_accepts_kwargs():
    """realize_frame must forward extra kwargs such as representation_type."""
    base = ICRS(
        x=1 * u.pc,
        y=2 * u.pc,
        z=3 * u.pc,
        representation_type=r.CartesianRepresentation,
    )
    data = r.CartesianRepresentation(x=11 * u.pc, y=12 * u.pc, z=13 * u.pc)
    # Each realized frame honors its own representation_type keyword.
    cart_frame = base.realize_frame(data, representation_type="cartesian")
    cyl_frame = base.realize_frame(data, representation_type="cylindrical")
    assert cart_frame.representation_type == r.CartesianRepresentation
    assert cyl_frame.representation_type == r.CylindricalRepresentation
def test_nameless_frame_subclass():
    """Note: this is a regression test for #11096"""
    class Test:
        pass
    # Subclass from a frame class and a non-frame class.
    # This subclassing is the test!
    class NewFrame(ICRS, Test):
        pass
    # Reaching this point without an exception is the assertion.
def test_frame_coord_comparison():
    """Test that frame can be compared to a SkyCoord"""
    frame = ICRS(0 * u.deg, 0 * u.deg)
    coord = SkyCoord(frame)
    other = SkyCoord(ICRS(0 * u.deg, 1 * u.deg))
    assert frame == coord
    assert frame != other
    assert not (frame == other)
    # Non-equivalent frames cannot be compared at all.
    error_msg = "objects must have equivalent frames"
    with pytest.raises(TypeError, match=error_msg):
        frame == SkyCoord(AltAz("0d", "1d"))  # noqa: B015
    coord = SkyCoord(ra=12 * u.hourangle, dec=5 * u.deg, frame=FK5(equinox="J1950"))
    frame = FK5(ra=12 * u.hourangle, dec=5 * u.deg, equinox="J2000")
    with pytest.raises(TypeError, match=error_msg):
        coord == frame  # noqa: B015
    # A data-less frame cannot be compared to a SkyCoord with data.
    frame = ICRS()
    coord = SkyCoord(0 * u.deg, 0 * u.deg, frame=frame)
    error_msg = "Can only compare SkyCoord to Frame with data"
    with pytest.raises(ValueError, match=error_msg):
        frame == coord  # noqa: B015
@pytest.mark.parametrize(
    ["s1", "s2"],
    (
        ((1,), (1,)),
        ((2,), (1,)),
        ((1,), (2,)),
        ((2,), (2,)),
        ((2, 1), (1,)),
        ((1,), (2, 1)),
        ((2, 1), (1, 3)),
    ),
)
def test_altaz_broadcast(s1, s2):
    """Note: Regression test for #5982"""
    # obstime with shape s1 and data with shape s2 must broadcast together.
    where = EarthLocation.from_geodetic(lat=45 * u.deg, lon=30 * u.deg, height=0 * u.m)
    time = Time(np.full(s1, 58000.0), format="mjd")
    angle = np.full(s2, 45.0) * u.deg
    result = AltAz(alt=angle, az=angle, obstime=time, location=where)
    assert result.shape == np.broadcast_shapes(s1, s2)
def test_transform_altaz_array_obstime():
    """Note: Regression test for #12965"""
    obstime = Time("2010-01-01T00:00:00")
    location = EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
    frame1 = AltAz(location=location, obstime=obstime)
    coord1 = SkyCoord(alt=80 * u.deg, az=0 * u.deg, frame=frame1)
    # Scalar alt/az data must broadcast against an array obstime.
    obstimes = obstime + np.linspace(0, 15, 50) * u.min
    frame2 = AltAz(location=location, obstime=obstimes)
    coord2 = SkyCoord(alt=coord1.alt, az=coord1.az, frame=frame2)
    assert np.all(coord2.alt == 80 * u.deg)
    assert np.all(coord2.az == 0 * u.deg)
    assert coord2.shape == (50,)
    # test transformation to ICRS works
    assert len(coord2.icrs) == 50
def test_spherical_offsets_by_broadcast():
    """Note: Regression test for #14383"""
    coords = SkyCoord(
        ra=np.array([123, 134, 145]), dec=np.array([45, 56, 67]), unit=u.deg
    )
    # Scalar offsets must broadcast against the array coordinate.
    shifted = coords.spherical_offsets_by(2 * u.deg, 2 * u.deg)
    assert shifted.shape == (3,)
@pytest.mark.parametrize("shape", [(1,), (2,)])
def test_spherical_offsets_with_wrap(shape):
    # see https://github.com/astropy/astropy/issues/16219
    # Offsets at the pole (dec=90 deg) must preserve the input shape
    # in both offset directions.
    sc = SkyCoord(ra=np.broadcast_to(123.0, shape), dec=90.0, unit=u.deg)
    scop = sc.spherical_offsets_by(+2 * u.deg, 0 * u.deg)
    assert scop.shape == shape
    scom = sc.spherical_offsets_by(-2 * u.deg, 0 * u.deg)
    assert scom.shape == shape
def test_insert():
    """Check BaseCoordinateFrame.insert for scalar and array values."""
    # Tests are a subset of those in test_sky_coord.
    c0 = ICRS([1, 2] * u.deg, [3, 4] * u.deg)
    c1 = ICRS(5 * u.deg, 6 * u.deg)
    c3 = ICRS([10, 20] * u.deg, [30, 40] * u.deg)
    # Insert a scalar
    c = c0.insert(1, c1)
    assert skycoord_equal(c, ICRS([1, 5, 2] * u.deg, [3, 6, 4] * u.deg))
    # Insert length=2 array at start of array
    c = c0.insert(0, c3)
    assert skycoord_equal(c, ICRS([10, 20, 1, 2] * u.deg, [30, 40, 3, 4] * u.deg))
    # Insert length=2 array at end of array
    c = c0.insert(2, c3)
    assert skycoord_equal(c, ICRS([1, 2, 10, 20] * u.deg, [3, 4, 30, 40] * u.deg))
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@coordinates@tests@test_frames.py@.PATH_END.py
|
{
"filename": "plotting.py",
"repo_name": "ChrisBoettner/plato",
"repo_path": "plato_extracted/plato-main/plato/visualisation/plotting.py",
"type": "Python"
}
|
from typing import Any
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.colorbar import Colorbar
def contour_plot(
    x: np.ndarray,
    y: np.ndarray,
    z: np.ndarray,
    colorbar: bool = True,
    contour_kwargs: dict[str, Any] | None = None,
    contourf_kwargs: dict[str, Any] | None = None,
) -> tuple[Figure, Axes, Colorbar | None]:
    """
    Simple function to plot contours of a 2D grid.

    Parameters
    ----------
    x : np.ndarray
        The x-axis values, from a meshgrid.
    y : np.ndarray
        The y-axis values, from a meshgrid.
    z : np.ndarray
        The z-axis values, applied to the meshgrid.
    colorbar : bool, optional
        Whether to plot a colorbar, by default True.
    contour_kwargs : dict[str, Any] | None, optional
        Arguments to pass to the contour plot, by default None (no extra
        arguments).
    contourf_kwargs : dict[str, Any] | None, optional
        Arguments to pass to the contourf plot, by default None (no extra
        arguments).

    Returns
    -------
    tuple[Figure, Axes, Colorbar | None]
        The figure, axes, and colorbar objects. The colorbar is None when
        ``colorbar`` is False.
    """
    # Avoid mutable default arguments ({}): a shared dict would leak state
    # between calls if a caller ever mutated it.
    contour_kwargs = {} if contour_kwargs is None else contour_kwargs
    contourf_kwargs = {} if contourf_kwargs is None else contourf_kwargs

    fig, ax = plt.subplots()
    # Filled contours first; the returned ContourSet drives the colorbar.
    filled = ax.contourf(
        x,
        y,
        z,
        **contourf_kwargs,
    )
    # Overlay contour lines on top of the filled plot.
    ax.contour(
        x,
        y,
        z,
        **contour_kwargs,
    )
    # Only create the colorbar when requested (previously one was always
    # created and then removed, yet still returned; this matches the
    # documented Colorbar | None return).
    cbar = fig.colorbar(filled, ax=ax) if colorbar else None
    return fig, ax, cbar
|
ChrisBoettnerREPO_NAMEplatoPATH_START.@plato_extracted@plato-main@plato@visualisation@plotting.py@.PATH_END.py
|
{
"filename": "Test_Model_Performance.ipynb",
"repo_name": "swagnercarena/ovejero",
"repo_path": "ovejero_extracted/ovejero-master/demos/Test_Model_Performance.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
from tqdm import tqdm
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
import matplotlib
%matplotlib inline
from ovejero import model_trainer, data_tools, bnn_inference
import corner
import os
def NOTIMPLEMENTED():
    """Placeholder that fails loudly until the user supplies a real path."""
    message = 'Must specify config/save path'
    raise NotImplementedError(message)
```
# Testing the Performance of a Model That Has Been Fit
__Author:__ Sebastian Wagner-Carena
__Last Run:__ 08/04/2020
__Goals:__ Learn how to test the performance of a trained model on the validation set.
__Before running this notebook:__ Run the Train_Toy_Model notebook to understand how to train a model. Then train a model with whatever configuration you want. You will have to add the path to the config file in this notebook.
To start, all we have to do is load up our model weights and run it on the validation set. Thankfully, that's pretty easy to do with the BNN inference class. If you don't have a GPU, generating samples for the full validation set can be time consuming (30-40 minutes for 1000 samples). However, by specifying a save path for the samples we only need to do this once.
```python
# First specify the config path
config_path = NOTIMPLEMENTED()
# Check that the config has what you need
cfg = model_trainer.load_config(config_path)
# The InferenceClass will do all the heavy lifting of preparing the model from the configuration file,
# initializing the validation dataset, and providing outputs correctly marginalized over the BNN uncertainties.
bnn_infer = bnn_inference.InferenceClass(cfg)
# Now we just have to ask the InferenceClass to spin up some samples from our BNN. The more samples, the more
# accurate our plots and metrics will be. The right value to use unfortunately requires a bit of trial and error.
# 1000 is a good starting point though.
num_samples = 1000
sample_save_dir = NOTIMPLEMENTED()
bnn_infer.gen_samples(num_samples,sample_save_dir=sample_save_dir)
```
Now that we've set up our infrastructure, the first thing we want to do is inspect the statistics of our network's performance over the validation set.
```python
bnn_infer.report_stats()
```
We can also inspect a coverage plot of our parameters. If our model is performing well, we expect our data to roughly follow the 68-95-99.7 rule.
```python
bnn_infer.gen_coverage_plots()
```
Another good check is to see the posterior of some example images.
```python
image_index = 5
bnn_infer.plot_posterior_contours(image_index)
```
It's important to understand where our uncertainty is coming from. We can inspect whether our uncertainty is dominated by aleatoric or epistemic sources.
```python
bnn_infer.comp_al_ep_unc()
```
In the end, we want our network's posterior to be well calibrated. That means that the truth should be a representative draw from the distribution we're predicting. The exact sampling that goes into the calibration plot is complicated, but the x axis represents how much of the data the model expects to fall within a certain region of our posterior, and the y axis represents how much data actually falls within that region. Ideally this would be a straight line (y=x), but in practice our model is likely to be overconfident, underconfident, or some combination of both. The lower right hand corner of our plot represents overconfidence, and the upper left hand corner represents underconfidence.
```python
color_map = ["#377eb8", "#4daf4a"]
n_perc_points = 30
fig = bnn_infer.plot_calibration(color_map=color_map,n_perc_points=n_perc_points)
```
## Understanding the Calibration Plot
Throughout our paper we argue that the calibration plot is the best metric to asses the quality of the BNN posterior. Here, we include a few examples to give a better feel for the calibration plot. We focus on toy 2D models since those are easy to visualize and conceptualize. We can start with a biased 2D posterior prediction.
```python
# First we'll make a class to generate our comparison
matplotlib.rcParams.update({'font.size': 13})
def plot_toy_model_calibration(data_mean, data_cov, post_mean, post_cov,
                               toy_batch_size, n_draws, fit_guass_data=False):
    """Visualise how a toy Gaussian posterior calibrates against toy data.

    Draws `toy_batch_size` truth points from N(data_mean, data_cov) and
    `n_draws` posterior samples per point from N(post_mean, post_cov),
    injects them into a fresh InferenceClass (using the notebook-global
    `cfg`), then shows a corner plot of the inferred posterior over the
    data together with the resulting calibration curve.

    Returns (corner_fig, calibration_fig).
    """
    toy_inference = bnn_inference.InferenceClass(cfg)
    # Truth draws and mock posterior draws for every toy "image".
    truth = np.random.multivariate_normal(data_mean, data_cov, (toy_batch_size))
    samples = np.random.multivariate_normal(post_mean, post_cov,
                                            (n_draws, toy_batch_size))
    # Inject the mock samples directly, bypassing gen_samples.
    toy_inference.samples_init = True
    toy_inference.y_pred = np.mean(samples, axis=0)
    toy_inference.predict_samps = samples
    toy_inference.y_test = truth
    palette = ["#377eb8", "#4daf4a"]
    corner_fig = corner.corner(
        samples.reshape(-1, 2), bins=20, labels=['x', 'y'], show_titles=False,
        plot_datapoints=False, label_kwargs=dict(fontsize=15),
        levels=[0.68, 0.95], dpi=200, color=palette[1], fill_contours=True,
        range=[[-6, 6], [-6, 6]])
    # Scatter the truth points over the joint (x, y) panel of the corner plot.
    corner_fig.axes[2].plot(truth[:, 0], truth[:, 1], '.', c=palette[0], alpha=0.1)
    true_handle = mlines.Line2D([], [], color=palette[0], label='True Posterior')
    inferred_handle = mlines.Line2D([], [], color=palette[1], label='Inferred Posterior')
    plt.legend(handles=[true_handle, inferred_handle],
               bbox_to_anchor=(0.05, 1.0, 1., .0), loc=4, fontsize=12)
    plt.show()
    calibration_fig = toy_inference.plot_calibration(
        n_perc_points=30, title='',
        legend=['Perfect Calibration', 'Inferred Posterior Calibration'])
    return (corner_fig, calibration_fig)
```
```python
# We start with our offset posterior
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.ones(2)*2
post_cov=np.eye(2)
post_fig, cal_fig = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
post_fig.savefig('../paper/figures/appendix/offset_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/offset_cal.pdf')
```
The posterior we're predicting is offset from the truth, so our model is consistently overconfident. We can repeat the exercise with a posterior that is correctly centered but has a much tighter contour. We still expect our model to be overconfident.
```python
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.eye(2)*0.3
_ = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws = 1000)
```
Once again, our model is overconfident. We can similarly see what happens when our model is underconfident by expanding our contours.
```python
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.eye(2)*3
post_fig, cal_fig = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
post_fig.savefig('../paper/figures/appendix/underconf_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/underconf_cal.pdf')
```
The model posterior here is underconfident - almost 90% of the data falls within the 1 sigma contour. We can look at a more realistic example - a Gaussian posterior with no covariance trying to fit data with covariance.
```python
# We start with our offset posterior
data_mean = np.zeros(2)
data_cov = np.array([[1,0.99],[0.99,1]])
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.diag(np.std(np.random.multivariate_normal(data_mean,data_cov,(toy_batch_size)),axis=0))
_ = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
```
This comes off mostly as overconfident by our network - it's not capturing the extreme covariance in the data, causing the networks contours to assign too little probabilistic weight to the tails.
Another issue our network may have is that the posterior we pick is not sufficiently multimodal to capture the true distribution of the data (or the multimodality is poorly tuned). We can see what this looks like by fitting a full covariance matrix posterior to multimodal data.
```python
# First we'll make a class to generate our comparison
def plot_toy_model_calibration_gm(data_means, data_covs, post_mean, post_cov,
                                  toy_batch_size, ps, n_draws,
                                  fit_guass_data=False):
    """Calibration demo where the truth is a Gaussian mixture.

    Truth points are drawn from a mixture of Gaussians (components
    `data_means`/`data_covs` with weights `ps`), while the inferred
    posterior is a single Gaussian N(post_mean, post_cov). When
    `fit_guass_data` is True, post_mean/post_cov are replaced with the
    empirical mean and diagonal std of the mixture draws. Uses the
    notebook-global `cfg`.

    Returns (corner_fig, calibration_fig).
    """
    toy_inference = bnn_inference.InferenceClass(cfg)
    # Draw each mixture component in proportion to its weight.
    component_draws = []
    for ci in range(len(data_means)):
        component_draws.append(np.random.multivariate_normal(
            data_means[ci], data_covs[ci], (int(toy_batch_size * ps[ci]))))
    truth = np.concatenate(component_draws, axis=0)
    if fit_guass_data == True:
        # Fit a single diagonal Gaussian to the mixture draws.
        post_mean = np.mean(truth, axis=0)
        post_cov = np.diag(np.std(truth, axis=0))
    samples = np.random.multivariate_normal(post_mean, post_cov,
                                            (n_draws, toy_batch_size))
    # Inject the mock samples directly, bypassing gen_samples.
    toy_inference.samples_init = True
    toy_inference.y_pred = np.mean(samples, axis=0)
    toy_inference.predict_samps = samples
    toy_inference.y_test = truth
    palette = ["#377eb8", "#4daf4a"]
    corner_fig = corner.corner(
        samples.reshape((-1, 2)), bins=20, labels=['x', 'y'], show_titles=False,
        plot_datapoints=False, label_kwargs=dict(fontsize=15),
        levels=[0.68, 0.95], dpi=1600, color=palette[1], fill_contours=True,
        range=[[-6, 6], [-6, 6]])
    # Scatter the truth points over the joint (x, y) panel of the corner plot.
    corner_fig.axes[2].plot(truth[:, 0], truth[:, 1], '.', c=palette[0], alpha=0.1)
    true_handle = mlines.Line2D([], [], color=palette[0], label='True Posterior')
    inferred_handle = mlines.Line2D([], [], color=palette[1], label='Inferred Posterior')
    plt.legend(handles=[inferred_handle, true_handle],
               bbox_to_anchor=(0.05, 1.0, 1., .0), loc=4, fontsize=12.0)
    plt.show()
    calibration_fig = toy_inference.plot_calibration(
        n_perc_points=30, title='',
        legend=['Perfect Calibration', 'Inferred Posterior Calibration'])
    return (corner_fig, calibration_fig)
```
```python
# Estimate a single Gaussian from the multimodal data.
data_means = [np.ones(2)*3,np.zeros(2)]
data_covs = [np.array([[0.4,0],[0,0.4]]),np.array([[0.4,0],[0,0.4]])]
ps = [0.9,0.1]
toy_batch_size = 10000
n_draws = 1000
data = []
for dmi in range(len(data_means)):
    # BUG FIX: this previously divided by `len(data_mean)` -- a stale
    # variable left over from an earlier cell -- instead of the mixture
    # list `data_means`. It only worked because both happened to have
    # length 2.
    data.append(np.random.multivariate_normal(data_means[dmi],data_covs[dmi],(toy_batch_size//len(
        data_means))))
data = np.concatenate(data,axis=0)
post_mean = np.mean(data,axis=0)
post_cov=np.diag(np.std(data,axis=0))
post_fig, cal_fig = plot_toy_model_calibration_gm(data_means,data_covs,post_mean,post_cov,toy_batch_size,
                                                  ps,n_draws,fit_guass_data=True)
post_fig.savefig('../paper/figures/appendix/biv_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/biv_cal.pdf')
```
Interestingly, the multimodal data leads to both under and over confidence by our network. In the interior region, corresponding to the principal mode, the toy prediction has slightly too large covariances. In the tails, where our second mode becomes relevant, our single Gaussian prediction is suddenly very underconfident (since it assigns almost no weight to the second mode).
|
swagnercarenaREPO_NAMEovejeroPATH_START.@ovejero_extracted@ovejero-master@demos@Test_Model_Performance.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/box/marker/__init__.py",
"type": "Python"
}
|
import sys

# On Python >= 3.7 submodules are loaded lazily via module-level
# __getattr__/__dir__ (PEP 562); older interpreters fall back to an
# eager import of the same public name.
if sys.version_info < (3, 7):
    from ._line import Line
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._line.Line"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@box@marker@__init__.py@.PATH_END.py
|
{
"filename": "tSplit.py",
"repo_name": "lofar-astron/DP3",
"repo_path": "DP3_extracted/DP3-master/steps/test/integration/tSplit.py",
"type": "Python"
}
|
# Copyright (C) 2021 ASTRON (Netherlands Institute for Radio Astronomy)
# SPDX-License-Identifier: GPL-3.0-or-later
# Append current directory to system path in order to import testconfig
import sys
from subprocess import check_call
import pytest
sys.path.append(".")
import testconfig as tcf
from utils import assert_taql, run_in_tmp_path, untar
"""
Script can be invoked in two ways:
- as standalone from the build/steps/test/integration directory,
using `pytest source/tSplit.py` (extended with pytest options of your choice)
- using ctest, see DP3/steps/test/integration/CMakeLists.txt
"""
MSIN = "tNDPPP-generic.MS"
MSAPPLYBEAM = "tApplyBeam.tab"
@pytest.fixture(autouse=True)
def source_env(run_in_tmp_path):
    """Unpack the input MS and the ApplyBeam reference table for each test.

    Runs automatically for every test in this module; `run_in_tmp_path`
    switches into a per-test temporary directory before extraction.
    """
    untar(f"{tcf.RESOURCEDIR}/{MSIN}.tgz")
    untar(f"{tcf.SRCDIR}/{MSAPPLYBEAM}.tgz")
def test_split():
    """Run DP3's split step and compare both outputs to the reference table.

    The split step forks the pipeline into two output MSs -- one with
    applybeam.usechannelfreq=false and one with =true -- and each result
    is checked against the matching reference column of the tApplyBeam
    table via a TaQL query that must select zero mismatching rows.
    """
    msout_list = ["splitout1.ms", "splitout2.ms"]
    check_call(
        [
            tcf.DP3EXE,
            f"msin={MSIN}",
            "steps=[split]",
            "split.steps=[applybeam,out]",
            "split.replaceparms=[out.name,applybeam.usechannelfreq]",
            f"out.name=[{msout_list[0]},{msout_list[1]}]",
            # Plain string literal: the previous f-string had no
            # placeholders (flake8 F541).
            "applybeam.usechannelfreq=[false,true]",
            "applybeam.invert=true",
        ]
    )
    for msout in msout_list:
        # The reference column depends on which usechannelfreq value
        # produced the output.
        data_column = "DATA_noucf" if msout == "splitout1.ms" else "DATA_ucf"
        # Per-visibility agreement within 8e-5 (NaNs must match too); the
        # selection of violating rows has to be empty.
        taql_command = f"select from {msout} t1, {MSAPPLYBEAM} t2 where not all(near(t1.DATA,t2.{data_column},8e-5) || (isnan(t1.DATA) && isnan(t2.{data_column})))"
        assert_taql(taql_command)
|
lofar-astronREPO_NAMEDP3PATH_START.@DP3_extracted@DP3-master@steps@test@integration@tSplit.py@.PATH_END.py
|
{
"filename": "test_flow_runs.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/deployment/test_flow_runs.py",
"type": "Python"
}
|
import re
from typing import TYPE_CHECKING
from unittest import mock
from uuid import uuid4
import pendulum
import pytest
import respx
from httpx import Response
from prefect import flow
from prefect.context import FlowRunContext
from prefect.deployments import run_deployment
from prefect.server.schemas.core import TaskRunResult
from prefect.settings import (
PREFECT_API_URL,
)
from prefect.tasks import task
from prefect.utilities.slugify import slugify
if TYPE_CHECKING:
from prefect.client.orchestration import PrefectClient
class TestRunDeployment:
    """Tests for `prefect.deployments.run_deployment`.

    Covers flow-run creation by deployment name and id, job variables,
    polling/timeout behaviour (with respx-mocked flow-run reads),
    scheduling options, and parent task-run linkage when called from
    flows and tasks.
    """
    @pytest.fixture
    async def test_deployment(self, prefect_client):
        # Shared fixture: a minimal "foo" flow paired with "foo-deployment".
        flow_id = await prefect_client.create_flow_from_name("foo")
        deployment_id = await prefect_client.create_deployment(
            name="foo-deployment", flow_id=flow_id, parameter_openapi_schema={}
        )
        deployment = await prefect_client.read_deployment(deployment_id)
        return deployment
    async def test_run_deployment_with_ephemeral_api(
        self, prefect_client, test_deployment
    ):
        deployment = test_deployment
        flow_run = await run_deployment(
            f"foo/{deployment.name}",
            timeout=0,
            poll_interval=0,
            client=prefect_client,
        )
        assert flow_run.deployment_id == deployment.id
        assert flow_run.state
    async def test_run_deployment_with_deployment_id_str(
        self,
        test_deployment,
        prefect_client,
    ):
        deployment = test_deployment
        flow_run = await run_deployment(
            f"{deployment.id}",
            timeout=0,
            poll_interval=0,
            client=prefect_client,
        )
        assert flow_run.deployment_id == deployment.id
        assert flow_run.state
    async def test_run_deployment_with_deployment_id_uuid(
        self,
        test_deployment,
        prefect_client,
    ):
        deployment = test_deployment
        flow_run = await run_deployment(
            deployment.id,
            timeout=0,
            poll_interval=0,
            client=prefect_client,
        )
        assert flow_run.deployment_id == deployment.id
        assert flow_run.state
    async def test_run_deployment_with_job_vars_creates_run_with_job_vars(
        self,
        test_deployment,
        prefect_client,
    ):
        # This can be removed once the flow run infra overrides is no longer an experiment
        deployment = test_deployment
        job_vars = {"foo": "bar"}
        flow_run = await run_deployment(
            deployment.id,
            timeout=0,
            job_variables=job_vars,
            client=prefect_client,
        )
        assert flow_run.job_variables == job_vars
        # Re-read from the API to confirm the variables were persisted.
        flow_run = await prefect_client.read_flow_run(flow_run.id)
        assert flow_run.job_variables == job_vars
    async def test_returns_flow_run_on_timeout(
        self,
        test_deployment,
        use_hosted_api_server,
    ):
        deployment = test_deployment
        mock_flowrun_response = {
            "id": str(uuid4()),
            "flow_id": str(uuid4()),
        }
        # Real requests pass through except flow-run reads, which always
        # report SCHEDULED so polling can never observe completion.
        with respx.mock(
            base_url=PREFECT_API_URL.value(), assert_all_mocked=True, using="httpx"
        ) as router:
            router.get("/csrf-token", params={"client": mock.ANY}).pass_through()
            router.get(f"/deployments/name/foo/{deployment.name}").pass_through()
            router.post(f"/deployments/{deployment.id}/create_flow_run").pass_through()
            flow_polls = router.request(
                "GET", re.compile(PREFECT_API_URL.value() + "/flow_runs/.*")
            ).mock(
                return_value=Response(
                    200, json={**mock_flowrun_response, "state": {"type": "SCHEDULED"}}
                )
            )
            flow_run = await run_deployment(
                f"foo/{deployment.name}", timeout=1, poll_interval=0
            )
            assert len(flow_polls.calls) > 0
            assert flow_run.state
    async def test_returns_flow_run_immediately_when_timeout_is_zero(
        self,
        test_deployment,
        use_hosted_api_server,
    ):
        deployment = test_deployment
        mock_flowrun_response = {
            "id": str(uuid4()),
            "flow_id": str(uuid4()),
        }
        with respx.mock(
            base_url=PREFECT_API_URL.value(),
            assert_all_mocked=True,
            assert_all_called=False,
            using="httpx",
        ) as router:
            router.get("/csrf-token", params={"client": mock.ANY}).pass_through()
            router.get(f"/deployments/name/foo/{deployment.name}").pass_through()
            router.post(f"/deployments/{deployment.id}/create_flow_run").pass_through()
            flow_polls = router.request(
                "GET", re.compile(PREFECT_API_URL.value() + "/flow_runs/.*")
            ).mock(
                return_value=Response(
                    200, json={**mock_flowrun_response, "state": {"type": "SCHEDULED"}}
                )
            )
            flow_run = await run_deployment(
                f"foo/{deployment.name}", timeout=0, poll_interval=0
            )
            # timeout=0 must return without ever polling the flow run.
            assert len(flow_polls.calls) == 0
            assert flow_run.state.is_scheduled()
    async def test_returns_flow_run_from_2_dot_0(
        self,
        test_deployment,
        use_hosted_api_server,
    ):
        """
        See https://github.com/PrefectHQ/prefect/issues/15694
        """
        deployment = test_deployment
        mock_flowrun_response = {
            "id": str(uuid4()),
            "flow_id": str(uuid4()),
        }
        # First poll: SCHEDULED; second poll: COMPLETED with legacy
        # Prefect 2.0-style "unpersisted" result data.
        side_effects = [
            Response(
                200, json={**mock_flowrun_response, "state": {"type": "SCHEDULED"}}
            )
        ]
        side_effects.append(
            Response(
                200,
                json={
                    **mock_flowrun_response,
                    "state": {"type": "COMPLETED", "data": {"type": "unpersisted"}},
                },
            )
        )
        with respx.mock(
            base_url=PREFECT_API_URL.value(),
            assert_all_mocked=True,
            assert_all_called=False,
            using="httpx",
        ) as router:
            router.get("/csrf-token", params={"client": mock.ANY}).pass_through()
            router.get(f"/deployments/name/foo/{deployment.name}").pass_through()
            router.post(f"/deployments/{deployment.id}/create_flow_run").pass_through()
            router.request(
                "GET", re.compile(PREFECT_API_URL.value() + "/flow_runs/.*")
            ).mock(side_effect=side_effects)
            flow_run = await run_deployment(
                f"foo/{deployment.name}", timeout=None, poll_interval=0
            )
            assert flow_run.state.is_completed()
            assert flow_run.state.data is None
    async def test_polls_indefinitely(
        self,
        test_deployment,
        use_hosted_api_server,
    ):
        deployment = test_deployment
        mock_flowrun_response = {
            "id": str(uuid4()),
            "flow_id": str(uuid4()),
        }
        # 99 SCHEDULED polls before the terminal COMPLETED response.
        side_effects = [
            Response(
                200, json={**mock_flowrun_response, "state": {"type": "SCHEDULED"}}
            )
        ] * 99
        side_effects.append(
            Response(
                200, json={**mock_flowrun_response, "state": {"type": "COMPLETED"}}
            )
        )
        with respx.mock(
            base_url=PREFECT_API_URL.value(),
            assert_all_mocked=True,
            assert_all_called=False,
            using="httpx",
        ) as router:
            router.get("/csrf-token", params={"client": mock.ANY}).pass_through()
            router.get(f"/deployments/name/foo/{deployment.name}").pass_through()
            router.post(f"/deployments/{deployment.id}/create_flow_run").pass_through()
            flow_polls = router.request(
                "GET", re.compile(PREFECT_API_URL.value() + "/flow_runs/.*")
            ).mock(side_effect=side_effects)
            await run_deployment(
                f"foo/{deployment.name}", timeout=None, poll_interval=0
            )
            # timeout=None keeps polling until a terminal state appears.
            assert len(flow_polls.calls) == 100
    async def test_schedules_immediately_by_default(
        self, test_deployment, use_hosted_api_server
    ):
        deployment = test_deployment
        scheduled_time = pendulum.now("UTC")
        flow_run = await run_deployment(
            f"foo/{deployment.name}",
            timeout=0,
            poll_interval=0,
        )
        assert (flow_run.expected_start_time - scheduled_time).total_seconds() < 1
    async def test_accepts_custom_scheduled_time(
        self, test_deployment, use_hosted_api_server
    ):
        deployment = test_deployment
        scheduled_time = pendulum.now("UTC") + pendulum.Duration(minutes=5)
        flow_run = await run_deployment(
            f"foo/{deployment.name}",
            scheduled_time=scheduled_time,
            timeout=0,
            poll_interval=0,
        )
        assert (flow_run.expected_start_time - scheduled_time).total_seconds() < 1
    async def test_custom_flow_run_names(self, test_deployment, use_hosted_api_server):
        deployment = test_deployment
        flow_run = await run_deployment(
            f"foo/{deployment.name}",
            flow_run_name="a custom flow run name",
            timeout=0,
            poll_interval=0,
        )
        assert flow_run.name == "a custom flow run name"
    async def test_accepts_tags(self, test_deployment):
        deployment = test_deployment
        flow_run = await run_deployment(
            f"foo/{deployment.name}",
            tags=["I", "love", "prefect"],
            timeout=0,
            poll_interval=0,
        )
        assert sorted(flow_run.tags) == ["I", "love", "prefect"]
    async def test_accepts_idempotency_key(self, test_deployment):
        # Two submissions with the same idempotency key must resolve to
        # the same flow run.
        deployment = test_deployment
        flow_run_a = await run_deployment(
            f"foo/{deployment.name}",
            idempotency_key="12345",
            timeout=0,
            poll_interval=0,
        )
        flow_run_b = await run_deployment(
            f"foo/{deployment.name}",
            idempotency_key="12345",
            timeout=0,
            poll_interval=0,
        )
        assert flow_run_a.id == flow_run_b.id
    async def test_links_to_parent_flow_run_when_used_in_flow_by_default(
        self, test_deployment, use_hosted_api_server, prefect_client: "PrefectClient"
    ):
        my_deployment = test_deployment
        @flow
        async def foo():
            return await run_deployment(
                f"foo/{my_deployment.name}",
                timeout=0,
                poll_interval=0,
            )
        parent_state = await foo(return_state=True)
        child_flow_run = await parent_state.result()
        # The child run is tracked through a synthetic parent task run.
        assert child_flow_run.parent_task_run_id is not None
        task_run = await prefect_client.read_task_run(child_flow_run.parent_task_run_id)
        assert task_run.flow_run_id == parent_state.state_details.flow_run_id
        deployment_name = f"foo/{my_deployment.name}"
        assert isinstance(deployment_name, str)
        assert slugify(deployment_name) in task_run.task_key
    async def test_optionally_does_not_link_to_parent_flow_run_when_used_in_flow(
        self, test_deployment, use_hosted_api_server, prefect_client: "PrefectClient"
    ):
        deployment = test_deployment
        @flow
        async def foo():
            return await run_deployment(
                f"foo/{deployment.name}",
                timeout=0,
                poll_interval=0,
                as_subflow=False,
            )
        parent_state = await foo(return_state=True)
        child_flow_run = await parent_state.result()
        assert child_flow_run.parent_task_run_id is None
    @pytest.mark.usefixtures("use_hosted_api_server")
    async def test_links_to_parent_flow_run_when_used_in_task_without_flow_context(
        self, test_deployment, prefect_client
    ):
        """
        Regression test for deployments in a task on Dask and Ray task runners
        which do not have access to the flow run context - https://github.com/PrefectHQ/prefect/issues/9135
        """
        deployment = test_deployment
        @task
        async def yeet_deployment():
            # Simulate a task runner without flow-run context available.
            with mock.patch.object(FlowRunContext, "get", return_value=None):
                assert FlowRunContext.get() is None
                result = await run_deployment(
                    f"foo/{deployment.name}",
                    timeout=0,
                    poll_interval=0,
                )
                return result
        @flow
        async def foo():
            return await yeet_deployment()
        parent_state = await foo(return_state=True)
        child_flow_run = await parent_state.result()
        assert child_flow_run.parent_task_run_id is not None
        task_run = await prefect_client.read_task_run(child_flow_run.parent_task_run_id)
        assert task_run.flow_run_id == parent_state.state_details.flow_run_id
        assert slugify(f"foo/{deployment.name}") in task_run.task_key
    async def test_tracks_dependencies_when_used_in_flow(
        self, test_deployment, use_hosted_api_server, prefect_client, events_pipeline
    ):
        deployment = test_deployment
        @task
        def bar():
            return "hello-world!!"
        @flow
        async def foo():
            upstream_task_state = bar(return_state=True)
            upstream_result = await upstream_task_state.result()
            child_flow_run = await run_deployment(
                f"foo/{deployment.name}",
                timeout=0,
                poll_interval=0,
                parameters={"x": upstream_result},
            )
            return upstream_task_state, child_flow_run
        parent_state = await foo(return_state=True)
        upstream_task_state, child_flow_run = await parent_state.result()
        assert child_flow_run.parent_task_run_id is not None
        task_run = await prefect_client.read_task_run(child_flow_run.parent_task_run_id)
        # The upstream task's result must appear as a tracked input.
        assert task_run.task_inputs == {
            "x": [
                TaskRunResult(
                    input_type="task_run",
                    id=upstream_task_state.state_details.task_run_id,
                )
            ]
        }
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@deployment@test_flow_runs.py@.PATH_END.py
|
{
"filename": "report.md",
"repo_name": "toros-astro/corral",
"repo_path": "corral_extracted/corral-master/docs/qareport_output_examples/report.md",
"type": "Markdown"
}
|
# pipeline Quality Report
- **Created at:** 2017-04-10 20:20:14.275414
- **Corral Version:** 0.2.6
## 1. Summary
- **Tests Success:** `Yes`
- **Tests Ran:** `1`
- **Processors:** `6`
- **Coverage:** `62.63%`
- **Maintainability & Style Errors:** `8`
<!-- -->
- **QA Index:** `8.64%`
- **QA Qualification:** `F`
### 1.1 About The Corral Quality Assurance Index (QAI)
```
QAI = 2 * (TP * (T/PNC) * COV) / (1 + exp(MSE/tau))
Where:
TP: If all tests passes is 1, 0 otherwise.
T: The number of test cases.
    PNC: The number of processors (Loader, Steps and Alerts)
         and commands.
COV: The code coverage (between 0 and 1).
MSE: The Maintainability and Style Errors.
tau: Tolerance of style errors per file
```
**Current Value of Tau:**: `13.00` per file
### 1.2 About The Qualification
The Corral qualification is a quantitative scale based on QAI
- QAI >= 0.00% -- `F`
- QAI >= 60.00% -- `D-`
- QAI >= 63.00% -- `D`
- QAI >= 67.00% -- `D+`
- QAI >= 70.00% -- `C-`
- QAI >= 73.00% -- `C`
- QAI >= 77.00% -- `C+`
- QAI >= 80.00% -- `B-`
- QAI >= 83.00% -- `B`
- QAI >= 87.00% -- `B+`
- QAI >= 90.00% -- `A-`
- QAI >= 93.00% -- `A`
- QAI >= 95.00% -- `A+`
## 2. Full Output
### 2.1 Tests
```
runTest (pipeline.tests.StatisticsCreateAnyNameTest) ... ok
----------------------------------------------------------------------
Ran 1 test in 0.481s
OK
```
---
### 2.2 Coverage
```
Name Stmts Miss Cover
------------------------------------------
pipeline/__init__.py 1 0 100%
pipeline/alerts.py 10 1 90%
pipeline/commands.py 6 1 83%
pipeline/load.py 25 14 44%
pipeline/models.py 34 1 97%
pipeline/pipeline.py 4 0 100%
pipeline/settings.py 17 0 100%
pipeline/steps.py 79 54 32%
pipeline/tests.py 14 0 100%
------------------------------------------
TOTAL 190 71 63%
```
---
### 2.3 MAINTAINABILITY & STYLE
```
Found pep8-style errors.
Please check the Python code style reference: https://www.python.org/dev/peps/pep-0008/
Errors found:
pipeline/alerts.py:51:0: W391 blank line at end of file
pipeline/commands.py:27:0: E302 expected 2 blank lines, found 1
pipeline/settings.py:41:8: E126 continuation line over-indented for hanging indent
pipeline/steps.py:36:37: E225 missing whitespace around operator
pipeline/steps.py:53:43: E225 missing whitespace around operator
pipeline/steps.py:85:43: E225 missing whitespace around operator
pipeline/steps.py:117:43: E225 missing whitespace around operator
pipeline/tests.py:39:41: E225 missing whitespace around operator
pipeline/tests.py:45:56: E225 missing whitespace around operator
```
|
toros-astroREPO_NAMEcorralPATH_START.@corral_extracted@corral-master@docs@qareport_output_examples@report.md@.PATH_END.py
|
{
"filename": "test_stride_tricks.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py3/numpy/lib/tests/test_stride_tricks.py",
"type": "Python"
}
|
import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
assert_equal, assert_array_equal, assert_raises, assert_,
assert_raises_regex, assert_warns,
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to,
broadcast_shapes, sliding_window_view,
)
import pytest
def assert_shapes_correct(input_shapes, expected_shape):
    """Check that broadcasting arrays of `input_shapes` gives `expected_shape`.

    Builds a zero array per input shape, broadcasts them together, and
    asserts that every broadcast result has the expected common shape.
    """
    arrays = [np.zeros(shape) for shape in input_shapes]
    broadcast = broadcast_arrays(*arrays)
    actual_shapes = [arr.shape for arr in broadcast]
    assert_equal(actual_shapes, [expected_shape] * len(arrays))
def assert_incompatible_shapes_raise(input_shapes):
    """Assert that broadcasting arrays with these shapes raises ValueError."""
    arrays = [np.zeros(shape) for shape in input_shapes]
    assert_raises(ValueError, broadcast_arrays, *arrays)
def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
    """Check that broadcast_arrays matches the layout a ufunc produces.

    An all-zero array of `shape0` is added to an arange of `shape1`; the
    sum must equal the broadcast view of the second array. The inputs can
    optionally be transposed and/or flipped first, which exercises
    transposed layouts and negative strides.
    """
    zeros = np.zeros(shape0, dtype=int)
    # multiply.reduce over an empty shape is 1, so shape1 == () yields a
    # single element, as desired.
    count = int(np.multiply.reduce(shape1))
    ramp = np.arange(count).reshape(shape1)
    if transposed:
        zeros, ramp = zeros.T, ramp.T
    if flipped:
        zeros, ramp = zeros[::-1], ramp[::-1]
    # Adding zeros leaves values unchanged, so the ufunc result must be
    # exactly the broadcast view of `ramp`.
    expected = zeros + ramp
    _, ramp_view = broadcast_arrays(zeros, ramp)
    assert_array_equal(expected, ramp_view)
def test_same():
    """Broadcasting two identically-shaped arrays is a no-op."""
    original = np.arange(10)
    twin = np.arange(10)
    broadcast_a, broadcast_b = broadcast_arrays(original, twin)
    assert_array_equal(original, broadcast_a)
    assert_array_equal(twin, broadcast_b)
def test_broadcast_kwargs():
    """broadcast_arrays rejects any keyword argument other than 'subok'."""
    a = np.arange(10)
    b = np.arange(10)
    # 'dtype' is not an accepted keyword, so a TypeError must be raised.
    with assert_raises_regex(TypeError, 'got an unexpected keyword'):
        broadcast_arrays(a, b, dtype='float64')
def test_one_off():
    """A row vector and a column vector broadcast to a full 3x3 matrix."""
    row = np.array([[1, 2, 3]])
    col = np.array([[1], [2], [3]])
    row_b, col_b = broadcast_arrays(row, col)
    expected_row = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    assert_array_equal(expected_row, row_b)
    # The column's broadcast is simply the transpose of the row's.
    assert_array_equal(expected_row.T, col_b)
def test_same_input_shapes():
    """Broadcasting one, two, or three copies of a shape returns that shape."""
    shapes = [
        (),
        (1,),
        (3,),
        (0, 1),
        (0, 3),
        (1, 0),
        (3, 0),
        (1, 3),
        (3, 1),
        (3, 3),
    ]
    for shape in shapes:
        # Single, double, and triple inputs all broadcast to `shape`.
        for copies in (1, 2, 3):
            assert_shapes_correct([shape] * copies, shape)
def test_two_compatible_by_ones_input_shapes():
    # Check that two different input shapes of the same length, but some have
    # ones, broadcast to the correct shape.
    # Each entry is [[shape_a, shape_b], expected_broadcast_shape].
    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_shapes_correct(input_shapes, expected_shape)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_shapes_correct(input_shapes[::-1], expected_shape)
def test_two_compatible_by_prepending_ones_input_shapes():
    # Check that two different input shapes (of different lengths) broadcast
    # to the correct shape.
    # Each entry is [[shape_a, shape_b], expected_broadcast_shape]; shorter
    # shapes are implicitly left-padded with ones during broadcasting.
    data = [
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_shapes_correct(input_shapes, expected_shape)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_shapes_correct(input_shapes[::-1], expected_shape)
def test_incompatible_shapes_raise_valueerror():
    # Check that a ValueError is raised for incompatible shapes.
    # Each entry is a list of shapes that cannot be broadcast together.
    data = [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [(3,), (3,), (4,)],
        [(1, 3, 4), (2, 3, 3)],
    ]
    for input_shapes in data:
        assert_incompatible_shapes_raise(input_shapes)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_incompatible_shapes_raise(input_shapes[::-1])
def test_same_as_ufunc():
    """Check that the broadcast data layout matches what a ufunc produces.

    Each entry pairs two input shapes with their expected broadcast shape;
    the expected shape itself is unused here (it is exercised by the
    shape tests above), so only the input shapes are passed through.
    """
    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        # Bug fix: the original passed a "Shapes: %s %s" message string as
        # the third positional argument here.  Judging by the calls below,
        # that argument is the `transposed` flag, so the truthy string
        # silently turned this into a second transposed test and the plain
        # (untransposed) case was never exercised.  Drop the stray argument.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_same_as_ufunc(input_shapes[1], input_shapes[0])
        # Try them transposed, too.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
        # ... and flipped for non-rank-0 inputs in order to test negative
        # strides.
        if () not in input_shapes:
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
def test_broadcast_to_succeeds():
    """``broadcast_to`` yields the expected values for valid targets."""
    # Each entry: (input array, target shape, expected broadcast result).
    cases = [
        [np.array(0), (0,), np.array(0)],
        [np.array(0), (1,), np.zeros(1)],
        [np.array(0), (3,), np.zeros(3)],
        [np.ones(1), (1,), np.ones(1)],
        [np.ones(1), (2,), np.ones(2)],
        [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
        [np.arange(3), (3,), np.arange(3)],
        [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
        [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
        # test if shape is not a tuple
        [np.ones(0), 0, np.ones(0)],
        [np.ones(1), 1, np.ones(1)],
        [np.ones(1), 2, np.ones(2)],
        # these cases with size 0 are strange, but they reproduce the behavior
        # of broadcasting with ufuncs (see test_same_as_ufunc above)
        [np.ones(1), (0,), np.ones(0)],
        [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
        [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
    ]
    for source, shape, expected in cases:
        assert_array_equal(expected, broadcast_to(source, shape))
def test_broadcast_to_raises():
    """Invalid target shapes (shrinking, mismatched, negative) must raise."""
    cases = [
        [(0,), ()],
        [(1,), ()],
        [(3,), ()],
        [(3,), (1,)],
        [(3,), (2,)],
        [(3,), (4,)],
        [(1, 2), (2, 1)],
        [(1, 1), (1,)],
        [(1,), -1],
        [(1,), (-1,)],
        [(1, 2), (-1, 2)],
    ]
    for orig_shape, target_shape in cases:
        source = np.zeros(orig_shape)
        # Pass the callable with its arguments instead of a lambda.
        assert_raises(ValueError, broadcast_to, source, target_shape)
def test_broadcast_shape():
    """Exercise the internal ``_broadcast_shape`` helper directly."""
    # _broadcast_shape is also covered indirectly by broadcast_arrays and
    # by the public broadcast_shapes function.
    cases = [
        ((), ()),
        (([1, 2],), (2,)),
        ((np.ones((1, 1)),), (1, 1)),
        ((np.ones((1, 1)), np.ones((3, 4))), (3, 4)),
        (tuple([np.ones((1, 2))] * 32), (1, 2)),
        (tuple([np.ones((1, 2))] * 100), (1, 2)),
    ]
    for args, expected in cases:
        assert_equal(_broadcast_shape(*args), expected)
    # regression tests for gh-5862
    assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
    conflicting = [np.ones(2)] * 32 + [np.ones(3)] * 32
    assert_raises(ValueError, _broadcast_shape, *conflicting)
def test_broadcast_shapes_succeeds():
    """The public ``broadcast_shapes`` returns the combined shape."""
    cases = [
        [[], ()],
        [[()], ()],
        [[(7,)], (7,)],
        [[(1, 2), (2,)], (1, 2)],
        [[(1, 1)], (1, 1)],
        [[(1, 1), (3, 4)], (3, 4)],
        [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
        [[(5, 6, 1)], (5, 6, 1)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
        [[(1,), (3,)], (3,)],
        [[2, (3, 2)], (3, 2)],
    ]
    for shapes, expected in cases:
        assert_equal(broadcast_shapes(*shapes), expected)
    # Long argument lists must work as well.
    assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2))
    assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2))
    # regression tests for gh-5862
    assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,))
def test_broadcast_shapes_raises():
    """The public ``broadcast_shapes`` rejects incompatible inputs."""
    cases = [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [(3,), (3,), (4,)],
        [(1, 3, 4), (2, 3, 3)],
        [(1, 2), (3, 1), (3, 2), (10, 5)],
        [2, (2, 3)],
    ]
    for shapes in cases:
        # Pass the callable with its arguments instead of a lambda.
        assert_raises(ValueError, broadcast_shapes, *shapes)
    conflicting = [(2,)] * 32 + [(3,)] * 32
    assert_raises(ValueError, broadcast_shapes, *conflicting)
def test_as_strided():
    """Checks for ``as_strided`` views: basic striding plus structured,
    void, and user-defined dtypes (regression coverage)."""
    # A no-op call must preserve contents (object dtype included).
    a = np.array([None])
    a_view = as_strided(a)
    expected = np.array([None])
    # Bug fix: ``expected`` was previously built but never used -- the
    # assertion rebuilt np.array([None]) inline.  Compare against it.
    assert_array_equal(a_view, expected)

    # Stride over every other element.
    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    expected = np.array([1, 3])
    assert_array_equal(a_view, expected)

    # A zero stride on the first axis repeats the same row.
    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
    expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
    assert_array_equal(a_view, expected)

    # Regression test for gh-5081: structured dtype with an object field.
    dt = np.dtype([('num', 'i4'), ('obj', 'O')])
    a = np.empty((4,), dtype=dt)
    a['num'] = np.arange(1, 5)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    expected_num = [[1, 2, 3, 4]] * 3
    expected_obj = [[None] * 4] * 3
    assert_equal(a_view.dtype, dt)
    assert_array_equal(expected_num, a_view['num'])
    assert_array_equal(expected_obj, a_view['obj'])

    # Make sure that void types without fields are kept unchanged.
    a = np.empty((4,), dtype='V4')
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Make sure that the only type that could fail is properly handled:
    # a structured dtype with an empty field name.
    dt = np.dtype({'names': [''], 'formats': ['V4']})
    a = np.empty((4,), dtype=dt)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Custom (user-defined) dtypes should not be lost (gh-9161).
    r = [rational(i) for i in range(4)]
    a = np.array(r, dtype=rational)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)
    assert_array_equal([r] * 3, a_view)
class TestSlidingWindowView:
    """Behavioral tests for ``sliding_window_view``."""

    def test_1d(self):
        x = np.arange(5)
        windows = sliding_window_view(x, 2)
        assert_array_equal(
            windows,
            np.array([[0, 1],
                      [1, 2],
                      [2, 3],
                      [3, 4]]))

    def test_2d(self):
        i, j = np.ogrid[:3, :4]
        x = 10 * i + j
        windows = sliding_window_view(x, (2, 2))
        assert_array_equal(
            windows,
            np.array([[[[0, 1], [10, 11]],
                       [[1, 2], [11, 12]],
                       [[2, 3], [12, 13]]],
                      [[[10, 11], [20, 21]],
                       [[11, 12], [21, 22]],
                       [[12, 13], [22, 23]]]]))

    def test_2d_with_axis(self):
        i, j = np.ogrid[:3, :4]
        x = 10 * i + j
        # Window only along axis 0.
        windows = sliding_window_view(x, 3, 0)
        assert_array_equal(
            windows,
            np.array([[[0, 10, 20],
                       [1, 11, 21],
                       [2, 12, 22],
                       [3, 13, 23]]]))

    def test_2d_repeated_axis(self):
        i, j = np.ogrid[:3, :4]
        x = 10 * i + j
        # The same axis may be windowed more than once.
        windows = sliding_window_view(x, (2, 3), (1, 1))
        assert_array_equal(
            windows,
            np.array([[[[0, 1, 2],
                        [1, 2, 3]]],
                      [[[10, 11, 12],
                        [11, 12, 13]]],
                      [[[20, 21, 22],
                        [21, 22, 23]]]]))

    def test_2d_without_axis(self):
        i, j = np.ogrid[:4, :4]
        x = 10 * i + j
        windows = sliding_window_view(x, (2, 3))
        assert_array_equal(
            windows,
            np.array([[[[0, 1, 2], [10, 11, 12]],
                       [[1, 2, 3], [11, 12, 13]]],
                      [[[10, 11, 12], [20, 21, 22]],
                       [[11, 12, 13], [21, 22, 23]]],
                      [[[20, 21, 22], [30, 31, 32]],
                       [[21, 22, 23], [31, 32, 33]]]]))

    def test_errors(self):
        i, j = np.ogrid[:4, :4]
        x = 10 * i + j
        with pytest.raises(ValueError, match='cannot contain negative values'):
            sliding_window_view(x, (-1, 3))
        with pytest.raises(
                ValueError,
                match='must provide window_shape for all dimensions of `x`'):
            sliding_window_view(x, (1,))
        with pytest.raises(
                ValueError,
                match='Must provide matching length window_shape and axis'):
            sliding_window_view(x, (1, 3, 4), axis=(0, 1))
        with pytest.raises(
                ValueError,
                match='window shape cannot be larger than input array'):
            sliding_window_view(x, (5, 5))

    def test_writeable(self):
        x = np.arange(5)
        # Read-only views reject assignment...
        view = sliding_window_view(x, 2, writeable=False)
        assert_(not view.flags.writeable)
        with pytest.raises(
                ValueError,
                match='assignment destination is read-only'):
            view[0, 0] = 3
        # ...while writeable views propagate writes back to the base array.
        view = sliding_window_view(x, 2, writeable=True)
        assert_(view.flags.writeable)
        view[0, 1] = 3
        assert_array_equal(x, np.array([0, 3, 2, 3, 4]))

    def test_subok(self):
        class MyArray(np.ndarray):
            pass

        x = np.arange(5).view(MyArray)
        assert_(not isinstance(sliding_window_view(x, 2, subok=False),
                               MyArray))
        assert_(isinstance(sliding_window_view(x, 2, subok=True), MyArray))
        # subok defaults to False.
        assert_(not isinstance(sliding_window_view(x, 2), MyArray))
def as_strided_writeable():
    """Check that ``as_strided`` honours the ``writeable`` flag."""
    # NOTE(review): this function lacks the ``test_`` prefix, so pytest will
    # not collect it as a test -- presumably it should be named
    # ``test_as_strided_writeable``; confirm before renaming.
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    # NOTE(review): the view above is immediately rebuilt; presumably this
    # only checks that the call itself succeeds on a readonly base.
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable)
class VerySimpleSubClass(np.ndarray):
    """Minimal ndarray subclass: no extra state, no ``__array_finalize__``."""

    def __new__(cls, *args, **kwargs):
        base = np.array(*args, subok=True, **kwargs)
        return base.view(cls)
class SimpleSubClass(VerySimpleSubClass):
    """ndarray subclass that carries an ``info`` attribute through views."""

    def __new__(cls, *args, **kwargs):
        obj = np.array(*args, subok=True, **kwargs).view(cls)
        obj.info = 'simple'
        return obj

    def __array_finalize__(self, obj):
        # Views and copies append ' finalized' to the parent's info string.
        self.info = getattr(obj, 'info', '') + ' finalized'
def test_subclasses():
    """Subclasses are preserved only when ``subok=True`` is requested."""
    # as_strided drops the subclass by default...
    a = VerySimpleSubClass([1, 2, 3, 4])
    assert_(type(a) is VerySimpleSubClass)
    view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    assert_(type(view) is np.ndarray)
    # ...and keeps it with subok=True.
    view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(view) is VerySimpleSubClass)

    # If the subclass defines __array_finalize__, it must be invoked.
    a = SimpleSubClass([1, 2, 3, 4])
    view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(view) is SimpleSubClass)
    assert_(view.info == 'simple finalized')

    # broadcast_arrays follows the same subok contract.
    b = np.arange(len(a)).reshape(-1, 1)
    view_a, view_b = broadcast_arrays(a, b)
    assert_(type(view_a) is np.ndarray)
    assert_(type(view_b) is np.ndarray)
    assert_(view_a.shape == view_b.shape)
    view_a, view_b = broadcast_arrays(a, b, subok=True)
    assert_(type(view_a) is SimpleSubClass)
    assert_(view_a.info == 'simple finalized')
    assert_(type(view_b) is np.ndarray)
    assert_(view_a.shape == view_b.shape)

    # ...and so does broadcast_to.
    shape = (2, 4)
    view_a = broadcast_to(a, shape)
    assert_(type(view_a) is np.ndarray)
    assert_(view_a.shape == shape)
    view_a = broadcast_to(a, shape, subok=True)
    assert_(type(view_a) is SimpleSubClass)
    assert_(view_a.info == 'simple finalized')
    assert_(view_a.shape == shape)
def test_writeable():
    """Writeability contract of broadcast_to / broadcast_arrays.

    broadcast_to returns readonly views; broadcast_arrays results are
    writeable for backwards compatibility but warn on write while a
    deprecation period is pending.
    """
    # broadcast_to should return a readonly array
    original = np.array([1, 2, 3])
    result = broadcast_to(original, (2, 3))
    assert_equal(result.flags.writeable, False)
    assert_raises(ValueError, result.__setitem__, slice(None), 0)

    # but the result of broadcast_arrays needs to be writeable, to
    # preserve backwards compatibility
    for is_broadcast, results in [(False, broadcast_arrays(original,)),
                                  (True, broadcast_arrays(0, original))]:
        for result in results:
            # This will change to False in a future version
            if is_broadcast:
                # Reading the flag itself warns (FutureWarning), and the
                # first write warns (DeprecationWarning).
                with assert_warns(FutureWarning):
                    assert_equal(result.flags.writeable, True)
                with assert_warns(DeprecationWarning):
                    result[:] = 0
                # Warning not emitted, writing to the array resets it
                assert_equal(result.flags.writeable, True)
            else:
                # No warning:
                assert_equal(result.flags.writeable, True)

    for results in [broadcast_arrays(original),
                    broadcast_arrays(0, original)]:
        for result in results:
            # resets the warn_on_write DeprecationWarning
            result.flags.writeable = True
            # check: no warning emitted
            assert_equal(result.flags.writeable, True)
            result[:] = 0

    # keep readonly input readonly
    original.flags.writeable = False
    _, result = broadcast_arrays(0, original)
    assert_equal(result.flags.writeable, False)

    # regression test for GH6491: zero-strided input must broadcast cleanly
    shape = (2,)
    strides = [0]
    tricky_array = as_strided(np.array(0), shape, strides)
    other = np.zeros((1,))
    first, second = broadcast_arrays(tricky_array, other)
    assert_(first.shape == second.shape)
def test_writeable_memoryview():
    # The result of broadcast_arrays exports as a non-writeable memoryview
    # because otherwise there is no good way to opt in to the new behaviour
    # (i.e. you would need to set writeable to False explicitly).
    # See gh-13929.
    original = np.array([1, 2, 3])
    cases = [(False, broadcast_arrays(original,)),
             (True, broadcast_arrays(0, original))]
    for is_broadcast, results in cases:
        for result in results:
            # This will change to False in a future version.
            readonly = memoryview(result).readonly
            if is_broadcast:
                # memoryview(result, writable=True) would give a warning,
                # but that cannot be exercised through the Python API.
                assert readonly
            else:
                assert not readonly
def test_reference_types():
    """broadcast_to / broadcast_arrays must also handle object arrays."""
    source = np.array('a', dtype=object)
    expected = np.array(['a'] * 3, dtype=object)
    assert_array_equal(expected, broadcast_to(source, (3,)))
    broadcasted, _ = broadcast_arrays(source, np.ones(3))
    assert_array_equal(expected, broadcasted)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py3@numpy@lib@tests@test_stride_tricks.py@.PATH_END.py
|
{
"filename": "poly.py",
"repo_name": "lpsinger/ligo.skymap",
"repo_path": "ligo.skymap_extracted/ligo.skymap-main/ligo/skymap/plot/poly.py",
"type": "Python"
}
|
#
# Copyright (C) 2012-2024 Leo Singer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Plotting tools for drawing polygons."""
import numpy as np
import healpy as hp
import shapely
from .angle import reference_angle, wrapped_angle
__all__ = ('subdivide_vertices', 'cut_dateline',
'cut_prime_meridian', 'make_rect_poly')
def subdivide_vertices(vertices, subdivisions):
    """Subdivide a closed polygon by linear interpolation along each edge.

    For every pair of consecutive vertices (the first vertex pairs with
    the last, closing the polygon), emit ``subdivisions`` linearly
    interpolated points running from the previous vertex toward the
    current one (endpoint excluded).  The result therefore has
    ``subdivisions * len(vertices)`` rows.
    """
    n, ndim = len(vertices), vertices.shape[1]
    out = np.empty((subdivisions * n, ndim))
    # Column of interpolation fractions 0, 1/s, ..., 1, repeated per dim.
    frac = np.atleast_2d(
        np.arange(subdivisions + 1, dtype=float) / subdivisions).T.repeat(
        ndim, 1)
    for i in range(n):
        start = np.expand_dims(vertices[i - 1, :], 0).repeat(subdivisions, 0)
        stop = np.expand_dims(vertices[i, :], 0).repeat(subdivisions, 0)
        out[i * subdivisions:(i + 1) * subdivisions] = (
            frac[:0:-1, :] * start + frac[:-1, :] * stop)
    return out
def cut_dateline(vertices):
    """Cut a polygon across the dateline, possibly splitting it into multiple
    polygons.  Vertices consist of (longitude, latitude) pairs where longitude
    is always given in terms of a reference angle (between -π and π).

    This routine is not meant to cover all possible cases; it will only work
    for convex polygons that extend over less than a hemisphere.

    Examples
    --------
    >>> cut_dateline(np.asarray([[3, 0.1],
    ...                          [4, 0.1],
    ...                          [4, -0.1],
    ...                          [3, -0.1],
    ...                          [3, 0.1]]))
    [array([[-2.28318531,  0.1       ],
           [-2.28318531, -0.1       ],
           [-3.14159265, -0.1       ],
           [-3.14159265,  0.1       ],
           [-2.28318531,  0.1       ]]),
     array([[ 3.14159265,  0.1       ],
           [ 3.14159265, -0.1       ],
           [ 3.        , -0.1       ],
           [ 3.        ,  0.1       ],
           [ 3.14159265,  0.1       ]])]
    """
    # Shift longitudes by π so that the dateline maps onto the prime
    # meridian, delegate the actual cutting, then shift back.
    shifted = vertices.copy()
    shifted[:, 0] += np.pi
    pieces = cut_prime_meridian(shifted)
    for piece in pieces:
        piece[:, 0] -= np.pi
    return pieces
def cut_prime_meridian(vertices):
    """Cut a polygon across the prime meridian, possibly splitting it into
    multiple polygons.  Vertices consist of (longitude, latitude) pairs where
    longitude is always given in terms of a wrapped angle (between 0 and 2π).

    This routine is not meant to cover all possible cases; it will only work
    for convex polygons that extend over less than a hemisphere.

    Examples
    --------
    >>> cut_prime_meridian(np.asarray([[6, 0.1],
    ...                                [7, 0.1],
    ...                                [7, -0.1],
    ...                                [6, -0.1],
    ...                                [6, 0.1]]))
    [array([[ 0.71681469,  0.1       ],
           [ 0.71681469, -0.1       ],
           [ 0.        , -0.1       ],
           [ 0.        ,  0.1       ],
           [ 0.71681469,  0.1       ]]),
     array([[ 6.28318531,  0.1       ],
           [ 6.28318531, -0.1       ],
           [ 6.        , -0.1       ],
           [ 6.        ,  0.1       ],
           [ 6.28318531,  0.1       ]])]
    """
    # Ensure that the list of vertices does not contain a repeated endpoint.
    if (vertices[0] == vertices[-1]).all():
        vertices = vertices[:-1]
    # Ensure that the longitudes are wrapped from 0 to 2π.
    vertices = np.column_stack((wrapped_angle(vertices[:, 0]), vertices[:, 1]))
    # Test if the segment consisting of points i-1 and i crosses the meridian.
    #
    # If the two longitudes are in [0, 2π), then the shortest arc connecting
    # them crosses the meridian if the difference of the angles is greater
    # than π.
    phis = vertices[:, 0]
    phi0, phi1 = np.sort(np.vstack((np.roll(phis, 1), phis)), axis=0)
    crosses_meridian = (phi1 - phi0 > np.pi)
    # Count the number of times that the polygon crosses the meridian.
    meridian_crossings = np.sum(crosses_meridian)
    if meridian_crossings == 0:
        # There were zero meridian crossings, so we can use the
        # original vertices as is.
        out_vertices = [vertices]
    elif meridian_crossings == 1:
        # There was one meridian crossing, so the polygon encloses the pole.
        # Any meridian-crossing edge has to be extended
        # into a curve following the nearest polar edge of the map.
        i, = np.flatnonzero(crosses_meridian)
        v0 = vertices[i - 1]
        v1 = vertices[i]
        # Find the latitude at which the meridian crossing occurs by
        # linear interpolation (weighted by each endpoint's angular
        # distance to the meridian).
        delta_lon = abs(reference_angle(v1[0] - v0[0]))
        lat = (abs(reference_angle(v0[0])) / delta_lon * v0[1] +
               abs(reference_angle(v1[0])) / delta_lon * v1[1])
        # FIXME: Use this simple heuristic to decide which pole to enclose.
        sign_lat = np.sign(np.sum(vertices[:, 1]))
        # Find the closer of the left or the right map boundary for
        # each vertex in the line segment.
        lon_0 = 0. if v0[0] < np.pi else 2 * np.pi
        lon_1 = 0. if v1[0] < np.pi else 2 * np.pi
        # Set the output vertices to the polar cap plus the original
        # vertices (detour: crossing point -> pole -> pole -> crossing
        # point on the other boundary).
        out_vertices = [
            np.vstack((
                vertices[:i],
                [[lon_0, lat],
                 [lon_0, sign_lat * np.pi / 2],
                 [lon_1, sign_lat * np.pi / 2],
                 [lon_1, lat]],
                vertices[i:]))]
    elif meridian_crossings == 2:
        # Since the polygon is assumed to be convex, if there is an even number
        # of meridian crossings, we know that the polygon does not enclose
        # either pole.  Then we can use ordinary Euclidean polygon intersection
        # algorithms.
        out_vertices = []
        # Construct polygon representing map boundaries.
        frame_poly = shapely.geometry.Polygon(np.asarray([
            [0., 0.5 * np.pi],
            [0., -0.5 * np.pi],
            [2 * np.pi, -0.5 * np.pi],
            [2 * np.pi, 0.5 * np.pi]]))
        # Intersect with polygon re-wrapped to lie in [-π, π) or [π, 3π),
        # so each of the two pieces falls inside the frame exactly once.
        for shift in [0, 2 * np.pi]:
            poly = shapely.geometry.Polygon(np.column_stack((
                reference_angle(vertices[:, 0]) + shift, vertices[:, 1])))
            intersection = poly.intersection(frame_poly)
            if intersection:
                assert isinstance(intersection, shapely.geometry.Polygon)
                assert intersection.is_simple
                out_vertices += [
                    shapely.get_coordinates(intersection.exterior)]
    else:
        # There were more than two intersections.  Not implemented!
        raise NotImplementedError('The polygon intersected the map boundaries '
                                  'two or more times, so it is probably not '
                                  'simple and convex.')
    # Done!
    return out_vertices
def make_rect_poly(width, height, theta, phi, subdivisions=10):
    """Create a Polygon patch representing a rectangle with half-angles width
    and height rotated from the north pole to (theta, phi).
    """
    # Half-angle extents (degrees) -> Cartesian offsets in the tangent plane.
    half_w = np.sin(np.deg2rad(width))
    half_h = np.sin(np.deg2rad(height))
    # Corner vertices of the rectangle, with each edge subdivided.
    corners = np.asarray(
        [[-half_w, -half_h], [half_w, -half_h],
         [half_w, half_h], [-half_w, half_h]])
    verts = subdivide_vertices(corners, subdivisions)
    # Lift onto the unit sphere: choose z so that each vertex has |v| == 1.
    verts = np.hstack(
        (verts, np.sqrt(1. - np.expand_dims(np.square(verts).sum(1), 1))))
    # Rotate from the pole to the requested (theta, phi).
    verts = np.dot(verts, hp.rotator.euler_matrix_new(phi, theta, 0, Y=True))
    # Back to spherical polar coordinates.
    thetas, phis = hp.vec2ang(verts)
    # Return list of vertices as (wrapped longitude, latitude) pairs.
    return np.column_stack((wrapped_angle(phis), 0.5 * np.pi - thetas))
|
lpsingerREPO_NAMEligo.skymapPATH_START.@ligo.skymap_extracted@ligo.skymap-main@ligo@skymap@plot@poly.py@.PATH_END.py
|
{
"filename": "test_core_resources.py",
"repo_name": "simonsobs/sotodlib",
"repo_path": "sotodlib_extracted/sotodlib-master/tests/test_core_resources.py",
"type": "Python"
}
|
import os
import unittest
import json
import shutil
import pytest
from sotodlib.core.resources import get_local_file
class TestCoreResources(unittest.TestCase):

    def test_get_local_file(self):
        """Exercise get_local_file() with and without resource overrides."""
        # The cached-download path is network dependent, so it stays
        # disabled here (it would fetch de421.bsp into ~/.sotodlib):
        # t = get_local_file("de421.bsp", cache=True)
        # expected_path = os.path.join(
        #     os.path.expanduser("~"), ".sotodlib/filecache/de421.bsp"
        # )
        # self.assertEqual(expected_path, t)
        # shutil.rmtree(os.path.join(os.path.expanduser("~"), ".sotodlib/"))

        # With no matching override, the file resolves under /tmp.
        os.environ["SOTODLIB_RESOURCES"] = json.dumps(
            {"someotherfile": "file://somepath/otherfile"}
        )
        self.assertEqual("/tmp/de421.bsp",
                         get_local_file("de421.bsp", cache=False))

        # A matching file:// override wins over the default location.
        os.environ["SOTODLIB_RESOURCES"] = json.dumps(
            {"de421.bsp": "file://somepath/de421.bsp"}
        )
        self.assertEqual("somepath/de421.bsp", get_local_file("de421.bsp"))

        # Unknown resources raise.
        with pytest.raises(RuntimeError):
            get_local_file("doesnotexist.file")
|
simonsobsREPO_NAMEsotodlibPATH_START.@sotodlib_extracted@sotodlib-master@tests@test_core_resources.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "cmbant/CosmoMC",
"repo_path": "CosmoMC_extracted/CosmoMC-master/python/paramgrid/__init__.py",
"type": "Python"
}
|
__author__ = 'Antony Lewis'
|
cmbantREPO_NAMECosmoMCPATH_START.@CosmoMC_extracted@CosmoMC-master@python@paramgrid@__init__.py@.PATH_END.py
|
{
"filename": "_hoverinfosrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/candlestick/_hoverinfosrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``hoverinfosrc`` property of candlestick traces."""

    def __init__(self, plotly_name="hoverinfosrc", parent_name="candlestick", **kwargs):
        # Callers may override edit_type; default to "none".
        edit_type = kwargs.pop("edit_type", "none")
        super(HoverinfosrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@candlestick@_hoverinfosrc.py@.PATH_END.py
|
{
"filename": "custom_kernels.ipynb",
"repo_name": "exoclam/gaspery",
"repo_path": "gaspery_extracted/gaspery-main/tutorials/custom_kernels.ipynb",
"type": "Jupyter Notebook"
}
|
### Custom kernels
Let's say you don't want to be stuck with vanilla quasi-periodic Gaussian Process kernels to describe your correlated noise. We want to be agnostic about your noise so that we can generalize to diverse science use cases, after all!
Enter: tinygp (https://tinygp.readthedocs.io/en/stable/; Dan Foreman-Mackey and friends). We provide an interface with tinygp -- simply define your correlated noise kernel in tinygp, and then pass the kernel object into the gaspery covariance matrix class.
```python
import numpy as np
import scipy
print(np.__version__)
print(scipy.__version__)
import pandas as pd
import random
import exoplanet
import astropy
import pymc3
import pymc3_ext
import tinygp
from tinygp import kernels, GaussianProcess
from numpy.linalg import inv, det, solve, cond
from tqdm import tqdm
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib
import jax
import jax.numpy as jnp
from jax import grad, jit, vmap
#from jax import random
from gaspery import calculate_fi, strategies, utils
```
1.22.3
1.7.3
```python
import matplotlib.pylab as pylab
pylab_params = {'legend.fontsize': 'large',
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'large',
'ytick.labelsize':'large'}
pylab.rcParams.update(pylab_params)
path = '/Users/chrislam/Desktop/gaspery/'
```
List Planet Class parameters
```python
### Planet parameters
p = 8.46 # orbital period, days
K = 850 # cm/s
T0 = 2458651.993 # central transit time, in BJD, on 19 June 2019
### choose start time as date of this writing
start = '2023-03-01T10:00:00'
start = Time(start, format='isot', scale='utc').jd
```
List Star Class parameters, but these won't necessarily go into a quasiperiodic GP kernel.
```python
### correlated noise parameters, from Klein+ 2021 for AU Mic
Prot = 4.86 # rotation period, days
Tau = 100/np.sqrt(2) # active region lifetime; days
eta = 0.4/np.sqrt(2) # 0.1, 0.3, 0.58, 0.9 # smoothing parameter
sigma_qp_rv = 47 # modified Jeffreys prior +11, -8 [m/s]
sigma_wn_rv = 5 # photon noise level [m/s]
params = [sigma_wn_rv, Tau, eta, Prot, sigma_qp_rv]
theta = [K, p, T0]
```
Define an observing strategy
```python
# build strategy aka time series of observations
cadence = 1 # observe once a day every day
n_obs = 30
strategy = strategies.Strategy(n_obs = n_obs, start = start, offs=[], dropout=0.)
strat = strategy.gappy(cadence)
```
#### Define a custom kernel with tinygp
Here we re-invent the wheel by custom defining a quasi-periodic kernel. But you can imagine truly going wild with your own kernel, if your use case calls for going beyond what the tinygp toolkit gives you!
```python
class Quasiperiodic(tinygp.kernels.Kernel):
def __init__(self, Tau, eta, Prot, sigma_qp_rv):
self.sigma_wn_rv = sigma_wn_rv
self.Tau = Tau
self.eta = eta
self.Prot = Prot
self.sigma_qp_rv = sigma_qp_rv
def evaluate(self, X1, X2):
tdiff = jnp.abs(X1 - X2)
term1 = (tdiff**2) / (2*self.Tau**2)
term2 = (1/(2*self.eta**2)) * (jnp.sin(jnp.pi * (tdiff)/self.Prot))**2
arg = -term1 - term2
k = jnp.exp(arg)
K = self.sigma_qp_rv**2 * k
return K
def build_gp(params, strat):
kernel = Quasiperiodic(
params[1], params[2], params[3], params[4] # omit the first param, which is for white noise and will be applied later
)
return kernel
# instantiate kernel object
kernel = build_gp(params, strat)
# call kernel on observation strategy time series to get covariance matrix with correlated noise terms only
k = kernel(strat, strat)
# add white noise to correlated noise kernel
k = k * sigma_wn_rv**2 * jnp.diag(np.ones(len(strat)))
k += 1e-6
```
```python
sigma_ks_qp = []
for n_obs in tqdm(range(100)[4:]):
# instantiate Star object in order to feed covariance matrix with white/correlated noise
star = calculate_fi.Star(sigma_wn_rv = sigma_wn_rv, Tau = Tau, eta = eta,
Prot = Prot, sigma_qp_rv = sigma_qp_rv)
# populate list of parameters to feed into cov_matrix_jax()
params = star.param_list()
# instantiate Planets object in order to feed Fisher Info calculation machinery
planet = calculate_fi.Planets(K = K, p = p, T0 = T0)
# populate list of parameters to feed into clam_jax_fim()
theta = planet.theta_list()
# instantiate Strategy object in order to build time series of observations
strategy = strategies.Strategy(n_obs = n_obs, start = start, offs=[], dropout=0.)
# build strategy aka time series of observations
strat = strategy.gappy(cadence)
# build covariance matrix, characterized by a custom quasi-periodic noise model of the stellar signal
kernel = build_gp(params, strat)
#sigma = calculate_fi.cov_matrix_jax(strat, params) # non-object-oriented
#sigma = star.cov_matrix(strat) # object-oriented but w/manual QP GP kernel
sigma = star.cov_matrix_general(strat, kernel) # object-oriented with custom kernels
sigma += 1e-6
# populate arguments for Fisher Info calculator
args = np.array(strat), sigma, jnp.array(theta, dtype=float)
# calculate FI
fim = calculate_fi.clam_jax_fim(*args).block_until_ready()
# invert FI matrix
inv_fim = inv(fim)
# top left element of matrix corresponds with RV semi-amplitude, K
sigma_k = np.sqrt(inv_fim)[0][0]
sigma_ks_qp.append(sigma_k)
```
0%| | 0/96 [00:00<?, ?it/s]/var/folders/h2/sp_lfvz5515bhg_y92psw7f80000gn/T/ipykernel_88168/2798112378.py:41: RuntimeWarning: invalid value encountered in sqrt
sigma_k = np.sqrt(inv_fim)[0][0]
100%|████████████████████████████████████████████████████████████████| 96/96 [00:00<00:00, 187.71it/s]
```python
plt.plot(np.arange(len(sigma_ks_qp))+5, sigma_ks_qp, label='calculated $\sigma_K$, correlated noise')
plt.xlabel('number of observations')
plt.ylabel(r"$\sigma_k$[m/s]")
plt.title(f"cadence: {cadence} day(s)")
plt.legend()
#plt.ylim([0,200])
plt.show()
```

And again with an out-of-the-box quasi-periodic GP kernel from tinygp.
```python
sigma_ks_qp = []
for n_obs in tqdm(range(100)[4:]):
# instantiate Star object in order to feed covariance matrix with white/correlated noise
star = calculate_fi.Star(sigma_wn_rv = sigma_wn_rv, Tau = Tau, eta = eta,
Prot = Prot, sigma_qp_rv = sigma_qp_rv)
# populate list of parameters to feed into cov_matrix_jax()
params = star.param_list()
# instantiate Planets object in order to feed Fisher Info calculation machinery
planet = calculate_fi.Planets(K = K, p = p, T0 = T0)
# populate list of parameters to feed into clam_jax_fim()
theta = planet.theta_list()
# instantiate Strategy object in order to build time series of observations
strategy = strategies.Strategy(n_obs = n_obs, start = start, offs=[], dropout=0.)
# build strategy aka time series of observations
strat = strategy.gappy(cadence)
# build covariance matrix, characterized by a correlated noise model of the stellar signal
kernel = kernels.ExpSineSquared(scale=Prot, gamma=1/(2*eta**2)) # first term of exponential
kernel *= kernels.ExpSquared(scale=Tau) # other term of exponential
kernel *= sigma_qp_rv**2 # multiply by scalar
#sigma = calculate_fi.cov_matrix_jax(strat, params) # non-object-oriented
#sigma = star.cov_matrix(strat) # object-oriented but w/manual QP GP kernel
sigma = star.cov_matrix_general(strat, kernel) # object-oriented with custom kernels
sigma += 1e-6
#print(strat, sigma)
#fadsfa
# populate arguments for Fisher Info calculator
args = np.array(strat), sigma, jnp.array(theta, dtype=float)
# calculate FI
fim = calculate_fi.clam_jax_fim(*args).block_until_ready()
# invert FI matrix
inv_fim = inv(fim)
# top left element of matrix corresponds with RV semi-amplitude, K
sigma_k = np.sqrt(inv_fim)[0][0]
sigma_ks_qp.append(sigma_k)
```
0%| | 0/96 [00:00<?, ?it/s]/var/folders/h2/sp_lfvz5515bhg_y92psw7f80000gn/T/ipykernel_88168/241485815.py:46: RuntimeWarning: invalid value encountered in sqrt
sigma_k = np.sqrt(inv_fim)[0][0]
100%|█████████████████████████████████████████████████████████████████| 96/96 [00:05<00:00, 16.13it/s]
```python
plt.plot(np.arange(len(sigma_ks_qp))+5, sigma_ks_qp, label='calculated $\sigma_K$, correlated noise')
plt.xlabel('number of observations')
plt.ylabel(r"$\sigma_k$[m/s]")
plt.title(f"cadence: {cadence} day(s)")
plt.legend()
#plt.ylim([0,200])
plt.show()
```

Except for a dropped point where the covariance matrix based on the custom kernel is singular (and thus can't be inverted), the custom quasi-periodic Gaussian Process kernel matches the result from the out-of-the-box quasi-periodic GP kernel.
```python
```
|
exoclamREPO_NAMEgasperyPATH_START.@gaspery_extracted@gaspery-main@tutorials@custom_kernels.ipynb@.PATH_END.py
|
{
"filename": "_xaxes.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/grid/_xaxes.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XaxesValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``layout.grid.xaxes`` info-array attribute."""

    def __init__(self, plotly_name="xaxes", parent_name="layout.grid", **kwargs):
        # Schema defaults for this attribute; each may be overridden by the
        # caller via kwargs.
        default_items = {
            "editType": "plot",
            "valType": "enumerated",
            "values": ["/^x([2-9]|[1-9][0-9]+)?( domain)?$/", ""],
        }
        edit_type = kwargs.pop("edit_type", "plot")
        free_length = kwargs.pop("free_length", True)
        items = kwargs.pop("items", default_items)
        super(XaxesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            free_length=free_length,
            items=items,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@grid@_xaxes.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "NASA-Planetary-Science/sbpy",
"repo_path": "sbpy_extracted/sbpy-main/sbpy/activity/tests/__init__.py",
"type": "Python"
}
|
NASA-Planetary-ScienceREPO_NAMEsbpyPATH_START.@sbpy_extracted@sbpy-main@sbpy@activity@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "README.md",
"repo_name": "mattkjames7/libinternalfield",
"repo_path": "libinternalfield_extracted/libinternalfield-main/test/README.md",
"type": "Markdown"
}
|
# libinternalfield
This is a C++ library for various internal magnetic field models which use spherical harmonics.
## Dependencies
The following things are required for building this library:
- Python 3
- numpy
- make
- g++
- binutils
## Building
Clone the repo and build in Linux or Mac OS:
```bash
git clone https://github.com/mattkjames7/libinternalfield.git
cd libinternalfield
make
#optionally install system wide
sudo make install
```
This will create a library file ```libinternalfield.so``` (`.dylib` in Mac, `.dll` in Windows). Installing system wide will place the library file in `/usr/local/lib` and the header files `internalfield.h` (for C++) and `internalfieldc.h` (for C) in `/usr/local/include` by default.
## Supported Models
Model coefficients are stored in `libinternalfield/coeffs/` as `name.dat` files, where `name` is the name of the model. Each file contains four columns:
1. Parameter string ("*g*" or "*h*")
2. Polynomial degree (*n*, integer)
3. Polynomial order (*m*, integer)
4. Magnitude (in nT, float or integer)
Any correctly formatted `.dat` file placed within this folder will automatically be included within the library when it is compiled. Any additional models will be accessible using the `name` from the `.dat` file as the model string.
Here is a list of the currently supported models (more will most likely be added):
### Mercury
| Model | C String | Maximum Degree | Default Degree | Reference |
| ------------------------------- | ------------------- | -------------- | -------------- | --------------------- |
| Ness 1975 | `ness1975` | 1 | 1 | Ness et al., 1975 |
| Anderson 2010 Dipole | `anderson2010d` | 1 | 1 | Anderson et al., 2010 |
| Anderson 2010 Dipole + SHA | `anderson2010dsha` | 1 | 1 | Anderson et al., 2010 |
| Anderson 2010 Dipole + TS04 | `anderson2010dts04` | 1 | 1 | Anderson et al., 2010 |
| Anderson 2010 Quadrupole | `anderson2010q` | 2 | 2 | Anderson et al., 2010 |
| Anderson 2010 Quadrupole + SHA | `anderson2010qsha` | 2 | 2 | Anderson et al., 2010 |
| Anderson 2010 Quadrupole + TS04 | `anderson2010qts04` | 2 | 2 | Anderson et al., 2010 |
| Uno 2009 | `uno2009` | 8 | 8 | Uno et al., 2009 |
| Uno 2009 SVD | `uno2009svd` | 2 | 2 | Uno et al., 2009 |
| Thebault 2018 M1 | `thebault2018m1` | 5 | 5 | Thebault et al., 2018 |
| Thebault 2018 M2 | `thebault2018m2` | 5 | 5 | Thebault et al., 2018 |
| Thebault 2018 M3 | `thebault2018m3` | 5 | 5 | Thebault et al., 2018 |
### Earth
There is an IGRF model for Earth's magnetic field for every 5 years, starting in 1900 and ending 2025. A new object will be created soon which will allow the interpolation between each of the IGRF models.
| Model | C String | Maximum Degree | Default Degree | Reference |
| --------------------- | ------------------------ | -------------- | -------------- | ------------------ |
| IGRF 1900 - IGRF 2025 | `igrf1900` to `igrf2025` | 13 | 13 | Alken et al., 2021 |
### Mars
| Model | C String | Maximum Degree | Default Degree | Reference |
| ------------------- | -------------- | -------------- | -------------- | ------------------------ |
| Gau 2021 | `gau2021` | 110 | 110 | Gau et al., 2021 |
| Langlais et al 2019 | `langlais2019` | 134 | 134 | Langlais et al., 2019 |
| Morschhauser 2014 | `mh2014` | 110 | 110 | Morchhauser et al., 2014 |
| Cain 2003 | `cain2003` | 90 | 90 | Cain et al., 2003 |
### Jupiter
| Model | C String | Maximum Degree | Default Degree | Reference |
| -------------- | ----------- | -------------- | -------------- | ---------------------- |
| JRM33 | `jrm33` | 30 | 13 | Connerney et al., 2022 |
| JRM09 | `jrm09` | 20 | 10 | Connerney et al., 2018 |
| ISaAC | `isaac` | 10 | 10 | Hess et al., 2017 |
| VIPAL | `vipal` | 5 | 5 | Hess et al., 2011 |
| VIP4 | `vip4` | 4 | 4 | Connerney 2007 |
| VIT4 | `vit4` | 4 | 4 | Connerney 2007 |
| O4 | `o4` | 3 | 3 | Connerney 1981 |
| O6 | `o6` | 3 | 3 | Connerney 2007 |
| GSFC15evs | `gsfc15evs` | 3 | 3 | Connerney 1981 |
| GSFC15ev | `gsfc15ev` | 3 | 3 | Connerney 1981 |
| GSFC13ev | `gsfc13ev` | 3 | 3 | Connerney 1981 |
| Ulysses 17ev | `u17ev` | 3 | 3 | Connerney 2007 |
| SHA | `sha` | 3 | 3 | Connerney 2007 |
| Voyager 1 17ev | `v117ev` | 3 | 3 | Connerney 2007 |
| JPL15ev | `jpl15ev` | 3 | 3 | Connerney 1981 |
| JPL15evs | `jpl15evs` | 3 | 3 | Connerney 1981 |
| P11A | `p11a` | 3 | 3 | |
### Saturn
| Model | C String | Maximum Degree | Default Degree | Reference |
| ---------------- | ------------ | -------------- | -------------- | ---------------------- |
| Burton 2009 | `burton2009` | 3 | 3 | Burton et al., 2009 |
| Cassini 3 | `cassini3` | 3 | 3 | Cao et al., 2011 |
| Cassini 5 | `cassini5` | 5 | 5 | Cao et al., 2012 |
| Cassini 11 | `cassini11` | 12 | 11 | Dougherty et al., 2018 |
| P11A | `p11as` * | 3 | 3 | Connerney 2007 |
| P<sub>11</sub>84 | `p1184` | 3 | 3 | Davis and Smith 1986 |
| SOI | `soi` | 3 | 3 | Dougherty et al., 2007 |
| SPV | `spv` | 3 | 3 | Davis and Smith 1990 |
| V1 | `v1` | 3 | 3 | Connerney et al., 1982 |
| V2 | `v2` | 3 | 3 | Connerney et al., 1982 |
| Z3 | `z3` | 3 | 3 | Connerney et al., 1982 |
### Uranus
| Model | C String | Maximum Degree | Default Degree | Reference |
| ----------------------- | ------------ | -------------- | -------------- | ---------------------- |
| AH5 | `ah5` | 4 | 4 | Herbert 2009 |
| GSFC Q3 | `gsfcq3` | 2 | 2 | Connerney et al., 1987 |
| GSFC Q3 (unconstrained) | `gsfcq3full` | 3 | 2 | Connerney et al., 1987 |
| Umoh | `umoh` | 16 | 16 | Holme and Bloxham 1996 |
### Neptune
| Model | C String | Maximum Degree | Default Degree | Reference |
| ----------------------- | ------------ | -------------- | -------------- | ---------------------- |
| GSFC O8 | `gsfco8` | 3 | 3 | Connerney et al., 1991 |
| GSFC O8 (unconstrained) | `gsfco8full` | 8 | 3 | Connerney et al., 1991 |
| Nmoh | `nmoh` | 16 | 16 | Holme and Bloxham 1996 |
### Ganymede
| Model | C String | Maximum Degree | Default Degree | Reference |
| ----------------------- | ------------ | -------------- | -------------- | ---------------------- |
| Kivelson et al., 2002 | `kivelson2002a` <br /> `kivelson2002b` <br /> `kivelson2002c` | 2 <br /> 1 <br /> 1 | 2 <br /> 1 <br /> 1 | Kivelson et al., 2002 |
| Weber et al., 2022 | `weber2022dip` <br /> `weber2022quad` | 1 <br /> 2 | 1 <br /> 2 | Weber et al., 2022 |
### Time varying models
For models which vary with time (e.g. IGRF) a chronological list of model names with associated dates and times should be provided in `libinternalfield/variable/planet/nameofmodellist.dat`
with the following columns:
1. Model C-strings
2. Integer date, in the format yyyymmdd
3. Floating point time of day in hours (e.g. 15:45 = 15.75 UT)
## Accessing Via C++
When using C++, the models field can be obtained using the ```InternalModel``` class. An instance of this class is initialized with the library called `internalModel`.
```cpp
#include <internal.h>
int main() {
/* set current model */
internalModel.SetModel("jrm09");
/* set intput and output coordinates to Cartesian */
internalModel.SetCartIn(true);
internalModel.SetCartOut(true);
/* input position (cartesian)*/
double x = 35.0;
double y = 10.0;
double z = -4.0;
/* output field */
double Bx, By, Bz;
internalModel.Field(x,y,z,&Bx,&By,&Bz);
}
```
## Accessing Via Python
...and probably other languages. Wrapper functions are included which can be accessed from other languages without directly interacting with the `internalModel` object:
```cpp
/* calculate the magnetic field at some sets of coordinates (p0,p1,p2) */
void InternalField(int n, double *p0, double *p1, double *p2,
double *B0, double *B1, double *B2);
/* same as above, with a custom maximum model degree */
void InternalFieldDeg(int n, double *p0, double *p1, double *p2,
int MaxDeg, double *B0, double *B1, double *B2);
/* Set the model and its input and output coordinates */
void SetInternalCFG(char *Model, bool CartIn, bool CartOut);
/* return the current configuration */
void GetInternalCFG(char *Model, bool *CartIn, bool *CartOut);
```
## Accessing Via C
This project includes a C-compatible header file which includes prototypes for the wrapper functions mentioned in the Python section above. It also includes wrapper functions for every single model included in the library, where each function is named with the format `XXXXXField`, where `XXXXX` can be replaced with the lower-case name of the model (identical to the C string in the table above). The `getModelFieldPtr` function returns a pointer to a model wrapper function when given a string, see below for an example.
```c
/* contents of ctest.c */
#include <stdio.h>
#include <stdbool.h>
#include <internalfieldc.h>
int main() {
printf("Testing C\n");
/* try getting a model function */
modelFieldPtr model = getModelFieldPtr("jrm33");
double x = 10.0;
double y = 10.0;
double z = 0.0;
double Bx, By, Bz;
model(x,y,z,&Bx,&By,&Bz);
printf("B = [%6.1f,%6.1f,%6.1f] nT at [%4.1f,%4.1f,%4.1f]\n",Bx,By,Bz,x,y,z);
printf("C test done\n");
}
```
which can be compiled, then run using
```bash
gcc ctest.c -o ctest -lm -linternalfield
./ctest
```
## References
International Geomagnetic Reference Field: the 13th generation, Alken, P., Thébault, E., Beggan, C.D. et al. International Geomagnetic Reference Field: the thirteenth generation. Earth Planets Space 73, 49 (2021). https://doi.org/10.1186/s40623-020-01288-x
Anderson, B.J., Acuña, M.H., Korth, H. et al. The Magnetic Field of Mercury. Space Sci Rev 152, 307–339 (2010). https://doi.org/10.1007/s11214-009-9544-3
Burton, M.E., Dougherty, M.K., Russell, C.T. (2009) Model of Saturn's internal planetary magnetic field based on Cassini observations. Planetary and Space Science, 57 (14). 1706-1713 doi:10.1016/j.pss.2009.04.008
Cain, J. C., B. B. Ferguson, and D. Mozzoni, An n = 90 internal potential function of the Martian crustal magnetic field, J. Geophys. Res., 108(E2), 5008, doi:10.1029/2000JE001487, 2003.
Cao, Hao, Russell, Christopher T., Christensen, Ulrich R., Dougherty, Michele K., Burton, Marcia E. (2011) Saturn's very axisymmetric magnetic field: No detectable secular variation or tilt. Earth and Planetary Science Letters, 304 (1). 22-28 doi:10.1016/j.epsl.2011.02.035
Cao, Hao, Russell, Christopher T., Wicht, Johannes, Christensen, Ulrich R., Dougherty, Michele K. (2012) Saturn’s high degree magnetic moments: Evidence for a unique planetary dynamo. Icarus, 221 (1). 388-394 doi:10.1016/j.icarus.2012.08.007
Connerney, J. E. P. (1981), The magnetic field of Jupiter: A generalized inverse approach, *J. Geophys. Res.*, 86( A9), 7679– 7693, doi:[10.1029/JA086iA09p07679](https://doi.org/10.1029/JA086iA09p07679 "Link to external resource: 10.1029/JA086iA09p07679").
Connerney, J. E. P., Acuña, M. H., and Ness, N. F. (1982), Voyager 1 assessment of Jupiter's planetary magnetic field, *J. Geophys. Res.*, 87( A5), 3623– 3627, doi:[10.1029/JA087iA05p03623](https://doi.org/10.1029/JA087iA05p03623 "Link to external resource: 10.1029/JA087iA05p03623").
Connerney, J. E. P., Acuña, M. H., and Ness, N. F. (1987), The magnetic field of Uranus, J. Geophys. Res., 92( A13), 15329– 15336, doi:10.1029/JA092iA13p15329.
Connerney, J. E. P., Acuña, M. H., and Ness, N. F. (1991), The magnetic field of Neptune, J. Geophys. Res., 96( S01), 19023– 19042, doi:10.1029/91JA01165.
Connerney, J.E.P.. (2007). Planetary Magnetism. Treatise on Geophysics. 10. 243-280. 10.1016/B978-044452748-6.00159-0.
Connerney, J. E. P., Kotsiaros, S., Oliversen, R. J., Espley, J. R., Joergensen, J. L., Joergensen, P. S., et al. (2018). A new model of Jupiter's magnetic field from Juno's first nine orbits. Geophysical Research Letters, 45, 2590– 2596. https://doi.org/10.1002/2018GL077312
Connerney, J. E. P., Timmins, S., Oliversen, R. J., Espley, J. R., Joergensen, J. L., Kotsiaros, S., et al. (2022). A new model of Jupiter's magnetic field at the completion of Juno's Prime Mission. Journal of Geophysical Research: Planets, 127, e2021JE007055. https://doi.org/10.1029/2021JE007055
Davis, L., and Smith, E. J. (1986), New models of Saturn's magnetic field using Pioneer 11 vector helium magnetometer data, J. Geophys. Res., 91( A2), 1373– 1380, doi:10.1029/JA091iA02p01373.
Davis, L., and Smith, E. J. (1990), A model of Saturn's magnetic field based on all available data, J. Geophys. Res., 95( A9), 15257– 15261, doi:10.1029/JA095iA09p15257.
Dougherty MK, Achilleos N, Andre N, Arridge CS, Balogh A, Bertucci C, Burton ME, Cowley SW, Erdos G, Giampieri G, Glassmeier KH, Khurana KK, Leisner J, Neubauer FM, Russell CT, Smith EJ, Southwood DJ, Tsurutani BT. Cassini magnetometer observations during Saturn orbit insertion. Science. 2005 Feb 25;307(5713):1266-70. doi: 10.1126/science.1106098. PMID: 15731444.
Dougherty, M., Cao, H., Khurana, K., Hunt, G., Provan, G., Kellock, S., et al. (2018). Saturn's magnetic field revealed by the Cassini grand finale. Science, 362, eaat5434. https://doi.org/10.1126/science.aat5434
Gao, J. W., Rong, Z. J., Klinger, L., Li, X. Z., Liu, D., & Wei, Y. (2021). A spherical harmonic Martian crustal magnetic field model combining data sets of MAVEN and MGS. Earth and Space Science, 8, e2021EA001860. https://doi.org/10.1029/2021EA001860
Herbert, F. (2009), Aurora and magnetic field of Uranus, J. Geophys. Res., 114, A11206, doi:10.1029/2009JA014394.
Hess, S. L. G., Bonfond, B., Zarka, P., and Grodent, D. (2011), Model of the Jovian magnetic field topology constrained by the Io auroral emissions, *J. Geophys. Res.*, 116, A05217, doi:[10.1029/2010JA016262](https://doi.org/10.1029/2010JA016262 "Link to external resource: 10.1029/2010JA016262").
Hess, S., Bonfond, B., Bagenal, F., & Lamy, L. (2017). A model of the Jovian internal field derived from in-situ and auroral constraints, doi:[10.1553/PRE8s157](https://doi.org/10.1553/PRE8s157)
Holme, R., and Bloxham, J. (1996), The magnetic fields of Uranus and Neptune: Methods and models, *J. Geophys. Res.*, 101( E1), 2177– 2200, doi:[10.1029/95JE03437](https://doi.org/10.1029/95JE03437 "Link to external resource: 10.1029/95JE03437").
M.G. Kivelson, K.K. Khurana, M. Volwerk, The Permanent and Inductive Magnetic Moments of Ganymede, Icarus, Volume 157, Issue 2, 2002, Pages 507-522, ISSN 0019-1035, https://doi.org/10.1006/icar.2002.6834.
Langlais, B., Thébault, E., Houliez, A., Purucker, M. E., & Lillis, R. J. (2019). A new model of the crustal magnetic field of Mars using MGS and MAVEN. *Journal of Geophysical Research: Planets*, 124, 1542– 1569. [A New Model of the Crustal Magnetic Field of Mars Using MGS and MAVEN - Langlais - 2019 - Journal of Geophysical Research: Planets - Wiley Online Library](https://doi.org/10.1029/2018JE005854)
Morschhauser, A., V. Lesur, and M. Grott (2014), A spherical harmonic model of the lithospheric magnetic field of Mars, J. Geophys. Res. Planets, 119, 1162–1188, doi:10.1002/2013JE004555.
Ness, N. F., Behannon, K. W., Lepping, R. P., and Whang, Y. C. (1975), The magnetic field of Mercury, 1, *J. Geophys. Res.*, 80( 19), 2708– 2716, doi:[10.1029/JA080i019p02708](https://doi.org/10.1029/JA080i019p02708 "Link to external resource: 10.1029/JA080i019p02708").
Thebault, E., Langlais, B., Oliveira, J.S., et al., 2018. A time-averaged regional model of the Hermean magnetic field. Phys. Earth Planet. In. 276, 93–105. https://doi.org/10.1016/j.pepi.2017.07.001.
Uno, H., Johnson, C.L., Anderson, B.J., Korth, H., Solomon, S.C., 2009. Modeling Mercury’s internal magnetic field with smooth inversions. Earth Planet. Sci. Lett. 285, 328–339. http://dx.doi.org/10.1016/j.epsl.2009.02.032
Weber, T., Moore, K., Connerney, J., Espley, J., DiBraccio, G., & Romanelli, N. (2022). Updated spherical harmonic magnetic field moments of Ganymede from the Juno flyby. Geophysical Research Letters, 49, e2022GL098633. https://doi.org/10.1029/2022GL098633
|
mattkjames7REPO_NAMElibinternalfieldPATH_START.@libinternalfield_extracted@libinternalfield-main@test@README.md@.PATH_END.py
|
{
"filename": "parallel_copy_test.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/L0_parallel_copy/parallel_copy_test.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import functools
import time
import unittest
from builtins import range
import numpy as np
import test_util as tu
import tritonclient.grpc as grpcclient
from tritonclient.utils import InferenceServerException
class ParallelCopyTest(tu.TestResultCollector):
    """Verify that large input tensors are copied to the model in parallel.

    Compares the increase in the server-reported cumulative
    ``compute_input`` duration for one batch-8 request (serialized copy)
    against four concurrent batch-2 requests (parallelized copy), and
    expects the parallelized copies to take less total time.
    """

    def setUp(self):
        # gRPC client against the locally running Triton instance.
        self.client_ = grpcclient.InferenceServerClient("localhost:8001")
        self.dtype_ = np.float32
        # "zero" identity model: OUTPUT0 echoes INPUT0.
        self.model_name_ = tu.get_zero_model_name("plan", 1, self.dtype_)

    def _batch_input_duration(self, batch_size):
        """Return the cumulative compute-input time (ns) for ``batch_size``.

        Reads the inference statistics for version 1 of the model and
        extracts the ``compute_input`` nanosecond counter of the batch-stat
        entry whose batch size matches; returns 0 if no such entry exists
        yet.
        """
        stats = self.client_.get_inference_statistics(self.model_name_, "1")
        self.assertEqual(len(stats.model_stats), 1, "expect 1 model stats")
        self.assertEqual(
            stats.model_stats[0].name,
            self.model_name_,
            "expect model stats for model {}".format(self.model_name_),
        )
        self.assertEqual(
            stats.model_stats[0].version,
            "1",
            "expect model stats for model {} version 1".format(self.model_name_),
        )
        batch_input_duration = 0
        for batch_stat in stats.model_stats[0].batch_stats:
            if batch_stat.batch_size == batch_size:
                batch_input_duration = batch_stat.compute_input.ns
        return batch_input_duration

    def _run(self, batch_sizes):
        """Issue one async request per entry in ``batch_sizes``.

        Waits (up to ~20 s) for all responses, asserts each response echoes
        its input, and returns the increase (ns) in the server's cumulative
        compute-input time for the combined batch size.
        """
        # Total batch size the dynamic batcher is expected to form.
        batch_size = sum(batch_sizes)
        # Large (16M-element) inputs so copy time dominates the statistics.
        input_data = [
            np.random.random([bs, 16 * 1024 * 1024]).astype(self.dtype_)
            for bs in batch_sizes
        ]
        inputs = [
            [grpcclient.InferInput("INPUT0", [bs, 16 * 1024 * 1024], "FP32")]
            for bs in batch_sizes
        ]
        output = [grpcclient.InferRequestedOutput("OUTPUT0")]
        for idx in range(len(inputs)):
            inputs[idx][0].set_data_from_numpy(input_data[idx])

        def callback(user_data, idx, result, error):
            # Store either the error or the result in the caller's slot.
            user_data[idx] = error if error else result

        # One slot per request; None means "response not yet received".
        user_data = [None] * len(batch_sizes)
        before_compute_input_duration = self._batch_input_duration(batch_size)
        for idx in range(len(batch_sizes)):
            self.client_.async_infer(
                model_name=self.model_name_,
                inputs=inputs[idx],
                callback=functools.partial(callback, user_data, idx),
                outputs=output,
            )
        # Poll until every callback has fired or the timeout expires.
        time_out = 20
        while time_out > 0 and any(res is None for res in user_data):
            time_out -= 1
            time.sleep(1)
        done_cnt = sum(1 for res in user_data if res is not None)
        self.assertEqual(
            done_cnt,
            len(batch_sizes),
            "expected {} responses, got {}".format(len(batch_sizes), done_cnt),
        )
        for idx, res in enumerate(user_data):
            # isinstance (not exact type comparison) so subclasses of
            # InferenceServerException are also reported as failures.
            self.assertFalse(
                isinstance(res, InferenceServerException),
                "expected response for request {}, got exception {}".format(idx, res),
            )
            output_data = res.as_numpy("OUTPUT0")
            self.assertTrue(
                np.array_equal(output_data, input_data[idx]),
                "Mismatched output data for request {}".format(idx),
            )
        after_compute_input_duration = self._batch_input_duration(batch_size)
        return after_compute_input_duration - before_compute_input_duration

    def test_performance(self):
        """Parallelized input copy should be faster than serialized copy."""
        model_status = self.client_.is_model_ready(self.model_name_, "1")
        self.assertTrue(model_status, "expected model to be ready")
        # Send 1 request with batch size 8 so that the copy is not parallelized
        serialized_time = self._run([8])
        # Four concurrent batch-2 requests exercise the parallel copy path.
        parallelized_time = self._run([2, 2, 2, 2])
        # The following check is loose, local runs show that the speedup is not
        # significant (~15%), may be due to the dispatch overhead
        # which cancels part of the improvement
        self.assertTrue(
            serialized_time > parallelized_time,
            "Expected parallelized copy is faster than serialized copy",
        )
        print(
            "serialized v.s. parallelized : {} v.s. {}".format(
                serialized_time, parallelized_time
            )
        )
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@L0_parallel_copy@parallel_copy_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/indicator/legendgrouptitle/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# On Python < 3.7 (no module-level __getattr__, PEP 562) or during static
# type checking, import Font eagerly so type checkers can resolve it.
# Otherwise defer the submodule import via plotly's relative_import helper,
# which installs lazy __getattr__/__dir__ hooks to keep import time low.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._font import Font
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@indicator@legendgrouptitle@__init__.py@.PATH_END.py
|
{
"filename": "summary.py",
"repo_name": "michaelhb/superplot",
"repo_path": "superplot_extracted/superplot-master/superplot/summary.py",
"type": "Python"
}
|
"""
Summary of chain
================
A stand-alone script to print summary statistics about a chain.
"""
import os
from argparse import ArgumentParser as arg_parser
# External modules
from prettytable import PrettyTable as pt
# superplot modules
import data_loader
from plot_options import default
import superplot.statslib.point as stats
import superplot.statslib.one_dim as one_dim
def _summary(name, param, posterior, chi_sq):
    """
    Build the summary statistics for one parameter.

    :param name: Name of parameter
    :type name: string
    :param param: Data column of parameter
    :param posterior: Posterior weight of each sample
    :param chi_sq: Chi-squared of each sample

    :returns: [name, best-fit, posterior mean, lower credible-region edge,
        upper credible-region edge] for this parameter
    :rtype: list
    """
    # Point estimates: best-fit (minimum chi-squared) and posterior mean
    best_fit_point = stats.best_fit(chi_sq, param)
    mean = stats.posterior_mean(posterior, param)

    # One-dimensional marginalised pdf, binned with the default settings
    pdf = one_dim.posterior_pdf(param,
                                posterior,
                                nbins=default("nbins"),
                                bin_limits=default("bin_limits")
                                )

    # Lower and upper edges of the credible region at the default alpha
    lower = one_dim.credible_region(pdf.pdf,
                                    pdf.bin_centers,
                                    alpha=default("alpha")[1],
                                    region="lower")
    upper = one_dim.credible_region(pdf.pdf,
                                    pdf.bin_centers,
                                    alpha=default("alpha")[1],
                                    region="upper")

    return [name, best_fit_point, mean, lower, upper]
def _summary_table(labels, data, names=None, datafile=None, infofile=None):
    """
    Summarize multiple parameters in a table.

    :param labels: Mapping of data column index to parameter name
    :type labels: dict
    :param data: Data columns; data[0] is the posterior weight and data[1]
        the chi-squared of each sample
    :param names: Parameter names to summarize (default: all named columns)
    :param datafile: Name of chain file, shown in the header table
    :param infofile: Name of information file, shown in the header table

    :returns: Table of summary statistics for particular parameters
    :rtype: string
    """
    # Summarize all parameters by default
    if names is None:
        names = labels.values()
    # Make a string describing credible interval
    beta_percent = 100. * (1. - default("alpha")[1])
    credible_name = "%.2g%% credible region" % beta_percent
    # Headings for a table
    headings = ["Name",
                "best-fit",
                "posterior mean",
                credible_name,
                ""
                ]
    param_table = pt(headings)
    param_table.align = "l"
    param_table.float_format = "4.2"
    # Make summary data and add it to table. Use items() rather than the
    # Python-2-only iteritems() so this also runs under Python 3.
    posterior = data[0]
    chi_sq = data[1]
    for key, name in labels.items():
        if name in names:
            param = data[key]
            param_table.add_row(_summary(name, param, posterior, chi_sq))
    # Best-fit information and information about chain
    min_chi_sq = chi_sq.min()
    p_value = stats.p_value(chi_sq, default("dof"))
    bestfit_table = pt(header=False)
    bestfit_table.align = "l"
    bestfit_table.float_format = "4.2"
    bestfit_table.add_row(["File", datafile])
    bestfit_table.add_row(["Info-file", infofile])
    bestfit_table.add_row(["Minimum chi-squared", min_chi_sq])
    bestfit_table.add_row(["p-value", p_value])
    return bestfit_table.get_string() + "\n\n" + param_table.get_string()
def main():
    """
    Parse command-line arguments, load the chain and build a summary table.

    :returns: Formatted summary table
    :rtype: string
    """
    parser = arg_parser(description='Superplot summary tool',
                        conflict_handler='resolve')
    parser.add_argument('--data_file',
                        '-d',
                        help='Chain file to summarise',
                        type=str,
                        required=True)
    parser.add_argument('--info_file',
                        '-i',
                        help='Info file to summarise',
                        type=str,
                        default=None,
                        required=False)
    args = vars(parser.parse_args())

    # Resolve the chain file (required) and info file (optional) to
    # absolute paths
    datafile = os.path.abspath(args['data_file'])
    infofile = args['info_file']
    if infofile:
        infofile = os.path.abspath(infofile)

    # Load and label data, then summarize it
    labels, data = data_loader.load(infofile, datafile)
    return _summary_table(labels, data, datafile=datafile, infofile=infofile)
if __name__ == "__main__":
    # Parenthesised print so this entry point is valid syntax under both
    # Python 2 and Python 3 (the bare `print main()` statement was a
    # SyntaxError on Python 3 and prevented the module from importing).
    print(main())
|
michaelhbREPO_NAMEsuperplotPATH_START.@superplot_extracted@superplot-master@superplot@summary.py@.PATH_END.py
|
{
"filename": "template_subtraction_and_WCS.ipynb",
"repo_name": "Astro-Sean/autophot",
"repo_path": "autophot_extracted/autophot-master/example_notebooks/template_subtraction_and_WCS.ipynb",
"type": "Jupyter Notebook"
}
|
<h1 align="center"> Using Template Subtraction and updating WCS in AutoPhoT </h1>
This notebook will cover how to run AutoPhoT with template subtraction. Additionally, this notebook will explain how to set up AutoPhoT so that it can update an image's WCS automatically.
To review the basic operations of AutoPHoT, see ([here](https://github.com/Astro-Sean/autophot/blob/master/example_notebooks/autophot_example.ipynb))
<div class="alert alert-info">
<strong>info!</strong> For this notebook you will need HOTPANTS, PyZOGY and Astrometry.Net installed on your machine, for detailed installation instructions, see <a href=https://github.com/Astro-Sean/autophot>here</a>.
</div>
<div class="alert alert-danger">
<strong>Advice</strong> Template subtraction can be a black box of pain and frustration. AutoPHoT works well when the template is from the same telescope and instrument as the science image, with varying results when the template and image come from different telescopes or instruments. Check all template-subtracted images!
</div>
As before we will load in the autophot control file - we will need to update a few options to prepare AutoPhot for template subtractions
```python
from autophot.prep_input import load
autophot_input = load()
```
Default input loaded in from:
/Users/seanbrennan/miniconda3/envs/autophot/lib/python3.7/site-packages/autophot/databases/default_input.yml
<h2 align="center">Loading in some example Data</h2>
In this example we will use the function below with *template_subtraction_example = True* to create a new folder on your Desktop called *autophot_host_subtraction_example*.
<div class="alert alert-warning">
<strong>Warning!</strong> If you are using your own data and are familiar with the directory setup this cell is not needed.
</div>
```python
from autophot.example import save_example_data
fpath = save_example_data.save_fits_to_desktop(template_subtraction_example = True)
```
Successful copy of transient_with_host.fits written to: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/transient_with_host.fits
Successful copy of template_with_host.fits written to: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/templates/gp_template
The above function returns the file location, *fpath*, which is the filepath to our fits images. This image contains a transient that is heavily contaminated by its host galaxy. Additionally, the function has created a folder named templates. In the cell below we can list out the contents of this directory we created.
```python
import os
# Lets just see that everything is in place
dirpath = os.path.dirname(fpath)
# List contents of folder. We use this generator function to ignore hidden files
dir_contents = [i for i in os.listdir(dirpath) if not i.startswith('.')]
print('\nlist of contects in: %s \n%s' % (dirpath,dir_contents))# returns list of folder contents
```
list of contects in: /Users/seanbrennan/Desktop/autophot_host_subtraction_example
['templates', 'transient_with_host.fits']
<h2 align="center">Template Directory structure - Important</h2>
Now we can check the contents of this *templates* folder. In the cell below we list out the directory contents, where we can see that there is a set of directories labeled X_template, where X is the name of a filter which AutoPHoT accepts: UBVRI, ugriz, and JHK bands.
<div class="alert alert-info">
<strong>Info!</strong> Because folder names are case-sensitive in Python 3, we've named the ugriz template folders up, gp, rp, ip, and zp. See below.
</div>
```python
main_template_foler = os.path.join(dirpath,'templates')
template_dir_contents = [i for i in os.listdir(main_template_foler) if not i.startswith('.')]
print('Contents of template folder:',template_dir_contents)
#os.listdir(main_template_foler)
```
Contents of template folder: ['B_template', 'I_template', 'H_template', 'V_template', 'ip_template', 'zp_template', 'gp_template', 'U_template', 'rp_template', 'J_template', 'K_template', 'R_template', 'up_template']
In this example, the image and template are both in *g* band. We can check the contents of the *gp_template* directory
```python
gp_template_folder = os.path.join(main_template_foler,'gp_template')
gp_template_dir_contents = [i for i in os.listdir(gp_template_folder) if not i.startswith('.')]
print('Contents of gp template folder:',gp_template_dir_contents )
```
Contents of gp template folder: ['template_with_host.fits']
<h2 align="center">Setting up AutoPHoT For template subtraction</h2>
We will first set up AutoPHoT for template subtraction with HOTPANTS and then show the commands necessary for template subtraction using ZOGY
<div class="alert alert-info">
<strong>Info!</strong> In the following cell we set up AutoPHoT for basic execution, for details on this step see <a href=https://github.com/Astro-Sean/autophot/blob/master/example_notebooks/autophot_example.ipynb>here</a>.
</div>
```python
# Location of our fits files
autophot_input['fits_dir'] = dirpath
print('Setting file directory (fits_dir) to: %s' % dirpath)
autophot_input['wdir'] = dirpath
print('Setting work directory (wdir) to: %s' % dirpath)
# set the catalog as before # Can choose skymapper, apass, pan_starrs, 2mass
autophot_input['catalog']['use_catalog'] = 'sdss'
```
Setting file directory (fits_dir) to: /Users/seanbrennan/Desktop/autophot_host_subtraction_example
Setting work directory (wdir) to: /Users/seanbrennan/Desktop/autophot_host_subtraction_example
Select our Target - in this example we are looking at AT 2018cow which is heavily contaminated by its host galaxy, CGCG137-068
```python
# Select a source and update our syntax input
# For this example lets use the location of AT2016jbu as that won't be removed in the template subtraction
ra = 244.000917
dec = 22.268031
from astropy.coordinates import SkyCoord
c = SkyCoord(ra,dec , unit="deg")
# Now tell autophot where to look
autophot_input['target_ra'] = c.ra.degree
autophot_input['target_dec'] = c.dec.degree
```
<h3 align="center">Why we need template subtraction in this case</h3>
Below we will plot the general location of the transient. The plot shows that there is a lot of contamination from the host galaxy. This will lead to poor fitting and high errors on our target magnitude.
In this specific example AT 2018cow is contaminated by its host as well as a source south-west of its location
```python
# We will plot out the image
import matplotlib.pyplot as plt
from astropy.visualization import ImageNormalize,SquaredStretch,ZScaleInterval
# autophot functions to find image data and header from fits files
from autophot.packages.functions import getimage
from autophot.packages.functions import getheader
# To retrieve the WCS information from this image
from astropy import wcs
from astropy.coordinates import SkyCoord
# image
data = getimage(fpath)
# header
header = getheader(fpath)
# Create an ImageNormalize object
vmin,vmax = (ZScaleInterval(nsamples = 1000)).get_limits(data)
# WCS information of image
w = wcs.WCS(header)
# get pixel coordinates of this source
c = SkyCoord(ra,dec , unit="deg")
x_pix,y_pix = w.all_world2pix(c.ra.degree, c.dec.degree, 1)
# plot image
fig = plt.figure(figsize = (8,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.imshow(data,
vmin = vmin,
vmax = vmax,
origin = 'lower',
cmap = 'viridis')
ax1.scatter(x_pix,y_pix,
marker = 'o',
facecolor = 'none',
edgecolor = 'r',
s = 50)
# Plot a close up cutout of the source too
cutout_size = 10
cutout = data[int(y_pix-cutout_size):int(y_pix+cutout_size),
int(x_pix-cutout_size):int(x_pix+cutout_size)]
ax2.imshow(cutout,
origin = 'lower',
cmap = 'viridis')
ax2.scatter(cutout_size,cutout_size,
marker = 'o',
facecolor = 'none',
edgecolor = 'r',
s = 50)
plt.show()
```

<h3 align="center">Updating AutoPHoT commands</h3>
AutoPHoT needs to be told where to find certain executables for Astrometry.net and HOTPANTS. For my machine they are as follows. See [here](https://github.com/Astro-Sean/autophot) for how to find these file paths on your machine.
```python
# Location of solve-field executable for Astrometry.Net
autophot_input['wcs']['solve_field_exe_loc'] = '/usr/local/Cellar/astrometry-net/0.85_1/bin/solve-field'
# Location of hotpants executable from HOTPANTS
autophot_input['template_subtraction']['hotpants_exe_loc'] = '/usr/local/hotpants-master/hotpants'
```
We also need to tell AutoPHoT that we want to perform image subtraction
```python
autophot_input['template_subtraction']['do_subtraction'] = True
```
<h3 align="center">Preprocessing of Template images</h3>
AutoPHoT includes a package to prepare the template images for use. This includes redoing the WCS values, cleaning cosmic rays, and building a PSF model and saving it as a fits file. The latter step is vital for using ZOGY with AutoPHoT.
```python
autophot_input['template_subtraction']['prepare_templates'] = True
```
We can now execute AutoPHoT. This first execution will prepare our template files
```python
from autophot.autophot_main import run_automatic_autophot
run_automatic_autophot(autophot_input)
```
_ _ ___ _ _ _____
/_\ _ _| |_ ___| _ \ || |__|_ _|
/ _ \ || | _/ _ \ _/ __ / _ \| |
/_/ \_\_,_|\__\___/_| |_||_\___/|_|
---------------------------------------
Automated Photometry of Transients
S. J. Brennan et al. 2021
Please provide feedback/bugs to:
Email: sean.brennan2@ucdconnect.ie
---------------------------------------
Directory of fits file: /Users/seanbrennan/Desktop/autophot_host_subtraction_example
Found Telescopes:
- Liverpool Telescope
Adding new Telescope: Liverpool Telescope
Do you want to update location of Liverpool Telescope
( Press enter for n )
( Accepted answers - y or n )
> n
-> n
*** Instrument Found ***
Liverpool Telescope -> INSTRUME -> IO:O
Enter name of Telescope and Instrument for labelling
( Press enter for Liverpool Telescope+IO:O )
>
-> Liverpool Telescope+IO:O
Enter Pixel scale in arcsec/pixel
( Press enter for 0.4 )
>
-> 0.4
Cannot find any keywords similar to GAIN (File: template_with_host.fits)
Enter header key that represents GAIN key in e/ADU, type skip to give header key
( Press enter for ignore )
>
-> ignore
Cannot find any keywords similar to READNOISE (File: template_with_host.fits)
Enter header key that represents READNOISE key in e/pixel, type skip to give header key
( Press enter for ignore )
>
-> ignore
Cannot find any keywords similar to AIRMASS (File: template_with_host.fits)
Enter header key that represents AIRMASS key , type skip to give header key
( Press enter for ignore )
>
-> ignore
-> Telescope check complete
Checking Filter keywords and database
Corrosponding filter name for - SDSS-G?
( Telescope: Liverpool Telescope :: Inst: IO:O )
( Press enter for no_filter )
Accepted answers:
| - H - I - J |
| - K - R - U |
| - B - V - g |
| - i - r - u |
| - z - - |
> g
-> Filter check complete
Checking Filter information for each image
Files removed - Wrong Image Type: 0
Files removed - No/Wrong filter(s): 0
Filters not included: []
Files removed: 0
------------------------
Preparing Template Files
------------------------
+-----------+
|File: 1 / 1|
+-----------+
File: template_with_host.fits - PID: 19211
Start Time: 2022-02-10 13:44:29.398422
Filter keyoward used: FILTER
Write Directory: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/templates/gp_template
Read noise key not found for template file
Read Noise: 0.0 [e^- /pixel]
Template GAIN: 1.0 [e^- /count]
Template Exposure Time: 4.0 [s]
+-------------------------+
|Preparing templates files|
+-------------------------+
Detecting/removing cosmic ray sources
Starting Astroscrappy ...
Contaminated pixels with Cosmic rays removed: 76
Cosmic rays removed - image updated
ASTROMETRY started...
ASTROMETRY finished: 7s
Removing any pre-existing WCS keys
Updating WCS keys with new values
Searching for FWHM
Using Gaussian Profile for fitting
+-------------------------------+
|Finding Full Width Half Maximum|
+-------------------------------+
Number of sources before cleaning [ 25.0 sigma ]: 25
Updating search FWHM value
Updated guess for FWHM: 4.5 pixels
Number of sources before cleaning [ 25.0 sigma ]: 87
Removed 7 sources near boundary
Removed 11 crowded sources
Fitting source for FWHM: 69/69
Removed 8 FWHM outliers
Removed 21 median outliers
Useable sources found [ 25 sigma ]: 69
Removes 0 sources within minimum seperation [ 22 pixel ]
Large error on FWHM - returning plots for user diagnostic
/Users/seanbrennan/miniconda3/envs/autophot/lib/python3.7/site-packages/matplotlib/axes/_base.py:2283: UserWarning: Warning: converting a masked element to nan.
xys = np.asarray(xys)
/Users/seanbrennan/miniconda3/envs/autophot/lib/python3.7/site-packages/matplotlib/axes/_base.py:2283: UserWarning: Warning: converting a masked element to nan.
xys = np.asarray(xys)
FWHM: 4.823 +/- 2.196 [ pixels ]
Residual table updated: 1 / 10
SNR: 278 :: FWHM fitted 4.529
Residual table updated: 2 / 10
SNR: 259 :: FWHM fitted 4.396
Residual table updated: 3 / 10
SNR: 230 :: FWHM fitted 4.477
+-------------------------------------------+
|Building PSF model using stars in the field|
+-------------------------------------------+
Residual table updated: 4 / 10
SNR: 223 :: FWHM fitted 4.518
Residual table updated: 5 / 10
SNR: 188 :: FWHM fitted 4.484
Residual table updated: 6 / 10
SNR: 157 :: FWHM fitted 4.465
Residual table updated: 7 / 10
SNR: 155 :: FWHM fitted 4.435
Residual table updated: 8 / 10
SNR: 153 :: FWHM fitted 4.351
Residual table updated: 9 / 10
SNR: 144 :: FWHM fitted 4.427
Residual table updated: 10 / 10
SNR: 100 :: FWHM fitted 4.551
PSF built using 10 sources
PSF model saved as: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/templates/gp_template/PSF_model_template_with_host.fits
------------------------------------------------------------
Templates ready - Please check to make sure they are correct
set 'prepare_templates' to False and execute
------------------------------------------------------------
DONE
Done - Time Taken: 37.3
<div class="alert alert-warning">
<strong>Warning!</strong> Make sure to set autophot_input['template_subtraction']['prepare_templates'] to False afterwards
</div>
```python
autophot_input['template_subtraction']['prepare_templates'] = False
```
We can list the contents of the gp_template folder to check out the new contents
```python
gp_template_folder = os.path.join(main_template_foler,'gp_template')
gp_template_dir_contents = [i for i in os.listdir(gp_template_folder) if not i.startswith('.')]
print('Contents of gp template folder:',gp_template_dir_contents )
```
Contents of gp template folder: ['PSF_model_template_with_host.fits', 'template_with_host.fits', 'template_with_host.fits.log', 'template_with_host_astrometry.log', 'image_analysis_template_with_host.pdf', 'fwhm_histogram_template_with_host.pdf', 'calib_template.csv', 'image_analysis_template_with_host.csv']
<h2 align="center">Using HOTPANTS with AutoPHoT</h2>
HOTPANTS is the default template subtraction method in AutoPHoT. Since we have updated the *hotpants_exe_loc* command above we can simply run the AutoPHoT code again.
```python
run_automatic_autophot(autophot_input)
```
_ _ ___ _ _ _____
/_\ _ _| |_ ___| _ \ || |__|_ _|
/ _ \ || | _/ _ \ _/ __ / _ \| |
/_/ \_\_,_|\__\___/_| |_||_\___/|_|
---------------------------------------
Automated Photometry of Transients
S. J. Brennan et al. 2021
Please provide feedback/bugs to:
Email: sean.brennan2@ucdconnect.ie
---------------------------------------
Directory of fits file: /Users/seanbrennan/Desktop/autophot_host_subtraction_example
User instrument database: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/telescope.yml
Number of files: 1
1 telescope(s) detected - checking header keywords
Found Telescopes:
- Liverpool Telescope
-> Telescope check complete
Checking Filter keywords and database
-> Filter check complete
Checking Filter information for each image
Files removed - Wrong Image Type: 0
Files removed - No/Wrong filter(s): 0
Filters not included: []
Files removed: 0
+-----------+
|File: 1 / 1|
+-----------+
File: transient_with_host_APT.fits - PID: 19211
Start Time: 2022-02-10 13:44:53.394970
Filter keyoward used: FILTER
Telescope: Liverpool Telescope
Filter: g
MJD: 58346.923
Date of Observation : 2018-08-16
Read Noise: 0.0 [e^- /pixel]
GAIN: 1.0 [e^- /count]
Exposure time: 4 [s]
Detecting/removing cosmic ray sources
Starting Astroscrappy ...
Contaminated pixels with Cosmic rays removed: 296
Cosmic rays removed - image updated
Astrometry.net already excuted
Searching for FWHM
Using Gaussian Profile for fitting
+-------------------------------+
|Finding Full Width Half Maximum|
+-------------------------------+
Number of sources before cleaning [ 25.0 sigma ]: 25
Updating search FWHM value
Updated guess for FWHM: 4.9 pixels
Number of sources before cleaning [ 25.0 sigma ]: 84
Removed 6 sources near boundary
Removed 12 crowded sources
Fitting source for FWHM: 66/66
Removed 8 FWHM outliers
Removed 22 median outliers
Useable sources found [ 25 sigma ]: 66
Removes 0 sources within minimum seperation [ 24 pixel ]
Large error on FWHM - returning plots for user diagnostic
/Users/seanbrennan/miniconda3/envs/autophot/lib/python3.7/site-packages/matplotlib/axes/_base.py:2283: UserWarning: Warning: converting a masked element to nan.
xys = np.asarray(xys)
/Users/seanbrennan/miniconda3/envs/autophot/lib/python3.7/site-packages/matplotlib/axes/_base.py:2283: UserWarning: Warning: converting a masked element to nan.
xys = np.asarray(xys)
FWHM: 5.173 +/- 2.238 [ pixels ]
Seeing: 1.568 [ arcsec ]
Aperture size: 8.8 pixels
Aperture correction: -0.031 +/- 0.017 [ mag ]
Searching for new catalog [sdss] for target_ra_244_dec_22
+----------------------------------------------+
|Searching for catalog for target_ra_244_dec_22|
+----------------------------------------------+
Catalog length: 7958
Removed 7021 sources fainter than cutoff [20 mag]
Using Gaussian Profile for fitting
Catalog Length: 87
+---------------------------------+
|Matching catalog sources to image|
+---------------------------------+
Removed 595 sources too close to boundary or off image
Matching catalog to image: 81 / 87 :: Useful sources 80 / 87
Median offset: 2.1 [ pixels ] / 0.6 [ arcsec ]
Matching catalog to image: 82 / 87 :: Useful sources 81 / 87
Matching catalog to image: 83 / 87 :: Useful sources 82 / 87
Matching catalog to image: 84 / 87 :: Useful sources 83 / 87
Matching catalog to image: 85 / 87 :: Useful sources 84 / 87
Matching catalog to image: 86 / 87 :: Useful sources 85 / 87
Matching catalog to image: 87 / 87 :: Useful sources 86 / 87 .. done
Broken cutouts: 0
Not in correct location: 0
Not detected: 1
Saturated: 0
Error: 0
+-------------------------------------------+
|Building PSF model using stars in the field|
+-------------------------------------------+
Residual table updated: 1 / 10
SNR: 293 :: FWHM fitted 4.990
Residual table updated: 2 / 10
SNR: 174 :: FWHM fitted 4.961
Residual table updated: 3 / 10
SNR: 169 :: FWHM fitted 4.898
Residual table updated: 4 / 10
SNR: 168 :: FWHM fitted 4.947
Residual table updated: 5 / 10
SNR: 157 :: FWHM fitted 4.975
Residual table updated: 6 / 10
SNR: 135 :: FWHM fitted 4.917
Residual table updated: 7 / 10
SNR: 102 :: FWHM fitted 4.934
Residual table updated: 8 / 10
SNR: 99 :: FWHM fitted 4.900
Residual table updated: 9 / 10
SNR: 94 :: FWHM fitted 4.945
Residual table updated: 10 / 10
SNR: 93 :: FWHM fitted 4.929
PSF built using 10 sources
Unity PSF: 29.9 [counts]
Unity Residual table: 2.2 [counts]
Using PSF Photometry on Sequence Stars
Approx PSF mag -11.307 mag
PSF model saved as: /Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/PSF_model_transient_with_host_APT.fits
+-------------------+
|Measuring PSF model|
+-------------------+
+-----------------------------------+
|Fitting PSF to sources in the image|
+-----------------------------------+
Fitting PSF to source: 85 / 86
WARNING: Input data contains invalid values (NaNs or infs), which were automatically clipped. [astropy.stats.sigma_clipping]
Input data contains invalid values (NaNs or infs), which were automatically clipped.
Mean g-band zeropoint: 27.918 +/- 0.024
Fitting PSF to source: 86 / 86
+-----------------------+
|Finding Zeropoint value|
+-----------------------+
Checking for suitable catalog sources
Removed 84 sources lower than SNR of 10.0
Looking for User template in /Users/seanbrennan/Desktop/autophot_host_subtraction_example/templates
Template filepath: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/templates/gp_template/template_with_host.fits
Aligning via WCS with reproject_interp
Template smaller than image, cropping to exlcude zeros
Trimmed template shape:(1957 1938)
Trimmed image shape:(1957 1938)
Image subtracion
Performing image subtraction using HOTPANTS
HOTPANTS finished: 26s
Subtraction saved as transient_with_host_APT_image_cutout_subtraction
Target photometry on subtracted image
Setting target background to zero in template subtraction image
+-----------------------------------------------+
|Performing PSF photometry on at target location|
+-----------------------------------------------+
Setting target background to zero in template subtraction image
Approximate Target SNR: 8.8
SNR = 9 - skipping limiting magnitude
Pixel Offset: 1.978
Limiting Magnitude: skipped
Target Detection probability: 99 %
Target flux: 477.110 +/- 16.275 [counts/s]
Noise: 439.153 [counts/s]
Target SNR: 8.841 +/- 0.116
Instrumental Magnitude: -6.697 +/- 0.065
Zeropoint: 27.918 +/- 0.024
Target Magnitude: 21.222 +/- 0.183
*** Transient well detected ***
Time Taken [ 19211 ]: 87s
Sucess: transient_with_host_APT.fits :: PID 19211
Error from multlocation [10] recovery: 0.054 [mag]
---
Files that failed : []
DONE
Done - Time Taken: 87.5
Let's check the host-subtracted image and the transient location
<div class="alert alert-success">
<strong>Success!</strong> AT2018cow is now clearly seen with little/no contamination from any unwanted flux.
</div>
```python
fname = os.path.basename(fpath)
output_dir = dirpath+'_REDUCED/'+fname.replace('.fits','')
host_subtracted_fpath = os.path.join(output_dir,fname.replace('.fits','_APT_image_cutout_subtraction.fits'))
# We will plot out the image
import matplotlib.pyplot as plt
from astropy.visualization import ImageNormalize,SquaredStretch,ZScaleInterval
# autophot functions to find image data and header from fits files
from autophot.packages.functions import getimage
from autophot.packages.functions import getheader
# To retrieve the WCS information from this image
from astropy import wcs
from astropy.coordinates import SkyCoord
# image
data = getimage(host_subtracted_fpath)
# header
header = getheader(host_subtracted_fpath)
# Create an ImageNormalize object
vmin,vmax = (ZScaleInterval(nsamples = 1000)).get_limits(data)
# WCS information of image
w = wcs.WCS(header)
# get pixel coordinates of this source
c = SkyCoord(ra,dec , unit="deg")
x_pix,y_pix = w.all_world2pix(c.ra.degree, c.dec.degree, 1)
# plot image
fig = plt.figure(figsize = (10,6))
fig.suptitle('Host Subtraction using HOTPANTS')
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.imshow(data,
vmin = vmin,
vmax = vmax,
origin = 'lower',
cmap = 'viridis')
ax1.scatter(x_pix,y_pix,
marker = 'o',
facecolor = 'none',
edgecolor = 'r',
s = 50)
# Plot a close up cutout of the source too
cutout_size = 15
cutout = data[int(y_pix-cutout_size):int(y_pix+cutout_size),
int(x_pix-cutout_size):int(x_pix+cutout_size)]
ax2.imshow(cutout,
origin = 'lower',
cmap = 'viridis')
ax2.scatter(cutout_size,cutout_size,
marker = 'o',
facecolor = 'none',
edgecolor = 'r',
s = 50)
plt.show()
```

<h2 align="center">Using ZOGY with AutoPHoT</h2>
As an alternative to HOTPANTS, AutoPHoT is also setup to use <a href="https://arxiv.org/abs/1601.02655">ZOGY</a> (more specifically <a href="https://github.com/dguevel/PyZOGY">PyZOGY</a> )
```python
autophot_input['template_subtraction']['use_zogy'] = True
```
```python
run_automatic_autophot(autophot_input)
```
User instrument database: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/telescope.yml
Number of files: 1
1 telescope(s) detected - checking header keywords
File: transient_with_host_APT.fits - PID: 19211
Start Time: 2022-02-10 13:46:22.037261
_ _ ___ _ _ _____
/_\ _ _| |_ ___| _ \ || |__|_ _|
/ _ \ || | _/ _ \ _/ __ / _ \| |
/_/ \_\_,_|\__\___/_| |_||_\___/|_|
---------------------------------------
Automated Photometry of Transients
S. J. Brennan et al. 2021
Please provide feedback/bugs to:
Email: sean.brennan2@ucdconnect.ie
---------------------------------------
Directory of fits file: /Users/seanbrennan/Desktop/autophot_host_subtraction_example
Found Telescopes:
- Liverpool Telescope
-> Telescope check complete
Checking Filter keywords and database
-> Filter check complete
Checking Filter information for each image
Files removed - Wrong Image Type: 0
Files removed - No/Wrong filter(s): 0
Filters not included: []
Files removed: 0
+-----------+
|File: 1 / 1|
+-----------+
Filter keyoward used: FILTER
Telescope: Liverpool Telescope
Filter: g
MJD: 58346.923
Date of Observation : 2018-08-16
Read Noise: 0.0 [e^- /pixel]
GAIN: 1.0 [e^- /count]
Exposure time: 4 [s]
Detecting/removing cosmic ray sources
Starting Astroscrappy ...
Contaminated pixels with Cosmic rays removed: 296
Cosmic rays removed - image updated
Astrometry.net already excuted
Searching for FWHM
Using Gaussian Profile for fitting
+-------------------------------+
|Finding Full Width Half Maximum|
+-------------------------------+
Number of sources before cleaning [ 25.0 sigma ]: 25
Updating search FWHM value
Updated guess for FWHM: 4.9 pixels
Number of sources before cleaning [ 25.0 sigma ]: 84
Removed 6 sources near boundary
Removed 12 crowded sources
Fitting source for FWHM: 66/66
Removed 8 FWHM outliers
Removed 22 median outliers
Useable sources found [ 25 sigma ]: 66
Removes 0 sources within minimum seperation [ 24 pixel ]
Large error on FWHM - returning plots for user diagnostic
/Users/seanbrennan/miniconda3/envs/autophot/lib/python3.7/site-packages/matplotlib/axes/_base.py:2283: UserWarning: Warning: converting a masked element to nan.
xys = np.asarray(xys)
/Users/seanbrennan/miniconda3/envs/autophot/lib/python3.7/site-packages/matplotlib/axes/_base.py:2283: UserWarning: Warning: converting a masked element to nan.
xys = np.asarray(xys)
FWHM: 5.173 +/- 2.238 [ pixels ]
Seeing: 1.568 [ arcsec ]
Aperture size: 8.8 pixels
Aperture correction: -0.031 +/- 0.017 [ mag ]
Catalog found for target_ra_244_dec_22
Catalog: SDSS
File: target_ra_244_dec_22_r_0.25
Catalog length: 7958
Removed 7021 sources fainter than cutoff [20 mag]
Using Gaussian Profile for fitting
Catalog Length: 87
+----------------------------------------------+
|Searching for catalog for target_ra_244_dec_22|
+----------------------------------------------+
+---------------------------------+
|Matching catalog sources to image|
+---------------------------------+
Removed 595 sources too close to boundary or off image
Matching catalog to image: 83 / 87 :: Useful sources 82 / 87
Median offset: 2.1 [ pixels ] / 0.6 [ arcsec ]
Matching catalog to image: 84 / 87 :: Useful sources 83 / 87
Matching catalog to image: 85 / 87 :: Useful sources 84 / 87
Matching catalog to image: 86 / 87 :: Useful sources 85 / 87
Matching catalog to image: 87 / 87 :: Useful sources 86 / 87 .. done
Broken cutouts: 0
Not in correct location: 0
Not detected: 1
Saturated: 0
Error: 0
+-------------------------------------------+
|Building PSF model using stars in the field|
+-------------------------------------------+
Residual table updated: 1 / 10
SNR: 293 :: FWHM fitted 4.990
Residual table updated: 2 / 10
SNR: 174 :: FWHM fitted 4.961
Residual table updated: 3 / 10
SNR: 169 :: FWHM fitted 4.898
Residual table updated: 4 / 10
SNR: 168 :: FWHM fitted 4.947
Residual table updated: 5 / 10
SNR: 157 :: FWHM fitted 4.975
Residual table updated: 6 / 10
SNR: 135 :: FWHM fitted 4.917
Residual table updated: 7 / 10
SNR: 102 :: FWHM fitted 4.934
Residual table updated: 8 / 10
SNR: 99 :: FWHM fitted 4.900
Residual table updated: 9 / 10
SNR: 94 :: FWHM fitted 4.945
Residual table updated: 10 / 10
SNR: 93 :: FWHM fitted 4.929
PSF built using 10 sources
Unity PSF: 29.9 [counts]
Unity Residual table: 2.2 [counts]
Using PSF Photometry on Sequence Stars
Approx PSF mag -11.307 mag
PSF model saved as: /Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/PSF_model_transient_with_host_APT.fits
+-------------------+
|Measuring PSF model|
+-------------------+
+-----------------------------------+
|Fitting PSF to sources in the image|
+-----------------------------------+
Fitting PSF to source: 85 / 86
WARNING: Input data contains invalid values (NaNs or infs), which were automatically clipped. [astropy.stats.sigma_clipping]
Input data contains invalid values (NaNs or infs), which were automatically clipped.
Mean g-band zeropoint: 27.918 +/- 0.024
Fitting PSF to source: 86 / 86
+-----------------------+
|Finding Zeropoint value|
+-----------------------+
Checking for suitable catalog sources
Removed 84 sources lower than SNR of 10.0
Looking for User template in /Users/seanbrennan/Desktop/autophot_host_subtraction_example/templates
Template filepath: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/templates/gp_template/template_with_host.fits
Aligning via WCS with reproject_interp
Template smaller than image, cropping to exlcude zeros
Trimmed template shape:(1957 1938)
Trimmed image shape:(1957 1938)
Image subtracion
Performing image subtraction using PyZOGY
Using Image : /Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_image_cutout.fits
Using Image PSF: /Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/PSF_model_transient_with_host_APT.fits
Using Template : /Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_template.fits
Using Template PSF: /Users/seanbrennan/Desktop/autophot_host_subtraction_example/templates/gp_template/PSF_model_template_with_host.fits
Running Zogy...
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_image_cutout.fits:Shifted PSF from [21 21] to [0 0]
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_image_cutout.fits:Masked 0 saturated pixels
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_image_cutout.fits:Global median is 304.4674072265625
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_image_cutout.fits:Global standard deviation is 6.187968476674211
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_image_cutout.fits:Interpolated 0 pixels
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_template.fits:Shifted PSF from [20 20] to [0 0]
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_template.fits:Masked 0 saturated pixels
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_template.fits:Global median is 86.53247337502458
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_template.fits:Global standard deviation is 4.061979364198531
/Users/seanbrennan/Desktop/autophot_host_subtraction_example_REDUCED/transient_with_host/transient_with_host_APT_template.fits:Interpolated 0 pixels
:Shifted PSF from [21 21] to [0 0]
:Shifted PSF from [20 20] to [0 0]
:Global median is -0.00610497407509015
:Global standard deviation is 0.8263638435946065
:Global median is -0.007667348668316625
:Global standard deviation is 0.5505606113300967
Found 145 stars in common for gain matching
Iteration 1: Gain = 1.710520635967522
:Global median is -0.007730591046423366
:Global standard deviation is 0.6946035072966212
:Global median is -0.001868798491601073
:Global standard deviation is 0.42226852750789395
Found 143 stars in common for gain matching
Iteration 2: Gain = 1.707454252435168
:Global median is -0.00771393784169483
:Global standard deviation is 0.6951136710543224
:Global median is -0.0018804936532389163
:Global standard deviation is 0.4227267762264336
Found 143 stars in common for gain matching
Iteration 3: Gain = 1.707513426585089
:Global median is -0.007715347528622457
:Global standard deviation is 0.6951039927627732
:Global median is -0.0018803339963795165
:Global standard deviation is 0.4227172662969071
Found 143 stars in common for gain matching
Iteration 4: Gain = 1.7075135504067163
Fit done in 5 iterations
Global difference image zero point is 0.18370191843005979
Difference normalized to science
Subtraction saved as transient_with_host_APT_image_cutout_subtraction
Target photometry on subtracted image
Setting target background to zero in template subtraction image
+-----------------------------------------------+
|Performing PSF photometry on at target location|
+-----------------------------------------------+
Setting target background to zero in template subtraction image
Approximate Target SNR: 8.6
SNR = 9 - skipping limiting magnitude
Pixel Offset: 2.103
Limiting Magnitude: skipped
Target Detection probability: 99 %
Target flux: 482.365 +/- 16.357 [counts/s]
Noise: 453.909 [counts/s]
Target SNR: 8.648 +/- 0.119
Instrumental Magnitude: -6.708 +/- 0.062
Zeropoint: 27.918 +/- 0.024
Target Magnitude: 21.210 +/- 0.182
*** Transient well detected ***
Time Taken [ 19211 ]: 107s
Sucess: transient_with_host_APT.fits :: PID 19211
Error from multlocation [10] recovery: 0.050 [mag]
---
Files that failed : []
DONE
Done - Time Taken: 107.6
```python
fname = os.path.basename(fpath)
output_dir = dirpath+'_REDUCED/'+fname.replace('.fits','')
host_subtracted_fpath = os.path.join(output_dir,fname.replace('.fits','_APT_image_cutout_subtraction.fits'))
# We will plot out the image
import matplotlib.pyplot as plt
from astropy.visualization import ImageNormalize,SquaredStretch,ZScaleInterval
# autophot functions to find image data and header from fits files
from autophot.packages.functions import getimage
from autophot.packages.functions import getheader
# To retrieve the WCS information from this image
from astropy import wcs
from astropy.coordinates import SkyCoord
# image
data = getimage(host_subtracted_fpath)
# header
header = getheader(host_subtracted_fpath)
# Create an ImageNormalize object
vmin,vmax = (ZScaleInterval(nsamples = 1000)).get_limits(data)
# WCS information of image
w = wcs.WCS(header)
# get pixel coordinates of this source
c = SkyCoord(ra,dec , unit="deg")
x_pix,y_pix = w.all_world2pix(c.ra.degree, c.dec.degree, 1)
# plot image
fig = plt.figure(figsize = (10,6))
fig.suptitle('Host Subtraction using PyZOGY')
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.imshow(data,
vmin = vmin,
vmax = vmax,
origin = 'lower',
cmap = 'viridis')
ax1.scatter(x_pix,y_pix,
marker = 'o',
facecolor = 'none',
edgecolor = 'r',
s = 50)
# Plot a close up cutout of the source too
cutout_size = 15
cutout = data[int(y_pix-cutout_size):int(y_pix+cutout_size),
int(x_pix-cutout_size):int(x_pix+cutout_size)]
ax2.imshow(cutout,
origin = 'lower',
cmap = 'viridis')
ax2.scatter(cutout_size,cutout_size,
marker = 'o',
facecolor = 'none',
edgecolor = 'r',
s = 50)
plt.show()
```

<div class="alert alert-success">
<strong>Success!</strong> Again AT2018cow is well isolated.
</div>
```python
```
|
Astro-SeanREPO_NAMEautophotPATH_START.@autophot_extracted@autophot-master@example_notebooks@template_subtraction_and_WCS.ipynb@.PATH_END.py
|
{
"filename": "toas_controller.py",
"repo_name": "plazar/TOASTER",
"repo_path": "TOASTER_extracted/TOASTER-master/webtoaster/app/controllers/toas_controller.py",
"type": "Python"
}
|
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import *
from django.template import Context, RequestContext
from django.template import loader
from app.models import *
from httplib import HTTPResponse
from lib.toaster import Pulsars
from lib.toaster import Toas, ObsSystems
from django.core.context_processors import csrf
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ObjectDoesNotExist
import oauth2
from django.contrib.auth.decorators import login_required
from django import forms
from django.core.paginator import Paginator
class ParfileForm(forms.Form):
    """Minimal form holding the name of a par file."""
    name = forms.CharField()
def index(request):
    """List all TOAs known to TOASTER.

    Renders the ``toas/index.html`` template with the result of
    ``Toas.show()``.
    """
    toas = Toas.show()
    # NOTE: a leftover debug statement (`print toas != list()`) was removed;
    # it wrote to stdout on every request.
    t = loader.get_template('toas/index.html')
    c = RequestContext(request, {
        'toas': toas,
    })
    return HttpResponse(t.render(c))
def new(request):
    """Upload and parse a tim-file for a selected pulsar/observing system.

    GET renders the upload form; POST validates the selections, writes the
    uploaded tim-file to ``settings.TEMP_DIR`` and hands it to
    ``Toas.upload``.  Outcome messages are flashed in the session and the
    user is redirected accordingly.
    """
    import os
    pulsars = Pulsars.show()
    obs_sys = ObsSystems.show()
    if request.method == 'POST':
        # Basic form validation: both selects use "-1" as the "no choice"
        # sentinel value.
        if request.POST['pulsar_select'] == "-1":
            request.session['flash'] = { 'type': 'error', 'message': 'You must specify a Pulsar to parse Tim-file for.' }
            return redirect('/webtoaster/toas/new')
        if request.POST['obssys_select'] == "-1":
            request.session['flash'] = { 'type': 'error', 'message': 'You must specify an Observation System to parse Tim-file for.' }
            return redirect('/webtoaster/toas/new')
        if not request.FILES.get('timfile'):
            request.session['flash'] = { 'type': 'error', 'message': 'Seems you forgot to attach a Time-File to parse.' }
            return redirect('/webtoaster/toas/new')
    if request.method == 'POST' and request.FILES.get('timfile'):
        try:
            uf = request.FILES['timfile']
            temp_path = settings.TEMP_DIR
            fn = uf.name
            file_path = os.path.join( temp_path, fn )
            # Write the upload to a temp file; the context manager closes
            # the handle even if Toas.upload raises (the original leaked it).
            with open( file_path, 'w' ) as timfile:
                timfile.write( uf.read() )
            obs = obs_sys[int(request.POST['obssys_select'])]
            obs_args = {'obssystem_name': obs.name}
            Toas.upload( username=request.user.username, path=file_path, pulsar_id=request.POST['pulsar_select'], reader='tempo2', obssys=obs_args )
            request.session['flash'] = { 'type': 'success', 'message': 'Tim file was parsed.'}
        except Exception as e:
            request.session['flash'] = { 'type': 'error', 'message': 'There was an error parsing Tim file. Message: %s' % str(e) }
            return redirect('/webtoaster/toas/new')
        return redirect('/webtoaster/toas')
    t = loader.get_template('toas/new.html')
    c = RequestContext(request, {
        'pulsars': pulsars,
        'obs_sys': obs_sys
    })
    c.update(csrf(request))
    return HttpResponse(t.render(c))
def destroy(request, parfile_id):
    """Delete the par file with the given id and redirect.

    Flashes a success or error message in the session.  The redirect target
    is taken from the ``after`` query parameter when present, otherwise the
    par-file listing.

    NOTE(review): ``Parfiles`` is not imported by name in this module; it is
    presumably provided by the ``from app.models import *`` wildcard —
    verify, otherwise this view raises NameError at runtime.
    """
    parfile_id = int( parfile_id )
    try:
        response = Parfiles.destroy( parfile_id )
        request.session['flash'] = { 'type': 'success', 'message': 'Par file was deleted.'}
    except Exception as e:
        request.session['flash'] = { 'type': 'error', 'message': 'Toaster produced an error while deleting Par file. Message: %s' % str(e) }
    if request.GET.get('after'):
        redirect_url = request.GET.get('after')
    else:
        redirect_url = '/webtoaster/parfiles'
    return redirect( redirect_url )
def download(request, parfile_id):
    """Serve the par file with the given id as an attachment download.

    On failure to open the file, flashes an error in the session and
    redirects to the par-file listing.
    """
    import os
    parfile = Parfiles.show(parfile_id=int(parfile_id) )[0]
    file_name = parfile.filename
    try:
        # open() instead of the deprecated Python-2-only file() builtin;
        # narrowed the bare except to file-open errors so programming
        # errors are no longer silently swallowed.
        myfile = open(os.path.join(parfile.filepath, parfile.filename) )
    except (IOError, OSError):
        request.session['flash'] = { 'type': 'error', 'message': 'Could not open the requested file.' }
        return redirect( '/webtoaster/parfiles' )
    response = HttpResponse(myfile, content_type='application/par')
    response['Content-Disposition'] = "attachment; filename=%s" % file_name
    return response
def view(request, parfile_id):
    """Render the contents of the par file with the given id.

    Uses the ``parfiles/view.html`` template; on failure to open the file,
    flashes an error in the session and redirects to the par-file listing.
    """
    import os
    parfile = Parfiles.show(parfile_id=int(parfile_id) )[0]
    filename = parfile.filename
    try:
        # open() instead of the deprecated Python-2-only file() builtin;
        # narrowed the bare except to file-open errors.
        myfile = open(os.path.join(parfile.filepath, parfile.filename) )
    except (IOError, OSError):
        request.session['flash'] = { 'type': 'error', 'message': 'Could not open the requested file.' }
        return redirect( '/webtoaster/parfiles' )
    t = loader.get_template('parfiles/view.html')
    c = RequestContext(request, {
        'filename': filename,
        'filecontent': myfile.read()
    })
    c.update(csrf(request))
    return HttpResponse(t.render(c))
|
plazarREPO_NAMETOASTERPATH_START.@TOASTER_extracted@TOASTER-master@webtoaster@app@controllers@toas_controller.py@.PATH_END.py
|
{
"filename": "test_linearize.py",
"repo_name": "lsst/ip_isr",
"repo_path": "ip_isr_extracted/ip_isr-main/tests/test_linearize.py",
"type": "Python"
}
|
# This file is part of ip_isr.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import unittest
import logging
import numpy as np
import lsst.utils.tests
import lsst.utils
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.afw.cameraGeom as cameraGeom
from lsst.afw.geom.testUtils import BoxGrid
from lsst.afw.image.testUtils import makeRampImage
from lsst.ip.isr import applyLookupTable, Linearizer
def referenceImage(image, detector, linearityType, inputData, table=None):
    """Generate a reference linearization.

    Parameters
    ----------
    image: `lsst.afw.image.Image`
        Image to linearize.
    detector: `lsst.afw.cameraGeom.Detector`
        Detector this image is from.
    linearityType: `str`
        Type of linearity to apply.
    inputData: `numpy.array`
        An array of values for the linearity correction.
    table: `numpy.array`, optional
        An optional lookup table to use.

    Returns
    -------
    outImage: `lsst.afw.image.Image`
        The output linearized image.
    numOutOfRange: `int`
        The number of values that could not be linearized.

    Raises
    ------
    RuntimeError :
        Raised if an invalid linearityType is supplied.
    """
    numOutOfRange = 0
    for ampIdx, amp in enumerate(detector.getAmplifiers()):
        # Map the flat amplifier index onto the 2x3 grid used to lay out
        # the per-amp coefficients in the test fixtures.
        ampIdx = (ampIdx // 3, ampIdx % 3)
        bbox = amp.getBBox()
        # The view shares pixels with `image`, so the corrections below
        # modify the input image in place.
        imageView = image.Factory(image, bbox)
        if linearityType == 'Squared':
            sqCoeff = inputData[ampIdx]
            array = imageView.getArray()
            array[:] = array + sqCoeff*array**2
        elif linearityType == 'LookupTable':
            rowInd, colIndOffset = inputData[ampIdx]
            rowInd = int(rowInd)
            tableRow = table[rowInd, :]
            numOutOfRange += applyLookupTable(imageView, tableRow, colIndOffset)
        elif linearityType == 'Polynomial':
            coeffs = inputData[ampIdx]
            # Polynomial terms start at order 2; the linear term is the
            # identity and is not corrected.
            array = imageView.getArray()
            summation = np.zeros_like(array)
            for index, coeff in enumerate(coeffs):
                summation += coeff*np.power(array, (index + 2))
            array += summation
        elif linearityType == 'Spline':
            centers, values = np.split(inputData, 2)  # This uses the full data
            # Note that we are using the slow afw AKIMA_SPLINE interpolator
            # to offset the data, but using the equivalent but faster scipy
            # Akima1DInterpolator to correct the data.
            interp = afwMath.makeInterpolate(centers.tolist(), values.tolist(),
                                             afwMath.stringToInterpStyle('AKIMA_SPLINE'))
            array = imageView.getArray()
            delta = interp.interpolate(array.flatten())
            array -= np.array(delta).reshape(array.shape)
        else:
            raise RuntimeError(f"Unknown linearity: {linearityType}")
    return image, numOutOfRange
class LinearizeTestCase(lsst.utils.tests.TestCase):
    """Unit tests for linearizers.
    """
    def setUp(self):
        # This uses the same arbitrary values used in previous tests.
        self.bbox = lsst.geom.Box2I(lsst.geom.Point2I(-31, 22), lsst.geom.Extent2I(100, 85))
        self.ampArrangement = (2, 3)
        self.numAmps = self.ampArrangement[0]*self.ampArrangement[1]
        # Squared Parameters
        self.sqCoeffs = np.array([[0, 5e-6, 2.5e-5], [1e-5, 1.1e-6, 2.1e-6]], dtype=float)
        # Lookup Table Parameters
        self.colIndOffsets = np.array([[0, -50, 2.5], [37, 1, -3]], dtype=float)
        self.rowInds = np.array([[0, 1, 4], [3, 5, 2]])
        # This creates a 2x3 array (matching the amplifiers) that contains a
        # 2x1 array containing [colIndOffset_i, rowInd_i].
        self.lookupIndices = np.transpose(np.stack((self.rowInds, self.colIndOffsets), axis=0),
                                          axes=[1, 2, 0])
        # One table row per amplifier; values are arbitrary noise.
        self.table = np.random.normal(scale=55, size=(self.numAmps, 2500))
        self.assertLess(np.max(self.rowInds), self.numAmps, "error in test conditions; invalid row index")
        # Polynomial Parameters: small perturbation on Squared
        self.polyCoeffs = np.array([[[0, 1e-7], [5e-6, 1e-7], [2.5e-5, 1e-7]],
                                    [[1e-5, 1e-7], [1.1e-6, 1e-7], [2.1e-6, 1e-7]]], dtype=float)
        # Spline coefficients: should match a 1e-6 Squared solution
        self.splineCoeffs = np.array([0.0, 1000, 2000, 3000, 4000, 5000,
                                      0.0, 1.0, 4.0, 9.0, 16.0, 25.0])
        self.log = logging.getLogger("lsst.ip.isr.testLinearizer")
    def tearDown(self):
        # destroy LSST objects so memory test passes.
        self.bbox = None
        self.detector = None
    def compareResults(self, linearizedImage, linearizedOutOfRange, linearizedCount, linearizedAmps,
                       referenceImage, referenceOutOfRange, referenceCount, referenceAmps):
        """Run assert tests on results.

        Parameters
        ----------
        linearizedImage : `lsst.afw.image.Image`
            Corrected image.
        linearizedOutOfRange : `int`
            Number of measured out-of-range pixels.
        linearizedCount : `int`
            Number of amplifiers that should be linearized.
        linearizedAmps : `int`
            Total number of amplifiers checked.
        referenceImage : `lsst.afw.image.Image`
            Truth image to compare against.
        referenceOutOfRange : `int`
            Number of expected out-of-range-pixels.
        referenceCount : `int`
            Number of amplifiers that are expected to be linearized.
        referenceAmps : `int`
            Expected number of amplifiers checked.
        """
        self.assertImagesAlmostEqual(linearizedImage, referenceImage)
        self.assertEqual(linearizedOutOfRange, referenceOutOfRange)
        self.assertEqual(linearizedCount, referenceCount)
        self.assertEqual(linearizedAmps, referenceAmps)
    def testBasics(self):
        """Test basic linearization functionality.
        """
        for imageClass in (afwImage.ImageF, afwImage.ImageD):
            inImage = makeRampImage(bbox=self.bbox, start=-5, stop=2500, imageClass=imageClass)
            for linearityType in ('Squared', 'LookupTable', 'Polynomial', 'Spline'):
                detector = self.makeDetector(linearityType)
                table = None
                inputData = {'Squared': self.sqCoeffs,
                             'LookupTable': self.lookupIndices,
                             'Polynomial': self.polyCoeffs,
                             'Spline': self.splineCoeffs}[linearityType]
                if linearityType == 'LookupTable':
                    # Cast the table to the image dtype so the lookup
                    # correction operates without type promotion.
                    table = np.array(self.table, dtype=inImage.getArray().dtype)
                linearizer = Linearizer(detector=detector, table=table)
                # Factory(_, True) makes a deep copy, preserving inImage.
                measImage = inImage.Factory(inImage, True)
                result = linearizer.applyLinearity(measImage, detector=detector, log=self.log)
                refImage, refNumOutOfRange = referenceImage(inImage.Factory(inImage, True),
                                                            detector, linearityType, inputData, table)
                # This is necessary for the same tests to be used on
                # all types.  The first amplifier has 0.0 for the
                # coefficient, which should be tested (it has a log
                # message), but we are not linearizing an amplifier
                # with no correction, so it fails the test that
                # numLinearized == numAmps.
                zeroLinearity = 1 if linearityType == 'Squared' else 0
                self.compareResults(measImage, result.numOutOfRange, result.numLinearized, result.numAmps,
                                    refImage, refNumOutOfRange, self.numAmps - zeroLinearity, self.numAmps)
                # Test a stand alone linearizer.  This ignores validate checks.
                measImage = inImage.Factory(inImage, True)
                storedLinearizer = self.makeLinearizer(linearityType)
                storedResult = storedLinearizer.applyLinearity(measImage, log=self.log)
                self.compareResults(measImage, storedResult.numOutOfRange, storedResult.numLinearized,
                                    storedResult.numAmps,
                                    refImage, refNumOutOfRange, self.numAmps - zeroLinearity, self.numAmps)
                # "Save to yaml" and test again
                storedDict = storedLinearizer.toDict()
                storedLinearizer = Linearizer().fromDict(storedDict)
                measImage = inImage.Factory(inImage, True)
                storedResult = storedLinearizer.applyLinearity(measImage, log=self.log)
                self.compareResults(measImage, storedResult.numOutOfRange, storedResult.numLinearized,
                                    storedResult.numAmps,
                                    refImage, refNumOutOfRange, self.numAmps - zeroLinearity, self.numAmps)
                # "Save to fits" and test again
                storedTable = storedLinearizer.toTable()
                storedLinearizer = Linearizer().fromTable(storedTable)
                measImage = inImage.Factory(inImage, True)
                storedResult = storedLinearizer.applyLinearity(measImage, log=self.log)
                self.compareResults(measImage, storedResult.numOutOfRange, storedResult.numLinearized,
                                    storedResult.numAmps,
                                    refImage, refNumOutOfRange, self.numAmps - zeroLinearity, self.numAmps)
                # Use a gain and test again
                measImage = inImage.Factory(inImage, True)
                storedLinearizer = self.makeLinearizer(linearityType)
                # Unit gains must leave the result unchanged.
                gains = {key: 1.0 for key in storedLinearizer.linearityType.keys()}
                storedResult = storedLinearizer.applyLinearity(measImage, log=self.log, gains=gains)
                self.compareResults(measImage, storedResult.numOutOfRange, storedResult.numLinearized,
                                    storedResult.numAmps,
                                    refImage, refNumOutOfRange, self.numAmps - zeroLinearity, self.numAmps)
    def makeDetector(self, linearityType, bbox=None):
        """Generate a fake detector for the test.

        Parameters
        ----------
        linearityType : `str`
            Which linearity to assign to the detector's cameraGeom.
        bbox : `lsst.geom.Box2I`, optional
            Bounding box to use for the detector.

        Returns
        -------
        detBuilder : `lsst.afw.cameraGeom.Detector`
            The fake detector.
        """
        bbox = bbox if bbox is not None else self.bbox
        numAmps = self.ampArrangement
        detName = "det_a"
        detId = 1
        detSerial = "123"
        orientation = cameraGeom.Orientation()
        pixelSize = lsst.geom.Extent2D(1, 1)
        camBuilder = cameraGeom.Camera.Builder("fakeCam")
        detBuilder = camBuilder.add(detName, detId)
        detBuilder.setSerial(detSerial)
        detBuilder.setBBox(bbox)
        detBuilder.setOrientation(orientation)
        detBuilder.setPixelSize(pixelSize)
        # Split the detector bbox into one sub-box per amplifier.
        boxArr = BoxGrid(box=bbox, numColRow=numAmps)
        for i in range(numAmps[0]):
            for j in range(numAmps[1]):
                ampInfo = cameraGeom.Amplifier.Builder()
                ampInfo.setName("amp %d_%d" % (i + 1, j + 1))
                ampInfo.setBBox(boxArr[i, j])
                ampInfo.setLinearityType(linearityType)
                if linearityType == 'Squared':
                    ampInfo.setLinearityCoeffs([self.sqCoeffs[i, j]])
                elif linearityType == 'LookupTable':
                    # setLinearityCoeffs is picky about getting a mixed
                    # int/float list.
                    ampInfo.setLinearityCoeffs(np.array([self.rowInds[i, j], self.colIndOffsets[i, j],
                                                         0, 0], dtype=float))
                elif linearityType == 'Polynomial':
                    ampInfo.setLinearityCoeffs(self.polyCoeffs[i, j])
                elif linearityType == 'Spline':
                    ampInfo.setLinearityCoeffs(self.splineCoeffs)
                detBuilder.append(ampInfo)
        return detBuilder
    def makeLinearizer(self, linearityType, bbox=None):
        """Construct a linearizer with the test coefficients.

        Parameters
        ----------
        linearityType : `str`
            Type of linearity to use.  The coefficients are set by the
            setUp method.
        bbox : `lsst.geom.Box2I`
            Bounding box for the full detector.  Used to assign
            amp-based bounding boxes.

        Returns
        -------
        linearizer : `lsst.ip.isr.Linearizer`
            A fully constructed, persistable linearizer.
        """
        bbox = bbox if bbox is not None else self.bbox
        numAmps = self.ampArrangement
        boxArr = BoxGrid(box=bbox, numColRow=numAmps)
        linearizer = Linearizer()
        linearizer.hasLinearity = True
        for i in range(numAmps[0]):
            for j in range(numAmps[1]):
                ampName = f"amp {i+1}_{j+1}"
                ampBox = boxArr[i, j]
                linearizer.ampNames.append(ampName)
                if linearityType == 'Squared':
                    linearizer.linearityCoeffs[ampName] = np.array([self.sqCoeffs[i, j]])
                elif linearityType == 'LookupTable':
                    linearizer.linearityCoeffs[ampName] = np.array(self.lookupIndices[i, j])
                    linearizer.tableData = self.table
                elif linearityType == 'Polynomial':
                    linearizer.linearityCoeffs[ampName] = np.array(self.polyCoeffs[i, j])
                elif linearityType == 'Spline':
                    linearizer.linearityCoeffs[ampName] = np.array(self.splineCoeffs)
                linearizer.linearityType[ampName] = linearityType
                linearizer.linearityBBox[ampName] = ampBox
                # Fit metadata fields are unused by these tests but must be
                # populated for the linearizer to round-trip persistence.
                linearizer.fitParams[ampName] = np.array([])
                linearizer.fitParamsErr[ampName] = np.array([])
                linearizer.fitChiSq[ampName] = np.nan
                linearizer.fitResiduals[ampName] = np.array([])
                linearizer.fitResidualsSigmaMad[ampName] = np.nan
                linearizer.linearFit[ampName] = np.array([])
                linearizer.linearityTurnoff[ampName] = np.nan
                linearizer.linearityMaxSignal[ampName] = np.nan
        return linearizer
class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Check for resource/memory leaks (behavior inherited from base class)."""
    pass
def setup_module(module):
    """Initialize the LSST test framework when run under pytest."""
    lsst.utils.tests.init()
# Allow running this test file directly as a script.
if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()
|
lsstREPO_NAMEip_isrPATH_START.@ip_isr_extracted@ip_isr-main@tests@test_linearize.py@.PATH_END.py
|
{
"filename": "mips.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/lexers/mips.py",
"type": "Python"
}
|
"""
pygments.lexers.mips
~~~~~~~~~~~~~~~~~~~~
Lexers for MIPS assembly.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Whitespace, Comment, String, Keyword, Name, Text
__all__ = ["MIPSLexer"]
class MIPSLexer(RegexLexer):
    """
    A MIPS Assembly Lexer.

    Based on the Emacs major mode by hlissner:
    https://github.com/hlissner/emacs-mips-mode
    """
    name = 'MIPS'
    aliases = ['mips']
    version_added = ''
    # TODO: add '*.s' and '*.asm', which will require designing an analyse_text
    # method for this lexer and refactoring those from Gas and Nasm in order to
    # have relatively reliable detection
    filenames = ['*.mips', '*.MIPS']
    url = 'https://mips.com'
    # Real hardware instructions, highlighted as Keyword.
    keywords = [
        # Arithmetic instructions
        "add", "sub", "subu", "addi", "subi", "addu", "addiu",
        # Multiplication/division
        "mul", "mult", "multu", "mulu", "madd", "maddu", "msub", "msubu", "div", "divu",
        # Bitwise operations
        "and", "or", "nor", "xor", "andi", "ori", "xori", "clo", "clz",
        # Shifts
        "sll", "srl", "sllv", "srlv", "sra", "srav",
        # Comparisons
        "slt", "sltu", "slti", "sltiu",
        # Move data
        "mfhi", "mthi", "mflo", "mtlo", "movn", "movz", "movf", "movt",
        # Jump
        "j", "jal", "jalr", "jr",
        # branch
        "bc1f", "bc1t", "beq", "bgez", "bgezal", "bgtz", "blez", "bltzal", "bltz", "bne",
        # Load
        "lui", "lb", "lbu", "lh", "lhu", "lw", "lwcl", "lwl", "lwr",
        # Store
        "sb", "sh", "sw", "swl", "swr",  # coproc: swc1 sdc1
        # Concurrent load/store
        "ll", "sc",
        # Trap handling
        "teq", "teqi", "tne", "tneqi", "tge", "tgeu", "tgei", "tgeiu", "tlt", "tltu", "tlti",
        "tltiu",
        # Exception / Interrupt
        "eret", "break", "bop", "syscall",
        # --- Floats -----------------------------------------------------
        # Arithmetic
        "add.s", "add.d", "sub.s", "sub.d", "mul.s", "mul.d", "div.s", "div.d", "neg.d",
        "neg.s",
        # Comparison
        "c.e.d", "c.e.s", "c.le.d", "c.le.s", "c.lt.s", "c.lt.d",  # "c.gt.s", "c.gt.d",
        "madd.s", "madd.d", "msub.s", "msub.d",
        # Move Floats
        "mov.d", "move.s", "movf.d", "movf.s", "movt.d", "movt.s", "movn.d", "movn.s",
        "movnzd", "movz.s", "movz.d",
        # Conversion
        "cvt.d.s", "cvt.d.w", "cvt.s.d", "cvt.s.w", "cvt.w.d", "cvt.w.s", "trunc.w.d",
        "trunc.w.s",
        # Math
        "abs.s", "abs.d", "sqrt.s", "sqrt.d", "ceil.w.d", "ceil.w.s", "floor.w.d",
        "floor.w.s", "round.w.d", "round.w.s",
    ]
    # Assembler-provided pseudo-instructions, highlighted as Name.Variable.
    pseudoinstructions = [
        # Arithmetic & logical
        "rem", "remu", "mulo", "mulou", "abs", "neg", "negu", "not", "rol", "ror",
        # branches
        "b", "beqz", "bge", "bgeu", "bgt", "bgtu", "ble", "bleu", "blt", "bltu", "bnez",
        # loads
        "la", "li", "ld", "ulh", "ulhu", "ulw",
        # Store
        "sd", "ush", "usw",
        # move
        "move",  # coproc: "mfc1.d",
        # comparisons
        "sgt", "sgtu", "sge", "sgeu", "sle", "sleu", "sne", "seq",
        # --- Floats -----------------------------------------------------
        # load-store
        "l.d", "l.s", "s.d", "s.s",
    ]
    directives = [
        ".align", ".ascii", ".asciiz", ".byte", ".data", ".double", ".extern", ".float",
        ".globl", ".half", ".kdata", ".ktext", ".space", ".text", ".word",
    ]
    deprecated = [
        "beql", "bnel", "bgtzl", "bgezl", "bltzl", "blezl", "bltzall", "bgezall",
    ]
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#.*', Comment),
            (r'"', String, 'string'),
            (r'-?[0-9]+?', Keyword.Constant),
            (r'\w*:', Name.Function),
            (words(deprecated, suffix=r'\b'), Keyword.Pseudo),  # need warning face
            (words(pseudoinstructions, suffix=r'\b'), Name.Variable),
            (words(keywords, suffix=r'\b'), Keyword),
            # coprocessor move/load/store mnemonics, e.g. mfc0, lwc1, sdc1
            # (with an optional ".d" suffix)
            (r'[slm][ftwd]c[0-9]([.]d)?', Keyword),
            (r'\$(f?[0-2][0-9]|f?3[01]|[ft]?[0-9]|[vk][01]|a[0-3]|s[0-7]|[gsf]p|ra|at|zero)',
             Keyword.Type),
            (words(directives, suffix=r'\b'), Name.Entity),  # Preprocessor?
            (r':|,|;|\{|\}|=>|@|\$|=', Name.Builtin),
            (r'\w+', Text),
            (r'.', Text),
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),
            (r'[^\\"]+', String),
        ],
    }
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@lexers@mips.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "desihub/desitarget",
"repo_path": "desitarget_extracted/desitarget-main/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
#
# Standard imports
#
import glob
import os
import sys
from setuptools import setup, find_packages
#
# DESI support code.
#
from desiutil.setup import DesiTest, DesiVersion, get_version
#
# Begin setup
#
setup_keywords = dict()
#
# THESE SETTINGS NEED TO BE CHANGED FOR EVERY PRODUCT.
#
setup_keywords['name'] = 'desitarget'
setup_keywords['description'] = 'DESI targeting'
setup_keywords['author'] = 'DESI Collaboration'
setup_keywords['author_email'] = 'desi-data@desi.lbl.gov'
setup_keywords['license'] = 'BSD'
setup_keywords['url'] = 'https://github.com/desihub/desitarget'
#
# END OF SETTINGS THAT NEED TO BE CHANGED.
#
setup_keywords['version'] = get_version(setup_keywords['name'])
#
# Use README.rst as long_description.
#
setup_keywords['long_description'] = ''
if os.path.exists('README.rst'):
    with open('README.rst') as readme:
        setup_keywords['long_description'] = readme.read()
#
# Set other keywords for the setup function.  These are automated, & should
# be left alone unless you are an expert.
#
# Treat everything in bin/ except *.rst as a script to be installed.
#
if os.path.isdir('bin'):
    setup_keywords['scripts'] = [fname for fname in glob.glob(os.path.join('bin', '*'))
                                 if not os.path.basename(fname).endswith('.rst')]
setup_keywords['provides'] = [setup_keywords['name']]
setup_keywords['requires'] = ['Python (>2.7.0)']
# setup_keywords['install_requires'] = ['Python (>2.7.0)']
setup_keywords['zip_safe'] = False
# NOTE(review): the use_2to3 keyword was removed in setuptools>=58 — confirm
# the build environment pins an older setuptools, or drop this line.
setup_keywords['use_2to3'] = False
setup_keywords['packages'] = find_packages('py')
setup_keywords['package_dir'] = {'':'py'}
setup_keywords['cmdclass'] = {'version': DesiVersion,'test': DesiTest}
setup_keywords['test_suite']='{name}.test.{name}_test_suite.{name}_test_suite'.format(**setup_keywords)
#
# Autogenerate command-line scripts.
#
# setup_keywords['entry_points'] = {'console_scripts':['install_desimodel_data = desimodel.install:main']}
#
# Add internal data directories
#
# The mock entry walks py/desitarget/mock/data and registers every
# subdirectory's contents relative to the package root.
setup_keywords['package_data'] = {'desitarget': ['data/*',],
                                  'desitarget.cmx': ['data/*',],
                                  'desitarget.sv1': ['data/*',],
                                  'desitarget.sv2': ['data/*',],
                                  'desitarget.sv3': ['data/*',],
                                  'desitarget.test': ['t/*',],
                                  'desitarget.mock': [os.path.relpath(_,'py/desitarget/mock') for _ in [os.path.join(_[0],'*') for _ in os.walk('py/desitarget/mock/data')]],
                                  'desitarget.streams.gaia_dr3_parallax_zero_point': ['coefficients/*',],
                                  }
#
# Run setup command.
#
setup(**setup_keywords)
|
desihubREPO_NAMEdesitargetPATH_START.@desitarget_extracted@desitarget-main@setup.py@.PATH_END.py
|
{
"filename": "_texttemplatesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter3d/_texttemplatesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TexttemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``texttemplatesrc`` property of ``scatter3d`` traces."""

    def __init__(self, plotly_name="texttemplatesrc", parent_name="scatter3d", **kwargs):
        # Pull the validator metadata out of kwargs first so callers can
        # override the defaults; anything left is forwarded unchanged.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(TexttemplatesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter3d@_texttemplatesrc.py@.PATH_END.py
|
{
"filename": "_scattergl.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/template/data/_scattergl.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ScatterglValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for the ``scattergl`` entries of ``layout.template.data``."""

    def __init__(self, plotly_name="scattergl", parent_name="layout.template.data", **kwargs):
        # Resolve overridable metadata before delegating to the base class;
        # the (empty) data_docs string is part of the generated API surface.
        data_class = kwargs.pop("data_class_str", "Scattergl")
        docs = kwargs.pop(
            "data_docs",
            """
""",
        )
        super(ScatterglValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@template@data@_scattergl.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.